├── .gitignore ├── CHANGELOG.md ├── LICENSE ├── Makefile ├── README.rst ├── RELEASE.md ├── bin └── test-empire ├── circle.yml ├── conf ├── empire │ ├── README.md │ ├── empire.yaml │ └── example.env ├── example.yaml ├── rds │ ├── README.rst │ ├── aurora │ │ ├── aurora.env │ │ └── aurora.yaml │ ├── mysql.env │ ├── mysql.yaml │ ├── postgres.env │ └── postgres.yaml └── stage.env ├── setup.cfg ├── setup.py ├── stacker_blueprints ├── __init__.py ├── asg.py ├── aws_lambda.py ├── bastion.py ├── cloudwatch_logs.py ├── dynamodb.py ├── ec2.py ├── ecr.py ├── efs.py ├── elasticache │ ├── __init__.py │ ├── base.py │ └── redis.py ├── elasticsearch.py ├── empire │ ├── __init__.py │ ├── base.py │ ├── controller.py │ ├── daemon.py │ ├── minion.py │ └── policies.py ├── firehose │ ├── __init__.py │ ├── base.py │ ├── redshift.py │ └── s3.py ├── generic.py ├── iam_roles.py ├── kms.py ├── policies.py ├── postgres.py ├── rds │ ├── __init__.py │ ├── aurora │ │ ├── __init__.py │ │ └── base.py │ ├── base.py │ ├── mysql.py │ └── postgres.py ├── route53.py ├── s3.py ├── security_rules.py ├── sns.py ├── sqs.py ├── util.py ├── vpc.py └── vpc_flow_logs.py └── tests ├── __init__.py ├── fixtures └── blueprints │ ├── buckets.json │ ├── dynamodb_autoscaling.json │ ├── dynamodb_table.json │ ├── ec2_instances.json │ ├── kms_key_a.json │ ├── kms_key_b.json │ ├── kms_key_c.json │ ├── queues.json │ ├── route53_dnsrecords.json │ ├── route53_dnsrecords_zone_name.json │ ├── route53_record_set_groups.json │ ├── s3_static_website.json │ ├── test_asg_flexible_autoscaling_group.json │ ├── test_aws_lambda_Function.json │ ├── test_aws_lambda_FunctionScheduler.json │ ├── test_aws_lambda_Function_event_source_mapping.json │ ├── test_aws_lambda_Function_extended_statements.json │ ├── test_aws_lambda_Function_external_role.json │ ├── test_aws_lambda_Function_with_alias_full_name_arn.json │ ├── test_aws_lambda_Function_with_alias_partial_name.json │ ├── test_aws_lambda_Function_with_alias_provided_version.json │ ├── 
test_aws_lambda_Function_with_vpc_config.json │ ├── test_cloudwatch_logs_subscription_filters.json │ ├── test_efs_ElasticFileSystem.json │ ├── test_generic_GenericResourceCreator.json │ ├── test_vpc2_with_internal_zone.json │ ├── test_vpc2_without_internal_zone.json │ └── topics.json ├── test_asg.py ├── test_aws_lambda.py ├── test_cloudwatch_logs.py ├── test_dynamodb.py ├── test_ec2.py ├── test_efs.py ├── test_generic.py ├── test_kms.py ├── test_route53.py ├── test_s3.py ├── test_sns.py ├── test_sqs.py └── test_vpc.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 2 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 3 | 4 | # User-specific stuff: 5 | .idea/**/workspace.xml 6 | .idea/**/tasks.xml 7 | .idea/dictionaries 8 | 9 | # Sensitive or high-churn files: 10 | .idea/**/dataSources/ 11 | .idea/**/dataSources.ids 12 | .idea/**/dataSources.xml 13 | .idea/**/dataSources.local.xml 14 | .idea/**/sqlDataSources.xml 15 | .idea/**/dynamic.xml 16 | .idea/**/uiDesigner.xml 17 | 18 | # Gradle: 19 | .idea/**/gradle.xml 20 | .idea/**/libraries 21 | 22 | # CMake 23 | cmake-build-debug/ 24 | 25 | # Mongo Explorer plugin: 26 | .idea/**/mongoSettings.xml 27 | 28 | ## File-based project format: 29 | *.iws 30 | 31 | ## Plugin-specific files: 32 | 33 | # IntelliJ 34 | out/ 35 | 36 | # mpeltonen/sbt-idea plugin 37 | .idea_modules/ 38 | 39 | # JIRA plugin 40 | atlassian-ide-plugin.xml 41 | 42 | # Cursive Clojure plugin 43 | .idea/replstate.xml 44 | 45 | # Crashlytics plugin (for Android Studio and IntelliJ) 46 | com_crashlytics_export_strings.xml 47 | crashlytics.properties 48 | crashlytics-build.properties 49 | fabric.properties 50 | 51 | # Compiled source # 52 | ################### 53 | *.com 54 | *.class 55 | *.dll 56 | *.exe 57 | *.o 58 | *.so 59 | 60 | # Packages # 61 | ############ 62 | # 
it's better to unpack these files and commit the raw source 63 | # git has its own built in compression methods 64 | *.7z 65 | *.dmg 66 | *.gz 67 | *.iso 68 | *.jar 69 | *.rar 70 | *.tar 71 | *.zip 72 | 73 | # Logs and databases # 74 | ###################### 75 | *.log 76 | *.sql 77 | *.sqlite 78 | 79 | # OS generated files # 80 | ###################### 81 | .DS_Store* 82 | ehthumbs.db 83 | Icon? 84 | Thumbs.db 85 | 86 | # Vagrant 87 | .vagrant 88 | 89 | # Editor crap 90 | *.sw* 91 | *~ 92 | 93 | # Byte-compiled python 94 | *.pyc 95 | 96 | # Package directory 97 | build/ 98 | 99 | # Build object file directory 100 | objdir/ 101 | dist/ 102 | *.egg-info 103 | .eggs/ 104 | 105 | # nosetest --with-coverage dumps these in CWD 106 | .coverage 107 | 108 | Vagrantfile 109 | 110 | vm_setup.sh 111 | 112 | # Ignore development conf/env files 113 | dev.yaml 114 | dev.env 115 | 116 | # Mac 117 | .DS_Store 118 | 119 | # Ignore blueprint fixture results 120 | *.json-result 121 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | ## 1.0.8 (2019-07-25) 2 | 3 | - Update Redis settings validation 4 | 5 | ## 1.0.7 (2018-02-11) 6 | 7 | - FlexibleAutoScalingGroup blueprint [GH-156] 8 | - Update s3 policies with Remind in-house copies [GH-158] 9 | - Setup correct BucketPolicy for public websites in s3 [GH-159] 10 | 11 | ## 1.0.6 (2017-12-08) 12 | 13 | - Add s3:PutObjectVersionAcl action to s3 policies [GH-150] 14 | - Fix sns.py trying to create an sqs policy for non-sqs-type topics [GH-151] 15 | - Fix default for topics with no subscriptions [GH-153] 16 | 17 | ## 1.0.5 (2017-11-01) 18 | 19 | This is a minor release to deal with dependency conflicts between 20 | stacker & stacker\_blueprints, specifically around troposphere & awacs. 
21 | 22 | ## 1.0.4 (2017-10-30) 23 | 24 | - Convert SQS Queue blueprint to TroposphereType [GH-132] 25 | - Allow overriding of Code object in aws\_lambda.Function subclasses [GH-133] 26 | - FunctionScheduler (Cloudwatch Events based) blueprint [GH-134] 27 | - route53 VPC private hosted zones [GH-135] 28 | - Add lambda external role support [GH-136] 29 | - Add lambda version support [GH-138] 30 | - Add lambda alias support [GH-139] 31 | - Add stream spec for aws lambda [GH-146] 32 | 33 | ## 1.0.3 (2017-08-24) 34 | 35 | - New iam Roles blueprint [GH-106] 36 | - Add bastion security group output [GH-113] 37 | - Add PutObjectACL action [GH-114] 38 | - Add default db name in RDS [GH-115] 39 | - Fix Elasticache subnets [GH-116] 40 | - Fix issue w/ SnapshotRetnetionLimit [GH-117] 41 | - Add FifoQueue parameter to sqs.Queues [GH-118] 42 | - KMS refactor [GH-119] 43 | - Route53 refactor [GH-120] 44 | - Add ELB hostedZoneId if missing for Alias targets in Route53 [GH-121] 45 | - Generic Resource Creator [GH-122] 46 | - DNS Hosted Zone Comments in Route53 [GH-123] 47 | - Skip record\_set if Enabled key is False [GH-126] 48 | - Make A & CNAME share the same label [GH-127] 49 | 50 | ## 1.0.2 (2017-05-18) 51 | 52 | - Basic VPC Flow Logs blueprint [GH-94] 53 | - Basic KMS Key blueprint [GH-95] 54 | - Updated Firehose blueprints [GH-96] 55 | - Add website url to s3 bucket [GH-97] 56 | - Cloudwatch Log filters blueprint [GH-98] 57 | - Simple Lambda Function blueprint [GH-99] 58 | - Route53 recordset blueprint [GH-102] 59 | - Minor fixes for Aurora blueprints [GH-111] 60 | 61 | ## 1.0.1 (2017-04-13) 62 | 63 | - Update examples to use explicit output lookups [GH-82] 64 | - Fix vpc parameters [GH-83] 65 | - Fix elasticsearch replication group [GH-84] 66 | - Add s3 policies [GH-85] 67 | - Fix bad empire merge [GH-86] 68 | - Remove repeated values [GH-87] 69 | - Fix elasticache template [GH-90] 70 | - Change missed Refs [GH-92] 71 | 72 | ## 1.0.0 (2017-03-04) 73 | 74 | - New low-level 
security group rule blueprint [GH-56] 75 | - Update firehose blueprint to fully use variables [GH-57] 76 | - convert dynamodb to TroposphereType [GH-58] 77 | - Update elasticache to fully use variables [GH-59] 78 | - Update VPC to fully use variables [GH-60] 79 | - give empire daemon access to ECR [GH-70] 80 | - simple ECS repo blueprint [GH-72] 81 | - update RDS to fully use variables [GH-76] 82 | - Initial aurora blueprints [GH-77] 83 | - s3 blueprint [GH-80] 84 | 85 | ## 0.7.6 (2017-01-19) 86 | 87 | - Fix empire minion ECR access [GH-70] 88 | - Fix SQS Queue Policy issue w/ multiple SQS queues [GH-71] 89 | - Simple ECR repository blueprint [GH-72] 90 | 91 | ## 0.7.4 (2017-01-06) 92 | 93 | - Remove version and family checking from RDS [GH-67] 94 | 95 | ## 0.7.3 (2016-11-28) 96 | 97 | - Add low-level security group rule blueprint [GH-56] 98 | - Relax troposphere dependency [GH-64] 99 | 100 | ## 0.7.2 (2016-10-19) 101 | 102 | - Add Elasticsearch Domain [GH-47] 103 | - Fix namespace issue in firehose blueprint [GH-48] 104 | - Setup flake8 in CI, cleanup bad pep8 blueprnts [GH-50] 105 | - Update empire blueprints to empire 0.11.0 & fix various bugs [GH-51] 106 | 107 | ## 0.7.1 (2016-09-27) 108 | 109 | - Fix typo in RDS base blueprint introduced in GH-29 [GH-44] 110 | 111 | ## 0.7.0 (2016-09-23) 112 | 113 | This is the first release to include blueprints with the new Blueprint Variables 114 | concept introduced in stacker 0.8. 
115 | 116 | - output EmpireMinionRole [GH-18] 117 | - allow users & groups for firehose [GH-19] 118 | - KMS integration for firehose [GH-20] 119 | - Update empire stacks to use Empire Daemon [GH-22] 120 | - Add test helper for empire stacks [GH-26] 121 | - Allow use of existing security group with RDS [GH-29] 122 | - Move to compatible release versions for all dependencies [GH-30] 123 | - Add SNS, SQS, DynamoDB Blueprints [GH-43] 124 | 125 | ## 0.6.5 (2016-05-31) 126 | 127 | - Fix issue w/ firehose support relying on unreleased awacs features 128 | 129 | ## 0.6.4 (2016-05-29) 130 | 131 | - Make internal zone first in DNS in VPC blueprints [#7] 132 | - Add support for NAT Gateways [#10] 133 | - Add stack to help creating firehose delivery streams [#16] 134 | 135 | ## 0.6.3 (2016-05-16) 136 | 137 | - Add support for ACM certificates 138 | - Add new RDS db versions 139 | 140 | ## 0.6.2 (2016-02-11) 141 | 142 | - Update dependency to include new compatible stacker release 0.6.1 143 | 144 | ## 0.6.1 (2016-01-24) 145 | 146 | - Update empire blueprints & configs for empire 0.10.0 [GH-6] 147 | 148 | ## 0.6.0 (2016-01-07) 149 | 150 | - Pull in recent ASG changes that were merged [GH-2] 151 | - Initial elasticache blueprints [GH-3] 152 | - Fix blank env string change [GH-4] 153 | 154 | ## 0.5.4 (2015-12-08) 155 | 156 | - Initial release 157 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 2-Clause License 2 | 3 | Copyright (c) 2015, Remind101, Inc. 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. 
Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 17 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 19 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 20 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 22 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 23 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 24 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | test: 2 | flake8 stacker_blueprints 3 | python setup.py test 4 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | DEPRECATED 2 | ================== 3 | 4 | This repository has been moved to 5 | https://github.com/cloudtools/stacker_blueprints and is only kept here for 6 | archival purposes. Please submit all issues & pull requests to the new repo. 7 | 8 | Thanks! 9 | 10 | stacker_blueprints 11 | ================== 12 | 13 | An attempt at a common Blueprint library for use with `stacker `_. 14 | 15 | If you're new to stacker you may use `stacker_cookiecutter `_ to setup your project. 
16 | -------------------------------------------------------------------------------- /RELEASE.md: -------------------------------------------------------------------------------- 1 | # Steps to release a new version 2 | 3 | ## Preparing for the release 4 | 5 | - Check out a branch named for the version: `git checkout -b release-1.1.1` 6 | - Change version in setup.py and stacker\_blueprints/\_\_init\_\_.py 7 | - Update CHANGELOG.md with changes made since last release (see below for helpful 8 | command) 9 | - add changed files: `git add setup.py stacker_blueprints/\_\_init\_\_.py CHANGELOG.md` 10 | - Commit changes: `git commit -m "Release 1.1.1"` 11 | - Create a signed tag: `git tag --sign -m "Release 1.1.1" 1.1.1` 12 | - Push branch up to git: `git push -u origin release-1.1.1` 13 | - Open a PR for the release, ensure that tests pass 14 | 15 | ## Releasing 16 | 17 | - Push tag: `git push --tags` 18 | - Merge PR into master, checkout master locally: `git checkout master; git pull` 19 | - Create PyPI release: `python setup.py sdist upload --sign` 20 | - Update github release page: https://github.com/remind101/stacker\_blueprints/releases 21 | - use the contents of the latest CHANGELOG entry for the body. 22 | 23 | # Helper to create CHANGELOG entries 24 | git log --reverse --pretty=format:"%s" | tail -100 | sed 's/^/- /' 25 | -------------------------------------------------------------------------------- /bin/test-empire: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script will read any values defined within a dev.env file and export 4 | # them as `--env` command line arguments when running stacker commands. This is 5 | # useful while developing the example empire blueprint because it allows us to 6 | # have an example.env file with placeholders and comments that will be filled 7 | # in with the developer's values. 
8 | 9 | CMD="stacker $1 conf/empire/example.env conf/empire/empire.yaml ${@:2}" 10 | while read -r line || [[ -n "$line" ]] 11 | do 12 | CMD="$CMD --env $line" 13 | done < "dev.env" 14 | 15 | echo $CMD 16 | $CMD 17 | -------------------------------------------------------------------------------- /circle.yml: -------------------------------------------------------------------------------- 1 | machine: 2 | timezone: America/Los_Angeles 3 | 4 | dependencies: 5 | pre: 6 | - pip install flake8 7 | 8 | test: 9 | override: 10 | - make test 11 | -------------------------------------------------------------------------------- /conf/empire/README.md: -------------------------------------------------------------------------------- 1 | # 'production' Empire example 2 | 3 | This is meant to be a base guideline for building a production ready 4 | [Empire][Empire] cluster. 5 | 6 | First copy the `example.env` & `empire.yaml` files somewhere for you to modify. You 7 | should not use the one in these directories directly for any real use as they may 8 | change in the future, which could cause issues for your existing deployment. 9 | 10 | In order to launch it, you'll want to modify the 11 | [example.env][example.env] to fit your needs. It's worth reading through the 12 | [example.env][example.env] & [empire.yaml][empire.yaml] for the comments that 13 | detail how things will be built. Once the env is to your liking, you can launch 14 | the environment by running the following command: 15 | 16 | ``` 17 | stacker build -r conf/empire/example.env conf/empire/empire.yaml 18 | 19 | # Example: 20 | stacker build -r us-east-1 conf/empire/example.env conf/empire/empire.yaml 21 | ``` 22 | 23 | Right now it takes around 20-30 minutes to finish bringing up the entire 24 | environment due largely to how long it takes RDS to build the Empire database. 
25 | 26 | Rather than use the ECS Container AMI that Amazon provides, the Empire team 27 | has built their own [Empire AMI][empire_ami] based on Ubuntu 14.04. 28 | 29 | # Security 30 | 31 | These blueprints & stack definitions assume a base level of security, but could 32 | likely be tightened up quite a bit more to suit your needs. Some basics: 33 | 34 | - SSL on the Empire API ELB 35 | - All hosts except NAT & bastion hosts in private subnets with no public 36 | addresses 37 | - Bastion hosts (ssh) and Empire API ELB (https) access are firewalled to a 38 | single trusted CIDR range 39 | - Empire Minions & Controllers have no direct network access to each other 40 | - The Empire database is in the private VPC and can only be accessed by the 41 | Empire Controller hosts. 42 | - Github authentication is setup on the Empire API (provided you give all of 43 | the necessary variables for github in the environment) 44 | 45 | That said - if you see something that we missed, please let me know! 46 | 47 | [Empire]: https://github.com/remind101/empire/ 48 | [example.env]: https://github.com/remind101/stacker_blueprints/blob/master/conf/empire/example.env 49 | [empire.yaml]: https://github.com/remind101/stacker_blueprints/blob/master/conf/empire/empire.yaml 50 | [empire_ami]: https://github.com/remind101/empire_ami 51 | [stacker_blueprints]: https://github.com/remind101/stacker_blueprints 52 | -------------------------------------------------------------------------------- /conf/empire/example.env: -------------------------------------------------------------------------------- 1 | # azcount is the # of AvailabilityZones to attempt to build in. You 2 | # should build in as many as you can afford in order to provide for 3 | # better fault tolerance. Note: Since this is all done in a VPC, you 4 | # need to find out how many AZs can handle VPC subnets for the 5 | # region you are building in. 
As of this writing, here are the max 6 | # allowed azcount's for each zone: 7 | # us-east-1: 4, us-west-1: 2, us-west-2: 3, eu-west-1: 3 8 | # Note: The minimum allowed azcount is 2. 9 | azcount: 2 10 | 11 | # namespace is a unique name that the stacks will be built under. This value 12 | # will be used to prefix the CloudFormation stack names as well as the s3 13 | # bucket that contains revisions of the stacker templates. This is the only 14 | # required environment variable. 15 | namespace: 16 | 17 | # An external domain where empire. CNAME will be created 18 | external_domain: 19 | ssh_key_name: 20 | # Only clients from this IP will be able to talk to the Empire Controller 21 | # and ssh to the bastion hosts 22 | trusted_network_cidr: 23 | 24 | nat_instance_type: m3.medium 25 | 26 | bastion_instance_type: m3.medium 27 | 28 | # Used by the controller and the ECS Agent on the minions to pull 29 | # down private images from the registry 30 | docker_registry: https://index.docker.io/v1/ 31 | docker_registry_user: 32 | docker_registry_password: 33 | docker_registry_email: 34 | 35 | # Disk size for RDS instance in GB 36 | empiredb_disk_size: 10 37 | empiredb_instance_type: db.m3.large 38 | 39 | # This username & password will be created automatically on the empire 40 | # database, and will be shared with Empire 41 | empiredb_user: 42 | empiredb_password: 43 | 44 | # Change to anything non-blank to disable streaming logs (enabled by default) 45 | empire_disable_streaming_logs: !!str 46 | 47 | empire_minion_min_instance_count: 3 48 | empire_minion_max_instance_count: 10 49 | empire_minion_instance_type: c4.xlarge 50 | 51 | empire_environment: example 52 | 53 | empire_controller_min_instance_count: 2 54 | empire_controller_max_instance_count: 2 55 | empire_controller_instance_type: m3.medium 56 | 57 | # If you change this, it may require blueprint changes as well to deal 58 | # with changes to Environment variables the empire daemon expects/uses 59 | 
empire_daemon_docker_image: remind101/empire:0.11.0 60 | 61 | # This cert needs to be uploaded into AWS ahead of time, and will be used for 62 | # the ELB in front of the Empire API. Use "acm" for AWS Certificate Manager 63 | # certs, and "iam" or blank for certs uploaded to IAM. 64 | empire_daemon_cert_type: !!str 65 | empire_daemon_cert_name: !!str 66 | 67 | # This is used for github authentication - you need to setup a new Oauth 68 | # application in Github in your github organization 69 | # https://github.com/organizations/:organization/settings/applications 70 | # See: http://empire.readthedocs.org/en/latest/configuration/#github-authentication 71 | empire_daemon_github_client_id: 72 | empire_daemon_github_client_secret: 73 | empire_daemon_github_organization: 74 | 75 | # This is used for github deployment webhooks. See: 76 | # http://empire.readthedocs.org/en/latest/configuration/#github-deployments 77 | # Disabled by default (!!str is a blank string) 78 | empire_daemon_github_webhooks_secret: !!str 79 | empire_daemon_github_deployments_environment: !!str 80 | 81 | # Just a random string used to sign access tokens for clients in Empire 82 | # should be somewhere between 32-64 characters long 83 | empire_daemon_token_secret: 84 | 85 | # Set to an empty string to disable sending empire daemon events to the 86 | # application logstream (via kinesis) 87 | empire_daemon_log_streamer: kinesis 88 | 89 | # Where to send the empire daemon logs - sends to cloudwatch. Can also be 90 | # set to 'stdout' if you only want to send it to stdout on each controller 91 | empire_daemon_run_logs_backend: cloudwatch 92 | 93 | # If empire_daemon_run_logs_backend is set to cloudwatch, you can set this 94 | # to the name of an existing cloudwatch log group to send the logs there. 
If 95 | # an empty string, a new cloudwatch log group will be created for you 96 | empire_daemon_run_logs_cloudwatch_group: !!str 97 | 98 | # Set to SNS if you want to send all empire daemon events into an SNS topic 99 | empire_daemon_events_backend: stdout 100 | 101 | # If empire_daemon_events_backend is set to SNS, set this if you want to use 102 | # an existing SNS topic. If not provided, an SNS topic will be created for you 103 | empire_daemon_events_sns_topic_name: !!str 104 | 105 | # Set to true to enable debug logging from the AWS SDK library 106 | empire_daemon_aws_debug: false 107 | 108 | # Set to true to require commit messages for certain commands: 109 | # run, set, restart, deploy, etc 110 | empire_daemon_require_commit_messages: false 111 | 112 | # The scheduler to use in Empire 113 | empire_daemon_scheduler: cloudformation-migration 114 | -------------------------------------------------------------------------------- /conf/example.yaml: -------------------------------------------------------------------------------- 1 | # Hooks require a path. 2 | # If the build should stop when a hook fails, set required to true. 
3 | # pre_build happens before the build 4 | # post_build happens after the build 5 | pre_build: 6 | - path: stacker.hooks.route53.create_domain 7 | required: true 8 | # Additional args can be passed as a dict of key/value pairs in kwargs 9 | # kwargs: 10 | # post_build: 11 | 12 | mappings: 13 | AmiMap: 14 | us-east-1: 15 | NAT: ami-ad227cc4 16 | ubuntu1404: ami-74e27e1c 17 | bastion: ami-74e27e1c 18 | us-west-2: 19 | NAT: ami-290f4119 20 | ubuntu1404: ami-5189a661 21 | bastion: ami-5189a661 22 | 23 | vpc_parameters: &vpc_parameters 24 | VpcId: ${output vpc::VpcId} # parametrs with ::'s in them refer to :: 25 | DefaultSG: ${output vpc::DefaultSG} 26 | PublicSubnets: ${output vpc::PublicSubnets} 27 | PrivateSubnets: ${output vpc::PrivateSubnets} 28 | AvailabilityZones: ${output vpc::AvailabilityZones} 29 | 30 | stacks: 31 | - name: vpc 32 | class_path: stacker_blueprints.vpc.VPC 33 | # Flag to enable/disable a stack. Default is true 34 | enabled: true 35 | variables: 36 | # Only build 2 AZs, can be overridden with -p on the command line 37 | # Note: If you want more than 4 AZs you should add more subnets below 38 | # Also you need at least 2 AZs in order to use the DB because 39 | # of the fact that the DB blueprint uses MultiAZ 40 | AZCount: 2 41 | # Enough subnets for 4 AZs 42 | PublicSubnets: 43 | - 10.128.0.0/24 44 | - 10.128.1.0/24 45 | - 10.128.2.0/24 46 | - 10.128.3.0/24 47 | PrivateSubnets: 48 | - 10.128.8.0/22 49 | - 10.128.12.0/22 50 | - 10.128.16.0/22 51 | - 10.128.20.0/22 52 | CidrBlock: 10.128.0.0/16 53 | # Uncomment if you want an internal hosted zone for the VPC 54 | # If provided, it will be added to the dns search path of the DHCP 55 | # Options 56 | InternalDomain: internal 57 | - name: bastion 58 | class_path: stacker_blueprints.bastion.Bastion 59 | enabled: true 60 | parameters: 61 | # Extends the parameters dict with the contents of the vpc_parameters 62 | # anchor. 
Basically we're including all VPC Outputs in the parameters 63 | # of the bastion stack. Note: Stacker figures out, automatically, which 64 | # parameters the stack actually needs and only submits those to each 65 | # stack. For example, most stacks are in the PrivateSubnets, but not 66 | # the PublicSubnets, but stacker deals with it for you. 67 | << : *vpc_parameters 68 | InstanceType: m3.medium 69 | OfficeNetwork: 203.0.113.0/24 70 | MinSize: 2 71 | MaxSize: 2 72 | SshKeyName: default 73 | ImageName: bastion 74 | - name: myDB 75 | class_path: stacker_blueprints.postgres.PostgresRDS 76 | # this stack is locked, which means it will not update unless you pass the 77 | # stack name "myDB" on the command line with --force 78 | locked: true 79 | enabled: true 80 | parameters: 81 | << : *vpc_parameters 82 | InstanceType: db.m3.medium 83 | AllocatedStorage: 10 84 | MasterUser: dbuser 85 | MasterUserPassword: ExamplePassword! 86 | DBName: db1 87 | # If the following are uncommented and you set an InternalDomain above 88 | # in the VPC a CNAME alias of InternalHostname will be setup pointing at 89 | # the database. 90 | #InternalZoneId: vpc::InternalZoneId 91 | #InternalZoneName: vpc::InternalZoneName 92 | #InternalHostname: mydb 93 | - name: myWeb 94 | class_path: stacker_blueprints.asg.AutoscalingGroup 95 | enabled: true 96 | parameters: 97 | << : *vpc_parameters 98 | InstanceType: m3.medium 99 | ImageName: ubuntu1404 100 | MinSize: 2 101 | MaxSize: 2 102 | SshKeyName: default 103 | # If commented out, no load balancer will be created. 104 | ELBHostName: mysite 105 | # Uncomment if you have a cert loaded in EC2 already and want to enable 106 | # SSL on the load balancer. 
107 | #ELBCertName: mycert 108 | -------------------------------------------------------------------------------- /conf/rds/README.rst: -------------------------------------------------------------------------------- 1 | ===================== 2 | Example RDS v2 Stacks 3 | ===================== 4 | 5 | This directory contains configs for testing out version 2 of the RDS stack 6 | framework. 7 | -------------------------------------------------------------------------------- /conf/rds/aurora/aurora.env: -------------------------------------------------------------------------------- 1 | # namespace is a unique name that the stacks will be built under. This value 2 | # will be used to prefix the CloudFormation stack names as well as the s3 3 | # bucket that contains revisions of the stacker templates. This is the only 4 | # required environment variable. 5 | namespace: 6 | 7 | # VPC settings 8 | azcount: 2 9 | 10 | # cluster settings 11 | cluster_database_name: mydb 12 | cluster_database_family: aurora5.6 13 | cluster_master_user: root 14 | cluster_master_user_password: SECRETPASSWORD 15 | cluster_storage_encrypted: true 16 | cluster_hostname: mycluster 17 | 18 | # instance1 settings 19 | instance1_db_instance_type: db.r3.large 20 | instance1_allow_major_version_upgrade: false 21 | instance1_auto_minor_version_upgrade: true 22 | instance1_name: instance1 23 | 24 | # instance2 settings 25 | instance2_db_instance_type: db.r3.large 26 | instance2_allow_major_version_upgrade: false 27 | instance2_auto_minor_version_upgrade: true 28 | instance2_name: instance2 29 | -------------------------------------------------------------------------------- /conf/rds/aurora/aurora.yaml: -------------------------------------------------------------------------------- 1 | vpc_variables: &vpc_variables 2 | VpcId: ${output vpc::VpcId} 3 | DefaultSG: ${output vpc::DefaultSG} 4 | PublicSubnets: ${output vpc::PublicSubnets} 5 | PrivateSubnets: ${output vpc::PrivateSubnets} 6 | 
AvailabilityZones: ${output vpc::AvailabilityZones} 7 | 8 | stacks: 9 | - name: vpc 10 | class_path: stacker_blueprints.vpc.VPC 11 | variables: 12 | # AZCount is the # of AvailabilityZones to attempt to build in. You 13 | # should build in as many as you can afford in order to provide for 14 | # better fault tolerance. Note: Since this is all done in a VPC, you 15 | # need to find out how many AZs can handle VPC subnets for the 16 | # region you are building in. As of this writing, here are the max 17 | # allowed AZCount's for each zone: 18 | # us-east-1: 4, us-west-1: 2, us-west-2: 3, eu-west-1: 3 19 | # Note: The minimum allowed AZCount is 2. 20 | AZCount: ${azcount} 21 | # Enough subnets for 4 AZs 22 | PublicSubnets: 23 | - 10.128.0.0/24 24 | - 10.128.1.0/24 25 | - 10.128.2.0/24 26 | - 10.128.3.0/24 27 | PrivateSubnets: 28 | - 10.128.8.0/22 29 | - 10.128.12.0/22 30 | - 10.128.16.0/22 31 | - 10.128.20.0/22 32 | InternalDomain: internal 33 | # CidrBlock needs to be hold all of the Public & Private subnets above 34 | CidrBlock: 10.128.0.0/16 35 | UseNatGateway: true 36 | - name: auroraCluster 37 | class_path: stacker_blueprints.rds.aurora.base.AuroraCluster 38 | variables: 39 | << : *vpc_variables 40 | DatabaseName: ${cluster_database_name} 41 | DBFamily: ${cluster_database_family} 42 | Subnets: ${output vpc::PrivateSubnets} 43 | EngineVersion: 5.6.10a 44 | MasterUser: ${cluster_master_user} 45 | MasterUserPassword: ${cluster_master_user_password} 46 | StorageEncrypted: ${cluster_storage_encrypted} 47 | InternalZoneName: ${output vpc::InternalZoneName} 48 | InternalZoneId: ${output vpc::InternalZoneId} 49 | InternalHostname: ${cluster_hostname} 50 | - name: auroraInstance1 51 | class_path: stacker_blueprints.rds.base.ClusterInstance 52 | variables: 53 | << : *vpc_variables 54 | Engine: aurora 55 | DBClusterIdentifier: ${output auroraCluster::Cluster} 56 | Subnets: ${output vpc::PrivateSubnets} 57 | InstanceType: ${instance1_db_instance_type} 58 | 
AllowMajorVersionUpgrade: ${instance1_allow_major_version_upgrade} 59 | AutoMinorVersionUpgrade: ${instance1_auto_minor_version_upgrade} 60 | InternalZoneName: ${output vpc::InternalZoneName} 61 | InternalZoneId: ${output vpc::InternalZoneId} 62 | InternalHostname: ${instance1_name} 63 | DBInstanceIdentifier: ${instance1_name} 64 | - name: auroraInstance2 65 | class_path: stacker_blueprints.rds.base.ClusterInstance 66 | variables: 67 | << : *vpc_variables 68 | Engine: aurora 69 | DBClusterIdentifier: ${output auroraCluster::Cluster} 70 | Subnets: ${output vpc::PrivateSubnets} 71 | InstanceType: ${instance2_db_instance_type} 72 | AllowMajorVersionUpgrade: ${instance2_allow_major_version_upgrade} 73 | AutoMinorVersionUpgrade: ${instance2_auto_minor_version_upgrade} 74 | InternalZoneName: ${output vpc::InternalZoneName} 75 | InternalZoneId: ${output vpc::InternalZoneId} 76 | InternalHostname: ${instance2_name} 77 | DBInstanceIdentifier: ${instance2_name} 78 | -------------------------------------------------------------------------------- /conf/rds/mysql.env: -------------------------------------------------------------------------------- 1 | # namespace is a unique name that the stacks will be built under. This value 2 | # will be used to prefix the CloudFormation stack names as well as the s3 3 | # bucket that contains revisions of the stacker templates. This is the only 4 | # required environment variable. 
5 | namespace: 6 | 7 | # VPC settings 8 | azcount: 2 9 | 10 | # Master settings 11 | master_db_instance_type: db.m3.large 12 | master_allow_major_version_upgrade: false 13 | master_auto_minor_version_upgrade: true 14 | master_storage_size: 100 15 | master_iops: 1000 16 | master_name: mysql-master 17 | master_db_family: mysql5.6 18 | master_engine_version: 5.6.23 19 | master_engine_major_version: !!str 5.6 20 | master_storage_encrypted: true 21 | master_db_user: myuser 22 | master_db_passwd: SECRETPASSWORD 23 | master_db_name: mydb 24 | master_multi_az: false 25 | 26 | # replica settings 27 | replica_db_instance_type: db.m3.large 28 | replica_allow_major_version_upgrade: false 29 | replica_auto_minor_version_upgrade: true 30 | replica_name: mysql-replica 31 | replica_storage_size: 100 32 | replica_iops: 1000 33 | replica_db_family: mysql5.6 34 | replica_engine_version: 5.6.23 35 | replica_engine_major_version: !!str 5.6 36 | replica_storage_encrypted: true 37 | -------------------------------------------------------------------------------- /conf/rds/mysql.yaml: -------------------------------------------------------------------------------- 1 | vpc_variables: &vpc_variables 2 | VpcId: ${output vpc::VpcId} 3 | DefaultSG: ${output vpc::DefaultSG} 4 | PublicSubnets: ${output vpc::PublicSubnets} 5 | PrivateSubnets: ${output vpc::PrivateSubnets} 6 | AvailabilityZones: ${output vpc::AvailabilityZones} 7 | 8 | stacks: 9 | - name: vpc 10 | class_path: stacker_blueprints.vpc.VPC 11 | variables: 12 | # AZCount is the # of AvailabilityZones to attempt to build in. You 13 | # should build in as many as you can afford in order to provide for 14 | # better fault tolerance. Note: Since this is all done in a VPC, you 15 | # need to find out how many AZs can handle VPC subnets for the 16 | # region you are building in. 
As of this writing, here are the max 17 | # allowed AZCount's for each zone: 18 | # us-east-1: 4, us-west-1: 2, us-west-2: 3, eu-west-1: 3 19 | # Note: The minimum allowed AZCount is 2. 20 | AZCount: ${azcount} 21 | # Enough subnets for 4 AZs 22 | PublicSubnets: 23 | - 10.128.0.0/24 24 | - 10.128.1.0/24 25 | - 10.128.2.0/24 26 | - 10.128.3.0/24 27 | PrivateSubnets: 28 | - 10.128.8.0/22 29 | - 10.128.12.0/22 30 | - 10.128.16.0/22 31 | - 10.128.20.0/22 32 | InternalDomain: internal 33 | # CidrBlock needs to be hold all of the Public & Private subnets above 34 | CidrBlock: 10.128.0.0/16 35 | UseNatGateway: true 36 | - name: mysqlMaster 37 | class_path: stacker_blueprints.rds.mysql.MasterInstance 38 | variables: 39 | << : *vpc_variables 40 | Subnets: ${output vpc::PrivateSubnets} 41 | InstanceType: ${master_db_instance_type} 42 | AllowMajorVersionUpgrade: ${master_allow_major_version_upgrade} 43 | AutoMinorVersionUpgrade: ${master_auto_minor_version_upgrade} 44 | AllocatedStorage: ${master_storage_size} 45 | IOPS: ${master_iops} 46 | InternalZoneName: ${output vpc::InternalZoneName} 47 | InternalZoneId: ${output vpc::InternalZoneId} 48 | InternalHostname: ${master_name} 49 | DBInstanceIdentifier: ${master_name} 50 | DBFamily: ${master_db_family} 51 | EngineVersion: ${master_engine_version} 52 | EngineMajorVersion: ${master_engine_major_version} 53 | StorageEncrypted: ${master_storage_encrypted} 54 | # MasterInstance specific 55 | MasterUser: ${master_db_user} 56 | MasterUserPassword: ${master_db_passwd} 57 | DatabaseName: ${master_db_name} 58 | MultiAZ: ${master_multi_az} 59 | - name: mysqlReplica 60 | class_path: stacker_blueprints.rds.mysql.ReadReplica 61 | variables: 62 | << : *vpc_variables 63 | Subnets: ${output vpc::PrivateSubnets} 64 | InstanceType: ${replica_db_instance_type} 65 | AllowMajorVersionUpgrade: ${replica_allow_major_version_upgrade} 66 | AutoMinorVersionUpgrade: ${replica_auto_minor_version_upgrade} 67 | AllocatedStorage: ${replica_storage_size} 68 
| IOPS: ${replica_iops} 69 | InternalZoneName: ${output vpc::InternalZoneName} 70 | InternalZoneId: ${output vpc::InternalZoneId} 71 | InternalHostname: ${replica_name} 72 | DBInstanceIdentifier: ${replica_name} 73 | DBFamily: ${replica_db_family} 74 | EngineVersion: ${replica_engine_version} 75 | EngineMajorVersion: ${replica_engine_major_version} 76 | StorageEncrypted: ${replica_storage_encrypted} 77 | # ReadReplica Specific 78 | MasterDatabaseId: ${output mysqlMaster::DBInstance} 79 | -------------------------------------------------------------------------------- /conf/rds/postgres.env: -------------------------------------------------------------------------------- 1 | # namespace is a unique name that the stacks will be built under. This value 2 | # will be used to prefix the CloudFormation stack names as well as the s3 3 | # bucket that contains revisions of the stacker templates. This is the only 4 | # required environment variable. 5 | namespace: 6 | 7 | # VPC settings 8 | azcount: 2 9 | 10 | # Master settings 11 | master_db_instance_type: db.m3.large 12 | master_allow_major_version_upgrade: false 13 | master_auto_minor_version_upgrade: true 14 | master_storage_size: 100 15 | master_iops: 1000 16 | master_name: postgres-master 17 | master_db_family: postgres9.4 18 | master_engine_version: 9.4.1 19 | master_engine_major_version: !!str 9.4 20 | master_storage_encrypted: true 21 | master_db_user: myuser 22 | master_db_passwd: SECRETPASSWORD 23 | master_db_name: mydb 24 | master_multi_az: false 25 | 26 | # replica settings 27 | replica_db_instance_type: db.m3.large 28 | replica_allow_major_version_upgrade: false 29 | replica_auto_minor_version_upgrade: true 30 | replica_name: postgres-replica 31 | replica_storage_size: 100 32 | replica_iops: 1000 33 | replica_db_family: postgres9.4 34 | replica_engine_version: 9.4.1 35 | replica_engine_major_version: !!str 9.4 36 | replica_storage_encrypted: true 37 | 
-------------------------------------------------------------------------------- /conf/rds/postgres.yaml: -------------------------------------------------------------------------------- 1 | vpc_variables: &vpc_variables 2 | VpcId: ${output vpc::VpcId} 3 | DefaultSG: ${output vpc::DefaultSG} 4 | PublicSubnets: ${output vpc::PublicSubnets} 5 | PrivateSubnets: ${output vpc::PrivateSubnets} 6 | AvailabilityZones: ${output vpc::AvailabilityZones} 7 | 8 | stacks: 9 | - name: vpc 10 | class_path: stacker_blueprints.vpc.VPC 11 | variables: 12 | # AZCount is the # of AvailabilityZones to attempt to build in. You 13 | # should build in as many as you can afford in order to provide for 14 | # better fault tolerance. Note: Since this is all done in a VPC, you 15 | # need to find out how many AZs can handle VPC subnets for the 16 | # region you are building in. As of this writing, here are the max 17 | # allowed AZCount's for each zone: 18 | # us-east-1: 4, us-west-1: 2, us-west-2: 3, eu-west-1: 3 19 | # Note: The minimum allowed AZCount is 2. 
20 | AZCount: ${azcount} 21 | # Enough subnets for 4 AZs 22 | PublicSubnets: 23 | - 10.128.0.0/24 24 | - 10.128.1.0/24 25 | - 10.128.2.0/24 26 | - 10.128.3.0/24 27 | PrivateSubnets: 28 | - 10.128.8.0/22 29 | - 10.128.12.0/22 30 | - 10.128.16.0/22 31 | - 10.128.20.0/22 32 | InternalDomain: internal 33 | # CidrBlock needs to be hold all of the Public & Private subnets above 34 | CidrBlock: 10.128.0.0/16 35 | UseNatGateway: true 36 | - name: postgresMaster 37 | class_path: stacker_blueprints.rds.postgres.MasterInstance 38 | variables: 39 | << : *vpc_variables 40 | Subnets: ${output vpc::PrivateSubnets} 41 | InstanceType: ${master_db_instance_type} 42 | AllowMajorVersionUpgrade: ${master_allow_major_version_upgrade} 43 | AutoMinorVersionUpgrade: ${master_auto_minor_version_upgrade} 44 | AllocatedStorage: ${master_storage_size} 45 | IOPS: ${master_iops} 46 | InternalZoneName: ${output vpc::InternalZoneName} 47 | InternalZoneId: ${output vpc::InternalZoneId} 48 | InternalHostname: ${master_name} 49 | DBInstanceIdentifier: ${master_name} 50 | DBFamily: ${master_db_family} 51 | EngineVersion: ${master_engine_version} 52 | EngineMajorVersion: ${master_engine_major_version} 53 | StorageEncrypted: ${master_storage_encrypted} 54 | # MasterInstance specific 55 | MasterUser: ${master_db_user} 56 | MasterUserPassword: ${master_db_passwd} 57 | DatabaseName: ${master_db_name} 58 | MultiAZ: ${master_multi_az} 59 | - name: postgresReplica 60 | class_path: stacker_blueprints.rds.postgres.ReadReplica 61 | variables: 62 | << : *vpc_variables 63 | Subnets: ${output vpc::PrivateSubnets} 64 | InstanceType: ${replica_db_instance_type} 65 | AllowMajorVersionUpgrade: ${replica_allow_major_version_upgrade} 66 | AutoMinorVersionUpgrade: ${replica_auto_minor_version_upgrade} 67 | AllocatedStorage: ${replica_storage_size} 68 | IOPS: ${replica_iops} 69 | InternalZoneName: ${output vpc::InternalZoneName} 70 | InternalZoneId: ${output vpc::InternalZoneId} 71 | InternalHostname: ${replica_name} 72 | 
DBInstanceIdentifier: ${replica_name} 73 | DBFamily: ${replica_db_family} 74 | EngineVersion: ${replica_engine_version} 75 | EngineMajorVersion: ${replica_engine_major_version} 76 | StorageEncrypted: ${replica_storage_encrypted} 77 | # ReadReplica Specific 78 | MasterDatabaseId: ${output postgresMaster::DBInstance} 79 | -------------------------------------------------------------------------------- /conf/stage.env: -------------------------------------------------------------------------------- 1 | # This is an example of the format for environment files 2 | # You can use a flag dictionary in yaml format. Values can be strings or 3 | # numbers, but cannot be more complex data types (dictionaries, lists, etc). 4 | 5 | # namespace is a unique name that the stacks will be built under. This value 6 | # will be used to prefix the CloudFormation stack names as well as the s3 7 | # bucket that contains revisions of the stacker templates. This is the only 8 | # required environment variable. 
9 | namespace: 10 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | description-file = README.rst 3 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from setuptools import setup, find_packages 3 | 4 | src_dir = os.path.dirname(__file__) 5 | 6 | install_requires = [ 7 | # See thread here: 8 | # https://remind.slack.com/archives/C03GHL501/p1520983157000263 9 | # Hope to remove lock on python-dateutil someday 10 | "python-dateutil==2.6.1", 11 | "stacker>=1.0.1", 12 | "troposphere>=1.9.5", 13 | "awacs>=0.6.0", 14 | ] 15 | 16 | tests_require = [ 17 | "nose", 18 | "mock~=2.0.0", 19 | "stacker>=1.1.1", 20 | ] 21 | 22 | 23 | def read(filename): 24 | full_path = os.path.join(src_dir, filename) 25 | with open(full_path) as fd: 26 | return fd.read() 27 | 28 | 29 | if __name__ == "__main__": 30 | setup( 31 | name="stacker_blueprints", 32 | version="1.0.7", 33 | author="Michael Barrett", 34 | author_email="loki77@gmail.com", 35 | license="New BSD license", 36 | url="https://github.com/remind101/stacker_blueprints", 37 | description="Default blueprints for stacker", 38 | long_description=read("README.rst"), 39 | packages=find_packages(), 40 | install_requires=install_requires, 41 | tests_require=tests_require, 42 | test_suite="nose.collector", 43 | ) 44 | -------------------------------------------------------------------------------- /stacker_blueprints/__init__.py: -------------------------------------------------------------------------------- 1 | __version__ = "1.0.7" 2 | -------------------------------------------------------------------------------- /stacker_blueprints/bastion.py: -------------------------------------------------------------------------------- 1 | # Bastion Stack 2 | # 3 | 
# This stack configures our bastion host(s). 4 | # http://en.wikipedia.org/wiki/Bastion_host 5 | # 6 | # These hosts are the only SSH entrypoint into the VPC. To SSH to a host inside 7 | # the VPC you must first SSH to a bastion host, and then SSH from that host to 8 | # another inside the VPC. 9 | 10 | from troposphere import Ref, ec2, autoscaling, FindInMap, Output 11 | from troposphere.autoscaling import Tag as ASTag 12 | 13 | from stacker.blueprints.base import Blueprint 14 | from stacker.blueprints.variables.types import ( 15 | CFNCommaDelimitedList, 16 | CFNNumber, 17 | CFNString, 18 | EC2KeyPairKeyName, 19 | EC2SecurityGroupId, 20 | EC2SubnetIdList, 21 | EC2VPCId, 22 | ) 23 | 24 | CLUSTER_SG_NAME = "BastionSecurityGroup" 25 | 26 | 27 | class Bastion(Blueprint): 28 | VARIABLES = { 29 | "VpcId": {"type": EC2VPCId, "description": "Vpc Id"}, 30 | "DefaultSG": {"type": EC2SecurityGroupId, 31 | "description": "Top level security group."}, 32 | "PublicSubnets": {"type": EC2SubnetIdList, 33 | "description": "Subnets to deploy public " 34 | "instances in."}, 35 | "PrivateSubnets": {"type": EC2SubnetIdList, 36 | "description": "Subnets to deploy private " 37 | "instances in."}, 38 | "AvailabilityZones": {"type": CFNCommaDelimitedList, 39 | "description": "Availability Zones to deploy " 40 | "instances in."}, 41 | "InstanceType": {"type": CFNString, 42 | "description": "EC2 Instance Type", 43 | "default": "m3.medium"}, 44 | "MinSize": {"type": CFNNumber, 45 | "description": "Minimum # of instances.", 46 | "default": "1"}, 47 | "MaxSize": {"type": CFNNumber, 48 | "description": "Maximum # of instances.", 49 | "default": "5"}, 50 | "SshKeyName": {"type": EC2KeyPairKeyName}, 51 | "OfficeNetwork": { 52 | "type": CFNString, 53 | "description": "CIDR block allowed to connect to bastion hosts."}, 54 | "ImageName": { 55 | "type": CFNString, 56 | "description": "The image name to use from the AMIMap (usually " 57 | "found in the config file.)", 58 | "default": "bastion"}, 59 | 
} 60 | 61 | def create_security_groups(self): 62 | t = self.template 63 | cluster_rules = [] 64 | cluster_rules.append( 65 | ec2.SecurityGroupRule(IpProtocol='tcp', 66 | FromPort=22, ToPort=22, 67 | CidrIp=Ref('OfficeNetwork'))) 68 | sg = t.add_resource( 69 | ec2.SecurityGroup(CLUSTER_SG_NAME, 70 | GroupDescription='BastionSecurityGroup', 71 | SecurityGroupIngress=cluster_rules, 72 | VpcId=Ref("VpcId"))) 73 | 74 | t.add_output( 75 | Output( 76 | 'SecurityGroup', 77 | Value=Ref(sg) 78 | ) 79 | ) 80 | 81 | # Make it so the bastion hosts can ssh into any other host. 82 | t.add_resource( 83 | ec2.SecurityGroupIngress( 84 | 'AllowSSHAnywhere', 85 | IpProtocol='tcp', 86 | FromPort=22, 87 | ToPort=22, 88 | SourceSecurityGroupId=Ref(CLUSTER_SG_NAME), 89 | GroupId=Ref('DefaultSG'))) 90 | 91 | def create_autoscaling_group(self): 92 | t = self.template 93 | t.add_resource( 94 | autoscaling.LaunchConfiguration( 95 | 'BastionLaunchConfig', 96 | AssociatePublicIpAddress=True, 97 | ImageId=FindInMap( 98 | 'AmiMap', Ref("AWS::Region"), Ref("ImageName")), 99 | InstanceType=Ref("InstanceType"), 100 | KeyName=Ref("SshKeyName"), 101 | UserData=self.generate_user_data(), 102 | SecurityGroups=[Ref("DefaultSG"), Ref(CLUSTER_SG_NAME)])) 103 | t.add_resource( 104 | autoscaling.AutoScalingGroup( 105 | 'BastionAutoscalingGroup', 106 | AvailabilityZones=Ref("AvailabilityZones"), 107 | LaunchConfigurationName=Ref("BastionLaunchConfig"), 108 | MinSize=Ref("MinSize"), 109 | MaxSize=Ref("MaxSize"), 110 | VPCZoneIdentifier=Ref("PublicSubnets"), 111 | Tags=[ASTag('Name', 'bastion', True)])) 112 | 113 | def generate_user_data(self): 114 | return '' 115 | 116 | def create_template(self): 117 | self.create_security_groups() 118 | self.create_autoscaling_group() 119 | -------------------------------------------------------------------------------- /stacker_blueprints/cloudwatch_logs.py: -------------------------------------------------------------------------------- 1 | from stacker.blueprints.base 
import Blueprint 2 | from stacker.blueprints.variables.types import TroposphereType 3 | 4 | from troposphere import logs, Output, Ref 5 | 6 | 7 | LOG_RETENTION_VALUES = [ 8 | 0, 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, 365, 400, 545, 731, 1827, 9 | 3653 10 | ] 11 | LOG_RETENTION_STRINGS = [str(x) for x in LOG_RETENTION_VALUES] 12 | 13 | 14 | def validate_cloudwatch_log_retention(value): 15 | if value not in LOG_RETENTION_VALUES: 16 | raise ValueError( 17 | "%d is not a valid retention period. Must be one of: %s" % ( 18 | value, 19 | ', '.join(LOG_RETENTION_STRINGS) 20 | ) 21 | ) 22 | return value 23 | 24 | 25 | class SubscriptionFilters(Blueprint): 26 | 27 | VARIABLES = { 28 | "SubscriptionFilters": { 29 | "type": TroposphereType(logs.SubscriptionFilter, many=True), 30 | "description": "Subscription filters to create.", 31 | } 32 | } 33 | 34 | def create_template(self): 35 | t = self.template 36 | variables = self.get_variables() 37 | 38 | for _filter in variables["SubscriptionFilters"]: 39 | t.add_resource(_filter) 40 | t.add_output( 41 | Output( 42 | "%sName" % _filter.title, 43 | Value=Ref(_filter) 44 | ) 45 | ) 46 | -------------------------------------------------------------------------------- /stacker_blueprints/ec2.py: -------------------------------------------------------------------------------- 1 | from stacker.blueprints.base import Blueprint 2 | from stacker.blueprints.variables.types import TroposphereType 3 | 4 | from troposphere import ( 5 | ec2, 6 | Output, 7 | ) 8 | 9 | 10 | class Instances(Blueprint): 11 | """ Manages the creation of EC2 Instance resources. 
""" 12 | 13 | VARIABLES = { 14 | "Instances": { 15 | "type": TroposphereType(ec2.Instance, many=True), 16 | "description": "Dictionary of EC2 Instance definitions.", 17 | }, 18 | } 19 | 20 | def create_template(self): 21 | t = self.template 22 | variables = self.get_variables() 23 | 24 | for instance in variables["Instances"]: 25 | t.add_resource(instance) 26 | title = instance.title 27 | t.add_output( 28 | Output(title + "InstanceId", Value=instance.Ref()) 29 | ) 30 | t.add_output( 31 | Output( 32 | title + "AZ", Value=instance.GetAtt("AvailabilityZone") 33 | ) 34 | ) 35 | 36 | t.add_output( 37 | Output( 38 | title + "PrivateDnsName", 39 | Value=instance.GetAtt("PrivateDnsName") 40 | ) 41 | ) 42 | 43 | t.add_output( 44 | Output( 45 | title + "PublicDnsName", 46 | Value=instance.GetAtt("PublicDnsName") 47 | ) 48 | ) 49 | 50 | t.add_output( 51 | Output( 52 | title + "PrivateIp", 53 | Value=instance.GetAtt("PrivateIp") 54 | ) 55 | ) 56 | 57 | t.add_output( 58 | Output( 59 | title + "PublicIp", 60 | Value=instance.GetAtt("PublicIp") 61 | ) 62 | ) 63 | -------------------------------------------------------------------------------- /stacker_blueprints/ecr.py: -------------------------------------------------------------------------------- 1 | from stacker.blueprints.base import Blueprint 2 | 3 | from troposphere import ecr 4 | 5 | 6 | class Repositories(Blueprint): 7 | 8 | VARIABLES = { 9 | "Repositories": { 10 | "type": list, 11 | "description": "A list of repository names to create." 
12 | } 13 | } 14 | 15 | def create_template(self): 16 | t = self.template 17 | variables = self.get_variables() 18 | 19 | for repo in variables["Repositories"]: 20 | t.add_resource( 21 | ecr.Repository( 22 | "%sRepository" % repo, 23 | RepositoryName=repo, 24 | ) 25 | ) 26 | -------------------------------------------------------------------------------- /stacker_blueprints/efs.py: -------------------------------------------------------------------------------- 1 | from troposphere import ec2, efs 2 | from troposphere import Join, Output, Ref, Tags 3 | 4 | from stacker.blueprints.base import Blueprint 5 | from stacker.blueprints.variables.types import TroposphereType 6 | from stacker.exceptions import ValidatorError 7 | 8 | from stacker_blueprints.util import merge_tags 9 | 10 | 11 | class ElasticFileSystem(Blueprint): 12 | VARIABLES = { 13 | 'VpcId': { 14 | 'type': str, 15 | 'description': 'VPC ID to create resources' 16 | }, 17 | 'PerformanceMode': { 18 | 'type': str, 19 | 'description': 'The performance mode of the file system', 20 | 'default': 'generalPurpose' 21 | }, 22 | 'Tags': { 23 | 'type': dict, 24 | 'description': 'Tags to associate with the created resources', 25 | 'default': {} 26 | }, 27 | 'Subnets': { 28 | 'type': list, 29 | 'description': 'List of subnets to deploy private mount targets in' 30 | }, 31 | 'IpAddresses': { 32 | 'type': list, 33 | 'description': 'List of IP addresses to assign to mount targets. ' 34 | 'Omit or make empty to assign automatically. ' 35 | 'Corresponds to Subnets listed in the same order.', 36 | 'default': [] 37 | }, 38 | 'SecurityGroups': { 39 | 'type': TroposphereType(ec2.SecurityGroup, many=True, 40 | optional=True, validate=False), 41 | 'description': "Dictionary of titles to SecurityGroups " 42 | "definitions to be created and assigned to this " 43 | "filesystem's MountTargets. " 44 | "The VpcId property will be filled automatically, " 45 | "so it should not be included. 
\n" 46 | "The IDs of the created groups will be exported as " 47 | "a comma-separated list in the " 48 | "EfsNewSecurityGroupIds output.\n" 49 | "Omit this parameter or set it to an empty " 50 | "dictionary to not create any groups. In that " 51 | "case the ExistingSecurityGroups variable must not " 52 | "be empty", 53 | 'default': {} 54 | }, 55 | 'ExtraSecurityGroups': { 56 | 'type': list, 57 | 'description': "List of existing SecurityGroup IDs to be asigned " 58 | "to this filesystem's MountTargets", 59 | 'default': [] 60 | } 61 | } 62 | 63 | def validate_efs_security_groups(self): 64 | validator = '{}.{}'.format(type(self).__name__, 65 | 'validate_efs_security_groups') 66 | v = self.get_variables() 67 | count = len(v['SecurityGroups'] or []) + len(v['ExtraSecurityGroups']) 68 | 69 | if count == 0: 70 | raise ValidatorError( 71 | 'SecurityGroups,ExtraSecurityGroups', validator, count, 72 | 'At least one SecurityGroup must be provided') 73 | elif count > 5: 74 | raise ValidatorError( 75 | 'SecurityGroups,ExtraSecurityGroups', validator, count, 76 | 'At most five total SecurityGroups must be provided') 77 | 78 | def validate_efs_subnets(self): 79 | validator = '{}.{}'.format(type(self).__name__, 'validate_efs_subnets') 80 | v = self.get_variables() 81 | 82 | subnet_count = len(v['Subnets']) 83 | if not subnet_count: 84 | raise ValidatorError( 85 | 'Subnets', validator, v['Subnets'], 86 | 'At least one Subnet must be provided') 87 | 88 | ip_count = len(v['IpAddresses']) 89 | if ip_count and ip_count != subnet_count: 90 | raise ValidatorError( 91 | 'IpAddresses', validator, v['IpAddresses'], 92 | 'The number of IpAddresses must match the number of Subnets') 93 | 94 | def resolve_variables(self, provided_variables): 95 | super(ElasticFileSystem, self).resolve_variables(provided_variables) 96 | 97 | self.validate_efs_security_groups() 98 | self.validate_efs_subnets() 99 | 100 | def prepare_efs_security_groups(self): 101 | t = self.template 102 | v = 
self.get_variables() 103 | 104 | created_groups = [] 105 | for sg in v['SecurityGroups']: 106 | sg.VpcId = v['VpcId'] 107 | sg.Tags = merge_tags(v['Tags'], getattr(sg, 'Tags', {})) 108 | 109 | sg = t.add_resource(sg) 110 | created_groups.append(sg) 111 | 112 | created_group_ids = list(map(Ref, created_groups)) 113 | t.add_output(Output( 114 | 'EfsNewSecurityGroupIds', 115 | Value=Join(',', created_group_ids))) 116 | 117 | groups_ids = created_group_ids + v['ExtraSecurityGroups'] 118 | return groups_ids 119 | 120 | def create_efs_filesystem(self): 121 | t = self.template 122 | v = self.get_variables() 123 | 124 | fs = t.add_resource(efs.FileSystem( 125 | 'EfsFileSystem', 126 | FileSystemTags=Tags(v['Tags']), 127 | PerformanceMode=v['PerformanceMode'])) 128 | 129 | t.add_output(Output( 130 | 'EfsFileSystemId', 131 | Value=Ref(fs))) 132 | 133 | return fs 134 | 135 | def create_efs_mount_targets(self, fs): 136 | t = self.template 137 | v = self.get_variables() 138 | 139 | groups = self.prepare_efs_security_groups() 140 | subnets = v['Subnets'] 141 | ips = v['IpAddresses'] 142 | 143 | mount_targets = [] 144 | for i, subnet in enumerate(subnets): 145 | mount_target = efs.MountTarget( 146 | 'EfsMountTarget{}'.format(i + 1), 147 | FileSystemId=Ref(fs), 148 | SubnetId=subnet, 149 | SecurityGroups=groups) 150 | 151 | if ips: 152 | mount_target.IpAddress = ips[i] 153 | 154 | mount_target = t.add_resource(mount_target) 155 | mount_targets.append(mount_target) 156 | 157 | t.add_output(Output( 158 | 'EfsMountTargetIds', 159 | Value=Join(',', list(map(Ref, mount_targets))))) 160 | 161 | def create_template(self): 162 | fs = self.create_efs_filesystem() 163 | self.create_efs_mount_targets(fs) 164 | -------------------------------------------------------------------------------- /stacker_blueprints/elasticache/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/remind101/stacker_blueprints/97791cc7c3f1c17e9d1547a45c059fae5b1cc204/stacker_blueprints/elasticache/__init__.py -------------------------------------------------------------------------------- /stacker_blueprints/elasticache/redis.py: -------------------------------------------------------------------------------- 1 | from . import base 2 | 3 | 4 | class RedisReplicationGroup(base.BaseReplicationGroup): 5 | def engine(self): 6 | return "redis" 7 | 8 | def get_engine_versions(self): 9 | return ["2.6.13", "2.8.19", "2.8.21", "2.8.22", "2.8.23", "2.8.24", 10 | "2.8.6", "3.2.4", "3.2.6", "3.2.10", "4.0.10", "5.0.0", 11 | "5.0.3", "5.0.4"] 12 | 13 | def get_parameter_group_family(self): 14 | return ["redis2.6", "redis2.8", "redis3.2", "redis4.0", "redis5.0"] 15 | -------------------------------------------------------------------------------- /stacker_blueprints/elasticsearch.py: -------------------------------------------------------------------------------- 1 | """AWS Elasticsearch Service. 2 | 3 | Blueprint to configure AWS Elasticsearch service. 
4 | 5 | Example:: 6 | 7 | - name: elasticsearch 8 | class_path: stacker_blueprints.elasticsearch.Domain 9 | variables: 10 | Roles: 11 | - ${empireMinion::IAMRole} 12 | InternalZoneId: ${vpc::InternalZoneId} 13 | InternalZoneName: ${vpc::InternalZoneName} 14 | InternalHostName: es 15 | 16 | """ 17 | import awacs.es 18 | from awacs.aws import ( 19 | Allow, 20 | Condition, 21 | IpAddress, 22 | Policy, 23 | Principal, 24 | Everybody, 25 | SourceIp, 26 | Statement, 27 | ) 28 | from stacker.blueprints.base import Blueprint 29 | from troposphere import ( 30 | elasticsearch, 31 | iam, 32 | route53, 33 | GetAtt, 34 | Join, 35 | Output, 36 | Ref, 37 | ) 38 | 39 | ES_DOMAIN = "ESDomain" 40 | DNS_RECORD = "ESDomainDNSRecord" 41 | POLICY_NAME = "ESDomainAccessPolicy" 42 | 43 | 44 | class Domain(Blueprint): 45 | 46 | VARIABLES = { 47 | "Roles": { 48 | "type": list, 49 | "description": ( 50 | "List of roles that should have access to the ES domain.")}, 51 | "InternalZoneId": { 52 | "type": str, 53 | "default": "", 54 | "description": "Internal zone id, if you have one."}, 55 | "InternalZoneName": { 56 | "type": str, 57 | "default": "", 58 | "description": "Internal zone name, if you have one."}, 59 | "InternalHostName": { 60 | "type": str, 61 | "default": "", 62 | "description": "Internal domain name, if you have one."}, 63 | "AdvancedOptions": { 64 | "type": dict, 65 | "default": {}, 66 | "description": ( 67 | "Additional options to specify for the Amazon ES domain" 68 | )}, 69 | "DomainName": { 70 | "type": str, 71 | "default": "", 72 | "description": "A name for the Amazon ES domain."}, 73 | "EBSOptions": { 74 | "type": dict, 75 | "default": {}, 76 | "description": ( 77 | "The configurations of Amazon Elastic Block Store (Amazon " 78 | "EBS) volumes that are attached to data nodes in the Amazon " 79 | "ES domain" 80 | )}, 81 | "ElasticsearchClusterConfig": { 82 | "type": dict, 83 | "default": {}, 84 | "description": ( 85 | "The cluster configuration for the Amazon ES domain." 
86 | )}, 87 | "ElasticsearchVersion": { 88 | "type": str, 89 | "default": "2.3", 90 | "description": "The version of Elasticsearch to use."}, 91 | "SnapshotOptions": { 92 | "type": dict, 93 | "default": {}, 94 | "description": ( 95 | "The automated snapshot configuration for the Amazon ES " 96 | "domain indices." 97 | )}, 98 | "Tags": { 99 | "type": list, 100 | "default": [], 101 | "description": ( 102 | "An arbitrary set of tags (key-value pairs) to associate with " 103 | "the Amazon ES domain." 104 | )}, 105 | "TrustedNetworks": { 106 | "type": list, 107 | "description": ( 108 | "List of CIDR blocks allowed to connect to the ES cluster" 109 | ), 110 | "default": []}, 111 | } 112 | 113 | def get_allowed_actions(self): 114 | return [ 115 | awacs.es.Action("ESHttpGet"), 116 | awacs.es.Action("ESHttpHead"), 117 | awacs.es.Action("ESHttpPost"), 118 | awacs.es.Action("ESHttpDelete")] 119 | 120 | def create_dns_record(self): 121 | t = self.template 122 | variables = self.get_variables() 123 | should_create_dns = all([ 124 | variables["InternalZoneId"], 125 | variables["InternalZoneName"], 126 | variables["InternalHostName"], 127 | ]) 128 | if should_create_dns: 129 | t.add_resource( 130 | route53.RecordSetType( 131 | DNS_RECORD, 132 | HostedZoneId=variables["InternalZoneId"], 133 | Comment="ES Domain CNAME Record", 134 | Name="{}.{}".format(variables["InternalHostName"], 135 | variables["InternalZoneName"]), 136 | Type="CNAME", 137 | TTL="120", 138 | ResourceRecords=[GetAtt(ES_DOMAIN, "DomainEndpoint")], 139 | )) 140 | t.add_output(Output("CNAME", Value=Ref(DNS_RECORD))) 141 | 142 | def create_domain(self): 143 | t = self.template 144 | variables = self.get_variables() 145 | params = { 146 | "ElasticsearchVersion": variables["ElasticsearchVersion"], 147 | } 148 | 149 | policy = self.get_access_policy() 150 | if policy: 151 | params["AccessPolicies"] = policy 152 | 153 | # Add any optional keys to the params dict. 
ES didn't have great 154 | # support for passing empty values for these keys when this was 155 | # created. 156 | optional_keys = ["AdvancedOptions", "DomainName", "EBSOptions", 157 | "SnapshotOptions", "Tags"] 158 | 159 | for key in optional_keys: 160 | optional = variables[key] 161 | if optional: 162 | params[key] = optional 163 | 164 | domain = elasticsearch.Domain.from_dict(ES_DOMAIN, params) 165 | t.add_resource(domain) 166 | t.add_output(Output("DomainArn", Value=GetAtt(ES_DOMAIN, "DomainArn"))) 167 | t.add_output(Output("DomainEndpoint", Value=GetAtt(ES_DOMAIN, 168 | "DomainEndpoint"))) 169 | 170 | def create_roles_policy(self): 171 | t = self.template 172 | variables = self.get_variables() 173 | statements = [ 174 | Statement( 175 | Effect=Allow, 176 | Action=self.get_allowed_actions(), 177 | Resource=[Join("/", [GetAtt(ES_DOMAIN, "DomainArn"), "*"])])] 178 | t.add_resource( 179 | iam.PolicyType( 180 | POLICY_NAME, 181 | PolicyName=POLICY_NAME, 182 | PolicyDocument=Policy(Statement=statements), 183 | Roles=variables["Roles"])) 184 | 185 | def get_access_policy(self): 186 | policy = None 187 | variables = self.get_variables() 188 | 189 | statements = [] 190 | for trusted_network in variables["TrustedNetworks"]: 191 | condition = Condition(IpAddress({SourceIp: trusted_network})) 192 | statements.append( 193 | Statement( 194 | Effect=Allow, 195 | Action=self.get_allowed_actions(), 196 | Condition=condition, 197 | Principal=Principal(Everybody))) 198 | 199 | if statements: 200 | policy = Policy(Statement=statements) 201 | return policy 202 | 203 | def create_template(self): 204 | self.create_domain() 205 | self.create_dns_record() 206 | self.create_roles_policy() 207 | -------------------------------------------------------------------------------- /stacker_blueprints/empire/__init__.py: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/remind101/stacker_blueprints/97791cc7c3f1c17e9d1547a45c059fae5b1cc204/stacker_blueprints/empire/__init__.py -------------------------------------------------------------------------------- /stacker_blueprints/empire/base.py: -------------------------------------------------------------------------------- 1 | import logging 2 | 3 | from troposphere import Base64, Join 4 | 5 | from stacker.blueprints.base import Blueprint 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | class EmpireBase(Blueprint): 11 | def create_conditions(self): 12 | logger.debug("No conditions to setup for %s", self.name) 13 | 14 | def create_security_groups(self): 15 | logger.debug("No security_groups to setup for %s", self.name) 16 | 17 | def create_ecs_cluster(self): 18 | logger.debug("No ecs cluster to setup for %s", self.name) 19 | 20 | def create_load_balancer(self): 21 | logger.debug("No load_balancer to setup for %s", self.name) 22 | 23 | def create_iam_profile(self): 24 | logger.debug("No iam_profile to setup for %s", self.name) 25 | 26 | def create_autoscaling_group(self): 27 | logger.debug("No autoscaling_group to setup for %s", self.name) 28 | 29 | def generate_seed_contents(self): 30 | raise Exception('Empire subclass must define seed contents') 31 | 32 | def generate_user_data(self): 33 | contents = Join("", self.generate_seed_contents()) 34 | stanza = Base64(Join( 35 | "", 36 | [ 37 | "#cloud-config\n", 38 | "write_files:\n", 39 | " - encoding: b64\n", 40 | " content: ", Base64(contents), "\n", 41 | " owner: root:root\n", 42 | " path: /etc/empire/seed\n", 43 | " permissions: 0640\n" 44 | ] 45 | )) 46 | return stanza 47 | 48 | def create_template(self): 49 | self.create_conditions() 50 | self.create_security_groups() 51 | self.create_ecs_cluster() 52 | self.create_load_balancer() 53 | self.create_iam_profile() 54 | self.create_autoscaling_group() 55 | -------------------------------------------------------------------------------- 
from troposphere import (
    Ref,
    Output,
    GetAtt,
    FindInMap,
)
from troposphere import (
    ec2,
    autoscaling,
    ecs,
)
from troposphere.autoscaling import Tag as ASTag
from troposphere.iam import (
    InstanceProfile,
    Policy,
    Role,
)

from awacs.helpers.trust import (
    get_default_assumerole_policy,
)

from stacker.blueprints.variables.types import (
    CFNCommaDelimitedList,
    CFNNumber,
    CFNString,
    EC2KeyPairKeyName,
    EC2SecurityGroupId,
    EC2SubnetIdList,
    EC2VPCId,
)

from .base import EmpireBase

from .policies import ecs_agent_policy

CLUSTER_SG_NAME = "EmpireControllerSecurityGroup"


class EmpireController(EmpireBase):
    """Blueprint for the Empire controller tier.

    Builds the controller security group (with postgres access to the
    Empire database), an ECS cluster, an IAM instance profile carrying the
    ECS agent policy, and an autoscaling group whose user-data seeds
    ``/etc/empire/seed`` (see ``EmpireBase.generate_user_data``).
    """

    VARIABLES = {
        "VpcId": {
            "type": EC2VPCId,
            "description": "Vpc Id"},
        "DefaultSG": {
            "type": EC2SecurityGroupId,
            "description": "Top level security group."},
        "PrivateSubnets": {
            "type": EC2SubnetIdList,
            "description": "Subnets to deploy private instances in."},
        "AvailabilityZones": {
            "type": CFNCommaDelimitedList,
            "description": "Availability Zones to deploy instances in."},
        "InstanceType": {
            "type": CFNString,
            "description": "Empire AWS Instance Type",
            "default": "m3.medium"},
        # NOTE(review): the two descriptions below say "minion" but this is
        # the controller blueprint -- looks like a copy-paste slip. Left
        # unchanged because the description is emitted into the template.
        "MinHosts": {
            "type": CFNNumber,
            "description": "Minimum # of empire minion instances.",
            "default": "2"},
        "MaxHosts": {
            "type": CFNNumber,
            "description": "Maximum # of empire minion instances.",
            "default": "3"},
        "SshKeyName": {
            "type": EC2KeyPairKeyName},
        "ImageName": {
            "type": CFNString,
            "description": (
                "The image name to use from the AMIMap (usually found in the "
                "config file.)"
            ),
            "default": "empire"},
        "DatabaseSecurityGroup": {
            "type": EC2SecurityGroupId,
            "description": "Security group of Empire database."},
        "DockerRegistry": {
            "type": CFNString,
            "description": (
                "Optional docker registry where private images are located."
            ),
            "default": "https://index.docker.io/v1/"},
        "DockerRegistryUser": {
            "type": CFNString,
            "description": "User for authentication with docker registry."},
        "DockerRegistryPassword": {
            "type": CFNString,
            "no_echo": True,
            "description": (
                "Password for authentication with docker registry."
            )},
        "DockerRegistryEmail": {
            "type": CFNString,
            "description": "Email for authentication with docker registry."},
    }

    def create_security_groups(self):
        """Create the controller SG and open postgres (5432) to the DB SG."""
        t = self.template

        t.add_resource(
            ec2.SecurityGroup(
                CLUSTER_SG_NAME,
                GroupDescription=CLUSTER_SG_NAME,
                VpcId=Ref("VpcId")))

        t.add_output(
            Output("SecurityGroup", Value=Ref(CLUSTER_SG_NAME)))

        # Allow access to the DB: ingress on the database's SG from the
        # controller cluster SG, postgres port only.
        t.add_resource(
            ec2.SecurityGroupIngress(
                "EmpireControllerDBAccess",
                IpProtocol="tcp", FromPort=5432, ToPort=5432,
                SourceSecurityGroupId=Ref(CLUSTER_SG_NAME),
                GroupId=Ref("DatabaseSecurityGroup")))

    def create_ecs_cluster(self):
        """Create the ECS cluster the controllers register with."""
        t = self.template
        t.add_resource(ecs.Cluster("EmpireControllerCluster"))
        t.add_output(
            Output("ECSCluster", Value=Ref("EmpireControllerCluster")))

    def build_block_device(self):
        # 50GB EBS volume attached at /dev/sdh on each instance.
        volume = autoscaling.EBSBlockDevice(VolumeSize="50")
        return [autoscaling.BlockDeviceMapping(
            DeviceName="/dev/sdh", Ebs=volume)]

    def generate_iam_policies(self):
        """Return the inline IAM policies for the controller role."""
        return [
            Policy(
                PolicyName="ecs-agent",
                PolicyDocument=ecs_agent_policy(),
            )]

    def create_iam_profile(self):
        """Create the controller IAM role and its instance profile."""
        t = self.template
        # Role for Empire Controllers
        t.add_resource(
            Role(
                "EmpireControllerRole",
                AssumeRolePolicyDocument=get_default_assumerole_policy(),
                Path="/",
                Policies=self.generate_iam_policies()))

        t.add_resource(
            InstanceProfile(
                "EmpireControllerProfile",
                Path="/",
                Roles=[Ref("EmpireControllerRole")]))
        t.add_output(Output("IAMRole", Value=Ref("EmpireControllerRole")))

    def generate_seed_contents(self):
        """Environment written to /etc/empire/seed on the instances."""
        seed = [
            "EMPIRE_HOSTGROUP=controller\n",
            "ECS_CLUSTER=", Ref("EmpireControllerCluster"), "\n",
            "DOCKER_REGISTRY=", Ref("DockerRegistry"), "\n",
            "DOCKER_USER=", Ref("DockerRegistryUser"), "\n",
            "DOCKER_PASS=", Ref("DockerRegistryPassword"), "\n",
            "DOCKER_EMAIL=", Ref("DockerRegistryEmail"), "\n",
        ]
        return seed

    def create_autoscaling_group(self):
        """Create the launch configuration and autoscaling group."""
        t = self.template
        t.add_resource(
            autoscaling.LaunchConfiguration(
                "EmpireControllerLaunchConfig",
                IamInstanceProfile=GetAtt("EmpireControllerProfile", "Arn"),
                # AMI is resolved per-region from the AmiMap mapping, which
                # is expected to be supplied by the stacker config.
                ImageId=FindInMap(
                    "AmiMap",
                    Ref("AWS::Region"),
                    Ref("ImageName")),
                BlockDeviceMappings=self.build_block_device(),
                InstanceType=Ref("InstanceType"),
                KeyName=Ref("SshKeyName"),
                UserData=self.generate_user_data(),
                SecurityGroups=[Ref("DefaultSG"), Ref(CLUSTER_SG_NAME)]))
        t.add_resource(
            autoscaling.AutoScalingGroup(
                "EmpireControllerAutoscalingGroup",
                AvailabilityZones=Ref("AvailabilityZones"),
                LaunchConfigurationName=Ref("EmpireControllerLaunchConfig"),
                MinSize=Ref("MinHosts"),
                MaxSize=Ref("MaxHosts"),
                VPCZoneIdentifier=Ref("PrivateSubnets"),
                Tags=[ASTag("Name", "empire_controller", True)]))
# ---- stacker_blueprints/firehose/redshift.py ----
from troposphere import firehose, logs, Output, Ref, GetAtt

from stacker.blueprints.variables.types import CFNString

from .base import BaseDeliveryStream

DELIVERY_STREAM = "DeliveryStream"
REDSHIFT_LOG_STREAM = "RedshiftLogStream"


class DeliveryStream(BaseDeliveryStream):
    """Kinesis Firehose delivery stream that COPYs data into Redshift.

    Extends ``BaseDeliveryStream`` with redshift connection variables and a
    dedicated CloudWatch log stream for the redshift destination.
    """

    def defined_variables(self):
        # Extend the base blueprint's variables with redshift settings.
        variables = super(DeliveryStream, self).defined_variables()

        additional = {
            "JDBCURL": {
                "type": str,
                "description": "The URL used to connect to redshift"
            },
            "Username": {
                "type": str,
                "description": "The user for the redshift table"
            },
            "Password": {
                # CFNString so the password becomes a NoEcho CFN parameter
                # rather than being inlined into the template body.
                "type": CFNString,
                "description": "The password for the redshift user",
                "no_echo": True,
            },
            "TableName": {
                "type": str,
                "description": "The redshift table"
            },
            "CopyOptions": {
                "type": str,
                "description": "Copy Options used by the redshift copy "
                               "command.",
                "default": "JSON 'auto' ACCEPTINVCHARS BLANKSASNULL "
                           "EMPTYASNULL GZIP STATUPDATE OFF COMPUPDATE OFF",
            },
        }

        variables.update(additional)
        return variables

    def create_log_stream(self):
        """Create the base log stream plus one for the redshift destination."""
        t = self.template
        super(DeliveryStream, self).create_log_stream()

        self.redshift_log_stream = t.add_resource(
            logs.LogStream(
                REDSHIFT_LOG_STREAM,
                LogGroupName=Ref(self.log_group),
                DependsOn=self.log_group.title
            )
        )

        t.add_output(
            Output(
                "RedshiftLogStreamName",
                Value=Ref(self.redshift_log_stream)
            )
        )

    def create_delivery_stream(self):
        """Create the Firehose stream with a Redshift destination config."""
        t = self.template
        variables = self.get_variables()

        # Intermediate S3 staging config comes from the base blueprint.
        s3_dest_config = firehose.S3Configuration(
            **self.s3_destination_config_dict()
        )

        redshift_config = firehose.RedshiftDestinationConfiguration(
            RoleARN=GetAtt(self.role, "Arn"),
            ClusterJDBCURL=variables['JDBCURL'],
            CopyCommand=firehose.CopyCommand(
                CopyOptions=variables["CopyOptions"],
                DataTableName=variables['TableName']
            ),
            Username=variables['Username'],
            # .ref -> reference to the NoEcho CFN parameter.
            Password=variables['Password'].ref,
            S3Configuration=s3_dest_config,
            CloudWatchLoggingOptions=self.cloudwatch_logging_options(
                self.log_group,
                self.redshift_log_stream
            )
        )

        self.delivery_stream = t.add_resource(
            firehose.DeliveryStream(
                DELIVERY_STREAM,
                RedshiftDestinationConfiguration=redshift_config
            )
        )


# ---- stacker_blueprints/firehose/s3.py ----
from troposphere import firehose

from .base import BaseDeliveryStream

DELIVERY_STREAM = "DeliveryStream"


class DeliveryStream(BaseDeliveryStream):
    """Kinesis Firehose delivery stream that writes directly to S3."""

    def create_delivery_stream(self):
        """Create the Firehose stream with an S3 destination config."""
        t = self.template

        s3_dest_config = firehose.S3DestinationConfiguration(
            **self.s3_destination_config_dict()
        )

        self.delivery_stream = t.add_resource(
            firehose.DeliveryStream(
                DELIVERY_STREAM,
                S3DestinationConfiguration=s3_dest_config
            )
        )
from troposphere import (
    Ref, Output
)

from stacker.blueprints.base import Blueprint
from stacker.util import load_object_from_string


class GenericResourceCreator(Blueprint):
    """ Generic Blueprint for creating a resource

    Example config - this would create a stack with a single resource in it,
    an ec2.Volume resource:

    - name: generic-resource-volume
      class_path: blueprints.generic.GenericResourceCreator
      variables:
        Class: ec2.Volume
        Output: VolumeId
        Properties:
          VolumeType: gp2
          Size: 5
          Encrypted: true
          AvailabilityZone: us-east-1b

    """

    VARIABLES = {
        'Class':
            {'type': str,
             'description': 'The troposphere class to create, '
                            'e.g.: ec2.Volume'},
        'Output':
            {'type': str,
             'description': 'The output field that should be created, '
                            'e.g.: VolumeId'},
        'Properties':
            {'type': dict,
             'description': 'The list of properties to use for the '
                            'Troposphere class'},
    }

    def add_cfn_description(self):
        """ Boilerplate for CFN Template


        *** NOTE *** Template Version Reminder

        Make Sure you bump up the template version number above if submitting
        updates to the repo. This is the only way we can tell which version of
        a template is in place on a running resource.
        """
        template = self.template
        # NOTE(review): add_version/add_description are the pre-3.x
        # troposphere API -- matches this file's vintage, confirm before
        # upgrading troposphere.
        template.add_version('2010-09-09')
        template.add_description('Generic Resource Creator - 1.0.0')

    def setup_resource(self):
        """ Setting Up Resource """
        template = self.template
        variables = self.get_variables()

        tclass = variables['Class']    # e.g. "ec2.Volume"
        tprops = variables['Properties']
        output = variables['Output']

        # Resolve the troposphere class dynamically from its dotted name.
        klass = load_object_from_string('troposphere.' + tclass)

        # All resources created by this blueprint share one logical name.
        instance = klass.from_dict('ResourceRefName', tprops)

        template.add_resource(instance)
        template.add_output(Output(
            output,
            Description="A reference to the object created in this blueprint",
            Value=Ref(instance)
        ))

    def create_template(self):
        """ Create the CFN template """
        self.add_cfn_description()
        self.setup_resource()
from stacker.blueprints.base import Blueprint

from troposphere import (
    GetAtt,
    Output,
    Ref,
    Sub,
    iam,
)

from awacs.aws import Policy
from awacs.helpers.trust import (
    get_default_assumerole_policy,
    get_lambda_assumerole_policy
)


class Roles(Blueprint):
    """Creates IAM roles (EC2 and/or Lambda) and, optionally, one shared
    policy attached to every role created.

    Subclass and override ``generate_policy_statements`` to attach a policy.
    """

    VARIABLES = {
        "Ec2Roles": {
            "type": list,
            "description": "names of ec2 roles to create",
            "default": [],
        },
        "LambdaRoles": {
            "type": list,
            "description": "names of lambda roles to create",
            "default": [],
        },
    }

    def __init__(self, *args, **kwargs):
        super(Roles, self).__init__(*args, **kwargs)
        # Collected as roles/policies are created, so create_policy can
        # attach the shared policy to every role.
        self.roles = []
        self.policies = []

    def create_role(self, name, assumerole_policy):
        """Add an iam.Role with the given trust policy; outputs name & ARN."""
        t = self.template

        role = t.add_resource(
            iam.Role(
                name,
                AssumeRolePolicyDocument=assumerole_policy,
            )
        )

        t.add_output(
            Output(name + "RoleName", Value=Ref(role))
        )
        t.add_output(
            Output(name + "RoleArn", Value=GetAtt(role.title, "Arn"))
        )

        self.roles.append(role)
        return role

    def create_ec2_role(self, name):
        """Role assumable by ec2.amazonaws.com."""
        return self.create_role(name, get_default_assumerole_policy())

    def create_lambda_role(self, name):
        """Role assumable by lambda.amazonaws.com."""
        return self.create_role(name, get_lambda_assumerole_policy())

    def generate_policy_statements(self):
        """Should be overridden on a subclass to create policy statements.

        By subclassing this blueprint, and overriding this method to generate
        a list of :class:`awacs.aws.Statement` types, a
        :class:`troposphere.iam.PolicyType` will be created and attached to
        the roles specified here.

        If not specified, no Policy will be created.
        """

        return []

    def create_policy(self, name="Roles"):
        """Attach a shared policy (if any statements exist) to all roles.

        Args:
            name: prefix used for the policy's logical name and PolicyName.

        BUG FIX: ``name`` was previously a required positional argument, but
        ``create_template`` called ``self.create_policy()`` with no
        arguments, raising ``TypeError`` whenever a subclass supplied policy
        statements. A default keeps both the old (explicit name) and the
        no-argument call sites working.
        """
        statements = self.generate_policy_statements()
        if not statements:
            # No statements -> deliberately create no policy at all.
            return

        t = self.template

        policy = t.add_resource(
            iam.PolicyType(
                "{}Policy".format(name),
                PolicyName=Sub("${AWS::StackName}-${Name}-policy", Name=name),
                PolicyDocument=Policy(
                    Statement=statements,
                ),
                Roles=[Ref(role) for role in self.roles],
            )
        )

        t.add_output(
            Output(name + "PolicyName", Value=Ref(policy))
        )
        self.policies.append(policy)

    def create_template(self):
        variables = self.get_variables()

        for role in variables['Ec2Roles']:
            self.create_ec2_role(role)

        for role in variables['LambdaRoles']:
            self.create_lambda_role(role)

        # Relies on create_policy's default name (see BUG FIX note there).
        self.create_policy()
import logging

from awacs.aws import (
    Allow,
    AWSPrincipal,
    Policy,
    Statement,
)

import awacs.kms

from troposphere import (
    Join,
    Output,
    Ref,
    kms,
)

from stacker.blueprints.base import Blueprint

logger = logging.getLogger(__name__)


def kms_key_root_statements():
    """Statement granting the account root full access to the key."""
    root_arn = Join(":", ["arn:aws:iam:", Ref("AWS::AccountId"), "root"])

    return [
        Statement(
            Sid="Enable IAM User Permissions",
            Effect=Allow,
            Principal=AWSPrincipal(root_arn),
            Action=[
                awacs.kms.Action("*"),
            ],
            Resource=["*"]
        )
    ]


def kms_key_policy():
    """ Creates a key policy for use of a KMS Key. """

    statements = []
    statements.extend(kms_key_root_statements())

    return Policy(
        Version="2012-10-17",
        Id="root-account-access",
        Statement=statements
    )


class Key(Blueprint):
    """Blueprint creating a KMS key (plus optional alias).

    The key policy is always generated by ``generate_key_policy``; a
    ``KeyPolicy`` supplied via Properties is overwritten (with a warning).
    """

    VARIABLES = {
        "KeyAlias": {
            "type": str,
            "description": "The alias to give the key.",
            "default": "",
        },
        "Properties": {
            "type": dict,
            "description": "A dictionary of KMS key attributes which should "
                           "match the attributes for AWS::KMS::Key "
                           "Cloudformation resource. Note: You should "
                           "not supply a `KeyPolicy` attribute.",
            "default": {},
        },
        "Attributes": {
            "type": dict,
            "description": "Deprecated. Use Properties instead.",
            "default": {},
        }
    }

    def generate_key_policy(self):
        # Override in a subclass to supply a custom key policy.
        return kms_key_policy()

    def create_template(self):
        t = self.template
        variables = self.get_variables()

        key_policy = self.generate_key_policy()
        props = variables["Properties"]

        # NOTE(review): this *raises* DeprecationWarning (hard failure)
        # rather than warnings.warn -- appears intentional to force the
        # Attributes -> Properties migration; confirm before changing.
        if variables["Attributes"]:
            raise DeprecationWarning(
                "Attributes was deprecated, use Properties instead.")

        if "KeyPolicy" in props:
            logger.warning("KeyPolicy provided, but not used. To write "
                           "your own policy you need to subclass this "
                           "blueprint and override `generate_key_policy`.")
        # Always overwrite with the blueprint-generated policy.
        props["KeyPolicy"] = key_policy

        key = t.add_resource(
            kms.Key.from_dict("Key", props)
        )

        # Build the ARN manually; AWS::KMS::Key exposes only the key id
        # via Ref in this troposphere vintage.
        key_arn = Join(
            "",
            [
                "arn:aws:kms:",
                Ref("AWS::Region"),
                ":",
                Ref("AWS::AccountId"),
                ":key/",
                Ref(key)
            ]
        )

        t.add_output(Output("KeyArn", Value=key_arn))
        t.add_output(Output("KeyId", Value=Ref(key)))

        key_alias = variables["KeyAlias"]
        if key_alias:
            # KMS requires aliases to be namespaced under "alias/".
            if not key_alias.startswith("alias/"):
                key_alias = "alias/%s" % key_alias
            alias = t.add_resource(
                kms.Alias(
                    "Alias",
                    AliasName="%s" % key_alias,
                    TargetKeyId=Ref(key)
                )
            )

            t.add_output(Output("KeyAlias", Value=Ref(alias)))
from awacs.aws import (
    Action,
    Allow,
    Policy,
    Principal,
    Statement,
)

from troposphere import (
    Sub,
    Join,
    Region,
    AccountId,
    AWSHelperFn
)

from awacs import (
    sts,
    s3,
    logs,
    ec2,
    dynamodb,
    cloudwatch,
)


def make_simple_assume_statement(*principals):
    """Statement allowing the given service principals to sts:AssumeRole."""
    return Statement(
        Principal=Principal('Service', principals),
        Effect=Allow,
        Action=[sts.AssumeRole])


def make_simple_assume_policy(*principals):
    """Assume-role trust policy for the given service principals."""
    return Policy(
        Statement=[
            make_simple_assume_statement(*principals)])


def dynamodb_arn(table_name):
    # NOTE(review): region & account are left empty in this ARN -- confirm
    # this matches real table ARNs in the target account before relying on
    # it for a restrictive policy.
    return 'arn:aws:dynamodb:::table/{}'.format(table_name)


def dynamodb_arns(table_names):
    return [dynamodb_arn(table_name) for table_name in table_names]


def s3_arn(bucket):
    """ARN for a bucket; accepts a literal name or a CFN intrinsic."""
    if isinstance(bucket, AWSHelperFn):
        return Sub('arn:aws:s3:::${Bucket}', Bucket=bucket)
    else:
        return 'arn:aws:s3:::%s' % bucket


def s3_objects_arn(bucket, folder="*"):
    """ARN covering the objects of a bucket (optionally under a folder)."""
    if isinstance(bucket, AWSHelperFn):
        return Sub('arn:aws:s3:::${Bucket}/%s' % folder, Bucket=bucket)
    else:
        return 'arn:aws:s3:::%s/%s' % (bucket, folder)


def read_only_s3_bucket_policy_statements(buckets, folder="*"):
    """ Read only policy an s3 bucket. """
    list_buckets = [s3_arn(b) for b in buckets]
    object_buckets = [s3_objects_arn(b, folder) for b in buckets]

    bucket_resources = list_buckets + object_buckets

    return [
        Statement(
            Effect=Allow,
            Resource=[s3_arn("*")],
            Action=[s3.ListAllMyBuckets]
        ),
        Statement(
            Effect=Allow,
            Resource=bucket_resources,
            Action=[Action('s3', 'Get*'), Action('s3', 'List*')]
        )
    ]


def read_only_s3_bucket_policy(buckets):
    return Policy(Statement=read_only_s3_bucket_policy_statements(buckets))


def read_write_s3_bucket_policy_statements(buckets, folder="*"):
    """Statements granting list/read/write/delete on the given buckets."""
    list_buckets = [s3_arn(b) for b in buckets]
    object_buckets = [s3_objects_arn(b, folder) for b in buckets]
    return [
        Statement(
            # Consistency fix: was the string "Allow"; every sibling
            # statement uses the awacs Allow constant (same value).
            Effect=Allow,
            Action=[
                s3.GetBucketLocation,
                s3.ListAllMyBuckets,
            ],
            Resource=[s3_arn("*")]
        ),
        Statement(
            Effect=Allow,
            Action=[
                s3.ListBucket,
                s3.GetBucketVersioning,
            ],
            Resource=list_buckets,
        ),
        Statement(
            Effect=Allow,
            Action=[
                s3.GetObject,
                s3.PutObject,
                s3.PutObjectAcl,
                s3.DeleteObject,
                s3.GetObjectVersion,
                s3.DeleteObjectVersion,
            ],
            Resource=object_buckets,
        ),
    ]


def read_write_s3_bucket_policy(buckets):
    return Policy(Statement=read_write_s3_bucket_policy_statements(buckets))


def static_website_bucket_policy(bucket):
    """
    Attach this policy directly to an S3 bucket to make it a static website.
    This policy grants read access to **all unauthenticated** users.
    """
    return Policy(
        Statement=[
            Statement(
                Effect=Allow,
                Principal=Principal("*"),
                Action=[s3.GetObject],
                Resource=[s3_objects_arn(bucket)],
            )
        ]
    )


def log_stream_arn(log_group_name, log_stream_name):
    """ARN for a specific CloudWatch Logs stream in the current region."""
    return Join(
        '',
        [
            "arn:aws:logs:", Region, ":", AccountId, ":log-group:",
            log_group_name, ":log-stream:", log_stream_name
        ]
    )


def write_to_cloudwatch_logs_stream_statements(log_group_name,
                                               log_stream_name):
    return [
        Statement(
            Effect=Allow,
            Action=[logs.PutLogEvents],
            Resource=[log_stream_arn(log_group_name, log_stream_name)]
        )
    ]


def write_to_cloudwatch_logs_stream_policy(log_group_name, log_stream_name):
    return Policy(
        Statement=write_to_cloudwatch_logs_stream_statements(log_group_name,
                                                             log_stream_name)
    )


def cloudwatch_logs_write_statements(log_group=None):
    """Create/write statements, scoped to one log group when given."""
    resources = ["arn:aws:logs:*:*:*"]
    if log_group:
        log_group_parts = ["arn:aws:logs:", Region, ":", AccountId,
                           ":log-group:", log_group]
        log_group_arn = Join("", log_group_parts)
        log_stream_wild = Join("", log_group_parts + [":*"])
        resources = [log_group_arn, log_stream_wild]

    return [
        Statement(
            Effect=Allow,
            Resource=resources,
            Action=[
                logs.CreateLogGroup,
                logs.CreateLogStream,
                logs.PutLogEvents
            ]
        )
    ]


def lambda_basic_execution_statements(function_name):
    # Lambda writes to /aws/lambda/<function_name> by convention.
    log_group = Join("/", ["/aws/lambda", function_name])
    return cloudwatch_logs_write_statements(log_group)


def lambda_basic_execution_policy(function_name):
    return Policy(Statement=lambda_basic_execution_statements(function_name))


def lambda_vpc_execution_statements():
    """Allow Lambda to manipuate EC2 ENIs for VPC support."""
    return [
        Statement(
            Effect=Allow,
            Resource=['*'],
            Action=[
                ec2.CreateNetworkInterface,
                ec2.DescribeNetworkInterfaces,
                ec2.DeleteNetworkInterface,
            ]
        )
    ]


def flowlogs_assumerole_policy():
    return make_simple_assume_policy("vpc-flow-logs.amazonaws.com")


# reference: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-dynamodb-table.html#cfn-dynamodb-table-examples-application-autoscaling # noqa
def dynamodb_autoscaling_policy(tables):
    """Policy to allow AutoScaling a list of DynamoDB tables."""
    return Policy(
        Statement=[
            Statement(
                Effect=Allow,
                Resource=dynamodb_arns(tables),
                Action=[
                    dynamodb.DescribeTable,
                    dynamodb.UpdateTable,
                ]
            ),
            Statement(
                Effect=Allow,
                Resource=['*'],
                Action=[
                    cloudwatch.PutMetricAlarm,
                    cloudwatch.DescribeAlarms,
                    cloudwatch.GetMetricStatistics,
                    cloudwatch.SetAlarmState,
                    cloudwatch.DeleteAlarms,
                ]
            ),
        ]
    )
# ---- stacker_blueprints/postgres.py ----
from troposphere import (
    Ref, ec2, Output, GetAtt, Not, Equals, Condition, And, Join
)
from troposphere.rds import DBInstance, DBSubnetGroup
from troposphere.route53 import RecordSetType

from stacker.blueprints.base import Blueprint
from stacker.blueprints.variables.types import (
    CFNNumber,
    CFNString,
    EC2SubnetIdList,
    EC2VPCId,
)

RDS_INSTANCE_NAME = "PostgresRDS%s"
RDS_SUBNET_GROUP = "%sSubnetGroup"
RDS_SG_NAME = "RdsSG%s"


class PostgresRDS(Blueprint):
    """Blueprint for a Multi-AZ postgres RDS instance with an optional
    internal route53 CNAME (created only when zone id, zone name and
    hostname are all supplied).
    """

    VARIABLES = {
        'VpcId': {'type': EC2VPCId, 'description': 'Vpc Id'},
        'PrivateSubnets': {'type': EC2SubnetIdList,
                           'description': 'Subnets to deploy private '
                                          'instances in.'},
        'InstanceType': {'type': CFNString,
                         'description': 'AWS RDS Instance Type',
                         'default': 'db.m3.large'},
        'AllocatedStorage': {'type': CFNNumber,
                             'description': 'Space, in GB, to allocate to RDS '
                                            'instance.',
                             'default': '10'},
        'MasterUser': {'type': CFNString,
                       'description': 'Name of the master user in the db.',
                       'default': 'dbuser'},
        'MasterUserPassword': {'type': CFNString,
                               'description': 'Master user password.'},
        'PreferredBackupWindow': {
            'type': CFNString,
            'description': 'A (minimum 30 minute) window in HH:MM-HH:MM '
                           'format in UTC for backups. Default: 3am-4am',
            'default': '11:00-12:00'},
        'DBName': {
            'type': CFNString,
            'description': 'Initial db to create in database.'},
        "InternalZoneId": {
            "type": CFNString,
            "default": "",
            "description": "Internal zone Id, if you have one."},
        "InternalZoneName": {
            "type": CFNString,
            "default": "",
            "description": "Internal zone name, if you have one."},
        "InternalHostname": {
            "type": CFNString,
            "default": "",
            "description": "Internal domain name, if you have one."},
    }

    def create_conditions(self):
        # The DNS record is only created when all three internal-zone
        # variables are non-empty (CreateInternalHostname).
        self.template.add_condition(
            "HasInternalZone",
            Not(Equals(Ref("InternalZoneId"), "")))
        self.template.add_condition(
            "HasInternalZoneName",
            Not(Equals(Ref("InternalZoneName"), "")))
        self.template.add_condition(
            "HasInternalHostname",
            Not(Equals(Ref("InternalHostname"), "")))
        self.template.add_condition(
            "CreateInternalHostname",
            And(Condition("HasInternalZone"),
                Condition("HasInternalZoneName"),
                Condition("HasInternalHostname")))

    def create_subnet_group(self):
        """DB subnet group spanning the private subnets."""
        t = self.template
        t.add_resource(
            DBSubnetGroup(
                RDS_SUBNET_GROUP % self.name,
                DBSubnetGroupDescription="%s VPC subnet group." % self.name,
                SubnetIds=Ref('PrivateSubnets')))

    def create_security_group(self):
        """Empty SG for the DB; ingress rules are added by other stacks."""
        t = self.template
        sg_name = RDS_SG_NAME % self.name
        sg = t.add_resource(
            ec2.SecurityGroup(
                sg_name,
                GroupDescription='%s RDS security group' % sg_name,
                VpcId=Ref("VpcId")))
        t.add_output(Output("SecurityGroup", Value=Ref(sg)))

    def create_rds(self):
        """Create the DBInstance, optional CNAME, and outputs."""
        t = self.template
        db_name = RDS_INSTANCE_NAME % self.name
        # NOTE(review): engine version (9.3.14), MultiAZ and 30-day backup
        # retention are hard-coded here rather than variables.
        t.add_resource(
            DBInstance(
                db_name,
                AllocatedStorage=Ref('AllocatedStorage'),
                AllowMajorVersionUpgrade=False,
                AutoMinorVersionUpgrade=True,
                BackupRetentionPeriod=30,
                DBName=Ref('DBName'),
                DBInstanceClass=Ref('InstanceType'),
                DBSubnetGroupName=Ref(RDS_SUBNET_GROUP % self.name),
                Engine='postgres',
                EngineVersion='9.3.14',
                MasterUsername=Ref('MasterUser'),
                MasterUserPassword=Ref('MasterUserPassword'),
                MultiAZ=True,
                PreferredBackupWindow=Ref('PreferredBackupWindow'),
                VPCSecurityGroups=[Ref(RDS_SG_NAME % self.name), ]))

        endpoint = GetAtt(db_name, 'Endpoint.Address')

        # Setup CNAME to db: "<InternalHostname>.<InternalZoneName>",
        # only when CreateInternalHostname holds.
        t.add_resource(
            RecordSetType(
                '%sDnsRecord' % db_name,
                HostedZoneId=Ref("InternalZoneId"),
                Comment='RDS DB CNAME Record',
                Name=Join(".", [Ref("InternalHostname"),
                                Ref("InternalZoneName")]),
                Type='CNAME',
                TTL='120',
                ResourceRecords=[endpoint],
                Condition="CreateInternalHostname"))
        t.add_output(Output('DBAddress', Value=endpoint))
        t.add_output(
            Output(
                'DBCname',
                Condition="CreateInternalHostname",
                Value=Ref("%sDnsRecord" % db_name)))

    def create_template(self):
        self.create_conditions()
        self.create_subnet_group()
        self.create_security_group()
        self.create_rds()


# ---- stacker_blueprints/rds/mysql.py ----
from .base import MasterInstance, ReadReplica


class MySQLMixin(object):
    """Mixin selecting the MySQL engine for the generic RDS blueprints."""
    def engine(self):
        return "MySQL"


# NOTE: each subclass deliberately shadows the imported base name; this
# works because base classes are evaluated before the new name is bound.
class MasterInstance(MySQLMixin, MasterInstance):
    pass


class ReadReplica(MySQLMixin, ReadReplica):
    pass
# ---- stacker_blueprints/rds/postgres.py ----
from .base import MasterInstance, ReadReplica


class PostgresMixin(object):
    """Mixin selecting the postgres engine for the generic RDS blueprints."""
    def engine(self):
        return "postgres"


# NOTE: each subclass deliberately shadows the imported base name; this
# works because base classes are evaluated before the new name is bound.
class MasterInstance(PostgresMixin, MasterInstance):
    pass


class ReadReplica(PostgresMixin, ReadReplica):
    pass
{"endpoint": "s3-website.eu-west-3.amazonaws.com"}, 37 | "sa-east-1": {"endpoint": "s3-website-sa-east-1.amazonaws.com"}, 38 | } 39 | 40 | 41 | class Buckets(Blueprint): 42 | VARIABLES = { 43 | "Buckets": { 44 | "type": dict, 45 | "description": "A dictionary of buckets to create. The key " 46 | "being the CFN logical resource name, the " 47 | "value being a dictionary of attributes for " 48 | "the troposphere s3.Bucket type.", 49 | "default": {} 50 | }, 51 | "ReadWriteRoles": { 52 | "type": list, 53 | "description": "A list of roles that should have read/write " 54 | "access to the buckets created.", 55 | "default": [] 56 | }, 57 | "ReadRoles": { 58 | "type": list, 59 | "description": "A list of roles that should have read-only " 60 | "access to the buckets created.", 61 | "default": [] 62 | }, 63 | 64 | } 65 | 66 | def create_template(self): 67 | t = self.template 68 | variables = self.get_variables() 69 | 70 | bucket_ids = [] 71 | 72 | for title, attrs in variables["Buckets"].items(): 73 | bucket_id = Ref(title) 74 | t.add_resource(s3.Bucket.from_dict(title, attrs)) 75 | t.add_output(Output(title + "BucketId", Value=bucket_id)) 76 | t.add_output(Output(title + "BucketArn", Value=s3_arn(bucket_id))) 77 | t.add_output( 78 | Output( 79 | title + "BucketDomainName", 80 | Value=GetAtt(title, "DomainName") 81 | ) 82 | ) 83 | if "WebsiteConfiguration" in attrs: 84 | t.add_mapping("WebsiteEndpoints", S3_WEBSITE_ENDPOINTS) 85 | 86 | t.add_resource( 87 | s3.BucketPolicy( 88 | title + "BucketPolicy", 89 | Bucket=bucket_id, 90 | PolicyDocument=static_website_bucket_policy(bucket_id), 91 | ) 92 | ) 93 | t.add_output( 94 | Output( 95 | title + "WebsiteUrl", 96 | Value=GetAtt(title, "WebsiteURL") 97 | ) 98 | ) 99 | t.add_output( 100 | Output( 101 | title + "WebsiteEndpoint", 102 | Value=FindInMap( 103 | "WebsiteEndpoints", Region, "endpoint" 104 | ) 105 | ) 106 | ) 107 | 108 | bucket_ids.append(bucket_id) 109 | 110 | read_write_roles = variables["ReadWriteRoles"] 111 | if 
read_write_roles: 112 | t.add_resource( 113 | iam.PolicyType( 114 | "ReadWritePolicy", 115 | PolicyName=Sub("${AWS::StackName}ReadWritePolicy"), 116 | PolicyDocument=read_write_s3_bucket_policy( 117 | bucket_ids 118 | ), 119 | Roles=read_write_roles, 120 | ) 121 | ) 122 | 123 | read_only_roles = variables["ReadRoles"] 124 | if read_only_roles: 125 | t.add_resource( 126 | iam.PolicyType( 127 | "ReadPolicy", 128 | PolicyName=Sub("${AWS::StackName}ReadPolicy"), 129 | PolicyDocument=read_only_s3_bucket_policy( 130 | bucket_ids 131 | ), 132 | Roles=read_only_roles, 133 | ) 134 | ) 135 | -------------------------------------------------------------------------------- /stacker_blueprints/security_rules.py: -------------------------------------------------------------------------------- 1 | from troposphere.ec2 import SecurityGroupIngress, SecurityGroupEgress 2 | from stacker.blueprints.base import Blueprint 3 | 4 | CLASS_MAP = { 5 | "IngressRules": SecurityGroupIngress, 6 | "EgressRules": SecurityGroupEgress, 7 | } 8 | 9 | 10 | class Rules(Blueprint): 11 | """Used to add Ingress/Egress rules to existing security groups. 
12 | 13 | This blueprint uses two variables: 14 | IngressRules: 15 | A dict with keys of the virtual titles for each rule, and with the 16 | value being a dict of the parameters taken directly from: 17 | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-security-group-ingress.html 18 | EgressRules: 19 | A dict with keys of the virtual titles for each rule, and with the 20 | value being a dict of the parameters taken directly from: 21 | http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ec2-security-group-egress.html 22 | 23 | An example: 24 | 25 | name: mySecurityRules 26 | class_path: stacker_blueprints.security_rules.Rules 27 | variables: 28 | IngressRules: 29 | All80ToWebserverGroup: 30 | CidrIp: 0.0.0.0/0 31 | FromPort: 80 32 | ToPort: 80 33 | GroupId: ${output WebserverStack::SecurityGroup} 34 | IpProtocol: tcp 35 | """ 36 | 37 | VARIABLES = { 38 | "IngressRules": { 39 | "type": dict, 40 | "description": "A dict of ingress rules where the key is the " 41 | "name of the rule to create, and the value is " 42 | "a dictionary of keys/values based on the " 43 | "attributes of the " 44 | ":class:`troposphere.ec2.SecurityGroupIngress` " 45 | "class.", 46 | "default": {}, 47 | }, 48 | "EgressRules": { 49 | "type": dict, 50 | "description": "A dict of ingress rules where the key is the " 51 | "name of the rule to create, and the value is " 52 | "a dictionary of keys/values based on the " 53 | "attributes of the " 54 | ":class:`troposphere.ec2.SecurityGroupEgress` " 55 | "class.", 56 | "default": {}, 57 | } 58 | } 59 | 60 | def create_security_rules(self): 61 | t = self.template 62 | variables = self.get_variables() 63 | for rule_type, rule_class in CLASS_MAP.items(): 64 | for rule_title, rule_attrs in variables[rule_type].items(): 65 | t.add_resource(rule_class.from_dict(rule_title, rule_attrs)) 66 | 67 | def create_template(self): 68 | self.create_security_rules() 69 | 
from stacker.blueprints.base import Blueprint

from troposphere import (
    sns,
    sqs,
    Ref,
    GetAtt,
    Output,
)

from . import util

import awacs
import awacs.sqs

from awacs.aws import (
    Policy,
    Statement,
    Condition,
    ArnEquals,
    Principal,
)


def queue_policy(sns_arn, sqs_arns):
    """Build an SQS access policy allowing an SNS topic to publish.

    Args:
        sns_arn: ARN (or Ref) of the SNS topic allowed to send messages.
        sqs_arns: list of SQS queue ARNs the policy should cover.

    Returns:
        awacs.aws.Policy with one SendMessage statement per queue,
        conditioned on the message source being ``sns_arn``.
    """
    stmts = []
    for arn in sqs_arns:
        stmts.append(
            Statement(
                Effect="Allow",
                Principal=Principal("*"),
                Action=[awacs.sqs.SendMessage],
                Resource=[arn],
                Condition=Condition(
                    ArnEquals({"aws:SourceArn": sns_arn})
                )
            )
        )

    return Policy(Statement=stmts)


def validate_topic(topic):
    """Validate that a topic config only uses supported SNS properties."""
    sns_topic_properties = [
        "DisplayName",
        "Subscription",
    ]

    util.check_properties(topic, sns_topic_properties, "SNS")

    return topic


def validate_topics(topics):
    """Validate each topic config in a dict; return the validated dict."""
    validated_topics = {}
    # .items() instead of the py2-only .iteritems(): identical behavior on
    # Python 2 and keeps this module importable on Python 3, matching the
    # rest of the codebase (e.g. security_rules.py, s3.py).
    for topic_name, topic_config in topics.items():
        validated_topics[topic_name] = validate_topic(topic_config)

    return validated_topics


class Topics(Blueprint):
    """
    Manages the creation of SNS topics.
    """

    VARIABLES = {
        "Topics": {
            "type": dict,
            "description": "Dictionary of SNS Topic definitions",
            "validator": validate_topics,
        }
    }

    def create_template(self):
        variables = self.get_variables()

        # .items() instead of py2-only .iteritems() (see validate_topics).
        for topic_name, topic_config in variables["Topics"].items():
            self.create_topic(topic_name, topic_config)

    def create_sqs_policy(self, topic_name, topic_arn, topic_subs):
        """
        This method creates the SQS policy needed for an SNS subscription. It
        also takes the ARN of the SQS queue and converts it to the URL needed
        for the subscription, as that takes a URL rather than the ARN.
        """
        t = self.template

        arn_endpoints = []
        url_endpoints = []
        for sub in topic_subs:
            arn_endpoints.append(sub["Endpoint"])
            # ARN format: arn:aws:sqs:<region>:<account-id>:<queue-name>
            split_endpoint = sub["Endpoint"].split(":")
            queue_url = "https://%s.%s.amazonaws.com/%s/%s" % (
                split_endpoint[2],  # literally "sqs"
                split_endpoint[3],  # AWS region
                split_endpoint[4],  # AWS ID
                split_endpoint[5],  # Queue name
            )
            url_endpoints.append(queue_url)

        policy_doc = queue_policy(topic_arn, arn_endpoints)

        t.add_resource(
            sqs.QueuePolicy(
                topic_name + "SubPolicy",
                PolicyDocument=policy_doc,
                Queues=url_endpoints,
            )
        )

    def create_topic(self, topic_name, topic_config):
        """
        Creates the SNS topic, along with any subscriptions requested.
        """
        topic_subs = []
        t = self.template

        if "Subscription" in topic_config:
            topic_subs = topic_config["Subscription"]

        t.add_resource(
            sns.Topic.from_dict(
                topic_name,
                topic_config
            )
        )

        topic_arn = Ref(topic_name)

        t.add_output(
            Output(topic_name + "Name", Value=GetAtt(topic_name, "TopicName"))
        )
        t.add_output(Output(topic_name + "Arn", Value=topic_arn))

        # Only SQS subscriptions need an explicit queue policy granting the
        # topic permission to deliver messages.
        sqs_subs = [sub for sub in topic_subs if sub["Protocol"] == "sqs"]
        if sqs_subs:
            self.create_sqs_policy(topic_name, topic_arn, sqs_subs)
class Queues(Blueprint):
    """Manages the creation of SQS queues."""

    VARIABLES = {
        "Queues": {
            "type": TroposphereType(sqs.Queue, many=True),
            "description": "Dictionary of SQS queue definitions",
        },
    }

    def create_template(self):
        """Add each configured queue plus its Arn and Url outputs."""
        template = self.template
        queues = self.get_variables()["Queues"]

        for queue in queues:
            title = queue.title
            template.add_resource(queue)
            template.add_output(
                Output(title + "Arn", Value=GetAtt(queue, "Arn"))
            )
            # Ref on an SQS queue resolves to the queue URL.
            template.add_output(Output(title + "Url", Value=Ref(queue)))
17 | """ 18 | for key in properties.keys(): 19 | if key not in allowed_properties: 20 | raise ValueError( 21 | "%s is not a valid property of %s" % (key, resource) 22 | ) 23 | 24 | 25 | def _tags_to_dict(tag_list): 26 | return dict((tag['Key'], tag['Value']) for tag in tag_list) 27 | 28 | 29 | def merge_tags(left, right, factory=Tags): 30 | """ 31 | Merge two sets of tags into a new troposphere object 32 | 33 | Args: 34 | left (Union[dict, troposphere.Tags]): dictionary or Tags object to be 35 | merged with lower priority 36 | right (Union[dict, troposphere.Tags]): dictionary or Tags object to be 37 | merged with higher priority 38 | factory (type): Type of object to create. Defaults to the troposphere 39 | Tags class. 40 | """ 41 | 42 | if isinstance(left, Mapping): 43 | tags = dict(left) 44 | elif hasattr(left, 'tags'): 45 | tags = _tags_to_dict(left.tags) 46 | else: 47 | tags = _tags_to_dict(left) 48 | 49 | if isinstance(right, Mapping): 50 | tags.update(right) 51 | elif hasattr(left, 'tags'): 52 | tags.update(_tags_to_dict(right.tags)) 53 | else: 54 | tags.update(_tags_to_dict(right)) 55 | 56 | return factory(**tags) 57 | -------------------------------------------------------------------------------- /stacker_blueprints/vpc_flow_logs.py: -------------------------------------------------------------------------------- 1 | from troposphere import ( 2 | GetAtt, 3 | Join, 4 | Output, 5 | Ref, 6 | iam, 7 | logs, 8 | ec2, 9 | ) 10 | 11 | from troposphere.iam import Policy as TropoPolicy 12 | 13 | from stacker.blueprints.base import Blueprint 14 | 15 | from awacs.aws import ( 16 | Statement, 17 | Policy, 18 | ) 19 | 20 | import awacs 21 | import awacs.logs 22 | 23 | from .policies import flowlogs_assumerole_policy 24 | from .cloudwatch_logs import ( 25 | LOG_RETENTION_STRINGS, 26 | validate_cloudwatch_log_retention 27 | ) 28 | 29 | ALLOWED_TRAFFIC_TYPES = ["ACCEPT", "REJECT", "ALL"] 30 | JOINED_TRAFFIC_TYPES = '/'.join(ALLOWED_TRAFFIC_TYPES) 31 | 
LOG_RETENTION_DEFAULT = 0
CLOUDWATCH_ROLE_NAME = "Role"
FLOW_LOG_GROUP_NAME = "LogGroup"
FLOW_LOG_STREAM_NAME = "LogStream"


def vpc_flow_log_cloudwatch_policy(log_group_arn):
    """Return the IAM policy VPC flow logs need to write to a log group.

    Args:
        log_group_arn: ARN (or GetAtt) of the destination CloudWatch
            log group.
    """
    # DescribeLogGroups cannot be scoped to a single group.
    describe_stmt = Statement(
        Effect="Allow",
        Action=[
            awacs.logs.DescribeLogGroups
        ],
        Resource=["*"],
    )
    # Write access to the log group itself and every stream under it.
    write_stmt = Statement(
        Effect="Allow",
        Action=[
            awacs.logs.CreateLogStream,
            awacs.logs.DescribeLogStreams,
            awacs.logs.PutLogEvents,
        ],
        Resource=[
            log_group_arn,
            Join('', [log_group_arn, ":*"]),
        ],
    )
    return Policy(Statement=[describe_stmt, write_stmt])


def validate_traffic_type(traffic_type):
    """Variable validator: traffic type must be ACCEPT, REJECT or ALL."""
    if traffic_type not in ALLOWED_TRAFFIC_TYPES:
        raise ValueError(
            "Traffic type must be one of the following: " +
            "%s" % JOINED_TRAFFIC_TYPES
        )

    return traffic_type
    def create_template(self):
        """Build the stack: log group, delivery IAM role, and the flow log.

        Emits Name/Arn outputs for the log group and role, and a Name
        output for the flow log resource.
        """
        t = self.template
        variables = self.get_variables()

        # CloudWatch log group that flow log records are delivered into.
        self.log_group = t.add_resource(
            logs.LogGroup(
                FLOW_LOG_GROUP_NAME,
                RetentionInDays=variables["Retention"],
            )
        )

        t.add_output(
            Output(
                "%sName" % FLOW_LOG_GROUP_NAME,
                Value=Ref(self.log_group)
            )
        )
        t.add_output(
            Output(
                "%sArn" % FLOW_LOG_GROUP_NAME,
                Value=GetAtt(self.log_group, "Arn")
            )
        )

        # Role assumed by the VPC flow logs service to write to the group.
        self.role = t.add_resource(
            iam.Role(
                CLOUDWATCH_ROLE_NAME,
                AssumeRolePolicyDocument=flowlogs_assumerole_policy(),
                Path="/",
                Policies=[
                    TropoPolicy(
                        PolicyName="vpc_cloudwatch_flowlog_policy",
                        PolicyDocument=vpc_flow_log_cloudwatch_policy(
                            GetAtt(self.log_group, "Arn")
                        ),
                    ),
                ]
            )
        )

        t.add_output(
            Output(
                "%sName" % CLOUDWATCH_ROLE_NAME,
                Value=Ref(self.role)
            )
        )
        role_arn = GetAtt(self.role, "Arn")
        t.add_output(
            Output(
                "%sArn" % CLOUDWATCH_ROLE_NAME,
                Value=role_arn
            )
        )

        # The flow log itself, attached to the given VPC.
        self.log_stream = t.add_resource(
            ec2.FlowLog(
                FLOW_LOG_STREAM_NAME,
                DeliverLogsPermissionArn=role_arn,
                LogGroupName=Ref(FLOW_LOG_GROUP_NAME),
                ResourceId=variables["VpcId"],
                ResourceType="VPC",
                TrafficType=variables["TrafficType"],
            )
        )

        t.add_output(
            Output(
                "%sName" % FLOW_LOG_STREAM_NAME,
                Value=Ref(self.log_stream)
            )
        )
https://raw.githubusercontent.com/remind101/stacker_blueprints/97791cc7c3f1c17e9d1547a45c059fae5b1cc204/tests/__init__.py -------------------------------------------------------------------------------- /tests/fixtures/blueprints/dynamodb_table.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "UserTableName": { 4 | "Value": { 5 | "Ref": "UserTable" 6 | } 7 | }, 8 | "UserTableStreamArn": { 9 | "Value": { 10 | "Fn::GetAtt": [ 11 | "UserTable", 12 | "StreamArn" 13 | ] 14 | } 15 | } 16 | }, 17 | "Resources": { 18 | "UserTable": { 19 | "Properties": { 20 | "AttributeDefinitions": [ 21 | { 22 | "AttributeName": "id", 23 | "AttributeType": "S" 24 | }, 25 | { 26 | "AttributeName": "name", 27 | "AttributeType": "S" 28 | } 29 | ], 30 | "KeySchema": [ 31 | { 32 | "AttributeName": "id", 33 | "KeyType": "HASH" 34 | }, 35 | { 36 | "AttributeName": "name", 37 | "KeyType": "RANGE" 38 | } 39 | ], 40 | "ProvisionedThroughput": { 41 | "ReadCapacityUnits": 5, 42 | "WriteCapacityUnits": 5 43 | }, 44 | "StreamSpecification": { 45 | "StreamViewType": "ALL" 46 | }, 47 | "TableName": "test-user-table" 48 | }, 49 | "Type": "AWS::DynamoDB::Table" 50 | } 51 | } 52 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/ec2_instances.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "MyInstanceAZ": { 4 | "Value": { 5 | "Fn::GetAtt": [ 6 | "MyInstance", 7 | "AvailabilityZone" 8 | ] 9 | } 10 | }, 11 | "MyInstanceInstanceId": { 12 | "Value": { 13 | "Ref": "MyInstance" 14 | } 15 | }, 16 | "MyInstancePrivateDnsName": { 17 | "Value": { 18 | "Fn::GetAtt": [ 19 | "MyInstance", 20 | "PrivateDnsName" 21 | ] 22 | } 23 | }, 24 | "MyInstancePrivateIp": { 25 | "Value": { 26 | "Fn::GetAtt": [ 27 | "MyInstance", 28 | "PrivateIp" 29 | ] 30 | } 31 | }, 32 | "MyInstancePublicDnsName": { 33 | "Value": { 34 | "Fn::GetAtt": [ 35 | 
"MyInstance", 36 | "PublicDnsName" 37 | ] 38 | } 39 | }, 40 | "MyInstancePublicIp": { 41 | "Value": { 42 | "Fn::GetAtt": [ 43 | "MyInstance", 44 | "PublicIp" 45 | ] 46 | } 47 | } 48 | }, 49 | "Resources": { 50 | "MyInstance": { 51 | "Properties": { 52 | "ImageId": "ami-abc12345" 53 | }, 54 | "Type": "AWS::EC2::Instance" 55 | } 56 | } 57 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/kms_key_a.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "KeyAlias": { 4 | "Value": { 5 | "Ref": "Alias" 6 | } 7 | }, 8 | "KeyArn": { 9 | "Value": { 10 | "Fn::Join": [ 11 | "", 12 | [ 13 | "arn:aws:kms:", 14 | { 15 | "Ref": "AWS::Region" 16 | }, 17 | ":", 18 | { 19 | "Ref": "AWS::AccountId" 20 | }, 21 | ":key/", 22 | { 23 | "Ref": "Key" 24 | } 25 | ] 26 | ] 27 | } 28 | }, 29 | "KeyId": { 30 | "Value": { 31 | "Ref": "Key" 32 | } 33 | } 34 | }, 35 | "Resources": { 36 | "Alias": { 37 | "Properties": { 38 | "AliasName": "alias/a-test-key", 39 | "TargetKeyId": { 40 | "Ref": "Key" 41 | } 42 | }, 43 | "Type": "AWS::KMS::Alias" 44 | }, 45 | "Key": { 46 | "Properties": { 47 | "Description": "a KMS test-key.", 48 | "KeyPolicy": { 49 | "Id": "root-account-access", 50 | "Statement": [ 51 | { 52 | "Action": [ 53 | "kms:*" 54 | ], 55 | "Effect": "Allow", 56 | "Principal": { 57 | "AWS": { 58 | "Fn::Join": [ 59 | ":", 60 | [ 61 | "arn:aws:iam:", 62 | { 63 | "Ref": "AWS::AccountId" 64 | }, 65 | "root" 66 | ] 67 | ] 68 | } 69 | }, 70 | "Resource": [ 71 | "*" 72 | ], 73 | "Sid": "Enable IAM User Permissions" 74 | } 75 | ], 76 | "Version": "2012-10-17" 77 | } 78 | }, 79 | "Type": "AWS::KMS::Key" 80 | } 81 | } 82 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/kms_key_b.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "KeyAlias": { 4 | "Value": { 5 | 
"Ref": "Alias" 6 | } 7 | }, 8 | "KeyArn": { 9 | "Value": { 10 | "Fn::Join": [ 11 | "", 12 | [ 13 | "arn:aws:kms:", 14 | { 15 | "Ref": "AWS::Region" 16 | }, 17 | ":", 18 | { 19 | "Ref": "AWS::AccountId" 20 | }, 21 | ":key/", 22 | { 23 | "Ref": "Key" 24 | } 25 | ] 26 | ] 27 | } 28 | }, 29 | "KeyId": { 30 | "Value": { 31 | "Ref": "Key" 32 | } 33 | } 34 | }, 35 | "Resources": { 36 | "Alias": { 37 | "Properties": { 38 | "AliasName": "alias/b-test-key", 39 | "TargetKeyId": { 40 | "Ref": "Key" 41 | } 42 | }, 43 | "Type": "AWS::KMS::Alias" 44 | }, 45 | "Key": { 46 | "Properties": { 47 | "Description": "b KMS test-key.", 48 | "KeyPolicy": { 49 | "Id": "root-account-access", 50 | "Statement": [ 51 | { 52 | "Action": [ 53 | "kms:*" 54 | ], 55 | "Effect": "Allow", 56 | "Principal": { 57 | "AWS": { 58 | "Fn::Join": [ 59 | ":", 60 | [ 61 | "arn:aws:iam:", 62 | { 63 | "Ref": "AWS::AccountId" 64 | }, 65 | "root" 66 | ] 67 | ] 68 | } 69 | }, 70 | "Resource": [ 71 | "*" 72 | ], 73 | "Sid": "Enable IAM User Permissions" 74 | } 75 | ], 76 | "Version": "2012-10-17" 77 | } 78 | }, 79 | "Type": "AWS::KMS::Key" 80 | } 81 | } 82 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/kms_key_c.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "KeyAlias": { 4 | "Value": { 5 | "Ref": "Alias" 6 | } 7 | }, 8 | "KeyArn": { 9 | "Value": { 10 | "Fn::Join": [ 11 | "", 12 | [ 13 | "arn:aws:kms:", 14 | { 15 | "Ref": "AWS::Region" 16 | }, 17 | ":", 18 | { 19 | "Ref": "AWS::AccountId" 20 | }, 21 | ":key/", 22 | { 23 | "Ref": "Key" 24 | } 25 | ] 26 | ] 27 | } 28 | }, 29 | "KeyId": { 30 | "Value": { 31 | "Ref": "Key" 32 | } 33 | } 34 | }, 35 | "Resources": { 36 | "Alias": { 37 | "Properties": { 38 | "AliasName": "alias/c-test-key", 39 | "TargetKeyId": { 40 | "Ref": "Key" 41 | } 42 | }, 43 | "Type": "AWS::KMS::Alias" 44 | }, 45 | "Key": { 46 | "Properties": { 47 | "KeyPolicy": { 48 | 
"Id": "root-account-access", 49 | "Statement": [ 50 | { 51 | "Action": [ 52 | "kms:*" 53 | ], 54 | "Effect": "Allow", 55 | "Principal": { 56 | "AWS": { 57 | "Fn::Join": [ 58 | ":", 59 | [ 60 | "arn:aws:iam:", 61 | { 62 | "Ref": "AWS::AccountId" 63 | }, 64 | "root" 65 | ] 66 | ] 67 | } 68 | }, 69 | "Resource": [ 70 | "*" 71 | ], 72 | "Sid": "Enable IAM User Permissions" 73 | } 74 | ], 75 | "Version": "2012-10-17" 76 | } 77 | }, 78 | "Type": "AWS::KMS::Key" 79 | } 80 | } 81 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/queues.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "FifoArn": { 4 | "Value": { 5 | "Fn::GetAtt": [ 6 | "Fifo", 7 | "Arn" 8 | ] 9 | } 10 | }, 11 | "FifoUrl": { 12 | "Value": { 13 | "Ref": "Fifo" 14 | } 15 | }, 16 | "RedrivePolicyArn": { 17 | "Value": { 18 | "Fn::GetAtt": [ 19 | "RedrivePolicy", 20 | "Arn" 21 | ] 22 | } 23 | }, 24 | "RedrivePolicyUrl": { 25 | "Value": { 26 | "Ref": "RedrivePolicy" 27 | } 28 | }, 29 | "SimpleArn": { 30 | "Value": { 31 | "Fn::GetAtt": [ 32 | "Simple", 33 | "Arn" 34 | ] 35 | } 36 | }, 37 | "SimpleUrl": { 38 | "Value": { 39 | "Ref": "Simple" 40 | } 41 | } 42 | }, 43 | "Resources": { 44 | "Fifo": { 45 | "Properties": { 46 | "FifoQueue": true, 47 | "QueueName": "Fifo.fifo" 48 | }, 49 | "Type": "AWS::SQS::Queue" 50 | }, 51 | "RedrivePolicy": { 52 | "Properties": { 53 | "RedrivePolicy": { 54 | "deadLetterTargetArn": "arn:aws:sqs:us-east-1:123456789:dlq", 55 | "maxReceiveCount": 3 56 | } 57 | }, 58 | "Type": "AWS::SQS::Queue" 59 | }, 60 | "Simple": { 61 | "Properties": { 62 | "DelaySeconds": 15, 63 | "MaximumMessageSize": 4096, 64 | "ReceiveMessageWaitTimeSeconds": 15, 65 | "VisibilityTimeout": 600 66 | }, 67 | "Type": "AWS::SQS::Queue" 68 | } 69 | } 70 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/route53_dnsrecords.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "HostedZoneId": { 4 | "Value": "fake_zone_id" 5 | } 6 | }, 7 | "Resources": { 8 | "154ad64949b7d01dc1d117e306f5ef2c": { 9 | "Properties": { 10 | "HostedZoneId": "fake_zone_id", 11 | "Name": "host2.testdomain.com.", 12 | "ResourceRecords": [ 13 | "10.0.0.2" 14 | ], 15 | "Type": "A" 16 | }, 17 | "Type": "AWS::Route53::RecordSet" 18 | }, 19 | "d8df5bad0c9f04ee2c1f12f25a46a67c": { 20 | "Properties": { 21 | "HostedZoneId": "fake_zone_id", 22 | "Name": "host.testdomain.com.", 23 | "ResourceRecords": [ 24 | "10.0.0.1" 25 | ], 26 | "Type": "A" 27 | }, 28 | "Type": "AWS::Route53::RecordSet" 29 | } 30 | } 31 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/route53_dnsrecords_zone_name.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "HostedZoneId": { 4 | "Value": { 5 | "Ref": "HostedZone" 6 | } 7 | }, 8 | "NameServers": { 9 | "Value": { 10 | "Fn::Join": [ 11 | ",", 12 | { 13 | "Fn::GetAtt": [ 14 | "HostedZone", 15 | "NameServers" 16 | ] 17 | } 18 | ] 19 | } 20 | } 21 | }, 22 | "Resources": { 23 | "154ad64949b7d01dc1d117e306f5ef2c": { 24 | "Properties": { 25 | "Comment": "This is host2's record. 
: )", 26 | "HostedZoneId": { 27 | "Ref": "HostedZone" 28 | }, 29 | "Name": "host2.testdomain.com.", 30 | "ResourceRecords": [ 31 | "10.0.0.2" 32 | ], 33 | "Type": "A" 34 | }, 35 | "Type": "AWS::Route53::RecordSet" 36 | }, 37 | "HostedZone": { 38 | "Properties": { 39 | "HostedZoneConfig": { 40 | "Comment": "test-testdomain-com" 41 | }, 42 | "Name": "testdomain.com" 43 | }, 44 | "Type": "AWS::Route53::HostedZone" 45 | }, 46 | "d8df5bad0c9f04ee2c1f12f25a46a67c": { 47 | "Properties": { 48 | "HostedZoneId": { 49 | "Ref": "HostedZone" 50 | }, 51 | "Name": "host.testdomain.com.", 52 | "ResourceRecords": [ 53 | "10.0.0.1" 54 | ], 55 | "Type": "A" 56 | }, 57 | "Type": "AWS::Route53::RecordSet" 58 | } 59 | } 60 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/route53_record_set_groups.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "HostedZoneId": { 4 | "Value": "fake_zone_id" 5 | } 6 | }, 7 | "Resources": { 8 | "Frontend": { 9 | "Properties": { 10 | "HostedZoneId": "fake_zone_id", 11 | "RecordSets": [ 12 | { 13 | "Name": "mysite.example.com", 14 | "ResourceRecords": [ 15 | "example-ec2.amazonaws.com" 16 | ], 17 | "SetIdentifier": "Frontend One", 18 | "Type": "CNAME", 19 | "Weight": "4" 20 | }, 21 | { 22 | "Name": "mysite.example.com", 23 | "ResourceRecords": [ 24 | "example-ec2-larger.amazonaws.com" 25 | ], 26 | "SetIdentifier": "Frontend Two", 27 | "Type": "CNAME", 28 | "Weight": "6" 29 | } 30 | ] 31 | }, 32 | "Type": "AWS::Route53::RecordSetGroup" 33 | } 34 | } 35 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/test_asg_flexible_autoscaling_group.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "AutoScalingGroup": { 4 | "Value": { 5 | "Ref": "AutoScalingGroup" 6 | } 7 | }, 8 | "LaunchConfiguration": { 9 | "Value": { 10 
| "Ref": "LaunchConfiguration" 11 | } 12 | } 13 | }, 14 | "Resources": { 15 | "AutoScalingGroup": { 16 | "Properties": { 17 | "AvailabilityZones": [ 18 | "us-east-1a", 19 | "us-east-1b" 20 | ], 21 | "LaunchConfigurationName": { 22 | "Ref": "LaunchConfiguration" 23 | }, 24 | "MaxSize": 3, 25 | "MinSize": 1 26 | }, 27 | "Type": "AWS::AutoScaling::AutoScalingGroup" 28 | }, 29 | "LaunchConfiguration": { 30 | "Properties": { 31 | "ImageId": "i-abc1234", 32 | "InstanceType": "m3.medium", 33 | "KeyName": "mock_ssh_key", 34 | "SecurityGroups": [ 35 | "sg-abc1234", 36 | "sg-bcd2345" 37 | ] 38 | }, 39 | "Type": "AWS::AutoScaling::LaunchConfiguration" 40 | } 41 | } 42 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/test_aws_lambda_Function.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "FunctionArn": { 4 | "Value": { 5 | "Fn::GetAtt": [ 6 | "Function", 7 | "Arn" 8 | ] 9 | } 10 | }, 11 | "FunctionName": { 12 | "Value": { 13 | "Ref": "Function" 14 | } 15 | }, 16 | "LatestVersion": { 17 | "Value": { 18 | "Fn::GetAtt": [ 19 | "LatestVersion", 20 | "Version" 21 | ] 22 | } 23 | }, 24 | "LatestVersionArn": { 25 | "Value": { 26 | "Ref": "LatestVersion" 27 | } 28 | }, 29 | "PolicyName": { 30 | "Value": { 31 | "Ref": "Policy" 32 | } 33 | }, 34 | "RoleArn": { 35 | "Value": { 36 | "Fn::GetAtt": [ 37 | "Role", 38 | "Arn" 39 | ] 40 | } 41 | }, 42 | "RoleName": { 43 | "Value": { 44 | "Ref": "Role" 45 | } 46 | } 47 | }, 48 | "Resources": { 49 | "Function": { 50 | "Properties": { 51 | "Code": { 52 | "S3Bucket": "test_bucket", 53 | "S3Key": "code_key" 54 | }, 55 | "DeadLetterConfig": { 56 | "TargetArn": "arn:aws:sqs:us-east-1:12345:dlq" 57 | }, 58 | "Description": "Test function.", 59 | "Environment": { 60 | "Variables": { 61 | "Env1": "Value1" 62 | } 63 | }, 64 | "Handler": "handler", 65 | "KmsKeyArn": "arn:aws:kms:us-east-1:12345:key", 66 | "MemorySize": 128, 67 
| "Role": { 68 | "Fn::GetAtt": [ 69 | "Role", 70 | "Arn" 71 | ] 72 | }, 73 | "Runtime": "python2.7", 74 | "Timeout": 3, 75 | "VpcConfig": { 76 | "Ref": "AWS::NoValue" 77 | } 78 | }, 79 | "Type": "AWS::Lambda::Function" 80 | }, 81 | "LatestVersion": { 82 | "Properties": { 83 | "FunctionName": { 84 | "Ref": "Function" 85 | } 86 | }, 87 | "Type": "AWS::Lambda::Version" 88 | }, 89 | "Policy": { 90 | "Properties": { 91 | "PolicyDocument": { 92 | "Statement": [ 93 | { 94 | "Action": [ 95 | "logs:CreateLogGroup", 96 | "logs:CreateLogStream", 97 | "logs:PutLogEvents" 98 | ], 99 | "Effect": "Allow", 100 | "Resource": [ 101 | { 102 | "Fn::Join": [ 103 | "", 104 | [ 105 | "arn:aws:logs:", 106 | { 107 | "Ref": "AWS::Region" 108 | }, 109 | ":", 110 | { 111 | "Ref": "AWS::AccountId" 112 | }, 113 | ":log-group:", 114 | { 115 | "Fn::Join": [ 116 | "/", 117 | [ 118 | "/aws/lambda", 119 | { 120 | "Ref": "Function" 121 | } 122 | ] 123 | ] 124 | } 125 | ] 126 | ] 127 | }, 128 | { 129 | "Fn::Join": [ 130 | "", 131 | [ 132 | "arn:aws:logs:", 133 | { 134 | "Ref": "AWS::Region" 135 | }, 136 | ":", 137 | { 138 | "Ref": "AWS::AccountId" 139 | }, 140 | ":log-group:", 141 | { 142 | "Fn::Join": [ 143 | "/", 144 | [ 145 | "/aws/lambda", 146 | { 147 | "Ref": "Function" 148 | } 149 | ] 150 | ] 151 | }, 152 | ":*" 153 | ] 154 | ] 155 | } 156 | ] 157 | } 158 | ] 159 | }, 160 | "PolicyName": { 161 | "Fn::Sub": "${AWS::StackName}-policy" 162 | }, 163 | "Roles": [ 164 | { 165 | "Ref": "Role" 166 | } 167 | ] 168 | }, 169 | "Type": "AWS::IAM::Policy" 170 | }, 171 | "Role": { 172 | "Properties": { 173 | "AssumeRolePolicyDocument": { 174 | "Statement": [ 175 | { 176 | "Action": [ 177 | "sts:AssumeRole" 178 | ], 179 | "Effect": "Allow", 180 | "Principal": { 181 | "Service": [ 182 | "lambda.amazonaws.com" 183 | ] 184 | } 185 | } 186 | ] 187 | } 188 | }, 189 | "Type": "AWS::IAM::Role" 190 | } 191 | } 192 | } -------------------------------------------------------------------------------- 
/tests/fixtures/blueprints/test_aws_lambda_FunctionScheduler.json: -------------------------------------------------------------------------------- 1 | { 2 | "Resources": { 3 | "MyTestFuncSchedule": { 4 | "Properties": { 5 | "Description": "The AWS Lambda schedule for my-powerful-test-function", 6 | "ScheduleExpression": "rate(15 minutes)", 7 | "State": "ENABLED", 8 | "Targets": [ 9 | { 10 | "Arn": "arn:aws:lambda:us-east-1:01234:function:my-Function-162L1234", 11 | "Id": "my-powerful-test-function" 12 | } 13 | ] 14 | }, 15 | "Type": "AWS::Events::Rule" 16 | }, 17 | "PermToInvokeFunctionForMyPowerfulTestFunction": { 18 | "Properties": { 19 | "Action": "lambda:InvokeFunction", 20 | "FunctionName": "arn:aws:lambda:us-east-1:01234:function:my-Function-162L1234", 21 | "Principal": "events.amazonaws.com", 22 | "SourceArn": { 23 | "Fn::GetAtt": [ 24 | "MyTestFuncSchedule", 25 | "Arn" 26 | ] 27 | } 28 | }, 29 | "Type": "AWS::Lambda::Permission" 30 | } 31 | } 32 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/test_aws_lambda_Function_extended_statements.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "FunctionArn": { 4 | "Value": { 5 | "Fn::GetAtt": [ 6 | "Function", 7 | "Arn" 8 | ] 9 | } 10 | }, 11 | "FunctionName": { 12 | "Value": { 13 | "Ref": "Function" 14 | } 15 | }, 16 | "LatestVersion": { 17 | "Value": { 18 | "Fn::GetAtt": [ 19 | "LatestVersion", 20 | "Version" 21 | ] 22 | } 23 | }, 24 | "LatestVersionArn": { 25 | "Value": { 26 | "Ref": "LatestVersion" 27 | } 28 | }, 29 | "PolicyName": { 30 | "Value": { 31 | "Ref": "Policy" 32 | } 33 | }, 34 | "RoleArn": { 35 | "Value": { 36 | "Fn::GetAtt": [ 37 | "Role", 38 | "Arn" 39 | ] 40 | } 41 | }, 42 | "RoleName": { 43 | "Value": { 44 | "Ref": "Role" 45 | } 46 | } 47 | }, 48 | "Resources": { 49 | "Function": { 50 | "Properties": { 51 | "Code": { 52 | "S3Bucket": "test_bucket", 53 | "S3Key": 
"code_key" 54 | }, 55 | "DeadLetterConfig": { 56 | "TargetArn": "arn:aws:sqs:us-east-1:12345:dlq" 57 | }, 58 | "Description": "Test function.", 59 | "Environment": { 60 | "Variables": { 61 | "Env1": "Value1" 62 | } 63 | }, 64 | "Handler": "handler", 65 | "KmsKeyArn": "arn:aws:kms:us-east-1:12345:key", 66 | "MemorySize": 128, 67 | "Role": { 68 | "Fn::GetAtt": [ 69 | "Role", 70 | "Arn" 71 | ] 72 | }, 73 | "Runtime": "python2.7", 74 | "Timeout": 3, 75 | "VpcConfig": { 76 | "Ref": "AWS::NoValue" 77 | } 78 | }, 79 | "Type": "AWS::Lambda::Function" 80 | }, 81 | "LatestVersion": { 82 | "Properties": { 83 | "FunctionName": { 84 | "Ref": "Function" 85 | } 86 | }, 87 | "Type": "AWS::Lambda::Version" 88 | }, 89 | "Policy": { 90 | "Properties": { 91 | "PolicyDocument": { 92 | "Statement": [ 93 | { 94 | "Action": [ 95 | "logs:CreateLogGroup", 96 | "logs:CreateLogStream", 97 | "logs:PutLogEvents" 98 | ], 99 | "Effect": "Allow", 100 | "Resource": [ 101 | { 102 | "Fn::Join": [ 103 | "", 104 | [ 105 | "arn:aws:logs:", 106 | { 107 | "Ref": "AWS::Region" 108 | }, 109 | ":", 110 | { 111 | "Ref": "AWS::AccountId" 112 | }, 113 | ":log-group:", 114 | { 115 | "Fn::Join": [ 116 | "/", 117 | [ 118 | "/aws/lambda", 119 | { 120 | "Ref": "Function" 121 | } 122 | ] 123 | ] 124 | } 125 | ] 126 | ] 127 | }, 128 | { 129 | "Fn::Join": [ 130 | "", 131 | [ 132 | "arn:aws:logs:", 133 | { 134 | "Ref": "AWS::Region" 135 | }, 136 | ":", 137 | { 138 | "Ref": "AWS::AccountId" 139 | }, 140 | ":log-group:", 141 | { 142 | "Fn::Join": [ 143 | "/", 144 | [ 145 | "/aws/lambda", 146 | { 147 | "Ref": "Function" 148 | } 149 | ] 150 | ] 151 | }, 152 | ":*" 153 | ] 154 | ] 155 | } 156 | ] 157 | }, 158 | { 159 | "Action": [ 160 | "ec2:DescribeInstances" 161 | ], 162 | "Effect": "Allow", 163 | "Resource": [ 164 | "*" 165 | ] 166 | } 167 | ] 168 | }, 169 | "PolicyName": { 170 | "Fn::Sub": "${AWS::StackName}-policy" 171 | }, 172 | "Roles": [ 173 | { 174 | "Ref": "Role" 175 | } 176 | ] 177 | }, 178 | "Type": 
"AWS::IAM::Policy" 179 | }, 180 | "Role": { 181 | "Properties": { 182 | "AssumeRolePolicyDocument": { 183 | "Statement": [ 184 | { 185 | "Action": [ 186 | "sts:AssumeRole" 187 | ], 188 | "Effect": "Allow", 189 | "Principal": { 190 | "Service": [ 191 | "lambda.amazonaws.com" 192 | ] 193 | } 194 | } 195 | ] 196 | } 197 | }, 198 | "Type": "AWS::IAM::Role" 199 | } 200 | } 201 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/test_aws_lambda_Function_external_role.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "FunctionArn": { 4 | "Value": { 5 | "Fn::GetAtt": [ 6 | "Function", 7 | "Arn" 8 | ] 9 | } 10 | }, 11 | "FunctionName": { 12 | "Value": { 13 | "Ref": "Function" 14 | } 15 | }, 16 | "LatestVersion": { 17 | "Value": { 18 | "Fn::GetAtt": [ 19 | "LatestVersion", 20 | "Version" 21 | ] 22 | } 23 | }, 24 | "LatestVersionArn": { 25 | "Value": { 26 | "Ref": "LatestVersion" 27 | } 28 | } 29 | }, 30 | "Resources": { 31 | "Function": { 32 | "Properties": { 33 | "Code": { 34 | "S3Bucket": "test_bucket", 35 | "S3Key": "code_key" 36 | }, 37 | "DeadLetterConfig": { 38 | "TargetArn": "arn:aws:sqs:us-east-1:12345:dlq" 39 | }, 40 | "Description": "Test function.", 41 | "Environment": { 42 | "Variables": { 43 | "Env1": "Value1" 44 | } 45 | }, 46 | "Handler": "handler", 47 | "KmsKeyArn": "arn:aws:kms:us-east-1:12345:key", 48 | "MemorySize": 128, 49 | "Role": "my-fake-role", 50 | "Runtime": "python2.7", 51 | "Timeout": 3, 52 | "VpcConfig": { 53 | "Ref": "AWS::NoValue" 54 | } 55 | }, 56 | "Type": "AWS::Lambda::Function" 57 | }, 58 | "LatestVersion": { 59 | "Properties": { 60 | "FunctionName": { 61 | "Ref": "Function" 62 | } 63 | }, 64 | "Type": "AWS::Lambda::Version" 65 | } 66 | } 67 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/test_aws_lambda_Function_with_alias_partial_name.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "AliasArn": { 4 | "Value": { 5 | "Ref": "Alias" 6 | } 7 | }, 8 | "FunctionArn": { 9 | "Value": { 10 | "Fn::GetAtt": [ 11 | "Function", 12 | "Arn" 13 | ] 14 | } 15 | }, 16 | "FunctionName": { 17 | "Value": { 18 | "Ref": "Function" 19 | } 20 | }, 21 | "LatestVersion": { 22 | "Value": { 23 | "Fn::GetAtt": [ 24 | "LatestVersion", 25 | "Version" 26 | ] 27 | } 28 | }, 29 | "LatestVersionArn": { 30 | "Value": { 31 | "Ref": "LatestVersion" 32 | } 33 | }, 34 | "PolicyName": { 35 | "Value": { 36 | "Ref": "Policy" 37 | } 38 | }, 39 | "RoleArn": { 40 | "Value": { 41 | "Fn::GetAtt": [ 42 | "Role", 43 | "Arn" 44 | ] 45 | } 46 | }, 47 | "RoleName": { 48 | "Value": { 49 | "Ref": "Role" 50 | } 51 | } 52 | }, 53 | "Resources": { 54 | "Alias": { 55 | "Properties": { 56 | "FunctionName": { 57 | "Ref": "Function" 58 | }, 59 | "FunctionVersion": "$LATEST", 60 | "Name": "prod" 61 | }, 62 | "Type": "AWS::Lambda::Alias" 63 | }, 64 | "Function": { 65 | "Properties": { 66 | "Code": { 67 | "S3Bucket": "test_bucket", 68 | "S3Key": "code_key" 69 | }, 70 | "DeadLetterConfig": { 71 | "TargetArn": "arn:aws:sqs:us-east-1:12345:dlq" 72 | }, 73 | "Description": "Test function.", 74 | "Environment": { 75 | "Variables": { 76 | "Env1": "Value1" 77 | } 78 | }, 79 | "Handler": "handler", 80 | "KmsKeyArn": "arn:aws:kms:us-east-1:12345:key", 81 | "MemorySize": 128, 82 | "Role": { 83 | "Fn::GetAtt": [ 84 | "Role", 85 | "Arn" 86 | ] 87 | }, 88 | "Runtime": "python2.7", 89 | "Timeout": 3, 90 | "VpcConfig": { 91 | "Ref": "AWS::NoValue" 92 | } 93 | }, 94 | "Type": "AWS::Lambda::Function" 95 | }, 96 | "LatestVersion": { 97 | "Properties": { 98 | "FunctionName": { 99 | "Ref": "Function" 100 | } 101 | }, 102 | "Type": "AWS::Lambda::Version" 103 | }, 104 | "Policy": { 105 | "Properties": { 106 | "PolicyDocument": { 107 | "Statement": [ 108 | { 109 | "Action": [ 110 | "logs:CreateLogGroup", 111 | 
"logs:CreateLogStream", 112 | "logs:PutLogEvents" 113 | ], 114 | "Effect": "Allow", 115 | "Resource": [ 116 | { 117 | "Fn::Join": [ 118 | "", 119 | [ 120 | "arn:aws:logs:", 121 | { 122 | "Ref": "AWS::Region" 123 | }, 124 | ":", 125 | { 126 | "Ref": "AWS::AccountId" 127 | }, 128 | ":log-group:", 129 | { 130 | "Fn::Join": [ 131 | "/", 132 | [ 133 | "/aws/lambda", 134 | { 135 | "Ref": "Function" 136 | } 137 | ] 138 | ] 139 | } 140 | ] 141 | ] 142 | }, 143 | { 144 | "Fn::Join": [ 145 | "", 146 | [ 147 | "arn:aws:logs:", 148 | { 149 | "Ref": "AWS::Region" 150 | }, 151 | ":", 152 | { 153 | "Ref": "AWS::AccountId" 154 | }, 155 | ":log-group:", 156 | { 157 | "Fn::Join": [ 158 | "/", 159 | [ 160 | "/aws/lambda", 161 | { 162 | "Ref": "Function" 163 | } 164 | ] 165 | ] 166 | }, 167 | ":*" 168 | ] 169 | ] 170 | } 171 | ] 172 | } 173 | ] 174 | }, 175 | "PolicyName": { 176 | "Fn::Sub": "${AWS::StackName}-policy" 177 | }, 178 | "Roles": [ 179 | { 180 | "Ref": "Role" 181 | } 182 | ] 183 | }, 184 | "Type": "AWS::IAM::Policy" 185 | }, 186 | "Role": { 187 | "Properties": { 188 | "AssumeRolePolicyDocument": { 189 | "Statement": [ 190 | { 191 | "Action": [ 192 | "sts:AssumeRole" 193 | ], 194 | "Effect": "Allow", 195 | "Principal": { 196 | "Service": [ 197 | "lambda.amazonaws.com" 198 | ] 199 | } 200 | } 201 | ] 202 | } 203 | }, 204 | "Type": "AWS::IAM::Role" 205 | } 206 | } 207 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/test_aws_lambda_Function_with_alias_provided_version.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "AliasArn": { 4 | "Value": { 5 | "Ref": "Alias" 6 | } 7 | }, 8 | "FunctionArn": { 9 | "Value": { 10 | "Fn::GetAtt": [ 11 | "Function", 12 | "Arn" 13 | ] 14 | } 15 | }, 16 | "FunctionName": { 17 | "Value": { 18 | "Ref": "Function" 19 | } 20 | }, 21 | "LatestVersion": { 22 | "Value": { 23 | "Fn::GetAtt": [ 24 | "LatestVersion", 25 | 
"Version" 26 | ] 27 | } 28 | }, 29 | "LatestVersionArn": { 30 | "Value": { 31 | "Ref": "LatestVersion" 32 | } 33 | }, 34 | "PolicyName": { 35 | "Value": { 36 | "Ref": "Policy" 37 | } 38 | }, 39 | "RoleArn": { 40 | "Value": { 41 | "Fn::GetAtt": [ 42 | "Role", 43 | "Arn" 44 | ] 45 | } 46 | }, 47 | "RoleName": { 48 | "Value": { 49 | "Ref": "Role" 50 | } 51 | } 52 | }, 53 | "Resources": { 54 | "Alias": { 55 | "Properties": { 56 | "FunctionName": { 57 | "Ref": "Function" 58 | }, 59 | "FunctionVersion": "1", 60 | "Name": "prod" 61 | }, 62 | "Type": "AWS::Lambda::Alias" 63 | }, 64 | "Function": { 65 | "Properties": { 66 | "Code": { 67 | "S3Bucket": "test_bucket", 68 | "S3Key": "code_key" 69 | }, 70 | "DeadLetterConfig": { 71 | "TargetArn": "arn:aws:sqs:us-east-1:12345:dlq" 72 | }, 73 | "Description": "Test function.", 74 | "Environment": { 75 | "Variables": { 76 | "Env1": "Value1" 77 | } 78 | }, 79 | "Handler": "handler", 80 | "KmsKeyArn": "arn:aws:kms:us-east-1:12345:key", 81 | "MemorySize": 128, 82 | "Role": { 83 | "Fn::GetAtt": [ 84 | "Role", 85 | "Arn" 86 | ] 87 | }, 88 | "Runtime": "python2.7", 89 | "Timeout": 3, 90 | "VpcConfig": { 91 | "Ref": "AWS::NoValue" 92 | } 93 | }, 94 | "Type": "AWS::Lambda::Function" 95 | }, 96 | "LatestVersion": { 97 | "Properties": { 98 | "FunctionName": { 99 | "Ref": "Function" 100 | } 101 | }, 102 | "Type": "AWS::Lambda::Version" 103 | }, 104 | "Policy": { 105 | "Properties": { 106 | "PolicyDocument": { 107 | "Statement": [ 108 | { 109 | "Action": [ 110 | "logs:CreateLogGroup", 111 | "logs:CreateLogStream", 112 | "logs:PutLogEvents" 113 | ], 114 | "Effect": "Allow", 115 | "Resource": [ 116 | { 117 | "Fn::Join": [ 118 | "", 119 | [ 120 | "arn:aws:logs:", 121 | { 122 | "Ref": "AWS::Region" 123 | }, 124 | ":", 125 | { 126 | "Ref": "AWS::AccountId" 127 | }, 128 | ":log-group:", 129 | { 130 | "Fn::Join": [ 131 | "/", 132 | [ 133 | "/aws/lambda", 134 | { 135 | "Ref": "Function" 136 | } 137 | ] 138 | ] 139 | } 140 | ] 141 | ] 142 | }, 143 | { 
144 | "Fn::Join": [ 145 | "", 146 | [ 147 | "arn:aws:logs:", 148 | { 149 | "Ref": "AWS::Region" 150 | }, 151 | ":", 152 | { 153 | "Ref": "AWS::AccountId" 154 | }, 155 | ":log-group:", 156 | { 157 | "Fn::Join": [ 158 | "/", 159 | [ 160 | "/aws/lambda", 161 | { 162 | "Ref": "Function" 163 | } 164 | ] 165 | ] 166 | }, 167 | ":*" 168 | ] 169 | ] 170 | } 171 | ] 172 | } 173 | ] 174 | }, 175 | "PolicyName": { 176 | "Fn::Sub": "${AWS::StackName}-policy" 177 | }, 178 | "Roles": [ 179 | { 180 | "Ref": "Role" 181 | } 182 | ] 183 | }, 184 | "Type": "AWS::IAM::Policy" 185 | }, 186 | "Role": { 187 | "Properties": { 188 | "AssumeRolePolicyDocument": { 189 | "Statement": [ 190 | { 191 | "Action": [ 192 | "sts:AssumeRole" 193 | ], 194 | "Effect": "Allow", 195 | "Principal": { 196 | "Service": [ 197 | "lambda.amazonaws.com" 198 | ] 199 | } 200 | } 201 | ] 202 | } 203 | }, 204 | "Type": "AWS::IAM::Role" 205 | } 206 | } 207 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/test_cloudwatch_logs_subscription_filters.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "Filter1Name": { 4 | "Value": { 5 | "Ref": "Filter1" 6 | } 7 | }, 8 | "Filter2Name": { 9 | "Value": { 10 | "Ref": "Filter2" 11 | } 12 | } 13 | }, 14 | "Resources": { 15 | "Filter1": { 16 | "Properties": { 17 | "DestinationArn": { 18 | "Fn::GetAtt": [ 19 | "KinesisStream1", 20 | "Arn" 21 | ] 22 | }, 23 | "FilterPattern": "{$.userIdentity.type = Root}", 24 | "LogGroupName": { 25 | "Ref": "LogGroup1" 26 | } 27 | }, 28 | "Type": "AWS::Logs::SubscriptionFilter" 29 | }, 30 | "Filter2": { 31 | "Properties": { 32 | "DestinationArn": { 33 | "Fn::GetAtt": [ 34 | "KinesisStream2", 35 | "Arn" 36 | ] 37 | }, 38 | "FilterPattern": "{$.userIdentity.type = Root}", 39 | "LogGroupName": { 40 | "Ref": "LogGroup2" 41 | } 42 | }, 43 | "Type": "AWS::Logs::SubscriptionFilter" 44 | } 45 | } 46 | } 
-------------------------------------------------------------------------------- /tests/fixtures/blueprints/test_efs_ElasticFileSystem.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "EfsFileSystemId": { 4 | "Value": { 5 | "Ref": "EfsFileSystem" 6 | } 7 | }, 8 | "EfsMountTargetIds": { 9 | "Value": { 10 | "Fn::Join": [ 11 | ",", 12 | [ 13 | { 14 | "Ref": "EfsMountTarget1" 15 | }, 16 | { 17 | "Ref": "EfsMountTarget2" 18 | } 19 | ] 20 | ] 21 | } 22 | }, 23 | "EfsNewSecurityGroupIds": { 24 | "Value": { 25 | "Fn::Join": [ 26 | ",", 27 | [ 28 | { 29 | "Ref": "EfsSg1" 30 | }, 31 | { 32 | "Ref": "EfsSg2" 33 | } 34 | ] 35 | ] 36 | } 37 | } 38 | }, 39 | "Resources": { 40 | "EfsFileSystem": { 41 | "Properties": { 42 | "FileSystemTags": [ 43 | { 44 | "Key": "Hello", 45 | "Value": "World" 46 | } 47 | ], 48 | "PerformanceMode": "generalPurpose" 49 | }, 50 | "Type": "AWS::EFS::FileSystem" 51 | }, 52 | "EfsMountTarget1": { 53 | "Properties": { 54 | "FileSystemId": { 55 | "Ref": "EfsFileSystem" 56 | }, 57 | "IpAddress": "172.16.1.10", 58 | "SecurityGroups": [ 59 | { 60 | "Ref": "EfsSg1" 61 | }, 62 | { 63 | "Ref": "EfsSg2" 64 | }, 65 | "sg-22222222", 66 | "sg-33333333" 67 | ], 68 | "SubnetId": "subnet-11111111" 69 | }, 70 | "Type": "AWS::EFS::MountTarget" 71 | }, 72 | "EfsMountTarget2": { 73 | "Properties": { 74 | "FileSystemId": { 75 | "Ref": "EfsFileSystem" 76 | }, 77 | "IpAddress": "172.16.2.10", 78 | "SecurityGroups": [ 79 | { 80 | "Ref": "EfsSg1" 81 | }, 82 | { 83 | "Ref": "EfsSg2" 84 | }, 85 | "sg-22222222", 86 | "sg-33333333" 87 | ], 88 | "SubnetId": "subnet-22222222" 89 | }, 90 | "Type": "AWS::EFS::MountTarget" 91 | }, 92 | "EfsSg1": { 93 | "Properties": { 94 | "GroupDescription": "EFS SG 1", 95 | "SecurityGroupIngress": [ 96 | { 97 | "CidrIp": "172.16.0.0/12", 98 | "FromPort": 2049, 99 | "IpProtocol": "tcp", 100 | "ToPort": 2049 101 | } 102 | ], 103 | "Tags": [ 104 | { 105 | "Key": "Foo", 106 | "Value": "Bar" 
107 | }, 108 | { 109 | "Key": "Hello", 110 | "Value": "World" 111 | } 112 | ], 113 | "VpcId": "vpc-11111111" 114 | }, 115 | "Type": "AWS::EC2::SecurityGroup" 116 | }, 117 | "EfsSg2": { 118 | "Properties": { 119 | "GroupDescription": "EFS SG 2", 120 | "SecurityGroupIngress": [ 121 | { 122 | "FromPort": 2049, 123 | "IpProtocol": "tcp", 124 | "SourceSecurityGroupId": "sg-11111111", 125 | "ToPort": 2049 126 | } 127 | ], 128 | "Tags": [ 129 | { 130 | "Key": "Hello", 131 | "Value": "World" 132 | } 133 | ], 134 | "VpcId": "vpc-11111111" 135 | }, 136 | "Type": "AWS::EC2::SecurityGroup" 137 | } 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /tests/fixtures/blueprints/test_generic_GenericResourceCreator.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "Generic Resource Creator - 1.0.0", 4 | "Outputs": { 5 | "VolumeId": { 6 | "Description": "A reference to the object created in this blueprint", 7 | "Value": { 8 | "Ref": "ResourceRefName" 9 | } 10 | } 11 | }, 12 | "Resources": { 13 | "ResourceRefName": { 14 | "Properties": { 15 | "AvailabilityZone": "us-east-1b", 16 | "Encrypted": "true", 17 | "Size": "600", 18 | "VolumeType": "gp2" 19 | }, 20 | "Type": "AWS::EC2::Volume" 21 | } 22 | } 23 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/test_vpc2_with_internal_zone.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "CidrBlock": { 4 | "Value": { 5 | "Fn::GetAtt": [ 6 | "MyVPC", 7 | "CidrBlock" 8 | ] 9 | } 10 | }, 11 | "CidrBlockAssociations": { 12 | "Value": { 13 | "Fn::GetAtt": [ 14 | "MyVPC", 15 | "CidrBlockAssociations" 16 | ] 17 | } 18 | }, 19 | "DHCPOptionsId": { 20 | "Value": { 21 | "Ref": "DHCPOptions" 22 | } 23 | }, 24 | "DefaultNetworkAcl": { 25 | "Value": { 26 | "Fn::GetAtt": [ 27 | 
"MyVPC", 28 | "DefaultNetworkAcl" 29 | ] 30 | } 31 | }, 32 | "DefaultSecurityGroup": { 33 | "Value": { 34 | "Fn::GetAtt": [ 35 | "MyVPC", 36 | "DefaultSecurityGroup" 37 | ] 38 | } 39 | }, 40 | "InternalZoneId": { 41 | "Value": { 42 | "Ref": "MyInternalZone" 43 | } 44 | }, 45 | "InternalZoneName": { 46 | "Value": "internal." 47 | }, 48 | "InternetGatewayId": { 49 | "Value": { 50 | "Ref": "InternetGateway" 51 | } 52 | }, 53 | "Ipv6CidrBlocks": { 54 | "Value": { 55 | "Fn::GetAtt": [ 56 | "MyVPC", 57 | "Ipv6CidrBlocks" 58 | ] 59 | } 60 | }, 61 | "VPCDHCPOptionsAssociation": { 62 | "Value": { 63 | "Ref": "VPCDHCPOptionsAssociation" 64 | } 65 | }, 66 | "VPCGatewayAttachmentId": { 67 | "Value": { 68 | "Ref": "VPCGatewayAttachment" 69 | } 70 | }, 71 | "VpcId": { 72 | "Value": { 73 | "Ref": "MyVPC" 74 | } 75 | } 76 | }, 77 | "Resources": { 78 | "DHCPOptions": { 79 | "Properties": { 80 | "DomainName": "internal.", 81 | "DomainNameServers": [ 82 | "AmazonProvidedDNS" 83 | ] 84 | }, 85 | "Type": "AWS::EC2::DHCPOptions" 86 | }, 87 | "InternetGateway": { 88 | "Type": "AWS::EC2::InternetGateway" 89 | }, 90 | "MyInternalZone": { 91 | "Properties": { 92 | "Name": "internal.", 93 | "VPCs": [ 94 | { 95 | "VPCId": { 96 | "Ref": "MyVPC" 97 | }, 98 | "VPCRegion": { 99 | "Ref": "AWS::Region" 100 | } 101 | } 102 | ] 103 | }, 104 | "Type": "AWS::Route53::HostedZone" 105 | }, 106 | "MyVPC": { 107 | "Properties": { 108 | "CidrBlock": "10.0.0.0/16" 109 | }, 110 | "Type": "AWS::EC2::VPC" 111 | }, 112 | "VPCDHCPOptionsAssociation": { 113 | "Properties": { 114 | "DhcpOptionsId": { 115 | "Ref": "DHCPOptions" 116 | }, 117 | "VpcId": { 118 | "Ref": "MyVPC" 119 | } 120 | }, 121 | "Type": "AWS::EC2::VPCDHCPOptionsAssociation" 122 | }, 123 | "VPCGatewayAttachment": { 124 | "Properties": { 125 | "InternetGatewayId": { 126 | "Ref": "InternetGateway" 127 | }, 128 | "VpcId": { 129 | "Ref": "MyVPC" 130 | } 131 | }, 132 | "Type": "AWS::EC2::VPCGatewayAttachment" 133 | } 134 | } 135 | } 
-------------------------------------------------------------------------------- /tests/fixtures/blueprints/test_vpc2_without_internal_zone.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "CidrBlock": { 4 | "Value": { 5 | "Fn::GetAtt": [ 6 | "MyVPC", 7 | "CidrBlock" 8 | ] 9 | } 10 | }, 11 | "CidrBlockAssociations": { 12 | "Value": { 13 | "Fn::GetAtt": [ 14 | "MyVPC", 15 | "CidrBlockAssociations" 16 | ] 17 | } 18 | }, 19 | "DHCPOptionsId": { 20 | "Value": { 21 | "Ref": "DHCPOptions" 22 | } 23 | }, 24 | "DefaultNetworkAcl": { 25 | "Value": { 26 | "Fn::GetAtt": [ 27 | "MyVPC", 28 | "DefaultNetworkAcl" 29 | ] 30 | } 31 | }, 32 | "DefaultSecurityGroup": { 33 | "Value": { 34 | "Fn::GetAtt": [ 35 | "MyVPC", 36 | "DefaultSecurityGroup" 37 | ] 38 | } 39 | }, 40 | "InternetGatewayId": { 41 | "Value": { 42 | "Ref": "InternetGateway" 43 | } 44 | }, 45 | "Ipv6CidrBlocks": { 46 | "Value": { 47 | "Fn::GetAtt": [ 48 | "MyVPC", 49 | "Ipv6CidrBlocks" 50 | ] 51 | } 52 | }, 53 | "VPCDHCPOptionsAssociation": { 54 | "Value": { 55 | "Ref": "VPCDHCPOptionsAssociation" 56 | } 57 | }, 58 | "VPCGatewayAttachmentId": { 59 | "Value": { 60 | "Ref": "VPCGatewayAttachment" 61 | } 62 | }, 63 | "VpcId": { 64 | "Value": { 65 | "Ref": "MyVPC" 66 | } 67 | } 68 | }, 69 | "Resources": { 70 | "DHCPOptions": { 71 | "Properties": { 72 | "DomainName": { 73 | "Ref": "AWS::NoValue" 74 | }, 75 | "DomainNameServers": [ 76 | "AmazonProvidedDNS" 77 | ] 78 | }, 79 | "Type": "AWS::EC2::DHCPOptions" 80 | }, 81 | "InternetGateway": { 82 | "Type": "AWS::EC2::InternetGateway" 83 | }, 84 | "MyVPC": { 85 | "Properties": { 86 | "CidrBlock": "10.0.0.0/16" 87 | }, 88 | "Type": "AWS::EC2::VPC" 89 | }, 90 | "VPCDHCPOptionsAssociation": { 91 | "Properties": { 92 | "DhcpOptionsId": { 93 | "Ref": "DHCPOptions" 94 | }, 95 | "VpcId": { 96 | "Ref": "MyVPC" 97 | } 98 | }, 99 | "Type": "AWS::EC2::VPCDHCPOptionsAssociation" 100 | }, 101 | "VPCGatewayAttachment": { 102 | 
"Properties": { 103 | "InternetGatewayId": { 104 | "Ref": "InternetGateway" 105 | }, 106 | "VpcId": { 107 | "Ref": "MyVPC" 108 | } 109 | }, 110 | "Type": "AWS::EC2::VPCGatewayAttachment" 111 | } 112 | } 113 | } -------------------------------------------------------------------------------- /tests/fixtures/blueprints/topics.json: -------------------------------------------------------------------------------- 1 | { 2 | "Outputs": { 3 | "ExampleArn": { 4 | "Value": { 5 | "Ref": "Example" 6 | } 7 | }, 8 | "ExampleName": { 9 | "Value": { 10 | "Fn::GetAtt": [ 11 | "Example", 12 | "TopicName" 13 | ] 14 | } 15 | }, 16 | "WithoutSubscriptionArn": { 17 | "Value": { 18 | "Ref": "WithoutSubscription" 19 | } 20 | }, 21 | "WithoutSubscriptionName": { 22 | "Value": { 23 | "Fn::GetAtt": [ 24 | "WithoutSubscription", 25 | "TopicName" 26 | ] 27 | } 28 | } 29 | }, 30 | "Resources": { 31 | "Example": { 32 | "Properties": { 33 | "DisplayName": "ExampleTopic", 34 | "Subscription": [ 35 | { 36 | "Endpoint": "arn:aws:sqs:us-east-1:123456788901:example-queue", 37 | "Protocol": "sqs" 38 | }, 39 | { 40 | "Endpoint": "postmaster@example.com", 41 | "Protocol": "email" 42 | } 43 | ] 44 | }, 45 | "Type": "AWS::SNS::Topic" 46 | }, 47 | "ExampleSubPolicy": { 48 | "Properties": { 49 | "PolicyDocument": { 50 | "Statement": [ 51 | { 52 | "Action": [ 53 | "sqs:SendMessage" 54 | ], 55 | "Condition": { 56 | "ArnEquals": { 57 | "aws:SourceArn": { 58 | "Ref": "Example" 59 | } 60 | } 61 | }, 62 | "Effect": "Allow", 63 | "Principal": "*", 64 | "Resource": [ 65 | "arn:aws:sqs:us-east-1:123456788901:example-queue" 66 | ] 67 | } 68 | ] 69 | }, 70 | "Queues": [ 71 | "https://sqs.us-east-1.amazonaws.com/123456788901/example-queue" 72 | ] 73 | }, 74 | "Type": "AWS::SQS::QueuePolicy" 75 | }, 76 | "WithoutSubscription": { 77 | "Properties": { 78 | "DisplayName": "SampleTopicWithoutSub" 79 | }, 80 | "Type": "AWS::SNS::Topic" 81 | } 82 | } 83 | } 
class TestBlueprint(BlueprintTestCase):
    """Render the FlexibleAutoScalingGroup blueprint and compare the
    output against its JSON fixture."""

    def setUp(self):
        # Launch-configuration properties shared by all test cases.
        self.launch_config = dict(
            ImageId="i-abc1234",
            InstanceType="m3.medium",
            KeyName="mock_ssh_key",
            SecurityGroups=["sg-abc1234", "sg-bcd2345"],
        )
        # Autoscaling-group properties; individual tests add keys to this
        # dict before rendering.
        self.asg_config = dict(MinSize=1, MaxSize=3)
        self.common_variables = {
            "LaunchConfiguration": {
                "LaunchConfiguration": self.launch_config,
            },
            "AutoScalingGroup": {
                "AutoScalingGroup": self.asg_config,
            },
        }
        self.ctx = Context(config=Config({"namespace": "test"}))

    def create_blueprint(self, name):
        """Return a fresh blueprint instance bound to the shared context."""
        return FlexibleAutoScalingGroup(name, self.ctx)

    def generate_variables(self):
        """Wrap the common variable dict as stacker Variable objects."""
        return [
            Variable(name, value)
            for name, value in self.common_variables.items()
        ]

    def test_create_template_provided_launch_config_name(self):
        # Supplying LaunchConfigurationName alongside a LaunchConfiguration
        # definition is contradictory, so rendering must fail.
        stack_name = (
            "test_asg_flexible_autoscaling_group_provided_launch_config"
        )
        blueprint = self.create_blueprint(stack_name)
        self.asg_config["LaunchConfigurationName"] = "launch_config"
        blueprint.resolve_variables(self.generate_variables())
        with self.assertRaises(ValueError):
            blueprint.create_template()

    def test_create_template(self):
        blueprint = self.create_blueprint(
            "test_asg_flexible_autoscaling_group"
        )
        self.asg_config["AvailabilityZones"] = ["us-east-1a", "us-east-1b"]
        blueprint.resolve_variables(self.generate_variables())
        blueprint.create_template()
        self.assertRenderedBlueprint(blueprint)
from types import MethodType

from stacker.context import Context
from stacker.config import Config
from stacker.variables import Variable
from stacker_blueprints.aws_lambda import Function, FunctionScheduler
from stacker.blueprints.testutil import BlueprintTestCase

from troposphere.awslambda import Code

from awacs.aws import Statement, Allow
import awacs.ec2


class TestBlueprint(BlueprintTestCase):
    """Render the Function blueprint under several variable sets and
    compare each result against its JSON fixture."""

    def setUp(self):
        # Baseline variables shared by every test; individual tests add
        # or override keys before rendering.
        self.code = Code(S3Bucket="test_bucket", S3Key="code_key")
        self.common_variables = {
            "Code": self.code,
            "DeadLetterArn": "arn:aws:sqs:us-east-1:12345:dlq",
            "Description": "Test function.",
            "Environment": {"Env1": "Value1"},
            "Handler": "handler",
            "KmsKeyArn": "arn:aws:kms:us-east-1:12345:key",
            "MemorySize": 128,
            "Runtime": "python2.7",
            "Timeout": 3,
        }
        self.ctx = Context(config=Config({'namespace': 'test'}))

    def create_blueprint(self, name):
        """Return a fresh Function blueprint bound to the shared context."""
        return Function(name, self.ctx)

    def generate_variables(self):
        """Wrap the common variable dict as stacker Variable objects."""
        return [Variable(k, v) for k, v in self.common_variables.items()]

    def test_create_template_base(self):
        blueprint = self.create_blueprint('test_aws_lambda_Function')

        blueprint.resolve_variables(self.generate_variables())
        blueprint.create_template()
        self.assertRenderedBlueprint(blueprint)

    def test_create_template_with_external_role(self):
        # A string Role should be passed through instead of creating an
        # IAM Role/Policy pair.
        blueprint = self.create_blueprint(
            'test_aws_lambda_Function_external_role'
        )
        self.common_variables["Role"] = "my-fake-role"

        blueprint.resolve_variables(self.generate_variables())
        blueprint.create_template()
        self.assertRenderedBlueprint(blueprint)

    def test_create_template_vpc_config(self):
        blueprint = self.create_blueprint(
            'test_aws_lambda_Function_with_vpc_config'
        )
        self.common_variables["VpcConfig"] = {
            "SecurityGroupIds": ["sg-1", "sg-2", "sg-3"],
            "SubnetIds": ["subnet-1", "subnet-2", "subnet-3"],
        }

        blueprint.resolve_variables(self.generate_variables())
        blueprint.create_template()
        self.assertRenderedBlueprint(blueprint)

    def test_create_template_with_alias_full_name_arn(self):
        blueprint = self.create_blueprint(
            'test_aws_lambda_Function_with_alias_full_name_arn'
        )
        self.common_variables["AliasName"] = ("arn:aws:lambda:aws-region:"
                                              "acct-id:function:helloworld:"
                                              "PROD")

        blueprint.resolve_variables(self.generate_variables())
        blueprint.create_template()
        self.assertRenderedBlueprint(blueprint)

    def test_create_template_with_alias_partial_name(self):
        blueprint = self.create_blueprint(
            'test_aws_lambda_Function_with_alias_partial_name'
        )
        self.common_variables["AliasName"] = "prod"

        blueprint.resolve_variables(self.generate_variables())
        blueprint.create_template()
        self.assertRenderedBlueprint(blueprint)

    def test_create_template_with_alias_provided_version(self):
        blueprint = self.create_blueprint(
            'test_aws_lambda_Function_with_alias_provided_version'
        )

        self.common_variables["AliasName"] = "prod"
        self.common_variables["AliasVersion"] = "1"

        blueprint.resolve_variables(self.generate_variables())
        blueprint.create_template()
        self.assertRenderedBlueprint(blueprint)

    def test_create_template_event_source_mapping(self):
        blueprint = self.create_blueprint(
            'test_aws_lambda_Function_event_source_mapping'
        )
        self.common_variables["EventSourceMapping"] = {
            "EventSourceArn": "arn:aws:dynamodb:us-east-1:12345:table/"
                              "FakeTable/stream/FakeStream",
            "StartingPosition": "0",
        }

        blueprint.resolve_variables(self.generate_variables())
        blueprint.create_template()
        self.assertRenderedBlueprint(blueprint)

    def test_create_template_extended_statements(self):
        blueprint = self.create_blueprint(
            'test_aws_lambda_Function_extended_statements'
        )

        def extended_statements(self):
            return [
                Statement(
                    Effect=Allow,
                    Resource=["*"],
                    Action=[awacs.ec2.DescribeInstances],
                )
            ]

        # Patch the extended_policy_statements method so the rendered
        # policy picks up the extra statement.
        blueprint.extended_policy_statements = MethodType(
            extended_statements,
            blueprint
        )

        blueprint.resolve_variables(self.generate_variables())
        blueprint.create_template()
        self.assertRenderedBlueprint(blueprint)


class TestFunctionScheduler(BlueprintTestCase):
    """Render the FunctionScheduler blueprint against its JSON fixture."""

    def setUp(self):
        # Build the context the same way TestBlueprint does: wrap the
        # namespace in a Config object instead of passing a raw dict,
        # which newer stacker releases reject.
        self.ctx = Context(config=Config({'namespace': 'test'}))

    def test_create_template(self):
        blueprint = FunctionScheduler('test_aws_lambda_FunctionScheduler',
                                      self.ctx)
        blueprint.resolve_variables(
            [
                Variable(
                    "CloudwatchEventsRule",
                    {
                        "MyTestFuncSchedule": {
                            "Description": "The AWS Lambda schedule for "
                                           "my-powerful-test-function",
                            "ScheduleExpression": "rate(15 minutes)",
                            "State": "ENABLED",
                            "Targets": [
                                {
                                    "Id": "my-powerful-test-function",
                                    "Arn": "arn:aws:lambda:us-east-1:01234:"
                                           "function:my-Function-162L1234"
                                },
                            ],
                        }
                    }
                )
            ]
        )
        blueprint.create_template()
        self.assertRenderedBlueprint(blueprint)
import Context 5 | from stacker.config import Config 6 | from stacker.variables import Variable 7 | 8 | from stacker_blueprints.cloudwatch_logs import SubscriptionFilters 9 | 10 | from troposphere import GetAtt, Ref 11 | 12 | 13 | class TestSubscriptionFilters(BlueprintTestCase): 14 | def setUp(self): 15 | self.ctx = Context(config=Config({'namespace': 'test'})) 16 | 17 | def test_create_template(self): 18 | blueprint = SubscriptionFilters( 19 | 'test_cloudwatch_logs_subscription_filters', 20 | self.ctx 21 | ) 22 | 23 | blueprint.resolve_variables( 24 | [ 25 | Variable( 26 | "SubscriptionFilters", 27 | { 28 | "Filter1": { 29 | "DestinationArn": GetAtt("KinesisStream1", "Arn"), 30 | "FilterPattern": "{$.userIdentity.type = Root}", 31 | "LogGroupName": Ref("LogGroup1"), 32 | }, 33 | "Filter2": { 34 | "DestinationArn": GetAtt("KinesisStream2", "Arn"), 35 | "FilterPattern": "{$.userIdentity.type = Root}", 36 | "LogGroupName": Ref("LogGroup2"), 37 | }, 38 | } 39 | ) 40 | ] 41 | ) 42 | blueprint.create_template() 43 | self.assertRenderedBlueprint(blueprint) 44 | 45 | 46 | if __name__ == '__main__': 47 | unittest.main() 48 | -------------------------------------------------------------------------------- /tests/test_dynamodb.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from stacker.context import Context 3 | from stacker.variables import Variable 4 | from stacker.blueprints.testutil import BlueprintTestCase 5 | 6 | import stacker_blueprints.dynamodb 7 | 8 | class TestDynamoDB(BlueprintTestCase): 9 | def setUp(self): 10 | self.dynamodb_variables = [ 11 | Variable( 12 | 'Tables', 13 | { 14 | "UserTable": { 15 | "TableName": "test-user-table", 16 | "KeySchema": [ 17 | { 18 | "AttributeName": "id", 19 | "KeyType": "HASH", 20 | }, 21 | { 22 | "AttributeName": "name", 23 | "KeyType": "RANGE", 24 | }, 25 | ], 26 | "AttributeDefinitions": [ 27 | { 28 | "AttributeName": "id", 29 | "AttributeType": "S", 30 | }, 31 | { 
32 | "AttributeName": "name", 33 | "AttributeType": "S", 34 | }, 35 | ], 36 | "ProvisionedThroughput": { 37 | "ReadCapacityUnits": 5, 38 | "WriteCapacityUnits": 5, 39 | }, 40 | "StreamSpecification": { 41 | "StreamViewType": "ALL", 42 | } 43 | } 44 | } 45 | ) 46 | ] 47 | self.dynamodb_autoscaling_variables = [ 48 | Variable( 49 | "AutoScalingConfigs", 50 | [ 51 | { 52 | "table": "test-user-table", 53 | "read": {"min": 5, "max": 100, "target": 75.0}, 54 | "write": {"min": 5, "max": 50, "target": 80.0}, 55 | }, 56 | { 57 | "table": "test-group-table", 58 | "read": {"min": 10, "max": 50, "scale-in-cooldown": 180, "scale-out-cooldown": 180}, 59 | "write": {"max": 25}, 60 | }, 61 | ] 62 | ) 63 | ] 64 | 65 | def test_dynamodb_table(self): 66 | ctx = Context({'namespace': 'test', 'environment': 'test'}) 67 | blueprint = stacker_blueprints.dynamodb.DynamoDB('dynamodb_table', ctx) 68 | blueprint.resolve_variables(self.dynamodb_variables) 69 | blueprint.create_template() 70 | self.assertRenderedBlueprint(blueprint) 71 | 72 | def test_dynamodb_autoscaling(self): 73 | ctx = Context({'namespace': 'test', 'environment': 'test'}) 74 | blueprint = stacker_blueprints.dynamodb.AutoScaling('dynamodb_autoscaling', ctx) 75 | blueprint.resolve_variables(self.dynamodb_autoscaling_variables) 76 | blueprint.create_template() 77 | self.assertRenderedBlueprint(blueprint) 78 | -------------------------------------------------------------------------------- /tests/test_ec2.py: -------------------------------------------------------------------------------- 1 | from stacker.context import Context 2 | from stacker.variables import Variable 3 | from stacker_blueprints.ec2 import Instances 4 | from stacker.blueprints.testutil import BlueprintTestCase 5 | 6 | 7 | class TestBlueprint(BlueprintTestCase): 8 | def setUp(self): 9 | self.common_variables = [ 10 | Variable( 11 | "Instances", { 12 | "MyInstance": { 13 | "ImageId": "ami-abc12345", 14 | } 15 | } 16 | ) 17 | ] 18 | 19 | self.ctx = 
Context({'namespace': 'test', 'environment': 'test'}) 20 | 21 | def test_ec2_instances(self): 22 | blueprint = Instances("ec2_instances", self.ctx) 23 | blueprint.resolve_variables(self.common_variables) 24 | blueprint.create_template() 25 | self.assertRenderedBlueprint(blueprint) 26 | -------------------------------------------------------------------------------- /tests/test_efs.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from stacker.blueprints.testutil import BlueprintTestCase 4 | from stacker.context import Context 5 | from stacker.exceptions import ValidatorError 6 | from stacker.variables import Variable 7 | 8 | from stacker_blueprints.efs import ElasticFileSystem 9 | 10 | 11 | EFS_VARIABLES = { 12 | 'VpcId': 'vpc-11111111', 13 | 'PerformanceMode': 'generalPurpose', 14 | 'Tags': { 15 | 'Hello': 'World' 16 | }, 17 | 'Subnets': ['subnet-11111111', 'subnet-22222222'], 18 | 'IpAddresses': ['172.16.1.10', '172.16.2.10'], 19 | 'SecurityGroups': { 20 | 'EfsSg1': { 21 | 'GroupDescription': 'EFS SG 1', 22 | 'SecurityGroupIngress': [ 23 | {'IpProtocol': 'tcp', 'FromPort': 2049, 'ToPort': 2049, 24 | 'CidrIp': '172.16.0.0/12'} 25 | ], 26 | 'Tags': [{'Key': 'Foo', 'Value': 'Bar'}] 27 | }, 28 | 'EfsSg2': { 29 | 'GroupDescription': 'EFS SG 2', 30 | 'SecurityGroupIngress': [ 31 | {'IpProtocol': 'tcp', 'FromPort': 2049, 'ToPort': 2049, 32 | 'SourceSecurityGroupId': 'sg-11111111'} 33 | ] 34 | } 35 | }, 36 | 'ExtraSecurityGroups': ['sg-22222222', 'sg-33333333'] 37 | } 38 | 39 | 40 | class TestElasticFileSystem(BlueprintTestCase): 41 | def setUp(self): 42 | self.ctx = Context({'namespace': 'test'}) 43 | 44 | def test_create_template(self): 45 | blueprint = ElasticFileSystem('test_efs_ElasticFileSystem', self.ctx) 46 | variables = EFS_VARIABLES 47 | blueprint.resolve_variables( 48 | [Variable(k, v) for k, v in variables.items()]) 49 | blueprint.create_template() 50 | self.assertRenderedBlueprint(blueprint) 51 | 
52 | def test_validate_security_group_count_empty(self): 53 | blueprint = ElasticFileSystem('test_efs_ElasticFileSystem', self.ctx) 54 | variables = EFS_VARIABLES.copy() 55 | variables['SecurityGroups'] = {} 56 | variables['ExtraSecurityGroups'] = [] 57 | 58 | with self.assertRaises(ValidatorError): 59 | blueprint.resolve_variables( 60 | [Variable(k, v) for k, v in variables.items()]) 61 | 62 | def test_validate_security_group_count_exceeded(self): 63 | blueprint = ElasticFileSystem('test_efs_ElasticFileSystem', self.ctx) 64 | variables = EFS_VARIABLES.copy() 65 | variables['ExtraSecurityGroups'] = ['sg-22222222'] * 4 66 | 67 | with self.assertRaises(ValidatorError): 68 | blueprint.resolve_variables( 69 | [Variable(k, v) for k, v in variables.items()]) 70 | 71 | def test_validate_subnets_empty(self): 72 | blueprint = ElasticFileSystem('test_efs_ElasticFileSystem', self.ctx) 73 | variables = EFS_VARIABLES.copy() 74 | variables['Subnets'] = [] 75 | 76 | with self.assertRaises(ValidatorError): 77 | blueprint.resolve_variables( 78 | [Variable(k, v) for k, v in variables.items()]) 79 | 80 | def test_validate_subnets_ip_addresses_unmatching(self): 81 | blueprint = ElasticFileSystem('test_efs_ElasticFileSystem', self.ctx) 82 | variables = EFS_VARIABLES.copy() 83 | variables['IpAddresses'] = ['172.16.1.10'] 84 | 85 | with self.assertRaises(ValidatorError): 86 | blueprint.resolve_variables( 87 | [Variable(k, v) for k, v in variables.items()]) 88 | 89 | 90 | if __name__ == '__main__': 91 | unittest.main() 92 | -------------------------------------------------------------------------------- /tests/test_generic.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from stacker.blueprints.testutil import BlueprintTestCase 4 | from stacker.context import Context 5 | from stacker.config import Config 6 | from stacker.variables import Variable 7 | 8 | from stacker_blueprints.generic import GenericResourceCreator 9 | 10 | 
11 | class TestGenericResourceCreator(BlueprintTestCase): 12 | def setUp(self): 13 | self.ctx = Context(config=Config({'namespace': 'test'})) 14 | 15 | def test_create_template(self): 16 | blueprint = GenericResourceCreator( 17 | 'test_generic_GenericResourceCreator', self.ctx 18 | ) 19 | blueprint.resolve_variables( 20 | [ 21 | Variable('Class', 'ec2.Volume'), 22 | Variable('Output', 'VolumeId'), 23 | Variable('Properties', { 24 | 'VolumeType': 'gp2', 25 | 'Size': '600', 26 | 'Encrypted': 'true', 27 | 'AvailabilityZone': 'us-east-1b', 28 | }), 29 | ] 30 | ) 31 | blueprint.create_template() 32 | self.assertRenderedBlueprint(blueprint) 33 | 34 | 35 | if __name__ == '__main__': 36 | unittest.main() 37 | -------------------------------------------------------------------------------- /tests/test_kms.py: -------------------------------------------------------------------------------- 1 | from stacker.context import Context 2 | from stacker.config import Config 3 | from stacker.variables import Variable 4 | 5 | from stacker_blueprints.kms import Key 6 | 7 | from stacker.blueprints.testutil import BlueprintTestCase 8 | 9 | 10 | class TestKmsKey(BlueprintTestCase): 11 | def setUp(self): 12 | self.ctx = Context(config=Config({'namespace': 'test'})) 13 | 14 | def test_kms_key(self): 15 | blueprint = Key('kms_key_a', self.ctx) 16 | blueprint.resolve_variables( 17 | [ 18 | Variable("KeyAlias", "alias/a-test-key"), 19 | Variable("Properties", {"Description": "a KMS test-key."}), 20 | ] 21 | ) 22 | blueprint.create_template() 23 | self.assertRenderedBlueprint(blueprint) 24 | 25 | def test_kms_key_alias_not_in_keyalias(self): 26 | blueprint = Key('kms_key_b', self.ctx) 27 | blueprint.resolve_variables( 28 | [ 29 | Variable("KeyAlias", "b-test-key"), 30 | Variable("Properties", {"Description": "b KMS test-key."}), 31 | ] 32 | ) 33 | blueprint.create_template() 34 | self.assertRenderedBlueprint(blueprint) 35 | 36 | def test_kms_key_without_properties(self): 37 | blueprint = 
Key('kms_key_c', self.ctx) 38 | blueprint.resolve_variables( 39 | [ 40 | Variable("KeyAlias", "alias/c-test-key"), 41 | ] 42 | ) 43 | blueprint.create_template() 44 | self.assertRenderedBlueprint(blueprint) 45 | 46 | def test_kms_key_attributes_is_deprecated(self): 47 | blueprint = Key('kms_key_attributes_deprecated', self.ctx) 48 | blueprint.resolve_variables( 49 | [ 50 | Variable("KeyAlias", "c-test-key"), 51 | Variable("Attributes", {"Description": "c KMS test-key."}), 52 | ] 53 | ) 54 | with self.assertRaises(DeprecationWarning): 55 | blueprint.create_template() 56 | -------------------------------------------------------------------------------- /tests/test_s3.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from stacker.context import Context, Config 3 | from stacker.variables import Variable 4 | from stacker_blueprints.s3 import Buckets 5 | from stacker.blueprints.testutil import BlueprintTestCase 6 | 7 | 8 | class TestBlueprint(BlueprintTestCase): 9 | def setUp(self): 10 | self.variables = [ 11 | Variable('Buckets', { 12 | 'Simple': {}, 13 | 'Cycle': { 14 | 'LifecycleConfiguration': { 15 | 'Rules': [{ 16 | 'Status': 'Enabled', 17 | 'ExpirationInDays': 40, 18 | }], 19 | }, 20 | } 21 | }), 22 | Variable('ReadRoles', [ 23 | 'Role1', 24 | 'Role2', 25 | ]), 26 | Variable('ReadWriteRoles', [ 27 | 'Role3', 28 | 'Role4', 29 | ]), 30 | ] 31 | 32 | def test_s3(self): 33 | ctx = Context(config=Config({'namespace': 'test'})) 34 | blueprint = Buckets('buckets', ctx) 35 | blueprint.resolve_variables(self.variables) 36 | blueprint.create_template() 37 | self.assertRenderedBlueprint(blueprint) 38 | 39 | def test_s3_static_website(self): 40 | """Test a static website blog bucket.""" 41 | ctx = Context(config=Config({'namespace': 'test'})) 42 | blueprint = Buckets('s3_static_website', ctx) 43 | 44 | v = self.variables = [ 45 | Variable('Buckets', { 46 | 'Blog': { 47 | 'AccessControl': 'PublicRead', 48 | 
'WebsiteConfiguration' : { 49 | 'IndexDocument': 'index.html' 50 | } 51 | }, 52 | }), 53 | Variable('ReadRoles', [ 54 | 'Role1', 55 | 'Role2', 56 | ]), 57 | Variable('ReadWriteRoles', [ 58 | 'Role3', 59 | 'Role4', 60 | ]), 61 | ] 62 | 63 | blueprint.resolve_variables(v) 64 | blueprint.create_template() 65 | self.assertRenderedBlueprint(blueprint) 66 | -------------------------------------------------------------------------------- /tests/test_sns.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from stacker.context import Context 3 | from stacker.variables import Variable 4 | from stacker_blueprints.sns import Topics 5 | from stacker.blueprints.testutil import BlueprintTestCase 6 | 7 | class TestBlueprint(BlueprintTestCase): 8 | def setUp(self): 9 | self.variables = [ 10 | Variable('Topics', { 11 | 'WithoutSubscription': { 12 | 'DisplayName': 'SampleTopicWithoutSub', 13 | }, 14 | 'Example': { 15 | 'DisplayName': 'ExampleTopic', 16 | 'Subscription': [ 17 | { 18 | 'Endpoint': 'arn:aws:sqs:us-east-1:123456788901:example-queue', 19 | 'Protocol': 'sqs', 20 | }, 21 | { 22 | 'Endpoint': 'postmaster@example.com', 23 | 'Protocol': 'email', 24 | }, 25 | ] 26 | }, 27 | }), 28 | ] 29 | 30 | def test_sns(self): 31 | ctx = Context({'namespace': 'test', 'environment': 'test'}) 32 | blueprint = Topics('topics', ctx) 33 | blueprint.resolve_variables(self.variables) 34 | blueprint.create_template() 35 | self.assertRenderedBlueprint(blueprint) 36 | -------------------------------------------------------------------------------- /tests/test_sqs.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | from stacker.context import Context 3 | from stacker.variables import Variable 4 | from stacker_blueprints.sqs import Queues 5 | from stacker.blueprints.testutil import BlueprintTestCase 6 | 7 | class TestBlueprint(BlueprintTestCase): 8 | def setUp(self): 9 | self.variables = [ 
10 | Variable('Queues', { 11 | 'Simple': { 12 | 'DelaySeconds': 15, 13 | 'MaximumMessageSize': 4096, 14 | 'ReceiveMessageWaitTimeSeconds': 15, 15 | 'VisibilityTimeout': 600, 16 | }, 17 | 'Fifo': { 18 | 'FifoQueue': True, 19 | 'QueueName': 'Fifo.fifo', 20 | }, 21 | 'RedrivePolicy': { 22 | 'RedrivePolicy': { 23 | 'deadLetterTargetArn': 'arn:aws:sqs:us-east-1:123456789:dlq', 24 | 'maxReceiveCount': 3, 25 | } 26 | }}) 27 | ] 28 | 29 | def test_sqs(self): 30 | ctx = Context({'namespace': 'test', 'environment': 'test'}) 31 | blueprint = Queues('queues', ctx) 32 | blueprint.resolve_variables(self.variables) 33 | blueprint.create_template() 34 | self.assertRenderedBlueprint(blueprint) 35 | -------------------------------------------------------------------------------- /tests/test_vpc.py: -------------------------------------------------------------------------------- 1 | from stacker.context import Context 2 | from stacker.config import Config 3 | from stacker.variables import Variable 4 | from stacker_blueprints.vpc import VPC2 5 | from stacker.blueprints.testutil import BlueprintTestCase 6 | 7 | from troposphere.route53 import HostedZone 8 | 9 | VPC_NAME = "MyVPC" 10 | 11 | 12 | class TestVPC2(BlueprintTestCase): 13 | def setUp(self): 14 | self.ctx = Context(config=Config({'namespace': 'test'})) 15 | self.common_variables = { 16 | "VPC": { 17 | VPC_NAME: { 18 | "CidrBlock": "10.0.0.0/16" 19 | } 20 | } 21 | } 22 | 23 | def create_blueprint(self, name): 24 | return VPC2(name, self.ctx) 25 | 26 | def generate_variables(self, variable_dict=None): 27 | variable_dict = variable_dict or {} 28 | self.common_variables.update(variable_dict) 29 | 30 | return [Variable(k, v) for k, v in self.common_variables.items()] 31 | 32 | def test_vpc2_without_internal_zone(self): 33 | bp = self.create_blueprint("test_vpc2_without_internal_zone") 34 | 35 | bp.resolve_variables(self.generate_variables()) 36 | bp.create_template() 37 | self.assertRenderedBlueprint(bp) 38 | 
self.assertIn(VPC_NAME, bp.template.resources) 39 | for r in bp.template.resources.values(): 40 | self.assertNotIsInstance(r, HostedZone) 41 | 42 | def test_vpc2_with_internal_zone(self): 43 | bp = self.create_blueprint("test_vpc2_with_internal_zone") 44 | 45 | variables = { 46 | "InternalZone": { 47 | "MyInternalZone": { 48 | "Name": "internal." 49 | } 50 | } 51 | } 52 | 53 | bp.resolve_variables(self.generate_variables(variables)) 54 | bp.create_template() 55 | self.assertRenderedBlueprint(bp) 56 | self.assertIn(VPC_NAME, bp.template.resources) 57 | zone = bp.template.resources["MyInternalZone"] 58 | self.assertEquals(zone.VPCs[0].VPCId.data["Ref"], VPC_NAME) 59 | dhcp = bp.template.resources["DHCPOptions"] 60 | self.assertEquals(dhcp.DomainName, "internal.") 61 | --------------------------------------------------------------------------------