├── .gitignore ├── Dockerfile ├── License.md ├── Makefile ├── Readme.md ├── bastion ├── main.tf └── user_data.sh ├── circle.yml ├── defaults └── main.tf ├── dhcp └── main.tf ├── dns └── main.tf ├── docs.md ├── ecs-cluster ├── files │ └── cloud-config.yml.tpl └── main.tf ├── elb └── main.tf ├── iam-role └── main.tf ├── iam-user └── main.tf ├── images ├── instance.png ├── networking.png ├── service.png └── stack.png ├── main.tf ├── packer ├── base │ ├── packer.yml │ ├── root │ │ ├── etc │ │ │ ├── cron.hourly │ │ │ │ └── logrotate │ │ │ ├── default │ │ │ │ └── docker │ │ │ ├── sysctl.d │ │ │ │ └── 10-stack.conf │ │ │ └── systemd │ │ │ │ └── system │ │ │ │ ├── bootstrap.service │ │ │ │ ├── docker.service │ │ │ │ ├── format-var-lib-docker.service │ │ │ │ └── var-lib-docker.mount │ │ └── usr │ │ │ └── local │ │ │ └── bin │ │ │ ├── bootstrap │ │ │ └── if-addr │ └── scripts │ │ ├── base.sh │ │ ├── bootstrap.sh │ │ ├── docker.sh │ │ └── ixgbevf.sh └── ecs │ ├── packer.yml │ ├── root │ └── etc │ │ ├── ecs │ │ └── ecs.config │ │ └── systemd │ │ └── system │ │ ├── ecs-agent.service │ │ └── ecs-logs.service │ └── scripts │ ├── ecs.sh │ └── iam-roles.sh ├── rds-cluster └── main.tf ├── rds └── main.tf ├── s3-logs ├── main.tf └── policy.json ├── scripts ├── docs.sh └── test.sh ├── security-groups └── main.tf ├── service └── main.tf ├── task └── main.tf ├── tools ├── pack-ami ├── readme.md ├── roll-ami └── tfvar-ami ├── vpc └── main.tf ├── web-service ├── elb │ └── main.tf └── main.tf └── worker └── main.tf /.gitignore: -------------------------------------------------------------------------------- 1 | example/keys 2 | .terraform 3 | *.tfstate 4 | *.tfstate* 5 | *.swp 6 | *~ 7 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Build as: docker build -t aws-stack . 2 | # Run as: docker run --rm -it --net=host \ 3 | # -v $PWD:$PWD -w $PWD \ 4 | # -v /tmp:/tmp -v ~/.aws:/root/.aws \ 5 | # -e AWS_DEFAULT_REGION=us-east-1 aws-stack make amis 6 | 7 | FROM python:3.5-slim 8 | RUN pip3 install mypy-lang==0.4 flake8==2.5.4 pyyaml boto3 9 | RUN apt-get update \ 10 | && apt-get install -y curl unzip make \ 11 | && apt-get clean 12 | 13 | COPY tools /usr/local/bin 14 | RUN curl -sL "https://releases.hashicorp.com/terraform/0.7.2/terraform_0.7.2_linux_amd64.zip"> terraform.zip \ 15 | && unzip terraform.zip \ 16 | && mv terraform /usr/local/bin 17 | 18 | RUN curl -sL "https://releases.hashicorp.com/packer/0.10.1/packer_0.10.1_linux_amd64.zip" > packer.zip \ 19 | && unzip packer.zip \ 20 | && mv packer /usr/local/bin 21 | 22 | RUN curl -sL -o /usr/local/bin/dumb-init https://github.com/Yelp/dumb-init/releases/download/v1.1.3/dumb-init_1.1.3_amd64 && chmod +x /usr/local/bin/dumb-init 23 | 24 | ENTRYPOINT ["/usr/local/bin/dumb-init"] 25 | 26 | ADD . /src 27 | 28 | RUN cd /src && make install 29 | 30 | -------------------------------------------------------------------------------- /License.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2016 Segment.io, Inc. 
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | 2 | SRC = $(wildcard *.tf ./*/*.tf) 3 | platform := $(shell uname) 4 | pydeps := pyyaml boto3 5 | modules = $(shell ls -1 ./*.tf ./*/*.tf | xargs -I % dirname %) 6 | 7 | tools := \ 8 | ./tools/pack-ami \ 9 | ./tools/roll-ami \ 10 | ./tools/tfvar-ami 11 | 12 | tools := $(patsubst ./tools/%,/usr/local/bin/%,${tools}) 13 | 14 | # The install rule sets up the development environment on the machine it's run 15 | # on. 16 | install: install-third-party-tools install-python-dependencies install-tools 17 | 18 | ifeq (${platform},Darwin) 19 | install-third-party-tools: 20 | brew install packer terraform python3 21 | else 22 | install-third-party-tools: 23 | @echo "${platform} is a platform we have no presets for, you'll have to install the third party dependencies manually (packer, terraform, python3)" 24 | endif 25 | 26 | ifeq (${platform},Darwin) 27 | install-python-dependencies: 28 | sudo -H pip3 install --upgrade ${pydeps} 29 | else 30 | install-python-dependencies: 31 | pip3 install --upgrade ${pydeps} 32 | endif 33 | 34 | install-tools: $(tools) 35 | 36 | /usr/local/bin/%: ./tools/% 37 | install -S -m 0755 $< /usr/local/bin 38 | 39 | amis: 40 | pack-ami build -p ./packer -t base -r 41 | 42 | plan-ami: 43 | pack-ami plan -p ./packer -t ${template} 44 | 45 | validate-ami: 46 | pack-ami validate -p ./packer -t ${template} 47 | 48 | build-ami: 49 | pack-ami build -p ./packer -t ${template} 50 | 51 | test: 52 | @bash scripts/test.sh 53 | 54 | docs.md: $(SRC) 55 | @bash scripts/docs.sh 56 | 57 | .PHONY: install install-third-party-tools install-python-dependencies install-tools build-ami plan-ami validate-ami amis test 58 | -------------------------------------------------------------------------------- /Readme.md: -------------------------------------------------------------------------------- 1 | # ⚠️ Unmaintained ⚠️ 2 | 3 | This repository is unmaintained, but left as a historical relic for any wishing to adapt it. Godspeed!
4 | 5 | # Segment Stack [![CircleCI](https://circleci.com/gh/segmentio/stack.svg?style=shield&circle-token=21d1df0dfd7e405582403f65cd1a270f9f52d7a4)](https://circleci.com/gh/segmentio/stack) 6 | 7 | [terraform]: https://terraform.io 8 | [remote-state]: https://www.terraform.io/docs/commands/remote-config.html 9 | [aws-credentials]: http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#cli-quick-configuration 10 | [aws-vault]: https://github.com/99designs/aws-vault 11 | [aws]: http://aws.amazon.com/ 12 | [docker-hub]: https://hub.docker.com/ 13 | [keypair]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html#having-ec2-create-your-key-pair 14 | 15 | The Segment Stack is a set of [Terraform][terraform] modules for configuring production infrastructure with AWS, Docker, and ECS. 16 | It's a more 'curated' set of defaults for configuring your AWS environment, while still allowing you to fully customize it. 17 | 18 | *To get more background on the Segment Stack you can read [this blog post](https://segment.com/blog/the-segment-aws-stack/) about its history.* 19 | 20 | The Stack comes with: 21 | 22 | - an auto-scaling group of instances to run your services 23 | - a multi-az VPC with different subnets for availability 24 | - self-managed services run via docker and ECS 25 | - an ELB and ECS definition for each service 26 | - docker logs that populate in CloudWatch 27 | - a bastion node for manual SSH access 28 | - automatic ELB logging to S3 29 | 30 | Start from scratch or selectively add it to your existing infrastructure; the Stack is yours to customize and tweak. 31 | 32 | ## Quickstart 33 | 34 | _To run the stack, you'll need AWS access and Terraform installed; check out the [requirements](#requirements) section._ 35 | 36 | The easiest way to get the Stack up and running is by creating a Terraform definition for it. Copy this snippet into a file 37 | named `terraform.tf`: 38 | ```hcl 39 | module "stack" { 40 | source = "github.com/segmentio/stack" 41 | environment = "prod" 42 | key_name = "my-key-name" 43 | name = "my-app" 44 | } 45 | ``` 46 | This is the _base_ configuration that will provision everything you need to run your services. 47 | 48 | From there, you'll want to run a plan, which will stage the changeset: 49 | 50 | $ terraform plan 51 | 52 | And if the changes look good, apply them to your infrastructure: 53 | 54 | $ terraform apply 55 | 56 | This will automatically set up your basic networking configuration with an auto-scaling default cluster running ECS. 57 | 58 | Now that we've got all the basics set up, how about adding a service? 59 | 60 | Services pull images from Docker Hub and then run the images as containers via ECS. They are automatically discoverable at `<service>.stack.local` and will run with zero-downtime deploys. 61 | We can use the `stack//service` module to automatically provision all of the required parts of the service, including a load balancer, ECS service, and Route53 DNS entry. 62 | 63 | Here's a sample service definition; try adding it to your `terraform.tf` file.
64 | 65 | ```hcl 66 | module "nginx" { 67 | # this sources from the "stack//service" module 68 | source = "github.com/segmentio/stack//service" 69 | name = "my-app" 70 | image = "nginx" 71 | port = 80 72 | environment = "${module.stack.environment}" 73 | cluster = "${module.stack.cluster}" 74 | iam_role = "${module.stack.iam_role}" 75 | security_groups = "${module.stack.internal_elb}" 76 | subnet_ids = "${module.stack.internal_subnets}" 77 | log_bucket = "${module.stack.log_bucket_id}" 78 | zone_id = "${module.stack.zone_id}" 79 | } 80 | ``` 81 | 82 | Once the nginx service has been added, simply run another plan and apply: 83 | 84 | $ terraform plan 85 | $ terraform apply 86 | 87 | Your service should automatically be up and running. You can SSH into your bastion host (find the IP by running `terraform output`) and connect using the service name: 88 | 89 | $ ssh -i <path/to/key> ubuntu@<bastion-ip> 90 | $ curl http://nginx.stack.local/ 91 | 92 | *The bastion IP should have been shown by the terraform output when it created the stack for the first time. If you missed it, you can still get it from the AWS console.* 93 | 94 | ## Requirements 95 | 96 | Before we start, you'll first need: 97 | 98 | - [ ] an [AWS account][aws] with API access 99 | - [ ] locally configured [AWS credentials][aws-credentials] or a tool like [aws-vault][aws-vault] 100 | - [ ] to [create a keypair][keypair] in AWS 101 | - [ ] Docker images of your services uploaded to [Docker Hub][docker-hub] 102 | - [ ] to download and install [terraform][terraform] 103 | 104 | ## Architecture 105 | 106 | At a high level, the Stack creates a multi-az VPC, a single auto-scaling cluster, and service definitions within ECS. 107 | 108 | ![](./images/stack.png) 109 | 110 | Your instances are automatically distributed across the VPC, addresses are translated by NAT gateways, and services are all discoverable via route53 and ELBs. 111 | 112 | We'll walk through how each of these fits together in this architecture document. 113 | 114 | ### Networking 115 | 116 | ![](./images/networking.png) 117 | 118 | By default, the Stack will create a VPC in a single region, spanning multiple availability zones (AZs). The default CIDR block for this VPC is 119 | 120 | 10.30.0.0/16 121 | 122 | The address was chosen to be internal, and to not conflict with other pieces of infrastructure you might run. But it can also be configured with its own CIDR range. 123 | 124 | Each availability zone will get its own external and internal subnets. Most of our infrastructure will live in the *internal* subnets so that it is not directly accessible from the internet. 125 | 126 | If you'd like to scale to multiple regions (outside the scope of the current stack), simply add one to the second octet. 127 | 128 | 10.31.0.0/16 -- my new region 129 | 130 | To span across availability zones, the regional 16-bit block is split into 18-bit blocks: 131 | 132 | 10.30.0.0/18 - AZ A 133 | 10.30.64.0/18 - AZ B 134 | 10.30.128.0/18 - AZ C 135 | 10.30.192.0/18 - Spare 136 | 137 | To subdivide each availability zone into internal and external spaces, with spare room for growth, use a 19-bit mask for internal and a 20-bit mask for external. The external space is smaller because only a few instances and load-balancers should be provisioned into it.
138 | 139 | 10.30.0.0/18 - AZ A 140 | 141 | 10.30.0.0/19 internal 142 | 10.30.32.0/20 external 143 | 10.30.48.0/20 spare 144 | 145 | 10.30.64.0/18 - AZ B 146 | 147 | 10.30.64.0/19 internal 148 | 10.30.96.0/20 external 149 | 10.30.112.0/20 spare 150 | 151 | 10.30.128.0/18 - AZ C 152 | 153 | 10.30.128.0/19 internal 154 | 10.30.160.0/20 external 155 | 10.30.176.0/20 spare 156 | 157 | The VPC itself will contain a single internet gateway to route 158 | traffic in and out of the different subnets. The Stack terraform will automatically create 3 separate [NAT Gateways][nat-gateway], one in each of the external subnets. 159 | 160 | Traffic from each internal subnet to the outside world will run through the associated NAT gateway. 161 | 162 | Alternatively, setting the `use_nat_instances` VPC module variable to true will use [EC2 NAT instances][nat-instances] instead of the NAT gateway. NAT instances cost less than the NAT gateway, can be shut down when not in use, and may be preferred in development environments. By default, NAT instances will not use [Elastic IPs][elastic-ip] to avoid a small hourly charge if the NAT instances are not running full time. To use Elastic IPs for the NAT instances, set the `use_eip_with_nat_instances` VPC module variable to true. 163 | 164 | For further reading, check out these sources: 165 | 166 | - [Recommended Address Space](http://serverfault.com/questions/630022/what-is-the-recommended-cidr-when-creating-vpc-on-aws) 167 | - [Practical VPC Design](https://medium.com/aws-activate-startup-blog/practical-vpc-design-8412e1a18dcc) 168 | 169 | [nat-gateway]: http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-nat-gateway.html 170 | [nat-instances]: http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_NAT_Instance.html 171 | [elastic-ip]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html 172 | 173 | ### Instances 174 | 175 | ![](./images/instance.png) 176 | 177 | Each instance in an ecs-cluster is provisioned using an AMI built in the `./packer` directory. By default, this AMI is based on the Ubuntu 16.04 LTS image, and runs all the base programs under **systemd**. 178 | 179 | After boot, systemd will run each of its targets, which includes booting Docker and the ECS agent. The ECS agent will register the instance with a particular cluster, pulled from the environment variables on the instance. 180 | 181 | ### Services 182 | 183 | ![](./images/service.png) 184 | 185 | Stack services run within ECS. They include a few key pieces: 186 | 187 | - an ECS task definition 188 | - an ECS service definition 189 | - an internal ELB 190 | - an internal route53 entry 191 | 192 | The task definition tells ECS _what_ docker image to run (nginx), and _how_ to run it (env vars, arguments, etc). The service definition tells ECS how many containers of a task to run, and on which cluster to run the containers. The ELB routes traffic to the containers in a service, and route53 assigns a 'nice' name to the ELB. 193 | 194 | Service discovery works via vanilla DNS. Whenever a service is provisioned, it will also create an accompanying ELB that routes to the containers in the service. For example, the ELB provisioned by the 'auth' service is reachable through its route53 entry: 195 | 196 | $ curl http://auth.stack.local 197 | 198 | For more complicated service discovery that handles cases like versioning, we'd recommend using a service like [Consul][consul] or [etcd][etcd].
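Before reaching for those, it's easy to sanity-check the vanilla DNS setup from any instance inside the VPC. A sketch, using a hypothetical `auth` service (the `/health` path is illustrative):

    $ dig +short auth.stack.local    # prints the internal ELB's current addresses
    $ curl http://auth.stack.local/health

Because the record is a route53 alias to the ELB, the addresses it resolves to can change over time; connect by name rather than caching IPs.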
199 | 200 | [consul]: https://www.consul.io/ 201 | [etcd]: https://github.com/coreos/etcd 202 | 203 | ### Bastion 204 | 205 | The bastion host acts as the "jump point" for the rest of the infrastructure. Since most of our instances aren't exposed to the external internet, the bastion acts as the gatekeeper for any direct SSH access. 206 | 207 | The bastion is provisioned using the key name that you pass to the stack (and hopefully have stored somewhere). If you ever need to access an instance directly, you can do it by "jumping through" the bastion: 208 | 209 | $ terraform output # print the bastion ip 210 | $ ssh -i <path/to/key> ubuntu@<bastion-ip> ssh ubuntu@<internal-ip> 211 | 212 | ### Logging 213 | 214 | The default AMI that the ECS cluster instances run ships with the ecs-agent and a program called ecs-logs pre-configured. While ecs-agent takes care of scheduling services, ecs-logs is in charge of reading the service logs and uploading them to CloudWatch. 215 | This is all configured automatically by the default Stack settings. 216 | 217 | ecs-logs creates one CloudWatch Logs Group for each service; then, in each group, a CloudWatch Logs Stream named after the docker container running the service holds all the logs generated by the service. 218 | 219 | If you're interested in digging further into how ecs-logs works, here is the github repository where it's hosted: 220 | 221 | - https://github.com/segmentio/ecs-logs 222 | 223 | ## Module Reference 224 | 225 | To see the full reference for each individual module, see our [reference page](./docs.md). 226 | 227 | You can reference modules individually by name: 228 | 229 | ```hcl 230 | module "vpc" { 231 | source = "github.com/segmentio/stack//vpc" 232 | name = "${var.name}" 233 | environment = "${var.environment}" 234 | cidr = "${var.cidr}" 235 | internal_subnets = "${var.internal_subnets}" 236 | external_subnets = "${var.external_subnets}" 237 | availability_zones = "${var.availability_zones}" 238 | } 239 | ``` 240 | 241 | ## Developing 242 | 243 | You can customize any part of the stack you'd like. 244 | 245 | ### AMIs 246 | 247 | All of the default AMIs that ship with stack are built using [packer][packer]. If you'd like to build your own, you can make changes to the `./packer` directory and then re-build using: 248 | 249 | $ make amis 250 | 251 | [packer]: https://www.packer.io 252 | 253 | ### Terraform 254 | 255 | Stack is all vanilla Terraform and AWS, so you can customize it by simply forking the repository and referencing your own modules internally.
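For example, a fork can be referenced by swapping the repository in the module source; a sketch, with `my-org` standing in for your GitHub organization:

```hcl
# Top-level stack module sourced from a fork (hypothetical org name):
module "stack" {
  source      = "github.com/my-org/stack"
  environment = "prod"
  key_name    = "my-key-name"
  name        = "my-app"
}

# Individual modules can be sourced from the fork the same way:
module "vpc" {
  source = "github.com/my-org/stack//vpc"
  # ... the usual vpc inputs ...
}
```

The `//` separates the repository from the module subdirectory, so upstream and forked modules can be mixed while you customize incrementally.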
256 | 257 | ## Examples 258 | 259 | To dig further down into what you can build with the Segment Stack we have put together an example app that shows how to configure a small infrastructure from scratch: 260 | 261 | - https://github.com/segmentio/pingdummy 262 | 263 | ## Authors 264 | 265 | - [Calvin French-Owen](https://github.com/calvinfo) 266 | - [Amir Abu Shareb](https://github.com/yields) 267 | - [Achille Roussel](https://github.com/achille-roussel) 268 | - [Kevin Lo](https://github.com/liquidy) 269 | - [Rick Branson](https://github.com/rbranson) 270 | 271 | ## License 272 | 273 | Released under the MIT License 274 | 275 | (The MIT License) 276 | 277 | Copyright (c) 2016 Segment friends@segment.com 278 | 279 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 280 | 281 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 282 | 283 | THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 284 | -------------------------------------------------------------------------------- /bastion/main.tf: -------------------------------------------------------------------------------- 1 | /** 2 | * The bastion host acts as the "jump point" for the rest of the infrastructure. 3 | * Since most of our instances aren't exposed to the external internet, the bastion acts as the gatekeeper for any direct SSH access. 4 | * The bastion is provisioned using the key name that you pass to the stack (and hopefully have stored somewhere). 5 | * If you ever need to access an instance directly, you can do it by "jumping through" the bastion. 
6 | * 7 | * $ terraform output # print the bastion ip 8 | * $ ssh -i <path/to/key> ubuntu@<bastion-ip> ssh ubuntu@<internal-ip> 9 | * 10 | * Usage: 11 | * 12 | * module "bastion" { 13 | * source = "github.com/segmentio/stack/bastion" 14 | * region = "us-west-2" 15 | * security_groups = "sg-1,sg-2" 16 | * vpc_id = "vpc-12" 17 | * key_name = "ssh-key" 18 | * subnet_id = "pub-1" 19 | * environment = "prod" 20 | * } 21 | * 22 | */ 23 | 24 | variable "instance_type" { 25 | default = "t2.micro" 26 | description = "Instance type, see a list at: https://aws.amazon.com/ec2/instance-types/" 27 | } 28 | 29 | variable "region" { 30 | description = "AWS Region, e.g us-west-2" 31 | } 32 | 33 | variable "security_groups" { 34 | description = "a comma separated list of security group IDs" 35 | } 36 | 37 | variable "vpc_id" { 38 | description = "VPC ID" 39 | } 40 | 41 | variable "key_name" { 42 | description = "The SSH key pair, key name" 43 | } 44 | 45 | variable "subnet_id" { 46 | description = "An external subnet ID" 47 | } 48 | 49 | variable "environment" { 50 | description = "Environment tag, e.g prod" 51 | } 52 | 53 | module "ami" { 54 | source = "github.com/terraform-community-modules/tf_aws_ubuntu_ami/ebs" 55 | region = "${var.region}" 56 | distribution = "trusty" 57 | instance_type = "${var.instance_type}" 58 | } 59 | 60 | resource "aws_instance" "bastion" { 61 | ami = "${module.ami.ami_id}" 62 | source_dest_check = false 63 | instance_type = "${var.instance_type}" 64 | subnet_id = "${var.subnet_id}" 65 | key_name = "${var.key_name}" 66 | vpc_security_group_ids = ["${split(",",var.security_groups)}"] 67 | monitoring = true 68 | user_data = "${file(format("%s/user_data.sh", path.module))}" 69 | 70 | tags { 71 | Name = "bastion" 72 | Environment = "${var.environment}" 73 | } 74 | } 75 | 76 | resource "aws_eip" "bastion" { 77 | instance = "${aws_instance.bastion.id}" 78 | vpc = true 79 | } 80 | 81 | // Bastion external IP address. 82 | output "external_ip" { 83 | value = "${aws_eip.bastion.public_ip}" 84 | } 85 | -------------------------------------------------------------------------------- /bastion/user_data.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | cat <<EOF > foo 5 | Host * 6 | IdentityFile ~/.ssh/key.pem 7 | User ubuntu 8 | EOF 9 | -------------------------------------------------------------------------------- /circle.yml: -------------------------------------------------------------------------------- 1 | 2 | dependencies: 3 | override: 4 | - sudo curl -L# https://releases.hashicorp.com/terraform/0.7.5/terraform_0.7.5_linux_amd64.zip -o /usr/local/bin/tf.zip 5 | - cd /usr/local/bin && sudo unzip tf.zip 6 | 7 | test: 8 | override: 9 | - make test 10 | -------------------------------------------------------------------------------- /defaults/main.tf: -------------------------------------------------------------------------------- 1 | /** 2 | * This module is used to set configuration defaults for the AWS infrastructure. 3 | * It doesn't provide much value when used on its own because terraform makes it 4 | * hard to do dynamic generation of things like subnets; for now it's used as 5 | * a helper module for the stack.
6 | * 7 | * Usage: 8 | * 9 | * module "defaults" { 10 | * source = "github.com/segmentio/stack/defaults" 11 | * region = "us-east-1" 12 | * cidr = "10.0.0.0/16" 13 | * } 14 | * 15 | */ 16 | 17 | variable "region" { 18 | description = "The AWS region" 19 | } 20 | 21 | variable "cidr" { 22 | description = "The CIDR block to provision for the VPC" 23 | } 24 | 25 | variable "default_ecs_ami" { 26 | default = { 27 | us-east-1 = "ami-dde4e6ca" 28 | us-west-1 = "ami-6d21770d" 29 | us-west-2 = "ami-97da70f7" 30 | eu-west-1 = "ami-c41f3bb7" 31 | eu-central-1 = "ami-4ba16024" 32 | ap-northeast-1 = "ami-90ea86f7" 33 | ap-northeast-2 = "ami-8a4b9ce4" 34 | ap-southeast-1 = "ami-d603afb5" 35 | ap-southeast-2 = "ami-1ddce47e" 36 | sa-east-1 = "ami-29039a45" 37 | } 38 | } 39 | 40 | # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-access-logs.html#attach-bucket-policy 41 | variable "default_log_account_ids" { 42 | default = { 43 | us-east-1 = "127311923021" 44 | us-west-2 = "797873946194" 45 | us-west-1 = "027434742980" 46 | eu-west-1 = "156460612806" 47 | eu-central-1 = "054676820928" 48 | ap-southeast-1 = "114774131450" 49 | ap-northeast-1 = "582318560864" 50 | ap-southeast-2 = "783225319266" 51 | ap-northeast-2 = "600734575887" 52 | sa-east-1 = "507241528517" 53 | us-gov-west-1 = "048591011584" 54 | cn-north-1 = "638102146993" 55 | } 56 | } 57 | 58 | output "domain_name_servers" { 59 | value = "${cidrhost(var.cidr, 2)}" 60 | } 61 | 62 | output "ecs_ami" { 63 | value = "${lookup(var.default_ecs_ami, var.region)}" 64 | } 65 | 66 | output "s3_logs_account_id" { 67 | value = "${lookup(var.default_log_account_ids, var.region)}" 68 | } 69 | -------------------------------------------------------------------------------- /dhcp/main.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | description = "The domain name to set up DHCP for" 3 | } 4 | 5 | variable "vpc_id" { 6 | description = "The ID of the VPC to set up DHCP for" 7 | } 8 | 9 | variable "servers" { 10 | description = "A comma separated list of the IP addresses of internal DHCP servers" 11 | } 12 | 13 | resource "aws_vpc_dhcp_options" "dns_resolver" { 14 | domain_name = "${var.name}" 15 | domain_name_servers = ["${split(",", var.servers)}"] 16 | } 17 | 18 | resource "aws_vpc_dhcp_options_association" "dns_resolver" { 19 | vpc_id = "${var.vpc_id}" 20 | dhcp_options_id = "${aws_vpc_dhcp_options.dns_resolver.id}" 21 | } 22 | -------------------------------------------------------------------------------- /dns/main.tf: -------------------------------------------------------------------------------- 1 | /** 2 | * The dns module creates a local route53 zone that serves 3 | * as a service discovery utility. For example, given a service 4 | * resource named `auth` and a dns module 5 | * named `stack.local`, the service address will be `auth.stack.local`. 6 | * 7 | * Usage: 8 | * 9 | * module "dns" { 10 | * source = "github.com/segmentio/stack/dns" 11 | * name = "stack.local" 12 | * } 13 | * 14 | */ 15 | 16 | variable "name" { 17 | description = "Zone name, e.g stack.local" 18 | } 19 | 20 | variable "vpc_id" { 21 | description = "The VPC ID (omit to create a public zone)" 22 | default = "" 23 | } 24 | 25 | resource "aws_route53_zone" "main" { 26 | name = "${var.name}" 27 | vpc_id = "${var.vpc_id}" 28 | comment = "" 29 | } 30 | 31 | // The domain name. 32 | output "name" { 33 | value = "${var.name}" 34 | } 35 | 36 | // The zone ID.
37 | output "zone_id" { 38 | value = "${aws_route53_zone.main.zone_id}" 39 | } 40 | 41 | // A comma separated list of the zone name servers. 42 | output "name_servers" { 43 | value = "${join(",",aws_route53_zone.main.name_servers)}" 44 | } 45 | -------------------------------------------------------------------------------- /docs.md: -------------------------------------------------------------------------------- 1 | # Stack 2 | 3 | The stack module combines sub modules to create a complete 4 | stack with `vpc`, a default ecs cluster with auto scaling 5 | and a bastion node that enables you to access all instances. 6 | 7 | Usage: 8 | 9 | module "stack" { 10 | source = "github.com/segmentio/stack" 11 | name = "mystack" 12 | environment = "prod" 13 | } 14 | 15 | ## Available Modules 16 | 17 | * [stack](#stack) 18 | * [bastion](#bastion) 19 | * [defaults](#defaults) 20 | * [dhcp](#dhcp) 21 | * [dns](#dns) 22 | * [ecs-cluster](#ecs-cluster) 23 | * [elb](#elb) 24 | * [iam-user](#iam-user) 25 | * [rds-cluster](#rds-cluster) 26 | * [s3-logs](#s3-logs) 27 | * [security-groups](#security-groups) 28 | * [service](#service) 29 | * [task](#task) 30 | * [vpc](#vpc) 31 | * [web-service](#web-service) 32 | * [worker](#worker) 33 | 34 | ## Inputs 35 | 36 | | Name | Description | Default | Required | 37 | |------|-------------|:-----:|:-----:| 38 | | name | the name of your stack, e.g. "segment" | - | yes | 39 | | environment | the name of your environment, e.g. "prod-west" | - | yes | 40 | | key_name | the name of the ssh key to use, e.g. "internal-key" | - | yes | 41 | | domain_name | the internal DNS name to use with services | `stack.local` | no | 42 | | domain_name_servers | the internal DNS servers, defaults to the internal route53 server of the VPC | `` | no | 43 | | region | the AWS region in which resources are created, you must set the availability_zones variable as well if you define this value to something other than the default | `us-west-2` | no | 44 | | cidr | the CIDR block to provision for the VPC, if set to something other than the default, both internal_subnets and external_subnets have to be defined as well | `10.30.0.0/16` | no | 45 | | internal_subnets | a list of CIDRs for internal subnets in your VPC, must be set if the cidr variable is defined, needs to have as many elements as there are availability zones | `` | no | 46 | | external_subnets | a list of CIDRs for external subnets in your VPC, must be set if the cidr variable is defined, needs to have as many elements as there are availability zones | `` | no | 47 | | use_nat_instances | use NAT EC2 instances instead of the NAT gateway service | `false` | no | 48 | | use_eip_with_nat_instances | use Elastic IPs with NAT instances if `use_nat_instances` is true | `false` | no | 49 | | nat_instance_type | the EC2 instance type for NAT instances if `use_nat_instances` is true | `t2.nano` | no | 50 | | nat_instance_ssh_key_name | the name of the ssh key to use with NAT instances if `use_nat_instances` is true | "" | no | 51 | | availability_zones | a comma-separated list of availability zones, defaults to all AZ of the region, if set to something other than the defaults, both internal_subnets and external_subnets have to be defined as well | `` | no | 52 | | bastion_instance_type | Instance type for the bastion | `t2.micro` | no | 53 | | ecs_cluster_name | the name of the cluster, if not specified the variable name will be used | `` | no | 54 | | ecs_instance_type | the instance type to use for your default ecs cluster | `m4.large` | no 
| 55 | | ecs_instance_ebs_optimized | use EBS - not all instance types support EBS | `true` | no | 56 | | ecs_min_size | the minimum number of instances to use in the default ecs cluster | `3` | no | 57 | | ecs_max_size | the maximum number of instances to use in the default ecs cluster | `100` | no | 58 | | ecs_desired_capacity | the desired number of instances to use in the default ecs cluster | `3` | no | 59 | | ecs_root_volume_size | the size of the ecs instance root volume | `25` | no | 60 | | ecs_docker_volume_size | the size of the ecs instance docker volume | `25` | no | 61 | | ecs_docker_auth_type | The docker auth type, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the possible values | `` | no | 62 | | ecs_docker_auth_data | A JSON object providing the docker auth data, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the supported formats | `` | no | 63 | | ecs_security_groups | A comma separated list of security groups from which ingest traffic will be allowed on the ECS cluster, it defaults to allowing ingress traffic on port 22 and coming from the ELBs | `` | no | 64 | | ecs_ami | The AMI that will be used to launch EC2 instances in the ECS cluster | `` | no | 65 | | extra_cloud_config_type | Extra cloud config type | `text/cloud-config` | no | 66 | | extra_cloud_config_content | Extra cloud config content | `` | no | 67 | 68 | ## Outputs 69 | 70 | | Name | Description | 71 | |------|-------------| 72 | | region | The region in which the infra lives. | 73 | | bastion_ip | The bastion host IP. | 74 | | zone_id | The internal route53 zone ID. | 75 | | internal_elb | Security group for internal ELBs. | 76 | | external_elb | Security group for external ELBs. | 77 | | internal_subnets | Comma separated list of internal subnet IDs. | 78 | | external_subnets | Comma separated list of external subnet IDs. | 79 | | iam_role | ECS Service IAM role. | 80 | | iam_role_default_ecs_role_id | Default ECS role ID. Useful if you want to add a new policy to that role. | 81 | | log_bucket_id | S3 bucket ID for ELB logs. | 82 | | domain_name | The internal domain name, e.g "stack.local". | 83 | | environment | The environment of the stack, e.g "prod". | 84 | | cluster | The default ECS cluster name. | 85 | | availability_zones | The VPC availability zones. | 86 | | vpc_security_group | The VPC security group ID. | 87 | | vpc_id | The VPC ID. | 88 | | ecs_cluster_security_group_id | The default ECS cluster security group ID. | 89 | | internal_route_tables | Comma separated list of internal route table IDs. | 90 | | external_route_tables | The external route table ID. | 91 | 92 | # bastion 93 | 94 | The bastion host acts as the "jump point" for the rest of the infrastructure. 95 | Since most of our instances aren't exposed to the external internet, the bastion acts as the gatekeeper for any direct SSH access. 96 | The bastion is provisioned using the key name that you pass to the stack (and hopefully have stored somewhere). 97 | If you ever need to access an instance directly, you can do it by "jumping through" the bastion. 
98 | 99 | $ terraform output # print the bastion ip 100 | $ ssh -i <path/to/key> ubuntu@<bastion-ip> ssh ubuntu@<internal-ip> 101 | 102 | Usage: 103 | 104 | module "bastion" { 105 | source = "github.com/segmentio/stack/bastion" 106 | region = "us-west-2" 107 | security_groups = "sg-1,sg-2" 108 | vpc_id = "vpc-12" 109 | key_name = "ssh-key" 110 | subnet_id = "pub-1" 111 | environment = "prod" 112 | } 113 | 114 | 115 | 116 | ## Inputs 117 | 118 | | Name | Description | Default | Required | 119 | |------|-------------|:-----:|:-----:| 120 | | instance_type | Instance type, see a list at: https://aws.amazon.com/ec2/instance-types/ | `t2.micro` | no | 121 | | region | AWS Region, e.g us-west-2 | - | yes | 122 | | security_groups | a comma separated list of security group IDs | - | yes | 123 | | vpc_id | VPC ID | - | yes | 124 | | key_name | The SSH key pair, key name | - | yes | 125 | | subnet_id | An external subnet ID | - | yes | 126 | | environment | Environment tag, e.g prod | - | yes | 127 | 128 | ## Outputs 129 | 130 | | Name | Description | 131 | |------|-------------| 132 | | external_ip | Bastion external IP address. | 133 | 134 | # defaults 135 | 136 | This module is used to set configuration defaults for the AWS infrastructure. 137 | It doesn't provide much value when used on its own because terraform makes it 138 | hard to do dynamic generation of things like subnets; for now it's used as 139 | a helper module for the stack. 140 | 141 | Usage: 142 | 143 | module "defaults" { 144 | source = "github.com/segmentio/stack/defaults" 145 | region = "us-east-1" 146 | cidr = "10.0.0.0/16" 147 | } 148 | 149 | 150 | 151 | ## Inputs 152 | 153 | | Name | Description | Default | Required | 154 | |------|-------------|:-----:|:-----:| 155 | | region | The AWS region | - | yes | 156 | | cidr | The CIDR block to provision for the VPC | - | yes | 157 | | default_ecs_ami | | `` | no | 158 | | default_log_account_ids | # http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/enable-access-logs.html#attach-bucket-policy | `` | no | 159 | 160 | ## Outputs 161 | 162 | | Name | Description | 163 | |------|-------------| 164 | | domain_name_servers | | 165 | | ecs_ami | | 166 | | s3_logs_account_id | | 167 | 168 | # dhcp 169 | 170 | 171 | ## Inputs 172 | 173 | | Name | Description | Default | Required | 174 | |------|-------------|:-----:|:-----:| 175 | | name | The domain name to set up DHCP for | - | yes | 176 | | vpc_id | The ID of the VPC to set up DHCP for | - | yes | 177 | | servers | A comma separated list of the IP addresses of internal DHCP servers | - | yes | 178 | 179 | # dns 180 | 181 | The dns module creates a local route53 zone that serves 182 | as a service discovery utility. For example, given a service 183 | resource named `auth` and a dns module 184 | named `stack.local`, the service address will be `auth.stack.local`. 185 | 186 | Usage: 187 | 188 | module "dns" { 189 | source = "github.com/segmentio/stack/dns" 190 | name = "stack.local" 191 | } 192 | 193 | 194 | 195 | ## Inputs 196 | 197 | | Name | Description | Default | Required | 198 | |------|-------------|:-----:|:-----:| 199 | | name | Zone name, e.g stack.local | - | yes | 200 | | vpc_id | The VPC ID (omit to create a public zone) | `` | no | 201 | 202 | ## Outputs 203 | 204 | | Name | Description | 205 | |------|-------------| 206 | | name | The domain name. | 207 | | zone_id | The zone ID. | 208 | | name_servers | A comma separated list of the zone name servers.
| 209 | 210 | # ecs-cluster 211 | 212 | ECS Cluster creates a cluster with the following features: 213 | 214 | - Autoscaling groups 215 | - Instance tags for filtering 216 | - EBS volume for docker resources 217 | 218 | 219 | Usage: 220 | 221 | module "cdn" { 222 | source = "github.com/segmentio/stack/ecs-cluster" 223 | environment = "prod" 224 | name = "cdn" 225 | vpc_id = "vpc-id" 226 | image_id = "ami-id" 227 | subnet_ids = ["1" ,"2"] 228 | key_name = "ssh-key" 229 | security_groups = "1,2" 230 | iam_instance_profile = "id" 231 | region = "us-west-2" 232 | availability_zones = ["a", "b"] 233 | instance_type = "t2.small" 234 | } 235 | 236 | 237 | 238 | ## Inputs 239 | 240 | | Name | Description | Default | Required | 241 | |------|-------------|:-----:|:-----:| 242 | | name | The cluster name, e.g cdn | - | yes | 243 | | environment | Environment tag, e.g prod | - | yes | 244 | | vpc_id | VPC ID | - | yes | 245 | | image_id | AMI Image ID | - | yes | 246 | | subnet_ids | List of subnet IDs | - | yes | 247 | | key_name | SSH key name to use | - | yes | 248 | | security_groups | Comma separated list of security groups | - | yes | 249 | | iam_instance_profile | Instance profile ARN to use in the launch configuration | - | yes | 250 | | region | AWS Region | - | yes | 251 | | availability_zones | List of AZs | - | yes | 252 | | instance_type | The instance type to use, e.g t2.small | - | yes | 253 | | instance_ebs_optimized | When set to true the instance will be launched with EBS optimized turned on | `true` | no | 254 | | min_size | Minimum instance count | `3` | no | 255 | | max_size | Maximum instance count | `100` | no | 256 | | desired_capacity | Desired instance count | `3` | no | 257 | | associate_public_ip_address | Should created instances be publicly accessible (if the SG allows) | `false` | no | 258 | | root_volume_size | Root volume size in GB | `25` | no | 259 | | docker_volume_size | Attached EBS volume size in GB | `25` | no | 260 | | docker_auth_type | The docker auth type, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the possible values | `` | no | 261 | | docker_auth_data | A JSON object providing the docker auth data, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the supported formats | `` | no | 262 | | extra_cloud_config_type | Extra cloud config type | `text/cloud-config` | no | 263 | | extra_cloud_config_content | Extra cloud config content | `` | no | 264 | 265 | ## Outputs 266 | 267 | | Name | Description | 268 | |------|-------------| 269 | | name | The cluster name, e.g cdn | 270 | | security_group_id | The cluster security group ID. | 271 | 272 | # elb 273 | 274 | The ELB module creates an ELB, a security group, 275 | a route53 record and a service healthcheck. 276 | It is used by the service module.
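Usage (a sketch only; the `service` module normally wires this up for you, and every value below is a placeholder for the inputs documented in the table that follows):

    module "auth_elb" {
      source          = "github.com/segmentio/stack//elb"
      name            = "auth"
      port            = 3000
      environment     = "prod"
      subnet_ids      = "subnet-1,subnet-2"
      security_groups = "sg-1,sg-2"
      dns_name        = "auth"
      healthcheck     = "/health"
      protocol        = "HTTP"
      zone_id         = "zone-id"
      log_bucket      = "log-bucket"
    }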
277 | 278 | 279 | ## Inputs 280 | 281 | | Name | Description | Default | Required | 282 | |------|-------------|:-----:|:-----:| 283 | | name | ELB name, e.g cdn | - | yes | 284 | | subnet_ids | Comma separated list of subnet IDs | - | yes | 285 | | environment | Environment tag, e.g prod | - | yes | 286 | | port | Instance port | - | yes | 287 | | security_groups | Comma separated list of security group IDs | - | yes | 288 | | dns_name | Route53 record name | - | yes | 289 | | healthcheck | Healthcheck path | - | yes | 290 | | protocol | Protocol to use, HTTP or TCP | - | yes | 291 | | zone_id | Route53 zone ID to use for dns_name | - | yes | 292 | | log_bucket | S3 bucket name to write ELB logs into | - | yes | 293 | 294 | ## Outputs 295 | 296 | | Name | Description | 297 | |------|-------------| 298 | | name | The ELB name. | 299 | | id | The ELB ID. | 300 | | dns | The ELB dns_name. | 301 | | fqdn | FQDN built using the zone domain and name | 302 | | zone_id | The zone id of the ELB | 303 | 304 | # iam-user 305 | 306 | The module creates an IAM user. 307 | 308 | Usage: 309 | 310 | module "my_user" { 311 | name = "user" 312 | policy = <> /etc/environment 4 | - echo 'SERVER_GROUP=${name}' >> /etc/environment 5 | - echo 'SERVER_REGION=${region}' >> /etc/environment 6 | 7 | - mkdir -p /etc/ecs 8 | - echo 'ECS_CLUSTER=${name}' >> /etc/ecs/ecs.config 9 | - echo 'ECS_ENGINE_AUTH_TYPE=${docker_auth_type}' >> /etc/ecs/ecs.config 10 | - > 11 | echo 'ECS_ENGINE_AUTH_DATA=${docker_auth_data}' >> /etc/ecs/ecs.config 12 | -------------------------------------------------------------------------------- /ecs-cluster/main.tf: -------------------------------------------------------------------------------- 1 | /** 2 | * ECS Cluster creates a cluster with the following features: 3 | * 4 | * - Autoscaling groups 5 | * - Instance tags for filtering 6 | * - EBS volume for docker resources 7 | * 8 | * 9 | * Usage: 10 | * 11 | * module "cdn" { 12 | * source = "github.com/segmentio/stack/ecs-cluster" 13 | * environment = "prod" 14 | * name = "cdn" 15 | * vpc_id = "vpc-id" 16 | * image_id = "ami-id" 17 | * subnet_ids = ["1" ,"2"] 18 | * key_name = "ssh-key" 19 | * security_groups = "1,2" 20 | * iam_instance_profile = "id" 21 | * region = "us-west-2" 22 | * availability_zones = ["a", "b"] 23 | * instance_type = "t2.small" 24 | * } 25 | * 26 | */ 27 | 28 | variable "name" { 29 | description = "The cluster name, e.g cdn" 30 | } 31 | 32 | variable "environment" { 33 | description = "Environment tag, e.g prod" 34 | } 35 | 36 | variable "vpc_id" { 37 | description = "VPC ID" 38 | } 39 | 40 | variable "image_id" { 41 | description = "AMI Image ID" 42 | } 43 | 44 | variable "subnet_ids" { 45 | description = "List of subnet IDs" 46 | type = "list" 47 | } 48 | 49 | variable "key_name" { 50 | description = "SSH key name to use" 51 | } 52 | 53 | variable "security_groups" { 54 | description = "Comma separated list of security groups" 55 | } 56 | 57 | variable "iam_instance_profile" { 58 | description = "Instance profile ARN to use in the launch configuration" 59 | } 60 | 61 | variable "region" { 62 | description = "AWS Region" 63 | } 64 | 65 | variable "availability_zones" { 66 | description = "List of AZs" 67 | type = "list" 68 | } 69 | 70 | variable "instance_type" { 71 | description = "The instance type to use, e.g t2.small" 72 | } 73 | 74 | variable "instance_ebs_optimized" { 75 | description = "When set to true the instance will be launched with EBS optimized turned on" 76 | default = true 77 | } 78 | 79 | variable 
"min_size" { 80 | description = "Minimum instance count" 81 | default = 3 82 | } 83 | 84 | variable "max_size" { 85 | description = "Maxmimum instance count" 86 | default = 100 87 | } 88 | 89 | variable "desired_capacity" { 90 | description = "Desired instance count" 91 | default = 3 92 | } 93 | 94 | variable "associate_public_ip_address" { 95 | description = "Should created instances be publicly accessible (if the SG allows)" 96 | default = false 97 | } 98 | 99 | variable "root_volume_size" { 100 | description = "Root volume size in GB" 101 | default = 25 102 | } 103 | 104 | variable "docker_volume_size" { 105 | description = "Attached EBS volume size in GB" 106 | default = 25 107 | } 108 | 109 | variable "docker_auth_type" { 110 | description = "The docker auth type, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the possible values" 111 | default = "" 112 | } 113 | 114 | variable "docker_auth_data" { 115 | description = "A JSON object providing the docker auth data, see https://godoc.org/github.com/aws/amazon-ecs-agent/agent/engine/dockerauth for the supported formats" 116 | default = "" 117 | } 118 | 119 | variable "extra_cloud_config_type" { 120 | description = "Extra cloud config type" 121 | default = "text/cloud-config" 122 | } 123 | 124 | variable "extra_cloud_config_content" { 125 | description = "Extra cloud config content" 126 | default = "" 127 | } 128 | 129 | resource "aws_security_group" "cluster" { 130 | name = "${var.name}-ecs-cluster" 131 | vpc_id = "${var.vpc_id}" 132 | description = "Allows traffic from and to the EC2 instances of the ${var.name} ECS cluster" 133 | 134 | ingress { 135 | from_port = 0 136 | to_port = 0 137 | protocol = -1 138 | security_groups = ["${split(",", var.security_groups)}"] 139 | } 140 | 141 | egress { 142 | from_port = 0 143 | to_port = 0 144 | protocol = -1 145 | cidr_blocks = ["0.0.0.0/0"] 146 | } 147 | 148 | tags { 149 | Name = "ECS cluster (${var.name})" 150 | Environment = "${var.environment}" 151 | } 152 | 153 | lifecycle { 154 | create_before_destroy = true 155 | } 156 | } 157 | 158 | resource "aws_ecs_cluster" "main" { 159 | name = "${var.name}" 160 | 161 | lifecycle { 162 | create_before_destroy = true 163 | } 164 | } 165 | 166 | data "template_file" "ecs_cloud_config" { 167 | template = "${file("${path.module}/files/cloud-config.yml.tpl")}" 168 | 169 | vars { 170 | environment = "${var.environment}" 171 | name = "${var.name}" 172 | region = "${var.region}" 173 | docker_auth_type = "${var.docker_auth_type}" 174 | docker_auth_data = "${var.docker_auth_data}" 175 | } 176 | } 177 | 178 | data "template_cloudinit_config" "cloud_config" { 179 | gzip = false 180 | base64_encode = false 181 | 182 | part { 183 | content_type = "text/cloud-config" 184 | content = "${data.template_file.ecs_cloud_config.rendered}" 185 | } 186 | 187 | part { 188 | content_type = "${var.extra_cloud_config_type}" 189 | content = "${var.extra_cloud_config_content}" 190 | } 191 | } 192 | 193 | resource "aws_launch_configuration" "main" { 194 | name_prefix = "${format("%s-", var.name)}" 195 | 196 | image_id = "${var.image_id}" 197 | instance_type = "${var.instance_type}" 198 | ebs_optimized = "${var.instance_ebs_optimized}" 199 | iam_instance_profile = "${var.iam_instance_profile}" 200 | key_name = "${var.key_name}" 201 | security_groups = ["${aws_security_group.cluster.id}"] 202 | user_data = "${data.template_cloudinit_config.cloud_config.rendered}" 203 | associate_public_ip_address = "${var.associate_public_ip_address}" 204 | 205 | 
# root 206 | root_block_device { 207 | volume_type = "gp2" 208 | volume_size = "${var.root_volume_size}" 209 | } 210 | 211 | # docker 212 | ebs_block_device { 213 | device_name = "/dev/xvdcz" 214 | volume_type = "gp2" 215 | volume_size = "${var.docker_volume_size}" 216 | } 217 | 218 | lifecycle { 219 | create_before_destroy = true 220 | } 221 | } 222 | 223 | resource "aws_autoscaling_group" "main" { 224 | name = "${var.name}" 225 | 226 | availability_zones = ["${var.availability_zones}"] 227 | vpc_zone_identifier = ["${var.subnet_ids}"] 228 | launch_configuration = "${aws_launch_configuration.main.id}" 229 | min_size = "${var.min_size}" 230 | max_size = "${var.max_size}" 231 | desired_capacity = "${var.desired_capacity}" 232 | termination_policies = ["OldestLaunchConfiguration", "Default"] 233 | 234 | tag { 235 | key = "Name" 236 | value = "${var.name}" 237 | propagate_at_launch = true 238 | } 239 | 240 | tag { 241 | key = "Cluster" 242 | value = "${var.name}" 243 | propagate_at_launch = true 244 | } 245 | 246 | tag { 247 | key = "Environment" 248 | value = "${var.environment}" 249 | propagate_at_launch = true 250 | } 251 | 252 | lifecycle { 253 | create_before_destroy = true 254 | } 255 | } 256 | 257 | resource "aws_autoscaling_policy" "scale_up" { 258 | name = "${var.name}-scaleup" 259 | scaling_adjustment = 1 260 | adjustment_type = "ChangeInCapacity" 261 | cooldown = 300 262 | autoscaling_group_name = "${aws_autoscaling_group.main.name}" 263 | 264 | lifecycle { 265 | create_before_destroy = true 266 | } 267 | } 268 | 269 | resource "aws_autoscaling_policy" "scale_down" { 270 | name = "${var.name}-scaledown" 271 | scaling_adjustment = -1 272 | adjustment_type = "ChangeInCapacity" 273 | cooldown = 300 274 | autoscaling_group_name = "${aws_autoscaling_group.main.name}" 275 | 276 | lifecycle { 277 | create_before_destroy = true 278 | } 279 | } 280 | 281 | resource "aws_cloudwatch_metric_alarm" "cpu_high" { 282 | alarm_name = "${var.name}-cpureservation-high" 283 | comparison_operator = "GreaterThanOrEqualToThreshold" 284 | evaluation_periods = "2" 285 | metric_name = "CPUReservation" 286 | namespace = "AWS/ECS" 287 | period = "300" 288 | statistic = "Maximum" 289 | threshold = "90" 290 | 291 | dimensions { 292 | ClusterName = "${aws_ecs_cluster.main.name}" 293 | } 294 | 295 | alarm_description = "Scale up if the cpu reservation is above 90% for 10 minutes" 296 | alarm_actions = ["${aws_autoscaling_policy.scale_up.arn}"] 297 | 298 | lifecycle { 299 | create_before_destroy = true 300 | } 301 | } 302 | 303 | resource "aws_cloudwatch_metric_alarm" "memory_high" { 304 | alarm_name = "${var.name}-memoryreservation-high" 305 | comparison_operator = "GreaterThanOrEqualToThreshold" 306 | evaluation_periods = "2" 307 | metric_name = "MemoryReservation" 308 | namespace = "AWS/ECS" 309 | period = "300" 310 | statistic = "Maximum" 311 | threshold = "90" 312 | 313 | dimensions { 314 | ClusterName = "${aws_ecs_cluster.main.name}" 315 | } 316 | 317 | alarm_description = "Scale up if the memory reservation is above 90% for 10 minutes" 318 | alarm_actions = ["${aws_autoscaling_policy.scale_up.arn}"] 319 | 320 | lifecycle { 321 | create_before_destroy = true 322 | } 323 | 324 | # This is required to make cloudwatch alarms creation sequential, AWS doesn't 325 | # support modifying alarms concurrently. 
326 | depends_on = ["aws_cloudwatch_metric_alarm.cpu_high"] 327 | } 328 | 329 | resource "aws_cloudwatch_metric_alarm" "cpu_low" { 330 | alarm_name = "${var.name}-cpureservation-low" 331 | comparison_operator = "LessThanOrEqualToThreshold" 332 | evaluation_periods = "2" 333 | metric_name = "CPUReservation" 334 | namespace = "AWS/ECS" 335 | period = "300" 336 | statistic = "Maximum" 337 | threshold = "10" 338 | 339 | dimensions { 340 | ClusterName = "${aws_ecs_cluster.main.name}" 341 | } 342 | 343 | alarm_description = "Scale down if the cpu reservation is below 10% for 10 minutes" 344 | alarm_actions = ["${aws_autoscaling_policy.scale_down.arn}"] 345 | 346 | lifecycle { 347 | create_before_destroy = true 348 | } 349 | 350 | # This is required to make cloudwatch alarms creation sequential, AWS doesn't 351 | # support modifying alarms concurrently. 352 | depends_on = ["aws_cloudwatch_metric_alarm.memory_high"] 353 | } 354 | 355 | resource "aws_cloudwatch_metric_alarm" "memory_low" { 356 | alarm_name = "${var.name}-memoryreservation-low" 357 | comparison_operator = "LessThanOrEqualToThreshold" 358 | evaluation_periods = "2" 359 | metric_name = "MemoryReservation" 360 | namespace = "AWS/ECS" 361 | period = "300" 362 | statistic = "Maximum" 363 | threshold = "10" 364 | 365 | dimensions { 366 | ClusterName = "${aws_ecs_cluster.main.name}" 367 | } 368 | 369 | alarm_description = "Scale down if the memory reservation is below 10% for 10 minutes" 370 | alarm_actions = ["${aws_autoscaling_policy.scale_down.arn}"] 371 | 372 | lifecycle { 373 | create_before_destroy = true 374 | } 375 | 376 | # This is required to make cloudwatch alarms creation sequential, AWS doesn't 377 | # support modifying alarms concurrently. 378 | depends_on = ["aws_cloudwatch_metric_alarm.cpu_low"] 379 | } 380 | 381 | // The cluster name, e.g cdn 382 | output "name" { 383 | value = "${var.name}" 384 | } 385 | 386 | // The cluster security group ID. 387 | output "security_group_id" { 388 | value = "${aws_security_group.cluster.id}" 389 | } 390 | -------------------------------------------------------------------------------- /elb/main.tf: -------------------------------------------------------------------------------- 1 | /** 2 | * The ELB module creates an ELB, security group 3 | * a route53 record and a service healthcheck. 4 | * It is used by the service module. 5 | */ 6 | 7 | variable "name" { 8 | description = "ELB name, e.g cdn" 9 | } 10 | 11 | variable "subnet_ids" { 12 | description = "Comma separated list of subnet IDs" 13 | } 14 | 15 | variable "environment" { 16 | description = "Environment tag, e.g prod" 17 | } 18 | 19 | variable "port" { 20 | description = "Instance port" 21 | } 22 | 23 | variable "security_groups" { 24 | description = "Comma separated list of security group IDs" 25 | } 26 | 27 | variable "dns_name" { 28 | description = "Route53 record name" 29 | } 30 | 31 | variable "healthcheck" { 32 | description = "Healthcheck path" 33 | } 34 | 35 | variable "protocol" { 36 | description = "Protocol to use, HTTP or TCP" 37 | } 38 | 39 | variable "zone_id" { 40 | description = "Route53 zone ID to use for dns_name" 41 | } 42 | 43 | variable "log_bucket" { 44 | description = "S3 bucket name to write ELB logs into" 45 | } 46 | 47 | /** 48 | * Resources. 
49 | */ 50 | 51 | resource "aws_elb" "main" { 52 | name = "${var.name}" 53 | 54 | internal = true 55 | cross_zone_load_balancing = true 56 | subnets = ["${split(",", var.subnet_ids)}"] 57 | security_groups = ["${split(",",var.security_groups)}"] 58 | 59 | idle_timeout = 30 60 | connection_draining = true 61 | connection_draining_timeout = 15 62 | 63 | listener { 64 | lb_port = 80 65 | lb_protocol = "${var.protocol}" 66 | instance_port = "${var.port}" 67 | instance_protocol = "${var.protocol}" 68 | } 69 | 70 | health_check { 71 | healthy_threshold = 2 72 | unhealthy_threshold = 2 73 | timeout = 5 74 | target = "${var.protocol}:${var.port}${var.healthcheck}" 75 | interval = 30 76 | } 77 | 78 | access_logs { 79 | bucket = "${var.log_bucket}" 80 | } 81 | 82 | tags { 83 | Name = "${var.name}-balancer" 84 | Service = "${var.name}" 85 | Environment = "${var.environment}" 86 | } 87 | } 88 | 89 | resource "aws_route53_record" "main" { 90 | zone_id = "${var.zone_id}" 91 | name = "${var.dns_name}" 92 | type = "A" 93 | 94 | alias { 95 | name = "${aws_elb.main.dns_name}" 96 | zone_id = "${aws_elb.main.zone_id}" 97 | evaluate_target_health = false 98 | } 99 | } 100 | 101 | /** 102 | * Outputs. 103 | */ 104 | 105 | // The ELB name. 106 | output "name" { 107 | value = "${aws_elb.main.name}" 108 | } 109 | 110 | // The ELB ID. 111 | output "id" { 112 | value = "${aws_elb.main.id}" 113 | } 114 | 115 | // The ELB dns_name. 116 | output "dns" { 117 | value = "${aws_elb.main.dns_name}" 118 | } 119 | 120 | // FQDN built using the zone domain and name 121 | output "fqdn" { 122 | value = "${aws_route53_record.main.fqdn}" 123 | } 124 | 125 | // The zone id of the ELB 126 | output "zone_id" { 127 | value = "${aws_elb.main.zone_id}" 128 | } 129 | -------------------------------------------------------------------------------- /iam-role/main.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | description = "The name of the stack to use in security groups" 3 | } 4 | 5 | variable "environment" { 6 | description = "The name of the environment for this stack" 7 | } 8 | 9 | resource "aws_iam_role" "default_ecs_role" { 10 | name = "ecs-role-${var.name}-${var.environment}" 11 | 12 | assume_role_policy = <> /etc/hosts 28 | fi 29 | } 30 | 31 | # SERVER_REGION is only defined when user-data exist, this prevents the hostname 32 | # from being changed when the script is ran by the instance building an AMI. 33 | if test $SERVER_REGION; then 34 | configure_hostname 35 | fi 36 | -------------------------------------------------------------------------------- /packer/base/root/usr/local/bin/if-addr: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # This script outputs the IP address associated with the network interface which 5 | # name was passed as argument. 
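# Example (illustrative): `if-addr eth0` prints the IPv4 address bound to eth0,
# e.g. 10.30.5.12; calling it with no argument prints the usage message below.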
6 | usage() { 7 | echo 'usage: if-addr [interface]' 8 | exit 1 9 | } 10 | 11 | NETWORK_INTERFACE=$1 12 | test $NETWORK_INTERFACE || usage 13 | 14 | ip addr | awk ' 15 | /^[0-9]+:/ { 16 | sub(/:/,"",$2); iface=$2 } 17 | /^[[:space:]]*inet / { 18 | split($2, a, "/") 19 | print iface" "a[1] 20 | }' | grep "$NETWORK_INTERFACE" | cut -d' ' -f2 21 | -------------------------------------------------------------------------------- /packer/base/scripts/base.sh: 1 | #!/bin/bash 2 | set -e 3 | 4 | systemctl disable apt-daily.service 5 | systemctl disable apt-daily.timer 6 | 7 | apt-get update -y 8 | apt-get upgrade -y 9 | 10 | apt-get install -y \ 11 | build-essential \ 12 | git \ 13 | wget \ 14 | dkms \ 15 | apt-transport-https \ 16 | ca-certificates \ 17 | python-apt \ 18 | python-pip \ 19 | curl \ 20 | netcat \ 21 | ngrep \ 22 | dstat \ 23 | nmon \ 24 | iptraf \ 25 | iftop \ 26 | iotop \ 27 | atop \ 28 | mtr \ 29 | tree \ 30 | unzip \ 31 | sysdig \ 32 | git \ 33 | htop \ 34 | jq \ 35 | ntp \ 36 | logrotate \ 37 | dhcping \ 38 | dhcpdump 39 | 40 | pip install awscli 41 | 42 | apt-get dist-upgrade -y 43 | -------------------------------------------------------------------------------- /packer/base/scripts/bootstrap.sh: 1 | #!/bin/bash 2 | set -e 3 | 4 | systemctl daemon-reload 5 | systemctl enable bootstrap.service 6 | -------------------------------------------------------------------------------- /packer/base/scripts/docker.sh: 1 | #!/bin/bash 2 | set -e 3 | 4 | apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D 5 | echo 'deb https://apt.dockerproject.org/repo ubuntu-xenial main' > /etc/apt/sources.list.d/docker.list 6 | 7 | apt-get update -y 8 | apt-get purge -y lxc-docker 9 | apt-cache policy docker-engine 10 | 11 | apt-get install -o Dpkg::Options::="--force-confold" -y \ 12 | linux-image-extra-$(uname -r) \ 13 | docker-engine 14 | 15 | gpasswd -a ubuntu docker 16 | 17 | systemctl daemon-reload 18 | systemctl enable format-var-lib-docker.service 19 | systemctl enable var-lib-docker.mount 20 | systemctl enable docker.service 21 | -------------------------------------------------------------------------------- /packer/base/scripts/ixgbevf.sh: 1 | #!/bin/bash 2 | set -e 3 | 4 | wget -q -N -P /tmp/ "sourceforge.net/projects/e1000/files/ixgbevf stable/3.1.2/ixgbevf-3.1.2.tar.gz" 5 | tar -xzf /tmp/ixgbevf-3.1.2.tar.gz 6 | mv ixgbevf-3.1.2 /usr/src/ 7 | 8 | cat < docs.md 10 | 11 | echo "Generating docs for stack" 12 | printf "# Stack\n\n" >> docs.md 13 | terraform-docs md . >> docs.md 14 | 15 | for m in $modules; do 16 | if [[ "$m" != "iam-role" ]]; then 17 | echo "Generating docs for $m" 18 | printf "# $m\n\n" >> docs.md 19 | terraform-docs md $m >> docs.md 20 | fi 21 | done 22 | -------------------------------------------------------------------------------- /scripts/test.sh: 1 | #!/usr/bin/env bash 2 | 3 | modules=$(find -mindepth 2 -name '*.tf' -printf '%P\n' | xargs -I % dirname %) 4 | 5 | (terraform validate .
&& echo "√ stack") || exit 1 6 | 7 | for m in $modules; do 8 | (terraform validate $m && echo "√ $m") || exit 1 9 | done 10 | -------------------------------------------------------------------------------- /security-groups/main.tf: -------------------------------------------------------------------------------- 1 | /** 2 | * Creates basic security groups to be used by instances and ELBs. 3 | */ 4 | 5 | variable "name" { 6 | description = "The name of the security groups serves as a prefix, e.g stack" 7 | } 8 | 9 | variable "vpc_id" { 10 | description = "The VPC ID" 11 | } 12 | 13 | variable "environment" { 14 | description = "The environment, used for tagging, e.g prod" 15 | } 16 | 17 | variable "cidr" { 18 | description = "The cidr block to use for internal security groups" 19 | } 20 | 21 | resource "aws_security_group" "internal_elb" { 22 | name = "${format("%s-%s-internal-elb", var.name, var.environment)}" 23 | vpc_id = "${var.vpc_id}" 24 | description = "Allows internal ELB traffic" 25 | 26 | ingress { 27 | from_port = 80 28 | to_port = 80 29 | protocol = "tcp" 30 | cidr_blocks = ["${var.cidr}"] 31 | } 32 | 33 | egress { 34 | from_port = 0 35 | to_port = 0 36 | protocol = -1 37 | cidr_blocks = ["0.0.0.0/0"] 38 | } 39 | 40 | lifecycle { 41 | create_before_destroy = true 42 | } 43 | 44 | tags { 45 | Name = "${format("%s internal elb", var.name)}" 46 | Environment = "${var.environment}" 47 | } 48 | } 49 | 50 | resource "aws_security_group" "external_elb" { 51 | name = "${format("%s-%s-external-elb", var.name, var.environment)}" 52 | vpc_id = "${var.vpc_id}" 53 | description = "Allows external ELB traffic" 54 | 55 | ingress { 56 | from_port = 80 57 | to_port = 80 58 | protocol = "tcp" 59 | cidr_blocks = ["0.0.0.0/0"] 60 | } 61 | 62 | ingress { 63 | from_port = 443 64 | to_port = 443 65 | protocol = "tcp" 66 | cidr_blocks = ["0.0.0.0/0"] 67 | } 68 | 69 | egress { 70 | from_port = 0 71 | to_port = 0 72 | protocol = -1 73 | cidr_blocks = ["0.0.0.0/0"] 74 | } 75 | 76 | lifecycle { 77 | create_before_destroy = true 78 | } 79 | 80 | tags { 81 | Name = "${format("%s external elb", var.name)}" 82 | Environment = "${var.environment}" 83 | } 84 | } 85 | 86 | resource "aws_security_group" "external_ssh" { 87 | name = "${format("%s-%s-external-ssh", var.name, var.environment)}" 88 | description = "Allows ssh from the world" 89 | vpc_id = "${var.vpc_id}" 90 | 91 | ingress { 92 | from_port = 22 93 | to_port = 22 94 | protocol = "tcp" 95 | cidr_blocks = ["0.0.0.0/0"] 96 | } 97 | 98 | egress { 99 | from_port = 0 100 | to_port = 0 101 | protocol = "-1" 102 | cidr_blocks = ["0.0.0.0/0"] 103 | } 104 | 105 | lifecycle { 106 | create_before_destroy = true 107 | } 108 | 109 | tags { 110 | Name = "${format("%s external ssh", var.name)}" 111 | Environment = "${var.environment}" 112 | } 113 | } 114 | 115 | resource "aws_security_group" "internal_ssh" { 116 | name = "${format("%s-%s-internal-ssh", var.name, var.environment)}" 117 | description = "Allows ssh from bastion" 118 | vpc_id = "${var.vpc_id}" 119 | 120 | ingress { 121 | from_port = 22 122 | to_port = 22 123 | protocol = "tcp" 124 | security_groups = ["${aws_security_group.external_ssh.id}"] 125 | } 126 | 127 | egress { 128 | from_port = 0 129 | to_port = 0 130 | protocol = "tcp" 131 | cidr_blocks = ["${var.cidr}"] 132 | } 133 | 134 | lifecycle { 135 | create_before_destroy = true 136 | } 137 | 138 | tags { 139 | Name = "${format("%s internal ssh", var.name)}" 140 | Environment = "${var.environment}" 141 | } 142 | } 143 | 144 | // External SSH allows ssh 
145 | output "external_ssh" { 146 | value = "${aws_security_group.external_ssh.id}" 147 | } 148 | 149 | // Internal SSH allows ssh connections from the external ssh security group. 150 | output "internal_ssh" { 151 | value = "${aws_security_group.internal_ssh.id}" 152 | } 153 | 154 | // Internal ELB allows internal traffic. 155 | output "internal_elb" { 156 | value = "${aws_security_group.internal_elb.id}" 157 | } 158 | 159 | // External ELB allows traffic from the world. 160 | output "external_elb" { 161 | value = "${aws_security_group.external_elb.id}" 162 | } 163 | -------------------------------------------------------------------------------- /service/main.tf: 1 | /** 2 | * The service module creates an ecs service, task definition, 3 | * elb, and a route53 record under the local service zone (see the dns module). 4 | * 5 | * Usage: 6 | * 7 | * module "auth_service" { 8 | * source = "github.com/segmentio/stack/service" 9 | * name = "auth-service" 10 | * image = "auth-service" 11 | * cluster = "default" 12 | * } 13 | * 14 | */ 15 | 16 | /** 17 | * Required Variables. 18 | */ 19 | 20 | variable "environment" { 21 | description = "Environment tag, e.g prod" 22 | } 23 | 24 | variable "image" { 25 | description = "The docker image name, e.g nginx" 26 | } 27 | 28 | variable "name" { 29 | description = "The service name, if empty the service name is defaulted to the image name" 30 | default = "" 31 | } 32 | 33 | variable "version" { 34 | description = "The docker image version" 35 | default = "latest" 36 | } 37 | 38 | variable "subnet_ids" { 39 | description = "Comma separated list of subnet IDs that will be passed to the ELB module" 40 | } 41 | 42 | variable "security_groups" { 43 | description = "Comma separated list of security group IDs that will be passed to the ELB module" 44 | } 45 | 46 | variable "port" { 47 | description = "The container host port" 48 | } 49 | 50 | variable "cluster" { 51 | description = "The cluster name or ARN" 52 | } 53 | 54 | variable "dns_name" { 55 | description = "The DNS name to use, e.g nginx" 56 | } 57 | 58 | variable "log_bucket" { 59 | description = "The S3 bucket ID to use for the ELB" 60 | } 61 | 62 | /** 63 | * Options.
64 | */ 65 | 66 | variable "healthcheck" { 67 | description = "Path to a healthcheck endpoint" 68 | default = "/" 69 | } 70 | 71 | variable "container_port" { 72 | description = "The container port" 73 | default = 3000 74 | } 75 | 76 | variable "command" { 77 | description = "The raw json of the task command" 78 | default = "[]" 79 | } 80 | 81 | variable "env_vars" { 82 | description = "The raw json of the task env vars" 83 | default = "[]" 84 | } 85 | 86 | variable "desired_count" { 87 | description = "The desired count" 88 | default = 2 89 | } 90 | 91 | variable "memory" { 92 | description = "The number of MiB of memory to reserve for the container" 93 | default = 512 94 | } 95 | 96 | variable "cpu" { 97 | description = "The number of cpu units to reserve for the container" 98 | default = 512 99 | } 100 | 101 | variable "protocol" { 102 | description = "The ELB protocol, HTTP or TCP" 103 | default = "HTTP" 104 | } 105 | 106 | variable "iam_role" { 107 | description = "IAM Role ARN to use" 108 | } 109 | 110 | variable "zone_id" { 111 | description = "The zone ID to create the record in" 112 | } 113 | 114 | variable "deployment_minimum_healthy_percent" { 115 | description = "lower limit (% of desired_count) of # of running tasks during a deployment" 116 | default = 100 117 | } 118 | 119 | variable "deployment_maximum_percent" { 120 | description = "upper limit (% of desired_count) of # of running tasks during a deployment" 121 | default = 200 122 | } 123 | 124 | /** 125 | * Resources. 126 | */ 127 | 128 | resource "aws_ecs_service" "main" { 129 | name = "${module.task.name}" 130 | cluster = "${var.cluster}" 131 | task_definition = "${module.task.arn}" 132 | desired_count = "${var.desired_count}" 133 | iam_role = "${var.iam_role}" 134 | deployment_minimum_healthy_percent = "${var.deployment_minimum_healthy_percent}" 135 | deployment_maximum_percent = "${var.deployment_maximum_percent}" 136 | 137 | load_balancer { 138 | elb_name = "${module.elb.id}" 139 | container_name = "${module.task.name}" 140 | container_port = "${var.container_port}" 141 | } 142 | 143 | lifecycle { 144 | create_before_destroy = true 145 | } 146 | } 147 | 148 | module "task" { 149 | source = "../task" 150 | 151 | name = "${coalesce(var.name, replace(var.image, "/", "-"))}" 152 | image = "${var.image}" 153 | image_version = "${var.version}" 154 | command = "${var.command}" 155 | env_vars = "${var.env_vars}" 156 | memory = "${var.memory}" 157 | cpu = "${var.cpu}" 158 | 159 | ports = < ' + self.name, msg, *args) 413 | 414 | class Script(object): 415 | 416 | def __init__(self, name=None, content=None): 417 | self.name = name 418 | self.content = content 419 | 420 | def __repr__(self): 421 | return 'Script { name = %s }' % self.name 422 | 423 | def __str__(self): 424 | return self.name 425 | 426 | TIMESTAMP = time.strftime('%Y.%m.%d-%H.%M.%S', time.gmtime()) 427 | 428 | COMMANDS = { 429 | 'build' : command_build, 430 | 'plan' : command_plan, 431 | 'validate': command_validate, 432 | } 433 | 434 | BEGIN_SCRIPT = """#!/bin/bash 435 | set -e 436 | 437 | if [ "$(ls -A /tmp/root)" ] 438 | then 439 | cd /tmp/root 440 | cp -r * / 441 | fi 442 | 443 | systemctl daemon-reload 444 | """ 445 | 446 | END_SCRIPT = """#!/bin/bash 447 | set -e 448 | 449 | if [ "$(ls -A /tmp/root)" ] 450 | then 451 | cd /tmp/root 452 | cp -r * / 453 | fi 454 | 455 | systemctl daemon-reload 456 | 457 | apt-get -y autoremove 458 | apt-get -y autoclean 459 | 460 | rm -rf /tmp/* 461 | rm -rf /var/tmp/* 462 | rm -rf $HOME/.ssh/authorized_keys 463 | 464 | for f 
in $(find /var/log -type f) ; do 465 | dd if=/dev/null of=$f 466 | done 467 | """ 468 | 469 | if __name__ == '__main__': 470 | main(sys.argv[1:]) 471 | -------------------------------------------------------------------------------- /tools/readme.md: 1 | # Prerequisites: 2 | - AWS CLI configured with your credentials 3 | 4 | # Usage: 5 | - Go to the tools directory `cd tools/` 6 | - Generate the Base AMI image `./pack-ami build -t base -p ../packer/` 7 | - Generate the ECS AMI image `./pack-ami build -t ecs -p ../packer/` 8 | - Your new Base and ECS AMIs are available in your AWS account 9 | - You can delete the Base AMI image 10 | - Set the ECS AMI permissions to public 11 | - Edit the `defaults/main.tf` file and set the new ECS AMI ID for the region where your AMI was created 12 | - Copy your ECS AMI to each region available in `defaults/main.tf` AND DO NOT FORGET TO MAKE THEM PUBLIC TOO. 13 | -------------------------------------------------------------------------------- /tools/roll-ami: 1 | #!/usr/bin/env python3 2 | import argparse 3 | import boto3 4 | import os 5 | import queue 6 | import sys 7 | import threading 8 | import time 9 | import copy 10 | 11 | asg = boto3.client('autoscaling') 12 | ec2 = boto3.client('ec2') 13 | ecs = boto3.client('ecs') 14 | 15 | 16 | def main(argv): 17 | options = parse_arguments(argv) 18 | conf = None 19 | 20 | info('fetching autoscaling group %s...', options.cluster) 21 | group = fetch_auto_scaling_group(options.cluster) 22 | 23 | info('fetching launch configuration %s...', group['LaunchConfigurationName']) 24 | conf = fetch_launch_configuration(group['LaunchConfigurationName']) 25 | 26 | instances = filter_instances_to_cycle(group['Instances'], conf['LaunchConfigurationName'], options.force) 27 | step_size = compute_step_size(options.number, len(instances)) 28 | 29 | info('cycling instances %s at a time (%s exist in the group)', step_size, len(group['Instances'])) 30 | info('using image %s', conf['ImageId']) 31 | 32 | if options.dry_run: 33 | print_summary(instances) 34 | return 35 | 36 | new_instances = [ ] 37 | while True: 38 | instances, conf = run(options, step_size, conf, options.force, new_instances) 39 | new_instances += instances 40 | 41 | def run(options, step_size=1, launch_config=None, force=False, ignore_instances=None): 42 | group = fetch_auto_scaling_group(options.cluster) 43 | conf = fetch_launch_configuration(group['LaunchConfigurationName']) 44 | 45 | if launch_config is not None and conf['LaunchConfigurationName'] != launch_config['LaunchConfigurationName']: 46 | error('launch configuration changed while cycling EC2 instances, aborting!') 47 | sys.exit(1) 48 | 49 | instances = filter_instances_to_cycle(group['Instances'], conf['LaunchConfigurationName'], force, ignore_instances) 50 | step_size = min(step_size, len(instances)) 51 | 52 | if not instances: 53 | sys.exit(0) 54 | 55 | old_instances = instances[:step_size] 56 | desired_capacity = group['DesiredCapacity'] 57 | max_size = group['MaxSize'] 58 | count = len(old_instances) 59 | 60 | # Increase the auto scaling group capacity to start new instances, 61 | # the max size is also modified if it was too low to raise the desired 62 | # capacity.
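# Worked example (hypothetical numbers): with DesiredCapacity=4, MaxSize=5
# and count=2, d=6 exceeds m=5, so MaxSize is raised to 6 together with the
# desired capacity; when d <= m, MaxSize is left unchanged.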
63 | d = desired_capacity + count 64 | m = max_size 65 | if d > m: 66 | m = d 67 | info('increasing desired capacity of auto scaling group to %s', d) 68 | asg.update_auto_scaling_group( 69 | AutoScalingGroupName = options.cluster, 70 | DesiredCapacity = d, 71 | MaxSize = m, 72 | ) 73 | 74 | # Look for `count` new instances that will be coming up in the auto 75 | # scaling group (instances that didn't exist before). 76 | info('looking for the ids of new EC2 instances...') 77 | new_instances = [ ] 78 | while len(new_instances) < count: 79 | time.sleep(1) 80 | starting = fetch_auto_scaling_instances(options.cluster) 81 | starting = filter_new_instances(starting, group['Instances']) 82 | for instance in starting: 83 | if instance not in new_instances: 84 | new_instances.append(instance) 85 | info('%s - launched', instance) 86 | 87 | # Wait for all new instances to become ready. 88 | info('waiting for new EC2 instances to become ready...') 89 | tmp_instances = set(new_instances) 90 | while tmp_instances: 91 | time.sleep(1) 92 | for instance in asg.describe_auto_scaling_instances( 93 | InstanceIds = list(tmp_instances), 94 | )['AutoScalingInstances']: 95 | if instance['LifecycleState'] == 'InService': 96 | info('%s - ready', instance['InstanceId']) 97 | tmp_instances.remove(instance['InstanceId']) 98 | 99 | # Wait for all new instances to be in a running state. 100 | info('waiting for new EC2 instances to be in a running state...') 101 | tmp_instances = set(new_instances) 102 | while tmp_instances: 103 | time.sleep(1) 104 | for instance in ec2.describe_instance_status( 105 | InstanceIds = list(tmp_instances), 106 | Filters = [ 107 | { 'Name': 'instance-state-name', 'Values': ['running'] }, 108 | { 'Name': 'instance-status.reachability', 'Values': ['passed'] }, 109 | ], 110 | )['InstanceStatuses']: 111 | info('%s - running', instance['InstanceId']) 112 | tmp_instances.remove(instance['InstanceId']) 113 | 114 | container_instance_id_map = build_container_instance_map(options.cluster) 115 | 116 | # Drain old instances, one API call per instance (the API caps each call 117 | # at 10 container instances anyway). 118 | for instance in old_instances: 119 | info('%s - draining', instance) 120 | ecs.update_container_instances_state( 121 | cluster=options.cluster, 122 | containerInstances=[container_instance_id_map[instance]], 123 | status='DRAINING') 124 | 125 | draining_instances = copy.copy(old_instances) 126 | while draining_instances: 127 | time.sleep(5) 128 | ci_info = ecs.describe_container_instances( 129 | cluster=options.cluster, 130 | containerInstances=[container_instance_id_map[instance] for instance in draining_instances]) 131 | for container_instance in ci_info['containerInstances']: 132 | if container_instance['runningTasksCount'] == 0: 133 | draining_instances.remove(container_instance['ec2InstanceId']) 134 | info('%s - drained', container_instance['ec2InstanceId']) 135 | 136 | # Terminates the old instances that aren't necessary anymore (the ones 137 | # that were picked by the iterator).
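# Because ShouldDecrementDesiredCapacity is set, each termination shrinks
# the desired capacity by one, returning the group to the size it had
# before the roll once all old instances are gone.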
138 | for instance in old_instances: 139 | asg.terminate_instance_in_auto_scaling_group( 140 | InstanceId = instance, 141 | ShouldDecrementDesiredCapacity = True, 142 | ) 143 | info('%s - terminated', instance) 144 | 145 | return new_instances, conf 146 | 147 | def log(msg, *args): 148 | msg += '\n' 149 | if args: 150 | msg %= args 151 | sys.stdout.write(msg) 152 | 153 | def warn(msg, *args): 154 | log('warn: ' + msg, *args) 155 | 156 | def error(msg, *args): 157 | log('error: ' + msg, *args) 158 | sys.exit(1) 159 | 160 | def info(msg, *args): 161 | log('==> roll: ' + msg, *args) 162 | 163 | def parse_arguments(argv): 164 | parser = argparse.ArgumentParser() 165 | parser.add_argument('-c', '--cluster', default='default', help="The name of the cluster to roll EC2 instances in") 166 | parser.add_argument('-n', '--number', default='25%', help="The number of EC2 instances to roll at once, may be absolute or a percentage") 167 | parser.add_argument('--force', action='store_true', help="When specified the program will cycle instances even if the launch configuration did not change") 168 | parser.add_argument('--dry-run', action='store_true', help="When specified the program stops before making any changes") 169 | return parser.parse_args(argv) 170 | 171 | def print_summary(instances): 172 | print() 173 | print(' The following instances would be cycled:') 174 | for instance in instances: 175 | print(' *', instance) 176 | print() 177 | 178 | def compute_step_size(number, count): 179 | step_size = 0 180 | 181 | if number.endswith('%'): 182 | percentage = float(number[:-1]) 183 | if percentage <= 0 or percentage > 100: 184 | error('invalid percentage of instances to roll %s', percentage) 185 | step_size = round(count * (percentage / 100.0)) 186 | else: 187 | step_size = int(number) 188 | 189 | return min(max(1, step_size), count) 190 | 191 | def fetch_auto_scaling_group(name): 192 | group = asg.describe_auto_scaling_groups( 193 | AutoScalingGroupNames=[name], 194 | ) 195 | return group['AutoScalingGroups'][0] 196 | 197 | def fetch_auto_scaling_instances(name): 198 | group = fetch_auto_scaling_group(name) 199 | return group['Instances'] 200 | 201 | def fetch_launch_configuration(name): 202 | conf = asg.describe_launch_configurations( 203 | LaunchConfigurationNames=[name], 204 | ) 205 | return conf['LaunchConfigurations'][0] 206 | 207 | def filter_instances_to_cycle(instances, launch_config, force=False, ignore_instances=None): 208 | to_cycle = [ ] 209 | 210 | for instance in instances: 211 | if instance.get('LifecycleState') not in ('Pending', 'Pending:Wait', 'Pending:Proceed', 'Quarantined', 'InService'): 212 | continue 213 | if not force and instance.get('LaunchConfigurationName') == launch_config: 214 | continue 215 | to_cycle.append(instance['InstanceId']) 216 | 217 | if ignore_instances is not None: 218 | to_cycle = [i for i in to_cycle if i not in ignore_instances] 219 | 220 | to_cycle.sort() 221 | return to_cycle 222 | 223 | def filter_new_instances(instances, group_instances): 224 | group_instances = dict((i['InstanceId'], True) for i in group_instances) 225 | new_instances = [ ] 226 | 227 | for instance in instances: 228 | if instance.get('LifecycleState') not in ('Pending', 'Pending:Wait', 'Pending:Proceed', 'Quarantined', 'InService'): 229 | continue 230 | if group_instances.get(instance['InstanceId']): 231 | continue 232 | new_instances.append(instance['InstanceId']) 233 | 234 | new_instances.sort() 235 | return new_instances 236 | 237 | def build_container_instance_map(cluster): 238 | 
container_instances = ecs.list_container_instances(cluster=cluster) 239 | full_info = ecs.describe_container_instances(cluster=cluster, 240 | containerInstances=container_instances['containerInstanceArns']) 241 | 242 | container_instance_map = {} 243 | for ci in full_info['containerInstances']: 244 | container_instance_map[ci['ec2InstanceId']] = ci['containerInstanceArn'] 245 | return container_instance_map 246 | 247 | def iter_instance_groups(instances, group_size): 248 | while instances: 249 | n = min(group_size, len(instances)) 250 | instance_group, instances = instances[:n], instances[n:] 251 | yield instance_group 252 | 253 | if __name__ == '__main__': 254 | main(sys.argv[1:]) 255 | -------------------------------------------------------------------------------- /tools/tfvar-ami: 1 | #!/bin/bash 2 | # 3 | # This script is a helper for generating the terraform variable when an AMI is 4 | # built. 5 | # The program expects to receive the output of a pack-ami command on stdin and 6 | # outputs the terraform variable definition. 7 | set -e 8 | 9 | usage() { 10 | echo 'usage: tfvar-ami [template]' 11 | exit 1 12 | } 13 | 14 | template=$1 15 | test $template || usage 16 | 17 | ami=$(grep ami- | cut -d: -f2 | sed 's/ //') 18 | printf "variable \"${template}_ami\" {\n default = \"${ami}\"\n}" 19 | -------------------------------------------------------------------------------- /vpc/main.tf: 1 | variable "cidr" { 2 | description = "The CIDR block for the VPC." 3 | } 4 | 5 | variable "external_subnets" { 6 | description = "List of external subnets" 7 | type = "list" 8 | } 9 | 10 | variable "internal_subnets" { 11 | description = "List of internal subnets" 12 | type = "list" 13 | } 14 | 15 | variable "environment" { 16 | description = "Environment tag, e.g prod" 17 | } 18 | 19 | variable "availability_zones" { 20 | description = "List of availability zones" 21 | type = "list" 22 | } 23 | 24 | variable "name" { 25 | description = "Name tag, e.g stack" 26 | default = "stack" 27 | } 28 | 29 | variable "use_nat_instances" { 30 | description = "If true, use EC2 NAT instances instead of the AWS NAT gateway service." 31 | default = false 32 | } 33 | 34 | variable "nat_instance_type" { 35 | description = "Only if use_nat_instances is true, which EC2 instance type to use for the NAT instances." 36 | default = "t2.nano" 37 | } 38 | 39 | variable "use_eip_with_nat_instances" { 40 | description = "Only if use_nat_instances is true, whether to assign Elastic IPs to the NAT instances. If this is set to false, NAT instances use dynamically assigned IPs." 41 | default = false 42 | } 43 | 44 | # This data source returns the newest Amazon NAT instance AMI 45 | data "aws_ami" "nat_ami" { 46 | most_recent = true 47 | 48 | filter { 49 | name = "owner-alias" 50 | values = ["amazon"] 51 | } 52 | 53 | filter { 54 | name = "name" 55 | values = ["amzn-ami-vpc-nat*"] 56 | } 57 | } 58 | 59 | variable "nat_instance_ssh_key_name" { 60 | description = "Only if use_nat_instances is true, the optional SSH key-pair to assign to NAT instances."
61 | default = "" 62 | } 63 | 64 | /** 65 | * VPC 66 | */ 67 | 68 | resource "aws_vpc" "main" { 69 | cidr_block = "${var.cidr}" 70 | enable_dns_support = true 71 | enable_dns_hostnames = true 72 | 73 | tags { 74 | Name = "${var.name}" 75 | Environment = "${var.environment}" 76 | } 77 | } 78 | 79 | /** 80 | * Gateways 81 | */ 82 | 83 | resource "aws_internet_gateway" "main" { 84 | vpc_id = "${aws_vpc.main.id}" 85 | 86 | tags { 87 | Name = "${var.name}" 88 | Environment = "${var.environment}" 89 | } 90 | } 91 | 92 | resource "aws_nat_gateway" "main" { 93 | # Only create this if not using NAT instances. 94 | count = "${(1 - var.use_nat_instances) * length(var.internal_subnets)}" 95 | allocation_id = "${element(aws_eip.nat.*.id, count.index)}" 96 | subnet_id = "${element(aws_subnet.external.*.id, count.index)}" 97 | depends_on = ["aws_internet_gateway.main"] 98 | } 99 | 100 | resource "aws_eip" "nat" { 101 | # Create these only if: 102 | # NAT instances are used and Elastic IPs are used with them, 103 | # or if the NAT gateway service is used (NAT instances are not used). 104 | count = "${signum((var.use_nat_instances * var.use_eip_with_nat_instances) + (var.use_nat_instances == 0 ? 1 : 0)) * length(var.internal_subnets)}" 105 | 106 | vpc = true 107 | } 108 | 109 | resource "aws_security_group" "nat_instances" { 110 | # Create this only if using NAT instances, vs. the NAT gateway service. 111 | count = "${0 + var.use_nat_instances}" 112 | name = "nat" 113 | description = "Allow traffic from clients into NAT instances" 114 | 115 | ingress { 116 | from_port = 0 117 | to_port = 65535 118 | protocol = "udp" 119 | cidr_blocks = "${var.internal_subnets}" 120 | } 121 | 122 | ingress { 123 | from_port = 0 124 | to_port = 65535 125 | protocol = "tcp" 126 | cidr_blocks = "${var.internal_subnets}" 127 | } 128 | 129 | egress { 130 | from_port = 0 131 | to_port = 0 132 | protocol = "-1" 133 | cidr_blocks = ["0.0.0.0/0"] 134 | } 135 | 136 | vpc_id = "${aws_vpc.main.id}" 137 | } 138 | 139 | resource "aws_instance" "nat_instance" { 140 | # Create these only if using NAT instances, vs. the NAT gateway service. 141 | count = "${(0 + var.use_nat_instances) * length(var.internal_subnets)}" 142 | availability_zone = "${element(var.availability_zones, count.index)}" 143 | 144 | tags { 145 | Name = "${var.name}-${format("internal-%03d NAT", count.index+1)}" 146 | Environment = "${var.environment}" 147 | } 148 | 149 | volume_tags { 150 | Name = "${var.name}-${format("internal-%03d NAT", count.index+1)}" 151 | Environment = "${var.environment}" 152 | } 153 | 154 | key_name = "${var.nat_instance_ssh_key_name}" 155 | ami = "${data.aws_ami.nat_ami.id}" 156 | instance_type = "${var.nat_instance_type}" 157 | source_dest_check = false 158 | 159 | # associate_public_ip_address is not used,, 160 | # as public subnets have map_public_ip_on_launch set to true. 161 | # Also, using associate_public_ip_address causes issues with 162 | # stopped NAT instances which do not use an Elastic IP. 163 | # - For more details: https://github.com/terraform-providers/terraform-provider-aws/issues/343 164 | subnet_id = "${element(aws_subnet.external.*.id, count.index)}" 165 | 166 | vpc_security_group_ids = ["${aws_security_group.nat_instances.id}"] 167 | 168 | lifecycle { 169 | # Ignore changes to the NAT AMI data source. 170 | ignore_changes = ["ami"] 171 | } 172 | } 173 | 174 | resource "aws_eip_association" "nat_instance_eip" { 175 | # Create these only if using NAT instances, vs. the NAT gateway service. 
176 | count = "${(0 + (var.use_nat_instances * var.use_eip_with_nat_instances)) * length(var.internal_subnets)}" 177 | instance_id = "${element(aws_instance.nat_instance.*.id, count.index)}" 178 | allocation_id = "${element(aws_eip.nat.*.id, count.index)}" 179 | } 180 | 181 | /** 182 | * Subnets. 183 | */ 184 | 185 | resource "aws_subnet" "internal" { 186 | vpc_id = "${aws_vpc.main.id}" 187 | cidr_block = "${element(var.internal_subnets, count.index)}" 188 | availability_zone = "${element(var.availability_zones, count.index)}" 189 | count = "${length(var.internal_subnets)}" 190 | 191 | tags { 192 | Name = "${var.name}-${format("internal-%03d", count.index+1)}" 193 | Environment = "${var.environment}" 194 | } 195 | } 196 | 197 | resource "aws_subnet" "external" { 198 | vpc_id = "${aws_vpc.main.id}" 199 | cidr_block = "${element(var.external_subnets, count.index)}" 200 | availability_zone = "${element(var.availability_zones, count.index)}" 201 | count = "${length(var.external_subnets)}" 202 | map_public_ip_on_launch = true 203 | 204 | tags { 205 | Name = "${var.name}-${format("external-%03d", count.index+1)}" 206 | Environment = "${var.environment}" 207 | } 208 | } 209 | 210 | /** 211 | * Route tables 212 | */ 213 | 214 | resource "aws_route_table" "external" { 215 | vpc_id = "${aws_vpc.main.id}" 216 | 217 | tags { 218 | Name = "${var.name}-external-001" 219 | Environment = "${var.environment}" 220 | } 221 | } 222 | 223 | resource "aws_route" "external" { 224 | route_table_id = "${aws_route_table.external.id}" 225 | destination_cidr_block = "0.0.0.0/0" 226 | gateway_id = "${aws_internet_gateway.main.id}" 227 | } 228 | 229 | resource "aws_route_table" "internal" { 230 | count = "${length(var.internal_subnets)}" 231 | vpc_id = "${aws_vpc.main.id}" 232 | 233 | tags { 234 | Name = "${var.name}-${format("internal-%03d", count.index+1)}" 235 | Environment = "${var.environment}" 236 | } 237 | } 238 | 239 | resource "aws_route" "internal" { 240 | # Create this only if using the NAT gateway service, vs. NAT instances. 241 | count = "${(1 - var.use_nat_instances) * length(compact(var.internal_subnets))}" 242 | route_table_id = "${element(aws_route_table.internal.*.id, count.index)}" 243 | destination_cidr_block = "0.0.0.0/0" 244 | nat_gateway_id = "${element(aws_nat_gateway.main.*.id, count.index)}" 245 | } 246 | 247 | resource "aws_route" "internal_nat_instance" { 248 | count = "${(0 + var.use_nat_instances) * length(compact(var.internal_subnets))}" 249 | route_table_id = "${element(aws_route_table.internal.*.id, count.index)}" 250 | destination_cidr_block = "0.0.0.0/0" 251 | instance_id = "${element(aws_instance.nat_instance.*.id, count.index)}" 252 | } 253 | 254 | /** 255 | * Route associations 256 | */ 257 | 258 | resource "aws_route_table_association" "internal" { 259 | count = "${length(var.internal_subnets)}" 260 | subnet_id = "${element(aws_subnet.internal.*.id, count.index)}" 261 | route_table_id = "${element(aws_route_table.internal.*.id, count.index)}" 262 | } 263 | 264 | resource "aws_route_table_association" "external" { 265 | count = "${length(var.external_subnets)}" 266 | subnet_id = "${element(aws_subnet.external.*.id, count.index)}" 267 | route_table_id = "${aws_route_table.external.id}" 268 | } 269 | 270 | /** 271 | * Outputs 272 | */ 273 | 274 | // The VPC ID 275 | output "id" { 276 | value = "${aws_vpc.main.id}" 277 | } 278 | 279 | // The VPC CIDR 280 | output "cidr_block" { 281 | value = "${aws_vpc.main.cidr_block}" 282 | } 283 | 284 | // A comma-separated list of subnet IDs. 
285 | output "external_subnets" { 286 | value = ["${aws_subnet.external.*.id}"] 287 | } 288 | 289 | // A list of subnet IDs. 290 | output "internal_subnets" { 291 | value = ["${aws_subnet.internal.*.id}"] 292 | } 293 | 294 | // The default VPC security group ID. 295 | output "security_group" { 296 | value = "${aws_vpc.main.default_security_group_id}" 297 | } 298 | 299 | // The list of availability zones of the VPC. 300 | output "availability_zones" { 301 | value = ["${aws_subnet.external.*.availability_zone}"] 302 | } 303 | 304 | // The internal route table ID. 305 | output "internal_rtb_id" { 306 | value = "${join(",", aws_route_table.internal.*.id)}" 307 | } 308 | 309 | // The external route table ID. 310 | output "external_rtb_id" { 311 | value = "${aws_route_table.external.id}" 312 | } 313 | 314 | // The list of EIPs associated with the internal subnets. 315 | output "internal_nat_ips" { 316 | value = ["${aws_eip.nat.*.public_ip}"] 317 | } 318 | -------------------------------------------------------------------------------- /web-service/elb/main.tf: -------------------------------------------------------------------------------- 1 | /** 2 | * The ELB module creates an ELB, security group 3 | * a route53 record and a service healthcheck. 4 | * It is used by the service module. 5 | */ 6 | 7 | variable "name" { 8 | description = "ELB name, e.g cdn" 9 | } 10 | 11 | variable "subnet_ids" { 12 | description = "Comma separated list of subnet IDs" 13 | } 14 | 15 | variable "environment" { 16 | description = "Environment tag, e.g prod" 17 | } 18 | 19 | variable "port" { 20 | description = "Instance port" 21 | } 22 | 23 | variable "security_groups" { 24 | description = "Comma separated list of security group IDs" 25 | } 26 | 27 | variable "healthcheck" { 28 | description = "Healthcheck path" 29 | } 30 | 31 | variable "log_bucket" { 32 | description = "S3 bucket name to write ELB logs into" 33 | } 34 | 35 | variable "external_dns_name" { 36 | description = "The subdomain under which the ELB is exposed externally, defaults to the task name" 37 | } 38 | 39 | variable "internal_dns_name" { 40 | description = "The subdomain under which the ELB is exposed internally, defaults to the task name" 41 | } 42 | 43 | variable "external_zone_id" { 44 | description = "The zone ID to create the record in" 45 | } 46 | 47 | variable "internal_zone_id" { 48 | description = "The zone ID to create the record in" 49 | } 50 | 51 | variable "ssl_certificate_id" { 52 | } 53 | 54 | /** 55 | * Resources. 
56 | */ 57 | 58 | resource "aws_elb" "main" { 59 | name = "${var.name}" 60 | 61 | internal = false 62 | cross_zone_load_balancing = true 63 | subnets = ["${split(",", var.subnet_ids)}"] 64 | security_groups = ["${split(",",var.security_groups)}"] 65 | 66 | idle_timeout = 30 67 | connection_draining = true 68 | connection_draining_timeout = 15 69 | 70 | listener { 71 | lb_port = 80 72 | lb_protocol = "http" 73 | instance_port = "${var.port}" 74 | instance_protocol = "http" 75 | } 76 | 77 | listener { 78 | lb_port = 443 79 | lb_protocol = "https" 80 | instance_port = "${var.port}" 81 | instance_protocol = "http" 82 | ssl_certificate_id = "${var.ssl_certificate_id}" 83 | } 84 | 85 | health_check { 86 | healthy_threshold = 2 87 | unhealthy_threshold = 2 88 | timeout = 5 89 | target = "HTTP:${var.port}${var.healthcheck}" 90 | interval = 30 91 | } 92 | 93 | access_logs { 94 | bucket = "${var.log_bucket}" 95 | } 96 | 97 | tags { 98 | Name = "${var.name}-balancer" 99 | Service = "${var.name}" 100 | Environment = "${var.environment}" 101 | } 102 | } 103 | 104 | resource "aws_route53_record" "external" { 105 | zone_id = "${var.external_zone_id}" 106 | name = "${var.external_dns_name}" 107 | type = "A" 108 | 109 | alias { 110 | zone_id = "${aws_elb.main.zone_id}" 111 | name = "${aws_elb.main.dns_name}" 112 | evaluate_target_health = false 113 | } 114 | } 115 | 116 | resource "aws_route53_record" "internal" { 117 | zone_id = "${var.internal_zone_id}" 118 | name = "${var.internal_dns_name}" 119 | type = "A" 120 | 121 | alias { 122 | zone_id = "${aws_elb.main.zone_id}" 123 | name = "${aws_elb.main.dns_name}" 124 | evaluate_target_health = false 125 | } 126 | } 127 | 128 | /** 129 | * Outputs. 130 | */ 131 | 132 | // The ELB name. 133 | output "name" { 134 | value = "${aws_elb.main.name}" 135 | } 136 | 137 | // The ELB ID. 138 | output "id" { 139 | value = "${aws_elb.main.id}" 140 | } 141 | 142 | // The ELB dns_name. 143 | output "dns" { 144 | value = "${aws_elb.main.dns_name}" 145 | } 146 | 147 | // FQDN built using the zone domain and name (external) 148 | output "external_fqdn" { 149 | value = "${aws_route53_record.external.fqdn}" 150 | } 151 | 152 | // FQDN built using the zone domain and name (internal) 153 | output "internal_fqdn" { 154 | value = "${aws_route53_record.internal.fqdn}" 155 | } 156 | 157 | // The zone id of the ELB 158 | output "zone_id" { 159 | value = "${aws_elb.main.zone_id}" 160 | } 161 | -------------------------------------------------------------------------------- /web-service/main.tf: 1 | /** 2 | * The web-service module is similar to the `service` module, but 3 | * it provides a __public__ ELB instead. 4 | * 5 | * Usage: 6 | * 7 | * module "auth_service" { 8 | * source = "github.com/segmentio/stack/web-service" 9 | * name = "auth-service" 10 | * image = "auth-service" 11 | * cluster = "default" 12 | * } 13 | * 14 | */ 15 | 16 | /** 17 | * Required Variables.
18 | */ 19 | 20 | variable "environment" { 21 | description = "Environment tag, e.g prod" 22 | } 23 | 24 | variable "image" { 25 | description = "The docker image name, e.g nginx" 26 | } 27 | 28 | variable "name" { 29 | description = "The service name, if empty the service name is defaulted to the image name" 30 | default = "" 31 | } 32 | 33 | variable "version" { 34 | description = "The docker image version" 35 | default = "latest" 36 | } 37 | 38 | variable "subnet_ids" { 39 | description = "Comma separated list of subnet IDs that will be passed to the ELB module" 40 | } 41 | 42 | variable "security_groups" { 43 | description = "Comma separated list of security group IDs that will be passed to the ELB module" 44 | } 45 | 46 | variable "port" { 47 | description = "The container host port" 48 | } 49 | 50 | variable "cluster" { 51 | description = "The cluster name or ARN" 52 | } 53 | 54 | variable "log_bucket" { 55 | description = "The S3 bucket ID to use for the ELB" 56 | } 57 | 58 | variable "ssl_certificate_id" { 59 | description = "SSL Certificate ID to use" 60 | } 61 | 62 | variable "iam_role" { 63 | description = "IAM Role ARN to use" 64 | } 65 | 66 | variable "external_dns_name" { 67 | description = "The subdomain under which the ELB is exposed externally, defaults to the task name" 68 | default = "" 69 | } 70 | 71 | variable "internal_dns_name" { 72 | description = "The subdomain under which the ELB is exposed internally, defaults to the task name" 73 | default = "" 74 | } 75 | 76 | variable "external_zone_id" { 77 | description = "The zone ID to create the record in" 78 | } 79 | 80 | variable "internal_zone_id" { 81 | description = "The zone ID to create the record in" 82 | } 83 | 84 | /** 85 | * Options. 86 | */ 87 | 88 | variable "healthcheck" { 89 | description = "Path to a healthcheck endpoint" 90 | default = "/" 91 | } 92 | 93 | variable "container_port" { 94 | description = "The container port" 95 | default = 3000 96 | } 97 | 98 | variable "command" { 99 | description = "The raw json of the task command" 100 | default = "[]" 101 | } 102 | 103 | variable "env_vars" { 104 | description = "The raw json of the task env vars" 105 | default = "[]" 106 | } 107 | 108 | variable "desired_count" { 109 | description = "The desired count" 110 | default = 2 111 | } 112 | 113 | variable "memory" { 114 | description = "The number of MiB of memory to reserve for the container" 115 | default = 512 116 | } 117 | 118 | variable "cpu" { 119 | description = "The number of cpu units to reserve for the container" 120 | default = 512 121 | } 122 | 123 | variable "deployment_minimum_healthy_percent" { 124 | description = "lower limit (% of desired_count) of # of running tasks during a deployment" 125 | default = 100 126 | } 127 | 128 | variable "deployment_maximum_percent" { 129 | description = "upper limit (% of desired_count) of # of running tasks during a deployment" 130 | default = 200 131 | } 132 | 133 | /** 134 | * Resources. 
135 | */ 136 | 137 | resource "aws_ecs_service" "main" { 138 | name = "${module.task.name}" 139 | cluster = "${var.cluster}" 140 | task_definition = "${module.task.arn}" 141 | desired_count = "${var.desired_count}" 142 | iam_role = "${var.iam_role}" 143 | deployment_minimum_healthy_percent = "${var.deployment_minimum_healthy_percent}" 144 | deployment_maximum_percent = "${var.deployment_maximum_percent}" 145 | 146 | load_balancer { 147 | elb_name = "${module.elb.id}" 148 | container_name = "${module.task.name}" 149 | container_port = "${var.container_port}" 150 | } 151 | 152 | lifecycle { 153 | create_before_destroy = true 154 | } 155 | } 156 | 157 | module "task" { 158 | source = "../task" 159 | 160 | name = "${coalesce(var.name, replace(var.image, "/", "-"))}" 161 | image = "${var.image}" 162 | image_version = "${var.version}" 163 | command = "${var.command}" 164 | env_vars = "${var.env_vars}" 165 | memory = "${var.memory}" 166 | cpu = "${var.cpu}" 167 | 168 | ports = <