├── version
├── postgres
│   └── main.tf
├── 00_debug.sh
├── ci
│   ├── fixtures
│   │   └── compose
│   │       └── docker-compose.yml
│   ├── compose.yml
│   └── tasks
│       └── compose.yml
├── scripts
│   ├── build-and-replace-web-and-workers.sh
│   ├── build-and-replace-web-from-scratch.sh
│   └── build-and-replace-workers-from-scratch.sh
├── outputs.tf
├── glide.yaml
├── recreate.sh
├── my-latest-ami.sh
├── hello.yml
├── latest-ami-ubuntu.sh
├── latest-ami-docker.sh
├── .gitignore
├── 00_install_concourse.sh.tpl
├── cmd
│   ├── ip_util.go
│   ├── ask_util.go
│   ├── root.go
│   ├── restore_save.go
│   ├── util.go
│   └── up.go
├── packer.json
├── concourse-baked.json
├── main.go
├── autoscaling
│   ├── utilization
│   │   └── enabled
│   │       └── main.tf
│   ├── schedule
│   │   └── enabled
│   │       └── main.tf
│   └── hooks
│       └── enabled
│           └── main.tf
├── 02_start_concourse_worker.sh.tpl
├── docker-baked.json
├── Makefile
├── terraform.sh
├── glide.lock
├── concourse
│   ├── config_test.go
│   └── config.go
├── 01_start_concourse_web.sh.tpl
├── variables.tf
├── README.md
├── LICENSE
└── main.tf

/version:
--------------------------------------------------------------------------------
0.0.5
--------------------------------------------------------------------------------
/postgres/main.tf:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/00_debug.sh:
--------------------------------------------------------------------------------
#!/bin/bash

curl http://169.254.169.254/latest/user-data
--------------------------------------------------------------------------------
/ci/fixtures/compose/docker-compose.yml:
--------------------------------------------------------------------------------
version: '2'
services:
  redis:
    image: redis
volumes:
  logvolume01: {}
--------------------------------------------------------------------------------
/scripts/build-and-replace-web-and-workers.sh:
--------------------------------------------------------------------------------
#!/bin/bash

packer build -var source_ami=$(./latest-ami-docker.sh) concourse-baked.json && ./my-latest-ami.sh && ./recreate.sh web && ./recreate.sh worker
--------------------------------------------------------------------------------
/outputs.tf:
--------------------------------------------------------------------------------
output "concourse_web_endpoint" {
  value = "${template_file.start_concourse_web.vars.external_url}"
}

output "concourse_web_elb_dns_name" {
  value = "${aws_elb.web-elb.dns_name}"
}
--------------------------------------------------------------------------------
/glide.yaml:
--------------------------------------------------------------------------------
package: github.com/mumoshu/concourse-aws
import:
- package: github.com/spf13/cobra
- package: github.com/spf13/viper
- package: gopkg.in/yaml.v2
- package: gopkg.in/readline.v1
- package: github.com/aws/aws-sdk-go
--------------------------------------------------------------------------------
/scripts/build-and-replace-web-from-scratch.sh:
--------------------------------------------------------------------------------
#!/bin/bash

packer build -var source_ami=$(./latest-ami-ubuntu.sh) docker-baked.json && packer build -var source_ami=$(./latest-ami-docker.sh) concourse-baked.json && ./my-latest-ami.sh && ./recreate.sh web
--------------------------------------------------------------------------------
/scripts/build-and-replace-workers-from-scratch.sh:
--------------------------------------------------------------------------------
#!/bin/bash

packer build -var source_ami=$(./latest-ami-ubuntu.sh) docker-baked.json && packer build -var source_ami=$(./latest-ami-docker.sh) concourse-baked.json && ./my-latest-ami.sh && ./recreate.sh worker
--------------------------------------------------------------------------------
/recreate.sh:
--------------------------------------------------------------------------------
#!/bin/bash

function web() {
  terraform taint template_cloudinit_config.web && ./terraform.sh apply
}

function worker() {
  terraform taint template_cloudinit_config.worker && ./terraform.sh apply
}

"$@"
--------------------------------------------------------------------------------
/my-latest-ami.sh:
--------------------------------------------------------------------------------
#!/bin/bash

aws ec2 describe-images --owners self --filters Name=virtualization-type,Values=hvm Name=root-device-type,Values=ebs Name=architecture,Values=x86_64 Name=name,Values="packer-concourse-*" | jq -r ".Images | sort_by(.CreationDate) | .[].ImageId" | tail -n 1
--------------------------------------------------------------------------------
/hello.yml:
--------------------------------------------------------------------------------
jobs:
- name: hello-world
  plan:
  - task: say-hello
    config:
      platform: linux
      image_resource:
        type: docker-image
        source: {repository: ubuntu}
      run:
        path: echo
        args: ["Hello, world!"]
--------------------------------------------------------------------------------
/latest-ami-ubuntu.sh:
--------------------------------------------------------------------------------
#!/bin/bash

aws ec2 describe-images --owners self --filters Name=virtualization-type,Values=hvm Name=root-device-type,Values=ebs Name=architecture,Values=x86_64 Name=name,Values="packer-ubuntu-xenial-*" | jq -r ".Images | sort_by(.CreationDate) | .[].ImageId" | tail -n 1
--------------------------------------------------------------------------------
/latest-ami-docker.sh:
--------------------------------------------------------------------------------
#!/bin/bash

aws ec2 describe-images --owners self --filters Name=virtualization-type,Values=hvm Name=root-device-type,Values=ebs Name=architecture,Values=x86_64 Name=name,Values="packer-ubuntu-xenial-docker-*" | jq -r ".Images | sort_by(.CreationDate) | .[].ImageId" | tail -n 1
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
.idea
host_key*
session_signing_key*
worker_key*
authorized_worker_keys*
.envrc
# Terraform
.terraform
*.tfstate*
# Emacs
*~
\#*\#
# IntelliJ IDEA modules
*.iml
vendor/
bin/
cluster.yml
cluster.yaml
secrets.yml
--------------------------------------------------------------------------------
/00_install_concourse.sh.tpl:
--------------------------------------------------------------------------------
#!/bin/bash -v
#apt-get update -y
#apt-get install -y nginx > /tmp/nginx.log

if ! which concourse; then
  curl -v -L https://github.com/concourse/concourse/releases/download/v1.0.0/concourse_linux_amd64 -o concourse
  chmod +x concourse
  mv concourse /usr/local/bin/concourse
fi
--------------------------------------------------------------------------------
/cmd/ip_util.go:
--------------------------------------------------------------------------------
package cmd

import (
    "io/ioutil"
    "net/http"
    "strings"
)

func ObtainExternalIp() string {
    resp, err := http.Get("https://myexternalip.com/raw")
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }
    return strings.TrimSpace(string(body))
}
--------------------------------------------------------------------------------
/packer.json:
--------------------------------------------------------------------------------
{
  "builders": [{
    "type": "amazon-ebs",
    "region": "ap-northeast-1",
    "source_ami": "ami-0417e362",
    "instance_type": "t2.micro",
    "ssh_username": "ubuntu",
    "ami_name": "packer-ubuntu-xenial-{{timestamp}}"
  }],
  "provisioners": [{
    "type": "shell",
    "inline": [
      "sudo apt-get update",
      "sudo apt-get install linux-image-extra-$(uname -r) -y"
    ]
  }]
}
--------------------------------------------------------------------------------
/ci/compose.yml:
--------------------------------------------------------------------------------
resources:
- name: concourse-aws
  type: git
  source:
    uri: git@github.com:mumoshu/concourse-aws.git
    branch: master
    private_key: {{gh-private-key}}
    skip_ssl_verification: true
- name: redis
  type: docker-image
  source:
    repository: redis

jobs:
- name: hello-docker-compose
  plan:
  - aggregate:
    - get: concourse-aws
    - get: redis
      params: {save: true}
  - task: smoke-test
    privileged: true
    file: concourse-aws/ci/tasks/compose.yml
--------------------------------------------------------------------------------
/concourse-baked.json:
--------------------------------------------------------------------------------
{
  "builders": [{
    "type": "amazon-ebs",
    "region": "ap-northeast-1",
    "source_ami": "{{user `source_ami`}}",
    "instance_type": "t2.micro",
    "ssh_username": "ubuntu",
    "ami_name": "packer-concourse-v2.5.1-{{timestamp}}"
  }],
  "provisioners": [{
    "type": "shell",
    "inline": [
      "uname -a",
      "curl -v -L https://github.com/concourse/concourse/releases/download/v2.5.1/concourse_linux_amd64 -o concourse",
      "chmod +x concourse",
      "sudo mv concourse /usr/local/bin/concourse"
    ]
  }]
}
--------------------------------------------------------------------------------
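The `cmd/ip_util.go` helper above fetches the machine's public IP from myexternalip.com, presumably to prefill prompts such as the allowed-CIDRs one. A minimal sketch of calling it directly — the standalone `main` program and the `/32` suffix are illustrative assumptions, not code from this repository:

```go
package main

import (
    "fmt"

    "github.com/mumoshu/concourse-aws/cmd"
)

func main() {
    // ObtainExternalIp panics on network errors, so a real caller may want
    // to recover(); here we just print a suggested single-host CIDR.
    ip := cmd.ObtainExternalIp()
    fmt.Printf("suggested accessible_cidrs: %s/32\n", ip)
}
```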
/main.go:
--------------------------------------------------------------------------------
// Copyright © 2016 NAME HERE
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package main

import "github.com/mumoshu/concourse-aws/cmd"

func main() {
    cmd.Execute()
}
--------------------------------------------------------------------------------
/autoscaling/utilization/enabled/main.tf:
--------------------------------------------------------------------------------
resource "aws_autoscaling_policy" "add-worker" {
  name                   = "${var.target_asg_name}-add-worker"
  scaling_adjustment     = 1
  adjustment_type        = "ChangeInCapacity"
  cooldown               = 300
  autoscaling_group_name = "${var.target_asg_name}"
}

resource "aws_cloudwatch_metric_alarm" "worker-is-busy" {
  alarm_name          = "${var.target_asg_name}-is-busy"
  comparison_operator = "GreaterThanOrEqualToThreshold"
  evaluation_periods  = "2"
  metric_name         = "CPUUtilization"
  namespace           = "AWS/EC2"
  period              = "120"
  statistic           = "Average"
  threshold           = "80"
  dimensions {
    AutoScalingGroupName = "${var.target_asg_name}"
  }
  alarm_description = "This metric monitors EC2 CPU utilization"
  alarm_actions     = ["${aws_autoscaling_policy.add-worker.arn}"]
}

variable "target_asg_name" {}
--------------------------------------------------------------------------------
/02_start_concourse_worker.sh.tpl:
--------------------------------------------------------------------------------
#!/bin/bash

exec > /var/log/02_start_concourse_worker.log 2>&1
set -x

CONCOURSE_PATH=/var/lib/concourse

mkdir -p $CONCOURSE_PATH

echo "${tsa_host}" > $CONCOURSE_PATH/tsa_host
echo "${tsa_public_key}" > $CONCOURSE_PATH/tsa_public_key
echo "${tsa_worker_private_key}" > $CONCOURSE_PATH/tsa_worker_private_key
curl http://169.254.169.254/latest/meta-data/local-ipv4 > $CONCOURSE_PATH/peer_ip

cd $CONCOURSE_PATH

docker info
service docker status
service docker stop

concourse worker \
  --work-dir $CONCOURSE_PATH \
  --peer-ip $(cat peer_ip) \
  --bind-ip $(cat peer_ip) \
  --baggageclaim-bind-ip $(cat peer_ip) \
  --tsa-host $(cat tsa_host) \
  --tsa-public-key tsa_public_key \
  --tsa-worker-private-key tsa_worker_private_key > $CONCOURSE_PATH/concourse_worker.log 2>&1 &

echo $! > $CONCOURSE_PATH/pid
--------------------------------------------------------------------------------
/docker-baked.json:
--------------------------------------------------------------------------------
{
  "builders": [{
    "type": "amazon-ebs",
    "region": "ap-northeast-1",
    "source_ami": "{{user `source_ami`}}",
    "instance_type": "t2.micro",
    "ssh_username": "ubuntu",
    "ami_name": "packer-ubuntu-xenial-docker-{{timestamp}}"
  }],
  "provisioners": [{
    "type": "shell",
    "inline": [
      "uname -a",
      "sudo apt-get update",
      "sudo apt-get --yes upgrade",
      "sudo apt-get install apt-transport-https ca-certificates -y",
      "sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D",
      "sudo bash -c 'echo deb https://apt.dockerproject.org/repo ubuntu-xenial main > /etc/apt/sources.list.d/docker.list'",
      "sudo apt-get update",
      "sudo apt-cache policy docker-engine",
      "sudo apt-get install docker-engine -y",
      "#sudo service docker start",
      "sudo docker info",
      "sudo docker run hello-world"
    ]
  }]
}
--------------------------------------------------------------------------------
/autoscaling/schedule/enabled/main.tf:
--------------------------------------------------------------------------------
resource "aws_autoscaling_schedule" "add_workers_before_working_time" {
  scheduled_action_name = "add_workers_before_working_time"
  min_size              = "${var.num_workers_during_working_time}"
  max_size              = "${var.max_num_workers_during_working_time}"
  desired_capacity      = "${var.num_workers_during_working_time}"
  # 9:30 JST
  recurrence             = "30 0 * * MON-FRI"
  autoscaling_group_name = "${var.target_asg_name}"
}

resource "aws_autoscaling_schedule" "rem_workers_after_working_time" {
  scheduled_action_name = "rem_workers_after_working_time"
  min_size              = 0
  max_size              = 0
  desired_capacity      = "${var.num_workers_during_non_working_time}"
  # 19:30 JST
  recurrence             = "30 10 * * MON-FRI"
  autoscaling_group_name = "${var.target_asg_name}"
}

variable "target_asg_name" {}
variable "num_workers_during_working_time" {}
variable "max_num_workers_during_working_time" {}
variable "num_workers_during_non_working_time" {}
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
gofmt:
	find . -path ./vendor -prune -type f -o -name '*.go' -exec gofmt -d {} + | tee /dev/stderr
	find . -path ./vendor -prune -type f -o -name '*.go' -exec gofmt -w {} + | tee /dev/stderr
test:
	test -z '$(shell find . -path ./vendor -prune -type f -o -name '*.go' -exec gofmt -d {} + | tee /dev/stderr)'
	go test $(shell glide novendor)

build: test
	go build -a -tags netgo -installsuffix netgo -o bin/concourse-aws ./

print-version:
	echo $$(cat version)

publish: build
	ghr -u mumoshu -r concourse-aws -c master --prerelease v$$(cat version) bin/

publish-latest: build
	ghr -u mumoshu -r concourse-aws -c master --replace --prerelease latest bin/

it-dcind:
	fly -t test execute --config ci/tasks/compose.yml --privileged --input docker-image-resource=docker-image-resource/

it-pipeline-dcind:
	fly -t test sync
	fly -t test dp -p compose
	fly -t test set-pipeline -c ci/compose.yml -p compose -l ./secrets.yml
	fly -t test unpause-pipeline -p compose
	fly -t test check-resource -r compose/docker-image-resource
--------------------------------------------------------------------------------
/terraform.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -x

if [ ! -e host_key ]; then
  ssh-keygen -t rsa -f host_key -N ''
fi

if [ ! -e worker_key ]; then
  ssh-keygen -t rsa -f worker_key -N ''
fi

if [ ! -e session_signing_key ]; then
  ssh-keygen -t rsa -f session_signing_key -N ''
fi

cp worker_key.pub authorized_worker_keys

subnet_id=$CONCOURSE_SUBNET_ID

vpc_id=$(aws ec2 describe-subnets --subnet-id $subnet_id | jq -r .Subnets[].VpcId)

echo $vpc_id

subcommand=$1; shift;

if [ "$subcommand" = 'get' ]; then
  terraform $subcommand "$@"
  exit $?
fi

terraform $subcommand -var aws_region=ap-northeast-1 -var availability_zones=ap-northeast-1a,ap-northeast-1c -var key_name=cw_kuoka -var subnet_id=$subnet_id -var vpc_id=$vpc_id -var db_instance_class=db.t2.micro -var db_username=concourse -var db_password=concourse -var db_subnet_ids=$CONCOURSE_DB_SUBNET_IDS \
  -var tsa_host_key=host_key \
  -var session_signing_key=session_signing_key \
  -var tsa_authorized_keys=worker_key.pub \
  -var tsa_public_key=host_key.pub \
  -var tsa_worker_private_key=worker_key \
  -var ami=$(./my-latest-ami.sh) \
  -var in_access_allowed_cidrs=$CONCOURSE_IN_ACCESS_ALLOWED_CIDRS \
  -var worker_instance_profile=$CONCOURSE_WORKER_INSTANCE_PROFILE \
  "$@"
--------------------------------------------------------------------------------
/ci/tasks/compose.yml:
--------------------------------------------------------------------------------
platform: linux

image_resource:
  type: docker-image
  source: {repository: "mumoshu/dcind", tag: "latest"}

inputs:
- name: redis
- name: concourse-aws

run:
  path: sh
  args:
  - -exc
  - |
    mount -v
    cat /proc/mounts
    df -aT
    # To load the `start_docker` function defined in /docker-lib.sh from the docker image `mumoshu/dcind`
    source /docker-lib.sh
    start_docker
    # Note that `docker save` and then `docker load` does work for caching images but not for caching layers since Docker 1.10.0
    # So `docker-compose pull`ing after `docker load`ing results in pulling images again
    # See https://github.com/docker/docker/issues/20380
    docker load -i redis/image
    # Just `docker load`ing isn't enough to tell docker-compose to use that
    # See (1) and (2) for more info
    # (1) https://github.com/concourse/docker-image-resource/commit/3caa71774aee04de91febbc78bf7614333083ed3
    # (2) https://github.com/concourse/docker-image-resource/blob/master/assets/out#L89
    docker tag "$(cat redis/image-id)" "$(cat redis/repository):$(cat redis/tag)"
    docker images
    docker info
    docker-compose version
    cd concourse-aws/ci/fixtures/compose
    docker-compose up -d
    sleep 10
    docker-compose logs
    docker-compose stop --timeout 30
--------------------------------------------------------------------------------
/cmd/ask_util.go:
--------------------------------------------------------------------------------
package cmd

import (
    "fmt"
    "gopkg.in/readline.v1"
    "strings"
)

type AskOptions struct {
    Candidates []string
    Validate   func(string) error
    Default    string
}

func AskForRequiredInput(prompt string, opts ...AskOptions) string {
    pcItems := []readline.PrefixCompleterInterface{}
    validate := func(item string) error { return nil }
    defaultValue := ""
    if len(opts) > 0 {
        o := opts[0]
        for _, c := range o.Candidates {
            pcItems = append(pcItems, readline.PcItem(c))
        }
        if o.Validate != nil {
            validate = o.Validate
        }
        if o.Default != "" {
            defaultValue = o.Default
        }
    }
    var completer = readline.NewPrefixCompleter(
        pcItems...,
    )

    fullPrompt := ""
    if defaultValue != "" {
        fullPrompt = fmt.Sprintf("%s(Default: %s)> ", prompt, defaultValue)
    } else {
        fullPrompt = fmt.Sprintf("%s> ", prompt)
    }

    rl, err := readline.NewEx(&readline.Config{
        Prompt:       fullPrompt,
        AutoComplete: completer,
    })
    if err != nil {
        panic(err)
    }
    defer rl.Close()

    for {
        input, err := rl.Readline()
        if err == readline.ErrInterrupt {
            panic(err)
        }
        input = strings.TrimSpace(input)
        if err == nil { // or io.EOF
            if input == "" {
                input = defaultValue
            }
            r := validate(input)
            if r == nil {
                return input
            } else {
                fmt.Printf("%s is not valid: %v\n", input, r)
            }
        }
    }
}
--------------------------------------------------------------------------------
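`AskForRequiredInput` above drives the tool's interactive prompts: it re-prompts until the optional `Validate` callback returns nil, offers tab completion over `Candidates`, and falls back to `Default` on empty input. A minimal sketch of calling it — the prompt text and candidate values here are made up for illustration:

```go
package main

import (
    "fmt"

    "github.com/mumoshu/concourse-aws/cmd"
)

func main() {
    // Prompt with completion candidates, a default value, and a validator.
    region := cmd.AskForRequiredInput("AWS region", cmd.AskOptions{
        Candidates: []string{"us-east-1", "ap-northeast-1"},
        Default:    "ap-northeast-1",
        Validate: func(input string) error {
            if input == "" {
                return fmt.Errorf("region must not be empty")
            }
            return nil
        },
    })
    fmt.Println("selected region:", region)
}
```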
/glide.lock:
--------------------------------------------------------------------------------
hash: 69ec776c6ddc74930afdc70bfe979ea5261fe9464dbe47cd35aff5c3c76fffbd
updated: 2016-05-18T15:33:54.547994235+09:00
imports:
- name: github.com/aws/aws-sdk-go
  version: 8fd3586fd7d0a1ae87031b09da0d214588460e51
- name: github.com/BurntSushi/toml
  version: f0aeabca5a127c4078abb8c8d64298b147264b55
- name: github.com/fsnotify/fsnotify
  version: 30411dbcefb7a1da7e84f75530ad3abe4011b4f8
- name: github.com/hashicorp/hcl
  version: 9a905a34e6280ce905da1a32344b25e81011197a
  subpackages:
  - hcl/ast
  - hcl/parser
  - hcl/token
  - json/parser
  - hcl/scanner
  - hcl/strconv
  - json/scanner
  - json/token
- name: github.com/inconshreveable/mousetrap
  version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75
- name: github.com/magiconair/properties
  version: c265cfa48dda6474e208715ca93e987829f572f8
- name: github.com/mitchellh/mapstructure
  version: d2dd0262208475919e1a362f675cfc0e7c10e905
- name: github.com/spf13/cast
  version: 27b586b42e29bec072fe7379259cc719e1289da6
- name: github.com/spf13/cobra
  version: 0f866a6211e33cde2091d9290c08f6afd6c9ebbc
- name: github.com/spf13/jwalterweatherman
  version: 33c24e77fb80341fe7130ee7c594256ff08ccc46
- name: github.com/spf13/pflag
  version: cb88ea77998c3f024757528e3305022ab50b43be
- name: github.com/spf13/viper
  version: d8a428b8a30606e1d0b355d91edf282609ade1a6
- name: golang.org/x/crypto
  version: b6789ab629056511030d652d851e7dc10c9e9c9e
  subpackages:
  - ssh/terminal
- name: golang.org/x/sys
  version: d4feaf1a7e61e1d9e79e6c4e76c6349e9cab0a03
  subpackages:
  - unix
- name: gopkg.in/readline.v1
  version: 64a71f22be412fa183322657872f81a757eb8665
- name: gopkg.in/yaml.v2
  version: a83829b6f1293c91addabc89d0571c246397bbf4
devImports: []
--------------------------------------------------------------------------------
/autoscaling/hooks/enabled/main.tf:
--------------------------------------------------------------------------------
# This article may help you understand what we do here
# https://dzone.com/articles/graceful-shutdown-using-aws-autoscaling-groups-and

resource "aws_sqs_queue" "graceful_termination_queue" {
  name = "${var.prefix}graceful_termination_queue"
}

resource "aws_iam_role" "autoscaling_role" {
  name               = "${var.prefix}autoscaling_role"
  assume_role_policy = <
--------------------------------------------------------------------------------
/cmd/root.go:
--------------------------------------------------------------------------------
// Copyright © 2016 NAME HERE
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "fmt"
    "log"
    "os"

    "github.com/mitchellh/go-homedir"
    "github.com/spf13/cobra"
)

var cfgDir string

// RootCmd represents the base command when called without any subcommands
var RootCmd = &cobra.Command{
    Use:   "concourse-aws",
    Short: "Scalable Concourse CI Server on AWS",
    Long: `This tool can provision scalable concourse ci servers on AWS. It also supports syncing (get/put) configuration files with S3.
`,
    // Uncomment the following line if your bare application
    // has an action associated with it:
    // Run: func(cmd *cobra.Command, args []string) { },
}

// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
func Execute() {
    if err := RootCmd.Execute(); err != nil {
        fmt.Println(err)
        os.Exit(-1)
    }
}

func init() {
    cobra.OnInitialize(initConfig)

    // Here you will define your flags and configuration settings.
    // Cobra supports Persistent Flags, which, if defined here,
    // will be global for your application.
    RootCmd.PersistentFlags().StringVar(&cfgDir,
        "config-dir",
        fmt.Sprintf("%s/%s", defaultCfgDir(), ".concourse-aws/"),
        "directory name in which configurations are stored.")
}

// initConfig reads in config file and ENV variables if set.
func initConfig() {
}

func defaultCfgDir() string {
    homeDir, err := homedir.Dir()
    if err != nil {
        log.Panic("cannot resolve user home directory.")
    }
    expandedHomeDir, err := homedir.Expand(homeDir)
    if err != nil {
        log.Panic("cannot expand user home directory.")
    }
    return expandedHomeDir
}
--------------------------------------------------------------------------------
/concourse/config_test.go:
--------------------------------------------------------------------------------
package concourse

import (
    "reflect"
    "testing"
)

var validConfigs = []struct {
    providedYaml   string
    expectedConfig Config
}{
    {
        providedYaml: `
---
prefix: sample
region: ap-northeast-1
key_name: cw_kuoka
web_instance_type: t2.small
worker_instance_type: t2.medium
subnet_ids:
- subnet-11111914
- subnet-2222fc48
accessible_cidrs: 123.123.234.234/32,234.234.234.234/32
db_engine_version: "9.4.7"
asg_min: 0
asg_max: 2
web_asg_desired: 1
worker_asg_desired: 2
github_auth_client_id: dummydummy
github_auth_client_secret: dummydummydummy
github_auth_organizations: [org1, org2]
github_auth_teams: [org3/team1, org3/team2]
github_auth_users: []
elb_protocol: "https"
elb_port: 443
custom_external_domain_name: "some.where"
ssl_certificate_arn: "arn://dummydummy"
`,
        expectedConfig: Config{
            Prefix:                   "sample",
            Region:                   "ap-northeast-1",
            KeyName:                  "cw_kuoka",
            InstanceType:             "",
            WebInstanceType:          "t2.small",
            WorkerInstanceType:       "t2.medium",
            SubnetIds:                []string{"subnet-11111914", "subnet-2222fc48"},
            AccessibleCIDRS:          "123.123.234.234/32,234.234.234.234/32",
            DBEngineVersion:          "9.4.7",
            AsgMin:                   "0",
            AsgMax:                   "2",
            WebAsgDesired:            "1",
            WorkerAsgDesired:         "2",
            ElbProtocol:              "https",
            ElbPort:                  443,
            CustomExternalDomainName: "some.where",
            SSLCertificateArn:        "arn://dummydummy",
            GithubAuthClientId:       "dummydummy",
            GithubAuthClientSecret:   "dummydummydummy",
            GithubAuthOrganizations:  []string{"org1", "org2"},
            GithubAuthTeams:          []string{"org3/team1", "org3/team2"},
            GithubAuthUsers:          []string{},
        },
    },
}

func TestParsing(t *testing.T) {
    for _, c := range validConfigs {
        actual, err := ConfigFromString(c.providedYaml)
        if err != nil {
            t.Errorf("Failed to parse config: %s: %v", c.providedYaml, err)
        }
        if !reflect.DeepEqual(actual, c.expectedConfig) {
            t.Errorf("Produced config does not match the expected config. Produced:\n%+v\nExpected:\n%+v\n", actual, c.expectedConfig)
        }
    }
}
--------------------------------------------------------------------------------
/concourse/config.go:
--------------------------------------------------------------------------------
package concourse

import (
    "fmt"
    "gopkg.in/yaml.v2"
    "io/ioutil"
    "log"
)

type Config struct {
    Prefix            string   `yaml:"prefix"`
    Region            string   `yaml:"region"`
    KeyName           string   `yaml:"key_name"`
    SubnetIds         []string `yaml:"subnet_ids"`
    VpcId             string   `yaml:"vpc_id"`
    AvailabilityZones []string `yaml:"availability_zones"`
    AccessibleCIDRS   string   `yaml:"accessible_cidrs"`
    DBInstanceClass   string   `yaml:"db_instance_class"`
    DBEngineVersion   string   `yaml:"db_engine_version"`
    // Deprecated: Use WebInstanceType and WorkerInstanceType instead.
    InstanceType             string   `yaml:"instance_type"`
    WebInstanceType          string   `yaml:"web_instance_type"`
    WorkerInstanceType       string   `yaml:"worker_instance_type"`
    WorkerInstanceProfile    string   `yaml:"worker_instance_profile"`
    AMI                      string   `yaml:"ami_id"`
    AsgMin                   string   `yaml:"asg_min"`
    AsgMax                   string   `yaml:"asg_max"`
    WebAsgDesired            string   `yaml:"web_asg_desired"`
    WorkerAsgDesired         string   `yaml:"worker_asg_desired"`
    ElbProtocol              string   `yaml:"elb_protocol"`
    ElbPort                  int      `yaml:"elb_port"`
    CustomExternalDomainName string   `yaml:"custom_external_domain_name"`
    SSLCertificateArn        string   `yaml:"ssl_certificate_arn"`
    BasicAuthUsername        string   `yaml:"basic_auth_username"`
    BasicAuthPassword        string   `yaml:"basic_auth_password"`
    GithubAuthClientId       string   `yaml:"github_auth_client_id"`
    GithubAuthClientSecret   string   `yaml:"github_auth_client_secret"`
    GithubAuthOrganizations  []string `yaml:"github_auth_organizations"`
    GithubAuthTeams          []string `yaml:"github_auth_teams"`
    GithubAuthUsers          []string `yaml:"github_auth_users"`
}

func ConfigFromFile(filename string) (*Config, error) {
    data, err := ioutil.ReadFile(filename)
    if err != nil {
        return nil, err
    }

    c, err := ConfigFromBytes(data)
    if err != nil {
        return nil, fmt.Errorf("file %s: %v", filename, err)
    }

    return &c, nil
}

func ConfigFromString(data string) (Config, error) {
    return ConfigFromBytes([]byte(data))
}

func ConfigFromBytes(data []byte) (Config, error) {
    c := Config{}

    err := yaml.Unmarshal(data, &c)
    if err != nil {
        log.Fatalf("error: %v", err)
    }
    return c, err
}
--------------------------------------------------------------------------------
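`concourse/config.go` above is the schema for `cluster.yml`, the cluster definition that `concourse-aws` reads and writes. A minimal sketch of loading it through the exported helpers — the field selection and output format are illustrative, not code from this repository:

```go
package main

import (
    "fmt"
    "log"

    "github.com/mumoshu/concourse-aws/concourse"
)

func main() {
    // ConfigFromFile reads and unmarshals a cluster definition such as cluster.yml.
    c, err := concourse.ConfigFromFile("cluster.yml")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("region=%s web=%s worker=%s\n", c.Region, c.WebInstanceType, c.WorkerInstanceType)
}
```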
/01_start_concourse_web.sh.tpl:
--------------------------------------------------------------------------------
#!/bin/bash

exec > /var/log/01_start_concourse_web.log 2>&1
set -x

CONCOURSE_PATH=/var/lib/concourse

mkdir -p $CONCOURSE_PATH

echo "${session_signing_key}" > $CONCOURSE_PATH/session_signing_key
echo "${tsa_host_key}" > $CONCOURSE_PATH/tsa_host_key
echo "${tsa_authorized_keys}" > $CONCOURSE_PATH/tsa_authorized_keys
echo "${postgres_data_source}" > $CONCOURSE_PATH/postgres_data_source
echo "${external_url}" > $CONCOURSE_PATH/external_url
echo "${github_auth_organizations}" > $CONCOURSE_PATH/github_auth_organizations
echo "${github_auth_teams}" > $CONCOURSE_PATH/github_auth_teams
echo "${github_auth_users}" > $CONCOURSE_PATH/github_auth_users
curl http://169.254.169.254/latest/meta-data/local-ipv4 > $CONCOURSE_PATH/peer_ip

if [ "z${basic_auth_username}" != "z" ]; then
  BASIC_AUTH_OPTS="--basic-auth-username ${basic_auth_username} --basic-auth-password ${basic_auth_password}"
fi

GITHUB_AUTH_OPTS=()
if [ "z${github_auth_client_id}" != "z" ]; then
  GITHUB_AUTH_OPTS+=("--github-auth-client-id")
  GITHUB_AUTH_OPTS+=("${github_auth_client_id}")
  GITHUB_AUTH_OPTS+=("--github-auth-client-secret")
  GITHUB_AUTH_OPTS+=("${github_auth_client_secret}")

  if [ "z${github_auth_organizations}" != "z" ]; then
    str="${github_auth_organizations}"
    IFS_ORIGINAL="$$IFS"
    IFS=,
    arr=($$str)
    IFS="$$IFS_ORIGINAL"
    for o in "$${arr[@]}"; do
      GITHUB_AUTH_OPTS+=("--github-auth-organization")
      GITHUB_AUTH_OPTS+=("$$o")
    done
  fi
  if [ "z${github_auth_teams}" != "z" ]; then
    str="${github_auth_teams}"
    IFS_ORIGINAL="$$IFS"
    IFS=,
    arr=($$str)
    IFS="$$IFS_ORIGINAL"
    for t in "$${arr[@]}"; do
      GITHUB_AUTH_OPTS+=("--github-auth-team")
      GITHUB_AUTH_OPTS+=("$$t")
    done
  fi
  if [ "z${github_auth_users}" != "z" ]; then
    str="${github_auth_users}"
    IFS_ORIGINAL="$$IFS"
    IFS=,
    arr=($$str)
    IFS="$$IFS_ORIGINAL"
    for u in "$${arr[@]}"; do
      GITHUB_AUTH_OPTS+=("--github-auth-user")
      GITHUB_AUTH_OPTS+=("$$u")
    done
  fi
fi

cd $CONCOURSE_PATH

concourse web --session-signing-key session_signing_key \
  --tsa-host-key tsa_host_key --tsa-authorized-keys tsa_authorized_keys \
  --external-url $(cat external_url) \
  --postgres-data-source $(cat postgres_data_source) \
  $BASIC_AUTH_OPTS \
  "$${GITHUB_AUTH_OPTS[@]}" \
  > $CONCOURSE_PATH/concourse_web.log 2>&1 &

echo $! > $CONCOURSE_PATH/pid
--------------------------------------------------------------------------------
/cmd/restore_save.go:
--------------------------------------------------------------------------------
// Copyright © 2016 Shingo Omura
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package cmd

import (
    "log"
    "strings"

    "github.com/spf13/cobra"
)

//
// Flags
//
var bucketRegion string
var bucketName string

//
// commands
//
var assetFileNames = []string{
    "cluster.yml",
    "terraform.tfstate",
    "host_key",
    "host_key.pub",
    "worker_key",
    "worker_key.pub",
    "session_signing_key",
    "session_signing_key.pub",
    "authorized_worker_keys",
}

var save = &cobra.Command{
    Use:   "save",
    Short: "Save state files to S3",
    Long: `Put the state files listed below to the specified S3 bucket. Keys stored in the bucket are the same as the filenames.
 - ` + strings.Join(assetFileNames[:], ", "),
    Run: RunSave,
}

var restore = &cobra.Command{
    Use:   "restore",
    Short: "Restore state files from S3",
    Long: `Restore the state files listed below from the specified S3 bucket. Keys pulled from the bucket are the same as the filenames.
 - ` + strings.Join(assetFileNames[:], ", "),
    Run: RunRestore,
}

func RunSave(cmd *cobra.Command, args []string) {
    if len(bucketName) < 1 || len(bucketRegion) < 1 {
        log.Panic("--bucket and --bucket-region are required.")
    }

    PutFilesToS3(bucketRegion, bucketName, cfgDir, assetFileNames)
}

func RunRestore(cmd *cobra.Command, args []string) {
    if len(bucketName) < 1 {
        log.Panic("--bucket is required.")
    }

    makeCfgDirIfNotExists()

    GetFilesFromS3(bucketRegion, bucketName, cfgDir, assetFileNames)
}

func init() {
    RootCmd.AddCommand(restore)
    restore.Flags().StringVar(&bucketName, "bucket", "", "S3 bucket name from which assets will be downloaded.")
    restore.Flags().StringVar(&bucketRegion, "bucket-region", "", "Region of the S3 bucket specified by --bucket")

    RootCmd.AddCommand(save)
    save.Flags().StringVar(&bucketName, "bucket", "", "S3 bucket name to which assets will be uploaded.")
    save.Flags().StringVar(&bucketRegion, "bucket-region", "", "Region of the S3 bucket specified by --bucket")
}
--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
variable "prefix" {
  description = "Prefix for every resource created by this template"
  default     = "concourse-"
}

variable "aws_region" {
  description = "The AWS region to create things in."
  default     = "us-east-1"
}

# ubuntu-trusty-14.04 (x64)
variable "aws_amis" {
  default = {
    "us-east-1"      = "ami-5f709f34"
    "us-west-2"      = "ami-7f675e4f"
    "ap-northeast-1" = "ami-a21529cc"
  }
}

variable "ami" {
}

variable "availability_zones" {
  default     = "us-east-1b,us-east-1c,us-east-1d,us-east-1e"
  description = "List of availability zones; use the AWS CLI to find yours"
}

variable "key_name" {
  description = "Name of AWS key pair"
}

variable "web_instance_type" {
  default     = "t2.micro"
  description = "AWS instance type for web"
}

variable "worker_instance_type" {
  default     = "t2.micro"
  description = "AWS instance type for worker"
}

variable "asg_min" {
  description = "Min number of servers in ASG"
  default     = "0"
}

variable "asg_max" {
  description = "Max number of servers in ASG"
  default     = "2"
}

variable "web_asg_desired" {
  description = "Desired number of web servers in ASG"
  # Setting this >= 2 results in `fly execute --input foo=bar` failing with errors like: "bad response uploading bits (404 Not Found)" or "gunzip: invalid magic"
  default = "1"
}

variable "worker_asg_desired" {
  description = "Desired number of servers in ASG"
  default     = "2"
}

variable "elb_listener_lb_port" {
  description = ""
  default     = "80"
}

variable "use_custom_elb_port" {
  default = 0
}

variable "elb_listener_lb_protocol" {
  default = "http"
}

variable "elb_listener_instance_port" {
  description = ""
  default     = "8080"
}

variable "in_access_allowed_cidrs" {
  description = ""
}

variable "subnet_id" {
  description = ""
}

variable "db_subnet_ids" {
  description = ""
}

variable "vpc_id" {
  description = ""
}

variable "db_username" {
  description = ""
}

variable "db_password" {
  description = ""
}

variable "db_instance_class" {
  description = "RDS instance class, e.g. db.t2.micro"
}

variable "db_engine_version" {
  description = "engine version of the rds engine ('postgres')"
}

variable "tsa_host_key" {
  description = ""
}

variable "session_signing_key" {
  description = ""
}

variable "tsa_authorized_keys" {
  description = ""
}

variable "tsa_public_key" {
  description = ""
}

variable "tsa_worker_private_key" {
  description = ""
}

variable "tsa_port" {
  description = ""
  default     = "2222"
}

variable "worker_instance_profile" {
  description = "IAM instance profile name to be used by Concourse workers. Can be an empty string to not specify it (no instance profile is used then)"
}

variable "basic_auth_username" {
  default = ""
}

variable "basic_auth_password" {
  default = ""
}

variable "github_auth_client_id" {
  default = ""
}

variable "github_auth_client_secret" {
  default = ""
}

variable "github_auth_organizations" {
  default = ""
}

variable "github_auth_teams" {
  default = ""
}

variable "github_auth_users" {
  default = ""
}

variable "custom_external_domain_name" {
  default     = ""
  description = "don't include http[s]://"
}

variable "use_custom_external_domain_name" {
  default = 0
}

variable "ssl_certificate_arn" {
  default = ""
}
--------------------------------------------------------------------------------
/cmd/util.go:
--------------------------------------------------------------------------------
package cmd

import (
    "fmt"
    "log"
    "os"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/awsutil"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/ec2"
    "github.com/aws/aws-sdk-go/service/s3"
    "github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func ListRegions() []string {
    svc := ec2.New(session.New(), &aws.Config{Region: aws.String("ap-northeast-1")})

    params := &ec2.DescribeRegionsInput{
        DryRun: aws.Bool(false),
    }
    resp, err := svc.DescribeRegions(params)
    if err != nil {
        fmt.Println(err.Error())
        return nil
    }

    regions := []string{}
    for _, r := range resp.Regions {
        regions = append(regions, *r.RegionName)
    }
    return regions
}

func ListKeys(region string) []string {
    svc := ec2.New(session.New(), &aws.Config{Region: aws.String(region)})

    params := &ec2.DescribeKeyPairsInput{
        DryRun: aws.Bool(false),
    }
    resp, err := svc.DescribeKeyPairs(params)
    if err != nil {
        fmt.Println(err.Error())
        return []string{}
    }

    keyPairs := []string{}
    for _, k := range resp.KeyPairs {
        keyPairs = append(keyPairs, *k.KeyName)
    }
    return keyPairs
}

func ListVPCs(region string) []string {
    svc := ec2.New(session.New(), &aws.Config{Region: aws.String(region)})

    params := &ec2.DescribeVpcsInput{
        DryRun: aws.Bool(false),
    }
    resp, err := svc.DescribeVpcs(params)

    if err != nil {
        panic(err)
    }

    vpcs := []string{}
    for _, c := range resp.Vpcs {
        vpcs = append(vpcs, *c.VpcId)
    }

    return vpcs
}

func ListAvailabilityZones(region string) []string {
    svc := ec2.New(session.New(), &aws.Config{Region: aws.String(region)})

    params := &ec2.DescribeAvailabilityZonesInput{
        DryRun: aws.Bool(false),
    }
    resp, err := svc.DescribeAvailabilityZones(params)

    if err != nil {
        panic(err)
    }

    azs := []string{}
    for _, z := range resp.AvailabilityZones {
        azs = append(azs, *z.ZoneName)
    }

    return azs
}

func ListSubnets(region string, vpcId string, az string) []string {
    svc := ec2.New(session.New(), &aws.Config{Region: aws.String(region)})

    params := &ec2.DescribeSubnetsInput{
        DryRun: aws.Bool(false),
        Filters: []*ec2.Filter{
            {
                Name: aws.String("availabilityZone"),
                Values: []*string{
                    aws.String(az),
                },
            },
            {
                Name: aws.String("vpc-id"),
                Values: []*string{
                    aws.String(vpcId),
                },
            },
        },
    }
    resp, err := svc.DescribeSubnets(params)

    if err != nil {
        panic(err)
    }

    subnets := []string{}
    for _, s := range resp.Subnets {
        subnets = append(subnets, *s.SubnetId)
    }

    return subnets
}

func PutFilesToS3(region string, bucketName string, path string, filenames []string) {
    svc := s3.New(session.New(), &aws.Config{Region: aws.String(region)})

    for _, filename := range filenames {
        file, err := os.Open(fmt.Sprintf("%s%s", path, filename))
        if err != nil {
            log.Panic(err.Error())
        }
        defer file.Close()

        resp, err := svc.PutObject(&s3.PutObjectInput{
            Bucket: aws.String(bucketName),
            Key:    aws.String(filename),
            Body:   file,
        })

        if err != nil {
            log.Panic(err.Error())
        }

        log.Println(awsutil.StringValue(resp))
    }
}

func GetFilesFromS3(region string, bucketName string, path string, filenames []string) {
    downloader := s3manager.NewDownloader(session.New(&aws.Config{Region: aws.String(region)}))

    for _, filename := range filenames {
        file, err := os.Create(fmt.Sprintf("%s%s", path, filename))
        if err != nil {
            log.Panic("Failed to create file", err)
        }
        defer file.Close()

        numBytes, err := downloader.Download(file, &s3.GetObjectInput{
            Bucket: aws.String(bucketName),
            Key:    aws.String(filename),
        })

        if err != nil {
            log.Panic(err.Error())
        }

        log.Println("Downloaded file: ", file.Name(), " (", numBytes, "bytes)")
    }
}

func prefixConfigDir(filename string) string {
    return fmt.Sprintf("%s%s", cfgDir, filename)
}

func makeCfgDirIfNotExists() {
    if _, err := os.Stat(cfgDir); os.IsNotExist(err) {
        // permission is 0700 because the config dir contains ssh keys.
        if err := os.Mkdir(cfgDir, 0700); err != nil {
            log.Panic(err)
        }
    }
}
--------------------------------------------------------------------------------
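`PutFilesToS3` and `GetFilesFromS3` above are what back the `save` and `restore` subcommands: object keys are simply the filenames, and the local path prefix is concatenated directly onto each filename. A minimal sketch of calling them — the bucket name and file list below are placeholders, not values from this repository:

```go
package main

import "github.com/mumoshu/concourse-aws/cmd"

func main() {
    // Keys in the bucket match the filenames; "./" means the current
    // directory, since the helpers join path and filename without a separator.
    files := []string{"cluster.yml", "terraform.tfstate"}
    cmd.PutFilesToS3("ap-northeast-1", "example-concourse-state", "./", files)
    cmd.GetFilesFromS3("ap-northeast-1", "example-concourse-state", "./", files)
}
```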
/README.md:
--------------------------------------------------------------------------------
### Auto-scaling Concourse CI on AWS with Terraform

http://www.slideshare.net/mumoshu/autoscaled-concourse-ci-on-aws-wo-bosh

## Recommended Usage: Using the `concourse-aws` binary

1. Install [packer](https://github.com/mitchellh/packer) and [terraform (< 0.7.0)](https://github.com/hashicorp/terraform)
  * for now, concourse-aws supports only terraform < 0.7.0 due to this issue: https://github.com/hashicorp/terraform/issues/7971

2. Create 1 VPC and 2 subnets in it

3. Clone this repository

```
git clone https://github.com/mumoshu/concourse-aws
```

4. Download the latest concourse-aws binary from [Github releases](https://github.com/mumoshu/concourse-aws/releases/) and place the binary in your concourse-aws directory.

5. Run `concourse-aws up`

```
cd concourse-aws

./build-amis.sh

./concourse-aws up
```

`concourse-aws` will then prompt you to provide the required parameters (region, availability zones, subnet ids, CIDRs, and so on).

### Upgrading Concourse workers with latest binaries

```
$ git pull --rebase origin master
$ ./build-concourse-ami.sh
$ vi cluster.yml # and update `ami_id` with the one produced by `build-concourse-ami.sh`
$ ./concourse-aws up
```

### Syncing configurations and states with S3

You may sometimes want to save and restore the configuration and state of the AWS resources, which are actually managed by terraform, to and from external storage. concourse-aws supports saving/restoring these state files with S3. You can perform these operations with the `save`/`restore` commands as shown below:

```
# Save
# this will save configurations and states of AWS resources to the S3 bucket.
./concourse-aws save --bucket <bucket-name> --bucket-region <bucket-region>
```

```
# Restore
# this will pull configurations and states of AWS resources from the S3 bucket.
./concourse-aws restore --bucket <bucket-name> --bucket-region <bucket-region>
```

Note: Saved/restored files
- `cluster.yml`: configuration file which can be generated by `concourse-aws` interactively.
- SSH keys used for communicating between concourse servers. These key files can also be generated automatically by `concourse-aws` interactively.
  - `host_key`, `host_key.pub`
  - `worker_key`, `worker_key.pub`
  - `session_signing_key`, `session_signing_key.pub`
  - `authorized_worker_keys`
- `terraform.tfstate`
  - states of AWS resources managed by terraform

## Advanced Usage: Using shell scripts and terraform directly

1. Install [packer](https://github.com/mitchellh/packer) and [terraform](https://github.com/hashicorp/terraform)

2. Create 1 VPC and 2 subnets in it

3. Set up the environment variables required by the wrapper script for terraform
```
$ cat >> .envrc <<<'
export AWS_ACCESS_KEY_ID=<your-access-key-id>
export AWS_SECRET_ACCESS_KEY=<your-secret-access-key>
export CONCOURSE_IN_ACCESS_ALLOWED_CIDRS="<your-ip>/32,<another-ip>/32"
export CONCOURSE_SUBNET_ID=<subnet-id-1>,<subnet-id-2>
export CONCOURSE_DB_SUBNET_IDS=<subnet-id-1>,<subnet-id-2>
'
```

Install [direnv](https://github.com/direnv/direnv) and allow it to read the `.envrc` created in the previous step.

```
$ direnv allow
```

4. The same for the optional ones
```
$ export CONCOURSE_WORKER_INSTANCE_PROFILE=<instance-profile-name>
```

5. Edit the terraform variables and run the following commands to build the required AMIs and to provision a Concourse CI cluster
```
$ ./build-amis.sh
$ vi ./variables.tf
$ ./terraform.sh get
$ ./terraform.sh plan
$ ./terraform.sh apply
```

6. Open your browser and confirm that the Concourse CI is running on AWS:
```
# This will extract the public hostname for your load balancer from terraform output and open your default browser
$ open http://$(terraform output | ruby -e 'puts STDIN.first.split(" = ").last')
```

7. Follow the Concourse CI tutorial and experiment as you like:
```
$ export CONCOURSE_URL=http://$(terraform output | ruby -e 'puts STDIN.first.split(" = ").last')
$ fly -t test login -c $CONCOURSE_URL
$ fly -t test set-pipeline -p hello-world -c hello.yml
$ fly -t test unpause-pipeline -p hello-world
```
See http://concourse.ci/hello-world.html for more information and the `hello.yml` referenced in the above example.

8. Modify the autoscaling groups' desired capacity to scale webs or workers out/in.

## Why did you actually create this?

[BOSH](https://github.com/cloudfoundry/bosh) looks [very promising to me, judging by the problems it solves](https://bosh.io/docs/problems.html).
However, I was too lazy to learn it for now, mainly because:

* I'm not going to use IaaS other than AWS for the time being
* learning it JUST to try Concourse CI might be too much in the short term

## You may also find those projects useful

* [Concourse CI docker image](https://github.com/MeteoGroup/concourse-ci)
* [gregarcara/concourse-docker](https://github.com/gregarcara/concourse-docker)
* [jtarchie/concourse-docker-compose](https://github.com/jtarchie/concourse-docker-compose)
  * I wonder if I could run docker containers instead of concourse ci's standalone binaries using this
  * Maybe more up-to-date than [starkandwayne/terraform-concourse](https://github.com/starkandwayne/terraform-concourse)
* [motevets/concourse-in-a-box](https://github.com/motevets/concourse-in-a-box) to quickly get concourse up-and-running on a single Ubuntu 14.04 EC2 instance

## Contributing

### Making changes

The concourse-aws binary needs to be built for every architecture and pushed to GitHub Releases manually whenever its code changes. Every significant change to the functionality should result in a bump of the version number in the `version` file.
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2016 Yusuke KUOKA

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
203 | -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | # Specify the provider and access details 2 | provider "aws" { 3 | region = "${var.aws_region}" 4 | } 5 | 6 | #module "postgres" { 7 | # source = "./postgres" 8 | # access_allowed_security_groups = "${aws_security_group.atc.id}" 9 | #} 10 | 11 | module "autoscaling_hooks" { 12 | source = "./autoscaling/hooks/enabled" 13 | target_asg_name = "${aws_autoscaling_group.worker-asg.name}" 14 | prefix = "${var.prefix}" 15 | } 16 | 17 | module "autoscaling_schedule" { 18 | source = "./autoscaling/schedule/enabled" 19 | target_asg_name = "${aws_autoscaling_group.worker-asg.name}" 20 | num_workers_during_working_time = 2 21 | max_num_workers_during_working_time = "${var.asg_max}" 22 | num_workers_during_non_working_time = 0 23 | } 24 | 25 | module "autoscaling_utilization" { 26 | source = "./autoscaling/utilization/enabled" 27 | target_asg_name = "${aws_autoscaling_group.worker-asg.name}" 28 | } 29 | 30 | resource "aws_elb" "web-elb" { 31 | name = "${var.prefix}concourse-lb" 32 | 33 | # The same availability zone as our instances 34 | # Only one of SubnetIds or AvailabilityZones may be specified 35 | #availability_zones = ["${split(",", var.availability_zones)}"] 36 | security_groups = ["${aws_security_group.external_lb.id}"] 37 | subnets = ["${split(",", var.subnet_id)}"] 38 | cross_zone_load_balancing = "true" 39 | 40 | listener { 41 | instance_port = "${var.elb_listener_instance_port}" 42 | instance_protocol = "http" 43 | lb_port = "${var.elb_listener_lb_port}" 44 | lb_protocol = "${var.elb_listener_lb_protocol}" 45 | ssl_certificate_id = "${var.ssl_certificate_arn}" 46 | } 47 | 48 | listener { 49 | instance_port = "${var.tsa_port}" 50 | instance_protocol = "tcp" 51 | lb_port = "${var.tsa_port}" 52 | lb_protocol = "tcp" 53 | } 54 | 55 | health_check { 56 | healthy_threshold = 2 57 | unhealthy_threshold = 2 58 | timeout = 3 59 | target = "TCP:${var.elb_listener_instance_port}" 60 | interval = 30 61 | } 62 | } 63 | 64 | resource "aws_autoscaling_group" "web-asg" { 65 | # See "Phasing in" an Autoscaling Group? 
https://groups.google.com/forum/#!msg/terraform-tool/7Gdhv1OAc80/iNQ93riiLwAJ
66 | # * Recreation of the launch configuration triggers recreation of this ASG and its EC2 instances
67 | # * Modification to the lc (e.g. a change to the referenced AMI) triggers recreation of this ASG
68 | name = "${var.prefix}${aws_launch_configuration.web-lc.name}${var.ami}"
69 | availability_zones = ["${split(",", var.availability_zones)}"]
70 | max_size = "${var.asg_max}"
71 | min_size = "${var.asg_min}"
72 | desired_capacity = "${var.web_asg_desired}"
73 | launch_configuration = "${aws_launch_configuration.web-lc.name}"
74 | load_balancers = ["${aws_elb.web-elb.name}"]
75 | vpc_zone_identifier = ["${split(",", var.subnet_id)}"]
76 | tag {
77 | key = "Name"
78 | value = "${var.prefix}web"
79 | propagate_at_launch = "true"
80 | }
81 | lifecycle {
82 | create_before_destroy = true
83 | }
84 | }
85 | 
86 | resource "aws_autoscaling_group" "worker-asg" {
87 | name = "${var.prefix}${aws_launch_configuration.worker-lc.name}${var.ami}"
88 | availability_zones = ["${split(",", var.availability_zones)}"]
89 | max_size = "${var.asg_max}"
90 | min_size = "${var.asg_min}"
91 | desired_capacity = "${var.worker_asg_desired}"
92 | launch_configuration = "${aws_launch_configuration.worker-lc.name}"
93 | vpc_zone_identifier = ["${split(",", var.subnet_id)}"]
94 | tag {
95 | key = "Name"
96 | value = "${var.prefix}worker"
97 | propagate_at_launch = "true"
98 | }
99 | lifecycle {
100 | create_before_destroy = true
101 | }
102 | }
103 | 
104 | resource "aws_launch_configuration" "web-lc" {
105 | # Omit launch configuration name to avoid collisions on create_before_destroy
106 | # ref. https://github.com/hashicorp/terraform/issues/1109#issuecomment-97970885
107 | #image_id = "${lookup(var.aws_amis, var.aws_region)}"
108 | image_id = "${var.ami}"
109 | instance_type = "${var.web_instance_type}"
110 | security_groups = ["${aws_security_group.default.id}","${aws_security_group.atc.id}","${aws_security_group.tsa.id}"]
111 | user_data = "${template_cloudinit_config.web.rendered}"
112 | key_name = "${var.key_name}"
113 | associate_public_ip_address = true
114 | lifecycle {
115 | create_before_destroy = true
116 | }
117 | }
118 | 
119 | resource "aws_launch_configuration" "worker-lc" {
120 | #image_id = "${lookup(var.aws_amis, var.aws_region)}"
121 | image_id = "${var.ami}"
122 | instance_type = "${var.worker_instance_type}"
123 | security_groups = ["${aws_security_group.default.id}", "${aws_security_group.worker.id}"]
124 | user_data = "${template_cloudinit_config.worker.rendered}"
125 | key_name = "${var.key_name}"
126 | associate_public_ip_address = true
127 | iam_instance_profile = "${var.worker_instance_profile}"
128 | root_block_device {
129 | # For fast booting, we use gp2
130 | volume_type = "gp2"
131 | # You need enough capacity to avoid the following error while docker export & untar'ing:
132 | #
133 | # *snip*
134 | # tar: etc/alternatives: Cannot stat: Input/output error
135 | # tar: etc: Cannot stat: Input/output error
136 | # tar: dev: Cannot stat: Input/output error
137 | # tar: bin: Cannot stat: Input/output error
138 | # tar: Exiting with failure status due to previous errors
139 | #
140 | # resource script '/opt/resource/in [/tmp/build/get]' failed: exit status 2
141 | #
142 | # Or the following error when trying to run a job:
143 | # resource_pool: creating container directory: mkdir /var/lib/concourse/linux/depot/hntrh2no0mh: no space left on device
144 | volume_size = "50"
145 | delete_on_termination = true
146 | }
147 | 
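# The hardcoded 50GB above is a working default. A sketch of how the disk size
# could be parameterized instead (hypothetical: no "worker_root_volume_size"
# variable is declared in variables.tf today, so this is illustrative only):
#
#   root_block_device {
#     volume_type           = "gp2"
#     volume_size           = "${var.worker_root_volume_size}"
#     delete_on_termination = true
#   }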
lifecycle {
148 | create_before_destroy = true
149 | }
150 | }
151 | 
152 | resource "template_file" "install_concourse" {
153 | template = "${file("${path.module}/00_install_concourse.sh.tpl")}"
154 | }
155 | 
156 | resource "template_file" "start_concourse_web" {
157 | template = "${file("${path.module}/01_start_concourse_web.sh.tpl")}"
158 | 
159 | vars {
160 | session_signing_key = "${file("${var.session_signing_key}")}"
161 | tsa_host_key = "${file("${var.tsa_host_key}")}"
162 | tsa_authorized_keys = "${file("${var.tsa_authorized_keys}")}"
163 | postgres_data_source = "postgres://${var.db_username}:${var.db_password}@${aws_db_instance.default.endpoint}/concourse"
164 | external_url = "${var.elb_listener_lb_protocol}://${element(split(",","${aws_elb.web-elb.dns_name},${var.custom_external_domain_name}"), var.use_custom_external_domain_name)}${element(split(",",",:${var.elb_listener_lb_port}"), var.use_custom_elb_port)}"
165 | basic_auth_username = "${var.basic_auth_username}"
166 | basic_auth_password = "${var.basic_auth_password}"
167 | github_auth_client_id = "${var.github_auth_client_id}"
168 | github_auth_client_secret = "${var.github_auth_client_secret}"
169 | github_auth_organizations = "${var.github_auth_organizations}"
170 | github_auth_teams = "${var.github_auth_teams}"
171 | github_auth_users = "${var.github_auth_users}"
172 | }
173 | }
174 | 
175 | resource "template_file" "start_concourse_worker" {
176 | template = "${file("${path.module}/02_start_concourse_worker.sh.tpl")}"
177 | 
178 | vars {
179 | tsa_host = "${aws_elb.web-elb.dns_name}"
180 | tsa_public_key = "${file("${var.tsa_public_key}")}"
181 | tsa_worker_private_key = "${file("${var.tsa_worker_private_key}")}"
182 | }
183 | }
184 | 
185 | resource "template_cloudinit_config" "web" {
186 | # Keep both turned off until https://github.com/hashicorp/terraform/issues/4794 is fixed
187 | gzip = false
188 | base64_encode = false
189 | 
190 | part {
191 | content_type = "text/x-shellscript"
192 | content = "${template_file.install_concourse.rendered}"
193 | }
194 | 
195 | part {
196 | content_type = "text/x-shellscript"
197 | content = "${template_file.start_concourse_web.rendered}"
198 | }
199 | 
200 | lifecycle {
201 | create_before_destroy = true
202 | }
203 | }
204 | 
205 | resource "template_cloudinit_config" "worker" {
206 | # Keep both turned off until https://github.com/hashicorp/terraform/issues/4794 is fixed
207 | gzip = false
208 | base64_encode = false
209 | 
210 | part {
211 | content_type = "text/x-shellscript"
212 | content = "${template_file.install_concourse.rendered}"
213 | }
214 | 
215 | part {
216 | content_type = "text/x-shellscript"
217 | content = "${template_file.start_concourse_worker.rendered}"
218 | }
219 | 
220 | lifecycle {
221 | create_before_destroy = true
222 | }
223 | }
224 | 
225 | resource "aws_security_group" "default" {
226 | name_prefix = "${var.prefix}default"
227 | description = "concourse ${var.prefix}default"
228 | vpc_id = "${var.vpc_id}"
229 | 
230 | # SSH access from specific CIDRs only
231 | ingress {
232 | from_port = 22
233 | to_port = 22
234 | protocol = "tcp"
235 | cidr_blocks = [ "${split(",", var.in_access_allowed_cidrs)}" ]
236 | }
237 | 
238 | # outbound internet access
239 | egress {
240 | from_port = 0
241 | to_port = 0
242 | protocol = "-1"
243 | cidr_blocks = ["0.0.0.0/0"]
244 | }
245 | 
246 | lifecycle {
247 | create_before_destroy = true
248 | }
249 | }
250 | 
251 | resource "aws_security_group" "atc" {
252 | name_prefix = "${var.prefix}atc"
253 | description = "concourse 
${var.prefix}atc"
254 | vpc_id = "${var.vpc_id}"
255 | 
256 | lifecycle {
257 | create_before_destroy = true
258 | }
259 | }
260 | 
261 | resource "aws_security_group_rule" "allow_external_lb_to_atc_access" {
262 | type = "ingress"
263 | from_port = "${var.elb_listener_instance_port}"
264 | to_port = "${var.elb_listener_instance_port}"
265 | protocol = "tcp"
266 | 
267 | security_group_id = "${aws_security_group.tsa.id}"
268 | source_security_group_id = "${aws_security_group.external_lb.id}"
269 | }
270 | 
271 | resource "aws_security_group_rule" "allow_atc_to_worker_access" {
272 | type = "ingress"
273 | from_port = "0"
274 | to_port = "65535"
275 | protocol = "tcp"
276 | 
277 | security_group_id = "${aws_security_group.worker.id}"
278 | source_security_group_id = "${aws_security_group.atc.id}"
279 | }
280 | 
281 | resource "aws_security_group" "tsa" {
282 | name_prefix = "${var.prefix}tsa"
283 | description = "concourse ${var.prefix}tsa"
284 | vpc_id = "${var.vpc_id}"
285 | 
286 | # outbound internet access
287 | egress {
288 | from_port = 0
289 | to_port = 0
290 | protocol = "-1"
291 | cidr_blocks = ["0.0.0.0/0"]
292 | }
293 | 
294 | lifecycle {
295 | create_before_destroy = true
296 | }
297 | }
298 | 
299 | resource "aws_security_group_rule" "allow_worker_to_tsa_access" {
300 | type = "ingress"
301 | from_port = 2222
302 | to_port = 2222
303 | protocol = "tcp"
304 | 
305 | security_group_id = "${aws_security_group.tsa.id}"
306 | source_security_group_id = "${aws_security_group.worker.id}"
307 | }
308 | 
309 | resource "aws_security_group_rule" "allow_external_lb_to_tsa_access" {
310 | type = "ingress"
311 | from_port = 2222
312 | to_port = 2222
313 | protocol = "tcp"
314 | 
315 | security_group_id = "${aws_security_group.tsa.id}"
316 | source_security_group_id = "${aws_security_group.external_lb.id}"
317 | }
318 | 
319 | resource "aws_security_group" "worker" {
320 | name_prefix = "${var.prefix}worker"
321 | description = "concourse ${var.prefix}worker"
322 | vpc_id = "${var.vpc_id}"
323 | 
324 | # outbound internet access
325 | egress {
326 | from_port = 0
327 | to_port = 0
328 | protocol = "-1"
329 | cidr_blocks = ["0.0.0.0/0"]
330 | }
331 | 
332 | lifecycle {
333 | create_before_destroy = true
334 | }
335 | }
336 | 
337 | resource "aws_security_group" "external_lb" {
338 | name_prefix = "${var.prefix}lb"
339 | description = "concourse ${var.prefix}lb"
340 | 
341 | vpc_id = "${var.vpc_id}"
342 | 
343 | # HTTP access from specific CIDRs only
344 | ingress {
345 | from_port = "${var.elb_listener_lb_port}"
346 | to_port = "${var.elb_listener_lb_port}"
347 | protocol = "tcp"
348 | cidr_blocks = [ "${split(",", var.in_access_allowed_cidrs)}" ]
349 | }
350 | 
351 | ingress {
352 | from_port = "${var.tsa_port}"
353 | to_port = "${var.tsa_port}"
354 | protocol = "tcp"
355 | cidr_blocks = ["0.0.0.0/0"]
356 | }
357 | 
358 | egress {
359 | from_port = 0
360 | to_port = 0
361 | protocol = "-1"
362 | cidr_blocks = ["0.0.0.0/0"]
363 | }
364 | 
365 | lifecycle {
366 | create_before_destroy = true
367 | }
368 | }
369 | 
370 | resource "aws_security_group" "db" {
371 | name_prefix = "${var.prefix}db"
372 | description = "concourse ${var.prefix}db"
373 | vpc_id = "${var.vpc_id}"
374 | 
375 | # outbound internet access
376 | egress {
377 | from_port = 0
378 | to_port = 0
379 | protocol = "-1"
380 | cidr_blocks = ["0.0.0.0/0"]
381 | }
382 | 
383 | lifecycle {
384 | create_before_destroy = true
385 | }
386 | }
387 | 
388 | resource "aws_security_group_rule" "allow_db_access_from_atc" {
389 | type = "ingress"
390 | from_port = 5432 
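# (note) both port bounds of this rule are pinned to 5432, PostgreSQL's default
# port, so only Postgres traffic originating from the ATC security group is admitted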
391 | to_port = 5432
392 | protocol = "tcp"
393 | 
394 | security_group_id = "${aws_security_group.db.id}"
395 | source_security_group_id = "${aws_security_group.atc.id}"
396 | }
397 | 
398 | resource "aws_db_instance" "default" {
399 | depends_on = ["aws_security_group.db"]
400 | identifier = "${var.prefix}db"
401 | allocated_storage = "10"
402 | engine = "postgres"
403 | engine_version = "${var.db_engine_version}"
404 | instance_class = "${var.db_instance_class}"
405 | name = "concourse"
406 | username = "${var.db_username}"
407 | password = "${var.db_password}"
408 | vpc_security_group_ids = ["${aws_security_group.db.id}"]
409 | db_subnet_group_name = "${aws_db_subnet_group.db.id}"
410 | }
411 | 
412 | resource "aws_db_subnet_group" "db" {
413 | name = "${var.prefix}db"
414 | description = "group of subnets for concourse db"
415 | subnet_ids = ["${split(",", var.db_subnet_ids)}"]
416 | }
417 | -------------------------------------------------------------------------------- /cmd/up.go: --------------------------------------------------------------------------------
1 | // Copyright © 2016 Yusuke KUOKA
2 | //
3 | // Licensed under the Apache License, Version 2.0 (the "License");
4 | // you may not use this file except in compliance with the License.
5 | // You may obtain a copy of the License at
6 | //
7 | // http://www.apache.org/licenses/LICENSE-2.0
8 | //
9 | // Unless required by applicable law or agreed to in writing, software
10 | // distributed under the License is distributed on an "AS IS" BASIS,
11 | // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 | // See the License for the specific language governing permissions and
13 | // limitations under the License.
14 | 
15 | package cmd
16 | 
17 | import (
18 | "fmt"
19 | "io/ioutil"
20 | "log"
21 | "os"
22 | "os/exec"
23 | "strconv"
24 | "strings"
25 | 
26 | "github.com/mumoshu/concourse-aws/concourse"
27 | "github.com/spf13/cobra"
28 | "gopkg.in/yaml.v2"
29 | )
30 | 
31 | // upCmd represents the up command
32 | var upCmd = &cobra.Command{
33 | Use: "up",
34 | Short: "Spin up or update Concourse CI on AWS",
35 | Long: `This spins up scalable Concourse CI servers interactively.
This command also supports updating the state of existing Concourse CI servers (implemented with terraform plan and apply).`,
36 | Run: Run,
37 | }
38 | 
39 | func Run(cmd *cobra.Command, args []string) {
40 | c, err := concourse.ConfigFromFile(prefixConfigDir("cluster.yml"))
41 | if err != nil && os.IsNotExist(err) {
42 | fmt.Println("Creating cluster.yml")
43 | c = InteractivelyCreateConfig()
44 | WriteConfigFile(c, prefixConfigDir("cluster.yml"))
45 | }
46 | // fmt.Printf("config:%+v", c)
47 | TerraformRun("plan", c)
48 | possibleAnswers := []string{"y", "n"}
49 | yesOrNo := AskForRequiredInput("Apply?", AskOptions{Candidates: possibleAnswers, Validate: mustBeIncludedIn(possibleAnswers), Default: "y"})
50 | if yesOrNo == "y" {
51 | TerraformRun("apply", c)
52 | }
53 | }
54 | 
55 | func mustBeIncludedIn(candidates []string) func(string) error {
56 | return func(item string) error {
57 | for _, r := range candidates {
58 | if r == item {
59 | return nil
60 | }
61 | }
62 | return fmt.Errorf("%s must be one of: %v", item, candidates)
63 | }
64 | }
65 | 
66 | func InteractivelyCreateConfig() *concourse.Config {
67 | prefix := AskForRequiredInput("Prefix", AskOptions{Default: "concourse-"})
68 | 
69 | regions := ListRegions()
70 | region := AskForRequiredInput("Region", AskOptions{Candidates: regions, Validate: mustBeIncludedIn(regions), Default: "ap-northeast-1"})
71 | 
72 | out, _ := exec.Command("./my-latest-ami.sh").CombinedOutput()
73 | latestAmiId := strings.TrimSpace(string(out))
74 | amiId := AskForRequiredInput("AMI ID", AskOptions{
75 | Default: latestAmiId,
76 | Candidates: []string{latestAmiId},
77 | })
78 | 
79 | keys := ListKeys(region)
80 | keyName := AskForRequiredInput("KeyName", AskOptions{Candidates: keys, Validate: mustBeIncludedIn(keys)})
81 | 
82 | vpcIds := ListVPCs(region)
83 | vpcId := AskForRequiredInput("VPC ID", AskOptions{Candidates: vpcIds, Validate: mustBeIncludedIn(vpcIds)})
84 | 
85 | candidateZones := ListAvailabilityZones(region)
86 | subnetIds := []string{}
87 | availabilityZones := []string{}
88 | 
89 | for i := 1; i <= 2; i++ {
90 | az := AskForRequiredInput(fmt.Sprintf("Availability Zone %d", i), AskOptions{Candidates: candidateZones, Validate: mustBeIncludedIn(candidateZones)})
91 | b := candidateZones[:0] // in-place filter: drop the AZ chosen above
92 | for _, x := range candidateZones {
93 | if x != az {
94 | b = append(b, x)
95 | }
96 | }
97 | candidateZones = b // reassign, so the chosen AZ is not offered again next iteration (the original built b but never used it)
98 | candidateSubnets := ListSubnets(region, vpcId, az)
99 | subnetId := AskForRequiredInput(fmt.Sprintf("Subnet %d", i), AskOptions{Candidates: candidateSubnets, Validate: mustBeIncludedIn(candidateSubnets)})
100 | 
101 | availabilityZones = append(availabilityZones, az)
102 | subnetIds = append(subnetIds, subnetId)
103 | }
104 | 
105 | accessibleCIDRS := AskForRequiredInput("AccessibleCIDRS(comma separated)", AskOptions{Default: fmt.Sprintf("%s/32", ObtainExternalIp())})
106 | 
107 | dbInstanceClass := AskForRequiredInput("DB Instance Class", AskOptions{Default: "db.t2.micro"})
108 | dbEngineVersion := AskForRequiredInput("DB Engine(Postgres) Version", AskOptions{Default: "9.4.7"})
109 | 
110 | webInstanceType := AskForRequiredInput("Concourse Web Instance Type", AskOptions{Default: "t2.micro"})
111 | workerInstanceType := AskForRequiredInput("Concourse Worker Instance Type", AskOptions{Default: "t2.micro"})
112 | 
113 | asgMin := AskForRequiredInput("Min number of servers in ASG(Web, Worker)", AskOptions{Default: "0"})
114 | asgMax := AskForRequiredInput("Max number of servers in ASG(Web, Worker)", AskOptions{Default: "2"})
115 | webAsgDesired := AskForRequiredInput("Desired number of 
web servers in ASG", AskOptions{Default: "1"})
116 | workerAsgDesired := AskForRequiredInput("Desired number of worker servers in ASG", AskOptions{Default: "2"})
117 | 
118 | possibleElbProtocols := []string{"http", "https"}
119 | defaultElbPorts := map[string]string{
120 | "http": "80",
121 | "https": "443",
122 | }
123 | elbProtocol := AskForRequiredInput("Protocol for ELB", AskOptions{
124 | Default: possibleElbProtocols[0],
125 | Candidates: possibleElbProtocols,
126 | Validate: mustBeIncludedIn(possibleElbProtocols),
127 | })
128 | elbPort, err := strconv.Atoi(AskForRequiredInput("Port for ELB", AskOptions{
129 | Default: defaultElbPorts[elbProtocol],
130 | }))
131 | if err != nil {
132 | log.Fatal(err)
133 | panic(err)
134 | }
135 | sslCertificateArn := ""
136 | if elbProtocol == "https" {
137 | sslCertificateArn = AskForRequiredInput("SSL Certificate ARN", AskOptions{Default: ""})
138 | }
139 | customExternalDomainName := AskForRequiredInput("Custom External Domain Name(just hit enter to skip, e.g. some.cool.com)", AskOptions{Default: ""})
140 | 
141 | username := AskForRequiredInput("Basic Auth Username(just hit enter to skip)", AskOptions{Default: ""})
142 | password := ""
143 | if username != "" {
144 | password = AskForRequiredInput("Basic Auth Password", AskOptions{Default: "bar"})
145 | }
146 | 
147 | ghClientId := AskForRequiredInput("Github Auth Client Id(just hit enter to skip)", AskOptions{Default: ""})
148 | ghClientSecret := ""
149 | ghOrgs := []string{}
150 | ghTeams := []string{}
151 | ghUsers := []string{}
152 | if ghClientId != "" {
153 | ghClientSecret = AskForRequiredInput("Github Auth Client Secret(just hit enter to skip)", AskOptions{Default: ""})
154 | ghOrgsInput := AskForRequiredInput("Github Auth Organizations(comma separated)", AskOptions{Default: ""})
155 | if ghOrgsInput != "" {
156 | ghOrgs = strings.Split(ghOrgsInput, ",")
157 | }
158 | 
159 | ghTeamsInput := AskForRequiredInput("Github Auth Teams(comma separated, e.g. ORG/TEAM)", AskOptions{Default: ""})
160 | if ghTeamsInput != "" {
161 | ghTeams = strings.Split(ghTeamsInput, ",")
162 | }
163 | 
164 | ghUsersInput := AskForRequiredInput("Github Auth Users(comma separated)", AskOptions{Default: ""})
165 | if ghUsersInput != "" {
166 | ghUsers = strings.Split(ghUsersInput, ",")
167 | }
168 | }
169 | 
170 | if username == "" && ghClientId == "" {
171 | fmt.Println("WARNING WARNING WARNING WARNING WARNING")
172 | fmt.Println("!!! 
No Authentication configured !!!")
173 | fmt.Println("WARNING WARNING WARNING WARNING WARNING")
174 | possibleAnswers := []string{"y", "n"}
175 | yesOrNo := AskForRequiredInput("Do you really want to proceed?", AskOptions{Candidates: possibleAnswers, Validate: mustBeIncludedIn(possibleAnswers), Default: "n"})
176 | if yesOrNo == "n" {
177 | os.Exit(1)
178 | }
179 | }
180 | 
181 | return &concourse.Config{
182 | Prefix: prefix,
183 | Region: region,
184 | KeyName: keyName,
185 | AccessibleCIDRS: accessibleCIDRS,
186 | VpcId: vpcId,
187 | SubnetIds: subnetIds,
188 | AvailabilityZones: availabilityZones,
189 | DBInstanceClass: dbInstanceClass,
190 | DBEngineVersion: dbEngineVersion,
191 | WebInstanceType: webInstanceType,
192 | WorkerInstanceType: workerInstanceType,
193 | AMI: amiId,
194 | AsgMin: asgMin,
195 | AsgMax: asgMax,
196 | WebAsgDesired: webAsgDesired,
197 | WorkerAsgDesired: workerAsgDesired,
198 | ElbProtocol: elbProtocol,
199 | ElbPort: elbPort,
200 | CustomExternalDomainName: customExternalDomainName,
201 | SSLCertificateArn: sslCertificateArn,
202 | BasicAuthUsername: username,
203 | BasicAuthPassword: password,
204 | GithubAuthClientId: ghClientId,
205 | GithubAuthClientSecret: ghClientSecret,
206 | GithubAuthOrganizations: ghOrgs,
207 | GithubAuthTeams: ghTeams,
208 | GithubAuthUsers: ghUsers,
209 | }
210 | }
211 | 
212 | func WriteConfigFile(config *concourse.Config, path string) {
213 | d, err := yaml.Marshal(&config)
214 | if err != nil {
215 | panic(err)
216 | }
217 | 
218 | makeCfgDirIfNotExists()
219 | 
220 | if err := ioutil.WriteFile(path, []byte(d), 0644); err != nil {
221 | panic(err) // report WriteFile's own error (the original panicked with the already-nil yaml.Marshal error)
222 | }
223 | }
224 | 
225 | func SSHGenKeyIfNotExist(keyFileName string) {
226 | if _, err := os.Stat(keyFileName); os.IsNotExist(err) {
227 | log.Println(fmt.Sprintf("generating ssh key: %s", keyFileName))
228 | args := []string{
229 | "-t", "rsa",
230 | "-f", keyFileName,
231 | "-N", "",
232 | }
233 | cmd := exec.Command("ssh-keygen", args...)
234 | cmd.Stdout = os.Stdout
235 | cmd.Stderr = os.Stderr
236 | if err := cmd.Run(); err != nil {
237 | log.Fatal(err)
238 | panic(err)
239 | }
240 | }
241 | }
242 | 
243 | func TerraformRun(subcommand string, c *concourse.Config) {
244 | // auto ssh key creation
245 | SSHGenKeyIfNotExist(prefixConfigDir("host_key"))
246 | SSHGenKeyIfNotExist(prefixConfigDir("worker_key"))
247 | SSHGenKeyIfNotExist(prefixConfigDir("session_signing_key"))
248 | cp := exec.Command("cp",
249 | prefixConfigDir("worker_key.pub"),
250 | prefixConfigDir("authorized_worker_keys"))
251 | cp.Stdout = os.Stdout
252 | cp.Stderr = os.Stderr
253 | if err := cp.Run(); err != nil {
254 | log.Fatal(err)
255 | panic(err)
256 | }
257 | 
258 | useCustomExternalDomainName := 0
259 | if len(c.CustomExternalDomainName) > 0 {
260 | useCustomExternalDomainName = 1
261 | }
262 | useCustomElbPort := 0
263 | if !(c.ElbPort == 80 || c.ElbPort == 443) {
264 | useCustomElbPort = 1
265 | }
266 | 
267 | // for backward compatibility
268 | // instance_type will be copied to web_instance_type and worker_instance_type only if they are not already set.
269 | if c.InstanceType != "" {
270 | fmt.Println("WARNING: instance_type is deprecated. 
Use web_instance_type and worker_instance_type instead") 271 | if c.WebInstanceType == "" { 272 | c.WebInstanceType = c.InstanceType 273 | } 274 | if c.WorkerInstanceType == "" { 275 | c.WorkerInstanceType = c.InstanceType 276 | } 277 | } 278 | 279 | args := []string{ 280 | subcommand, 281 | "-state", fmt.Sprintf("%s%s", cfgDir, "terraform.tfstate"), 282 | "-var", fmt.Sprintf("aws_region=%s", c.Region), 283 | "-var", fmt.Sprintf("availability_zones=%s", strings.Join(c.AvailabilityZones, ",")), 284 | "-var", fmt.Sprintf("key_name=%s", c.KeyName), 285 | "-var", fmt.Sprintf("subnet_id=%s", strings.Join(c.SubnetIds, ",")), 286 | "-var", fmt.Sprintf("vpc_id=%s", c.VpcId), 287 | "-var", fmt.Sprintf("db_instance_class=%s", c.DBInstanceClass), 288 | "-var", fmt.Sprintf("db_engine_version=%s", c.DBEngineVersion), 289 | "-var", fmt.Sprintf("web_instance_type=%s", c.WebInstanceType), 290 | "-var", fmt.Sprintf("worker_instance_type=%s", c.WorkerInstanceType), 291 | "-var", "db_username=concourse", 292 | "-var", "db_password=concourse", 293 | "-var", fmt.Sprintf("db_subnet_ids=%s", strings.Join(c.SubnetIds, ",")), 294 | "-var", fmt.Sprintf("tsa_host_key=%s", prefixConfigDir("host_key")), 295 | "-var", fmt.Sprintf("session_signing_key=%s", prefixConfigDir("session_signing_key")), 296 | "-var", fmt.Sprintf("tsa_authorized_keys=%s", prefixConfigDir("worker_key.pub")), 297 | "-var", fmt.Sprintf("tsa_public_key=%s", prefixConfigDir("host_key.pub")), 298 | "-var", fmt.Sprintf("tsa_worker_private_key=%s", prefixConfigDir("worker_key")), 299 | "-var", fmt.Sprintf("ami=%s", c.AMI), 300 | "-var", fmt.Sprintf("in_access_allowed_cidrs=%s", c.AccessibleCIDRS), 301 | "-var", fmt.Sprintf("elb_listener_lb_protocol=%s", c.ElbProtocol), 302 | "-var", fmt.Sprintf("elb_listener_lb_port=%d", c.ElbPort), 303 | "-var", fmt.Sprintf("use_custom_elb_port=%d", useCustomElbPort), 304 | "-var", fmt.Sprintf("ssl_certificate_arn=%s", c.SSLCertificateArn), 305 | "-var", fmt.Sprintf("use_custom_external_domain_name=%d", useCustomExternalDomainName), 306 | "-var", fmt.Sprintf("custom_external_domain_name=%s", c.CustomExternalDomainName), 307 | "-var", fmt.Sprintf("worker_instance_profile=%s", c.WorkerInstanceProfile), 308 | "-var", fmt.Sprintf("basic_auth_username=%s", c.BasicAuthUsername), 309 | "-var", fmt.Sprintf("basic_auth_password=%s", c.BasicAuthPassword), 310 | "-var", fmt.Sprintf("github_auth_client_id=%s", c.GithubAuthClientId), 311 | "-var", fmt.Sprintf("github_auth_client_secret=%s", c.GithubAuthClientSecret), 312 | } 313 | 314 | if len(c.Prefix) > 0 { 315 | args = append(args, 316 | "-var", fmt.Sprintf("prefix=%s", c.Prefix), 317 | ) 318 | } 319 | 320 | if len(c.AsgMin) > 0 { 321 | args = append(args, 322 | "-var", fmt.Sprintf("asg_min=%s", c.AsgMin), 323 | ) 324 | } 325 | if len(c.AsgMax) > 0 { 326 | args = append(args, 327 | "-var", fmt.Sprintf("asg_max=%s", c.AsgMax), 328 | ) 329 | } 330 | if len(c.WebAsgDesired) > 0 { 331 | args = append(args, 332 | "-var", fmt.Sprintf("web_asg_desired=%s", c.WebAsgDesired), 333 | ) 334 | } 335 | if len(c.WorkerAsgDesired) > 0 { 336 | args = append(args, 337 | "-var", fmt.Sprintf("worker_asg_desired=%s", c.WorkerAsgDesired), 338 | ) 339 | } 340 | 341 | if len(c.GithubAuthOrganizations) > 0 { 342 | args = append(args, 343 | "-var", fmt.Sprintf("github_auth_organizations=%s", strings.Join(c.GithubAuthOrganizations, ",")), 344 | ) 345 | } 346 | if len(c.GithubAuthTeams) > 0 { 347 | args = append(args, 348 | "-var", fmt.Sprintf("github_auth_teams=%s", strings.Join(c.GithubAuthTeams, ",")), 
349 | ) 350 | } 351 | if len(c.GithubAuthUsers) > 0 { 352 | args = append(args, 353 | "-var", fmt.Sprintf("github_auth_users=%s", strings.Join(c.GithubAuthUsers, ",")), 354 | ) 355 | } 356 | 357 | log.Println("Running terraform get") 358 | get := exec.Command("terraform", "get") 359 | get.Stdout = os.Stdout 360 | get.Stderr = os.Stderr 361 | getErr := get.Run() 362 | if getErr != nil { 363 | log.Fatal(getErr) 364 | panic(getErr) 365 | } 366 | 367 | log.Println(fmt.Sprintf("Running terraform with args: %s", args)) 368 | cmd := exec.Command("terraform", args...) 369 | cmd.Stdout = os.Stdout 370 | cmd.Stderr = os.Stderr 371 | err := cmd.Run() 372 | if err != nil { 373 | log.Fatal(err) 374 | panic(err) 375 | } 376 | } 377 | 378 | func init() { 379 | RootCmd.AddCommand(upCmd) 380 | 381 | // Here you will define your flags and configuration settings. 382 | 383 | // Cobra supports Persistent Flags which will work for this command 384 | // and all subcommands, e.g.: 385 | // upCmd.PersistentFlags().String("foo", "", "A help for foo") 386 | 387 | // Cobra supports local flags which will only run when this command 388 | // is called directly, e.g.: 389 | // upCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") 390 | 391 | } 392 | --------------------------------------------------------------------------------