├── .editorconfig ├── .example-aws-iam-policy.json ├── .example.env ├── .github └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── .rubocop.yml ├── .rubocop_todo.yml ├── .ruby-version ├── .travis.yml ├── CODE_OF_CONDUCT.md ├── Gemfile ├── Gemfile.lock ├── LICENSE ├── Makefile ├── README.md ├── assets ├── bits │ ├── apt_force_confdef.conf │ ├── ensure-tfw.bash │ ├── maybe-reboot.bash │ ├── terraform-user-bootstrap.bash │ └── travis-combined-env ├── nat │ └── var │ │ └── tmp │ │ ├── nat-conntracker-confs │ │ ├── fail2ban-action-iptables-blocktype.local │ │ ├── fail2ban-filter-nat-conntracker.conf │ │ ├── fail2ban-jail-nat-conntracker.conf │ │ └── fail2ban.local │ │ └── nftables.conf ├── rsyslog │ └── rsyslog.conf └── travis-worker │ ├── bats_helpers.bash │ ├── check-docker-health.bash │ ├── check-docker-health.bats │ ├── check-docker-health.crontab │ ├── check-unregister-netdevice.bash │ ├── check-unregister-netdevice.bats │ ├── clean-up-containers.bash │ ├── clean-up-containers.bats │ ├── clean-up-containers.crontab │ ├── high-cpu-check.bash │ ├── high-cpu-check.crontab │ ├── rsyslog-watch-upstart.conf │ ├── tfw-admin-clean-containers.service │ ├── travis-worker-wrapper │ ├── travis-worker.service │ ├── travis-worker@.service │ └── unregister-netdevice.crontab ├── aws.mk ├── bin ├── build-cache-configure ├── chirp-assign-aws-creds ├── ensure-terraform ├── env-url-to-parts ├── gce-export-net ├── gce-export-workers ├── gce-import-net ├── gce-import-workers ├── gcloud-nats-by-zone ├── gcloud-recreate-nat-instances ├── generate-github-ssh-users ├── generate-latest-docker-image-tags ├── generate-latest-gce-images ├── generate-latest-worker-version ├── generate-macstadium-nat-ips ├── generate-tfvars ├── heroku-dump-shell-config ├── heroku-wait-deploy-scale ├── lookup-gce-project ├── mac-worker-pool-size ├── nat-conntracker-configure ├── post-flight ├── pre-flight-checks ├── set-k8s-context ├── show-current-docker-images ├── show-proposed-docker-images ├── tfplandiff ├── travis-env-set-docker-config-secrets ├── travis-worker-verify-config └── write-config-files ├── build-caching-production-1 ├── Makefile └── main.tf ├── build-caching-production-2 ├── Makefile └── main.tf ├── build-caching-production-3 ├── Makefile └── main.tf ├── build-caching-staging-1 ├── Makefile └── main.tf ├── build-production-1 ├── Makefile └── main.tf ├── chirp-production-1 ├── Makefile └── main.tf ├── dns-production-0 ├── Makefile └── main.tf ├── gce-production-1 ├── Makefile ├── main.tf └── service_accounts.tf ├── gce-production-2 ├── Makefile └── main.tf ├── gce-production-3 ├── Makefile └── main.tf ├── gce-production-net-1 ├── Makefile ├── main.tf └── nat-conntracker.env ├── gce-production-net-2 ├── Makefile ├── main.tf └── nat-conntracker.env ├── gce-production-net-3 ├── Makefile ├── main.tf └── nat-conntracker.env ├── gce-staging-1 ├── Makefile └── main.tf ├── gce-staging-net-1 ├── Makefile ├── main.tf └── nat-conntracker.env ├── gce.mk ├── lib └── heroku_client.rb ├── macstadium-prod-1 ├── Makefile ├── dhcp.tf └── main.tf ├── macstadium-prod-2 ├── Makefile ├── dhcp.tf └── main.tf ├── macstadium-staging ├── .example.env ├── Makefile └── main.tf ├── modules ├── aws_iam_user_s3 │ └── main.tf ├── fair_use_reporting │ └── account.tf ├── gce_kubernetes │ ├── cluster.tf │ ├── context.tf │ ├── service_accounts.tf │ └── variables.tf ├── gce_net_services │ ├── nat.tf │ ├── networks.tf │ └── variables.tf ├── gce_net_workers │ ├── bastion-cloud-config.yml.tpl │ ├── bastion-cloud-init.bash │ ├── bastion.tf │ ├── firewall.tf │ ├── nat-cloud-config.yml.tpl │ ├── nat-cloud-init.bash │ ├── nat.tf │ ├── networks.tf │ ├── output.tf │ └── variables.tf ├── gce_project │ └── project.tf ├── gce_remote_docker │ ├── cloud-config.yml.tpl │ ├── cloud-init.bash │ ├── daemon-direct-lvm.json │ ├── docker.env │ └── main.tf ├── gce_squignix │ ├── cloud-config.yml.tpl │ ├── cloud-init.bash │ ├── main.tf │ ├── nginx-conf.d-default.conf.tpl │ ├── squignix-list-cached-urls │ ├── squignix-wrapper │ ├── squignix.env │ └── squignix.service ├── gce_worker │ ├── accounts-com-free.tf │ ├── accounts-com.tf │ ├── accounts-org.tf │ ├── kubernetes-com-free.tf │ ├── kubernetes-com-premium-c2.tf │ ├── kubernetes-com-premium-hack.tf │ ├── kubernetes-com-premium-n2.tf │ ├── kubernetes-com.tf │ ├── kubernetes-org.tf │ ├── outputs.tf │ ├── role-worker.tf │ └── variables.tf ├── gce_worker_group │ ├── gcloud-cleanup.tf │ ├── outputs.tf │ ├── variables.tf │ └── workers.tf ├── macstadium_dhcp_server │ ├── dhcp-server.tf │ ├── dhcpd.conf.tpl │ ├── install-dhcpd.sh │ ├── variables.tf │ └── vsphere.tf ├── macstadium_inventory │ ├── folders.tf │ └── variables.tf ├── macstadium_k8s_cluster │ ├── master.tf │ ├── nodes.tf │ ├── providers.tf │ ├── scripts │ │ ├── create-master.sh │ │ ├── guard.sh.tpl │ │ ├── install-docker.sh │ │ ├── install-kubernetes.sh │ │ ├── kubeadm-token.sh │ │ └── kubeadm_config.py │ └── variables.tf ├── rabbitmq_user │ └── main.tf └── warmer │ └── main.tf ├── runtests ├── terraform-common.mk ├── tmp └── .gitkeep ├── travis-ci-prod-services-1 ├── Makefile ├── backend.tf ├── modules.tf ├── providers.tf └── variables.tf ├── travis-ci-staging-services-1 ├── Makefile ├── backend.tf ├── modules.tf ├── providers.tf └── variables.tf └── trvs.mk /.editorconfig: -------------------------------------------------------------------------------- 1 | [*] 2 | indent_style = space 3 | indent_size = 2 4 | 5 | [{Makefile,*.mk}] 6 | indent_style = tab 7 | indent_size = 4 8 | -------------------------------------------------------------------------------- /.example-aws-iam-policy.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": [ 7 | "ec2:*", 8 | "elasticloadbalancing:*", 9 | "route53:*", 10 | "autoscaling:*", 11 | "cloudwatch:*", 12 | "SNS:*" 13 | ], 14 | "Resource": [ 15 | "*" 16 | ] 17 | }, 18 | { 19 | "Effect": "Allow", 20 | "Action": [ 21 | "s3:*" 22 | ], 23 | "Resource": [ 24 | "arn:aws:s3:::travis-terraform-state", 25 | "arn:aws:s3:::travis-terraform-state/*", 26 | "arn:aws:s3:::travis-shared-1-registry-images", 27 | "arn:aws:s3:::travis-shared-1-registry-images/*", 28 | "arn:aws:s3:::travis-shared-2-registry-images", 29 | "arn:aws:s3:::travis-shared-2-registry-images/*" 30 | ] 31 | }, 32 | { 33 | "Effect": "Allow", 34 | "Action": [ 35 | "iam:GetUser" 36 | ], 37 | "Resource": [ 38 | "arn:aws:iam::341288657826:user/igor-terraform" 39 | ] 40 | }, 41 | { 42 | "Effect": "Allow", 43 | "Action": [ 44 | "iam:*" 45 | ], 46 | "Resource": [ 47 | "arn:aws:iam::341288657826:role/*", 48 | "arn:aws:iam::341288657826:user/registry-shared-1", 49 | "arn:aws:iam::341288657826:user/registry-shared-2", 50 | "arn:aws:iam::*:user/cyclist-*", 51 | "arn:aws:iam::*:user/worker-*", 52 | "arn:aws:iam::*:user/build-trace-*" 53 | ] 54 | }, 55 | { 56 | "Effect": "Allow", 57 | "Action": [ 58 | "dynamodb:*" 59 | ], 60 | "Resource": [ 61 | "arn:aws:dynamodb:us-east-1:341288657826:table/travis-terraform-state" 62 | ] 63 | } 64 | ] 65 | } 66 | -------------------------------------------------------------------------------- /.example.env: -------------------------------------------------------------------------------- 1 | export AWS_ACCESS_KEY=AKFAFAFAFAFAFAFAFAFAFAFAF 2 | export AWS_SECRET_KEY=SKFAFAFAFAFAFAFAFAFAFAFAFAFAFAFAFAFAFAFAFAFAFAF 3 | export AWS_REGION=us-east-1 4 | 5 | export HEROKU_API_KEY=010101-0101-fafafafafa 6 | 7 | export GITHUB_USERNAME=yourname 8 | export GITHUB_TOKEN=AFAFAFAFAFAFAFAFAFAFAFAFAFAF 9 | 10 | export TF_VAR_ssh_user=yourusername 11 | 12 | export TF_INSTALLATION_PREFIX=$HOME/.cache/travis-terraform-config 13 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | ## What is the problem that this PR is trying to fix? 2 | 3 | ## What approach did you choose and why? 4 | 5 | ## How can you test this? 6 | 7 | ## What feedback would you like, if any? 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.tfplan 2 | *.tfstate.*.backup 3 | *.tfstate.backup 4 | *.tfvars 5 | 6 | !gce-*/instance-counts.auto.tfvars 7 | 8 | .env 9 | .envrc 10 | .terraform/ 11 | /tmp/ 12 | assets/*.tar.bz2 13 | aws-*/config/ 14 | crash.log 15 | debug.tmp 16 | gce-*/config/ 17 | build-*/config/ 18 | graph.png 19 | local-development-0/local-development-0.tfstate 20 | macstadium-*/config/ 21 | tfstate 22 | .cache/ 23 | config/ 24 | .bundle/ 25 | vendor/ 26 | -------------------------------------------------------------------------------- /.rubocop.yml: -------------------------------------------------------------------------------- 1 | inherit_from: .rubocop_todo.yml 2 | 3 | AllCops: 4 | TargetRubyVersion: 2.5 5 | 6 | Style/Documentation: 7 | Enabled: false 8 | 9 | Style/AccessModifierDeclarations: 10 | EnforcedStyle: inline 11 | -------------------------------------------------------------------------------- /.rubocop_todo.yml: -------------------------------------------------------------------------------- 1 | # This configuration was generated by 2 | # `rubocop --auto-gen-config` 3 | # on 2018-12-07 00:06:34 -0500 using RuboCop version 0.60.0. 4 | # The point is for the user to remove these configuration records 5 | # one by one as the offenses are removed from the code base. 6 | # Note that changes in the inspected code, or installation of new 7 | # versions of RuboCop, may require this file to be generated again. 8 | 9 | # Offense count: 22 10 | Metrics/AbcSize: 11 | Max: 80 12 | 13 | # Offense count: 2 14 | # Configuration parameters: CountComments, ExcludedMethods. 15 | # ExcludedMethods: refine 16 | Metrics/BlockLength: 17 | Max: 30 18 | 19 | # Offense count: 2 20 | # Configuration parameters: CountComments. 21 | Metrics/ClassLength: 22 | Max: 138 23 | 24 | # Offense count: 1 25 | Metrics/CyclomaticComplexity: 26 | Max: 7 27 | 28 | # Offense count: 30 29 | # Configuration parameters: CountComments, ExcludedMethods. 30 | Metrics/MethodLength: 31 | Max: 81 32 | 33 | # Offense count: 4 34 | # Configuration parameters: Blacklist. 35 | # Blacklist: (?-mix:(^|\s)(EO[A-Z]{1}|END)(\s|$)) 36 | Naming/HeredocDelimiterNaming: 37 | Exclude: 38 | - 'bin/gcloud-nats-by-zone' 39 | - 'bin/generate-latest-gce-images' 40 | - 'bin/write-config-files' 41 | 42 | # Offense count: 4 43 | Style/DoubleNegation: 44 | Exclude: 45 | - 'bin/gce-export-net' 46 | - 'bin/gce-export-workers' 47 | - 'bin/gce-import-net' 48 | - 'bin/gce-import-workers' 49 | 50 | # Offense count: 1 51 | # Cop supports --auto-correct. 52 | # Configuration parameters: EnforcedStyle, AllowInnerSlashes. 53 | # SupportedStyles: slashes, percent_r, mixed 54 | Style/RegexpLiteral: 55 | Exclude: 56 | - 'bin/travis-worker-verify-config' 57 | 58 | # Offense count: 9 59 | # Configuration parameters: AllowHeredoc, AllowURI, URISchemes, IgnoreCopDirectives, IgnoredPatterns. 60 | # URISchemes: http, https 61 | Metrics/LineLength: 62 | Max: 145 63 | -------------------------------------------------------------------------------- /.ruby-version: -------------------------------------------------------------------------------- 1 | 2.5.3 2 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: ruby 2 | dist: xenial 3 | group: edge 4 | cache: 5 | directories: 6 | - "${HOME}/bin" 7 | - "${HOME}/.cache/travis-terraform-config" 8 | bundler: true 9 | env: 10 | global: 11 | - PATH="${HOME}/bin:${PATH}" 12 | - TMPDIR="${TMPDIR:-/tmp}" 13 | before_install: 14 | - eval "$(gimme 1.11.1)" 15 | - make deps 16 | script: 17 | - make test 18 | - make assert-clean 19 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | In the interest of fostering an open and welcoming environment, we as 6 | contributors and maintainers pledge to making participation in our project and 7 | our community a harassment-free experience for everyone, regardless of age, body 8 | size, disability, ethnicity, gender identity and expression, level of experience, 9 | education, socio-economic status, nationality, personal appearance, race, 10 | religion, or sexual identity and orientation. 11 | 12 | ## Our Standards 13 | 14 | Examples of behavior that contributes to creating a positive environment 15 | include: 16 | 17 | * Using welcoming and inclusive language 18 | * Being respectful of differing viewpoints and experiences 19 | * Gracefully accepting constructive criticism 20 | * Focusing on what is best for the community 21 | * Showing empathy towards other community members 22 | 23 | Examples of unacceptable behavior by participants include: 24 | 25 | * The use of sexualized language or imagery and unwelcome sexual attention or 26 | advances 27 | * Trolling, insulting/derogatory comments, and personal or political attacks 28 | * Public or private harassment 29 | * Publishing others' private information, such as a physical or electronic 30 | address, without explicit permission 31 | * Other conduct which could reasonably be considered inappropriate in a 32 | professional setting 33 | 34 | ## Our Responsibilities 35 | 36 | Project maintainers are responsible for clarifying the standards of acceptable 37 | behavior and are expected to take appropriate and fair corrective action in 38 | response to any instances of unacceptable behavior. 39 | 40 | Project maintainers have the right and responsibility to remove, edit, or 41 | reject comments, commits, code, wiki edits, issues, and other contributions 42 | that are not aligned to this Code of Conduct, or to ban temporarily or 43 | permanently any contributor for other behaviors that they deem inappropriate, 44 | threatening, offensive, or harmful. 45 | 46 | ## Scope 47 | 48 | This Code of Conduct applies both within project spaces and in public spaces 49 | when an individual is representing the project or its community. Examples of 50 | representing a project or community include using an official project e-mail 51 | address, posting via an official social media account, or acting as an appointed 52 | representative at an online or offline event. Representation of a project may be 53 | further defined and clarified by project maintainers. 54 | 55 | ## Enforcement 56 | 57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 58 | reported by contacting the project team at 59 | contact+terraform-config@travis-ci.com. All complaints will be reviewed and 60 | investigated and will result in a response that is deemed necessary and 61 | appropriate to the circumstances. The project team is obligated to maintain 62 | confidentiality with regard to the reporter of an incident. Further details of 63 | specific enforcement policies may be posted separately. 64 | 65 | Project maintainers who do not follow or enforce the Code of Conduct in good 66 | faith may face temporary or permanent repercussions as determined by other 67 | members of the project's leadership. 68 | 69 | ## Attribution 70 | 71 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, 72 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html 73 | 74 | [homepage]: https://www.contributor-covenant.org 75 | 76 | -------------------------------------------------------------------------------- /Gemfile: -------------------------------------------------------------------------------- 1 | # frozen_string_literal: true 2 | 3 | source 'https://rubygems.org' 4 | 5 | gem 'rubocop' 6 | -------------------------------------------------------------------------------- /Gemfile.lock: -------------------------------------------------------------------------------- 1 | GEM 2 | remote: https://rubygems.org/ 3 | specs: 4 | ast (2.4.0) 5 | jaro_winkler (1.5.1) 6 | parallel (1.12.1) 7 | parser (2.5.3.0) 8 | ast (~> 2.4.0) 9 | powerpack (0.1.2) 10 | rainbow (3.0.0) 11 | rubocop (0.60.0) 12 | jaro_winkler (~> 1.5.1) 13 | parallel (~> 1.10) 14 | parser (>= 2.5, != 2.5.1.1) 15 | powerpack (~> 0.1) 16 | rainbow (>= 2.2.2, < 4.0) 17 | ruby-progressbar (~> 1.7) 18 | unicode-display_width (~> 1.4.0) 19 | ruby-progressbar (1.10.0) 20 | unicode-display_width (1.4.0) 21 | 22 | PLATFORMS 23 | ruby 24 | 25 | DEPENDENCIES 26 | rubocop 27 | 28 | BUNDLED WITH 29 | 1.17.1 30 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright © 2018 Travis CI GmbH 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | SHELLCHECK_URL := https://s3.amazonaws.com/travis-blue-public/binaries/ubuntu/14.04/x86_64/shellcheck-0.4.4.tar.bz2 2 | SHFMT_URL := https://github.com/mvdan/sh/releases/download/v2.5.0/shfmt_v2.5.0_linux_amd64 3 | TFPLAN2JSON_URL := github.com/travis-ci/tfplan2json 4 | TFPLAN2JSON_V11_URL := gopkg.in/travis-ci/tfplan2json.v11 5 | PROVIDER_TRAVIS_URL := github.com/travis-ci/terraform-provider-travis 6 | 7 | DEPS := \ 8 | .ensure-provider-travis \ 9 | .ensure-rubocop \ 10 | .ensure-shellcheck \ 11 | .ensure-shfmt \ 12 | .ensure-terraforms \ 13 | .ensure-tfplan2json 14 | 15 | GOPATH_BIN := $(shell go env GOPATH | awk -F: '{ print $$1 }')/bin 16 | 17 | SHELL := bash 18 | 19 | GIT := git 20 | GO := go 21 | CURL := curl 22 | 23 | .PHONY: test 24 | test: 25 | ./runtests --env .example.env 26 | 27 | include $(shell git rev-parse --show-toplevel)/terraform-common.mk 28 | 29 | .PHONY: assert-clean 30 | assert-clean: 31 | $(GIT) diff --exit-code 32 | $(GIT) diff --cached --exit-code 33 | 34 | .PHONY: deps 35 | deps: $(DEPS) 36 | 37 | .PHONY: .ensure-terraforms 38 | .ensure-terraforms: 39 | $(GIT) ls-files '*/Makefile' | \ 40 | xargs -n 1 $(MAKE) .echo-tf-version -f 2>/dev/null | \ 41 | grep -v make | \ 42 | sort | \ 43 | uniq | while read -r tf_version; do \ 44 | ./bin/ensure-terraform $${tf_version}; \ 45 | done 46 | 47 | .PHONY: .ensure-rubocop 48 | .ensure-rubocop: 49 | bundle version &>/dev/null || gem install bundler 50 | bundle exec rubocop --version &>/dev/null || bundle install 51 | 52 | .PHONY: .ensure-shellcheck 53 | .ensure-shellcheck: 54 | if [[ ! -x "$(HOME)/bin/shellcheck" ]]; then \ 55 | $(CURL) -sSL "$(SHELLCHECK_URL)" | tar -C "$(HOME)/bin" -xjf -; \ 56 | fi 57 | 58 | .PHONY: .ensure-shfmt 59 | .ensure-shfmt: $(GOPATH_BIN)/shfmt 60 | 61 | $(GOPATH_BIN)/shfmt: 62 | $(CURL) -sSL -o $@ $(SHFMT_URL) 63 | chmod +x $@ 64 | 65 | .PHONY: .ensure-tfplan2json 66 | .ensure-tfplan2json: 67 | $(SHELL) -c 'eval $$(gimme 1.11.1) && $(GO) get -u "$(TFPLAN2JSON_URL)"' 68 | $(SHELL) -c 'eval $$(gimme 1.9.7) && $(GO) get -u "$(TFPLAN2JSON_V11_URL)"' 69 | 70 | .PHONY: .ensure-provider-travis 71 | .ensure-provider-travis: 72 | $(GO) get -u "$(PROVIDER_TRAVIS_URL)" 73 | mkdir -p $(HOME)/.terraform.d/plugins 74 | cp -v $(GOPATH_BIN)/terraform-provider-travis $(HOME)/.terraform.d/plugins/ 75 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # terraform-config 2 | 3 | This contains all of the Terraform bits for hosted Travis CI :cloud:. 4 | 5 | This is what allows us to manage our cloud environments from a central place, 6 | and change them over time. It should be possible to bring up (or re-create) a 7 | complete environment with a few `make` tasks. 8 | 9 | ## Status 10 | 11 | In production. Patches welcome. Please review the [code of 12 | conduct](./CODE_OF_CONDUCT.md). 13 | 14 | ## Infrastructure 15 | 16 | Terraform manages pretty much everything that is not running on Heroku, and even 17 | a little bit of some of what is running on Heroku. We use terraform to manage 18 | our main cloud environments as well as some other services: 19 | 20 | * Amazon Web Services 21 | * Google Cloud Platform 22 | * Macstadium 23 | * OpenStack 24 | 25 | ## Requirements 26 | 27 | * [terraform](https://www.terraform.io/) 0.9.0+ 28 | * `trvs`, a Travis CI tool shrouded in mystery, along with access to 29 | secret secrets for making secret stuff 30 | * Ruby 2.2 or higher (to make sure trvs functions correctly) 31 | * [jq](https://stedolan.github.io/jq/) 32 | 33 | 34 | ## Set-up 35 | 36 | * Clone this repo 37 | * Make sure `trvs` is installed and added to your `$PATH`. (You can try running 38 | `trvs generate-config -H travis-scheduler-prod` to check) 39 | * Set all required environment variables (see the list below). This can achieved 40 | by doing something like: 41 | * Manually sourcing an `.env` file (like `.example.env`) 42 | * Using [autoenv](https://github.com/kennethreitz/autoenv) 43 | * Fetching values from your own pass vault 44 | 45 | #### Required environment variables 46 | 47 | * `AWS_ACCESS_KEY` 48 | * `AWS_REGION` 49 | * `AWS_SECRET_KEY` 50 | * `GITHUB_TOKEN` 51 | * `GITHUB_USERNAME` 52 | * `HEROKU_API_KEY` 53 | * `TF_VAR_ssh_user` 54 | * `TRAVIS_KEYCHAIN_DIR` - should be the parent directory of your keychain repos 55 | 56 | #### Notes 57 | 58 | MacStadium & GCE access creds are shared and come from keychain, not 59 | personal accounts, so there are no infrastructure-specific access keys 60 | for them. 61 | 62 | `$TF_VAR_ssh_user` isn't needed for AWS and can just be set to `$USER`, if your 63 | local username and your SSH username are the same. If you have an SSH key 64 | passphrase, consider starting `ssh-agent` and doing `ssh-add`. 65 | 66 | See http://rabexc.org/posts/using-ssh-agent for more details. 67 | 68 | 69 | ## Usage 70 | 71 | ``` bash 72 | # move into a given infrastructure directory, e.g.: 73 | cd ./gce-staging-1 74 | 75 | # terraform plan, which will automatically configure terraform from remote and 76 | # generate config files via `trvs` 77 | make plan 78 | 79 | # if it looks OK, terraform apply 80 | make apply 81 | 82 | # as some configuration is generated and cached locally, changes to 83 | # configuration sources may require cleaning before further plan/apply 84 | make clean 85 | ``` 86 | 87 | ## Troubleshooting tips 88 | 89 | * Running `make check` will verify a few common setup requirements. 90 | * Verify you have been added to the relevant Heroku organizations. 91 | * Try passing the `-d` flag to `make` to see which commands are being 92 | run. 93 | * this will show various curl commands (e.g. heroku) which may be 94 | silenced (`-fs`); try running these directly without the `-fs` 95 | flags to make sure they succeed 96 | * `terraform console` will allow you to use an interactive console for 97 | testing interpolations and looking into the existing state. 98 | * Terraform state errors may be due to insufficient AWS permissions. See the 99 | [`.example-aws-iam-policy.json`](./.example-aws-iam-policy.json) for example 100 | minimum permissions. 101 | 102 | ## License 103 | 104 | See [`./LICENSE`](./LICENSE). 105 | -------------------------------------------------------------------------------- /assets/bits/apt_force_confdef.conf: -------------------------------------------------------------------------------- 1 | # vim:filetype=aptconf 2 | Dpkg::Options { 3 | "--force-confdef"; 4 | "--force-confold"; 5 | } 6 | -------------------------------------------------------------------------------- /assets/bits/ensure-tfw.bash: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | 4 | ensure_tfw() { 5 | : "${TMPDIR:=/var/tmp}" 6 | : "${USRLOCALDIR:=/usr/local}" 7 | 8 | apt-get update -yqq 9 | apt-get install -yqq curl make 10 | 11 | rm -rf "${TMPDIR}/tfw-install" 12 | mkdir -p "${TMPDIR}/tfw-install" 13 | curl -sSL https://api.github.com/repos/travis-ci/tfw/tarball/master | 14 | tar -C "${TMPDIR}/tfw-install" --strip-components=1 -xzf - 15 | make -C "${TMPDIR}/tfw-install" install PREFIX="${USRLOCALDIR}" 16 | } 17 | 18 | ensure_tfw 19 | -------------------------------------------------------------------------------- /assets/bits/maybe-reboot.bash: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | 4 | maybe_reboot() { 5 | : "${FIRSTBOOT:=/.first-boot}" 6 | 7 | if [[ -s "${FIRSTBOOT}" ]]; then 8 | logger first boot detected: "$(cat "${FIRSTBOOT}")" 9 | return 10 | fi 11 | 12 | date -u >"${FIRSTBOOT}" 13 | systemctl reboot 14 | } 15 | 16 | maybe_reboot 17 | -------------------------------------------------------------------------------- /assets/bits/terraform-user-bootstrap.bash: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | 4 | terraform_user_bootstrap() { 5 | set -o xtrace 6 | 7 | : "${VARTMP:=/var/tmp}" 8 | : "${ETCSUDOERSD:=/etc/sudoers.d}" 9 | 10 | if ! getent passwd terraform &>/dev/null; then 11 | useradd terraform 12 | fi 13 | 14 | usermod -a -G sudo terraform 15 | 16 | if [[ "${TRAVIS_INSTANCE_TERRAFORM_PASSWORD}" ]]; then 17 | set +o xtrace 18 | echo "terraform:${TRAVIS_INSTANCE_TERRAFORM_PASSWORD}" | chpasswd || true 19 | set -o xtrace 20 | fi 21 | 22 | cat >"${ETCSUDOERSD}/terraform" <"${out}" 14 | fi 15 | 16 | for config_file in \ 17 | travis-enterprise \ 18 | "${job}-chef" \ 19 | "${job}" \ 20 | "${job}-cloud-init" \ 21 | "${job}-local"; do 22 | if [ -f "${ETC_DEFAULT}/${config_file}" ]; then 23 | echo "# ${ETC_DEFAULT}/${config_file}" 24 | sed "s/^export //;s/\"//g;s/'//g" "${ETC_DEFAULT}/${config_file}" 25 | fi 26 | done 27 | } 28 | 29 | main "$@" 30 | -------------------------------------------------------------------------------- /assets/nat/var/tmp/nat-conntracker-confs/fail2ban-action-iptables-blocktype.local: -------------------------------------------------------------------------------- 1 | [Init] 2 | blocktype = DROP 3 | -------------------------------------------------------------------------------- /assets/nat/var/tmp/nat-conntracker-confs/fail2ban-filter-nat-conntracker.conf: -------------------------------------------------------------------------------- 1 | [Definition] 2 | failregex = ^.*time=\S+ level=WARNING over threshold=\d+ src= dst=\S+ count=\d+.*$ 3 | ignoreregex = 4 | 5 | [Init] 6 | journalmatch = _SYSTEMD_UNIT=nat-conntracker.service 7 | -------------------------------------------------------------------------------- /assets/nat/var/tmp/nat-conntracker-confs/fail2ban-jail-nat-conntracker.conf: -------------------------------------------------------------------------------- 1 | [nat-conntracker] 2 | backend = systemd 3 | banaction = iptables-allports 4 | bantime = 30 5 | chain = FORWARD 6 | enabled = true 7 | ignoreip = 127.0.0.1/8 10.10.0.0/16 8 | maxretry = 1 9 | protocol = all 10 | -------------------------------------------------------------------------------- /assets/nat/var/tmp/nat-conntracker-confs/fail2ban.local: -------------------------------------------------------------------------------- 1 | [Definition] 2 | logtarget = SYSLOG 3 | -------------------------------------------------------------------------------- /assets/nat/var/tmp/nftables.conf: -------------------------------------------------------------------------------- 1 | table ip nat { 2 | chain prerouting { 3 | type nat hook prerouting priority 0; policy accept; 4 | } 5 | 6 | chain postrouting { 7 | type nat hook postrouting priority 100; policy accept; 8 | masquerade 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /assets/rsyslog/rsyslog.conf: -------------------------------------------------------------------------------- 1 | # Managed via terraform :heart: 2 | $MaxMessageSize 2k 3 | $PreserveFQDN off 4 | $ModLoad imuxsock 5 | $ModLoad imklog 6 | $ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat 7 | $RepeatedMsgReduction on 8 | $WorkDirectory /var/spool/rsyslog 9 | $FileOwner syslog 10 | $FileGroup adm 11 | $FileCreateMode 0640 12 | $DirCreateMode 0755 13 | $Umask 0022 14 | $IncludeConfig /etc/rsyslog.d/*.conf 15 | -------------------------------------------------------------------------------- /assets/travis-worker/bats_helpers.bash: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | aws_asg_setup() { 4 | export RUNDIR="${BATS_TMPDIR}/run" 5 | export ETCDIR="${BATS_TMPDIR}/etc" 6 | export VARTMP="${BATS_TMPDIR}/var/tmp" 7 | export MOCKLOG="${BATS_TMPDIR}/logs/mock.log" 8 | export KILL_COMMAND="${BATS_TMPDIR}/bin/kill_mocked" 9 | export CREDS_FILE="/dev/null" 10 | 11 | mkdir -p \ 12 | "${RUNDIR}" \ 13 | "${VARTMP}" \ 14 | "${ETCDIR}/rsyslog.d" \ 15 | "${ETCDIR}/default" \ 16 | "${ETCDIR}/systemd/system" \ 17 | "${ETCDIR}/init" \ 18 | "${BATS_TMPDIR:?}/bin" \ 19 | "${BATS_TMPDIR}/logs" \ 20 | "${BATS_TMPDIR}/returns" 21 | 22 | rm -f "${MOCKLOG}" 23 | 24 | touch \ 25 | "${ETCDIR}/hosts" \ 26 | "${ETCDIR}/hostname" \ 27 | "${MOCKLOG}" \ 28 | "${VARTMP}/travis-worker.service" 29 | 30 | echo "i-${RANDOM}" >"${RUNDIR}/instance-id" 31 | echo "flibbity-flob-${RANDOM}.example.com" >"${RUNDIR}/registry-hostname" 32 | 33 | cat >"${ETCDIR}/default/travis-worker-bats" <"${BATS_TMPDIR}/bin/mock" < \$(basename \${0})" "\$@" >>${MOCKLOG} 41 | if [[ -f "${BATS_TMPDIR}/returns/\$(basename \${0})" ]]; then 42 | cat "${BATS_TMPDIR}/returns/\$(basename \${0})" 43 | exit 0 44 | fi 45 | echo "\${RANDOM}\${RANDOM}\${RANDOM}" 46 | EOF 47 | chmod +x "${BATS_TMPDIR}/bin/mock" 48 | 49 | for cmd in \ 50 | awk \ 51 | chown \ 52 | curl \ 53 | dmesg \ 54 | date \ 55 | docker \ 56 | iptables \ 57 | kill_mocked \ 58 | logger \ 59 | pidof \ 60 | sed \ 61 | service \ 62 | shutdown \ 63 | sleep \ 64 | sysctl \ 65 | systemctl \ 66 | timeout; do 67 | pushd "${BATS_TMPDIR}/bin" &>/dev/null 68 | ln -svf mock "${cmd}" 69 | popd &>/dev/null 70 | done 71 | 72 | export PATH="${BATS_TMPDIR}/bin:${PATH}" 73 | } 74 | 75 | aws_asg_teardown() { 76 | rm -rf \ 77 | "${RUNDIR}" \ 78 | "${ETCDIR}" \ 79 | "${BATS_TMPDIR:?}/bin" \ 80 | "${BATS_TMPDIR}/logs" \ 81 | "${BATS_TMPDIR}/returns" 82 | } 83 | 84 | assert_cmd() { 85 | grep -E "$1" "${MOCKLOG}" 86 | } 87 | -------------------------------------------------------------------------------- /assets/travis-worker/check-docker-health.bash: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o pipefail 3 | set -o errexit 4 | 5 | # Sometimes, the docker service will be running, but certain commands (docker info) will hang indefinitely. 6 | # This script detects this behavior and implodes the instance when it occurs. 7 | 8 | main() { 9 | local warmup_grace_period=600 10 | local pre_implosion_sleep="${POST_SHUTDOWN_SLEEP}" 11 | local sleep_time="${DOCKER_PS_SLEEP_TIME}" 12 | local run_d="${RUNDIR}" 13 | : "${pre_implosion_sleep:=300}" 14 | : "${sleep_time:=5}" 15 | : "${run_d:=/var/tmp/travis-run.d}" 16 | : "${KILL_COMMAND:=kill}" 17 | 18 | if [[ -f "${run_d}/implode.confirm" ]]; then 19 | __handle_implode_confirm "${run_d}" 20 | __die imploded 42 21 | fi 22 | 23 | # Don't run this unless instance has been up at least $warmup_grace_period seconds 24 | uptime_as_int=$(printf "%.0f\n" "$(awk '{ print $1}' /proc/uptime)") 25 | if [[ "${uptime_as_int}" -lt "${warmup_grace_period}" ]]; then 26 | logger "Not checking docker health yet, as uptime is still less than ${warmup_grace_period} seconds" 27 | __die noop 0 28 | fi 29 | 30 | logger "Checking docker health..." 31 | result=$(timeout "${sleep_time}"s docker info) || true 32 | 33 | if [ -z "${result}" ]; then 34 | __handle_unresponsive_docker "${run_d}" "${pre_implosion_sleep}" 35 | __die imploding 86 36 | fi 37 | 38 | if [ -e "${run_d}/implode" ]; then 39 | logger "docker no longer seems unhealthy; canceling implosion." 40 | mv "${run_d}/implode" "${run_d}/implode.canceled.$(date --iso-8601=minutes)" 41 | fi 42 | 43 | __die noop 0 44 | } 45 | 46 | __handle_implode_confirm() { 47 | local run_d="${1}" 48 | 49 | local reason 50 | reason="$(cat "${run_d}/implode.confirm" 2>/dev/null)" 51 | : "${reason:=not sure why}" 52 | logger "imploding due to unhealthy docker; shutting down now!" 53 | "${SHUTDOWN:-/sbin/shutdown}" -P now "imploding because ${reason}" 54 | } 55 | 56 | __handle_unresponsive_docker() { 57 | local run_d="${1}" 58 | local pre_implosion_sleep="${2}" 59 | 60 | msg="docker is still unhealthy, implosion will continue" 61 | if [ ! -e "${run_d}/implode" ]; then 62 | msg="docker appears to be unhealthy, initiating implosion" 63 | echo "$msg" >"${run_d}/implode" 64 | fi 65 | logger "$msg" 66 | 67 | logger "Sleeping ${pre_implosion_sleep}" 68 | sleep "${pre_implosion_sleep}" 69 | 70 | if [ ! -e "${run_d}/implode" ]; then 71 | logger "docker previously reported as unhealthy, but ${run_d}/implode not found; not imploding?" 72 | __die noop 0 73 | fi 74 | 75 | pid="$(pidof travis-worker)" || true 76 | if [ -z "$pid" ]; then 77 | msg="No PID found for travis-worker, and docker is unhealthy; confirming implosion via cron" 78 | echo "$msg" >"${run_d}/implode.confirm" 79 | logger "$msg" 80 | else 81 | logger "Running '${KILL_COMMAND} -TERM $pid' to kill travis-worker due to unhealthy docker." 82 | "${KILL_COMMAND}" -TERM "$pid" 83 | fi 84 | } 85 | 86 | __die() { 87 | local status="${1}" 88 | local code="${2}" 89 | logger "time=$(date -u +%Y%m%dT%H%M%S) " \ 90 | "prog=$(basename "${0}") status=${status}" 91 | exit "${code}" 92 | } 93 | 94 | main "$@" 95 | -------------------------------------------------------------------------------- /assets/travis-worker/check-docker-health.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | load bats_helpers 4 | 5 | setup() { 6 | export DOCKER_PS_SLEEP_TIME=2 7 | export POST_SHUTDOWN_SLEEP=0.1 8 | export SHUTDOWN=shutdown 9 | aws_asg_setup 10 | } 11 | 12 | teardown() { 13 | aws_asg_teardown 14 | } 15 | 16 | run_check_docker_health() { 17 | bash "${BATS_TEST_DIRNAME}/check-docker-health.bash" 18 | } 19 | 20 | @test "handles implosion confirmation" { 21 | cat >"${BATS_TMPDIR}/returns/date" <"${RUNDIR}/implode.confirm" 25 | run run_check_docker_health 26 | 27 | assert_cmd 'shutdown -P now.+imploding because docker appears to be unhealthy' 28 | [ "${status}" -eq 42 ] 29 | assert_cmd "logger time=20171030T153252 prog=check-docker-health.bash status=imploded" 30 | } 31 | 32 | @test "handles implosion confirmation when docker is unhealthy" { 33 | cat >"${BATS_TMPDIR}/returns/date" <"${BATS_TMPDIR}/returns/timeout" <"${BATS_TMPDIR}/returns/awk" <"${BATS_TMPDIR}/returns/date" <"${BATS_TMPDIR}/returns/timeout" <"${RUNDIR}/implode" <"${BATS_TMPDIR}/returns/date" <"${BATS_TMPDIR}/returns/timeout" <"${BATS_TMPDIR}/returns/awk" <"${BATS_TMPDIR}/returns/pidof" <"${RUNDIR}/implode" <"${BATS_TMPDIR}/returns/date" <> /var/log/syslog 4 | -------------------------------------------------------------------------------- /assets/travis-worker/check-unregister-netdevice.bash: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | set -o pipefail 4 | 5 | main() { 6 | local post_sleep="${POST_SHUTDOWN_SLEEP}" 7 | local max_err="${MAX_ERROR_COUNT}" 8 | local run_d="${RUNDIR}" 9 | local error_count 10 | : "${post_sleep:=300}" 11 | : "${max_err:=9}" 12 | : "${run_d:=/var/tmp/travis-run.d}" 13 | 14 | if [[ -f "${run_d}/implode.confirm" ]]; then 15 | __handle_implode_confirm "${run_d}" "${post_sleep}" 16 | __die imploded 42 17 | fi 18 | 19 | error_count="$( 20 | "${DMESG:-dmesg}" | 21 | grep -c 'unregister_netdevice: waiting for lo to become free' 22 | )" 23 | 24 | if [[ "${error_count}" -gt "${max_err}" ]]; then 25 | __handle_exceeded_max_unregister_netdevice "${run_d}" "${error_count}" 26 | __die imploding 86 27 | fi 28 | 29 | __die noop 0 30 | } 31 | 32 | __handle_implode_confirm() { 33 | local run_d="${1}" 34 | local post_sleep="${2}" 35 | 36 | local reason 37 | reason="$(cat "${run_d}/implode.confirm" 2>/dev/null)" 38 | : "${reason:=not sure why}" 39 | "${SHUTDOWN:-/sbin/shutdown}" -P now "imploding because ${reason}" 40 | sleep "${post_sleep}" 41 | } 42 | 43 | __handle_exceeded_max_unregister_netdevice() { 44 | local run_d="${1}" 45 | local error_count="${2}" 46 | 47 | echo "detected unregister_netdevice via dmesg count=${error_count}" | 48 | tee "${run_d}/implode" 49 | "${DOCKER:-docker}" kill -s INT travis-worker 50 | } 51 | 52 | __die() { 53 | local status="${1}" 54 | local code="${2}" 55 | echo "time=$(date -u +%Y%m%dT%H%M%S) " \ 56 | "prog=$(basename "${0}") status=${status}" 57 | exit "${code}" 58 | } 59 | 60 | main "$@" 61 | -------------------------------------------------------------------------------- /assets/travis-worker/check-unregister-netdevice.bats: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bats 2 | 3 | load bats_helpers 4 | 5 | setup() { 6 | export POST_SHUTDOWN_SLEEP=0.1 7 | export SHUTDOWN=shutdown 8 | aws_asg_setup 9 | } 10 | 11 | teardown() { 12 | aws_asg_teardown 13 | } 14 | 15 | run_check_unregister_netdevice() { 16 | bash "${BATS_TEST_DIRNAME}/check-unregister-netdevice.bash" 17 | } 18 | 19 | @test "handles implosion confirmation" { 20 | echo 'i cannot go on' >"${RUNDIR}/implode.confirm" 21 | run run_check_unregister_netdevice 22 | assert_cmd 'shutdown -P now.+imploding because i cannot go on' 23 | assert_cmd 'sleep 0.1' 24 | [ "${status}" -eq 42 ] 25 | [[ "${output}" =~ status=imploded ]] 26 | } 27 | 28 | @test "handles implosion confirmation with reasons unknown" { 29 | touch "${RUNDIR}/implode.confirm" 30 | run run_check_unregister_netdevice 31 | assert_cmd 'shutdown -P now.+imploding because not sure why' 32 | assert_cmd 'sleep 0.1' 33 | [ "${status}" -eq 42 ] 34 | [[ "${output}" =~ status=imploded ]] 35 | } 36 | 37 | @test "is a no-op if detected errors are below threshold" { 38 | export MAX_ERROR_COUNT=3 39 | cat >>"${BATS_TMPDIR}/returns/dmesg" <>"${BATS_TMPDIR}/returns/dmesg" <"${BATS_TMPDIR}/returns/docker" <"${BATS_TMPDIR}/returns/date" <> /var/log/syslog 4 | -------------------------------------------------------------------------------- /assets/travis-worker/high-cpu-check.bash: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | set -o pipefail 4 | 5 | __logger() { 6 | local level 7 | local msg 8 | local date 9 | local log_msg 10 | 11 | level="${1}" && shift 12 | msg="${1}" && shift 13 | date="$(date --iso-8601=seconds)" 14 | log_msg="tag=cron time=${date} level=${level} msg=\"${msg}\"" 15 | for bit in "${@}"; do 16 | log_msg="${log_msg} ${bit}" 17 | done 18 | 19 | logger -t "$(basename "${0}")" "${log_msg}" 20 | } 21 | 22 | __die() { 23 | local status="${1}" 24 | local code="${2}" 25 | 26 | __logger "info" \ 27 | "cron finished" \ 28 | "status=${status}" 29 | exit "${code}" 30 | } 31 | 32 | report_containers() { 33 | local max_cpu 34 | local instance_id 35 | local instance_ip 36 | 37 | max_cpu="${1}" 38 | : "${RUNDIR:=/var/tmp/travis-run.d}" 39 | instance_id="$(cat "${RUNDIR}/instance-id")" 40 | instance_ip="$(cat "${RUNDIR}/instance-ipv4")" 41 | 42 | IFS=$'\n' 43 | stats="$(docker stats --no-stream --format "{{.Container}} {{.CPUPerc}} {{.Name}}")" 44 | 45 | echo "${stats}" | while IFS=" " read -r cid usage_as_float name; do 46 | usage_as_float="${usage_as_float//%/}" 47 | usage_as_int=${usage_as_float/.*/} 48 | [ -z "${usage_as_float}" ] && continue 49 | if [ "${usage_as_int}" -ge "${max_cpu}" ]; then 50 | __logger "info" \ 51 | "cpu usage detected" \ 52 | "status=noop" \ 53 | "instance_id=${instance_id}" \ 54 | "instance_ip=${instance_ip}" \ 55 | "cid=${cid}" \ 56 | "cpu_usage=${usage_as_float}" \ 57 | "name=${name}" 58 | fi 59 | done 60 | 61 | count="$(echo "${stats}" | wc -l)" 62 | [ -z "${stats}" ] && count="0" 63 | 64 | __logger "info" \ 65 | "checked cpu usage of ${count} running containers" \ 66 | "status=noop" \ 67 | "instance_id=${instance_id}" \ 68 | "instance_ip=${instance_ip}" 69 | } 70 | 71 | main() { 72 | # shellcheck disable=SC2153 73 | : "${MAX_CPU:=0}" 74 | 75 | local max_cpu 76 | max_cpu="${MAX_CPU}" 77 | 78 | { 79 | report_containers "${max_cpu}" | while read -r cid; do 80 | echo "checking cid: ${cid}" 81 | done 82 | } 83 | } 84 | 85 | main "$@" 86 | -------------------------------------------------------------------------------- /assets/travis-worker/high-cpu-check.crontab: -------------------------------------------------------------------------------- 1 | SHELL = /bin/bash 2 | 3 | */5 * * * * root /var/tmp/travis-run.d/high-cpu-check &>> /var/log/syslog 4 | -------------------------------------------------------------------------------- /assets/travis-worker/rsyslog-watch-upstart.conf: -------------------------------------------------------------------------------- 1 | $ModLoad imfile 2 | 3 | $InputFileName /var/log/upstart/travis-worker.log 4 | $InputFileTag travis-worker 5 | $InputFileStateFile state_file_travis_worker 6 | $InputFileFacility local7 7 | $InputRunFileMonitor 8 | $InputFilePollInterval 10 9 | -------------------------------------------------------------------------------- /assets/travis-worker/tfw-admin-clean-containers.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=TFW Admin Clean Containers 3 | After=travis-worker.service 4 | Requires=travis-worker.service 5 | 6 | [Service] 7 | EnvironmentFile=/var/tmp/travis-run.d/travis-worker.env 8 | ExecStart=/usr/local/bin/tfw admin-clean-containers 9 | Restart=always 10 | SyslogIdentifier=tfw-admin-clean-containers 11 | WorkingDirectory=/ 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /assets/travis-worker/travis-worker-wrapper: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | 4 | main() { 5 | local name="${1:-travis-worker}" 6 | 7 | docker stop "${name}" &>/dev/null || true 8 | docker rm -f "${name}" &>/dev/null || true 9 | 10 | local env_file 11 | if tfw --version 2>/dev/null; then 12 | env_file="$(tfw writeenv travis-worker)" 13 | else 14 | env_file=/var/tmp/travis-run.d/travis-worker 15 | travis-combined-env travis-worker "${env_file}" 16 | fi 17 | 18 | set -o allexport 19 | # shellcheck source=/dev/null 20 | source "${env_file}" 21 | 22 | if [[ "${TRAVIS_WORKER_PROVIDER_NAME}" == gce ]]; then 23 | local gce_zone 24 | gce_zone="$(__fetch_gce_zone)" 25 | if [ -z "${gce_zone}" ]; then 26 | gce_zone=us-central1-b 27 | fi 28 | echo "TRAVIS_WORKER_GCE_ZONE=${gce_zone}" >>"${env_file}" 29 | fi 30 | 31 | if [[ ! "${TRAVIS_WORKER_LIBRATO_SOURCE}" ]]; then 32 | local librato_source 33 | librato_source="$(__build_librato_source "$(hostname)" "${name}")" 34 | echo "TRAVIS_WORKER_LIBRATO_SOURCE=${librato_source}" >>"${env_file}" 35 | fi 36 | 37 | if [ -f "${TRAVIS_WORKER_PRESTART_HOOK}" ]; then 38 | "${TRAVIS_WORKER_PRESTART_HOOK}" 39 | fi 40 | 41 | exec docker run \ 42 | --rm \ 43 | --name "${name}" \ 44 | --hostname "$(hostname)" \ 45 | --userns host \ 46 | -v /var/tmp:/var/tmp \ 47 | -v /var/run:/var/run \ 48 | --env-file "${env_file}" \ 49 | "${TRAVIS_WORKER_SELF_IMAGE}" travis-worker 50 | } 51 | 52 | __fetch_gce_zone() { 53 | curl -sSL \ 54 | "http://metadata.google.internal/computeMetadata/v1/instance/zone" \ 55 | -H "Metadata-Flavor: Google" | 56 | awk -F/ '{ print $NF }' 57 | } 58 | 59 | __build_librato_source() { 60 | local host_name="${1}" 61 | local name="${2}" 62 | 63 | if [[ "${name}" == "travis-worker" ]]; then 64 | echo "${host_name}" 65 | return 66 | fi 67 | 68 | echo "${host_name}-${name/travis-worker-/}" 69 | } 70 | 71 | main "$@" 72 | -------------------------------------------------------------------------------- /assets/travis-worker/travis-worker.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Travis Worker 3 | After=docker.service 4 | Requires=docker.service 5 | 6 | [Service] 7 | ExecStart=/usr/local/bin/travis-worker-wrapper 8 | ExecStopPost=/bin/sleep 5 9 | Restart=always 10 | SyslogIdentifier=travis-worker 11 | User=travis 12 | WorkingDirectory=/ 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /assets/travis-worker/travis-worker@.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Travis Worker 3 | After=docker.service 4 | Requires=docker.service 5 | 6 | [Service] 7 | ExecStart=/usr/local/bin/travis-worker-wrapper travis-worker-%I 8 | ExecStopPost=/bin/sleep 5 9 | Restart=always 10 | SyslogIdentifier=travis-worker 11 | User=travis 12 | WorkingDirectory=/ 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /assets/travis-worker/unregister-netdevice.crontab: -------------------------------------------------------------------------------- 1 | SHELL = /bin/bash 2 | 3 | * * * * * root /var/tmp/travis-run.d/check-unregister-netdevice &>/dev/null 4 | -------------------------------------------------------------------------------- /aws.mk: -------------------------------------------------------------------------------- 1 | TOP := $(shell git rev-parse --show-toplevel) 2 | 3 | include $(TOP)/terraform-common.mk 4 | include $(TOP)/trvs.mk 5 | 6 | .PHONY: default 7 | default: hello 8 | 9 | CONFIG_FILES := \ 10 | config/travis-build-com.env \ 11 | config/travis-build-org.env \ 12 | config/travis-com.env \ 13 | config/travis-org.env \ 14 | config/worker-com.env \ 15 | config/worker-org.env 16 | 17 | .PHONY: .config 18 | .config: $(CONFIG_FILES) $(ENV_NAME).auto.tfvars 19 | 20 | $(CONFIG_FILES): config/.written 21 | 22 | .PHONY: diff-docker-images 23 | diff-docker-images: 24 | @diff -u \ 25 | --label a/docker-images \ 26 | <($(TOP)/bin/show-current-docker-images) \ 27 | --label b/docker-images \ 28 | <($(TOP)/bin/show-proposed-docker-images "$(ENV_NAME).auto.tfvars") 29 | -------------------------------------------------------------------------------- /bin/build-cache-configure: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'json' 5 | require 'optparse' 6 | 7 | require_relative '../lib/heroku_client' 8 | 9 | def main 10 | options = { 11 | apps: [], 12 | fqdn: '', 13 | heroku_api_hostname: 'api.heroku.com', 14 | heroku_api_key: ENV.fetch('HEROKU_API_KEY', '').strip 15 | } 16 | 17 | OptionParser.new do |opts| 18 | opts.on( 19 | '-a', '--apps=APPS', 20 | 'name of travis-build heroku apps (space or comma-delimited)' 21 | ) { |v| options[:apps] = v.split(/[ ,]/).map(&:strip) } 22 | 23 | opts.on( 24 | '-f', '--fqdn=FQDN', 25 | 'fully-qualified domain name of build cache' 26 | ) { |v| options[:fqdn] = v.strip } 27 | 28 | opts.on( 29 | '--heroku-api-hostname=HOSTNAME', 30 | 'hostname of Heroku API' 31 | ) { |v| options[:heroku_api_hostname] = v.strip } 32 | 33 | opts.on( 34 | '--heroku-api-key=HEROKU_API_KEY', 35 | 'key to use with Heroku API' 36 | ) { |v| options[:heroku_api_key] = v.strip } 37 | end.parse! 38 | 39 | raise 'Missing HEROKU_API_KEY' if options[:heroku_api_key].empty? 40 | raise 'No apps specified' if options[:apps].empty? 41 | 42 | heroku = HerokuClient.new( 43 | api_key: options.fetch(:heroku_api_key), 44 | api_host: options.fetch(:heroku_api_hostname) 45 | ) 46 | 47 | options.fetch(:apps).each do |app_name| 48 | heroku.patch( 49 | JSON.generate( 50 | 'TRAVIS_BUILD_APT_PROXY' => "http://#{options.fetch(:fqdn)}" 51 | ), 52 | "/apps/#{app_name}/config-vars" 53 | ) 54 | end 55 | 56 | 0 57 | end 58 | 59 | exit(main) if $PROGRAM_NAME == __FILE__ 60 | -------------------------------------------------------------------------------- /bin/chirp-assign-aws-creds: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | 4 | main() { 5 | local access_key_id="${1}" 6 | shift 7 | local secret_key="${1}" 8 | shift 9 | 10 | for repo in "${@}"; do 11 | travis env set -r "${repo}" AWS_ACCESS_KEY "${access_key_id}" 12 | travis env set -r "${repo}" AWS_SECRET_KEY "${secret_key}" 13 | done 14 | } 15 | 16 | main "${@}" 17 | -------------------------------------------------------------------------------- /bin/ensure-terraform: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | set -o pipefail 4 | 5 | main() { 6 | local tf_version="${1}" 7 | 8 | if [[ ! "${tf_version}" ]]; then 9 | echo "Usage: $(basename "${0}") " >&2 10 | exit 1 11 | fi 12 | 13 | if uname -a | grep Darwin; then local arch="darwin"; else local arch="linux"; fi 14 | 15 | : "${TF_INSTALL_MISSING:=1}" 16 | : "${TF_INSTALLATION_PREFIX:=${HOME}/.cache/travis-terraform-config}" 17 | : "${TF_DOWNLOAD_SERVER:=https://releases.hashicorp.com}" 18 | : "${TMPDIR:=/tmp}" 19 | 20 | mkdir -p "${TF_INSTALLATION_PREFIX}" 21 | local tf="${TF_INSTALLATION_PREFIX}/terraform-${tf_version}" 22 | 23 | if [[ ! -x "${tf}" && "${TF_INSTALL_MISSING}" == 0 ]]; then 24 | echo "===> Terraform ${tf_version} required." 25 | echo " Run this:" 26 | echo " ${0} ${tf_version}" 27 | echo 28 | exit 1 29 | fi 30 | 31 | if [[ -x "${tf}" ]]; then 32 | exit 0 33 | fi 34 | 35 | local tf_version_clean="${tf_version#v}" 36 | local tf_url="${TF_DOWNLOAD_SERVER}" 37 | tf_url="${tf_url}/terraform/${tf_version_clean}" 38 | tf_url="${tf_url}/terraform_${tf_version_clean}_${arch}_amd64.zip" 39 | 40 | echo "===> Ensuring Terraform ${tf_version} is present." 41 | echo " URL: ${tf_url}" 42 | echo 43 | 44 | pushd "${TMPDIR}" &>/dev/null 45 | curl -sSL -o terraform.zip "${tf_url}" 46 | unzip terraform.zip 47 | mv -v terraform "${tf}" 48 | chmod +x "${tf}" 49 | popd &>/dev/null 50 | "${tf}" version 51 | } 52 | 53 | main "$@" 54 | -------------------------------------------------------------------------------- /bin/env-url-to-parts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'uri' 5 | 6 | def main(argv = ARGV) 7 | url_varname = argv.fetch(0) 8 | out_dir = argv.fetch(1) 9 | suffix = argv.fetch(2).upcase 10 | 11 | parsed = URI(ENV.fetch(url_varname)) 12 | 13 | %w[scheme user password host port path query].each do |part| 14 | basename = "#{url_varname}_#{part.upcase}_#{suffix}" 15 | File.open(File.join(out_dir, basename), 'w') do |f| 16 | f.puts parsed.send(part).to_s + "\n" 17 | end 18 | end 19 | end 20 | 21 | module URI 22 | class AMQP < Generic 23 | DEFAULT_PORT = 5672 24 | end 25 | 26 | class AMQPS < Generic 27 | DEFAULT_PORT = 5671 28 | end 29 | 30 | @@schemes['AMQP'] = AMQP 31 | @@schemes['AMQPS'] = AMQPS 32 | end 33 | 34 | main if $PROGRAM_NAME == __FILE__ 35 | -------------------------------------------------------------------------------- /bin/gce-export-net: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'optparse' 5 | 6 | def main 7 | options = { 8 | module_name: ENV.fetch('MODULE_NAME', 'gce_project_1'), 9 | noop: false, 10 | terraform: ENV.fetch('TERRAFORM', 'terraform') 11 | } 12 | 13 | OptionParser.new do |opts| 14 | opts.on('-m', '--module-name=MODULE_NAME') do |v| 15 | options[:module_name] = v.strip 16 | end 17 | 18 | opts.on('-n', '--noop') do 19 | options[:noop] = true 20 | end 21 | 22 | opts.on('-T', '--terraform=TERRAFORM') do |v| 23 | options[:terraform] = v.strip 24 | end 25 | end.parse! 26 | 27 | module_name = options.fetch(:module_name) 28 | noop = !!options.fetch(:noop) 29 | terraform = options.fetch(:terraform) 30 | 31 | command = %W[#{terraform} state rm] + ( 32 | %w[ 33 | aws_route53_record.bastion-b 34 | aws_route53_record.nat-b 35 | google_compute_address.bastion-b 36 | google_compute_address.bastion[0] 37 | google_compute_address.nat-b 38 | google_compute_firewall.allow_internal 39 | google_compute_firewall.allow_jobs_nat 40 | google_compute_firewall.allow_public_icmp 41 | google_compute_firewall.allow_public_ssh 42 | google_compute_firewall.deny_target_ip 43 | google_compute_instance.bastion-b 44 | google_compute_instance.bastion[0] 45 | google_compute_network.main 46 | google_compute_subnetwork.build_com 47 | google_compute_subnetwork.build_org 48 | google_compute_subnetwork.jobs_com 49 | google_compute_subnetwork.jobs_org 50 | google_compute_subnetwork.public 51 | google_compute_subnetwork.workers 52 | ].map { |r| "module.#{module_name}.#{r}" } 53 | ) 54 | 55 | warn("---> #{command.join(' ')}") 56 | system(*command) unless noop 57 | 58 | 0 59 | end 60 | 61 | exit(main) if $PROGRAM_NAME == __FILE__ 62 | -------------------------------------------------------------------------------- /bin/gce-export-workers: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'optparse' 5 | 6 | def main 7 | options = { 8 | counts: { 9 | com: Integer(ENV.fetch('COUNT_COM', '0')), 10 | org: Integer(ENV.fetch('COUNT_ORG', '0')) 11 | }, 12 | module_name: ENV.fetch('MODULE_NAME', 'gce_project_1'), 13 | noop: false, 14 | terraform: ENV.fetch('TERRAFORM', 'terraform'), 15 | zones: ENV.fetch('ZONES', 'a,b,c,f').split(',').map(&:strip) 16 | } 17 | 18 | OptionParser.new do |opts| 19 | opts.on('--count-com=COUNT_COM', Integer) do |v| 20 | options[:counts][:com] = v 21 | end 22 | 23 | opts.on('--count-org=COUNT_ORG', Integer) do |v| 24 | options[:counts][:org] = v 25 | end 26 | 27 | opts.on('-m', '--module-name=MODULE_NAME') do |v| 28 | options[:module_name] = v.strip 29 | end 30 | 31 | opts.on('-n', '--noop') do 32 | options[:noop] = true 33 | end 34 | 35 | opts.on('-T', '--terraform=TERRAFORM') do |v| 36 | options[:terraform] = v.strip 37 | end 38 | 39 | opts.on('-Z', '--zones=ZONES') do |v| 40 | options[:zones] = v.split(',').map(&:strip) 41 | end 42 | end.parse! 43 | 44 | counts = options.fetch(:counts) 45 | module_name = options.fetch(:module_name) 46 | noop = !!options.fetch(:noop) 47 | terraform = options.fetch(:terraform) 48 | zones = options.fetch(:zones) 49 | 50 | command = %W[#{terraform} state rm] 51 | 52 | %i[com org].each do |site| 53 | zones.each do |zone| 54 | (counts.fetch(site) / zones.length).times do |n| 55 | command << %W[ 56 | module.#{module_name}.module.gce_worker_#{zone} 57 | google_compute_instance.worker_#{site}[#{n}] 58 | ].join('.') 59 | end 60 | end 61 | end 62 | 63 | warn("---> #{command.join(' ')}") 64 | system(*command) unless noop 65 | 66 | 0 67 | end 68 | 69 | exit(main) if $PROGRAM_NAME == __FILE__ 70 | -------------------------------------------------------------------------------- /bin/gce-import-net: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'optparse' 5 | 6 | def main 7 | options = { 8 | env: ENV.fetch('ENV', 'staging'), 9 | index: ENV.fetch('INDEX', '1'), 10 | noop: false, 11 | project: ENV.fetch('PROJECT', 'notset'), 12 | region: ENV.fetch('REGION', 'us-central1'), 13 | terraform: ENV.fetch('TERRAFORM', 'terraform') 14 | } 15 | 16 | OptionParser.new do |opts| 17 | opts.on('-e', '--env=ENV') do |v| 18 | options[:env] = v.strip 19 | end 20 | 21 | opts.on('-i', '--index=INDEX') do |v| 22 | options[:index] = v.strip 23 | end 24 | 25 | opts.on('-n', '--noop') do 26 | options[:noop] = true 27 | end 28 | 29 | opts.on('-p', '--project=PROJECT') do |v| 30 | options[:project] = v.strip 31 | end 32 | 33 | opts.on('-r', '--region=REGION') do |v| 34 | options[:region] = v.strip 35 | end 36 | 37 | opts.on('-T', '--terraform=TERRAFORM') do |v| 38 | options[:terraform] = v.strip 39 | end 40 | end.parse! 41 | 42 | env = options.fetch(:env) 43 | index = options.fetch(:index) 44 | noop = !!options.fetch(:noop) 45 | project = options.fetch(:project) 46 | region = options.fetch(:region) 47 | terraform = options.fetch(:terraform) 48 | 49 | { 50 | 'google_compute_address.bastion[0]' => 'bastion-b', 51 | 'google_compute_firewall.allow_internal' => 'allow-internal', 52 | 'google_compute_firewall.allow_jobs_nat' => 'allow-jobs-nat', 53 | 'google_compute_firewall.allow_public_icmp' => 'allow-public-icmp', 54 | 'google_compute_firewall.allow_public_ssh' => 'allow-public-ssh', 55 | 'google_compute_firewall.deny_target_ip' => 'deny-target-ip', 56 | 'google_compute_instance.bastion[0]' => 57 | "#{project}/#{region}-b/#{env}-#{index}-bastion-b", 58 | 'google_compute_network.main' => 'main', 59 | 'google_compute_subnetwork.jobs_com' => "#{region}/jobs-com", 60 | 'google_compute_subnetwork.jobs_org' => "#{region}/jobs-org", 61 | 'google_compute_subnetwork.public' => "#{region}/public", 62 | 'google_compute_subnetwork.workers' => "#{region}/workers" 63 | }.each do |resource, importable| 64 | command = %W[ 65 | #{terraform} import module.gce_net.#{resource} #{importable} 66 | ] 67 | warn("---> #{command.join(' ')}") 68 | next if noop 69 | 70 | system(*command) || warn('---? already imported?') 71 | end 72 | 73 | 0 74 | end 75 | 76 | exit(main) if $PROGRAM_NAME == __FILE__ 77 | -------------------------------------------------------------------------------- /bin/gce-import-workers: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'optparse' 5 | 6 | def main 7 | options = { 8 | counts: { 9 | com: Integer(ENV.fetch('COUNT_COM', '0')), 10 | org: Integer(ENV.fetch('COUNT_ORG', '0')) 11 | }, 12 | env: ENV.fetch('ENV', 'staging'), 13 | index: ENV.fetch('INDEX', '1'), 14 | module_name: ENV.fetch('MODULE_NAME', 'gce_worker_group'), 15 | noop: false, 16 | project: ENV.fetch('PROJECT', 'notset'), 17 | region: ENV.fetch('REGION', 'us-central1'), 18 | terraform: ENV.fetch('TERRAFORM', 'terraform'), 19 | zones: ENV.fetch('ZONES', 'a,b,c,f').split(',').map(&:strip) 20 | } 21 | 22 | OptionParser.new do |opts| 23 | opts.on('--count-com=COUNT_COM', Integer) do |v| 24 | options[:counts][:com] = v 25 | end 26 | 27 | opts.on('--count-org=COUNT_ORG', Integer) do |v| 28 | options[:counts][:org] = v 29 | end 30 | 31 | opts.on('-e', '--env=ENV') do |v| 32 | options[:env] = v.strip 33 | end 34 | 35 | opts.on('-i', '--index=INDEX') do |v| 36 | options[:index] = v.strip 37 | end 38 | 39 | opts.on('-m', '--module-name=MODULE_NAME') do |v| 40 | options[:module_name] = v.strip 41 | end 42 | 43 | opts.on('-n', '--noop') do 44 | options[:noop] = true 45 | end 46 | 47 | opts.on('-p', '--project=PROJECT') do |v| 48 | options[:project] = v.strip 49 | end 50 | 51 | opts.on('-r', '--region=REGION') do |v| 52 | options[:region] = v.strip 53 | end 54 | 55 | opts.on('-T', '--terraform=TERRAFORM') do |v| 56 | options[:terraform] = v.strip 57 | end 58 | 59 | opts.on('-Z', '--zones=ZONES') do |v| 60 | options[:zones] = v.split(',').map(&:strip) 61 | end 62 | end.parse! 63 | 64 | counts = options.fetch(:counts) 65 | env = options.fetch(:env) 66 | index = options.fetch(:index) 67 | module_name = options.fetch(:module_name) 68 | noop = !!options.fetch(:noop) 69 | project = options.fetch(:project) 70 | region = options.fetch(:region) 71 | terraform = options.fetch(:terraform) 72 | zones = options.fetch(:zones) 73 | 74 | to_import = {} 75 | 76 | %i[com org].each do |site| 77 | counts.fetch(site).times do |n| 78 | key = "module.gce_workers.google_compute_instance.worker_#{site}[#{n}]" 79 | zone = zones[n % zones.length] 80 | ic = (n / zones.length) + 1 81 | to_import[key] = %W[ 82 | #{project} 83 | #{region}-#{zone} 84 | #{env}-#{index}-worker-#{site}-#{zone}-#{ic}-gce 85 | ].join('/') 86 | end 87 | end 88 | 89 | to_import.each do |resource, importable| 90 | command = %W[ 91 | #{terraform} import module.#{module_name}.#{resource} #{importable} 92 | ] 93 | warn("---> #{command.join(' ')}") 94 | next if noop 95 | 96 | system(*command) || warn('---? already imported?') 97 | end 98 | 99 | 0 100 | end 101 | 102 | exit(main) if $PROGRAM_NAME == __FILE__ 103 | -------------------------------------------------------------------------------- /bin/gcloud-nats-by-zone: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'json' 5 | 6 | NOTSET = '' 7 | 8 | def main(argv: ARGV) 9 | return usage if $stdin.tty? || argv.include?('-h') || argv.include?('--help') 10 | 11 | in_args = JSON.parse($stdin.read) 12 | zones = in_args.fetch('zones', '').split(',').map(&:strip).reject(&:empty?) 13 | count = Integer(in_args.fetch('count', zones.length)) 14 | project = in_args.fetch('project') 15 | region = in_args.fetch('region') 16 | retries = Integer(in_args.fetch('retries', '4')) 17 | retry_sleep = Integer(in_args.fetch('retry_sleep', '120')) 18 | accounting = %w[1 yes on true].include?( 19 | ENV.fetch( 20 | 'ACCOUNTING', in_args.fetch('accounting', true) 21 | ).to_s 22 | ) 23 | retried = 0 24 | 25 | instances = {} 26 | while retried <= retries 27 | instances = fetch_nats_by_zone(zones, count, project, region) 28 | break unless accounting 29 | break if instances.length == count 30 | 31 | warn( 32 | "#{$PROGRAM_NAME}: sleeping=#{retry_sleep}s " \ 33 | "instances_length=#{instances.length} count=#{count}" 34 | ) 35 | 36 | sleep(retry_sleep) 37 | retried += 1 38 | end 39 | 40 | $stdout.puts(JSON.pretty_generate(instances)) 41 | 0 42 | end 43 | 44 | def fetch_nats_by_zone(zones, count, project, region) 45 | instances = {} 46 | instances_command = %w[ 47 | gcloud compute instance-groups list-instances 48 | ] 49 | 50 | zones.each do |zone| 51 | (count / zones.length).times do |count_index| 52 | full_command = instances_command + %W[ 53 | nat-#{zone}-#{count_index + 1} 54 | --project=#{project} 55 | --zone=#{region}-#{zone} 56 | --format="value(instance)" 57 | ] 58 | instance_name = `#{full_command.join(' ')}`.strip 59 | next if instance_name.empty? 60 | 61 | instances["nat-#{zone}-#{count_index + 1}"] = %W[ 62 | projects 63 | #{project} 64 | zones 65 | #{region}-#{zone} 66 | instances 67 | #{instance_name} 68 | ].join('/') 69 | rescue StandardError => e 70 | warn e 71 | end 72 | end 73 | 74 | instances 75 | end 76 | 77 | def usage 78 | warn <<~EOF 79 | Usage: #{$PROGRAM_NAME} [-h|--help] 80 | 81 | Print a mapping of {zone}-{index}=>{instance-id} given a JSON blob 82 | containing the following arguments provided via stdin: 83 | 84 | accounting - boolean controlling count check and retries, default=false 85 | count - total number of expected instances, default=zones.length 86 | project - the gcloud project name *REQUIRED* 87 | region - the region in which to look for nat instances *REQUIRED* 88 | retries - number of total retries, default=4 89 | retry_sleep - sleep interval between retries, default=120 90 | zones - a comma-delimited list of zones within the region 91 | EOF 92 | 2 93 | end 94 | 95 | exit(main) if $PROGRAM_NAME == __FILE__ 96 | -------------------------------------------------------------------------------- /bin/gcloud-recreate-nat-instances: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'optparse' 5 | require 'json' 6 | 7 | def main 8 | options = { env: '', noop: false } 9 | OptionParser.new do |opts| 10 | opts.banner = <<~BANNER 11 | Usage: gcloud-recreate-nat-instances [options] 12 | 13 | Recreates the NAT instances managed by the instance groups specified using 14 | the GCE_NAT_GROUPS environment variable. Also requires GCE_NAT_PROJECT 15 | and GCE_NAT_REGION to be set. 16 | 17 | BANNER 18 | 19 | opts.on('-E', '--env=ENV_FILE') do |f| 20 | options[:env] = f.strip 21 | end 22 | 23 | opts.on('-n', '--noop') do 24 | options[:noop] = true 25 | end 26 | 27 | opts.on('--help') do 28 | puts opts 29 | exit 30 | end 31 | end.parse! 32 | 33 | env = Hash[ENV] 34 | env.merge!(source_env(options[:env])) unless options[:env].empty? 35 | 36 | project = env.fetch('GCE_NAT_PROJECT') 37 | region = env.fetch('GCE_NAT_REGION') 38 | groups = env.fetch('GCE_NAT_GROUPS').split(',').map(&:strip) 39 | 40 | groups_zones = groups.map do |group| 41 | [group, "#{region}-#{group.split('-').fetch(1)}"] 42 | end 43 | groups_zones = Hash[groups_zones] 44 | 45 | groups_zones.each do |instance_group, zone| 46 | instance_list = list_instances(instance_group, zone, project) 47 | instances = instance_list.map { |r| r.fetch('instance').split('/').last } 48 | command = %W[ 49 | gcloud compute instance-groups managed recreate-instances 50 | #{instance_group} --zone=#{zone} --project=#{project} 51 | --instances=#{instances.join(',')} 52 | ] 53 | run_command(command, options.fetch(:noop)) 54 | end 55 | 56 | 0 57 | end 58 | 59 | def list_instances(instance_group, zone, project) 60 | command = %W[ 61 | gcloud compute instance-groups list-instances 62 | #{instance_group} 63 | --zone=#{zone} 64 | --project=#{project} 65 | --format=json 66 | ] 67 | JSON.parse(`#{command.join(' ')}`) 68 | end 69 | 70 | def run_command(command, noop) 71 | if noop 72 | puts "---> NOOP: #{command.join(' ').inspect}" 73 | else 74 | puts "---> RUNNING: #{command.join(' ').inspect}" 75 | system(*command) 76 | end 77 | end 78 | 79 | def source_env(env_file) 80 | base_env = `bash -c 'printenv'`.split($RS).map do |l| 81 | l.strip.split('=', 2) 82 | end 83 | base_env = Hash[base_env] 84 | sourced_env = `bash -c "source #{env_file}; printenv"`.split($RS).map do |l| 85 | l.strip.split('=', 2) 86 | end 87 | sourced_env = Hash[sourced_env] 88 | base_env.keys.each { |k| sourced_env.delete(k) } 89 | sourced_env 90 | end 91 | 92 | exit(main) if $PROGRAM_NAME == __FILE__ 93 | -------------------------------------------------------------------------------- /bin/generate-github-ssh-users: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | 4 | main() { 5 | [[ "${GITHUB_TOKEN}" ]] || { 6 | echo "Missing \${GITHUB_TOKEN}" >&2 7 | exit 1 8 | } 9 | 10 | [[ "${GITHUB_USERNAME}" ]] || { 11 | echo "Missing \${GITHUB_USERNAME}" >&2 12 | exit 1 13 | } 14 | 15 | : "${GITHUB_ORG:=travis-infrastructure}" 16 | : "${GITHUB_TEAM:=ssh-access}" 17 | 18 | echo "github_users = \"$( 19 | __github_users "${GITHUB_USERNAME}:${GITHUB_TOKEN}" \ 20 | "${GITHUB_ORG}" "${GITHUB_TEAM}" | 21 | tr "\n" " " | sed 's/ *$//' 22 | )\"" 23 | } 24 | 25 | __github_users() { 26 | local auth="$1" 27 | local org="$2" 28 | local team="$3" 29 | 30 | local team_id 31 | # FIXME: this fails in an unclear way if user hasn't been added to GitHub teams (?) 32 | team_id="$( 33 | __ghcurl "${auth}" "/orgs/${org}/teams" | 34 | jq -r ".[]|select(.name==\"${team}\")|.id" 35 | )" 36 | # TODO: if this doesn't work, the user probably needs to be added to the travis-infrastructure organization. 37 | __ghcurl "${auth}" "/teams/${team_id}/members" | jq -r '.[]|.login' | 38 | LC_ALL=C sort | 39 | while read -r login; do 40 | __ghcurl "${auth}" "/users/${login}" | 41 | jq -r '[(.name|split(" ")|.[0]|ascii_downcase), .login]|join(":")' 42 | done 43 | } 44 | 45 | __ghcurl() { 46 | local auth="$1" 47 | shift 48 | local path="$1" 49 | shift 50 | 51 | curl -s "https://${auth}@api.github.com${path}" "$@" 52 | } 53 | 54 | main "$@" 55 | -------------------------------------------------------------------------------- /bin/generate-latest-docker-image-tags: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'json' 5 | require 'net/http' 6 | require 'openssl' 7 | 8 | DEFAULT_STACKS = %w[ 9 | android 10 | erlang 11 | go 12 | haskell 13 | jvm 14 | nodejs 15 | perl 16 | php 17 | python 18 | ruby 19 | amethyst 20 | connie 21 | garnet 22 | ].freeze 23 | 24 | GITSHA1 = /^[0-9a-f]{7}$/.freeze 25 | 26 | def main 27 | stacks = ENV['STACKS'].to_s.split.map(&:strip) 28 | stacks = DEFAULT_STACKS if stacks.empty? 29 | stacks.each do |stack| 30 | tag = latest_docker_image_tag("travisci/ci-#{stack}") 31 | $stdout.puts(%(latest_docker_image_#{stack} = #{tag.to_s.inspect})) 32 | end 33 | 34 | %w[ 35 | gesund 36 | nat-conntracker 37 | worker 38 | ].each do |image| 39 | tag = latest_docker_image_tag("travisci/#{image}") 40 | $stdout.puts( 41 | %(latest_docker_image_#{image.tr('-', '_')} = #{tag.to_s.inspect}) 42 | ) 43 | end 44 | 45 | 0 46 | end 47 | 48 | # FIXME: doesn't work if docker isn't installed 49 | def latest_docker_image_tag(image) 50 | qs = URI.encode_www_form( 51 | service: 'registry.docker.io', 52 | scope: "repository:#{image}:pull" 53 | ) 54 | 55 | token = docker_index_get( 56 | "/token?#{qs}", 57 | host: 'auth.docker.io' 58 | ).fetch('token') 59 | 60 | tags = docker_index_get( 61 | "/v2/#{image}/tags/list", 62 | headers: { 'Authorization' => "Bearer #{token}" } 63 | ).fetch('tags') 64 | 65 | # FIXME: this fails if user is not a member of Docker Hub org/team 66 | return "#{image}:#{tags.first}" if tags.all? { |t| t.match?(GITSHA1) } 67 | 68 | "#{image}:#{fancy_sorted(tags).reverse.fetch(0)}" 69 | end 70 | 71 | def fancy_sorted(tags) 72 | tags.sort do |a, b| 73 | if a =~ /^packer/ && b =~ /^packer/ 74 | Integer(a.split('-')[1]) <=> Integer(b.split('-')[1]) 75 | elsif a =~ /^v?\d/ && b =~ /^v?\d/ 76 | vers2int(a) <=> vers2int(b) 77 | else 78 | a <=> b 79 | end 80 | end 81 | end 82 | 83 | def vers2int(vers) 84 | return 0 if vers.match?(GITSHA1) 85 | 86 | parts = vers.sub(/^v/, '').split('-') 87 | parts = parts.map { |part| part.split('.') }.flatten.compact 88 | parts.reject! { |part| part.start_with?('g', 'dirty') } 89 | parts << 0 if parts.length < 4 90 | parts.map { |part| format('%03d', Integer(part)) }.join.to_i 91 | end 92 | 93 | def docker_index_get(path, host: 'index.docker.io', headers: {}, 94 | debug: ENV['DOCKER_HTTP_DEBUG'] == '1') 95 | http = Net::HTTP.new(host, 443) 96 | http.use_ssl = true 97 | http.verify_mode = OpenSSL::SSL::VERIFY_PEER 98 | http.set_debug_output($stderr) if debug 99 | 100 | request = Net::HTTP::Get.new(path) 101 | request['Accept'] = 'application/json' 102 | headers.each do |key, value| 103 | request[key] = value 104 | end 105 | response = http.request(request) 106 | JSON.parse(response.body) 107 | end 108 | 109 | exit(main) if $PROGRAM_NAME == __FILE__ 110 | -------------------------------------------------------------------------------- /bin/generate-latest-gce-images: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'json' 5 | 6 | DEFAULT_GCE_PROJECT_ID = 'eco-emissary-99515' 7 | DEFAULT_IMAGE_PREFIXES = 'bastion' 8 | 9 | def main(argv: ARGV) 10 | return usage if argv.include?('-h') || argv.include?('--help') 11 | 12 | project = ENV['GCE_PROJECT_ID'] || DEFAULT_GCE_PROJECT_ID 13 | prefixes = ( 14 | ENV['IMAGE_PREFIXES'] || DEFAULT_IMAGE_PREFIXES 15 | ).split.map(&:strip) 16 | 17 | prefixes.each do |prefix| 18 | name = fetch_latest_image_name(prefix, project) 19 | $stdout.puts %(latest_gce_#{prefix}_image = "#{name}") 20 | end 21 | 22 | 0 23 | end 24 | 25 | def fetch_latest_image_name(prefix, project) 26 | command = %W[ 27 | gcloud compute images list 28 | --project=#{project} 29 | --filter='name~^#{prefix}-' 30 | --format=json 31 | ].join(' ') 32 | images = JSON.parse(`#{command}`) 33 | images.sort! { |a, b| a.fetch('name') <=> b.fetch('name') } 34 | images.last.fetch('selfLink') 35 | end 36 | 37 | def usage 38 | warn <<~EOF 39 | Usage: #{$PROGRAM_NAME} [-h|--help] 40 | 41 | Generate a list of (some of) the latest known GCE images as tfvars. 42 | EOF 43 | 2 44 | end 45 | 46 | exit(main) if $PROGRAM_NAME == __FILE__ 47 | -------------------------------------------------------------------------------- /bin/generate-latest-worker-version: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | 4 | main() { 5 | [[ "${GITHUB_TOKEN}" ]] || { 6 | echo "Missing \${GITHUB_TOKEN}" >&2 7 | exit 1 8 | } 9 | 10 | [[ "${GITHUB_USERNAME}" ]] || { 11 | echo "Missing \${GITHUB_USERNAME}" >&2 12 | exit 1 13 | } 14 | 15 | local auth="${GITHUB_USERNAME}:${GITHUB_TOKEN}" 16 | 17 | echo "latest_travis_worker_version = \"$( 18 | curl -s "https://${auth}@api.github.com/repos/travis-ci/worker/releases" | 19 | jq -r '.[]|.tag_name' | 20 | __semver_max 21 | )\"" 22 | } 23 | 24 | __semver_max() { 25 | local max='v0.0.0' 26 | local int_max='1000000000' 27 | local int_v 28 | 29 | while read -r v; do 30 | int_v="${v/v/}" 31 | int_v="${int_v//./ }" 32 | int_v="$(echo "${int_v}" | xargs printf '1%03d%03d%03d\n')" 33 | if [[ "${int_v}" > "${int_max}" ]]; then 34 | max="${v}" 35 | int_max="${int_v}" 36 | fi 37 | done 38 | 39 | echo "${max}" 40 | } 41 | 42 | main "${@}" 43 | -------------------------------------------------------------------------------- /bin/generate-macstadium-nat-ips: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'ipaddr' 5 | require 'json' 6 | 7 | def main 8 | nets = ENV.fetch( 9 | 'TRAVIS_MACSTADIUM_NAT_NETS', 10 | '207.254.16.35/32 207.254.16.36/30' 11 | ).split(/[ ,]/).map(&:strip) 12 | 13 | addrs = nets.map { |net| IPAddr.new(net).to_range.map(&:to_s) } 14 | addrs.flatten! 15 | addrs.sort! { |a, b| a.split('.').map(&:to_i) <=> b.split('.').map(&:to_i) } 16 | 17 | out = $stdout 18 | out = File.open(ARGV.first, 'w') if ARGV.first 19 | 20 | out.puts( 21 | JSON.pretty_generate( 22 | macstadium_production_nat_addrs: addrs 23 | ) 24 | ) 25 | 26 | 0 27 | end 28 | 29 | exit(main) if $PROGRAM_NAME == __FILE__ 30 | -------------------------------------------------------------------------------- /bin/generate-tfvars: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | 4 | main() { 5 | local top 6 | top="$(git rev-parse --show-toplevel)" 7 | local out="${1}" 8 | if [[ "${out}" ]]; then 9 | exec 1>"${out}" 10 | fi 11 | 12 | if ! "${top}/bin/generate-github-ssh-users"; then 13 | __cleanup "${out}" 14 | exit 1 15 | fi 16 | 17 | if ! "${top}/bin/generate-latest-docker-image-tags"; then 18 | __cleanup "${out}" 19 | exit 1 20 | fi 21 | 22 | if ! "${top}/bin/generate-latest-worker-version"; then 23 | __cleanup "${out}" 24 | exit 1 25 | fi 26 | 27 | if ! "${top}/bin/generate-latest-gce-images"; then 28 | __cleanup "${out}" 29 | exit 1 30 | fi 31 | } 32 | 33 | __cleanup() { 34 | if [[ "${1}" && -f "${1}" ]]; then 35 | rm -f "${1}" 36 | fi 37 | } 38 | 39 | main "$@" 40 | -------------------------------------------------------------------------------- /bin/heroku-dump-shell-config: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'json' 5 | 6 | require_relative '../lib/heroku_client' 7 | 8 | def main(argv: ARGV) 9 | prog = File.basename($PROGRAM_NAME) 10 | 11 | argv.each do |arg| 12 | if /-h|--help|help/.match?(arg) 13 | show_usage 14 | return 0 15 | end 16 | end 17 | 18 | if argv.empty? 19 | show_usage 20 | return 1 21 | end 22 | 23 | unless ENV.key?('HEROKU_API_KEY') 24 | warn 'Missing $HEROKU_API_KEY' 25 | return 2 26 | end 27 | 28 | out = $stdout 29 | outname = argv[1] 30 | unless outname.nil? 31 | warn "#{prog}: writing to #{outname.inspect}" 32 | out = File.open(outname, 'w') 33 | end 34 | 35 | heroku = HerokuClient.new(api_key: ENV.fetch('HEROKU_API_KEY')) 36 | 37 | dumped = heroku.get("/apps/#{argv.fetch(0)}/config-vars").map do |key, value| 38 | "export #{key.upcase}=#{value.to_s.inspect}" 39 | end 40 | 41 | dumped.sort.each do |entry| 42 | out.puts entry 43 | end 44 | 45 | 0 46 | end 47 | 48 | def show_usage 49 | $stdout.puts <<~USAGE 50 | Usage: #{prog} 51 | 52 | Dump Heroku config in shell format for app named 53 | 54 | #{prog} lovely-lil-app 55 | USAGE 56 | end 57 | 58 | def prog 59 | @prog ||= File.basename($PROGRAM_NAME) 60 | end 61 | 62 | exit(main) if $PROGRAM_NAME == __FILE__ 63 | -------------------------------------------------------------------------------- /bin/heroku-wait-deploy-scale: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'optparse' 5 | 6 | require_relative '../lib/heroku_client' 7 | 8 | def main 9 | options = { 10 | repo_slug: '', 11 | heroku_app: '', 12 | ps_scale: [], 13 | version: 'master' 14 | } 15 | 16 | OptionParser.new do |opts| 17 | opts.on( 18 | '-r', '--repo=REPO_SLUG' 19 | ) { |v| options[:repo_slug] = v.strip } 20 | 21 | opts.on( 22 | '-a', '--app=HEROKU_APP' 23 | ) { |v| options[:heroku_app] = v.strip } 24 | 25 | opts.on( 26 | '-p', '--ps-scale=PS_SCALE' 27 | ) { |v| options[:ps_scale] += v.split(/[ ,]/).map(&:strip) } 28 | 29 | opts.on( 30 | '-V', '--deploy-version=VERSION' 31 | ) { |v| options[:version] = v.strip } 32 | end.parse! 33 | 34 | raise 'Missing HEROKU_API_KEY' unless ENV.key?('HEROKU_API_KEY') 35 | 36 | heroku = HerokuClient.new(api_key: ENV.fetch('HEROKU_API_KEY')) 37 | 38 | unless heroku.wait(options.fetch(:heroku_app)) 39 | warn "---> timeout waiting for app=#{options.fetch(:heroku_app)}" 40 | return 1 41 | end 42 | 43 | heroku.deploy( 44 | options.fetch(:repo_slug), 45 | options.fetch(:heroku_app), 46 | options.fetch(:version) 47 | ) 48 | heroku.scale( 49 | options.fetch(:heroku_app), 50 | options.fetch(:ps_scale) 51 | ) 52 | 53 | 0 54 | end 55 | 56 | exit(main) if $PROGRAM_NAME == __FILE__ 57 | -------------------------------------------------------------------------------- /bin/lookup-gce-project: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | def main 5 | env_name = ARGV.fetch(0) 6 | 7 | project = { 8 | 'gce-staging-1' => 'travis-staging-1', 9 | 'gce-staging-net-1' => 'travis-staging-1', 10 | 'gce-production-1' => 'eco-emissary-99515', 11 | 'gce-production-net-1' => 'eco-emissary-99515' 12 | }.fetch(env_name, "travis-ci-prod-#{env_name.split('-').last}") 13 | 14 | $stdout.puts(project) 15 | 16 | 0 17 | end 18 | 19 | exit(main) if $PROGRAM_NAME == __FILE__ 20 | -------------------------------------------------------------------------------- /bin/mac-worker-pool-size: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'optparse' 5 | 6 | options = { 7 | vms_per_host: 7.0, 8 | org_percent: 0.6, 9 | hosts: 36, 10 | org_workers: 4, 11 | com_workers: 4, 12 | staging_workers: 4 13 | } 14 | 15 | OptionParser.new do |opts| 16 | opts.on('--vms-per-host=NUM', Float, 'The maximum number of VMs to run on each host') do |vms_per_host| 17 | options[:vms_per_host] = vms_per_host 18 | end 19 | opts.on('--org-percent=NUM', Float, 'The ratio of worker processors to run for .org jobs vs. .com jobs (between 0.0 and 1.0)') do |org_percent| 20 | options[:org_percent] = org_percent 21 | end 22 | opts.on('--hosts=NUM', Integer, 'The number of hosts in the production cluster of the datacenter') do |hosts| 23 | options[:hosts] = hosts 24 | end 25 | end.parse! 26 | 27 | concurrent_vms = options[:hosts] * options[:vms_per_host] - options[:staging_workers] 28 | puts "Concurrent VMs: #{concurrent_vms.round}" 29 | 30 | org_pool_size = (concurrent_vms * options[:org_percent]) / options[:org_workers] 31 | puts "Org Worker Pool Size: #{org_pool_size.round}" 32 | 33 | com_pool_size = (concurrent_vms * (1.0 - options[:org_percent])) / options[:com_workers] 34 | puts "Com Worker Pool Size: #{com_pool_size.round}" 35 | -------------------------------------------------------------------------------- /bin/nat-conntracker-configure: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'json' 5 | require 'net/http' 6 | require 'net/https' 7 | require 'optparse' 8 | 9 | def main 10 | options = { 11 | app: '', 12 | dst_ignore: [], 13 | heroku_api_hostname: 'api.heroku.com', 14 | heroku_api_key: ENV.fetch('HEROKU_API_KEY', ''), 15 | src_ignore: [] 16 | } 17 | 18 | OptionParser.new do |opts| 19 | opts.on( 20 | '-a', '--app=APP', 21 | 'name of nat-conntracker heroku app' 22 | ) { |v| options[:app] = v.strip } 23 | 24 | opts.on( 25 | '-d', '--dst-ignore=CIDRS', 26 | 'comma-delimited list of destination CIDRs to ignore' 27 | ) { |v| options[:dst_ignore] = v.split(',').map(&:strip) } 28 | 29 | opts.on( 30 | '-s', '--src-ignore=CIDRS', 31 | 'comma-delimited list of source CIDRs to ignore' 32 | ) { |v| options[:src_ignore] = v.split(',').map(&:strip) } 33 | 34 | opts.on( 35 | '--heroku-api-hostname=HOSTNAME', 36 | 'hostname of Heroku API from which to fetch stuff' 37 | ) { |v| options[:heroku_api_hostname] = v.strip } 38 | 39 | opts.on( 40 | '--heroku-api-key=KEY', 41 | 'API key to use with Heroku API' 42 | ) { |v| options[:heroku_api_key] = v.strip } 43 | end.parse! 44 | 45 | expand_private!(options[:dst_ignore]) 46 | expand_private!(options[:src_ignore]) 47 | 48 | unless Array(options[:dst_ignore]).empty? 49 | dst_command = %W[ 50 | trvs redis-cli #{options[:app]} REDIS_URL SADD nat-conntracker:dst-ignore 51 | ] + Array(options[:dst_ignore]) 52 | warn("---> #{dst_command.join(' ')}") 53 | system(*dst_command) 54 | end 55 | 56 | unless Array(options[:src_ignore]).empty? 57 | src_command = %W[ 58 | trvs redis-cli #{options[:app]} REDIS_URL SADD nat-conntracker:src-ignore 59 | ] + Array(options[:src_ignore]) 60 | warn("---> #{src_command.join(' ')}") 61 | system(*src_command) 62 | end 63 | 64 | 0 65 | end 66 | 67 | def expand_private!(cidrs) 68 | cidrs.map! do |cidr| 69 | if cidr == 'private' 70 | PRIVATE_SUBNETS 71 | else 72 | cidr 73 | end 74 | end 75 | cidrs.flatten! 76 | cidrs 77 | end 78 | 79 | PRIVATE_SUBNETS = %w[ 80 | 10.0.0.0/8 81 | 127.0.0.0/8 82 | 169.254.0.0/16 83 | 172.16.0.0/12 84 | 192.0.2.0/24 85 | 192.168.0.0/16 86 | ].freeze 87 | 88 | exit(main) if $PROGRAM_NAME == __FILE__ 89 | -------------------------------------------------------------------------------- /bin/post-flight: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | 4 | main() { 5 | slack_webhook="$(trvs generate-config -n --app terraform-config terraform_common | grep slack_webhook | awk '{print $2}')" 6 | [[ "${slack_webhook}" ]] || { 7 | echo 'slack_webhook could not be retrieved from keychain. Check output of "trvs generate-config -n --app terraform-config terraform_common"' 8 | exit 1 9 | } 10 | 11 | local graphname 12 | graphname="$(basename "$(pwd)")" 13 | 14 | if [[ "${graphname}" != *production* ]] && [[ "${graphname}" != *macstadium-pod* ]]; then 15 | echo "Skipping Slack notification for '${graphname}'" 16 | exit 0 17 | fi 18 | 19 | local formatted_text 20 | formatted_text="$(__format_text "$(ps -o args= "${PPID}")" "${graphname}")" 21 | payload='{ 22 | "channel": "#infra-terraform", 23 | "username": "terraform-config", 24 | "text": "'"${formatted_text}"'", 25 | "icon_emoji": ":terraform:" 26 | }' 27 | 28 | echo "Sending Slack notification for ${graphname}... " 29 | curl -X POST --data-urlencode "payload=${payload}" "${slack_webhook}" 30 | } 31 | 32 | __format_text() { 33 | local cmd="${1}" 34 | local graphname="${2}" 35 | local user="${SLACK_USER:-$USER}" 36 | 37 | cat </dev/null 61 | git diff --name-only 62 | git diff --cached --name-only 63 | popd &>/dev/null 64 | } 65 | 66 | main "${@}" 67 | -------------------------------------------------------------------------------- /bin/set-k8s-context: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | name=$1 4 | 5 | host=$(terraform output cluster_host) 6 | cluster_ca_certificate=$(terraform output cluster_ca_certificate) 7 | client_certificate=$(terraform output client_certificate) 8 | client_key=$(terraform output client_key) 9 | 10 | echo "$cluster_ca_certificate" | base64 --decode >/tmp/k8s_ca 11 | kubectl config set-cluster "$name" --certificate-authority=/tmp/k8s_ca --server="$host" --embed-certs 12 | rm /tmp/k8s_ca 13 | 14 | echo "$client_certificate" | base64 --decode >/tmp/k8s_cert 15 | echo "$client_key" | base64 --decode >/tmp/k8s_key 16 | kubectl config set-credentials "$name" --client-certificate=/tmp/k8s_cert --client-key=/tmp/k8s_key --embed-certs 17 | rm /tmp/k8s_cert 18 | rm /tmp/k8s_key 19 | 20 | # Yo dawg, I heard you like "$name" 21 | kubectl config set-context "$name" --cluster="$name" --user="$name" --namespace="$name" 22 | -------------------------------------------------------------------------------- /bin/show-current-docker-images: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | 4 | main() { 5 | terraform show | 6 | awk '/^export.+DOCKER_IMAGE/ { 7 | gsub(/"/, "", $2); 8 | gsub(/=/, " ", $2); 9 | sub(/TRAVIS_WORKER_DOCKER_IMAGE_/, "", $2); 10 | print $2 " " $3 11 | }' | 12 | sed 's/ *$//' | 13 | tr '[:upper:]' '[:lower:]' | 14 | LC_ALL=C sort | 15 | uniq 16 | } 17 | 18 | main "$@" 19 | -------------------------------------------------------------------------------- /bin/show-proposed-docker-images: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | 4 | main() { 5 | local tfvars="${1}" 6 | local top 7 | top="$(git rev-parse --show-toplevel)" 8 | 9 | "${top}/bin/show-current-docker-images" | while read -r line; do 10 | local line_parts=(${line}) 11 | local lang="${line_parts[0]}" 12 | local image="${line_parts[1]}" 13 | local stack 14 | stack="$(echo "${image}" | sed 's,.*travisci/ci-,,;s,:.*,,')" 15 | awk "/^latest_docker_image_${stack}/ { 16 | gsub(/\"/, \"\", \$3); 17 | print \"${lang} \" \$3 18 | }" "${tfvars}" | 19 | sed 's/ *$//' 20 | done 21 | } 22 | 23 | main "$@" 24 | -------------------------------------------------------------------------------- /bin/tfplandiff: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | def main 5 | tfplan_filename = ARGV.first 6 | raise 'Missing {tfplan} input as first argument' if tfplan_filename.nil? 7 | 8 | differ = ENV.fetch('TFPLANDIFF_DIFFER', 'diff') 9 | tfversion = ENV.fetch('PROD_TF_VERSION', '???') 10 | 11 | require 'base64' 12 | require 'json' 13 | require 'tmpdir' 14 | require 'yaml' 15 | 16 | tfplan2json_command = "tfplan2json #{tfplan_filename}" 17 | tfplan2json_command = "tfplan2json.v11 <#{tfplan_filename}" if tfversion < 'v0.12' 18 | 19 | tfplan = JSON.parse(`#{tfplan2json_command}`) 20 | 21 | trunc_filename = tfplan_filename.sub(Dir.pwd, '.') 22 | $stdout.puts <<~PREAMBLE 23 | Terraform #{tfplan['TerraformVersion']} plandiff from #{trunc_filename} 24 | 25 | Backend: #{tfplan['Backend']['type']} 26 | Destroy?: #{tfplan['Destroy']} 27 | State: 28 | version=#{tfplan['State']['version']} 29 | terraform_version=#{tfplan['State']['terraform_version']} 30 | serial=#{tfplan['State']['serial']} 31 | lineage=#{tfplan['State']['lineage']} 32 | 33 | PREAMBLE 34 | 35 | tfplan.fetch('Diff').fetch('Modules').each do |mod| 36 | mod.fetch('Resources').each do |n, v| 37 | $stdout.puts(gen_resource_diff(n, v, differ: differ)) 38 | end 39 | end 40 | 41 | 0 42 | end 43 | 44 | def gen_resource_diff(resource_name, definition, differ: 'diff') 45 | out = [] 46 | 47 | Dir.mktmpdir(%w[tfplandiff- -tmp]) do |tmpdir| 48 | (definition.fetch('Attributes') || []).each do |attr_name, attr_def| 49 | attr_def = expand_attribute(attr_name, attr_def) 50 | 51 | a_name = File.join(tmpdir, "a-#{attr_name}") 52 | b_name = File.join(tmpdir, "b-#{attr_name}") 53 | 54 | File.write(a_name, attr_def.fetch('Old') + "\n") 55 | File.write(b_name, attr_def.fetch('New') + "\n") 56 | 57 | diff_command = %W[ 58 | #{differ} -U 5 59 | --label a/#{resource_name}/#{attr_name} 60 | #{a_name} 61 | --label b/#{resource_name}/#{attr_name} 62 | #{b_name} 63 | ] 64 | diff_bytes = `#{diff_command.join(' ')}`.chomp 65 | 66 | out << (diff_bytes + "\n") unless diff_bytes.strip.empty? 67 | end 68 | end 69 | 70 | out.join("\n") 71 | end 72 | 73 | def expand_attribute(attr_name, attr_def) 74 | ATTRIBUTE_EXPANSIONS.each do |name_match, expander| 75 | return expander.call(attr_def) if attr_name =~ name_match 76 | end 77 | attr_def 78 | end 79 | 80 | def expand_user_data(attr_def) 81 | expanded = Marshal.load(Marshal.dump(attr_def)) 82 | 83 | %w[Old New].each do |key| 84 | value = attr_def.fetch(key) 85 | 86 | begin 87 | loaded_yaml = YAML.safe_load(value) 88 | 89 | unless loaded_yaml.respond_to?(:key?) && loaded_yaml.key?('write_files') 90 | expanded[key] = value 91 | next 92 | end 93 | 94 | Array(loaded_yaml['write_files']).each_with_index do |filedef, i| 95 | next unless filedef.fetch('encoding') == 'b64' 96 | 97 | filedef['content'] = Base64.decode64(filedef['content']) 98 | loaded_yaml['write_files'][i] = filedef 99 | end 100 | 101 | expanded[key] = YAML.dump(loaded_yaml) 102 | rescue StandardError => e 103 | warn e 104 | expanded[key] = value 105 | end 106 | end 107 | 108 | expanded 109 | end 110 | 111 | ATTRIBUTE_EXPANSIONS = { 112 | /user_data/i => method(:expand_user_data), 113 | /^metadata\.user-data$/i => method(:expand_user_data), 114 | /^content$/i => method(:expand_user_data) 115 | }.freeze 116 | 117 | exit(main) if $PROGRAM_NAME == __FILE__ 118 | -------------------------------------------------------------------------------- /bin/travis-env-set-docker-config-secrets: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ruby 2 | # frozen_string_literal: true 3 | 4 | require 'optparse' 5 | 6 | def main 7 | options = { 8 | client_config_url_base64: '', 9 | docker_host: '', 10 | repository: '' 11 | } 12 | 13 | OptionParser.new do |opts| 14 | opts.on('--repository=REPOSITORY') do |v| 15 | options[:repository] = v.strip 16 | end 17 | 18 | opts.on('--docker-host=DOCKER_HOST') do |v| 19 | options[:docker_host] = v.strip 20 | end 21 | 22 | opts.on('--client-config-url-base64=CLIENT_CONFIG_URL_BASE64') do |v| 23 | options[:client_config_url_base64] = v.strip 24 | end 25 | end.parse! 26 | 27 | options.clone.each do |key, value| 28 | raise "missing value for --#{key.tr('_', '-')}" if value.strip.empty? 29 | end 30 | 31 | { 32 | 'DOCKER_CLIENT_CONFIG_URL' => options[:client_config_url_base64], 33 | 'DOCKER_HOST' => options[:docker_host] 34 | }.each do |key, value| 35 | next if system( 36 | 'travis', 'env', 'set', key, value, '--repo', options[:repository] 37 | ) 38 | 39 | raise "failed to set #{key}" 40 | end 41 | 42 | 0 43 | end 44 | 45 | exit(main) if $PROGRAM_NAME == __FILE__ 46 | -------------------------------------------------------------------------------- /build-caching-production-1/Makefile: -------------------------------------------------------------------------------- 1 | AMQP_URL_COM_VARNAME := CLOUDAMQP_URL 2 | AMQP_URL_ORG_VARNAME := CLOUDAMQP_GRAY_URL 3 | ENV_SHORT := production 4 | INFRA := build-caching 5 | TOP := $(shell git rev-parse --show-toplevel) 6 | 7 | include $(TOP)/terraform-common.mk 8 | include $(TOP)/trvs.mk 9 | 10 | .config: $(ENV_NAME).auto.tfvars $(TRVS_INFRA_ENV_TFVARS) 11 | 12 | $(TRVS_INFRA_ENV_TFVARS): 13 | trvs generate-config -f json -a terraform-config -e terraform_common -o $@ 14 | -------------------------------------------------------------------------------- /build-caching-production-1/main.tf: -------------------------------------------------------------------------------- 1 | variable "env" { 2 | default = "production" 3 | } 4 | 5 | variable "github_users" {} 6 | 7 | variable "index" { 8 | default = 1 9 | } 10 | 11 | variable "librato_email" {} 12 | variable "librato_token" {} 13 | 14 | variable "project" { 15 | default = "eco-emissary-99515" 16 | } 17 | 18 | variable "region" { 19 | default = "us-central1" 20 | } 21 | 22 | variable "syslog_address_com" {} 23 | 24 | terraform { 25 | backend "s3" { 26 | bucket = "travis-terraform-state" 27 | key = "terraform-config/build-caching-production-1.tfstate" 28 | region = "us-east-1" 29 | encrypt = "true" 30 | dynamodb_table = "travis-terraform-state" 31 | } 32 | } 33 | 34 | provider "google" { 35 | project = "${var.project}" 36 | region = "${var.region}" 37 | } 38 | 39 | provider "google-beta" { 40 | project = "${var.project}" 41 | region = "${var.region}" 42 | } 43 | 44 | provider "aws" {} 45 | 46 | module "gce_squignix" { 47 | source = "../modules/gce_squignix" 48 | 49 | env = "${var.env}" 50 | github_users = "${var.github_users}" 51 | index = "${var.index}" 52 | librato_email = "${var.librato_email}" 53 | librato_token = "${var.librato_token}" 54 | region = "${var.region}" 55 | syslog_address = "${var.syslog_address_com}" 56 | } 57 | -------------------------------------------------------------------------------- /build-caching-production-2/Makefile: -------------------------------------------------------------------------------- 1 | AMQP_URL_COM_VARNAME := CLOUDAMQP_URL 2 | AMQP_URL_ORG_VARNAME := CLOUDAMQP_GRAY_URL 3 | ENV_SHORT := production 4 | INFRA := build-caching 5 | TOP := $(shell git rev-parse --show-toplevel) 6 | 7 | include $(TOP)/terraform-common.mk 8 | include $(TOP)/trvs.mk 9 | 10 | .config: $(ENV_NAME).auto.tfvars $(TRVS_INFRA_ENV_TFVARS) 11 | 12 | $(TRVS_INFRA_ENV_TFVARS): 13 | trvs generate-config -f json -a terraform-config -e terraform_common -o $@ 14 | -------------------------------------------------------------------------------- /build-caching-production-2/main.tf: -------------------------------------------------------------------------------- 1 | variable "env" { 2 | default = "production" 3 | } 4 | 5 | variable "github_users" {} 6 | 7 | variable "index" { 8 | default = 2 9 | } 10 | 11 | variable "librato_email" {} 12 | variable "librato_token" {} 13 | 14 | variable "project" { 15 | default = "travis-ci-prod-2" 16 | } 17 | 18 | variable "region" { 19 | default = "us-central1" 20 | } 21 | 22 | variable "syslog_address_com" {} 23 | 24 | terraform { 25 | backend "s3" { 26 | bucket = "travis-terraform-state" 27 | key = "terraform-config/build-caching-production-2.tfstate" 28 | region = "us-east-1" 29 | encrypt = "true" 30 | dynamodb_table = "travis-terraform-state" 31 | } 32 | } 33 | 34 | provider "google" { 35 | project = "${var.project}" 36 | region = "${var.region}" 37 | } 38 | 39 | provider "google-beta" { 40 | project = "${var.project}" 41 | region = "${var.region}" 42 | } 43 | 44 | provider "aws" {} 45 | 46 | module "gce_squignix" { 47 | source = "../modules/gce_squignix" 48 | 49 | env = "${var.env}" 50 | github_users = "${var.github_users}" 51 | index = "${var.index}" 52 | librato_email = "${var.librato_email}" 53 | librato_token = "${var.librato_token}" 54 | region = "${var.region}" 55 | syslog_address = "${var.syslog_address_com}" 56 | } 57 | -------------------------------------------------------------------------------- /build-caching-production-3/Makefile: -------------------------------------------------------------------------------- 1 | AMQP_URL_COM_VARNAME := CLOUDAMQP_URL 2 | AMQP_URL_ORG_VARNAME := CLOUDAMQP_GRAY_URL 3 | ENV_SHORT := production 4 | INFRA := build-caching 5 | TOP := $(shell git rev-parse --show-toplevel) 6 | 7 | include $(TOP)/terraform-common.mk 8 | include $(TOP)/trvs.mk 9 | 10 | .config: $(ENV_NAME).auto.tfvars $(TRVS_INFRA_ENV_TFVARS) 11 | 12 | $(TRVS_INFRA_ENV_TFVARS): 13 | trvs generate-config -f json -a terraform-config -e terraform_common -o $@ 14 | 15 | plan: 16 | @echo DISABLED OK 17 | 18 | apply: 19 | @echo NO REALLY DONT DO THIS 20 | -------------------------------------------------------------------------------- /build-caching-production-3/main.tf: -------------------------------------------------------------------------------- 1 | variable "env" { 2 | default = "production" 3 | } 4 | 5 | variable "github_users" {} 6 | 7 | variable "index" { 8 | default = 3 9 | } 10 | 11 | variable "librato_email" {} 12 | variable "librato_token" {} 13 | 14 | variable "project" { 15 | default = "travis-ci-prod-3" 16 | } 17 | 18 | variable "region" { 19 | default = "us-central1" 20 | } 21 | 22 | variable "syslog_address_com" {} 23 | 24 | terraform { 25 | backend "s3" { 26 | bucket = "travis-terraform-state" 27 | key = "terraform-config/build-caching-production-3.tfstate" 28 | region = "us-east-1" 29 | encrypt = "true" 30 | dynamodb_table = "travis-terraform-state" 31 | } 32 | } 33 | 34 | provider "google" { 35 | project = "${var.project}" 36 | region = "${var.region}" 37 | } 38 | 39 | provider "google-beta" { 40 | project = "${var.project}" 41 | region = "${var.region}" 42 | } 43 | 44 | provider "aws" {} 45 | 46 | module "gce_squignix" { 47 | source = "../modules/gce_squignix" 48 | 49 | env = "${var.env}" 50 | github_users = "${var.github_users}" 51 | index = "${var.index}" 52 | librato_email = "${var.librato_email}" 53 | librato_token = "${var.librato_token}" 54 | region = "${var.region}" 55 | syslog_address = "${var.syslog_address_com}" 56 | } 57 | -------------------------------------------------------------------------------- /build-caching-staging-1/Makefile: -------------------------------------------------------------------------------- 1 | AMQP_URL_COM_VARNAME := CLOUDAMQP_URL 2 | AMQP_URL_ORG_VARNAME := CLOUDAMQP_GRAY_URL 3 | ENV_SHORT := staging 4 | INFRA := build-caching 5 | TOP := $(shell git rev-parse --show-toplevel) 6 | 7 | include $(TOP)/terraform-common.mk 8 | include $(TOP)/trvs.mk 9 | 10 | .config: $(ENV_NAME).auto.tfvars $(TRVS_INFRA_ENV_TFVARS) 11 | 12 | $(TRVS_INFRA_ENV_TFVARS): 13 | trvs generate-config -f json -a terraform-config -e terraform_common -o $@ 14 | -------------------------------------------------------------------------------- /build-caching-staging-1/main.tf: -------------------------------------------------------------------------------- 1 | variable "env" { 2 | default = "staging" 3 | } 4 | 5 | variable "github_users" {} 6 | 7 | variable "index" { 8 | default = 1 9 | } 10 | 11 | variable "librato_email" {} 12 | variable "librato_token" {} 13 | 14 | variable "project" { 15 | default = "travis-staging-1" 16 | } 17 | 18 | variable "region" { 19 | default = "us-central1" 20 | } 21 | 22 | variable "syslog_address_com" {} 23 | 24 | terraform { 25 | backend "s3" { 26 | bucket = "travis-terraform-state" 27 | key = "terraform-config/build-caching-staging-1.tfstate" 28 | region = "us-east-1" 29 | encrypt = "true" 30 | dynamodb_table = "travis-terraform-state" 31 | } 32 | } 33 | 34 | provider "google" { 35 | project = "${var.project}" 36 | region = "${var.region}" 37 | } 38 | 39 | provider "google-beta" { 40 | project = "${var.project}" 41 | region = "${var.region}" 42 | } 43 | 44 | provider "aws" {} 45 | 46 | module "gce_squignix" { 47 | source = "../modules/gce_squignix" 48 | 49 | cache_size_mb = 51200 50 | env = "${var.env}" 51 | github_users = "${var.github_users}" 52 | index = "${var.index}" 53 | librato_email = "${var.librato_email}" 54 | librato_token = "${var.librato_token}" 55 | region = "${var.region}" 56 | syslog_address = "${var.syslog_address_com}" 57 | } 58 | 59 | resource "null_resource" "build_cache_config" { 60 | triggers { 61 | records = "${module.gce_squignix.dns_fqdn}" 62 | } 63 | 64 | provisioner "local-exec" { 65 | command = <$@ 11 | 12 | $(ENV_NAME).auto.tfvars: 13 | @echo "{}" >$@ 14 | -------------------------------------------------------------------------------- /chirp-production-1/main.tf: -------------------------------------------------------------------------------- 1 | variable "chirp_artifacts_bucket_name" { 2 | default = "travis-ci-chirp-artifacts" 3 | } 4 | 5 | variable "chirp_com_production_repo" { 6 | default = "travis-infrastructure/chirp-com-production" 7 | } 8 | 9 | variable "chirp_org_production_repo" { 10 | default = "travis-repos/chirp-org-production" 11 | } 12 | 13 | variable "chirp_repo" { 14 | default = "travis-ci/chirp" 15 | } 16 | 17 | variable "env" { 18 | default = "production" 19 | } 20 | 21 | variable "index" { 22 | default = 1 23 | } 24 | 25 | variable "region" { 26 | default = "us-east-1" 27 | } 28 | 29 | terraform { 30 | backend "s3" { 31 | bucket = "travis-terraform-state" 32 | key = "terraform-config/chirp-production-1.tfstate" 33 | region = "us-east-1" 34 | encrypt = "true" 35 | dynamodb_table = "travis-terraform-state" 36 | } 37 | } 38 | 39 | provider "aws" {} 40 | 41 | resource "aws_s3_bucket" "chirp_artifacts" { 42 | bucket = "${var.chirp_artifacts_bucket_name}" 43 | } 44 | 45 | resource "aws_iam_user" "chirp" { 46 | name = "chirp-${var.env}-${var.index}" 47 | } 48 | 49 | resource "aws_iam_access_key" "chirp" { 50 | user = "${aws_iam_user.chirp.name}" 51 | depends_on = ["aws_iam_user.chirp"] 52 | } 53 | 54 | resource "aws_iam_user_policy" "chirp_actions" { 55 | name = "chirp_actions_${var.env}_${var.index}" 56 | user = "${aws_iam_user.chirp.name}" 57 | 58 | policy = <$@ 15 | 16 | $(ENV_NAME).auto.tfvars: 17 | $(TOP)/bin/generate-macstadium-nat-ips $@ 18 | -------------------------------------------------------------------------------- /gce-production-1/Makefile: -------------------------------------------------------------------------------- 1 | AMQP_URL_COM_VARNAME := CLOUDAMQP_URL 2 | AMQP_URL_ORG_VARNAME := CLOUDAMQP_GRAY_URL 3 | CONTEXT := $(shell terraform output context) 4 | 5 | include $(shell git rev-parse --show-toplevel)/gce.mk 6 | 7 | .PHONY: context 8 | context: 9 | $(TOP)/bin/set-k8s-context ${CONTEXT} 10 | -------------------------------------------------------------------------------- /gce-production-1/service_accounts.tf: -------------------------------------------------------------------------------- 1 | data "terraform_remote_state" "staging_1" { 2 | backend = "s3" 3 | 4 | config { 5 | bucket = "travis-terraform-state" 6 | key = "terraform-config/gce-staging-1.tfstate" 7 | region = "us-east-1" 8 | dynamodb_table = "travis-terraform-state" 9 | } 10 | } 11 | 12 | data "terraform_remote_state" "production_2" { 13 | backend = "s3" 14 | 15 | config { 16 | bucket = "travis-terraform-state" 17 | key = "terraform-config/gce-production-2.tfstate" 18 | region = "us-east-1" 19 | dynamodb_table = "travis-terraform-state" 20 | } 21 | } 22 | 23 | data "terraform_remote_state" "production_3" { 24 | backend = "s3" 25 | 26 | config { 27 | bucket = "travis-terraform-state" 28 | key = "terraform-config/gce-production-3.tfstate" 29 | region = "us-east-1" 30 | dynamodb_table = "travis-terraform-state" 31 | } 32 | } 33 | 34 | resource "google_project_iam_member" "staging_1_workers" { 35 | count = "${length(data.terraform_remote_state.staging_1.workers_service_account_emails)}" 36 | project = "${var.project}" 37 | role = "roles/compute.imageUser" 38 | member = "serviceAccount:${element(data.terraform_remote_state.staging_1.workers_service_account_emails, count.index)}" 39 | } 40 | 41 | resource "google_project_iam_member" "production_2_workers" { 42 | count = "${length(data.terraform_remote_state.production_2.workers_service_account_emails)}" 43 | project = "${var.project}" 44 | role = "roles/compute.imageUser" 45 | member = "serviceAccount:${element(data.terraform_remote_state.production_2.workers_service_account_emails, count.index)}" 46 | } 47 | 48 | resource "google_project_iam_member" "production_3_workers" { 49 | count = "${length(data.terraform_remote_state.production_3.workers_service_account_emails)}" 50 | project = "${var.project}" 51 | role = "roles/compute.imageUser" 52 | member = "serviceAccount:${element(data.terraform_remote_state.production_3.workers_service_account_emails, count.index)}" 53 | } 54 | -------------------------------------------------------------------------------- /gce-production-2/Makefile: -------------------------------------------------------------------------------- 1 | AMQP_URL_COM_VARNAME := CLOUDAMQP_URL 2 | AMQP_URL_ORG_VARNAME := CLOUDAMQP_GRAY_URL 3 | CONTEXT := $(shell terraform output context) 4 | 5 | include $(shell git rev-parse --show-toplevel)/gce.mk 6 | 7 | .PHONY: context 8 | context: 9 | $(TOP)/bin/set-k8s-context ${CONTEXT} 10 | -------------------------------------------------------------------------------- /gce-production-2/main.tf: -------------------------------------------------------------------------------- 1 | variable "env" { 2 | default = "production" 3 | } 4 | 5 | variable "index" { 6 | default = 2 7 | } 8 | 9 | variable "k8s_default_namespace" { 10 | default = "gce-production-2" 11 | } 12 | 13 | variable "project" { 14 | default = "travis-ci-prod-2" 15 | } 16 | 17 | terraform { 18 | backend "s3" { 19 | bucket = "travis-terraform-state" 20 | key = "terraform-config/gce-production-2.tfstate" 21 | region = "us-east-1" 22 | encrypt = "true" 23 | dynamodb_table = "travis-terraform-state" 24 | } 25 | } 26 | 27 | provider "google" { 28 | project = "${var.project}" 29 | region = "us-central1" 30 | } 31 | 32 | provider "aws" {} 33 | 34 | provider "kubernetes" { 35 | config_context = "${module.gke_cluster_1.context}" 36 | } 37 | 38 | data "terraform_remote_state" "vpc" { 39 | backend = "s3" 40 | 41 | config { 42 | bucket = "travis-terraform-state" 43 | key = "terraform-config/gce-production-net-2.tfstate" 44 | region = "us-east-1" 45 | dynamodb_table = "travis-terraform-state" 46 | } 47 | } 48 | 49 | module "aws_iam_user_s3_com" { 50 | source = "../modules/aws_iam_user_s3" 51 | 52 | iam_user_name = "worker-gce-${var.env}-${var.index}-com" 53 | s3_bucket_name = "build-trace.travis-ci.com" 54 | } 55 | 56 | module "aws_iam_user_s3_org" { 57 | source = "../modules/aws_iam_user_s3" 58 | 59 | iam_user_name = "worker-gce-${var.env}-${var.index}-org" 60 | s3_bucket_name = "build-trace.travis-ci.org" 61 | } 62 | 63 | module "gce_worker_group" { 64 | source = "../modules/gce_worker_group" 65 | 66 | aws_com_id = "${module.aws_iam_user_s3_com.id}" 67 | aws_com_secret = "${module.aws_iam_user_s3_com.secret}" 68 | aws_com_trace_bucket = "${module.aws_iam_user_s3_com.bucket}" 69 | aws_org_id = "${module.aws_iam_user_s3_org.id}" 70 | aws_org_secret = "${module.aws_iam_user_s3_org.secret}" 71 | aws_org_trace_bucket = "${module.aws_iam_user_s3_org.bucket}" 72 | env = "${var.env}" 73 | index = "${var.index}" 74 | k8s_default_namespace = "${var.k8s_default_namespace}" 75 | project = "${var.project}" 76 | region = "us-central1" 77 | } 78 | 79 | module "gke_cluster_1" { 80 | source = "../modules/gce_kubernetes" 81 | 82 | cluster_name = "gce-production-2" 83 | default_namespace = "${var.k8s_default_namespace}" 84 | network = "${data.terraform_remote_state.vpc.gce_network_main}" 85 | pool_name = "default" 86 | project = "${var.project}" 87 | region = "us-central1" 88 | subnetwork = "${data.terraform_remote_state.vpc.gce_subnetwork_gke_cluster}" 89 | 90 | node_locations = ["us-central1-b", "us-central1-c"] 91 | node_pool_tags = ["gce-workers"] 92 | min_node_count = 4 93 | max_node_count = 50 94 | machine_type = "c2-standard-4" 95 | } 96 | 97 | // Use these outputs to be able to easily set up a context in kubectl on the local machine. 98 | output "cluster_host" { 99 | value = "${module.gke_cluster_1.host}" 100 | } 101 | 102 | output "cluster_ca_certificate" { 103 | value = "${module.gke_cluster_1.cluster_ca_certificate}" 104 | sensitive = true 105 | } 106 | 107 | output "client_certificate" { 108 | value = "${module.gke_cluster_1.client_certificate}" 109 | sensitive = true 110 | } 111 | 112 | output "client_key" { 113 | value = "${module.gke_cluster_1.client_key}" 114 | sensitive = true 115 | } 116 | 117 | output "context" { 118 | value = "${module.gke_cluster_1.context}" 119 | } 120 | 121 | output "workers_service_account_emails" { 122 | value = ["${module.gce_worker_group.workers_service_account_emails}"] 123 | } 124 | -------------------------------------------------------------------------------- /gce-production-3/Makefile: -------------------------------------------------------------------------------- 1 | AMQP_URL_COM_VARNAME := CLOUDAMQP_URL 2 | AMQP_URL_ORG_VARNAME := CLOUDAMQP_GRAY_URL 3 | CONTEXT := $(shell terraform output context) 4 | 5 | include $(shell git rev-parse --show-toplevel)/gce.mk 6 | 7 | .PHONY: context 8 | context: 9 | $(TOP)/bin/set-k8s-context ${CONTEXT} 10 | -------------------------------------------------------------------------------- /gce-production-3/main.tf: -------------------------------------------------------------------------------- 1 | variable "env" { 2 | default = "production" 3 | } 4 | 5 | variable "index" { 6 | default = 3 7 | } 8 | 9 | variable "k8s_default_namespace" { 10 | default = "gce-production-3" 11 | } 12 | 13 | variable "project" { 14 | default = "travis-ci-prod-3" 15 | } 16 | 17 | terraform { 18 | backend "s3" { 19 | bucket = "travis-terraform-state" 20 | key = "terraform-config/gce-production-3.tfstate" 21 | region = "us-east-1" 22 | encrypt = "true" 23 | dynamodb_table = "travis-terraform-state" 24 | } 25 | } 26 | 27 | provider "google" { 28 | project = "${var.project}" 29 | region = "us-central1" 30 | } 31 | 32 | provider "aws" {} 33 | 34 | provider "kubernetes" { 35 | config_context = "${module.gke_cluster_1.context}" 36 | } 37 | 38 | data "terraform_remote_state" "vpc" { 39 | backend = "s3" 40 | 41 | config { 42 | bucket = "travis-terraform-state" 43 | key = "terraform-config/gce-production-net-3.tfstate" 44 | region = "us-east-1" 45 | dynamodb_table = "travis-terraform-state" 46 | } 47 | } 48 | 49 | module "aws_iam_user_s3_com" { 50 | source = "../modules/aws_iam_user_s3" 51 | 52 | iam_user_name = "worker-gce-${var.env}-${var.index}-com" 53 | s3_bucket_name = "build-trace.travis-ci.com" 54 | } 55 | 56 | module "aws_iam_user_s3_org" { 57 | source = "../modules/aws_iam_user_s3" 58 | 59 | iam_user_name = "worker-gce-${var.env}-${var.index}-org" 60 | s3_bucket_name = "build-trace.travis-ci.org" 61 | } 62 | 63 | module "gce_worker_group" { 64 | source = "../modules/gce_worker_group" 65 | 66 | aws_com_id = "${module.aws_iam_user_s3_com.id}" 67 | aws_com_secret = "${module.aws_iam_user_s3_com.secret}" 68 | aws_com_trace_bucket = "${module.aws_iam_user_s3_com.bucket}" 69 | aws_org_id = "${module.aws_iam_user_s3_org.id}" 70 | aws_org_secret = "${module.aws_iam_user_s3_org.secret}" 71 | aws_org_trace_bucket = "${module.aws_iam_user_s3_org.bucket}" 72 | env = "${var.env}" 73 | index = "${var.index}" 74 | k8s_default_namespace = "${var.k8s_default_namespace}" 75 | project = "${var.project}" 76 | region = "us-central1" 77 | } 78 | 79 | module "gke_cluster_1" { 80 | source = "../modules/gce_kubernetes" 81 | 82 | cluster_name = "gce-production-3" 83 | default_namespace = "${var.k8s_default_namespace}" 84 | network = "${data.terraform_remote_state.vpc.gce_network_main}" 85 | pool_name = "default" 86 | project = "${var.project}" 87 | region = "us-central1" 88 | subnetwork = "${data.terraform_remote_state.vpc.gce_subnetwork_gke_cluster}" 89 | 90 | node_locations = ["us-central1-b", "us-central1-c"] 91 | node_pool_tags = ["gce-workers"] 92 | min_node_count = 4 93 | max_node_count = 50 94 | machine_type = "c2-standard-4" 95 | } 96 | 97 | // Use these outputs to be able to easily set up a context in kubectl on the local machine. 98 | output "cluster_host" { 99 | value = "${module.gke_cluster_1.host}" 100 | } 101 | 102 | output "cluster_ca_certificate" { 103 | value = "${module.gke_cluster_1.cluster_ca_certificate}" 104 | sensitive = true 105 | } 106 | 107 | output "client_certificate" { 108 | value = "${module.gke_cluster_1.client_certificate}" 109 | sensitive = true 110 | } 111 | 112 | output "client_key" { 113 | value = "${module.gke_cluster_1.client_key}" 114 | sensitive = true 115 | } 116 | 117 | output "context" { 118 | value = "${module.gke_cluster_1.context}" 119 | } 120 | 121 | output "workers_service_account_emails" { 122 | value = ["${module.gce_worker_group.workers_service_account_emails}"] 123 | } 124 | -------------------------------------------------------------------------------- /gce-production-net-1/Makefile: -------------------------------------------------------------------------------- 1 | AMQP_URL_COM_VARNAME := CLOUDAMQP_URL 2 | AMQP_URL_ORG_VARNAME := CLOUDAMQP_GRAY_URL 3 | ENV_SHORT := production 4 | 5 | include $(shell git rev-parse --show-toplevel)/gce.mk 6 | -------------------------------------------------------------------------------- /gce-production-net-1/nat-conntracker.env: -------------------------------------------------------------------------------- 1 | export NAT_CONNTRACKER_CONNTRACK_ARGS=-E+conntrack+--output+xml+--event-mask+NEW+--buffer-size+25000000 2 | export NAT_CONNTRACKER_CONN_THRESHOLD=100 3 | export NAT_CONNTRACKER_GIT_REF=master 4 | export NAT_CONNTRACKER_TOP_N=100 5 | -------------------------------------------------------------------------------- /gce-production-net-2/Makefile: -------------------------------------------------------------------------------- 1 | AMQP_URL_COM_VARNAME := CLOUDAMQP_URL 2 | AMQP_URL_ORG_VARNAME := CLOUDAMQP_GRAY_URL 3 | ENV_SHORT := production 4 | 5 | include $(shell git rev-parse --show-toplevel)/gce.mk 6 | -------------------------------------------------------------------------------- /gce-production-net-2/main.tf: -------------------------------------------------------------------------------- 1 | variable "deny_target_ip_ranges" { 2 | default = [] 3 | } 4 | 5 | variable "env" { 6 | default = "production" 7 | } 8 | 9 | variable "gce_bastion_image" { 10 | default = "https://www.googleapis.com/compute/v1/projects/eco-emissary-99515/global/images/bastion-1519767738-74530dd" 11 | } 12 | 13 | variable "gce_heroku_org" {} 14 | 15 | variable "gce_nat_image" { 16 | default = "https://www.googleapis.com/compute/v1/projects/eco-emissary-99515/global/images/tfw-1520467760-573cd26" 17 | } 18 | 19 | variable "github_users" {} 20 | 21 | variable "index" { 22 | default = 2 23 | } 24 | 25 | variable "nat_conntracker_src_ignore" { 26 | type = "list" 27 | } 28 | 29 | variable "nat_conntracker_dst_ignore" { 30 | type = "list" 31 | } 32 | 33 | variable "project" { 34 | default = "travis-ci-prod-2" 35 | } 36 | 37 | variable "region" { 38 | default = "us-central1" 39 | } 40 | 41 | variable "rigaer_strasse_8_ipv4" {} 42 | variable "syslog_address_com" {} 43 | variable "syslog_address_org" {} 44 | 45 | variable "travisci_net_external_zone_id" { 46 | default = "Z2RI61YP4UWSIO" 47 | } 48 | 49 | terraform { 50 | backend "s3" { 51 | bucket = "travis-terraform-state" 52 | key = "terraform-config/gce-production-net-2.tfstate" 53 | region = "us-east-1" 54 | encrypt = "true" 55 | dynamodb_table = "travis-terraform-state" 56 | } 57 | } 58 | 59 | provider "google-beta" { 60 | project = "${var.project}" 61 | region = "${var.region}" 62 | } 63 | 64 | provider "aws" {} 65 | provider "heroku" {} 66 | 67 | module "gce_net" { 68 | source = "../modules/gce_net_workers" 69 | 70 | bastion_config = "${file("config/bastion.env")}" 71 | bastion_image = "${var.gce_bastion_image}" 72 | deny_target_ip_ranges = ["${var.deny_target_ip_ranges}"] 73 | env = "${var.env}" 74 | 75 | github_users = "${var.github_users}" 76 | heroku_org = "${var.gce_heroku_org}" 77 | index = "${var.index}" 78 | nat_config = "${file("config/nat.env")}" 79 | nat_conntracker_config = "${file("nat-conntracker.env")}" 80 | nat_conntracker_dst_ignore = ["${var.nat_conntracker_dst_ignore}"] 81 | nat_conntracker_src_ignore = ["${var.nat_conntracker_src_ignore}"] 82 | nat_count_per_zone = 2 83 | nat_image = "${var.gce_nat_image}" 84 | nat_machine_type = "n1-standard-4" 85 | project = "${var.project}" 86 | public_subnet_cidr_range = "10.10.1.0/24" 87 | rigaer_strasse_8_ipv4 = "${var.rigaer_strasse_8_ipv4}" 88 | syslog_address = "${var.syslog_address_com}" 89 | travisci_net_external_zone_id = "${var.travisci_net_external_zone_id}" 90 | } 91 | 92 | output "gce_network_main" { 93 | value = "${module.gce_net.gce_network_main}" 94 | } 95 | 96 | output "gce_subnetwork_gke_cluster" { 97 | value = "${module.gce_net.gce_subnetwork_gke_cluster}" 98 | } 99 | -------------------------------------------------------------------------------- /gce-production-net-2/nat-conntracker.env: -------------------------------------------------------------------------------- 1 | export NAT_CONNTRACKER_CONNTRACK_ARGS=-E+conntrack+--output+xml+--event-mask+NEW+--buffer-size+25000000 2 | export NAT_CONNTRACKER_CONN_THRESHOLD=100 3 | export NAT_CONNTRACKER_GIT_REF=master 4 | export NAT_CONNTRACKER_TOP_N=100 5 | -------------------------------------------------------------------------------- /gce-production-net-3/Makefile: -------------------------------------------------------------------------------- 1 | AMQP_URL_COM_VARNAME := CLOUDAMQP_URL 2 | AMQP_URL_ORG_VARNAME := CLOUDAMQP_GRAY_URL 3 | ENV_SHORT := production 4 | 5 | include $(shell git rev-parse --show-toplevel)/gce.mk 6 | -------------------------------------------------------------------------------- /gce-production-net-3/main.tf: -------------------------------------------------------------------------------- 1 | variable "deny_target_ip_ranges" { 2 | default = [] 3 | } 4 | 5 | variable "env" { 6 | default = "production" 7 | } 8 | 9 | variable "gce_bastion_image" { 10 | default = "https://www.googleapis.com/compute/v1/projects/eco-emissary-99515/global/images/bastion-1519767738-74530dd" 11 | } 12 | 13 | variable "gce_heroku_org" {} 14 | 15 | variable "gce_nat_image" { 16 | default = "https://www.googleapis.com/compute/v1/projects/eco-emissary-99515/global/images/tfw-1520467760-573cd26" 17 | } 18 | 19 | variable "github_users" {} 20 | 21 | variable "index" { 22 | default = 3 23 | } 24 | 25 | variable "nat_conntracker_src_ignore" { 26 | type = "list" 27 | } 28 | 29 | variable "nat_conntracker_dst_ignore" { 30 | type = "list" 31 | } 32 | 33 | variable "project" { 34 | default = "travis-ci-prod-3" 35 | } 36 | 37 | variable "region" { 38 | default = "us-central1" 39 | } 40 | 41 | variable "rigaer_strasse_8_ipv4" {} 42 | variable "syslog_address_com" {} 43 | variable "syslog_address_org" {} 44 | 45 | variable "travisci_net_external_zone_id" { 46 | default = "Z2RI61YP4UWSIO" 47 | } 48 | 49 | terraform { 50 | backend "s3" { 51 | bucket = "travis-terraform-state" 52 | key = "terraform-config/gce-production-net-3.tfstate" 53 | region = "us-east-1" 54 | encrypt = "true" 55 | dynamodb_table = "travis-terraform-state" 56 | } 57 | } 58 | 59 | provider "google-beta" { 60 | project = "${var.project}" 61 | region = "${var.region}" 62 | } 63 | 64 | provider "aws" {} 65 | provider "heroku" {} 66 | 67 | module "gce_net" { 68 | source = "../modules/gce_net_workers" 69 | 70 | bastion_config = "${file("config/bastion.env")}" 71 | bastion_image = "${var.gce_bastion_image}" 72 | deny_target_ip_ranges = ["${var.deny_target_ip_ranges}"] 73 | env = "${var.env}" 74 | 75 | github_users = "${var.github_users}" 76 | heroku_org = "${var.gce_heroku_org}" 77 | index = "${var.index}" 78 | nat_config = "${file("config/nat.env")}" 79 | nat_conntracker_config = "${file("nat-conntracker.env")}" 80 | nat_conntracker_dst_ignore = ["${var.nat_conntracker_dst_ignore}"] 81 | nat_conntracker_src_ignore = ["${var.nat_conntracker_src_ignore}"] 82 | nat_count_per_zone = 2 83 | nat_image = "${var.gce_nat_image}" 84 | nat_machine_type = "n1-standard-4" 85 | project = "${var.project}" 86 | public_subnet_cidr_range = "10.10.1.0/24" 87 | rigaer_strasse_8_ipv4 = "${var.rigaer_strasse_8_ipv4}" 88 | syslog_address = "${var.syslog_address_com}" 89 | travisci_net_external_zone_id = "${var.travisci_net_external_zone_id}" 90 | } 91 | 92 | output "gce_network_main" { 93 | value = "${module.gce_net.gce_network_main}" 94 | } 95 | 96 | output "gce_subnetwork_gke_cluster" { 97 | value = "${module.gce_net.gce_subnetwork_gke_cluster}" 98 | } 99 | -------------------------------------------------------------------------------- /gce-production-net-3/nat-conntracker.env: -------------------------------------------------------------------------------- 1 | export NAT_CONNTRACKER_CONNTRACK_ARGS=-E+conntrack+--output+xml+--event-mask+NEW+--buffer-size+25000000 2 | export NAT_CONNTRACKER_CONN_THRESHOLD=100 3 | export NAT_CONNTRACKER_GIT_REF=master 4 | export NAT_CONNTRACKER_TOP_N=100 5 | -------------------------------------------------------------------------------- /gce-staging-1/Makefile: -------------------------------------------------------------------------------- 1 | AMQP_URL_COM_VARNAME := CLOUDAMQP_URL 2 | AMQP_URL_ORG_VARNAME := CLOUDAMQP_GRAY_URL 3 | CONTEXT := $(shell terraform output context) 4 | 5 | include $(shell git rev-parse --show-toplevel)/gce.mk 6 | 7 | .PHONY: context 8 | context: 9 | $(TOP)/bin/set-k8s-context ${CONTEXT} 10 | -------------------------------------------------------------------------------- /gce-staging-net-1/Makefile: -------------------------------------------------------------------------------- 1 | AMQP_URL_COM_VARNAME := CLOUDAMQP_URL 2 | AMQP_URL_ORG_VARNAME := CLOUDAMQP_GRAY_URL 3 | ENV_SHORT := staging 4 | JOB_BOARD_HOST := job-board-staging.travis-ci.com 5 | TRAVIS_BUILD_COM_HOST := build-staging.travis-ci.com 6 | TRAVIS_BUILD_ORG_HOST := build-staging.travis-ci.org 7 | 8 | include $(shell git rev-parse --show-toplevel)/gce.mk 9 | -------------------------------------------------------------------------------- /gce-staging-net-1/main.tf: -------------------------------------------------------------------------------- 1 | variable "deny_target_ip_ranges" { 2 | default = [] 3 | } 4 | 5 | variable "env" { 6 | default = "staging" 7 | } 8 | 9 | variable "latest_docker_image_gesund" {} 10 | variable "latest_docker_image_nat_conntracker" {} 11 | variable "latest_gce_bastion_image" {} 12 | 13 | variable "gce_heroku_org" {} 14 | 15 | variable "gce_nat_image" { 16 | # TODO: replace with vanilla ubuntu bionic image 17 | default = "https://www.googleapis.com/compute/v1/projects/eco-emissary-99515/global/images/tfw-1520467760-573cd26" 18 | } 19 | 20 | variable "github_users" {} 21 | 22 | variable "index" { 23 | default = 1 24 | } 25 | 26 | variable "nat_conntracker_src_ignore" { 27 | type = "list" 28 | } 29 | 30 | variable "nat_conntracker_dst_ignore" { 31 | type = "list" 32 | } 33 | 34 | variable "project" { 35 | default = "travis-staging-1" 36 | } 37 | 38 | variable "region" { 39 | default = "us-central1" 40 | } 41 | 42 | variable "rigaer_strasse_8_ipv4" {} 43 | variable "syslog_address_com" {} 44 | variable "syslog_address_org" {} 45 | 46 | variable "travisci_net_external_zone_id" { 47 | default = "Z2RI61YP4UWSIO" 48 | } 49 | 50 | terraform { 51 | backend "s3" { 52 | bucket = "travis-terraform-state" 53 | key = "terraform-config/gce-staging-net-1.tfstate" 54 | region = "us-east-1" 55 | encrypt = "true" 56 | dynamodb_table = "travis-terraform-state" 57 | } 58 | } 59 | 60 | provider "google-beta" { 61 | project = "${var.project}" 62 | region = "${var.region}" 63 | } 64 | 65 | provider "aws" {} 66 | provider "heroku" {} 67 | 68 | module "gce_net" { 69 | source = "../modules/gce_net_workers" 70 | 71 | bastion_config = "${file("config/bastion.env")}" 72 | bastion_image = "${var.latest_gce_bastion_image}" 73 | deny_target_ip_ranges = ["${var.deny_target_ip_ranges}"] 74 | env = "${var.env}" 75 | 76 | # TODO: replace with var.latest_docker_image_gesund 77 | gesund_self_image = "travisci/gesund:0.1.0" 78 | 79 | github_users = "${var.github_users}" 80 | heroku_org = "${var.gce_heroku_org}" 81 | index = "${var.index}" 82 | nat_config = "${file("config/nat.env")}" 83 | nat_conntracker_config = "${file("nat-conntracker.env")}" 84 | nat_conntracker_dst_ignore = ["${var.nat_conntracker_dst_ignore}"] 85 | nat_conntracker_src_ignore = ["${var.nat_conntracker_src_ignore}"] 86 | nat_count_per_zone = 2 87 | nat_image = "${var.gce_nat_image}" 88 | nat_machine_type = "g1-small" 89 | project = "${var.project}" 90 | rigaer_strasse_8_ipv4 = "${var.rigaer_strasse_8_ipv4}" 91 | syslog_address = "${var.syslog_address_com}" 92 | travisci_net_external_zone_id = "${var.travisci_net_external_zone_id}" 93 | 94 | nat_conntracker_redis_plan = "hobby-dev" 95 | nat_conntracker_self_image = "${var.latest_docker_image_nat_conntracker}" 96 | } 97 | 98 | output "gce_network_main" { 99 | value = "${module.gce_net.gce_network_main}" 100 | } 101 | 102 | output "gce_subnetwork_gke_cluster" { 103 | value = "${module.gce_net.gce_subnetwork_gke_cluster}" 104 | } 105 | -------------------------------------------------------------------------------- /gce-staging-net-1/nat-conntracker.env: -------------------------------------------------------------------------------- 1 | export NAT_CONNTRACKER_CONNTRACK_ARGS=-E+conntrack+--output+xml+--event-mask+NEW+--buffer-size+25000000 2 | export NAT_CONNTRACKER_CONN_THRESHOLD=50 3 | export NAT_CONNTRACKER_GIT_REF=master 4 | export NAT_CONNTRACKER_TOP_N=10 5 | -------------------------------------------------------------------------------- /gce.mk: -------------------------------------------------------------------------------- 1 | TOP := $(shell git rev-parse --show-toplevel) 2 | 3 | include $(TOP)/terraform-common.mk 4 | include $(TOP)/trvs.mk 5 | 6 | WRITE_CONFIG_OPTS := --write-bastion --write-nat --env-tail $(ENV_TAIL) 7 | 8 | .PHONY: default 9 | default: hello 10 | 11 | CONFIG_FILES := \ 12 | config/bastion.env \ 13 | config/gce-workers-$(ENV_SHORT).json \ 14 | config/worker-com.env \ 15 | config/worker-org.env \ 16 | $(NATBZ2) 17 | 18 | .PHONY: .config 19 | .config: $(CONFIG_FILES) $(ENV_NAME).auto.tfvars 20 | 21 | $(CONFIG_FILES): config/.written config/.gce-keys-written 22 | 23 | # Imports network resources from a GCE project that used a single terraform 24 | # graph to manage network resources and workers into a separate network-only 25 | # graph a la "gce-staging-net-1" for "gce-staging-1". This target is intended 26 | # to be run within a given "net" graph directory such as "gce-production-net-5". 27 | .PHONY: import-net 28 | import-net: 29 | $(TOP)/bin/gce-import-net \ 30 | --env $(ENV_SHORT) \ 31 | --index $(shell awk -F- '{ print $$NF }' <<<$(ENV_NAME)) \ 32 | --project $(shell $(TOP)/bin/lookup-gce-project $(ENV_NAME)) \ 33 | --terraform $(TERRAFORM) 34 | 35 | # Removes state references from a GCE project that has migrated network 36 | # resources to a network-only terraform graph (see `import-net` above). This 37 | # target is intended to be run within a given "non-net" graph directory such as 38 | # "gce-production-5". 39 | .PHONY: export-net 40 | export-net: 41 | $(TOP)/bin/gce-export-net --terraform $(TERRAFORM) 42 | -------------------------------------------------------------------------------- /macstadium-prod-1/Makefile: -------------------------------------------------------------------------------- 1 | include $(shell git rev-parse --show-toplevel)/terraform-common.mk 2 | 3 | .PHONY: default 4 | default: hello 5 | 6 | INDEX ?= 1 7 | 8 | include $(shell git rev-parse --show-toplevel)/trvs.mk 9 | 10 | .PHONY: .config 11 | .config: $(ENV_NAME).auto.tfvars 12 | 13 | .PHONY: context 14 | context: 15 | $(TOP)/bin/set-k8s-context macstadium-prod-1 16 | -------------------------------------------------------------------------------- /macstadium-prod-1/dhcp.tf: -------------------------------------------------------------------------------- 1 | module "dhcp_server" { 2 | source = "../modules/macstadium_dhcp_server" 3 | index = 1 4 | datacenter = "pod-1" 5 | cluster = "MacPro_Pod_1" 6 | datastore = "DataCore1_1" 7 | internal_network_label = "Internal" 8 | jobs_network_label = "Jobs-1" 9 | jobs_network_subnet = "10.182.0.0/18" 10 | mac_address = "00:50:56:84:b4:81" 11 | travisci_net_external_zone_id = "${var.travisci_net_external_zone_id}" 12 | ssh_user = "${var.ssh_user}" 13 | } 14 | -------------------------------------------------------------------------------- /macstadium-prod-2/Makefile: -------------------------------------------------------------------------------- 1 | include $(shell git rev-parse --show-toplevel)/terraform-common.mk 2 | 3 | .PHONY: default 4 | default: hello 5 | 6 | INDEX ?= 2 7 | 8 | include $(shell git rev-parse --show-toplevel)/trvs.mk 9 | 10 | .PHONY: .config 11 | .config: $(ENV_NAME).auto.tfvars 12 | -------------------------------------------------------------------------------- /macstadium-prod-2/dhcp.tf: -------------------------------------------------------------------------------- 1 | module "dhcp_server" { 2 | source = "../modules/macstadium_dhcp_server" 3 | index = 2 4 | datacenter = "pod-2" 5 | cluster = "MacPro_Pod_2" 6 | datastore = "DataCore1_3" 7 | internal_network_label = "Internal" 8 | jobs_network_label = "Jobs-2" 9 | jobs_network_subnet = "10.182.128.0/18" 10 | mac_address = "00:50:56:ab:d3:e4" 11 | travisci_net_external_zone_id = "${var.travisci_net_external_zone_id}" 12 | ssh_user = "${var.ssh_user}" 13 | } 14 | -------------------------------------------------------------------------------- /macstadium-prod-2/main.tf: -------------------------------------------------------------------------------- 1 | variable "travisci_net_external_zone_id" { 2 | default = "Z2RI61YP4UWSIO" 3 | } 4 | 5 | variable "ssh_user" { 6 | description = "your username on the Linux VM instances" 7 | } 8 | 9 | variable "vsphere_user" {} 10 | variable "vsphere_password" {} 11 | variable "vsphere_server" {} 12 | 13 | variable "custom_1_name" {} 14 | variable "custom_2_name" {} 15 | variable "custom_4_name" {} 16 | variable "custom_5_name" {} 17 | variable "custom_6_name" {} 18 | variable "custom_7_name" {} 19 | 20 | terraform { 21 | backend "s3" { 22 | bucket = "travis-terraform-state" 23 | key = "terraform-config/macstadium-pod-2-cluster-terraform.tfstate" 24 | region = "us-east-1" 25 | encrypt = "true" 26 | dynamodb_table = "travis-terraform-state" 27 | } 28 | } 29 | 30 | provider "aws" { 31 | region = "us-east-1" 32 | } 33 | 34 | provider "vsphere" { 35 | user = "${var.vsphere_user}" 36 | password = "${var.vsphere_password}" 37 | vsphere_server = "${var.vsphere_server}" 38 | allow_unverified_ssl = true 39 | } 40 | 41 | module "inventory" { 42 | source = "../modules/macstadium_inventory" 43 | datacenter = "pod-2" 44 | custom_1_name = "${var.custom_1_name}" 45 | custom_2_name = "${var.custom_2_name}" 46 | custom_4_name = "${var.custom_4_name}" 47 | custom_5_name = "${var.custom_5_name}" 48 | custom_6_name = "${var.custom_6_name}" 49 | custom_7_name = "${var.custom_7_name}" 50 | } 51 | 52 | module "kubernetes_cluster" { 53 | source = "../modules/macstadium_k8s_cluster" 54 | name_prefix = "cluster-2" 55 | ip_base = 90 56 | node_count = 3 57 | datacenter = "pod-2" 58 | cluster = "MacPro_Pod_2" 59 | datastore = "DataCore1_3" 60 | internal_network_label = "Internal" 61 | jobs_network_label = "Jobs-2" 62 | jobs_network_subnet = "10.182.128.0/18" 63 | 64 | mac_addresses = [ 65 | "00:50:56:ab:0b:aa", 66 | "00:50:56:ab:0b:ab", 67 | "00:50:56:ab:0b:ac", 68 | ] 69 | 70 | // Kubernetes 1.14.0 71 | master_vanilla_image = "travis-ci-centos7-internal-kubernetes-1554237268" 72 | node_vanilla_image = "travis-ci-centos7-internal-kubernetes-1554237268" 73 | 74 | travisci_net_external_zone_id = "${var.travisci_net_external_zone_id}" 75 | ssh_user = "${var.ssh_user}" 76 | } 77 | -------------------------------------------------------------------------------- /macstadium-staging/.example.env: -------------------------------------------------------------------------------- 1 | # Change this if your user you SSH with is different than your local user 2 | export TF_VAR_ssh_user="$USER" 3 | -------------------------------------------------------------------------------- /macstadium-staging/Makefile: -------------------------------------------------------------------------------- 1 | include $(shell git rev-parse --show-toplevel)/terraform-common.mk 2 | 3 | .PHONY: default 4 | default: hello 5 | 6 | INDEX ?= 1 7 | 8 | include $(shell git rev-parse --show-toplevel)/trvs.mk 9 | 10 | .PHONY: .config 11 | .config: $(ENV_NAME).auto.tfvars 12 | 13 | .PHONY: context 14 | context: 15 | $(TOP)/bin/set-k8s-context $(ENV_NAME) 16 | -------------------------------------------------------------------------------- /macstadium-staging/main.tf: -------------------------------------------------------------------------------- 1 | variable "travisci_net_external_zone_id" { 2 | default = "Z2RI61YP4UWSIO" 3 | } 4 | 5 | variable "ssh_user" { 6 | description = "your username on the Linux VM instances" 7 | } 8 | 9 | variable "vsphere_user" {} 10 | variable "vsphere_password" {} 11 | variable "vsphere_server" {} 12 | 13 | terraform { 14 | backend "s3" { 15 | bucket = "travis-terraform-state" 16 | key = "terraform-config/macstadium-staging-terraform.tfstate" 17 | region = "us-east-1" 18 | encrypt = "true" 19 | dynamodb_table = "travis-terraform-state" 20 | } 21 | } 22 | 23 | provider "aws" { 24 | region = "us-east-1" 25 | } 26 | 27 | provider "vsphere" { 28 | version = "~> 1.8" 29 | 30 | user = "${var.vsphere_user}" 31 | password = "${var.vsphere_password}" 32 | vsphere_server = "${var.vsphere_server}" 33 | allow_unverified_ssl = true 34 | } 35 | 36 | module "kubernetes_cluster" { 37 | source = "../modules/macstadium_k8s_cluster" 38 | name_prefix = "cluster-staging" 39 | ip_base = 100 40 | node_count = 2 41 | datacenter = "pod-1" 42 | cluster = "MacPro_Staging_1" 43 | datastore = "DataCore1_1" 44 | internal_network_label = "Internal" 45 | jobs_network_label = "Jobs-1" 46 | jobs_network_subnet = "10.182.0.0/18" 47 | 48 | mac_addresses = [ 49 | "00:50:56:84:0b:b1", 50 | "00:50:56:84:0b:b2", 51 | ] 52 | 53 | travisci_net_external_zone_id = "${var.travisci_net_external_zone_id}" 54 | ssh_user = "${var.ssh_user}" 55 | } 56 | 57 | // Use these outputs to be able to easily set up a context in kubectl on the local machine. 58 | output "cluster_host" { 59 | value = "${module.kubernetes_cluster.host}" 60 | } 61 | 62 | output "cluster_ca_certificate" { 63 | value = "${module.kubernetes_cluster.cluster_ca_certificate}" 64 | sensitive = true 65 | } 66 | 67 | output "client_certificate" { 68 | value = "${module.kubernetes_cluster.client_certificate}" 69 | sensitive = true 70 | } 71 | 72 | output "client_key" { 73 | value = "${module.kubernetes_cluster.client_key}" 74 | sensitive = true 75 | } 76 | 77 | // These users are for the worker instances that will run on the cluster. 78 | // The credentials are outputs so they can be copied into the keychain. 79 | // 80 | // If the users ever get recreated, those credentials need to get copied 81 | // again so the Kubernetes secrets can be updated. 82 | // 83 | // This is not ideal, so I'd like to find a better way to manage this at 84 | // point. 85 | 86 | module "worker_com_s3_user" { 87 | source = "../modules/aws_iam_user_s3" 88 | iam_user_name = "worker-macstadium-staging-com" 89 | s3_bucket_name = "build-trace-staging.travis-ci.com" 90 | } 91 | 92 | output "worker_com_access_key" { 93 | value = "${module.worker_com_s3_user.id}" 94 | sensitive = true 95 | } 96 | 97 | output "worker_com_secret_key" { 98 | value = "${module.worker_com_s3_user.secret}" 99 | sensitive = true 100 | } 101 | 102 | module "worker_org_s3_user" { 103 | source = "../modules/aws_iam_user_s3" 104 | iam_user_name = "worker-macstadium-staging-org" 105 | s3_bucket_name = "build-trace-staging.travis-ci.org" 106 | } 107 | 108 | output "worker_org_access_key" { 109 | value = "${module.worker_org_s3_user.id}" 110 | sensitive = true 111 | } 112 | 113 | output "worker_org_secret_key" { 114 | value = "${module.worker_org_s3_user.secret}" 115 | sensitive = true 116 | } 117 | -------------------------------------------------------------------------------- /modules/aws_iam_user_s3/main.tf: -------------------------------------------------------------------------------- 1 | variable "iam_user_name" {} 2 | variable "s3_bucket_name" {} 3 | 4 | resource "aws_iam_user" "s3_user" { 5 | name = "${var.iam_user_name}" 6 | } 7 | 8 | resource "aws_iam_access_key" "s3_user" { 9 | user = "${aws_iam_user.s3_user.name}" 10 | depends_on = ["aws_iam_user.s3_user"] 11 | } 12 | 13 | resource "aws_iam_user_policy" "s3_user_policy" { 14 | name = "${aws_iam_user.s3_user.name}_policy" 15 | user = "${aws_iam_user.s3_user.name}" 16 | 17 | policy = <"$conf" </dev/null || true 36 | sleep 10 37 | let i+=10 38 | done 39 | } 40 | 41 | __setup_tfw() { 42 | "${VARLIBDIR}/cloud/scripts/per-boot/00-ensure-tfw" || true 43 | 44 | logger running tfw bootstrap 45 | tfw bootstrap 46 | 47 | chown -R root:root "${ETCDIR}/sudoers" "${ETCDIR}/sudoers.d" 48 | 49 | logger running tfw admin-bootstrap 50 | tfw admin-bootstrap 51 | 52 | systemctl restart sshd || true 53 | } 54 | 55 | __setup_squignix() { 56 | eval "$(tfw printenv squignix)" 57 | 58 | tfw extract squignix "${SQUIGNIX_IMAGE}" 59 | 60 | systemctl enable squignix || true 61 | systemctl start squignix || true 62 | } 63 | 64 | main "${@}" 65 | -------------------------------------------------------------------------------- /modules/gce_squignix/nginx-conf.d-default.conf.tpl: -------------------------------------------------------------------------------- 1 | # vim:filetype=nginx 2 | # https://www.nginx.com/blog/nginx-caching-guide/ 3 | # https://nginx.org/en/docs/http/ngx_http_proxy_module.html 4 | 5 | proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my_cache:10m inactive=180m max_size=${max_size} use_temp_path=off; 6 | 7 | proxy_cache my_cache; 8 | proxy_cache_background_update on; 9 | proxy_cache_convert_head on; 10 | proxy_cache_lock on; 11 | proxy_cache_methods GET HEAD; 12 | proxy_cache_revalidate on; 13 | proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504; 14 | proxy_cache_valid 180m; 15 | 16 | proxy_ignore_headers 17 | Cache-Control 18 | Expires 19 | Set-Cookie 20 | Vary; 21 | # "Pragma" ends up with things like "no-cache" in it, which also cramps our style 22 | proxy_set_header Pragma ''; 23 | 24 | # HSTS is good, but it really cramps our style (and makes wget sad) 25 | proxy_set_header Strict-Transport-Security ''; 26 | 27 | # docker + ipv6 = bad vibes 28 | resolver 1.1.1.1 1.0.0.1 ipv6=off; 29 | 30 | log_format cached 31 | '$upstream_cache_status $status $request_method "$scheme://$host:$server_port$request_uri" $server_protocol ' 32 | '[$time_local] $remote_addr ' 33 | '"$http_user_agent"'; 34 | access_log off; # turned back on in the "server" blocks (to avoid overlapping logging settings) 35 | 36 | server { 37 | listen 80 reuseport; 38 | listen 11371 reuseport; 39 | 40 | access_log /var/log/nginx/access.log cached; 41 | 42 | location /__squignix_health__ { 43 | return 200 "vigorous\n"; 44 | add_header Content-Type text/plain; 45 | } 46 | 47 | location / { 48 | if ($http_x_squignix) { 49 | # prevent infinite recursion 50 | return 429 'Squignix Redirecting To Itself\n'; 51 | } 52 | 53 | proxy_pass $scheme://$host:$server_port; 54 | proxy_set_header Host $http_host; 55 | proxy_set_header X-Squignix true; 56 | 57 | add_header X-Cache-Status $upstream_cache_status; 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /modules/gce_squignix/squignix-list-cached-urls: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | set -o pipefail 4 | 5 | main() { 6 | local name=squignix 7 | if [[ "${1}" ]]; then 8 | name="${1}" 9 | fi 10 | 11 | exec docker exec "${name}" \ 12 | grep -Erho '^KEY: .*' /var/cache/nginx | 13 | cut -d' ' -f2- | 14 | sort -u 15 | } 16 | 17 | main "${@}" 18 | -------------------------------------------------------------------------------- /modules/gce_squignix/squignix-wrapper: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -o errexit 3 | 4 | main() { 5 | [[ "${QUIET}" ]] || set -o xtrace 6 | 7 | : "${SQUIGNIX_IMAGE:=tianon/squignix}" 8 | 9 | local name=squignix 10 | if [[ "${1}" ]]; then 11 | name="${1}" 12 | fi 13 | 14 | local env_file 15 | env_file="$(tfw writeenv squignix)" 16 | 17 | set -o allexport 18 | # shellcheck source=/dev/null 19 | source "${env_file}" 20 | 21 | mkdir -p /var/tmp/nginx-cache 22 | exec docker run \ 23 | --rm \ 24 | --name "${name}" \ 25 | -p 80:80 \ 26 | -v /var/tmp/nginx-conf.d:/etc/nginx/conf.d \ 27 | -v /var/tmp/nginx-cache:/var/cache/nginx \ 28 | "${SQUIGNIX_IMAGE}" 29 | } 30 | 31 | main "${@}" 32 | -------------------------------------------------------------------------------- /modules/gce_squignix/squignix.env: -------------------------------------------------------------------------------- 1 | export SQUIGNIX_IMAGE=tianon/squignix 2 | -------------------------------------------------------------------------------- /modules/gce_squignix/squignix.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Squignix 3 | After=docker.service 4 | Requires=docker.service 5 | 6 | [Service] 7 | ExecStart=/usr/local/bin/squignix-wrapper 8 | ExecStopPost=/bin/sleep 5 9 | Restart=always 10 | SyslogIdentifier=squignix 11 | WorkingDirectory=/ 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /modules/gce_worker/accounts-com-free.tf: -------------------------------------------------------------------------------- 1 | resource "google_service_account" "workers_com_free" { 2 | account_id = "workers-com-free-${lookup(var.regions_abbrev, var.region, "unk")}" 3 | display_name = "travis-worker processes com free ${var.region}" 4 | } 5 | 6 | resource "google_project_iam_member" "workers_com_free" { 7 | role = "projects/${var.project}/roles/${google_project_iam_custom_role.worker.role_id}" 8 | member = "serviceAccount:${google_service_account.workers_com_free.email}" 9 | } 10 | 11 | resource "google_service_account_key" "workers_com_free" { 12 | service_account_id = "${google_service_account.workers_com_free.email}" 13 | } 14 | -------------------------------------------------------------------------------- /modules/gce_worker/accounts-com.tf: -------------------------------------------------------------------------------- 1 | resource "google_service_account" "workers_com" { 2 | account_id = "workers-com-${lookup(var.regions_abbrev, var.region, "unk")}" 3 | display_name = "travis-worker processes com ${var.region}" 4 | } 5 | 6 | resource "google_project_iam_member" "workers_com" { 7 | role = "projects/${var.project}/roles/${google_project_iam_custom_role.worker.role_id}" 8 | member = "serviceAccount:${google_service_account.workers_com.email}" 9 | } 10 | 11 | resource "google_service_account_key" "workers_com" { 12 | service_account_id = "${google_service_account.workers_com.email}" 13 | } 14 | -------------------------------------------------------------------------------- /modules/gce_worker/accounts-org.tf: -------------------------------------------------------------------------------- 1 | resource "google_service_account" "workers_org" { 2 | account_id = "workers-org-${lookup(var.regions_abbrev, var.region, "unk")}" 3 | display_name = "travis-worker processes org ${var.region}" 4 | } 5 | 6 | resource "google_project_iam_member" "workers_org" { 7 | role = "projects/${var.project}/roles/${google_project_iam_custom_role.worker.role_id}" 8 | member = "serviceAccount:${google_service_account.workers_org.email}" 9 | } 10 | 11 | resource "google_service_account_key" "workers_org" { 12 | service_account_id = "${google_service_account.workers_org.email}" 13 | } 14 | -------------------------------------------------------------------------------- /modules/gce_worker/kubernetes-com-free.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_secret" "worker_com_free_config" { 2 | metadata { 3 | name = "worker-com-free-terraform" 4 | namespace = "${var.k8s_namespace}" 5 | } 6 | 7 | data = { 8 | TRAVIS_WORKER_GCE_ACCOUNT_JSON = "${base64decode(google_service_account_key.workers_com_free.private_key)}" 9 | TRAVIS_WORKER_STACKDRIVER_TRACE_ACCOUNT_JSON = "${base64decode(google_service_account_key.workers_com_free.private_key)}" 10 | TRAVIS_WORKER_GCE_NETWORK = "main" 11 | TRAVIS_WORKER_GCE_SUBNETWORK = "jobs-com" 12 | TRAVIS_WORKER_AWS_ACCESS_KEY_ID = "${var.aws_com_id}" 13 | TRAVIS_WORKER_AWS_SECRET_ACCESS_KEY = "${var.aws_com_secret}" 14 | TRAVIS_WORKER_BUILD_TRACE_S3_BUCKET = "${var.aws_com_trace_bucket}" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /modules/gce_worker/kubernetes-com-premium-c2.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_secret" "worker_com_premium_c2_config" { 2 | metadata { 3 | name = "worker-premium-c2-terraform" 4 | namespace = "${var.k8s_namespace}" 5 | } 6 | 7 | data = { 8 | TRAVIS_WORKER_GCE_ACCOUNT_JSON = "${base64decode(google_service_account_key.workers_com_free.private_key)}" 9 | TRAVIS_WORKER_STACKDRIVER_TRACE_ACCOUNT_JSON = "${base64decode(google_service_account_key.workers_com_free.private_key)}" 10 | TRAVIS_WORKER_GCE_NETWORK = "main" 11 | TRAVIS_WORKER_GCE_SUBNETWORK = "jobs-com" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /modules/gce_worker/kubernetes-com-premium-hack.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_secret" "worker_com_premium_hack_config" { 2 | metadata { 3 | name = "worker-premium-hack-terraform" 4 | namespace = "${var.k8s_namespace}" 5 | } 6 | 7 | data = { 8 | TRAVIS_WORKER_GCE_ACCOUNT_JSON = "${base64decode(google_service_account_key.workers_com_free.private_key)}" 9 | TRAVIS_WORKER_STACKDRIVER_TRACE_ACCOUNT_JSON = "${base64decode(google_service_account_key.workers_com_free.private_key)}" 10 | TRAVIS_WORKER_GCE_NETWORK = "main" 11 | TRAVIS_WORKER_GCE_SUBNETWORK = "jobs-com" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /modules/gce_worker/kubernetes-com-premium-n2.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_secret" "worker_com_premium_n2_config" { 2 | metadata { 3 | name = "worker-premium-n2-terraform" 4 | namespace = "${var.k8s_namespace}" 5 | } 6 | 7 | data = { 8 | TRAVIS_WORKER_GCE_ACCOUNT_JSON = "${base64decode(google_service_account_key.workers_com_free.private_key)}" 9 | TRAVIS_WORKER_STACKDRIVER_TRACE_ACCOUNT_JSON = "${base64decode(google_service_account_key.workers_com_free.private_key)}" 10 | TRAVIS_WORKER_GCE_NETWORK = "main" 11 | TRAVIS_WORKER_GCE_SUBNETWORK = "jobs-com" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /modules/gce_worker/kubernetes-com.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_secret" "worker_com_config" { 2 | metadata { 3 | name = "worker-com-terraform" 4 | namespace = "${var.k8s_namespace}" 5 | } 6 | 7 | data = { 8 | TRAVIS_WORKER_GCE_ACCOUNT_JSON = "${base64decode(google_service_account_key.workers_com.private_key)}" 9 | TRAVIS_WORKER_STACKDRIVER_TRACE_ACCOUNT_JSON = "${base64decode(google_service_account_key.workers_com.private_key)}" 10 | TRAVIS_WORKER_GCE_NETWORK = "main" 11 | TRAVIS_WORKER_GCE_SUBNETWORK = "jobs-com" 12 | TRAVIS_WORKER_AWS_ACCESS_KEY_ID = "${var.aws_com_id}" 13 | TRAVIS_WORKER_AWS_SECRET_ACCESS_KEY = "${var.aws_com_secret}" 14 | TRAVIS_WORKER_BUILD_TRACE_S3_BUCKET = "${var.aws_com_trace_bucket}" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /modules/gce_worker/kubernetes-org.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_secret" "worker_org_config" { 2 | metadata { 3 | name = "worker-org-terraform" 4 | namespace = "${var.k8s_namespace}" 5 | } 6 | 7 | data = { 8 | TRAVIS_WORKER_GCE_ACCOUNT_JSON = "${base64decode(google_service_account_key.workers_org.private_key)}" 9 | TRAVIS_WORKER_STACKDRIVER_TRACE_ACCOUNT_JSON = "${base64decode(google_service_account_key.workers_org.private_key)}" 10 | TRAVIS_WORKER_GCE_NETWORK = "main" 11 | TRAVIS_WORKER_GCE_SUBNETWORK = "jobs-org" 12 | TRAVIS_WORKER_AWS_ACCESS_KEY_ID = "${var.aws_org_id}" 13 | TRAVIS_WORKER_AWS_SECRET_ACCESS_KEY = "${var.aws_org_secret}" 14 | TRAVIS_WORKER_BUILD_TRACE_S3_BUCKET = "${var.aws_org_trace_bucket}" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /modules/gce_worker/outputs.tf: -------------------------------------------------------------------------------- 1 | # The images for the jobs are created on prod-1. To access these images from 2 | # the other projects, we loop these accounts to prod-1 and give them permissions 3 | # in prod-1. See gce-production-1/service_accounts.tf 4 | 5 | output "workers_service_account_emails" { 6 | value = [ 7 | "${google_service_account.workers_org.email}", 8 | "${google_service_account.workers_com.email}", 9 | "${google_service_account.workers_com_free.email}", 10 | ] 11 | } 12 | -------------------------------------------------------------------------------- /modules/gce_worker/role-worker.tf: -------------------------------------------------------------------------------- 1 | resource "google_project_iam_custom_role" "worker" { 2 | role_id = "worker" 3 | title = "travis-worker" 4 | description = "A travis-worker process that can do travis-worky stuff" 5 | 6 | permissions = [ 7 | "cloudtrace.traces.patch", 8 | "compute.acceleratorTypes.get", 9 | "compute.acceleratorTypes.list", 10 | "compute.addresses.create", 11 | "compute.addresses.createInternal", 12 | "compute.addresses.delete", 13 | "compute.addresses.deleteInternal", 14 | "compute.addresses.get", 15 | "compute.addresses.list", 16 | "compute.addresses.setLabels", 17 | "compute.addresses.use", 18 | "compute.addresses.useInternal", 19 | "compute.diskTypes.get", 20 | "compute.diskTypes.list", 21 | "compute.disks.create", 22 | "compute.disks.createSnapshot", 23 | "compute.disks.delete", 24 | "compute.disks.get", 25 | "compute.disks.getIamPolicy", 26 | "compute.disks.list", 27 | "compute.disks.resize", 28 | "compute.disks.setIamPolicy", 29 | "compute.disks.setLabels", 30 | "compute.disks.update", 31 | "compute.disks.use", 32 | "compute.disks.useReadOnly", 33 | "compute.globalOperations.get", 34 | "compute.globalOperations.list", 35 | "compute.images.list", 36 | "compute.images.useReadOnly", 37 | "compute.instances.addAccessConfig", 38 | "compute.instances.addMaintenancePolicies", 39 | "compute.instances.attachDisk", 40 | "compute.instances.create", 41 | "compute.instances.delete", 42 | "compute.instances.deleteAccessConfig", 43 | "compute.instances.detachDisk", 44 | "compute.instances.get", 45 | "compute.instances.getGuestAttributes", 46 | "compute.instances.getIamPolicy", 47 | "compute.instances.getSerialPortOutput", 48 | "compute.instances.list", 49 | "compute.instances.listReferrers", 50 | "compute.instances.osAdminLogin", 51 | "compute.instances.osLogin", 52 | "compute.instances.removeMaintenancePolicies", 53 | "compute.instances.reset", 54 | "compute.instances.setDeletionProtection", 55 | "compute.instances.setDiskAutoDelete", 56 | "compute.instances.setIamPolicy", 57 | "compute.instances.setLabels", 58 | "compute.instances.setMachineResources", 59 | "compute.instances.setMachineType", 60 | "compute.instances.setMetadata", 61 | "compute.instances.setMinCpuPlatform", 62 | "compute.instances.setScheduling", 63 | "compute.instances.setServiceAccount", 64 | "compute.instances.setShieldedVmIntegrityPolicy", 65 | "compute.instances.setTags", 66 | "compute.instances.start", 67 | "compute.instances.startWithEncryptionKey", 68 | "compute.instances.stop", 69 | "compute.instances.update", 70 | "compute.instances.updateAccessConfig", 71 | "compute.instances.updateNetworkInterface", 72 | "compute.instances.updateShieldedVmConfig", 73 | "compute.instances.use", 74 | "compute.instanceGroups.get", 75 | "compute.instanceGroups.list", 76 | "compute.machineTypes.get", 77 | "compute.machineTypes.list", 78 | "compute.networks.get", 79 | "compute.networks.list", 80 | "compute.networks.use", 81 | "compute.projects.get", 82 | "compute.regions.get", 83 | "compute.regions.list", 84 | "compute.subnetworks.get", 85 | "compute.subnetworks.list", 86 | "compute.subnetworks.use", 87 | "compute.subnetworks.useExternalIp", 88 | "compute.zoneOperations.get", 89 | "compute.zoneOperations.list", 90 | "compute.zones.get", 91 | "compute.zones.list", 92 | ] 93 | } 94 | -------------------------------------------------------------------------------- /modules/gce_worker/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_com_id" {} 2 | variable "aws_com_secret" {} 3 | variable "aws_com_trace_bucket" {} 4 | variable "aws_org_id" {} 5 | variable "aws_org_secret" {} 6 | variable "aws_org_trace_bucket" {} 7 | variable "k8s_namespace" {} 8 | variable "project" {} 9 | variable "region" {} 10 | 11 | variable "regions_abbrev" { 12 | default = { 13 | "us-central1" = "uc1" 14 | "us-east1" = "ue1" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /modules/gce_worker_group/gcloud-cleanup.tf: -------------------------------------------------------------------------------- 1 | resource "google_storage_bucket" "gcloud_cleanup_archive" { 2 | name = "gcloud-cleanup-${var.env}-${var.index}" 3 | project = "${var.project}" 4 | 5 | versioning { 6 | enabled = false 7 | } 8 | 9 | lifecycle_rule { 10 | action { 11 | type = "Delete" 12 | } 13 | 14 | condition { 15 | age = "${var.gcloud_cleanup_archive_retention_days}" 16 | } 17 | } 18 | } 19 | 20 | resource "google_project_iam_custom_role" "gcloud_cleaner" { 21 | role_id = "gcloud_cleaner" 22 | title = "Gcloud Cleaner" 23 | description = "A gcloud-cleanup process that can clean and archive stuff" 24 | 25 | permissions = [ 26 | "cloudtrace.traces.patch", 27 | "compute.disks.delete", 28 | "compute.disks.get", 29 | "compute.disks.list", 30 | "compute.disks.update", 31 | "compute.globalOperations.get", 32 | "compute.globalOperations.list", 33 | "compute.images.delete", 34 | "compute.images.get", 35 | "compute.images.list", 36 | "compute.instances.delete", 37 | "compute.instances.deleteAccessConfig", 38 | "compute.instances.detachDisk", 39 | "compute.instances.get", 40 | "compute.instances.getSerialPortOutput", 41 | "compute.instances.list", 42 | "compute.instances.reset", 43 | "compute.instances.stop", 44 | "compute.instances.update", 45 | "compute.regions.get", 46 | "compute.regions.list", 47 | "compute.zones.get", 48 | "compute.zones.list", 49 | "storage.objects.create", 50 | "storage.objects.update", 51 | ] 52 | } 53 | 54 | resource "google_service_account" "gcloud_cleanup" { 55 | account_id = "gcloud-cleanup" 56 | display_name = "Gcloud Cleanup" 57 | project = "${var.project}" 58 | } 59 | 60 | resource "google_project_iam_member" "gcloud_cleaner" { 61 | project = "${var.project}" 62 | role = "projects/${var.project}/roles/${google_project_iam_custom_role.gcloud_cleaner.role_id}" 63 | member = "serviceAccount:${google_service_account.gcloud_cleanup.email}" 64 | } 65 | 66 | resource "google_service_account_key" "gcloud_cleanup" { 67 | service_account_id = "${google_service_account.gcloud_cleanup.email}" 68 | } 69 | 70 | resource "kubernetes_secret" "gcloud_cleanup_config" { 71 | metadata { 72 | name = "gcloud-cleanup-terraform" 73 | namespace = "${var.k8s_default_namespace}" 74 | } 75 | 76 | data = { 77 | GCLOUD_CLEANUP_ARCHIVE_BUCKET = "${google_storage_bucket.gcloud_cleanup_archive.name}" 78 | GCLOUD_CLEANUP_ACCOUNT_JSON = "${base64decode(google_service_account_key.gcloud_cleanup.private_key)}" 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /modules/gce_worker_group/outputs.tf: -------------------------------------------------------------------------------- 1 | output "workers_service_account_emails" { 2 | value = ["${module.gce_workers.workers_service_account_emails}"] 3 | } 4 | -------------------------------------------------------------------------------- /modules/gce_worker_group/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_com_id" {} 2 | variable "aws_com_secret" {} 3 | variable "aws_com_trace_bucket" {} 4 | variable "aws_org_id" {} 5 | variable "aws_org_secret" {} 6 | variable "aws_org_trace_bucket" {} 7 | variable "env" {} 8 | variable "index" {} 9 | variable "k8s_default_namespace" {} 10 | variable "project" {} 11 | variable "region" {} 12 | 13 | variable "gcloud_cleanup_archive_retention_days" { 14 | default = 8 15 | } 16 | -------------------------------------------------------------------------------- /modules/gce_worker_group/workers.tf: -------------------------------------------------------------------------------- 1 | module "gce_workers" { 2 | source = "../gce_worker" 3 | 4 | aws_com_id = "${var.aws_com_id}" 5 | aws_com_secret = "${var.aws_com_secret}" 6 | aws_com_trace_bucket = "${var.aws_com_trace_bucket}" 7 | aws_org_id = "${var.aws_org_id}" 8 | aws_org_secret = "${var.aws_org_secret}" 9 | aws_org_trace_bucket = "${var.aws_org_trace_bucket}" 10 | k8s_namespace = "${var.k8s_default_namespace}" 11 | project = "${var.project}" 12 | region = "${var.region}" 13 | } 14 | -------------------------------------------------------------------------------- /modules/macstadium_dhcp_server/dhcpd.conf.tpl: -------------------------------------------------------------------------------- 1 | subnet ${jobs_subnet} netmask ${jobs_subnet_netmask} { 2 | option domain-name "${domain_name}"; 3 | range ${jobs_subnet_begin} ${jobs_subnet_end}; 4 | option routers ${jobs_gateway}; 5 | option domain-name-servers 1.1.1.1, 1.0.0.1; 6 | default-lease-time ${dhcp_lease_default_time}; 7 | max-lease-time ${dhcp_lease_max_time}; 8 | } 9 | -------------------------------------------------------------------------------- /modules/macstadium_dhcp_server/install-dhcpd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -o errexit 3 | 4 | main() { 5 | #grab the dhcp server package 6 | sudo yum install -y dhcp 7 | 8 | # Configure dhcpd 9 | sudo mv "/tmp/dhcpd.conf" "/etc/dhcp/dhcpd.conf" 10 | 11 | # Start and enable the service 12 | sudo systemctl enable dhcpd 13 | sudo systemctl start dhcpd 14 | } 15 | 16 | main "$@" 17 | -------------------------------------------------------------------------------- /modules/macstadium_dhcp_server/variables.tf: -------------------------------------------------------------------------------- 1 | variable "index" {} 2 | 3 | variable "datacenter" { 4 | description = "The name of the vCenter datacenter that will run the created VMs" 5 | } 6 | 7 | variable "cluster" { 8 | description = "The vCenter compute cluster that should run the created VMs" 9 | } 10 | 11 | variable "datastore" { 12 | description = "The VMWare datastore that should hold the VM disks and configuration" 13 | } 14 | 15 | variable "internal_network_label" { 16 | description = "The label for the internal network for the MacStadium VPN" 17 | } 18 | 19 | variable "jobs_network_label" { 20 | description = "The label for the jobs network for the MacStadium VPN" 21 | } 22 | 23 | variable "jobs_network_subnet" { 24 | description = "The subnet for the jobs network where this cluster is running" 25 | } 26 | 27 | variable "mac_address" { 28 | description = "The MAC address assigned to the DHCP server VM on the jobs network" 29 | } 30 | 31 | variable "vanilla_image" { 32 | default = "travis-ci-centos7-internal-vanilla-1549473064" 33 | } 34 | 35 | variable "travisci_net_external_zone_id" { 36 | description = "The zone ID for the travisci.net DNS zone" 37 | } 38 | 39 | variable "ssh_user" { 40 | description = "your SSH username on our vanilla Linux images" 41 | } 42 | -------------------------------------------------------------------------------- /modules/macstadium_dhcp_server/vsphere.tf: -------------------------------------------------------------------------------- 1 | data "vsphere_datacenter" "dc" { 2 | name = "${var.datacenter}" 3 | } 4 | 5 | data "vsphere_datastore" "datastore" { 6 | name = "${var.datastore}" 7 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 8 | } 9 | 10 | data "vsphere_compute_cluster" "cluster" { 11 | name = "${var.cluster}" 12 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 13 | } 14 | 15 | data "vsphere_network" "internal" { 16 | name = "${var.internal_network_label}" 17 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 18 | } 19 | 20 | data "vsphere_network" "jobs" { 21 | name = "${var.jobs_network_label}" 22 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 23 | } 24 | 25 | data "vsphere_virtual_machine" "vanilla_template" { 26 | name = "Vanilla VMs/${var.vanilla_image}" 27 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 28 | } 29 | -------------------------------------------------------------------------------- /modules/macstadium_inventory/folders.tf: -------------------------------------------------------------------------------- 1 | data "vsphere_datacenter" "dc" { 2 | name = "${var.datacenter}" 3 | } 4 | 5 | resource "vsphere_folder" "base_vms" { 6 | path = "Base VMs" 7 | type = "vm" 8 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 9 | } 10 | 11 | resource "vsphere_folder" "build_vms" { 12 | path = "Build VMs" 13 | type = "vm" 14 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 15 | } 16 | 17 | resource "vsphere_folder" "internal_vms" { 18 | path = "Internal VMs" 19 | type = "vm" 20 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 21 | } 22 | 23 | resource "vsphere_folder" "vanilla_vms" { 24 | path = "Vanilla VMs" 25 | type = "vm" 26 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 27 | } 28 | 29 | resource "vsphere_folder" "custom_1_vms" { 30 | path = "${var.custom_1_name} Build VMs" 31 | type = "vm" 32 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 33 | } 34 | 35 | resource "vsphere_folder" "custom_2_vms" { 36 | path = "${var.custom_2_name} Build VMs" 37 | type = "vm" 38 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 39 | } 40 | 41 | resource "vsphere_folder" "custom_4_vms" { 42 | path = "${var.custom_4_name} Build VMs" 43 | type = "vm" 44 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 45 | } 46 | 47 | resource "vsphere_folder" "custom_5_vms" { 48 | path = "${var.custom_5_name} Build VMs" 49 | type = "vm" 50 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 51 | } 52 | 53 | resource "vsphere_folder" "custom_6_vms" { 54 | path = "${var.custom_6_name} Build VMs" 55 | type = "vm" 56 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 57 | } 58 | 59 | resource "vsphere_folder" "custom_7_vms" { 60 | path = "${var.custom_7_name} Build VMs" 61 | type = "vm" 62 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 63 | } 64 | -------------------------------------------------------------------------------- /modules/macstadium_inventory/variables.tf: -------------------------------------------------------------------------------- 1 | variable "datacenter" { 2 | description = "The name of the datacenter in vSphere" 3 | } 4 | 5 | variable "custom_1_name" {} 6 | variable "custom_2_name" {} 7 | variable "custom_4_name" {} 8 | variable "custom_5_name" {} 9 | variable "custom_6_name" {} 10 | variable "custom_7_name" {} 11 | -------------------------------------------------------------------------------- /modules/macstadium_k8s_cluster/nodes.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | node_vm_prefix = "${var.name_prefix}-node" 3 | } 4 | 5 | resource "vsphere_virtual_machine" "nodes" { 6 | depends_on = ["vsphere_virtual_machine.master"] 7 | 8 | count = "${var.node_count}" 9 | 10 | name = "${local.node_vm_prefix}-${count.index + 1}" 11 | folder = "${var.folder}" 12 | resource_pool_id = "${data.vsphere_compute_cluster.cluster.resource_pool_id}" 13 | datastore_id = "${data.vsphere_datastore.datastore.id}" 14 | 15 | num_cpus = 4 16 | memory = 4096 17 | guest_id = "${data.vsphere_virtual_machine.node_vanilla_template.guest_id}" 18 | scsi_type = "${data.vsphere_virtual_machine.node_vanilla_template.scsi_type}" 19 | 20 | disk { 21 | label = "disk0" 22 | size = "${data.vsphere_virtual_machine.node_vanilla_template.disks.0.size}" 23 | eagerly_scrub = "${data.vsphere_virtual_machine.node_vanilla_template.disks.0.eagerly_scrub}" 24 | thin_provisioned = "${data.vsphere_virtual_machine.node_vanilla_template.disks.0.thin_provisioned}" 25 | } 26 | 27 | network_interface { 28 | network_id = "${data.vsphere_network.internal.id}" 29 | } 30 | 31 | network_interface { 32 | network_id = "${data.vsphere_network.jobs.id}" 33 | use_static_mac = true 34 | mac_address = "${var.mac_addresses[count.index]}" 35 | } 36 | 37 | clone { 38 | template_uuid = "${data.vsphere_virtual_machine.node_vanilla_template.id}" 39 | 40 | customize { 41 | network_interface { 42 | ipv4_address = "${cidrhost("10.182.64.0/18", var.ip_base + count.index + 1)}" 43 | ipv4_netmask = 18 44 | } 45 | 46 | network_interface { 47 | ipv4_address = "${cidrhost(var.jobs_network_subnet, var.ip_base + count.index + 1)}" 48 | ipv4_netmask = 18 49 | } 50 | 51 | linux_options { 52 | host_name = "${local.node_vm_prefix}-${count.index + 1}" 53 | domain = "macstadium-us-se-1.travisci.net" 54 | } 55 | 56 | ipv4_gateway = "10.182.64.1" 57 | dns_server_list = ["1.1.1.1", "1.0.0.1"] 58 | dns_suffix_list = ["vsphere.local"] 59 | } 60 | } 61 | 62 | wait_for_guest_net_routable = false 63 | 64 | connection { 65 | host = "${self.clone.0.customize.0.network_interface.0.ipv4_address}" 66 | user = "${var.ssh_user}" 67 | agent = true 68 | } 69 | 70 | provisioner "remote-exec" { 71 | inline = [ 72 | "sudo ${lookup(data.external.kubeadm_join.result, "command")}", 73 | ] 74 | } 75 | } 76 | 77 | resource "aws_route53_record" "nodes" { 78 | count = "${var.node_count}" 79 | zone_id = "${var.travisci_net_external_zone_id}" 80 | name = "${local.node_vm_prefix}-${count.index + 1}.macstadium-us-se-1.travisci.net" 81 | type = "A" 82 | ttl = 300 83 | records = ["${element(vsphere_virtual_machine.nodes.*.clone.0.customize.0.network_interface.0.ipv4_address, count.index)}"] 84 | } 85 | -------------------------------------------------------------------------------- /modules/macstadium_k8s_cluster/providers.tf: -------------------------------------------------------------------------------- 1 | data "vsphere_datacenter" "dc" { 2 | name = "${var.datacenter}" 3 | } 4 | 5 | data "vsphere_datastore" "datastore" { 6 | name = "${var.datastore}" 7 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 8 | } 9 | 10 | data "vsphere_compute_cluster" "cluster" { 11 | name = "${var.cluster}" 12 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 13 | } 14 | 15 | data "vsphere_network" "internal" { 16 | name = "${var.internal_network_label}" 17 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 18 | } 19 | 20 | data "vsphere_network" "jobs" { 21 | name = "${var.jobs_network_label}" 22 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 23 | } 24 | 25 | /* 26 | data "vsphere_network" "management" { 27 | name = "${var.management_network_label}" 28 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 29 | } 30 | */ 31 | 32 | data "vsphere_virtual_machine" "master_vanilla_template" { 33 | name = "Vanilla VMs/${var.master_vanilla_image}" 34 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 35 | } 36 | 37 | data "vsphere_virtual_machine" "node_vanilla_template" { 38 | name = "Vanilla VMs/${var.node_vanilla_image}" 39 | datacenter_id = "${data.vsphere_datacenter.dc.id}" 40 | } 41 | -------------------------------------------------------------------------------- /modules/macstadium_k8s_cluster/scripts/create-master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Install jq to be able to get the join token back to Terraform later 4 | apt-get install -y jq 5 | 6 | # This CIDR is required by the Flannel network provider 7 | kubeadm init --pod-network-cidr=10.244.0.0/16 8 | 9 | # This allows us to use kubectl as root on the master VM 10 | export KUBECONFIG=/etc/kubernetes/admin.conf 11 | mkdir -p /root/.kube 12 | cp $KUBECONFIG /root/.kube/config 13 | 14 | # Install the flannel network provider 15 | kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/bc79dd1505b0c8681ece4de4c0d86c5cd2643275/Documentation/kube-flannel.yml 16 | -------------------------------------------------------------------------------- /modules/macstadium_k8s_cluster/scripts/guard.sh.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # vim: set ft=sh : 3 | 4 | set -e 5 | 6 | ORG="${org}" 7 | ADMIN_TEAM="${admin_team}" 8 | 9 | GUARD_DATA_DIR=$(mktemp -d) 10 | export GUARD_DATA_DIR 11 | 12 | curl -Lo /usr/local/bin/guard https://github.com/appscode/guard/releases/download/0.3.0/guard-linux-amd64 \ 13 | && chmod +x /usr/local/bin/guard 14 | 15 | /usr/local/bin/guard init ca 16 | /usr/local/bin/guard init server --ips="10.96.10.96" 17 | /usr/local/bin/guard init client "$ORG" -o github 18 | 19 | # Delete any existing Guard instance 20 | kubectl delete deployment guard -n kube-system || true 21 | 22 | /usr/local/bin/guard get installer --auth-providers=github >"$GUARD_DATA_DIR/installer.yaml" 23 | kubectl apply -f "$GUARD_DATA_DIR/installer.yaml" 24 | 25 | mkdir -p /etc/kubernetes/guard 26 | /usr/local/bin/guard get webhook-config "$ORG" -o github --addr="10.96.10.96:443" >/etc/kubernetes/guard/webhook.yaml 27 | 28 | python <