├── .gitignore ├── README.md ├── gce-credentials.json.template ├── gen_secrets.sh ├── nomad ├── helloworld.nomad ├── run_job.sh ├── spark-exec-master.nomad └── spark-exec-slave.nomad ├── packer ├── config │ ├── consul │ │ ├── consul-template.hcl │ │ ├── consul-template.tpl │ │ ├── consul_server.json │ │ ├── default.json │ │ ├── graphite.json │ │ ├── nomad_client.json │ │ ├── nomad_server.json │ │ ├── redis.json │ │ ├── statsite.json │ │ ├── upstart.consul │ │ ├── upstart.consul-template │ │ ├── upstart.haproxy │ │ └── utility.json │ ├── nomad │ │ ├── client.hcl │ │ ├── default.hcl │ │ ├── images │ │ │ ├── nginx │ │ │ │ ├── Dockerfile │ │ │ │ ├── consul_template │ │ │ │ │ ├── nginx.ctmpl │ │ │ │ │ └── nginx.hcl │ │ │ │ └── nginx.conf │ │ │ ├── nodejs │ │ │ │ ├── Dockerfile │ │ │ │ ├── index.js │ │ │ │ └── package.json │ │ │ └── redis │ │ │ │ └── Dockerfile │ │ ├── nomad_join.sh │ │ ├── server.hcl │ │ └── upstart.nomad │ ├── statsite │ │ ├── default.conf │ │ └── upstart.statsite │ └── vault │ │ ├── default.hcl │ │ └── upstart.vault ├── gce_consul_server.json ├── gce_nomad_agent.json ├── gce_nomad_server.json ├── gce_utility.json ├── gce_vault_server.json └── scripts │ ├── consul_server.sh │ ├── consul_server │ └── consul_server.sh │ ├── nomad │ ├── agent │ │ ├── docker.sh │ │ ├── git_repo.sh │ │ ├── java8.sh │ │ └── nomad_agent.sh │ ├── nomad.sh │ └── server │ │ └── nomad_server.sh │ ├── nomad_agent.sh │ ├── nomad_server.sh │ ├── shared.sh │ ├── shared │ ├── cleanup.sh │ ├── collectd.sh │ ├── consul.sh │ ├── dependencies.sh │ ├── go.sh │ ├── local_proxy │ │ ├── consul_template.sh │ │ ├── dnsmasq.sh │ │ └── haproxy.sh │ └── trust_root_cert.sh │ ├── utility.sh │ ├── utility │ ├── graphite.sh │ ├── redis.sh │ ├── statsite.sh │ └── utility.sh │ ├── vault │ └── vault.sh │ └── vault_server.sh ├── terraform ├── _env │ └── gce │ │ ├── gce.tf │ │ └── terraform.tfvars.template ├── gce │ ├── compute │ │ ├── consul_server │ │ │ └── gce_consul_server.tf │ │ ├── gce_compute.tf │ │ ├── nomad_client │ │ │ └── gce_nomad_client.tf │ │ ├── nomad_client_igm │ │ │ └── gce_nomad_client_igm.tf │ │ ├── nomad_server │ │ │ └── gce_nomad_server.tf │ │ ├── utility │ │ │ └── gce_utility.tf │ │ └── vault_server │ │ │ └── gce_vault_server.tf │ ├── network │ │ └── gce_network.tf │ └── region │ │ └── gce_region.tf └── templates │ ├── consul_server │ ├── consul_server.sh.tpl │ └── consul_server.tf │ ├── join │ ├── join.sh.tpl │ └── join.tf │ ├── mount_ssd │ ├── mount_ssd.sh.tpl │ └── mount_ssd.tf │ ├── nomad_client │ ├── nomad_client.sh.tpl │ └── nomad_client.tf │ ├── nomad_job │ ├── helloworld │ │ ├── helloworld.nomad.tpl │ │ └── helloworld.tf │ ├── nomad_job.tf │ └── redis │ │ ├── redis.nomad.tpl │ │ └── redis.tf │ ├── nomad_server │ ├── nomad_server.sh.tpl │ └── nomad_server.tf │ ├── pq │ ├── pq.sh.tpl │ └── pq.tf │ ├── utility │ ├── utility.sh.tpl │ └── utility.tf │ └── vault_server │ ├── vault_server.sh.tpl │ └── vault_server.tf └── vault ├── init.sh ├── issue_cert.sh ├── setup_pki.sh └── unseal.sh /.gitignore: -------------------------------------------------------------------------------- 1 | gce-credentials.json 2 | id_rsa 3 | id_rsa.pub 4 | terraform.tfvars 5 | terraform.tfstate 6 | terraform.tfstate.backup 7 | .terraform 8 | gce_googlecompute.pem 9 | .DS_Store 10 | gce-creds.tar.gz 11 | credentials 12 | *.key 13 | *.cert 14 | *.crt 15 | .idea 16 | *.bak 17 | backup 18 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 | # hashistack 2 | 3 | 4 | ## Setup 5 | 6 | ### Generate some keys for your deployment 7 | ``` 8 | # Fill out prompts for your root certificate and a vault certificate 9 | ./gen_secrets.sh 10 | # This also copies tfvars files from our templates and generates secrets for consul. Once the tfvars files are created, you can modify them directly if needed to customize your deployment 11 | ``` 12 | 13 | ### Download Google Cloud credentials 14 | Download a service account key, name the file `gce-credentials.json`, and put it in this folder (see `gce-credentials.json.template` for the expected fields) 15 | 16 | ### Build the packer images 17 | ``` 18 | export GCE_PROJECT_ID=YOUR_GOOGLE_PROJECT_ID 19 | export GCE_DEFAULT_ZONE=us-central1-b 20 | export GCE_SOURCE_IMAGE=ubuntu-1404-trusty-v20160114e 21 | 22 | packer build packer/gce_consul_server.json 23 | packer build packer/gce_vault_server.json 24 | packer build packer/gce_nomad_server.json 25 | packer build packer/gce_nomad_agent.json 26 | packer build packer/gce_utility.json 27 | ``` 28 | 29 | ### Fill in the version numbers from your built images in your .tfvars file 30 | 31 | You'll need to update the image version numbers (the packer build timestamps) in `terraform/_env/gce/terraform.tfvars` to match the images packer just built for your project. 32 | 33 | ### Apply terraform 34 | 35 | `cd terraform/_env/gce; terraform apply` 36 | 37 | ### Initialize Vault 38 | ``` 39 | cd vault; 40 | ## Initialize vault 41 | VAULT_SERVER=ip.ad.dr.ess ./init.sh # This stores your keys in the credentials/vault.keys file. Separate them and be careful with them. 42 | ## Unseal all your vaults 43 | VAULT_SERVER=ip.ad.dr.ess ./unseal.sh 44 | VAULT_SERVER=ip.ad.dr.ess2 ./unseal.sh 45 | ## Set up PKI with the Vault CA generated earlier by gen_secrets.sh 46 | VAULT_SERVER=ip.ad.dr.ess DOMAIN="example.com" ./setup_pki.sh 47 | ``` 48 | 49 | ### Launch nomad tasks 50 | 51 | `cd nomad; NOMAD_SERVER=ip.ad.dr.ess ./run_job.sh helloworld.nomad` 52 | 53 | ### Remotely connect to your nomad services 54 | 55 | Open a tunnel (this example targets the Spark master service from the jobs in `nomad/`): 56 | `ssh -i id_rsa -L 7777:spark-master.service.consul:8080 ubuntu@ip.ad.dr.ess -N` 57 | 58 | Hit your local endpoint: 59 | `curl localhost:7777` 60 | -------------------------------------------------------------------------------- /gce-credentials.json.template: -------------------------------------------------------------------------------- 1 | { 2 | "type": "service_account", 3 | "project_id": "CHANGEME", 4 | "private_key_id": "CHANGEME", 5 | "private_key": "CHANGEME", 6 | "client_email": "CHANGEME", 7 | "client_id": "CHANGEME", 8 | "auth_uri": "https://accounts.google.com/o/oauth2/auth", 9 | "token_uri": "https://accounts.google.com/o/oauth2/token", 10 | "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", 11 | "client_x509_cert_url": "CHANGEME" 12 | } 13 | -------------------------------------------------------------------------------- /gen_secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir credentials 4 | cd credentials 5 | 6 | # Generate ssh keys for logging onto launched machines 7 | ssh-keygen -t rsa -b 2048 -f id_rsa -P "" 8 | 9 | echo "===========================================================" 10 | 11 | cat < serialfile 28 | touch certindex 29 | 30 | tee vault-ca.conf </dev/null | openssl base64` 100 | find ../terraform/_env -type f -name '*.tfvars' -print0 | xargs -0 sed -i '.bak' "s#CONSUL_SERVER_ENCRYPT_KEY#$SECRET#g" 101 | 
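The tail of `gen_secrets.sh` above generates a random Consul gossip key with `openssl base64` and substitutes it for the `CONSUL_SERVER_ENCRYPT_KEY` placeholder in the generated tfvars files. Consul of this vintage requires the `encrypt` value to decode to exactly 16 bytes, so a small sanity check (a hedged sketch, not part of this repo, assuming the elided line generates the key from 16 random bytes as `consul keygen` does) can catch a bad key before `terraform apply`:

```bash
#!/bin/bash
# Sketch: generate a gossip key the same way the script appears to,
# then verify it decodes to the 16 raw bytes Consul expects.
SECRET=$(head -c 16 /dev/urandom | openssl base64)

BYTES=$(echo "$SECRET" | openssl base64 -d | wc -c)
if [ "$BYTES" -eq 16 ]; then
  echo "gossip key OK: $SECRET"
else
  echo "bad gossip key ($BYTES bytes)" >&2
  exit 1
fi
```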
-------------------------------------------------------------------------------- /nomad/helloworld.nomad: -------------------------------------------------------------------------------- 1 | job "helloworld-v1" { 2 | region = "gce-us-central1" 3 | datacenters = ["gce-us-central1"] 4 | type = "service" 5 | priority = 50 6 | 7 | update { 8 | stagger = "30s" 9 | max_parallel = 1 10 | } 11 | 12 | group "hello-group" { 13 | count = 3 14 | 15 | constraint { 16 | attribute = "${node.datacenter}" 17 | value = "gce-us-central1" 18 | } 19 | 20 | task "hello-task" { 21 | driver = "docker" 22 | config { 23 | image = "eveld/helloworld:1.0.0" 24 | port_map { 25 | http = 8080 26 | } 27 | } 28 | 29 | resources { 30 | cpu = 100 31 | memory = 200 32 | network { 33 | mbits = 1 34 | port "http" {} 35 | } 36 | } 37 | 38 | logs { 39 | max_files = 1 40 | max_file_size = 5 41 | } 42 | 43 | service { 44 | name = "helloworld" 45 | tags = ["global", "us-central1", "routed"] 46 | port = "http" 47 | 48 | check { 49 | name = "hello alive" 50 | type = "tcp" 51 | interval = "10s" 52 | timeout = "2s" 53 | } 54 | } 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /nomad/run_job.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | scp -i ../id_rsa $1 ubuntu@$NOMAD_SERVER:/opt/nomad/jobs/ 5 | ssh -i ../id_rsa ubuntu@$NOMAD_SERVER nomad run -verbose /opt/nomad/jobs/$1 6 | -------------------------------------------------------------------------------- /nomad/spark-exec-master.nomad: -------------------------------------------------------------------------------- 1 | job "spark-master" { 2 | region = "gce-us-central1" 3 | datacenters = ["gce-us-central1"] 4 | 5 | constraint { 6 | attribute = "${attr.kernel.name}" 7 | value = "linux" 8 | } 9 | 10 | update { 11 | stagger = "10s" 12 | 13 | max_parallel = 1 14 | } 15 | 16 | group "spark" { 17 | # count = 1 18 | 19 | restart { 20 | attempts = 10 21 | interval = "5m" 22 | 23 | delay = "25s" 24 | 25 | mode = "delay" 26 | } 27 | 28 | task "spark" { 29 | driver = "exec" 30 | 31 | config { 32 | command = "/bin/sh" 33 | args = ["local/jars/start-master.sh"] 34 | } 35 | 36 | service { 37 | name = "${TASKGROUP}-master" 38 | tags = ["global", "spark", "master", "routed"] 39 | port = "spark" 40 | check { 41 | name = "alive" 42 | type = "tcp" 43 | interval = "10s" 44 | timeout = "2s" 45 | } 46 | } 47 | 48 | resources { 49 | cpu = 500 # 500 MHz 50 | memory = 256 # 256MB 51 | network { 52 | mbits = 10 53 | port "spark" { 54 | static = 7077 55 | } 56 | } 57 | } 58 | 59 | artifact { 60 | source = "https://s3-us-west-2.amazonaws.com/mustwin-files/spark-exec.zip" 61 | } 62 | } 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /nomad/spark-exec-slave.nomad: -------------------------------------------------------------------------------- 1 | job "spark-slave" { 2 | region = "gce-us-central1" 3 | datacenters = ["gce-us-central1"] 4 | 5 | constraint { 6 | attribute = "${attr.kernel.name}" 7 | value = "linux" 8 | } 9 | 10 | update { 11 | stagger = "10s" 12 | 13 | max_parallel = 1 14 | } 15 | 16 | group "spark" { 17 | # count = 1 18 | 19 | restart { 20 | attempts = 10 21 | interval = "5m" 22 | 23 | delay = "25s" 24 | 25 | mode = "delay" 26 | } 27 | 28 | task "spark" { 29 | driver = "exec" 30 | 31 | config { 32 | command = "/bin/sh" 33 | args = ["local/jars/start-slave.sh", "spark-master.service.consul"] 34 | } 35 | 36 | service { 37 
| name = "${TASKGROUP}-slave" 38 | tags = ["global", "spark", "slave", "routed"] 39 | port = "db" 40 | check { 41 | name = "alive" 42 | type = "tcp" 43 | interval = "10s" 44 | timeout = "2s" 45 | } 46 | } 47 | 48 | resources { 49 | cpu = 500 # 500 MHz 50 | memory = 256 # 256MB 51 | network { 52 | mbits = 10 53 | port "db" {} 54 | } 55 | } 56 | 57 | artifact { 58 | source = "https://s3-us-west-2.amazonaws.com/mustwin-files/spark-exec.zip" 59 | } 60 | } 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /packer/config/consul/consul-template.hcl: -------------------------------------------------------------------------------- 1 | // This is the address of the Consul agent. By default, this is 127.0.0.1:8500, 2 | // which is the default bind and port for a local Consul agent. It is not 3 | // recommended that you communicate directly with a Consul server, and instead 4 | // communicate with the local Consul agent. There are many reasons for this, 5 | // most importantly the Consul agent is able to multiplex connections to the 6 | // Consul server and reduce the number of open HTTP connections. Additionally, 7 | // it provides a "well-known" IP address for which clients can connect. 8 | consul = "127.0.0.1:8500" 9 | 10 | // This is the ACL token to use when connecting to Consul. If you did not 11 | // enable ACLs on your Consul cluster, you do not need to set this option. 12 | // 13 | // This option is also available via the environment variable CONSUL_TOKEN. 14 | // token = "abcd1234" 15 | 16 | // This is the signal to listen for to trigger a reload event. The default 17 | // value is shown below. Setting this value to the empty string will cause CT 18 | // to not listen for any reload signals. 19 | //reload_signal = "SIGHUP" 20 | 21 | // This is the signal to listen for to trigger a core dump event. The default 22 | // value is shown below. Setting this value to the empty string will cause CT 23 | // to not listen for any core dump signals. 24 | //dump_signal = "SIGQUIT" 25 | 26 | // This is the signal to listen for to trigger a graceful stop. The default 27 | // value is shown below. Setting this value to the empty string will cause CT 28 | // to not listen for any graceful stop signals. 29 | //kill_signal = "SIGINT" 30 | 31 | // This is the amount of time to wait before retrying a connection to Consul. 32 | // Consul Template is highly fault tolerant, meaning it does not exit in the 33 | // face of failure. Instead, it uses exponential back-off and retry functions to 34 | // wait for the cluster to become available, as is customary in distributed 35 | // systems. 36 | retry = "10s" 37 | 38 | // This is the maximum interval to allow "stale" data. By default, only the 39 | // Consul leader will respond to queries; any requests to a follower will 40 | // forward to the leader. In large clusters with many requests, this is not as 41 | // scalable, so this option allows any follower to respond to a query, so long 42 | // as the last-replicated data is within these bounds. Higher values result in 43 | // less cluster load, but are more likely to have outdated data. 44 | max_stale = "10m" 45 | 46 | // This is the log level. If you find a bug in Consul Template, please enable 47 | // debug logs so we can help identify the issue. This is also available as a 48 | // command line flag. 49 | log_level = "warn" 50 | 51 | // This is the path to store a PID file which will contain the process ID of the 52 | // Consul Template process. 
This is useful if you plan to send custom signals 53 | // to the process. 54 | pid_file = "/var/run/consul-template.pid" 55 | 56 | // These are the quiescence timers; they define the minimum and maximum amount of 57 | // time to wait for the cluster to reach a consistent state before rendering a 58 | // template. This is useful to enable in systems that have a lot of flapping, 59 | // because it will reduce the number of times a template is rendered. 60 | wait = "5s:10s" 61 | 62 | // This denotes the start of the configuration section for Vault. All values 63 | // contained in this section pertain to Vault. 64 | //vault { 65 | // This is the address of the Vault leader. The protocol (http(s)) portion 66 | // of the address is required. 67 | // address = "https://vault.service.consul:8200" 68 | 69 | // This is the token to use when communicating with the Vault server. 70 | // Like other tools that integrate with Vault, Consul Template makes the 71 | // assumption that you provide it with a Vault token; it does not have the 72 | // incorporated logic to generate tokens via Vault's auth methods. 73 | // 74 | // This value can also be specified via the environment variable VAULT_TOKEN. 75 | // token = "abcd1234" 76 | 77 | // This option tells Consul Template to automatically renew the Vault token 78 | // given. If you are unfamiliar with Vault's architecture, Vault requires 79 | // tokens be renewed at some regular interval or they will be revoked. Consul 80 | // Template will automatically renew the token at half the lease duration of 81 | // the token. The default value is true, but this option can be disabled if 82 | // you want to renew the Vault token using an out-of-band process. 83 | // 84 | // Note that secrets specified in a template (using {{secret}} for example) 85 | // are always renewed, even if this option is set to false. This option only 86 | // applies to the top-level Vault token itself. 87 | // renew = true 88 | 89 | // This section details the SSL options for connecting to the Vault server. 90 | // Please see the SSL options below for more information (they are the same). 91 | // ssl { 92 | // ... 93 | // } 94 | // } 95 | 96 | // This block specifies the basic authentication information to pass with the 97 | // request. For more information on authentication, please see the Consul 98 | // documentation. 99 | auth { 100 | enabled = false 101 | username = "test" 102 | password = "test" 103 | } 104 | 105 | // This block configures the SSL options for connecting to the Consul server. 106 | ssl { 107 | // This enables SSL. Specifying any option for SSL will also enable it. 108 | enabled = false 109 | 110 | // This enables SSL peer verification. The default value is "true", which 111 | // will check the global CA chain to make sure the given certificates are 112 | // valid. If you are using a self-signed certificate that you have not added 113 | // to the CA chain, you may want to disable SSL verification. However, please 114 | // understand this is a potential security vulnerability. 115 | verify = false 116 | 117 | // This is the path to the certificate to use to authenticate. If just a 118 | // certificate is provided, it is assumed to contain both the certificate and 119 | // the key to convert to an X509 certificate. If both the certificate and 120 | // key are specified, Consul Template will automatically combine them into an 121 | // X509 certificate for you. 
122 | cert = "/path/to/client/cert" 123 | key = "/path/to/client/key" 124 | 125 | // This is the path to the certificate authority to use as a CA. This is 126 | // useful for self-signed certificates or for organizations using their own 127 | // internal certificate authority. 128 | ca_cert = "/path/to/ca" 129 | } 130 | 131 | // This block defines the configuration for connecting to a syslog server for 132 | // logging. 133 | syslog { 134 | // This enables syslog logging. Specifying any other option also enables 135 | // syslog logging. 136 | enabled = false 137 | 138 | // This is the name of the syslog facility to log to. 139 | facility = "LOCAL5" 140 | } 141 | 142 | // This block defines the configuration for de-duplication mode. Please see the 143 | // de-duplication mode documentation later in the README for more information 144 | // on how de-duplication mode operates. 145 | deduplicate { 146 | // This enables de-duplication mode. Specifying any other options also enables 147 | // de-duplication mode. 148 | enabled = true 149 | 150 | // This is the prefix to the path in Consul's KV store where de-duplication 151 | // templates will be pre-rendered and stored. 152 | prefix = "consul-template/dedup/" 153 | } 154 | 155 | // This block defines the configuration for exec mode. Please see the exec mode 156 | // documentation at the bottom of this README for more information on how exec 157 | // mode operates and the caveats of this mode. 158 | //exec { 159 | // This is the command to exec as a child process. There can be only one 160 | // command per Consul Template process. 161 | // command = "/usr/bin/app" 162 | 163 | // This is a random splay to wait before killing the command. The default 164 | // value is 0 (no wait), but large clusters should consider setting a splay 165 | // value to prevent all child processes from reloading at the same time when 166 | // data changes occur. When this value is set to non-zero, Consul Template 167 | // will wait a random period of time up to the splay value before reloading 168 | // or killing the child process. This can be used to prevent the thundering 169 | // herd problem on applications that do not gracefully reload. 170 | // splay = "5s" 171 | 172 | // This defines the signal that will be sent to the child process when a 173 | // change occurs in a watched template. The signal will only be sent after 174 | // the process is started, and the process will only be started after all 175 | // dependent templates have been rendered at least once. The default value 176 | // is "" (empty or nil), which tells Consul Template to restart the child 177 | // process instead of sending it a signal. This is useful for legacy 178 | // applications or applications that cannot properly reload their 179 | // configuration without a full reload. 180 | // reload_signal = "SIGUSR1" 181 | 182 | // This defines the signal sent to the child process when Consul Template is 183 | // gracefully shutting down. The application should begin a graceful cleanup. 184 | // If the application does not terminate before the `kill_timeout`, it will 185 | // be terminated (effectively "kill -9"). The default value is "SIGTERM". 186 | // kill_signal = "SIGINT" 187 | 188 | // This defines the amount of time to wait for the child process to gracefully 189 | // terminate when Consul Template exits. After this specified time, the child 190 | // process will be force-killed (effectively "kill -9"). The default value is 191 | // "30s". 
192 | // kill_timeout = "2s" 193 | //} 194 | 195 | // This block defines the configuration for a template. Unlike other blocks, 196 | // this block may be specified multiple times to configure multiple templates. 197 | // It is also possible to configure templates via the CLI directly. 198 | template { 199 | // This is the source file on disk to use as the input template. This is often 200 | // called the "Consul Template template". This option is required. 201 | source = "/etc/consul-template.d/consul-template.tpl" 202 | 203 | // This is the destination path on disk where the source template will render. 204 | // If the parent directories do not exist, Consul Template will attempt to 205 | // create them. 206 | destination = "/etc/haproxy/haproxy.cfg" 207 | 208 | // This is the optional command to run when the template is rendered. The 209 | // command will only run if the resulting template changes. The command must 210 | // return within 30s (configurable), and it must have a successful exit code. 211 | // Consul Template is not a replacement for a process monitor or init system. 212 | command = "/usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid -sf $(cat /var/run/haproxy.pid || '') || true" 213 | 214 | // This is the maximum amount of time to wait for the optional command to 215 | // return. Default is 30s. 216 | command_timeout = "60s" 217 | 218 | // This is the permission to render the file. If this option is left 219 | // unspecified, Consul Template will attempt to match the permissions of the 220 | // file that already exists at the destination path. If no file exists at that 221 | // path, the permissions are 0644. 222 | perms = 0600 223 | 224 | // This option backs up the previously rendered template at the destination 225 | // path before writing a new one. It keeps exactly one backup. This option is 226 | // useful for preventing accidental changes to the data without having a 227 | // rollback strategy. 228 | backup = true 229 | 230 | // These are the delimiters to use in the template. The default is "{{" and 231 | // "}}", but for some templates, it may be easier to use a different delimiter 232 | // that does not conflict with the output file itself. 233 | left_delimiter = "{{" 234 | right_delimiter = "}}" 235 | 236 | // This is the `minimum(:maximum)` to wait before rendering a new template to 237 | // disk and triggering a command, separated by a colon (`:`). If the optional 238 | // maximum value is omitted, it is assumed to be 4x the required minimum value. 239 | // This is a numeric time with a unit suffix ("5s"). There is no default value. 240 | // The wait value for a template takes precedence over any globally-configured 241 | // wait. 
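// Added illustration (not part of the original example file): with the value
// below, Consul Template waits for at least 2 seconds of quiet after the last
// dependency change, and at most 6 seconds in total, before rendering this
// template and invoking its command.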
242 | wait = "2s:6s" 243 | } 244 | -------------------------------------------------------------------------------- /packer/config/consul/consul-template.tpl: -------------------------------------------------------------------------------- 1 | global 2 | maxconn {{key_or_default "service/haproxy/maxconn" "5000"}} 3 | chroot /var/lib/haproxy 4 | user haproxy 5 | group haproxy 6 | daemon 7 | 8 | defaults 9 | mode {{key_or_default "service/haproxy/mode" "tcp"}} 10 | contimeout 5000 11 | clitimeout 50000 12 | srvtimeout 50000 13 | errorfile 400 /etc/haproxy/errors/400.http 14 | errorfile 403 /etc/haproxy/errors/403.http 15 | errorfile 408 /etc/haproxy/errors/408.http 16 | errorfile 500 /etc/haproxy/errors/500.http 17 | errorfile 502 /etc/haproxy/errors/502.http 18 | errorfile 503 /etc/haproxy/errors/503.http 19 | errorfile 504 /etc/haproxy/errors/504.http 20 | 21 | listen stats :81 22 | balance 23 | mode http 24 | stats enable 25 | stats auth me:password 26 | 27 | listen tcp-in 28 | mode tcp 29 | balance roundrobin 30 | bind *:80 31 | {{range $tag, $services := services | byTag}}{{if eq $tag "routed"}}{{range $service := $services}}{{range service $service.Name}}server {{.Name}}.service {{.Address}}:{{.Port}} 32 | {{end}}{{end}}{{end}}{{end}} 33 | -------------------------------------------------------------------------------- /packer/config/consul/consul_server.json: -------------------------------------------------------------------------------- 1 | { 2 | "server": true, 3 | "rejoin_after_leave": true, 4 | "bootstrap_expect": {{ bootstrap_expect }}, 5 | "statsite_addr": "statsite.service.consul:8125", 6 | "statsite_prefix": "consul.consul_server", 7 | "encrypt": "{{ consul_server_encrypt_key }}", 8 | "ca_file": "/etc/consul.d/ssl/root.crt", 9 | "cert_file": "/etc/consul.d/ssl/consul.crt", 10 | "key_file": "/etc/consul.d/ssl/consul.key", 11 | "verify_incoming": true, 12 | "verify_outgoing": true, 13 | "service": { 14 | "name": "consul-server", 15 | "tags": ["{{ tags }}"] 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /packer/config/consul/default.json: -------------------------------------------------------------------------------- 1 | { 2 | "data_dir": "{{ data_dir }}", 3 | "ui_dir": "/opt/consul/ui", 4 | "client_addr": "0.0.0.0", 5 | "bind_addr": "0.0.0.0", 6 | "advertise_addr": "{{ local_ip }}", 7 | "datacenter": "{{ datacenter }}", 8 | "node_name": "{{ node_name }}", 9 | "encrypt": "{{ consul_server_encrypt_key }}", 10 | "ca_file": "/etc/consul.d/ssl/root.crt", 11 | "cert_file": "/etc/consul.d/ssl/consul.crt", 12 | "key_file": "/etc/consul.d/ssl/consul.key", 13 | "verify_incoming": true, 14 | "verify_outgoing": true, 15 | "log_level": "{{ log_level }}", 16 | "skip_leave_on_interrupt": true, 17 | "leave_on_terminate": true, 18 | "dns_config": { 19 | "allow_stale": true 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /packer/config/consul/graphite.json: -------------------------------------------------------------------------------- 1 | { 2 | "service": { 3 | "name": "graphite", 4 | "port": 80, 5 | "check": { 6 | "id": "graphite", 7 | "name": "Running on port 80", 8 | "http": "http://localhost:80", 9 | "interval": "10s", 10 | "timeout": "1s" 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /packer/config/consul/nomad_client.json: -------------------------------------------------------------------------------- 1 | { 2 | 
"statsite_addr": "statsite.service.consul:8125", 3 | "statsite_prefix": "consul.nomad_client", 4 | "service": { 5 | "name": "nomad-client", 6 | "tags": ["{{ tags }}"], 7 | "port": 4646, 8 | "check": { 9 | "id": "nomad-client", 10 | "name": "Running on port 4646", 11 | "tcp": "localhost:4646", 12 | "interval": "10s", 13 | "timeout": "1s" 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /packer/config/consul/nomad_server.json: -------------------------------------------------------------------------------- 1 | { 2 | "statsite_addr": "statsite.service.consul:8125", 3 | "statsite_prefix": "consul.nomad_server", 4 | "service": { 5 | "name": "nomad-server", 6 | "tags": ["{{ tags }}"], 7 | "port": 4646, 8 | "check": { 9 | "id": "nomad-server", 10 | "name": "Running on port 4646", 11 | "tcp": "localhost:4646", 12 | "interval": "10s", 13 | "timeout": "1s" 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /packer/config/consul/redis.json: -------------------------------------------------------------------------------- 1 | { 2 | "service": { 3 | "name": "redis", 4 | "port": 6379, 5 | "checks": [ 6 | { 7 | "id": "redis-localhost", 8 | "name": "Can connect on localhost:6379", 9 | "tcp": "localhost:6379", 10 | "interval": "10s", 11 | "timeout": "1s" 12 | }, 13 | { 14 | "id": "redis-local-ip", 15 | "name": "Can connect on {{ local_ip }}:6379", 16 | "tcp": "{{ local_ip }}:6379", 17 | "interval": "10s", 18 | "timeout": "1s" 19 | } 20 | ] 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /packer/config/consul/statsite.json: -------------------------------------------------------------------------------- 1 | { 2 | "service": { 3 | "name": "statsite", 4 | "port": 8125, 5 | "checks": [ 6 | { 7 | "id": "statsite-localhost", 8 | "name": "Can connect on localhost:8125", 9 | "tcp": "localhost:8125", 10 | "interval": "10s", 11 | "timeout": "1s" 12 | }, 13 | { 14 | "id": "statsite-local-ip", 15 | "name": "Can connect on {{ local_ip }}:8125", 16 | "tcp": "{{ local_ip }}:8125", 17 | "interval": "10s", 18 | "timeout": "1s" 19 | } 20 | ] 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /packer/config/consul/upstart.consul: -------------------------------------------------------------------------------- 1 | description "Consul agent" 2 | 3 | start on runlevel [2345] 4 | stop on runlevel [!2345] 5 | 6 | # Respawn infinitely 7 | respawn limit unlimited 8 | 9 | console log 10 | 11 | nice -10 12 | limit nofile 65535 65535 13 | 14 | pre-start script 15 | while [ ! -f /etc/consul.d/configured ] 16 | do 17 | DT=$(date '+%Y/%m/%d %H:%M:%S') 18 | echo "$DT: Waiting on configuration" 19 | sleep 1 20 | done 21 | end script 22 | 23 | script 24 | if [ -f "/etc/service/consul" ]; then 25 | . 
/etc/service/consul 26 | fi 27 | 28 | # Make sure to use all our CPUs, because Consul can block a scheduler thread 29 | export GOMAXPROCS=`nproc` 30 | 31 | exec /usr/local/bin/consul agent -config-dir="/etc/consul.d" \$${CONSUL_FLAGS} >>/var/log/consul.log 2>&1 32 | end script 33 | 34 | post-start script 35 | end script 36 | -------------------------------------------------------------------------------- /packer/config/consul/upstart.consul-template: -------------------------------------------------------------------------------- 1 | description "Consul Template agent" 2 | 3 | start on runlevel [2345] 4 | stop on runlevel [!2345] 5 | 6 | # Respawn infinitely 7 | respawn limit unlimited 8 | 9 | console log 10 | 11 | nice -10 12 | limit nofile 65535 65535 13 | 14 | pre-start script 15 | exec 2>>/dev/.initramfs/consul-template.log 16 | set -x 17 | 18 | count=`consul members | grep server | wc -l` 19 | while [ $count -lt 1 ] 20 | do 21 | DT=$(date '+%Y/%m/%d %H:%M:%S') 22 | echo "$DT: Waiting on configuration" 23 | sleep 1 24 | count=`consul members | grep server | wc -l` 25 | done 26 | end script 27 | 28 | script 29 | exec 2>>/dev/.initramfs/consul-template.log 30 | set -x 31 | if [ -f "/etc/service/consul-template" ]; then 32 | . /etc/service/consul-template 33 | fi 34 | 35 | exec /usr/local/bin/consul-template -log-level debug -config /etc/consul-template.d/consul-template.hcl >>/var/log/consul-template.log 2>&1 36 | end script 37 | -------------------------------------------------------------------------------- /packer/config/consul/upstart.haproxy: -------------------------------------------------------------------------------- 1 | description "haproxy server" 2 | 3 | start on runlevel [2345] 4 | stop on runlevel [!2345] 5 | 6 | # Respawn infinitely 7 | respawn limit unlimited 8 | 9 | console log 10 | 11 | nice -10 12 | limit nofile 65535 65535 13 | 14 | pre-start script 15 | exec 2>>/dev/.initramfs/haproxy-template.log 16 | set -x 17 | # The default config has no listen line 18 | count=`grep listen /etc/haproxy/haproxy.cfg | wc -l` 19 | while [ $count -lt 1 ] 20 | do 21 | DT=$(date '+%Y/%m/%d %H:%M:%S') 22 | echo "$DT: Waiting on configuration" 23 | sleep 1 24 | count=`grep listen /etc/haproxy/haproxy.cfg | wc -l` 25 | done 26 | end script 27 | 28 | script 29 | exec 2>>/dev/.initramfs/haproxy-template.log 30 | set -x 31 | if [ -f "/etc/service/haproxy" ]; then 32 | . 
/etc/service/haproxy 33 | fi 34 | 35 | exec /usr/sbin/haproxy -f /etc/haproxy/haproxy.cfg -p /var/run/haproxy.pid >>/var/log/haproxy.log 2>&1 36 | end script 37 | -------------------------------------------------------------------------------- /packer/config/consul/utility.json: -------------------------------------------------------------------------------- 1 | { 2 | "statsite_addr": "statsite.service.consul:8125", 3 | "statsite_prefix": "consul.utility", 4 | "service": { 5 | "name": "utility", 6 | "tags": ["{{ tags }}"] 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /packer/config/nomad/client.hcl: -------------------------------------------------------------------------------- 1 | client { 2 | enabled = true 3 | node_class = "{{ node_class }}" 4 | 5 | client_max_port = 15000 6 | 7 | options { 8 | "docker.cleanup.image" = "0" 9 | "driver.raw_exec.enable" = "1" 10 | } 11 | 12 | meta { 13 | region = "{{ region }}" 14 | machine_type = "{{ machine_type }}" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /packer/config/nomad/default.hcl: -------------------------------------------------------------------------------- 1 | data_dir = "{{ data_dir }}" 2 | enable_debug = true 3 | bind_addr = "0.0.0.0" 4 | region = "{{ region }}" 5 | datacenter = "{{ datacenter }}" 6 | name = "{{ name }}" 7 | log_level = "{{ log_level }}" 8 | 9 | advertise { 10 | http = "{{ local_ip }}:4646" 11 | rpc = "{{ local_ip }}:4647" 12 | serf = "{{ local_ip }}:4648" 13 | } 14 | 15 | telemetry { 16 | statsite_address = "statsite.service.consul:8125" 17 | disable_hostname = true 18 | } 19 | -------------------------------------------------------------------------------- /packer/config/nomad/images/nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | ############################################################ 2 | # Dockerfile to build Nginx Installed Containers 3 | # Based on Ubuntu 4 | ############################################################ 5 | 6 | # Set the base image to Ubuntu 7 | FROM ubuntu 8 | 9 | # File Author / Maintainer 10 | MAINTAINER HashiCorp 11 | 12 | # Install Nginx 13 | 14 | # Add application repository URL to the default sources 15 | # RUN echo "deb http://archive.ubuntu.com/ubuntu/ raring main universe" >> /etc/apt/sources.list 16 | 17 | # Update the repository 18 | RUN apt-get update 19 | 20 | # Install necessary tools 21 | RUN apt-get install -y wget dialog net-tools curl unzip nginx 22 | 23 | # Remove the default Nginx configuration file 24 | RUN rm -v /etc/nginx/nginx.conf 25 | 26 | # Copy a configuration file from the current directory 27 | ADD nginx.conf /etc/nginx/ 28 | 29 | # Append "daemon off;" to the end of the configuration 30 | RUN echo "daemon off;" >> /etc/nginx/nginx.conf 31 | 32 | # Install consul-template 33 | RUN cd /tmp 34 | 35 | # Download consul-template 36 | RUN curl -L https://releases.hashicorp.com/consul-template/0.11.1/consul-template_0.11.1_linux_amd64.zip > consul_template.zip 37 | 38 | # Unzip consul-template 39 | RUN unzip consul_template.zip -d /usr/local/bin 40 | 41 | # Update consul-template permissions 42 | RUN chmod 0755 /usr/local/bin/consul-template && \ 43 | chown root:root /usr/local/bin/consul-template 44 | 45 | # Create consul-template configuration folders 46 | RUN mkdir -p /etc/consul_template.d && \ 47 | chmod 755 /etc/consul_template.d && \ 48 | mkdir -p /opt/consul_template && \ 49 | chmod 755 
/opt/consul_template 50 | 51 | # Copy custom configuration files from the current directory 52 | ADD consul_template/nginx.hcl /etc/consul_template.d/ 53 | ADD consul_template/nginx.ctmpl /opt/consul_template/ 54 | 55 | # Expose ports 56 | EXPOSE 80 57 | 58 | # Set the default command to execute when creating a new container 59 | CMD /usr/local/bin/consul-template -config "/etc/consul_template.d" >>/var/log/consul_template.log 2>&1 60 | -------------------------------------------------------------------------------- /packer/config/nomad/images/nginx/consul_template/nginx.ctmpl: -------------------------------------------------------------------------------- 1 | worker_processes 4; 2 | 3 | events { worker_connections 1024; } 4 | 5 | http { 6 | upstream nodejs { 7 | least_conn;{{range service "nodejs"}} 8 | server {{.Address}}:{{.Port}} weight=10 max_fails=3 fail_timeout=30s;{{end}} 9 | } 10 | 11 | server { 12 | listen 80; 13 | 14 | location / { 15 | proxy_pass http://nodejs; 16 | proxy_http_version 1.1; 17 | proxy_set_header Upgrade $http_upgrade; 18 | proxy_set_header Connection 'upgrade'; 19 | proxy_set_header Host $host; 20 | proxy_cache_bypass $http_upgrade; 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /packer/config/nomad/images/nginx/consul_template/nginx.hcl: -------------------------------------------------------------------------------- 1 | consul = "127.0.0.1:8500" 2 | max_stale = "10m" 3 | retry = "5s" 4 | log_level = "warn" 5 | 6 | template { 7 | source = "/opt/consul_template/nginx.ctmpl" 8 | destination = "/etc/nginx/nginx.conf" 9 | command = "service nginx restart" 10 | } 11 | -------------------------------------------------------------------------------- /packer/config/nomad/images/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | worker_processes 1; 2 | 3 | events { worker_connections 1024; } 4 | 5 | http { 6 | 7 | sendfile on; 8 | 9 | server { 10 | 11 | listen 80; 12 | 13 | location / { 14 | proxy_pass http://httpstat.us/; 15 | proxy_set_header X-Real-IP $remote_addr; 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /packer/config/nomad/images/nodejs/Dockerfile: -------------------------------------------------------------------------------- 1 | # Set the base image to Ubuntu 2 | FROM ubuntu 3 | 4 | # File Author / Maintainer 5 | MAINTAINER HashiCorp 6 | 7 | # Install Node.js and other dependencies (the build runs as root, so no sudo is needed) 8 | RUN apt-get update && \ 9 | apt-get -y install curl && \ 10 | curl -sL https://deb.nodesource.com/setup | bash - && \ 11 | apt-get -y install python build-essential nodejs 12 | 13 | # Install nodemon 14 | RUN npm install -g nodemon 15 | 16 | # Provides cached layer for node_modules 17 | ADD package.json /tmp/package.json 18 | RUN cd /tmp && npm install 19 | RUN mkdir -p /src && cp -a /tmp/node_modules /src/ 20 | 21 | # Define working directory 22 | WORKDIR /src 23 | ADD . 
/src 24 | 25 | # Expose port 26 | EXPOSE 8080 27 | 28 | # Run app using nodemon 29 | CMD nodemon /src/index.js >>/var/log/nodejs.log 2>&1 30 | -------------------------------------------------------------------------------- /packer/config/nomad/images/nodejs/index.js: -------------------------------------------------------------------------------- 1 | var express = require('express'), 2 | http = require('http'), 3 | redis = require('redis'), 4 | app = express(), 5 | client = redis.createClient(process.env.REDIS_PORT, process.env.REDIS_ADDR); 6 | 7 | app.get('/', function(req, res, next) { 8 | client.incr('counter', function(err, counter) { 9 | if(err) return next(err); 10 | 11 | res.send('This page has been viewed ' + counter + ' times!'); 12 | }); 13 | }); 14 | 15 | http.createServer(app).listen(process.env.NOMAD_PORT_http || 8080, function() { 16 | console.log('Listening on port ' + (process.env.NOMAD_PORT_http || 8080)); 17 | }); 18 | -------------------------------------------------------------------------------- /packer/config/nomad/images/nodejs/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "nodejs", 3 | "version": "1.0.0", 4 | "description": "Node.js Application Counter", 5 | "main": "index.js", 6 | "author": "HashiCorp", 7 | "license": "ISC", 8 | "dependencies": { 9 | "express": "^4.12.3", 10 | "hiredis": "^0.2.0", 11 | "mocha": "^2.2.1", 12 | "redis": "^0.12.1" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /packer/config/nomad/images/redis/Dockerfile: -------------------------------------------------------------------------------- 1 | # Set the base image to Ubuntu 2 | FROM ubuntu 3 | 4 | # File Author / Maintainer 5 | MAINTAINER HashiCorp 6 | 7 | # Update the repository and install Redis Server 8 | RUN apt-get update && apt-get install -y redis-server 9 | 10 | # Expose Redis port 6379 11 | EXPOSE 6379 12 | 13 | # Run Redis Server 14 | ENTRYPOINT ["/usr/bin/redis-server"] 15 | -------------------------------------------------------------------------------- /packer/config/nomad/nomad_join.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT nomad_join.sh: $1" 7 | echo "$DT nomad_join.sh: $1" | sudo tee -a /var/log/user_data.log > /dev/null 8 | } 9 | 10 | logger "Begin script" 11 | 12 | NOMAD_JOIN_NAME=$1 13 | logger "Nomad join name: ${NOMAD_JOIN_NAME}" 14 | SERVER=$2 15 | logger "Nomad server: ${SERVER}" 16 | 17 | servers() { 18 | PASSING=$(curl -s "http://127.0.0.1:8500/v1/health/service/${NOMAD_JOIN_NAME}") 19 | 20 | # Check if valid json is returned, otherwise jq command fails 21 | if [[ "$PASSING" == [{* ]]; then 22 | echo $(echo $PASSING | jq -r '.[].Node.Address' | tr '\n' ' ') 23 | fi 24 | } 25 | 26 | NOMAD_SERVERS=$(servers) 27 | logger "Initial Nomad servers: $NOMAD_SERVERS" 28 | NOMAD_SERVER_LEN=$(echo $NOMAD_SERVERS | wc -w) 29 | logger "Initial Nomad server length: $NOMAD_SERVER_LEN" 30 | SLEEPTIME=1 31 | 32 | while [ $NOMAD_SERVER_LEN -lt 3 ] 33 | do 34 | if [ $SLEEPTIME -gt 20 ]; then 35 | logger "ERROR: NOMAD SETUP NOT COMPLETE! Manual intervention required." 
36 | exit 2 37 | else 38 | logger "Waiting for optimum quorum size, currently: $NOMAD_SERVER_LEN, waiting $SLEEPTIME seconds" 39 | NOMAD_SERVERS=$(servers) 40 | logger "Nomad servers: $NOMAD_SERVERS" 41 | NOMAD_SERVER_LEN=$(echo $NOMAD_SERVERS | wc -w) 42 | logger "Nomad server length: $NOMAD_SERVER_LEN" 43 | sleep $SLEEPTIME 44 | SLEEPTIME=$((SLEEPTIME + 1)) 45 | fi 46 | done 47 | 48 | logger "Nomad server join" 49 | 50 | if [ -z "$SERVER" ] || [ "$SERVER" == "client" ]; then 51 | # Adding port 4647 for clients to join 52 | NOMAD_SERVERS="${NOMAD_SERVERS} " 53 | NOMAD_SERVERS=${NOMAD_SERVERS// /$':4647 '} 54 | logger "Nomad client joining: ${NOMAD_SERVERS}" 55 | nomad client-config -update-servers ${NOMAD_SERVERS} 56 | else 57 | logger "Nomad server joining: ${NOMAD_SERVERS}" 58 | nomad server-join ${NOMAD_SERVERS} 59 | fi 60 | 61 | logger "Done" 62 | -------------------------------------------------------------------------------- /packer/config/nomad/server.hcl: -------------------------------------------------------------------------------- 1 | addresses { 2 | rpc = "{{ local_ip }}" 3 | serf = "{{ local_ip }}" 4 | } 5 | 6 | server { 7 | enabled = true 8 | bootstrap_expect = {{ bootstrap_expect }} 9 | heartbeat_grace = "30s" 10 | } 11 | -------------------------------------------------------------------------------- /packer/config/nomad/upstart.nomad: -------------------------------------------------------------------------------- 1 | description "Nomad agent" 2 | 3 | start on runlevel [2345] 4 | stop on runlevel [!2345] 5 | 6 | # Respawn infinitely 7 | respawn limit unlimited 8 | 9 | console log 10 | 11 | nice -10 12 | limit nofile 65535 65535 13 | 14 | pre-start script 15 | while [ ! -f /etc/nomad.d/configured ] 16 | do 17 | DT=$(date '+%Y/%m/%d %H:%M:%S') 18 | echo "$DT: Waiting on configuration" 19 | sleep 1 20 | done 21 | end script 22 | 23 | script 24 | if [ -f "/etc/service/nomad" ]; then 25 | . /etc/service/nomad 26 | fi 27 | 28 | exec /usr/local/bin/nomad agent -config="/etc/nomad.d" \$${NOMAD_FLAGS} >>/var/log/nomad.log 2>&1 29 | end script 30 | 31 | post-start script 32 | end script 33 | -------------------------------------------------------------------------------- /packer/config/statsite/default.conf: -------------------------------------------------------------------------------- 1 | [statsite] 2 | binary_stream=false 3 | port = 8125 4 | tcp_port = 8125 5 | udp_port = 8125 6 | input_counter = "numStats" 7 | log_level = DEBUG 8 | flush_interval = 1 9 | timer_eps = 0.01 10 | set_eps = 0.02 11 | stream_cmd = tee -a {{ data_dir }}/sink.log | python -u /usr/share/statsite/sinks/graphite.py localhost 2003 12 | -------------------------------------------------------------------------------- /packer/config/statsite/upstart.statsite: -------------------------------------------------------------------------------- 1 | description "Statsite agent" 2 | 3 | start on runlevel [2345] 4 | stop on runlevel [!2345] 5 | 6 | # Respawn infinitely 7 | respawn limit unlimited 8 | 9 | console log 10 | 11 | nice -10 12 | limit nofile 65535 65535 13 | 14 | pre-start script 15 | while [ ! -f /etc/statsite.d/configured ] 16 | do 17 | DT=$(date '+%Y/%m/%d %H:%M:%S') 18 | echo "$DT: Waiting on configuration" 19 | sleep 1 20 | done 21 | end script 22 | 23 | script 24 | if [ -f "/etc/service/statsite" ]; then 25 | . 
/etc/service/statsite 26 | fi 27 | 28 | exec /usr/local/bin/statsite -f /etc/statsite.d/default.conf >>/var/log/statsite.log 2>&1 29 | end script 30 | 31 | post-start script 32 | end script 33 | -------------------------------------------------------------------------------- /packer/config/vault/default.hcl: -------------------------------------------------------------------------------- 1 | backend "consul" { 2 | cluster_name = "vault" 3 | address = "127.0.0.1:8500" 4 | path = "vault" 5 | tls_ca_file = "/etc/consul.d/ssl/root.crt" 6 | tls_cert_file = "/etc/consul.d/ssl/consul.crt" 7 | tls_key_file = "/etc/consul.d/ssl/consul.key" 8 | // TODO: add consul ACL token 9 | } 10 | 11 | listener "tcp" { 12 | address = "127.0.0.1:8200" 13 | tls_cert_file = "/etc/vault.d/vault.crt" 14 | tls_key_file = "/etc/vault.d/vault.key" 15 | } 16 | 17 | telemetry { 18 | statsite_address = "127.0.0.1:8125" 19 | disable_hostname = true 20 | } 21 | -------------------------------------------------------------------------------- /packer/config/vault/upstart.vault: -------------------------------------------------------------------------------- 1 | description "Vault agent" 2 | 3 | start on runlevel [2345] 4 | stop on runlevel [!2345] 5 | 6 | # Respawn infinitely 7 | respawn limit unlimited 8 | 9 | console log 10 | 11 | nice -10 12 | limit nofile 65535 65535 13 | 14 | pre-start script 15 | while [ ! -f /etc/vault.d/configured ] 16 | do 17 | DT=$(date '+%Y/%m/%d %H:%M:%S') 18 | echo "$DT: Waiting on configuration" 19 | sleep 1 20 | done 21 | end script 22 | 23 | script 24 | if [ -f "/etc/service/vault" ]; then 25 | . /etc/service/vault 26 | fi 27 | 28 | # Make sure to use all our CPUs, because Consul can block a scheduler thread 29 | export GOMAXPROCS=`nproc` 30 | 31 | exec /usr/local/bin/vault server -config=/etc/vault.d \$${VAULT_FLAGS} >>/var/log/vault.log 2>&1 32 | end script 33 | 34 | post-start script 35 | end script 36 | -------------------------------------------------------------------------------- /packer/gce_consul_server.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "gce_project_id": "{{env `GCE_PROJECT_ID`}}", 4 | "gce_zone": "{{env `GCE_DEFAULT_ZONE`}}", 5 | "gce_source_image": "{{env `GCE_SOURCE_IMAGE`}}", 6 | "gce_account_file": "gce-credentials.json", 7 | "name": "c1m-consul-server", 8 | "scripts_dir": "packer/scripts", 9 | "config_dir": "packer/config", 10 | "ssh_username": "ubuntu", 11 | "dns_listen_addr": "0.0.0.0" 12 | }, 13 | "builders": [ 14 | { 15 | "type": "googlecompute", 16 | "project_id": "{{user `gce_project_id`}}", 17 | "account_file": "{{user `gce_account_file`}}", 18 | "zone": "{{user `gce_zone`}}", 19 | "network": "default", 20 | "source_image": "{{user `gce_source_image`}}", 21 | "ssh_username": "{{user `ssh_username`}}", 22 | "image_name": "packer-{{user `name`}}-{{timestamp}}", 23 | "image_description": "packer-{{user `name`}}-image", 24 | "use_internal_ip": false, 25 | "tags": [ 26 | "{{user `name`}}" 27 | ] 28 | } 29 | ], 30 | "provisioners": [ 31 | { 32 | "type": "shell", 33 | "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'", 34 | "inline": [ 35 | "mkdir -p /ops/{{user `scripts_dir`}}", 36 | "chmod a+w /ops/{{user `scripts_dir`}}", 37 | "mkdir -p /ops/{{user `config_dir`}}", 38 | "chmod a+w /ops/{{user `config_dir`}}" 39 | ] 40 | }, 41 | { 42 | "type": "file", 43 | "source": "{{user `scripts_dir`}}/.", 44 | "destination": "/ops/{{user `scripts_dir`}}" 45 | }, 46 | { 
47 | "type": "file", 48 | "source": "{{user `config_dir`}}/.", 49 | "destination": "/ops/{{user `config_dir`}}" 50 | }, 51 | { 52 | "type": "shell", 53 | "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'", 54 | "inline": [ 55 | "sh /ops/{{user `scripts_dir`}}/shared.sh {{user `config_dir`}} {{user `dns_listen_addr`}}", 56 | "sh /ops/{{user `scripts_dir`}}/consul_server.sh {{user `config_dir`}}", 57 | "sh /ops/{{user `scripts_dir`}}/shared/cleanup.sh" 58 | ] 59 | } 60 | ] 61 | } 62 | -------------------------------------------------------------------------------- /packer/gce_nomad_agent.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "gce_project_id": "{{env `GCE_PROJECT_ID`}}", 4 | "gce_zone": "{{env `GCE_DEFAULT_ZONE`}}", 5 | "gce_source_image": "{{env `GCE_SOURCE_IMAGE`}}", 6 | "gce_account_file": "gce-credentials.json", 7 | "name": "c1m-nomad-client", 8 | "scripts_dir": "packer/scripts", 9 | "config_dir": "packer/config", 10 | "ssh_username": "ubuntu", 11 | "dns_listen_addr": "127.0.0.1" 12 | }, 13 | "builders": [ 14 | { 15 | "type": "googlecompute", 16 | "project_id": "{{user `gce_project_id`}}", 17 | "account_file": "{{user `gce_account_file`}}", 18 | "zone": "{{user `gce_zone`}}", 19 | "network": "default", 20 | "source_image": "{{user `gce_source_image`}}", 21 | "ssh_username": "{{user `ssh_username`}}", 22 | "image_name": "packer-{{user `name`}}-{{timestamp}}", 23 | "image_description": "packer-{{user `name`}}-image", 24 | "use_internal_ip": false, 25 | "tags": [ 26 | "{{user `name`}}" 27 | ] 28 | } 29 | ], 30 | "provisioners": [ 31 | { 32 | "type": "shell", 33 | "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'", 34 | "inline": [ 35 | "mkdir -p /ops/{{user `scripts_dir`}}", 36 | "chmod a+w /ops/{{user `scripts_dir`}}", 37 | "mkdir -p /ops/{{user `config_dir`}}", 38 | "chmod a+w /ops/{{user `config_dir`}}" 39 | ] 40 | }, 41 | { 42 | "type": "file", 43 | "source": "{{user `scripts_dir`}}/.", 44 | "destination": "/ops/{{user `scripts_dir`}}" 45 | }, 46 | { 47 | "type": "file", 48 | "source": "{{user `config_dir`}}/.", 49 | "destination": "/ops/{{user `config_dir`}}" 50 | }, 51 | { 52 | "type": "shell", 53 | "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'", 54 | "inline": [ 55 | "sh /ops/{{user `scripts_dir`}}/shared.sh {{user `config_dir`}} {{user `dns_listen_addr`}}", 56 | "sh /ops/{{user `scripts_dir`}}/nomad_agent.sh {{user `config_dir`}} {{user `ssh_username`}}", 57 | "sh /ops/{{user `scripts_dir`}}/shared/cleanup.sh" 58 | ] 59 | } 60 | ] 61 | } 62 | -------------------------------------------------------------------------------- /packer/gce_nomad_server.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "gce_project_id": "{{env `GCE_PROJECT_ID`}}", 4 | "gce_zone": "{{env `GCE_DEFAULT_ZONE`}}", 5 | "gce_source_image": "{{env `GCE_SOURCE_IMAGE`}}", 6 | "gce_account_file": "gce-credentials.json", 7 | "name": "c1m-nomad-server", 8 | "scripts_dir": "packer/scripts", 9 | "config_dir": "packer/config", 10 | "ssh_username": "ubuntu", 11 | "dns_listen_addr": "127.0.0.1" 12 | }, 13 | "builders": [ 14 | { 15 | "type": "googlecompute", 16 | "project_id": "{{user `gce_project_id`}}", 17 | "account_file": "{{user `gce_account_file`}}", 18 | "zone": "{{user `gce_zone`}}", 19 | "network": "default", 20 | "source_image": "{{user 
`gce_source_image`}}", 21 | "ssh_username": "{{user `ssh_username`}}", 22 | "image_name": "packer-{{user `name`}}-{{timestamp}}", 23 | "image_description": "packer-{{user `name`}}-image", 24 | "use_internal_ip": false, 25 | "tags": [ 26 | "{{user `name`}}" 27 | ] 28 | } 29 | ], 30 | "provisioners": [ 31 | { 32 | "type": "shell", 33 | "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'", 34 | "inline": [ 35 | "mkdir -p /ops/{{user `scripts_dir`}}", 36 | "chmod a+w /ops/{{user `scripts_dir`}}", 37 | "mkdir -p /ops/{{user `config_dir`}}", 38 | "chmod a+w /ops/{{user `config_dir`}}" 39 | ] 40 | }, 41 | { 42 | "type": "file", 43 | "source": "{{user `scripts_dir`}}/.", 44 | "destination": "/ops/{{user `scripts_dir`}}" 45 | }, 46 | { 47 | "type": "file", 48 | "source": "{{user `config_dir`}}/.", 49 | "destination": "/ops/{{user `config_dir`}}" 50 | }, 51 | { 52 | "type": "shell", 53 | "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'", 54 | "inline": [ 55 | "sh /ops/{{user `scripts_dir`}}/shared.sh {{user `config_dir`}} {{user `dns_listen_addr`}}", 56 | "sh /ops/{{user `scripts_dir`}}/nomad_server.sh {{user `config_dir`}}", 57 | "sh /ops/{{user `scripts_dir`}}/shared/cleanup.sh" 58 | ] 59 | } 60 | ] 61 | } 62 | -------------------------------------------------------------------------------- /packer/gce_utility.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "gce_project_id": "{{env `GCE_PROJECT_ID`}}", 4 | "gce_zone": "{{env `GCE_DEFAULT_ZONE`}}", 5 | "gce_source_image": "{{env `GCE_SOURCE_IMAGE`}}", 6 | "gce_account_file": "gce-credentials.json", 7 | "name": "c1m-utility", 8 | "scripts_dir": "packer/scripts", 9 | "config_dir": "packer/config", 10 | "ssh_username": "ubuntu", 11 | "dns_listen_addr": "127.0.0.1" 12 | }, 13 | 14 | "builders": [ 15 | { 16 | "type": "googlecompute", 17 | "project_id": "{{user `gce_project_id`}}", 18 | "account_file": "{{user `gce_account_file`}}", 19 | "zone": "{{user `gce_zone`}}", 20 | "network": "default", 21 | "source_image": "{{user `gce_source_image`}}", 22 | "ssh_username": "{{user `ssh_username`}}", 23 | "image_name": "packer-{{user `name`}}-{{timestamp}}", 24 | "image_description": "packer-{{user `name`}}-image", 25 | "use_internal_ip": false, 26 | "tags": [ 27 | "{{user `name`}}" 28 | ] 29 | } 30 | ], 31 | "provisioners": [ 32 | { 33 | "type": "shell", 34 | "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'", 35 | "inline": [ 36 | "mkdir -p /ops/{{user `scripts_dir`}}", 37 | "chmod a+w /ops/{{user `scripts_dir`}}", 38 | "mkdir -p /ops/{{user `config_dir`}}", 39 | "chmod a+w /ops/{{user `config_dir`}}" 40 | ] 41 | }, 42 | { 43 | "type": "file", 44 | "source": "{{user `scripts_dir`}}/.", 45 | "destination": "/ops/{{user `scripts_dir`}}" 46 | }, 47 | { 48 | "type": "file", 49 | "source": "{{user `config_dir`}}/.", 50 | "destination": "/ops/{{user `config_dir`}}" 51 | }, 52 | { 53 | "type": "shell", 54 | "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'", 55 | "inline": [ 56 | "sh /ops/{{user `scripts_dir`}}/shared.sh {{user `config_dir`}} {{user `dns_listen_addr`}}", 57 | "sh /ops/{{user `scripts_dir`}}/utility.sh {{user `config_dir`}}", 58 | "sh /ops/{{user `scripts_dir`}}/shared/cleanup.sh" 59 | ] 60 | } 61 | ] 62 | 63 | } 64 | -------------------------------------------------------------------------------- 
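The five packer templates in this repo (`gce_consul_server.json`, `gce_vault_server.json`, `gce_nomad_server.json`, `gce_nomad_agent.json`, `gce_utility.json`) share the same builder and provisioner layout and differ only in their `name` variable and the role script they run, so they are normally built in one pass. A hedged convenience wrapper, not part of the repo, assuming the `GCE_*` environment variables from the README are already exported:

```bash
#!/bin/bash
set -e

# Build each image template in sequence; every build produces an image
# named packer-c1m-<role>-<timestamp> in the configured GCE project.
for role in consul_server vault_server nomad_server nomad_agent utility; do
  packer build "packer/gce_${role}.json"
done
```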
/packer/gce_vault_server.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "gce_project_id": "{{env `GCE_PROJECT_ID`}}", 4 | "gce_zone": "{{env `GCE_DEFAULT_ZONE`}}", 5 | "gce_source_image": "{{env `GCE_SOURCE_IMAGE`}}", 6 | "gce_account_file": "gce-credentials.json", 7 | "name": "c1m-vault-server", 8 | "scripts_dir": "packer/scripts", 9 | "config_dir": "packer/config", 10 | "ssh_username": "ubuntu", 11 | "dns_listen_addr": "127.0.0.1" 12 | }, 13 | "builders": [ 14 | { 15 | "type": "googlecompute", 16 | "project_id": "{{user `gce_project_id`}}", 17 | "account_file": "{{user `gce_account_file`}}", 18 | "zone": "{{user `gce_zone`}}", 19 | "network": "default", 20 | "source_image": "{{user `gce_source_image`}}", 21 | "ssh_username": "{{user `ssh_username`}}", 22 | "image_name": "packer-{{user `name`}}-{{timestamp}}", 23 | "image_description": "packer-{{user `name`}}-image", 24 | "use_internal_ip": false, 25 | "tags": [ 26 | "{{user `name`}}" 27 | ] 28 | } 29 | ], 30 | "provisioners": [ 31 | { 32 | "type": "shell", 33 | "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'", 34 | "inline": [ 35 | "mkdir -p /ops/{{user `scripts_dir`}}", 36 | "chmod a+w /ops/{{user `scripts_dir`}}", 37 | "mkdir -p /ops/{{user `config_dir`}}", 38 | "chmod a+w /ops/{{user `config_dir`}}" 39 | ] 40 | }, 41 | { 42 | "type": "file", 43 | "source": "{{user `scripts_dir`}}/.", 44 | "destination": "/ops/{{user `scripts_dir`}}" 45 | }, 46 | { 47 | "type": "file", 48 | "source": "{{user `config_dir`}}/.", 49 | "destination": "/ops/{{user `config_dir`}}" 50 | }, 51 | { 52 | "type": "shell", 53 | "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'", 54 | "inline": [ 55 | "sh /ops/{{user `scripts_dir`}}/shared.sh {{user `config_dir`}} {{user `dns_listen_addr`}}", 56 | "sh /ops/{{user `scripts_dir`}}/vault_server.sh {{user `config_dir`}}", 57 | "sh /ops/{{user `scripts_dir`}}/shared/cleanup.sh" 58 | ] 59 | } 60 | ] 61 | } 62 | -------------------------------------------------------------------------------- /packer/scripts/consul_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | sh $(dirname $0)/consul_server/consul_server.sh $1 5 | -------------------------------------------------------------------------------- /packer/scripts/consul_server/consul_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT consul_server.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | CONFIGDIR=/ops/$1 12 | 13 | logger "Configure server" 14 | cp ${CONFIGDIR}/consul/consul_server.json /etc/consul.d/. 
15 | 16 | logger "Completed" 17 | -------------------------------------------------------------------------------- /packer/scripts/nomad/agent/docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT docker.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | USER=$1 12 | 13 | logger "Installing Docker" 14 | 15 | curl -sSL https://get.docker.com/ | sh 16 | sh -c "echo \"DOCKER_OPTS='--dns 127.0.0.1 --dns 8.8.8.8 --dns-search service.consul'\" >> /etc/default/docker" 17 | usermod -aG docker $USER 18 | 19 | service docker restart 20 | 21 | logger "Completed" 22 | -------------------------------------------------------------------------------- /packer/scripts/nomad/agent/git_repo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT git_repo.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | ORG=$1 12 | REPO=$2 13 | CHECKOUT=$3 14 | GITUSERNAME=$4 15 | GITPASSWORD=$5 16 | GOSRC=/opt/go/src 17 | 18 | mkdir -p "$GOSRC" 19 | chmod 0755 $GOSRC 20 | 21 | ORGPATH=$GOSRC/github.com/$ORG 22 | REPOPATH=$ORGPATH/$REPO 23 | 24 | if ! [ -d "$ORGPATH" ]; then 25 | mkdir -p "$ORGPATH" 26 | chmod 0755 $ORGPATH 27 | fi 28 | 29 | cd $ORGPATH 30 | 31 | if ! [ -d "$REPOPATH" ] || ! [ "$(ls -A $REPOPATH)" ]; then 32 | logger "Fetching ${ORG}/${REPO} from GitHub" 33 | 34 | if [ -z "$GITUSERNAME" ] || [ -z "$GITPASSWORD" ]; then 35 | git clone https://github.com/${ORG}/${REPO}.git 36 | else 37 | git clone https://${GITUSERNAME}:${GITPASSWORD}@github.com/${ORG}/${REPO}.git 38 | fi 39 | fi 40 | 41 | mkdir -p "$REPOPATH" 42 | cd $REPOPATH 43 | 44 | git checkout $CHECKOUT 45 | 46 | logger "Completed" 47 | -------------------------------------------------------------------------------- /packer/scripts/nomad/agent/java8.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT java8.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | logger "Installing Java 8" 12 | 13 | add-apt-repository -y ppa:webupd8team/java 14 | apt-get update -y 15 | # Automatically accept the license agreement 16 | echo debconf shared/accepted-oracle-license-v1-1 select true | sudo debconf-set-selections 17 | apt-get install -y oracle-java8-installer 18 | 19 | logger "Completed" 20 | -------------------------------------------------------------------------------- /packer/scripts/nomad/agent/nomad_agent.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT nomad_agent.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | CONFIGDIR=/ops/$1 12 | GODIR=/usr/local 13 | GOROOT=$GODIR/go 14 | GOPATH=/opt/go 15 | GOSRC=$GOPATH/src 16 | 17 | export GOROOT=$GOROOT 18 | export GOPATH=$GOPATH 19 | export PATH=$PATH:$GOROOT/bin:$GOPATH/bin 20 | 21 | ORG=hashicorp 22 | REPO=c1m 23 | CHECKOUT=master 24 | ORGPATH=$GOSRC/github.com/$ORG 25 | REPOPATH=$ORGPATH/$REPO/schedbench 26 | 27 | logger "Pulling $ORG/$REPO repo" 28 | sh $(dirname $0)/git_repo.sh $ORG $REPO $CHECKOUT 29 | 30 | logger "Building $REPO binaries in $REPOPATH/bin" 31 | cd ${REPOPATH}/tests/nomad 32 | 33 | logger "make docker - builds classlogger and Docker image containing classlogger" 34 | make
docker 35 | 36 | cp classlogger/classlogger /usr/bin/. 37 | chmod 0755 /usr/bin/classlogger 38 | chown root:root /usr/bin/classlogger 39 | 40 | logger "Configure client" 41 | cp ${CONFIGDIR}/nomad/client.hcl /etc/nomad.d/. 42 | cp ${CONFIGDIR}/consul/nomad_client.json /etc/consul.d/. 43 | 44 | logger "Completed" 45 | -------------------------------------------------------------------------------- /packer/scripts/nomad/nomad.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT nomad.sh: $1" 7 | } 8 | 9 | # The Nomad download occasionally fails on the first attempt, so retry with a delay 10 | retry() { 11 | local n=1 12 | local max=5 13 | local delay=15 14 | while true; do 15 | "$@" && break || { 16 | if [ $n -lt $max ]; then 17 | n=$((n+1)) 18 | # No output on failure to allow redirecting output 19 | sleep $delay; 20 | else 21 | logger "The command has failed after $n attempts."; exit 1 22 | fi 23 | } 24 | done 25 | } 26 | 27 | logger "Executing" 28 | 29 | cd /tmp 30 | 31 | CONFIGDIR=/ops/$1/nomad 32 | NOMADVERSION=0.4.1 33 | NOMADDOWNLOAD=https://releases.hashicorp.com/nomad/${NOMADVERSION}/nomad_${NOMADVERSION}_linux_amd64.zip 34 | NOMADCONFIGDIR=/etc/nomad.d 35 | NOMADDIR=/opt/nomad 36 | 37 | logger "Fetching Nomad" 38 | retry curl -fL -o nomad.zip $NOMADDOWNLOAD # -f makes HTTP errors fail the retry; -o rewrites the file on each attempt 39 | 40 | logger "Installing Nomad" 41 | unzip nomad.zip -d /usr/local/bin 42 | chmod 0755 /usr/local/bin/nomad 43 | chown root:root /usr/local/bin/nomad 44 | 45 | logger "Configuring Nomad" 46 | mkdir -p "$NOMADCONFIGDIR" 47 | chmod 0755 $NOMADCONFIGDIR 48 | mkdir -p "$NOMADDIR" 49 | chmod 0777 $NOMADDIR 50 | mkdir -p "$NOMADDIR/data" 51 | 52 | # Nomad config 53 | cp $CONFIGDIR/default.hcl $NOMADCONFIGDIR/. 54 | 55 | # Upstart config 56 | cp $CONFIGDIR/upstart.nomad /etc/init/nomad.conf 57 | 58 | # Nomad join script 59 | cp $CONFIGDIR/nomad_join.sh $NOMADDIR/. 60 | chmod +x $NOMADDIR/nomad_join.sh 61 | 62 | logger "Completed" 63 | -------------------------------------------------------------------------------- /packer/scripts/nomad/server/nomad_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT nomad_server.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | CONFIGDIR=/ops/$1 12 | 13 | logger "Create C1M job file directory" 14 | mkdir -p "/opt/nomad/jobs" 15 | chmod 0777 -R /opt/nomad 16 | 17 | logger "Configure Nomad server" 18 | cp ${CONFIGDIR}/nomad/server.hcl /etc/nomad.d/. 19 | cp ${CONFIGDIR}/consul/nomad_server.json /etc/consul.d/.
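# As with the Consul images, the Nomad binary, default.hcl, upstart job, and join script were
# already installed by nomad/nomad.sh (packer/scripts/nomad_server.sh runs that first); this
# script only adds the server-specific HCL plus the Consul service definition for the Nomad server.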
20 | 21 | logger "Completed" 22 | -------------------------------------------------------------------------------- /packer/scripts/nomad_agent.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | sh $(dirname $0)/nomad/nomad.sh $1 4 | sh $(dirname $0)/nomad/agent/java8.sh $1 5 | sh $(dirname $0)/nomad/agent/docker.sh $2 6 | sh $(dirname $0)/nomad/agent/nomad_agent.sh $1 # This calls nomad/agent/git_repo.sh 7 | -------------------------------------------------------------------------------- /packer/scripts/nomad_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | sh $(dirname $0)/nomad/nomad.sh $1 4 | sh $(dirname $0)/nomad/server/nomad_server.sh $1 5 | -------------------------------------------------------------------------------- /packer/scripts/shared.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | sh $(dirname $0)/shared/dependencies.sh 4 | sh $(dirname $0)/shared/trust_root_cert.sh $1 5 | sh $(dirname $0)/shared/go.sh 6 | sh $(dirname $0)/shared/collectd.sh 7 | sh $(dirname $0)/shared/consul.sh $1 8 | sh $(dirname $0)/shared/local_proxy/consul_template.sh $1 9 | sh $(dirname $0)/shared/local_proxy/haproxy.sh $1 10 | sh $(dirname $0)/shared/local_proxy/dnsmasq.sh $2 11 | -------------------------------------------------------------------------------- /packer/scripts/shared/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT cleanup.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | logger "Cleanup" 12 | apt-get -y autoremove 13 | apt-get -y clean 14 | # TODO: remove build essentials, etc 15 | 16 | rm -rf /tmp/* 17 | rm -rf /ops 18 | 19 | logger "Completed" 20 | -------------------------------------------------------------------------------- /packer/scripts/shared/collectd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT collectd.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | logger "Installing collectd" 12 | apt-get -y install collectd collectd-utils 13 | 14 | CONF_DIR=/etc/collectd 15 | CONF=${CONF_DIR}/collectd.conf 16 | PLUGIN_CONF_DIR=${CONF_DIR}/plugins 17 | 18 | sed -i -- "s/#BaseDir \"\/var\/lib\/collectd\"/BaseDir \"\/var\/lib\/collectd\"/g" $CONF 19 | sed -i -- "s/#PluginDir \"\/usr\/lib\/collectd\"/PluginDir \"\/usr\/lib\/collectd\"/g" $CONF 20 | sed -i -- "s/#Interval 10/Interval 10/g" $CONF 21 | sed -i -- "s/#ReadThreads 5/ReadThreads 5/g" $CONF 22 | 23 | cat <<EOF >>${CONF} 24 | Include "${PLUGIN_CONF_DIR}/*.conf" 25 | 26 | LoadPlugin "write_graphite" 27 | <Plugin "write_graphite"> 28 | <Carbon> 29 | Host "graphite.service.consul" 30 | Port "2003" 31 | Prefix "collectd." 32 | #Protocol "udp" 33 | </Carbon> 34 | </Plugin> 35 | 36 | <Plugin "write_graphite"> 37 | <Node "graphite"> 38 | Host "graphite.service.consul" 39 | Port "2003" 40 | Prefix "collectd."
41 | #Postfix "" 42 | #Protocol "udp" 43 | #LogSendErrors false 44 | EscapeCharacter "_" 45 | SeparateInstances true 46 | StoreRates false 47 | AlwaysAppendDS false 48 | </Node> 49 | </Plugin> 50 | 51 | LoadPlugin "logfile" 52 | <Plugin "logfile"> 53 | LogLevel "info" 54 | File "/var/log/collectd.log" 55 | Timestamp true 56 | </Plugin> 57 | EOF 58 | 59 | mkdir -p "${PLUGIN_CONF_DIR}" 60 | cat <<EOF >${PLUGIN_CONF_DIR}/graphite.conf 61 | <Plugin "write_graphite"> 62 | <Carbon> 63 | Host "graphite.service.consul" 64 | Port "2003" 65 | Prefix "collectd." 66 | #Protocol "udp" 67 | </Carbon> 68 | </Plugin> 69 | 70 | <Plugin "write_graphite"> 71 | <Node "graphite"> 72 | Host "graphite.service.consul" 73 | Port "2003" 74 | Prefix "collectd." 75 | #Postfix "" 76 | #Protocol "udp" 77 | #LogSendErrors false 78 | EscapeCharacter "_" 79 | SeparateInstances true 80 | StoreRates false 81 | AlwaysAppendDS false 82 | </Node> 83 | </Plugin> 84 | EOF 85 | 86 | cat <<EOF >${PLUGIN_CONF_DIR}/logfile.conf 87 | <Plugin "logfile"> 88 | LogLevel "info" 89 | File "/var/log/collectd.log" 90 | Timestamp true 91 | </Plugin> 92 | EOF 93 | 94 | logger "Completed" 95 | -------------------------------------------------------------------------------- /packer/scripts/shared/consul.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT consul.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | cd /tmp 12 | 13 | CONFIGDIR=/ops/$1/consul 14 | CONSULVERSION=0.7.0 15 | CONSULDOWNLOAD=https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip 16 | CONSULWEBUI=https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_web_ui.zip 17 | CONSULCONFIGDIR=/etc/consul.d 18 | CONSULDIR=/opt/consul 19 | 20 | logger "Fetching Consul" 21 | curl -L $CONSULDOWNLOAD > consul.zip 22 | 23 | logger "Installing Consul" 24 | unzip consul.zip -d /usr/local/bin 25 | chmod 0755 /usr/local/bin/consul 26 | chown root:root /usr/local/bin/consul 27 | 28 | logger "Configuring Consul" 29 | mkdir -p "$CONSULCONFIGDIR"/ssl 30 | chmod -R 0755 $CONSULCONFIGDIR 31 | mkdir -p "$CONSULDIR" 32 | chmod 0755 $CONSULDIR 33 | 34 | # Consul config 35 | cp $CONFIGDIR/default.json $CONSULCONFIGDIR/.
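# The TLS material copied below (root.crt, consul.crt, consul.key) is deliberately not checked in
# (*.crt and *.key are gitignored); it is assumed to have been generated into packer/config/consul
# by gen_secrets.sh before `packer build` runs -- with set -e the image build aborts here otherwise.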
36 | cp $CONFIGDIR/root.crt $CONSULCONFIGDIR/ssl/ 37 | cp $CONFIGDIR/consul.crt $CONSULCONFIGDIR/ssl/ 38 | cp $CONFIGDIR/consul.key $CONSULCONFIGDIR/ssl/ 39 | 40 | # Upstart config 41 | cp $CONFIGDIR/upstart.consul /etc/init/consul.conf 42 | 43 | curl -L $CONSULWEBUI > ui.zip 44 | unzip ui.zip -d $CONSULDIR/ui 45 | 46 | logger "Completed" 47 | -------------------------------------------------------------------------------- /packer/scripts/shared/dependencies.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT dependencies.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | logger "Update the box" 12 | apt-get -y update 13 | # GCE apt mirrors are occasionally flaky; the trailing "&& true" keeps a failed upgrade from aborting the build under set -e 14 | apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y --fix-missing upgrade && true 15 | 16 | logger "Install dependencies" 17 | apt-get -y install curl zip unzip tar git build-essential jq 18 | 19 | logger "Completed" 20 | -------------------------------------------------------------------------------- /packer/scripts/shared/go.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT go.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | cd /tmp 12 | 13 | GOVERSION=1.6 14 | GODOWNLOAD=https://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz 15 | PROFILE=/etc/profile 16 | GODIR=/usr/local 17 | GOROOT=$GODIR/go 18 | GOPATH=/opt/go 19 | 20 | export GOROOT=$GOROOT 21 | export GOPATH=$GOPATH 22 | export PATH=$PATH:$GOROOT/bin:$GOPATH/bin 23 | 24 | logger "Fetching Go" 25 | curl -L $GODOWNLOAD > go.tar.gz 26 | 27 | logger "Installing Go" 28 | tar -C $GODIR -xzf go.tar.gz 29 | chmod 0755 $GOROOT 30 | chown root:root $GOROOT 31 | 32 | logger "Configuring Go" 33 | cat <<EOF >>${PROFILE} 34 | 35 | export GOROOT=$GOROOT 36 | export GOPATH=$GOPATH 37 | export PATH=$PATH:$GOROOT/bin:$GOPATH/bin 38 | EOF 39 | 40 | mkdir -p "$GOPATH/bin" 41 | chmod 0755 $GOPATH/bin 42 | chown root:root $GOPATH/bin 43 | 44 | logger "Completed" 45 | -------------------------------------------------------------------------------- /packer/scripts/shared/local_proxy/consul_template.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT consul_template.sh: $1" 7 | } 8 | 9 | logger "Installing consul-template" 10 | 11 | CONFIGDIR=/ops/$1/consul 12 | CONSULTEMPLATEVERSION=0.15.0 13 | CONSULTEMPLATEDOWNLOAD=https://releases.hashicorp.com/consul-template/${CONSULTEMPLATEVERSION}/consul-template_${CONSULTEMPLATEVERSION}_linux_amd64.zip 14 | CONSULTEMPLATECONFIGDIR=/etc/consul-template.d 15 | 16 | cd /tmp 17 | 18 | logger "Fetching Consul-Template" 19 | logger "curl -L $CONSULTEMPLATEDOWNLOAD > consultemplate.zip" 20 | curl -L $CONSULTEMPLATEDOWNLOAD > consultemplate.zip 21 | 22 | logger "Installing Consul-Template" 23 | unzip consultemplate.zip -d /usr/local/bin 24 | chmod 0755 /usr/local/bin/consul-template 25 | chown root:root /usr/local/bin/consul-template 26 | 27 | logger "Configuring Consul-Template" 28 | mkdir -p "$CONSULTEMPLATECONFIGDIR" 29 | chmod 0755 $CONSULTEMPLATECONFIGDIR 30 | 31 | # Consul-Template config 32 | cp $CONFIGDIR/consul-template.hcl $CONSULTEMPLATECONFIGDIR/ 33 | cp $CONFIGDIR/consul-template.tpl
$CONSULTEMPLATECONFIGDIR/ 34 | 35 | # Upstart config 36 | cp $CONFIGDIR/upstart.consul-template /etc/init/consul-template.conf 37 | 38 | logger "Completed" 39 | -------------------------------------------------------------------------------- /packer/scripts/shared/local_proxy/dnsmasq.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT dnsmasq.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | DNSLISTENADDR=$1 12 | 13 | logger "Installing Dnsmasq" 14 | apt-get -y install dnsmasq-base dnsmasq 15 | 16 | logger "Configuring Dnsmasq" 17 | cat <<EOF >/etc/dnsmasq.d/consul 18 | server=/consul/127.0.0.1#8600 19 | listen-address=$DNSLISTENADDR 20 | bind-interfaces 21 | EOF 22 | 23 | cat /etc/dnsmasq.d/consul 24 | 25 | logger "Restarting dnsmasq" 26 | service dnsmasq start || service dnsmasq restart 27 | 28 | logger "Completed" 29 | -------------------------------------------------------------------------------- /packer/scripts/shared/local_proxy/haproxy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT haproxy.sh: $1" 7 | } 8 | 9 | CONFIGDIR=/ops/$1/consul 10 | 11 | logger "Installing HAProxy" 12 | apt-get -y install haproxy 13 | 14 | # Upstart config 15 | cp $CONFIGDIR/upstart.haproxy /etc/init/haproxy.conf 16 | 17 | 18 | logger "Installing Dnsmasq" 19 | apt-get -y install dnsmasq-base dnsmasq 20 | 21 | logger "Configuring Dnsmasq" 22 | cat <<EOF >/etc/dnsmasq.d/haproxy 23 | address=/.service/127.0.0.2 24 | EOF 25 | 26 | cat /etc/dnsmasq.d/haproxy 27 | 28 | logger "Restarting dnsmasq" 29 | service dnsmasq start || service dnsmasq restart 30 | 31 | logger "Completed" 32 | -------------------------------------------------------------------------------- /packer/scripts/shared/trust_root_cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | CONFIGDIR=/ops/$1/vault 4 | CERTTARGET=/usr/local/share/ca-certificates/ 5 | 6 | mkdir -p $CERTTARGET 7 | cp $CONFIGDIR/root.crt $CERTTARGET 8 | 9 | sudo update-ca-certificates 10 | -------------------------------------------------------------------------------- /packer/scripts/utility.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | sh $(dirname $0)/utility/graphite.sh 5 | sh $(dirname $0)/utility/redis.sh 6 | sh $(dirname $0)/utility/statsite.sh $1 7 | sh $(dirname $0)/utility/utility.sh $1 8 | -------------------------------------------------------------------------------- /packer/scripts/utility/graphite.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT graphite.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | logger "Installing graphite components" 12 | apt-get -y install \ 13 | graphite-web graphite-carbon \ 14 | postgresql libpq-dev python-psycopg2 \ 15 | apache2 libapache2-mod-wsgi 16 | 17 | logger "Configuring graphite web application" 18 | sed -i -- "s/#SECRET_KEY = 'UNSAFE_DEFAULT'/SECRET_KEY = 'a_salty_string'/g" /etc/graphite/local_settings.py 19 | sed -i -- "s/#TIME_ZONE = 'America\/Los_Angeles'/TIME_ZONE = 'America\/Los_Angeles'/g" /etc/graphite/local_settings.py 20 | sed -i -- "s/#USE_REMOTE_USER_AUTHENTICATION =
True/USE_REMOTE_USER_AUTHENTICATION = True/g" /etc/graphite/local_settings.py 21 | 22 | logger "Configuring graphite web application for Postgres" 23 | USERNAME=graphite 24 | PASSWORD=password 25 | 26 | logger "Configuring Django database user and database for Postgres" 27 | sudo -u postgres psql -c "CREATE USER ${USERNAME} WITH PASSWORD '${PASSWORD}'" 28 | sudo -u postgres psql -c "CREATE DATABASE graphite WITH OWNER ${USERNAME}" 29 | 30 | logger "Pointing graphite web application at Postgres" 31 | sed -i -- "s/'NAME': '\/var\/lib\/graphite\/graphite.db'/'NAME': 'graphite'/g" /etc/graphite/local_settings.py 32 | sed -i -- "s/'ENGINE': 'django.db.backends.sqlite3'/'ENGINE': 'django.db.backends.postgresql_psycopg2'/g" /etc/graphite/local_settings.py 33 | sed -i -- "s/'USER': ''/'USER': '${USERNAME}'/g" /etc/graphite/local_settings.py 34 | sed -i -- "s/'PASSWORD': ''/'PASSWORD': '${PASSWORD}'/g" /etc/graphite/local_settings.py 35 | sed -i -- "s/'HOST': ''/'HOST': '127.0.0.1'/g" /etc/graphite/local_settings.py 36 | 37 | logger "Sync the database" 38 | graphite-manage syncdb --noinput 39 | 40 | logger "Configuring carbon" 41 | sed -i -- "s/CARBON_CACHE_ENABLED=false/CARBON_CACHE_ENABLED=true/g" /etc/default/graphite-carbon 42 | sed -i -- "s/ENABLE_LOGROTATION = False/ENABLE_LOGROTATION = True/g" /etc/carbon/carbon.conf 43 | 44 | logger "Configuring storage schemas" 45 | cat <<EOF >>/etc/carbon/storage-schemas.conf 46 | 47 | [test] 48 | pattern = ^test\. 49 | retentions = 10s:10m,1m:1h,10m:1d 50 | EOF 51 | 52 | service carbon-cache start 53 | 54 | logger "Configuring Apache" 55 | a2dissite 000-default 56 | cp /usr/share/graphite-web/apache2-graphite.conf /etc/apache2/sites-available 57 | a2ensite apache2-graphite 58 | service apache2 reload 59 | 60 | logger "Completed" 61 | -------------------------------------------------------------------------------- /packer/scripts/utility/redis.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT redis.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | logger "Installing" 12 | apt-get install -y redis-server 13 | 14 | logger "Configuring Redis" 15 | sed -i -- "s/bind 127.0.0.1/bind 0.0.0.0/g" /etc/redis/redis.conf 16 | 17 | logger "Completed" 18 | -------------------------------------------------------------------------------- /packer/scripts/utility/statsite.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT statsite.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | cd /tmp 12 | rm -rf statsite 13 | 14 | CONFIGDIR=/ops/$1/statsite 15 | STATSITECONFIGDIR=/etc/statsite.d 16 | 17 | logger "Install statsite dependencies" 18 | apt-get -y install git build-essential scons autoconf libtool 19 | 20 | logger "Fetching statsite" 21 | git clone --depth 1 https://github.com/armon/statsite.git 22 | 23 | logger "Installing statsite" 24 | cd statsite 25 | ./bootstrap.sh 26 | ./configure 27 | make 28 | cp src/statsite /usr/local/bin/.
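# Unlike the HashiCorp releases fetched elsewhere in these scripts, statsite seemingly ships no
# prebuilt binary, so it is compiled from an unpinned (--depth 1, master) checkout at image-build
# time; note this means image builds are not fully reproducible.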
29 | chmod 0755 /usr/local/bin/statsite 30 | chown root:root /usr/local/bin/statsite 31 | 32 | logger "Configuring statsite" 33 | mkdir -p "$STATSITECONFIGDIR" 34 | chmod 0755 $STATSITECONFIGDIR 35 | mkdir -p /opt/statsite 36 | chmod 0755 /opt/statsite 37 | mkdir -p /usr/share/statsite/sinks/ 38 | chmod 0755 /usr/share/statsite/sinks/ 39 | cp sinks/* /usr/share/statsite/sinks/ 40 | 41 | # Statsite config 42 | cp $CONFIGDIR/default.conf $STATSITECONFIGDIR/. 43 | 44 | # Upstart config 45 | cp $CONFIGDIR/upstart.statsite /etc/init/statsite.conf 46 | 47 | logger "Completed" 48 | -------------------------------------------------------------------------------- /packer/scripts/utility/utility.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT utility.sh: $1" 7 | } 8 | 9 | logger "Executing" 10 | 11 | CONFIGDIR=/ops/$1 12 | 13 | logger "Configure utility" 14 | cp ${CONFIGDIR}/consul/utility.json /etc/consul.d/. 15 | cp ${CONFIGDIR}/consul/redis.json /etc/consul.d/. 16 | cp ${CONFIGDIR}/consul/statsite.json /etc/consul.d/. 17 | cp ${CONFIGDIR}/consul/graphite.json /etc/consul.d/. 18 | 19 | logger "Completed" 20 | -------------------------------------------------------------------------------- /packer/scripts/vault/vault.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT vault.sh: $1" 7 | } 8 | 9 | # The Vault download occasionally fails on the first attempt, so retry with a delay 10 | retry() { 11 | local n=1 12 | local max=5 13 | local delay=15 14 | while true; do 15 | "$@" && break || { 16 | if [ $n -lt $max ]; then 17 | n=$((n+1)) 18 | # No output on failure to allow redirecting output 19 | sleep $delay; 20 | else 21 | logger "The command has failed after $n attempts."; exit 1 22 | fi 23 | } 24 | done 25 | } 26 | 27 | logger "Executing" 28 | 29 | cd /tmp 30 | 31 | CONFIGDIR=/ops/$1/vault 32 | VAULTVERSION=0.6.4 33 | VAULTSHA=04d87dd553aed59f3fe316222217a8d8777f40115a115dac4d88fac1611c51a6 34 | VAULTDOWNLOAD=https://releases.hashicorp.com/vault/${VAULTVERSION}/vault_${VAULTVERSION}_linux_amd64.zip 35 | VAULTCONFIGDIR=/etc/vault.d 36 | VAULTDIR=/opt/vault 37 | 38 | logger "Fetching Vault" 39 | retry curl -fL -o vault.zip $VAULTDOWNLOAD 40 | 41 | # Verify the download; under set -e a bare failing pipeline would abort before the error message below, so test it explicitly 42 | if ! echo "$VAULTSHA  vault.zip" | sha256sum -c -; then 43 | logger "ERROR: VAULT DOES NOT MATCH CHECKSUM" 44 | exit 1 45 | fi 46 | 47 | logger "Installing Vault" 48 | unzip vault.zip -d /usr/local/bin 49 | chmod 0755 /usr/local/bin/vault 50 | chown root:root /usr/local/bin/vault 51 | 52 | logger "Configuring Vault" 53 | mkdir -p "$VAULTCONFIGDIR" 54 | chmod 0755 $VAULTCONFIGDIR 55 | mkdir -p "$VAULTDIR" 56 | chmod 0755 $VAULTDIR 57 | 58 | # Vault config 59 | cp $CONFIGDIR/default.hcl $VAULTCONFIGDIR/. 60 | 61 | # Upstart config 62 | cp $CONFIGDIR/upstart.vault /etc/init/vault.conf 63 | 64 | # Vault CA info 65 | cp $CONFIGDIR/vault.crt $VAULTCONFIGDIR/. 66 | cp $CONFIGDIR/vault.key $VAULTCONFIGDIR/.
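# vault.crt/vault.key are the Vault server's TLS cert and key (presumably referenced by default.hcl);
# like the Consul certs they are gitignored and assumed to be generated into packer/config/vault by
# gen_secrets.sh before the image is built.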
67 | 68 | 69 | logger "Completed" 70 | -------------------------------------------------------------------------------- /packer/scripts/vault_server.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | sh $(dirname $0)/vault/vault.sh $1 4 | -------------------------------------------------------------------------------- /terraform/_env/gce/gce.tf: -------------------------------------------------------------------------------- 1 | variable "name" { } 2 | variable "region" { } 3 | variable "project_id" { } 4 | variable "credentials_file" { } 5 | variable "public_key_file" { } 6 | variable "private_key_file" { } 7 | 8 | variable "artifact_type" { default = "google.image" } 9 | variable "consul_log_level" { default = "INFO" } 10 | variable "nomad_log_level" { default = "INFO" } 11 | variable "node_classes" { default = "5" } 12 | 13 | variable "utility_artifact_name" { } 14 | variable "utility_artifact_version" { default = "latest" } 15 | variable "consul_server_artifact_name" { } 16 | variable "consul_server_artifact_version" { default = "latest" } 17 | variable "consul_server_encrypt_key" { } 18 | variable "nomad_server_artifact_name" { } 19 | variable "nomad_server_artifact_version" { default = "latest" } 20 | variable "nomad_client_artifact_name" { } 21 | variable "nomad_client_artifact_version" { default = "latest" } 22 | variable "vault_server_artifact_name" { } 23 | variable "vault_server_artifact_version" { default = "latest" } 24 | 25 | variable "us_central1_region" { default = "us-central1" } 26 | variable "us_central1_cidr" { default = "10.139.0.0/16" } 27 | variable "us_central1_zones" { default = "us-central1-b,us-central1-c,us-central1-f" } 28 | 29 | variable "utility_machine" { default = "n1-standard-8" } 30 | variable "utility_disk" { default = "50" } 31 | variable "consul_server_machine" { default = "n1-standard-32" } 32 | variable "consul_server_disk" { default = "10" } 33 | variable "consul_servers" { default = "3" } 34 | variable "nomad_server_machine" { default = "n1-standard-32" } 35 | variable "nomad_server_disk" { default = "500" } 36 | variable "nomad_servers" { default = "5" } 37 | variable "nomad_client_machine" { default = "n1-standard-8" } 38 | variable "nomad_client_disk" { default = "20" } 39 | variable "nomad_client_groups" { default = "10" } 40 | variable "nomad_clients" { default = "5000" } 41 | variable "vault_server_machine" { default = "n1-standard-1" } 42 | variable "vault_server_disk" { default = "10" } 43 | variable "vault_servers" { default = "2" } 44 | 45 | provider "google" { 46 | region = "${var.region}" 47 | project = "${var.project_id}" 48 | credentials = "${file("${var.credentials_file}")}" 49 | } 50 | 51 | module "us_central1" { 52 | source = "../../gce/region" 53 | 54 | name = "${var.name}" 55 | project_id = "${var.project_id}" 56 | credentials = "${file("${var.credentials_file}")}" 57 | 58 | region = "${var.us_central1_region}" 59 | cidr = "${var.us_central1_cidr}" 60 | zones = "${var.us_central1_zones}" 61 | ssh_keys = "ubuntu:${file("${var.public_key_file}")}" 62 | private_key = "${file("${var.private_key_file}")}" 63 | 64 | artifact_type = "${var.artifact_type}" 65 | consul_log_level = "${var.consul_log_level}" 66 | nomad_log_level = "${var.nomad_log_level}" 67 | node_classes = "${var.node_classes}" 68 | 69 | utility_artifact_name = "${var.utility_artifact_name}" 70 | utility_artifact_version = "${var.utility_artifact_version}" 71 | utility_machine = "${var.utility_machine}" 72 | 
utility_disk = "${var.utility_disk}" 73 | 74 | consul_server_artifact_name = "${var.consul_server_artifact_name}" 75 | consul_server_artifact_version = "${var.consul_server_artifact_version}" 76 | consul_server_machine = "${var.consul_server_machine}" 77 | consul_server_disk = "${var.consul_server_disk}" 78 | consul_servers = "${var.consul_servers}" 79 | consul_server_encrypt_key = "${var.consul_server_encrypt_key}" 80 | 81 | nomad_server_artifact_name = "${var.nomad_server_artifact_name}" 82 | nomad_server_artifact_version = "${var.nomad_server_artifact_version}" 83 | nomad_server_machine = "${var.nomad_server_machine}" 84 | nomad_server_disk = "${var.nomad_server_disk}" 85 | nomad_servers = "${var.nomad_servers}" 86 | 87 | nomad_client_artifact_name = "${var.nomad_client_artifact_name}" 88 | nomad_client_artifact_version = "${var.nomad_client_artifact_version}" 89 | nomad_client_machine = "${var.nomad_client_machine}" 90 | nomad_client_disk = "${var.nomad_client_disk}" 91 | nomad_client_groups = "${var.nomad_client_groups}" 92 | nomad_clients = "${var.nomad_clients}" 93 | 94 | vault_server_artifact_name = "${var.vault_server_artifact_name}" 95 | vault_server_artifact_version = "${var.vault_server_artifact_version}" 96 | vault_server_machine = "${var.vault_server_machine}" 97 | vault_server_disk = "${var.vault_server_disk}" 98 | vault_servers = "${var.vault_servers}" 99 | } 100 | 101 | // output "us_central1_info" { value = "${module.us_central1.info}" } 102 | -------------------------------------------------------------------------------- /terraform/_env/gce/terraform.tfvars.template: -------------------------------------------------------------------------------- 1 | name = "c1m" 2 | region = "us-central1" 3 | project_id = "nomadspark-143720" // CHANGEME 4 | credentials_file = "../../../gce-credentials.json" # GCE account credentials 5 | public_key_file = "../../../credentials/id_rsa.pub" # Added to all GCE instances; each key must be prefixed with the user ID it is allowed for 6 | private_key_file = "../../../credentials/id_rsa" 7 | 8 | artifact_type = "google.image" 9 | consul_log_level = "INFO" 10 | nomad_log_level = "INFO" 11 | node_classes = "5" # Number of node_classes we will be using for the challenge 12 | 13 | artifact_prefix = "packer" 14 | utility_artifact_name = "c1m-utility" 15 | utility_artifact_version = "1474481298" 16 | consul_server_artifact_name = "c1m-consul-server" 17 | consul_server_artifact_version = "1483745969" 18 | nomad_server_artifact_name = "c1m-nomad-server" 19 | nomad_server_artifact_version = "1483754127" 20 | nomad_client_artifact_name = "c1m-nomad-client" 21 | nomad_client_artifact_version = "1483754133" 22 | vault_server_artifact_name = "c1m-vault-server" 23 | vault_server_artifact_version = "1483758641" 24 | 25 | consul_server_encrypt_key = "CONSUL_SERVER_ENCRYPT_KEY" 26 | 27 | us_central1_cidr = "10.140.0.0/16" 28 | us_central1_zones = "us-central1-b" # ,us-central1-c,us-central1-f" # us-central1-a doesn't have n1_standard_32 29 | 30 | # Total client instances = nomad_clients * nomad_client_groups 31 | # Client groups are currently single-region only, but they're distributed 32 | # across us_central1_zones 33 | utility_machine = "n1-standard-1" # "n1-standard-8" 34 | utility_disk = "50" # In GB 35 | consul_server_machine = "n1-standard-2" # "n1-standard-32" 36 | consul_server_disk = "10" # In GB 37 | consul_servers = "3" 38 | vault_server_machine = "n1-standard-1" # "n1-standard-32" 39 | vault_server_disk = "10" # In GB 40 | vault_servers = "2" 41 |
nomad_server_machine = "n1-standard-2" # "n1-standard-32" 42 | nomad_server_disk = "500" # In GB 43 | nomad_servers = "3" 44 | nomad_client_machine = "n1-standard-2" # "n1-standard-8" 45 | nomad_client_disk = "20" # In GB 46 | nomad_client_groups = "1" 47 | nomad_clients = "3" 48 | -------------------------------------------------------------------------------- /terraform/gce/compute/consul_server/gce_consul_server.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "consul-server" } 2 | variable "project_id" { } 3 | variable "region" { } 4 | variable "credentials" { } 5 | variable "network" { default = "default" } 6 | variable "zones" { } 7 | variable "image" { } 8 | variable "machine_type" { } 9 | variable "disk_size" { default = "10" } 10 | variable "mount_dir" { default = "/mnt/ssd0" } 11 | variable "local_ssd_name" { default = "local-ssd-0" } 12 | variable "consul_join_name" { default = "consul-server?passing" } 13 | variable "servers" { } 14 | variable "consul_log_level" { } 15 | variable "ssh_keys" { } 16 | variable "private_key" { } 17 | variable "consul_server_encrypt_key" {} 18 | 19 | 20 | provider "google" { 21 | region = "${var.region}" 22 | alias = "${var.region}" 23 | project = "${var.project_id}" 24 | credentials = "${var.credentials}" 25 | } 26 | 27 | module "consul_server_template" { 28 | source = "../../../templates/consul_server" 29 | } 30 | 31 | resource "template_file" "consul_server" { 32 | template = "${module.consul_server_template.user_data}" 33 | count = "${var.servers}" 34 | 35 | vars { 36 | private_key = "${var.private_key}" 37 | data_dir = "/opt" 38 | provider = "gce" 39 | region = "gce-${var.region}" 40 | datacenter = "gce-${var.region}" 41 | bootstrap_expect = "${var.servers}" 42 | zone = "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}" 43 | machine_type = "${var.machine_type}" 44 | consul_log_level = "${var.consul_log_level}" 45 | local_ip_url = "-H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/ip" 46 | consul_server_encrypt_key = "${var.consul_server_encrypt_key}" 47 | } 48 | } 49 | 50 | module "mount_ssd_template" { 51 | source = "../../../templates/mount_ssd" 52 | 53 | mount_dir = "${var.mount_dir}" 54 | local_ssd_name = "google-${var.local_ssd_name}" 55 | } 56 | 57 | resource "google_compute_instance" "consul_server" { 58 | provider = "google.${var.region}" 59 | name = "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${var.machine_type}-${count.index + 1}" 60 | machine_type = "${var.machine_type}" 61 | zone = "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}" 62 | count = "${var.servers}" 63 | 64 | # Used for dependency management 65 | #output "name" { value = "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${var.machine_type}-${count.index + 1}" } 66 | 67 | tags = [ 68 | "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${count.index + 1}", 69 | "${var.name}", 70 | "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}", 71 | "${var.machine_type}", 72 | ] 73 | 74 | disk { 75 | image = "${var.image}" 76 | type = "pd-ssd" 77 | size = "${var.disk_size}" 78 | } 79 | 80 | /* 81 | disk { 82 | type = "local-ssd" 83 | scratch = true 84 | device_name = "${var.local_ssd_name}" 85 | } 86 | */ 87 | 88 | network_interface { 89 | network 
= "${var.network}" 90 | 91 | access_config { 92 | } 93 | } 94 | 95 | metadata { 96 | sshKeys = "${var.ssh_keys}" 97 | } 98 | 99 | metadata_startup_script = "${element(template_file.consul_server.*.rendered, count.index % var.servers)}" 100 | } 101 | 102 | module "consul_cluster_join_template" { 103 | source = "../../../templates/join" 104 | 105 | consul_servers = "${join(" ", google_compute_instance.consul_server.*.network_interface.0.address)}" 106 | } 107 | 108 | module "redis_pq_template" { 109 | source = "../../../templates/pq" 110 | 111 | service = "redis" 112 | consul_join_name = "${var.consul_join_name}" 113 | } 114 | 115 | module "nodejs_pq_template" { 116 | source = "../../../templates/pq" 117 | 118 | service = "nodejs" 119 | consul_join_name = "${var.consul_join_name}" 120 | } 121 | 122 | resource "null_resource" "join_and_prepared_queries" { 123 | depends_on = ["google_compute_instance.consul_server"] 124 | 125 | triggers { 126 | private_ips = "${join(",", google_compute_instance.consul_server.*.network_interface.0.address)}" 127 | } 128 | 129 | connection { 130 | user = "ubuntu" 131 | host = "${google_compute_instance.consul_server.0.network_interface.0.access_config.0.assigned_nat_ip}" 132 | private_key = "${var.private_key}" 133 | } 134 | 135 | provisioner "remote-exec" { 136 | inline = [ 137 | "${module.consul_cluster_join_template.script}", 138 | "${module.redis_pq_template.script}", 139 | "${module.nodejs_pq_template.script}", 140 | ] 141 | } 142 | } 143 | 144 | output "names" { value = "${join(",", google_compute_instance.consul_server.*.name)}" } 145 | output "machine_types" { value = "${join(",", google_compute_instance.consul_server.*.machine_type)}" } 146 | output "private_ips" { value = "${join(" ", google_compute_instance.consul_server.*.network_interface.0.address)}" } 147 | output "public_ips" { value = "${join(",", google_compute_instance.consul_server.*.network_interface.0.access_config.0.assigned_nat_ip)}" } 148 | -------------------------------------------------------------------------------- /terraform/gce/compute/gce_compute.tf: -------------------------------------------------------------------------------- 1 | variable "name" { } 2 | variable "project_id" { } 3 | variable "credentials" { } 4 | variable "region" { } 5 | variable "network" { } 6 | variable "zones" { } 7 | variable "node_classes" { } 8 | variable "consul_log_level" { } 9 | variable "nomad_log_level" { } 10 | 11 | variable "ssh_keys" { } 12 | variable "private_key" { } 13 | 14 | variable "utility_image" { } 15 | variable "utility_machine" { } 16 | variable "utility_disk" { } 17 | 18 | variable "consul_server_image" { } 19 | variable "consul_server_machine" { } 20 | variable "consul_server_disk" { } 21 | variable "consul_servers" { } 22 | variable "consul_server_encrypt_key" { } 23 | 24 | variable "nomad_server_image" { } 25 | variable "nomad_server_machine" { } 26 | variable "nomad_server_disk" { } 27 | variable "nomad_servers" { } 28 | 29 | variable "nomad_client_image" { } 30 | variable "nomad_client_machine" { } 31 | variable "nomad_client_disk" { } 32 | variable "nomad_client_groups" { } 33 | variable "nomad_clients" { } 34 | 35 | variable "vault_server_image" { } 36 | variable "vault_server_machine" { } 37 | variable "vault_server_disk" { } 38 | variable "vault_servers" { } 39 | 40 | module "consul_servers" { 41 | source = "./consul_server" 42 | 43 | name = "${var.name}-consul-server" 44 | project_id = "${var.project_id}" 45 | credentials = "${var.credentials}" 46 | region = 
"${var.region}" 47 | network = "${var.network}" 48 | zones = "${var.zones}" 49 | image = "${var.consul_server_image}" 50 | machine_type = "${var.consul_server_machine}" 51 | disk_size = "${var.consul_server_disk}" 52 | servers = "${var.consul_servers}" 53 | consul_log_level = "${var.consul_log_level}" 54 | ssh_keys = "${var.ssh_keys}" 55 | private_key = "${var.private_key}" 56 | consul_server_encrypt_key = "${var.consul_server_encrypt_key}" 57 | } 58 | 59 | module "utility" { 60 | source = "./utility" 61 | 62 | name = "${var.name}-utility" 63 | project_id = "${var.project_id}" 64 | credentials = "${var.credentials}" 65 | region = "${var.region}" 66 | network = "${var.network}" 67 | zones = "${var.zones}" 68 | image = "${var.utility_image}" 69 | machine_type = "${var.utility_machine}" 70 | disk_size = "${var.utility_disk}" 71 | consul_log_level = "${var.consul_log_level}" 72 | ssh_keys = "${var.ssh_keys}" 73 | private_key = "${var.private_key}" 74 | consul_servers = "${module.consul_servers.private_ips}" 75 | consul_server_encrypt_key = "${var.consul_server_encrypt_key}" 76 | } 77 | 78 | module "nomad_servers" { 79 | source = "./nomad_server" 80 | 81 | name = "${var.name}-nomad-server" 82 | project_id = "${var.project_id}" 83 | credentials = "${var.credentials}" 84 | region = "${var.region}" 85 | network = "${var.network}" 86 | zones = "${var.zones}" 87 | image = "${var.nomad_server_image}" 88 | machine_type = "${var.nomad_server_machine}" 89 | disk_size = "${var.nomad_server_disk}" 90 | servers = "${var.nomad_servers}" 91 | nomad_log_level = "${var.nomad_log_level}" 92 | consul_log_level = "${var.consul_log_level}" 93 | ssh_keys = "${var.ssh_keys}" 94 | private_key = "${var.private_key}" 95 | consul_servers = "${module.consul_servers.private_ips}" 96 | consul_server_encrypt_key = "${var.consul_server_encrypt_key}" 97 | } 98 | 99 | 100 | // Raw Nodes 101 | module "nomad_client" { 102 | source = "./nomad_client" 103 | 104 | name = "${var.name}-nomad-client" 105 | project_id = "${var.project_id}" 106 | credentials = "${var.credentials}" 107 | region = "${var.region}" 108 | network = "${var.network}" 109 | zones = "${var.zones}" 110 | image = "${var.nomad_client_image}" 111 | machine_type = "${var.nomad_client_machine}" 112 | disk_size = "${var.nomad_client_disk}" 113 | #groups = "${var.nomad_client_groups}" 114 | nomad_clients = "${var.nomad_clients}" 115 | node_classes = "${var.node_classes}" 116 | nomad_log_level = "${var.nomad_log_level}" 117 | consul_log_level = "${var.consul_log_level}" 118 | ssh_keys = "${var.ssh_keys}" 119 | private_key = "${var.private_key}" 120 | consul_servers = "${module.consul_servers.private_ips}" 121 | consul_server_encrypt_key = "${var.consul_server_encrypt_key}" 122 | } 123 | 124 | module "vault_servers" { 125 | source = "./vault_server" 126 | 127 | name = "${var.name}-vault-server" 128 | project_id = "${var.project_id}" 129 | credentials = "${var.credentials}" 130 | region = "${var.region}" 131 | network = "${var.network}" 132 | zones = "${var.zones}" 133 | image = "${var.vault_server_image}" 134 | machine_type = "${var.vault_server_machine}" 135 | disk_size = "${var.vault_server_disk}" 136 | servers = "${var.vault_servers}" 137 | consul_log_level = "${var.consul_log_level}" 138 | ssh_keys = "${var.ssh_keys}" 139 | private_key = "${var.private_key}" 140 | consul_servers = "${module.consul_servers.private_ips}" 141 | consul_server_encrypt_key = "${var.consul_server_encrypt_key}" 142 | } 143 | 144 | 145 | 146 | /* 147 | // IGM Nodes 148 | module 
"nomad_clients_igm" { 149 | source = "./nomad_client_igm" 150 | 151 | name = "${var.name}-nomad-client" 152 | project_id = "${var.project_id}" 153 | credentials = "${var.credentials}" 154 | region = "${var.region}" 155 | network = "${var.network}" 156 | zones = "${var.zones}" 157 | image = "${var.nomad_client_image}" 158 | machine_type = "${var.nomad_client_machine}" 159 | disk_size = "${var.nomad_client_disk}" 160 | groups = "${var.nomad_client_groups}" 161 | nomad_clients = "${var.nomad_clients}" 162 | node_classes = "${var.node_classes}" 163 | nomad_log_level = "${var.nomad_log_level}" 164 | consul_log_level = "${var.consul_log_level}" 165 | ssh_keys = "${var.ssh_keys}" 166 | private_key = "${var.private_key}" 167 | consul_servers = "${module.consul_servers.private_ips}" 168 | } 169 | */ 170 | /* 171 | output "utility_name" { value = "${module.utility.name}" } 172 | output "utility_machine_type" { value = "${module.utility.machine_type}" } 173 | output "utility_private_ip" { value = "${module.utility.private_ip}" } 174 | output "utility_public_ip" { value = "${module.utility.public_ip}" } 175 | 176 | output "consul_server_names" { value = "${module.consul_servers.names}" } 177 | output "consul_server_machine_types" { value = "${module.consul_servers.machine_types}" } 178 | output "consul_server_private_ips" { value = "${module.consul_servers.private_ips}" } 179 | output "consul_server_public_ips" { value = "${module.consul_servers.public_ips}" } 180 | 181 | output "nomad_server_names" { value = "${module.nomad_servers.names}" } 182 | output "nomad_server_machine_types" { value = "${module.nomad_servers.machine_types}" } 183 | output "nomad_server_private_ips" { value = "${module.nomad_servers.private_ips}" } 184 | output "nomad_server_public_ips" { value = "${module.nomad_servers.public_ips}" } 185 | */ 186 | -------------------------------------------------------------------------------- /terraform/gce/compute/nomad_client/gce_nomad_client.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "nomad-client" } 2 | variable "project_id" { } 3 | variable "credentials" { } 4 | variable "region" { } 5 | variable "network" { default = "default" } 6 | variable "zones" { } 7 | variable "image" { } 8 | variable "machine_type" { } 9 | variable "disk_size" { default = "10" } 10 | variable "mount_dir" { default = "/mnt/ssd0" } 11 | variable "local_ssd_name" { default = "local-ssd-0" } 12 | variable "nomad_clients" { } 13 | variable "node_classes" { } 14 | variable "nomad_join_name" { default = "nomad-server?passing" } 15 | variable "nomad_log_level" { } 16 | variable "consul_log_level" { } 17 | variable "ssh_keys" { } 18 | variable "private_key" { } 19 | variable "consul_servers" { } 20 | variable "consul_server_encrypt_key" { } 21 | 22 | provider "google" { 23 | region = "${var.region}" 24 | alias = "${var.region}" 25 | project = "${var.project_id}" 26 | credentials = "${var.credentials}" 27 | } 28 | 29 | module "nomad_client_template" { 30 | source = "../../../templates/nomad_client" 31 | } 32 | 33 | module "consul_cluster_join_template" { 34 | source = "../../../templates/join" 35 | 36 | consul_servers = "${var.consul_servers}" 37 | } 38 | 39 | resource "template_file" "nomad_client" { 40 | template = "${module.nomad_client_template.user_data}" 41 | count = "${var.node_classes}" 42 | 43 | vars { 44 | private_key = "${var.private_key}" 45 | data_dir = "/opt" 46 | provider = "gce" 47 | region = "gce-${var.region}" 48 | datacenter = 
"gce-${var.region}" 49 | zone = "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}" 50 | machine_type = "${var.machine_type}" 51 | node_class = "class_${count.index + 1}" 52 | nomad_join_name = "${var.nomad_join_name}" 53 | nomad_log_level = "${var.nomad_log_level}" 54 | consul_log_level = "${var.consul_log_level}" 55 | local_ip_url = "-H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/ip" 56 | consul_join_script = "${module.consul_cluster_join_template.script}" 57 | consul_server_encrypt_key = "${var.consul_server_encrypt_key}" 58 | } 59 | } 60 | output "script" { value = "${template_file.nomad_client.rendered}" } 61 | 62 | module "mount_ssd_template" { 63 | source = "../../../templates/mount_ssd" 64 | 65 | mount_dir = "${var.mount_dir}" 66 | local_ssd_name = "google-${var.local_ssd_name}" 67 | } 68 | 69 | resource "google_compute_instance" "nomad_client" { 70 | provider = "google.${var.region}" 71 | count = "${var.nomad_clients}" 72 | name = "${var.name}-${element(split(",", var.zones), (count.index % var.node_classes) % (length(split(",", var.zones))))}-${var.machine_type}-${count.index + 1}" 73 | machine_type = "${var.machine_type}" 74 | zone = "${element(split(",", var.zones), (count.index % var.node_classes) % (length(split(",", var.zones))))}" 75 | 76 | tags = [ 77 | "${var.name}-${element(split(",", var.zones), (count.index % var.node_classes) % (length(split(",", var.zones))))}-${var.machine_type}-${count.index + 1}", 78 | "${var.name}", 79 | "${element(split(",", var.zones), (count.index % var.node_classes) % (length(split(",", var.zones))))}", 80 | "${var.machine_type}", 81 | "class-${(count.index % var.node_classes) + 1}", 82 | ] 83 | 84 | disk { 85 | image = "${var.image}" 86 | type = "pd-ssd" 87 | size = "${var.disk_size}" 88 | } 89 | 90 | /* 91 | disk { 92 | type = "local-ssd" 93 | scratch = true 94 | device_name = "${var.local_ssd_name}" 95 | } 96 | */ 97 | 98 | network_interface { 99 | network = "${var.network}" 100 | 101 | access_config { 102 | } 103 | } 104 | 105 | metadata { 106 | sshKeys = "${var.ssh_keys}" 107 | } 108 | metadata_startup_script = "${element(template_file.nomad_client.*.rendered, count.index % var.node_classes)}" 109 | } 110 | -------------------------------------------------------------------------------- /terraform/gce/compute/nomad_client_igm/gce_nomad_client_igm.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "nomad-client-igm" } 2 | variable "project_id" { } 3 | variable "credentials" { } 4 | variable "region" { } 5 | variable "network" { default = "default" } 6 | variable "zones" { } 7 | variable "image" { } 8 | variable "machine_type" { } 9 | variable "disk_size" { default = "10" } 10 | variable "mount_dir" { default = "/mnt/ssd0" } 11 | variable "local_ssd_name" { default = "local-ssd-0" } 12 | variable "groups" { } 13 | variable "nomad_clients" { } 14 | variable "node_classes" { } 15 | variable "nomad_join_name" { default = "nomad-server?passing" } 16 | variable "nomad_log_level" { } 17 | variable "consul_log_level" { } 18 | variable "ssh_keys" { } 19 | variable "private_key" { } 20 | variable "consul_servers" { } 21 | 22 | provider "google" { 23 | region = "${var.region}" 24 | alias = "${var.region}" 25 | project = "${var.project_id}" 26 | credentials = "${var.credentials}" 27 | } 28 | 29 | module "consul_cluster_join_template" { 30 | source = "../../../templates/join" 31 | 32 | consul_servers = 
"${var.consul_servers}" 33 | } 34 | 35 | module "nomad_client_template" { 36 | source = "../../../templates/nomad_client" 37 | } 38 | 39 | resource "template_file" "nomad_client_igm" { 40 | template = "${module.nomad_client_template.user_data}" 41 | count = "${var.groups}" 42 | 43 | vars { 44 | consul_join_script = "${module.consul_cluster_join_template.script}" 45 | private_key = "${var.private_key}" 46 | data_dir = "/opt" 47 | provider = "gce" 48 | region = "gce-${var.region}" 49 | datacenter = "gce-${var.region}" 50 | zone = "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}" 51 | machine_type = "${var.machine_type}" 52 | node_class = "class_${count.index % var.node_classes + 1}" 53 | nomad_join_name = "${var.nomad_join_name}" 54 | nomad_log_level = "${var.nomad_log_level}" 55 | consul_log_level = "${var.consul_log_level}" 56 | local_ip_url = "-H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/ip" 57 | } 58 | } 59 | 60 | module "mount_ssd_template" { 61 | source = "../../../templates/mount_ssd" 62 | 63 | mount_dir = "${var.mount_dir}" 64 | local_ssd_name = "google-${var.local_ssd_name}" 65 | } 66 | 67 | resource "google_compute_instance_template" "nomad_client_igm" { 68 | provider = "google.${var.region}" 69 | count = "${var.groups}" 70 | name = "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${var.machine_type}-${count.index + 1}" 71 | description = "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${var.machine_type}-${count.index + 1}" 72 | instance_description = "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${var.machine_type}-${count.index + 1}" 73 | machine_type = "${var.machine_type}" 74 | 75 | tags = [ 76 | "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${var.machine_type}-${count.index + 1}", 77 | "${var.name}", 78 | "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}", 79 | "${var.machine_type}", 80 | "class-${count.index % var.node_classes + 1}", 81 | ] 82 | 83 | disk { 84 | boot = true 85 | source_image = "${var.image}" 86 | disk_type = "pd-ssd" 87 | disk_size_gb = "${var.disk_size}" 88 | } 89 | 90 | /* 91 | disk { 92 | disk_type = "local-ssd" 93 | type = "SCRATCH" 94 | device_name = "${var.local_ssd_name}" 95 | } 96 | */ 97 | 98 | network_interface { 99 | network = "${var.network}" 100 | 101 | access_config { 102 | } 103 | } 104 | 105 | metadata { 106 | sshKeys = "${var.ssh_keys}" 107 | startup-script = "${element(template_file.nomad_client_igm.*.rendered, count.index)}" 108 | } 109 | } 110 | 111 | resource "google_compute_instance_group_manager" "nomad_client_igm" { 112 | provider = "google.${var.region}" 113 | count = "${var.groups}" 114 | name = "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${var.machine_type}-${count.index + 1}" 115 | target_size = "${var.nomad_clients / var.groups}" 116 | instance_template = "${element(google_compute_instance_template.nomad_client_igm.*.self_link, count.index)}" 117 | base_instance_name = "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${var.machine_type}-${count.index + 1}" 118 | zone = "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}" 119 | } 120 | 121 | /* 122 | resource "google_compute_autoscaler" "nomad_client_igm" { 123 | provider 
= "google.${var.region}" 124 | count = "${var.groups}" 125 | name = "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${var.machine_type}-${count.index + 1}" 126 | zone = "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}" 127 | target = "${element(google_compute_instance_group_manager.nomad_client_igm.*.self_link, count.index)}" 128 | 129 | autoscaling_policy { 130 | max_replicas = "${var.nomad_clients / var.groups}" 131 | min_replicas = "${var.nomad_clients / var.groups}" 132 | 133 | cpu_utilization { 134 | target = 0.5 135 | } 136 | } 137 | } 138 | */ 139 | -------------------------------------------------------------------------------- /terraform/gce/compute/nomad_server/gce_nomad_server.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "nomad-server" } 2 | variable "project_id" { } 3 | variable "credentials" { } 4 | variable "region" { } 5 | variable "network" { default = "default" } 6 | variable "zones" { } 7 | variable "image" { } 8 | variable "machine_type" { } 9 | variable "disk_size" { default = "10" } 10 | variable "mount_dir" { default = "/mnt/ssd0" } 11 | variable "local_ssd_name" { default = "local-ssd-0" } 12 | variable "servers" { } 13 | variable "nomad_join_name" { default = "nomad-server?passing" } 14 | variable "nomad_log_level" { } 15 | variable "consul_log_level" { } 16 | variable "ssh_keys" { } 17 | variable "private_key" { } 18 | variable "consul_servers" { } 19 | variable "consul_server_encrypt_key" { } 20 | 21 | provider "google" { 22 | region = "${var.region}" 23 | alias = "${var.region}" 24 | project = "${var.project_id}" 25 | credentials = "${var.credentials}" 26 | } 27 | 28 | module "nomad_server_template" { 29 | source = "../../../templates/nomad_server" 30 | } 31 | 32 | resource "template_file" "nomad_server" { 33 | template = "${module.nomad_server_template.user_data}" 34 | count = "${var.servers}" 35 | 36 | vars { 37 | private_key = "${var.private_key}" 38 | data_dir = "/opt" 39 | provider = "gce" 40 | region = "gce-${var.region}" 41 | datacenter = "gce-${var.region}" 42 | bootstrap_expect = "${var.servers}" 43 | zone = "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}" 44 | machine_type = "${var.machine_type}" 45 | nomad_join_name = "${var.nomad_join_name}" 46 | nomad_log_level = "${var.nomad_log_level}" 47 | consul_log_level = "${var.consul_log_level}" 48 | local_ip_url = "-H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/ip" 49 | consul_server_encrypt_key = "${var.consul_server_encrypt_key}" 50 | } 51 | } 52 | 53 | module "consul_cluster_join_template" { 54 | source = "../../../templates/join" 55 | 56 | consul_servers = "${var.consul_servers}" 57 | } 58 | 59 | module "mount_ssd_template" { 60 | source = "../../../templates/mount_ssd" 61 | 62 | mount_dir = "${var.mount_dir}" 63 | local_ssd_name = "google-${var.local_ssd_name}" 64 | } 65 | 66 | resource "google_compute_instance" "nomad_server" { 67 | provider = "google.${var.region}" 68 | count = "${var.servers}" 69 | name = "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${var.machine_type}-${count.index + 1}" 70 | zone = "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}" 71 | machine_type = "${var.machine_type}" 72 | 73 | tags = [ 74 | "${var.name}-${element(split(",", var.zones), count.index % length(split(",", 
var.zones)))}-${count.index + 1}", 75 | "${var.name}", 76 | "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}", 77 | "${var.machine_type}", 78 | ] 79 | 80 | disk { 81 | image = "${var.image}" 82 | type = "pd-ssd" 83 | size = "${var.disk_size}" 84 | } 85 | 86 | /* 87 | disk { 88 | type = "local-ssd" 89 | scratch = true 90 | device_name = "${var.local_ssd_name}" 91 | } 92 | */ 93 | 94 | network_interface { 95 | network = "${var.network}" 96 | 97 | access_config { 98 | } 99 | } 100 | 101 | metadata { 102 | sshKeys = "${var.ssh_keys}" 103 | } 104 | 105 | metadata_startup_script = "${element(template_file.nomad_server.*.rendered, count.index % var.servers)}" 106 | 107 | provisioner "remote-exec" { 108 | connection { 109 | user = "ubuntu" 110 | private_key = "${var.private_key}" 111 | } 112 | inline = [ 113 | "${module.consul_cluster_join_template.script}", 114 | ] 115 | } 116 | } 117 | 118 | output "names" { value = "${join(",", google_compute_instance.nomad_server.*.name)}" } 119 | output "machine_types" { value = "${join(",", google_compute_instance.nomad_server.*.machine_type)}" } 120 | output "private_ips" { value = "${join(",", google_compute_instance.nomad_server.*.network_interface.0.address)}" } 121 | output "public_ips" { value = "${join(",", google_compute_instance.nomad_server.*.network_interface.0.access_config.0.assigned_nat_ip)}" } 122 | -------------------------------------------------------------------------------- /terraform/gce/compute/utility/gce_utility.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "utility" } 2 | variable "project_id" { } 3 | variable "credentials" { } 4 | variable "region" { } 5 | variable "network" { default = "default" } 6 | variable "zones" { } 7 | variable "image" { } 8 | variable "machine_type" { } 9 | variable "disk_size" { default = "10" } 10 | variable "mount_dir" { default = "/mnt/ssd0" } 11 | variable "local_ssd_name" { default = "local-ssd-0" } 12 | variable "consul_log_level" { } 13 | variable "ssh_keys" { } 14 | variable "private_key" { } 15 | variable "consul_servers" { } 16 | variable "consul_server_encrypt_key" { } 17 | 18 | provider "google" { 19 | region = "${var.region}" 20 | alias = "${var.region}" 21 | project = "${var.project_id}" 22 | credentials = "${var.credentials}" 23 | } 24 | 25 | module "utility_template" { 26 | source = "../../../templates/utility" 27 | } 28 | 29 | data "template_file" "utility" { 30 | template = "${module.utility_template.user_data}" 31 | 32 | vars { 33 | private_key = "${var.private_key}" 34 | data_dir = "/opt" 35 | provider = "gce" 36 | region = "gce-${var.region}" 37 | datacenter = "gce-${var.region}" 38 | zone = "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}" 39 | machine_type = "${var.machine_type}" 40 | consul_log_level = "${var.consul_log_level}" 41 | local_ip_url = "-H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/ip" 42 | consul_server_encrypt_key = "${var.consul_server_encrypt_key}" 43 | } 44 | } 45 | 46 | module "mount_ssd_template" { 47 | source = "../../../templates/mount_ssd" 48 | 49 | mount_dir = "${var.mount_dir}" 50 | local_ssd_name = "google-${var.local_ssd_name}" 51 | } 52 | 53 | module "consul_cluster_join_template" { 54 | source = "../../../templates/join" 55 | 56 | consul_servers = "${var.consul_servers}" 57 | } 58 | 59 | resource "google_compute_instance" "utility" { 60 | provider = 
"google.${var.region}" 61 | name = "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${var.machine_type}-${count.index + 1}" 62 | machine_type = "${var.machine_type}" 63 | zone = "${element(split(",", var.zones), count.index)}" 64 | 65 | tags = [ 66 | "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${count.index + 1}", 67 | "${var.name}", 68 | "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}", 69 | "${var.machine_type}", 70 | ] 71 | 72 | disk { 73 | image = "${var.image}" 74 | type = "pd-ssd" 75 | size = "${var.disk_size}" 76 | } 77 | 78 | /* 79 | disk { 80 | type = "local-ssd" 81 | scratch = true 82 | device_name = "${var.local_ssd_name}" 83 | } 84 | */ 85 | 86 | network_interface { 87 | network = "${var.network}" 88 | 89 | access_config { 90 | } 91 | } 92 | 93 | metadata { 94 | sshKeys = "${var.ssh_keys}" 95 | } 96 | 97 | metadata_startup_script = "${data.template_file.utility.rendered}" 98 | 99 | provisioner "remote-exec" { 100 | connection { 101 | user = "ubuntu" 102 | private_key = "${var.private_key}" 103 | } 104 | inline = [ 105 | "${module.consul_cluster_join_template.script}", 106 | ] 107 | } 108 | } 109 | 110 | resource "google_compute_firewall" "allow-http" { 111 | name = "${var.name}-allow-http" 112 | network = "${var.network}" 113 | 114 | allow { 115 | protocol = "tcp" 116 | ports = ["80"] 117 | } 118 | 119 | source_ranges = ["0.0.0.0/0"] 120 | target_tags = ["${var.name}", "utility"] 121 | } 122 | 123 | output "name" { value = "${google_compute_instance.utility.name}" } 124 | output "machine_type" { value = "${google_compute_instance.utility.machine_type}" } 125 | output "private_ip" { value = "${google_compute_instance.utility.network_interface.0.address}" } 126 | output "public_ip" { value = "${google_compute_instance.utility.network_interface.0.access_config.0.assigned_nat_ip}" } 127 | -------------------------------------------------------------------------------- /terraform/gce/compute/vault_server/gce_vault_server.tf: -------------------------------------------------------------------------------- 1 | variable "name" { default = "vault-server" } 2 | variable "project_id" { } 3 | variable "credentials" { } 4 | variable "region" { } 5 | variable "network" { default = "default" } 6 | variable "zones" { } 7 | variable "image" { } 8 | variable "machine_type" { } 9 | variable "disk_size" { default = "10" } 10 | variable "mount_dir" { default = "/mnt/ssd0" } 11 | variable "local_ssd_name" { default = "local-ssd-0" } 12 | variable "servers" { } 13 | variable "consul_log_level" { } 14 | variable "ssh_keys" { } 15 | variable "private_key" { } 16 | variable "consul_servers" { } 17 | variable "consul_server_encrypt_key" { } 18 | 19 | provider "google" { 20 | region = "${var.region}" 21 | alias = "${var.region}" 22 | project = "${var.project_id}" 23 | credentials = "${var.credentials}" 24 | } 25 | 26 | 27 | module "vault_server_template" { 28 | source = "../../../templates/vault_server" 29 | } 30 | 31 | resource "template_file" "vault_server" { 32 | template = "${module.vault_server_template.user_data}" 33 | count = "${var.servers}" 34 | 35 | vars { 36 | private_key = "${var.private_key}" 37 | data_dir = "/opt" 38 | provider = "gce" 39 | region = "gce-${var.region}" 40 | datacenter = "gce-${var.region}" 41 | bootstrap_expect = "${var.servers}" 42 | zone = "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}" 43 | machine_type = 
"${var.machine_type}" 44 | consul_log_level = "${var.consul_log_level}" 45 | local_ip_url = "-H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/network-interfaces/0/ip" 46 | consul_server_encrypt_key = "${var.consul_server_encrypt_key}" 47 | } 48 | } 49 | 50 | 51 | module "consul_cluster_join_template" { 52 | source = "../../../templates/join" 53 | 54 | consul_servers = "${var.consul_servers}" 55 | } 56 | 57 | module "mount_ssd_template" { 58 | source = "../../../templates/mount_ssd" 59 | 60 | mount_dir = "${var.mount_dir}" 61 | local_ssd_name = "google-${var.local_ssd_name}" 62 | } 63 | 64 | resource "google_compute_instance" "vault_server" { 65 | provider = "google.${var.region}" 66 | count = "${var.servers}" 67 | name = "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${var.machine_type}-${count.index + 1}" 68 | zone = "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}" 69 | machine_type = "${var.machine_type}" 70 | 71 | tags = [ 72 | "${var.name}-${element(split(",", var.zones), count.index % length(split(",", var.zones)))}-${count.index + 1}", 73 | "${var.name}", 74 | "${element(split(",", var.zones), count.index % length(split(",", var.zones)))}", 75 | "${var.machine_type}", 76 | ] 77 | 78 | disk { 79 | image = "${var.image}" 80 | type = "pd-ssd" 81 | size = "${var.disk_size}" 82 | } 83 | 84 | /* 85 | disk { 86 | type = "local-ssd" 87 | scratch = true 88 | device_name = "${var.local_ssd_name}" 89 | } 90 | */ 91 | 92 | network_interface { 93 | network = "${var.network}" 94 | 95 | access_config { 96 | } 97 | } 98 | 99 | metadata { 100 | sshKeys = "${var.ssh_keys}" 101 | } 102 | 103 | metadata_startup_script = "${element(template_file.vault_server.*.rendered, count.index % var.servers)}" 104 | 105 | provisioner "remote-exec" { 106 | connection { 107 | user = "ubuntu" 108 | private_key = "${var.private_key}" 109 | } 110 | inline = [ 111 | "${module.consul_cluster_join_template.script}", 112 | ] 113 | } 114 | } 115 | 116 | output "names" { value = "${join(",", google_compute_instance.vault_server.*.name)}" } 117 | output "machine_types" { value = "${join(",", google_compute_instance.vault_server.*.machine_type)}" } 118 | output "private_ips" { value = "${join(",", google_compute_instance.vault_server.*.network_interface.0.address)}" } 119 | output "public_ips" { value = "${join(",", google_compute_instance.vault_server.*.network_interface.0.access_config.0.assigned_nat_ip)}" } 120 | -------------------------------------------------------------------------------- /terraform/gce/network/gce_network.tf: -------------------------------------------------------------------------------- 1 | variable "name" { } 2 | variable "cidr" { } 3 | 4 | resource "google_compute_network" "network" { 5 | name = "${var.name}" 6 | ipv4_range = "${var.cidr}" 7 | } 8 | 9 | resource "google_compute_firewall" "allow-internal" { 10 | name = "${var.name}-allow-internal" 11 | network = "${google_compute_network.network.name}" 12 | 13 | allow { 14 | protocol = "icmp" 15 | } 16 | 17 | allow { 18 | protocol = "tcp" 19 | ports = ["0-65535"] 20 | } 21 | 22 | allow { 23 | protocol = "udp" 24 | ports = ["0-65535"] 25 | } 26 | 27 | source_ranges = [ 28 | "${var.cidr}" 29 | ] 30 | } 31 | 32 | resource "google_compute_firewall" "allow-ssh" { 33 | name = "${var.name}-allow-ssh" 34 | network = "${google_compute_network.network.name}" 35 | 36 | allow { 37 | protocol = "tcp" 38 | ports = ["22"] 39 | } 40 | 41 | source_ranges = 
["0.0.0.0/0"] 42 | } 43 | 44 | output "name" { value = "${google_compute_network.network.name}" } 45 | output "vpc_cidr" { value = "${var.cidr}" } 46 | -------------------------------------------------------------------------------- /terraform/gce/region/gce_region.tf: -------------------------------------------------------------------------------- 1 | variable "name" { } 2 | variable "project_id" { } 3 | variable "credentials" { } 4 | 5 | variable "region" { } 6 | variable "cidr" { } 7 | variable "zones" { } 8 | variable "ssh_keys" { } 9 | variable "private_key" { } 10 | 11 | variable "artifact_type" { default = "google.image" } 12 | variable "consul_log_level" { } 13 | variable "nomad_log_level" { } 14 | variable "node_classes" { } 15 | variable "artifact_prefix" { default = "packer" } 16 | 17 | variable "utility_artifact_name" { } 18 | variable "utility_artifact_version" { default = "latest" } 19 | variable "utility_machine" { } 20 | variable "utility_disk" { } 21 | 22 | variable "consul_server_artifact_name" { } 23 | variable "consul_server_artifact_version" { default = "latest" } 24 | variable "consul_server_machine" { } 25 | variable "consul_server_disk" { } 26 | variable "consul_servers" { } 27 | variable "consul_server_encrypt_key" { } 28 | 29 | variable "vault_server_artifact_name" { } 30 | variable "vault_server_artifact_version" { default = "latest" } 31 | variable "vault_server_machine" { } 32 | variable "vault_server_disk" { } 33 | variable "vault_servers" { } 34 | 35 | variable "nomad_server_artifact_name" { } 36 | variable "nomad_server_artifact_version" { default = "latest" } 37 | variable "nomad_server_machine" { } 38 | variable "nomad_server_disk" { } 39 | variable "nomad_servers" { } 40 | 41 | variable "nomad_client_artifact_name" { } 42 | variable "nomad_client_artifact_version" { default = "latest" } 43 | variable "nomad_client_machine" { } 44 | variable "nomad_client_disk" { } 45 | variable "nomad_client_groups" { } 46 | variable "nomad_clients" { } 47 | 48 | provider "google" { 49 | region = "${var.region}" 50 | alias = "${var.region}" 51 | project = "${var.project_id}" 52 | credentials = "${var.credentials}" 53 | } 54 | 55 | module "network" { 56 | source = "../network" 57 | 58 | name = "${var.name}" 59 | cidr = "${var.cidr}" 60 | } 61 | 62 | module "compute" { 63 | source = "../compute" 64 | 65 | name = "${var.name}" 66 | project_id = "${var.project_id}" 67 | credentials = "${var.credentials}" 68 | region = "${var.region}" 69 | network = "${module.network.name}" 70 | zones = "${var.zones}" 71 | node_classes = "${var.node_classes}" 72 | consul_log_level = "${var.consul_log_level}" 73 | nomad_log_level = "${var.nomad_log_level}" 74 | ssh_keys = "${var.ssh_keys}" 75 | private_key = "${var.private_key}" 76 | 77 | utility_image = "${var.artifact_prefix}-${var.utility_artifact_name}-${var.utility_artifact_version}" 78 | utility_machine = "${var.utility_machine}" 79 | utility_disk = "${var.utility_disk}" 80 | 81 | consul_server_image = "${var.artifact_prefix}-${var.consul_server_artifact_name}-${var.consul_server_artifact_version}" 82 | consul_server_machine = "${var.consul_server_machine}" 83 | consul_server_disk = "${var.consul_server_disk}" 84 | consul_servers = "${var.consul_servers}" 85 | consul_server_encrypt_key = "${var.consul_server_encrypt_key}" 86 | 87 | vault_server_image = "${var.artifact_prefix}-${var.vault_server_artifact_name}-${var.vault_server_artifact_version}" 88 | vault_server_machine = "${var.vault_server_machine}" 89 | vault_server_disk = 
"${var.vault_server_disk}" 90 | vault_servers = "${var.vault_servers}" 91 | 92 | nomad_server_image = "${var.artifact_prefix}-${var.nomad_server_artifact_name}-${var.nomad_server_artifact_version}" 93 | nomad_server_machine = "${var.nomad_server_machine}" 94 | nomad_server_disk = "${var.nomad_server_disk}" 95 | nomad_servers = "${var.nomad_servers}" 96 | 97 | nomad_client_image = "${var.artifact_prefix}-${var.nomad_client_artifact_name}-${var.nomad_client_artifact_version}" 98 | nomad_client_machine = "${var.nomad_client_machine}" 99 | nomad_client_disk = "${var.nomad_client_disk}" 100 | nomad_client_groups = "${var.nomad_client_groups}" 101 | nomad_clients = "${var.nomad_clients}" 102 | } 103 | 104 | output "region" { value = "${var.region}" } 105 | output "network" { value = "${module.network.name}" } 106 | output "vpc_cidr" { value = "${module.network.vpc_cidr}" } 107 | 108 | /* 109 | output "info" { 110 | value = < /dev/null 8 | } 9 | 10 | logger "Begin script" 11 | 12 | NODE_NAME="$(hostname)" 13 | logger "Node name: $NODE_NAME" 14 | 15 | METADATA_LOCAL_IP=`curl ${local_ip_url}` 16 | logger "Local IP: $METADATA_LOCAL_IP" 17 | 18 | logger "Configuring Consul default" 19 | CONSUL_DEFAULT_CONFIG=/etc/consul.d/default.json 20 | CONSUL_DATA_DIR=${data_dir}/consul/data 21 | 22 | sudo mkdir -p $CONSUL_DATA_DIR 23 | sudo chmod 0755 $CONSUL_DATA_DIR 24 | 25 | sudo sed -i -- "s/{{ data_dir }}/$${CONSUL_DATA_DIR//\//\\\/}/g" $CONSUL_DEFAULT_CONFIG 26 | sudo sed -i -- "s/{{ local_ip }}/$METADATA_LOCAL_IP/g" $CONSUL_DEFAULT_CONFIG 27 | sudo sed -i -- "s/{{ datacenter }}/${datacenter}/g" $CONSUL_DEFAULT_CONFIG 28 | sudo sed -i -- "s/{{ node_name }}/$NODE_NAME/g" $CONSUL_DEFAULT_CONFIG 29 | sudo sed -i -- "s/{{ log_level }}/${consul_log_level}/g" $CONSUL_DEFAULT_CONFIG 30 | sudo sed -i -- "s/{{ consul_server_encrypt_key }}/${consul_server_encrypt_key}/g" $CONSUL_DEFAULT_CONFIG 31 | 32 | logger "Configuring Consul server" 33 | CONSUL_SERVER_CONFIG=/etc/consul.d/consul_server.json 34 | 35 | sudo sed -i -- "s/{{ bootstrap_expect }}/${bootstrap_expect}/g" $CONSUL_SERVER_CONFIG 36 | sudo sed -i -- "s/\"{{ tags }}\"/\"${provider}\", \"${region}\", \"${zone}\", \"${machine_type}\"/g" $CONSUL_SERVER_CONFIG 37 | 38 | echo $(date '+%s') | sudo tee -a /etc/consul.d/configured > /dev/null 39 | sudo service consul start || sudo service consul restart 40 | 41 | logger "Done" 42 | -------------------------------------------------------------------------------- /terraform/templates/consul_server/consul_server.tf: -------------------------------------------------------------------------------- 1 | output "user_data" { value = "${path.module}/consul_server.sh.tpl" } 2 | -------------------------------------------------------------------------------- /terraform/templates/join/join.sh.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | logger() { 4 | DT=$(date '+%Y/%m/%d %H:%M:%S') 5 | echo "$DT join.sh: $1" 6 | echo "$DT join.sh: $1" | sudo tee -a /var/log/user_data.log > /dev/null 7 | } 8 | 9 | logger "Begin script" 10 | 11 | logger "running: consul join ${consul_servers}" 12 | 13 | set +e # Don't exit on errors while we wait for consul to start 14 | consul join ${consul_servers} 15 | retval=$? 16 | SLEEPTIME=1 17 | while [ $retval -ne 0 ]; do 18 | if [ $SLEEPTIME -gt 25 ]; then 19 | logger "ERROR: CONSUL SETUP NOT COMPLETE! Couldn't execute `join` Manual intervention required." 

logger "Configuring Consul server"
CONSUL_SERVER_CONFIG=/etc/consul.d/consul_server.json

sudo sed -i -- "s/{{ bootstrap_expect }}/${bootstrap_expect}/g" $CONSUL_SERVER_CONFIG
sudo sed -i -- "s/\"{{ tags }}\"/\"${provider}\", \"${region}\", \"${zone}\", \"${machine_type}\"/g" $CONSUL_SERVER_CONFIG

echo $(date '+%s') | sudo tee -a /etc/consul.d/configured > /dev/null
sudo service consul start || sudo service consul restart

logger "Done"
--------------------------------------------------------------------------------
/terraform/templates/consul_server/consul_server.tf:
--------------------------------------------------------------------------------
output "user_data" { value = "${path.module}/consul_server.sh.tpl" }
--------------------------------------------------------------------------------
/terraform/templates/join/join.sh.tpl:
--------------------------------------------------------------------------------
#!/bin/bash

logger() {
  DT=$(date '+%Y/%m/%d %H:%M:%S')
  echo "$DT join.sh: $1"
  echo "$DT join.sh: $1" | sudo tee -a /var/log/user_data.log > /dev/null
}

logger "Begin script"

logger "running: consul join ${consul_servers}"

set +e # Don't exit on errors while we wait for consul to start
consul join ${consul_servers}
retval=$?
SLEEPTIME=1
while [ $retval -ne 0 ]; do
  if [ $SLEEPTIME -gt 25 ]; then
    # Single quotes here: backticks inside a double-quoted string would run
    # 'join' as a command substitution instead of printing it.
    logger "ERROR: CONSUL SETUP NOT COMPLETE! Couldn't execute 'join'. Manual intervention required."
    exit $retval
  else
    logger "Consul join failed, retrying in $SLEEPTIME seconds"
    sleep $SLEEPTIME
    SLEEPTIME=$((SLEEPTIME + 1))
    consul join ${consul_servers}
    retval=$?
  fi
done
set -e

logger "Join succeeded, waiting for peers..."

SLEEPTIME=1
CONSUL_PEERS=`consul info | egrep "known_servers|num_peers" | tr ' ' '\n' | tail -n 1`
while [ $CONSUL_PEERS -lt 2 ]
do
  if [ $SLEEPTIME -gt 15 ]; then
    logger "ERROR: CONSUL SETUP NOT COMPLETE! Peers didn't join. Manual intervention required."
    exit 2
  else
    logger "Waiting for optimum quorum size, currently: $CONSUL_PEERS, waiting $SLEEPTIME seconds"
    sleep $SLEEPTIME
    SLEEPTIME=$((SLEEPTIME + 1))
    CONSUL_PEERS=`consul info | egrep "known_servers|num_peers" | tr ' ' '\n' | tail -n 1`
  fi
done

sleep 15 # Wait for Consul service to join and elect leader

logger "End script"
--------------------------------------------------------------------------------
/terraform/templates/join/join.tf:
--------------------------------------------------------------------------------
variable "consul_servers" { }

resource "template_file" "join" {
  template = "${file("${path.module}/join.sh.tpl")}"

  vars {
    consul_servers = "${var.consul_servers}"
  }
}

output "script" { value = "${template_file.join.rendered}" }
--------------------------------------------------------------------------------
/terraform/templates/mount_ssd/mount_ssd.sh.tpl:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

logger() {
  DT=$(date '+%Y/%m/%d %H:%M:%S')
  echo "$DT mount_ssd.sh: $1"
  echo "$DT mount_ssd.sh: $1" | sudo tee -a /var/log/user_data.log > /dev/null
}

logger "Begin script"

logger "Mount local SSD"
sudo mkdir -p ${mount_dir}
sudo mkfs.ext4 -F /dev/disk/by-id/${local_ssd_name}
sudo mount -o discard,defaults /dev/disk/by-id/${local_ssd_name} ${mount_dir}
sudo chmod a+w ${mount_dir}

logger "Optimize local SSD"
echo deadline | sudo tee -a /sys/block/sdb/queue/scheduler
echo 1 | sudo tee -a /sys/block/sdb/queue/iosched/fifo_batch
echo "tmpfs /tmp tmpfs defaults,noatime,mode=1777 0 0" | sudo tee -a /etc/fstab
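# The scheduler tuning above assumes the local SSD enumerates as /dev/sdb; that
# device name is an assumption, not something GCE guarantees. A quick way to
# confirm which block device the by-id path points at before trusting the
# /sys/block/sdb writes:
#   readlink -f /dev/disk/by-id/${local_ssd_name}   # e.g. prints /dev/sdb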

if [ ! -f /home/ubuntu/c1m/reboot ]; then
  logger "Local SSD reboot"
  # Drop a marker before rebooting so this startup script does not reboot the
  # instance again on every subsequent boot (without it, this branch loops).
  sudo mkdir -p /home/ubuntu/c1m
  sudo touch /home/ubuntu/c1m/reboot
  sudo reboot
  exit 0
fi

logger "Done"

exit 0
--------------------------------------------------------------------------------
/terraform/templates/mount_ssd/mount_ssd.tf:
--------------------------------------------------------------------------------
variable "mount_dir" { }
variable "local_ssd_name" { }

resource "template_file" "mount_ssd" {
  template = "${file("${path.module}/mount_ssd.sh.tpl")}"

  vars {
    mount_dir = "${var.mount_dir}"
    local_ssd_name = "${var.local_ssd_name}"
  }
}

output "script" { value = "${template_file.mount_ssd.rendered}" }
--------------------------------------------------------------------------------
/terraform/templates/nomad_client/nomad_client.sh.tpl:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

logger() {
  DT=$(date '+%Y/%m/%d %H:%M:%S')
  echo "$DT nomad_client.sh: $1"
  echo "$DT nomad_client.sh: $1" | sudo tee -a /var/log/user_data.log > /dev/null
}

logger "Begin script"

logger "Configure Nomad Client"
NODE_NAME="$(hostname)"
logger "Node name: $NODE_NAME"

METADATA_LOCAL_IP=`curl ${local_ip_url}`
logger "Local IP: $METADATA_LOCAL_IP"

logger "Configuring Consul default"
CONSUL_DEFAULT_CONFIG=/etc/consul.d/default.json
CONSUL_DATA_DIR=${data_dir}/consul/data

sudo mkdir -p $CONSUL_DATA_DIR
sudo chmod 0755 $CONSUL_DATA_DIR

sudo sed -i -- "s/{{ data_dir }}/$${CONSUL_DATA_DIR//\//\\\/}/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ local_ip }}/$METADATA_LOCAL_IP/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ datacenter }}/${datacenter}/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ node_name }}/$NODE_NAME/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ log_level }}/${consul_log_level}/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ consul_server_encrypt_key }}/${consul_server_encrypt_key}/g" $CONSUL_DEFAULT_CONFIG

logger "Configuring Consul Nomad client"
CONSUL_NOMAD_CLIENT_CONFIG=/etc/consul.d/nomad_client.json

sudo sed -i -- "s/\"{{ tags }}\"/\"${provider}\", \"${region}\", \"${zone}\", \"${machine_type}\", \"${node_class}\"/g" $CONSUL_NOMAD_CLIENT_CONFIG

echo $(date '+%s') | sudo tee -a /etc/consul.d/configured > /dev/null
sudo service consul start || sudo service consul restart

logger "Running Join Script"

${ consul_join_script }

logger "Configuring Docker"
DOCKER_DATA_DIR=${data_dir}/docker/data

sudo mkdir -p $DOCKER_DATA_DIR
sudo chmod 0755 $DOCKER_DATA_DIR

sudo sed -i -- "s/service.consul/service.consul -g $${DOCKER_DATA_DIR//\//\\\/}/g" /etc/default/docker
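# The sed above assumes the packer-baked /etc/default/docker contains a
# DOCKER_OPTS line ending in service.consul (that file is not shown here); it
# appends a -g graph/data-dir flag so Docker stores its state under the data
# dir. Sketch of the edit, with an illustrative DOCKER_OPTS line that is an
# assumption about the baked image:
#   before: DOCKER_OPTS="--dns 127.0.0.1 --dns-search service.consul"
#   after:  DOCKER_OPTS="--dns 127.0.0.1 --dns-search service.consul -g /opt/docker/data"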
sudo sed -i -- "s/{{ name }}/$NODE_NAME/g" $NOMAD_DEFAULT_CONFIG 68 | sudo sed -i -- "s/{{ log_level }}/${nomad_log_level}/g" $NOMAD_DEFAULT_CONFIG 69 | 70 | logger "Configure Nomad client" 71 | 72 | NOMAD_CLIENT_CONFIG=/etc/nomad.d/client.hcl 73 | 74 | sudo sed -i -- "s/{{ node_id }}/$NODE_NAME/g" $NOMAD_CLIENT_CONFIG 75 | sudo sed -i -- "s/{{ region }}/${region}/g" $NOMAD_CLIENT_CONFIG 76 | sudo sed -i -- "s/{{ machine_type }}/${machine_type}/g" $NOMAD_CLIENT_CONFIG 77 | sudo sed -i -- 's/{{ node_class }}/${node_class}/g' $NOMAD_CLIENT_CONFIG 78 | 79 | echo $(date '+%s') | sudo tee -a /etc/nomad.d/configured > /dev/null 80 | sudo service nomad start || sudo service nomad restart 81 | 82 | logger "Nomad server join: ${nomad_join_name}" 83 | sleep 15 # Wait for Nomad service to fully boot 84 | sudo /opt/nomad/nomad_join.sh "${nomad_join_name}" 85 | 86 | logger "Done" 87 | -------------------------------------------------------------------------------- /terraform/templates/nomad_client/nomad_client.tf: -------------------------------------------------------------------------------- 1 | output "user_data" { value = "${path.module}/nomad_client.sh.tpl" } 2 | -------------------------------------------------------------------------------- /terraform/templates/nomad_job/helloworld/helloworld.nomad.tpl: -------------------------------------------------------------------------------- 1 | job "helloworld-v1" { 2 | region = "${region}" 3 | datacenters = ["${datacenter}"] 4 | type = "service" 5 | priority = 50 6 | 7 | update { 8 | stagger = "30s" 9 | max_parallel = 1 10 | } 11 | 12 | group "hello-group" { 13 | count = ${count} 14 | 15 | constraint { 16 | attribute = "\$${node.datacenter}" 17 | value = "${datacenter}" 18 | } 19 | 20 | task "hello-task" { 21 | driver = "docker" 22 | config { 23 | image = "${image}" 24 | port_map { 25 | http = 8080 26 | } 27 | } 28 | 29 | resources { 30 | cpu = 100 31 | memory = 200 32 | network { 33 | mbits = 1 34 | port "http" {} 35 | } 36 | } 37 | 38 | logs { 39 | max_files = 1 40 | max_file_size = 5 41 | } 42 | 43 | service { 44 | name = "redis" 45 | tags = ["global", "${region}"] 46 | port = "db" 47 | 48 | check { 49 | name = "hello alive" 50 | type = "tcp" 51 | interval = "10s" 52 | timeout = "2s" 53 | } 54 | } 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /terraform/templates/nomad_job/helloworld/helloworld.tf: -------------------------------------------------------------------------------- 1 | variable "region" { } 2 | variable "datacenter" { } 3 | variable "count" { } 4 | variable "image" { } 5 | 6 | resource "template_file" "helloworld" { 7 | template = "${file("${path.module}/helloworld.nomad.tpl")}" 8 | 9 | vars { 10 | region = "${var.region}" 11 | datacenter = "${var.datacenter}" 12 | count = "${var.count}" 13 | image = "${var.image}" 14 | } 15 | } 16 | 17 | output "job" { value = "${template_file.helloworld.rendered}" } 18 | -------------------------------------------------------------------------------- /terraform/templates/nomad_job/nomad_job.tf: -------------------------------------------------------------------------------- 1 | variable "region" { default = "global" } 2 | variable "datacenter" { default = "global" } 3 | 4 | variable "redis_count" { default = "1" } 5 | variable "redis_image" { default = "hashidemo/redis:latest" } 6 | 7 | variable "helloworld_count" { default = 3 } 8 | variable "helloworld_image" { default = "eveld/helloworld:1.0.0" } 9 | 10 | module "helloworld" { 11 | source 
= "./helloworld" 12 | 13 | region = "${var.region}" 14 | datacenter = "${var.datacenter}" 15 | count = "1" 16 | image = "${var.helloworld_image}" 17 | } 18 | output "helloworld_job" { value = "${module.helloworld.job}" } 19 | 20 | module "redis" { 21 | source = "./redis" 22 | 23 | region = "${var.region}" 24 | datacenter = "${var.datacenter}" 25 | count = "${var.redis_count}" 26 | image = "${var.redis_image}" 27 | } 28 | output "redis_job" { value = "${module.redis.job}" } 29 | -------------------------------------------------------------------------------- /terraform/templates/nomad_job/redis/redis.nomad.tpl: -------------------------------------------------------------------------------- 1 | job "redis" { 2 | region = "${region}" 3 | datacenters = ["${datacenter}"] 4 | type = "service" 5 | priority = 50 6 | 7 | update { 8 | stagger = "10s" 9 | max_parallel = 1 10 | } 11 | 12 | group "redis" { 13 | count = ${count} 14 | 15 | constraint { 16 | attribute = "\$${node.datacenter}" 17 | value = "${datacenter}" 18 | } 19 | 20 | restart { 21 | mode = "delay" 22 | interval = "5m" 23 | attempts = 10 24 | delay = "25s" 25 | } 26 | 27 | task "redis" { 28 | driver = "docker" 29 | 30 | config { 31 | image = "${image}" 32 | 33 | port_map { 34 | db = 6379 35 | } 36 | } 37 | 38 | resources { 39 | cpu = 20 40 | memory = 15 41 | disk = 10 42 | 43 | network { 44 | mbits = 1 45 | 46 | port "db" { 47 | static = 6379 48 | } 49 | } 50 | } 51 | 52 | logs { 53 | max_files = 1 54 | max_file_size = 5 55 | } 56 | 57 | service { 58 | name = "redis" 59 | tags = ["global", "${region}"] 60 | port = "db" 61 | 62 | check { 63 | name = "redis alive" 64 | type = "tcp" 65 | interval = "10s" 66 | timeout = "2s" 67 | } 68 | } 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /terraform/templates/nomad_job/redis/redis.tf: -------------------------------------------------------------------------------- 1 | variable "region" { } 2 | variable "datacenter" { } 3 | variable "count" { } 4 | variable "image" { } 5 | 6 | resource "template_file" "redis" { 7 | template = "${file("${path.module}/redis.nomad.tpl")}" 8 | 9 | vars { 10 | region = "${var.region}" 11 | datacenter = "${var.datacenter}" 12 | count = "${var.count}" 13 | image = "${var.image}" 14 | } 15 | } 16 | 17 | output "job" { value = "${template_file.redis.rendered}" } 18 | -------------------------------------------------------------------------------- /terraform/templates/nomad_server/nomad_server.sh.tpl: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | logger() { 5 | DT=$(date '+%Y/%m/%d %H:%M:%S') 6 | echo "$DT nomad_server.sh: $1" 7 | echo "$DT nomad_server.sh: $1" | sudo tee -a /var/log/user_data.log > /dev/null 8 | } 9 | 10 | logger "Begin script" 11 | 12 | logger "Configure Nomad Server" 13 | NODE_NAME="$(hostname)" 14 | logger "Node name: $NODE_NAME" 15 | 16 | METADATA_LOCAL_IP=`curl ${local_ip_url}` 17 | logger "Local IP: $METADATA_LOCAL_IP" 18 | 19 | logger "Configuring Consul default" 20 | CONSUL_DEFAULT_CONFIG=/etc/consul.d/default.json 21 | CONSUL_DATA_DIR=${data_dir}/consul/data 22 | 23 | sudo mkdir -p $CONSUL_DATA_DIR 24 | sudo chmod 0755 $CONSUL_DATA_DIR 25 | 26 | sudo sed -i -- "s/{{ data_dir }}/$${CONSUL_DATA_DIR//\//\\\/}/g" $CONSUL_DEFAULT_CONFIG 27 | sudo sed -i -- "s/{{ local_ip }}/$METADATA_LOCAL_IP/g" $CONSUL_DEFAULT_CONFIG 28 | sudo sed -i -- "s/{{ datacenter }}/${datacenter}/g" $CONSUL_DEFAULT_CONFIG 29 | sudo sed -i -- "s/{{ 
--------------------------------------------------------------------------------
/terraform/templates/nomad_job/redis/redis.nomad.tpl:
--------------------------------------------------------------------------------
job "redis" {
  region = "${region}"
  datacenters = ["${datacenter}"]
  type = "service"
  priority = 50

  update {
    stagger = "10s"
    max_parallel = 1
  }

  group "redis" {
    count = ${count}

    constraint {
      attribute = "\$${node.datacenter}"
      value = "${datacenter}"
    }

    restart {
      mode = "delay"
      interval = "5m"
      attempts = 10
      delay = "25s"
    }

    task "redis" {
      driver = "docker"

      config {
        image = "${image}"

        port_map {
          db = 6379
        }
      }

      resources {
        cpu = 20
        memory = 15
        disk = 10

        network {
          mbits = 1

          port "db" {
            static = 6379
          }
        }
      }

      logs {
        max_files = 1
        max_file_size = 5
      }

      service {
        name = "redis"
        tags = ["global", "${region}"]
        port = "db"

        check {
          name = "redis alive"
          type = "tcp"
          interval = "10s"
          timeout = "2s"
        }
      }
    }
  }
}
--------------------------------------------------------------------------------
/terraform/templates/nomad_job/redis/redis.tf:
--------------------------------------------------------------------------------
variable "region" { }
variable "datacenter" { }
variable "count" { }
variable "image" { }

resource "template_file" "redis" {
  template = "${file("${path.module}/redis.nomad.tpl")}"

  vars {
    region = "${var.region}"
    datacenter = "${var.datacenter}"
    count = "${var.count}"
    image = "${var.image}"
  }
}

output "job" { value = "${template_file.redis.rendered}" }
--------------------------------------------------------------------------------
/terraform/templates/nomad_server/nomad_server.sh.tpl:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

logger() {
  DT=$(date '+%Y/%m/%d %H:%M:%S')
  echo "$DT nomad_server.sh: $1"
  echo "$DT nomad_server.sh: $1" | sudo tee -a /var/log/user_data.log > /dev/null
}

logger "Begin script"

logger "Configure Nomad Server"
NODE_NAME="$(hostname)"
logger "Node name: $NODE_NAME"

METADATA_LOCAL_IP=`curl ${local_ip_url}`
logger "Local IP: $METADATA_LOCAL_IP"

logger "Configuring Consul default"
CONSUL_DEFAULT_CONFIG=/etc/consul.d/default.json
CONSUL_DATA_DIR=${data_dir}/consul/data

sudo mkdir -p $CONSUL_DATA_DIR
sudo chmod 0755 $CONSUL_DATA_DIR

sudo sed -i -- "s/{{ data_dir }}/$${CONSUL_DATA_DIR//\//\\\/}/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ local_ip }}/$METADATA_LOCAL_IP/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ datacenter }}/${datacenter}/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ node_name }}/$NODE_NAME/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ log_level }}/${consul_log_level}/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ consul_server_encrypt_key }}/${consul_server_encrypt_key}/g" $CONSUL_DEFAULT_CONFIG

logger "Configuring Consul Nomad server"
CONSUL_NOMAD_SERVER_CONFIG=/etc/consul.d/nomad_server.json

sudo sed -i -- "s/\"{{ tags }}\"/\"${provider}\", \"${region}\", \"${zone}\", \"${machine_type}\"/g" $CONSUL_NOMAD_SERVER_CONFIG

echo $(date '+%s') | sudo tee -a /etc/consul.d/configured > /dev/null
sudo service consul start || sudo service consul restart

logger "Configuring Nomad default"
NOMAD_DEFAULT_CONFIG=/etc/nomad.d/default.hcl
NOMAD_DATA_DIR=${data_dir}/nomad/data

sudo mkdir -p $NOMAD_DATA_DIR
sudo chmod 0755 $NOMAD_DATA_DIR

sudo sed -i -- "s/{{ data_dir }}/$${NOMAD_DATA_DIR//\//\\\/}/g" $NOMAD_DEFAULT_CONFIG
sudo sed -i -- "s/{{ region }}/${region}/g" $NOMAD_DEFAULT_CONFIG
sudo sed -i -- "s/{{ datacenter }}/${datacenter}/g" $NOMAD_DEFAULT_CONFIG
sudo sed -i -- "s/{{ local_ip }}/$METADATA_LOCAL_IP/g" $NOMAD_DEFAULT_CONFIG
sudo sed -i -- "s/{{ node_id }}/$NODE_NAME/g" $NOMAD_DEFAULT_CONFIG
sudo sed -i -- "s/{{ name }}/$NODE_NAME/g" $NOMAD_DEFAULT_CONFIG
sudo sed -i -- "s/{{ log_level }}/${nomad_log_level}/g" $NOMAD_DEFAULT_CONFIG

logger "Configuring Nomad server"
NOMAD_SERVER_CONFIG=/etc/nomad.d/server.hcl

sudo sed -i -- "s/{{ local_ip }}/$METADATA_LOCAL_IP/g" $NOMAD_SERVER_CONFIG
sudo sed -i -- "s/{{ bootstrap_expect }}/${bootstrap_expect}/g" $NOMAD_SERVER_CONFIG

echo $(date '+%s') | sudo tee -a /etc/nomad.d/configured > /dev/null
sudo service nomad start || sudo service nomad restart

logger "Nomad server join: ${nomad_join_name}"
sleep 15 # Wait for Nomad service to fully boot
sudo /opt/nomad/nomad_join.sh "${nomad_join_name}" "server"

logger "Done"
--------------------------------------------------------------------------------
/terraform/templates/nomad_server/nomad_server.tf:
--------------------------------------------------------------------------------
output "user_data" { value = "${path.module}/nomad_server.sh.tpl" }
--------------------------------------------------------------------------------
/terraform/templates/pq/pq.sh.tpl:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

logger() {
  DT=$(date '+%Y/%m/%d %H:%M:%S')
  echo "$DT pq.sh: $1"
  echo "$DT pq.sh: $1" | sudo tee -a /var/log/user_data.log > /dev/null
}

logger "Begin script"

servers() {
  PASSING=$(curl -s "http://127.0.0.1:8500/v1/health/service/${consul_join_name}")

  # Check if valid json is returned, otherwise jq command fails
  if [[ "$PASSING" == [{* ]]; then
    echo $(echo $PASSING | jq -r '.[].Node.Address' | tr '\n' ' ')
  fi
}

sleep 15 # Wait for Consul service to fully boot
CONSUL_SERVERS=$(servers)
logger "Initial Consul servers: $CONSUL_SERVERS"
CONSUL_SERVER_LEN=$(echo $CONSUL_SERVERS | wc -w)
logger "Initial Consul server length: $CONSUL_SERVER_LEN"
SLEEPTIME=1

while [ $CONSUL_SERVER_LEN -lt 2 ]
do
  if [ $SLEEPTIME -gt 15 ]; then
    logger "ERROR: CONSUL SETUP NOT COMPLETE! Manual intervention required."
    exit 2
  else
    logger "Waiting for optimum quorum size, currently: $CONSUL_SERVER_LEN, waiting $SLEEPTIME seconds"
    CONSUL_SERVERS=$(servers)
    logger "Consul servers: $CONSUL_SERVERS"
    CONSUL_SERVER_LEN=$(echo $CONSUL_SERVERS | wc -w)
    logger "Consul server length: $CONSUL_SERVER_LEN"
    sleep $SLEEPTIME
    SLEEPTIME=$((SLEEPTIME + 1))
  fi
done

CONSUL_ADDR=http://127.0.0.1:8500

logger "Temporarily registering ${service} service for Prepared Query"
logger "$(
  curl \
    -H "Content-Type: application/json" \
    -LX PUT \
    -d '{ "Name": "${service}" }' \
    $CONSUL_ADDR/v1/agent/service/register
)"

logger "Registering ${service} Prepared Query"
logger "$(
  curl \
    -H "Content-Type: application/json" \
    -LX POST \
    -d \
    '{
      "Name": "${service}",
      "Service": {
        "Service": "${service}",
        "Failover": {
          "NearestN": 3
        },
        "OnlyPassing": true,
        "Tags": ["global"]
      },
      "DNS": {
        "TTL": "10s"
      }
    }' $CONSUL_ADDR/v1/query
)"

logger "Deregistering ${service} service"
logger "$(
  curl $CONSUL_ADDR/v1/agent/service/deregister/${service}
)"

sudo service consul start || sudo service consul restart
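# Once registered, the prepared query is resolvable through Consul DNS as
# <name>.query.consul. A quick check against the local agent (default DNS
# port 8600; the resolved addresses depend on your deployment):
#   dig @127.0.0.1 -p 8600 ${service}.query.consul +short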

logger "Done"
--------------------------------------------------------------------------------
/terraform/templates/pq/pq.tf:
--------------------------------------------------------------------------------
variable "service" { }
variable "consul_join_name" { }

resource "template_file" "pq" {
  template = "${file("${path.module}/pq.sh.tpl")}"

  vars {
    service = "${var.service}"
    consul_join_name = "${var.consul_join_name}"
  }
}

output "script" { value = "${template_file.pq.rendered}" }
--------------------------------------------------------------------------------
/terraform/templates/utility/utility.sh.tpl:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

logger() {
  DT=$(date '+%Y/%m/%d %H:%M:%S')
  echo "$DT utility.sh: $1"
  echo "$DT utility.sh: $1" | sudo tee -a /var/log/user_data.log > /dev/null
}

logger "Begin script"

NODE_NAME="$(hostname)"
logger "Node name: $NODE_NAME"

METADATA_LOCAL_IP=`curl ${local_ip_url}`
logger "Local IP: $METADATA_LOCAL_IP"

logger "Configuring Consul"
CONSUL_DEFAULT_CONFIG=/etc/consul.d/default.json
CONSUL_DATA_DIR=${data_dir}/consul/data

sudo mkdir -p $CONSUL_DATA_DIR
sudo chmod 0755 $CONSUL_DATA_DIR

sudo sed -i -- "s/{{ data_dir }}/$${CONSUL_DATA_DIR//\//\\\/}/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ datacenter }}/${datacenter}/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ node_name }}/$NODE_NAME/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ local_ip }}/$METADATA_LOCAL_IP/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ log_level }}/${consul_log_level}/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ consul_server_encrypt_key }}/${consul_server_encrypt_key}/g" $CONSUL_DEFAULT_CONFIG

logger "Configuring Consul Utility"
CONSUL_UTILITY_CONFIG=/etc/consul.d/utility.json

sudo sed -i -- "s/\"{{ tags }}\"/\"${provider}\", \"${region}\", \"${zone}\", \"${machine_type}\"/g" $CONSUL_UTILITY_CONFIG

logger "Configuring Consul Redis and Statsite"
sudo sed -i -- "s/{{ local_ip }}/$METADATA_LOCAL_IP/g" /etc/consul.d/redis.json
sudo sed -i -- "s/{{ local_ip }}/$METADATA_LOCAL_IP/g" /etc/consul.d/statsite.json

echo $(date '+%s') | sudo tee -a /etc/consul.d/configured > /dev/null
sudo service consul start || sudo service consul restart

STATSITE_DATA_DIR=${data_dir}/statsite/data

sudo mkdir -p $STATSITE_DATA_DIR
sudo chmod 0755 $STATSITE_DATA_DIR

sudo sed -i -- "s/{{ data_dir }}/$${STATSITE_DATA_DIR//\//\\\/}/g" /etc/statsite.d/default.conf

echo $(date '+%s') | sudo tee -a /etc/statsite.d/configured > /dev/null
sudo service statsite start || sudo service statsite restart

logger "Done"
--------------------------------------------------------------------------------
/terraform/templates/utility/utility.tf:
--------------------------------------------------------------------------------
output "user_data" { value = "${path.module}/utility.sh.tpl" }
--------------------------------------------------------------------------------
/terraform/templates/vault_server/vault_server.sh.tpl:
--------------------------------------------------------------------------------
#!/bin/bash
set -e

logger() {
  DT=$(date '+%Y/%m/%d %H:%M:%S')
  echo "$DT vault_server.sh: $1"
  echo "$DT vault_server.sh: $1" | sudo tee -a /var/log/user_data.log > /dev/null
}

logger "Begin script"

logger "Configure Vault Server"
NODE_NAME="$(hostname)"
logger "Node name: $NODE_NAME"

METADATA_LOCAL_IP=`curl ${local_ip_url}`
logger "Local IP: $METADATA_LOCAL_IP"

logger "Configuring Consul default"
CONSUL_DEFAULT_CONFIG=/etc/consul.d/default.json
CONSUL_DATA_DIR=${data_dir}/consul/data

sudo mkdir -p $CONSUL_DATA_DIR
sudo chmod 0755 $CONSUL_DATA_DIR

sudo sed -i -- "s/{{ data_dir }}/$${CONSUL_DATA_DIR//\//\\\/}/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ local_ip }}/$METADATA_LOCAL_IP/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ datacenter }}/${datacenter}/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ node_name }}/$NODE_NAME/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ log_level }}/${consul_log_level}/g" $CONSUL_DEFAULT_CONFIG
sudo sed -i -- "s/{{ consul_server_encrypt_key }}/${consul_server_encrypt_key}/g" $CONSUL_DEFAULT_CONFIG

echo $(date '+%s') | sudo tee -a /etc/consul.d/configured > /dev/null
sudo service consul start || sudo service consul restart

logger "Configuring Vault server"
VAULT_SERVER_CONFIG=/etc/vault.d/default.hcl

#sudo sed -i -- "s/\"{{ tags }}\"/\"${provider}\", \"${region}\", \"${zone}\", \"${machine_type}\"/g" $CONSUL_NOMAD_SERVER_CONFIG

echo $(date '+%s') | sudo tee -a /etc/vault.d/configured > /dev/null
sudo service vault start || sudo service vault restart

sleep 15 # Wait for vault service to fully boot

logger "Done"
--------------------------------------------------------------------------------
/terraform/templates/vault_server/vault_server.tf:
--------------------------------------------------------------------------------
output "user_data" { value = "${path.module}/vault_server.sh.tpl" }
--------------------------------------------------------------------------------
/vault/init.sh:
--------------------------------------------------------------------------------
#!/bin/bash
: ${VAULT_SERVER?"Need to set VAULT_SERVER"}

set -e

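# `vault init` output is captured below into credentials/vault.keys, which
# unseal.sh later parses positionally (head -n 5 | cut -c 15-). That parsing
# assumes the classic init layout -- five unseal keys, then the root token
# (sketch; exact formatting depends on the Vault version):
#   Unseal Key 1: abcd...
#   ...
#   Unseal Key 5: wxyz...
#   Initial Root Token: 01234567-...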
ssh -i ../credentials/id_rsa ubuntu@$VAULT_SERVER "vault init" > $(dirname $0)/../credentials/vault.keys
--------------------------------------------------------------------------------
/vault/issue_cert.sh:
--------------------------------------------------------------------------------
#!/bin/bash
: ${VAULT_SERVER?"Need to set VAULT_SERVER"}
: ${DOMAIN?"Need to set DOMAIN, e.g. example.com"}
: ${SUB?"Need to set SUB, e.g. sub in sub.example.com"}

VAULT_TOKEN=`cat $(dirname $0)/../credentials/vault.keys | grep 'Initial Root Token' | cut -f 2 -d ':'`
VAULT_AUTH_CMD="vault auth $VAULT_TOKEN"

# Issue a cert for a $DOMAIN subdomain valid for 1 week
echo "Issue a subdomain cert"
CERT=$SUB.$DOMAIN.crt
SUBDOMAIN=${DOMAIN//\./\_}
ssh -i ../credentials/id_rsa ubuntu@$VAULT_SERVER "$VAULT_AUTH_CMD; vault write pki/issue/$SUBDOMAIN common_name=\"$SUB.$DOMAIN\" ttl=\"168h\" "
--------------------------------------------------------------------------------
/vault/setup_pki.sh:
--------------------------------------------------------------------------------
#!/bin/bash
: ${VAULT_SERVER?"Need to set VAULT_SERVER"}
: ${DOMAIN?"Need to set DOMAIN, e.g. 'example.com'"}

set -e

VAULT_TOKEN=`cat $(dirname $0)/../credentials/vault.keys | grep 'Initial Root Token' | cut -f 2 -d ':'`
VAULT_AUTH_CMD="vault auth $VAULT_TOKEN"
VAULT_CERT=../packer/config/vault/vault.crt
VAULT_KEY=../packer/config/vault/vault.key
VAULT_INTERMEDIATE=vault_intermediate.crt

ssh -i ../credentials/id_rsa ubuntu@$VAULT_SERVER "$VAULT_AUTH_CMD; vault mount pki"

cat $VAULT_CERT $VAULT_KEY > $VAULT_INTERMEDIATE
scp -i ../credentials/id_rsa $VAULT_INTERMEDIATE ubuntu@$VAULT_SERVER:~/$VAULT_INTERMEDIATE
ssh -i ../credentials/id_rsa ubuntu@$VAULT_SERVER "$VAULT_AUTH_CMD; vault write pki/config/ca pem_bundle=@$VAULT_INTERMEDIATE"
ssh -i ../credentials/id_rsa ubuntu@$VAULT_SERVER "rm ~/$VAULT_INTERMEDIATE"
rm $VAULT_INTERMEDIATE

# Create a role for issuing $DOMAIN certificates
# Max lease time is 14 days (336h)
echo "Create a role for subdomain certs for $DOMAIN"
SUBDOMAIN=${DOMAIN//\./\_}
ssh -i ../credentials/id_rsa ubuntu@$VAULT_SERVER "$VAULT_AUTH_CMD; vault write pki/roles/$SUBDOMAIN allowed_domains=\"$DOMAIN\" lease_max=\"336h\" allow_subdomains=true allow_base_domain=true allow_bare_domains=true"
--------------------------------------------------------------------------------
/vault/unseal.sh:
--------------------------------------------------------------------------------
#!/bin/bash
: ${VAULT_SERVER?"Need to set VAULT_SERVER"}
set -e

IFS=$'\n'
CREDENTIALS=`head -n 5 ../credentials/vault.keys | cut -c 15-`
echo $CREDENTIALS
for key in ${CREDENTIALS}; do
  echo "vault unseal $key"
  ssh -i ../credentials/id_rsa ubuntu@$VAULT_SERVER "vault unseal $key"
done
--------------------------------------------------------------------------------