├── .gitignore
├── Dockerfile
├── LICENSE
├── README.md
├── cluster
│   ├── connect-vault.sh
│   ├── main.tf
│   ├── outputs.tf
│   ├── setup-tunnels.sh
│   ├── user-data-client.sh
│   ├── user-data-server.sh
│   ├── variables.tf
│   └── vpc.tf
├── docker-entry.sh
├── docs
│   ├── Diagram.drawio
│   └── diagram.png
├── image
│   ├── build.pkr.hcl
│   └── variables.pkr.hcl
├── jobs
│   ├── app.nomad
│   ├── postgres.nomad
│   └── traefik.nomad
├── shared
│   ├── config
│   │   ├── consul-template.hcl
│   │   ├── consul-template.service
│   │   ├── consul.json
│   │   ├── consul.service
│   │   ├── consul_client.json
│   │   ├── nomad.hcl
│   │   ├── nomad.service
│   │   ├── nomad_client.hcl
│   │   ├── resolv.conf
│   │   ├── resolved.conf
│   │   ├── vault.hcl
│   │   └── vault.service
│   └── scripts
│       ├── client.sh
│       ├── server.sh
│       └── setup.sh
└── vault
    ├── auth-backends.tf
    ├── databases.tf
    ├── init-vault.sh
    ├── main.tf
    ├── outputs.tf
    ├── policies.tf
    ├── secret-engines.tf
    └── variables.tf
/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # Crash log files
9 | crash.log
10 |
11 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most
12 | # .tfvars files are managed as part of configuration and so should be included in
13 | # version control.
14 | #
15 | # example.tfvars
16 |
17 | .terraform.lock.hcl
18 |
19 | # Ignore override files as they are usually used to override resources locally and so
20 | # are not checked in
21 | override.tf
22 | override.tf.json
23 | *_override.tf
24 | *_override.tf.json
25 |
26 | # Include override files you do wish to add to version control using negated pattern
27 | #
28 | # !example_override.tf
29 |
30 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
31 | # example: *tfplan*
32 |
33 | # Vault creds
34 | vault-credentials.json
35 |
36 | # SSH keys
37 | id_rsa
38 | id_rsa.pub
--------------------------------------------------------------------------------
/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:20.04
2 |
3 | # Update
4 | RUN apt-get -y update \
5 | && apt-get -y dist-upgrade
6 |
7 | # Install some prerequisites and utils
8 | RUN apt-get -y install \
9 | curl \
10 | gnupg2 \
11 | lsb-release \
12 | software-properties-common \
13 | vim \
14 | openssh-client \
15 | jq
16 |
17 | # Install vault, nomad, terraform, packer and consul
18 | RUN curl -fsSL https://apt.releases.hashicorp.com/gpg | apt-key add -
19 | RUN apt-add-repository "deb [arch=amd64] https://apt.releases.hashicorp.com $(lsb_release -cs) main"
20 | RUN apt-get -y install vault nomad consul terraform packer
21 |
22 | # https://github.com/hashicorp/vault/issues/10924#issuecomment-846123151
23 | RUN apt-get install --reinstall -y vault
24 |
25 | # These will be valid when SSH tunnels are up and running
26 | ENV CONSUL_HTTP_ADDR=127.0.0.1:8500
27 | ENV VAULT_ADDR=http://127.0.0.1:8200
28 | ENV NOMAD_ADDR=http://127.0.0.1:4646
29 |
30 |
31 | COPY docker-entry.sh /scripts/docker-entry.sh
32 | RUN chmod +x /scripts/docker-entry.sh
33 | CMD ["/scripts/docker-entry.sh"]
34 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2021 Fredrik Meringdal
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Digital Ocean Hashicorp Cluster
2 | This is a nice hosting stack for side projects:
3 | - Cheap hosting (about $20 per month)
4 | - Easy deployment of the cluster infrastructure and the applications running on it
5 | - Dynamic credentials
6 | - Easy certificate management
7 | - Understandable cluster architecture (compared to Kubernetes)
8 | - Fun tools to use
9 |
10 |
11 | ## Architecture
12 | 
13 |
14 |
15 |
16 | ### Services
17 |
18 | - Consul:
19 |
20 |   - Service mesh
21 |   - Traefik has native integration with Consul for discovering apps and adding / removing them based on their health
22 |
23 |
24 | - Vault:
25 |
26 |   - Encrypted key-value store for secrets
27 |   - Dynamic app secrets: every time an app needs PostgreSQL access it receives a short-lived username / password pair
28 |
29 |
30 | - Nomad:
31 |
32 |   - Nomad will be used to manage the apps we deploy
33 |
34 |
35 | - Traefik:
36 |
37 |   - Cloud-native load balancer deployed on the public ingress node so that it is reachable from the internet
38 |
39 |
40 |
41 | ### Server Droplet
42 |
43 | - Not publicly accessible
44 | - Server instance of Consul
45 | - Server instance of Vault
46 | - Server instance of Nomad
47 |
48 | ### Worker Droplet
49 |
50 | - Not publicly accessible
51 | - Client instance of Consul
52 | - Client instance of Nomad
53 | - The different kinds of services / apps / jobs you need to run. Examples are a PostgreSQL database, a Docker container, a jar file or a Node.js app. All of them are managed by Nomad
54 |
55 | ### Ingress Droplet
56 |
57 | - Publicly accessible
58 | - Client instance of Consul
59 | - Client instance of Nomad
60 | - Traefik managed by Nomad for ingress and certificate management
61 |
62 | ### Tools used for deploying the architecture
63 |
64 | - Docker (consistent deployment environment)
65 | - Terraform (infrastructure)
66 | - Packer (creating immutable droplet images)
67 |
68 |
69 |
70 | ## Deployment Prerequisites
71 | - Docker
72 | - DigitalOcean token
73 | - 30 minutes
74 |
75 | ## Setup environment
76 | There are quite a few tools involved in deploying this architecture, so it is recommended to use Docker for a consistent deployment environment.
77 |
78 | ```bash
79 | # Build the docker image
80 | docker build -t hashiplatform-deploy .
81 | # Run the docker image and mount this repo into it. The ports are so that
82 | # we can access the UI for Nomad, Vault, Consul, Traefik etc
83 | docker run \
84 | -e DO_TOKEN="REPLACE_ME_WITH_DIGITAL_OCEAN_TOKEN" \
85 | -p 4646:4646 -p 8081:8081 -p 8200:8200 -p 8500:8500 \
86 | -v $(pwd):/hashiplatform-deploy \
87 | -it hashiplatform-deploy
88 | # Move into project directory
89 | cd hashiplatform-deploy
90 | ```
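If you want a quick sanity check that the toolchain is wired up, every CLI baked into the image should respond (exact versions will vary with the apt releases):

```bash
# All of these are installed in the Dockerfile
terraform version
packer version
vault version
nomad version
consul version
```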
91 |
92 | ## Build the Droplet Image
93 | Packer is the go-to tool for creating immutable machine images. We will use it to build
94 | the image that our cluster droplets are based on.
95 |
96 | ```bash
97 | cd image
98 | packer init .
99 | packer build .
100 | cd ..
101 | ```
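The `docker-entry.sh` script exports the DigitalOcean token as `PKR_VAR_do_token`, so Packer picks it up automatically. The remaining inputs default as defined in `variables.pkr.hcl` and can be overridden on the command line; for example (the region name here is just an illustration):

```bash
# Optional: build the snapshot in another region than the default
packer build -var 'do_region=ams3' .
```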
102 |
103 | ## Cluster infrastructure
104 | We will use Terraform to deploy the droplets and to configure the cluster's firewall and VPC.
105 |
106 | ```bash
107 | cd cluster
108 | # Create SSH key to access droplets
109 | ssh-keygen -q -t rsa -N '' -f ./id_rsa
110 |
111 | # Init terraform
112 | terraform init
113 |
114 | # Deploy droplets
115 | terraform apply
116 |
117 | # Create SSH tunnel into the server droplets so they can be accessed on localhost
118 | # Remember that the server droplets are private so we need to tunnel through the ingress droplet
119 | # to access them
120 | ./setup-tunnels.sh
121 | cd ..
122 | ```
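Once the tunnels are up, the cluster should answer on localhost (the addresses are preconfigured in the Dockerfile). A quick way to confirm, assuming the droplets have finished booting:

```bash
# All droplets should show up as alive members
consul members
# One entry per server droplet
nomad server members
# The worker and ingress droplets, with status "ready"
nomad node status
```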
123 |
124 | ## Deploying some jobs on Nomad
125 | Let's put our newly created cluster to use by deploying Traefik and PostgreSQL.
126 |
127 | ```bash
128 | cd jobs
129 | nomad run traefik.nomad
130 | # You can now see traefik UI at localhost:8081
131 | nomad run postgres.nomad
132 | cd ..
133 | ```
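If you want to verify the deployments before moving on, something like this should do (the job names come from the job files above):

```bash
nomad job status traefik
nomad job status postgres
# Both should also have registered themselves in Consul
consul catalog services
```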
134 |
135 | ## Initializing Vault
136 | Our app will need Vault secrets, so we have to initialize and unseal Vault before it can be used. We will also manage all of the Vault configuration with Terraform.
137 |
138 | ```bash
139 | cd vault
140 |
141 | # Initialize and unseal vault cluster
142 | ./init-vault.sh
143 |
144 | # Make sure the new environment variables set by the init-vault script
145 | # are sourced into our shell
146 | source ~/.bashrc
147 | export VAULT_TOKEN=$VAULT_TOKEN
148 |
149 | # Init terraform
150 | terraform init
151 |
152 | # Configure Vault (adding policies, secret engines etc)
153 | terraform apply
154 | cd ..
155 | ```
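To double-check that everything went through, a few read-only commands help (the exact policy and engine names depend on the Terraform files in this directory):

```bash
# "Sealed" should be false
vault status
# Should list the policies from policies.tf
vault policy list
# Should list the engines from secret-engines.tf
vault secrets list
```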
156 |
157 | ## Connect Nomad and Vault
158 | Nomad needs a Vault token in order to query Vault for secrets that it can pass on to the jobs deployed on Nomad. This script creates a token for the Nomad server policy defined in the
159 | previous step and restarts the Nomad servers with it.
160 |
161 | ```bash
162 | cd cluster
163 | ./connect-vault.sh
164 | cd ..
165 |
166 | ```
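The restart is quick, but it does not hurt to confirm that all servers rejoined afterwards:

```bash
nomad server members
```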
167 | ## Deploying an app
168 | Let's now connect all the pieces by deploying an app on Nomad that fetches dynamic PostgreSQL credentials from Vault and is exposed publicly by Traefik.
169 |
170 | The app will be available at http://FLOATING_IP:80. You can find the floating IP in the cluster Terraform output or in the DigitalOcean UI. The app will show you the secrets it has received and also let you make DB queries.
171 |
172 | ```bash
173 | cd jobs
174 | nomad run app.nomad
175 | cd ..
176 | # Optional: Get the floating ip
177 | cd cluster
178 | terraform output ingress_floating_ip
179 | cd ..
180 | ```
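As a quick smoke test from inside the deploy container (assuming a Terraform version recent enough for `-raw`, i.e. 0.15+):

```bash
# Should return the app's response via Traefik
curl -i "http://$(terraform -chdir=cluster output -raw ingress_floating_ip)/"
```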
181 |
182 | ## Adding HTTPS
183 | Traefik has really nice integration with Let's Encrypt and can manage your certificates (creation, renewal, etc.). These are the steps needed to activate HTTPS:
184 | 1. Point your domain to the floating ip created in the cluster terraform infrastructure
185 | 2. In `traefik.nomad` uncomment the `certificatesResolvers.myresolver.acme` section and add your own email.
186 | 3. In `app.nomad` your service tags should look like this (remember to replace `YOUR_DOMAIN_NAME`):
187 | ```
188 | "traefik.enable=true",
189 | "traefik.http.routers.app.entryPoints=http,websecure",
190 | "traefik.http.routers.app.rule=Host(`YOUR_DOMAIN_NAME`)",
191 | "traefik.http.routers.app.tls.certResolver=myresolver",
192 | ```
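After re-running `app.nomad`, certificate issuance can take a minute or two. One way to verify it from the deploy container (replace the domain):

```bash
# The certificate details should show a Let's Encrypt issuer
curl -vI https://YOUR_DOMAIN_NAME 2>&1 | grep -i issuer
```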
193 |
--------------------------------------------------------------------------------
/cluster/connect-vault.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | SERVER_DROPLET_IPS=$(terraform output server_droplet_ips)
4 | INGRESS_IP=$(terraform output ingress_droplet_ip)
5 | # Remove double quotes
6 | INGRESS_IP=$(echo "$INGRESS_IP" | tr -d '"')
7 |
8 |
9 | NOMAD_VAULT_POLICY_NAME=$(terraform -chdir=../vault output nomad_server_policy_name)
10 | # Remove double quotes
11 | NOMAD_VAULT_POLICY_NAME=$(echo "$NOMAD_VAULT_POLICY_NAME" | tr -d '"')
12 | NOMAD_VAULT_TOKEN=$(vault token create -policy $NOMAD_VAULT_POLICY_NAME -period 72h -orphan -format=json)
13 | NOMAD_VAULT_TOKEN=$(echo "$NOMAD_VAULT_TOKEN" | jq -r '.auth.client_token')
14 |
15 | IFS=',' read -r -a NOMAD_SERVERS <<< "$SERVER_DROPLET_IPS"
16 |
17 | # SSH into each nomad server and insert vault token to its config
18 | for NOMAD_SERVER in "${NOMAD_SERVERS[@]}"
19 | do
20 |
21 | # Remove double quotes
22 | NOMAD_SERVER=$(echo "$NOMAD_SERVER" | tr -d '"')
23 |
24 | echo "Trying to connect to nomad server root@$NOMAD_SERVER ..."
25 | ssh -i ./id_rsa -o StrictHostKeyChecking=no -o ProxyCommand="ssh -i ./id_rsa -o StrictHostKeyChecking=no -W %h:%p root@$INGRESS_IP" root@$NOMAD_SERVER /bin/bash << EOF
26 | echo "Successfully connected to server via ingress!"
27 |
28 | # Insert the vault token into the nomad config file ($NOMAD_VAULT_TOKEN is expanded locally because the heredoc delimiter is unquoted)
29 | sed -i 's/NOMAD_VAULT_TOKEN/$NOMAD_VAULT_TOKEN/' /etc/nomad.d/nomad.hcl
30 | # Restart nomad to pick up the changes to its config file
31 | sudo systemctl restart nomad
32 |
33 | echo "Done"
34 | echo "Exiting server ..."
35 | EOF
36 | echo "Exited from server $NOMAD_SERVER"
37 | done
38 |
39 | echo "Done configuring nomad servers"
40 | exit 0
41 |
--------------------------------------------------------------------------------
/cluster/main.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | required_providers {
3 | digitalocean = {
4 | source = "digitalocean/digitalocean"
5 | version = "~> 2.0"
6 | }
7 | }
8 | }
9 |
10 |
11 | # Configure the DigitalOcean Provider
12 | provider "digitalocean" {
13 | token = var.do_token
14 | }
15 |
16 | data "digitalocean_images" "cluster_server" {
17 | filter {
18 | key = "private"
19 | values = ["true"]
20 | }
21 | filter {
22 | key = "name"
23 | values = ["nomad_cluster"]
24 | }
25 | sort {
26 | key = "created"
27 | direction = "desc"
28 | }
29 | }
30 |
31 | data "template_file" "user_data_server" {
32 | template = file("${path.root}/user-data-server.sh")
33 |
34 | vars = {
35 | nomad_servers_count = var.nomad_servers_count
36 | retry_join = chomp(
37 | join(
38 | " ",
39 | formatlist("%s=%s", keys(local.retry_join), values(local.retry_join)),
40 | ),
41 | )
42 | }
43 | }
44 |
45 | data "template_file" "user_data_client" {
46 | template = file("${path.root}/user-data-client.sh")
47 |
48 | vars = {
49 | retry_join = chomp(
50 | join(
51 | " ",
52 | formatlist("%s=%s", keys(local.retry_join), values(local.retry_join)),
53 | ),
54 | )
55 | }
56 | }
57 |
58 | resource "digitalocean_ssh_key" "default" {
59 | name = "Nomad Cluster"
60 | public_key = file("${path.root}/id_rsa.pub")
61 | }
62 |
63 | resource "digitalocean_droplet" "nomad_server" {
64 | count = var.nomad_servers_count
65 | image = data.digitalocean_images.cluster_server.images[0].id
66 | # Consul members name must be unique
67 | name = "nomad-cluster-server-${count.index}"
68 | region = var.do_region
69 | size = var.server_droplet_size
70 | user_data = data.template_file.user_data_server.rendered
71 | ssh_keys = [digitalocean_ssh_key.default.fingerprint]
72 | vpc_uuid = digitalocean_vpc.cluster.id
73 |
74 | tags = [
75 | local.retry_join.tag_name
76 | ]
77 | }
78 |
79 | resource "digitalocean_droplet" "nomad_client" {
80 | count = var.nomad_clients_count
81 | image = data.digitalocean_images.cluster_server.images[0].id
82 | # Consul members name must be unique
83 | name = "nomad-cluster-general-client-${count.index}"
84 | region = var.do_region
85 | size = var.client_droplet_size
86 | user_data = data.template_file.user_data_client.rendered
87 | ssh_keys = [digitalocean_ssh_key.default.fingerprint]
88 | vpc_uuid = digitalocean_vpc.cluster.id
89 |
90 | tags = [
91 | local.retry_join.tag_name
92 | ]
93 | }
94 |
95 | resource "digitalocean_droplet" "ingress_client" {
96 | image = data.digitalocean_images.cluster_server.images[0].id
97 | # Consul members name must be unique
98 | name = "nomad-cluster-ingress"
99 | region = var.do_region
100 | vpc_uuid = digitalocean_vpc.cluster.id
101 | size = var.ingress_droplet_size
102 | user_data = data.template_file.user_data_client.rendered
103 | ssh_keys = [digitalocean_ssh_key.default.fingerprint]
104 |
105 | tags = [
106 | local.retry_join.tag_name
107 | ]
108 | }
109 |
110 | resource "digitalocean_floating_ip" "cluster_ingress" {
111 | region = var.do_region
112 | }
113 |
114 | resource "digitalocean_floating_ip_assignment" "cluster_ingress" {
115 | ip_address = digitalocean_floating_ip.cluster_ingress.ip_address
116 | droplet_id = digitalocean_droplet.ingress_client.id
117 | }
118 |
119 | locals {
120 | cluster_droplet_ids = concat(
121 | [digitalocean_droplet.ingress_client.id],
122 | digitalocean_droplet.nomad_client.*.id,
123 | digitalocean_droplet.nomad_server.*.id
124 | )
125 | }
126 |
127 | # Firewall
128 | resource "digitalocean_firewall" "cluster_traffic" {
129 | name = "nomad-cluster-intra-traffic"
130 |
131 | droplet_ids = concat(
132 | digitalocean_droplet.nomad_client.*.id,
133 | digitalocean_droplet.nomad_server.*.id
134 | )
135 |
136 |
137 | inbound_rule {
138 | protocol = "tcp"
139 | port_range = "1-65535"
140 | source_droplet_ids = local.cluster_droplet_ids
141 | }
142 | inbound_rule {
143 | protocol = "udp"
144 | port_range = "1-65535"
145 | source_droplet_ids = local.cluster_droplet_ids
146 | }
147 | inbound_rule {
148 | protocol = "icmp"
149 | port_range = "1-65535"
150 | source_droplet_ids = local.cluster_droplet_ids
151 | }
152 |
153 | outbound_rule {
154 | protocol = "tcp"
155 | port_range = "1-65535"
156 | destination_addresses = ["0.0.0.0/0", "::/0"]
157 | }
158 | outbound_rule {
159 | protocol = "udp"
160 | port_range = "1-65535"
161 | destination_addresses = ["0.0.0.0/0", "::/0"]
162 | }
163 | outbound_rule {
164 | protocol = "icmp"
165 | port_range = "1-65535"
166 | destination_addresses = ["0.0.0.0/0", "::/0"]
167 | }
168 | }
169 |
170 | resource "digitalocean_firewall" "ingress" {
171 | name = "nomad-cluster-ingress"
172 |
173 | droplet_ids = [digitalocean_droplet.ingress_client.id]
174 |
175 |
176 | # All tcp traffic on port 22, 80 and 443 from outside
177 | inbound_rule {
178 | protocol = "tcp"
179 | port_range = "22"
180 | source_addresses = ["0.0.0.0/0", "::/0"]
181 | }
182 | inbound_rule {
183 | protocol = "tcp"
184 | port_range = "80"
185 | source_addresses = ["0.0.0.0/0", "::/0"]
186 | }
187 | inbound_rule {
188 | protocol = "tcp"
189 | port_range = "443"
190 | source_addresses = ["0.0.0.0/0", "::/0"]
191 | }
192 |
193 | # All traffic from cluster
194 | inbound_rule {
195 | protocol = "tcp"
196 | port_range = "1-65535"
197 | source_droplet_ids = local.cluster_droplet_ids
198 | }
199 | inbound_rule {
200 | protocol = "udp"
201 | port_range = "1-65535"
202 | source_droplet_ids = local.cluster_droplet_ids
203 | }
204 | inbound_rule {
205 | protocol = "icmp"
206 | port_range = "1-65535"
207 | source_droplet_ids = local.cluster_droplet_ids
208 | }
209 |
210 | outbound_rule {
211 | protocol = "tcp"
212 | port_range = "1-65535"
213 | destination_addresses = ["0.0.0.0/0", "::/0"]
214 | }
215 | outbound_rule {
216 | protocol = "udp"
217 | port_range = "1-65535"
218 | destination_addresses = ["0.0.0.0/0", "::/0"]
219 | }
220 | outbound_rule {
221 | protocol = "icmp"
222 | port_range = "1-65535"
223 | destination_addresses = ["0.0.0.0/0", "::/0"]
224 | }
225 | }
226 |
--------------------------------------------------------------------------------
/cluster/outputs.tf:
--------------------------------------------------------------------------------
1 |
2 | output "ingress_floating_ip" {
3 | value = digitalocean_floating_ip.cluster_ingress.ip_address
4 | }
5 |
6 | output "server_droplet_ips" {
7 | value = join(",", digitalocean_droplet.nomad_server[*].ipv4_address)
8 | }
9 |
10 | output "ingress_droplet_ip" {
11 | value = digitalocean_droplet.ingress_client.ipv4_address
12 | }
13 |
14 | // output "consul_access_cmd" {
15 | //   value = format(...
--------------------------------------------------------------------------------
/docker-entry.sh:
--------------------------------------------------------------------------------
...
9 | echo "TF_VAR_do_token=$DO_TOKEN" >> /root/.bashrc
10 | export TF_VAR_do_token=$DO_TOKEN
11 | # Default packer do_token input variables to this token
12 | echo "PKR_VAR_do_token=$DO_TOKEN" >> /root/.bashrc
13 | export PKR_VAR_do_token=$DO_TOKEN
14 |
15 | /bin/bash
--------------------------------------------------------------------------------
/docs/Diagram.drawio:
--------------------------------------------------------------------------------
1 | 7VtZc6M4EP41rtp9MAUSAvwYO/HsVGVmZyuzm52nlAwyJgbECvnI/vptmcOAGM9lx5msc1ndQuj4+utWCzLAk2T7RtBs8Y4HLB4gM9gO8PUAIQuNPPhQmqdC4zlOoQhFFBQqc6+4i/5lZctKu4oClpe6QiU5j2WUtZU+T1Pmy5aOCsE37cvmPA5aioyGTFPc+TTWtfdRIBeFFpnI3Vf8xqJwUXVtOaOiJqHV1eVU8gUN+KahwjcDPBGcy6KUbCcsVqvXXpjpZ2rrkQmWyq9p8AmRu9kt/+hvyO398s2KfNwOh+Vd1jRelTP+68OkHK98qlYh41EqdytJxvAD/UzMAYGaiZIMRDqKruy2FZYuqXu0FV3ZbSus7u2tTv9Wd4ANhSa1bm92+jcbA4QfPOYrGUcpm9Q2Z4IyFDSIAIoJj7kAXcpTWL3xQiYxSBYUN4tIsruM+mpVN8AX0M15Kkurt1Allwuv7gpGk6lysg0VwQy6yW0jFHyV7bp8C3bfW/uwznzVXAq+ZNWQBggj2/MsW3UUxXFnqGsmZASmfxVHobqr5KoTWkoxm0t1Rxh/lIa3O+kam+WYG11cXY3dsQf6gOYLFpQT0a21Mj3olW0bqtJ63zCeMCme4JKylpS+pHQlbsmrzZ6XnoMK3aJBSdes/EHpC8L6znu2QKEkzDeQZ6SRZzBBgysYl3nHBEwsh9K14FnMpCr+ogwXPjH8wmohkyjPWNb/qrGuCRGs6810ak6xBmoJXmux+5Fs2pal4wbfU3ytI34E5Dy7hZxl9UBnOjp0cOWJoKtG0IfdJFY81qEzwe4Fy5VI0wD+WhWOCRdMTYeLJRNnxfQE2JEWdtglPdgRHTuMyYmwIxp0Bdv2iF0C2HcEsHNFKp4+ZIIlUc76Apbje2w27wQs0AeUeXP/B6LWsaOTVfmqysnZupNz7R6iWPapiGLpu7u3tQu7UOVClbNRpR1TbAtpVMHVrq21HyD4x6nybrrykt/fz//8Y76mYn376I1vevIgjRgValGyyxnHu8+rPNubBa2EebRV61Uv+C2dsfgDzyMZ7aCdcSl58llEyh6uF1KqNPdKTQ1NN5uNEVDJ/SQf0jyH7YXhw03QFHkqRk8tB3YkhFjIG84EbFAW/pquYll14SuDMPJ1WGOoAdYD62cxrDP7yt25WMPQcQ27Z1c3cowqST46jujV4GiOcIUj+IN8FZ8ISBuRDpCohqeJ5cggPcFr5J4ISPxqgLRMswIy5QkNToQjIZ0kq4eQrm3Y9jOCaOmgsSBkVSzkQi54yFMa3+y1Y4h0aVCHm/01t1xhsQu6j0zKpzLC0pXk7ZDMtpH8WzWHuF5Inxo119vyzjvhqRJSmG+jkRLrVkrYN9tJVbtifmpSh2GDNeAr4bMDi1WCIKkImfySi9PNQLCYymjdHsfREXVfDS2fy79i8Ke4xUzHdDVm2vVFTWKSI4TJ3szA02C8r84uLnnBJS84U15AzHYIs03P0E+bsNOzD0HEOVUMw5cY9tUxrDpxeNlBrOdc5BLFvnAKjIjRyfieN4z1W5EO5Hu1xW4c4h8+fw8I8wK7z2l6aIYd59sP4c3jrDfB7cW2bd0RItfT19oancoR9pDkbI7QPOgIj+nQzK90aPisDq3nadZP6tCeKVt2YVeOnC9SzMaG25Mwk1MlzEgH8pU4tPpxyCGHhnvDxwldmvs/dGnkBz1V2fSDygcbAWvUfvyFuke8hQctW3Vwq4fx/VDqD4UvHvAgITGC5OrlecCed2peGo4B93ODxjJKUyOXqyDioJRsmQJAYQTlom5fxXmcA55zQROm3hHJlVJQNo+W+5KRpcd6NGO2z5tIz0adIGPU42rdU+1Q0E/Az1UWcxoYm2gZJSyIqMFFqGgLcqZkKANfE0izFGqLVTJTxqB+wWin73nAjMf8AWyA71gKhEaemW2H3ZrjIe1UNKyRRhrSI2ygHv46xuhkWZnz8rFWWORr6ABcaqpOO6ebbAgZtNztsUpTUDiDSwaeTBWlpo90TYcsy4dFw+EO0OOB6bRTPuLotHVwDVsTzYb6+GjqZ8VnRLPzzqK5+zoYcWfUX8XUgM3NQZAVhdVrbNOM51K9I/NPfGR8CRkZLmoHXM+rgWuh3OOZzVPh+6Ii7rfjC9F4yUS5k4JumAI0YHP17gOUID/aaXLuRzQuojdc/zCnPptxvnyAuQl2TJSLRw0tIuM+lD3LGPWcZH8H0CDu/42g2FDv/xsD3/wH
--------------------------------------------------------------------------------
/docs/diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fmeringdal/do-hashicorp-cluster/a774328ad1b5c853e6f8b2c28053f7b42c27ae55/docs/diagram.png
--------------------------------------------------------------------------------
/image/build.pkr.hcl:
--------------------------------------------------------------------------------
1 | source "digitalocean" "default" {
2 | api_token = var.do_token
3 | region = var.do_region
4 | image = "ubuntu-20-04-x64"
5 | size = "s-1vcpu-1gb"
6 | ssh_username = "root"
7 | snapshot_name = var.snapshot_name
8 | droplet_name = var.droplet_name
9 | }
10 |
11 | build {
12 | sources = ["source.digitalocean.default"]
13 |
14 | provisioner "shell" {
15 | inline = [
16 | "sudo mkdir /ops",
17 | "sudo chmod 777 /ops"
18 | ]
19 | }
20 |
21 | provisioner "file" {
22 | source = "../shared"
23 | destination = "/ops"
24 | }
25 |
26 | provisioner "shell" {
27 | script = "../shared/scripts/setup.sh"
28 | }
29 | }
30 |
31 | packer {
32 | required_plugins {
33 | digitalocean = {
34 | version = ">= 1.0.0"
35 | source = "github.com/digitalocean/digitalocean"
36 | }
37 | }
38 | }
--------------------------------------------------------------------------------
/image/variables.pkr.hcl:
--------------------------------------------------------------------------------
1 | variable "do_token" {
2 | type = string
3 | }
4 |
5 | variable "do_region" {
6 | type = string
7 | default = "lon1"
8 | }
9 |
10 | variable "droplet_name" {
11 | type = string
12 | default = "nomad-cluster-packer"
13 | }
14 |
15 | variable "snapshot_name" {
16 | type = string
17 | default = "nomad_cluster"
18 | }
--------------------------------------------------------------------------------
/jobs/app.nomad:
--------------------------------------------------------------------------------
1 | job "app" {
2 | datacenters = ["dc1"]
3 |
4 | constraint {
5 | attribute = "${attr.unique.hostname}"
6 | operator = "regexp"
7 | value = "nomad-cluster-general-client-[0-9]+$"
8 | }
9 |
10 | group "app" {
11 | count = 1
12 |
13 | network {
14 | port "http"{
15 | to = 80
16 | }
17 | }
18 |
19 | service {
20 | name = "app"
21 | port = "http"
22 |
23 | tags = [
24 | "traefik.enable=true",
25 | "traefik.http.routers.app.entryPoints=http",
26 | "traefik.http.routers.app.rule=Path(`/`)"
27 | ]
28 |
29 | check {
30 | type = "http"
31 | path = "/"
32 | interval = "30s"
33 | timeout = "2s"
34 | }
35 | }
36 |
37 | task "app" {
38 | vault {
39 | policies = ["default_nomad_job"]
40 |
41 | change_mode = "signal"
42 | change_signal = "SIGUSR1"
43 | }
44 |
45 | template {
46 | data = <<EOF
...
--------------------------------------------------------------------------------
/shared/scripts/client.sh:
--------------------------------------------------------------------------------
...
46 | if [[ `wget -S --spider $NOMAD_BINARY 2>&1 | grep 'HTTP/1.1 200 OK'` ]]; then
47 | curl -L $NOMAD_BINARY > nomad.zip
48 | sudo unzip -o nomad.zip -d /usr/local/bin
49 | sudo chmod 0755 /usr/local/bin/nomad
50 | sudo chown root:root /usr/local/bin/nomad
51 | fi
52 |
53 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/nomad_client.hcl
54 | sudo cp $CONFIGDIR/nomad_client.hcl $NOMADCONFIGDIR/nomad.hcl
55 | sudo cp $CONFIGDIR/nomad.service /etc/systemd/system/nomad.service
56 |
57 | sudo systemctl enable nomad.service
58 | sudo systemctl start nomad.service
59 | sleep 10
60 | export NOMAD_ADDR=http://$IP_ADDRESS:4646
61 |
62 | # Consul Template
63 | sudo cp $CONFIGDIR/consul-template.hcl $CONSULTEMPLATECONFIGDIR/consul-template.hcl
64 | sudo cp $CONFIGDIR/consul-template.service /etc/systemd/system/consul-template.service
65 |
66 | # Add hostname to /etc/hosts
67 | echo "127.0.0.1 $(hostname)" | sudo tee --append /etc/hosts
68 |
69 | # Add Docker bridge network IP to /etc/resolv.conf (at the top)
70 | # echo "nameserver $DOCKER_BRIDGE_IP_ADDRESS" | sudo tee /etc/resolv.conf.new
71 | # cat /etc/resolv.conf | sudo tee --append /etc/resolv.conf.new
72 | # sudo mv /etc/resolv.conf.new /etc/resolv.conf
73 |
74 |
75 | sudo rm /etc/resolv.conf
76 | sudo cp $CONFIGDIR/resolv.conf /etc/resolv.conf
77 |
78 | # Make docker also pick up the newest resolv.conf
79 | sudo systemctl restart docker
80 |
81 | # Set env vars for tool CLIs
82 | echo "export VAULT_ADDR=http://$IP_ADDRESS:8200" | sudo tee --append /home/$HOME_DIR/.bashrc
83 | echo "export NOMAD_ADDR=http://$IP_ADDRESS:4646" | sudo tee --append /home/$HOME_DIR/.bashrc
84 |
--------------------------------------------------------------------------------
/shared/scripts/server.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | CONFIGDIR=/ops/shared/config
6 |
7 | CONSULCONFIGDIR=/etc/consul.d
8 | VAULTCONFIGDIR=/etc/vault.d
9 | NOMADCONFIGDIR=/etc/nomad.d
10 | CONSULTEMPLATECONFIGDIR=/etc/consul-template.d
11 | HOME_DIR=ubuntu
12 |
13 | # Wait for network
14 | sleep 15
15 |
16 | SERVER_COUNT=$1
17 | RETRY_JOIN=$2
18 | NOMAD_BINARY=$3
19 |
20 | # Get IP from metadata service
21 | IP_ADDRESS=$(curl http://169.254.169.254/metadata/v1/interfaces/public/0/ipv4/address)
22 | # IP_ADDRESS="$(/sbin/ifconfig eth0 | grep 'inet addr:' | cut -d: -f2 | awk '{ print $1}')"
23 |
24 | # Consul
25 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/consul.json
26 | sed -i "s/SERVER_COUNT/$SERVER_COUNT/g" $CONFIGDIR/consul.json
27 | sed -i "s/RETRY_JOIN/$RETRY_JOIN/g" $CONFIGDIR/consul.json
28 | sudo cp $CONFIGDIR/consul.json $CONSULCONFIGDIR
29 | sudo cp $CONFIGDIR/consul.service /etc/systemd/system/consul.service
30 |
31 | # https://learn.hashicorp.com/tutorials/consul/dns-forwarding#systemd-resolved-setup
32 | # Stop port 53 listener
33 | sudo systemctl stop systemd-resolved
34 | sudo systemctl disable systemd-resolved
35 |
36 | sudo rm /etc/systemd/resolved.conf
37 | sudo mv $CONFIGDIR/resolved.conf /etc/systemd/resolved.conf
38 | sudo ln -sf /run/systemd/resolve/resolv.conf /etc/resolv.conf
39 |
40 | # Now consul can use port 53
41 | export CONSUL_ALLOW_PRIVILEGED_PORTS=yes
42 | sudo systemctl enable consul.service
43 | sudo systemctl start consul.service
44 |
45 |
46 | sleep 10
47 | export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500
48 | export CONSUL_RPC_ADDR=$IP_ADDRESS:8400
49 |
50 | # Vault
51 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/vault.hcl
52 | sudo cp $CONFIGDIR/vault.hcl $VAULTCONFIGDIR
53 | sudo cp $CONFIGDIR/vault.service /etc/systemd/system/vault.service
54 |
55 | sudo systemctl enable vault.service
56 | sudo systemctl start vault.service
57 |
58 | # Nomad
59 |
60 | ## Replace existing Nomad binary if remote file exists
61 | if [[ `wget -S --spider $NOMAD_BINARY 2>&1 | grep 'HTTP/1.1 200 OK'` ]]; then
62 | curl -L $NOMAD_BINARY > nomad.zip
63 | sudo unzip -o nomad.zip -d /usr/local/bin
64 | sudo chmod 0755 /usr/local/bin/nomad
65 | sudo chown root:root /usr/local/bin/nomad
66 | fi
67 |
68 | sed -i "s/IP_ADDRESS/$IP_ADDRESS/g" $CONFIGDIR/nomad.hcl
69 | sed -i "s/SERVER_COUNT/$SERVER_COUNT/g" $CONFIGDIR/nomad.hcl
70 | sudo cp $CONFIGDIR/nomad.hcl $NOMADCONFIGDIR
71 | sudo cp $CONFIGDIR/nomad.service /etc/systemd/system/nomad.service
72 |
73 | sudo systemctl enable nomad.service
74 | sudo systemctl start nomad.service
75 | sleep 10
76 | export NOMAD_ADDR=http://$IP_ADDRESS:4646
77 |
78 | # Consul Template
79 | sudo cp $CONFIGDIR/consul-template.hcl $CONSULTEMPLATECONFIGDIR/consul-template.hcl
80 | sudo cp $CONFIGDIR/consul-template.service /etc/systemd/system/consul-template.service
81 |
82 | # Add hostname to /etc/hosts
83 | echo "127.0.0.1 $(hostname)" | sudo tee --append /etc/hosts
84 |
85 | # Add Docker bridge network IP to /etc/resolv.conf (at the top)
86 | # echo "nameserver $DOCKER_BRIDGE_IP_ADDRESS" | sudo tee /etc/resolv.conf.new
87 | # cat /etc/resolv.conf | sudo tee --append /etc/resolv.conf.new
88 | # sudo mv /etc/resolv.conf.new /etc/resolv.conf
89 |
90 | sudo rm /etc/resolv.conf
91 | sudo cp $CONFIGDIR/resolv.conf /etc/resolv.conf
92 |
93 | # Make docker also pick up the newest resolv.conf
94 | sudo systemctl restart docker
95 |
96 | # Set env vars for tool CLIs
97 | echo "export CONSUL_RPC_ADDR=$IP_ADDRESS:8400" | sudo tee --append /home/$HOME_DIR/.bashrc
98 | echo "export CONSUL_HTTP_ADDR=$IP_ADDRESS:8500" | sudo tee --append /home/$HOME_DIR/.bashrc
99 | echo "export VAULT_ADDR=http://$IP_ADDRESS:8200" | sudo tee --append /home/$HOME_DIR/.bashrc
100 | echo "export NOMAD_ADDR=http://$IP_ADDRESS:4646" | sudo tee --append /home/$HOME_DIR/.bashrc
101 |
--------------------------------------------------------------------------------
/shared/scripts/setup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | # Disable interactive apt prompts
6 | export DEBIAN_FRONTEND=noninteractive
7 |
8 | # https://stackoverflow.com/questions/54327058/aws-ami-need-to-explicitly-remove-apt-locks-when-provisioning-ami-from-bionic
9 | while [ ! -f /var/lib/cloud/instance/boot-finished ]; do
10 | echo "Waiting for cloud init ..."
11 | sleep 5
12 | done
13 |
14 | while fuser /var/lib/apt/lists/lock >/dev/null 2>&1 ; do
15 | echo "Waiting for cloud init ..."
16 | sleep 5
17 | done
18 |
19 | # sudo rm -r /var/lib/apt/lists/*
20 |
21 |
22 | cd /ops
23 |
24 | CONFIGDIR=/ops/shared/config
25 |
26 | CONSULVERSION=1.10.0
27 | CONSULDOWNLOAD=https://releases.hashicorp.com/consul/${CONSULVERSION}/consul_${CONSULVERSION}_linux_amd64.zip
28 | CONSULCONFIGDIR=/etc/consul.d
29 | CONSULDIR=/opt/consul
30 |
31 | VAULTVERSION=1.7.2
32 | VAULTDOWNLOAD=https://releases.hashicorp.com/vault/${VAULTVERSION}/vault_${VAULTVERSION}_linux_amd64.zip
33 | VAULTCONFIGDIR=/etc/vault.d
34 | VAULTDIR=/opt/vault
35 |
36 | NOMADVERSION=1.1.2
37 | NOMADDOWNLOAD=https://releases.hashicorp.com/nomad/${NOMADVERSION}/nomad_${NOMADVERSION}_linux_amd64.zip
38 | NOMADCONFIGDIR=/etc/nomad.d
39 | NOMADDIR=/opt/nomad
40 |
41 | CONSULTEMPLATEVERSION=0.26.0
42 | CONSULTEMPLATEDOWNLOAD=https://releases.hashicorp.com/consul-template/${CONSULTEMPLATEVERSION}/consul-template_${CONSULTEMPLATEVERSION}_linux_amd64.zip
43 | CONSULTEMPLATECONFIGDIR=/etc/consul-template.d
44 | CONSULTEMPLATEDIR=/opt/consul-template
45 |
46 | # Dependencies
47 | sudo apt-get update
48 | # https://superuser.com/questions/1412054/non-interactive-apt-upgrade
49 | # https://serverfault.com/questions/48724/100-non-interactive-debian-dist-upgrade
50 | apt-get \
51 | -o Dpkg::Options::=--force-confold \
52 | -o Dpkg::Options::=--force-confdef \
53 | -y --allow-downgrades \
54 | --allow-remove-essential \
55 | --allow-change-held-packages \
56 | dist-upgrade
57 | # sudo apt-get -y dist-upgrade
58 | sudo apt-get -y upgrade
59 | sudo apt-get -y autoremove
60 | sudo apt-get install -y unzip tree redis-tools jq curl tmux software-properties-common
61 |
62 |
63 | # Disable the firewall
64 | sudo ufw disable || echo "ufw not installed"
65 |
66 | # Consul
67 | curl -L $CONSULDOWNLOAD > consul.zip
68 |
69 | ## Install
70 | sudo unzip consul.zip -d /usr/local/bin
71 | sudo chmod 0755 /usr/local/bin/consul
72 | sudo chown root:root /usr/local/bin/consul
73 |
74 | ## Configure
75 | sudo mkdir -p $CONSULCONFIGDIR
76 | sudo chmod 755 $CONSULCONFIGDIR
77 | sudo mkdir -p $CONSULDIR
78 | sudo chmod 755 $CONSULDIR
79 |
80 | # Vault
81 | curl -L $VAULTDOWNLOAD > vault.zip
82 |
83 | ## Install
84 | sudo unzip vault.zip -d /usr/local/bin
85 | sudo chmod 0755 /usr/local/bin/vault
86 | sudo chown root:root /usr/local/bin/vault
87 |
88 | ## Configure
89 | sudo mkdir -p $VAULTCONFIGDIR
90 | sudo chmod 755 $VAULTCONFIGDIR
91 | sudo mkdir -p $VAULTDIR
92 | sudo chmod 755 $VAULTDIR
93 |
94 | # Nomad
95 |
96 | curl -L $NOMADDOWNLOAD > nomad.zip
97 |
98 | ## Install
99 | sudo unzip nomad.zip -d /usr/local/bin
100 | sudo chmod 0755 /usr/local/bin/nomad
101 | sudo chown root:root /usr/local/bin/nomad
102 |
103 | ## Configure
104 | sudo mkdir -p $NOMADCONFIGDIR
105 | sudo chmod 755 $NOMADCONFIGDIR
106 | sudo mkdir -p $NOMADDIR
107 | sudo chmod 755 $NOMADDIR
108 |
109 | # Consul Template
110 |
111 | curl -L $CONSULTEMPLATEDOWNLOAD > consul-template.zip
112 |
113 | ## Install
114 | sudo unzip consul-template.zip -d /usr/local/bin
115 | sudo chmod 0755 /usr/local/bin/consul-template
116 | sudo chown root:root /usr/local/bin/consul-template
117 |
118 | ## Configure
119 | sudo mkdir -p $CONSULTEMPLATECONFIGDIR
120 | sudo chmod 755 $CONSULTEMPLATECONFIGDIR
121 | sudo mkdir -p $CONSULTEMPLATEDIR
122 | sudo chmod 755 $CONSULTEMPLATEDIR
123 |
124 |
125 | # Docker
126 | distro=$(lsb_release -si | tr '[:upper:]' '[:lower:]')
127 | sudo apt-get install -y apt-transport-https ca-certificates gnupg2
128 | curl -fsSL https://download.docker.com/linux/${distro}/gpg | sudo apt-key add -
129 | sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/${distro} $(lsb_release -cs) stable"
130 | sudo apt-get install -y docker-ce
131 |
132 |
133 | # Node JS and npm
134 | curl -sL https://deb.nodesource.com/setup_14.x | sudo bash -
135 | sudo apt-get install -y nodejs gcc g++ make
136 |
--------------------------------------------------------------------------------
/vault/auth-backends.tf:
--------------------------------------------------------------------------------
1 | resource "vault_auth_backend" "userpass" {
2 | type = "userpass"
3 |
4 | tune {
5 | default_lease_ttl = "90000s"
6 | max_lease_ttl = "90000s"
7 | }
8 | }
9 |
10 | resource "vault_generic_endpoint" "u1" {
11 | depends_on = [vault_auth_backend.userpass]
12 | path = "auth/userpass/users/u1"
13 | ignore_absent_fields = true
14 |
15 | data_json = <<EOF
...
--------------------------------------------------------------------------------
/vault/init-vault.sh:
--------------------------------------------------------------------------------
...
9 | vault operator init ... > vault-credentials.json
10 |
11 | VAULT_ROOT_TOKEN=$(cat vault-credentials.json | jq '.root_token')
12 | # Remove double quotes
13 | VAULT_ROOT_TOKEN=$(echo "$VAULT_ROOT_TOKEN" | tr -d '"')
14 |
15 | echo "VAULT_TOKEN=$VAULT_ROOT_TOKEN" >> ~/.bashrc
16 | # Make sure vault token is sourced, so start new shell
17 | echo "Vault is initialized"
18 | echo "Unsealing vault ..."
19 |
20 | unseal_keys=$(cat vault-credentials.json | jq -r '.unseal_keys_b64[]' | head -$KEY_THRESHOLD)
21 | for unseal_key in ${unseal_keys[@]}; do
22 | vault operator unseal $unseal_key
23 | done
24 |
25 | echo "Vault is unsealed and ready for use"
--------------------------------------------------------------------------------
/vault/main.tf:
--------------------------------------------------------------------------------
1 |
2 | terraform {
3 | required_version = ">= 0.11.8"
4 | }
5 |
6 | provider "vault" {
7 | address = "http://127.0.0.1:8200"
8 | }
--------------------------------------------------------------------------------
/vault/outputs.tf:
--------------------------------------------------------------------------------
1 | output "nomad_server_token_cmd" {
2 | value = "vault token create -policy ${vault_policy.nomad_server.name} -period 72h -orphan"
3 | }
4 |
5 | output "nomad_server_policy_name" {
6 | value = vault_policy.nomad_server.name
7 | }
--------------------------------------------------------------------------------
/vault/policies.tf:
--------------------------------------------------------------------------------
1 | resource "vault_policy" "admin" {
2 | name = "admin"
3 |
4 | policy = <