├── aws
│   ├── packer
│   │   ├── .keep
│   │   ├── example.vars
│   │   ├── fetch-tag.sh
│   │   ├── consul.json
│   │   └── vault.json
│   └── terraform
│       ├── .keep
│       ├── example.tfvars
│       └── main.tf
├── azure
│   ├── packer
│   │   ├── .keep
│   │   ├── example.vars
│   │   ├── vault.json
│   │   └── consul.json
│   └── terraform
│       ├── .keep
│       ├── example.tfvars
│       └── main.tf
├── gcp
│   ├── packer
│   │   ├── .keep
│   │   ├── example.vars
│   │   ├── vault.json
│   │   └── consul.json
│   └── terraform
│       ├── .keep
│       ├── example.tfvars
│       └── main.tf
├── ansible
│   ├── group_vars
│   │   ├── consul
│   │   │   ├── .keep
│   │   │   └── main.yml
│   │   ├── aws.yml
│   │   ├── gcp.yml
│   │   ├── azure.yml
│   │   ├── example.yml
│   │   └── vault
│   │       └── main.yml
│   ├── roles
│   │   ├── consul
│   │   │   ├── files
│   │   │   │   ├── .keep
│   │   │   │   ├── tokens
│   │   │   │   │   └── .keep
│   │   │   │   └── policies
│   │   │   │       ├── haproxy.hcl
│   │   │   │       ├── agent.hcl
│   │   │   │       └── vault.hcl
│   │   │   ├── templates
│   │   │   │   ├── tmpfiles-consul.conf.j2
│   │   │   │   ├── syslog.conf.j2
│   │   │   │   ├── logrotate.j2
│   │   │   │   ├── consul_systemd.service.j2
│   │   │   │   ├── agent-bootstrap.sh.j2
│   │   │   │   └── consul-config.json.j2
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   ├── configure.yml
│   │   │   │   ├── main.yml
│   │   │   │   ├── user.yml
│   │   │   │   ├── logging.yml
│   │   │   │   ├── prepare-bootstrap.yml
│   │   │   │   ├── firewall.yml
│   │   │   │   ├── directories.yml
│   │   │   │   ├── tls.yml
│   │   │   │   └── install.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── vault
│   │   │   ├── files
│   │   │   │   └── .keep
│   │   │   ├── templates
│   │   │   │   ├── tmpfiles-vault.conf.j2
│   │   │   │   ├── syslog.conf.j2
│   │   │   │   ├── logrotate.j2
│   │   │   │   ├── vault_systemd.service.j2
│   │   │   │   └── vault-config.json.j2
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   ├── tasks
│   │   │   │   ├── configure.yml
│   │   │   │   ├── main.yml
│   │   │   │   ├── firewall.yml
│   │   │   │   ├── user.yml
│   │   │   │   ├── logging.yml
│   │   │   │   ├── directories.yml
│   │   │   │   ├── install.yml
│   │   │   │   └── tls.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   ├── haproxy-consul-template
│   │   │   ├── files
│   │   │   │   └── .keep
│   │   │   ├── tasks
│   │   │   │   ├── install-extra-packages.yml
│   │   │   │   ├── firewall.yml
│   │   │   │   ├── consul-template-sudoers.yml
│   │   │   │   ├── main.yml
│   │   │   │   ├── install-haproxy-packages.yml
│   │   │   │   ├── configure-consul-template.yml
│   │   │   │   ├── consul-template-directories.yml
│   │   │   │   ├── tls.yml
│   │   │   │   └── install-consul-template.yml
│   │   │   ├── templates
│   │   │   │   ├── tmpfiles-consul-template.conf.j2
│   │   │   │   ├── consul-template_systemd_unit.service.j2
│   │   │   │   ├── consul_template_config.hcl.j2
│   │   │   │   └── haproxy.cfg.ctmpl.j2
│   │   │   ├── handlers
│   │   │   │   └── main.yml
│   │   │   └── defaults
│   │   │       └── main.yml
│   │   └── ufw
│   │       └── tasks
│   │           └── main.yml
│   ├── consul.yml
│   └── vault.yml
├── .gitignore
├── tls-bootstrap
│   └── bootstrap.sh
└── README.md

--------------------------------------------------------------------------------
/aws/packer/.keep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/azure/packer/.keep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/gcp/packer/.keep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/aws/terraform/.keep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/azure/terraform/.keep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/gcp/terraform/.keep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/ansible/group_vars/consul/.keep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/ansible/roles/consul/files/.keep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/ansible/roles/vault/files/.keep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/ansible/roles/consul/files/tokens/.keep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/ansible/roles/haproxy-consul-template/files/.keep:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/ansible/group_vars/aws.yml:
--------------------------------------------------------------------------------
1 | consul_datacenter: "example-dc"
2 | 
--------------------------------------------------------------------------------
/ansible/group_vars/gcp.yml:
--------------------------------------------------------------------------------
1 | consul_datacenter: "example-dc"
2 | haproxy_enable_incoming_proxy_protocol: false
3 | 
--------------------------------------------------------------------------------
/ansible/group_vars/azure.yml:
--------------------------------------------------------------------------------
1 | consul_datacenter: "example-dc"
2 | haproxy_enable_incoming_proxy_protocol: false
3 | 
--------------------------------------------------------------------------------
/ansible/consul.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - hosts: all
4 |   roles:
5 |     - ufw
6 |     - consul
7 |   become: yes
8 | 
9 | ...
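The play above (and ansible/vault.yml below) is normally applied by Packer's ansible provisioner during an image build, but it can also be run by hand. A minimal sketch, assuming a hypothetical hand-written inventory file and SSH access as a sudo-capable user:

    ansible-playbook -i hosts.ini ansible/consul.yml

Because the play sets become: yes, the connecting user needs passwordless sudo on the target.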
10 | -------------------------------------------------------------------------------- /ansible/roles/vault/templates/tmpfiles-vault.conf.j2: -------------------------------------------------------------------------------- 1 | D {{ vault_pid_dir }} 0755 vault vault - 2 | D {{ vault_tmp_dir }} 0755 vault vault - -------------------------------------------------------------------------------- /gcp/packer/example.vars: -------------------------------------------------------------------------------- 1 | { 2 | "gcp_account_file_path": "", 3 | "gcp_project_id": "" 4 | } 5 | -------------------------------------------------------------------------------- /ansible/roles/consul/templates/tmpfiles-consul.conf.j2: -------------------------------------------------------------------------------- 1 | D {{ consul_pid_dir }} 0755 consul consul - 2 | D {{ consul_tmp_dir }} 0755 consul consul - -------------------------------------------------------------------------------- /ansible/roles/vault/templates/syslog.conf.j2: -------------------------------------------------------------------------------- 1 | if $programname == 'vault' then {{ vault_log_dir }}/vault.log 2 | if $programname == 'vault' then ~ 3 | -------------------------------------------------------------------------------- /ansible/group_vars/consul/main.yml: -------------------------------------------------------------------------------- 1 | consul_crt_file: consul-servers.crt 2 | consul_ca_crt_file: consul-ca.crt 3 | consul_key_file: consul-servers.key 4 | -------------------------------------------------------------------------------- /ansible/roles/consul/templates/syslog.conf.j2: -------------------------------------------------------------------------------- 1 | if $programname == 'consul' then {{ consul_log_dir }}/consul.log 2 | if $programname == 'consul' then ~ 3 | -------------------------------------------------------------------------------- /ansible/roles/vault/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart_rsyslog 4 | systemd: 5 | name: rsyslog 6 | state: restarted 7 | 8 | ... 9 | -------------------------------------------------------------------------------- /aws/packer/example.vars: -------------------------------------------------------------------------------- 1 | { 2 | "aws_access_key": "", 3 | "aws_secret_key": "", 4 | "region": "" 5 | } 6 | -------------------------------------------------------------------------------- /ansible/roles/consul/files/policies/haproxy.hcl: -------------------------------------------------------------------------------- 1 | node_prefix "vault" { 2 | policy = "read" 3 | } 4 | 5 | service "vault" { 6 | policy = "read" 7 | } 8 | -------------------------------------------------------------------------------- /ansible/roles/consul/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart_rsyslog 4 | systemd: 5 | name: rsyslog 6 | state: restarted 7 | 8 | ... 9 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-consul-template/tasks/install-extra-packages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install jq 4 | apt: 5 | name: jq 6 | state: present 7 | 8 | ... 
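The tmpfiles-*.conf.j2 templates above declare "D" entries, so systemd-tmpfiles recreates the PID and tmp directories with the right ownership on every boot. A quick sanity check on a built host, without rebooting (paths assume the role defaults):

    systemd-tmpfiles --create /etc/tmpfiles.d/consul.conf
    ls -ld /run/consul /tmp/consul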
9 | -------------------------------------------------------------------------------- /azure/packer/example.vars: -------------------------------------------------------------------------------- 1 | { 2 | "subscription_id": "", 3 | "location": "", 4 | "resource_group": "" 5 | } 6 | -------------------------------------------------------------------------------- /ansible/vault.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: all 4 | roles: 5 | - ufw 6 | - consul 7 | - vault 8 | - haproxy-consul-template 9 | become: yes 10 | 11 | ... 12 | -------------------------------------------------------------------------------- /aws/terraform/example.tfvars: -------------------------------------------------------------------------------- 1 | vault_hostname = "vault.example.com" 2 | consul_hostname = "consul.example.com" 3 | trusted_external_ips = [] 4 | vault_instance_image_filters = ["vault-*", "vault-*"] 5 | -------------------------------------------------------------------------------- /azure/terraform/example.tfvars: -------------------------------------------------------------------------------- 1 | vault_hostname = "vault.example.com" 2 | consul_hostname = "consul.example.com" 3 | trusted_external_ips = [] 4 | vault_instance_image_filters = ["vault-", "vault-"] 5 | -------------------------------------------------------------------------------- /ansible/roles/ufw/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Allow SSH through ufw 4 | ufw: 5 | direction: in 6 | proto: tcp 7 | to_port: '22' 8 | rule: allow 9 | 10 | - name: Enable ufw 11 | ufw: 12 | state: enabled 13 | 14 | ... 15 | -------------------------------------------------------------------------------- /ansible/roles/consul/templates/logrotate.j2: -------------------------------------------------------------------------------- 1 | {{ consul_log_dir }}/*.log { 2 | daily 3 | rotate 30 4 | missingok 5 | notifempty 6 | compress 7 | postrotate 8 | systemctl kill -s HUP rsyslog.service 9 | endscript 10 | } 11 | -------------------------------------------------------------------------------- /ansible/roles/vault/tasks/configure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy vault config 4 | template: 5 | src: vault-config.json.j2 6 | dest: "{{ vault_config_dir }}/vault.json" 7 | owner: root 8 | group: vault 9 | mode: 0640 10 | 11 | ... 12 | -------------------------------------------------------------------------------- /ansible/roles/consul/tasks/configure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy consul config 4 | template: 5 | src: consul-config.json.j2 6 | dest: "{{ consul_config_dir }}/consul.json" 7 | owner: root 8 | group: consul 9 | mode: 0640 10 | 11 | ... 
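The example.tfvars files above seed the per-cloud Terraform configurations. A typical invocation, assuming provider credentials are already configured in the environment:

    cd aws/terraform
    cp example.tfvars terraform.tfvars    # fill in the blanks; terraform.tfvars is auto-loaded
    terraform init
    terraform apply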
12 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-consul-template/templates/tmpfiles-consul-template.conf.j2: -------------------------------------------------------------------------------- 1 | D {{ consul_template_pid_dir }} 0755 {{ consul_template_user }} {{ consul_template_user }} - 2 | D {{ consul_template_tmp_dir }} 0755 {{ consul_template_user }} {{ consul_template_user }} - 3 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-consul-template/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart_consul_template 4 | service: 5 | name: consul-template 6 | state: restarted 7 | 8 | - name: restart_rsyslog 9 | service: 10 | name: rsyslog 11 | state: restarted 12 | -------------------------------------------------------------------------------- /gcp/terraform/example.tfvars: -------------------------------------------------------------------------------- 1 | vault_hostname = "vault.example.com" 2 | consul_hostname = "consul.example.com" 3 | trusted_external_ips = [] 4 | credentials = "" 5 | project = "" 6 | region = "" 7 | -------------------------------------------------------------------------------- /ansible/roles/consul/files/policies/agent.hcl: -------------------------------------------------------------------------------- 1 | node_prefix "consul" { 2 | policy = "write" 3 | } 4 | 5 | node_prefix "vault" { 6 | policy = "write" 7 | } 8 | 9 | service_prefix "consul" { 10 | policy = "write" 11 | } 12 | 13 | service_prefix "" { 14 | policy = "read" 15 | } 16 | -------------------------------------------------------------------------------- /ansible/roles/vault/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - import_tasks: "user.yml" 4 | - import_tasks: "install.yml" 5 | - import_tasks: "directories.yml" 6 | - import_tasks: "configure.yml" 7 | - import_tasks: "tls.yml" 8 | - import_tasks: "logging.yml" 9 | - import_tasks: "firewall.yml" 10 | 11 | ... 
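The policy files under ansible/roles/consul/files/policies/ are not applied by the roles themselves. A hedged sketch of the expected flow, run against a bootstrapped cluster with a management token in CONSUL_HTTP_TOKEN, saving the secret where the consul role defaults' lookup() expects it:

    consul acl policy create -name agent -rules @ansible/roles/consul/files/policies/agent.hcl
    consul acl token create -policy-name agent -format=json | jq -r '.SecretID' > ansible/roles/consul/files/tokens/agent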
12 | -------------------------------------------------------------------------------- /ansible/roles/vault/templates/logrotate.j2: -------------------------------------------------------------------------------- 1 | {{ vault_log_dir }}/*.log { 2 | daily 3 | rotate 30 4 | missingok 5 | notifempty 6 | compress 7 | postrotate 8 | systemctl kill -s HUP rsyslog.service 9 | systemctl kill -s HUP vault.service 10 | endscript 11 | } 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.key 2 | *.tfstate 3 | *.tfstate.backup 4 | *.DS_Store 5 | .terraform 6 | ansible/roles/consul/files/tokens/* 7 | !ansible/roles/consul/files/tokens/.keep 8 | master-token 9 | ansible/roles/consul/files/consul_* 10 | ansible/roles/haproxy-consul-template/files/consul-template_* 11 | ansible/roles/vault/files/vault_* 12 | -------------------------------------------------------------------------------- /ansible/roles/consul/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - import_tasks: "user.yml" 4 | - import_tasks: "install.yml" 5 | - import_tasks: "directories.yml" 6 | - import_tasks: "configure.yml" 7 | - import_tasks: "tls.yml" 8 | - import_tasks: "logging.yml" 9 | - import_tasks: "firewall.yml" 10 | - import_tasks: "prepare-bootstrap.yml" 11 | 12 | ... 13 | -------------------------------------------------------------------------------- /ansible/roles/vault/tasks/firewall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Allow HTTPS through firewall 4 | ufw: 5 | proto: tcp 6 | rule: allow 7 | direction: in 8 | to_port: "8200" 9 | 10 | - name: Allow forwarding through firewall 11 | ufw: 12 | proto: any 13 | rule: allow 14 | direction: in 15 | to_port: "8201" 16 | 17 | ... 18 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-consul-template/tasks/firewall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Allow HTTP through firewall 4 | ufw: 5 | direction: "in" 6 | proto: "tcp" 7 | rule: "allow" 8 | to_port: "80" 9 | 10 | - name: Allow HTTPS through firewall 11 | ufw: 12 | direction: "in" 13 | proto: "tcp" 14 | rule: "allow" 15 | to_port: "443" 16 | ... 17 | -------------------------------------------------------------------------------- /ansible/roles/vault/tasks/user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create private user group 4 | group: 5 | name: vault 6 | 7 | - name: Create vault user 8 | user: 9 | name: vault 10 | create_home: false 11 | home: "{{ vault_config_dir }}" 12 | group: vault 13 | groups: users 14 | shell: /bin/false 15 | password: "{{ vault_user_password_hash }}" 16 | system: true 17 | 18 | ... 19 | -------------------------------------------------------------------------------- /ansible/roles/consul/tasks/user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create private user group 4 | group: 5 | name: consul 6 | 7 | - name: Create consul user 8 | user: 9 | name: consul 10 | create_home: false 11 | home: "{{ consul_config_dir }}" 12 | group: consul 13 | groups: users 14 | shell: /bin/false 15 | password: "{{ consul_user_password_hash }}" 16 | system: true 17 | 18 | ... 
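The *_user_password_hash variables consumed by the user tasks above are crypt(3) SHA-512 hashes, like the sample values in group_vars/example.yml. One way to generate a replacement:

    openssl passwd -6
    # or, on Debian/Ubuntu: mkpasswd --method=sha-512 (whois package)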
19 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-consul-template/tasks/consul-template-sudoers.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Enable sudo for all consul-template commands 4 | lineinfile: 5 | line: "{{ consul_template_user }} ALL=(ALL) NOPASSWD: /bin/systemctl restart haproxy" 6 | dest: "/etc/sudoers.d/consul-template" 7 | owner: root 8 | group: root 9 | mode: 0440 10 | create: yes 11 | validate: '/usr/sbin/visudo -cf %s' 12 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-consul-template/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - import_tasks: "install-haproxy-packages.yml" 4 | - import_tasks: "install-consul-template.yml" 5 | - import_tasks: "consul-template-directories.yml" 6 | - import_tasks: "tls.yml" 7 | - import_tasks: "configure-consul-template.yml" 8 | - import_tasks: "consul-template-sudoers.yml" 9 | - import_tasks: "install-extra-packages.yml" 10 | - import_tasks: "firewall.yml" 11 | 12 | ... 13 | -------------------------------------------------------------------------------- /ansible/roles/consul/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy logrotate config 4 | template: 5 | src: logrotate.j2 6 | dest: /etc/logrotate.d/consul 7 | owner: root 8 | group: root 9 | mode: 0644 10 | 11 | - name: Copy syslog config 12 | template: 13 | src: syslog.conf.j2 14 | dest: /etc/rsyslog.d/consul.conf 15 | owner: root 16 | group: root 17 | mode: 0644 18 | notify: restart_rsyslog 19 | 20 | ... 21 | -------------------------------------------------------------------------------- /ansible/roles/consul/tasks/prepare-bootstrap.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy agent bootstrap script 4 | template: 5 | src: agent-bootstrap.sh.j2 6 | dest: /etc/consul.d/agent-bootstrap.sh 7 | mode: 0744 8 | owner: root 9 | 10 | - name: Copy agent token 11 | copy: 12 | dest: "/etc/consul.d/agent-token" 13 | content: "{{ consul_agent_acl_token }}" 14 | owner: consul 15 | group: root 16 | mode: 0660 17 | 18 | ... 19 | -------------------------------------------------------------------------------- /ansible/roles/vault/tasks/logging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy logrotate config 4 | template: 5 | src: logrotate.j2 6 | dest: /etc/logrotate.d/vault 7 | owner: root 8 | group: root 9 | mode: 0644 10 | 11 | - name: Copy syslog config 12 | template: 13 | src: syslog.conf.j2 14 | dest: /etc/rsyslog.d/vault.conf 15 | owner: root 16 | group: root 17 | mode: 0644 18 | notify: restart_rsyslog 19 | 20 | ... 
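The sudoers entry above deliberately grants the consul-template user a single privileged command. After a run, both the file's syntax and the effective grant can be checked on the host:

    visudo -cf /etc/sudoers.d/consul-template
    sudo -l -U consul-template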
21 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-consul-template/tasks/install-haproxy-packages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Prepare APT for PPA 4 | apt: 5 | name: software-properties-common 6 | state: present 7 | 8 | - name: Add HAProxy PPA 9 | apt_repository: 10 | repo: ppa:vbernat/haproxy-2.7 11 | 12 | - name: Install HAProxy package 13 | apt: 14 | name: haproxy 15 | state: present 16 | 17 | - name: Enable systemd service 18 | systemd: 19 | name: haproxy 20 | enabled: yes 21 | 22 | ... 23 | -------------------------------------------------------------------------------- /ansible/roles/consul/files/policies/vault.hcl: -------------------------------------------------------------------------------- 1 | node_prefix "consul" { 2 | policy = "write" 3 | } 4 | 5 | node_prefix "vault" { 6 | policy = "write" 7 | } 8 | 9 | service_prefix "consul" { 10 | policy = "write" 11 | } 12 | 13 | service "vault" { 14 | policy = "write" 15 | } 16 | 17 | service_prefix "" { 18 | policy = "read" 19 | } 20 | 21 | key_prefix "vault/" { 22 | policy = "write" 23 | } 24 | 25 | agent_prefix "" { 26 | policy = "write" 27 | } 28 | 29 | session_prefix "" { 30 | policy = "write" 31 | } 32 | -------------------------------------------------------------------------------- /ansible/group_vars/example.yml: -------------------------------------------------------------------------------- 1 | consul_user_password_hash: "$6$ufl3OpgY5$9y8.WgmbjXoWjWZtYmn.glLx3u7fH4Vmnts7kscd0Nn1BSLJSjRP7TJf5Sj93GLsHR.Rnp3i2oPbGW37nPjSZ/" 2 | consul_gossip_encryption_key: "VEMZ9AFKup9uYwgAGdLrMA==" 3 | consul_template_user_password_hash: "$6$ufl3OpgY5$9y8.WgmbjXoWjWZtYmn.glLx3u7fH4Vmnts7kscd0Nn1BSLJSjRP7TJf5Sj93GLsHR.Rnp3i2oPbGW37nPjSZ/" 4 | vault_user_password_hash: "$6$ufl3OpgY5$9y8.WgmbjXoWjWZtYmn.glLx3u7fH4Vmnts7kscd0Nn1BSLJSjRP7TJf5Sj93GLsHR.Rnp3i2oPbGW37nPjSZ/" 5 | 6 | consul_datacenter: "example-dc" 7 | vault_lb_hostname: vault.example.com 8 | -------------------------------------------------------------------------------- /ansible/roles/consul/tasks/firewall.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Allow HTTPS through firewall 4 | ufw: 5 | proto: tcp 6 | rule: allow 7 | direction: in 8 | to_port: "8501" 9 | 10 | - name: Allow SERF through firewall 11 | ufw: 12 | proto: any 13 | rule: allow 14 | direction: in 15 | to_port: "8301" 16 | 17 | - name: Allow RPC through firewall 18 | ufw: 19 | proto: tcp 20 | rule: allow 21 | direction: in 22 | to_port: "8300" 23 | 24 | - name: Allow gRPC through firewall 25 | ufw: 26 | proto: tcp 27 | rule: allow 28 | direction: in 29 | to_port: "8503" 30 | 31 | ... 
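The consul_gossip_encryption_key in group_vars/example.yml above is a published sample and must be replaced, as the comment in the consul role defaults also notes; Consul generates a suitable key with:

    consul keygen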
32 | 
--------------------------------------------------------------------------------
/ansible/roles/vault/templates/vault_systemd.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Vault server
3 | Requires=network-online.target consul.service
4 | After=network-online.target consul.service
5 | 
6 | [Service]
7 | User=vault
8 | Group=vault
9 | PIDFile={{ vault_pid_dir }}/vault.pid
10 | PermissionsStartOnly=true
11 | StandardOutput=syslog
12 | StandardError=syslog
13 | SyslogIdentifier=vault
14 | ExecStart={{ vault_binary_install_dir }}/vault server -config={{ vault_config_dir }}
15 | ExecReload=/bin/kill -HUP $MAINPID
16 | KillMode=process
17 | KillSignal=SIGTERM
18 | Restart=on-failure
19 | RestartSec=42s
20 | TimeoutStopSec=15
21 | 
22 | [Install]
23 | WantedBy=multi-user.target
24 | 
--------------------------------------------------------------------------------
/ansible/group_vars/vault/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | consul_agent_only: true
3 | consul_crt_file: consul-agents.crt
4 | consul_ca_crt_file: consul-ca.crt
5 | consul_key_file: consul-agents.key
6 | vault_consulauth_ca_crt_file: consul-ca.crt
7 | vault_consulauth_crt_file: consul-agents.crt
8 | vault_consulauth_key_file: consul-agents.key
9 | consul_template_consulauth_ca_crt_file: consul-ca.crt
10 | consul_template_consulauth_crt_file: consul-agents.crt
11 | consul_template_consulauth_key_file: consul-agents.key
12 | vault_consul_acl_token: "{{ lookup('file', 'roles/consul/files/tokens/vault') }}"
13 | consul_template_consul_token: "{{ lookup('file', 'roles/consul/files/tokens/haproxy') }}"
14 | ...
15 | 
--------------------------------------------------------------------------------
/ansible/roles/vault/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | vault_version: 1.13.2
3 | vault_verify_binary_checksum: true
4 | vault_download_filename: "vault_{{ vault_version }}_linux_amd64.zip"
5 | vault_overwrite_binary: false
6 | # vault_user_password_hash:
7 | vault_binary_install_dir: /usr/local/bin
8 | vault_pid_dir: /run/vault
9 | vault_tmp_dir: /tmp/vault
10 | vault_config_dir: /etc/vault.d
11 | vault_data_dir: /opt/vault
12 | vault_log_dir: /var/log/vault
13 | vault_crt_file: vault.crt
14 | vault_ca_crt_file: vault-ca.crt
15 | vault_key_file: vault.key
16 | vault_consulauth_ca_crt_file: consul-ca.crt
17 | vault_consulauth_crt_file: consul.crt
18 | vault_consulauth_key_file: consul.key
19 | # vault_consul_acl_token:
20 | 
21 | ...
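group_vars/vault/main.yml above reads the Vault and HAProxy Consul ACL tokens from files under roles/consul/files/tokens/ (gitignored apart from .keep). A sketch of producing them, along the same lines as the agent token example earlier:

    consul acl policy create -name vault -rules @ansible/roles/consul/files/policies/vault.hcl
    consul acl token create -policy-name vault -format=json | jq -r '.SecretID' > ansible/roles/consul/files/tokens/vault
    consul acl policy create -name haproxy -rules @ansible/roles/consul/files/policies/haproxy.hcl
    consul acl token create -policy-name haproxy -format=json | jq -r '.SecretID' > ansible/roles/consul/files/tokens/haproxy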
22 | 
--------------------------------------------------------------------------------
/ansible/roles/consul/templates/consul_systemd.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Consul agent
3 | Requires=network-online.target
4 | After=network-online.target
5 | 
6 | [Service]
7 | User=consul
8 | Group=consul
9 | PIDFile={{ consul_pid_dir }}/consul.pid
10 | PermissionsStartOnly=true
11 | StandardOutput=syslog
12 | StandardError=syslog
13 | SyslogIdentifier=consul
14 | ExecStartPre={{ consul_binary_install_dir }}/consul validate {{ consul_config_dir }}
15 | ExecStart={{ consul_binary_install_dir }}/consul agent \
16 |   -config-dir={{ consul_config_dir }} \
17 |   -pid-file={{ consul_pid_dir }}/consul.pid
18 | ExecReload=/bin/kill -HUP $MAINPID
19 | KillMode=process
20 | KillSignal=SIGTERM
21 | Restart=on-failure
22 | RestartSec=42s
23 | TimeoutStopSec=15
24 | 
25 | [Install]
26 | WantedBy=multi-user.target
27 | 
--------------------------------------------------------------------------------
/ansible/roles/haproxy-consul-template/tasks/configure-consul-template.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - name: Copy config file
4 |   template:
5 |     src: consul_template_config.hcl.j2
6 |     dest: "{{ consul_template_config_dir }}/consul-template.hcl"
7 |     owner: root
8 |     group: "{{ consul_template_user }}"
9 |     mode: 0640
10 |   notify: restart_consul_template
11 | 
12 | - name: Copy HAProxy template
13 |   template:
14 |     src: "haproxy.cfg.ctmpl.j2"
15 |     dest: "{{ consul_template_config_dir }}/templates/haproxy.cfg.ctmpl"
16 |     owner: root
17 |     group: "{{ consul_template_user }}"
18 |     mode: 0640
19 |   notify: restart_consul_template
20 | 
21 | - name: Ensure consul-template is started and enabled
22 |   systemd:
23 |     name: consul-template
24 |     state: started
25 |     enabled: yes
26 |     daemon_reload: yes
27 | 
--------------------------------------------------------------------------------
/ansible/roles/haproxy-consul-template/templates/consul-template_systemd_unit.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Consul-template dynamic config daemon
3 | Requires=network-online.target
4 | After=network-online.target
5 | 
6 | [Service]
7 | User={{ consul_template_user }}
8 | Group={{ consul_template_user }}
9 | PIDFile={{ consul_template_pid_dir }}/consul-template.pid
10 | Restart=on-failure
11 | StandardOutput=syslog
12 | StandardError=syslog
13 | SyslogIdentifier={{ consul_template_syslog_program_name }}
14 | ExecStart={{ consul_template_binary_install_dir }}/consul-template $OPTIONS -pid-file={{ consul_template_pid_dir }}/consul-template.pid -config={{ consul_template_config_dir }}/consul-template.hcl
15 | ExecReload=/bin/kill -s HUP $MAINPID
16 | KillSignal=SIGINT
17 | TimeoutStopSec=15
18 | 
19 | [Install]
20 | WantedBy=multi-user.target
21 | 
--------------------------------------------------------------------------------
/ansible/roles/consul/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | consul_version: 1.15.2
3 | consul_verify_binary_checksum: true
4 | consul_download_filename: "consul_{{ consul_version }}_linux_amd64.zip"
5 | consul_overwrite_binary: false
6 | # consul_user_password_hash:
7 | consul_binary_install_dir: /usr/local/bin
8 | consul_pid_dir: /run/consul
9 | consul_tmp_dir: /tmp/consul
10 | consul_config_dir: /etc/consul.d
11 | consul_data_dir:
/opt/consul 12 | consul_log_dir: /var/log/consul 13 | consul_crt_file: consul.crt 14 | consul_ca_crt_file: consul-ca.crt 15 | consul_key_file: consul.key 16 | # consul_gossip_encryption_key: # Populate this with the output from consul-keygen 17 | consul_agent_acl_token: "{{ lookup('file', 'tokens/agent') }}" 18 | consul_default_acl_token: "{{ lookup('file', 'tokens/agent') }}" 19 | consul_datacenter: "eu-west-2" 20 | consul_agent_only: false 21 | ... 22 | -------------------------------------------------------------------------------- /ansible/roles/vault/tasks/directories.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create config dir 4 | file: 5 | path: "{{ vault_config_dir }}" 6 | state: directory 7 | owner: root 8 | group: vault 9 | mode: 0750 10 | 11 | - name: Create data directory 12 | file: 13 | path: "{{ vault_data_dir }}" 14 | state: directory 15 | owner: vault 16 | group: vault 17 | mode: 0750 18 | 19 | - name: Create log directory 20 | file: 21 | path: "{{ vault_log_dir }}" 22 | state: directory 23 | owner: "syslog" 24 | group: vault 25 | mode: 0775 26 | 27 | - name: Setup tmpfiles config 28 | template: 29 | src: tmpfiles-vault.conf.j2 30 | dest: /etc/tmpfiles.d/vault.conf 31 | register: tmpfilesd_result 32 | 33 | - name: Ensure directories under tmpfiles.d exist 34 | shell: "systemd-tmpfiles --create --remove" 35 | when: tmpfilesd_result.changed 36 | 37 | ... 38 | -------------------------------------------------------------------------------- /ansible/roles/consul/tasks/directories.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create config dir 4 | file: 5 | path: "{{ consul_config_dir }}" 6 | state: directory 7 | owner: root 8 | group: consul 9 | mode: 0750 10 | 11 | - name: Create data directory 12 | file: 13 | path: "{{ consul_data_dir }}" 14 | state: directory 15 | owner: consul 16 | group: consul 17 | mode: 0750 18 | 19 | - name: Create log directory 20 | file: 21 | path: "{{ consul_log_dir }}" 22 | state: directory 23 | owner: "syslog" 24 | group: consul 25 | mode: 0775 26 | 27 | - name: Setup tmpfiles config 28 | template: 29 | src: tmpfiles-consul.conf.j2 30 | dest: /etc/tmpfiles.d/consul.conf 31 | register: tmpfilesd_result 32 | 33 | - name: Ensure directories under tmpfiles.d exist 34 | shell: "systemd-tmpfiles --create --remove" 35 | when: tmpfilesd_result.changed 36 | 37 | ... 
38 | -------------------------------------------------------------------------------- /ansible/roles/vault/templates/vault-config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "pid_file": "{{ vault_pid_dir }}/vault.pid", 3 | "ui": true, 4 | "disable_mlock": true, 5 | "log_format": "json", 6 | "listener": [{ 7 | "tcp": { 8 | "address": "0.0.0.0:8200", 9 | "proxy_protocol_behavior": "deny_unauthorized", 10 | "proxy_protocol_authorized_addrs": [], 11 | "tls_cert_file": "{{ vault_config_dir }}/{{ vault_crt_file|basename }}", 12 | "tls_key_file": "{{ vault_config_dir }}/{{ vault_key_file|basename }}" 13 | } 14 | }], 15 | "storage": { 16 | "consul": { 17 | "address": "127.0.0.1:8501", 18 | "scheme": "https", 19 | "token": "{{ vault_consul_acl_token }}", 20 | "tls_ca_file": "{{ vault_config_dir }}/{{ vault_consulauth_ca_crt_file|basename }}", 21 | "tls_cert_file":"{{ vault_config_dir }}/{{ vault_consulauth_crt_file|basename }}" , 22 | "tls_key_file": "{{ vault_config_dir }}/{{ vault_consulauth_key_file|basename }}" 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-consul-template/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | consul_template_version: 0.31.0 3 | consul_template_verify_binary_checksum: true 4 | consul_template_download_filename: "consul-template_{{ consul_template_version }}_linux_amd64.zip" 5 | consul_template_overwrite_binary: false 6 | consul_template_user: consul-template 7 | # consul_template_user_password_hash: 8 | consul_template_binary_install_dir: /usr/local/bin 9 | consul_template_pid_dir: /run/consul-template 10 | consul_template_tmp_dir: /tmp/consul-template 11 | consul_template_config_dir: /etc/consul-template.d 12 | consul_template_log_dir: /var/log/consul-template 13 | vault_ca_crt_file: vault-ca.crt 14 | consul_template_consulauth_ca_crt_file: consul-ca.crt 15 | consul_template_consulauth_crt_file: consul.crt 16 | consul_template_consulauth_key_file: consul.key 17 | consul_template_syslog_program_name: consul-template 18 | # consul_template_consul_token: 19 | vault_lb_hostname: vault.example.com 20 | haproxy_enable_incoming_proxy_protocol: true 21 | ... 
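The vault-config.json.j2 above only wires Vault to the local Consul agent over HTTPS; a freshly booted cluster still has to be initialised and unsealed once. The standard first-boot steps (hostname and share counts here are illustrative):

    export VAULT_ADDR=https://vault.example.com
    vault operator init -key-shares=5 -key-threshold=3
    vault operator unseal    # repeat with the threshold number of unseal keys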
22 | 
--------------------------------------------------------------------------------
/ansible/roles/haproxy-consul-template/tasks/consul-template-directories.yml:
--------------------------------------------------------------------------------
1 | ---
2 | 
3 | - name: Create config directory
4 |   file:
5 |     path: "{{ consul_template_config_dir }}"
6 |     state: directory
7 |     owner: root
8 |     group: "{{ consul_template_user }}"
9 |     mode: 0775
10 | 
11 | - name: Create templates directory
12 |   file:
13 |     path: "{{ consul_template_config_dir }}/templates"
14 |     state: directory
15 |     owner: root
16 |     group: "{{ consul_template_user }}"
17 |     mode: 0775
18 | 
19 | - name: Setup tmpfiles config
20 |   template:
21 |     src: tmpfiles-consul-template.conf.j2
22 |     dest: /etc/tmpfiles.d/consul-template.conf
23 |   register: tmpfilesd_result
24 | 
25 | - name: Ensure directories under tmpfiles.d exist
26 |   shell: "systemd-tmpfiles --create --remove"
27 |   when: tmpfilesd_result.changed
28 | 
29 | - name: Change group ownership on HAProxy directories
30 |   file:
31 |     path: /etc/haproxy
32 |     group: "{{ consul_template_user }}"
33 |     mode: 0775
34 |     recurse: yes
35 | 
--------------------------------------------------------------------------------
/aws/packer/fetch-tag.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Adapted from https://priocept.com/2017/02/14/aws-tag-retrieval-from-within-an-ec2-instance/
3 | 
4 | if [ -z "$1" ]; then
5 |   SCRIPT_NAME=`basename "$0"`
6 |   echo >&2 "Usage: $SCRIPT_NAME <tag-name>"
7 |   exit 1
8 | fi
9 | 
10 | # check that aws and ec2metadata commands are installed
11 | command -v aws >/dev/null 2>&1 || { echo >&2 'aws command not installed.'; exit 2; }
12 | command -v ec2metadata >/dev/null 2>&1 || { echo >&2 'ec2metadata command not installed.'; exit 3; }
13 | 
14 | INSTANCE_ID=$(ec2metadata --instance-id | cut -d ' ' -f2)
15 | FILTER_PARAMS=( --filters "Name=key,Values=$1" "Name=resource-type,Values=instance" "Name=resource-id,Values=$INSTANCE_ID" )
16 | 
17 | REGION=$(ec2metadata --availability-zone | cut -d ' ' -f2)
18 | REGION=${REGION%?}
19 | 
20 | TAG_VALUES=$(aws ec2 describe-tags --output text --region "$REGION" "${FILTER_PARAMS[@]}")
21 | if [ $? -ne 0 ]; then
22 |   echo >&2 "Error retrieving tag value."
23 | exit 4 24 | fi 25 | 26 | TAG_VALUE=$(echo "$TAG_VALUES" | cut -f5) 27 | echo "$TAG_VALUE" 28 | -------------------------------------------------------------------------------- /ansible/roles/consul/tasks/tls.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy CA crt 4 | copy: 5 | src: "{{ consul_ca_crt_file }}" 6 | dest: "{{ consul_config_dir }}/{{ consul_ca_crt_file|basename }}" 7 | owner: root 8 | group: consul 9 | 10 | - name: Copy crt 11 | copy: 12 | src: "{{ consul_crt_file }}" 13 | dest: "{{ consul_config_dir }}/{{ consul_crt_file|basename }}" 14 | owner: root 15 | group: consul 16 | mode: 0640 17 | 18 | - name: Copy key 19 | copy: 20 | src: "{{ consul_key_file }}" 21 | dest: "{{ consul_config_dir }}/{{ consul_key_file|basename }}" 22 | owner: root 23 | group: consul 24 | mode: 0640 25 | 26 | - name: Copy CA crt to system store 27 | copy: 28 | src: "{{ consul_config_dir }}/{{ consul_ca_crt_file|basename }}" 29 | dest: "/usr/local/share/ca-certificates/{{ consul_ca_crt_file|basename }}" 30 | owner: root 31 | group: root 32 | mode: 0644 33 | remote_src: yes 34 | register: consul_ca_system_store_result 35 | 36 | - name: Update system CA bundle 37 | shell: update-ca-certificates 38 | when: consul_ca_system_store_result.changed 39 | 40 | ... 41 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-consul-template/templates/consul_template_config.hcl.j2: -------------------------------------------------------------------------------- 1 | max_stale = "10m" 2 | log_level = "warn" 3 | pid_file = "{{ consul_template_pid_dir }}/consul-template.pid" 4 | wait { 5 | min = "5s" 6 | max = "10s" 7 | } 8 | 9 | consul { 10 | address = "127.0.0.1:8501" 11 | token = "{{ consul_template_consul_token }}" 12 | ssl { 13 | enabled = true 14 | verify = true 15 | cert = "{{ consul_template_config_dir }}/{{ consul_template_consulauth_crt_file|basename }}" 16 | key = "{{ consul_template_config_dir }}/{{ consul_template_consulauth_key_file|basename }}" 17 | ca_cert = "{{ consul_template_config_dir }}/{{ consul_template_consulauth_ca_crt_file|basename }}" 18 | } 19 | } 20 | 21 | template { 22 | source = "{{ consul_template_config_dir }}/templates/haproxy.cfg.ctmpl" 23 | destination = "/etc/haproxy/haproxy.cfg" 24 | create_dest_dirs = true 25 | command = "sudo /bin/systemctl restart haproxy" 26 | command_timeout = "60s" 27 | error_on_missing_key = true 28 | perms = 0600 29 | backup = true 30 | left_delimiter = "" 32 | wait { 33 | min = "2s" 34 | max = "10s" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /ansible/roles/consul/templates/agent-bootstrap.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CONSUL_OPTIONS="-ca-file=/etc/ssl/certs/ca-certificates.crt -client-cert={{ consul_config_dir }}/{{ consul_crt_file|basename }} -client-key={{ consul_config_dir }}/{{ consul_key_file|basename }} -http-addr=https://127.0.0.1:8501" 4 | 5 | systemctl stop consul 6 | echo > /var/log/consul/consul.log 7 | AGENT_MASTER_TOKEN=$(uuidgen | tr -d '[:space:]') 8 | RETRY_JOIN_CONFIG=$(cat /etc/consul.d/retry-join-config) 9 | rm -f /etc/consul.d/retry-join-config 10 | jq -r --arg agent_master_token "$AGENT_MASTER_TOKEN" '.acl.tokens.agent_master = $agent_master_token' /etc/consul.d/consul.json > /etc/consul.d/consul.json.new 11 | jq -r --arg retry_join_config "$RETRY_JOIN_CONFIG" '.retry_join = 
[$retry_join_config]' /etc/consul.d/consul.json.new > /etc/consul.d/consul.json 12 | rm -f /etc/consul.d/consul.json.new 13 | systemctl start consul 14 | sleep 15 15 | consul acl set-agent-token $CONSUL_OPTIONS -token=$AGENT_MASTER_TOKEN default "$(cat /etc/consul.d/agent-token)" 16 | consul acl set-agent-token $CONSUL_OPTIONS -token=$AGENT_MASTER_TOKEN agent "$(cat /etc/consul.d/agent-token)" 17 | rm -f /etc/consul.d/agent-token 18 | jq -r 'del(.encrypt) | del(.acl.tokens)' /etc/consul.d/consul.json > /etc/consul.d/consul.json.new 19 | mv /etc/consul.d/consul.json.new /etc/consul.d/consul.json 20 | systemctl restart consul 21 | touch /etc/consul.d/agent-bootstrap-complete 22 | -------------------------------------------------------------------------------- /ansible/roles/consul/templates/consul-config.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "acl": { 3 | "enabled": true, 4 | "default_policy": "deny", 5 | "enable_token_persistence": true 6 | }, 7 | {% if not consul_agent_only %} 8 | "bootstrap_expect": 3, 9 | "connect": { 10 | "enabled": false 11 | }, 12 | "client_addr": "0.0.0.0", 13 | {% endif %} 14 | "data_dir": "{{ consul_data_dir }}", 15 | "datacenter": "{{ consul_datacenter }}", 16 | "primary_datacenter": "{{ consul_datacenter }}", 17 | "encrypt": "{{ consul_gossip_encryption_key }}", 18 | "enable_agent_tls_for_checks": true, 19 | "encrypt_verify_incoming": true, 20 | "encrypt_verify_outgoing": true, 21 | "leave_on_terminate": true, 22 | "log_json": true, 23 | "peering": { 24 | "enabled": false 25 | }, 26 | "ports": { 27 | "dns": -1, 28 | "http": -1, 29 | "https": 8501, 30 | "grpc": -1, 31 | "grpc_tls": 8503 32 | }, 33 | "rejoin_after_leave": true, 34 | "server": {{ (not consul_agent_only)|lower }}, 35 | "tls": { 36 | "defaults": { 37 | "ca_file": "/etc/ssl/certs/ca-certificates.crt", 38 | "cert_file": "{{ consul_config_dir }}/{{ consul_crt_file|basename }}", 39 | "key_file": "{{ consul_config_dir }}/{{ consul_key_file|basename }}", 40 | "verify_incoming": true, 41 | "verify_outgoing": true 42 | }, 43 | "internal_rpc": { 44 | "verify_server_hostname": true 45 | } 46 | }, 47 | "ui_config": { 48 | "enabled": true 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /aws/packer/consul.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": null, 4 | "aws_secret_key": null, 5 | "region": null 6 | }, 7 | "builders": [ 8 | { 9 | "type": "amazon-ebs", 10 | "access_key": "{{user `aws_access_key`}}", 11 | "secret_key": "{{user `aws_secret_key`}}", 12 | "region": "{{user `region`}}", 13 | "source_ami": "ami-0fb391cce7a602d1f", 14 | "instance_type": "t2.medium", 15 | "ami_name": "consul-{{isotime | clean_resource_name}}", 16 | "ssh_username": "ubuntu", 17 | "tags": { 18 | "system": "consul" 19 | } 20 | } 21 | ], 22 | "provisioners": [ 23 | { 24 | "type": "shell", 25 | "inline": [ 26 | "timeout 60s bash -c \"while ! 
[ -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting on cloud-init...'; sleep 2; done\"", 27 | "sudo apt-get update", 28 | "sudo apt-get upgrade -y || true" 29 | ] 30 | }, 31 | { 32 | "type": "ansible", 33 | "playbook_file": "{{ template_dir }}/../../ansible/consul.yml", 34 | "groups": ["consul", "aws", "example"], 35 | "host_alias": "{{ replace_all \"-\" \"\" uuid }}", 36 | "inventory_directory": "{{ template_dir }}/../../ansible", 37 | "user": "ubuntu" 38 | } 39 | ] 40 | } 41 | -------------------------------------------------------------------------------- /aws/packer/vault.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": null, 4 | "aws_secret_key": null, 5 | "region": null 6 | }, 7 | "builders": [ 8 | { 9 | "type": "amazon-ebs", 10 | "access_key": "{{user `aws_access_key`}}", 11 | "secret_key": "{{user `aws_secret_key`}}", 12 | "region": "{{user `region`}}", 13 | "source_ami": "ami-0fb391cce7a602d1f", 14 | "instance_type": "t2.medium", 15 | "ami_name": "vault-{{isotime | clean_resource_name}}", 16 | "ssh_username": "ubuntu", 17 | "tags": { 18 | "system": "vault" 19 | } 20 | } 21 | ], 22 | "provisioners": [ 23 | { 24 | "type": "file", 25 | "source": "fetch-tag.sh", 26 | "destination": "/tmp/fetch-tag" 27 | }, 28 | { 29 | "type": "shell", 30 | "inline": [ 31 | "timeout 60s bash -c \"while ! [ -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting on cloud-init...'; sleep 2; done\"", 32 | "sudo apt-get update", 33 | "sudo apt-get upgrade -y || true", 34 | "sudo apt-get install awscli cloud-utils -y", 35 | "sudo mv /tmp/fetch-tag /usr/local/bin/", 36 | "sudo chmod a+x /usr/local/bin/fetch-tag" 37 | ] 38 | }, 39 | { 40 | "type": "ansible", 41 | "playbook_file": "{{ template_dir }}/../../ansible/vault.yml", 42 | "groups": ["vault", "aws", "example"], 43 | "host_alias": "{{ replace_all \"-\" \"\" uuid }}", 44 | "inventory_directory": "{{ template_dir }}/../../ansible", 45 | "user": "ubuntu" 46 | } 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /gcp/packer/vault.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "gcp_account_file_path": null, 4 | "gcp_project_id": null 5 | }, 6 | "builders": [ 7 | { 8 | "type": "googlecompute", 9 | "account_file": "{{user `gcp_account_file_path`}}", 10 | "project_id": "{{user `gcp_project_id`}}", 11 | "source_image": "ubuntu-2204-jammy-v20220810", 12 | "source_image_family": "ubuntu-2204-lts", 13 | "zone": "europe-west2-b", 14 | "disable_default_service_account": true, 15 | "image_name": "vault-{{isotime | clean_resource_name}}", 16 | "image_family": "vault", 17 | "ssh_username": "ubuntu", 18 | "machine_type": "n1-standard-1", 19 | "image_labels": { 20 | "system": "vault" 21 | } 22 | } 23 | ], 24 | "provisioners": [ 25 | { 26 | "type": "shell", 27 | "inline": [ 28 | "timeout 60s bash -c \"while ! 
[ -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting on cloud-init...'; sleep 2; done\"", 29 | "sudo apt-get update", 30 | "sudo apt-get upgrade -y || true" 31 | ] 32 | }, 33 | { 34 | "type": "ansible", 35 | "playbook_file": "{{ template_dir }}/../../ansible/vault.yml", 36 | "groups": ["vault", "gcp", "example"], 37 | "host_alias": "{{ replace_all \"-\" \"\" uuid }}", 38 | "inventory_directory": "{{ template_dir }}/../../ansible", 39 | "user": "ubuntu" 40 | } 41 | ] 42 | } 43 | -------------------------------------------------------------------------------- /gcp/packer/consul.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "gcp_account_file_path": null, 4 | "gcp_project_id": null 5 | }, 6 | "builders": [ 7 | { 8 | "type": "googlecompute", 9 | "account_file": "{{user `gcp_account_file_path`}}", 10 | "project_id": "{{user `gcp_project_id`}}", 11 | "source_image": "ubuntu-2204-jammy-v20220810", 12 | "source_image_family": "ubuntu-2204-lts", 13 | "zone": "europe-west2-b", 14 | "disable_default_service_account": true, 15 | "image_name": "consul-{{isotime | clean_resource_name}}", 16 | "image_family": "consul", 17 | "ssh_username": "ubuntu", 18 | "machine_type": "n1-standard-1", 19 | "image_labels": { 20 | "system": "consul" 21 | } 22 | } 23 | ], 24 | "provisioners": [ 25 | { 26 | "type": "shell", 27 | "inline": [ 28 | "timeout 60s bash -c \"while ! [ -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting on cloud-init...'; sleep 2; done\"", 29 | "sudo apt-get update", 30 | "sudo apt-get upgrade -y || true" 31 | ] 32 | }, 33 | { 34 | "type": "ansible", 35 | "playbook_file": "{{ template_dir }}/../../ansible/consul.yml", 36 | "groups": ["consul", "gcp", "example"], 37 | "host_alias": "{{ replace_all \"-\" \"\" uuid }}", 38 | "inventory_directory": "{{ template_dir }}/../../ansible", 39 | "user": "ubuntu" 40 | } 41 | ] 42 | } 43 | -------------------------------------------------------------------------------- /ansible/roles/vault/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install dependencies 4 | package: 5 | name: "{{ item }}" 6 | state: present 7 | loop: 8 | - unzip 9 | - jq 10 | 11 | - name: Fetch binary checksums file 12 | get_url: 13 | url: "https://releases.hashicorp.com/vault/{{ vault_version }}/vault_{{ vault_version }}_SHA256SUMS" 14 | dest: "{{ role_path }}/files/" 15 | force: yes 16 | delegate_to: localhost 17 | become: no 18 | when: vault_verify_binary_checksum 19 | 20 | - name: Find binary checksum 21 | shell: "grep {{ vault_download_filename }} {{ role_path }}/files/vault_{{ vault_version }}_SHA256SUMS | grep -oE '^\\S+'" 22 | delegate_to: localhost 23 | become: no 24 | when: vault_verify_binary_checksum 25 | register: vault_binary_expected_checksum 26 | 27 | - name: Fetch vault binary 28 | get_url: 29 | url: "https://releases.hashicorp.com/vault/{{ vault_version }}/{{ vault_download_filename }}" 30 | dest: "{{ role_path }}/files/" 31 | checksum: "{{ (vault_verify_binary_checksum)|ternary('sha256:'+vault_binary_expected_checksum.stdout, '') }}" 32 | delegate_to: localhost 33 | become: no 34 | 35 | - name: Remove existing binary 36 | file: 37 | path: "{{ vault_binary_install_dir }}/vault" 38 | state: absent 39 | when: vault_overwrite_binary 40 | 41 | - name: Install binary 42 | unarchive: 43 | src: "{{ vault_download_filename }}" 44 | dest: "{{ vault_binary_install_dir }}/" 45 | creates: "{{ vault_binary_install_dir 
}}/vault" 46 | owner: root 47 | group: root 48 | mode: 0755 49 | 50 | - name: Install vault systemd service unit 51 | template: 52 | src: vault_systemd.service.j2 53 | dest: /etc/systemd/system/vault.service 54 | owner: root 55 | group: root 56 | mode: 0644 57 | register: vault_systemd_unit_result 58 | 59 | - name: Reload systemd if necessary 60 | systemd: 61 | daemon_reload: yes 62 | enabled: yes 63 | name: vault 64 | when: vault_systemd_unit_result.changed 65 | 66 | ... 67 | -------------------------------------------------------------------------------- /ansible/roles/consul/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install dependencies 4 | package: 5 | name: "{{ item }}" 6 | state: present 7 | loop: 8 | - unzip 9 | - jq 10 | 11 | - name: Fetch binary checksums file 12 | get_url: 13 | url: "https://releases.hashicorp.com/consul/{{ consul_version }}/consul_{{ consul_version }}_SHA256SUMS" 14 | dest: "{{ role_path }}/files/" 15 | force: yes 16 | delegate_to: localhost 17 | become: no 18 | when: consul_verify_binary_checksum 19 | 20 | - name: Find binary checksum 21 | shell: "grep {{ consul_download_filename }} {{ role_path }}/files/consul_{{ consul_version }}_SHA256SUMS | grep -oE '^\\S+'" 22 | delegate_to: localhost 23 | become: no 24 | when: consul_verify_binary_checksum 25 | register: consul_binary_expected_checksum 26 | 27 | - name: Fetch consul binary 28 | get_url: 29 | url: "https://releases.hashicorp.com/consul/{{ consul_version }}/{{ consul_download_filename }}" 30 | dest: "{{ role_path }}/files/" 31 | checksum: "{{ (consul_verify_binary_checksum)|ternary('sha256:'+consul_binary_expected_checksum.stdout, '') }}" 32 | delegate_to: localhost 33 | become: no 34 | 35 | - name: Remove existing binary 36 | file: 37 | path: "{{ consul_binary_install_dir }}/consul" 38 | state: absent 39 | when: consul_overwrite_binary 40 | 41 | - name: Install binary 42 | unarchive: 43 | src: "{{ consul_download_filename }}" 44 | dest: "{{ consul_binary_install_dir }}/" 45 | creates: "{{ consul_binary_install_dir }}/consul" 46 | owner: root 47 | group: root 48 | mode: 0755 49 | 50 | - name: Install consul systemd service unit 51 | template: 52 | src: consul_systemd.service.j2 53 | dest: /etc/systemd/system/consul.service 54 | owner: root 55 | group: root 56 | mode: 0644 57 | register: consul_systemd_unit_result 58 | 59 | - name: Reload systemd if necessary 60 | systemd: 61 | daemon_reload: yes 62 | enabled: yes 63 | name: consul 64 | when: consul_systemd_unit_result.changed 65 | 66 | ... 
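Both install task files above verify the downloaded zip against HashiCorp's published SHA256SUMS file before unpacking it. The equivalent manual check, should a cached download ever be in doubt:

    cd ansible/roles/consul/files
    sha256sum -c --ignore-missing consul_1.15.2_SHA256SUMS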
67 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-consul-template/templates/haproxy.cfg.ctmpl.j2: -------------------------------------------------------------------------------- 1 | global 2 | log 127.0.0.1 local0 info 3 | log 127.0.0.1 local0 notice 4 | maxconn 1000 5 | daemon 6 | spread-checks 4 7 | 8 | defaults 9 | mode tcp 10 | log global 11 | balance roundrobin 12 | retries 3 13 | option abortonclose 14 | option dontlognull 15 | option log-health-checks 16 | timeout connect 10s 17 | timeout client 30s 18 | timeout server 30s 19 | 20 | frontend haproxy-monitoring 21 | bind *:80 {{ "accept-proxy" if haproxy_enable_incoming_proxy_protocol else "" }} 22 | mode http 23 | stats enable 24 | stats show-legends 25 | stats uri /haproxy-stats 26 | stats admin if FALSE 27 | 28 | frontend vault 29 | mode tcp 30 | bind *:443 {{ "accept-proxy" if haproxy_enable_incoming_proxy_protocol else "" }} 31 | tcp-request inspect-delay 5s 32 | tcp-request content accept if { req_ssl_hello_type 1 } 33 | default_backend vault 34 | 35 | # For each individual server 36 | 37 | acl req_ssl_sni -i .{{ vault_lb_hostname }} 38 | use_backend if 39 | 40 | 41 | acl vault req_ssl_sni -i {{ vault_lb_hostname }} 42 | use_backend vault if vault 43 | 44 | 45 | backend 46 | mode tcp 47 | option httpchk GET /v1/sys/health?standbyok=true&sealedcode=200&standbycode=200&uninitcode=200 48 | http-check expect status 200 49 | default-server inter 2s fastinter 1s downinter 1s 50 | server :8200 check check-ssl port 8200 ca-file /etc/ssl/certs/vault-ca.pem send-proxy check-send-proxy 51 | 52 | 53 | backend vault 54 | mode tcp 55 | option httpchk GET /v1/sys/health?standbyok=true 56 | http-check expect status 200 57 | default-server inter 2s fastinter 1s downinter 1s 58 | server :8200 check check-ssl port 8200 ca-file /etc/ssl/certs/vault-ca.pem send-proxy check-send-proxy 59 | 60 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-consul-template/tasks/tls.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy Vault CA crt 4 | copy: 5 | src: "{{ vault_ca_crt_file }}" 6 | dest: "{{ consul_template_config_dir }}/{{ vault_ca_crt_file|basename }}" 7 | owner: root 8 | group: "{{ consul_template_user }}" 9 | 10 | - name: Copy ConsulAuth CA crt 11 | copy: 12 | src: "{{ consul_template_consulauth_ca_crt_file }}" 13 | dest: "{{ consul_template_config_dir }}/{{ consul_template_consulauth_ca_crt_file|basename }}" 14 | owner: root 15 | group: "{{ consul_template_user }}" 16 | 17 | - name: Copy ConsulAuth crt 18 | copy: 19 | src: "{{ consul_template_consulauth_crt_file }}" 20 | dest: "{{ consul_template_config_dir }}/{{ consul_template_consulauth_crt_file|basename }}" 21 | owner: root 22 | group: "{{ consul_template_user }}" 23 | mode: 0640 24 | 25 | - name: Copy ConsulAuth key 26 | copy: 27 | src: "{{ consul_template_consulauth_key_file }}" 28 | dest: "{{ consul_template_config_dir }}/{{ consul_template_consulauth_key_file|basename }}" 29 | owner: root 30 | group: "{{ consul_template_user }}" 31 | mode: 0640 32 | 33 | - name: Copy Vault CA crt to system store 34 | copy: 35 | src: "{{ consul_template_config_dir }}/{{ vault_ca_crt_file|basename }}" 36 | dest: "/usr/local/share/ca-certificates/{{ vault_ca_crt_file|basename }}" 37 | owner: root 38 | group: root 39 | mode: 0644 40 | remote_src: yes 41 | register: vault_ca_system_store_result 42 | 43 | - name: Copy Consul CA crt to 
system store 44 | copy: 45 | src: "{{ consul_template_config_dir }}/{{ consul_template_consulauth_ca_crt_file|basename }}" 46 | dest: "/usr/local/share/ca-certificates/{{ consul_template_consulauth_ca_crt_file|basename }}" 47 | owner: root 48 | group: root 49 | mode: 0644 50 | remote_src: yes 51 | register: consul_template_consulauth_ca_system_store_result 52 | 53 | - name: Update system CA bundle 54 | shell: update-ca-certificates 55 | when: vault_ca_system_store_result.changed or consul_template_consulauth_ca_system_store_result.changed 56 | 57 | ... 58 | -------------------------------------------------------------------------------- /azure/packer/vault.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "subscription_id": null, 4 | "location": null, 5 | "resource_group": null 6 | }, 7 | "builders": [ 8 | { 9 | "name": "vault", 10 | "type": "azure-arm", 11 | "use_azure_cli_auth": true, 12 | "location": "{{user `location`}}", 13 | "subscription_id": "{{user `subscription_id`}}", 14 | "image_publisher": "Canonical", 15 | "image_offer": "0001-com-ubuntu-server-jammy", 16 | "image_sku": "22_04-LTS", 17 | "os_type": "Linux", 18 | "vm_size": "Standard_D2_v3", 19 | "managed_image_name": "vault-{{isotime | clean_resource_name}}", 20 | "managed_image_resource_group_name": "{{user `resource_group`}}" 21 | } 22 | ], 23 | "provisioners": [ 24 | { 25 | "type": "shell", 26 | "inline": [ 27 | "timeout 60s bash -c \"while ! [ -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting on cloud-init...'; sleep 2; done\"", 28 | "sudo apt-get update", 29 | "sudo apt-get upgrade -y || true" 30 | ] 31 | }, 32 | { 33 | "type": "ansible", 34 | "playbook_file": "{{ template_dir }}/../../ansible/vault.yml", 35 | "groups": ["vault", "azure", "example"], 36 | "host_alias": "{{ replace_all \"-\" \"\" uuid }}", 37 | "inventory_directory": "{{ template_dir }}/../../ansible", 38 | "user": "packer" 39 | }, 40 | { 41 | "execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'", 42 | "inline": [ 43 | "/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync" 44 | ], 45 | "inline_shebang": "/bin/sh -x", 46 | "type": "shell" 47 | } 48 | ] 49 | } 50 | -------------------------------------------------------------------------------- /azure/packer/consul.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "subscription_id": null, 4 | "location": null, 5 | "resource_group": null 6 | }, 7 | "builders": [ 8 | { 9 | "name": "consul", 10 | "type": "azure-arm", 11 | "use_azure_cli_auth": true, 12 | "location": "{{user `location`}}", 13 | "subscription_id": "{{user `subscription_id`}}", 14 | "image_publisher": "Canonical", 15 | "image_offer": "0001-com-ubuntu-server-jammy", 16 | "image_sku": "22_04-LTS", 17 | "os_type": "Linux", 18 | "vm_size": "Standard_D2_v3", 19 | "managed_image_name": "consul-{{isotime | clean_resource_name}}", 20 | "managed_image_resource_group_name": "{{user `resource_group`}}" 21 | } 22 | ], 23 | "provisioners": [ 24 | { 25 | "type": "shell", 26 | "inline": [ 27 | "timeout 60s bash -c \"while ! 
[ -f /var/lib/cloud/instance/boot-finished ]; do echo 'Waiting on cloud-init...'; sleep 2; done\"", 28 | "sudo apt-get update", 29 | "sudo apt-get upgrade -y || true" 30 | ] 31 | }, 32 | { 33 | "type": "ansible", 34 | "playbook_file": "{{ template_dir }}/../../ansible/consul.yml", 35 | "groups": ["consul", "azure", "example"], 36 | "host_alias": "{{ replace_all \"-\" \"\" uuid }}", 37 | "inventory_directory": "{{ template_dir }}/../../ansible", 38 | "user": "packer" 39 | }, 40 | { 41 | "execute_command": "chmod +x {{ .Path }}; {{ .Vars }} sudo -E sh '{{ .Path }}'", 42 | "inline": [ 43 | "/usr/sbin/waagent -force -deprovision+user && export HISTSIZE=0 && sync" 44 | ], 45 | "inline_shebang": "/bin/sh -x", 46 | "type": "shell" 47 | } 48 | ] 49 | } 50 | -------------------------------------------------------------------------------- /ansible/roles/vault/tasks/tls.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Copy Vault CA crt 4 | copy: 5 | src: "{{ vault_ca_crt_file }}" 6 | dest: "{{ vault_config_dir }}/{{ vault_ca_crt_file|basename }}" 7 | owner: root 8 | group: vault 9 | 10 | - name: Copy Vault crt 11 | copy: 12 | src: "{{ vault_crt_file }}" 13 | dest: "{{ vault_config_dir }}/{{ vault_crt_file|basename }}" 14 | owner: root 15 | group: vault 16 | mode: 0640 17 | 18 | - name: Copy Vault key 19 | copy: 20 | src: "{{ vault_key_file }}" 21 | dest: "{{ vault_config_dir }}/{{ vault_key_file|basename }}" 22 | owner: root 23 | group: vault 24 | mode: 0640 25 | 26 | - name: Copy ConsulAuth CA crt 27 | copy: 28 | src: "{{ vault_consulauth_ca_crt_file }}" 29 | dest: "{{ vault_config_dir }}/{{ vault_consulauth_ca_crt_file|basename }}" 30 | owner: root 31 | group: vault 32 | 33 | - name: Copy ConsulAuth crt 34 | copy: 35 | src: "{{ vault_consulauth_crt_file }}" 36 | dest: "{{ vault_config_dir }}/{{ vault_consulauth_crt_file|basename }}" 37 | owner: root 38 | group: vault 39 | mode: 0640 40 | 41 | - name: Copy ConsulAuth key 42 | copy: 43 | src: "{{ vault_consulauth_key_file }}" 44 | dest: "{{ vault_config_dir }}/{{ vault_consulauth_key_file|basename }}" 45 | owner: root 46 | group: vault 47 | mode: 0640 48 | 49 | - name: Copy Vault CA crt to system store 50 | copy: 51 | src: "{{ vault_config_dir }}/{{ vault_ca_crt_file|basename }}" 52 | dest: "/usr/local/share/ca-certificates/{{ vault_ca_crt_file|basename }}" 53 | owner: root 54 | group: root 55 | mode: 0644 56 | remote_src: yes 57 | register: vault_ca_system_store_result 58 | 59 | - name: Copy Consul CA crt to system store 60 | copy: 61 | src: "{{ vault_config_dir }}/{{ vault_consulauth_ca_crt_file|basename }}" 62 | dest: "/usr/local/share/ca-certificates/{{ vault_consulauth_ca_crt_file|basename }}" 63 | owner: root 64 | group: root 65 | mode: 0644 66 | remote_src: yes 67 | register: vault_consulauth_ca_system_store_result 68 | 69 | - name: Update system CA bundle 70 | shell: update-ca-certificates 71 | when: vault_ca_system_store_result.changed or vault_consulauth_ca_system_store_result.changed 72 | 73 | ... 74 | -------------------------------------------------------------------------------- /tls-bootstrap/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | CONSUL_DATACENTRE="example-dc" 4 | VAULT_PKI_ROLE_NAME="bootstrap" 5 | 6 | export VAULT_ADDR=http://127.0.0.1:8200 7 | export VAULT_FORMAT=json 8 | 9 | rm -f *.crt *.key 10 | vault server -dev > /dev/null 2>&1 & 11 | sleep 2 12 | VAULT_SERVER_PID="$!" 
13 | 14 | setup_pki_engine_for() { 15 | TARGET_SYSTEM="$1" 16 | SECRET_BACKEND_PATH="${TARGET_SYSTEM}-https-root" 17 | vault secrets enable -path="${SECRET_BACKEND_PATH}" pki > /dev/null 2>&1 18 | vault secrets tune -max-lease-ttl=876000h -default-lease-ttl=876000h $SECRET_BACKEND_PATH > /dev/null 2>&1 # 100 years 19 | CA_JSON="$(vault write "${SECRET_BACKEND_PATH}/root/generate/internal" common_name="${SECRET_BACKEND_PATH}BootstrapCA")" 20 | vault write "${SECRET_BACKEND_PATH}/roles/${VAULT_PKI_ROLE_NAME}" allow_any_name=true no_store=true > /dev/null 2>&1 21 | echo "$CA_JSON" | jq -r '.data.certificate' > "${TARGET_SYSTEM}-ca.crt" 22 | } 23 | 24 | generate_certificate_for() { 25 | TARGET_SYSTEM="$1" 26 | SECRET_BACKEND_PATH="${TARGET_SYSTEM}-https-root" 27 | if [[ ! -z "$2" ]]; then 28 | SECRET_BACKEND_PATH="${2}-https-root" 29 | fi 30 | CERT_JSON=$(vault write "${SECRET_BACKEND_PATH}/issue/${VAULT_PKI_ROLE_NAME}" common_name="$CERTIFICATE_DOMAIN" alt_names="${ALT_NAMES}" ip_sans="127.0.0.1" ttl=875999h) 31 | echo "$CERT_JSON" | jq -r '.data.certificate' > "${TARGET_SYSTEM}.crt" 32 | echo "$CERT_JSON" | jq -r '.data.private_key' > "${TARGET_SYSTEM}.key" 33 | } 34 | 35 | # Change the CERTIFICATE_DOMAIN variable to match your domain, keep the 'consul.' at the start 36 | CERTIFICATE_DOMAIN="consul.example.com" 37 | setup_pki_engine_for "consul" 38 | 39 | ALT_NAMES="*.${CERTIFICATE_DOMAIN},server.${CONSUL_DATACENTRE}.consul,localhost" 40 | generate_certificate_for "consul-servers" "consul" 41 | 42 | ALT_NAMES="*.${CERTIFICATE_DOMAIN},localhost" 43 | generate_certificate_for "consul-agents" "consul" 44 | 45 | cp consul* ../ansible/ 46 | 47 | # Change the CERTIFICATE_DOMAIN variable to match your domain, keep the 'vault.' at the start 48 | CERTIFICATE_DOMAIN="vault.example.com" 49 | ALT_NAMES="*.${CERTIFICATE_DOMAIN},localhost" 50 | setup_pki_engine_for "vault" 51 | generate_certificate_for "vault" 52 | cp vault* ../ansible/ 53 | 54 | kill "$VAULT_SERVER_PID" 55 | -------------------------------------------------------------------------------- /ansible/roles/haproxy-consul-template/tasks/install-consul-template.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install dependencies 4 | package: 5 | name: unzip 6 | state: present 7 | 8 | - name: Fetch binary checksums file 9 | get_url: 10 | url: "https://releases.hashicorp.com/consul-template/{{ consul_template_version }}/consul-template_{{ consul_template_version }}_SHA256SUMS" 11 | dest: "{{ role_path }}/files/" 12 | force: yes 13 | delegate_to: localhost 14 | become: no 15 | when: consul_template_verify_binary_checksum 16 | 17 | - name: Find binary checksum 18 | shell: "grep {{ consul_template_download_filename }} {{ role_path }}/files/consul-template_{{ consul_template_version }}_SHA256SUMS | grep -oE '^\\S+'" 19 | delegate_to: localhost 20 | become: no 21 | when: consul_template_verify_binary_checksum 22 | register: consul_template_binary_expected_checksum 23 | 24 | - name: Fetch consul-template binary 25 | get_url: 26 | url: "https://releases.hashicorp.com/consul-template/{{ consul_template_version }}/{{ consul_template_download_filename }}" 27 | dest: "{{ role_path }}/files/" 28 | checksum: "{{ (consul_template_verify_binary_checksum)|ternary('sha256:'+consul_template_binary_expected_checksum.stdout, '') }}" 29 | delegate_to: localhost 30 | become: no 31 | 32 | - name: Remove existing binary 33 | file: 34 | path: "{{ consul_template_binary_install_dir }}/consul-template" 35 | 
state: absent 36 | when: consul_template_overwrite_binary 37 | 38 | - name: Install binary 39 | unarchive: 40 | src: "{{ consul_template_download_filename }}" 41 | dest: "{{ consul_template_binary_install_dir }}/" 42 | creates: "{{ consul_template_binary_install_dir }}/consul-template" 43 | owner: root 44 | group: root 45 | mode: 0755 46 | notify: restart_consul_template 47 | 48 | - name: Install consul-template systemd service unit 49 | template: 50 | src: consul-template_systemd_unit.service.j2 51 | dest: /etc/systemd/system/consul-template.service 52 | owner: root 53 | group: root 54 | mode: 0644 55 | register: consul_template_systemd_unit_result 56 | 57 | - name: Reload systemd if necessary 58 | systemd: 59 | daemon_reload: yes 60 | enabled: yes 61 | name: consul-template 62 | when: consul_template_systemd_unit_result.changed 63 | notify: restart_consul_template 64 | 65 | - name: Create consul-template user 66 | user: 67 | name: "{{ consul_template_user }}" 68 | password: "{{ consul_template_user_password_hash }}" 69 | system: yes 70 | 71 | ... 72 | -------------------------------------------------------------------------------- /gcp/terraform/main.tf: -------------------------------------------------------------------------------- 1 | variable vault_hostname {} 2 | variable consul_hostname {} 3 | variable trusted_external_ips { type = list(string) } 4 | variable credentials {} 5 | variable project {} 6 | variable region {} 7 | variable vault_instance_image_filters { type = list(string) } 8 | 9 | provider google { 10 | credentials = var.credentials 11 | project = var.project 12 | region = var.region 13 | } 14 | 15 | locals { 16 | vault_proxy_authorized_addresses = jsonencode(concat(["127.0.0.1"], [data.google_compute_subnetwork.default.ip_cidr_range])) 17 | consul_retry_join_config = "provider=gce tag_value=consul-${data.google_client_config.current.region}" 18 | } 19 | 20 | data google_client_config current {} 21 | 22 | data google_compute_network default { 23 | name = "default" 24 | } 25 | 26 | data google_compute_subnetwork default { 27 | name = "default" 28 | region = data.google_client_config.current.region 29 | } 30 | 31 | data google_compute_image consul { 32 | family = "consul" 33 | } 34 | 35 | data google_compute_image vault { 36 | count = 2 37 | name = var.vault_instance_image_filters[count.index] 38 | } 39 | 40 | resource google_project_iam_custom_role get_compute_instances { 41 | role_id = "getComputeInstances" 42 | title = "Get Compute Instances" 43 | 44 | permissions = [ 45 | "compute.instanceGroupManagers.get", 46 | "compute.instanceGroupManagers.list", 47 | "compute.instanceGroups.get", 48 | "compute.instanceGroups.list", 49 | "compute.instances.get", 50 | "compute.instances.list", 51 | "compute.zones.list", 52 | "compute.zones.get" 53 | ] 54 | } 55 | 56 | resource google_project_iam_binding consul_get_compute_instances { 57 | project = data.google_client_config.current.project 58 | members = ["serviceAccount:${google_service_account.consul.email}"] 59 | role = "projects/${data.google_client_config.current.project}/roles/${google_project_iam_custom_role.get_compute_instances.role_id}" 60 | } 61 | 62 | resource google_service_account consul { 63 | account_id = "consul" 64 | } 65 | 66 | resource google_compute_health_check consul_autoheal { 67 | name = "consul-autoheal" 68 | timeout_sec = 30 69 | check_interval_sec = 30 70 | 71 | tcp_health_check { 72 | port = 8501 73 | } 74 | } 75 | 76 | resource google_compute_instance_template consul { 77 | name_prefix = "consul" 78 |
machine_type = "n1-standard-1" 79 | tags = ["consul-${data.google_client_config.current.region}", "consul"] 80 | 81 | metadata_startup_script = <<EOF 82 | #!/bin/bash 83 | echo "${local.consul_retry_join_config}" > /etc/consul.d/retry-join-config 84 | 85 | if [[ ! -e /etc/consul.d/agent-bootstrap-complete ]]; then 86 | source /etc/consul.d/agent-bootstrap.sh 87 | fi 88 | 89 | systemctl start consul 90 | systemctl enable consul 91 | EOF 92 | 93 | disk { 94 | source_image = data.google_compute_image.consul.self_link 95 | } 96 | 97 | network_interface { 98 | subnetwork = data.google_compute_subnetwork.default.self_link 99 | 100 | access_config { 101 | network_tier = "STANDARD" 102 | } 103 | } 104 | 105 | service_account { 106 | email = google_service_account.consul.email 107 | scopes = ["compute-ro"] 108 | } 109 | 110 | lifecycle { 111 | create_before_destroy = true 112 | } 113 | } 114 | 115 | resource google_compute_region_instance_group_manager consul { 116 | base_instance_name = "consul" 117 | name = "consul" 118 | region = data.google_client_config.current.region 119 | target_size = 3 120 | target_pools = [google_compute_target_pool.consul.self_link] 121 | wait_for_instances = true 122 | 123 | version { 124 | name = "consul" 125 | instance_template = google_compute_instance_template.consul.self_link 126 | } 127 | 128 | # Needs global health check 129 | # auto_healing_policies { 130 | # health_check = google_compute_region_health_check.consul_autoheal.self_link 131 | # initial_delay_sec = 300 132 | # } 133 | } 134 | 135 | resource google_compute_target_pool consul { 136 | name = "consul" 137 | } 138 | 139 | resource google_compute_forwarding_rule consul { 140 | name = "consul" 141 | ip_protocol = "TCP" 142 | load_balancing_scheme = "EXTERNAL" 143 | port_range = "8501" 144 | target = google_compute_target_pool.consul.self_link 145 | network_tier = "STANDARD" 146 | } 147 | 148 | resource google_compute_instance vault { 149 | count = 2 150 | name = "vault-${count.index}" 151 | machine_type = "n1-standard-1" 152 | tags = ["consul-${data.google_client_config.current.region}", "consul", "vault", "haproxy"] 153 | zone = "${data.google_client_config.current.region}-a" 154 | hostname = "vault-${count.index}.${var.vault_hostname}" 155 | 156 | metadata_startup_script = <<EOF 157 | #!/bin/bash 158 | 159 | current_ip=$(curl -s -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/network-interfaces/0/ip) # instance private IP from the GCP metadata service 160 | cluster_addr="https://$${current_ip}:8201" 161 | jq --arg cluster_addr $cluster_addr '.cluster_addr = $cluster_addr' /etc/vault.d/vault.json > /etc/vault.d/vault.json.new 162 | mv /etc/vault.d/vault.json.new /etc/vault.d/vault.json 163 | 164 | echo "${local.consul_retry_join_config}" > /etc/consul.d/retry-join-config 165 | 166 | if [[ ! -e /etc/consul.d/agent-bootstrap-complete ]]; then 167 | source /etc/consul.d/agent-bootstrap.sh 168 | fi 169 | 170 | if [[ !
-e /etc/vault.d/bootstrap-complete ]]; then 171 | jq '.listener[0].tcp.proxy_protocol_authorized_addrs = ${local.vault_proxy_authorized_addresses}' /etc/vault.d/vault.json > /etc/vault.d/vault.json.new 172 | jq '.api_addr = "https://${var.vault_hostname}"' /etc/vault.d/vault.json.new > /etc/vault.d/vault.json 173 | systemctl restart vault 174 | touch /etc/vault.d/bootstrap-complete 175 | fi 176 | EOF 177 | 178 | boot_disk { 179 | initialize_params { 180 | image = data.google_compute_image.vault[count.index].self_link 181 | } 182 | } 183 | 184 | network_interface { 185 | subnetwork = data.google_compute_subnetwork.default.self_link 186 | 187 | access_config { 188 | network_tier = "STANDARD" 189 | } 190 | } 191 | 192 | service_account { 193 | email = google_service_account.consul.email 194 | scopes = ["compute-ro"] 195 | } 196 | } 197 | 198 | resource google_compute_target_pool vault { 199 | name = "vault" 200 | instances = google_compute_instance.vault.*.self_link 201 | health_checks = [google_compute_http_health_check.vault.self_link] 202 | } 203 | 204 | resource google_compute_http_health_check vault { 205 | name = "vault" 206 | request_path = "/haproxy-stats" 207 | } 208 | 209 | resource google_compute_forwarding_rule haproxy_stats { 210 | name = "haproxy-stats" 211 | ip_protocol = "TCP" 212 | load_balancing_scheme = "EXTERNAL" 213 | port_range = "80" 214 | target = google_compute_target_pool.vault.self_link 215 | network_tier = "STANDARD" 216 | } 217 | 218 | resource google_compute_forwarding_rule vault_https { 219 | name = "vault-https" 220 | ip_protocol = "TCP" 221 | load_balancing_scheme = "EXTERNAL" 222 | port_range = "443" 223 | target = google_compute_target_pool.vault.self_link 224 | network_tier = "STANDARD" 225 | } 226 | 227 | resource null_resource consul_acl_bootstrap { 228 | triggers = { 229 | instance_group_id = google_compute_region_instance_group_manager.consul.self_link 230 | } 231 | 232 | provisioner "local-exec" { 233 | interpreter = ["/bin/bash", "-c"] 234 | environment = { 235 | CONSUL_CACERT = abspath("${path.module}/../../ansible/consul-ca.crt") 236 | CONSUL_CLIENT_CERT = abspath("${path.module}/../../ansible/consul-agents.crt") 237 | CONSUL_CLIENT_KEY = abspath("${path.module}/../../ansible/consul-agents.key") 238 | CONSUL_HTTP_ADDR = "https://${var.consul_hostname}:8501" 239 | } 240 | 241 | command = <<EOF 242 | 243 | # Retry until the Consul cluster is up and the ACL bootstrap succeeds 244 | until BOOTSTRAP_JSON="$(consul acl bootstrap -format=json)"; do 245 | echo "Waiting to bootstrap Consul ACL system..." 246 | sleep 5 247 | done 248 | 249 | # Capture the master token for the operator 250 | echo "$BOOTSTRAP_JSON" | jq -r '.SecretID' > master-token 251 | consul acl policy create -token-file master-token -name "agent" -rules @${abspath("${path.module}/../../ansible/roles/consul/files/policies/agent.hcl")} > /dev/null 252 | consul acl token create -token-file master-token -policy-name "agent" -secret $(cat ${abspath("${path.module}/../../ansible/roles/consul/files/tokens/agent")}) > /dev/null 253 | consul acl policy create -token-file master-token -name "haproxy" -rules @${abspath("${path.module}/../../ansible/roles/consul/files/policies/haproxy.hcl")} > /dev/null 254 | consul acl token create -token-file master-token -policy-name "haproxy" -secret $(cat ${abspath("${path.module}/../../ansible/roles/consul/files/tokens/haproxy")}) > /dev/null 255 | consul acl policy create -token-file master-token -name "vault" -rules @${abspath("${path.module}/../../ansible/roles/consul/files/policies/vault.hcl")} > /dev/null 256 | consul acl token create -token-file master-token -policy-name "vault" -secret $(cat ${abspath("${path.module}/../../ansible/roles/consul/files/tokens/vault")}) > /dev/null 257 | EOF 258 | } 259 | } 260 | 261 | resource google_compute_firewall consul_https_external { 262 | name =
"consul-https-external" 263 | network = data.google_compute_network.default.self_link 264 | priority = 800 265 | source_ranges = var.trusted_external_ips 266 | target_tags = ["consul"] 267 | 268 | allow { 269 | protocol = "tcp" 270 | ports = ["8501"] 271 | } 272 | } 273 | 274 | resource google_compute_firewall haproxy_http_https_external { 275 | name = "haproxy-http-https-external" 276 | network = data.google_compute_network.default.self_link 277 | priority = 810 278 | source_ranges = var.trusted_external_ips 279 | target_tags = ["haproxy"] 280 | 281 | allow { 282 | protocol = "tcp" 283 | ports = ["80", "443"] 284 | } 285 | } 286 | 287 | resource google_compute_firewall consul_internal_rpc { 288 | name = "consul-internal-rpc" 289 | network = data.google_compute_network.default.self_link 290 | priority = 820 291 | source_tags = ["consul"] 292 | target_tags = ["consul"] 293 | 294 | allow { 295 | ports = ["8300"] 296 | protocol = "tcp" 297 | } 298 | 299 | allow { 300 | ports = ["8300"] 301 | protocol = "udp" 302 | } 303 | } 304 | 305 | resource google_compute_firewall consul_lan_serf { 306 | name = "consul-lan-serf" 307 | network = data.google_compute_network.default.self_link 308 | priority = 830 309 | source_tags = ["consul"] 310 | target_tags = ["consul"] 311 | 312 | allow { 313 | ports = ["8301"] 314 | protocol = "tcp" 315 | } 316 | 317 | allow { 318 | ports = ["8301"] 319 | protocol = "udp" 320 | } 321 | } 322 | 323 | resource google_compute_firewall consul_https_internal { 324 | name = "consul-https-internal" 325 | network = data.google_compute_network.default.self_link 326 | priority = 840 327 | source_tags = ["consul"] 328 | target_tags = ["consul"] 329 | 330 | allow { 331 | ports = ["8501"] 332 | protocol = "tcp" 333 | } 334 | } 335 | 336 | resource google_compute_firewall consul_grpc_internal { 337 | name = "consul-grpc-internal" 338 | network = data.google_compute_network.default.self_link 339 | priority = 850 340 | source_tags = ["consul"] 341 | target_tags = ["consul"] 342 | 343 | allow { 344 | ports = ["8503"] 345 | protocol = "tcp" 346 | } 347 | } 348 | 349 | resource google_compute_firewall consul_https_vault { 350 | name = "consul-https-vault" 351 | network = data.google_compute_network.default.self_link 352 | priority = 860 353 | source_tags = ["vault"] 354 | target_tags = ["consul"] 355 | 356 | allow { 357 | ports = ["8501"] 358 | protocol = "tcp" 359 | } 360 | } 361 | 362 | resource google_compute_firewall consul_https_haproxy { 363 | name = "consul-https-haproxy" 364 | network = data.google_compute_network.default.self_link 365 | priority = 870 366 | source_tags = ["haproxy"] 367 | target_tags = ["consul"] 368 | 369 | allow { 370 | ports = ["8501"] 371 | protocol = "tcp" 372 | } 373 | } 374 | 375 | resource google_compute_firewall vault_https_internal { 376 | name = "vault-https-internal" 377 | network = data.google_compute_network.default.self_link 378 | priority = 880 379 | source_tags = ["vault"] 380 | target_tags = ["vault"] 381 | 382 | allow { 383 | ports = ["8200"] 384 | protocol = "tcp" 385 | } 386 | } 387 | 388 | resource google_compute_firewall vault_https_haproxy { 389 | name = "vault-https-haproxy" 390 | network = data.google_compute_network.default.self_link 391 | priority = 890 392 | source_tags = ["haproxy"] 393 | target_tags = ["vault"] 394 | 395 | allow { 396 | ports = ["8200"] 397 | protocol = "tcp" 398 | } 399 | } 400 | 401 | resource google_compute_firewall vault_cluster_internal { 402 | name = "vault-cluster-internal" 403 | network = 
data.google_compute_network.default.self_link 404 | priority = 900 405 | source_tags = ["vault"] 406 | target_tags = ["vault"] 407 | 408 | allow { 409 | ports = ["8201"] 410 | protocol = "tcp" 411 | } 412 | } 413 | 414 | resource google_compute_firewall consul_deny_all { 415 | name = "consul-deny-all" 416 | network = data.google_compute_network.default.self_link 417 | priority = 910 418 | source_ranges = ["0.0.0.0/0"] 419 | target_tags = ["consul"] 420 | 421 | deny { 422 | protocol = "tcp" 423 | } 424 | 425 | deny { 426 | protocol = "udp" 427 | } 428 | 429 | deny { 430 | protocol = "icmp" 431 | } 432 | } 433 | 434 | resource google_compute_firewall vault_deny_all { 435 | name = "vault-deny-all" 436 | network = data.google_compute_network.default.self_link 437 | priority = 920 438 | source_ranges = ["0.0.0.0/0"] 439 | target_tags = ["vault"] 440 | 441 | deny { 442 | protocol = "tcp" 443 | } 444 | 445 | deny { 446 | protocol = "udp" 447 | } 448 | 449 | deny { 450 | protocol = "icmp" 451 | } 452 | } 453 | 454 | resource google_compute_firewall haproxy_deny_all { 455 | name = "haproxy-deny-all" 456 | network = data.google_compute_network.default.self_link 457 | priority = 930 458 | source_ranges = ["0.0.0.0/0"] 459 | target_tags = ["haproxy"] 460 | 461 | deny { 462 | protocol = "tcp" 463 | } 464 | 465 | deny { 466 | protocol = "udp" 467 | } 468 | 469 | deny { 470 | protocol = "icmp" 471 | } 472 | } 473 | -------------------------------------------------------------------------------- /aws/terraform/main.tf: -------------------------------------------------------------------------------- 1 | variable vault_hostname {} 2 | variable consul_hostname {} 3 | variable trusted_external_ips { type = list(string) } 4 | variable vault_instance_image_filters { type = list(string) } 5 | 6 | provider aws { 7 | region = "eu-west-2" 8 | } 9 | 10 | locals { 11 | vault_proxy_authorized_addresses = jsonencode(concat(["127.0.0.1"], tolist(data.aws_subnet.default.*.cidr_block))) 12 | consul_retry_join_config = "provider=aws tag_key=consul_cluster tag_value=eu-west-2" 13 | } 14 | 15 | data aws_vpc default { 16 | default = true 17 | } 18 | 19 | data aws_ami consul { 20 | owners = ["self"] 21 | most_recent = true 22 | 23 | filter { 24 | name = "tag:system" 25 | values = ["consul"] 26 | } 27 | 28 | filter { 29 | name = "name" 30 | values = ["consul-*"] 31 | } 32 | } 33 | 34 | data aws_ami vault { 35 | count = 2 36 | owners = ["self"] 37 | most_recent = true 38 | 39 | filter { 40 | name = "tag:system" 41 | values = ["vault"] 42 | } 43 | 44 | filter { 45 | name = "name" 46 | values = [var.vault_instance_image_filters[count.index]] 47 | } 48 | } 49 | 50 | resource aws_iam_policy describe-instances { 51 | description = "A policy to permit DescribeInstances, used particularly for Consul cloud auto-join." 52 | name = "EC2DescribeInstances" 53 | policy = <<EOF 54 | { 55 | "Version": "2012-10-17", 56 | "Statement": [ 57 | { 58 | "Effect": "Allow", 59 | "Action": "ec2:DescribeInstances", 60 | "Resource": "*" 61 | } 62 | ] 63 | } 64 | EOF 65 | } 66 | 67 | # NOTE: the IAM role, policy attachment, instance profile, and the opening of the consul 68 | # launch configuration were lost from this dump; the resources below are a best-effort 69 | # reconstruction inferred from the references and the aws_instance.vault resource further down. 70 | resource aws_iam_role describe-instances { 71 | name = "describe-instances" 72 | assume_role_policy = jsonencode({ 73 | Version = "2012-10-17" 74 | Statement = [{ Effect = "Allow", Action = "sts:AssumeRole", Principal = { Service = "ec2.amazonaws.com" } }] 75 | }) 76 | } 77 | 78 | resource aws_iam_role_policy_attachment describe-instances { 79 | role = aws_iam_role.describe-instances.name 80 | policy_arn = aws_iam_policy.describe-instances.arn 81 | } 82 | 83 | resource aws_iam_instance_profile describe-instances { 84 | name = "describe-instances" 85 | role = aws_iam_role.describe-instances.name 86 | } 87 | 88 | resource aws_launch_configuration consul { 89 | name_prefix = "consul" 90 | image_id = data.aws_ami.consul.image_id 91 | instance_type = "t2.small" 92 | iam_instance_profile = aws_iam_instance_profile.describe-instances.name 93 | security_groups = [aws_security_group.consul.id] 94 | associate_public_ip_address = true 95 | key_name = "id_rsa" 96 | 97 | user_data = <<EOF 98 | #!/bin/bash 99 | 100 | # make the local hostname resolvable 101 | hostname=$(hostname) 102 | cat <<EOL >> /etc/hosts 134 | 127.0.0.1 $hostname 135 | EOL 136 | 137 | echo "${local.consul_retry_join_config}" > /etc/consul.d/retry-join-config 138 | 139 | if [[ !
-e /etc/consul.d/agent-bootstrap-complete ]]; then 140 | source /etc/consul.d/agent-bootstrap.sh 141 | fi 142 | 143 | systemctl start consul 144 | systemctl enable consul 145 | EOF 146 | 147 | root_block_device { 148 | volume_type = "standard" 149 | volume_size = "16" 150 | delete_on_termination = true 151 | } 152 | 153 | lifecycle { 154 | create_before_destroy = true 155 | } 156 | } 157 | 158 | data aws_subnets default { 159 | filter { 160 | name = "vpc-id" 161 | values = [data.aws_vpc.default.id] 162 | } 163 | } 164 | 165 | data aws_subnet default { 166 | count = length(data.aws_subnets.default.ids) 167 | id = sort(tolist(data.aws_subnets.default.ids))[count.index] 168 | } 169 | 170 | resource aws_autoscaling_group consul { 171 | name_prefix = "consul" 172 | max_size = 5 173 | min_size = 3 174 | desired_capacity = 3 175 | default_cooldown = 120 176 | launch_configuration = aws_launch_configuration.consul.name 177 | vpc_zone_identifier = data.aws_subnets.default.ids 178 | target_group_arns = [aws_lb_target_group.consul.arn] 179 | termination_policies = ["OldestLaunchConfiguration", "OldestInstance"] 180 | wait_for_capacity_timeout = 0 181 | 182 | tag { 183 | key = "consul_cluster" 184 | value = "eu-west-2" 185 | propagate_at_launch = true 186 | } 187 | } 188 | 189 | resource aws_instance vault { 190 | count = 2 191 | ami = data.aws_ami.vault[count.index].image_id 192 | instance_type = "t2.small" 193 | iam_instance_profile = aws_iam_instance_profile.describe-instances.name 194 | vpc_security_group_ids = [aws_security_group.consul.id, aws_security_group.vault.id, aws_security_group.haproxy.id] 195 | associate_public_ip_address = true 196 | ebs_optimized = false 197 | key_name = "id_rsa" 198 | subnet_id = element(tolist(data.aws_subnets.default.ids), count.index) 199 | 200 | user_data = <<EOF 201 | #!/bin/bash 202 | 203 | # make the local hostname resolvable 204 | hostname=$(hostname) 205 | cat <<EOL >> /etc/hosts 206 | 127.0.0.1 $hostname 207 | EOL 208 | 209 | current_ip=$(ec2metadata --local-ipv4) 210 | cluster_addr="https://$${current_ip}:8201" 211 | jq --arg cluster_addr $cluster_addr '.cluster_addr = $cluster_addr' /etc/vault.d/vault.json > /etc/vault.d/vault.json.new 212 | mv /etc/vault.d/vault.json.new /etc/vault.d/vault.json 213 | 214 | echo "${local.consul_retry_join_config}" > /etc/consul.d/retry-join-config 215 | 216 | if [[ ! -e /etc/consul.d/agent-bootstrap-complete ]]; then 217 | source /etc/consul.d/agent-bootstrap.sh 218 | fi 219 | 220 | if [[ !
-e /etc/vault.d/bootstrap-complete ]]; then 221 | jq '.listener[0].tcp.proxy_protocol_authorized_addrs = ${local.vault_proxy_authorized_addresses}' /etc/vault.d/vault.json > /etc/vault.d/vault.json.new 222 | jq '.api_addr = "https://${var.vault_hostname}"' /etc/vault.d/vault.json.new > /etc/vault.d/vault.json 223 | systemctl restart vault 224 | touch /etc/vault.d/bootstrap-complete 225 | fi 226 | EOF 227 | 228 | root_block_device { 229 | volume_type = "standard" 230 | volume_size = "16" 231 | delete_on_termination = true 232 | } 233 | 234 | lifecycle { 235 | create_before_destroy = true 236 | } 237 | 238 | tags = { 239 | Name = "vault-${count.index}" 240 | } 241 | } 242 | 243 | resource aws_lb vault { 244 | name = "vault" 245 | internal = false 246 | load_balancer_type = "network" 247 | subnets = data.aws_subnets.default.ids 248 | enable_deletion_protection = false 249 | enable_cross_zone_load_balancing = true 250 | } 251 | 252 | resource aws_lb_listener vault_stats { 253 | load_balancer_arn = aws_lb.vault.arn 254 | port = "80" 255 | protocol = "TCP" 256 | 257 | default_action { 258 | type = "forward" 259 | target_group_arn = aws_lb_target_group.vault_stats.arn 260 | } 261 | } 262 | 263 | resource aws_lb_listener vault { 264 | load_balancer_arn = aws_lb.vault.arn 265 | port = "443" 266 | protocol = "TCP" 267 | 268 | default_action { 269 | type = "forward" 270 | target_group_arn = aws_lb_target_group.vault.arn 271 | } 272 | } 273 | 274 | resource aws_lb_listener consul { 275 | load_balancer_arn = aws_lb.vault.arn 276 | port = "8501" 277 | protocol = "TCP" 278 | 279 | default_action { 280 | type = "forward" 281 | target_group_arn = aws_lb_target_group.consul.arn 282 | } 283 | } 284 | 285 | resource aws_lb_target_group vault_stats { 286 | name = "vault-stats" 287 | port = 80 288 | protocol = "TCP" 289 | target_type = "instance" 290 | vpc_id = data.aws_vpc.default.id 291 | deregistration_delay = 15 292 | proxy_protocol_v2 = true 293 | 294 | stickiness { 295 | enabled = false 296 | type = "source_ip" 297 | } 298 | 299 | health_check { 300 | protocol = "TCP" 301 | } 302 | } 303 | 304 | resource aws_lb_target_group_attachment vault_stats { 305 | count = length(aws_instance.vault.*.id) 306 | target_group_arn = aws_lb_target_group.vault_stats.arn 307 | target_id = aws_instance.vault.*.id[count.index] 308 | } 309 | 310 | resource aws_lb_target_group vault { 311 | name = "vault" 312 | port = 443 313 | protocol = "TCP" 314 | target_type = "instance" 315 | vpc_id = data.aws_vpc.default.id 316 | deregistration_delay = 15 317 | proxy_protocol_v2 = true 318 | 319 | health_check { 320 | protocol = "TCP" 321 | } 322 | } 323 | 324 | resource aws_lb_target_group_attachment vault { 325 | count = length(aws_instance.vault.*.id) 326 | target_group_arn = aws_lb_target_group.vault.arn 327 | target_id = aws_instance.vault.*.id[count.index] 328 | } 329 | 330 | resource aws_lb_target_group consul { 331 | name = "consul" 332 | port = 8501 333 | protocol = "TCP" 334 | target_type = "instance" 335 | vpc_id = data.aws_vpc.default.id 336 | deregistration_delay = 15 337 | proxy_protocol_v2 = false 338 | 339 | stickiness { 340 | enabled = false 341 | type = "source_ip" 342 | } 343 | 344 | health_check { 345 | protocol = "TCP" 346 | } 347 | } 348 | 349 | resource aws_security_group consul { 350 | name = "consul" 351 | description = "Consul security rules." 
352 | vpc_id = data.aws_vpc.default.id 353 | 354 | ingress { 355 | description = "Consul internal RPC (TCP)" 356 | from_port = 8300 357 | to_port = 8300 358 | protocol = "tcp" 359 | self = true 360 | } 361 | 362 | ingress { 363 | description = "Consul internal RPC (UDP)" 364 | from_port = 8300 365 | to_port = 8300 366 | protocol = "udp" 367 | self = true 368 | } 369 | 370 | ingress { 371 | description = "Consul LAN SERF (TCP)" 372 | from_port = 8301 373 | to_port = 8301 374 | protocol = "tcp" 375 | self = true 376 | } 377 | 378 | ingress { 379 | description = "Consul LAN SERF (UDP)" 380 | from_port = 8301 381 | to_port = 8301 382 | protocol = "udp" 383 | self = true 384 | } 385 | 386 | ingress { 387 | description = "Consul HTTPS (external)" 388 | from_port = 8501 389 | to_port = 8501 390 | protocol = "tcp" 391 | cidr_blocks = var.trusted_external_ips 392 | } 393 | 394 | ingress { 395 | description = "Consul HTTPS (internal)" 396 | from_port = 8501 397 | to_port = 8501 398 | protocol = "tcp" 399 | self = true 400 | } 401 | 402 | ingress { 403 | description = "Consul HTTPS (Vault)" 404 | from_port = 8501 405 | to_port = 8501 406 | protocol = "tcp" 407 | security_groups = [aws_security_group.vault.id] 408 | } 409 | 410 | ingress { 411 | description = "Consul gRPC (Vault)" 412 | from_port = 8503 413 | to_port = 8503 414 | protocol = "tcp" 415 | security_groups = [aws_security_group.vault.id] 416 | } 417 | 418 | ingress { 419 | description = "Consul HTTPS (HAProxy)" 420 | from_port = 8501 421 | to_port = 8501 422 | protocol = "tcp" 423 | security_groups = [aws_security_group.haproxy.id] 424 | } 425 | 426 | egress { 427 | from_port = 0 428 | to_port = 0 429 | protocol = "-1" 430 | cidr_blocks = ["0.0.0.0/0"] 431 | } 432 | 433 | tags = { 434 | Name = "consul" 435 | } 436 | } 437 | 438 | resource aws_security_group vault { 439 | name = "vault" 440 | description = "Vault security rules." 441 | vpc_id = data.aws_vpc.default.id 442 | 443 | ingress { 444 | description = "Vault HTTPS (internal)" 445 | from_port = 8200 446 | to_port = 8200 447 | protocol = "tcp" 448 | self = true 449 | } 450 | 451 | ingress { 452 | description = "Vault HTTPS (HAProxy)" 453 | from_port = 8200 454 | to_port = 8200 455 | protocol = "tcp" 456 | security_groups = [aws_security_group.haproxy.id] 457 | } 458 | 459 | ingress { 460 | description = "Vault Cluster (internal)" 461 | from_port = 8201 462 | to_port = 8201 463 | protocol = "tcp" 464 | self = true 465 | } 466 | 467 | egress { 468 | from_port = 0 469 | to_port = 0 470 | protocol = "-1" 471 | cidr_blocks = ["0.0.0.0/0"] 472 | } 473 | 474 | tags = { 475 | Name = "vault" 476 | } 477 | } 478 | 479 | resource aws_security_group haproxy { 480 | name = "haproxy" 481 | description = "HAProxy security rules." 
482 | vpc_id = data.aws_vpc.default.id 483 | 484 | ingress { 485 | description = "HAProxy stats HTTP (external)" 486 | from_port = 80 487 | to_port = 80 488 | protocol = "tcp" 489 | cidr_blocks = var.trusted_external_ips 490 | } 491 | 492 | ingress { 493 | description = "HAProxy Vault HTTPS (external)" 494 | from_port = 443 495 | to_port = 443 496 | protocol = "tcp" 497 | cidr_blocks = var.trusted_external_ips 498 | } 499 | 500 | egress { 501 | from_port = 0 502 | to_port = 0 503 | protocol = "-1" 504 | cidr_blocks = ["0.0.0.0/0"] 505 | } 506 | 507 | tags = { 508 | Name = "haproxy" 509 | } 510 | } 511 | 512 | resource null_resource consul_acl_bootstrap { 513 | triggers = { 514 | asg_id = aws_autoscaling_group.consul.id 515 | } 516 | 517 | provisioner "local-exec" { 518 | interpreter = ["/bin/bash", "-c"] 519 | environment = { 520 | CONSUL_CACERT = abspath("${path.module}/../../ansible/consul-ca.crt") 521 | CONSUL_CLIENT_CERT = abspath("${path.module}/../../ansible/consul-agents.crt") 522 | CONSUL_CLIENT_KEY = abspath("${path.module}/../../ansible/consul-agents.key") 523 | CONSUL_HTTP_ADDR = "https://${var.consul_hostname}:8501" 524 | } 525 | 526 | command = <<EOF 527 | 528 | # Retry until the Consul cluster is up and the ACL bootstrap succeeds 529 | until BOOTSTRAP_JSON="$(consul acl bootstrap -format=json)"; do 530 | echo "Waiting to bootstrap Consul ACL system..." 531 | sleep 5 532 | done 533 | 534 | # Capture the master token for the operator 535 | echo "$BOOTSTRAP_JSON" | jq -r '.SecretID' > master-token 536 | consul acl policy create -token-file master-token -name "agent" -rules @${abspath("${path.module}/../../ansible/roles/consul/files/policies/agent.hcl")} > /dev/null 537 | consul acl token create -token-file master-token -policy-name "agent" -secret $(cat ${abspath("${path.module}/../../ansible/roles/consul/files/tokens/agent")}) > /dev/null 538 | consul acl policy create -token-file master-token -name "haproxy" -rules @${abspath("${path.module}/../../ansible/roles/consul/files/policies/haproxy.hcl")} > /dev/null 539 | consul acl token create -token-file master-token -policy-name "haproxy" -secret $(cat ${abspath("${path.module}/../../ansible/roles/consul/files/tokens/haproxy")}) > /dev/null 540 | consul acl policy create -token-file master-token -name "vault" -rules @${abspath("${path.module}/../../ansible/roles/consul/files/policies/vault.hcl")} > /dev/null 541 | consul acl token create -token-file master-token -policy-name "vault" -secret $(cat ${abspath("${path.module}/../../ansible/roles/consul/files/tokens/vault")}) > /dev/null 542 | EOF 543 | } 544 | } 545 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Production-ish Vault installation with a Consul cluster (storage) 2 | 3 | _NOTE_: This setup does not ship ready out-of-the-box; there are some tweaks the end user should make. 4 | 5 | ## Super-quick getting started (try it out) 6 | 7 | This repo assumes you're running on Linux or macOS. 8 | 9 | Ensure you have the following installed: Terraform 0.13 (min), Packer 1.5 (min), Vault 1.4 (min), and Ansible 2.8 (min). 10 | The configs should work with newer versions as well. 11 | This repo assumes some prior knowledge and experience with Vault, Terraform, Packer, and at least one of the cloud providers mentioned. 12 | 13 | ### Common setup 14 | 15 | 1. Replace any instances of `example.com` in the `tls-bootstrap/bootstrap.sh` script with a domain that you control. 16 | 1. Replace any instances of `example.com` in the `ansible/group_vars/example.yml` file with the same domain as in the previous step. 17 | 1.
`cd` into the `ansible` folder and run the following to create Consul tokens for the necessary use cases: 18 | ``` 19 | uuidgen | tr '[:upper:]' '[:lower:]' > roles/consul/files/tokens/agent 20 | uuidgen | tr '[:upper:]' '[:lower:]' > roles/consul/files/tokens/haproxy 21 | uuidgen | tr '[:upper:]' '[:lower:]' > roles/consul/files/tokens/vault 22 | ``` 23 | 1. `cd` into the `tls-bootstrap` folder, run `./bootstrap.sh`. 24 | 25 | Pick a cloud provider - AWS, GCP, or Azure - and set up the infrastructure as follows. 26 | It's recommended to have a console for your chosen provider open and available, and to be logged in. 27 | 28 | ### AWS 29 | 30 | 1. Ensure you have a default VPC that has a minimum of 2 subnets and outbound internet access. 31 | If you don't have one or don't want one, adjust the Packer templates and Terraform to use a different pre-configured VPC. 32 | 1. Adjust the Packer and Terraform variables. 33 | 1. Check the `aws/packer/example.vars` file and supply credentials and a region. 34 | 1. Check the `aws/terraform/example.tfvars` file and adjust the hostnames and trusted external IPs to fit your setup. It's recommended to add the outbound IP of your machine to this list. _NOTE_: AWS requires that these addresses be in CIDR format. 35 | 1. Build the images with Packer. 36 | 1. `cd` into the `aws/packer` folder, run 37 | ``` 38 | packer build --var-file example.vars consul.json 39 | packer build --var-file example.vars vault.json 40 | ``` 41 | 1. Run the Terraform. 42 | 1. `cd` into the `aws/terraform` folder, run `terraform init ; terraform apply --var-file example.tfvars` and if the plan looks good, approve it. 43 | 1. **While Terraform is running**, set up the domain configured in `aws/terraform/example.tfvars` to point at the AWS ELB. This can be done with a CNAME record in your DNS zone, or by resolving the DNS record (`dig <ELB hostname>`) and editing the hosts file as follows. If the `dig` command doesn't produce IPs for the ELB, ensure it's finished provisioning and retry. 44 | ``` 45 | <IP 1> vault-0.vault.example.com vault-1.vault.example.com vault.example.com consul.example.com 46 | <IP 2> vault-0.vault.example.com vault-1.vault.example.com vault.example.com consul.example.com 47 | <IP 3> vault-0.vault.example.com vault-1.vault.example.com vault.example.com consul.example.com 48 | ``` 49 | 1. If the Terraform apply step hangs on provisioning `null_resource.consul_acl_bootstrap`, check that Consul is responding at `consul.<your domain>:8501`. 50 | 1. If you receive an error similar to `Failed to create new policy: Unexpected response code: 500 ()`, the situation can be recovered by locating the `null_resource consul_acl_bootstrap` resource and commenting all lines of the `command` _except_ those which start with `consul acl policy` or `consul acl token`. Terraform should then be re-run. 51 | 52 | ### GCP 53 | 54 | 1. Ensure you have a default VPC (named 'default') with a subnet (named 'default') that has outbound internet access. 55 | If you don't have one or don't want one, adjust the Packer templates and Terraform to look for a different pre-configured VPC. 56 | 1. Adjust the Packer and Terraform variables. 57 | 1. Check the `gcp/packer/example.vars` file and supply credentials and a project ID. 58 | 1. Check the `gcp/terraform/example.tfvars` file and supply credentials and adjust the hostnames and trusted external IPs to fit your setup. It's recommended to add the outbound IP of your machine to this list. 59 | 1. Build the images with Packer. 60 | 1.
`cd` into the `gcp/packer` folder, run 61 | ``` 62 | packer build --var-file example.vars consul.json 63 | packer build --var-file example.vars vault.json 64 | ``` 65 | 1. Run the Terraform. 66 | 1. `cd` into the `gcp/terraform` folder, run `terraform init ; terraform apply --var-file example.tfvars` and if the plan looks good, approve it. 67 | 1. **While Terraform is running**, set up the domain configured in `gcp/terraform/example.tfvars` to point at the Load Balancer frontends. This can be done with A records in your DNS zone, or by editing the hosts file: 68 | ``` 69 | <consul forwarding rule IP> consul.example.com 70 | <vault forwarding rule IP> vault-0.vault.example.com vault-1.vault.example.com vault.example.com 71 | ``` 72 | 1. If the Terraform apply step hangs on provisioning `null_resource.consul_acl_bootstrap`, check that Consul is responding at `consul.<your domain>:8501`. 73 | 1. If you receive an error similar to `Failed to create new policy: Unexpected response code: 500 ()`, the situation can be recovered by locating the `null_resource consul_acl_bootstrap` resource and commenting all lines of the `command` _except_ those which start with `consul acl policy` or `consul acl token`. Terraform should then be re-run. 74 | 75 | ### Azure 76 | 77 | 1. Ensure you have a virtual network set up that has outbound internet access, with a Network Security Group named 'default' attached to the relevant subnet. 78 | 1. Create a new resource group, called 'default'. 79 | 1. Create a new virtual network, called 'default' with whatever IP space fits your needs. 80 | 1. Create a new network security group, called 'default' and associate it with the subnet in the virtual network. 81 | 1. Adjust the Packer and Terraform variables. 82 | 1. Check the `azure/packer/example.vars` file and supply a subscription ID, a resource group name, and a region. 83 | 1. Check the `azure/terraform/example.tfvars` file and adjust the hostnames and trusted external IPs to fit your setup. 84 | 1. Log in to the Azure CLI if you haven't already, and select the appropriate subscription with `az account set -s <subscription id>`. 85 | 1. Build the images with Packer. 86 | 1. `cd` into the `azure/packer` folder, run 87 | ``` 88 | packer build --var-file example.vars consul.json 89 | packer build --var-file example.vars vault.json 90 | ``` 91 | 1. If you run into issues authenticating with AzureAD, service principal authentication can be used instead. See [the packer docs](https://packer.io/docs/builders/azure-arm.html#service-principal). 92 | 1. Run the Terraform. 93 | 1. `cd` into the `azure/terraform` folder, run `terraform init ; terraform apply --var-file example.tfvars` and if the plan looks good, approve it. 94 | 1. **While Terraform is running**, set up the domains configured in `azure/terraform/example.tfvars` to point at the Load Balancer public IPs. This can be done with A records in your DNS zone, or by editing the hosts file: 95 | ``` 96 | <consul LB IP> consul.example.com 97 | <vault LB IP> vault-0.vault.example.com vault-1.vault.example.com vault.example.com 98 | ``` 99 | 1. You may receive an error relating to HealthProbes when creating the scale set for Consul; in this case, re-run Terraform. 100 | 1. If you receive an error similar to `Failed to create new policy: Unexpected response code: 500 ()`, the situation can be recovered by locating the `null_resource consul_acl_bootstrap` resource and commenting all lines of the `command` _except_ those which start with `consul acl policy` or `consul acl token`. Terraform should then be re-run. 101 | 102 | ### Testing and Usage 103 | 104 | 1.
Prepare a client certificate for use with Consul. 105 | 1. `cd` into the `ansible` folder, run `openssl pkcs12 -export -in consul-agents.crt -inkey consul-agents.key -out consul.p12` and enter a password when prompted. 106 | 1. Import this certificate into your browser of choice. 107 | 1. Try to access Consul by browsing to `https://consul.<your domain>:8501/ui`, selecting the certificate when prompted. 108 | 1. Click on the `ACL` navbar item. 109 | 1. Find the master token created during bootstrap and supply it to the UI; it should be in a file at `<provider>/terraform/master-token`. 110 | 1. Try to access the HAProxy stats page for vault by visiting `http://vault.<your domain>/haproxy-stats`, or `http://<load balancer IP>/haproxy-stats` if running on GCP. 111 | 1. Initialise Vault and unseal if you wish to experiment further. 112 | 1. `cd` into the `ansible` folder, and set up some useful environment variables. 113 | ``` 114 | export VAULT_ADDR=https://vault-0.vault.<your domain> 115 | export VAULT_CACERT="$(pwd)/vault-ca.crt" 116 | ``` 117 | 1. Run `vault operator init`. 118 | 1. Copy the unseal keys and root token from the output and paste them into a text editor. 119 | 1. Unseal the specific Vault node by running `vault operator unseal` and supplying an unseal key when prompted. Repeat this process until the node is unsealed. 120 | 1. Once enough keys have been entered (3 by default), refresh the HAProxy stats page and look for the server that was just unsealed (vault-0) - it should be green at the bottom. 121 | 122 | ## Getting started (more in-depth) 123 | 124 | ### Terraform 125 | 126 | The Terraform config in this repo uses the local filesystem for state storage instead of remote state. 127 | It is highly recommended to use a remote storage mechanism for Terraform's state. 128 | 129 | Additionally, there are no version pins for any of the providers and it's recommended that you set some. 130 | 131 | 132 | 133 | ### Network 134 | 135 | It's assumed that a network and subnet are available in which to set up the cluster; please adjust the automation accordingly. 136 | 137 | 138 | ### Variables (ansible) 139 | 140 | Most variables are already set up with sensible values, but secrets or sensitive variables should be set per installation along with any other installation-specific variables. 141 | The `example.yml` group variables are not stored securely for the purposes of enabling easy experimentation with this setup. 142 | Of course for a proper deployment, these secrets should be appropriately protected using something such as ansible-vault or by not committing them at all. 143 | 144 | _NOTE_: Special remarks about Consul tokens are made further on, though they can be configured through variables. 145 | 146 | #### Consul role 147 | 148 | ``` 149 | consul_user_password_hash - The password hash to set for the Consul system user 150 | 151 | consul_gossip_encryption_key - The encryption key used to secure Gossip traffic between Consul nodes, generated with `consul keygen` 152 | ``` 153 | 154 | #### HAProxy-consul-template role 155 | 156 | ``` 157 | consul_template_user_password_hash - The password hash to set for the consul-template system user 158 | 159 | vault_lb_hostname - The external hostname used to access the load-balanced Vault endpoint.
160 | ``` 161 | 162 | #### Vault role 163 | 164 | ``` 165 | vault_user_password_hash - The password hash to set for the Vault system user 166 | ``` 167 | 168 | ### Variables (terraform) 169 | 170 | There is only a handful of variables needed by Terraform, each of which should be tweaked for your needs: 171 | ``` 172 | vault_hostname - The hostname which will be used to access Vault's load-balanced endpoint. 173 | 174 | consul_hostname - The hostname which will be used to access Consul's load-balanced endpoint. 175 | 176 | trusted_external_ips - The external IPs to permit when configuring external access to Vault and Consul. 177 | 178 | consul_retry_join_config - A local value in `main.tf` rather than a variable; it should not require adjustment unless the cloud auto-join tag or value is changed. 179 | ``` 180 | 181 | Some variables are provider-specific, such as GCP: 182 | 183 | ``` 184 | credentials - The path on disk of a credentials file for Terraform to use. 185 | 186 | project - The ID of the project to provision resources in. 187 | 188 | region - The region in which to provision resources. 189 | ``` 190 | 191 | ### Tokens 192 | 193 | Consul tokens are required for the Consul agent, for consul-template, and for Vault. 194 | The `SecretID` values for each token are set in advance so that the machines can boot and perform their function automatically, without extra setup. 195 | These are configured through variables in Ansible, which by default look for the tokens on the filesystem using the `lookup` plugin. 196 | You should populate these tokens with your own values, which must be UUIDs, and can be supplied through files or by setting the ansible variables explicitly. 197 | _NOTE_: If you choose to use ansible variables instead of files, the ACL bootstrap process in Terraform will need to be adjusted to remove the creation of Consul tokens. 198 | 199 | The relevant ansible variables are as follows: 200 | ``` 201 | consul_agent_acl_token - The token for the Consul agent to use, expects a corresponding file in `ansible/roles/consul/files/tokens/agent` 202 | 203 | consul_default_acl_token - The default token used by the Consul agent, expects a corresponding file in `ansible/roles/consul/files/tokens/agent` 204 | 205 | consul_template_consul_token - The token used by consul-template to obtain node data about vault, expects a corresponding file in `ansible/roles/consul/files/tokens/haproxy` 206 | 207 | vault_consul_acl_token - The token used by Vault to access Consul's KV store, expects a corresponding file in `ansible/roles/consul/files/tokens/vault` 208 | ``` 209 | 210 | ### Certificates 211 | 212 | Certificates are used to secure traffic from Consul and Vault (TLS server certificates) as well as to Consul (TLS client certificates). 213 | You should generate your own keys and certificates signed by a CA you trust. 214 | Specific recommendations about TLS are in the Design section and a script is provided in `tls-bootstrap` to get things started. 215 | _NOTE_: Some values (particularly CNs and SANs) will need to be adjusted depending on hostnames in use. 216 | Particular attention should be paid to the hostnames on the certificate to ensure that communication isn't blocked. 217 | Consul expects a name of `consul` to be present within the Consul server and client certificates by default.
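For reference, the agent-side TLS settings that this expectation implies look roughly like the following Consul config fragment - an illustrative sketch only, not the actual output of the roles in this repo; the file paths and the use of the older top-level TLS keys are assumptions:

```
# Illustrative Consul agent TLS fragment (paths are assumed, not taken from the roles)
ca_file                = "/etc/consul.d/consul-ca.crt"
cert_file              = "/etc/consul.d/consul-servers.crt"
key_file               = "/etc/consul.d/consul-servers.key"
verify_incoming        = true
verify_outgoing        = true
# verify_server_hostname is what requires the 'consul' name
# (server.<datacentre>.consul) to be present in the server certificate SANs,
# which is why tls-bootstrap/bootstrap.sh includes it in ALT_NAMES.
verify_server_hostname = true
```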
218 | 219 | Ansible and Terraform expect the following files (the names produced by `tls-bootstrap/bootstrap.sh`) to be available at the root of the `ansible` folder: 220 | 221 | * consul-servers.crt / consul-servers.key - The certificate (optionally containing the issuing CA's certificate) and private key to use for Consul server authentication. 222 | * consul-agents.crt / consul-agents.key - The certificate and private key to use for Consul client authentication (Terraform also uses these during ACL bootstrapping). 223 | * consul-ca.crt - The certificate file of the CA that signs the Consul certificates. 224 | * vault.crt - The certificate file (optionally containing the issuing CA's certificate) to use for Vault server authentication. 225 | * vault-ca.crt - The certificate file of the CA that signs the certificate in `vault.crt`. 226 | * vault.key - The private key file to use for Vault server authentication. 227 | 228 | ### DNS 229 | 230 | Hostnames are only needed in a few places, and should be adjusted before provisioning. 231 | See 232 | * `haproxy-consul-template` ansible role, defaults 233 | * `vault_hostname` variable in Terraform 234 | * `consul_hostname` variable in Terraform 235 | * `CERTIFICATE_DOMAIN` variable in `tls-bootstrap/bootstrap.sh` 236 | 237 | The automation does _NOT_ create any DNS records, but does expect them to exist. You should therefore add the necessary automation to Terraform, or arrange some other means of ensuring that the expected hostnames resolve to an address on the load-balancer. 238 | 239 | ### Backups 240 | 241 | There is no provision made to enable backups as the situation of each user is likely to be different. 242 | Since Consul is the backing store for Vault, an automated process that takes a snapshot of Consul and saves it somewhere would probably be useful. 243 | 244 | ## Design 245 | 246 | ### External access 247 | 248 | All external access is IP-controlled within security groups configured through Terraform. 249 | HTTPS communication to Consul is exposed via a load-balancer on port 8501 and traffic is sent to the autoscaling group. 250 | HTTPS communication to Vault is exposed via a load-balancer on port 443 and traffic is sent to HAProxy on the Vault nodes. 251 | Depending on the hostname supplied, traffic is routed either to any available Vault node or directly to a specific node. 252 | 253 | This is done so that individual Vault nodes can be unsealed externally, and to enable initialisation of Vault. 254 | 255 | ### DNS 256 | 257 | Consul and Vault are exposed through a load-balancer and are expected to be available at `vault.<your domain>` and `consul.<your domain>`. 258 | Individual Vault server nodes are available at `<node>.vault.<your domain>` where `<node>` is the name of the VM within the cloud provider. 259 | By default this is something like `vault-0`. 260 | 261 | Various systems need to be aware of the hostnames used for access, as well as requiring certificates with appropriate CNs and SANs. 262 | In particular these are: 263 | * HAProxy (via the `haproxy-consul-template` ansible role) 264 | * Terraform (via the `vault_hostname` and `consul_hostname` variables) 265 | 266 | ### TLS 267 | 268 | Private CAs are created to secure traffic to Consul and Vault, and the script in `tls-bootstrap` is designed to achieve this.
269 | You can use whatever certificates you'd like, including Let's Encrypt, but be aware of the following: 270 | 271 | * The certificates and keys are baked into the machine images 272 | * Ansible expects the certificate and key files to be available to it, so they should be placed in the `ansible` folder or within the `files` folder of the relevant role 273 | * If using a public CA, ensure that the `.crt` file contains the certificate of the issuing CA and any intermediates, and that the `-ca.crt` file contains the certificate of the root CA. 274 | * The CA used to secure outgoing communication (TLS server certs) from Consul must be the same as the one used to secure incoming communication (TLS client certs), so a private CA is recommended. 275 | 276 | Certificates are needed at various points in the provisioning process, chiefly by ansible and Terraform. 277 | Ansible bakes the certificate and key files into the machine image, and Terraform uses the Consul certificate files in the ACL bootstrapping process. 278 | 279 | _NOTE_: The CNs and SANs used on certificates are critical and must match various expected names. 280 | Of course for external access, the certificates should have `consul.<your domain>`, `vault.<your domain>`, and `*.vault.<your domain>` names. 281 | In addition, to enable Consul to communicate securely with itself, it expects a given name to be present in the certificate; by default this is `consul`. 282 | If you wish to adjust this, be sure to update the Consul configuration to expect the newly assigned value. 283 | 284 | ### Consul 285 | 286 | An autoscaling group is created for Consul, but with no scaling rules as this is a very installation-specific concern. 287 | The Consul nodes are designed to be able to join a cluster with minimal fuss and use the cloud auto-join mechanism to do so. 288 | The agent goes through a bootstrap process on startup to configure the cloud auto-join settings as well as setting the agent ACL. 289 | The cloud auto-join settings are configured in Terraform. 290 | 291 | #### ACLs 292 | 293 | The ACL system is bootstrapped using a null resource in Terraform, which calls the relevant APIs from the machine running Terraform. 294 | The master token is captured and output to the filesystem for the operator to do with as they please. 295 | Some essential policies and tokens are also created at this point to enable Vault and consul-template to function. 296 | The bootstrap process will retry indefinitely until it succeeds, which can lead to an infinite provisioning loop if the bootstrap operation is successful but subsequent operations fail. 297 | In this situation, the bootstrap process should be reset, or the relevant lines should be commented allowing Terraform to re-run. 298 | 299 | Having Consul tokens within machine images has been avoided as much as possible; however, a certain amount of it is necessary. 300 | For the purposes of configuring the Consul agent with the necessary permissions to do node updates, a file is placed in `/etc/consul.d` for use in the agent bootstrap process. 301 | Once the agent has been configured to use the token with the agent ACL API, the token file is deleted, as token persistence within Consul is enabled. 302 | 303 | ### HAProxy and consul-template 304 | 305 | HAProxy is installed on the Vault nodes to direct traffic as necessary and achieve the direct-to-node or load-balanced access described previously.
306 | To achieve this, there are two types/groups of backends - a backend per node for the direct-to-node access, containing only that specific node, and a single backend containing all nodes for the load-balanced access. 307 | HAProxy is deliberately unaware of the content of any HTTP requests going through it (except stats), and uses the SNI presented in the TLS handshake to judge where to send traffic. 308 | The HAProxy frontends can optionally accept the proxy protocol (defaults to on) from the fronting load-balancer. 309 | All backends within HAProxy (individual nodes and load-balanced pool) have health checks enabled. 310 | The load-balanced backend uses an HTTPS check against Vault's health endpoint, and the individual node backends use HTTPS checks against the same endpoint but permit most error conditions, so that sealed, standby, or uninitialised nodes still count as healthy for direct access. 311 | In addition, all backends send the proxy protocol to Vault. 312 | 313 | consul-template is used to query Consul for Vault node information and populate HAProxy's configuration accordingly, for the individual node backends as well as the load-balanced backend. 314 | 315 | ### Vault 316 | 317 | Vault is set up to receive the proxy protocol and is configured such that any IP in the subnet is allowed to send the proxy protocol to Vault. 318 | This enables multiple Vault nodes to load-balance one another (with HAProxy) without needing to authorise specific IPs or needing to dynamically configure Vault according to what nodes are available. 319 | 320 | It's expected that the `file` audit method will be used, and so logrotate has been configured accordingly, expecting an audit log file to be placed in `/var/log/vault/` with an extension of `.log`. 321 | 322 | It should be noted that auto-unsealing is not in use in this installation and the initialisation of Vault is left as an exercise for the operator. 323 | 324 | ## Considerations of automation and setup 325 | 326 | ### TLS certificates 327 | 328 | It is hypothetically possible to create one or more PKI backends within Vault and have them serve as the CAs for securing Consul and Vault communication. 329 | This could give you such benefits as not needing to create machine images that contain certificates and keys, instead having the nodes generate keys and obtain signed certificates from Vault upon startup. 330 | 331 | The reason this hasn't been done is that it makes the overall setup more complicated and requires more initial configuration during the setup of the system, as it creates a cyclical dependency on the cluster itself. 332 | You may of course pursue such a setup should you wish, just bear in mind the differences between automating 'the first' setup and 'the ongoing' setup. 333 | If the cluster needed to be rebuilt, it's likely that you would need to revert to storing certificates and keys within the image until the cluster can be brought up from scratch again. 334 | One way to achieve the self-certifying setup would be to use consul-template to request certificates from Vault, restarting Consul or triggering a config reload when the files change. 335 | It would be best to use the Vault agent as well, to maintain a Vault token and have the agent make requests to Vault on behalf of consul-template. 336 | You would also need to replace the explicit CA cert file in Consul's config with a CA directory, to permit the change in CA to take place as new agents are rolled out to the autoscaling group.
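As a sketch of that consul-template approach (illustrative only - the `consul-pki` mount, the `consul` role name, the file paths, and the reload command are assumptions, not part of this repo):

```
# consul-template config fragment: render a certificate from a Vault PKI backend
# and reload Consul when it changes. In practice the certificate and key should
# be issued in a single call and written out together.
template {
  contents    = "{{ with secret \"consul-pki/issue/consul\" \"common_name=server.example-dc.consul\" }}{{ .Data.certificate }}{{ end }}"
  destination = "/etc/consul.d/tls/consul.crt"
  command     = "systemctl reload consul"
}
```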
337 | 
338 | Incidentally, the commentary above mentions Consul as a target of automated certificates, but the approach for Vault would be very similar.
339 | 
340 | It would also be possible to use a secret storage mechanism on a cloud provider to store the certificates and keys and have the machines pull them out of storage on startup.
341 | This hasn't been done in order to simplify the setup and to avoid introducing further dependencies outwith those already in use.
342 | Depending on your situation, you may wish to avoid trusting such a tool, or you may consider that acceptable.
343 | 
344 | If you wanted to pull certificates in on startup, it would be reasonably trivial to do, and the userdata field could be used fairly effectively.
345 | 
346 | ### ACL tokens
347 | 
348 | In this setup, Consul tokens are created with known secret values already provisioned within components such as consul-template and Vault.
349 | The tokens are stored in the machine image and removed after startup where possible (currently Consul only).
350 | 
351 | It would be possible to instead store these tokens within Vault or even a cloud provider's secrets storage facility and have the nodes retrieve them on startup.
352 | This hasn't been done for similar reasons to those discussed in the previous section - to avoid introducing unnecessary dependencies, to limit the reach of trust, and also to avoid complexity in the setup.
353 | 
354 | Once again, such a setup is fairly trivial to achieve, and the recommendation is to use userdata to trigger the behaviour.
355 | 
356 | ### ACL bootstrapping
357 | 
358 | It's possible to use the Consul provider for Terraform to create ACL policies and tokens within Consul.
359 | 
360 | In this setup, policies and tokens are instead created by calling the APIs via the Consul binary, as sketched below.
361 | The reason for this is, again, to avoid introducing complexity into the initial setup.
362 | When managing resources via Terraform, layered and explicitly-ordered dependencies within the same configuration don't always work well.
363 | The CLI-based approach allows for plenty of retries and a more robust experience than attempting to wire the Consul provider up to a cluster that doesn't yet exist or is still being provisioned.
364 | 
365 | Again, you could bootstrap the cluster and then go on to manage the ACL policies and tokens within Terraform, including importing the master and default tokens; this has been left as an exercise for the operator.
366 | It would also be possible to use Vault to create and distribute tokens for use with Consul, and much like the previous sections, this has been left out so as to not introduce complexity.
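For reference, a hedged sketch of the CLI-based flow described above (the hostname and file names are illustrative):

```
# Sketch only: bootstrap the ACL system once the cluster responds,
# then create a policy using the captured master token.
export CONSUL_HTTP_ADDR="https://consul.example.com:8501"

until consul acl bootstrap -format=json > master-token.json 2>/dev/null; do
  echo "cluster not ready yet; retrying"
  sleep 5
done

jq -r '.SecretID' master-token.json > master-token

consul acl policy create -token-file master-token \
  -name "agent" -rules @policies/agent.hcl
```

Note that this mirrors the behaviour described above: if bootstrap has already succeeded, the loop will never exit, and the bootstrap must be reset before re-running.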
367 | 368 | -------------------------------------------------------------------------------- /azure/terraform/main.tf: -------------------------------------------------------------------------------- 1 | variable vault_hostname {} 2 | variable consul_hostname {} 3 | variable trusted_external_ips { type = list(string) } 4 | variable vault_instance_image_filters { type = list(string) } 5 | 6 | provider azurerm { 7 | features {} 8 | } 9 | 10 | locals { 11 | vault_proxy_authorized_addresses = jsonencode(concat(["127.0.0.1"], [data.azurerm_subnet.default.address_prefix])) 12 | consul_retry_join_config = join(" ", 13 | [ 14 | "provider=azure", 15 | "tenant_id=${data.azurerm_client_config.current.tenant_id}", 16 | "subscription_id=${data.azurerm_client_config.current.subscription_id}", 17 | "resource_group=${data.azurerm_resource_group.default.name}", 18 | "vm_scale_set=consul" 19 | ] 20 | ) 21 | } 22 | 23 | data azurerm_resource_group default { 24 | name = "default" 25 | } 26 | 27 | data azurerm_virtual_network default { 28 | name = "default" 29 | resource_group_name = data.azurerm_resource_group.default.name 30 | } 31 | 32 | data azurerm_subnet default { 33 | name = "default" 34 | virtual_network_name = data.azurerm_virtual_network.default.name 35 | resource_group_name = data.azurerm_resource_group.default.name 36 | } 37 | 38 | data azurerm_client_config current {} 39 | 40 | data azurerm_image consul { 41 | name_regex = "consul-" 42 | sort_descending = true 43 | resource_group_name = data.azurerm_resource_group.default.name 44 | } 45 | 46 | data azurerm_image vault { 47 | count = 2 48 | name_regex = var.vault_instance_image_filters[count.index] 49 | sort_descending = true 50 | resource_group_name = data.azurerm_resource_group.default.name 51 | } 52 | 53 | resource azurerm_role_definition compute_reader { 54 | name = "Compute Reader" 55 | scope = "/subscriptions/${data.azurerm_client_config.current.subscription_id}/resourceGroups/${data.azurerm_resource_group.default.name}" 56 | assignable_scopes = ["/subscriptions/${data.azurerm_client_config.current.subscription_id}/resourceGroups/${data.azurerm_resource_group.default.name}"] 57 | 58 | permissions { 59 | actions = [ 60 | "Microsoft.Compute/virtualMachineScaleSets/*/read", 61 | "Microsoft.Compute/virtualMachines/*/read", 62 | "Microsoft.Network/networkInterfaces/read" 63 | ] 64 | } 65 | } 66 | 67 | resource azurerm_user_assigned_identity consul { 68 | name = "consul" 69 | resource_group_name = data.azurerm_resource_group.default.name 70 | location = data.azurerm_resource_group.default.location 71 | } 72 | 73 | resource azurerm_role_assignment consul_compute_reader { 74 | scope = data.azurerm_resource_group.default.id 75 | role_definition_id = azurerm_role_definition.compute_reader.role_definition_resource_id 76 | principal_id = azurerm_user_assigned_identity.consul.principal_id 77 | } 78 | 79 | resource azurerm_application_security_group consul { 80 | name = "consul" 81 | resource_group_name = data.azurerm_resource_group.default.name 82 | location = data.azurerm_resource_group.default.location 83 | } 84 | 85 | resource azurerm_linux_virtual_machine_scale_set consul { 86 | name = "consul" 87 | resource_group_name = data.azurerm_resource_group.default.name 88 | location = data.azurerm_resource_group.default.location 89 | admin_username = "ubuntu" 90 | health_probe_id = azurerm_lb_probe.consul.id 91 | instances = 3 92 | source_image_id = data.azurerm_image.consul.id 93 | sku = "Standard_B2s" 94 | upgrade_mode = "Manual" 95 | zones = ["1", "2", 
"3"] 96 | zone_balance = true 97 | 98 | custom_data = base64encode(< /etc/consul.d/retry-join-config 101 | 102 | if [[ ! -e /etc/consul.d/agent-bootstrap-complete ]]; then 103 | source /etc/consul.d/agent-bootstrap.sh 104 | fi 105 | 106 | systemctl start consul 107 | systemctl enable consul 108 | EOF 109 | ) 110 | 111 | admin_ssh_key { 112 | public_key = file("~/.ssh/id_rsa.pub") 113 | username = "ubuntu" 114 | } 115 | 116 | identity { 117 | type = "UserAssigned" 118 | identity_ids = [azurerm_user_assigned_identity.consul.id] 119 | } 120 | 121 | os_disk { 122 | caching = "None" 123 | storage_account_type = "Standard_LRS" 124 | } 125 | 126 | network_interface { 127 | name = "consul" 128 | primary = true 129 | 130 | ip_configuration { 131 | name = "consul" 132 | primary = true 133 | subnet_id = data.azurerm_subnet.default.id 134 | application_security_group_ids = [azurerm_application_security_group.consul.id] 135 | load_balancer_backend_address_pool_ids = [azurerm_lb_backend_address_pool.consul.id] 136 | } 137 | } 138 | } 139 | 140 | resource azurerm_application_security_group vault { 141 | name = "vault" 142 | resource_group_name = data.azurerm_resource_group.default.name 143 | location = data.azurerm_resource_group.default.location 144 | } 145 | 146 | resource azurerm_application_security_group haproxy { 147 | name = "haproxy" 148 | resource_group_name = data.azurerm_resource_group.default.name 149 | location = data.azurerm_resource_group.default.location 150 | } 151 | 152 | resource azurerm_linux_virtual_machine vault { 153 | count = 2 154 | name = "vault-${count.index}" 155 | resource_group_name = data.azurerm_resource_group.default.name 156 | location = data.azurerm_resource_group.default.location 157 | admin_username = "ubuntu" 158 | network_interface_ids = [azurerm_network_interface.vault.*.id[count.index]] 159 | source_image_id = data.azurerm_image.vault[count.index].id 160 | size = "Standard_B2s" 161 | zone = count.index + 1 162 | 163 | custom_data = base64encode(< /etc/vault.d/vault.json.new 169 | mv /etc/vault.d/vault.json.new /etc/vault.d/vault.json 170 | 171 | echo "${local.consul_retry_join_config}" > /etc/consul.d/retry-join-config 172 | 173 | if [[ ! -e /etc/consul.d/agent-bootstrap-complete ]]; then 174 | source /etc/consul.d/agent-bootstrap.sh 175 | fi 176 | 177 | if [[ ! 
-e /etc/vault.d/bootstrap-complete ]]; then 178 | jq '.listener[0].tcp.proxy_protocol_authorized_addrs = ${local.vault_proxy_authorized_addresses}' /etc/vault.d/vault.json > /etc/vault.d/vault.json.new 179 | jq '.api_addr = "https://${var.vault_hostname}"' /etc/vault.d/vault.json.new > /etc/vault.d/vault.json 180 | systemctl restart vault 181 | touch /etc/vault.d/bootstrap-complete 182 | fi 183 | EOF 184 | ) 185 | 186 | admin_ssh_key { 187 | public_key = file("~/.ssh/id_rsa.pub") 188 | username = "ubuntu" 189 | } 190 | 191 | identity { 192 | type = "UserAssigned" 193 | identity_ids = [azurerm_user_assigned_identity.consul.id] 194 | } 195 | 196 | os_disk { 197 | caching = "None" 198 | storage_account_type = "Standard_LRS" 199 | } 200 | } 201 | 202 | resource azurerm_network_interface vault { 203 | count = 2 204 | name = "vault-${count.index}" 205 | resource_group_name = data.azurerm_resource_group.default.name 206 | location = data.azurerm_resource_group.default.location 207 | 208 | ip_configuration { 209 | name = "config1" 210 | subnet_id = data.azurerm_subnet.default.id 211 | private_ip_address_allocation = "Dynamic" 212 | } 213 | } 214 | 215 | resource azurerm_network_interface_application_security_group_association vault_consul { 216 | count = 2 217 | network_interface_id = azurerm_network_interface.vault.*.id[count.index] 218 | application_security_group_id = azurerm_application_security_group.consul.id 219 | } 220 | 221 | resource azurerm_network_interface_application_security_group_association vault_vault { 222 | count = 2 223 | network_interface_id = azurerm_network_interface.vault.*.id[count.index] 224 | application_security_group_id = azurerm_application_security_group.vault.id 225 | } 226 | 227 | resource azurerm_network_interface_application_security_group_association vault_haproxy { 228 | count = 2 229 | network_interface_id = azurerm_network_interface.vault.*.id[count.index] 230 | application_security_group_id = azurerm_application_security_group.haproxy.id 231 | } 232 | 233 | resource azurerm_network_interface_backend_address_pool_association vault { 234 | count = 2 235 | network_interface_id = azurerm_network_interface.vault.*.id[count.index] 236 | ip_configuration_name = "config1" 237 | backend_address_pool_id = azurerm_lb_backend_address_pool.vault.id 238 | } 239 | 240 | resource azurerm_lb vault { 241 | name = "vault" 242 | resource_group_name = data.azurerm_resource_group.default.name 243 | location = data.azurerm_resource_group.default.location 244 | sku = "Standard" 245 | 246 | frontend_ip_configuration { 247 | name = "consul" 248 | public_ip_address_id = azurerm_public_ip.consul.id 249 | } 250 | 251 | frontend_ip_configuration { 252 | name = "vault" 253 | public_ip_address_id = azurerm_public_ip.vault.id 254 | } 255 | } 256 | 257 | resource azurerm_public_ip consul { 258 | name = "consul" 259 | resource_group_name = data.azurerm_resource_group.default.name 260 | location = data.azurerm_resource_group.default.location 261 | allocation_method = "Static" 262 | sku = "Standard" 263 | } 264 | 265 | resource azurerm_public_ip vault { 266 | name = "vault" 267 | resource_group_name = data.azurerm_resource_group.default.name 268 | location = data.azurerm_resource_group.default.location 269 | allocation_method = "Static" 270 | sku = "Standard" 271 | } 272 | 273 | resource azurerm_lb_backend_address_pool consul { 274 | name = "consul" 275 | loadbalancer_id = azurerm_lb.vault.id 276 | } 277 | 278 | resource azurerm_lb_backend_address_pool vault { 279 | name = "vault" 280 | 
loadbalancer_id = azurerm_lb.vault.id 281 | } 282 | 283 | resource azurerm_lb_rule consul { 284 | name = "consul" 285 | loadbalancer_id = azurerm_lb.vault.id 286 | frontend_ip_configuration_name = "consul" 287 | protocol = "Tcp" 288 | frontend_port = "8501" 289 | backend_port = "8501" 290 | backend_address_pool_ids = [azurerm_lb_backend_address_pool.consul.id] 291 | probe_id = azurerm_lb_probe.consul.id 292 | disable_outbound_snat = true 293 | } 294 | 295 | resource azurerm_lb_rule haproxy_stats { 296 | name = "haproxy-stats" 297 | loadbalancer_id = azurerm_lb.vault.id 298 | frontend_ip_configuration_name = "vault" 299 | protocol = "Tcp" 300 | frontend_port = "80" 301 | backend_port = "80" 302 | backend_address_pool_ids = [azurerm_lb_backend_address_pool.vault.id] 303 | probe_id = azurerm_lb_probe.haproxy_stats.id 304 | disable_outbound_snat = true 305 | } 306 | 307 | resource azurerm_lb_rule vault { 308 | name = "vault" 309 | loadbalancer_id = azurerm_lb.vault.id 310 | frontend_ip_configuration_name = "vault" 311 | protocol = "Tcp" 312 | frontend_port = "443" 313 | backend_port = "443" 314 | backend_address_pool_ids = [azurerm_lb_backend_address_pool.vault.id] 315 | probe_id = azurerm_lb_probe.vault.id 316 | disable_outbound_snat = true 317 | } 318 | 319 | resource azurerm_lb_probe consul { 320 | name = "consul" 321 | loadbalancer_id = azurerm_lb.vault.id 322 | protocol = "Tcp" 323 | port = "8501" 324 | } 325 | 326 | resource azurerm_lb_probe haproxy_stats { 327 | name = "haproxy-stats" 328 | loadbalancer_id = azurerm_lb.vault.id 329 | protocol = "Tcp" 330 | port = "80" 331 | } 332 | 333 | resource azurerm_lb_probe vault { 334 | name = "vault" 335 | loadbalancer_id = azurerm_lb.vault.id 336 | protocol = "Tcp" 337 | port = "443" 338 | } 339 | 340 | resource azurerm_lb_outbound_rule consul { 341 | name = "consul" 342 | loadbalancer_id = azurerm_lb.vault.id 343 | backend_address_pool_id = azurerm_lb_backend_address_pool.consul.id 344 | protocol = "All" 345 | 346 | frontend_ip_configuration { 347 | name = "consul" 348 | } 349 | } 350 | 351 | resource azurerm_lb_outbound_rule vault { 352 | name = "vault" 353 | loadbalancer_id = azurerm_lb.vault.id 354 | backend_address_pool_id = azurerm_lb_backend_address_pool.vault.id 355 | protocol = "All" 356 | 357 | frontend_ip_configuration { 358 | name = "vault" 359 | } 360 | } 361 | 362 | resource azurerm_network_security_rule consul_lb_https { 363 | name = "consul-lb-https" 364 | resource_group_name = data.azurerm_resource_group.default.name 365 | network_security_group_name = "default" 366 | protocol = "Tcp" 367 | source_port_range = "*" 368 | destination_port_range = "8501" 369 | source_address_prefix = "AzureLoadBalancer" 370 | destination_application_security_group_ids = [azurerm_application_security_group.consul.id] 371 | access = "Allow" 372 | priority = "200" 373 | direction = "Inbound" 374 | } 375 | 376 | resource azurerm_network_security_rule haproxy_lb_http { 377 | name = "haproxy-lb-http" 378 | resource_group_name = data.azurerm_resource_group.default.name 379 | network_security_group_name = "default" 380 | protocol = "Tcp" 381 | source_port_range = "*" 382 | destination_port_range = "80" 383 | source_address_prefix = "AzureLoadBalancer" 384 | destination_application_security_group_ids = [azurerm_application_security_group.haproxy.id] 385 | access = "Allow" 386 | priority = "210" 387 | direction = "Inbound" 388 | } 389 | 390 | resource azurerm_network_security_rule haproxy_lb_https { 391 | name = "haproxy-lb-https" 392 | resource_group_name 
= data.azurerm_resource_group.default.name 393 | network_security_group_name = "default" 394 | protocol = "Tcp" 395 | source_port_range = "*" 396 | destination_port_range = "443" 397 | source_address_prefix = "AzureLoadBalancer" 398 | destination_application_security_group_ids = [azurerm_application_security_group.haproxy.id] 399 | access = "Allow" 400 | priority = "220" 401 | direction = "Inbound" 402 | } 403 | 404 | resource azurerm_network_security_rule consul_external_https { 405 | name = "consul-external-https" 406 | resource_group_name = data.azurerm_resource_group.default.name 407 | network_security_group_name = "default" 408 | protocol = "Tcp" 409 | source_port_range = "*" 410 | destination_port_range = "8501" 411 | source_address_prefixes = var.trusted_external_ips 412 | destination_application_security_group_ids = [azurerm_application_security_group.consul.id] 413 | access = "Allow" 414 | priority = "230" 415 | direction = "Inbound" 416 | } 417 | 418 | resource azurerm_network_security_rule haproxy_external_http { 419 | name = "haproxy-external-http" 420 | resource_group_name = data.azurerm_resource_group.default.name 421 | network_security_group_name = "default" 422 | protocol = "Tcp" 423 | source_port_range = "*" 424 | destination_port_range = "80" 425 | source_address_prefixes = var.trusted_external_ips 426 | destination_application_security_group_ids = [azurerm_application_security_group.haproxy.id] 427 | access = "Allow" 428 | priority = "240" 429 | direction = "Inbound" 430 | } 431 | 432 | resource azurerm_network_security_rule haproxy_external_https { 433 | name = "haproxy-external-https" 434 | resource_group_name = data.azurerm_resource_group.default.name 435 | network_security_group_name = "default" 436 | protocol = "Tcp" 437 | source_port_range = "*" 438 | destination_port_range = "443" 439 | source_address_prefixes = var.trusted_external_ips 440 | destination_application_security_group_ids = [azurerm_application_security_group.haproxy.id] 441 | access = "Allow" 442 | priority = "250" 443 | direction = "Inbound" 444 | } 445 | 446 | resource azurerm_network_security_rule consul_internal_rpc { 447 | name = "consul-internal-rpc" 448 | resource_group_name = data.azurerm_resource_group.default.name 449 | network_security_group_name = "default" 450 | protocol = "Tcp" 451 | source_port_range = "*" 452 | destination_port_range = "8300" 453 | source_application_security_group_ids = [azurerm_application_security_group.consul.id] 454 | destination_application_security_group_ids = [azurerm_application_security_group.consul.id] 455 | access = "Allow" 456 | priority = "260" 457 | direction = "Inbound" 458 | } 459 | 460 | resource azurerm_network_security_rule consul_internal_raft { 461 | name = "consul-internal-raft" 462 | resource_group_name = data.azurerm_resource_group.default.name 463 | network_security_group_name = "default" 464 | protocol = "*" 465 | source_port_range = "*" 466 | destination_port_range = "8301" 467 | source_application_security_group_ids = [azurerm_application_security_group.consul.id] 468 | destination_application_security_group_ids = [azurerm_application_security_group.consul.id] 469 | access = "Allow" 470 | priority = "270" 471 | direction = "Inbound" 472 | } 473 | 474 | resource azurerm_network_security_rule consul_internal_https { 475 | name = "consul-internal-https" 476 | resource_group_name = data.azurerm_resource_group.default.name 477 | network_security_group_name = "default" 478 | protocol = "Tcp" 479 | source_port_range = "*" 480 | 
destination_port_range = "8501" 481 | source_application_security_group_ids = [azurerm_application_security_group.consul.id] 482 | destination_application_security_group_ids = [azurerm_application_security_group.consul.id] 483 | access = "Allow" 484 | priority = "280" 485 | direction = "Inbound" 486 | } 487 | 488 | resource azurerm_network_security_rule consul_internal_grpc { 489 | name = "consul-internal-grpc" 490 | resource_group_name = data.azurerm_resource_group.default.name 491 | network_security_group_name = "default" 492 | protocol = "Tcp" 493 | source_port_range = "*" 494 | destination_port_range = "8503" 495 | source_application_security_group_ids = [azurerm_application_security_group.consul.id] 496 | destination_application_security_group_ids = [azurerm_application_security_group.consul.id] 497 | access = "Allow" 498 | priority = "290" 499 | direction = "Inbound" 500 | } 501 | 502 | resource azurerm_network_security_rule vault_external_https { 503 | name = "vault-external-https" 504 | resource_group_name = data.azurerm_resource_group.default.name 505 | network_security_group_name = "default" 506 | protocol = "Tcp" 507 | source_port_range = "*" 508 | destination_port_range = "8200" 509 | source_application_security_group_ids = [azurerm_application_security_group.vault.id] 510 | destination_application_security_group_ids = [azurerm_application_security_group.vault.id] 511 | access = "Allow" 512 | priority = "300" 513 | direction = "Inbound" 514 | } 515 | 516 | resource azurerm_network_security_rule vault_internal_https { 517 | name = "vault-internal-https" 518 | resource_group_name = data.azurerm_resource_group.default.name 519 | network_security_group_name = "default" 520 | protocol = "Tcp" 521 | source_port_range = "*" 522 | destination_port_range = "8201" 523 | source_application_security_group_ids = [azurerm_application_security_group.vault.id] 524 | destination_application_security_group_ids = [azurerm_application_security_group.vault.id] 525 | access = "Allow" 526 | priority = "310" 527 | direction = "Inbound" 528 | } 529 | 530 | resource azurerm_network_security_rule deny_all_consul { 531 | name = "deny-all-consul" 532 | resource_group_name = data.azurerm_resource_group.default.name 533 | network_security_group_name = "default" 534 | protocol = "*" 535 | source_port_range = "*" 536 | destination_port_range = "*" 537 | source_address_prefix = "*" 538 | destination_application_security_group_ids = [azurerm_application_security_group.consul.id] 539 | access = "Deny" 540 | priority = "320" 541 | direction = "Inbound" 542 | } 543 | 544 | resource azurerm_network_security_rule deny_all_vault { 545 | name = "deny-all-vault" 546 | resource_group_name = data.azurerm_resource_group.default.name 547 | network_security_group_name = "default" 548 | protocol = "*" 549 | source_port_range = "*" 550 | destination_port_range = "*" 551 | source_address_prefix = "*" 552 | destination_application_security_group_ids = [azurerm_application_security_group.vault.id] 553 | access = "Deny" 554 | priority = "330" 555 | direction = "Inbound" 556 | } 557 | 558 | resource azurerm_network_security_rule deny_all_haproxy { 559 | name = "deny-all-haproxy" 560 | resource_group_name = data.azurerm_resource_group.default.name 561 | network_security_group_name = "default" 562 | protocol = "*" 563 | source_port_range = "*" 564 | destination_port_range = "*" 565 | source_address_prefix = "*" 566 | destination_application_security_group_ids = [azurerm_application_security_group.haproxy.id] 567 | access = "Deny" 568 
| priority = "340" 569 | direction = "Inbound" 570 | } 571 | 572 | resource null_resource consul_acl_bootstrap { 573 | triggers = { 574 | scale_set_id = azurerm_linux_virtual_machine_scale_set.consul.id 575 | } 576 | 577 | provisioner "local-exec" { 578 | interpreter = ["/bin/bash", "-c"] 579 | environment = { 580 | CONSUL_CACERT = abspath("${path.module}/../../ansible/consul-ca.crt") 581 | CONSUL_CLIENT_CERT = abspath("${path.module}/../../ansible/consul-agents.crt") 582 | CONSUL_CLIENT_KEY = abspath("${path.module}/../../ansible/consul-agents.key") 583 | CONSUL_HTTP_ADDR = "https://${var.consul_hostname}:8501" 584 | CONSUL_TLS_SERVER_NAME = "consul" 585 | } 586 | 587 | command = < master-token 597 | consul acl policy create -token-file master-token -name "agent" -rules @${abspath("${path.module}/../../ansible/roles/consul/files/policies/agent.hcl")} > /dev/null 598 | consul acl token create -token-file master-token -policy-name "agent" -secret $(cat ${abspath("${path.module}/../../ansible/roles/consul/files/tokens/agent")}) > /dev/null 599 | consul acl policy create -token-file master-token -name "haproxy" -rules @${abspath("${path.module}/../../ansible/roles/consul/files/policies/haproxy.hcl")} > /dev/null 600 | consul acl token create -token-file master-token -policy-name "haproxy" -secret $(cat ${abspath("${path.module}/../../ansible/roles/consul/files/tokens/haproxy")}) > /dev/null 601 | consul acl policy create -token-file master-token -name "vault" -rules @${abspath("${path.module}/../../ansible/roles/consul/files/policies/vault.hcl")} > /dev/null 602 | consul acl token create -token-file master-token -policy-name "vault" -secret $(cat ${abspath("${path.module}/../../ansible/roles/consul/files/tokens/vault")}) > /dev/null 603 | EOF 604 | } 605 | } 606 | --------------------------------------------------------------------------------