├── .gitattributes ├── .github └── workflows │ ├── docs.yml │ ├── molecule.yml │ └── pre-commit.yml ├── .gitignore ├── .gitleaks.toml ├── .pre-commit-config.yaml ├── LICENSE ├── Makefile ├── README.md ├── ansible ├── .ansible-lint ├── ansible.cfg ├── goss │ ├── base.yml │ ├── client.yml │ ├── docker.yml │ ├── security.yml │ └── server.yml ├── inventory │ ├── group_vars │ │ ├── all.yml │ │ ├── client.yml │ │ ├── dev.yml │ │ ├── prod.yml │ │ └── server.yml │ ├── host_vars │ │ └── .gitkeep │ └── hosts ├── main.yml ├── molecule │ ├── common │ │ ├── converge.yml │ │ ├── molecule.yml │ │ └── verify.yml │ └── vault │ │ ├── converge.yml │ │ ├── generate_ca.yml │ │ ├── molecule.yml │ │ ├── prepare.yml │ │ ├── testca │ │ ├── intermediate │ │ │ ├── int.crt │ │ │ ├── int.csr │ │ │ └── int_key.pem │ │ ├── root │ │ │ ├── ca.crt │ │ │ ├── ca.csr │ │ │ └── ca_key.pem │ │ └── vault │ │ │ ├── .gitkeep │ │ │ └── ca-chain.crt │ │ └── verify.yml ├── playbooks │ └── common.yml └── roles │ ├── common │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── hashicorp.yml │ │ └── main.yml │ ├── consul-template │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── consul-template.hcl.j2 │ │ └── consul-template.service │ ├── consul │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── dnsmasq.yml │ │ ├── gossip.yml │ │ ├── main.yml │ │ └── tls.yml │ └── templates │ │ ├── client.hcl.j2 │ │ ├── consul.hcl.j2 │ │ ├── consul.service.j2 │ │ ├── dnsmasq.conf.j2 │ │ ├── healthcheck.sh.j2 │ │ └── server.hcl.j2 │ ├── issue_cert │ ├── defaults │ │ └── main.yml │ └── tasks │ │ └── main.yml │ ├── nomad │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── cni_plugins.yml │ │ ├── gossip.yml │ │ ├── main.yml │ │ ├── tls.yml │ │ └── vault_integration.yml │ └── templates │ │ ├── client.hcl.j2 │ │ ├── nomad-startup.sh.j2 │ │ ├── nomad.hcl.j2 │ │ ├── nomad.service.j2 │ │ └── server.hcl.j2 │ 
├── unseal_vault │ ├── defaults │ │ └── main.yml │ └── tasks │ │ ├── main.yml │ │ └── unseal.yml │ └── vault │ ├── defaults │ └── main.yml │ ├── files │ ├── bw_get.sh │ └── bw_store.sh │ ├── handlers │ └── main.yml │ ├── tasks │ ├── agent.yml │ ├── init.yml │ ├── main.yml │ └── server.yml │ └── templates │ ├── logrotate-vault.j2 │ ├── token_action.sh.j2 │ ├── vault-agent.hcl.j2 │ ├── vault-agent.service.j2 │ ├── vault.hcl.j2 │ └── vault.service.j2 ├── bin ├── generate-vars ├── import-cloud-image └── packer-fmt ├── certs └── .gitkeep ├── docs ├── book.toml └── src │ ├── SUMMARY.md │ ├── ansible │ ├── index.md │ ├── inventory.md │ └── roles │ │ ├── common.md │ │ ├── consul-template.md │ │ ├── consul.md │ │ ├── index.md │ │ ├── issue_cert.md │ │ ├── nomad.md │ │ ├── unseal_vault.md │ │ └── vault.md │ ├── apps │ ├── add_new.md │ ├── diun.md │ ├── index.md │ └── registry.md │ ├── backups.md │ ├── getting_started.md │ ├── images │ ├── cloud_image.md │ ├── index.md │ └── packer.md │ ├── index.md │ ├── prerequisites.md │ ├── provisioning.md │ ├── references │ ├── LOG.md │ ├── TODO.md │ └── issues.md │ └── terraform │ ├── index.md │ ├── postgres.md │ ├── proxmox.md │ └── vault.md ├── packer ├── base-clone │ ├── main.pkr.hcl │ └── variables.pkr.hcl └── base │ ├── bin │ ├── minimize.sh │ └── vagrant.sh │ ├── http │ ├── preseed.cfg │ └── preseed.pkrtpl │ ├── main.pkr.hcl │ └── variables.pkr.hcl ├── requirements.txt ├── requirements.yml └── terraform ├── cluster ├── .terraform.lock.hcl ├── main.tf └── variables.tf ├── modules ├── database │ ├── .terraform.lock.hcl │ ├── main.tf │ └── variables.tf └── vm │ ├── .terraform.lock.hcl │ ├── main.tf │ └── variables.tf ├── nomad ├── .terraform.lock.hcl ├── apps │ ├── actual.tpl │ ├── calibre_web.tpl │ ├── countdash.tpl │ ├── diun.nomad.hcl │ ├── diun.tpl │ ├── ghostfolio.nomad.hcl │ ├── linkding.tpl │ ├── minio.tpl │ ├── openbooks.tpl │ ├── paperless.tpl │ ├── pigallery2.tpl │ ├── postgres.tpl │ ├── registry.tpl │ ├── traefik.tpl │ ├── 
whoami.tpl │ └── yarr.tpl ├── main.tf └── variables.tf ├── postgres ├── .terraform.lock.hcl ├── main.tf └── variables.tf └── vault ├── .gitignore ├── .terraform.lock.hcl ├── auth.tf ├── kv.tf ├── pki.tf ├── policies.tf ├── policies ├── admin.hcl ├── ansible.hcl ├── consul_template.hcl ├── kvuser.hcl ├── nomad_diun.hcl ├── nomad_linkding.hcl ├── nomad_minio.hcl ├── nomad_paperless.hcl ├── nomad_registry.hcl ├── nomad_startup.hcl ├── nomad_token.hcl ├── nomad_traefik.hcl └── nomad_yarr.hcl ├── provider.tf ├── roles.tf ├── root.tf └── variables.tf /.gitattributes: -------------------------------------------------------------------------------- 1 | *.yml linguist-detectable=true 2 | -------------------------------------------------------------------------------- /.github/workflows/docs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: mdbook 3 | on: 4 | push: 5 | branches: [master] 6 | paths: 7 | - 'docs/**' 8 | - 'README.md' 9 | pull_request: 10 | paths: 11 | - 'docs/**' 12 | - 'README.md' 13 | 14 | jobs: 15 | build: 16 | runs-on: ubuntu-latest 17 | permissions: 18 | contents: write 19 | if: github.event_name == 'push' && github.ref == 'refs/heads/master' 20 | steps: 21 | - name: Checkout codebase 22 | uses: actions/checkout@v4 23 | 24 | - name: Install latest mdbook 25 | run: | 26 | tag=$(curl 'https://api.github.com/repos/rust-lang/mdbook/releases/latest' | jq -r '.tag_name') 27 | url="https://github.com/rust-lang/mdbook/releases/download/${tag}/mdbook-${tag}-x86_64-unknown-linux-gnu.tar.gz" 28 | mkdir mdbook 29 | curl -sSL $url | tar -xz --directory=./mdbook 30 | echo `pwd`/mdbook >> $GITHUB_PATH 31 | 32 | - name: Build mdbook 33 | run: | 34 | cd docs 35 | mdbook build 36 | 37 | - name: Deploy to GitHub Pages 38 | run: | 39 | git worktree add gh-pages 40 | git config user.name "Deploy from CI" 41 | git config user.email "" 42 | 43 | cd gh-pages 44 | # Delete the ref to avoid keeping history. 
45 | git update-ref -d refs/heads/gh-pages 46 | rm -rf * 47 | mv ../docs/book/* . 48 | git add . 49 | git commit -m "Deploy $GITHUB_SHA to gh-pages" 50 | git push --force --set-upstream origin gh-pages 51 | -------------------------------------------------------------------------------- /.github/workflows/molecule.yml: -------------------------------------------------------------------------------- 1 | name: molecule 2 | on: workflow_dispatch 3 | 4 | jobs: 5 | molecule: 6 | runs-on: macos-10.15 7 | steps: 8 | - name: Checkout codebase 9 | uses: actions/checkout@v3 10 | 11 | - name: Setup Python3 12 | uses: actions/setup-python@v4 13 | with: 14 | python-version: 3.x 15 | 16 | - name: Cache pip dependencies 17 | uses: actions/cache@v3 18 | with: 19 | path: ~/.cache/pip 20 | key: pip-ansible 21 | 22 | - name: Cache Vagrant box 23 | uses: actions/cache@v3 24 | with: 25 | path: ~/.vagrant.d/boxes 26 | key: ${{ runner.os }}-vagrant-${{ hashFiles('ansible/molecule/common/molecule.yml') }} 27 | restore-keys: | 28 | ${{ runner.os }}-vagrant- 29 | 30 | - name: Install dependencies 31 | run: | 32 | pip install -q --disable-pip-version-check wheel 33 | pip install -q --disable-pip-version-check ansible molecule-vagrant 34 | env: 35 | PIP_CACHE_DIR: ~/.cache/pip 36 | 37 | - name: Test common 38 | run: molecule test -s common 39 | working-directory: ansible 40 | env: 41 | ANSIBLE_FORCE_COLOR: "True" 42 | -------------------------------------------------------------------------------- /.github/workflows/pre-commit.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: pre-commit 3 | on: 4 | push: 5 | branches: [master] 6 | 7 | jobs: 8 | pre-commit: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - name: Checkout codebase 12 | uses: actions/checkout@v4 13 | 14 | - name: Setup Python3 15 | uses: actions/setup-python@v4 16 | with: 17 | python-version: '3.11' 18 | 19 | - name: Pre-commit 20 | uses: pre-commit/action@v3.0.0 21 | with: 22 | 
extra_args: --all-files 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .cache/* 2 | .venv/* 3 | cloud-config 4 | 5 | # secrets 6 | certs/* 7 | !certs/.gitkeep 8 | *.crt 9 | secrets.* 10 | vault.txt 11 | 12 | # ansible 13 | **/test.yml 14 | **/testca/vault/vault* 15 | 16 | # terraform 17 | **/.terraform/* 18 | *.tfstate 19 | *.tfstate.* 20 | *.tfvars 21 | tf_ansible_* 22 | 23 | # packer 24 | *.pkrvars.hcl 25 | **/builds/* 26 | 27 | # vagrant 28 | **/.vagrant/* 29 | 30 | # nomad 31 | input.vars 32 | 33 | # docs 34 | **/site/* 35 | **/book/* 36 | **/public/* 37 | -------------------------------------------------------------------------------- /.gitleaks.toml: -------------------------------------------------------------------------------- 1 | [extend] 2 | useDefault = true 3 | 4 | [allowlist] 5 | # testing CA for Vault 6 | paths = [ 7 | '''ansible/molecule/vault/testca''', 8 | '''ansible/molecule/vault/testca\/\.*''', 9 | ] 10 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | default_stages: [push, commit] 2 | 3 | repos: 4 | - repo: https://github.com/pre-commit/pre-commit-hooks 5 | rev: v4.5.0 6 | hooks: 7 | - id: trailing-whitespace 8 | - id: end-of-file-fixer 9 | - id: check-merge-conflict 10 | - id: check-added-large-files 11 | 12 | - repo: https://github.com/pre-commit/pre-commit-hooks 13 | rev: v4.5.0 14 | hooks: 15 | - id: detect-private-key 16 | exclude: ansible/molecule/vault/testca 17 | 18 | - repo: https://github.com/zricethezav/gitleaks 19 | rev: v8.18.0 20 | hooks: 21 | - id: gitleaks-docker 22 | 23 | - repo: https://github.com/antonbabenko/pre-commit-terraform 24 | rev: v1.83.5 25 | hooks: 26 | - id: terraform_fmt 27 | args: 28 | - --args=-diff 29 | - --args=-recursive 30 | - 
id: terraform_validate 31 | 32 | - repo: local 33 | hooks: 34 | - id: packer_fmt 35 | name: Packer fmt 36 | description: Rewrites all Packer files in canonical format 37 | language: script 38 | entry: ./bin/packer-fmt 39 | files: (\.pkr\.(hcl|json)|\.pkrvars\.hcl)$ 40 | require_serial: true 41 | 42 | - repo: https://github.com/gruntwork-io/pre-commit 43 | rev: v0.1.22 44 | hooks: 45 | - id: packer-validate 46 | 47 | # - repo: https://github.com/ansible-community/ansible-lint 48 | # rev: v6.22.0 49 | # hooks: 50 | # - id: ansible-lint 51 | # entry: ansible-lint -c ansible/.ansible-lint ansible 52 | # pass_filenames: false 53 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2023 kencx 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: venv pre-commit docs 2 | 3 | venv: 4 | @python3 -m venv .venv 5 | 6 | pip.install: requirements.txt venv 7 | source .venv/bin/activate && pip install -r $< --disable-pip-version-check -q 8 | 9 | pre-commit.install: pip.install 10 | source .venv/bin/activate && pre-commit install 11 | 12 | pre-commit: 13 | pre-commit run --all-files 14 | 15 | galaxy.install: requirements.yml 16 | ansible-galaxy install -f -r $< 17 | 18 | # packer 19 | packer.validate: 20 | cd packer/base-clone && packer validate -var-file="auto.pkrvars.hcl" . 21 | 22 | packer.base: 23 | cd packer/base-clone && packer build -var-file="auto.pkrvars.hcl" . 24 | 25 | # molecule 26 | mol = create converge verify destroy test login prepare 27 | mol.$(mol): 28 | cd ansible && molecule $@ -s $(scen) 29 | 30 | mol.list: 31 | cd ansible && molecule list 32 | 33 | docs: 34 | mdbook serve docs 35 | 36 | vars.generate: bin/generate-vars 37 | source .venv/bin/activate && bin/generate-vars 38 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Hubble Homelab 2 | 3 | **[Documentation](https://kencx.github.io/homelab)** 4 | 5 | This repository contains infrastructure-as-code for the automated deployment and 6 | configuration, and management of a Hashicorp (Nomad + Consul + Vault) cluster on 7 | Proxmox. 8 | 9 | ## Disclaimer 10 | 11 | This project is in alpha status and subject to 12 | [bugs](https://kencx.github.io/homelab/references/issues) and breaking changes. 13 | 14 | Please do not run any code on your machine without understanding the 15 | provisioning flow, in case of data loss. Some playbooks may perform destructive 16 | actions that are irreversible! 
17 | 18 | ## Overview 19 | 20 | This project aims to provision a full Hashicorp cluster in a **semi-automated** 21 | manner. It utilizes Packer, Ansible and Terraform: 22 | 23 | 1. Packer creates base Proxmox VM templates from cloud images and ISOs 24 | 2. Terraform provisions cluster nodes by cloning existing VM templates 25 | 3. Ansible installs and configures Vault, Consul, Nomad on cluster nodes 26 | 27 | It comprises minimally of one server and one client node with no high 28 | availability (HA). The nodes run Vault, Consul and Nomad as a cluster. 29 | 30 | To support HA, the setup can be further expanded to at least three server nodes 31 | and multiple client nodes hosted on a Proxmox cluster, spanning multiple 32 | physical machines. 33 | 34 | ## Features 35 | 36 | - [x] Golden image creation with Packer 37 | - [x] Declarative configuration of Proxmox VMs and Vault with Terraform 38 | - [x] Automated post-provisioning with Ansible 39 | - [x] Nomad container scheduling and orchestration 40 | - [x] Consul service discovery 41 | - [x] Secure node communication via mTLS 42 | - [x] Personal Certificate Authority hosted on Vault 43 | - [x] Secrets management, retrieval and rotation with Vault 44 | - [x] Automated certificate management with Vault and consul-template 45 | - [x] Let's Encrypt certificates on Traefik reverse proxy 46 | 47 | ## Getting Started 48 | 49 | See the [documentation](https://kencx.github.io/homelab/getting_started) for more 50 | information on the concrete steps to configure and provision the cluster. 51 | 52 | ## Folder Structure 53 | 54 | ```bash 55 | . 
56 | ├── ansible/ 57 | │ ├── roles 58 | │ ├── playbooks 59 | │ ├── inventory # inventory files 60 | │ └── goss # goss config 61 | ├── bin # custom scripts 62 | ├── packer/ 63 | │ ├── base # VM template from ISO 64 | │ └── base-clone # VM template from existing template 65 | └── terraform/ 66 | ├── cluster # config for cluster 67 | ├── dev # config where I test changes 68 | ├── minio # config for Minio buckets 69 | ├── modules # tf modules 70 | ├── nomad # nomad jobs 71 | ├── postgres # config for Postgres DB users 72 | ├── proxmox # config for Proxmox accounts 73 | └── vault # config for Vault 74 | ``` 75 | 76 | ## Limitations 77 | 78 | - Manual Vault unseal on reboot 79 | - Inter-job dependencies are [not supported](https://github.com/hashicorp/nomad/issues/545) in Nomad 80 | - Vault agent is run as root 81 | 82 | See [issues](https://kencx.github.io/homelab/references/issues) for more information. 83 | 84 | ## Acknowledgements 85 | 86 | - [CGamesPlay/infra](https://github.com/CGamesPlay/infra) 87 | - [assareh/homelab](https://github.com/assareh/home-lab) 88 | - [RealOrangeOne/infrastructure](https://github.com/RealOrangeOne/infrastructure) 89 | -------------------------------------------------------------------------------- /ansible/.ansible-lint: -------------------------------------------------------------------------------- 1 | # exclude_paths included in this file are parsed relative to this file's location 2 | # and not relative to the CWD of execution. CLI arguments passed to the --exclude 3 | # option will be parsed relative to the CWD of execution.
4 | exclude_paths: 5 | - .cache/ 6 | - .github/ 7 | - inventory/ 8 | - ${HOME}/.ansible/roles/ 9 | 10 | skip_list: 11 | - fqcn-builtins 12 | - no-handler 13 | 14 | warn_list: 15 | - parser-error 16 | - command-instead-of-module 17 | - git-latest 18 | - yaml 19 | - no-relative-paths 20 | - risky-file-permissions 21 | 22 | mock_roles: 23 | - kencx.ansible.ansible 24 | - kencx.ansible.nfs 25 | 26 | kinds: 27 | - vars: "**/vars.yml" 28 | - vars: "**/group_vars/*.yml" 29 | - vars: "**/defaults/*.yml" 30 | - tasks: "**/tasks/*.yml" 31 | - playbook: "**/*.{yml,yaml}" 32 | -------------------------------------------------------------------------------- /ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | force_color=True 3 | stdout_callback=yaml 4 | 5 | interpreter_python=python3 6 | ansible_managed=This file is managed by Ansible.%n 7 | date: %Y-%m-%d %H:%M:%S 8 | host_key_checking=False 9 | 10 | inventory=../terraform/cluster/tf_ansible_inventory,inventory/hosts 11 | roles_path=./roles:~/.ansible/roles 12 | -------------------------------------------------------------------------------- /ansible/goss/base.yml: -------------------------------------------------------------------------------- 1 | user: 2 | debian: 3 | exists: true 4 | groups: 5 | - debian 6 | - sudo 7 | - docker 8 | home: /home/debian 9 | 10 | file: 11 | /mnt/storage: 12 | exists: true 13 | mode: "0755" 14 | owner: debian 15 | group: debian 16 | filetype: directory 17 | 18 | package: 19 | nomad: 20 | installed: true 21 | consul: 22 | installed: true 23 | vault: 24 | installed: true 25 | consul-template: 26 | installed: true 27 | nfs-common: 28 | installed: true 29 | jq: 30 | installed: true 31 | unzip: 32 | installed: true 33 | 34 | gossfile: 35 | docker.yml: {} 36 | security.yml: {} 37 | -------------------------------------------------------------------------------- /ansible/goss/client.yml: 
-------------------------------------------------------------------------------- 1 | port: 2 | # nomad ui 3 | tcp6:4646: 4 | listening: true 5 | # consul https ui 6 | tcp6:8501: 7 | listening: true 8 | # consul dns 9 | tcp6:8600: 10 | listening: true 11 | 12 | service: 13 | nomad: 14 | enabled: true 15 | running: true 16 | consul: 17 | enabled: true 18 | running: true 19 | consul-template: 20 | enabled: true 21 | running: true 22 | 23 | gossfile: 24 | base.yml: {} 25 | -------------------------------------------------------------------------------- /ansible/goss/docker.yml: -------------------------------------------------------------------------------- 1 | service: 2 | docker: 3 | enabled: true 4 | running: true 5 | 6 | package: 7 | docker-ce: 8 | installed: true 9 | docker-ce-cli: 10 | installed: true 11 | docker-compose-plugin: 12 | installed: true 13 | -------------------------------------------------------------------------------- /ansible/goss/security.yml: -------------------------------------------------------------------------------- 1 | port: 2 | tcp:22: 3 | listening: true 4 | ip: 5 | - 0.0.0.0 6 | 7 | service: 8 | sshd: 9 | enabled: true 10 | running: true 11 | fail2ban: 12 | enabled: true 13 | running: true 14 | 15 | file: 16 | /etc/ssh/sshd_config: 17 | exists: true 18 | mode: "0600" 19 | owner: root 20 | group: root 21 | contains: 22 | - "PermitRootLogin no" 23 | - "PasswordAuthentication no" 24 | - "PermitEmptyPasswords no" 25 | -------------------------------------------------------------------------------- /ansible/goss/server.yml: -------------------------------------------------------------------------------- 1 | port: 2 | # nomad ui 3 | tcp6:4646: 4 | listening: true 5 | # consul https ui 6 | tcp6:8501: 7 | listening: true 8 | # consul dns 9 | tcp6:8600: 10 | listening: true 11 | # vault 12 | tcp:8200: 13 | listening: true 14 | 15 | service: 16 | nomad: 17 | enabled: true 18 | running: true 19 | consul: 20 | enabled: true 21 | running: true 22 
| vault: 23 | enabled: true 24 | running: true 25 | vault-agent: 26 | enabled: true 27 | running: true 28 | consul-template: 29 | enabled: true 30 | running: true 31 | 32 | gossfile: 33 | base.yml: {} 34 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | common_dotfiles: 3 | - url: https://raw.githubusercontent.com/kencx/dotfiles/master/remote/vim/.vim/vimrc 4 | dest: "/home/{{ common_user }}/.vimrc" 5 | - url: https://raw.githubusercontent.com/kencx/dotfiles/master/dots/bash/.bash_aliases 6 | dest: "/home/{{ common_user }}/.bash_aliases" 7 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | consul_vault_addr: "https://{{ server_ip }}:8200" 3 | consul_common_name: "client.dc1.consul" 4 | consul_ip_sans: "127.0.0.1, {{ client_ip }}" 5 | nomad_vault_addr: "https://{{ server_ip }}:8200" 6 | nomad_common_name: "client.global.nomad" 7 | nomad_ip_sans: "127.0.0.1, {{ client_ip }}" 8 | 9 | nfs_share_mounts: 10 | - src: "10.10.10.102:/home/debian/apps/prod-apps" 11 | path: "/mnt/storage" 12 | opts: "defaults,bg,intr,_netdev,retry=3" 13 | state: mounted 14 | - src: "10.10.10.102:/home/debian/syncthing/sync/books" 15 | path: "/mnt/books" 16 | opts: "defaults,bg,intr,_netdev,retry=3" 17 | state: mounted 18 | - src: "10.10.10.102:/home/debian/syncthing/sync/paper" 19 | path: "/mnt/paper" 20 | opts: "defaults,bg,intr,_netdev,retry=3" 21 | state: mounted 22 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/dev.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # server_ip: "{{ hostvars[groups['dev'] | difference(groups['client']) | 
first]['ansible_default_ipv4']['address'] }}" 3 | server_ip: "10.10.10.130" 4 | client_ip: "10.10.10.131" 5 | ansible_user: "debian" 6 | 7 | # ssl 8 | ssl_remote_tls_dir: "/opt/vault/tls" 9 | ssl_remote_ca_trust_store_dir: "/usr/share/ca-certificates/vault" 10 | ssl_ca_chain_name: "ca" 11 | ssl_remote_user: vault 12 | ssl_remote_cert_name: "vault" 13 | ssl_remote_cert_common_name: "vault" 14 | ssl_remote_cert_san: 15 | - "DNS:vault.service.consul" 16 | 17 | # vault 18 | vault_config_dir: "/etc/vault.d" 19 | vault_data_dir: "/opt/vault/data" 20 | vault_tls_dir: "/opt/vault/tls" 21 | vault_ca_cert_dir: "{{ ssl_remote_ca_trust_store_dir }}" 22 | vault_store_bw: false 23 | vault_server_fqdn: "{{ server_ip }}" 24 | vault_terraform_workspace: "dev" 25 | vault_admin_password: "password" 26 | vault_kvuser_password: "password" 27 | 28 | # consul-template 29 | consul_template_config_dir: "/etc/consul-template" 30 | vault_agent_token_file_path: "{{ vault_data_dir }}/.vault-token" 31 | 32 | # consul 33 | consul_config_dir: "/etc/consul.d" 34 | consul_data_dir: "/opt/consul" 35 | consul_tls_dir: "{{ consul_data_dir }}/tls" 36 | consul_server_ip: "{{ server_ip }}" 37 | setup_consul_watches: false 38 | 39 | # nomad 40 | nomad_config_dir: "/etc/nomad.d" 41 | nomad_data_dir: "/opt/nomad/data" 42 | nomad_tls_dir: "/opt/nomad/data/tls" 43 | nomad_server_ip: "{{ server_ip }}" 44 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/prod.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # server_ip: "{{ hostvars[groups['prod'] | difference(groups['client']) | first]['ansible_default_ipv4']['address'] }}" 3 | server_ip: "10.10.10.110" 4 | client_ip: "10.10.10.111" 5 | ansible_user: "debian" 6 | 7 | # ssl 8 | ssl_remote_tls_dir: "/opt/vault/tls" 9 | ssl_remote_ca_trust_store_dir: "/usr/share/ca-certificates/vault" 10 | ssl_ca_chain_name: "ca" 11 | ssl_remote_user: vault 12 | 
ssl_remote_cert_name: "vault" 13 | ssl_remote_cert_common_name: "vault" 14 | ssl_remote_cert_san: 15 | - "DNS:vault.service.consul" 16 | 17 | # vault 18 | vault_config_dir: "/etc/vault.d" 19 | vault_data_dir: "/opt/vault/data" 20 | vault_tls_dir: "/opt/vault/tls" 21 | vault_ca_cert_dir: "{{ ssl_remote_ca_trust_store_dir }}" 22 | vault_store_bw: false 23 | vault_server_fqdn: "{{ server_ip }}" 24 | vault_terraform_workspace: "default" 25 | vault_admin_password: "password" 26 | vault_kvuser_password: "password" 27 | 28 | # consul-template 29 | consul_template_config_dir: "/etc/consul-template" 30 | vault_agent_token_file_path: "{{ vault_data_dir }}/.vault-token" 31 | 32 | # consul 33 | consul_config_dir: "/etc/consul.d" 34 | consul_data_dir: "/opt/consul" 35 | consul_tls_dir: "{{ consul_data_dir }}/tls" 36 | consul_server_ip: "{{ server_ip }}" 37 | consul_ttl: "168h" 38 | consul_upstream_dns_address: ["192.168.86.49", "1.1.1.1"] 39 | setup_consul_watches: false 40 | 41 | # nomad 42 | nomad_config_dir: "/etc/nomad.d" 43 | nomad_data_dir: "/opt/nomad/data" 44 | nomad_tls_dir: "/opt/nomad/data/tls" 45 | nomad_server_ip: "{{ server_ip }}" 46 | nomad_ttl: "168h" 47 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | consul_vault_addr: "https://{{ server_ip }}:8200" 3 | consul_common_name: "server.dc1.consul" 4 | consul_ip_sans: "127.0.0.1, {{ server_ip }}" 5 | nomad_vault_addr: "https://{{ server_ip }}:8200" 6 | nomad_common_name: "server.global.nomad" 7 | nomad_ip_sans: "127.0.0.1, {{ server_ip }}" 8 | -------------------------------------------------------------------------------- /ansible/inventory/host_vars/.gitkeep: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/kencx/homelab/1c451e1634f818e9d912bb054db47988cb083989/ansible/inventory/host_vars/.gitkeep -------------------------------------------------------------------------------- /ansible/inventory/hosts: -------------------------------------------------------------------------------- 1 | [cluster:children] 2 | server 3 | client 4 | prod 5 | dev 6 | -------------------------------------------------------------------------------- /ansible/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: server, !dev 3 | become: true 4 | tags: server 5 | pre_tasks: 6 | - name: Wait for cloud-init 7 | command: cloud-init status --wait 8 | changed_when: false 9 | 10 | roles: 11 | - name: Create root, intermediate CA for Vault 12 | role: kencx.ansible_collection.ssl 13 | become: true 14 | run_once: true 15 | 16 | - role: vault 17 | - role: consul-template 18 | - role: consul 19 | - role: nomad 20 | 21 | tasks: 22 | - name: Goss smoke test 23 | import_role: 24 | name: kencx.ansible_collection.goss 25 | vars: 26 | goss_dir: "./goss" 27 | goss_file: "server.yml" 28 | 29 | - hosts: client, !dev 30 | become: true 31 | tags: client 32 | pre_tasks: 33 | - name: Wait for cloud-init 34 | command: cloud-init status --wait 35 | changed_when: false 36 | 37 | roles: 38 | - name: Mount NFS shares on client 39 | role: kencx.ansible_collection.nfs 40 | tags: nfs 41 | 42 | - role: vault 43 | vars: 44 | vault_server: false 45 | - role: consul-template 46 | - role: consul 47 | vars: 48 | consul_server: false 49 | consul_client: true 50 | - role: nomad 51 | vars: 52 | nomad_server: false 53 | nomad_client: true 54 | 55 | tasks: 56 | - name: Allow HTTP, HTTPS ports on ufw 57 | become: true 58 | ufw: 59 | rule: "allow" 60 | port: "{{ item }}" 61 | with_items: 62 | - 80 63 | - 443 64 | 65 | - name: Goss smoke test 66 | import_role: 67 | name: kencx.ansible_collection.goss 68 | vars: 69 | goss_dir: "./goss" 70 | 
goss_file: "client.yml" 71 | -------------------------------------------------------------------------------- /ansible/molecule/common/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | roles: 6 | - role: common 7 | -------------------------------------------------------------------------------- /ansible/molecule/common/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: vagrant 6 | provider: 7 | name: libvirt 8 | default_box: "debian/bullseye64" 9 | platforms: 10 | - name: instance 11 | box: "debian/bullseye64" 12 | config_options: 13 | synced_folder: false 14 | 15 | provisioner: 16 | name: ansible 17 | env: 18 | ANSIBLE_ROLES_PATH: "../../roles" 19 | ANSIBLE_STDOUT_CALLBACK: yaml 20 | verifier: 21 | name: ansible 22 | -------------------------------------------------------------------------------- /ansible/molecule/common/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Verify 3 | hosts: all 4 | gather_facts: false 5 | become: true 6 | tasks: 7 | - name: Include default vars 8 | include_vars: 9 | dir: "{{ role_directory }}/defaults/" 10 | extensions: 11 | - 'yml' 12 | - 'yaml' 13 | vars: 14 | role_directory: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') }}/roles/{{ lookup('env', 'MOLECULE_SCENARIO_NAME') }}" 15 | 16 | - name: User created 17 | getent: 18 | database: passwd 19 | key: "{{ common_user }}" 20 | 21 | - name: Packages installed 22 | apt: 23 | name: "{{ item }}" 24 | state: present 25 | with_items: "{{ common_packages }}" 26 | 27 | - name: Hashicorp tools installed 28 | apt: 29 | name: "{{ item }}" 30 | state: present 31 | with_items: 32 | - nomad 33 | - consul 34 | - vault 35 | - consul-template 36 | 37 | - name: Nomad stopped 38 | systemd: 39 | name: nomad 40 | state: stopped 
41 | 42 | - name: Bitwarden installed 43 | stat: 44 | path: "/usr/bin/bw" 45 | register: bw 46 | changed_when: false 47 | failed_when: bw.stat.exists != true 48 | 49 | - name: NFS share directories created 50 | file: 51 | path: "{{ common_nfs_dir }}" 52 | mode: 0755 53 | owner: "{{ common_user }}" 54 | group: "{{ common_user }}" 55 | state: directory 56 | -------------------------------------------------------------------------------- /ansible/molecule/vault/converge.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Converge 3 | hosts: all 4 | become: true 5 | roles: 6 | - role: vault 7 | -------------------------------------------------------------------------------- /ansible/molecule/vault/molecule.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependency: 3 | name: galaxy 4 | driver: 5 | name: vagrant 6 | provider: 7 | name: libvirt 8 | default_box: "debian/bullseye64" 9 | platforms: 10 | - name: server 11 | box: "debian/bullseye64" 12 | config_options: 13 | synced_folder: false 14 | groups: 15 | - mol_server 16 | 17 | provisioner: 18 | name: ansible 19 | env: 20 | ANSIBLE_ROLES_PATH: "../../roles" 21 | ANSIBLE_STDOUT_CALLBACK: yaml 22 | verifier: 23 | name: ansible 24 | -------------------------------------------------------------------------------- /ansible/molecule/vault/testca/intermediate/int.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIFHTCCAwWgAwIBAgIUI+/NWraxsKNhHDY9i/A3LvKj3l0wDQYJKoZIhvcNAQEL 3 | BQAwEjEQMA4GA1UEAwwHUm9vdCBDQTAeFw0yMzAyMDMwNzUyNDVaFw0zMzAxMzEw 4 | NzUyNDVaMBoxGDAWBgNVBAMMD0ludGVybWVkaWF0ZSBDQTCCAiIwDQYJKoZIhvcN 5 | AQEBBQADggIPADCCAgoCggIBAL1Ij4V1NmXVZ3/P17DQq2HAxn+GllSVC8g+73UC 6 | 5+jmmnKWK+s1aYXzBthNMBE30ESc4Oy0kWXDm5cKgGbjDcmRuWHPGeb68zX9F3ln 7 | c08zIKR0HV0YYbfFt0kXwVrHqOBNDyspy1WYq9n7nndAQywL0OTXGQ108t/cVWxk 8 | 
hKoxKmYNxu7CHK4upj3wboVXFIqZwjLjud5iQzT3iJjPpReAWMt1YNUt9KuLf9Zp 9 | f2h0G2R2yzff2msMZrcYarzVBkkwWj+gcO/SKHLa8CxUswAmzt3jK69+0o7cAjeJ 10 | qSr50RJSuf7URn8CsFL4hdXe/o5A64iDyh8BAkNf2JvWo2uILxBwCJVzToE4H++V 11 | JePf0nmnGlYU9vP+Nmdv8X5/qht5UoATDFBfKjZqCEZ+Rm2DUwb0TnD0ijlR0VGA 12 | FA3G2BTdSjLKxE7hiAq/MEjdrwI9tNomTfpeXjAmJhGu1opDhb0l0IubANXC50s6 13 | TBYtG6zcjcsb0rQy7TZwsBlovAUf4j6g7YzrYGF0V/yXo9DpUbyax1/wxY+YuavA 14 | hmso1uHBR4dwjWW2CxGLx3qE9O9flpla2OKyTNA+zS6KOhVhBSyIAFUESN4fsAeZ 15 | lcT4l+JedPB1EtWbgL8RheEtQwPZaqffD53ZJ/PcJItwBQl8Gyn+iWtL5DSeX33T 16 | zIjRAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwICBDAPBgNVHRMBAf8EBTADAQH/MB0G 17 | A1UdDgQWBBTtoEzFCo5JJ8wntFhr8CMNvjWPPDAfBgNVHSMEGDAWgBTXMwjLQfgS 18 | MHxHfHpv08GOJXXUUjANBgkqhkiG9w0BAQsFAAOCAgEArFnRjvitJeXiWL70GZho 19 | YQeOBTnqNhvkoukrgzxO795cQSL8KN3AScwa6AjwaBwPoB5s8Kyd/FG6EIjNQFGc 20 | v8YodxRcP00i71racvWguss+fvLx12f75wBdof7pXmMhT1KeoCD1n4lTogiocb6N 21 | NmNkMtBKudiVwM89wAhz//3/O+predXze2Y0D1wFfVD+abcqpDkZQrsXF+JvAXOB 22 | 6D0clfulJHR4CfnqqYnbHyWKYVUh62n7QBTopEUJKskpF0pHJeKZMK4FazxXBYOt 23 | Gxy5dKOhvsTOIJMhb2/tZTqP8tcMeez7r3pWi+GAY9r8l/OXAzBMIqEOkwymFa+F 24 | T+WpG8ce68XckNOL4FHtEMhxwd+4sj1siJe7l7Wu/egijovs959tKRkUT9U+o+30 25 | aAk5D2RyC7vd1HJcmvCzOry4g9CEijz38r4dnOw2CsLe+6MsB0RmUSEZxWBbRLNG 26 | oz3sqyXWvXPlmKVCQjKq4Yl0Nqxps3dSiAaUbqsy/9ZrXa8i/KSIif40j87qfHgY 27 | 6Tx9FS6yoKbr2ZLfCKKQbd3y7rH8hW80kL5XstgXgzrt8BiXVF4x5Tve3hKcZxal 28 | 00Nqaz6B9BsCzjTy+HOd0BJNSmBNqhTMnhxqHGp/QrFLjqiG7NS/GO7+GBocKCiV 29 | xa7VC2KjCXPEz3L1hDRdWk4= 30 | -----END CERTIFICATE----- 31 | -------------------------------------------------------------------------------- /ansible/molecule/vault/testca/intermediate/int.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIEkTCCAnkCAQAwGjEYMBYGA1UEAwwPSW50ZXJtZWRpYXRlIENBMIICIjANBgkq 3 | hkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAvUiPhXU2ZdVnf8/XsNCrYcDGf4aWVJUL 4 | yD7vdQLn6OaacpYr6zVphfMG2E0wETfQRJzg7LSRZcOblwqAZuMNyZG5Yc8Z5vrz 5 | 
Nf0XeWdzTzMgpHQdXRhht8W3SRfBWseo4E0PKynLVZir2fued0BDLAvQ5NcZDXTy 6 | 39xVbGSEqjEqZg3G7sIcri6mPfBuhVcUipnCMuO53mJDNPeImM+lF4BYy3Vg1S30 7 | q4t/1ml/aHQbZHbLN9/aawxmtxhqvNUGSTBaP6Bw79IoctrwLFSzACbO3eMrr37S 8 | jtwCN4mpKvnRElK5/tRGfwKwUviF1d7+jkDriIPKHwECQ1/Ym9aja4gvEHAIlXNO 9 | gTgf75Ul49/SeacaVhT28/42Z2/xfn+qG3lSgBMMUF8qNmoIRn5GbYNTBvROcPSK 10 | OVHRUYAUDcbYFN1KMsrETuGICr8wSN2vAj202iZN+l5eMCYmEa7WikOFvSXQi5sA 11 | 1cLnSzpMFi0brNyNyxvStDLtNnCwGWi8BR/iPqDtjOtgYXRX/Jej0OlRvJrHX/DF 12 | j5i5q8CGayjW4cFHh3CNZbYLEYvHeoT071+WmVrY4rJM0D7NLoo6FWEFLIgAVQRI 13 | 3h+wB5mVxPiX4l508HUS1ZuAvxGF4S1DA9lqp98Pndkn89wki3AFCXwbKf6Ja0vk 14 | NJ5ffdPMiNECAwEAAaAyMDAGCSqGSIb3DQEJDjEjMCEwDgYDVR0PAQH/BAQDAgIE 15 | MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBADeS2nAdXGDNUDWd 16 | 9aMB1CENsNfE1xFy4jLsnOXLpaWzCNBG/DgZmsT3n4aIUKkP9Rnz3ciJbems7c24 17 | Nes1f6st1OD66FeUQHpGaF4T/2cM039I46wevh85lsL+aOSYlhR8dOzS+99AkGXx 18 | PAbIzNykl++ivJd38CpCUPUYNDlPALx/EHpN1oq0bNaXHyYd77OnLM3f1No9agUA 19 | PIv86rpRsE/HDxlP5NQA5iDSVsqCET58iX9ns6NYNlTtrfHzwABaKtn+MBXXyoDi 20 | y4HS0vOEH4Iv6opKpF1FZH5OcxDqxCGpn/kow/oHMxcmtU/Nt0JDAX0cs8NQlNvZ 21 | cfTmsx4IdfJ2GruxJQgsxQAIZfeYNNSUJAd4FPpZK/gTLhg61BrU+qWvMJlO9/tN 22 | yQW970VKUsn0/fZiatJy7g/BH5vRltl6c1R3zbtxNK61JoPm71G8m4D3APP7Qbm2 23 | JF/geqAqw9zvBudNjF/8XbHpcGXvwcXVCzKprym4J6Xlc7IYLm01KPnrio6b4NX2 24 | XUweG19L/jIrH/KL8LUSBaQ+XiTX8jHiWEX6fpKSah6mo7HdPCgDtCZT42xMu8EN 25 | HF1lUtAdmP7lXWllvD9m5e6EJaxo3JUq2zEdbxzkPsIWz34Tf16sN+m8+20Kn4vl 26 | 4eoIfrRWPOBqtHVA81elA/2bB2sK 27 | -----END CERTIFICATE REQUEST----- 28 | -------------------------------------------------------------------------------- /ansible/molecule/vault/testca/intermediate/int_key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJKQIBAAKCAgEAvUiPhXU2ZdVnf8/XsNCrYcDGf4aWVJULyD7vdQLn6OaacpYr 3 | 6zVphfMG2E0wETfQRJzg7LSRZcOblwqAZuMNyZG5Yc8Z5vrzNf0XeWdzTzMgpHQd 4 | 
XRhht8W3SRfBWseo4E0PKynLVZir2fued0BDLAvQ5NcZDXTy39xVbGSEqjEqZg3G 5 | 7sIcri6mPfBuhVcUipnCMuO53mJDNPeImM+lF4BYy3Vg1S30q4t/1ml/aHQbZHbL 6 | N9/aawxmtxhqvNUGSTBaP6Bw79IoctrwLFSzACbO3eMrr37SjtwCN4mpKvnRElK5 7 | /tRGfwKwUviF1d7+jkDriIPKHwECQ1/Ym9aja4gvEHAIlXNOgTgf75Ul49/Seaca 8 | VhT28/42Z2/xfn+qG3lSgBMMUF8qNmoIRn5GbYNTBvROcPSKOVHRUYAUDcbYFN1K 9 | MsrETuGICr8wSN2vAj202iZN+l5eMCYmEa7WikOFvSXQi5sA1cLnSzpMFi0brNyN 10 | yxvStDLtNnCwGWi8BR/iPqDtjOtgYXRX/Jej0OlRvJrHX/DFj5i5q8CGayjW4cFH 11 | h3CNZbYLEYvHeoT071+WmVrY4rJM0D7NLoo6FWEFLIgAVQRI3h+wB5mVxPiX4l50 12 | 8HUS1ZuAvxGF4S1DA9lqp98Pndkn89wki3AFCXwbKf6Ja0vkNJ5ffdPMiNECAwEA 13 | AQKCAgAGnne0W0eycm6627YHbxibI75dPcPag9bEYx0FCru4232JpcI16Kl+v2Od 14 | Iz/bzkeYaqL77AmMlTVG6B1a27RC+PdSWgdmGh7wo5/TRUCVQ6yvy4auV3itUDrr 15 | UasXhVVcuJ0f7Yd4gAz8Y63kLilbLDObGQF9hLoshVwwXOh9uhfpNzCeb0fI+IUk 16 | M9jtjUDA4wUVOfzpL6b5fnF2pIY5pumT/dALzF5atFddVFWZ/btqfTP4iXmuFiIu 17 | Y2pyHYHpg8kYIgBpBoCXGZxd9H1I81J3FGdV7PmoeL2UGxqBD/jVavKpnFs1vE6V 18 | rC6j7rcYmuERpkgmYublG8ArnOsce1iP9TRBnfJReM15ujacLMPTDQcbpmjskN6V 19 | gB41HMnfInzNzmJaKUJPdjnMpASkTSOcGq7mmqsTB4toJMKM2DleJ/uGF+sM9hU2 20 | Bp7662+PFNk1blH8mIgXpD8Q0hDZQ9vzaFrW5Gj4BEwWQEB+ObT+1tsinTXlraaP 21 | 55UgV1fO1HMyKbSrO8LveBk9DalqYpfbqbuwBoIQ4d1shX2FlESp1pzja/8PfqDE 22 | aerWlr+H/EYDu9m9PuunMZhqQ1ThQKJccUFfSyCcKlMrf+zK7IOAmlgPQQPBaB/s 23 | jc+2H3GIjdBxJ9X2srYL34LycZ5Ed6WdFHSH4ouQIuOR+1h2hQKCAQEAzOzMSyRg 24 | 5UaE1bKDrc5ywX5pz/80iN2YFKbHFkuLN3awAKrznxO1pTOVzSdN0k2kUZ32zf4N 25 | lbYv8CItPkY5mzePZsJh2Oj5zGhKlYK8li0U7d4QaOP7RrXNLF9gUxDxzmyhgE90 26 | LZeV0iUz2pk+YdiTiS0wThATAN619wqjUviB2TKVnsj8mYKtdVykY0H9eQVyfX5A 27 | M329hGcMTtC6bmKzexswSSLajLzLz10VZrqGsMdfMC89Lxh9PD/4Zh/S8qizO4Tl 28 | JvgWc94JzH6xAQAsnaiiEnVx7VKaJlnGnQx+2whTSEKRXbBBivuc/xvhI/Tjo10Q 29 | plPaWLvLeNbA9QKCAQEA7HXBlkaqUznJmd3u21LRj9IdGARLoVCSa2bBwV797CE4 30 | cBaF0D/sSV4uY++XexjksrVmITsaq7B25SPhWuNZJw0j0lT7XwhHd685Pex9PIjv 31 | P8blDG03pBsWnYpdV3PKXciRGR6KQp2eovjSRjsDj10Qr2WwzIe+4DSy5NLn40/v 32 | 
RB0zyPDSvHg4pFaBvfpwDFKgyY0t53BhvWHMqpc3DJX1qibc7wHSNm4FGBHBoMxF 33 | A9EnP2o3Gig4LEoJZ04b8ne4poJbc1F5LYATSzNM5wpj3NFguXbjSfGiz9Ssi2Rn 34 | EuHmo+m5qVK2eLk7OyEOcDaAqJKN2uGDlJBbtaSO7QKCAQB8DUkqn84MXWEiYBt4 35 | hsZPPfBeGna597MNpVxVscuH+tBWxyYNgYfvhsfDVmy0x42WXtK+JnIg1BY2/ul2 36 | jlpnDP5BTaCgB4DYbbXgxjDVlFrcGBbE+GXvizS2DNKp1lMbDAXEP9TrmJz4hs7a 37 | IPRzlQDTBMj+KJroZumusADoxEkOq6GJatAHOeU5a6iT8am4rkR9lYTV5uNoGF2K 38 | BK8nDZbrCi1zyKaWld+nGwlOk9HfiAGmkKIwNVnytCLx54fyn49Ik42IxpwI7jvu 39 | UC9klK9tjVz3YaNi8CMi3ivufFb6zxJjZvDKz10N/n0urbjf5Go9RaRDm/VMy/pD 40 | n3VVAoIBAQDTMWGT/M2Hw+dB3CzICzJMyBdgKgX6sGH2NzL6qQMV8Z3czuD5XP6H 41 | kOzSKV4cfeFVlBsBty5dBiFLCz7t1Hz+GghShcB14okiZogzvJMmjA9T4SsWbD3z 42 | /pW2Y6x2vV6TYN3nl5VZZjoXyTDVtUO6uh566dJ8+wkKa0cUhSLDrdM//O0RnbY6 43 | yw2cCf+bVKDbALnHCqdjPMavRThtbN2oCg/+xvMxLWk5YA6ArutH2OoTPK1XQGCy 44 | phV1yTKY5/SiHTf40Bkp6CzY7LlOwyT+fhMvWHqRa5QZqQ4HniX14dkFzmoXIFvI 45 | 1a1QKfCzIRUqOCO1bB/yzNSezkh69d0JAoIBAQDCPbdeZ75Xzstj4Cz9j32bdoU5 46 | MR0E3e4JtbAm/SS3lhFPWBnsp62WTd9dXlDp5IvrWmtSnbJpNqdzE21rRP3+3UNb 47 | 95xdblcQPdOiqi/oTBSOCWjAF7WWhAUDS5SVS3P/ETaCGZgCONK5Jks3vOdu2PmH 48 | mA7yHJbO1lUCtDmckw2d3XGYPHCRx4MgnCnn7Uyn+G+9TH4tmFHDd+hwkzHSa0qR 49 | rxkrhe9qjywpHWSvtkryKCN9GeIvt3G4XAJUwmBxanY8ngJ1KTA3agTq0eCy+tvN 50 | q8N0FNxudvk1clu/4f+foXelYB0NhrACssBTS2fHWQwMq1BZ9/CVCXJDnJQh 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /ansible/molecule/vault/testca/root/ca.crt: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE----- 2 | MIIE9DCCAtygAwIBAgIUbE0l4KV6+5+6kWxz8F0n7GjAPe4wDQYJKoZIhvcNAQEL 3 | BQAwEjEQMA4GA1UEAwwHUm9vdCBDQTAeFw0yMzAyMDMwNzQzMzRaFw0zMzAxMzEw 4 | NzQzMzRaMBIxEDAOBgNVBAMMB1Jvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4IC 5 | DwAwggIKAoICAQC8aQWeBS67qdPrzi5t06wMfXl00bcpac7XneygoNJ88G7VYfLh 6 | gzxu7zK1pzDB6Cna4VB8/3F+tK3IjfieUiOtwIc6wyf5jiZj5KnWoOki0xqluUKv 7 | 
q9osRMw2pHUDbFonVL9hQ1oN9OT870D9sCrql9PIGeisP8V/c1cJeTGEu7GT09Yg 8 | oxUdz/HQi5wBrF9xu8FSvZLOZt2C3VxkOGUcQI5+WE0WiTOaAdGbjZWoKTKDl144 9 | DCOMwtg+NQ+XpmOJhvOSXmSth8psKN+zHRirp/7Y8/EpVjY0pZBfUXQXcbv0renE 10 | x29RowJACwWO1WMe0l1ZTOhsWCH3lGyV3dnEZLHsnrctyD6+fYf+qBgyXvOLMVWC 11 | 4+HaNeznl/I4iN0PX13zMRMebqi7gFXpc4cKgrVtxOZ4d941wEwUeuV5cxtaenPG 12 | 7DeCtViDomC6JlLgtop820azR13ap40EysLqXz0gxbXmvYQmxJU5GnjItS2gul9e 13 | 1NRViBTEPLsqRgcgZGI6EsDGuKGEBzQJ/mM6c6/qGq1X1C94ItFnn43uoujs9+b0 14 | PGqkeI/T6NwVjzsVrZGfp0I54zshcXsAWItGCGJJmBIqV9FqgUvQRns3KvbbGbPT 15 | gTrw83NESvi8EXM1dZlubHFOPImwvrBM0wYz92GDsKyv5VGz6PNNn4VkVwIDAQAB 16 | o0IwQDAOBgNVHQ8BAf8EBAMCAgQwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU 17 | 1zMIy0H4EjB8R3x6b9PBjiV11FIwDQYJKoZIhvcNAQELBQADggIBAHxFlw8HkLhd 18 | DLzvuImJ9SUrFmvp97/oHjdM2sgPAmuoWDmRl2zRt9bqhIGGcOh1vhOOdts215d7 19 | 74zQGQK8nvQjn0HDrt87YRAyxbyglOQjD5VQBBvCvPx6nbkXue/6o3YM0naktEwJ 20 | lEHMuMQeCT+J46nzsFLXn2khCSUlnsLwPQnlgI0HJZPB2kLfKV0ePFSVsnOrXgj+ 21 | M9a970GgwuH6wbLBZjagWlFgEY5k/2zmOV9vnGeMET0bc4Sqxj8hA7yEwWbJKfhQ 22 | bP8J/g/ZBE3VgBECN03C1s1iplsM3bktJGrDMG2SMEjfDndJ+7lbcKX/PhnuKOsN 23 | 2UcTAQanibDYNSHef/i88NF0e8KL5PIXa7fGCuDoFIDu9lccxC1M2bHlh2vwW5mE 24 | rC9bIRsGomfPSkM4IMh9pN/wivw17tU/Kzb/XHmxwTFKwXrO5Y6hjOHAK1nbp7wm 25 | 8cEtENaG+Rd0MuLpp82n+2UCOgS7I64e5bLSXAMQawzg2z3twDHo4Pvh3JBJblnA 26 | S/ZbqGbjHkUsKyBqRPwuR2vwRyh3VMUXTwQ/GBYf7fpinezJI9kBAzuMMpe6Dr+T 27 | k7du5pxOPlDKQl/e+W5Y0p9dvi4LfiwhAEmtIFTr6WSsvsEin0MehOS2Tz3KWmK9 28 | 7ekgJJZ+n3NvPFPfq0Hiel2JzanOa3hR 29 | -----END CERTIFICATE----- 30 | -------------------------------------------------------------------------------- /ansible/molecule/vault/testca/root/ca.csr: -------------------------------------------------------------------------------- 1 | -----BEGIN CERTIFICATE REQUEST----- 2 | MIIEiTCCAnECAQAwEjEQMA4GA1UEAwwHUm9vdCBDQTCCAiIwDQYJKoZIhvcNAQEB 3 | BQADggIPADCCAgoCggIBALxpBZ4FLrup0+vOLm3TrAx9eXTRtylpzted7KCg0nzw 4 | btVh8uGDPG7vMrWnMMHoKdrhUHz/cX60rciN+J5SI63AhzrDJ/mOJmPkqdag6SLT 5 | 
GqW5Qq+r2ixEzDakdQNsWidUv2FDWg305PzvQP2wKuqX08gZ6Kw/xX9zVwl5MYS7 6 | sZPT1iCjFR3P8dCLnAGsX3G7wVK9ks5m3YLdXGQ4ZRxAjn5YTRaJM5oB0ZuNlagp 7 | MoOXXjgMI4zC2D41D5emY4mG85JeZK2Hymwo37MdGKun/tjz8SlWNjSlkF9RdBdx 8 | u/St6cTHb1GjAkALBY7VYx7SXVlM6GxYIfeUbJXd2cRkseyety3IPr59h/6oGDJe 9 | 84sxVYLj4do17OeX8jiI3Q9fXfMxEx5uqLuAVelzhwqCtW3E5nh33jXATBR65Xlz 10 | G1p6c8bsN4K1WIOiYLomUuC2inzbRrNHXdqnjQTKwupfPSDFtea9hCbElTkaeMi1 11 | LaC6X17U1FWIFMQ8uypGByBkYjoSwMa4oYQHNAn+Yzpzr+oarVfUL3gi0Wefje6i 12 | 6Oz35vQ8aqR4j9Po3BWPOxWtkZ+nQjnjOyFxewBYi0YIYkmYEipX0WqBS9BGezcq 13 | 9tsZs9OBOvDzc0RK+LwRczV1mW5scU48ibC+sEzTBjP3YYOwrK/lUbPo802fhWRX 14 | AgMBAAGgMjAwBgkqhkiG9w0BCQ4xIzAhMA4GA1UdDwEB/wQEAwICBDAPBgNVHRMB 15 | Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQA/qYFtCOalxTMHzc1TgzP7f5YP 16 | 6tjQW2kXe7IVUW+90rnExZcvbSfAF8diylVvAH5QXu0pFRXxoAGSBmbKbU4caQMm 17 | GPLzSHDjZWDqY6+PwhvKqhJxUjppv2kydeH6UXdJp2p0ghYyBzaguDFfvv1c/AAN 18 | 69pecmdBxe+8j2lKJbh2LXnK7zgdsS7EqX0UFwLhfmMmIzdE0ebXq9RpMs/KVJ3u 19 | toXRanMfK2et7NBq2YPjrnHwY+UCoBs83hYzqfd+RhQnc3Y8gxZTPy3yxaKlTLne 20 | KNLzMUF2rZSW5perozIHjcEQ0edm0s09zJ7Zx2oYZZCSRs1vJseLV8RKTZAwj0oa 21 | H8vuxQg2O8hk+0cM8CPGX1+0xyyraOH5ds/bwfWZ5IbASx9wGO2t4eLdjnI40g8C 22 | d3ZaK8mLaSvxb0sIN9OlK0RoNhObAdwodRwSMvxMIsBmeUzz078eMKzcnaKFkKrj 23 | zwZftQYud7nd5R5bU7DkPvMu3jJolnWx9Wq2BMwgH3v+T8ye2dWGJf7Jl2mjydkE 24 | 2EbpB8Z0s5C4+6k29R35Pwv1N39JrqF+GB0k7vEg23bq+xZ3dL5aq5/5EwUf1Poe 25 | EfrbRiTfiBb6Kdg71DwFTQidcS/arZmTfrbnahP1pMC5+zHsQbADYJfds9+DuOxe 26 | doE20ch0iGfdFdsNoA== 27 | -----END CERTIFICATE REQUEST----- 28 | -------------------------------------------------------------------------------- /ansible/molecule/vault/testca/root/ca_key.pem: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIJJwIBAAKCAgEAvGkFngUuu6nT684ubdOsDH15dNG3KWnO153soKDSfPBu1WHy 3 | 4YM8bu8ytacwwegp2uFQfP9xfrStyI34nlIjrcCHOsMn+Y4mY+Sp1qDpItMapblC 4 | r6vaLETMNqR1A2xaJ1S/YUNaDfTk/O9A/bAq6pfTyBnorD/Ff3NXCXkxhLuxk9PW 5 | 
IKMVHc/x0IucAaxfcbvBUr2Szmbdgt1cZDhlHECOflhNFokzmgHRm42VqCkyg5de 6 | OAwjjMLYPjUPl6ZjiYbzkl5krYfKbCjfsx0Yq6f+2PPxKVY2NKWQX1F0F3G79K3p 7 | xMdvUaMCQAsFjtVjHtJdWUzobFgh95Rsld3ZxGSx7J63Lcg+vn2H/qgYMl7zizFV 8 | guPh2jXs55fyOIjdD19d8zETHm6ou4BV6XOHCoK1bcTmeHfeNcBMFHrleXMbWnpz 9 | xuw3grVYg6JguiZS4LaKfNtGs0dd2qeNBMrC6l89IMW15r2EJsSVORp4yLUtoLpf 10 | XtTUVYgUxDy7KkYHIGRiOhLAxrihhAc0Cf5jOnOv6hqtV9QveCLRZ5+N7qLo7Pfm 11 | 9DxqpHiP0+jcFY87Fa2Rn6dCOeM7IXF7AFiLRghiSZgSKlfRaoFL0EZ7Nyr22xmz 12 | 04E68PNzREr4vBFzNXWZbmxxTjyJsL6wTNMGM/dhg7Csr+VRs+jzTZ+FZFcCAwEA 13 | AQKCAgABSgOTgc16094NMmTQS3fJqkWR+IzSI6xUfrUrodJtafYXcny15akcsb7r 14 | wkMjGIKEE3cJ7VyZBtb+1OLvjDJrHbK0/OcetTpTvl927xG8M08dUiquUpHrt7YP 15 | oAOelSZAlOmifWm16tOhW/Rx1nXfnvLGJPM805CDDd3WuUE9hu/IPnhmL5YDuEAD 16 | /miLt0tZKIx8ZNnvbjvuCYUP0B7JWRvpNarSc2ywde7Q5bIlws/azgEDWRGNBMdc 17 | oN9+qnQB/KhB5EIVc98rvI3bpDIBbTZsxMVtyHn/eE8TtbNIPIKSNoI7L/JS/nF8 18 | iyVOudV/SP12Py8OSrhDUoicks2zlGziRPi/5YZGF1UmJdgrodMEi3DVkbr7Oxyo 19 | Z2PTiwu+1xQ8/j6Bi2pxMb/CBk/zoNSuLycR64xi9W4YFuK0y8fgwZ3T+B+igNO3 20 | q9Xb7gIo2qCS+ZALPWH1ocV7PVNUA1WGoomqx4dRBKRWH3zsjz5e8n4b5zouAMvH 21 | nZ08A+1ipEx3Ms3ht6HhmShjIGkI6FWfD7/n7+O57j2PBVD6wxAej04JxOGxCBEb 22 | ueyccvzG+3EViLXQYUqSu/yL1dzRyxeL1focAkJLo64a4cYHuJ37BOUCIASaib7o 23 | eKt8gnzFIVLBl9l6UXOyVeRk1wulq7T8s7lbWLPrieC2M4CvuQKCAQEAzcYpvirF 24 | Ypl0P+463cqgkafm6hg54nPCm/QctfNrELf3iWqrZNjNnQ4Q95TMg02jUj8l1rBd 25 | GydmvMB9vYlcklVxKnsSlxlX/4pV8KGiFuei/HV+g1f7iUQAyZnRs+sAdoihNFg6 26 | LaEKwf8iRMR1sonzVKCxgEBlfshyBW/3AtiVvSkgbOsQtGXZrKqFFOGF5L7geR/b 27 | Zhbbl5ZZjs8bk404twVIyfFabwJaPppWXh2Yjy7X/NP0RsY50Qn3q0eYbSGQESNX 28 | PILPl8zUGNAtgCOFmlWoaiRg0bCVpwLVVy9078nYztbal01a9TQ82gA0dpFDIajD 29 | C/C4/7KAdAHxmQKCAQEA6mXgSJKOLhEMLz4lNicL9AFyWPsN4TkFGPVbqIokZeSK 30 | ttfPS2KHkRLA2JrV8zjXFOHczJB573hDqVZ1F/3B7fXYrm8492C1fQeiLl5bAoXD 31 | VkrIPJ89qYnRgkfX4VjZam86h5Df6Ot9m2m39PKaluGkPK9tA+B9RZnVU2E1w9/S 32 | HoHd1XxaCmEBjOpBYA82b8I7D/Ae1xNk84f6cMLuiwcCkBqWp6R+hF+ipeLd6aYB 33 | 
PI3fD4Ql/tXJ58zsAHn7JQlwl+jj3i0Ot2byEz+8dXXVw3IjjLG2x7ERVkNxMAXy 34 | +AFaXNEV0r5+WT+LHZ7nWU4IkSlbcPvyinGEXFabbwKCAQAmArbvQ+NWRVZuAMmq 35 | 8qUagtNq5rK6m/uU51DuY1qhqfn1zlh+tnW0z0ZfFYN5tbu3k6afs9BfFbYqo7wW 36 | aOtKtLHE0RssAih5McqSOqlAZeV7kRCfnn7+aitjEiDkhUJ+otp0Wt/UUe00zLY9 37 | 0bSUGFt0hIKFWF/nPYuiaX5NbkiqupAczoc1a6L7/Du9EbOz/3ZjqXjTsOE7bTB5 38 | 6tQDO5EAbd92PjkgTGTzL3tIw1rNCDApqdVzxWYksRyo7P7BGfQM1mRk7xz9w35i 39 | XwIoq2cgjTYg9TtVQLNEOIfEJSYKrVR5UMLsNAPbS5z6ABOw20B1VUFCUWM7CHQl 40 | +b3xAoIBAGTjVJXn/qp1S/lJllqok8yCkuUZqe+2yyri8ICqdOAmbk4ktWSNFCyt 41 | FbQlcNEvNeQXEyI5TUJ1USohvGSHJGF337u0w6A2T+dw0a2Dj0iWqm0J4wW0xKWk 42 | XDjnE1Q7Gdll5ptTEA0Azx+FcVhM05z9oeM83n88sK60d81bmfik7ZuT6hdpXpf5 43 | COHOMjVWVufjgehqQdvwzRLFBFZkkKfmbIP57b5aXto7D5a2/y0B9qZJqijTiEcX 44 | XcAXw7ywxBg85+l6pBmM6zilZqxCUqt7/HYTOpnkI/fvjrbCcIVrChkU+//XPr1m 45 | oJtpbkdhjdIydB2G34TR7KMKGR5laKECggEATjF+c5fPH+ynxxLx2K3EqJIWYy00 46 | QqDEUxwLavLxOIHF6Y2ZE76Cp1ULLKGV4BcR0x8zAFXj19mlI2VRrmc2YcFxNCBI 47 | ju0+u+nl2jwB2eVtsS91Y1kActSyvlE+L3hQSpxBF0mvNwg8FuI6iXSqOeXw0V5g 48 | J93P5nuZD8KbvvvUN48x2M7/thpwrxixDgaYtewnyU1UJ+0/s9gNLNmiYKdMv9xA 49 | kBv610mDeZNbxgSw6BcgFvQRYDIOn/ECdvKuOd1KkEEcEbsvDjoHW8Wq6xHy/GqJ 50 | Cq86ZQTLX+md814M0DRxUK6dAjpeRJgo4z9GnnAB+cTgDFhWzyo5YNW94Q== 51 | -----END RSA PRIVATE KEY----- 52 | -------------------------------------------------------------------------------- /ansible/molecule/vault/testca/vault/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/kencx/homelab/1c451e1634f818e9d912bb054db47988cb083989/ansible/molecule/vault/testca/vault/.gitkeep -------------------------------------------------------------------------------- /ansible/molecule/vault/verify.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Verify 3 | hosts: all 4 | gather_facts: false 5 | become: true 6 | tasks: 7 | - name: Include default vars 8 | include_vars: 9 | dir: "{{ 
role_directory }}/defaults/" 10 | extensions: 11 | - 'yml' 12 | - 'yaml' 13 | vars: 14 | role_directory: "{{ lookup('env', 'MOLECULE_PROJECT_DIRECTORY') }}/roles/{{ lookup('env', 'MOLECULE_SCENARIO_NAME') }}" 15 | 16 | - name: Vault service started 17 | systemd: 18 | name: vault 19 | state: started 20 | enabled: true 21 | 22 | - name: Check Vault status 23 | command: vault status -format=json 24 | register: vault_status 25 | changed_when: false 26 | failed_when: 27 | - vault_status.rc >= 1 28 | 29 | - name: Check Vault login as admin 30 | command: vault login -method=userpass username=admin password={{ vault_admin_password }} 31 | register: vault_login 32 | changed_when: false 33 | failed_when: "'Success' not in vault_login.stdout" 34 | 35 | # - debug: 36 | # msg: "{{ vault_login }}" 37 | 38 | - name: Vault-agent service started 39 | systemd: 40 | name: vault-agent 41 | state: started 42 | enabled: true 43 | -------------------------------------------------------------------------------- /ansible/playbooks/common.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: true 4 | vars: 5 | user: debian 6 | 7 | roles: 8 | - role: common 9 | vars: 10 | common_user: "{{ user }}" 11 | common_reset_nomad: true 12 | common_dotfiles_url: https://github.com/kencx/dotfiles.git 13 | 14 | - name: Install Docker 15 | role: geerlingguy.docker 16 | vars: 17 | # install docker compose plugin instead of docker-compose binary 18 | docker_install_compose_plugin: true 19 | docker_install_compose: false 20 | docker_users: ["{{ user }}"] 21 | 22 | - name: Security hardening 23 | role: kencx.ansible_collection.security 24 | vars: 25 | ssh_disable_root_login: true 26 | ufw_rules: 27 | - {rule: 'allow', port: '22', proto: 'tcp', comment: 'ssh'} 28 | - {rule: 'allow', port: '4646', proto: 'tcp', comment: 'nomad HTTP'} 29 | - {rule: 'allow', port: '4647', proto: 'tcp', comment: 'nomad RPC'} 30 | - {rule: 'allow', port: '4648', 
proto: 'tcp', comment: 'nomad Serf WAN'} # server only 31 | - {rule: 'allow', port: '4648', proto: 'udp', comment: 'nomad Serf WAN'} # server only 32 | - {rule: 'allow', port: '8200', proto: 'tcp', comment: 'vault UI'} 33 | - {rule: 'allow', port: '8300', proto: 'tcp', comment: 'consul RPC'} # server only 34 | - {rule: 'allow', port: '8301', proto: 'tcp', comment: 'consul Serf LAN'} 35 | - {rule: 'allow', port: '8301', proto: 'udp', comment: 'consul Serf LAN'} 36 | - {rule: 'allow', port: '8302', proto: 'tcp', comment: 'consul Serf WAN'} 37 | - {rule: 'allow', port: '8302', proto: 'udp', comment: 'consul Serf WAN'} 38 | - {rule: 'allow', port: '8501', proto: 'tcp', comment: 'consul HTTPS'} 39 | - {rule: 'allow', port: '8503', proto: 'tcp', comment: 'consul gRPC'} 40 | - {rule: 'allow', port: '8600', proto: 'tcp', comment: 'consul DNS'} 41 | - {rule: 'allow', port: '8600', proto: 'udp', comment: 'consul DNS'} 42 | 43 | - name: Goss validation 44 | role: kencx.ansible_collection.goss 45 | vars: 46 | goss_dir: "./goss" 47 | goss_file: "base.yml" 48 | -------------------------------------------------------------------------------- /ansible/roles/common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | common_user: "debian" 3 | common_timezone: "Asia/Singapore" 4 | common_keyring_dir: "/etc/apt/keyrings" 5 | common_nfs_dir: "/mnt/storage" 6 | common_packages: 7 | - sudo 8 | - curl 9 | - git 10 | - openssh-server 11 | - vim 12 | - make 13 | - gnupg 14 | - nfs-common 15 | - software-properties-common 16 | - jq 17 | - unzip 18 | - ca-certificates 19 | - qemu-guest-agent 20 | - bind9-utils 21 | - ncdu 22 | common_dotfiles: [] 23 | # common_dotfiles: 24 | # - url: https://raw.githubusercontent.com/foo/repo/master/vimrc 25 | # dest: /home/{{ common_user }}/.vimrc 26 | 27 | common_nomad_version: "1.6.1-1" 28 | common_consul_version: "1.15.4-1" 29 | common_vault_version: "1.14.0-1" 30 | 
common_consul_template_version: "0.32.0-1" 31 | common_reset_nomad: true 32 | -------------------------------------------------------------------------------- /ansible/roles/common/tasks/hashicorp.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create keyring directory 3 | file: 4 | path: "{{ common_keyring_dir }}" 5 | mode: 0755 6 | owner: root 7 | group: root 8 | state: directory 9 | 10 | # apt-key is being deprecated in Debian 11 11 | # note the .asc extension is necessary for de-armouring 12 | # https://www.jeffgeerling.com/blog/2022/aptkey-deprecated-debianubuntu-how-fix-ansible 13 | - name: Add Hashicorp apt key 14 | get_url: 15 | url: https://apt.releases.hashicorp.com/gpg 16 | dest: "{{ common_keyring_dir }}/hashicorp.asc" 17 | mode: 0644 18 | force: true 19 | 20 | - name: Add Hashicorp apt repository 21 | apt_repository: 22 | repo: "deb [arch=amd64 signed-by={{ common_keyring_dir }}/hashicorp.asc] https://apt.releases.hashicorp.com {{ ansible_distribution_release }} main" 23 | filename: hashicorp 24 | state: present 25 | update_cache: true 26 | 27 | - name: Install Hashicorp software 28 | apt: 29 | name: "{{ item.pkg }}={{ item.version }}" 30 | state: present 31 | with_items: 32 | - pkg: nomad 33 | version: "{{ common_nomad_version }}" 34 | - pkg: consul 35 | version: "{{ common_consul_version }}" 36 | - pkg: vault 37 | version: "{{ common_vault_version }}" 38 | - pkg: consul-template 39 | version: "{{ common_consul_template_version }}" 40 | 41 | # Nomad starts automatically after installation, which results in issues when it tries 42 | # to start up with a different configuration later 43 | - name: Reset Nomad 44 | block: 45 | - name: Stop Nomad 46 | systemd: 47 | name: nomad 48 | state: stopped 49 | 50 | - name: Delete default Nomad data 51 | file: 52 | path: /opt/nomad/data 53 | state: absent 54 | when: common_reset_nomad 55 | 
-------------------------------------------------------------------------------- /ansible/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create user 3 | user: 4 | name: "{{ common_user }}" 5 | shell: /bin/bash 6 | groups: sudo 7 | append: true 8 | create_home: true 9 | # password: "{{ password | password_hash('sha512') }}" 10 | # update_password: on_create 11 | state: present 12 | 13 | # - name: Force reset password on first login 14 | # command: "chage -d 0 {{ common_user }}" 15 | 16 | - name: Set timezone 17 | community.general.timezone: 18 | name: "{{ common_timezone }}" 19 | 20 | - name: Update apt packages 21 | apt: 22 | update_cache: true 23 | cache_valid_time: 86400 24 | upgrade: true 25 | 26 | - name: Wait for dpkg lock to be released 27 | shell: while lsof /var/lib/dpkg/lock-frontend ; do sleep 10 ; done; 28 | changed_when: false 29 | 30 | - name: Update and install packages 31 | apt: 32 | name: "{{ item }}" 33 | state: present 34 | with_items: "{{ common_packages }}" 35 | register: apt_action 36 | retries: 10 37 | until: apt_action is success or ('/var/lib/dpkg/lock-frontend' not in apt_action.msg) 38 | 39 | # Note: This role only supports Ubuntu/Debian installations. 40 | # Note: The Hashicorp apt server only supports amd64 packages. For arm64, download the 41 | # individual zip files instead. 
42 | # See https://github.com/hashicorp/terraform/issues/27378 43 | - import_tasks: hashicorp.yml 44 | when: 45 | - ansible_architecture == 'x86_64' 46 | - ansible_distribution in ["Debian", "Ubuntu"] 47 | 48 | - name: Install Bitwarden CLI 49 | unarchive: 50 | src: "https://vault.bitwarden.com/download/?app=cli&platform={{ ansible_system|lower }}" 51 | dest: "/usr/bin" 52 | remote_src: true 53 | mode: 0755 54 | owner: root 55 | group: root 56 | tags: bw 57 | 58 | - name: Create NFS share directories 59 | file: 60 | path: "{{ common_nfs_dir }}" 61 | mode: 0755 62 | owner: "{{ common_user }}" 63 | group: "{{ common_user }}" 64 | state: directory 65 | tags: nfs 66 | 67 | - name: Limit journalctl size 68 | lineinfile: 69 | path: "/etc/systemd/journald.conf" 70 | regexp: "^SystemMaxUse=" 71 | line: "SystemMaxUse=100M" 72 | 73 | - name: Add dotfiles to remote 74 | get_url: 75 | url: "{{ item.url }}" 76 | dest: "{{ item.dest }}" 77 | mode: 0644 78 | owner: "{{ common_user }}" 79 | group: "{{ common_user }}" 80 | with_items: "{{ common_dotfiles }}" 81 | tags: dotfiles 82 | -------------------------------------------------------------------------------- /ansible/roles/consul-template/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | consul_template_config_dir: "/etc/consul-template" 3 | vault_server_fqdn: "{{ ansible_default_ipv4.address }}" 4 | vault_agent_token_file_path: "/opt/vault/data/.vault-token" 5 | -------------------------------------------------------------------------------- /ansible/roles/consul-template/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reload consul-template 3 | systemd: 4 | name: consul-template 5 | state: reloaded 6 | -------------------------------------------------------------------------------- /ansible/roles/consul-template/tasks/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | # consul-template requires write access to all template destination directories. 3 | # - name: Create consul-template user 4 | # user: 5 | # name: consul-template 6 | # groups: ["vault", "consul", "nomad"] 7 | # append: true 8 | # create_home: false 9 | # state: present 10 | 11 | - name: Create consul-template directories 12 | file: 13 | path: "{{ item }}" 14 | state: directory 15 | mode: 0755 16 | owner: root 17 | group: root 18 | with_items: 19 | - "{{ consul_template_config_dir }}" 20 | 21 | - name: Copy consul-template service file 22 | template: 23 | src: "consul-template.service" 24 | dest: "/etc/systemd/system/consul-template.service" 25 | mode: 0644 26 | owner: root 27 | group: root 28 | notify: reload consul-template 29 | 30 | - name: Copy consul-template config 31 | template: 32 | src: "consul-template.hcl.j2" 33 | dest: "{{ consul_template_config_dir }}/consul-template.hcl" 34 | mode: 0640 35 | owner: root 36 | group: root 37 | notify: reload consul-template 38 | 39 | - name: Start consul-template 40 | systemd: 41 | name: consul-template 42 | state: started 43 | enabled: true 44 | daemon_reload: true 45 | -------------------------------------------------------------------------------- /ansible/roles/consul-template/templates/consul-template.hcl.j2: -------------------------------------------------------------------------------- 1 | vault { 2 | address = "https://{{ vault_server_fqdn }}:8200" 3 | vault_agent_token_file = "{{ vault_agent_token_file_path }}" 4 | 5 | client_user_agent = "consul-template" 6 | 7 | unwrap_token = false 8 | renew_token = false 9 | 10 | retry { 11 | enabled = true 12 | attempts = 3 13 | backoff = "1m" 14 | max_backoff = "16m" 15 | } 16 | } 17 | 18 | template_error_fatal = true 19 | err_on_failed_lookup = true 20 | -------------------------------------------------------------------------------- 
/ansible/roles/consul-template/templates/consul-template.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Consul-template 3 | Wants=network-online.target vault-agent.service 4 | After=network-online.target vault-agent.service 5 | StartLimitInterval=180 6 | StartLimitBurst=3 7 | 8 | [Service] 9 | #User=consul-template 10 | #Group=consul-template 11 | ExecStart=/usr/bin/consul-template -config="{{ consul_template_config_dir }}/consul-template.hcl" -log-level=info 12 | ExecReload=/bin/kill -HUP $MAINPID 13 | KillMode=process 14 | KillSignal=SIGINT 15 | Restart=on-failure 16 | RestartSec=60 17 | 18 | [Install] 19 | WantedBy=multi-user.target 20 | -------------------------------------------------------------------------------- /ansible/roles/consul/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | consul_config_dir: "/etc/consul.d" 3 | consul_data_dir: "/opt/consul" 4 | consul_tls_dir: "{{ consul_data_dir }}/tls" 5 | consul_template_config_dir: "/etc/consul-template" 6 | consul_upstream_dns_address: ["1.1.1.1"] 7 | 8 | # server 9 | consul_server: true 10 | consul_bootstrap_expect: 1 11 | 12 | # client 13 | consul_client: false 14 | consul_server_ip: "" 15 | 16 | # certificate generation 17 | consul_vault_addr: "https://localhost:8200" 18 | consul_common_name: "{{ 'server' if consul_server else 'client' }}.dc1.consul" 19 | consul_alt_names: "consul.service.consul" 20 | consul_ttl: "24h" 21 | consul_ip_sans: "127.0.0.1" 22 | 23 | # watches 24 | setup_consul_watches: false 25 | consul_gotify_url: "" 26 | consul_gotify_token: "" 27 | -------------------------------------------------------------------------------- /ansible/roles/consul/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart consul 3 | systemd: 4 | name: consul 5 | state: restarted 6 | daemon_reload: true 7 | 
when: not consul_started.changed 8 | 9 | - name: reload consul 10 | systemd: 11 | name: consul 12 | state: reloaded 13 | when: not consul_started.changed 14 | 15 | - name: reload consul-template 16 | systemd: 17 | name: consul-template 18 | state: reloaded 19 | -------------------------------------------------------------------------------- /ansible/roles/consul/tasks/dnsmasq.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install dnsmasq 3 | apt: 4 | name: 5 | - dnsmasq 6 | state: present 7 | 8 | - name: Remove resolvconf 9 | apt: 10 | name: 11 | - resolvconf 12 | - openresolv 13 | state: absent 14 | 15 | - name: Allow ufw port 53 16 | ufw: 17 | rule: "allow" 18 | port: 53 19 | 20 | - name: Disable systemd-resolved 21 | systemd: 22 | name: systemd-resolved 23 | state: stopped 24 | enabled: false 25 | 26 | - name: Write new /etc/resolv.conf 27 | copy: 28 | content: "nameserver 127.0.0.1" 29 | dest: "/etc/resolv.conf" 30 | owner: root 31 | group: root 32 | mode: 0644 33 | 34 | - name: Copy dnsmasq config 35 | template: 36 | src: "dnsmasq.conf.j2" 37 | dest: "/etc/dnsmasq.conf" 38 | owner: root 39 | group: root 40 | mode: 0644 41 | 42 | - name: Copy Consul dnsmasq forwarding config 43 | copy: 44 | content: | 45 | server=/consul/127.0.0.1#8600 46 | rev-server=10.0.0.0/8,127.0.0.1#8600 47 | dest: "/etc/dnsmasq.d/10-consul" 48 | owner: root 49 | group: root 50 | mode: 0644 51 | 52 | - name: Start dnsmasq 53 | systemd: 54 | name: dnsmasq 55 | state: started 56 | enabled: true 57 | 58 | - name: Wait for port 53 59 | wait_for: 60 | port: 53 61 | host: "{{ ansible_default_ipv4.address }}" 62 | state: started 63 | -------------------------------------------------------------------------------- /ansible/roles/consul/tasks/gossip.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Write a new Consul gossip key to Vault 3 | # This is generated on the first server node only. 
Subsequent server and client nodes 4 | # will use the existing secret 5 | - name: Save gossip key to Vault 6 | shell: 7 | cmd: | 8 | if ! vault kv get -field=gossip_key kvv2/cluster/consul_config >/dev/null 2>&1; then 9 | vault kv put kvv2/cluster/consul_config gossip_key=$(consul keygen) 10 | else 11 | echo "gossip_key present" 12 | fi 13 | environment: 14 | VAULT_TOKEN: "{{ consul_ansible_token }}" 15 | register: prepare_gossip_key 16 | changed_when: "'present' not in prepare_gossip_key.stdout" 17 | when: consul_server | bool 18 | run_once: true 19 | 20 | # TODO Setup gossip key rotation with vault: 21 | # consul-template only triggers reload in consul, it does not renew the secret for us. 22 | # We need to run a cronjob to trigger this renewal manually. the secret also has no ttl 23 | # so old versions will never expire. requires helper-script 24 | - name: Add gossip key to Consul configuration 25 | blockinfile: 26 | path: "{{ consul_template_config_dir }}/consul-template.hcl" 27 | marker: "# {mark} Consul gossip" 28 | validate: consul-template -config=%s -parse-only 29 | block: | 30 | template { 31 | contents = </dev/null 2>&1; then 9 | vault kv put kvv2/cluster/nomad_config gossip_key=$(nomad operator gossip keyring generate) 10 | else 11 | echo "gossip_key present" 12 | fi 13 | environment: 14 | VAULT_TOKEN: "{{ nomad_ansible_token }}" 15 | register: prepare_gossip_key 16 | changed_when: "'present' not in prepare_gossip_key.stdout" 17 | when: nomad_server | bool 18 | run_once: true 19 | 20 | # TODO Setup gossip key rotation with vault: 21 | # consul-template only triggers reload in consul, it does not renew the secret for us. 22 | # We need to run a cronjob to trigger this renewal manually. the secret also has no ttl 23 | # so old versions will never expire. 
requires helper-script 24 | - name: Add gossip key to Nomad configuration 25 | blockinfile: 26 | path: "{{ consul_template_config_dir }}/consul-template.hcl" 27 | marker: "# {mark} Nomad gossip" 28 | validate: consul-template -config=%s -parse-only 29 | block: | 30 | template { 31 | contents = < 0 10 | 11 | - name: Unseal Vault from given keys 12 | command: 13 | cmd: "vault operator unseal {{ item }}" 14 | environment: 15 | VAULT_ADDR: "{{ unseal_vault_addr }}" 16 | with_items: "{{ unseal_keys }}" 17 | when: 18 | - unseal_keys is defined 19 | - unseal_keys | length > 0 20 | changed_when: false 21 | failed_when: vault_unseal_operation.rc == 1 22 | 23 | # TODO copy bw_get.sh script to role 24 | - name: Get unseal key from Bitwarden 25 | script: "bw_get.sh 'Vault Unseal Key' {{ unseal_bw_password }}" 26 | register: vault_unseal_key_raw 27 | when: 28 | - unseal_store == 'bitwarden' 29 | - unseal_bw_password is defined 30 | 31 | - set_fact: 32 | vault_unseal_key_json: "{{ vault_unseal_key_raw.stdout | from_json }}" 33 | when: vault_unseal_key_raw is defined and vault_unseal_key_raw.stdout is defined 34 | 35 | - name: Unseal Vault from json data 36 | shell: 37 | cmd: "vault operator unseal --format json {{ vault_unseal_key_json.data.data }}" 38 | environment: 39 | VAULT_ADDR: "{{ unseal_vault_addr }}" 40 | register: vault_unseal_operation 41 | when: 42 | - vault_unseal_key_json is defined 43 | - vault_unseal_key_json.data.data is defined 44 | - vault_unseal_key_json.success 45 | changed_when: false 46 | failed_when: vault_unseal_operation.rc == 1 47 | 48 | - name: Check Vault seal status 49 | command: vault status -format=json 50 | register: post_vault_status_raw 51 | changed_when: false 52 | failed_when: post_vault_status_raw.rc == 1 53 | 54 | - set_fact: 55 | vault_status_json: "{{ post_vault_status_raw.stdout | from_json }}" 56 | 57 | - fail: 58 | msg: Vault is still sealed! 
59 | when: 60 | - post_vault_status_json is defined 61 | - post_vault_status_json.sealed 62 | 63 | - debug: 64 | msg: Vault is unsealed! 65 | when: 66 | - post_vault_status_json is defined 67 | - not post_vault_status_json.sealed 68 | -------------------------------------------------------------------------------- /ansible/roles/vault/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | vault_config_dir: "/etc/vault.d" 3 | vault_data_dir: "/opt/vault/data" 4 | vault_log_dir: "/opt/vault/logs" 5 | vault_tls_dir: "/opt/vault/tls" 6 | vault_ca_cert_dir: "/usr/share/ca-certificates/vault" 7 | vault_log_file: "{{ vault_log_dir }}/vault.log" 8 | 9 | vault_server: true 10 | 11 | vault_store_local: true 12 | vault_secrets_file: "{{ playbook_dir }}/vault.txt" 13 | vault_store_bw: false 14 | 15 | vault_terraform_workspace: "default" 16 | vault_admin_password: "password" 17 | vault_kvuser_password: "password" 18 | 19 | vault_register_consul: true 20 | 21 | vault_setup_agent: true 22 | vault_server_fqdn: "{{ ansible_default_ipv4.address }}" 23 | -------------------------------------------------------------------------------- /ansible/roles/vault/files/bw_get.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | NAME="$1" 4 | SESSION_ID=$(bw unlock --raw "$2") 5 | bw sync --quiet --session "$SESSION_ID" 6 | bw get password "$NAME" --response --session "$SESSION_ID" 7 | -------------------------------------------------------------------------------- /ansible/roles/vault/files/bw_store.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # set -euo pipefail 3 | 4 | # build json item 5 | NAME="$1" 6 | PASSWORD="$2" 7 | LOGIN=$(jq -n \ 8 | --argjson uris [] \ 9 | --arg username "" \ 10 | --arg password "$PASSWORD" \ 11 | --arg totp "" \ 12 | '$ARGS.named') 13 | ITEM=$(jq -n \ 14 | --argjson type 1 \ 15 | --arg name 
"$NAME" \ 16 | --argjson login "$LOGIN" \ 17 | '$ARGS.named') 18 | 19 | SESSION_ID=$(bw unlock --raw "$3") 20 | bw sync --quiet --session "$SESSION_ID" 21 | 22 | BW_EXISTING=$(bw get item "$NAME" --response --session "$SESSION_ID") 23 | BW_SUCCESS=$(echo "$BW_EXISTING" | jq -r '.success') 24 | BW_DATA=$(echo "$BW_EXISTING" | jq -r '.data') 25 | BW_ID=$(echo "$BW_DATA" | jq -r '.id') 26 | 27 | if [[ "$BW_SUCCESS" = "false" ]]; then 28 | echo "Creating item..." 29 | echo "$ITEM" | bw encode | bw create item --session "$SESSION_ID" --quiet 30 | elif [[ ! "$BW_DATA" = "" ]]; then 31 | echo "Item exists. Updating..." 32 | NEW_ITEM=$(echo "$BW_DATA" | jq ".login.password=\"${PASSWORD}\"") 33 | echo "$NEW_ITEM" | bw encode | bw edit item "$BW_ID" --session "$SESSION_ID" --quiet 34 | else 35 | echo "bw get failed" 36 | fi 37 | -------------------------------------------------------------------------------- /ansible/roles/vault/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reload vault 3 | systemd: 4 | name: vault 5 | state: reloaded 6 | when: not vault_started.changed 7 | 8 | - name: reload vault-agent 9 | systemd: 10 | name: vault-agent 11 | state: reloaded 12 | when: not vault_started.changed 13 | -------------------------------------------------------------------------------- /ansible/roles/vault/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create Vault restricted directories 3 | file: 4 | path: "{{ item }}" 5 | mode: 0700 6 | owner: vault 7 | group: vault 8 | state: directory 9 | with_items: 10 | - "{{ vault_data_dir }}" 11 | - "{{ vault_data_dir }}/raft" 12 | - "{{ vault_log_dir }}" 13 | - "{{ vault_tls_dir }}" 14 | 15 | - name: Create Vault config directories 16 | file: 17 | path: "{{ item }}" 18 | mode: 0755 19 | owner: vault 20 | group: vault 21 | state: directory 22 | with_items: 23 | - "{{ vault_config_dir }}" 24 | - 
"{{ vault_ca_cert_dir }}" 25 | 26 | - name: Remove default TLS cert and key 27 | file: 28 | path: "{{ item }}" 29 | state: absent 30 | with_items: 31 | - "{{ vault_tls_dir }}/tls.key" 32 | - "{{ vault_tls_dir }}/tls.crt" 33 | 34 | - import_tasks: server.yml 35 | when: vault_server | bool 36 | 37 | - import_tasks: agent.yml 38 | when: vault_setup_agent | bool 39 | -------------------------------------------------------------------------------- /ansible/roles/vault/tasks/server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Copy Vault service file 3 | template: 4 | src: "vault.service.j2" 5 | dest: "/etc/systemd/system/vault.service" 6 | mode: 0644 7 | owner: root 8 | group: root 9 | notify: 10 | - reload vault 11 | 12 | - name: Copy Vault config files 13 | template: 14 | src: "vault.hcl.j2" 15 | dest: "{{ vault_config_dir }}/vault.hcl" 16 | mode: 0644 17 | owner: vault 18 | group: vault 19 | notify: 20 | - reload vault 21 | 22 | - name: Create Vault log file audit 23 | file: 24 | path: "{{ vault_log_file }}" 25 | mode: 0640 26 | owner: vault 27 | group: vault 28 | # ensures idempotence 29 | access_time: preserve 30 | modification_time: preserve 31 | state: touch 32 | 33 | - name: Set up logrotate for file audit 34 | template: 35 | src: "logrotate-vault.j2" 36 | dest: "/etc/logrotate.d/vault" 37 | mode: 0644 38 | owner: root 39 | group: root 40 | 41 | - name: Start Vault 42 | systemd: 43 | name: vault 44 | state: started 45 | enabled: true 46 | register: vault_started 47 | 48 | - name: Wait for Vault port 49 | wait_for: 50 | port: 8200 51 | state: started 52 | 53 | - name: Check Vault status 54 | command: vault status -format=json 55 | register: vault_status_raw 56 | changed_when: false 57 | failed_when: vault_status_raw.rc == 1 58 | 59 | - set_fact: 60 | vault_status_json: "{{ vault_status_raw.stdout | from_json }}" 61 | 62 | # Initialization will only be performed on the first run of the role: 63 | # - 
Initialize and unseal Vault 64 | # - Store root token and unseal key 65 | # - Login with root token 66 | # - First time provisioning of Vault resources with Terraform provider 67 | # using root token 68 | # 69 | # After Vault has been initialized, this role will not apply any changes made in 70 | # terraform/vault. This prevents changes from being applied with the root token after 71 | # initialization, which can be dangerous. Any changes should be planned and applied 72 | # outside of the role, either manually or in a CI pipeline. 73 | 74 | # TODO check that bw is setup 75 | 76 | - name: Assert that vault_secrets_file is defined 77 | assert: 78 | that: 79 | - vault_secrets_file is defined 80 | - vault_secrets_file | length != 0 81 | fail_msg: vault_secrets_file is not defined or is empty. 82 | when: 83 | - vault_store_local | bool 84 | - vault_status_json is defined 85 | - not vault_status_json.initialized 86 | 87 | - name: Assert that bw_password variable is defined 88 | assert: 89 | that: bw_password is defined 90 | fail_msg: bw_password is not defined. 
91 | when: 92 | - vault_store_bw | bool 93 | - vault_status_json is defined 94 | - not vault_status_json.initialized 95 | 96 | - name: Vault Initialization 97 | import_tasks: init.yml 98 | when: 99 | - vault_status_json is defined 100 | - not vault_status_json.initialized 101 | 102 | - name: Unset root token 103 | set_fact: 104 | vault_root_token: "" 105 | -------------------------------------------------------------------------------- /ansible/roles/vault/templates/logrotate-vault.j2: -------------------------------------------------------------------------------- 1 | {{ vault_log_file }} { 2 | size 50M 3 | rotate 10 4 | missingok 5 | notifempty 6 | compress 7 | delaycompress 8 | copytruncate 9 | dateext 10 | } 11 | -------------------------------------------------------------------------------- /ansible/roles/vault/templates/token_action.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | VAULT_ADDR=https://"{{ vault_server_fqdn }}":8200 VAULT_TOKEN=$(cat "{{ vault_data_dir 4 | }}"/.vault-token) vault "$@" 5 | -------------------------------------------------------------------------------- /ansible/roles/vault/templates/vault-agent.hcl.j2: -------------------------------------------------------------------------------- 1 | vault { 2 | address = "https://{{ vault_server_fqdn }}:8200" 3 | ca_cert = "{{ vault_ca_cert_dir }}/ca.crt" 4 | client_cert = "{{ vault_tls_dir }}/agent.crt" 5 | client_key = "{{ vault_tls_dir }}/agent_key.pem" 6 | 7 | retry { 8 | num_retries = 3 9 | } 10 | } 11 | 12 | auto_auth { 13 | method "cert" { 14 | name = "{{ ansible_fqdn }}" 15 | mount_path = "auth/agent" 16 | exit_on_err = true 17 | 18 | config = { 19 | ca_cert = "{{ vault_ca_cert_dir }}/ca.crt" 20 | client_cert = "{{ vault_tls_dir }}/agent.crt" 21 | client_key = "{{ vault_tls_dir }}/agent_key.pem" 22 | reload = true 23 | } 24 | } 25 | 26 | sink "file" { 27 | config = { 28 | path = "{{ vault_data_dir }}/.vault-token" 29 | 
mode = 0400 30 | } 31 | } 32 | } 33 | 34 | cache {} 35 | 36 | template_config { 37 | exit_on_retry_failure = true 38 | } 39 | 40 | # renew vault-agent's own auth cert 41 | template { 42 | contents = < 23 | - [Roles](ansible/roles/index.md) 24 | - [Common](ansible/roles/common.md) 25 | - [Consul](ansible/roles/consul.md) 26 | - [Consul Template](ansible/roles/consul-template.md) 27 | - [Issue Cert](ansible/roles/issue_cert.md) 28 | - [Nomad](ansible/roles/nomad.md) 29 | - [Unseal Vault](ansible/roles/unseal_vault.md) 30 | - [Vault](ansible/roles/vault.md) 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | # Applications 39 | 40 | - [Applications](apps/index.md) 41 | - [Adding New Application](apps/add_new.md) 42 | - [Diun](apps/diun.md) 43 | - [Registry](apps/registry.md) 44 | 45 | # References 46 | 47 | - [Known Issues](references/issues.md) 48 | - [Roadmap](references/TODO.md) 49 | -------------------------------------------------------------------------------- /docs/src/ansible/index.md: -------------------------------------------------------------------------------- 1 | # Ansible 2 | 3 | Ansible playbooks are used to configure provisioned server and client nodes to 4 | run a functional cluster. They use modular and customizable roles to setup 5 | various software. 
6 | -------------------------------------------------------------------------------- /docs/src/ansible/inventory.md: -------------------------------------------------------------------------------- 1 | # Inventory 2 | -------------------------------------------------------------------------------- /docs/src/ansible/roles/common.md: -------------------------------------------------------------------------------- 1 | # Common 2 | 3 | This role installs common packages and performs standard post-provisioning such 4 | as: 5 | 6 | - Creation of user 7 | - Creation of NFS share directories 8 | - Installation of Hashicorp software 9 | - Installation of Bitwarden CLI 10 | 11 | >**Note**: Security hardening and installation of Docker are performed 12 | >separately in the `common.yml` playbook. 13 | 14 | ## Variables 15 | 16 | | Variable | Description | Type | Default | 17 | | -------- | ----------- | ---- | ------- | 18 | | common_user | User to be created | string | `debian` | 19 | | common_timezone | Timezone to set | string | `Asia/Singapore` | 20 | | common_keyring_dir | Keyring directory path for external apt repositories | string | `/etc/apt/keyrings` | 21 | | common_nfs_dir | NFS share directory path | string | `/mnt/storage` | 22 | | common_packages | List of common packages to be installed | list(string) | See `defaults.yml` for full list | 23 | | common_nomad_version | Nomad version to install | string | `1.6.1-1` | 24 | | common_consul_version | Consul version to install | string | `1.15.4-1` | 25 | | common_vault_version | Vault version to install | string | `1.14.0-1` | 26 | | common_consul_template_version | Consul template version to install | string | `0.32.0-1` | 27 | | common_reset_nomad | Clear Nomad data directory | boolean | `true` | 28 | | common_dotfiles | List of dotfiles to be added, and their destinations | list | `[]` | 29 | 30 | ## Tags 31 | 32 | - Skip `bw` to not install the Bitwarden CLI 33 | - Skip `nfs` to not create any NFS share 
directories 34 | - Skip `dotfiles` to not copy any remote dotfiles 35 | 36 | ## Notes 37 | 38 | - This role clears any existing `/opt/nomad/data` directories to a blank slate. To disable this 39 | behaviour, set `common_reset_nomad: false`. 40 | - This role only supports Ubuntu/Debian amd64 systems with `apt`. 41 | - The Hashicorp apt server [only supports amd64 42 | packages](https://github.com/hashicorp/terraform/issues/27378). For arm64 43 | systems, download the individual zip files instead. 44 | - `common_dotfiles` is used to add dotfiles from a Github repository to the host. 45 | For example: 46 | 47 | ```yml 48 | common_dotfiles: 49 | - url: https://raw.githubusercontent.com/foo/repo/master/.vimrc 50 | dest: /home/foo/.vimrc 51 | ``` 52 | -------------------------------------------------------------------------------- /docs/src/ansible/roles/consul-template.md: -------------------------------------------------------------------------------- 1 | # Consul-template 2 | 3 | This role deploys a new Consul-template instance. 4 | 5 | ## Prerequisites 6 | - consul-template installed 7 | - Access to any template destination directories 8 | 9 | ## Setup 10 | 11 | [Vault-agent](./vault.md#vault-agent) is used to authenticate to Vault for 12 | consul-template. It only requires access to the `vault_agent_token_file`. This 13 | means consul-template requires access to Vault directories. It also requires 14 | access to any template destination directories (eg. Consul, Nomad TLS 15 | directories). As such, the role runs consul-template as root. I'm still 16 | considering alternatives that allow consul-template to be ran as a 17 | non-privileged user. 18 | 19 | >**Note**: Vault and Vault-agent do not have to be installed for the role to run 20 | >successfully. However, they must be available for the consul-template service 21 | >to start without error. 
22 | 23 | ## Variables 24 | 25 | | Variable | Description | Type | Default | 26 | | -------- | ----------- | ---- | ------- | 27 | | consul_template_dir | Configuration directory | string | `/opt/consul-template` | 28 | | vault_address | Vault instance IP address | string | `${ansible_default_ipv4.address}` | 29 | -------------------------------------------------------------------------------- /docs/src/ansible/roles/consul.md: -------------------------------------------------------------------------------- 1 | # Consul 2 | 3 | This role deploys a new Consul instance. It can deploy Consul as a server or client, 4 | depending on the host's group name. 5 | 6 | ## Prerequisites 7 | - An existing Vault instance to save gossip key and provision TLS certs 8 | - An existing consul-template instance to rotate TLS certs 9 | - Consul installed 10 | - Ansible auth certificate on localhost to access Vault 11 | 12 | ## Setup 13 | For encryption, the role creates consul-template templates for: 14 | 15 | - Consul's gossip key. 
A new key is added with `consul keygen` if it does not 16 | already exist 17 | - Consul TLS certs from Vault PKI 18 | 19 | ## Variables 20 | 21 | | Variable | Description | Type | Default | 22 | | -------- | ----------- | ---- | ------- | 23 | | consul_config_dir | Configuration directory | string | `/etc/consul.d` | 24 | | consul_data_dir | Data directory | string | `/opt/consul` | 25 | | consul_tls_dir | TLS files directory | string | `${consul_data_dir}/tls` | 26 | | consul_template_config_dir | consul-template configuration file | string | `/etc/consul-template` | 27 | | consul_upstream_dns_address | List of upstream DNS servers for dnsmasq | `["1.1.1.1"]` | 28 | | consul_server | Start Consul in server mode | bool | `true` | 29 | | consul_bootstrap_expect | (server only) The expected number of servers in a cluster | number | `1` | 30 | | consul_client | Start Consul in client mode | bool | `false` | 31 | | consul_server_ip | (client only) Server's IP address | string | - | 32 | | consul_vault_addr | Vault server API address to use | string | `https://localhost:8200` | 33 | | consul_common_name | Consul node certificate common_name | string | See below | 34 | | consul_alt_names | Consul's TLS certificate alt names | string | `consul.service.consul` | 35 | | consul_ip_sans | Consul's TLS certificate IP SANs | string | `127.0.0.1` | 36 | | setup_consul_watches | Set up Consul watches for healthchecks | bool | `false` | 37 | | consul_gotify_url | Gotify URL for sending webhook | string | `""` | 38 | | consul_gotify_token | Gotify token for sending webhook | string | `""` | 39 | 40 | ## Notes 41 | 42 | - `consul_server` and `consul_agent` are mutually exclusive and cannot be both 43 | `true`. 44 | - `consul_bootstrap_expect` must be the same value in all Consul servers. If the 45 | key is not present in the server, that server instance will not attempt to 46 | bootstrap the cluster. 
47 | - An existing Consul server must be running and reachable at `consul_server_ip` 48 | when `consul_agent` is `true`. 49 | - The default value of `consul_common_name` is `server.dc1.consul` or 50 | `client.dc1.consul` depending on whether Consul is started in server or client 51 | mode. 52 | -------------------------------------------------------------------------------- /docs/src/ansible/roles/index.md: -------------------------------------------------------------------------------- 1 | # Roles 2 | -------------------------------------------------------------------------------- /docs/src/ansible/roles/issue_cert.md: -------------------------------------------------------------------------------- 1 | # Issue Cert 2 | 3 | This role issues a new Vault certificate from the configured `pki_int` role. 4 | 5 | ## Prerequisites 6 | - An existing Vault instance 7 | - (Optional) An existing consul-template instance 8 | - Ansible auth certificate on localhost 9 | 10 | ## Setup 11 | The role issues a new certificate from Vault and writes it to the host's 12 | filesystem at a chosen path. The role logins with an existing Ansible 13 | auth certificate with limited permissions from its configured policies. 14 | 15 | The role also optionally adds a consul-template template stanza to automatically 16 | renew the certificate key pair. 
17 | 18 | ## Variables 19 | 20 | | Variable | Description | Type | Default | 21 | | -------- | ----------- | ---- | ------- | 22 | | issue_cert_role | Certificate role | string | `client` | 23 | |issue_cert_common_name | Certificate common name | string | `""` | 24 | |issue_cert_ttl | Certificate TTL | string | `24h` | 25 | |issue_cert_vault_addr | Vault instance address | string | `https://localhost:8200` | 26 | |issue_cert_owner | Certificate key pair owner | string | `""` | 27 | |issue_cert_group | Certificate key pair group | string | `""` | 28 | |issue_cert_path | Certificate path | string | `cert.crt` | 29 | |issue_cert_key_path | Private key path | string | `key.pem` | 30 | |issue_cert_ca_path | CA path | string | `ca.crt` | 31 | |issue_cert_auth_role | Auth role to write certificate to | string | `""` | 32 | |issue_cert_auth_policies | Policies to add to auth role | string | `""` | 33 | |issue_cert_add_template | Add consul-template template | boolean | `true` | 34 | |issue_cert_consul_template_config | consul-template config file path | string | `/etc/consul-template/consul-template.hcl` | 35 | |issue_cert_consul_template_marker | consul-template template marker | string | `# {mark} TLS` | 36 | |issue_cert_service | Service to restart after consul-template renews cert | string | `""` | 37 | 38 | - `issue_cert_auth_*` variables are only used when `issue_cert_role = "auth"` 39 | -------------------------------------------------------------------------------- /docs/src/ansible/roles/nomad.md: -------------------------------------------------------------------------------- 1 | # Nomad 2 | 3 | This role deploys a new Nomad instance. It can deploy Nomad as a server or client, 4 | depending on the host's group name. 
5 | 6 | ## Prerequisites 7 | - An existing Vault instance to save gossip key and provision TLS certs 8 | - An existing consul-template instance to rotate TLS certs 9 | - Nomad installed 10 | - Ansible auth certificate on localhost to access Vault 11 | 12 | ## Setup 13 | For encryption, the role creates consul-template templates for: 14 | 15 | - Nomad's gossip key. A new key is added with `nomad operator gossip keyring 16 | generate` if it does not already exist 17 | - Nomad TLS certs from Vault PKI 18 | - Vault token for Vault integration 19 | 20 | ## Variables 21 | 22 | | Variable | Description | Type | Default | 23 | | -------- | ----------- | ---- | ------- | 24 | | nomad_config_dir | Configuration directory | string | `/etc/nomad.d` | 25 | | nomad_data_dir | Data directory | string | `/opt/nomad` | 26 | | nomad_tls_dir | TLS files directory | string | `${nomad_data_dir}/tls` | 27 | | consul_template_config_dir | consul-template configuration file | string | `/etc/consul-template` | 28 | | nomad_register_consul | Register Nomad as a Consul service | bool | `true` | 29 | | nomad_vault_integration | Sets up Vault integration in server node | bool | `true` | 30 | | nomad_server | Start Nomad in server mode | bool | `true` | 31 | | nomad_bootstrap_expect | (server only) The expected number of servers in a cluster | number | `1` | 32 | | nomad_client | Start Nomad in client mode | bool | `false` | 33 | | nomad_server_ip | (client only) Server's IP address | string | - | 34 | | nomad_vault_addr | Vault server API address to use | string | `https://localhost:8200` | 35 | | nomad_common_name | Nomad node certificate common_name | string | `server.global.nomad` | 36 | | nomad_alt_names | Nomad's TLS certificate alt names | string | `nomad.service.consul` | 37 | | nomad_ip_sans | Nomad's TLS certificate IP SANs | string | `127.0.0.1` | 38 | | cni_plugin_version | CNI plugins version | string | `1.3.0` | 39 | 40 | ## Notes 41 | 42 | - `nomad_server` and `nomad_agent` are 
mutually exclusive and cannot be both 43 | `true`. 44 | - `nomad_bootstrap_expect` must be the same value in all Nomad servers. If the 45 | key is not present in the server, that server instance will not attempt to 46 | bootstrap the cluster. 47 | - An existing Nomad server must be running and reachable at `nomad_server_ip` 48 | when `nomad_agent` is `true`. 49 | - The default value of `nomad_common_name` is `server.global.nomad` or 50 | `client.global.nomad` depending on whether nomad is started in server or client 51 | mode. 52 | -------------------------------------------------------------------------------- /docs/src/ansible/roles/unseal_vault.md: -------------------------------------------------------------------------------- 1 | # Unseal Vault 2 | 3 | >**Work in Progress**: This role is unfinished and untested. 4 | 5 | This role unseals an initialized but sealed Vault server. The unseal key shares 6 | can be provided as: 7 | 8 | - A variable array of keys 9 | - A variable array of file paths to the keys on the remote filesystem 10 | - Secrets from Bitwarden 11 | 12 | ## Variables 13 | 14 | | Variable | Description | Type | Default | 15 | | -------- | ----------- | ---- | ------- | 16 | | unseal_vault_port | Configured Vault port | int | `8200` | 17 | | unseal_vault_addr | Vault HTTP address | string | `http://localhost:8200` | 18 | | unseal_store | Accepts `file, bitwarden` | string | | 19 | | unseal_keys_files | Array of files with unseal keys | list | | 20 | | unseal_keys | Array of key shares | list | | 21 | | unseal_bw_password | Bitwarden password | string | | 22 | | unseal_bw_keys_names | List of Bitwarden secrets storing key shares | list | | 23 | -------------------------------------------------------------------------------- /docs/src/apps/diun.md: -------------------------------------------------------------------------------- 1 | # Diun 2 | 3 | [Diun](https://crazymax.dev/diun/) is used to monitor Docker images for new 4 | updates. 
5 | 6 | ## Configuration 7 | 8 | ```yml 9 | watch: 10 | workers: 10 11 | schedule: "0 0 * * 5" 12 | jitter: 30s 13 | firstCheckNotif: false 14 | 15 | providers: 16 | docker: 17 | watchByDefault: false 18 | 19 | notif: 20 | telegram: 21 | # Telegram bot token 22 | token: aabbccdd:11223344 23 | # Telegram chat ID 24 | chatIDs: 25 | - 123456789 26 | templateBody: | 27 | Docker tag {{ .Entry.Image }} which you subscribed to through {{ .Entry.Provider }} provider has been released. 28 | ``` 29 | 30 | ## Watch Images 31 | 32 | To opt in to watching a Docker image, include the `diun.enable` 33 | Docker label: 34 | 35 | ```hcl 36 | config { 37 | labels = { 38 | "diun.enable" = "true" 39 | } 40 | } 41 | ``` 42 | 43 | By default, this will only watch the current tag of the image. If the tag is 44 | `latest`, Diun will send a notification when that tag's checksum changes. 45 | 46 | To allow Diun to watch other tags, include additional labels: 47 | 48 | ```hcl 49 | config { 50 | labels = { 51 | "diun.enable" = "true" 52 | "diun.watch_repo" = "true" 53 | "diun.max_tags" = 3 54 | } 55 | } 56 | ``` 57 | 58 | This will let Diun watch all tags in the Docker repo. It is highly recommended 59 | to set a maximum number of tags that Diun should watch, otherwise Diun will 60 | watch ALL tags, including older ones. 
61 | 62 | ### Command Line 63 | 64 | ```bash 65 | # manipulate images in database 66 | $ docker exec diun diun image list 67 | $ docker exec diun diun image inspect --image=[image] 68 | $ docker exec diun diun image remove --image=[image] 69 | 70 | # send test notification 71 | $ docker exec diun diun notif test 72 | ``` 73 | 74 | ## References 75 | - [Diun](https://crazymax.dev/diun/) 76 | -------------------------------------------------------------------------------- /docs/src/apps/index.md: -------------------------------------------------------------------------------- 1 | # Applications 2 | 3 | ## Actual 4 | 5 | - On first startup, you will be prompted to secure the new server with a password. 6 | 7 | ## Calibre Web 8 | 9 | - Point the `books` bind mount to an existing 10 | [calibre](https://github.com/kovidgoyal/calibre) database with the books 11 | metadata. 12 | 13 | ## Gotify 14 | 15 | - Populate `GOTIFY_DEFAULTUSER_NAME` and `GOTIFY_DEFAULTUSER_PASS` with custom 16 | credentials. 17 | 18 | ## Linkding 19 | 20 | - Populate `LD_SUPERUSER_NAME` and `LD_SUPERUSER_PASSWORD` with custom 21 | credentials. 22 | 23 | ## yarr 24 | 25 | - Populate the `AUTH_FILE` environment variable with custom credentials 26 | in the form `username:password`. 
27 | -------------------------------------------------------------------------------- /docs/src/apps/registry.md: -------------------------------------------------------------------------------- 1 | # Registry 2 | 3 | ## Basic Auth 4 | Create a password file with `htpasswd`: 5 | 6 | ```bash 7 | $ docker run \ 8 | --entrypoint htpasswd \ 9 | httpd:2 -Bbn foo password > htpasswd 10 | ``` 11 | 12 | ## Usage 13 | 14 | Login to the registry by providing the username and password given in [Basic 15 | Auth](#basic-auth): 16 | 17 | ```bash 18 | $ docker login foo.example.com 19 | ``` 20 | 21 | ## References 22 | 23 | - [Docker Registry](https://docs.docker.com/registry/deploying/) 24 | -------------------------------------------------------------------------------- /docs/src/images/cloud_image.md: -------------------------------------------------------------------------------- 1 | # Cloud Images 2 | 3 | Cloud images are pre-installed disk images that have been customized to run on 4 | cloud platforms. They are shipped with `cloud-init` that simplifies the 5 | installation and provisioning of virtual machines. 6 | 7 | Unlike ISOs and LXC container images, Proxmox's API lacks support for uploading 8 | cloud images directly from a given URL (see 9 | [here](https://bugzilla.proxmox.com/show_bug.cgi?id=4141) and 10 | [here](https://forum.proxmox.com/threads/new-vm-from-cloud-init-image-via-api.111091/)). 11 | Instead, they must be manually downloaded and converted into a VM 12 | template to be available to Proxmox. 13 | 14 | >**Warning**: When cloning the cloud image template with Terraform, 15 | >`qemu-guest-agent` must be installed and `agent=1` must be set. Otherwise, 16 | >Terraform will timeout. As such, it is recommended to create a further 17 | >bootstrapped template with [Packer and Ansible](./packer.md). 18 | 19 | 20 | ## Manual Upload 21 | 22 | 1. 
Download any cloud image: 23 | 24 | ```bash 25 | $ wget https://cloud.debian.org/images/cloud/bullseye/20230124-1270/debian11-generic-amd64-20230124-1270.qcow2 26 | ``` 27 | 28 | 2. Create a Proxmox VM from the downloaded image: 29 | 30 | ```bash 31 | $ qm create 9000 \ 32 | --name "debian-11-amd64" \ 33 | --net0 "virtio,bridge=vmbr0" \ 34 | --serial0 socket \ 35 | --vga serial0 \ 36 | --scsihw virtio-scsi-pci \ 37 | --scsi0 "local:0,import-from=/path/to/image" \ 38 | --bootdisk scsi0 \ 39 | --boot "order=scsi0" \ 40 | --ide1 "local:cloudinit" \ 41 | --ostype l26 \ 42 | --cores 1 \ 43 | --sockets 1 \ 44 | --memory 512 \ 45 | --agent 1 46 | ``` 47 | 48 | 3. Resize the new VM (if necessary): 49 | 50 | ```bash 51 | $ qm resize 9000 scsi0 5G 52 | ``` 53 | 54 | 4. Convert the VM into a template: 55 | 56 | ```bash 57 | $ qm template 9000 58 | ``` 59 | 60 | ## Script 61 | 62 | A full script of the steps above can be found at 63 | [bin/import-cloud-image](https://github.com/kencx/homelab/blob/master/bin/import-cloud-image). 64 | 65 | ```bash 66 | $ import-cloud-image --help 67 | 68 | Usage: import-cloud-image [--debug|--force] [URL] [FILENAME] 69 | ``` 70 | 71 | ## References 72 | - [Proxmox Wiki - cloud-init Support](https://pve.proxmox.com/wiki/Cloud-Init_Support) 73 | -------------------------------------------------------------------------------- /docs/src/images/index.md: -------------------------------------------------------------------------------- 1 | # Images 2 | -------------------------------------------------------------------------------- /docs/src/index.md: -------------------------------------------------------------------------------- 1 | # Hubble Homelab 2 | 3 | **[Documentation](https://kencx.github.io/homelab)** 4 | 5 | This repository contains infrastructure-as-code for the automated deployment and 6 | configuration, and management of a Hashicorp (Nomad + Consul + Vault) cluster on 7 | Proxmox. 
8 | 9 | ## Disclaimer 10 | 11 | This project is in alpha status and subject to 12 | [bugs](https://kencx.github.io/homelab/references/issues) and breaking changes. 13 | 14 | Please do not run any code on your machine without understanding the 15 | provisioning flow, in case of data loss. Some playbooks may perform destructive 16 | actions that are irreversible! 17 | 18 | ## Overview 19 | 20 | This project aims to provision a full Hashicorp cluster in a **semi-automated** 21 | manner. It utilizes Packer, Ansible and Terraform: 22 | 23 | 1. Packer creates base Proxmox VM templates from cloud images and ISOs 24 | 2. Terraform provisions cluster nodes by cloning existing VM templates 25 | 3. Ansible installs and configures Vault, Consul, Nomad on cluster nodes 26 | 27 | It comprises minimally of one server and one client node with no high 28 | availability (HA). The nodes run Vault, Consul and Nomad as a cluster. 29 | 30 | To support HA, the setup can be further expanded to at least three server nodes 31 | and multiple client nodes hosted on a Proxmox cluster, spanning multiple 32 | physical machines. 33 | 34 | ## Features 35 | 36 | - [x] Golden image creation with Packer 37 | - [x] Declarative configuration of Proxmox VMs and Vault with Terraform 38 | - [x] Automated post-provisioning with Ansible 39 | - [x] Nomad container scheduling and orchestration 40 | - [x] Consul service discovery 41 | - [x] Secure node communication via mTLS 42 | - [x] Personal Certificate Authority hosted on Vault 43 | - [x] Secrets management, retrieval and rotation with Vault 44 | - [x] Automated certificate management with Vault and consul-template 45 | - [x] Let's Encrypt certificates on Traefik reverse proxy 46 | 47 | ## Getting Started 48 | 49 | See the [documentation](https://kencx.github.io/homelab/getting_started) for more 50 | information on the concrete steps to configure and provision the cluster. 51 | 52 | ## Folder Structure 53 | 54 | ```bash 55 | . 
56 | ├── ansible/ 57 | │ ├── roles 58 | │ ├── playbooks 59 | │ ├── inventory # inventory files 60 | │ └── goss # goss config 61 | ├── bin # custom scripts 62 | ├── packer/ 63 | │ ├── base # VM template from ISO 64 | │ └── base-clone # VM template from existing template 65 | └── terraform/ 66 | ├── cluster # config for cluster 67 | ├── dev # config where I test changes 68 | ├── minio # config for Minio buckets 69 | ├── modules # tf modules 70 | ├── nomad # nomad jobs 71 | ├── postgres # config for Postgres DB users 72 | ├── proxmox # config for Proxmox accounts 73 | └── vault # config for Vault 74 | ``` 75 | 76 | ## Limitations 77 | 78 | - Manual Vault unseal on reboot 79 | - Inter-job dependencies are [not supported](https://github.com/hashicorp/nomad/issues/545) in Nomad 80 | - Vault agent is run as root 81 | 82 | See [issues](https://kencx.github.io/homelab/references/issues) for more information. 83 | 84 | ## Acknowledgements 85 | 86 | - [CGamesPlay/infra](https://github.com/CGamesPlay/infra) 87 | - [assareh/homelab](https://github.com/assareh/home-lab) 88 | - [RealOrangeOne/infrastructure](https://github.com/RealOrangeOne/infrastructure) 89 | -------------------------------------------------------------------------------- /docs/src/prerequisites.md: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | 3 | ## Hardware Requirements 4 | 5 | This project can be run on any modern x86_64 system that meets the recommended system 6 | requirements of [Proxmox](https://pve.proxmox.com/wiki/System_Requirements). I 7 | recommend mini-SFF workstations such as those from [Project 8 | TinyMiniMicro](https://www.servethehome.com/introducing-project-tinyminimicro-home-lab-revolution/). 9 | Alternatively, you may choose to run the cluster on a different hypervisor, on 10 | ARM64 systems or entirely on bare metal but YMMV.
11 | 12 | My own setup comprises: 13 | 14 | - 1x Intel HP Elitedesk 800 G2 Mini 15 | - CPU: Intel Core i5-6500T 16 | - RAM: 16GB DDR4 17 | - Storage: 256GB SSD (OS), 3TB HDD 18 | - 1x Raspberry Pi 4B+ 19 | - TP-Link 5 Port Gigabit Switch 20 | 21 | While a separate router and NAS are recommended, I run a virtualized instance of 22 | both within Proxmox itself. 23 | 24 | ### Networking 25 | 26 | The LAN is not restricted to any specific network architecture, but all server 27 | nodes should be reachable by each other, and the controller host via SSH. 28 | 29 | The following are optional, but highly recommended: 30 | 31 | - A local DNS server that 32 | [forwards](https://developer.hashicorp.com/consul/tutorials/networking/dns-forwarding) 33 | `service.consul` queries to Consul for DNS lookup. This project uses 34 | [CoreDNS](roles/coredns.md). 35 | - A custom domain from any domain registrar, added to Cloudflare as a zone. 36 | 37 | ## Controller Node 38 | 39 | A workstation, controller node or separate host system will be used to run the 40 | required provisioning tools. This system will need to have the following tools 41 | installed: 42 | 43 | - Packer 44 | - Terraform 45 | - Ansible 46 | - Python 3 for various scripts (optional) 47 | 48 | Alternatively, you are free to install the above tools on the same server on which 49 | you are provisioning the cluster. 50 | 51 | ## Cluster Requirements 52 | 53 | - An existing Proxmox server that is reachable by the controller node 54 | - (Optional) An offline, private root and intermediate CA. 55 | - A self-signed certificate, private key for TLS encryption of Vault. A default 56 | key-pair is 57 | [generated](https://github.com/hashicorp/vault/blob/main/.release/linux/postinst) 58 | on installation of Vault. 59 | 60 | >**Note**: While Vault can use certificates generated from its own PKI secrets 61 | >engine, a temporary key pair is still required to start up Vault.
62 | 63 | 64 | 65 | -------------------------------------------------------------------------------- /docs/src/provisioning.md: -------------------------------------------------------------------------------- 1 | # Provisioning 2 | 3 | Provisioning requires a minimum of one server and one client node with no high 4 | availability (HA). 5 | 6 | To support HA, the setup can be further expanded to at least three server nodes 7 | and multiple client nodes hosted on a Proxmox cluster, spanning multiple 8 | physical machines. 9 | 10 | 11 | -------------------------------------------------------------------------------- /docs/src/references/TODO.md: -------------------------------------------------------------------------------- 1 | # Roadmap 2 | 3 | - [ ] Run consul-template as non-root user 4 | - [ ] Run vault-agent as non-root user 5 | - [ ] Automated gossip key rotation for Nomad and Consul 6 | - [ ] ACLs for Nomad and Consul 7 | - [ ] `unseal_vault` role 8 | - [ ] Packer `base` builder 9 | - `preseed.cfg` is unreachable by boot command when controller host and Proxmox VM 10 | are on different subnets. 11 | - [ ] Fix configurable cert TTL by Vault 12 | - [ ] Improve robustness of Bitwarden scripts in Vault role 13 | -------------------------------------------------------------------------------- /docs/src/references/issues.md: -------------------------------------------------------------------------------- 1 | # Issues 2 | 3 | This documents known issues that have not been fixed. 4 | 5 | ## Manual Vault Unseal Process 6 | 7 | Vault server must be manually unsealed when host is rebooted. 8 | 9 | ## Unreachable Nomad Jobs on Reboot 10 | 11 | On some occasions, restarting the Nomad client results in some running jobs 12 | being unreachable. The temporary fix is to restart the job (not alloc or task). 13 | 14 | ## ~Vault-agent not reloading TLS certs~ 15 | 16 | ~Vault-agent does not reload its own TLS configuration after the certificate has 17 | been renewed. 
Although this causes the agent to fail to authenticate with Vault, 18 | it does not constitute a systemd service failure, and the service must be 19 | manually restarted to read the new TLS configuration. Sending a `SIGHUP` 20 | is [not supported](https://github.com/hashicorp/vault/issues/20538).~ 21 | 22 | ~Similar issues: [#16266](https://github.com/hashicorp/vault/issues/16266) and 23 | [#18562](https://github.com/hashicorp/vault/issues/18562). A 24 | [fix](https://github.com/hashicorp/vault/pull/19002) is available in Vault 25 | 1.14.~ 26 | 27 | ## Static Goss Files 28 | 29 | The provided goss files in `ansible/goss` contain hardcoded information that can 30 | cause the smoke tests to fail if some Ansible variables are modified: 31 | 32 | - common_user 33 | - common_nfs_dir 34 | - common_packages 35 | 36 | The temporary workaround is to create your own goss files, edit the given goss 37 | files or to simply comment out the smoke test tasks. 38 | 39 | To fix this, goss 40 | [supports](https://github.com/goss-org/goss/blob/master/docs/gossfile.md#templates) 41 | templating to create dynamic goss files. The `ansible_collection.goss` role must 42 | be modified to add support for dynamic tests. 43 | -------------------------------------------------------------------------------- /docs/src/terraform/index.md: -------------------------------------------------------------------------------- 1 | # Terraform 2 | 3 | Terraform is used to provision Proxmox guest VMs by cloning existing templates. 4 | 5 | ## State 6 | 7 | Terraform state can be configured to be stored in a Minio S3 bucket.
8 | 9 | ```hcl 10 | terraform { 11 | backend "s3" { 12 | region = "main" 13 | bucket = "terraform-state" 14 | key = "path/to/terraform.tfstate" 15 | 16 | skip_credentials_validation = true 17 | skip_region_validation = true 18 | skip_metadata_api_check = true 19 | force_path_style = true 20 | } 21 | } 22 | ``` 23 | 24 | Initialize the backend with: 25 | 26 | ```bash 27 | $ terraform init \ 28 | -backend-config="access_key=${TFSTATE_ACCESS_KEY}" \ 29 | -backend-config="secret_key=${TFSTATE_SECRET_KEY}" \ 30 | -backend-config="endpoint=${TFSTATE_ENDPOINT}" 31 | ``` 32 | 33 | >**Note**: When the Minio credentials are passed with the `-backend-config` 34 | >flag, they will still appear in plain text in the `.terraform` subdirectory and 35 | >any plan files. 36 | -------------------------------------------------------------------------------- /docs/src/terraform/vault.md: -------------------------------------------------------------------------------- 1 | # Vault 2 | 3 | This uses the 4 | [Vault](https://registry.terraform.io/providers/hashicorp/vault/latest/docs) 5 | provider to declaratively manage secrets and policies in a running Vault 6 | instance. The Vault provider must be configured appropriately: 7 | 8 | ```tf 9 | provider "vault" { 10 | address = var.vault_address 11 | token = var.vault_token 12 | ca_cert_file = var.vault_ca_cert_file 13 | } 14 | ``` 15 | 16 | ## Workspaces 17 | 18 | Ansible initializes Vault in the [vault role](../roles/vault.md#initialization). 19 | When doing so, any existing Vault resources in the same workspace are 20 | **destroyed permanently**. As such, care should be taken to ensure the 21 | appropriate workspaces are used when running the role on multiple Vault server 22 | instances or environments (eg. dev and prod). 
23 | 24 | ## Outputs 25 | 26 | Vault produces the following outputs: 27 | 28 | - Certificate key pair for Ansible certificate authentication to Vault 29 | 30 | ## Variables 31 | 32 | | Variable | Description | Type | Default | 33 | | -------------------- | ------------------------------------ | ------ | ---------- | 34 | | vault_address | Vault address | string | `https://localhost:8200` | 35 | | vault_token | (Root) Vault token for provider | string | | 36 | | vault_ca_cert_file | Local path to Vault CA cert file | string | `./certs/vault_ca.crt` | 37 | | vault_audit_path | Vault audit file path | string | `/vault/logs/vault.log`| 38 | | admin_password | Password for admin user | string | | 39 | | kvuser_password | Password for kv user | string | 40 | | allowed_server_domains | List of allowed_domains for PKI server role | list(string) | `["service.consul", "dc1.consul", "dc1.nomad", "global.nomad"]`| 41 | | allowed_client_domains | List of allowed_domains for PKI client role | list(string) | `["service.consul", "dc1.consul", "dc1.nomad", "global.nomad"]` | 42 | | allowed_auth_domains | List of allowed_domains for PKI auth role | list(string) | `["global.vault"]`| 43 | | allowed_vault_domains | List of allowed_domains for PKI vault role | list(string) | `["vault.service.consul", "global.vault"]`| 44 | | ansible_public_key_path | Local path to store Ansible public key for auth | string | `../../certs/ansible.crt` | 45 | | ansible_private_key_path | Local path to store Ansible private key for auth | string | `../../certs/ansible_key.pem` | 46 | 47 | ## Notes 48 | 49 | - The resources for Postgres database secrets engine are configured separately 50 | in [Postgres](./postgres.md). This is because the Postgres database might not 51 | be up when Vault is being initialized. 52 | - It is not recommended to change the `ansible_*_key_path` variables. Changing 53 | them will heavily affect the Ansible roles when they attempt to login to Vault 54 | with the auth certs. 
55 | -------------------------------------------------------------------------------- /packer/base-clone/main.pkr.hcl: -------------------------------------------------------------------------------- 1 | packer { 2 | required_plugins { 3 | proxmox = { 4 | version = ">= 1.1.0" 5 | source = "github.com/hashicorp/proxmox" 6 | } 7 | ansible = { 8 | version = "~> 1" 9 | source = "github.com/hashicorp/ansible" 10 | } 11 | } 12 | } 13 | 14 | locals { 15 | vm_name = "${var.vm_name}-${formatdate("YYYY-MM-DD", timestamp())}" 16 | ssh_public_key = file(var.ssh_public_key_path) 17 | template_desc = "${var.template_description}. Created by Packer on ${formatdate("YYYY-MM-DD", timestamp())}." 18 | ipv4 = "${var.ip_address}/24" 19 | } 20 | 21 | source "proxmox-clone" "base" { 22 | proxmox_url = var.proxmox_url 23 | username = var.proxmox_username 24 | password = var.proxmox_password 25 | node = var.proxmox_node 26 | 27 | clone_vm = var.clone_vm 28 | full_clone = true 29 | task_timeout = "5m" 30 | insecure_skip_tls_verify = true 31 | 32 | qemu_agent = true 33 | cloud_init = true 34 | cloud_init_storage_pool = "volumes" 35 | 36 | vm_id = var.vm_id 37 | vm_name = local.vm_name 38 | template_description = local.template_desc 39 | 40 | os = "l26" 41 | cores = var.cores 42 | sockets = var.sockets 43 | memory = var.memory 44 | scsi_controller = "virtio-scsi-pci" 45 | 46 | network_adapters { 47 | bridge = "vmbr1" 48 | model = "virtio" 49 | } 50 | 51 | vga { 52 | type = "serial0" 53 | } 54 | 55 | ipconfig { 56 | ip = local.ipv4 57 | gateway = var.gateway 58 | } 59 | 60 | ssh_host = var.ip_address 61 | ssh_username = var.ssh_username 62 | ssh_private_key_file = var.ssh_private_key_path 63 | ssh_port = 22 64 | ssh_timeout = "10m" 65 | } 66 | 67 | build { 68 | sources = ["source.proxmox-clone.base"] 69 | 70 | # make user ssh-ready for Ansible 71 | provisioner "shell" { 72 | execute_command = "{{ .Vars }} sudo -S -E sh -eux '{{ .Path }}'" 73 | inline = [ 74 | 
"HOME_DIR=/home/${var.ssh_username}/.ssh", 75 | "mkdir -m 0700 -p $HOME_DIR", 76 | "echo '${local.ssh_public_key}' >> $HOME_DIR/authorized_keys", 77 | "chown -R ${var.ssh_username}:${var.ssh_username} $HOME_DIR", 78 | "chmod 0600 $HOME_DIR/authorized_keys", 79 | "SUDOERS_FILE=/etc/sudoers.d/80-packer-users", 80 | "echo '${var.ssh_username} ALL=(ALL) NOPASSWD: ALL' > $SUDOERS_FILE", 81 | "chmod 0440 $SUDOERS_FILE", 82 | ] 83 | } 84 | 85 | provisioner "shell" { 86 | execute_command = "{{ .Vars }} sudo -S -E sh -eux '{{ .Path }}'" 87 | inline = [ 88 | # wait for cloud-init to complete 89 | "/usr/bin/cloud-init status --wait", 90 | # install and start qemu-guest-agent 91 | "apt update && apt install -y qemu-guest-agent ", 92 | "systemctl enable qemu-guest-agent.service", 93 | "systemctl start --no-block qemu-guest-agent.service", 94 | ] 95 | expect_disconnect = true 96 | } 97 | 98 | # inventory file is automatically generated by Packer 99 | provisioner "ansible" { 100 | playbook_file = "../../ansible/playbooks/common.yml" 101 | extra_arguments = [ 102 | "--extra-vars", 103 | "user=${var.ssh_username}", 104 | ] 105 | user = var.ssh_username 106 | galaxy_file = "../../requirements.yml" 107 | ansible_env_vars = [ 108 | "ANSIBLE_STDOUT_CALLBACK=yaml", 109 | "ANSIBLE_HOST_KEY_CHECKING=False", 110 | "ANSIBLE_CONFIG=../../ansible/ansible.cfg" 111 | ] 112 | use_proxy = false 113 | pause_before = "5s" 114 | } 115 | } 116 | -------------------------------------------------------------------------------- /packer/base-clone/variables.pkr.hcl: -------------------------------------------------------------------------------- 1 | variable "proxmox_url" { 2 | type = string 3 | } 4 | 5 | variable "proxmox_username" { 6 | type = string 7 | } 8 | 9 | variable "proxmox_password" { 10 | type = string 11 | sensitive = true 12 | } 13 | 14 | variable "proxmox_node" { 15 | type = string 16 | default = "pve" 17 | } 18 | 19 | variable "clone_vm" { 20 | type = string 21 | description = "Name of 
existing VM template to clone" 22 | } 23 | 24 | variable "vm_id" { 25 | type = number 26 | description = "ID of VM template" 27 | default = 5000 28 | } 29 | 30 | variable "vm_name" { 31 | type = string 32 | description = "Name of VM template" 33 | } 34 | 35 | variable "template_description" { 36 | type = string 37 | description = "Description of VM template" 38 | default = "Debian 11 base image" 39 | } 40 | 41 | variable "cores" { 42 | type = number 43 | description = "Number of cores" 44 | default = 1 45 | } 46 | 47 | variable "sockets" { 48 | type = number 49 | description = "Number of sockets" 50 | default = 1 51 | } 52 | 53 | variable "memory" { 54 | type = number 55 | description = "Memory in MB" 56 | default = 1024 57 | } 58 | 59 | variable "ssh_username" { 60 | type = string 61 | } 62 | 63 | variable "ip_address" { 64 | type = string 65 | description = "Temporary IP address of VM template" 66 | default = "10.10.10.250" 67 | } 68 | 69 | variable "gateway" { 70 | type = string 71 | description = "Gateway of VM template" 72 | default = "10.10.10.1" 73 | } 74 | 75 | variable "ssh_public_key_path" { 76 | type = string 77 | description = "SSH Public Key Path" 78 | } 79 | 80 | variable "ssh_private_key_path" { 81 | type = string 82 | description = "SSH Private Key Path" 83 | } 84 | -------------------------------------------------------------------------------- /packer/base/bin/minimize.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | case "$PACKER_BUILDER_TYPE" in 4 | qemu) exit 0 ;; 5 | esac 6 | 7 | # Whiteout root 8 | count=$(df --sync -kP / | tail -n1 | awk -F ' ' '{print $4}') 9 | count=$(($count-1)) 10 | dd if=/dev/zero of=/tmp/whitespace bs=1M count=$count || echo "dd exit code $? 
is suppressed"; 11 | rm /tmp/whitespace 12 | 13 | # Whiteout /boot 14 | count=$(df --sync -kP /boot | tail -n1 | awk -F ' ' '{print $4}') 15 | count=$(($count-1)) 16 | dd if=/dev/zero of=/boot/whitespace bs=1M count=$count || echo "dd exit code $? is suppressed"; 17 | rm /boot/whitespace 18 | 19 | set +e 20 | swapuuid="`/sbin/blkid -o value -l -s UUID -t TYPE=swap`"; 21 | case "$?" in 22 | 2|0) ;; 23 | *) exit 1 ;; 24 | esac 25 | set -e 26 | 27 | if [ "x${swapuuid}" != "x" ]; then 28 | # Whiteout the swap partition to reduce box size 29 | # Swap is disabled till reboot 30 | swappart="`readlink -f /dev/disk/by-uuid/$swapuuid`"; 31 | /sbin/swapoff "$swappart" || true; 32 | dd if=/dev/zero of="$swappart" bs=1M || echo "dd exit code $? is suppressed"; 33 | /sbin/mkswap -U "$swapuuid" "$swappart"; 34 | fi 35 | 36 | sync; 37 | -------------------------------------------------------------------------------- /packer/base/bin/vagrant.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -eux 2 | 3 | # turn off reverse dns lookup when ssh-ing 4 | SSHD_CONFIG="/etc/ssh/sshd_config" 5 | # ensure that there is a trailing newline before attempting to concatenate 6 | sed -i -e '$a\' "$SSHD_CONFIG" 7 | 8 | USEDNS="UseDNS no" 9 | if grep -q -E "^[[:space:]]*UseDNS" "$SSHD_CONFIG"; then 10 | sed -i "s/^\s*UseDNS.*/${USEDNS}/" "$SSHD_CONFIG" 11 | else 12 | echo "$USEDNS" >>"$SSHD_CONFIG" 13 | fi 14 | 15 | # disable predictable network interface names and use eth0 16 | sed -i 's/en[[:alnum:]]*/eth0/g' /etc/network/interfaces; 17 | sed -i 's/GRUB_CMDLINE_LINUX="\(.*\)"/GRUB_CMDLINE_LINUX="net.ifnames=0 biosdevname=0 \1"/g' /etc/default/grub; 18 | update-grub; 19 | 20 | # Adding a 2 sec delay to the interface up, to make the dhclient happy 21 | echo "pre-up sleep 2" >> /etc/network/interfaces 22 | -------------------------------------------------------------------------------- /packer/base/http/preseed.cfg: 
-------------------------------------------------------------------------------- 1 | choose-mirror-bin mirror/http/proxy string 2 | d-i apt-setup/use_mirror boolean true 3 | d-i base-installer/kernel/override-image string linux-server 4 | d-i grub-installer/only_debian boolean true 5 | d-i grub-installer/with_other_os boolean true 6 | # d-i grub-installer/bootdev string default 7 | # d-i hw-detect/load_firmware boolean false 8 | # d-i hw-detect/load_media boolean false 9 | 10 | # Clock Setup 11 | d-i clock-setup/utc boolean true 12 | d-i clock-setup/utc-auto boolean true 13 | d-i time/zone string UTC 14 | # d-i time/zone string Asia/Singapore 15 | 16 | # Locale Setup 17 | # d-i debian-installer/language string en 18 | # d-i debian-installer/country string sg 19 | 20 | # # Keyboard Setup 21 | d-i keymap select us 22 | 23 | # mirror 24 | apt-mirror-setup apt-setup/use_mirror boolean true 25 | d-i mirror/country string manual 26 | d-i mirror/http/directory string /debian 27 | d-i mirror/http/hostname string httpredir.debian.org 28 | d-i mirror/http/proxy string 29 | 30 | # Drive Setup 31 | d-i partman-auto/method string lvm 32 | d-i partman-auto-lvm/guided_size string max 33 | d-i partman-auto/choose_recipe select atomic 34 | d-i partman-lvm/confirm boolean true 35 | d-i partman-lvm/confirm_nooverwrite boolean true 36 | d-i partman-lvm/device_remove_lvm boolean true 37 | d-i partman-md/device_remove_md boolean true 38 | d-i partman/choose_partition select finish 39 | d-i partman/confirm boolean true 40 | d-i partman/confirm_nooverwrite boolean true 41 | d-i partman/confirm_write_new_label boolean true 42 | 43 | # User Setup 44 | d-i passwd/root-login boolean false 45 | d-i passwd/root-password password password 46 | d-i passwd/root-password-again password password 47 | d-i passwd/user-fullname string debian 48 | d-i passwd/username string debian 49 | d-i passwd/user-uid string 1000 50 | d-i passwd/user-password password debian 51 | d-i passwd/user-password-again 
password debian 52 | d-i passwd/user-default-groups sudo 53 | d-i user-setup/allow-password-weak boolean true 54 | d-i user-setup/encrypt-home boolean false 55 | 56 | # Package Setup 57 | # d-i apt-setup/contrib boolean true 58 | # d-i apt-setup/non-free boolean true 59 | tasksel tasksel/first multiselect standard, ssh-server 60 | d-i pkgsel/install-language-support boolean false 61 | d-i pkgsel/update-policy select none 62 | d-i pkgsel/upgrade select full-upgrade 63 | d-i pkgsel/include string sudo wget curl fuse make qemu-guest-agent cloud-init 64 | # d-i pkgsel/update-policy select unattended-upgrades 65 | # d-i pkgsel/include string unattended-upgrades 66 | popularity-contest popularity-contest/participate boolean false 67 | 68 | # Final Setup 69 | d-i finish-install/reboot_in_progress note 70 | 71 | d-i preseed/late_command string sed -i '/^deb cdrom:/s/^/#/' /target/etc/apt/sources.list 72 | # https://unix.stackexchange.com/questions/409212/preseed-directive-to-skip-another-cd-dvd-scanning 73 | apt-cdrom-setup apt-setup/cdrom/set-first boolean false 74 | -------------------------------------------------------------------------------- /packer/base/http/preseed.pkrtpl: -------------------------------------------------------------------------------- 1 | choose-mirror-bin mirror/http/proxy string 2 | d-i apt-setup/use_mirror boolean true 3 | d-i base-installer/kernel/override-image string linux-server 4 | d-i grub-installer/only_debian boolean true 5 | d-i grub-installer/with_other_os boolean true 6 | # d-i grub-installer/bootdev string default 7 | # d-i hw-detect/load_firmware boolean false 8 | # d-i hw-detect/load_media boolean false 9 | 10 | # Clock Setup 11 | d-i clock-setup/utc boolean true 12 | d-i clock-setup/utc-auto boolean true 13 | d-i time/zone string UTC 14 | # d-i time/zone string Asia/Singapore 15 | 16 | # Locale Setup 17 | # d-i debian-installer/language string en 18 | # d-i debian-installer/country string sg 19 | 20 | # # Keyboard Setup 21 | d-i 
keymap select us 22 | 23 | # mirror 24 | apt-mirror-setup apt-setup/use_mirror boolean true 25 | d-i mirror/country string manual 26 | d-i mirror/http/directory string /debian 27 | d-i mirror/http/hostname string httpredir.debian.org 28 | d-i mirror/http/proxy string 29 | 30 | # Drive Setup 31 | d-i partman-auto/method string lvm 32 | d-i partman-auto-lvm/guided_size string max 33 | d-i partman-auto/choose_recipe select atomic 34 | d-i partman-lvm/confirm boolean true 35 | d-i partman-lvm/confirm_nooverwrite boolean true 36 | d-i partman-lvm/device_remove_lvm boolean true 37 | d-i partman-md/device_remove_md boolean true 38 | d-i partman/choose_partition select finish 39 | d-i partman/confirm boolean true 40 | d-i partman/confirm_nooverwrite boolean true 41 | d-i partman/confirm_write_new_label boolean true 42 | 43 | # User Setup 44 | d-i passwd/root-login boolean false 45 | d-i passwd/root-password password ${root_password} 46 | d-i passwd/root-password-again password ${root_password} 47 | d-i passwd/user-fullname string ${username} 48 | d-i passwd/username string ${username} 49 | d-i passwd/user-uid string 1000 50 | d-i passwd/user-password password ${password} 51 | d-i passwd/user-password-again password ${password} 52 | d-i passwd/user-default-groups sudo 53 | d-i user-setup/allow-password-weak boolean true 54 | d-i user-setup/encrypt-home boolean false 55 | 56 | # Package Setup 57 | # d-i apt-setup/contrib boolean true 58 | # d-i apt-setup/non-free boolean true 59 | tasksel tasksel/first multiselect standard, ssh-server 60 | d-i pkgsel/install-language-support boolean false 61 | d-i pkgsel/update-policy select none 62 | d-i pkgsel/upgrade select full-upgrade 63 | d-i pkgsel/include string sudo wget curl fuse make qemu-guest-agent cloud-init 64 | # d-i pkgsel/update-policy select unattended-upgrades 65 | # d-i pkgsel/include string unattended-upgrades 66 | popularity-contest popularity-contest/participate boolean false 67 | 68 | # Final Setup 69 | d-i 
finish-install/reboot_in_progress note 70 | 71 | d-i preseed/late_command string sed -i '/^deb cdrom:/s/^/#/' /target/etc/apt/sources.list 72 | # https://unix.stackexchange.com/questions/409212/preseed-directive-to-skip-another-cd-dvd-scanning 73 | apt-cdrom-setup apt-setup/cdrom/set-first boolean false 74 | -------------------------------------------------------------------------------- /packer/base/variables.pkr.hcl: -------------------------------------------------------------------------------- 1 | variable "proxmox_url" { 2 | type = string 3 | } 4 | 5 | variable "proxmox_username" { 6 | type = string 7 | } 8 | 9 | variable "proxmox_password" { 10 | type = string 11 | sensitive = true 12 | } 13 | 14 | variable "proxmox_node" { 15 | type = string 16 | default = "pve" 17 | } 18 | 19 | variable "iso_url" { 20 | type = string 21 | description = "ISO file URL" 22 | } 23 | 24 | variable "iso_checksum" { 25 | type = string 26 | description = "ISO file checksum" 27 | } 28 | 29 | variable "vm_id" { 30 | type = number 31 | default = 9000 32 | description = "ID of temp VM during build process" 33 | } 34 | 35 | variable "vm_name" { 36 | type = string 37 | description = "VM name" 38 | default = "base" 39 | } 40 | 41 | variable "cores" { 42 | type = number 43 | description = "Number of cores" 44 | default = 1 45 | } 46 | 47 | variable "sockets" { 48 | type = number 49 | description = "Number of sockets" 50 | default = 1 51 | } 52 | 53 | variable "memory" { 54 | type = number 55 | description = "Memory in MB" 56 | default = 1024 57 | } 58 | 59 | variable "root_password" { 60 | type = string 61 | description = "Root password" 62 | default = "vagrant" 63 | } 64 | 65 | variable "ssh_username" { 66 | type = string 67 | description = "SSH username" 68 | default = "debian" 69 | } 70 | 71 | variable "ssh_password" { 72 | type = string 73 | description = "SSH password" 74 | default = "vagrant" 75 | } 76 | 77 | variable "ssh_public_key_path" { 78 | type = string 79 | description = 
"SSH Public Key Path" 80 | default = "~/.ssh/vagrant.pub" 81 | } 82 | 83 | variable "ssh_private_key_path" { 84 | type = string 85 | description = "SSH Private Key Path" 86 | default = "~/.ssh/vagrant" 87 | } 88 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | cfgv==3.3.1 2 | distlib==0.3.7 3 | filelock==3.12.2 4 | identify==2.5.26 5 | lark==1.1.7 6 | nodeenv==1.8.0 7 | platformdirs==3.10.0 8 | pre-commit==3.3.3 9 | python-hcl2==4.3.2 10 | PyYAML==6.0.1 11 | virtualenv==20.24.2 12 | -------------------------------------------------------------------------------- /requirements.yml: -------------------------------------------------------------------------------- 1 | roles: 2 | - name: geerlingguy.docker 3 | version: 7.0.1 4 | collections: 5 | - name: https://github.com/kencx/ansible-collection.git 6 | type: git 7 | version: HEAD 8 | - name: community.docker 9 | -------------------------------------------------------------------------------- /terraform/cluster/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/bpg/proxmox" { 5 | version = "0.36.0" 6 | constraints = ">= 0.36.0" 7 | hashes = [ 8 | "h1:z/67CMYHJULjF9qpaHdVh22IJnykwAH1r6ZauPhHr9c=", 9 | "zh:04781bb2d728725cd453b7c76b712b3e0b88e7dd94241a9afe9ef397d9d663c7", 10 | "zh:270a3d2a5b515b8b5b10406edd704c26ae82faee7b9f3e9c0f6e4e2a52a11efd", 11 | "zh:28b9a0c507388d1004e793c5d6b0dff88c737ee8c9c725f7e405410004178c68", 12 | "zh:38185b751f8314abbb836ac2e51b1c43b1c669c3d0145dc81e35c09d3d1e10eb", 13 | "zh:4bd20448c58d89bca52bc8c39950651981d0176103900dc752a07e4ff050f492", 14 | "zh:4c65c39a33120a068b187ad7c411a95aa44cee4c39a2278325450060b09f6a7f", 15 | "zh:659d675485bfcd99c5a9023b4815e8bd2e3cb9d5387d89161d5f57c06bd71c8d", 16 | "zh:7adb39250bffb095c3770b0ed0ec38fb9b9ec529792580671e65af787ed94ccd", 17 | "zh:ae9d1b7302edd582d0ec4d2328e187706c185c0031b2c4df0724be60ede76019", 18 | "zh:b47cdaed4cc6080a444d5b831eb6ec13a6b8f74364c9b8e371420b97d29eb9bc", 19 | "zh:ba5e29b31d6a4f61984eae1bc1f7d1a4c2df84596dcf04ac91d1c562b17500a0", 20 | "zh:cb8703bcf8b0bcda56e65a3e84b6edc7a1cd943f639c93b893d532906fdabf11", 21 | "zh:da5718d73a8a05df234b26bf86429a9953008686dd513b028d58d23d46af3826", 22 | "zh:fa399ff24db7287f2cde373e3bbb8df9bb97bf4d6abc8d882f66a2bf3a954250", 23 | ] 24 | } 25 | 26 | provider "registry.terraform.io/hashicorp/local" { 27 | version = "2.4.0" 28 | hashes = [ 29 | "h1:R97FTYETo88sT2VHfMgkPU3lzCsZLunPftjSI5vfKe8=", 30 | "zh:53604cd29cb92538668fe09565c739358dc53ca56f9f11312b9d7de81e48fab9", 31 | "zh:66a46e9c508716a1c98efbf793092f03d50049fa4a83cd6b2251e9a06aca2acf", 32 | "zh:70a6f6a852dd83768d0778ce9817d81d4b3f073fab8fa570bff92dcb0824f732", 33 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 34 | "zh:82a803f2f484c8b766e2e9c32343e9c89b91997b9f8d2697f9f3837f62926b35", 35 | "zh:9708a4e40d6cc4b8afd1352e5186e6e1502f6ae599867c120967aebe9d90ed04", 36 | "zh:973f65ce0d67c585f4ec250c1e634c9b22d9c4288b484ee2a871d7fa1e317406", 37 | 
"zh:c8fa0f98f9316e4cfef082aa9b785ba16e36ff754d6aba8b456dab9500e671c6", 38 | "zh:cfa5342a5f5188b20db246c73ac823918c189468e1382cb3c48a9c0c08fc5bf7", 39 | "zh:e0e2b477c7e899c63b06b38cd8684a893d834d6d0b5e9b033cedc06dd7ffe9e2", 40 | "zh:f62d7d05ea1ee566f732505200ab38d94315a4add27947a60afa29860822d3fc", 41 | "zh:fa7ce69dde358e172bd719014ad637634bbdabc49363104f4fca759b4b73f2ce", 42 | ] 43 | } 44 | -------------------------------------------------------------------------------- /terraform/cluster/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | proxmox = { 4 | source = "bpg/proxmox" 5 | version = ">=0.36.0" 6 | } 7 | } 8 | } 9 | 10 | provider "proxmox" { 11 | endpoint = var.proxmox_ip 12 | api_token = var.proxmox_api_token 13 | insecure = true 14 | ssh { 15 | agent = true 16 | } 17 | } 18 | 19 | module "server" { 20 | source = "../modules/vm" 21 | for_each = { 22 | for idx, vm in var.servers : idx + 1 => vm 23 | } 24 | 25 | hostname = "server-${each.key}" 26 | vmid = each.value.id 27 | tags = var.tags 28 | target_node = var.target_node 29 | 30 | clone_template_id = var.template_id 31 | onboot = var.onboot 32 | started = var.started 33 | 34 | cores = each.value.cores 35 | sockets = each.value.sockets 36 | memory = each.value.memory 37 | 38 | disk_size = each.value.disk_size 39 | disk_datastore = var.disk_datastore 40 | 41 | ip_address = each.value.ip_address 42 | ip_gateway = var.ip_gateway 43 | 44 | ssh_user = var.ssh_user 45 | ssh_public_keys = [file(var.ssh_public_key_file)] 46 | } 47 | 48 | module "client" { 49 | source = "../modules/vm" 50 | for_each = { 51 | for idx, vm in var.clients : idx + 1 => vm 52 | } 53 | 54 | hostname = "client-${each.key}" 55 | vmid = each.value.id 56 | tags = var.tags 57 | target_node = var.target_node 58 | 59 | clone_template_id = var.template_id 60 | onboot = var.onboot 61 | started = var.started 62 | 63 | cores = each.value.cores 64 | sockets = 
each.value.sockets 65 | memory = each.value.memory 66 | 67 | disk_size = each.value.disk_size 68 | disk_datastore = var.disk_datastore 69 | 70 | ip_address = each.value.ip_address 71 | ip_gateway = var.ip_gateway 72 | 73 | ssh_user = var.ssh_user 74 | ssh_public_keys = [file(var.ssh_public_key_file)] 75 | } 76 | 77 | resource "local_file" "tf_ansible_inventory_file" { 78 | content = <<-EOF 79 | [server] 80 | %{for vm in var.servers~} 81 | ${split("/", vm.ip_address)[0]} 82 | %{endfor~} 83 | 84 | [client] 85 | %{for vm in var.clients~} 86 | ${split("/", vm.ip_address)[0]} 87 | %{endfor~} 88 | 89 | [prod] 90 | %{for vm in var.servers~} 91 | ${split("/", vm.ip_address)[0]} 92 | %{endfor~} 93 | %{for vm in var.clients~} 94 | ${split("/", vm.ip_address)[0]} 95 | %{endfor~} 96 | EOF 97 | filename = "${path.module}/tf_ansible_inventory" 98 | file_permission = "0644" 99 | } 100 | -------------------------------------------------------------------------------- /terraform/cluster/variables.tf: -------------------------------------------------------------------------------- 1 | variable "proxmox_ip" { 2 | type = string 3 | description = "IP of Proxmox server (mandatory)" 4 | } 5 | 6 | variable "proxmox_api_token" { 7 | type = string 8 | sensitive = true 9 | } 10 | 11 | variable "target_node" { 12 | type = string 13 | description = "" 14 | default = "pve" 15 | } 16 | 17 | variable "tags" { 18 | type = list(string) 19 | description = "VM tags" 20 | default = ["prod"] 21 | } 22 | 23 | variable "template_id" { 24 | type = number 25 | description = "Template ID to clone" 26 | } 27 | 28 | variable "onboot" { 29 | type = bool 30 | description = "Start VM on boot" 31 | default = false 32 | } 33 | 34 | variable "started" { 35 | type = bool 36 | description = "Start VM on creation" 37 | default = true 38 | } 39 | 40 | variable "servers" { 41 | type = list(object({ 42 | name = string 43 | id = number 44 | cores = number 45 | sockets = number 46 | memory = number 47 | disk_size = number 
48 | ip_address = string 49 | })) 50 | default = [] 51 | } 52 | 53 | variable "clients" { 54 | type = list(object({ 55 | name = string 56 | id = number 57 | cores = number 58 | sockets = number 59 | memory = number 60 | disk_size = number 61 | ip_address = string 62 | })) 63 | default = [] 64 | } 65 | 66 | variable "disk_datastore" { 67 | type = string 68 | description = "Datastore on which to store disk" 69 | default = "volumes" 70 | } 71 | 72 | # variable "server_ip_address" { 73 | # type = list(string) 74 | # description = "List of server IPv4 address in CIDR notation (eg. 10.10.10.2/24)" 75 | # validation { 76 | # condition = alltrue([ 77 | # for i in var.server_ip_address : can(cidrnetmask(i)) 78 | # ]) 79 | # error_message = "Must be a valid IPv4 address with subnet mask" 80 | # } 81 | # } 82 | # 83 | # variable "client_ip_address" { 84 | # type = list(string) 85 | # description = "List of client IPv4 address in CIDR notation (eg. 10.10.10.2/24)" 86 | # validation { 87 | # condition = alltrue([ 88 | # for i in var.client_ip_address : can(cidrnetmask(i)) 89 | # ]) 90 | # error_message = "Must be a valid IPv4 address with subnet mask" 91 | # } 92 | # } 93 | 94 | variable "control_ip_address" { 95 | type = string 96 | description = "Control IPv4 address in CIDR notation (eg. 10.10.10.2/24)" 97 | validation { 98 | condition = can(cidrnetmask(var.control_ip_address)) 99 | error_message = "Must be a valid IPv4 address with subnet mask" 100 | } 101 | } 102 | 103 | variable "ip_gateway" { 104 | type = string 105 | description = "IP gateway address (eg. 
10.10.10.1)" 106 | validation { 107 | condition = can(cidrnetmask("${var.ip_gateway}/24")) 108 | error_message = "Must be a valid IPv4 address" 109 | } 110 | } 111 | 112 | variable "ssh_user" { 113 | type = string 114 | description = "SSH user" 115 | } 116 | 117 | variable "ssh_public_key_file" { 118 | type = string 119 | description = "Public SSH key file" 120 | } 121 | -------------------------------------------------------------------------------- /terraform/modules/database/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/cyrilgdn/postgresql" { 5 | version = "1.20.0" 6 | constraints = ">= 1.19.0" 7 | hashes = [ 8 | "h1:MMtr2K+jD4aaD98IEAr9OkEL3Zn+U7jICBW+J1Oqtz8=", 9 | "zh:19c62675a98dfd535fc502a08819f4baee0d3367282b90ae07ccb09bb02d4c8b", 10 | "zh:49bb53da13fe4f97d3735a977a833ddfc205bf05fcea6ce5de1978d99923d2b7", 11 | "zh:65b423afa8eba3ea9cb3375f8f288ef9496d71cd4c9ba248b1ec55d2f8d6ad64", 12 | "zh:841c0418ed751d33c5a98750bf72dc06bd53bdf1bb7c739a39995b7002aa4ea9", 13 | "zh:87840476c5b8b04c3a8398c756ec96a7d63f6aa9e3f657fb45d79135aec0c7da", 14 | "zh:98fe7973afe8a95f86428fd5a47f96df5f1984eac9907b4665a65190b7008f59", 15 | "zh:a82759e5701b709cdc23eff81b850a2979b5fcedf22900cd10ca484d2ccda496", 16 | "zh:ad3a9288149cf4f534baf4278bd9ca50e254373ecd151d870f2eba9aa184b441", 17 | "zh:c938d8c959efaf63f38393a9ee9b9fe28e154523c5c25adececbed26b8f38043", 18 | "zh:cf81079e67e457a201aafea9f4e8b96987549a15c0f0bb284680cca8f069a679", 19 | "zh:dca79738c6a5cff0668b0f0fec5e7bceceadf6ad27b116c42907f1b942103d69", 20 | "zh:e4922cbe888c949b1bfe8d459869cc3fd17c2957dcde265a30209ae25635a3c2", 21 | "zh:e56c16a8dfb649f731e5cd41b8cc12ed970da76a0c6794c50bc70b89633f4d5b", 22 | "zh:ec44eafdaf7433cf8f4ee366ca89bfca1dd3e18e23bfcf61d35e231377201a77", 23 | ] 24 | } 25 | 26 | provider 
"registry.terraform.io/hashicorp/vault" { 27 | version = "3.18.0" 28 | constraints = ">= 3.18.0" 29 | hashes = [ 30 | "h1:e10+o2ABDgkhcg8pw+odmlrrtzl4PfAyevOjazAlRZ4=", 31 | "zh:0e898f977d2dbd0b2ffeb25520f6f3aaa0a078f654bf312dc12fefc327313204", 32 | "zh:11899fb3e6d2ce6215047cc37c4e1cbdc01334242103600d79009bcdda2cccd9", 33 | "zh:19c57f433f014f6275d1461dd190c50b1fbd2b1217718de6d2eb64e6a9bcea5c", 34 | "zh:4e2aa164ffd13080dc10d5de4256b684108126e1082c2613854e26a398831389", 35 | "zh:77abbf9d90d085677194305cf192f7890408881bbedc77e97c5146cef3e27a7c", 36 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 37 | "zh:790758438efe4389fdb0cabfb6f5118dad13869946665a72ba79a2f1102ff153", 38 | "zh:b9f3f1ba160a41545c4a8cb3a0d91fb37e194cfd6879ac7f358321851242ff78", 39 | "zh:bf19d8380e93a8a6ea8735cc015d4d04c6c588b233bb7cbb2bc3c277b7973f9a", 40 | "zh:de096c2afc87052e4848661ae5fc87528468399ae1a3ef242f1d6738504c79fc", 41 | "zh:eb4dce6a7bc10fa836cd379161bb5fad698d3288099e6ce0fa92ca3183acf242", 42 | "zh:f1c150dc13d6597ee08b83904fdd97a6702a106d3f524d60f048f2bd5c492f51", 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /terraform/modules/database/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | vault = { 4 | source = "hashicorp/vault" 5 | version = ">= 3.18.0" 6 | } 7 | postgresql = { 8 | source = "cyrilgdn/postgresql" 9 | version = ">= 1.19.0" 10 | } 11 | } 12 | } 13 | 14 | resource "postgresql_role" "role" { 15 | name = var.postgres_role_name 16 | password = var.postgres_role_password 17 | login = true 18 | create_database = true 19 | } 20 | 21 | resource "postgresql_database" "database" { 22 | name = var.postgres_role_name 23 | owner = var.postgres_role_name 24 | depends_on = [postgresql_role.role] 25 | } 26 | 27 | resource "vault_database_secret_backend_static_role" "static_role" { 28 | backend = var.postgres_vault_backend 29 | name 
= var.postgres_role_name 30 | username = var.postgres_role_name 31 | db_name = var.postgres_db_name 32 | 33 | rotation_period = var.postgres_static_role_rotation_period 34 | rotation_statements = [ 35 | "ALTER USER \"{{name}}\" WITH PASSWORD '{{password}}';" 36 | ] 37 | depends_on = [postgresql_database.database] 38 | } 39 | 40 | locals { 41 | policy_path = "${var.postgres_vault_backend}/static-creds/${var.postgres_role_name}" 42 | } 43 | 44 | data "vault_policy_document" "policy_document" { 45 | rule { 46 | path = local.policy_path 47 | capabilities = ["read"] 48 | } 49 | } 50 | 51 | resource "vault_policy" "policy" { 52 | name = var.postgres_role_name 53 | policy = data.vault_policy_document.policy_document.hcl 54 | } 55 | -------------------------------------------------------------------------------- /terraform/modules/database/variables.tf: -------------------------------------------------------------------------------- 1 | variable "postgres_vault_backend" { 2 | type = string 3 | description = "Mount for Postgres database secrets engine." 4 | default = "postgres" 5 | } 6 | 7 | variable "postgres_db_name" { 8 | type = string 9 | description = "Unique name of Postgres database connection to use for static role." 10 | default = "postgres" 11 | } 12 | 13 | variable "postgres_role_name" { 14 | type = string 15 | description = "Postgres role name." 16 | } 17 | 18 | variable "postgres_role_password" { 19 | type = string 20 | sensitive = true 21 | description = "Temporary password for Postgres role. This will be rotated and managed by Vault." 22 | } 23 | 24 | variable "postgres_static_role_rotation_period" { 25 | type = number 26 | description = "Postgres role password rotation period (in s)." 
27 | default = 86400 28 | } 29 | -------------------------------------------------------------------------------- /terraform/modules/vm/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/bpg/proxmox" { 5 | version = "0.36.0" 6 | constraints = ">= 0.36.0" 7 | hashes = [ 8 | "h1:z/67CMYHJULjF9qpaHdVh22IJnykwAH1r6ZauPhHr9c=", 9 | "zh:04781bb2d728725cd453b7c76b712b3e0b88e7dd94241a9afe9ef397d9d663c7", 10 | "zh:270a3d2a5b515b8b5b10406edd704c26ae82faee7b9f3e9c0f6e4e2a52a11efd", 11 | "zh:28b9a0c507388d1004e793c5d6b0dff88c737ee8c9c725f7e405410004178c68", 12 | "zh:38185b751f8314abbb836ac2e51b1c43b1c669c3d0145dc81e35c09d3d1e10eb", 13 | "zh:4bd20448c58d89bca52bc8c39950651981d0176103900dc752a07e4ff050f492", 14 | "zh:4c65c39a33120a068b187ad7c411a95aa44cee4c39a2278325450060b09f6a7f", 15 | "zh:659d675485bfcd99c5a9023b4815e8bd2e3cb9d5387d89161d5f57c06bd71c8d", 16 | "zh:7adb39250bffb095c3770b0ed0ec38fb9b9ec529792580671e65af787ed94ccd", 17 | "zh:ae9d1b7302edd582d0ec4d2328e187706c185c0031b2c4df0724be60ede76019", 18 | "zh:b47cdaed4cc6080a444d5b831eb6ec13a6b8f74364c9b8e371420b97d29eb9bc", 19 | "zh:ba5e29b31d6a4f61984eae1bc1f7d1a4c2df84596dcf04ac91d1c562b17500a0", 20 | "zh:cb8703bcf8b0bcda56e65a3e84b6edc7a1cd943f639c93b893d532906fdabf11", 21 | "zh:da5718d73a8a05df234b26bf86429a9953008686dd513b028d58d23d46af3826", 22 | "zh:fa399ff24db7287f2cde373e3bbb8df9bb97bf4d6abc8d882f66a2bf3a954250", 23 | ] 24 | } 25 | -------------------------------------------------------------------------------- /terraform/modules/vm/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | proxmox = { 4 | source = "bpg/proxmox" 5 | version = ">=0.36.0" 6 | } 7 | } 8 | } 9 | 10 | resource "proxmox_virtual_environment_vm" "vm" { 11 | 
name = var.hostname 12 | vm_id = var.vmid 13 | description = "Managed by Terraform" 14 | tags = var.tags 15 | 16 | node_name = var.target_node 17 | on_boot = var.onboot 18 | started = var.started 19 | 20 | agent { 21 | type = "virtio" 22 | # ensure qemu_guest_agent is installed in template/img/vm 23 | enabled = true 24 | } 25 | 26 | tablet_device = false 27 | 28 | cpu { 29 | cores = var.cores 30 | sockets = var.sockets 31 | } 32 | 33 | disk { 34 | interface = "scsi0" 35 | datastore_id = var.disk_datastore 36 | size = var.disk_size 37 | } 38 | 39 | memory { 40 | dedicated = var.memory 41 | } 42 | 43 | network_device { 44 | bridge = "vmbr1" 45 | } 46 | 47 | operating_system { 48 | type = "l26" 49 | } 50 | 51 | serial_device {} 52 | 53 | clone { 54 | datastore_id = var.disk_datastore 55 | vm_id = var.clone_template_id 56 | retries = 3 57 | } 58 | 59 | vga { 60 | enabled = true 61 | memory = 16 62 | type = "serial0" 63 | } 64 | 65 | initialization { 66 | datastore_id = var.disk_datastore 67 | interface = "ide0" 68 | 69 | ip_config { 70 | ipv4 { 71 | address = var.ip_address 72 | gateway = var.ip_gateway 73 | } 74 | } 75 | 76 | user_account { 77 | username = var.ssh_user 78 | keys = var.ssh_public_keys 79 | } 80 | } 81 | 82 | lifecycle { 83 | ignore_changes = [ 84 | # temp fix for SSH keys 85 | # https://github.com/bpg/terraform-provider-proxmox/issues/373 86 | initialization[0].user_account 87 | ] 88 | } 89 | } 90 | -------------------------------------------------------------------------------- /terraform/modules/vm/variables.tf: -------------------------------------------------------------------------------- 1 | variable "target_node" { 2 | type = string 3 | description = "" 4 | default = "pve" 5 | } 6 | 7 | variable "hostname" { 8 | type = string 9 | description = "Hostname of VM (defaults to base)" 10 | default = "base" 11 | } 12 | 13 | variable "vmid" { 14 | type = number 15 | description = "VM ID (defaults to 400)" 16 | default = 400 17 | } 18 | 19 | variable 
"tags" { 20 | type = list(string) 21 | description = "VM tags" 22 | default = [] 23 | } 24 | 25 | variable "clone_template_id" { 26 | type = string 27 | description = "VM Template ID to clone" 28 | } 29 | 30 | variable "onboot" { 31 | type = bool 32 | description = "Start VM on boot" 33 | default = false 34 | } 35 | 36 | variable "started" { 37 | type = bool 38 | description = "Start VM on creation" 39 | default = true 40 | } 41 | 42 | variable "cores" { 43 | type = number 44 | description = "Number of cores" 45 | default = 1 46 | } 47 | 48 | variable "sockets" { 49 | type = number 50 | description = "Number of sockets" 51 | default = 1 52 | } 53 | 54 | variable "memory" { 55 | type = number 56 | description = "Memory in MB" 57 | default = 1024 58 | } 59 | 60 | variable "disk_size" { 61 | type = number 62 | description = < role.rotation_period 34 | } 35 | } 36 | 37 | resource "vault_mount" "db" { 38 | path = "postgres" 39 | type = "database" 40 | } 41 | 42 | resource "vault_database_secret_backend_connection" "postgres" { 43 | backend = vault_mount.db.path 44 | name = "postgres" 45 | allowed_roles = ["*"] 46 | 47 | postgresql { 48 | connection_url = local.connection_url 49 | } 50 | } 51 | 52 | # NOTE: Remember to add the created policy to 53 | # vault_token_auth_backend_role.nomad_cluster 54 | module "role" { 55 | source = "../modules/database" 56 | for_each = local.roles 57 | 58 | postgres_vault_backend = vault_mount.db.path 59 | postgres_db_name = vault_database_secret_backend_connection.postgres.name 60 | 61 | postgres_role_name = each.key 62 | postgres_role_password = each.key 63 | postgres_static_role_rotation_period = each.value 64 | } 65 | -------------------------------------------------------------------------------- /terraform/postgres/variables.tf: -------------------------------------------------------------------------------- 1 | variable "vault_address" { 2 | type = string 3 | description = "Vault address" 4 | default = "https://localhost:8200" 5 | } 
6 | 7 | variable "vault_token" { 8 | type = string 9 | sensitive = true 10 | description = "Vault token for provider" 11 | } 12 | 13 | variable "vault_ca_cert_file" { 14 | type = string 15 | description = "Local path to Vault CA cert file" 16 | default = "../../certs/vault-ca.crt" 17 | } 18 | 19 | variable "postgres_username" { 20 | type = string 21 | description = "Postgres root username" 22 | default = "postgres" 23 | } 24 | 25 | variable "postgres_password" { 26 | type = string 27 | sensitive = true 28 | description = "Postgres root password" 29 | default = "postgres" 30 | } 31 | 32 | variable "postgres_database" { 33 | type = string 34 | description = "Postgres database" 35 | default = "postgres" 36 | } 37 | 38 | variable "postgres_host" { 39 | type = string 40 | description = "Postgres host" 41 | default = "localhost" 42 | } 43 | 44 | variable "postgres_port" { 45 | type = string 46 | description = "Postgres port" 47 | default = "5432" 48 | } 49 | 50 | variable "postgres_roles" { 51 | type = list(object({ 52 | name = string 53 | rotation_period = optional(number, 86400) 54 | })) 55 | description = "List of roles with name and rotation period in sec (default 86400s)." 56 | validation { 57 | condition = alltrue([for r in var.postgres_roles : r.rotation_period > 0]) 58 | error_message = "Rotation period cannot be <= 0" 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /terraform/vault/.gitignore: -------------------------------------------------------------------------------- 1 | certs/ 2 | -------------------------------------------------------------------------------- /terraform/vault/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/local" { 5 | version = "2.4.0" 6 | hashes = [ 7 | "h1:R97FTYETo88sT2VHfMgkPU3lzCsZLunPftjSI5vfKe8=", 8 | "zh:53604cd29cb92538668fe09565c739358dc53ca56f9f11312b9d7de81e48fab9", 9 | "zh:66a46e9c508716a1c98efbf793092f03d50049fa4a83cd6b2251e9a06aca2acf", 10 | "zh:70a6f6a852dd83768d0778ce9817d81d4b3f073fab8fa570bff92dcb0824f732", 11 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 12 | "zh:82a803f2f484c8b766e2e9c32343e9c89b91997b9f8d2697f9f3837f62926b35", 13 | "zh:9708a4e40d6cc4b8afd1352e5186e6e1502f6ae599867c120967aebe9d90ed04", 14 | "zh:973f65ce0d67c585f4ec250c1e634c9b22d9c4288b484ee2a871d7fa1e317406", 15 | "zh:c8fa0f98f9316e4cfef082aa9b785ba16e36ff754d6aba8b456dab9500e671c6", 16 | "zh:cfa5342a5f5188b20db246c73ac823918c189468e1382cb3c48a9c0c08fc5bf7", 17 | "zh:e0e2b477c7e899c63b06b38cd8684a893d834d6d0b5e9b033cedc06dd7ffe9e2", 18 | "zh:f62d7d05ea1ee566f732505200ab38d94315a4add27947a60afa29860822d3fc", 19 | "zh:fa7ce69dde358e172bd719014ad637634bbdabc49363104f4fca759b4b73f2ce", 20 | ] 21 | } 22 | 23 | provider "registry.terraform.io/hashicorp/vault" { 24 | version = "3.18.0" 25 | constraints = ">= 3.11.0, >= 3.18.0" 26 | hashes = [ 27 | "h1:e10+o2ABDgkhcg8pw+odmlrrtzl4PfAyevOjazAlRZ4=", 28 | "zh:0e898f977d2dbd0b2ffeb25520f6f3aaa0a078f654bf312dc12fefc327313204", 29 | "zh:11899fb3e6d2ce6215047cc37c4e1cbdc01334242103600d79009bcdda2cccd9", 30 | "zh:19c57f433f014f6275d1461dd190c50b1fbd2b1217718de6d2eb64e6a9bcea5c", 31 | "zh:4e2aa164ffd13080dc10d5de4256b684108126e1082c2613854e26a398831389", 32 | "zh:77abbf9d90d085677194305cf192f7890408881bbedc77e97c5146cef3e27a7c", 33 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 34 | "zh:790758438efe4389fdb0cabfb6f5118dad13869946665a72ba79a2f1102ff153", 35 | "zh:b9f3f1ba160a41545c4a8cb3a0d91fb37e194cfd6879ac7f358321851242ff78", 36 | "zh:bf19d8380e93a8a6ea8735cc015d4d04c6c588b233bb7cbb2bc3c277b7973f9a", 37 | 
"zh:de096c2afc87052e4848661ae5fc87528468399ae1a3ef242f1d6738504c79fc", 38 | "zh:eb4dce6a7bc10fa836cd379161bb5fad698d3288099e6ce0fa92ca3183acf242", 39 | "zh:f1c150dc13d6597ee08b83904fdd97a6702a106d3f524d60f048f2bd5c492f51", 40 | ] 41 | } 42 | -------------------------------------------------------------------------------- /terraform/vault/kv.tf: -------------------------------------------------------------------------------- 1 | resource "vault_mount" "kvv2" { 2 | path = "kvv2" 3 | type = "kv" 4 | description = "KV version 2 secrets engine" 5 | options = { 6 | version = "2" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /terraform/vault/pki.tf: -------------------------------------------------------------------------------- 1 | resource "vault_mount" "pki" { 2 | path = "pki" 3 | type = "pki" 4 | description = "Root PKI" 5 | max_lease_ttl_seconds = 315360000 # 10 years 6 | } 7 | 8 | resource "vault_pki_secret_backend_config_urls" "root" { 9 | backend = vault_mount.pki.path 10 | issuing_certificates = [ 11 | "http://127.0.0.1:8200/v1/pki/ca", 12 | ] 13 | crl_distribution_points = [ 14 | "http://127.0.0.1:8200/v1/pki/crl", 15 | ] 16 | } 17 | 18 | resource "vault_pki_secret_backend_root_cert" "root" { 19 | depends_on = [vault_mount.pki] 20 | backend = vault_mount.pki.path 21 | type = "internal" 22 | common_name = "Vault PKI Root CA" 23 | ttl = "87600h" 24 | } 25 | 26 | resource "vault_mount" "pki_int" { 27 | path = "pki_int" 28 | type = vault_mount.pki.type 29 | description = "Intermediate PKI" 30 | max_lease_ttl_seconds = 315360000 31 | } 32 | 33 | # intermediate CSR 34 | resource "vault_pki_secret_backend_intermediate_cert_request" "intermediate" { 35 | depends_on = [vault_mount.pki, vault_mount.pki_int] 36 | backend = vault_mount.pki_int.path 37 | type = "internal" 38 | common_name = "Vault PKI Intermediate CA" 39 | } 40 | 41 | # intermediate cert 42 | resource "vault_pki_secret_backend_root_sign_intermediate" "root" 
{ 43 | depends_on = [vault_pki_secret_backend_intermediate_cert_request.intermediate] 44 | backend = vault_mount.pki.path 45 | csr = vault_pki_secret_backend_intermediate_cert_request.intermediate.csr 46 | common_name = "Intermediate CA" 47 | ttl = "43800h" 48 | } 49 | 50 | # import intermediate cert to Vault 51 | resource "vault_pki_secret_backend_intermediate_set_signed" "intermediate" { 52 | backend = vault_mount.pki_int.path 53 | certificate = vault_pki_secret_backend_root_sign_intermediate.root.certificate 54 | } 55 | -------------------------------------------------------------------------------- /terraform/vault/policies.tf: -------------------------------------------------------------------------------- 1 | data "vault_policy_document" "update_userpass" { 2 | rule { 3 | path = "auth/userpass/users/{{ identity.entity.aliases.${vault_auth_backend.userpass.accessor}.name }}" 4 | capabilities = ["update"] 5 | allowed_parameter { 6 | key = "password" 7 | value = [] 8 | } 9 | } 10 | } 11 | 12 | resource "vault_policy" "admin" { 13 | name = "admin" 14 | policy = file("policies/admin.hcl") 15 | } 16 | 17 | resource "vault_policy" "update_userpass" { 18 | name = "update_userpass" 19 | policy = data.vault_policy_document.update_userpass.hcl 20 | } 21 | 22 | resource "vault_policy" "consul_template" { 23 | name = "consul_template" 24 | policy = file("policies/consul_template.hcl") 25 | } 26 | 27 | resource "vault_policy" "nomad_startup" { 28 | name = "nomad_startup" 29 | policy = file("policies/nomad_startup.hcl") 30 | } 31 | 32 | resource "vault_policy" "nomad_cluster" { 33 | name = "nomad_cluster" 34 | policy = file("policies/nomad_token.hcl") 35 | } 36 | 37 | resource "vault_policy" "ansible" { 38 | name = "ansible" 39 | policy = file("policies/ansible.hcl") 40 | } 41 | 42 | resource "vault_policy" "kvuser" { 43 | name = "kvuser" 44 | policy = file("policies/kvuser.hcl") 45 | } 46 | 47 | resource "vault_policy" "nomad_yarr" { 48 | name = "nomad_yarr" 49 | policy = 
file("policies/nomad_yarr.hcl") 50 | } 51 | 52 | resource "vault_policy" "nomad_linkding" { 53 | name = "nomad_linkding" 54 | policy = file("policies/nomad_linkding.hcl") 55 | } 56 | 57 | resource "vault_policy" "nomad_traefik" { 58 | name = "nomad_traefik" 59 | policy = file("policies/nomad_traefik.hcl") 60 | } 61 | 62 | resource "vault_policy" "nomad_diun" { 63 | name = "nomad_diun" 64 | policy = file("policies/nomad_diun.hcl") 65 | } 66 | 67 | resource "vault_policy" "nomad_minio" { 68 | name = "nomad_minio" 69 | policy = file("policies/nomad_minio.hcl") 70 | } 71 | 72 | resource "vault_policy" "nomad_registry" { 73 | name = "nomad_registry" 74 | policy = file("policies/nomad_registry.hcl") 75 | } 76 | 77 | resource "vault_policy" "nomad_paperless" { 78 | name = "nomad_paperless" 79 | policy = file("policies/nomad_paperless.hcl") 80 | } 81 | -------------------------------------------------------------------------------- /terraform/vault/policies/admin.hcl: -------------------------------------------------------------------------------- 1 | ## System Backend 2 | 3 | # Read system health check 4 | path "sys/health" { 5 | capabilities = ["read", "sudo"] 6 | } 7 | 8 | path "sys/audit" { 9 | capabilities = ["read", "create", "sudo"] 10 | } 11 | 12 | # Manage leases 13 | path "sys/leases/*" { 14 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 15 | } 16 | 17 | ## ACL Policies 18 | 19 | # Create, manage ACL policies 20 | path "sys/policies/acl/*" { 21 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 22 | } 23 | 24 | # List existing policies 25 | path "sys/policies/acl" { 26 | capabilities = ["list"] 27 | } 28 | 29 | # Deny changing own policy 30 | path "sys/policies/acl/admin" { 31 | capabilities = ["read"] 32 | } 33 | 34 | ## Auth Methods 35 | 36 | # Manage auth methods 37 | path "auth/*" { 38 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 39 | } 40 | 41 | # Create, update, delete auth methods 42 
| path "sys/auth/*" { 43 | capabilities = ["create", "update", "delete", "sudo"] 44 | } 45 | 46 | # List auth methods 47 | path "sys/auth" { 48 | capabilities = ["read"] 49 | } 50 | 51 | ## IdentityEntity 52 | path "identity/entity/*" { 53 | capabilities = ["create", "update", "delete", "read"] 54 | } 55 | 56 | path "identity/entity/name" { 57 | capabilities = ["list"] 58 | } 59 | 60 | path "identity/entity/id" { 61 | capabilities = ["list"] 62 | } 63 | 64 | path "identity/entity-alias/*" { 65 | capabilities = ["create", "update", "delete", "read"] 66 | } 67 | 68 | path "identity/entity-alias/id" { 69 | capabilities = ["list"] 70 | } 71 | 72 | ## KV Secrets Engine 73 | 74 | # manage kv secrets engine 75 | path "kvv2/*" { 76 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 77 | } 78 | 79 | # Manage secrets engine 80 | path "sys/mounts/*" { 81 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 82 | } 83 | 84 | # List secrets engine 85 | path "sys/mounts" { 86 | capabilities = ["read"] 87 | } 88 | 89 | ## PKI - Intermediate CA 90 | 91 | path "pki/config/urls" { 92 | capabilities = ["read"] 93 | } 94 | 95 | # Create, update roles 96 | path "pki_int/roles/*" { 97 | capabilities = ["create", "read", "update", "delete", "list", "sudo"] 98 | } 99 | 100 | # List roles 101 | path "pki_int/roles" { 102 | capabilities = ["list"] 103 | } 104 | 105 | # Issue certs 106 | path "pki_int/issue/*" { 107 | capabilities = ["create", "update"] 108 | } 109 | 110 | # Read certs 111 | path "pki_int/cert/*" { 112 | capabilities = ["read"] 113 | } 114 | 115 | # Revoke certs 116 | path "pki_int/revoke" { 117 | capabilities = ["create", "update", "read"] 118 | } 119 | 120 | # List certs 121 | path "pki_int/certs" { 122 | capabilities = ["list"] 123 | } 124 | 125 | # Tidy certs 126 | path "pki_int/tidy" { 127 | capabilities = ["create", "update", "read"] 128 | } 129 | 130 | path "pki_int/tidy-status" { 131 | capabilities = ["read"] 132 | } 133 | 
-------------------------------------------------------------------------------- /terraform/vault/policies/ansible.hcl: -------------------------------------------------------------------------------- 1 | path "pki_int/issue/auth" { 2 | capabilities = ["create", "update"] 3 | } 4 | 5 | path "pki_int/issue/server" { 6 | capabilities = ["create", "update"] 7 | } 8 | 9 | path "pki_int/issue/client" { 10 | capabilities = ["create", "update"] 11 | } 12 | 13 | path "auth/agent/certs/*" { 14 | capabilities = ["create", "update"] 15 | } 16 | 17 | # required to update nomad_startup auth cert 18 | path "auth/cert/certs/nomad_startup" { 19 | capabilities = ["create", "update"] 20 | } 21 | 22 | path "kvv2/data/cluster/consul_config" { 23 | capabilities = ["read", "create"] 24 | } 25 | 26 | path "kvv2/data/cluster/nomad_config" { 27 | capabilities = ["read", "create"] 28 | } 29 | -------------------------------------------------------------------------------- /terraform/vault/policies/consul_template.hcl: -------------------------------------------------------------------------------- 1 | path "pki_int/issue/auth" { 2 | capabilities = ["create", "update"] 3 | } 4 | 5 | path "pki_int/issue/server" { 6 | capabilities = ["create", "update"] 7 | } 8 | 9 | path "pki_int/issue/client" { 10 | capabilities = ["create", "update"] 11 | } 12 | 13 | # required to update vault agent auth cert 14 | path "auth/agent/certs/*" { 15 | capabilities = ["create", "update"] 16 | } 17 | 18 | # required to update nomad_startup auth cert 19 | path "auth/cert/certs/nomad_startup" { 20 | capabilities = ["create", "update"] 21 | } 22 | 23 | # manage kv secrets engine 24 | path "kvv2/data/cluster/*" { 25 | capabilities = ["create", "read", "update"] 26 | } 27 | -------------------------------------------------------------------------------- /terraform/vault/policies/kvuser.hcl: -------------------------------------------------------------------------------- 1 | path "kvv2/data/prod/*" { 2 | capabilities = 
["create", "read", "update", "delete", "patch"] 3 | } 4 | 5 | path "kvv2/data/prod" { 6 | capabilities = ["list"] 7 | } 8 | 9 | path "kvv2/data/dev/*" { 10 | capabilities = ["create", "read", "update", "delete", "patch"] 11 | } 12 | 13 | path "kvv2/data/dev" { 14 | capabilities = ["list"] 15 | } 16 | -------------------------------------------------------------------------------- /terraform/vault/policies/nomad_diun.hcl: -------------------------------------------------------------------------------- 1 | path "kvv2/data/prod/nomad/diun" { 2 | capabilities = ["read"] 3 | } 4 | -------------------------------------------------------------------------------- /terraform/vault/policies/nomad_linkding.hcl: -------------------------------------------------------------------------------- 1 | path "kvv2/data/prod/nomad/linkding" { 2 | capabilities = ["read"] 3 | } 4 | -------------------------------------------------------------------------------- /terraform/vault/policies/nomad_minio.hcl: -------------------------------------------------------------------------------- 1 | path "kvv2/data/prod/nomad/minio" { 2 | capabilities = ["read"] 3 | } 4 | -------------------------------------------------------------------------------- /terraform/vault/policies/nomad_paperless.hcl: -------------------------------------------------------------------------------- 1 | path "kvv2/data/prod/nomad/paperless" { 2 | capabilities = ["read"] 3 | } 4 | -------------------------------------------------------------------------------- /terraform/vault/policies/nomad_registry.hcl: -------------------------------------------------------------------------------- 1 | path "kvv2/data/prod/nomad/registry" { 2 | capabilities = ["read"] 3 | } 4 | -------------------------------------------------------------------------------- /terraform/vault/policies/nomad_startup.hcl: -------------------------------------------------------------------------------- 1 | path "auth/token/create-orphan" { 2 | capabilities = 
["create", "update", "sudo"] 3 | } 4 | -------------------------------------------------------------------------------- /terraform/vault/policies/nomad_token.hcl: -------------------------------------------------------------------------------- 1 | # Allow creating tokens under "nomad_cluster" token role. 2 | path "auth/token/create/nomad_cluster" { 3 | capabilities = ["update"] 4 | } 5 | 6 | # Allow looking up "nomad_cluster" token role. 7 | path "auth/token/roles/nomad_cluster" { 8 | capabilities = ["read"] 9 | } 10 | 11 | # Allow looking up the token passed to Nomad to validate 12 | # the token has the proper capabilities. 13 | # This is provided by the "default" policy. 14 | path "auth/token/lookup-self" { 15 | capabilities = ["read"] 16 | } 17 | 18 | # Allow looking up incoming tokens to validate they have permissions to access 19 | # the tokens they are requesting. This is only required if 20 | # `allow_unauthenticated` is set to false. 21 | path "auth/token/lookup" { 22 | capabilities = ["update"] 23 | } 24 | 25 | # Allow revoking tokens that should no longer exist. This allows revoking 26 | # tokens for dead tasks. 27 | path "auth/token/revoke-accessor" { 28 | capabilities = ["update"] 29 | } 30 | 31 | # Allow checking the capabilities of our own token. This is used to validate the 32 | # token upon startup. Note this requires update permissions because the Vault API 33 | # is a POST 34 | path "sys/capabilities-self" { 35 | capabilities = ["update"] 36 | } 37 | 38 | # Allow our own token to be renewed. 
39 | path "auth/token/renew-self" { 40 | capabilities = ["update"] 41 | } 42 | -------------------------------------------------------------------------------- /terraform/vault/policies/nomad_traefik.hcl: -------------------------------------------------------------------------------- 1 | path "kvv2/data/prod/nomad/traefik" { 2 | capabilities = ["read"] 3 | } 4 | 5 | path "pki_int/issue/client" { 6 | capabilities = ["create", "update", "read"] 7 | } 8 | -------------------------------------------------------------------------------- /terraform/vault/policies/nomad_yarr.hcl: -------------------------------------------------------------------------------- 1 | path "kvv2/data/prod/nomad/yarr" { 2 | capabilities = ["read"] 3 | } 4 | -------------------------------------------------------------------------------- /terraform/vault/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | vault = { 4 | source = "hashicorp/vault" 5 | version = ">= 3.18.0" 6 | } 7 | } 8 | } 9 | 10 | provider "vault" { 11 | address = var.vault_address 12 | token = var.vault_token 13 | ca_cert_file = var.vault_ca_cert_file 14 | } 15 | -------------------------------------------------------------------------------- /terraform/vault/roles.tf: -------------------------------------------------------------------------------- 1 | resource "vault_pki_secret_backend_role" "server_role" { 2 | backend = vault_mount.pki_int.path 3 | name = "server" 4 | ttl = "86400" 5 | max_ttl = "2592000" # 30d 6 | generate_lease = true 7 | 8 | allowed_domains = concat(["localhost"], var.allowed_server_domains) 9 | allow_any_name = false 10 | allow_glob_domains = true 11 | allow_subdomains = true 12 | enforce_hostnames = true 13 | 14 | client_flag = true 15 | server_flag = true 16 | } 17 | 18 | resource "vault_pki_secret_backend_role" "client_role" { 19 | backend = vault_mount.pki_int.path 20 | name = "client" 21 | ttl = "86400" 22 | 
max_ttl = "2592000" # 30d 23 | generate_lease = true 24 | 25 | allowed_domains = concat(["localhost"], var.allowed_client_domains) 26 | allow_any_name = false 27 | allow_bare_domains = true # Required for email addresses 28 | allow_glob_domains = false 29 | allow_ip_sans = true 30 | allow_subdomains = true 31 | enforce_hostnames = true 32 | 33 | client_flag = true 34 | server_flag = false 35 | } 36 | 37 | # issue auth certificates for username@global.vault 38 | resource "vault_pki_secret_backend_role" "auth_role" { 39 | backend = vault_mount.pki_int.path 40 | name = "auth" 41 | ttl = "86400" 42 | max_ttl = "2592000" # 30d 43 | generate_lease = true 44 | 45 | allowed_domains = concat(["localhost"], var.allowed_auth_domains) 46 | allow_any_name = false 47 | allow_bare_domains = true 48 | allow_glob_domains = false 49 | allow_ip_sans = true 50 | allow_subdomains = false 51 | enforce_hostnames = true 52 | } 53 | 54 | resource "vault_pki_secret_backend_role" "vault_server" { 55 | backend = vault_mount.pki_int.path 56 | name = "vault" 57 | ttl = "31536000" # 1 year 58 | max_ttl = "157788000" # 5 years 59 | generate_lease = true 60 | 61 | allowed_domains = concat(["localhost"], var.allowed_vault_domains) 62 | allow_any_name = false 63 | allow_bare_domains = true 64 | allow_glob_domains = true 65 | allow_ip_sans = true 66 | allow_subdomains = true 67 | enforce_hostnames = true 68 | 69 | client_flag = false 70 | server_flag = true 71 | } 72 | -------------------------------------------------------------------------------- /terraform/vault/root.tf: -------------------------------------------------------------------------------- 1 | resource "vault_audit" "file" { 2 | type = "file" 3 | options = { 4 | file_path = var.vault_audit_path 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /terraform/vault/variables.tf: -------------------------------------------------------------------------------- 1 | variable "vault_address" { 2 | 
type = string 3 | description = "Vault address" 4 | default = "https://localhost:8200" 5 | } 6 | 7 | variable "vault_token" { 8 | type = string 9 | sensitive = true 10 | description = "Vault token for provider" 11 | } 12 | 13 | variable "vault_ca_cert_file" { 14 | type = string 15 | description = "Local path to Vault CA cert file" 16 | default = "../../certs/vault-ca.crt" 17 | } 18 | 19 | variable "vault_audit_path" { 20 | type = string 21 | description = "Vault audit file path" 22 | default = "/opt/vault/logs/vault.log" 23 | } 24 | 25 | variable "admin_password" { 26 | type = string 27 | sensitive = true 28 | description = "Admin password" 29 | } 30 | 31 | variable "kvuser_password" { 32 | type = string 33 | sensitive = true 34 | description = "kvuser password" 35 | } 36 | 37 | variable "allowed_server_domains" { 38 | type = list(string) 39 | description = "List of allowed_domains for PKI server role" 40 | default = ["service.consul", "dc1.consul", "dc1.nomad", "global.nomad"] 41 | } 42 | 43 | variable "allowed_client_domains" { 44 | type = list(string) 45 | description = "List of allowed_domains for PKI client role" 46 | default = ["service.consul", "dc1.consul", "dc1.nomad", "global.nomad"] 47 | } 48 | 49 | variable "allowed_auth_domains" { 50 | type = list(string) 51 | description = "List of allowed_domains for PKI auth role" 52 | default = ["global.vault"] 53 | } 54 | 55 | variable "allowed_vault_domains" { 56 | type = list(string) 57 | description = "List of allowed_domains for PKI vault role" 58 | default = ["vault.service.consul", "global.vault"] 59 | } 60 | 61 | # Changing these will affect the Ansible roles when they attempt to login to Vault with 62 | # Ansible 63 | variable "ansible_public_key_path" { 64 | type = string 65 | description = "Local path to store Ansible public key for authentication" 66 | default = "../../certs/ansible.crt" 67 | } 68 | 69 | variable "ansible_private_key_path" { 70 | type = string 71 | description = "Local path to store 
Ansible private key for authentication" 72 | default = "../../certs/ansible_key.pem" 73 | } 74 | --------------------------------------------------------------------------------