├── .gitmodules
├── poetry.toml
├── requirements.yml
├── templates
├── telegraf
│ ├── custom_metrics.conf.j2
│ ├── speedtest.conf.j2
│ ├── leader.conf.j2
│ ├── nomad.conf.j2
│ ├── pingHosts.conf.j2
│ └── base_config.conf.j2
├── docker_compose_files
│ ├── synology_promtail.yml.j2
│ ├── synology_plex.yml.j2
│ ├── synology_consul.yml.j2
│ ├── synology_telegraf.yml.j2
│ ├── synology_jellyfin.yml.j2
│ ├── synology_diun.yml.j2
│ ├── synology_sabnzbd.yml.j2
│ └── synology_tdarr_node.yml.j2
├── Tdarr_Server_Config.json.j2
├── nomad_jobs
│ ├── testing
│ │ └── execTest.hcl
│ ├── backup_local_filesystems.hcl
│ ├── remove_nzbs.hcl
│ ├── diun.hcl
│ ├── promtail-syslogs.hcl
│ ├── nginx.hcl
│ ├── headless-chrome.hcl
│ ├── nzbhydra.hcl
│ ├── freshrss.hcl
│ ├── changedetection.hcl
│ ├── ladder.hcl
│ ├── whoogle.hcl
│ ├── overseerr.hcl
│ ├── chronograf.hcl
│ ├── syncthing.hcl
│ ├── template-simple.hcl
│ ├── speedtest.hcl
│ ├── mealie.hcl
│ ├── sabnzbd.hcl
│ ├── jellyfin.hcl
│ ├── code.hcl
│ ├── hishtory-server.hcl
│ ├── stash.hcl
│ ├── influxdb.hcl
│ ├── diagnostics.hcl
│ ├── uptimekuma.hcl
│ ├── template_localfs.hcl
│ ├── sonarr.hcl
│ ├── grafana.hcl
│ ├── readarr.hcl
│ ├── lidarr.hcl
│ ├── prowlarr.hcl
│ ├── radarr.hcl
│ ├── pihole.hcl
│ └── loki.hcl
├── consul.service.j2
├── scripts
│ └── telegraf_custom_metrics.sh.j2
├── Tdarr_Node_Config.json.j2
├── nomad.service.j2
├── consul.launchd.j2
├── consul_services
│ ├── tdarr_service.json.j2
│ └── consul_synology_checks.json.j2
├── nomad.launchd.j2
└── consul.hcl.j2
├── .typos.toml
├── .vscode
├── settings.json
└── shellscript.code-snippets
├── .gitignore
├── ansible.cfg
├── tasks
├── sanity.yml
├── debug.yml
├── logrotate.yml
├── pull_repositories.yml
├── backups.yml
├── interpolated_variables.yml
├── packages.yml
├── service_prometheus_nodeExporter.yml
├── docker.yml
└── orchestration_jobs.yml
├── CHANGELOG.md
├── .ansible-lint.yml
├── .yamllint.yml
├── files
└── certs
│ ├── nomad
│ ├── cfssl.json
│ ├── cli-key.pem
│ ├── client-key.pem
│ ├── nomad-ca-key.pem
│ ├── server-key.pem
│ ├── cli.csr
│ ├── client.csr
│ ├── server.csr
│ ├── nomad-ca.csr
│ ├── cli.pem
│ ├── nomad-ca.pem
│ ├── client.pem
│ └── server.pem
│ └── consul
│ ├── consul-agent-ca-key.pem
│ ├── homelab-server-consul-0-key.pem
│ ├── homelab-server-consul-0.pem
│ └── consul-agent-ca.pem
├── .ansible-lint-ignore
├── scripts
└── ansible-vault-precommit.sh
├── pyproject.toml
├── .pre-commit-config.yaml
├── main.yml
└── handlers
└── main.yml
/.gitmodules:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/poetry.toml:
--------------------------------------------------------------------------------
1 | [virtualenvs]
2 | in-project = true
3 |
--------------------------------------------------------------------------------
/requirements.yml:
--------------------------------------------------------------------------------
1 | ---
2 | roles:
3 | - name: arillso.logrotate
4 |
--------------------------------------------------------------------------------
/templates/telegraf/custom_metrics.conf.j2:
--------------------------------------------------------------------------------
1 | [[inputs.exec]]
2 | commands = ["/usr/local/bin/telegraf_custom_metrics.sh"]
3 | timeout = "5s"
4 | name_suffix = "_homelab_metrics"
5 | data_format = "influx"
6 |
--------------------------------------------------------------------------------
/.typos.toml:
--------------------------------------------------------------------------------
1 | [default]
 2 | locale = "en-us"
3 |
4 | [default.extend-words]
 5 | Hashi = "Hashi" # HashiCorp
6 | hishtory = "hishtory" # Used for the hishtory package
7 |
8 | [files]
9 | extend-exclude = ["galaxy-roles/"]
10 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "files.associations": {
3 | "**/tasks/*.yml": "ansible",
4 | "**/handlers/*.yml": "ansible",
5 | "main.yml": "ansible",
6 | "inventory.yml": "ansible",
7 | "default_variables.yml": "ansible",
8 | "vault.yml": "ansible"
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Ignore MacOS Junk
2 | .DS_Store
3 |
4 | # Ignore synology junk
5 | *@eaDir*
6 |
7 | # Don't push customized information, Galaxy roles, or collections
8 | galaxy-roles
9 | ansible_collections
10 |
11 | # Don't sync vault password file
12 | password_file
13 | .password_file
14 |
15 | # Ignore caches
16 | .cache
17 |
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | nocows = True
3 | roles_path = ./galaxy-roles:./roles
4 | collections_path = ./
5 | inventory = ./inventory.yml
6 | stdout_callback = yaml
7 | any_errors_fatal = True
8 | display_skipped_hosts = False
9 | vault_password_file = ./.password_file
10 |
11 | [ssh_connection]
12 | transfer_method = smart
13 |
--------------------------------------------------------------------------------
/tasks/sanity.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # TASK DESCRIPTION:
 3 | # Always runs first. Confirms we can actually use Ansible
4 | - name: Sanity - user mode
5 | become: false
6 | ansible.builtin.debug:
7 | msg: "Sanity check: user mode"
8 |
9 | - name: Sanity - become mode
10 | become: true
11 | ansible.builtin.debug:
12 | msg: "Sanity check: become mode"
13 |
--------------------------------------------------------------------------------
/templates/telegraf/speedtest.conf.j2:
--------------------------------------------------------------------------------
1 | [[inputs.exec]]
2 | commands = ["/usr/local/bin/telegraf_speedtest.sh"]
3 | interval = "30m"
4 | timeout = "1m"
5 | name_suffix = "_speedtest"
6 | data_format = "influx"
7 |
8 | [[outputs.influxdb]]
9 | urls = ["http://influxdb.service.consul:{{ influxdb_port }}"]
10 | database = "homelab"
11 | retention_policy = "1month"
12 | namepass = ["*_speedtest"]
13 |
--------------------------------------------------------------------------------
/templates/docker_compose_files/synology_promtail.yml.j2:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | promtail:
5 | image: grafana/promtail
6 | hostname: promtail
7 | container_name: promtail
8 | ports:
9 | - 9080:9080
10 | network_mode: "bridge"
11 | volumes:
12 | - /volume1/docker/promtail/config.yml:/etc/promtail/config.yml
13 | - /var/log:/var/log:ro
14 | restart: unless-stopped
15 |
--------------------------------------------------------------------------------
/templates/docker_compose_files/synology_plex.yml.j2:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | plex:
5 | image: ghcr.io/linuxserver/plex:latest
6 | hostname: plex
7 | container_name: plex
8 | network_mode: "host"
9 | environment:
10 | - "TZ=America/New_York"
11 | - "PGID=101"
12 | - "PUID={{ ansible_user_uid }}"
13 | - "VERSION=docker"
14 | volumes:
15 | - /volume1/media/media:/data/media
16 | - /volume1/docker/plex:/config
17 | restart: unless-stopped
18 |
--------------------------------------------------------------------------------
/templates/docker_compose_files/synology_consul.yml.j2:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | consul:
5 | image: hashicorp/consul:{{ consul_version }}
6 | hostname: consul
7 | container_name: consul
8 | network_mode: "host"
9 | environment:
10 | - CONSUL_DISABLE_PERM_MGMT=
11 | volumes:
12 | - /volume1/docker/consul/data:/consul/data
13 | - /volume1/docker/consul/config:/consul/config
14 | command: consul agent -config-dir=/consul/config
15 | restart: unless-stopped
16 |
--------------------------------------------------------------------------------
/templates/docker_compose_files/synology_telegraf.yml.j2:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | telegraf:
5 | image: nuntz/telegraf-snmp:latest
6 | hostname: telegraf
7 | container_name: nuntz-telegraf-snmp
8 | network_mode: "host"
9 | volumes:
10 | - /var/run/docker.sock:/var/run/docker.sock:ro
11 | - /volume1/docker/telegraf/mibs:/usr/share/snmp/mibs
12 | - /volume1/docker/telegraf/logs:/var/logs/telegraf
13 | - /volume1/docker/telegraf/config:/etc/telegraf
14 | restart: unless-stopped
15 |
--------------------------------------------------------------------------------
/templates/Tdarr_Server_Config.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "serverPort": "{{ tdarr_server_port }}",
3 | "webUIPort": "{{ tdarr_webui_port }}",
4 | "serverIP": "{% for h in groups['lan'] if hostvars[h].is_tdarr_server == true %}{{ hostvars[h].ansible_host }}{% endfor %}",
5 | {% if ansible_os_family == 'Darwin' and ansible_architecture == 'arm64' -%}
6 | "handbrakePath": "/opt/homebrew/bin/HandBrakeCLI",
7 | "ffmpegPath": "/opt/homebrew/bin/ffmpeg",
8 | {% else %}
9 | "handbrakePath": "/usr/local/bin/HandBrakeCLI",
10 | "ffmpegPath": "/usr/local/bin/ffmpeg"
11 | {%- endif %}
12 |
13 | }
14 |
--------------------------------------------------------------------------------
/templates/docker_compose_files/synology_jellyfin.yml.j2:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | jellyfin:
5 | image: lscr.io/linuxserver/jellyfin:latest
6 | hostname: jellyfin
7 | container_name: jellyfin
8 | network_mode: "host"
9 | environment:
10 | - "TZ=America/New_York"
11 | - "PGID=101"
12 | - "PUID={{ ansible_user_uid }}"
13 | volumes:
14 | - /volume1/pi-cluster/jellyfin:/config
15 | - /volume1/media/media/movies:/data/movies
16 | - /volume1/media/media/tv:/data/tv
17 | restart: unless-stopped
18 |
--------------------------------------------------------------------------------
/templates/nomad_jobs/testing/execTest.hcl:
--------------------------------------------------------------------------------
1 | job "execTest" {
2 | region = "global"
3 | datacenters = ["{{ datacenter_name }}"]
4 | type = "batch"
5 |
6 | constraint {
7 | attribute = "${node.unique.name}"
8 | operator = "regexp"
9 | value = "rpi3"
10 | }
11 |
12 | group "testing" {
13 |
14 | task "execTest" {
15 | driver = "raw_exec"
16 | config {
17 | command = "/usr/local/bin/backup_configs"
18 | args = ["--verbose","--job","sonarr"]
19 | }
20 |
21 | resources {
22 | cpu = 500
23 | memory = 256
24 | }
25 | }
26 | }
27 | }
28 |
--------------------------------------------------------------------------------
/templates/docker_compose_files/synology_diun.yml.j2:
--------------------------------------------------------------------------------
1 | version: "3.9"
2 |
3 | services:
4 | diun:
5 | image: ghcr.io/crazy-max/diun
6 | hostname: diun
7 | container_name: diun
8 | network_mode: "bridge"
9 | environment:
10 | - "TZ=America/New_York"
11 | - "DIUN_WATCH_SCHEDULE=26 */48 * * *"
12 | - "DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT=true"
13 | - "DIUN_NOTIF_PUSHOVER_TOKEN={{ pushover_token }}"
14 | - "DIUN_NOTIF_PUSHOVER_RECIPIENT={{ pushover_recipient }}"
15 | - "DIUN_WATCH_FIRSTCHECKNOTIF=false"
16 | volumes:
17 | - /var/run/docker.sock:/var/run/docker.sock:ro
18 | restart: unless-stopped
19 |
--------------------------------------------------------------------------------
/templates/consul.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description="hashiCorp Consul - A service mesh solution"
3 | Documentation=https://www.consul.io/
4 | Requires=network-online.target
5 | After=network-online.target
6 | After=docker.service
7 | Requires=docker.service
8 | ConditionFileNotEmpty={{ interpolated_consul_configuration_dir }}/consul.hcl
9 |
10 | [Service]
11 | Type=notify
12 | User=consul
13 | Group=consul
14 | ExecStart=/usr/local/bin/consul agent -config-dir={{ interpolated_consul_configuration_dir }}
15 | ExecReload=/usr/local/bin/consul reload
16 | KillMode=process
17 | Restart=on-failure
18 | LimitNOFILE=65536
19 |
20 | [Install]
21 | WantedBy=multi-user.target
22 |
--------------------------------------------------------------------------------
/templates/scripts/telegraf_custom_metrics.sh.j2:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | EXTERNAL_IP="$(curl -s http://icanhazip.com)"
4 |
5 | # OS specific support
6 | if _uname=$(command -v uname); then
7 | case $("${_uname}" | tr '[:upper:]' '[:lower:]') in
8 | linux*)
9 | TEMP="$(cat /sys/class/thermal/thermal_zone0/temp)"
10 | ;;
11 | darwin*)
12 | if [ -e "/usr/local/bin/osx-cpu-temp" ]; then
13 | TEMP="$(/usr/local/bin/osx-cpu-temp | cut -c -4 | sed -E 's/\.//g')00"
14 | fi
15 | ;;
16 | esac
17 | fi
18 |
19 | # Print output
20 | printf 'exec ip_external="%s",cpu_temp=%s' "${EXTERNAL_IP}" "${TEMP}"
21 |
--------------------------------------------------------------------------------
/templates/nomad_jobs/backup_local_filesystems.hcl:
--------------------------------------------------------------------------------
1 | job "backup_local_filesystems" {
2 | region = "global"
3 | datacenters = ["{{ datacenter_name }}"]
4 | type = "sysbatch"
5 |
6 | periodic {
7 | cron = "0 */8 * * * *"
8 | prohibit_overlap = true
9 | time_zone = "America/New_York"
10 | }
11 |
12 | task "do_backups" {
13 | driver = "raw_exec"
14 | config {
15 | # When running a binary that exists on the host, the path must be absolute
16 | command = "${meta.backupCommand}"
17 | args = ["${meta.backupCommandArg1}", "${meta.backupCommandArg2}", "${meta.backupCommandArg3}"]
18 | }
19 | } // /task do_backups
20 |
21 | } //job
22 |
--------------------------------------------------------------------------------
/templates/nomad_jobs/remove_nzbs.hcl:
--------------------------------------------------------------------------------
1 | job "remove_nzbs" {
2 | region = "global"
3 | datacenters = ["{{ datacenter_name }}"]
4 | type = "batch"
5 |
6 | constraint {
7 | attribute = "${node.unique.name}"
8 | operator = "regexp"
9 | value = "rpi"
10 | }
11 |
12 | periodic {
13 | cron = "*/15 * * * * *"
14 | prohibit_overlap = true
15 | time_zone = "America/New_York"
16 | }
17 |
18 | task "remove_nzbs" {
19 | driver = "raw_exec"
20 | config {
21 | command = "/home/pi/.pyenv/shims/python"
22 | args = ["/home/pi/repos/bin/bin-sabnzbd/removeNZBs.py"]
23 | }
24 |
25 | } // /task remove_nzbs
26 |
27 | } //job
28 |
--------------------------------------------------------------------------------
/templates/Tdarr_Node_Config.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "nodeID": "{{ inventory_hostname }}",
3 | "nodeIP": "{{ ansible_host }}",
4 | "nodePort": "{{ tdarr_node_port }}",
5 | "serverIP": "{% for h in groups['lan'] if hostvars[h].is_tdarr_server == true %}{{ hostvars[h].ansible_host }}{% endfor %}",
6 | "serverPort": "{{ tdarr_server_port }}",
7 | {% if ansible_os_family == 'Darwin' and ansible_architecture == 'arm64' -%}
8 | "handbrakePath": "/opt/homebrew/bin/HandBrakeCLI",
9 | "ffmpegPath": "/opt/homebrew/bin/ffmpeg",
10 | {% else %}
11 | "handbrakePath": "/usr/local/bin/HandBrakeCLI",
12 | "ffmpegPath": "/usr/local/bin/ffmpeg",
13 | {%- endif %}
14 |
15 | "mkvpropeditPath": "",
16 | "pathTranslators": [
17 | {
18 | "server": "",
19 | "node": ""
20 | }
21 | ]
22 | }
23 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | ## v0.2.0 (2023-02-07)
2 |
3 | ### Feat
4 |
 5 | - **services**: add freshrss
6 | - **jobs**: add recyclarr to keep sonarr/radarr profiles in sync
7 | - **jobs**: diagnostics now includes whoami
8 | - **jobs**: move changedetection to linuxserver.io docker image
9 | - bump software versions
10 |
11 | ### Fix
12 |
13 | - **inventory**: update python interpreter for pyenv
14 | - **services**: bump versions
15 | - **recyclarr**: pin to v2.x
16 | - **ansible**: add FQDN to ansible tasks
17 | - **authelia**: exclude ntp checks at startup
18 | - **services**: bump versions
19 | - **mounts**: explicitly state mounting nfs on boot
20 | - **telegraf**: use bullseye deb repository for apt
21 | - bump traefik version
22 |
23 | ### Refactor
24 |
25 | - **jobs**: remove device specific constraints
26 |
--------------------------------------------------------------------------------
/.ansible-lint.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Full documentation: https://ansible-lint.readthedocs.io/en/latest/index.html
3 | exclude_paths:
4 | - ../../.cache/
5 | - .cache/
6 | - .github/
7 | - .hooks/
8 | - .vscode/
9 | - archived_data/
10 | - galaxy-roles/
11 | - .cz.yaml
12 | - vault.yml
13 | - .venv/
14 | - ansible_collections/
15 | skip_list:
16 | - yaml[indentation]
17 | # - name[template]
18 | # - ignore-errors
19 | # - meta-incorrect
20 | # - meta-no-info
21 | # - package-latest
22 | # - role-name
23 | # - unnamed-task
24 | # - var-naming
25 | # - latest[git]
26 |
27 | # warn_list:
28 | # - experimental
29 | # - risky-file-permissions
30 | # - command-instead-of-module
31 | # - no-changed-when
32 | # - command-instead-of-shell
33 |
--------------------------------------------------------------------------------
/.yamllint.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Find full documentation at: https://yamllint.readthedocs.io/en/stable/index.html
3 | extends: default
4 | locale: en_US.UTF-8
5 |
6 | rules:
7 | braces:
8 | level: error
9 | max-spaces-inside: 1
10 | min-spaces-inside: 1
11 | comments-indentation: disable
12 | comments:
13 | min-spaces-from-content: 1
14 | indentation:
15 | spaces: consistent
16 | indent-sequences: true
17 | check-multi-line-strings: false
18 | line-length: disable
19 | quoted-strings:
20 | quote-type: any
21 | required: false
22 | extra-required:
23 | - "^http://"
24 | - "^https://"
25 | - "ftp://"
26 | - 'ssh \w.*'
27 | - "{{"
28 | extra-allowed: []
29 | truthy:
30 | level: error
31 |
--------------------------------------------------------------------------------
/templates/telegraf/leader.conf.j2:
--------------------------------------------------------------------------------
1 | [[processors.regex]]
2 | namepass = ["consul_health_checks"]
3 |
 4 | # Tag and field conversions defined in separate sub-tables
5 | [[processors.regex.tags]]
6 | ## Tag to change
7 | key = "check_name"
8 | ## Regular expression to match on a tag value
9 | pattern = "^service: \\W(\\w+)\\W check$"
10 | ## Matches of the pattern will be replaced with this string. Use ${1}
11 | ## notation to use the text of the first submatch.
12 | replacement = "${1}"
13 |
14 | [[inputs.consul]]
15 | address = "consul.service.consul:8500"
16 | scheme = "http"
17 | insecure_skip_verify = true
18 | metric_version = 2
19 | namedrop = ["traefik.http*","traefik.enable*","traefik.tcp*"]
20 | tagexclude = ["traefik.http*","traefik.enable*", "traefik.tcp*"]
21 | [inputs.consul.tagdrop]
22 | check_name = [ "Nomad Client*", "Nomad Server*", "Serf Health Status" ]
23 |
--------------------------------------------------------------------------------
/templates/docker_compose_files/synology_sabnzbd.yml.j2:
--------------------------------------------------------------------------------
1 | version: '3.9'
2 |
3 | services:
4 | sabnzbd:
5 | image: ghcr.io/linuxserver/sabnzbd:{{ sabnzbd_version }}
6 | hostname: sabnzbd
7 | container_name: sabnzbd
8 | network_mode: "bridge"
9 | environment:
10 | - "TZ=America/New_York"
11 | - "PGID=101"
12 | - "PUID={{ ansible_user_uid }}"
13 | #- "DOCKER_MODS=linuxserver/mods:universal-cron"
14 | volumes:
15 | - /var/services/homes/{{ my_username }}:/{{ my_username }}
16 | - /volume1/nate:/nate
17 | - /volume1/media/downloads/nzb:/nzbd
18 | - /volume1/media/downloads/temp:/incomplete-downloads
19 | - /volume1/media/downloads/complete:/downloads
20 | - /volume1/pi-cluster/sabnzbd:/config
21 | - /volume1/pi-cluster/sabnzbd/startup-scripts:/custom-cont-init.d
22 | ports:
23 | - 8080:8080
24 | - 9090:9090
25 | restart: unless-stopped
26 |
--------------------------------------------------------------------------------
/templates/nomad.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Nomad
3 | Documentation=https://nomadproject.io/docs/
4 | Wants=network-online.target
5 | After=network-online.target
6 | ConditionFileNotEmpty={{ nomad_configuration_dir }}/nomad.hcl
7 |
8 | [Service]
9 | {# {% if 'linode' in group_names %} #}
10 | {# User=nomad #}
11 | {# Group=nomad #}
12 | {# {% endif %} #}
13 |
14 | {# NOTE: Nomad is running as root rather than the Nomad user due to the Docker driver not being started when cgroups v2 are enabled.
15 |
16 | https://github.com/hashicorp/nomad/pull/16063
17 | #}
18 | User=root
19 | Group=root
20 | ExecReload=/bin/kill -HUP $MAINPID
21 | ExecStart=/usr/local/bin/nomad agent -config {{ nomad_configuration_dir }}
22 | KillMode=process
23 | KillSignal=SIGINT
24 | LimitNOFILE=infinity
25 | LimitNPROC=infinity
26 | Restart=on-failure
27 | RestartSec=2
28 | StartLimitBurst=3
29 | TasksMax=infinity
30 |
31 | [Install]
32 | WantedBy=multi-user.target
33 |
--------------------------------------------------------------------------------
/templates/consul.launchd.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | EnvironmentVariables
6 |
7 | PATH
8 | /usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/usr/local/sbin
9 |
10 | KeepAlive
11 |
12 | PathState
13 |
14 | {{ mac_keep_alive_file }}
15 |
16 |
17 | SuccessfulExit
18 |
19 |
20 | Label
21 | com.{{ my_username }}.consul
22 | ProgramArguments
23 |
24 | /usr/local/bin/consul
25 | agent
26 | -config-dir
27 | {{ interpolated_consul_configuration_dir }}
28 |
29 | RunAtLoad
30 |
31 |
32 |
33 |
--------------------------------------------------------------------------------
/files/certs/nomad/cfssl.json:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 38373339313965336461323636373265373637393535636337626562373431383763346138386130
3 | 6564613633623264663835373966616439313364646436310a366531623162623130353339323236
4 | 31383364646331333261633738353538663361313130623730623036393534306634623065333335
5 | 6336626163653033310a383132366162666434653461396130643034343861633462373532373535
6 | 35613162333365373739313063393865356561636437623634303864376630373737343930653062
7 | 30366634386132646232353132303831363364363131653433363838396565646139306232336566
8 | 63333866373364613239353330373263613863306136393932383539306134646639326233313761
9 | 63666163656265663633326564643864343639636136373735353731353431313866646335333139
10 | 33653764633333656631323865663639613735303430643230663862376631613437346564393631
11 | 39303231363839663834616439643632613331373735393834626665323831646165303738386132
12 | 31326633613030356338323133643031666666303730346636393134393930383462653637393830
13 | 61306131646563626565
14 |
--------------------------------------------------------------------------------
/tasks/debug.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # - name: architecture
3 | # ansible.builtin.debug:
4 | # var: ansible_facts['architecture']
5 |
6 | # - name: distribution
7 | # ansible.builtin.debug:
8 | # var: ansible_facts['distribution']
9 |
10 | # - name: distribution_file_variety
11 | # ansible.builtin.debug:
12 | # var: ansible_facts['distribution_file_variety']
13 |
14 | # - name: service_mgr
15 | # ansible.builtin.debug:
16 | # var: ansible_facts['service_mgr']
17 |
18 | # - name: os_family
19 | # ansible.builtin.debug:
20 | # var: ansible_facts['os_family']
21 |
22 | # - ansible.builtin.debug:
23 | # msg: "{{ ansible_os_family }}"
24 |
25 | # - ansible.builtin.debug:
26 | # msg: "pass: {{ ansible_become_pass }}"
27 |
28 | # - ansible.builtin.debug:
29 | # var: ansible_facts['nodename']
30 |
31 | # - ansible.builtin.debug:
32 | # var: ansible_facts['system_vendor']
33 | # when:
34 | # - ansible_facts['system_vendor'] is search("Synology")
35 |
36 | - name: "End play"
37 | ansible.builtin.meta: end_play
38 |
--------------------------------------------------------------------------------
/templates/telegraf/nomad.conf.j2:
--------------------------------------------------------------------------------
1 | [[inputs.statsd]]
2 | protocol = "udp" # Protocol, must be "tcp", "udp4", "udp6" or "udp" (default=udp)
3 | service_address = "127.0.0.1:8125" # Address and port to host UDP listener on
4 | delete_gauges = true # Reset gauges every interval (default=true)
5 | delete_counters = true # Reset counters every interval (default=true)
6 | delete_sets = true # Reset sets every interval (default=true)
7 | delete_timings = true # Reset timings & histograms every interval (default=true)
8 | percentiles = [90.0] # Percentiles to calculate for timing & histogram stats
9 | metric_separator = "_"
10 | datadog_extensions = true # Parses tags in the datadog statsd format
11 | allowed_pending_messages = 10000
12 | percentile_limit = 1000
13 | [inputs.statsd.tagdrop]
14 | task = [ "await-*","run-*","await_*","run_*","create_*","create-*" ]
15 | task_group = [ "await-*","run-*","await_*","run_*","create_*","create-*" ]
16 |
--------------------------------------------------------------------------------
/templates/docker_compose_files/synology_tdarr_node.yml.j2:
--------------------------------------------------------------------------------
1 | ---
2 | version: '3.9'
3 |
4 | services:
5 | tdarr_node:
6 | image: haveagitgat/tdarr_node:latest
7 | hostname: tdarr_node
8 | container_name: tdarr_node
9 | network_mode: "bridge"
10 | environment:
11 | - "nodeID={{ inventory_hostname }}"
12 | - "nodeIP={{ ansible_host }}"
13 | - "nodePort={{ tdarr_node_port }}"
14 | - "serverIP={% for h in groups['lan'] if hostvars[h].is_tdarr_server == true %}{{ hostvars[h].ansible_host }}{% endfor %}"
15 | - "serverPort={{ tdarr_server_port }}"
16 | - "TZ=America/New_York"
17 | - "PGID=101"
18 | - "PUID={{ ansible_user_uid }}"
19 | volumes:
20 | - /volume1/docker/tdarr_node:/app/configs
21 | - /volume1/media/media/movies:/movies
22 | - /volume1/media/tdarr_tmp:/tdarr_tmp
23 | - /volume1/media/tdarr_complete:/tdarr_complete
24 | ports:
25 | - {{ tdarr_node_port }}:{{ tdarr_node_port }}
26 | devices:
27 | - /dev/dri:/dev/dri
28 | privileged: true
29 | restart: unless-stopped
30 |
--------------------------------------------------------------------------------
/templates/consul_services/tdarr_service.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "services": [{
3 | "name": "tdarr",
4 | "id": "tdarr",
5 | "tags": [
6 | "traefik.enable=true",
7 | "traefik.http.services.tdarr.loadbalancer.server.port={{ tdarr_webui_port }}",
8 | "traefik.http.routers.tdarr.rule=Host(`tdarr.{{ homelab_domain_name }}`)",
9 | "traefik.http.routers.tdarr.entryPoints=web,websecure",
10 | "traefik.http.routers.tdarr.service=tdarr",
11 | "traefik.http.routers.tdarr.tls=true",
12 | "traefik.http.routers.tdarr.tls.certresolver=cloudflare",
13 | "traefik.http.routers.tdarr.middlewares=authelia@file"
14 | ],
15 | "checks": [{
16 | "id": "tdarr-http-check",
17 | "http": "http://{{ ansible_host }}:{{ tdarr_webui_port }}",
18 | "interval": "30s",
19 | "timeout": "30s",
20 | "success_before_passing": 3,
21 | "failures_before_critical": 3
22 | }]
23 | }
24 | ]
25 | }
26 |
--------------------------------------------------------------------------------
/templates/nomad.launchd.j2:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 | EnvironmentVariables
6 |
7 | PATH
8 | /usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:/opt/X11/bin:/usr/local/sbin
9 |
10 | KeepAlive
11 |
12 | PathState
13 |
14 | {{ mac_keep_alive_file }}
15 |
16 |
17 | SuccessfulExit
18 |
19 |
20 | Label
21 | com.{{ my_username }}.nomad
22 | ProgramArguments
23 |
24 | /usr/local/bin/nomad
25 | agent
26 | -config
27 | {{ nomad_configuration_dir }}
28 |
29 | RunAtLoad
30 |
31 | StandardErrorPath
32 | /usr/local/var/log/nomad.log
33 | StandardOutPath
34 | /usr/local/var/log/nomad.log
35 |
36 |
37 |
--------------------------------------------------------------------------------
/tasks/logrotate.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # TASK DESCRIPTION:
 3 | # Installs logrotate and associated rotation jobs
4 | #
5 | # NOTE: This task exists due to the arillso.logrotate failing completely on macOS
6 |
7 | - name: Add service_backups.log to logrotate # noqa: ignore-errors
8 | become: true
9 | vars:
10 | logrotate_applications:
11 | - name: service_backups
12 | definitions:
13 | - logs:
14 | - "{{ rpi_nfs_mount_point }}/pi-cluster/logs/service_backups.log"
15 | options:
16 | - rotate 1
17 | - size 100k
18 | - missingok
19 | - notifempty
20 | - su root root
21 | - extension .log
22 | - compress
23 | - nodateext
24 | - nocreate
25 | - delaycompress
26 | ansible.builtin.import_role:
27 | name: arillso.logrotate
28 | failed_when: false
29 | ignore_errors: true
30 | when:
31 | - "'macs' not in group_names"
32 | - is_cluster_leader
33 |
--------------------------------------------------------------------------------
/templates/nomad_jobs/diun.hcl:
--------------------------------------------------------------------------------
1 | job "diun" {
2 | region = "global"
3 | datacenters = ["{{ datacenter_name }}"]
4 | type = "system"
5 |
6 | group "diun" {
7 |
8 | restart {
9 | attempts = 0
10 | delay = "30s"
11 | }
12 |
13 | task "diun" {
14 |
15 | env {
16 | // DIUN_PROVIDERS_DOCKER_ENDPOINT = "unix:///var/run/docker.sock"
17 | DIUN_NOTIF_PUSHOVER_RECIPIENT = "{{ pushover_recipient }}"
18 | DIUN_NOTIF_PUSHOVER_TOKEN = "{{ pushover_token }}"
19 | DIUN_PROVIDERS_DOCKER_WATCHBYDEFAULT = "true"
20 | DIUN_WATCH_FIRSTCHECKNOTIF = "false"
21 | DIUN_WATCH_SCHEDULE = "26 */48 * * *"
22 | TZ = "America/New_York"
23 | }
24 |
25 | driver = "docker"
26 | config {
27 | image = "crazymax/diun:latest"
28 | hostname = "${NOMAD_JOB_NAME}"
29 | volumes = [
30 | "/var/run/docker.sock:/var/run/docker.sock"
31 | ]
32 | } // docker config
33 |
34 | // resources {
35 | // cpu = 100 # MHz
36 | // memory = 300 # MB
37 | // } // resources
38 |
39 | } // task diun
40 | } // group
41 | } // job
42 |
--------------------------------------------------------------------------------
/.ansible-lint-ignore:
--------------------------------------------------------------------------------
 1 | # This file lists ignored rule violations for ansible-lint
2 | handlers/main.yml ignore-errors
3 | handlers/main.yml name[casing]
4 | main.yml name[casing]
5 | main.yml name[missing]
6 | tasks/backups.yml name[casing]
7 | tasks/cluster_storage.yml name[casing]
8 | tasks/consul.yml command-instead-of-module
9 | tasks/consul.yml name[template]
10 | tasks/consul.yml no-changed-when
11 | tasks/debug.yml name[casing]
12 | tasks/docker.yml name[casing]
13 | tasks/docker.yml no-changed-when
14 | tasks/interpolated_variables.yml name[casing]
15 | tasks/logrotate.yml ignore-errors
16 | tasks/logrotate.yml name[casing]
17 | tasks/nomad.yml name[casing]
18 | tasks/nomad.yml name[template]
19 | tasks/orchestration_jobs.yml name[casing]
20 | tasks/packages.yml ignore-errors
21 | tasks/packages.yml name[casing]
22 | tasks/pull_repositories.yml name[casing]
23 | tasks/pull_repositories.yml no-changed-when
24 | tasks/sanity.yml name[casing]
25 | tasks/service_prometheus_nodeExporter.yml name[casing]
26 | tasks/service_prometheus_nodeExporter.yml no-changed-when
27 | tasks/tdarr.yml name[casing]
28 | tasks/tdarr.yml no-changed-when
29 | tasks/telegraf.yml name[casing]
30 | tasks/telegraf.yml name[template]
31 | tasks/telegraf.yml package-latest
32 | vault.yml yaml[document-start]
33 |
--------------------------------------------------------------------------------
/files/certs/nomad/cli-key.pem:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 36316264663735643835613962666266363839626566356439343937616430646431323637303462
3 | 3963346362316139323039666664613737343363663938330a346333323164666330366137383931
4 | 38356164333732633166383336306636373633333064393665636465316538393735393732306263
5 | 3666623932623331310a303735326530356661333735653033326236313637633334346335383437
6 | 66343464356436353962613866316462346162643534663732336664366634356661613165616135
7 | 37343464616463313835356637623531323939356565333966383062643334626434643532393136
8 | 30326335303838306531306534323863383263343661333939393966353934313663623933376666
9 | 32653435316362653631653234333261303137333831373037306266383061313135333033373639
10 | 35383665373737646431333034653330306637316362623162323464333937626632353162363538
11 | 31393238323362643363643631323531363532666366373063343434656630366363616164343564
12 | 66306264396135396538663966613966646534303235373738326234373030336132373163386232
13 | 62396338636233376163383632623030323934363863383733626333613430313332373138666566
14 | 66653464663132333466396233616339366438376166393935363965663738356639663165303561
15 | 66313134383162373564303838356137313536633465353763336363326364313961366534383966
16 | 38363364313961386262613563656330633133306432383464643530323266333139316636353834
17 | 62303232343761383765
18 |
--------------------------------------------------------------------------------
/files/certs/nomad/client-key.pem:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 30666633323939666138653137653533636566333239346165303637366339663633666166363662
3 | 3336356139306162653534616466343764393435633861350a333932393537353932663735383839
4 | 36343935343962626537346236626433343238346633623139303738633736653366386232303766
5 | 3934633635353361350a663165343036356565326162396164313333653733363939316661633436
6 | 34303162343964623034333161343439616261643963646436333663646537366639353666353964
7 | 66633736343965346630356438323536333232613066353737306639663562373164333530393536
8 | 65393136363764343632376561393033346166373761613230643136323534366330613363373232
9 | 65343332373538346432383964366331373262373137653632353932653633366564633263333063
10 | 30336663313032646237306639373865663462666331633363376538326666323334326563343539
11 | 63383031643461366536666330303431666437636432306234623633393666653862323964646638
12 | 63323065393330636561393464376234613330343161383835613036396461306438643961396336
13 | 39323932346431383063643334383065343934303861363564633438636631623461346661653332
14 | 34336533323738343638396431616433663632306166316337356332616466626363666363613838
15 | 37656338353163663364356134353635653637653865656466383663303131326230623635366330
16 | 65343438653236616332363935653337623762376338313663373163343163656561326536336234
17 | 37376333653931616436
18 |
--------------------------------------------------------------------------------
/files/certs/nomad/nomad-ca-key.pem:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 30313163323036306164633430346437623836386164346437353434323565373864383762623932
3 | 6330626463336534333665333563313530356664623933320a393866343230626434376230643536
4 | 32663637343633303633356531326636383933643866313337633464316330393262343935373830
5 | 3032396661623133660a323336386261363036643561613832323961343162663937363230373936
6 | 61316666613933333861373631626436626366323332386236333232656262356439316430633265
7 | 33663433636139313061373764346537373137613431623262383262663231356431643534336535
8 | 32386332643164303561326262323334323961623831333535366362623038623137646465316364
9 | 65626230333737356365333232373338373563616264346266663965396266303632613136313435
10 | 36333733306565656134356465373165323837393438393465316363623133363765343537633234
11 | 35386530373664383864313430653037396133363064303866666331366235643566636264343732
12 | 37636235653065643466313438353236373663666163636235373365323533303334383637666130
13 | 62643439376639623330653265613163333934326561613333363232303061356133383234653365
14 | 32623334646661616232393061626362643238323433353936363833626532333232616261643032
15 | 34653735333938396163346464396538333061343631623861383336343465643230323433306532
16 | 63353333363861303137656165303364633166626132616236363536633136616361623432613932
17 | 64616333303538643933
18 |
--------------------------------------------------------------------------------
/files/certs/nomad/server-key.pem:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 36363530323232636238366533353530383364656162356131303335386263386364396533656264
3 | 3461613435353336646437346434646532373733663531650a643361386533393636376533613030
4 | 32626336366262396466636161616161393662616633633335336136653830326165346537636338
5 | 3839613763336532300a666636343239643438643465623237343236363363386138646662343335
6 | 62613965306133393530656139613230373535376665336566316435616134396263383231383936
7 | 66336638626663623663626635306237646536303437396530636662373830316334613932633832
8 | 38393264363662346234616535353732356635626133386637336234356665653563356337376333
9 | 63363930653430326165336231616366636535313161663530653238383663333039383564323064
10 | 39306330306133646466633533366562653834313438316566323833653035383430353335646261
11 | 66363864333337623631343738653138393036343330306266613735336431363337386530626433
12 | 31383962646336393538313961396531653865393566626137616435373839613133343331313935
13 | 38383564663031636561343266613863633565356239646363636363313964323139626234383134
14 | 62386162393133666633663439623735386235303933343666373666656133393331323435353464
15 | 66353136343439333561363234373666643766633438646663376238343663363136613963653162
16 | 39313039326133633536653665373165653733353037303264363337663537376162313466353261
17 | 38303033323162643939
18 |
--------------------------------------------------------------------------------
/files/certs/consul/consul-agent-ca-key.pem:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 30303134363364323437636131396232313837343934613131303933346235643163316632666466
3 | 6331613537326562306465323561356531396136346231390a653966326662376561373462376536
4 | 39353666623933616265303761353237346439656431666263316230633132363166663733633064
5 | 3539636539653130340a303532373663353861336131656138633434306336376233316632323833
6 | 37363831643563333430346239323961376161306231326461333262333037323133323463623064
7 | 63343063383964653165393265613135333037316266623636313135313130306534396262373232
8 | 36626562653961306536363937363032383633636230666633383032623333653466663135623666
9 | 32623963626235626365383637616265623233306331376530383765666362373434303135613637
10 | 33383131386238303866313436316137656632306663303235643431373762383238663031313231
11 | 66336261653561653434343139653231656633306630363935383830616434313963323963376661
12 | 36323731333664633139333539306137633932323236313137643562393833383533303733366336
13 | 64623462336636373562623035613262373634323265363330366336353936613531643037316236
14 | 37363266616362373764613530646231613566306432656236623034643139666430623539303936
15 | 31333234323033616231643264616139326238303836363035373938653531623563636531303166
16 | 65646238376464663763313034306536323935366263383265396236636266353631343538343465
17 | 36313837653839303861
18 |
--------------------------------------------------------------------------------
/files/certs/consul/homelab-server-consul-0-key.pem:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 30336236353230313637666437663037386434306132373730386366353039393838313937656438
3 | 6264323738383364383766303166343834636661373934610a373032353232643465353234363038
4 | 35383063633233313963353063316262326335333661313865623132386236376535646361333536
5 | 6361626132393136650a643530306535306564393236626362643038643831653837666539626538
6 | 62616439373161646165343465396335646665643033623664613838303066653735613030376662
7 | 38383261653366336662623337363737323738636339316464643932346366383832396639363137
8 | 31306433343635363663643234383330653833666663336639666434333166663934353263633837
9 | 35316536313437363838366361643830353036643033373361396137316265633933323432646538
10 | 37643561393438626132316637626334623463646230316663613739386638653831623337643134
11 | 39313364386130336330666461666362386232366535386639333930366266666232323862343462
12 | 33623165363131653138633635353431343630623834343132646135323039336631383038373536
13 | 65376462393636376639643437656363366533613032313330343934356364386234306433623634
14 | 39653638646330643237333335653132306666643063323539653237643366396631326462346234
15 | 33383639313963323963636134643232396463303963396566643432653664656231386534653266
16 | 33376664366464616630356664376264376430356163356463623034316363623936366163373165
17 | 31613566313531383734
18 |
--------------------------------------------------------------------------------
/scripts/ansible-vault-precommit.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
#
# Git pre-commit hook: reject the commit when any staged vault file
# (matching FILES_PATTERN) is not ansible-vault encrypted, i.e. its first
# line does not contain the $ANSIBLE_VAULT header.

if ! GITROOT=$(git rev-parse --show-toplevel 2>/dev/null); then
    # 'error' and '_safeExit_' were undefined helper functions (copied from
    # another framework) and would themselves fail; use plain stderr + exit
    # so the hook works standalone.
    echo "We do not seem to be running in a git repository" >&2
    exit 1
fi

FILES_PATTERN='.*vault.*\.ya?ml$'
REQUIRED='ANSIBLE_VAULT'

EXIT_STATUS=0
wipe="\033[1m\033[0m"
yellow='\033[1;33m'
# carriage return hack. Leave it on 2 lines.
cr='
'
# Read filenames line-by-line (instead of 'for f in $(...)') so names
# containing whitespace stay intact.
while IFS= read -r f; do
    [ -z "${f}" ] && continue
    # Test for the presence of the required header on the first line only.
    if ! head -n1 "${GITROOT}/${f}" | grep --quiet --no-messages "${REQUIRED}"; then
        # Build the list of unencrypted files if any
        UNENCRYPTED_FILES="${f}${cr}${UNENCRYPTED_FILES}"
        EXIT_STATUS=1
    fi
done < <(git diff --cached --name-only | grep -E "${FILES_PATTERN}")

if [ ! $EXIT_STATUS = 0 ]; then
    echo '# COMMIT REJECTED'
    echo '# Looks like unencrypted ansible-vault files are part of the commit:'
    echo '#'
    while read -r line; do
        if [ -n "${line}" ]; then
            echo -e "#\t${yellow}unencrypted: ${line}${wipe}"
        fi
    done <<<"${UNENCRYPTED_FILES}"
    echo '#'
    echo "# Please encrypt them with 'ansible-vault encrypt '"
    echo "# (or force the commit with '--no-verify')."
    exit $EXIT_STATUS
fi
exit $EXIT_STATUS
42 |
--------------------------------------------------------------------------------
/tasks/pull_repositories.yml:
--------------------------------------------------------------------------------
---
# TASK DESCRIPTION:
# Runs a git pull against all repositories in ~/repos by running a shellscript named 'pull_all_repos'.
# NOTE: This shellscript is not part of this repository.

- name: "Check if pull_all_repos exists"
  ansible.builtin.stat:
    path: "~/bin/pull_all_repos"
  check_mode: false # run even during --check so the 'when' guards below have data
  register: pull_script_check

- name: "Check if ~/repos exists"
  ansible.builtin.stat:
    path: "~/repos"
  check_mode: false # run even during --check so the 'when' guards below have data
  register: repos_directory_check

- name: "Run pull_all_repos script"
  ansible.builtin.command:
    cmd: "~/bin/pull_all_repos --directory ~/repos"
  register: pull_script_output
  # Conditions are ANDed and evaluated in order: the 'exists' check guards the
  # attribute lookups that follow it, and 'isdir is defined' guards 'isdir'.
  when:
    - not ansible_check_mode
    - pull_script_check.stat.exists
    - pull_script_check.stat.executable
    - repos_directory_check.stat.isdir is defined
    - repos_directory_check.stat.isdir
    - repos_directory_check.stat.writeable
  # rc 0 = pulled changes, rc 1 is tolerated as "nothing to pull"; anything
  # higher fails. NOTE(review): assumes the script follows this rc convention
  # -- confirm against ~/bin/pull_all_repos.
  failed_when: pull_script_output.rc > 1
  changed_when: pull_script_output.rc == 0

- name: "Output from pull_all_repos"
  ansible.builtin.debug:
    msg: "{{ pull_script_output.stdout }}"
  # Mirrors the guard list above so we never reference a skipped register.
  when:
    - not ansible_check_mode
    - pull_script_check.stat.exists
    - pull_script_check.stat.executable
    - repos_directory_check.stat.isdir is defined
    - repos_directory_check.stat.isdir
    - repos_directory_check.stat.writeable
42 |
--------------------------------------------------------------------------------
/tasks/backups.yml:
--------------------------------------------------------------------------------
---
# TASK DESCRIPTION:
# Nomad jobs which can not run with NFS storage use pre-start and post-stop tasks to invoke
# shell scripts which keep the job's filesystem in sync. This task does the following:
#
#       1. Copies a backup and restore shellscript to /usr/local/bin
#       2. Edits the sudoers file to allow the script to be invoked with sudo privileges

- name: Copy backup shellscript to server
  become: true
  ansible.builtin.template:
    src: scripts/service_backups.sh.j2
    dest: /usr/local/bin/service_backups
    # Quoted to avoid YAML octal/decimal ambiguity (an unquoted 0755 relies
    # on YAML 1.1 octal parsing and is flagged by ansible-lint as risky).
    mode: "0755"
  when:
    - is_nomad_client or is_nomad_server

- name: Copy restore shellscript to server
  become: true
  ansible.builtin.template:
    src: scripts/service_restore.sh.j2
    dest: /usr/local/bin/service_restore
    mode: "0755"
  when:
    - is_nomad_client or is_nomad_server

- name: "SUDO: Confirm users can run service_backups"
  become: true
  ansible.builtin.lineinfile:
    path: "/etc/sudoers.d/010_{{ item }}-backups-nopasswd"
    line: "{{ item }} ALL=(ALL) NOPASSWD: /usr/local/bin/service_backups, /usr/local/bin/service_restore"
    state: present
    create: true
    mode: "0440"
    # visudo validates the candidate file before it is written, so a bad line
    # can never break sudo.
    validate: "/usr/sbin/visudo -cf %s"
  loop:
    - nomad
    - "{{ ansible_user }}"
  when:
    - is_nomad_client or is_nomad_server
    - "'pis' in group_names"
42 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
[tool.poetry]
authors = ["Nate Landau "]
description = "Ansible scripts to configure my homelab"
name = "ansible-homelab-config"
packages = [{ include = "ansible_homelab_config" }]
readme = "README.md"
version = "0.2.0"

[tool.poetry.dependencies]
# ansible-lint is not installable on Windows, hence the platform marker.
ansible = "^9.3.0"
ansible-lint = { version = "^24.2.1", markers = "platform_system != 'Windows'" }
commitizen = "^3.18.3"
jmespath = "^1.0.1"
poethepoet = "^0.25.0"
pre-commit = "^3.6.2"
python = "^3.11"
typos = "^1.19.0"
yamllint = "^1.35.1"

[build-system]
build-backend = "poetry.core.masonry.api"
requires = ["poetry-core"]

[tool.commitizen]
bump_message = "bump(release): v$current_version → v$new_version"
tag_format = "v$version"
update_changelog_on_bump = true
# NOTE(review): version_provider = "poetry" already sources the version from
# [tool.poetry]; the explicit 'version' below is redundant and can drift out
# of sync -- confirm whether it can be removed.
version = "0.2.0"
version_provider = "poetry"

# Run the playbook with the vault password file: `poe pb`
[tool.poe.tasks]
pb = """
ansible-playbook
--vault-password-file .password_file
main.yml
-i inventory.yml
"""

[tool.poe.tasks.lint]
help = "Run linters"

# Linters run in sequence; --strict makes yamllint warnings fail the task.
[[tool.poe.tasks.lint.sequence]]
cmd = "yamllint --strict --config-file .yamllint.yml tasks/ handlers/ main.yml inventory.yml default_variables.yml"

[[tool.poe.tasks.lint.sequence]]
cmd = "ansible-lint --force-color --config-file .ansible-lint.yml"
47 |
--------------------------------------------------------------------------------
/files/certs/nomad/cli.csr:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 66373835646438666239653764656335366633633232316231336365393037343661636139633836
3 | 3336626134623162313762376237373231356232626334380a633538303638366161353833396331
4 | 61313731646136313862633630613035643236633863363463393730646538643666393562313735
5 | 3261386231343232340a346332343065393836633637636332363232653964343636383863366334
6 | 35313537323131623365363231663731323662323030306162323939366462663662363631363561
7 | 37356231353739386135323636613734366333653233653862613133333032383432613834343162
8 | 63393162353538333862663062383030653234623732643264613565393831353634626133396434
9 | 65613166353666366134653865373765363530363533383639633864373038646661303932626466
10 | 33323066333935323465393361396164353430373837323137396332323038656534636436366438
11 | 33646365623835383863643966353335323763376265343364666334306435386266313061353964
12 | 39326137316366613965306135346432333438393137363962366232306638666633306332623930
13 | 34656662383838646439656636623631666566336263363163666231343538383963633134366262
14 | 36366665636132623532323661633637346664336332383636626236653738383433316534636434
15 | 66313338356632633636356262383633656464383532313264306464393139616533343932353530
16 | 64373062363137666166313837366162383233633030393362373836373165643932346665653363
17 | 36333138306437343263613965386638393033386535616138363433323230393564396231366634
18 | 39346438373763643438323438633136346364313266323563663035363839313961653530353466
19 | 63343934333462656635653531653838663032666339633837663539633139613061633264366461
20 | 34623561353330636538373865356335393234613865663965636262333532316234646330333530
21 | 34306438396564626638623265386565383735303365383735633961663266633766333666633437
22 | 6432
23 |
--------------------------------------------------------------------------------
/tasks/interpolated_variables.yml:
--------------------------------------------------------------------------------
---
# TASK DESCRIPTION:
# Creates variables based on other variables and Ansible facts
#
# Variables created:
#   - interpolated_localfs_service_storage: [dir]
#   - interpolated_nfs_service_storage: [dir]
#   - interpolated_consul_configuration_dir: [dir]

- name: "Set local filesystem location (pis)"
  ansible.builtin.set_fact:
    interpolated_localfs_service_storage: "{{ rpi_localfs_service_storage }}"
  changed_when: false
  when:
    - "'pis' in group_names"

- name: "Set local filesystem location (macs)"
  ansible.builtin.set_fact:
    interpolated_localfs_service_storage: "{{ mac_localfs_service_storage }}"
  changed_when: false
  when:
    - "'macs' in group_names"

- name: "Set NFS mount location (pis)"
  ansible.builtin.set_fact:
    interpolated_nfs_service_storage: "{{ rpi_nfs_mount_point }}"
  changed_when: false
  when:
    - "'pis' in group_names"

- name: "Set NFS mount location (macs)"
  ansible.builtin.set_fact:
    interpolated_nfs_service_storage: "{{ mac_storage_mount_point }}"
  changed_when: false
  when:
    - "'macs' in group_names"

- name: "Set consul configuration directory (synology)"
  ansible.builtin.set_fact:
    interpolated_consul_configuration_dir: "{{ synology_consul_configuration_dir }}"
  when:
    - inventory_hostname == 'synology'

- name: "Set consul configuration directory (pis)"
  ansible.builtin.set_fact:
    interpolated_consul_configuration_dir: "{{ rpi_consul_configuration_dir }}"
  when:
    - "'pis' in group_names"

- name: "Set consul configuration directory (macs)"
  ansible.builtin.set_fact:
    interpolated_consul_configuration_dir: "{{ mac_consul_configuration_dir }}"
  when:
    - "'macs' in group_names"
--------------------------------------------------------------------------------
/files/certs/nomad/client.csr:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 61626565613135366230633966646533626265353465316636313339643964343730303030323366
3 | 6132353835396263323762373136363233356538303434370a623237313934303563346236613635
4 | 61313063353339623437303136633962343630633261643865386237343234306232626630383561
5 | 3430613332396631610a343566613833383762303238323364326165663234366530393636376233
6 | 30616265303939306333633534646231326134653633343364336638313361626462383230323465
7 | 32666662356336613431306137373263373532383935616130613933326535343561303731316335
8 | 30663961366236356634373736353531613135366538336539383463643764356437616234653166
9 | 61333061626232663630656362643136313331373336353164663734393265336164303935363565
10 | 38656561636639333935613238373537333663633138303338623434393339636132303062386165
11 | 30346135396163353261666332383365323662303437363033333130646563666337653565346338
12 | 65306635336537333430366136326631386266316339653836646337363263346239386332666436
13 | 61393231666531613738363037346338633832616137363039333761373561636637636535303563
14 | 61616433633734656666646237376535613836336262313362393765396436343135626536333332
15 | 35336564383533663864303937356536636232653065343431393765343230323465656665373036
16 | 32623533303338396630393536663435633430343765356630306432636238323263653366396566
17 | 31343065323634623861663062343437316532346337653864616638653362663965303130343134
18 | 34323164393438623434376536393635353661353633326530313061353030336333333135376338
19 | 61613361626361623834343330316164306138623034393131656566636565373531653764653235
20 | 63383035383465393365303434346162646363366232386664306665623661653936363631393634
21 | 38653765303932363661396635353162353561316234613239366334323531353736333036646538
22 | 65333831333466383433663964623263633835636534666366383032303963343066646434376335
23 | 38383033343530353931663036366131313633643563633631663235336165353139363438353666
24 | 64616236313032373034626232396637346165343436613836393361613864333033623233393436
25 | 39353935376131366363663439646563303364626630643836323163306639633335363439363236
26 | 30303761653732353835363464613937376339623230303633363236336636313730
27 |
--------------------------------------------------------------------------------
/files/certs/nomad/server.csr:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 35623164313039613163636133336163626265636364643831313639396131636332313139396335
3 | 6664646363336636303230363532613766393334636432630a663131333037363730313366393364
4 | 31656531633964393336363866323162333630663434353535373732333465386163353835373836
5 | 3031616538393133330a353265613262633336613930646234313062326133343435663366383936
6 | 61336265353263613762306638663566346361656463626634343234626362393363616134333138
7 | 31373765336364376433333966333038373962623934353335396463633566633961353035343139
8 | 61306235636530376537386637343038336137626466356265386338306137666436353434633161
9 | 30363933656133356237363661643333333363326431376239633964623338393933343233623265
10 | 35366533313663386432666466613938653263323463373864663337333332326539343839633633
11 | 61643963643037656236373164633437396537363965313966336663326234343665373238306461
12 | 64643435363430633266333033633934343837393235363937653364333965383131653139313530
13 | 35316133623338333933326339383061316266663630656132346132633638643335623230636563
14 | 62376337303132616261326437316235316538336561623339383462353461363433643833393438
15 | 39343761393065333939303664633361633139643765663965346361336565323464653238356464
16 | 61303162336265623761323437303338633530636561346339303437346366383537323738373563
17 | 64326530313039343133323137393363316465383064303933303537383037376532373066343037
18 | 64666333306366353863303839613335663263333838326364386233383731373335333630633036
19 | 31336364393861643531336331363939616166356164663161336435616239363066626338653863
20 | 37653633316262393766393463613763316436633465356234326565316539633537666538383135
21 | 63396633396365316531613366643239633662366633613034373737323661313565386334383666
22 | 35353735306533643835353537656331373434643132333530343463303466363933383663663364
23 | 64386633643831303737316461663531633437623133313166616462333136393231383239313065
24 | 36343635303937393662623633663633613534663937393933373830346630313861373662396265
25 | 38663833393133643635343439386461396565373865303532323039303239663836616161623935
26 | 34353238653535663935663165326137663762343639343564346663326431373730
27 |
--------------------------------------------------------------------------------
/tasks/packages.yml:
--------------------------------------------------------------------------------
---
# TASK DESCRIPTION:
# Ensures all packages are installed and updated. apt on Debian. Homebrew on Mac.

- name: "Update and install APT packages"
  when:
    - ansible_os_family != 'Darwin'
    - manage_apt_packages_list
  block:
    - name: Update APT package cache
      become: true
      ansible.builtin.apt:
        update_cache: true
        cache_valid_time: 3600 # skip the refresh if the cache is under 1h old

    - name: "Upgrade APT to the latest packages (this may take a while)"
      become: true
      ansible.builtin.apt:
        upgrade: safe

    - name: "Install/upgrade APT packages (this may take a while)"
      become: true
      ansible.builtin.apt:
        # Pass the whole list at once: apt resolves and installs everything
        # in a single transaction, which is much faster than looping over
        # the list one package at a time.
        pkg: "{{ apt_packages_list }}"
        state: present
      register: apt_output

- name: "Update and install Homebrew packages"
  when:
    - manage_homebrew_package_list
    - ansible_os_family == 'Darwin'
  block:
    - name: Upgrade homebrew and all packages
      community.general.homebrew:
        update_homebrew: true
        upgrade_all: true
      register: homebrew_output
      # Best-effort: a transient brew failure should not abort the play.
      ignore_errors: true

    - name: Install base homebrew packages
      community.general.homebrew:
        # The module accepts a list directly; no need to join the packages
        # into a comma-separated string.
        name: "{{ homebrew_package_list }}"
        state: present
        update_homebrew: false
        upgrade_all: false
      register: homebrew_output

    - name: Homebrew packages updated or installed
      ansible.builtin.debug:
        msg: "{{ homebrew_output.changed_pkgs }}"

    - name: Unchanged homebrew packages
      ansible.builtin.debug:
        msg: "{{ homebrew_output.unchanged_pkgs }}"

    # - name: Install homebrew casks # noqa: ignore-errors
    #   community.general.homebrew_cask:
    #     name: "{{ item }}"
    #     state: present
    #     install_options: "appdir=/Applications"
    #     accept_external_apps: true
    #     upgrade_all: false
    #     update_homebrew: false
    #     greedy: false
    #   loop: "{{ homebrew_casks_list }}"
    #   ignore_errors: true
--------------------------------------------------------------------------------
/templates/nomad_jobs/promtail-syslogs.hcl:
--------------------------------------------------------------------------------
1 | job "promtail-syslogs" {
2 | region = "global"
3 | datacenters = ["{{ datacenter_name }}"]
4 | type = "system"
5 |
6 | update {
7 | max_parallel = 1
8 | health_check = "checks"
9 | min_healthy_time = "10s"
10 | healthy_deadline = "5m"
11 | progress_deadline = "10m"
12 | auto_revert = true
13 | canary = 0
14 | stagger = "30s"
15 | }
16 |
17 | group "promtail-syslogs" {
18 |
19 | restart {
20 | attempts = 0
21 | delay = "30s"
22 | }
23 |
24 | task "promtail-syslogs" {
25 |
26 | driver = "docker"
27 | config {
28 | image = "grafana/promtail"
29 | hostname = "${NOMAD_JOB_NAME}"
30 | volumes = [
31 | "/var/log:/var/log"
32 | ]
33 | args = [
34 | "-config.file",
35 | "/local/promtail-config.yaml",
36 | "-print-config-stderr"
37 | ]
38 | } // docker config
39 |
40 |
41 | template {
42 | destination = "local/promtail-config.yaml"
43 | env = false
44 | data = <&1 >/dev/null; do echo '.'; sleep 2; done"
45 | ]
46 | network_mode = "host"
47 | }
48 |
49 | resources {
50 | cpu = 200
51 | memory = 128
52 | }
53 |
54 | lifecycle {
55 | hook = "prestart"
56 | sidecar = false
57 | }
58 | } // /task
59 |
task "chronograf" {

    // env {
    //     KEY = "VALUE"
    // }

    driver = "docker"
    config {
        image    = "chronograf:latest"
        hostname = "${NOMAD_JOB_NAME}"
        ports    = ["chronografPort"]
    } // docker config

    // Register with Nomad service discovery; Traefik routes
    // chronograf.{{ homelab_domain_name }} here with TLS via the
    // cloudflare cert resolver.
    service {
        port     = "chronografPort"
        name     = "${NOMAD_JOB_NAME}"
        provider = "nomad"
        tags = [
            "traefik.enable=true",
            "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
            "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
            "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
            "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
            "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
        ]

        // Plain TCP liveness probe on the service port.
        check {
            type     = "tcp"
            port     = "chronografPort"
            interval = "30s"
            timeout  = "4s"
        }
        // limit = 0 disables restarts triggered by a failing check.
        check_restart {
            limit = 0
            grace = "1m"
        }
    } // service

    // resources {
    //     cpu    = 40 # MHz
    //     memory = 10 # MB
    // } // resources

} // task
104 |
105 |
106 | } // group
107 |
108 |
109 | } // job
110 |
--------------------------------------------------------------------------------
/templates/nomad_jobs/syncthing.hcl:
--------------------------------------------------------------------------------
job "syncthing" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // Only schedule on nodes whose unique name matches "rpi" (the Raspberry Pis).
    constraint {
        attribute = "${node.unique.name}"
        operator  = "regexp"
        value     = "rpi"
    }

    // constraint {
    //     attribute = "${attr.cpu.arch}"
    //     operator  = "regexp"
    //     value     = "64"
    // }


    // Rolling update: one allocation at a time; it must pass its checks for
    // 10s within the 5m deadline, and a failed deploy auto-reverts to the
    // last good version.
    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "syncthing" {

        // Do not retry a failed task in place (attempts = 0).
        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            // Web UI: dynamic host port mapped to container port 8384.
            port "webGUI" {
                to = "8384"
            }
            // Sync protocol transfers: fixed host port 22000.
            port "listen_tcp_udp" {
                static = "22000"
                to     = "22000"
            }
            // Local discovery broadcasts: fixed host port 21027.
            port "udp_proto_discovery" {
                static = "21027"
                to     = "21027"
            }
        }

        task "syncthing" {

            env {
                // PUID/PGID come from Nomad client node metadata so files
                // written to the NFS share get the expected owner/group.
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
            }

            driver = "docker"
            config {
                image    = "ghcr.io/linuxserver/syncthing"
                hostname = "${NOMAD_JOB_NAME}"
                // Config and the synced data both live on the NFS share.
                volumes = [
                    "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_JOB_NAME}:/config",
                    "${meta.nfsStorageRoot}/${NOMAD_JOB_NAME}:/Sync"
                ]
                ports = ["webGUI","listen_tcp_udp","udp_proto_discovery"]
            } // docker config

            // Register with Nomad service discovery; Traefik routes
            // syncthing.{{ homelab_domain_name }} to the web UI behind the
            // Authelia middleware (middlewares=authelia@file).
            service {
                port     = "webGUI"
                name     = "${NOMAD_JOB_NAME}"
                provider = "nomad"
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=syncthing",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file"
                ]

                // Plain TCP liveness probe on the web UI port.
                check {
                    type     = "tcp"
                    port     = "webGUI"
                    interval = "30s"
                    timeout  = "4s"
                }
                // limit = 0 disables restarts triggered by a failing check.
                check_restart {
                    limit = 0
                    grace = "1m"
                }
            } // service

            resources {
                cpu    = 1200 # MHz
                memory = 300 # MB
            } // resources

        } // task


    } // group


} // job
108 |
--------------------------------------------------------------------------------
/templates/telegraf/pingHosts.conf.j2:
--------------------------------------------------------------------------------
{# Ping internal servers #}
{# Rendered per-host by Ansible; the enum processor below rewrites the raw #}
{# "url" tag from inputs.ping into a friendly "host" tag for dashboards.   #}
[[processors.enum]]
[[processors.enum.mapping]]
## Name of the field to map
#field = "url"

## Name of the tag to map
tag = "url"

## Destination tag or field to be used for the mapped value. By default the
## source tag or field is used, overwriting the original value.
dest = "host"

## Default value to be used for all values not contained in the mapping
## table. When unset and no match is found, the original field will remain
## unmodified and the destination tag or field will not be created.
# default = 0

## Table of mappings
[processors.enum.mapping.value_mappings]
"10.0.30.6" = "synology"
{% for i in groups['pis'] %}
"{{ hostvars[i].ansible_host }}" = "{{ hostvars[i].inventory_hostname }}"
{% endfor %}

[[inputs.ping]]
## Hosts to send ping packets to.
# https://github.com/influxdata/telegraf/blob/release-1.13/plugins/inputs/ping/README.md
## The first entries come from the 'pis' inventory group plus the Synology
## NAS (10.0.30.6); the he.net hosts are Hurricane Electric core routers
## used as external latency reference points around the world.
urls = [{% for i in groups['pis'] %}'{{ hostvars[i].ansible_host }}'{% if not loop.last %}, {% endif %}{% endfor %},
'10.0.30.6',
'core1.bos1.he.net',
'core2.lax1.he.net',
'core1.nyc4.he.net',
'core2.oma1.he.net',
'core1.chi1.he.net',
'core1.dal1.he.net',
'core1.den1.he.net',
'core2.mia1.he.net',
'core1.phx1.he.net',
'core1.sea1.he.net',
'core1.blp1.he.net',
'core1.ams1.he.net',
'core1.dxb1.he.net',
'core1.jnb1.he.net',
'core1.man1.he.net',
'core1.rom1.he.net',
'core1.tyo1.he.net',
'core1.zrh3.he.net',
'core2.sao1.he.net',
'core1.sin1.he.net',
'core1.nbo1.he.net',
'core1.tpe1.he.net',
'core1.ymq1.he.net',
'core2.syd1.he.net'
]

## Method used for sending pings, can be either "exec" or "native". When set
## to "exec" the systems ping command will be executed. When set to "native"
## the plugin will send pings directly.
##
## While the default is "exec" for backwards compatibility, new deployments
## are encouraged to use the "native" method for improved compatibility and
## performance.
method = "exec"

## Number of ping packets to send per interval. Corresponds to the "-c"
## option of the ping command.
count = 1

## Time to wait between sending ping packets in seconds. Operates like the
## "-i" option of the ping command.
ping_interval = 1.0

## NOTE(review): "fielddrop" is a deprecated alias, renamed "fieldexclude"
## in Telegraf 1.21. It still works, but consider migrating once every node
## runs Telegraf >= 1.21.
fielddrop = ["packets_received", "packets_transmitted", "ttl", "standard_deviation_ms"]

interval = "1m" ## Interval to send pings

## Specify the ping executable binary.
## Path differs by platform: Raspberry Pis (Debian), Macs, and everything else.
{% if 'pis' in group_names %}
binary = "/usr/bin/ping"
{% elif 'macs' in group_names %}
binary = "/sbin/ping"
{% else %}
binary = "/bin/ping"
{% endif %}
86 |
--------------------------------------------------------------------------------
/templates/nomad_jobs/template-simple.hcl:
--------------------------------------------------------------------------------
// Starter template for a simple single-task Nomad job.
// Copy this file and replace every occurrence of TEMPLATE, the image,
// the port mapping, and the volume paths.
job "TEMPLATE" {
  region      = "global"
  datacenters = ["{{ datacenter_name }}"]
  type        = "service"

  // Uncomment to pin the job to specific nodes by name.
  // constraint {
  //     attribute = "${node.unique.name}"
  //     operator  = "regexp"
  //     value     = "rpi(1|2|3)"
  // }

  update {
    max_parallel      = 1
    health_check      = "checks"
    min_healthy_time  = "10s"
    healthy_deadline  = "5m"
    progress_deadline = "10m"
    auto_revert       = true
    canary            = 0
    stagger           = "30s"
  }

  group "TEMPLATE" {

    count = 1

    // No local retries; let the scheduler re-evaluate failed tasks.
    restart {
      attempts = 0
      delay    = "30s"
    }

    network {
      port "port1" {
        static = "80"
        to     = "80"
      }
    }

    task "TEMPLATE" {

      env {
        PUID = "${meta.PUID}"
        PGID = "${meta.PGID}"
        TZ   = "America/New_York"
      }

      driver = "docker"
      config {
        image              = ""
        image_pull_timeout = "10m"
        hostname           = "${NOMAD_TASK_NAME}"
        volumes = [
          "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/etc/TEMPLATE/"
        ]
        ports = ["port1"]
      } // docker config

      // Registered with Nomad's built-in service discovery; the tags below
      // configure Traefik routing and TLS.
      service {
        port     = "port1"
        name     = "${NOMAD_TASK_NAME}"
        provider = "nomad"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
          "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
          "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
          "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
        ]

        check {
          type     = "tcp"
          port     = "port1"
          interval = "30s"
          timeout  = "4s"
        }

        check_restart {
          limit = 0
          grace = "1m"
        }

      } // service

      // resources {
      //     cpu    = 100 # MHz
      //     memory = 300 # MB
      // } // resources

    } // task


  } // group


} // job
--------------------------------------------------------------------------------
/templates/nomad_jobs/speedtest.hcl:
--------------------------------------------------------------------------------
// Speedtest Tracker: periodic internet speed tests with a web dashboard.
job "speedtest" {
  region      = "global"
  datacenters = ["{{ datacenter_name }}"]
  type        = "service"

  // Pin to the macmini node.
  constraint {
    attribute = "${node.unique.name}"
    operator  = "regexp"
    value     = "macmini"
  }

  update {
    max_parallel      = 1
    health_check      = "checks"
    min_healthy_time  = "10s"
    healthy_deadline  = "5m"
    progress_deadline = "10m"
    auto_revert       = true
    canary            = 0
    stagger           = "30s"
  }

  group "speedtest" {

    count = 1

    // No local retries; let the scheduler re-evaluate failed tasks.
    restart {
      attempts = 0
      delay    = "30s"
    }

    network {
      // Dynamic host port mapped to the container's port 80.
      port "port1" {
        to = "80"
      }
    }

    task "speedtest" {

      env {
        PUID          = "${meta.PUID}"
        PGID          = "${meta.PGID}"
        TZ            = "America/New_York"
        DB_CONNECTION = "sqlite"
        // APP_KEY comes from the Ansible vault at render time.
        APP_KEY       = "{{ speedtest_app_key }}"
      }

      driver = "docker"
      config {
        image              = "lscr.io/linuxserver/speedtest-tracker:latest"
        image_pull_timeout = "10m"
        hostname           = "${NOMAD_TASK_NAME}"
        volumes = [
          "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/config"
        ]
        ports = ["port1"]
      } // docker config

      // Registered with Nomad's built-in service discovery; the tags below
      // configure Traefik routing and TLS.
      service {
        port     = "port1"
        name     = "${NOMAD_TASK_NAME}"
        provider = "nomad"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
          "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
          "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
        ]

        check {
          type     = "tcp"
          port     = "port1"
          interval = "30s"
          timeout  = "4s"
        }

        check_restart {
          limit = 0
          grace = "1m"
        }

      } // service

      resources {
        cpu    = 1000 # MHz
        memory = 200  # MB
      } // resources

    } // task


  } // group


} // job
--------------------------------------------------------------------------------
/tasks/service_prometheus_nodeExporter.yml:
--------------------------------------------------------------------------------
---
# TASK DESCRIPTION:
# Downloads, installs, and configures Prometheus Node Exporter.
#
# NOTE: This is deprecated, I no longer use Prometheus and have migrated to Telegraf
#
# NOTE(review): the variable 'prometheus_verssion' (sic) keeps its original
# misspelling on purpose -- it is defined outside this file and renaming it
# here alone would break the lookup.

- name: Populate service facts
  ansible.builtin.service_facts:

- name: Stop node_exporter
  become: true
  ansible.builtin.systemd:
    name: node_exporter
    state: stopped
  when: ansible_facts.services["node_exporter.service"] is defined

- name: Ensure group "prometheus" exists
  become: true
  ansible.builtin.group:
    name: prometheus
    state: present

- name: Add the user 'prometheus' with group 'prometheus'
  become: true
  ansible.builtin.user:
    name: prometheus
    group: prometheus
    groups: docker
    append: true

# --------------- Install or Update Prometheus
- name: "Set fact: assume Prometheus install is not needed"
  ansible.builtin.set_fact:
    need_prometheus_install: false

- name: Check if node_exporter is installed
  ansible.builtin.stat:
    path: /usr/local/bin/node_exporter
  register: prometheus_binary_file_location

- name: "Set fact: install Prometheus when the binary is missing"
  ansible.builtin.set_fact:
    need_prometheus_install: true
  when:
    - not prometheus_binary_file_location.stat.exists

# node_exporter prints its version to stderr; '3>&1 1>&2 2>&3' swaps
# stdout/stderr so the pipeline can grep the version number out of it.
- name: Check current version of Prometheus
  ansible.builtin.shell: /usr/local/bin/node_exporter --version 3>&1 1>&2 2>&3 | head -n1 | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'
  register: current_prometheus_version
  failed_when: false
  changed_when: false
  check_mode: false
  when:
    - need_prometheus_install is false

- name: "Set fact: install Prometheus when the installed version is outdated"
  ansible.builtin.set_fact:
    need_prometheus_install: true
  when:
    - need_prometheus_install is false
    - current_prometheus_version.stdout != prometheus_verssion

- name: Install node_exporter
  become: true
  ansible.builtin.unarchive:
    src: "https://github.com/prometheus/node_exporter/releases/download/v{{ prometheus_verssion }}/node_exporter-{{ prometheus_verssion }}.linux-armv7.tar.gz"
    dest: /usr/local/bin
    group: prometheus
    owner: prometheus
    # reference for extra_opts: https://github.com/ansible/ansible/issues/27081
    extra_opts:
      - --strip=1
      - --no-anchored
      - "node_exporter"
    remote_src: true
  when:
    - need_prometheus_install is true

- name: Create node_exporter service
  become: true
  ansible.builtin.template:
    src: node_exporter.service.j2
    dest: /etc/systemd/system/node_exporter.service
    mode: "0644"

- name: Start node_exporter
  become: true
  ansible.builtin.systemd:
    name: node_exporter
    daemon_reload: true
    enabled: true
    state: started
  when:
    - "'nostart' not in ansible_run_tags"
96 |
--------------------------------------------------------------------------------
/files/certs/nomad/cli.pem:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 36356332313236303863343636323336633232646464346636386433363936323464613831343034
3 | 3537343935636631326133393138626233646631616338610a366464303537333035366231303236
4 | 30356139353363303737323566633538303833326365633265616130393462626438313461306537
5 | 6536336437353364310a333134333339396134323937666639376562323334366436636131613435
6 | 65646362393437666233303235363838326663376332336132393364636232323939346635363830
7 | 31353430656136366565393563376538613031396437333763396138313036366164633932643532
8 | 39623965316165653636333465636161363939653839396563346261363232666537313132323764
9 | 62653963333261373132343965663539353134333634353264316532323732613361393433386561
10 | 65303237303235343939623132616264303266633936653339303066633633346466306637306330
11 | 61356138323139363537353836326437393130356165343039323130343331373263653833343466
12 | 34663838653361633836306436333263616435326132316461353931663437393466646662383038
13 | 37646139333437306534366634636566353337353337376533363630326135303832633361386664
14 | 36356536633536623463376637313437633939623434636334616534333839303261323966303761
15 | 62376430653538633834633130353762323163633036626333383434306231326665386432333030
16 | 31303561303136303532386362363431343137633336626136336362653863333237363233326638
17 | 65626163383732323534396162666539343238393938663734306634386238306638663037376433
18 | 32343466393638643764383034613130633964366563323333343831353834666263646335636365
19 | 37303633666432666637633266346438613832666438386333303535626162663336636637366263
20 | 39376338353665376166363337386133363364363165383463373231313264306634313661623638
21 | 65373831386466613239666534313236323230363331363331613366633030616636373366303762
22 | 39313330303665326462653030373839623130643833343730383135353030393237343832626432
23 | 34356136333339323334633939633666366664393433663461646139633339633761646537633238
24 | 30636135346135626161333738666331353466373861363666613332323037623139393065346362
25 | 62653261336637333837313030616564376234363637373030663262396664613731326130343538
26 | 32663730323863353933353264303361356337653965633632623461303035633030643939396230
27 | 62363234633434333330346132333533303833303231616631656165643365393833356331346430
28 | 34306334363262373333363331323536656166656638653239373130633036633630373134353964
29 | 35383731656138313961663039396134313139383835366637373234346165383538313931356264
30 | 31616435393730366561366162633434303332333734636234343063326461636264333231393634
31 | 65346338633236366237653631656561386239376261623064383535386530356664643666363230
32 | 62653864656538616236333131343631343039626335363462396437656366346132303462393530
33 | 66633362343661613462653861316337353963373037376361323163613163356532333136646363
34 | 36653531623132326666323561666431656430383735633537656133636630393330643334373462
35 | 61396334336165303031663836336264343538656134633837373635323238363136336232396361
36 | 34376136313935346363633836316366376439333164386265326561396238656339646239333064
37 | 63623263326437663739333866363165316638346130393763643936303262313133656230613638
38 | 38326531313838313037393536343139326465663064396232643036303031323436633766383763
39 | 6237346436363964366331633064383761326464376166323266
40 |
--------------------------------------------------------------------------------
/main.yml:
--------------------------------------------------------------------------------
---
- name: "Running playbook"
  hosts: all
  # Roll through the inventory one host at a time so cluster services stay up.
  serial: 1

  vars_files:
    - default_variables.yml
    - vault.yml

  pre_tasks:
    - name: Run sanity checks
      ansible.builtin.import_tasks: tasks/sanity.yml
      tags: ["always", "sanity"]
    - name: Populate service facts
      ansible.builtin.service_facts:
      tags: ["nomad", "consul"]
    # Only runs when explicitly requested via '--tags debug'.
    - name: Run debug tasks
      ansible.builtin.import_tasks: tasks/debug.yml
      tags: [never, debug]
    - name: Populate device specific variables
      ansible.builtin.import_tasks: tasks/interpolated_variables.yml
      tags: ["always"]
    - name: Ensure we have up-to-date packages
      ansible.builtin.import_tasks: tasks/packages.yml
      tags: ["packages", "update"]
    # Opt-in flag ('--tags clean') consumed by the orchestration tasks.
    - name: Set clean nomad_jobs_dir variable
      ansible.builtin.set_fact:
        clean_nomad_jobs: true
      tags: ["never", "clean"]

  tasks:
    - name: Configure cluster NFS mounts
      ansible.builtin.import_tasks: tasks/cluster_storage.yml
      tags: ["storage"]
      when:
        - is_nomad_client or is_nomad_server or is_shared_storage_client
    - name: Install Docker
      ansible.builtin.import_tasks: tasks/docker.yml
      tags: ["docker"]
      when: "'nas' not in group_names"
    - name: Install and Upgrade Consul
      ansible.builtin.import_tasks: tasks/consul.yml
      tags: ["consul"]
      when: is_consul_client or is_consul_server
    - name: Install and Upgrade Nomad
      ansible.builtin.import_tasks: tasks/nomad.yml
      tags: ["nomad"]
      when: is_nomad_client or is_nomad_server
    - name: Orchestration Jobs
      ansible.builtin.import_tasks: tasks/orchestration_jobs.yml
      tags: ["jobs", "update"]
    # Deprecated path -- see tasks/service_prometheus_nodeExporter.yml.
    - name: Prometheus Node Exporter
      ansible.builtin.import_tasks: tasks/service_prometheus_nodeExporter.yml
      tags: ["prometheus_exporter"]
      when:
        - is_prometheus_node
        - "'pis' in group_names"
    - name: Install backup scripts
      ansible.builtin.import_tasks: tasks/backups.yml
      tags: ["backup", "backups"]
      when: is_nomad_client or is_nomad_server
    - name: Install and configure Telegraf
      ansible.builtin.import_tasks: tasks/telegraf.yml
      tags: ["telegraf"]
      when: is_telegraf_client
    # Only runs when explicitly requested ('--tags update' or '--tags repos').
    - name: Pull repositories
      ansible.builtin.import_tasks: tasks/pull_repositories.yml
      tags: ["never", "update", "repos"]
    - name: Configure log rotate
      ansible.builtin.import_tasks: tasks/logrotate.yml
      tags: ["logrotate"]
      when: is_cluster_leader
    - name: Install and configure tdarr
      ansible.builtin.import_tasks: tasks/tdarr.yml
      tags: ["tdarr"]
      when: is_tdarr_server or is_tdarr_node

  handlers:
    - name: "Run handlers"
      ansible.builtin.import_tasks: handlers/main.yml
--------------------------------------------------------------------------------
/templates/nomad_jobs/mealie.hcl:
--------------------------------------------------------------------------------
// Mealie: self-hosted recipe manager, configured entirely via env vars.
job "mealie" {
  region      = "global"
  datacenters = ["{{ datacenter_name }}"]
  type        = "service"

  // The published image is amd64-only; keep this job off the ARM Pis.
  constraint {
    attribute = "${attr.cpu.arch}"
    regexp    = "amd64"
  }

  update {
    max_parallel      = 1
    health_check      = "checks"
    min_healthy_time  = "10s"
    healthy_deadline  = "5m"
    progress_deadline = "10m"
    auto_revert       = true
    canary            = 0
    stagger           = "30s"
  }

  group "mealie" {

    count = 1

    // No local retries; let the scheduler re-evaluate failed tasks.
    restart {
      attempts = 0
      delay    = "30s"
    }

    network {
      port "port1" {
        // static = "80"
        to = "80"
      }
    }

    task "mealie" {

      env {
        PUID                    = "${meta.PUID}"
        PGID                    = "${meta.PGID}"
        TZ                      = "America/New_York"
        RECIPE_PUBLIC           = "true"
        RECIPE_SHOW_NUTRITION   = "true"
        RECIPE_SHOW_ASSETS      = "true"
        RECIPE_LANDSCAPE_VIEW   = "true"
        RECIPE_DISABLE_COMMENTS = "false"
        RECIPE_DISABLE_AMOUNT   = "false"
        DB_ENGINE               = "sqlite" # 'sqlite', 'postgres'
        BASE_URL                = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
        AUTO_BACKUP_ENABLED     = "true"
      }

      driver = "docker"
      config {
        image = "hkotel/mealie:latest"
        // Added for parity with the other jobs in this repo: allow slow
        // image pulls without failing the allocation.
        image_pull_timeout = "10m"
        hostname           = "${NOMAD_TASK_NAME}"
        volumes = [
          "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/app/data"
        ]
        ports = ["port1"]
      } // docker config

      // Registered with Nomad's built-in service discovery; the tags below
      // configure Traefik routing and TLS.
      service {
        port     = "port1"
        name     = "${NOMAD_TASK_NAME}"
        provider = "nomad"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
          "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
          "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
        ]

        check {
          type     = "tcp"
          port     = "port1"
          interval = "30s"
          timeout  = "4s"
        }
        check_restart {
          limit = 0
          grace = "1m"
        }
      } // service

      // resources {
      //     cpu    = 100 # MHz
      //     memory = 300 # MB
      // } // resources

    } // task


  } // group


} // job
--------------------------------------------------------------------------------
/templates/nomad_jobs/sabnzbd.hcl:
--------------------------------------------------------------------------------
// SABnzbd: usenet downloader. Pinned to the macmini so the large download
// volumes stay on a wired, high-throughput node.
job "sabnzbd" {
  region      = "global"
  datacenters = ["{{ datacenter_name }}"]
  type        = "service"

  constraint {
    attribute = "${node.unique.name}"
    operator  = "regexp"
    value     = "macmini"
  }

  update {
    max_parallel      = 1
    health_check      = "checks"
    min_healthy_time  = "10s"
    healthy_deadline  = "5m"
    progress_deadline = "10m"
    auto_revert       = true
    canary            = 0
    stagger           = "30s"
  }

  group "sabnzbd" {

    count = 1

    // No local retries; let the scheduler re-evaluate failed tasks.
    restart {
      attempts = 0
      delay    = "30s"
    }

    network {
      port "http" {
        static = "8080"
        to     = "8080"
      }

    }

    task "sabnzbd" {

      env {
        PUID = "${meta.PUID}"
        PGID = "${meta.PGID}"
        TZ   = "America/New_York"
        // linuxserver.io mod adding cron inside the container.
        DOCKER_MODS = "linuxserver/mods:universal-cron"
      }

      driver = "docker"
      config {
        image = "ghcr.io/linuxserver/sabnzbd"
        // Added for parity with the other jobs in this repo: allow slow
        // image pulls without failing the allocation.
        image_pull_timeout = "10m"
        hostname           = "${NOMAD_TASK_NAME}"
        volumes = [
          "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/config",
          "${meta.nfsStorageRoot}/media/downloads/nzb:/nzbd",
          "${meta.nfsStorageRoot}/media/downloads/temp:/incomplete-downloads",
          "${meta.nfsStorageRoot}/media/downloads/complete:/downloads",
          "${meta.nfsStorageRoot}/nate:/nate",
          // Scripts here run at container start (linuxserver.io convention).
          "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}/startup-scripts:/custom-cont-init.d"
        ]
        ports = ["http"]
      } // docker config

      // Served at sab.<domain> rather than the job name.
      service {
        port     = "http"
        name     = "${NOMAD_TASK_NAME}"
        provider = "nomad"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`sab.{{ homelab_domain_name }}`)",
          "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
          "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
          // "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
        ]

        check {
          type     = "tcp"
          port     = "http"
          interval = "30s"
          timeout  = "4s"
        }
        check_restart {
          limit = 0
          grace = "1m"
        }
      } // service

      resources {
        cpu    = 5000 # MHz
        memory = 1000 # MB
      } // resources

    } // task


  } // group


} // job
--------------------------------------------------------------------------------
/files/certs/nomad/nomad-ca.pem:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 35623831653130633938356633623231356465303736666531356231623234656134613135306230
3 | 6336636538396464643136303337333935623764643433390a343538306361326137356237383235
4 | 64396366623632636162323861653035653266653739663330616135643732663065313966306634
5 | 6339393438363561630a616636386663626635366161633638613161613337616638623936663337
6 | 37643761633239653436383130376131643035316135663438626631376561386461346466383636
7 | 30656262393463343733623937636336626262383130663438326138656565336430346638653638
8 | 30373937633033663834663665316563393264306133366165376132396661306466343961643731
9 | 66376237643131356633623539313832656433356233376565663763643335326137396463626539
10 | 30396438333866613130333661363031626363633838393662303865613533616263373065393762
11 | 66666662636237623963353864346539376461626162356165366133356165373438336435366631
12 | 63306435656130323061303834333532326539386564623133356230623864363936666664616530
13 | 32633330306133363364636339346461653731343361323964623733336562613730336238623537
14 | 32663237326430643861316231333762326663646534633431326165366339613465326262616438
15 | 61346637346636333832323037636630313965383633313531666333373265653231313835643731
16 | 33343865633564356134326463373438623739356330333863666262343532616164663738656266
17 | 64373538616665643830613232373034323138623036386135396561363132396432623439333233
18 | 39643066383338373266336465323930356466303637323937383532396464323939363737656634
19 | 38383039633764306666393564633430343438333636656232656464616561376639666434383065
20 | 63626537633832376536333765363439626261393765656638623566616666313838343666303765
21 | 66643432316637626539393262346131643265613030633439656362383461643830343430386336
22 | 64313435346630376438633764363961636432363435636634393365316563386439633339323064
23 | 36323835363865633862396634346334393037373136633062366530316164323533363261393939
24 | 62303530663332393134373731393062393163383230653463653933313965633366646566636462
25 | 38366634353134663439373837663434343433623531333865653038353431353161626532663264
26 | 35383265323565626136323062366636653632643336376161636337623636333035663262613438
27 | 38333161616339373763323236363538326166353139626336633766336236663732363965333032
28 | 32646564616133376662396438666364653433393739363632663138623238366366346338373565
29 | 62353535663765663335373032393332313037383732306264343538306237303033663139623033
30 | 39656565356337393339616634366339363138316162303861633033303765393536633763643835
31 | 63366262393662313166656461326138356135653763356362326261623839346263386363373166
32 | 36353233346133613961303736383836333766386634393263313335306665353762316131353435
33 | 64353630373633333366666638613364396135393130333261666230666366646461306133626333
34 | 39346336326665303333323464396565313934396361313232313738653538393535633662646135
35 | 31636438616430306230326336636433613162643334363232353938353238393037636333636134
36 | 37653164633136373735303030343236613437316533383434653036373834623237663566623632
37 | 31363239396562613839356232346665373334656266343938613635373632333165303737343164
38 | 66303532313435383831373939366264316230653162626536336231646661323731383539323535
39 | 61356261626535336661333831613635376430356662633561373765373033363737316138326465
40 | 333666633034333730333137363462326134
41 |
--------------------------------------------------------------------------------
/templates/nomad_jobs/jellyfin.hcl:
--------------------------------------------------------------------------------
// Jellyfin media server, pinned to the macmini node.
job "jellyfin" {
  region      = "global"
  datacenters = ["{{ datacenter_name }}"]
  type        = "service"

  constraint {
    attribute = "${node.unique.name}"
    operator  = "regexp"
    value     = "macmini"
  }

  update {
    max_parallel      = 1
    health_check      = "checks"
    min_healthy_time  = "10s"
    healthy_deadline  = "5m"
    progress_deadline = "10m"
    auto_revert       = true
    canary            = 0
    stagger           = "30s"
  }

  group "jellyfin" {

    count = 1

    // No local retries; let the scheduler re-evaluate failed tasks.
    restart {
      attempts = 0
      delay    = "30s"
    }

    network {
      port "webui" {
        static = "8096"
        to     = "8096"
      }
      // NOTE(review): 7359/udp is presumably Jellyfin's client
      // auto-discovery port -- confirm against Jellyfin networking docs.
      port "udp1" {
        static = "7359"
        to     = "7359"
      }
    }

    task "jellyfin" {

      env {
        PUID = "${meta.PUID}"
        PGID = "${meta.PGID}"
        TZ   = "America/New_York"
      }

      driver = "docker"
      config {
        image              = "lscr.io/linuxserver/jellyfin:latest"
        image_pull_timeout = "10m"
        hostname           = "${NOMAD_TASK_NAME}"
        volumes = [
          "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/config",
          "${meta.nfsStorageRoot}/media/media/movies:/data/movies",
          "${meta.nfsStorageRoot}/media/media/tv:/data/tv"
        ]
        ports = ["webui", "udp1"]
      } // docker config

      // Registered with Nomad's built-in service discovery; the tags below
      // configure Traefik routing and TLS.
      service {
        port     = "webui"
        name     = "${NOMAD_TASK_NAME}"
        provider = "nomad"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
          "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
          "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
        ]

        check {
          type     = "tcp"
          port     = "webui"
          interval = "30s"
          timeout  = "4s"
        }

        check_restart {
          limit = 0
          grace = "1m"
        }

      } // service

      resources {
        cpu    = 2500 # MHz
        memory = 750  # MB
      } // resources

    } // task
  } // group
} // job
--------------------------------------------------------------------------------
/templates/nomad_jobs/code.hcl:
--------------------------------------------------------------------------------
// openvscode-server: browser-based VS Code, fronted by Traefik + Authelia.
job "code" {
  region      = "global"
  datacenters = ["{{ datacenter_name }}"]
  type        = "service"

  // constraint {
  //     attribute = "${node.unique.name}"
  //     operator  = "regexp"
  //     value     = "rpi(1|2|3)"
  // }

  update {
    max_parallel      = 1
    health_check      = "checks"
    min_healthy_time  = "10s"
    healthy_deadline  = "5m"
    progress_deadline = "10m"
    auto_revert       = true
    canary            = 0
    stagger           = "30s"
  }

  group "code" {

    count = 1

    // No local retries; let the scheduler re-evaluate failed tasks.
    restart {
      attempts = 0
      delay    = "30s"
    }

    network {
      port "port1" {
        // static = "80"
        to = "3000"
      }
    }

    task "code" {

      env {
        PUID          = "${meta.PUID}"
        PGID          = "${meta.PGID}"
        TZ            = "America/New_York"
        SUDO_PASSWORD = "{{ simple_web_password }}"
        PROXY_DOMAIN  = "code.{{ homelab_domain_name }}"
        // SECURITY(review): weak hardcoded connection token; access is
        // gated by the authelia@file middleware below, but consider moving
        // this to the Ansible vault like SUDO_PASSWORD.
        CONNECTION_TOKEN = "1234"
        DOCKER_MODS      = "linuxserver/mods:code-server-python3|linuxserver/mods:code-server-shellcheck|linuxserver/mods:universal-git|linuxserver/mods:code-server-zsh"
        // CONNECTION_TOKEN  = supersecrettoken
        // CONNECTION_SECRET = supersecrettoken
      }

      driver = "docker"
      config {
        image = "lscr.io/linuxserver/openvscode-server"
        // Added for parity with the other jobs in this repo: allow slow
        // image pulls without failing the allocation.
        image_pull_timeout = "10m"
        hostname           = "${NOMAD_JOB_NAME}"
        volumes = [
          "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_JOB_NAME}:/config"
        ]
        ports = ["port1"]
      } // docker config

      // Registered with Nomad's built-in service discovery; Traefik routes
      // HTTPS traffic and enforces Authelia SSO via the middlewares tag.
      service {
        port     = "port1"
        name     = "${NOMAD_JOB_NAME}"
        provider = "nomad"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
          "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
          "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
          "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
          "traefik.http.routers.${NOMAD_JOB_NAME}.middlewares=authelia@file,redirectScheme@file"
        ]

        check {
          type     = "tcp"
          port     = "port1"
          interval = "30s"
          timeout  = "4s"
        }
        check_restart {
          limit = 0
          grace = "1m"
        }
      } // service

      resources {
        cpu    = 1500 # MHz
        memory = 300  # MB
      } // resources

    } // task


  } // group


} // job
--------------------------------------------------------------------------------
/templates/nomad_jobs/hishtory-server.hcl:
--------------------------------------------------------------------------------
// hiSHtory sync server: shared shell-history backend for cluster hosts.
job "hishtory" {

  region      = "global"
  datacenters = ["{{ datacenter_name }}"]
  type        = "service"

  # README
  # https://github.com/linuxserver/docker-hishtory-server
  # https://github.com/ddworken/hishtory/blob/master/README.md

  // constraint {
  //     attribute = "${node.unique.name}"
  //     operator  = "regexp"
  //     value     = "rpi(1|2|3)"
  // }

  update {
    max_parallel      = 1
    health_check      = "checks"
    min_healthy_time  = "10s"
    healthy_deadline  = "5m"
    progress_deadline = "10m"
    auto_revert       = true
    canary            = 0
    stagger           = "30s"
  }

  group "hishtory" {

    count = 1

    // No local retries; let the scheduler re-evaluate failed tasks.
    restart {
      attempts = 0
      delay    = "30s"
    }

    network {
      port "port1" {
        to = "8080"
      }
    }

    task "hishtory" {

      env {
        PUID = "${meta.PUID}"
        PGID = "${meta.PGID}"
        TZ   = "America/New_York"
        // SQLite backing store lives on the NFS-mounted /config volume.
        HISHTORY_SQLITE_DB = "/config/hishtory.db"
      }

      driver = "docker"
      config {
        image              = "lscr.io/linuxserver/hishtory-server:latest"
        image_pull_timeout = "10m"
        hostname           = "${NOMAD_TASK_NAME}"
        volumes = [
          "${meta.nfsStorageRoot}/pi-cluster/${NOMAD_TASK_NAME}:/config"
        ]
        ports = ["port1"]
      } // docker config

      // Registered with Nomad's built-in service discovery; the tags below
      // configure Traefik routing and TLS.
      service {
        port     = "port1"
        name     = "${NOMAD_TASK_NAME}"
        provider = "nomad"
        tags = [
          "traefik.enable=true",
          "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
          "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
          "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
          "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare"
        ]

        check {
          type     = "tcp"
          port     = "port1"
          interval = "30s"
          timeout  = "4s"
        }

        check_restart {
          limit = 0
          grace = "1m"
        }

      } // service

      resources {
        cpu    = 1800 # MHz
        memory = 800  # MB
      } // resources

    } // task


  } // group


} // job
--------------------------------------------------------------------------------
/tasks/docker.yml:
--------------------------------------------------------------------------------
---
# TASK DESCRIPTION:
#   Installs Docker on the target server (Debian via the official convenience
#   script; macOS via homebrew plus a manual Docker.app setup step).
#
# NOTE: when the `docker` binary is absent, `ansible.builtin.command` reports
# rc 2 ("No such file or directory"), never rc 1, so every install step is
# gated on `rc != 0` (the old `rc == 1` gates could never match and the
# install steps were silently skipped on hosts without docker).

- name: Check if Docker is already present
  ansible.builtin.command:
    cmd: docker --version
  register: docker_command_result
  changed_when: false # read-only probe; must never report "changed"
  failed_when: false # a missing docker binary is the expected first-run case

- name: Install docker on Debian
  when: ansible_os_family == 'Debian'
  block:
    - name: "Add docker local filesystem storage directory"
      ansible.builtin.file:
        path: "{{ rpi_localfs_service_storage }}"
        mode: "0755"
        state: directory

    - name: Download Docker install convenience script
      ansible.builtin.get_url:
        url: "https://get.docker.com/"
        dest: /tmp/get-docker.sh
        mode: "0775"
      when: docker_command_result.rc != 0

    - name: Run Docker install convenience script
      ansible.builtin.command: /tmp/get-docker.sh
      environment:
        CHANNEL: stable
      register: docker_install
      failed_when: docker_install.rc > 0
      changed_when: docker_install.rc == 0
      when: docker_command_result.rc != 0

    - name: Make sure Docker CE is the version specified
      ansible.builtin.apt:
        name: "docker-ce"
        state: present
      when: docker_command_result.rc != 0

    - name: Ensure Docker is started
      ansible.builtin.service:
        name: docker
        state: started
        enabled: true

    - name: Ensure docker users are added to the docker group
      become: true
      ansible.builtin.user:
        name: "{{ ansible_user }}"
        groups: docker
        append: true
      when: docker_command_result.rc != 0

- name: Install docker on macOS
  when: "'macs' in group_names"
  block:
    - name: "Add docker directory to ~/Library"
      ansible.builtin.file:
        path: "{{ mac_localfs_service_storage }}"
        mode: "0755"
        state: directory

    - name: Install base homebrew packages
      community.general.homebrew:
        name: docker
        state: present
        update_homebrew: false
        upgrade_all: false
      when: docker_command_result.rc != 0

    - name: Open docker application
      ansible.builtin.command:
        cmd: open /Applications/Docker.app
      register: docker_open_app
      failed_when: docker_open_app.rc > 0
      changed_when: docker_open_app.rc == 0
      when: docker_command_result.rc != 0

    # Docker Desktop needs interactive first-run setup; stop the play so the
    # operator can finish it and rerun.
    - name: Must install Docker manually
      ansible.builtin.debug:
        msg: |
          Docker must be installed manually on MacOS. Log in to mac to install then rerun playbook

          Be certain to configure the following:
          - run on login
          - add '{{ mac_storage_mount_point }}' to mountable file system directories
      when: docker_command_result.rc != 0

    - name: End play
      ansible.builtin.meta: end_play
      when: docker_command_result.rc != 0
--------------------------------------------------------------------------------
/templates/nomad_jobs/stash.hcl:
--------------------------------------------------------------------------------
job "stash" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // Pin this job to the macmini node.
    constraint {
        attribute = "${node.unique.name}"
        operator  = "regexp"
        value     = "macmini"
    }

    // Rolling-update policy: one allocation at a time, auto-revert on a failed deploy.
    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "stashGroup" {

        count = 1

        restart {
            attempts = 0
            delay    = "30s"
        }

        network {
            // Dynamic host port mapped to Stash's container port 9999
            // (must agree with STASH_PORT below).
            port "port1" {
                to = "9999"
            }
        }

        task "stash" {

            // STASH_* variables point the app at the container paths provided
            // by the volume mounts in the docker config below.
            env {
                PUID                = "${meta.PUID}"
                PGID                = "${meta.PGID}"
                TZ                  = "America/New_York"
                STASH_STASH         = "/data/"
                STASH_GENERATED     = "/generated/"
                STASH_METADATA      = "/metadata/"
                STASH_CACHE         = "/cache/"
                STASH_PORT          = "9999"
                STASH_EXTERNAL_HOST = "https://${NOMAD_JOB_NAME}.{{ homelab_domain_name }}"
            }

            driver = "docker"
            config {
                image    = "stashapp/stash:latest"
                hostname = "${NOMAD_JOB_NAME}"
                volumes = [
                    "${meta.nfsStorageRoot}/nate/.stash/cache:/cache",
                    "${meta.nfsStorageRoot}/nate/.stash/config:/root/.stash",
                    "${meta.nfsStorageRoot}/nate/.stash/generated:/generated",
                    "${meta.nfsStorageRoot}/nate/.stash/media:/data",
                    "${meta.nfsStorageRoot}/nate/.stash/metadata:/metadata",
                    "${meta.nfsStorageRoot}/nate/.stash/blobs:/blobs",
                    "/etc/timezone:/etc/timezone:ro"
                ]
                ports = ["port1"]
            } // docker config

            service {
                port     = "port1"
                name     = "${NOMAD_JOB_NAME}"
                provider = "nomad"
                // Traefik routes https://stash.{{ homelab_domain_name }} to this service.
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare",
                ]

                check {
                    type     = "tcp"
                    port     = "port1"
                    interval = "30s"
                    timeout  = "4s"
                }
                // limit = 0 disables restart-on-failing-check after the 1m grace.
                check_restart {
                    limit = 0
                    grace = "1m"
                }
            } // service

            resources {
                cpu    = 3000 # MHz
                memory = 400 # MB
            } // resources

        } // task


    } // group


} // job

--------------------------------------------------------------------------------
/files/certs/nomad/client.pem:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 64633662633464336163353665653938313265386465306438303432613934616238653839663336
3 | 6131666230663261623138343862386433613831643730370a623165636264326363663266393438
4 | 36366635313134663865396433643561306336386264663333323638356530633062343832626362
5 | 6536643333653439630a623666376561613963653437303535326433313730346134623430643033
6 | 65343934386337396339383332383530373963383531336631343332343166316237383632313332
7 | 63363432646237616464616139636533313137663330663730313032633239633866393132386663
8 | 33643162363665383266326634613132656663623831306631386233366161386438396464383936
9 | 62326333363662376533383834633534336339323063643066323265636535366339623761333239
10 | 63666262646136323235613161353162306534306534383232663532333636376363396239663232
11 | 39333138356366356437393864303232623733343165646132633865396566646431383931386133
12 | 36633261656131633538343131336132613435323533353761323438306266343834366165323831
13 | 32393532623162383539343731636238616464313561643535343031393431666465636236373864
14 | 36383234656639633137386466323364323265663334396532356662323961366438643563313065
15 | 31623432303939346465393962663164313039646134646532613461333361393636613334373736
16 | 39383861343531373939653964336163643330343032383533666533383762393864613264316139
17 | 34623632613336343530353930383564383532383838363265663532666135336538323639623637
18 | 32323436316362343536663636636365353962633835343662613264336266336439623833636264
19 | 39613238663837313536343866323165313837656362323532363064336136643435316463373736
20 | 34343734393830313664303030303565633939396666323463363935663639356264353035363862
21 | 35633039396638653931376134373564343339393639393665666566386666633261653638316666
22 | 33316266343039643138373634363661613536643866366130663031336166333866376337343835
23 | 64383962333839663161343139663130623830626166333737363336373936663432353536336562
24 | 30663836363466396239343838633861376638353131643038313762633733383163656363626662
25 | 36383533353666616530633339346461333539383732613462666166343461383232303163636163
26 | 64343838646137643835386230383230626235623965316230333634656662366231633763366666
27 | 39393930616530623662636161663336653036643265363765656130376365613363636461646164
28 | 64613364623839353739653762303966353134396639383463326138633337303337613132326134
29 | 61393131343232353963363062323134613639326265623338353030643931626664363635353734
30 | 39363237646339366330623239323066363465666235663461366465643838626363326133353137
31 | 36386339323939333838643930376336333536386635656361623533613565646162633933306266
32 | 38626638353033616535623263363765613236636439303234306666346430373462613666643631
33 | 36313932393862386337363631393965376436396630653937663264366531636530356437633763
34 | 64356262626563383038643063653537646165613734303964643633643961303535303563363933
35 | 66343866633137633235363634653665326134356633613735383437653830636336663263303437
36 | 63663230653564643137386564346232626264623537383763313936396666643464393163616230
37 | 33633333383063376331643462653363373837333830613362383532383962353432313064623233
38 | 35623838633739356665626533366430333535666366383262646336353933373230383235336261
39 | 32333234303133643630383334633134396434303561353534623134616539626165616132616331
40 | 64373133316238396330316435393832326430376238383266326330613037656433636334666637
41 | 38346562353630366637666539616362313239363261363933313534666436383765643934633565
42 | 64646534353732393230633838303332366338356137383437396439383261343432656334353933
43 | 6535303963366434396464363064616432633138636230616666
44 |
--------------------------------------------------------------------------------
/files/certs/nomad/server.pem:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 34306465616539336630613562366536663339636564303738333835356362613532663739643464
3 | 6664623436333737653463623961613562393334613231660a316665613530353863396663373231
4 | 32306430303065326639343564383262363031373137306664653662326136333933353061373731
5 | 3036356237356534620a343434653836373733666135636435383761383333316439363337303439
6 | 66346161666633656562643433313930313332306439323535613361303333373762343930356434
7 | 30616161323162626364323662393232336661303766396332376234643638616533663534633031
8 | 66383165396163626137373731386462333436323037323032366134303664363133343537643165
9 | 32313433386332623135336561383638373666363338613061623439393165366435353963613534
10 | 39626564626466666432653933653663666330613666393330633735353931313639373537633332
11 | 35623836396265306237613365653936623337313962616131316637353162306236626632336533
12 | 66396537336439313437326462346166333535353065343037343130653131633832333264343465
13 | 31323163656237353337646631326138653362326263326537383137653132323661666631346533
14 | 33386331373039613763326366373133663230373331313632303833353061353733363838623239
15 | 64613334666266636461633762326631383565373533343166626431316365363935346334646531
16 | 33313337623434646534626435333333326533386234663834383661343766313139653262346137
17 | 64663034663333663462663863666430396266373964633231633763323139643639313637363731
18 | 63343065653461363764306132366535323432303062333263326137346532636234303566326433
19 | 62626130323561326534316463343133633362356361373965333665616265316233616538633633
20 | 33656133313434316534396432643333363963643137393836313165333965646266623564323531
21 | 61633534363465313262393566353733663862353265376563626138643234303565613739386130
22 | 33346336643861646635663330373361326265666461656338323962656532613637356535616462
23 | 33356562363262336466303563353162343632633639643237313236313831383063653731616135
24 | 63643830656432383139363461666362636632613737623436333537613034643961313262396535
25 | 33316261623963333837353839353431663361393134656130386137396362613139656563396565
26 | 39323362386333646163313565346565653738616162363563613733333038383636386364316664
27 | 39343232356434373031396630636136613331323630346437366166343432626131656562323537
28 | 66666165653836656437363265393037656266643164303362383337326130383630303362366631
29 | 32633636653564653162663033323130623336643231626665353630303031366639353765306239
30 | 62666532646635383935346135353963613435656363343063306534323339393233386532303263
31 | 35356532316337323264633631653736633731396366663237373035393861663138346537333338
32 | 37663264383135626636303163383461313037313330383332636339343661343164633833396238
33 | 30663134373431663336343537643635666265303461643435643661343333396533643763636238
34 | 32373338666461613939386630303666643461333030663432353938343835373166363332376263
35 | 36306133316436633632326362373438643061356638663964393431616165393231346362303164
36 | 66333638646136646465663232663866353833303833623765653731643464653065363663616632
37 | 65633766333264663634343965313863303337343766306365653464386662333939393835353732
38 | 66613133666533663535376337313364643938333939303339646161343162393964613431393431
39 | 30383534333165313630613663316639343031346532333933636238313636306238343131663862
40 | 36633866313530303634326261396637363031623365663030656231623939376635626265383333
41 | 39323133363338643537363265386237623065343162346538346663306334306239343864396261
42 | 32393334373439653163343832306365323763653231313631613537323664616264313964323263
43 | 3161323630326465333035363461316635326330616337333238
44 |
--------------------------------------------------------------------------------
/tasks/orchestration_jobs.yml:
--------------------------------------------------------------------------------
---
# TASK DESCRIPTION:
# Keeps Nomad jobs and docker-compose-files in sync. All job and docker-compose files are written as Jinja2 templates. Performs the following:
#
# - Syncs Nomad jobs
# - Syncs docker-compose files
# - Ensures we have directories on the local filesystem for jobs which can't function with networked
#   storage. (I'm looking at you, 'arr' apps). These folders must be created, even if empty, to
#   allow mounting nomad local storage end-points
#
# NOTE: the group test must be written ('macs' in group_names), NOT the old
# ("'macs' in group_names"): the quoted form is a non-empty string literal,
# which Jinja treats as always-true, so the whole `or` condition matched
# every host regardless of group membership.

- name: "Sync Nomad Jobs"
  block:
    - name: Remove nomad jobs directory
      ansible.builtin.file:
        path: "{{ nomad_jobfile_location }}"
        state: absent
      when:
        - is_nomad_client or is_nomad_server or ('macs' in group_names)
        - clean_nomad_jobs

    - name: (Re)Create nomad jobs directory
      ansible.builtin.file:
        path: "{{ nomad_jobfile_location }}"
        state: directory
        mode: "0755"
      when:
        - is_nomad_client or is_nomad_server or ('macs' in group_names)
        - "'nas' not in group_names"

    - name: Synchronize nomad job templates (jinja)
      ansible.builtin.template:
        src: "{{ item }}"
        dest: "{{ nomad_jobfile_location }}/{{ item | basename | regex_replace('.j2$', '') }}"
        mode: "0644"
      with_fileglob: "templates/nomad_jobs/*.j2"
      when:
        - is_nomad_client or is_nomad_server or ('macs' in group_names)
        - "'nas' not in group_names"

    - name: Synchronize nomad job templates (hcl)
      ansible.builtin.template:
        src: "{{ item }}"
        dest: "{{ nomad_jobfile_location }}/{{ item | basename }}"
        mode: "0644"
      with_fileglob: "templates/nomad_jobs/*.hcl"
      when:
        - is_nomad_client or is_nomad_server or ('macs' in group_names)
        - "'nas' not in group_names"

    # These dirs must exist (even empty) so Nomad can mount local storage
    # endpoints for jobs that can't run on networked storage.
    - name: Ensure we have local storage folders
      become: true
      ansible.builtin.file:
        path: "{{ interpolated_localfs_service_storage }}/{{ item }}"
        state: directory
        mode: "0777"
        group: "{{ ansible_user_gid }}"
        owner: "{{ ansible_user_uid }}"
      when:
        - is_nomad_client or is_nomad_server
      loop: "{{ service_localfs_dirs }}"

- name: Sync docker compose files
  when: is_docker_compose_client
  block:
    - name: Confirm compose file dir exists
      ansible.builtin.file:
        path: "{{ docker_compose_file_location }}"
        state: directory
        mode: "0755"

    - name: Synchronize docker-compose files
      ansible.builtin.template:
        src: "{{ item }}"
        dest: "{{ docker_compose_file_location }}/{{ item | basename | regex_replace('.j2$', '') }}"
        mode: "0644"
      with_fileglob: "../templates/docker_compose_files/*.j2"

- name: "Prune docker caches"
  community.docker.docker_prune:
    containers: true
    images: true
    images_filters:
      dangling: false
    networks: true
    volumes: true
    builder_cache: true
  when:
    - is_docker_compose_client or is_nomad_client or is_nomad_server
--------------------------------------------------------------------------------
/templates/nomad_jobs/influxdb.hcl:
--------------------------------------------------------------------------------
job "influxdb" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // constraint {
    //     attribute = "${attr.cpu.arch}"
    //     operator  = "regexp"
    //     value     = "64"
    // }

    // Rolling-update policy: one allocation at a time, auto-revert on a failed deploy.
    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "influxdbGroup" {
        count = 1
        network {
            // Static host port so clients can reach the HTTP API at a
            // well-known address; maps to container port 8086.
            port "httpAPI" {
                static = "{{ influxdb_port }}"
                to     = "8086"
            }
        }

        restart {
            attempts = 0
            delay    = "30s"
        }

        task "create_filesystem" {
            // Prestart hook: copy the most recent backup into place on the local
            // computer. influxdb will not work with its database in an NFS
            // share, so state lives on local disk (see save_configuration below).

            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.restoreCommand}"
                args = [
                    "${meta.restoreCommand1}",
                    "${meta.restoreCommand2}",
                    "${NOMAD_JOB_NAME}",
                    "${meta.restoreCommand3}"
                ]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }

        } // /task create_filesystem

        task "influxdb" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
            }

            driver = "docker"
            config {
                image    = "influxdb:{{ influxdb_version }}"
                hostname = "${NOMAD_JOB_NAME}"
                ports    = ["httpAPI"]
                volumes = [
                    // Node-local (non-NFS) storage, seeded by create_filesystem above.
                    "${meta.localStorageRoot}/influxdb:/var/lib/influxdb"
                ]
            } // docker config

            service {
                port     = "httpAPI"
                name     = "${NOMAD_JOB_NAME}"
                provider = "nomad"

                check {
                    type     = "tcp"
                    port     = "httpAPI"
                    interval = "30s"
                    timeout  = "4s"
                }

                // limit = 0 disables restart-on-failing-check after the 1m grace.
                check_restart {
                    limit = 0
                    grace = "1m"
                }


            } // service

            resources {
                cpu    = 1000 # MHz
                memory = 400 # MB
            } // resources

        } // /task influxdb

        task "save_configuration" {
            // Poststop hook: back up the local database so the next placement
            // can restore it via create_filesystem.
            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.backupCommand}"
                args = [
                    "${meta.backupAllocArg1}",
                    "${meta.backupAllocArg2}",
                    "${meta.backupAllocArg3}",
                    "${meta.backupAllocArg4}",
                    "${meta.backupAllocArg5}",
                    "${NOMAD_JOB_NAME}",
                    "${meta.backupAllocArg6}"
                ]
            }
            lifecycle {
                hook    = "poststop"
                sidecar = false
            }
        } // /task save_configuration
    } // group
} // job

--------------------------------------------------------------------------------
/templates/nomad_jobs/diagnostics.hcl:
--------------------------------------------------------------------------------
1 | job "diagnostics" {
2 | region = "global"
3 | datacenters = ["{{ datacenter_name }}"]
4 | type = "service"
5 |
6 | constraint {
7 | attribute = "${node.unique.name}"
8 | operator = "regexp"
9 | value = "macmini"
10 | }
11 |
12 | group "diagnostics" {
13 |
14 | count = 1
15 |
16 | restart {
17 | attempts = 0
18 | delay = "30s"
19 | }
20 |
21 | network {
22 | port "whoami" {
23 | to = 80
24 | }
25 | }
26 |
27 | task "diagnostics" {
28 |
29 | // env {
30 | // KEY = "VALUE"
31 | // }
32 |
33 | driver = "docker"
34 | config {
35 | image = "alpine:latest"
36 | hostname = "${NOMAD_JOB_NAME}"
37 | args = [
38 | "/bin/sh",
39 | "-c",
40 | "chmod 755 /local/bootstrap.sh && /local/bootstrap.sh"
41 | ]
42 | volumes = [
43 | "${meta.nfsStorageRoot}/pi-cluster/tmp:/diagnostics",
44 | "${meta.localStorageRoot}:/docker"
45 | ]
46 | } // docker config
47 |
48 | template {
49 | destination = "local/bootstrap.sh"
50 | data = < 0
9 | changed_when: automount_output.rc == 0
10 | when:
11 | - "'macs' in group_names"
12 | - not ansible_check_mode
13 | listen: "mac_run_automount"
14 |
# Re-runs the macOS automounter (flags per `man automount`; -u also unmounts).
- name: Mount and unmount shared storage on Mac
  become: true
  ansible.builtin.command:
    cmd: automount -cvu
  register: automount_output
  failed_when: automount_output.rc > 0
  changed_when: automount_output.rc == 0
  when:
    - "'macs' in group_names"
    - not ansible_check_mode # `command` cannot run in check mode
  listen: "mac_run_automount_unmount"
26 |
27 | ##################################### TELEGRAF
# NOTE: `ansible.builtin.service` does not return an `rc` key, so the previous
# `failed_when`/`changed_when` expressions referencing `telegraf_service.rc`
# raised an undefined-attribute conditional error whenever this handler fired
# (the nomad handler below has the analogous checks commented out for the same
# reason). The module's own failed/changed reporting is sufficient.
- name: (Re)Start telegraf (Debian)
  become: true
  ansible.builtin.service:
    name: telegraf
    state: restarted
  when:
    - ansible_os_family == 'Debian'
  listen: restart_telegraf
39 |
# macOS variant: restart the homebrew-managed telegraf service.
# `ignore_errors` keeps the play alive even when failed_when marks a
# nonzero exit as failed (brew services restart is best-effort here).
- name: (Re)Start telegraf
  ansible.builtin.shell:
    cmd: /usr/local/bin/brew services restart telegraf
    executable: /usr/local/bin/bash
  ignore_errors: true
  register: telegraf_service
  failed_when: telegraf_service.rc > 0
  changed_when: telegraf_service.rc == 0
  when:
    - ansible_os_family == 'Darwin'
  listen: restart_telegraf
51 |
52 | ##################################### NOMAD
53 |
# Handler chain for "restart nomad": Debian restarts via systemd; macOS
# unloads then reloads the launchd agent; finally the node is polled to
# confirm the agent actually rejoined. All steps are skipped under the
# 'nostart' run tag.
- name: Restart nomad (Debian)
  become: true
  ansible.builtin.systemd_service:
    name: nomad
    enabled: true
    state: restarted
  register: nomad_service
  # failed_when: nomad_service.Result != "success"
  # changed_when: nomad_service.Result == "success"
  when:
    - ansible_os_family == 'Debian'
    - "'nostart' not in ansible_run_tags"
  listen: "restart nomad"

# launchd has no single "restart"; unload then load the agent plist.
- name: "Unload nomad agent (MacOSX)"
  ansible.builtin.command:
    cmd: "launchctl unload -w {{ nomad_plist_macos }}"
  register: nomad_service
  changed_when: nomad_service.rc == 0
  failed_when: nomad_service.rc > 0
  when:
    - ansible_os_family == 'Darwin'
    - "'nostart' not in ansible_run_tags"
  listen: "restart nomad"

- name: "Load the nomad agent (MacOSX)"
  ansible.builtin.command:
    cmd: "launchctl load -w {{ nomad_plist_macos }}"
  register: nomad_service
  changed_when: nomad_service.rc == 0
  failed_when: nomad_service.rc > 0
  when:
    - ansible_os_family == 'Darwin'
    - "'nostart' not in ansible_run_tags"
  listen: "restart nomad"

# Sleep gives the agent time to register before checking its node status.
- name: "Ensure nomad is really running"
  ansible.builtin.shell:
    cmd: "set -o pipefail && sleep 10 && /usr/local/bin/nomad node status -self -short | grep {{ inventory_hostname }}"
  args:
    executable: /bin/bash
  register: node_status_response
  failed_when: node_status_response.rc > 0
  changed_when: false
  when: "'nostart' not in ansible_run_tags"
  listen: "restart nomad"
100 | # - name: "Ensure sure Nomad service is really running"
101 | # ansible.builtin.command:
102 | # cmd: systemctl is-active nomad
103 | # register: is_nomad_really_running
104 | # changed_when: false
105 | # failed_when: is_nomad_really_running.rc != 0
106 | # when:
107 | # - ansible_os_family == 'Debian'
108 | # - "'nostart' not in ansible_run_tags"
109 | # listen: "restart nomad"
110 |
111 | ##################################### CONSUL
112 |
--------------------------------------------------------------------------------
/templates/consul_services/consul_synology_checks.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "services": [{
3 | "name": "sabnzbd",
4 | "id": "sabnzbd",
5 | "tags": [
6 | "traefik.enable=true",
7 | "traefik.http.services.sabnzbd.loadbalancer.server.port=8080",
8 | "traefik.http.routers.sabnzbd.rule=Host(`sab.{{ homelab_domain_name }}`)",
9 | "traefik.http.routers.sabnzbd.entryPoints=web,websecure",
10 | "traefik.http.routers.sabnzbd.service=sabnzbd",
11 | "traefik.http.routers.sabnzbd.tls=true",
12 | "traefik.http.routers.sabnzbd.tls.certresolver=cloudflare"
13 | ],
14 | "checks": [{
15 | "id": "sabnzbd-http-check",
16 | "http": "http://{{ synology_second_ip }}:8080",
17 | "interval": "30s",
18 | "timeout": "5s",
19 | "success_before_passing": 3,
20 | "failures_before_critical": 3
21 | }]
22 | },
23 | {
24 | "name": "jellyfin",
25 | "id": "jellyfin",
26 | "tags": [
27 | "traefik.enable=true",
28 | "traefik.http.services.jellyfin.loadbalancer.server.port=8096",
29 | "traefik.http.routers.jellyfin.rule=Host(`jellyfin.{{ homelab_domain_name }}`)",
30 | "traefik.http.routers.jellyfin.entryPoints=web,websecure",
31 | "traefik.http.routers.jellyfin.service=jellyfin",
32 | "traefik.http.routers.jellyfin.tls=true",
33 | "traefik.http.routers.jellyfin.tls.certresolver=cloudflare"
34 | ],
35 | "checks": [{
36 | "id": "jellyfin-http-check",
37 | "http": "http://{{ synology_second_ip }}:8096",
38 | "interval": "30s",
39 | "timeout": "5s",
40 | "success_before_passing": 3,
41 | "failures_before_critical": 3
42 | }]
43 | },
44 | {
45 | "name": "synology",
46 | "id": "synology",
47 | "tags": [
48 | "traefik.enable=true",
49 | "traefik.http.services.synology.loadbalancer.server.port=5000",
50 | "traefik.http.routers.synology.rule=Host(`nas.{{ homelab_domain_name }}`)",
51 | "traefik.http.routers.synology.entryPoints=web,websecure",
52 | "traefik.http.routers.synology.service=synology",
53 | "traefik.http.routers.synology.tls=true",
54 | "traefik.http.routers.synology.tls.certresolver=cloudflare"
55 | ],
56 | "checks": [{
57 | "id": "synology-http-check",
58 | "http": "http://{{ synology_second_ip }}:5000",
59 | "interval": "30s",
60 | "timeout": "5s",
61 | "success_before_passing": 3,
62 | "failures_before_critical": 3
63 | }]
64 | },
65 | {
66 | "name": "asntoip",
67 | "id": "asntoip",
68 | "tags": [
69 | "traefik.enable=true",
70 | "traefik.http.services.asntoip.loadbalancer.server.port=5151",
71 | "traefik.http.routers.asntoip.rule=Host(`asntoip.{{ homelab_domain_name }}`)",
72 | "traefik.http.routers.asntoip.entryPoints=web,websecure",
73 | "traefik.http.routers.asntoip.service=asntoip",
74 | "traefik.http.routers.asntoip.tls=true",
75 | "traefik.http.routers.asntoip.tls.certresolver=cloudflare"
76 | ],
77 | "checks": [{
78 | "id": "asntoip-http-check",
79 | "http": "http://{{ synology_second_ip }}:5151",
80 | "interval": "30s",
81 | "timeout": "5s",
82 | "success_before_passing": 3,
83 | "failures_before_critical": 3
84 | }]
85 | }
86 | ]
87 | }
88 |
--------------------------------------------------------------------------------
/templates/nomad_jobs/template_localfs.hcl:
--------------------------------------------------------------------------------
1 |
job "TEMPLATE" {
    region      = "global"
    datacenters = ["{{ datacenter_name }}"]
    type        = "service"

    // Template for services that need node-local (non-NFS) storage: a prestart
    // task restores the latest backup to local disk and a poststop task backs
    // it up again. Replace each TEMPLATE placeholder, the image, and the ports.

    // constraint {
    //     attribute = "${node.unique.name}"
    //     operator  = "regexp"
    //     value     = "rpi4"
    // }

    update {
        max_parallel      = 1
        health_check      = "checks"
        min_healthy_time  = "10s"
        healthy_deadline  = "5m"
        progress_deadline = "10m"
        auto_revert       = true
        canary            = 0
        stagger           = "30s"
    }

    group "TEMPLATE-group" {

        count = 1

        restart {
            attempts = 0
            delay    = "10m"
        }

        network {
            // Placeholders: fill in a static host port (or remove "static")
            // and the container port the service listens on.
            port "port1" {
                static = ""
                to     = ""
            }
        }

        task "create_filesystem" {
            // Prestart hook: copy the most recent backup into place on the local
            // computer, for services that will not work with their database in
            // an NFS share.

            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.restoreCommand}"
                args = [
                    "${meta.restoreCommand1}",
                    "${meta.restoreCommand2}",
                    "${NOMAD_JOB_NAME}",
                    "${meta.restoreCommand3}"
                ]
            }

            lifecycle {
                hook    = "prestart"
                sidecar = false
            }

        } // /task create_filesystem

        task "TEMPLATE" {

            env {
                PUID = "${meta.PUID}"
                PGID = "${meta.PGID}"
                TZ   = "America/New_York"
            }

            driver = "docker"
            config {
                image    = "" // placeholder: container image to run
                hostname = "${NOMAD_TASK_NAME}"
                ports    = ["port1"]
                volumes = [
                    // Node-local config dir, seeded by create_filesystem above.
                    "${meta.localStorageRoot}/${NOMAD_TASK_NAME}:/config"
                ]

            } // docker config

            service {
                port     = "port1"
                name     = "${NOMAD_TASK_NAME}"
                provider = "nomad"
                // Traefik routes the job's hostname here, behind the authelia middleware.
                tags = [
                    "traefik.enable=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.entryPoints=web,websecure",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.service=${NOMAD_TASK_NAME}",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls=true",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.tls.certresolver=cloudflare",
                    "traefik.http.routers.${NOMAD_TASK_NAME}.middlewares=authelia@file"
                ]

                check {
                    type     = "tcp"
                    port     = "port1"
                    interval = "30s"
                    timeout  = "4s"
                }

                // limit = 0 disables restart-on-failing-check after the 1m grace.
                check_restart {
                    limit = 0
                    grace = "1m"
                }
            } // service

            resources {
                cpu    = 1000 # MHz
                memory = 400 # MB
            } // resources

        } // /task ${NOMAD_JOB_NAME}

        task "save_configuration" {
            // Poststop hook: back up the local config so the next placement can
            // restore it via create_filesystem.
            driver = "raw_exec"
            config {
                # When running a binary that exists on the host, the path must be absolute
                command = "${meta.backupCommand}"
                args = [
                    "${meta.backupAllocArg1}",
                    "${meta.backupAllocArg2}",
                    "${meta.backupAllocArg3}",
                    "${meta.backupAllocArg4}",
                    "${meta.backupAllocArg5}",
                    "${NOMAD_JOB_NAME}",
                    "${meta.backupAllocArg6}"
                ]
            }
            lifecycle {
                hook    = "poststop"
                sidecar = false
            }
        } // /task save_configuration


    } // group


} // job

--------------------------------------------------------------------------------
/templates/nomad_jobs/sonarr.hcl:
--------------------------------------------------------------------------------
1 | # Sonarr (TV series PVR) as a Nomad service.
2 | #
3 | # Task lifecycle within the group:
4 | #   prestart  create_filesystem  - restore the most recent config backup to local disk
5 | #   main      sonarr             - run the linuxserver/sonarr container
6 | #   poststop  save_configuration - back the config up again when the job stops
7 | #
8 | # Sonarr's database does not work on an NFS share, so /config lives on node-local storage.
9 | job "sonarr" {
10 |     region      = "global"
11 |     datacenters = ["{{ datacenter_name }}"]
12 |     type        = "service"
13 |
14 |     // constraint {
15 |     //     attribute = "${attr.cpu.arch}"
16 |     //     operator  = "regexp"
17 |     //     value     = "64"
18 |     // }
19 |
20 |     update {
21 |         max_parallel      = 1
22 |         health_check      = "checks"
23 |         min_healthy_time  = "10s"
24 |         healthy_deadline  = "5m"
25 |         progress_deadline = "10m"
26 |         auto_revert       = true
27 |         canary            = 0
28 |         stagger           = "30s"
29 |     }
30 |
31 |     group "sonarrGroup" {
32 |
33 |         count = 1
34 |
35 |         # attempts = 0: a failed task is not restarted in place; recovery relies on
36 |         # rescheduling (and the poststop backup task) instead.
37 |         restart {
38 |             attempts = 0
39 |             delay    = "10m"
40 |         }
41 |
42 |         network {
43 |             # Sonarr listens on 8989 inside the container; Nomad allocates the
44 |             # host-side port dynamically.
45 |             port "sonarr" {
46 |                 to = 8989
47 |             }
48 |         }
49 |
50 |         task "create_filesystem" {
51 |             // Copy the most recent backup into place on the local computer. Sonarr will not work with
52 |             // its database in an NFS share.
53 |
54 |             driver = "raw_exec"
55 |             config {
56 |                 # When running a binary that exists on the host, the path must be absolute
57 |                 command = "${meta.restoreCommand}"
58 |                 args = [
59 |                     "${meta.restoreCommand1}",
60 |                     "${meta.restoreCommand2}",
61 |                     "${NOMAD_JOB_NAME}",
62 |                     "${meta.restoreCommand3}"
63 |                 ]
64 |             }
65 |
66 |             lifecycle {
67 |                 hook    = "prestart"
68 |                 sidecar = false
69 |             }
70 |
71 |         } // /task create_filesystem
72 |
73 |         task "sonarr" {
74 |
75 |             env {
76 |                 PUID = "${meta.PUID}"
77 |                 PGID = "${meta.PGID}"
78 |                 TZ   = "America/New_York"
79 |                 //DOCKER_MODS = "linuxserver/mods:universal-cron|linuxserver/mods:universal-mod2"
80 |                 //UMASK_SET = 022 #optional
81 |             }
82 |
83 |             driver = "docker"
84 |             config {
85 |                 image    = "linuxserver/sonarr:latest"
86 |                 hostname = "${NOMAD_JOB_NAME}"
87 |                 ports    = ["sonarr"]
88 |                 volumes  = [
89 |                     "${meta.localStorageRoot}/${NOMAD_JOB_NAME}:/config",
90 |                     "${meta.nfsStorageRoot}/media:/media"
91 |                 ]
92 |             } // docker config
93 |
94 |             service {
95 |                 port     = "sonarr"
96 |                 name     = "${NOMAD_JOB_NAME}"
97 |                 provider = "nomad"
98 |                 # Traefik routes ${NOMAD_JOB_NAME}.{{ homelab_domain_name }} to this
99 |                 # service; the router/service name is interpolated everywhere for
100 |                 # consistency (it resolves to "sonarr" at runtime).
101 |                 tags = [
102 |                     "traefik.enable=true",
103 |                     "traefik.http.routers.${NOMAD_JOB_NAME}.rule=Host(`${NOMAD_JOB_NAME}.{{ homelab_domain_name }}`)",
104 |                     "traefik.http.routers.${NOMAD_JOB_NAME}.entryPoints=web,websecure",
105 |                     "traefik.http.routers.${NOMAD_JOB_NAME}.service=${NOMAD_JOB_NAME}",
106 |                     "traefik.http.routers.${NOMAD_JOB_NAME}.tls=true",
107 |                     "traefik.http.routers.${NOMAD_JOB_NAME}.tls.certresolver=cloudflare"
108 |                 ]
109 |
110 |                 check {
111 |                     type     = "tcp"
112 |                     port     = "sonarr"
113 |                     interval = "30s"
114 |                     timeout  = "4s"
115 |                 }
116 |                 # NOTE(review): in Nomad, limit = 0 DISABLES check-based restarts
117 |                 # entirely (it does not mean "unlimited"). Confirm this no-op block
118 |                 # is intentional before relying on it.
119 |                 check_restart {
120 |                     limit = 0
121 |                     grace = "1m"
122 |                 }
123 |             } // service
124 |
125 |             resources {
126 |                 cpu    = 1000 # MHz
127 |                 memory = 400  # MB
128 |             } // resources
129 |
130 |         } // /task sonarr
131 |
132 |         task "save_configuration" {
133 |             driver = "raw_exec"
134 |             config {
135 |                 # When running a binary that exists on the host, the path must be absolute
136 |                 command = "${meta.backupCommand}"
137 |                 args = [
138 |                     "${meta.backupAllocArg1}",
139 |                     "${meta.backupAllocArg2}",
140 |                     "${meta.backupAllocArg3}",
141 |                     "${meta.backupAllocArg4}",
142 |                     "${meta.backupAllocArg5}",
143 |                     "${NOMAD_JOB_NAME}",
144 |                     "${meta.backupAllocArg6}"
145 |                 ]
146 |             }
147 |             lifecycle {
148 |                 hook    = "poststop"
149 |                 sidecar = false
150 |             }
151 |         } // /task save_configuration
152 |
153 |     } // group
154 |
155 | } // job
140 |
--------------------------------------------------------------------------------
/templates/nomad_jobs/grafana.hcl:
--------------------------------------------------------------------------------
1 | job "grafana" {
2 | region = "global"
3 | datacenters = ["{{ datacenter_name }}"]
4 | type = "service"
5 |
6 | // constraint {
7 | // attribute = "${node.unique.name}"
8 | // operator = "regexp"
9 | // value = "macmini"
10 | // }
11 |
12 | update {
13 | max_parallel = 1
14 | health_check = "checks"
15 | min_healthy_time = "10s"
16 | healthy_deadline = "5m"
17 | progress_deadline = "10m"
18 | auto_revert = true
19 | canary = 0
20 | stagger = "30s"
21 | }
22 |
23 | group "grafana" {
24 |
25 | count = 1
26 |
27 | restart {
28 | attempts = 0
29 | delay = "30s"
30 | }
31 |
32 | network {
33 | port "http" {}
34 | }
35 |
36 |
37 | task "grafana" {
38 |
39 | env {
40 | GF_PATHS_CONFIG = "/local/grafana.ini"
41 | }
42 |
43 | driver = "docker"
44 | config {
45 | image = "grafana/grafana:latest"
46 | hostname = "${NOMAD_JOB_NAME}"
47 | ports = ["http"]
48 | volumes = ["${meta.nfsStorageRoot}/pi-cluster/grafana:/var/lib/grafana"]
49 | } // docker config
50 |
51 | template {
52 | destination = "local/grafana.ini"
53 | data = <