├── .gitattributes
├── .gitea
└── workflows
│ └── ci.yml
├── .gitignore
├── .yamllint.yml
├── README.md
├── ansible
├── .ansible-lint
├── ansible.cfg
├── dev-requirements.txt
├── files
│ └── nginx-docker.conf
├── galaxy-requirements.yml
├── group_vars
│ └── all
│ │ ├── base.yml
│ │ ├── certbot.yml
│ │ ├── directories.yml
│ │ ├── docker.yml
│ │ ├── me.yml
│ │ ├── network.yml
│ │ ├── node_exporter.yml
│ │ ├── pve.yml
│ │ ├── tailscale.yml
│ │ ├── vault.yml
│ │ ├── vps-hosts.yml
│ │ └── wireguard.yml
├── host_vars
│ ├── casey
│ │ ├── main.yml
│ │ └── vault.yml
│ ├── forrest
│ │ ├── main.yml
│ │ └── vault.yml
│ ├── ingress.yml
│ ├── pve-docker
│ │ ├── main.yml
│ │ └── vault.yml
│ ├── pve
│ │ ├── main.yml
│ │ └── vault.yml
│ ├── qbittorrent.yml
│ ├── restic
│ │ ├── main.yml
│ │ └── vault.yml
│ ├── tang
│ │ ├── main.yml
│ │ └── vault.yml
│ └── walker
│ │ ├── main.yml
│ │ └── vault.yml
├── hosts
├── main.yml
├── roles
│ ├── adguardhome
│ │ ├── files
│ │ │ ├── Corefile
│ │ │ └── resolved-adguardhome.conf
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── baby_buddy
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── base
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── fail2ban-logrotate
│ │ │ ├── ssh-jail.conf
│ │ │ ├── ssh-keys
│ │ │ │ ├── mobile.pub
│ │ │ │ └── ps.pub
│ │ │ └── sshd_config
│ │ └── tasks
│ │ │ ├── fail2ban.yml
│ │ │ ├── logrotate.yml
│ │ │ ├── main.yml
│ │ │ ├── packages.yml
│ │ │ ├── ssh.yml
│ │ │ └── user.yml
│ ├── bsky
│ │ ├── files
│ │ │ ├── docker-compose.yml
│ │ │ └── pds.env
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── comentario
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ ├── main.yml
│ │ │ └── vault.yml
│ ├── coredns_docker_proxy
│ │ ├── files
│ │ │ ├── Corefile
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── db_auto_backup
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── docker_cleanup
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── docker-utils
│ │ │ │ ├── ctop
│ │ │ │ ├── dc
│ │ │ │ ├── dc-all
│ │ │ │ ├── hard-restart-all
│ │ │ │ └── update-all
│ │ │ └── zfs-override.conf
│ │ └── tasks
│ │ │ ├── main.yml
│ │ │ └── zfs-override.yml
│ ├── forgejo
│ │ ├── files
│ │ │ ├── app.ini
│ │ │ ├── docker-compose.yml
│ │ │ └── footer.html
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── forgejo_runner
│ │ ├── files
│ │ │ ├── config.yml
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── freshrss
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── gateway
│ │ ├── files
│ │ │ ├── nginx-cdn.conf
│ │ │ ├── nginx-fail2ban-filter.conf
│ │ │ ├── nginx-fail2ban-jail.conf
│ │ │ ├── nginx.conf
│ │ │ ├── wireguard-client.conf
│ │ │ └── wireguard-server.conf
│ │ └── tasks
│ │ │ ├── fail2ban.yml
│ │ │ ├── main.yml
│ │ │ ├── nginx.yml
│ │ │ └── wireguard.yml
│ ├── glinet_vpn
│ │ ├── files
│ │ │ ├── client.conf
│ │ │ └── server.conf
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ ├── main.yml
│ │ │ └── vault.yml
│ ├── headscale
│ │ ├── files
│ │ │ ├── acls.json
│ │ │ ├── headscale.yml
│ │ │ └── nginx.conf
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── http_proxy
│ │ ├── files
│ │ │ └── squid.conf
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── immich
│ │ ├── files
│ │ │ ├── docker-compose.yml
│ │ │ └── ipp-config.json
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── ingress
│ │ ├── files
│ │ │ ├── nftables.conf
│ │ │ ├── nginx.conf
│ │ │ └── wireguard.conf
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ ├── firewall.yml
│ │ │ ├── main.yml
│ │ │ ├── nginx.yml
│ │ │ └── wireguard.yml
│ ├── jellyfin
│ │ └── tasks
│ │ │ └── main.yml
│ ├── mastodon
│ │ ├── files
│ │ │ ├── docker-compose.yml
│ │ │ └── purge-media.sh
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── minio
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── nginx
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── includes
│ │ │ │ ├── docker-resolver.conf
│ │ │ │ ├── proxy.conf
│ │ │ │ └── ssl.conf
│ │ │ ├── nginx-https-redirect.conf
│ │ │ └── nginx.conf
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── ntfy
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── paccache
│ │ ├── files
│ │ │ └── paccache.hook
│ │ └── tasks
│ │ │ └── main.yml
│ ├── plausible
│ │ ├── files
│ │ │ ├── clickhouse-config.xml
│ │ │ ├── clickhouse-user-config.xml
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── pocket_id
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── privatebin
│ │ ├── files
│ │ │ ├── config.ini
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── prometheus
│ │ ├── files
│ │ │ ├── grafana
│ │ │ │ └── docker-compose.yml
│ │ │ └── prometheus
│ │ │ │ ├── alert-rules.d
│ │ │ │ │ └── blackbox.yml
│ │ │ │ ├── alertmanager.yml
│ │ │ │ ├── blackbox.yml
│ │ │ │ ├── docker-compose.yml
│ │ │ │ └── prometheus.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ ├── grafana.yml
│ │ │ ├── main.yml
│ │ │ └── prometheus.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── pve_docker
│ │ ├── files
│ │ │ ├── calibre
│ │ │ │ └── docker-compose.yml
│ │ │ ├── librespeed
│ │ │ │ └── docker-compose.yml
│ │ │ ├── nextcloud
│ │ │ │ ├── config.php
│ │ │ │ ├── docker-compose.yml
│ │ │ │ └── occ
│ │ │ ├── quassel
│ │ │ │ └── docker-compose.yml
│ │ │ ├── synapse
│ │ │ │ ├── docker-compose.yml
│ │ │ │ └── homeserver.yml
│ │ │ ├── wallabag
│ │ │ │ └── docker-compose.yml
│ │ │ └── whoami
│ │ │ │ └── docker-compose.yml
│ │ ├── tasks
│ │ │ ├── calibre.yml
│ │ │ ├── librespeed.yml
│ │ │ ├── main.yml
│ │ │ ├── nextcloud.yml
│ │ │ ├── quassel.yml
│ │ │ ├── synapse.yml
│ │ │ ├── wallabag.yml
│ │ │ └── whoami.yml
│ │ └── vars
│ │ │ ├── librespeed.yml
│ │ │ ├── nextcloud.yml
│ │ │ ├── synapse.yml
│ │ │ └── wallabag.yml
│ ├── pve_tailscale_route
│ │ └── tasks
│ │ │ └── main.yml
│ ├── qbittorrent
│ │ ├── files
│ │ │ └── nginx.conf
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ ├── main.yml
│ │ │ ├── nginx.yml
│ │ │ └── qbittorrent.yml
│ ├── renovate
│ │ ├── files
│ │ │ ├── config.js
│ │ │ ├── docker-compose.yml
│ │ │ └── entrypoint.sh
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── restic
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── backrest.sh
│ │ │ ├── restic-backup.sh
│ │ │ ├── restic-post.hook
│ │ │ └── restic-post.sh
│ │ ├── tasks
│ │ │ ├── homeassistant.yml
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── main.yml
│ ├── s3_sync
│ │ ├── files
│ │ │ └── rclone.conf
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── slides
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── tandoor
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── traefik
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── docker-compose.yml
│ │ │ ├── file-provider-grafana.yml
│ │ │ ├── file-provider-homeassistant.yml
│ │ │ ├── file-provider-jellyfin.yml
│ │ │ ├── file-provider-main.yml
│ │ │ ├── file-provider-uptime-kuma.yml
│ │ │ └── traefik.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── uptime_kuma
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ └── main.yml
│ ├── vaultwarden
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── main.yml
│ ├── vikunja
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── website
│ │ ├── files
│ │ │ └── docker-compose.yml
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── vault.yml
│ ├── yourls
│ │ ├── files
│ │ │ ├── docker-compose.yml
│ │ │ └── index.html
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── vars
│ │ │ └── main.yml
│ └── zfs
│ │ ├── defaults
│ │ │ └── main.yml
│ │ ├── files
│ │ │ ├── sanoid.conf
│ │ │ └── zfs-modprobe.conf
│ │ ├── tasks
│ │ │ ├── main.yml
│ │ │ └── sanoid.yml
│ │ └── vars
│ │ │ └── main.yml
└── vault-pass.sh
├── justfile
├── renovate.json
└── terraform
├── .terraform.lock.hcl
├── 0rng.one.tf
├── backblaze.tf
├── backends.tf
├── casey_vps.tf
├── context.tf
├── hetzner_firewall.tf
├── jakehoward.tech.tf
├── providers.tf
├── rclone.tf
├── state.tf
├── sys_domains.tf
├── terraform.tf
├── theorangeone.net.tf
├── variables.tf
└── walker_vps.tf
/.gitattributes:
--------------------------------------------------------------------------------
1 | *.yml linguist-detectable
2 | vault.yml linguist-generated
3 |
--------------------------------------------------------------------------------
/.gitea/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | on:
2 | push:
3 |
4 | jobs:
5 | terraform:
6 | runs-on: ubuntu-latest
7 | steps:
8 | - uses: actions/checkout@v4
9 | - name: Setup Terraform
10 | uses: hashicorp/setup-terraform@v3
11 | - uses: taiki-e/install-action@just
12 | - name: Init
13 | run: just terraform init -backend=false
14 | - name: Lint
15 | run: just terraform-lint
16 |
17 | ansible:
18 | runs-on: ubuntu-latest
19 | steps:
20 | - uses: actions/checkout@v4
21 | - name: Set up Python
22 | uses: actions/setup-python@v5
23 | with:
24 | python-version: 3.11
25 | - uses: taiki-e/install-action@just
26 |
27 | # HACK: https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
28 | - name: Fix permissions
29 | run: chmod 0755 ansible/
30 |
31 | - name: Set up
32 | run: just ansible-setup
33 | - name: Lint
34 | run: just ansible-lint
35 |
--------------------------------------------------------------------------------
/.yamllint.yml:
--------------------------------------------------------------------------------
1 | extends: default
2 |
3 | ignore: |
4 | ansible/galaxy_roles
5 | ansible/galaxy_collections
6 | ansible/group_vars/all/vps-hosts.yml
7 | ansible/roles/traefik/files/traefik.yml
8 | ansible/roles/forgejo_runner/files/config.yml
9 | env
10 |
11 | rules:
12 | document-start: disable
13 | truthy: disable
14 | quoted-strings:
15 | quote-type: double
16 | required: only-when-needed
17 | line-length:
18 | max: 160
19 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Infrastructure
2 |
3 | ## Requirements
4 |
5 | - Python 3
6 | - A local SSH config (ideally deployed through [dotfiles](https://github.com/realorangeone/dotfiles))
7 | - `ansible` installed on the system
8 | - [`just`](https://github.com/casey/just)
9 |
10 | ## Installation
11 |
12 | - `just setup`
13 | - `just terraform init`
14 |
15 | ### Private Settings
16 |
17 | Ansible [integrates](https://theorangeone.net/posts/ansible-vault-bitwarden/) with Bitwarden through its [CLI](https://bitwarden.com/help/article/cli/).
18 |
19 | Terraform secrets are stored in `terraform/.env`, and provisioned using `just update-secrets`.
20 |
21 | ## Deploying
22 |
23 | - `just ansible-deploy`
24 | - `just terraform apply`
25 |
26 | ## External configuration
27 |
28 | This repository contains most of my infrastructure configuration, but not quite everything. Some things are managed externally, for various reasons.
29 |
--------------------------------------------------------------------------------
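
A note on `ansible/vault-pass.sh` (listed in the tree above; its contents are not included in this dump): the Bitwarden integration described in the README works by pointing Ansible at an executable that prints the vault password. A minimal sketch, assuming a Bitwarden item named "ansible-vault" — the item name is illustrative, not taken from this repo:

    #!/usr/bin/env bash
    # Print the Ansible Vault password to stdout, fetched via the Bitwarden CLI.
    # Assumes `bw` is logged in and BW_SESSION is exported in the environment.
    set -euo pipefail
    bw get password ansible-vault

Ansible invokes such a script when it is set as `vault_password_file` (or passed via `--vault-password-file`), using whatever it prints to stdout to decrypt the `vault.yml` files throughout this repo.
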
/ansible/.ansible-lint:
--------------------------------------------------------------------------------
1 | skip_list:
2 | - command-instead-of-shell
3 | - no-handler
4 | - git-latest
5 | - fqcn
6 | - name[casing]
7 | - name[play]
8 | - no-changed-when
9 | - var-naming[no-role-prefix]
10 |
11 | exclude_paths:
12 | - galaxy_roles/
13 | - galaxy_collections/
14 | - ~/.ansible
15 | - roles/traefik/files/traefik.yml
16 | - roles/forgejo_runner/files/config.yml
17 |
--------------------------------------------------------------------------------
/ansible/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | nocows = 1
3 | host_key_checking = False
4 | retry_files_enabled = False
5 | roles_path = $PWD/galaxy_roles:$PWD/roles
6 | collections_path = $PWD/galaxy_collections
7 | inventory = ./hosts
8 | interpreter_python = auto_silent
9 |
10 | [privilege_escalation]
11 | become = True
12 | become_ask_pass = True
13 |
14 | [ssh_connection]
15 | pipelining = True
16 |
--------------------------------------------------------------------------------
/ansible/dev-requirements.txt:
--------------------------------------------------------------------------------
1 | ansible-lint==25.1.1
2 | yamllint==1.35.1
3 | ansible
4 | passlib
5 |
--------------------------------------------------------------------------------
/ansible/files/nginx-docker.conf:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
3 | server {
4 | listen 443 ssl http2;
5 | listen [::]:443 ssl http2;
6 |
7 | server_name {{ server_name }};
8 | set $upstream {{ upstream }};
9 |
10 | access_log /var/log/nginx/{{ server_name|split|first }}.log main;
11 |
12 | ssl_certificate {{ ssl_cert_path }}/fullchain.pem;
13 | ssl_certificate_key {{ ssl_cert_path }}/privkey.pem;
14 | ssl_trusted_certificate {{ ssl_cert_path }}/chain.pem;
15 | include includes/ssl.conf;
16 |
17 | include includes/docker-resolver.conf;
18 |
19 | location / {
20 | proxy_pass http://$upstream;
21 |
22 | {%- if location_extra is defined +%}
23 | {{ location_extra }}
24 | {%- endif +%}
25 | }
26 | }
27 |
--------------------------------------------------------------------------------
/ansible/galaxy-requirements.yml:
--------------------------------------------------------------------------------
1 | collections:
2 | - ansible.posix
3 | - community.general
4 | - community.docker
5 | - kewlfft.aur
6 | - name: https://github.com/prometheus-community/ansible
7 | type: git
8 |
9 | roles:
10 | - src: geerlingguy.docker
11 | version: 7.4.3
12 | - src: geerlingguy.ntp
13 | version: 2.7.0
14 | - src: realorangeone.reflector
15 | - src: ironicbadger.proxmox_nag_removal
16 | version: 1.0.2
17 | - src: ironicbadger.snapraid
18 | version: 1.0.0
19 | - src: geerlingguy.certbot
20 | version: 5.2.1
21 | - src: artis3n.tailscale
22 | version: v4.5.0
23 |
--------------------------------------------------------------------------------
/ansible/group_vars/all/base.yml:
--------------------------------------------------------------------------------
1 | timezone: Europe/London # noqa var-naming
2 |
3 | # HACK: Some of the hostnames aren't valid dict keys (eg "pve-docker" -> "pve_docker")
4 | hostname_slug: "{{ ansible_hostname | replace('-', '_') }}"
5 |
--------------------------------------------------------------------------------
/ansible/group_vars/all/certbot.yml:
--------------------------------------------------------------------------------
1 | certbot_install_method: package
2 | certbot_auto_renew: true
3 | certbot_auto_renew_user: root
4 | certbot_auto_renew_hour: 23
5 | certbot_auto_renew_minute: 30
6 | certbot_auto_renew_options: --quiet --post-hook "systemctl reload nginx"
7 | certbot_admin_email: "{{ vault_certbot_admin_email }}"
8 |
9 | certbot_create_method: webroot
10 |
11 | certbot_webroot: /var/www/certbot-webroot
12 |
13 | certbot_create_if_missing: true
14 |
--------------------------------------------------------------------------------
/ansible/group_vars/all/directories.yml:
--------------------------------------------------------------------------------
1 | app_data_dir: /mnt/tank/app-data
2 |
--------------------------------------------------------------------------------
/ansible/group_vars/all/docker.yml:
--------------------------------------------------------------------------------
1 | docker_user:
2 | id: 3000
3 | name: dockeruser
4 |
5 | docker_users:
6 | - "{{ me.user }}"
7 |
8 | docker_compose_file_mask: "664"
9 | docker_compose_directory_mask: "775"
10 |
11 | # HACK: Use compose-switch as the install for compose, so the commands still work.
12 | # Run this task manually, as version comparisons usually fail
13 | docker_compose_url: https://github.com/docker/compose-switch/releases/latest/download/docker-compose-linux-{{ docker_apt_arch }}
14 | docker_install_compose: false
15 |
16 | docker_install_compose_plugin: "{{ ansible_os_family == 'Debian' }}"
17 |
18 | docker_update_command: docker-compose pull && docker-compose down --remove-orphans && docker-compose rm && docker-compose up -d
19 |
--------------------------------------------------------------------------------
/ansible/group_vars/all/me.yml:
--------------------------------------------------------------------------------
1 | me:
2 | user: jake
3 | home: /home/jake
4 | name: Jake Howard
5 |
--------------------------------------------------------------------------------
/ansible/group_vars/all/network.yml:
--------------------------------------------------------------------------------
1 | ssh_port: 7743
2 |
--------------------------------------------------------------------------------
/ansible/group_vars/all/node_exporter.yml:
--------------------------------------------------------------------------------
1 | node_exporter_version: 1.5.0
2 | node_exporter_web_listen_address: "{{ private_ip }}:9100"
3 | node_exporter_enabled_collectors: [] # Disable the systemd collector by default
4 |
--------------------------------------------------------------------------------
/ansible/group_vars/all/pve.yml:
--------------------------------------------------------------------------------
1 | pve_hosts:
2 | internal_cidr: 10.23.1.0/24
3 | internal_cidr_ipv6: fde3:15e9:e883::1/48
4 | pve:
5 | ip: 10.23.1.1
6 | external_ip: 192.168.2.200
7 | pve_restic:
8 | ip: 10.23.1.11
9 | forrest:
10 | ip: 10.23.1.13
11 | ipv6: fde3:15e9:e883::103
12 | jellyfin:
13 | ip: 10.23.1.101
14 | docker:
15 | ip: 10.23.1.103
16 | ipv6: fde3:15e9:e883::203
17 | ingress:
18 | ip: 10.23.1.10
19 | external_ip: 192.168.2.201
20 | external_ipv6: "{{ vault_ingress_ipv6 }}"
21 | ipv6: fde3:15e9:e883::100
22 | homeassistant:
23 | ip: 192.168.2.203
24 | qbittorrent:
25 | ip: 10.23.1.105
26 | renovate:
27 | ip: 10.23.1.110
28 | gitea_runner:
29 | ip: 10.23.1.114
30 |
--------------------------------------------------------------------------------
/ansible/group_vars/all/tailscale.yml:
--------------------------------------------------------------------------------
1 | # Just install for now, don't configure
2 | tailscale_up_skip: true
3 |
4 | tailscale_cidr: 100.64.0.0/24 # It's really /10, but I don't use that many IPs
5 | tailscale_cidr_ipv6: fd7a:115c:a1e0::/120 # It's really /48, but I don't use that many IPs
6 |
7 | tailscale_port: 41641
8 |
9 | tailscale_nodes:
10 | casey:
11 | ip: 100.64.0.6
12 |
--------------------------------------------------------------------------------
/ansible/group_vars/all/vps-hosts.yml:
--------------------------------------------------------------------------------
1 | "vps_hosts":
2 | "casey_ip": "213.219.38.11"
3 | "private_ipv6_marker": "2a01:7e00:e000:7f7::1"
4 | "private_ipv6_range": "2a01:7e00:e000:7f7::1/128"
5 | "walker_ip": "162.55.181.67"
6 |
--------------------------------------------------------------------------------
/ansible/group_vars/all/wireguard.yml:
--------------------------------------------------------------------------------
1 | wireguard:
2 | public_ip: "{{ vps_hosts.casey_ip }}"
3 | port: 51820
4 | cidr: 10.23.0.0/24
5 | server:
6 | ip: 10.23.0.1
7 | public_key: "{{ vault_wireguard_server_public_key }}"
8 | private_key: "{{ vault_wireguard_server_private_key }}"
9 | clients:
10 | bartowski:
11 | ip: 10.23.0.10
12 | public_key: "{{ vault_wireguard_bartowski_public_key }}"
13 | private_key: "{{ vault_wireguard_bartowski_private_key }}"
14 | op7:
15 | ip: 10.23.0.11
16 | public_key: "{{ vault_wireguard_op7_public_key }}"
17 | private_key: "{{ vault_wireguard_op7_private_key }}"
18 | ingress:
19 | ip: 10.23.0.2
20 | public_key: "{{ vault_wireguard_ingress_public_key }}"
21 | private_key: "{{ vault_wireguard_ingress_private_key }}"
22 |
--------------------------------------------------------------------------------
/ansible/host_vars/casey/main.yml:
--------------------------------------------------------------------------------
1 | private_ip: "{{ ansible_tailscale0.ipv4.address }}"
2 | nginx_https_redirect: true
3 |
4 | certbot_certs:
5 | - domains:
6 | - headscale.jakehoward.tech
7 | - domains:
8 | - whoami-cdn.theorangeone.net
9 |
10 | cdn_domains:
11 | - whoami-cdn.theorangeone.net
12 |
13 | restic_backup_locations:
14 | - /var/lib/headscale/
15 |
--------------------------------------------------------------------------------
/ansible/host_vars/casey/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 65613137336266343033333338323734396266363431356166316233646663383039336634343936
3 | 3939353039396237396432336361653838323161356330360a393962313733363734323666666361
4 | 34303239633739383432323337356535613636376466323931323237626264333534626566386630
5 | 3839613338316530360a396364363163623633333362636238316463313732613562386161306661
6 | 38396361393837613137633830636333653565323331643937323863383963383739623235656636
7 | 30393033393031393733653335633462383062613039613332653466313439366161303533366264
8 | 39626132643534366639623230383233353332363836356132363130306637653465633663333665
9 | 34656636316439626230663037656130346635636232336561346361396166643465313565363963
10 | 32303962386635653264306530653132353238356336656634363136323564313261336638376136
11 | 63306333303763633362663238396434663066386235666163383135353232633236623832356439
12 | 62613664663164363838303531326363623465343036656530663562323231613737383464303664
13 | 35646137373233643966323363623961393361316463313464666261653636623937646464613133
14 | 39363863643835316330626435343166363931613430303966383263663639646463616133363463
15 | 33346665616263666635306162383333313063636364623838306462303438373832333965633236
16 | 61346161376161353736633332386538643839333261646432323466653962653964643438323130
17 | 64663133346564336334653430616363643662313732356634353764613466346638353833316332
18 | 31323364356265313263383138626234343239383063373066613666663330653431346630393937
19 | 34636464383766623662623136636363316530643534306366616333396465636264616531363863
20 | 33616237386132373034346132333766343030313039336531613837366265346539366264303634
21 | 65383731656130373464
22 |
--------------------------------------------------------------------------------
/ansible/host_vars/forrest/main.yml:
--------------------------------------------------------------------------------
1 | db_backups_dir: /mnt/tank/files/db-backups
2 |
--------------------------------------------------------------------------------
/ansible/host_vars/forrest/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 36376462326539663933303664633661303163333865656435356465373264626366336137303563
3 | 6239643535636538636434313739303030333162613635610a643831613934643631306232613130
4 | 65386166663136646161643133643238643033363533616664653565313463396138663839353131
5 | 3637333263663333610a653361336264313835383239396662626462353239616165626134666663
6 | 36386234633039653431343564653463376561306430663939663338646665616532393364363363
7 | 38613034393265376133366232386662373634623662613762653439633931323634613838656262
8 | 30623763366362653834636161646339393933346134613132623365656363373165323633663432
9 | 37636538383734646363
10 |
--------------------------------------------------------------------------------
/ansible/host_vars/ingress.yml:
--------------------------------------------------------------------------------
1 | private_ip: "{{ ansible_tailscale0.ipv4.address }}"
2 | nginx_https_redirect: true
3 |
--------------------------------------------------------------------------------
/ansible/host_vars/pve-docker/main.yml:
--------------------------------------------------------------------------------
1 | private_ip: "{{ pve_hosts.docker.ip }}"
2 |
3 | traefik_provider_jellyfin: true
4 | traefik_provider_homeassistant: true
5 | traefik_provider_grafana: true
6 | traefik_provider_uptime_kuma: true
7 |
8 | db_backups_dir: /mnt/tank/files/db-backups
9 |
--------------------------------------------------------------------------------
/ansible/host_vars/pve-docker/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 35383562343262633962376665646331613539666465663661376361306439366662646439376561
3 | 6139303637323938303537313331353937636631396537630a626362383465336661636431373163
4 | 36666665373636353263636366303064386262653038396338396532376363616236623430363431
5 | 3965653231323338360a396635666137343865373063376639333735323434346136663636396533
6 | 65616465633839663335666236383039356334353561343830363264353532326530326565323339
7 | 61643637663966626264626166663639666465383063333266353064396565653564623735663939
8 | 35646461393163633639326563353835313762353166346237383430336632353761623438353930
9 | 61333536343662396331
10 |
--------------------------------------------------------------------------------
/ansible/host_vars/pve/main.yml:
--------------------------------------------------------------------------------
1 | private_ip: "{{ pve_hosts.pve.ip }}"
2 |
3 | zpools_to_scrub:
4 | - tank
5 | - rpool
6 | - speed
7 |
8 | # 5GB, or so
9 | zfs_arc_size: 5000000000
10 |
11 | sanoid_datasets:
12 | tank:
13 | use_template: production
14 | recursive: true
15 | process_children_only: true
16 |
17 | speed:
18 | use_template: production
19 | recursive: true
20 | process_children_only: true
21 |
22 | rpool:
23 | use_template: production
24 | recursive: true
25 |
26 | sanoid_templates:
27 | production:
28 | frequently: 4
29 | hourly: 48
30 | daily: 28
31 | monthly: 3
32 | yearly: 0
33 | autosnap: true
34 | autoprune: true
35 |
36 | replaceable:
37 | frequently: 0
38 | hourly: 24
39 | daily: 7
40 | monthly: 0
41 | yearly: 0
42 | autosnap: true
43 | autoprune: true
44 |
45 |
46 | # Snapraid
47 | snapraid_install: false
48 | snapraid_runner: false
49 |
50 | snapraid_data_disks:
51 | - path: /mnt/bulk
52 | content: true
53 | snapraid_parity_disks:
54 | - path: /mnt/parity
55 | content: true
56 |
57 | snapraid_content_files:
58 | - /var/snapraid.content
59 | - /mnt/tank/files/snapraid.content
60 |
61 | snapraid_config_excludes:
62 | - "*.unrecoverable"
63 | - /lost+found/
64 | - "*.!sync"
65 | - /tmp/
66 |
67 | snapraid_scrub_schedule:
68 | hour: 2
69 | weekday: 4
70 |
71 | snapraid_scrub_healthcheck_io_uuid: "{{ vault_snapraid_scrub_healthcheck_io_uuid }}"
72 | snapraid_sync_healthcheck_io_uuid: "{{ vault_snapraid_sync_healthcheck_io_uuid }}"
73 |
--------------------------------------------------------------------------------
/ansible/host_vars/pve/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 35373139393931313861616335663835396132626632363635316430306539666631393230323539
3 | 3830333131633532343962376562663463656235333137340a343536626237306465646661656566
4 | 32346535633838386137383238336130663639633266366137353739633062313730333963626462
5 | 3436633035396461630a313433343330303434396665313536656462306166623636633731353937
6 | 33366265383932343231386438633432623263316363623032356662393538346234326238333130
7 | 64326434393165653134386631636165303836323763636532303562326238366638333063636135
8 | 33303866383934393961363933316433623637656264333531623034383337343231323361383363
9 | 63623264626537363832623662313533326230326665363161643931306338363831343566353839
10 | 39363562366430383461396232653531626131386234643731643463616563363334636365353934
11 | 66643561326566613364653363313763356662623066326232653938373135313561386636313264
12 | 31633938363863633866336435396239346266343662356231376161363763666332306330393337
13 | 64373933396136386366
14 |
--------------------------------------------------------------------------------
/ansible/host_vars/qbittorrent.yml:
--------------------------------------------------------------------------------
1 | private_ip: "{{ pve_hosts.qbittorrent.ip }}"
2 |
--------------------------------------------------------------------------------
/ansible/host_vars/restic/main.yml:
--------------------------------------------------------------------------------
1 | restic_backup_locations:
2 | - /mnt/host/mnt/tank
3 | - /mnt/host/mnt/speed
4 | - /mnt/host/etc/pve
5 | - /mnt/home-assistant
6 | - /home/rclone/sync
7 |
8 | restic_dir: ""
9 |
--------------------------------------------------------------------------------
/ansible/host_vars/restic/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 37616635326362383437633735343430663563653561636338646666323631333135313465623933
3 | 6363373730633062343966663735376666623439633139650a323537313831386537383133336461
4 | 63353034663931363663383766653465386335383238306636666531353062316263356362386230
5 | 3330356164373731390a363439656564666364323530363464623736313165353465346163623037
6 | 62383238386330623662343835306563353831396666643862653965323438373332363364383333
7 | 35343230396564343161393963666438613865316137356139393361636661373335303735323664
8 | 36383632643534623237353562386638336533626362636363396635393533656631326337383465
9 | 32633239643464353465626165393261323033623062313930353764386465623332613534613636
10 | 39613563623135306232356235613862353437393062646464633732383735343362316462326561
11 | 34346262656461643237353366303138653764363337343439656330393833333233386436646661
12 | 62343631323035613132656665636661643162323632323363396362306266323631343161316230
13 | 34666363383861323231353734336165393335646537326162343430653337653739376232343033
14 | 63663731653836393232323731323965643262653836353565383261393539616536346237323166
15 | 36633339303038613635333537393933383732303332366366326666343066316337383535333566
16 | 65396636666238616339633839323763383732326364386138306439353030396561336262306632
17 | 32383934383463326532363235333062363631363131616466316638366631663930366461393564
18 | 31646330386161626463633931363439366433646439363035396364366332346339623661333562
19 | 37633136343838666338356533643230393331636136333931653937363731623434653364393464
20 | 31623937656231363262343366343565616134313466313835636139306164393638626263623833
21 | 62623564396232373565393131366366383335366631373031656235326365373137613031653665
22 | 35616265663064363832623132356365643065343830306539646635383737666231343830323261
23 | 66633032373737653966623930386661386634316339303762383431613332643134323731636563
24 | 34313832623430623964626139306535323139346162626332366438623630356639616630376230
25 | 34656138323234386238373036363335353430366139363964323437623833653361613333383537
26 | 6466
27 |
--------------------------------------------------------------------------------
/ansible/host_vars/tang/main.yml:
--------------------------------------------------------------------------------
1 | ssh_extra_allowed_users: jake
2 | private_ip: "{{ ansible_default_ipv4.address }}"
3 |
4 | restic_backup_locations:
5 | - /var/lib/private/adguardhome/
6 |
--------------------------------------------------------------------------------
/ansible/host_vars/tang/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 66303032306566656332616563633936393036326332646664366430383635363534363037303065
3 | 3164383833353062633336313163336364616230653338390a636234663832636666623864623464
4 | 65373739396235383536363631326333623533613064303961333637613664386161656432613638
5 | 3466623664326632310a363338363433323132626537396665333766366161393832663537623837
6 | 62626166353230626334633735323164316663353936303439656336653130613963666530356630
7 | 31346465663437663630663839613530323064383066323633363435616431346231396130383032
8 | 32623730376363353938663834346665333133666661303162323030623462633234363139626633
9 | 36623039363838646336623464313662333962326335653561383633306263366130366362626466
10 | 33633366653036363935316239396161323663393263323435313032363862326637663732663839
11 | 34663432663333666666303538623566633330313037623662616565373733636432373430333436
12 | 65386331623439313066613437396566643062333062666437363365363134626333353332393534
13 | 38343764383036343836346439363162363733646335616136616463396635323239636264303735
14 | 34393533643730343432316661633736653161396161343431623862353136313035353933666537
15 | 34346330663866323864666366363030613663643363346433303266643434643239643062303632
16 | 30306638303534633833626532653462663337376435626533316230323638653861393130343763
17 | 35376331333135343130303062333436643639353733653862383732363030396432386461346632
18 | 30653230616231666665383564346565656461613561666139393234626263656137343530353136
19 | 36376561333833633435353861336538636633633064633739313831366633633861303437306234
20 | 39353538323563396632353936316330643961636665356439376666346135323563663631653365
21 | 35633533643731373861
22 |
--------------------------------------------------------------------------------
/ansible/host_vars/walker/main.yml:
--------------------------------------------------------------------------------
1 | private_ip: "{{ ansible_tailscale0.ipv4.address }}"
2 |
3 | restic_backup_locations:
4 | - /opt
5 |
6 | nginx_https_redirect: true
7 |
8 | certbot_certs:
9 | - domains:
10 | - theorangeone.net
11 | - jakehoward.tech
12 | - domains:
13 | - plausible.theorangeone.net
14 | - elbisualp.theorangeone.net
15 | - domains:
16 | - slides.jakehoward.tech
17 | - domains:
18 | - comentario.theorangeone.net
19 |
--------------------------------------------------------------------------------
/ansible/host_vars/walker/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 61616639316338623739306163363831303664633965666134373335353038323065306538303465
3 | 3462346437303139393738613031363637383731333438630a393862353436376264386264626531
4 | 37326431643130373566633431313431653538636662623135386364373634373761303365646564
5 | 3735633436323231390a386661336431656266616136626261373132393862386163396336643366
6 | 31366463656363363363666438653762653332313336303561313961393236613065303966386535
7 | 34396239366138613330366361323562663132343762333536646131643466643533303163636139
8 | 34626135613731653033313236386162613037386464613531633063656564336566386461666639
9 | 65653635326232643937313465343763326464613231383932393262323062316136353538626464
10 | 31383361643164303330653531333439613665313136393833366334323931373963313033646163
11 | 37363231616232353565636634646235383564356461393831323430363965333265656166363265
12 | 62353130323939313931316430393636336634323930376337373130363362396561373835633731
13 | 30383633333864623336353937623438616562346361626333306162626331326635363365353939
14 | 32636637396461396662626633323034383034353630633565363439636261333063306638373063
15 | 63363932623635393465336132343337633765646339376638326635373930353734666461636538
16 | 39613538313462633836343664333034326436336139343865643135383736656132343866663263
17 | 64323562383963396237383537306261643331646533616233326435386164336237316462623438
18 | 30623662303835653039393739396535613264373031336637616165333837343939363564613339
19 | 37633831653361373038643438623732323535653230626364653936383736363364313632656538
20 | 63646361323733656362366433353136643038643039633231326638346636653861616437653562
21 | 65343237623039386339326564316333636362376266316661333632313034366565383139323564
22 | 30343531333038323438393461326335386439373365323031366562363966616437616265386234
23 | 33646562626564386639376130623366303063313739343435656434356230636630333834666433
24 | 35663035663137666537633335613737383563356266336433396531366166313435653934663433
25 | 63646162663563643962
26 |
--------------------------------------------------------------------------------
/ansible/hosts:
--------------------------------------------------------------------------------
1 | casey
2 | walker
3 |
4 | pve
5 | tang
6 |
7 | # PVE VMs
8 | ingress
9 | pve-docker
10 | jellyfin
11 | forrest
12 | qbittorrent
13 | restic
14 | renovate
15 | gitea-runner
16 |
--------------------------------------------------------------------------------
/ansible/roles/adguardhome/files/Corefile:
--------------------------------------------------------------------------------
1 | (alias) {
2 | errors
3 | cancel
4 | cache 600
5 |
6 | forward . tls://9.9.9.9 tls://149.112.112.112 tls://2620:fe::fe tls://2620:fe::9 {
7 | tls_servername dns.quad9.net
8 | health_check 15s
9 | }
10 |
11 | hosts {
12 | {{ pve_hosts.ingress.external_ip }} pve.sys.theorangeone.net
13 | {{ pve_hosts.ingress.external_ipv6 }} pve.sys.theorangeone.net
14 | fallthrough
15 | ttl 300
16 | }
17 |
18 | # HACK: Rewrite the CNAME to itself so it's reprocessed
19 | rewrite cname exact pve.sys.theorangeone.net. pve.sys.theorangeone.net.
20 | }
21 |
22 | theorangeone.net:53053 {
23 | import alias
24 | }
25 |
26 | jakehoward.tech:53053 {
27 | import alias
28 | }
29 |
30 | .:53053 {
31 | acl {
32 | block
33 | }
34 | }
35 |
--------------------------------------------------------------------------------
/ansible/roles/adguardhome/files/resolved-adguardhome.conf:
--------------------------------------------------------------------------------
1 | [Resolve]
2 | DNS=127.0.0.1
3 | DNSStubListener=no
4 |
--------------------------------------------------------------------------------
/ansible/roles/adguardhome/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart coredns
2 | service:
3 | name: coredns
4 | state: restarted
5 | enabled: true
6 |
7 | - name: restart systemd-resolved
8 | service:
9 | name: systemd-resolved
10 | state: restarted
11 | enabled: true
12 |
--------------------------------------------------------------------------------
/ansible/roles/adguardhome/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install adguardhome
2 | package:
3 | name: adguardhome
4 |
5 | - name: Disable resolved stub
6 | template:
7 | src: files/resolved-adguardhome.conf
8 | dest: /etc/systemd/resolved.conf.d/adguardhome.conf
9 | owner: root
10 | mode: "0644"
11 | notify: restart systemd-resolved
12 |
13 | - name: Use resolved resolv.conf
14 | file:
15 | src: /run/systemd/resolve/resolv.conf
16 | dest: /etc/resolv.conf
17 | state: link
18 | notify: restart systemd-resolved
19 |
20 | - name: Install coredns
21 | kewlfft.aur.aur:
22 | name: coredns
23 |
24 | - name: Install coredns config file
25 | template:
26 | src: files/Corefile
27 | dest: /etc/coredns/Corefile
28 | owner: coredns
29 | mode: "0644"
30 | notify: restart coredns
31 |
--------------------------------------------------------------------------------
/ansible/roles/baby_buddy/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | baby-buddy:
3 | image: lscr.io/linuxserver/babybuddy:latest
4 | restart: unless-stopped
5 | environment:
6 | - PUID={{ docker_user.id }}
7 | - PGID={{ docker_user.id }}
8 | - TZ={{ timezone }}
9 | - DATABASE_URL=postgres://baby-buddy:baby-buddy@db/baby-buddy
10 | - ALLOWED_HOSTS=baby-buddy.jakehoward.tech
11 | - CSRF_COOKIE_SECURE=True
12 | - SECRET_KEY={{ vault_secret_key }}
13 | - SECURE_PROXY_SSL_HEADER=True
14 | - SESSION_COOKIE_SECURE=True
15 | labels:
16 | - traefik.enable=true
17 | - traefik.http.routers.baby-buddy.rule=Host(`baby-buddy.jakehoward.tech`)
18 | - traefik.http.routers.baby-buddy.middlewares=tailscale-only@file
19 | volumes:
20 | - "{{ app_data_dir }}/baby-buddy:/config"
21 | depends_on:
22 | - db
23 | networks:
24 | - default
25 | - traefik
26 |
27 | db:
28 | image: postgres:14-alpine
29 | restart: unless-stopped
30 | volumes:
31 | - /mnt/speed/dbs/postgres/baby-buddy:/var/lib/postgresql/data
32 | environment:
33 | - POSTGRES_PASSWORD=baby-buddy
34 | - POSTGRES_USER=baby-buddy
35 |
36 | networks:
37 | traefik:
38 | external: true
39 |
--------------------------------------------------------------------------------
/ansible/roles/baby_buddy/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart baby-buddy
2 | shell:
3 | chdir: /opt/baby-buddy
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/baby_buddy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/baby-buddy
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install compose file
12 | template:
13 | src: files/docker-compose.yml
14 | dest: /opt/baby-buddy/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | notify: restart baby-buddy
19 |
--------------------------------------------------------------------------------
/ansible/roles/baby_buddy/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 31663462633839636531393633633938376534316230626362353733653862623964626232333265
3 | 3733313066313639363131353963373431363761383537300a613662393631623832613537363034
4 | 30623931653839636361646231386465383333343535646436656565663137303166366533353866
5 | 3634643437303034330a646236353831363638633835666239383430636532396466623461303535
6 | 31383238633430393935653366646666303066316232643733366264353034626461613038323834
7 | 35383961316663356136363562646636313133346438343965383931353336643434303938373766
8 | 303432363965616134613933643635626565
9 |
--------------------------------------------------------------------------------
/ansible/roles/base/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ssh_extra_allowed_users: ""
2 |
--------------------------------------------------------------------------------
/ansible/roles/base/files/fail2ban-logrotate:
--------------------------------------------------------------------------------
1 | /var/log/fail2ban.log {
2 | daily
3 | rotate 7
4 | missingok
5 | compress
6 | nodateext
7 | notifempty
8 | postrotate
9 | /usr/bin/fail2ban-client flushlogs 1>/dev/null || true
10 | endscript
11 | }
12 |
--------------------------------------------------------------------------------
/ansible/roles/base/files/ssh-jail.conf:
--------------------------------------------------------------------------------
1 | [sshd]
2 | enabled = true
3 | bantime = 600
4 | findtime = 30
5 | maxretry = 5
6 | port = {{ ssh_port }},ssh
7 | ignoreip = {{ wireguard.cidr }},{{ pve_hosts.internal_cidr }},{{ pve_hosts.internal_cidr_ipv6 }},{{ tailscale_cidr }}
8 |
--------------------------------------------------------------------------------
/ansible/roles/base/files/ssh-keys/mobile.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAEAQCEi0j/PvJFyMx9S4zDX9Ornxyr89JrUmBZLXj38XNIUHyckmpSm/oSurmzQnDBAp16oiDw+U0zGkSB8BKDkhM2hqYOb8GUsuri85Hu/hvUyiFIEsSNNC0bcrImqHYPE0tiNoDbJ6hlz+eelIdqj5kmnGh8V91QIi9nQz2DkR2j8j8HVjXExI2w5c5p2yuqdNNU1p4BTkeKirLHrQIstiWlOpfbtPAh6Wp597NO1Rp9YuMp0/dpS0W4ebm0h5iYvVwXYcXShA1zINCrWYEAGFSeG2iDqBY9vYzOU5pZDkRn2Ewl/2+EQ34GDDbjCZ7mn7siJmN0M1oNpygphPxjAKR95Zidvsyvs9iX0ua+c35f4z9YsVizsIVbouj3LT4c43WwcS1XWPRCit5gFHbpUNRzs0ypwIUwR9AF3mCkYlqYmpSJyfLcdPZwZPhJYxd2MzhquQ+CS+eXfhYEiioD6KvNL8ehsuJmSQtPm4vTXuipfseOdh1GtakDH9wDRTs+THgoNPc9K2ozbo6bofMOvfO2ZvqYeC4Vb5mTeHkeKBB1XU8FCrKBJTCZZ67LSxCBM2liemEHaklbl4H50xGxOWbtL1ZMEd4gkKF8TduO9vEPge9AUbbtZSDrvXgi+QPUrzF2iuk85OKWZOcz0ObWOXaL0dMI87SZTguu0SR3VtCYGCeKqh7HFfA3AO7Bq1AmKZLnWXVqQHjUp56TgVugj86B80zE1AOrbDac1qScT8dG9KpM4nP+ewmIsrl/a16NaAfcit+UPeTSA8MNDAV0xUbAIyt7Mka7Qeuln+EHeZNenJ5VkpTYdyxwpq2FOXgrbfLz7y4pqBVF/lPDy/gyhFEIoFsqIp7si7ve1O8vZyRcsypT9j4HNSpy07+kG1V3BPnTBY3+YSq9lRTtVfAQQZdI8/a43cUzP4j8Czbri3fYvUsEb0ml9AViRZEZhGLcHETiOy50dJkHQm+OcGpIgA71zFp7qfVBX/OW7/+jeAboz8WHYSlHPbjpiD6d22rVW9svbOMli3tDe2bIZm9qeJRcU/0gCO7dKHSqkiJk0l27Io5wNRP9KY7OdOp0uC0RtgpSxFHS4U3fnOuRbKk5PxGSUNgCCn4iUcboFRu0gXaIRBXs4Lbw2ivwx492sk5XDax5YZ33dm6quqOh2n/f16X80/vKRrXxAuN3vbDXOTdVzHkrVF/vTSl8eHqunSidHpe5ID4d82qjyrHJL+0q870FacVOXRIxUsmOtG+f0RXcFENGPTdz4KjoBPcUv8u+z+01jgXOzbpFGXZ8PzqQVuRPlf874kR8SrYUY76LqdubDy9cROj47ThKwriuthYpDipFQNk7fz+qvh95WRjqt/fz Mobile
2 |
--------------------------------------------------------------------------------
/ansible/roles/base/files/ssh-keys/ps.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCbd96S1+SBBdoGfWIGj+5Wa7B16iwUhmmMVz+7QLmYF7fgS98yBzBqd4pTVW0dDf6mmNWMpTo5eNGNys7t9roGTzeIIVA3XOnPgAR1WX03u5c3XnZp4ax2FNq3Q2nYvyu8XUqy81P1yR93fjs49tMs6OAeqWV08xMdE6Y21ewdRti3+zfjKN5RVwHzQa8l6P5tKqMi409KOma/FpepJyLlhdSh6UQBhy+wZHOIwMgRzv9fAV/R1+xsiUDyLZi3Q8yqrTTohARaDAc20yUKJC0x38wx1U4nKJR0O6fzn6aBpulKwAE/7qpp+oSzEYJES1ATaglrZ/M0h58euNDfNcxOl9XVAvG2ZJjlC9VwIu6R7YtpIFVRERKUKJbn+NnN2iheDjsIkm6mX3uvOMq6aCVIuBU6aDatTDXC3lXXzxBHOc5iU9FrvQe1olePNhhhd1kl7jy7eanOq9EqEvhFIpPGrVOPm37M4MY6bCoH7+YgWTgxAR1O7KYsKEaoJcVq5dJxC3Gsj49WdCw2OUguCZl/FPscRnHgCTNGPdimeXaxGnSdSw2LCxDGq90RquQAnLuFmiCp5M1ouI+234BpD6trE85sshnpWo5WW8jt5yvlYV3o4L4OtqWLhTh7O0ORUEwQbFmA1FXoWVwn5S0S+PzMOxaw1jv9OZAPESWw+Twtiw== jake@TOO-Portable
2 |
--------------------------------------------------------------------------------
/ansible/roles/base/files/sshd_config:
--------------------------------------------------------------------------------
1 | # TCP port to bind to
2 | # Change to a high/odd port if this server is exposed to the internet directly
3 | Port {{ ssh_port }}
4 |
5 | AllowUsers {% if hostname_slug in pve_hosts %}{{ me.user }}@{{ pve_hosts.internal_cidr }}{% endif %} {{ me.user }}@{{ tailscale_cidr }} {{ ssh_extra_allowed_users }}
6 |
7 | # Bind to all interfaces (change to specific interface if needed)
8 | ListenAddress 0.0.0.0
9 |
10 | # Force SSHv2 Protocol
11 | Protocol 2
12 |
13 | HostKey /etc/ssh/ssh_host_ed25519_key
14 |
15 | # Public key authentication only (passwords disabled)
16 | # AuthenticationMethods requires OpenSSH v6.2+
17 | PubkeyAuthentication yes
18 | AuthenticationMethods publickey
19 |
20 | # Disable root SSH access
21 | PermitRootLogin no
22 |
23 | # Client timeout
24 | ClientAliveInterval 60
25 | ClientAliveCountMax 100
26 |
27 | # Compression (only after authentication)
28 | Compression delayed
29 |
30 | # Logging
31 | SyslogFacility AUTH
32 | LogLevel INFO
33 |
34 | # Authentication must happen within 30 seconds
35 | LoginGraceTime 30
36 |
37 | PermitEmptyPasswords no
38 |
39 | # Check user folder permissions before allowing access
40 | StrictModes yes
41 |
42 | # Message Authentication Code (Hash, only SHA2-512)
43 | # SHA-256 included for compat with PuTTY-WinCrypt clients
44 | MACs hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,umac-128-etm@openssh.com
45 |
46 | # Ciphers (only secure AES-256)
47 | Ciphers aes256-ctr,aes128-gcm@openssh.com,aes128-ctr,aes192-ctr,aes256-gcm@openssh.com,chacha20-poly1305@openssh.com
48 |
49 | # Key Exchange algorithms (Elliptic Curve Diffie-Hellman)
50 | # DH-SHA-256 included for compat with PuTTY-WinCrypt clients
51 | KexAlgorithms diffie-hellman-group18-sha512,curve25519-sha256,curve25519-sha256@libssh.org,diffie-hellman-group-exchange-sha256,diffie-hellman-group14-sha256,diffie-hellman-group16-sha512
52 |
53 | # Don't read the user's ~/.rhosts and ~/.shosts files
54 | IgnoreRhosts yes
55 |
56 | # Disable unused authentication schemes
57 | HostbasedAuthentication no
58 | ChallengeResponseAuthentication no
59 | KerberosAuthentication no
60 | GSSAPIAuthentication no
61 | UsePAM no
62 |
63 | # X11 support
64 | X11Forwarding no
65 |
66 | # Show Message of the Day
67 | PrintMotd yes
68 |
69 | # TCPKeepAlive (non-tunneled, disabled)
70 | TCPKeepAlive no
71 |
72 | # Allow client to pass locale environment variables
73 | AcceptEnv LANG LC_*
74 |
75 | Subsystem sftp internal-sftp
76 |
--------------------------------------------------------------------------------
/ansible/roles/base/tasks/fail2ban.yml:
--------------------------------------------------------------------------------
1 | - name: Install fail2ban
2 | package:
3 | name: fail2ban
4 |
5 | - name: Enable fail2ban
6 | service:
7 | name: fail2ban
8 | enabled: true
9 |
10 | - name: fail2ban SSH jail
11 | template:
12 | src: files/ssh-jail.conf
13 | dest: /etc/fail2ban/jail.d/ssh.conf
14 | mode: "0600"
15 | register: fail2ban_jail
16 |
17 | - name: Restart fail2ban
18 | service:
19 | name: fail2ban
20 | state: restarted
21 | when: fail2ban_jail.changed
22 |
--------------------------------------------------------------------------------
/ansible/roles/base/tasks/logrotate.yml:
--------------------------------------------------------------------------------
1 | - name: Install logrotate
2 | package:
3 | name: logrotate
4 |
5 | - name: Enable logrotate timer
6 | service:
7 | name: logrotate.timer
8 | enabled: true
9 | when: ansible_os_family == 'Archlinux'
10 |
11 | - name: logrotate fail2ban config
12 | template:
13 | src: files/fail2ban-logrotate
14 | dest: /etc/logrotate.d/fail2ban
15 | mode: "0600"
16 |
--------------------------------------------------------------------------------
/ansible/roles/base/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Packages
2 | include_tasks: packages.yml
3 |
4 | - name: User
5 | include_tasks: user.yml
6 |
7 | - name: SSH
8 | include_tasks: ssh.yml
9 |
10 | - name: fail2ban
11 | include_tasks: fail2ban.yml
12 |
13 | - name: logrotate
14 | include_tasks: logrotate.yml
15 |
--------------------------------------------------------------------------------
/ansible/roles/base/tasks/packages.yml:
--------------------------------------------------------------------------------
1 | - name: Install Base Packages
2 | package:
3 | name: "{{ item }}"
4 | loop:
5 | - htop
6 | - neofetch
7 | - net-tools
8 | - pv
9 | - speedtest-cli
10 | - sudo
11 | - vim
12 | - git
13 |
--------------------------------------------------------------------------------
/ansible/roles/base/tasks/ssh.yml:
--------------------------------------------------------------------------------
1 | - name: Install OpenSSH for Debian
2 | package:
3 | name: openssh-server
4 | when: ansible_os_family == 'Debian'
5 |
6 | - name: Install OpenSSH for Arch
7 | package:
8 | name: openssh
9 | when: ansible_os_family == 'Archlinux'
10 |
11 | - name: Define context
12 | set_fact:
13 | user: jake
14 | enable_root: false
15 |
16 | - name: SSH config
17 | template:
18 | src: files/sshd_config
19 | dest: /etc/ssh/sshd_config
20 | validate: /usr/sbin/sshd -t -f %s
21 | backup: true
22 | mode: "644"
23 | register: sshd_config
24 |
25 | - name: Set up authorized keys
26 | ansible.posix.authorized_key:
27 | user: "{{ me.user }}"
28 | state: present
29 | key: "{{ lookup('file', item) }}"
30 | loop:
31 | - ssh-keys/ps.pub
32 | - ssh-keys/mobile.pub
33 |
34 | - name: Enable SSH
35 | service:
36 | name: sshd
37 | enabled: true
38 |
39 | - name: Reload SSH Daemon
40 | service:
41 | name: sshd
42 | state: reloaded
43 | when: sshd_config.changed
44 |
--------------------------------------------------------------------------------
/ansible/roles/base/tasks/user.yml:
--------------------------------------------------------------------------------
1 | - name: Make me
2 | user:
3 | name: "{{ me.user }}"
4 | home: "{{ me.home }}"
5 | comment: "{{ me.name }}"
6 | shell: /bin/bash
7 | system: true
8 |
9 | - name: Give user sudo access
10 | user:
11 | name: "{{ me.user }}"
12 | groups: "{{ 'sudo' if ansible_os_family == 'Debian' else 'wheel' }}"
13 | append: true
14 |
--------------------------------------------------------------------------------
/ansible/roles/bsky/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | pds:
3 | image: ghcr.io/bluesky-social/pds:latest
4 | user: "{{ docker_user.id }}"
5 | restart: unless-stopped
6 | env_file:
7 | - /opt/bsky/pds.env
8 | labels:
9 | - traefik.enable=true
10 | - traefik.http.routers.bsky.rule=Host(`bsky.theorangeone.net`)
11 | volumes:
12 | - "{{ app_data_dir }}/bsky:/pds"
13 | networks:
14 | - default
15 | - traefik
16 |
17 | networks:
18 | traefik:
19 | external: true
20 |
--------------------------------------------------------------------------------
/ansible/roles/bsky/files/pds.env:
--------------------------------------------------------------------------------
1 | TZ={{ timezone }}
2 | PDS_HOSTNAME=bsky.theorangeone.net
3 | PDS_JWT_SECRET={{ vault_jwt_secret }}
4 | PDS_ADMIN_PASSWORD={{ vault_admin_password }}
5 | PDS_PLC_ROTATION_KEY_K256_PRIVATE_KEY_HEX={{ vault_plc_rotation_private_key }}
6 | PDS_DATA_DIRECTORY=/pds
7 | PDS_BLOBSTORE_DISK_LOCATION=/pds/blocks
8 | PDS_BLOB_UPLOAD_LIMIT=52428800
9 | PDS_DID_PLC_URL=https://plc.directory
10 | PDS_BSKY_APP_VIEW_URL=https://api.bsky.app
11 | PDS_BSKY_APP_VIEW_DID=did:web:api.bsky.app
12 | PDS_REPORT_SERVICE_URL=https://mod.bsky.app
13 | PDS_REPORT_SERVICE_DID=did:plc:ar7c4by46qjdydhdevvrndac
14 | PDS_CRAWLERS=https://bsky.network
15 | LOG_ENABLED=false
16 | PDS_EMAIL_SMTP_URL={{ vault_smtp_url }}
17 | PDS_EMAIL_FROM_ADDRESS={{ vault_smtp_from_address }}
18 |
--------------------------------------------------------------------------------
/ansible/roles/bsky/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart bsky
2 | shell:
3 | chdir: /opt/bsky
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/bsky/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/bsky
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install environment variables
12 | template:
13 | src: files/pds.env
14 | dest: /opt/bsky/pds.env
15 | mode: "660"
16 | owner: "{{ docker_user.name }}"
17 | notify: restart bsky
18 |
19 | - name: Install compose file
20 | template:
21 | src: files/docker-compose.yml
22 | dest: /opt/bsky/docker-compose.yml
23 | mode: "{{ docker_compose_file_mask }}"
24 | owner: "{{ docker_user.name }}"
25 | validate: docker-compose -f %s config
26 | notify: restart bsky
27 |
--------------------------------------------------------------------------------
/ansible/roles/bsky/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 35316238376465633461333439343331636238346532623336316231653664653963643331346362
3 | 3763363363333066636166356465373233323138643961390a343232623866303961316431363534
4 | 31653234383465356637363636363838393130396364623261353266396533326563303838643366
5 | 6339666332326439610a666235636432616565643839663234336134343632316538353331396337
6 | 33303836373037336533623864613966646463333161663965653663326266376234633530393530
7 | 63303938376338613531623065316339653938666439643035663231646566643334356337343861
8 | 65353264613465626532643935313262323766666538386239613163366536636335616562613635
9 | 31643637333266373336323035366465636261346263666239323934616238616366383330336661
10 | 38386536326137363531636635626232333465613031633031336330316337303237303736656639
11 | 37313331346165363465326336663536646438363835393138646238353661303937346430303333
12 | 39663236663530396562626133666434396132356638643563626362636563373464356636313337
13 | 63303730656338313036313937323462326366366231363265363335636536396335323561663235
14 | 65333666333033376334303463376666373738376361316463343836323839383735666530656135
15 | 33316238356536663362646437633866323531353439393561626331326562663366663839393438
16 | 35653262653262326532386431373336393737363665393030363538356262346435343333373636
17 | 34343261623832306139623337353137646435613433346630643865333965303334393666336534
18 | 61353035373034323864356636643930333638396564616134353536663164363932643364656162
19 | 35366139363939663632353066373932363961656464393131373239356663303736653334336531
20 | 35303236303065363764313432643664333532343134393965323963636664663536376632323538
21 | 38356335383934636631643436356563636364646136333637666331363261656236346539373233
22 | 37306330306531623464663031626337346339613630363635633161336366653638626339356662
23 | 63383836613863646436346233376563353037373666313631393161333133633132666633663361
24 | 326132663033396335306165333862666433
25 |
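Encrypted vars files like this one are managed with the stock ansible-vault workflow, e.g.:

    # Both prompt for the vault password
    ansible-vault view ansible/roles/bsky/vars/vault.yml
    ansible-vault edit ansible/roles/bsky/vars/vault.yml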
--------------------------------------------------------------------------------
/ansible/roles/comentario/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | comentario:
3 | image: registry.gitlab.com/comentario/comentario:v3.13.1
4 | restart: unless-stopped
5 | user: "{{ docker_user.id }}:{{ docker_user.id }}"
6 | depends_on:
7 | - db
8 | networks:
9 | - default
10 | - coredns
11 | volumes:
12 | - ./secrets.yml:/comentario/secrets.yaml
13 | environment:
14 | - BASE_URL=https://comentario.theorangeone.net
15 | - NO_PAGE_VIEW_STATS=true
16 | - LOG_FULL_IPS=true
17 |
18 | db:
19 | image: postgres:14-alpine
20 | restart: unless-stopped
21 | volumes:
22 | - ./postgres:/var/lib/postgresql/data
23 | environment:
24 | - POSTGRES_PASSWORD=comentario
25 | - POSTGRES_USER=comentario
26 |
27 | networks:
28 | coredns:
29 | external: true
30 |
--------------------------------------------------------------------------------
/ansible/roles/comentario/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart comentario
2 | shell:
3 | chdir: /opt/comentario
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/comentario/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/comentario
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install compose file
12 | template:
13 | src: files/docker-compose.yml
14 | dest: /opt/comentario/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | notify: restart comentario
19 |
20 | - name: Install secrets
21 | copy:
22 | content: "{{ comentario_secrets | to_nice_yaml }}"
23 | dest: /opt/comentario/secrets.yml
24 | mode: "600"
25 | owner: "{{ docker_user.name }}"
26 | notify: restart comentario
27 |
28 | - name: Install nginx config
29 | template:
30 | src: files/nginx-docker.conf
31 | dest: /etc/nginx/http.d/comentario.conf
32 | mode: "0644"
33 | notify: reload nginx
34 | vars:
35 | server_name: comentario.theorangeone.net
36 | upstream: comentario-comentario-1.docker:80
37 | ssl_cert_path: /etc/letsencrypt/live/comentario.theorangeone.net
38 |
--------------------------------------------------------------------------------
/ansible/roles/comentario/vars/main.yml:
--------------------------------------------------------------------------------
1 | comentario_secrets:
2 | postgres:
3 | host: db
4 | database: comentario
5 | username: comentario
6 | password: comentario
7 | idp:
8 | github:
9 | key: "{{ vault_comentario_github_client_id }}"
10 | secret: "{{ vault_comentario_github_client_secret }}"
11 | gitlab:
12 | key: "{{ vault_comentario_gitlab_application_id }}"
13 | secret: "{{ vault_comentario_gitlab_application_secret }}"
14 | twitter:
15 | key: "{{ vault_comentario_twitter_api_key }}"
16 | secret: "{{ vault_comentario_twitter_api_secret }}"
17 | smtpServer:
18 | host: smtp.eu.mailgun.org
19 | port: 587
20 | username: "{{ vault_comentario_smtp_username }}"
21 | password: "{{ vault_comentario_smtp_password }}"
22 |
--------------------------------------------------------------------------------
/ansible/roles/coredns_docker_proxy/files/Corefile:
--------------------------------------------------------------------------------
1 | . {
2 | errors
3 | cancel
4 |
5 | # Only allow requests to `.docker` records
6 | view docker {
7 | expr name() endsWith '.docker.'
8 | }
9 |
10 | # Strip the `.docker` suffix
11 | rewrite name suffix .docker . answer auto
12 |
13 | # Forward requests to Docker's DNS server
14 | forward . 127.0.0.11
15 | }
16 |
17 | . {
18 | acl {
19 | block
20 | }
21 | }
22 |
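With the proxy up (the compose file below binds it to {{ private_ip }}:53053/udp), `.docker` names resolve from the host while everything else is refused by the `acl` block. The IP and container name here are illustrative stand-ins:

    # Substitute the host's private IP for the templated {{ private_ip }}
    dig @192.168.1.10 -p 53053 comentario-comentario-1.docker +short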
--------------------------------------------------------------------------------
/ansible/roles/coredns_docker_proxy/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | coredns:
3 | image: coredns/coredns:latest
4 | restart: unless-stopped
5 | volumes:
6 | - ./Corefile:/Corefile:ro
7 | ports:
8 | - "{{ private_ip }}:53053:53/udp"
9 | networks:
10 | - default
11 | - coredns
12 |
13 | networks:
14 | coredns:
15 | external: true
16 |
--------------------------------------------------------------------------------
/ansible/roles/coredns_docker_proxy/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart coredns
2 | shell:
3 | chdir: /opt/coredns
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/coredns_docker_proxy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create network
2 | docker_network:
3 | name: coredns
4 |
5 | - name: Create install directory
6 | file:
7 | path: /opt/coredns
8 | state: directory
9 | owner: "{{ docker_user.name }}"
10 | mode: "{{ docker_compose_directory_mask }}"
11 |
12 | - name: Install compose file
13 | template:
14 | src: files/docker-compose.yml
15 | dest: /opt/coredns/docker-compose.yml
16 | mode: "{{ docker_compose_file_mask }}"
17 | owner: "{{ docker_user.name }}"
18 | validate: docker-compose -f %s config
19 | notify: restart coredns
20 |
21 | - name: Install Corefile
22 | template:
23 | src: files/Corefile
24 | dest: /opt/coredns/Corefile
25 | mode: "{{ docker_compose_file_mask }}"
26 | owner: "{{ docker_user.name }}"
27 | notify: restart coredns
28 |
--------------------------------------------------------------------------------
/ansible/roles/db_auto_backup/defaults/main.yml:
--------------------------------------------------------------------------------
1 | db_backups_dir: ./backups
2 |
--------------------------------------------------------------------------------
/ansible/roles/db_auto_backup/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | backup:
3 | image: ghcr.io/realorangeone/db-auto-backup:latest
4 | restart: unless-stopped
5 | volumes:
6 | - "{{ db_backups_dir }}:/var/backups"
7 | environment:
8 | - DOCKER_HOST=tcp://docker_proxy:2375
9 | - HEALTHCHECKS_ID={{ vault_db_auto_backup_healthchecks_id }}
10 | depends_on:
11 | - docker_proxy
12 | networks:
13 | - default
14 | - backup_private
15 |
16 | docker_proxy:
17 | image: lscr.io/linuxserver/socket-proxy:latest
18 | restart: unless-stopped
19 | environment:
20 | - POST=1
21 | - CONTAINERS=1
22 | - IMAGES=1
23 | - EXEC=1
24 | volumes:
25 | - /var/run/docker.sock:/var/run/docker.sock:ro
26 | networks:
27 | - backup_private
28 | tmpfs:
29 | - /run
30 | logging:
31 | driver: none
32 |
33 | networks:
34 | backup_private:
35 | internal: true
36 |
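The socket-proxy pattern keeps the raw Docker socket away from the backup container: only the whitelisted API sections (POST, CONTAINERS, IMAGES, EXEC) are reachable, and only over the internal backup_private network. A hedged check from a container attached to that network:

    # Whitelisted: listing containers succeeds
    curl -s http://docker_proxy:2375/containers/json
    # Not whitelisted: the proxy should deny, e.g. the networks endpoint
    curl -s -o /dev/null -w '%{http_code}\n' http://docker_proxy:2375/networks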
--------------------------------------------------------------------------------
/ansible/roles/db_auto_backup/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart db-auto-backup
2 | shell:
3 | chdir: /opt/db-auto-backup
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/db_auto_backup/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create install directory
2 | file:
3 | path: /opt/db-auto-backup
4 | state: directory
5 | owner: "{{ docker_user.name }}"
6 | mode: "{{ docker_compose_directory_mask }}"
7 |
8 | - name: Install compose file
9 | template:
10 | src: files/docker-compose.yml
11 | dest: /opt/db-auto-backup/docker-compose.yml
12 | mode: "{{ docker_compose_file_mask }}"
13 | owner: "{{ docker_user.name }}"
14 | validate: docker-compose -f %s config
15 | notify: restart db-auto-backup
16 |
--------------------------------------------------------------------------------
/ansible/roles/docker_cleanup/defaults/main.yml:
--------------------------------------------------------------------------------
1 | docker_zfs_override: false
2 |
--------------------------------------------------------------------------------
/ansible/roles/docker_cleanup/files/docker-utils/ctop:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -ex
4 |
5 | docker run --rm -it --name=ctop --volume /var/run/docker.sock:/var/run/docker.sock:ro quay.io/vektorlab/ctop:latest
6 |
--------------------------------------------------------------------------------
/ansible/roles/docker_cleanup/files/docker-utils/dc:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | exec docker-compose -f "/opt/$1/docker-compose.yml" "${@:2}"
6 |
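Usage: the first argument names a directory under /opt, and everything after it is passed straight to docker-compose, so any subcommand works against any deployment:

    dc gitea ps
    dc gitea logs --tail 50 -f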
--------------------------------------------------------------------------------
/ansible/roles/docker_cleanup/files/docker-utils/dc-all:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | # Without `shopt -s globstar`, bash treats `**` like `*`, which still
6 | # matches the flat /opt/<app>/docker-compose.yml layout used here.
7 | for compose_file in /opt/**/docker-compose.yml; do
8 | docker-compose -f "$compose_file" "$@"
9 | done
10 |
--------------------------------------------------------------------------------
/ansible/roles/docker_cleanup/files/docker-utils/hard-restart-all:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | for compose_file in /opt/**/docker-compose.yml; do
6 | if [[ $(docker-compose -f "$compose_file" ps -q) ]]; then
7 | docker-compose -f "$compose_file" down --remove-orphans
8 | docker-compose -f "$compose_file" up -d
9 | else
10 | echo "> Skipping $compose_file as it's not running."
11 | fi
12 | done
13 |
--------------------------------------------------------------------------------
/ansible/roles/docker_cleanup/files/docker-utils/update-all:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | for compose_file in /opt/**/docker-compose.yml; do
6 | if [[ $(docker-compose -f "$compose_file" ps -q) ]]; then
7 | echo "> Updating $compose_file"
8 | docker-compose -f "$compose_file" pull
9 | docker-compose -f "$compose_file" up -d --remove-orphans
10 | else
11 | echo "> Skipping $compose_file as it's not running."
12 | fi
13 | done
14 |
--------------------------------------------------------------------------------
/ansible/roles/docker_cleanup/files/zfs-override.conf:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Requires=zfs.target
3 | After=zfs.target
4 |
--------------------------------------------------------------------------------
/ansible/roles/docker_cleanup/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install docker-compose
2 | package:
3 | name: docker-compose
4 | when: ansible_os_family != 'Debian'
5 |
6 | - name: Install compose-switch
7 | get_url:
8 | url: "{{ docker_compose_url }}"
9 | dest: "{{ docker_compose_path }}"
10 | mode: "0755"
11 | when: ansible_os_family == 'Debian'
12 |
13 | - name: Create docker group
14 | group:
15 | name: "{{ docker_user.name }}"
16 | state: present
17 | gid: "{{ docker_user.id }}"
18 |
19 | - name: Create docker user
20 | user:
21 | name: "{{ docker_user.name }}"
22 | uid: "{{ docker_user.id }}"
23 | group: "{{ docker_user.name }}"
24 | create_home: false
25 |
26 | - name: Add user to docker user group
27 | user:
28 | name: "{{ me.user }}"
29 | groups: "{{ docker_user.name }}"
30 | append: true
31 |
32 | - name: Add user to docker group
33 | user:
34 | name: "{{ me.user }}"
35 | groups: docker
36 | append: true
37 |
38 | - name: Clean up docker containers
39 | cron:
40 | name: clean up docker containers
41 | hour: 1
42 | minute: 0
43 | job: docker system prune -af --volumes
44 | cron_file: docker_cleanup
45 | user: root
46 |
47 | - name: Install util scripts
48 | copy:
49 | src: ./files/docker-utils
50 | dest: "{{ me.home }}"
51 | mode: "755"
52 | directory_mode: "755"
53 | owner: "{{ me.user }}"
54 |
55 | - name: Override Docker service for ZFS dependencies
56 | include_tasks: zfs-override.yml
57 | when: docker_zfs_override
58 |
--------------------------------------------------------------------------------
/ansible/roles/docker_cleanup/tasks/zfs-override.yml:
--------------------------------------------------------------------------------
1 | - name: Create dir for docker service ZFS override
2 | file:
3 | path: /etc/systemd/system/docker.service.d
4 | state: directory
5 | mode: "0755"
6 |
7 | - name: Create override.conf
8 | copy:
9 | src: files/zfs-override.conf
10 | dest: /etc/systemd/system/docker.service.d/zfs-override.conf
11 | owner: root
12 | group: root
13 | mode: "0644"
14 |
--------------------------------------------------------------------------------
/ansible/roles/forgejo/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | forgejo:
3 | image: code.forgejo.org/forgejo/forgejo:11-rootless
4 | user: "{{ docker_user.id }}:{{ docker_user.id }}"
5 | environment:
6 | - TZ={{ timezone }}
7 | volumes:
8 | - /etc/timezone:/etc/timezone:ro
9 | - /etc/localtime:/etc/localtime:ro
10 | - "{{ app_data_dir }}/gitea/data:/var/lib/gitea"
11 | - "{{ app_data_dir }}/gitea/config:/etc/gitea"
12 | - /mnt/tank/files/gitea-repositories/repositories:/mnt/repositories
13 | - /mnt/tank/files/gitea-repositories/lfs:/mnt/lfs
14 | - /mnt/tank/files/gitea-repositories/archive:/mnt/repo-archive
15 | tmpfs:
16 | - /var/lib/gitea/tmp
17 | restart: unless-stopped
18 | ports:
19 | - "{{ pve_hosts.docker.ip }}:2222:2222"
20 | depends_on:
21 | - db
22 | - redis
23 | labels:
24 | - traefik.enable=true
25 | - traefik.http.routers.forgejo.rule=Host(`git.theorangeone.net`)
26 | - traefik.http.services.forgejo-forgejo.loadbalancer.server.port=3000
27 | networks:
28 | - default
29 | - traefik
30 |
31 | db:
32 | image: postgres:14-alpine
33 | restart: unless-stopped
34 | volumes:
35 | - /mnt/speed/dbs/postgres/gitea:/var/lib/postgresql/data
36 | environment:
37 | - POSTGRES_PASSWORD=gitea
38 | - POSTGRES_USER=gitea
39 |
40 | redis:
41 | image: redis:7-alpine
42 | restart: unless-stopped
43 | volumes:
44 | - /mnt/speed/dbs/redis/gitea:/data
45 |
46 | networks:
47 | traefik:
48 | external: true
49 |
--------------------------------------------------------------------------------
/ansible/roles/forgejo/files/footer.html:
--------------------------------------------------------------------------------
1 | {{ if not .IsSigned }}
2 |
3 | {{ end }}
4 |
--------------------------------------------------------------------------------
/ansible/roles/forgejo/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart gitea
2 | shell:
3 | chdir: /opt/gitea
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/forgejo/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/gitea
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install compose file
12 | template:
13 | src: files/docker-compose.yml
14 | dest: /opt/gitea/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | notify: restart gitea
19 |
20 | - name: Install config file
21 | template:
22 | src: files/app.ini
23 | dest: "{{ app_data_dir }}/gitea/config/app.ini"
24 | mode: "{{ docker_compose_file_mask }}"
25 | owner: "{{ docker_user.name }}"
26 | notify: restart gitea
27 |
28 | - name: Create custom templates directory
29 | file:
30 | path: "{{ app_data_dir }}/gitea/data/custom/templates/custom"
31 | state: directory
32 | owner: "{{ docker_user.name }}"
33 | mode: "{{ docker_compose_directory_mask }}"
34 | recurse: true
35 |
36 | - name: Install custom footer
37 | copy:
38 | src: files/footer.html
39 | dest: "{{ app_data_dir }}/gitea/data/custom/templates/custom/footer.tmpl"
40 | owner: "{{ docker_user.name }}"
41 | mode: "{{ docker_compose_file_mask }}"
42 | notify: restart gitea
43 |
--------------------------------------------------------------------------------
/ansible/roles/forgejo_runner/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | forgejo-runner:
3 | image: code.forgejo.org/forgejo/runner:6.3.1
4 | user: "{{ docker_user.id }}"
5 | volumes:
6 | - /mnt/data:/data
7 | - ./config.yml:/data/config.yml
8 | environment:
9 | - TZ={{ timezone }}
10 | - DOCKER_HOST=tcp://docker_proxy:2375
11 | restart: unless-stopped
12 | command: forgejo-runner --config config.yml daemon
13 | networks:
14 | - default
15 | - forgejo_private
16 | depends_on:
17 | - docker_proxy
18 | ports:
19 | - "{{ ansible_default_ipv4.address }}:4000:4000"
20 |
21 | docker_proxy:
22 | image: lscr.io/linuxserver/socket-proxy:latest
23 | restart: unless-stopped
24 | environment:
25 | - POST=1
26 | - CONTAINERS=1
27 | - INFO=1
28 | - IMAGES=1
29 | - VOLUMES=1
30 | - NETWORKS=1
31 | - ALLOW_START=1
32 | - ALLOW_STOP=1
33 | - ALLOW_RESTARTS=1
34 | - EXEC=1
35 | tmpfs:
36 | - /run
37 | volumes:
38 | - /var/run/docker.sock:/var/run/docker.sock:ro
39 | networks:
40 | - forgejo_private
41 | logging:
42 | driver: none
43 |
44 | networks:
45 | forgejo_private:
46 | internal: true
47 |
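The runner still needs a one-off registration against the Forgejo instance before the daemon picks up jobs. A sketch assuming the upstream act_runner-style flags (the token comes from the Forgejo admin UI; the runner name is illustrative):

    docker compose run --rm forgejo-runner \
      forgejo-runner register --no-interactive \
      --instance https://git.theorangeone.net \
      --token "$RUNNER_TOKEN" \
      --name docker-runner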
--------------------------------------------------------------------------------
/ansible/roles/forgejo_runner/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart forgejo-runner
2 | shell:
3 | chdir: /opt/forgejo-runner
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/forgejo_runner/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create install directory
2 | file:
3 | path: /opt/forgejo-runner
4 | state: directory
5 | owner: "{{ docker_user.name }}"
6 | mode: "{{ docker_compose_directory_mask }}"
7 |
8 | - name: Install config file
9 | template:
10 | src: files/config.yml
11 | dest: /opt/forgejo-runner/config.yml
12 | mode: "600"
13 | owner: "{{ docker_user.name }}"
14 | notify: restart forgejo-runner
15 |
16 | - name: Install compose file
17 | template:
18 | src: files/docker-compose.yml
19 | dest: /opt/forgejo-runner/docker-compose.yml
20 | mode: "{{ docker_compose_file_mask }}"
21 | owner: "{{ docker_user.name }}"
22 | validate: docker-compose -f %s config
23 | notify: restart forgejo-runner
24 |
--------------------------------------------------------------------------------
/ansible/roles/freshrss/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | freshrss:
3 | image: freshrss/freshrss:latest
4 | restart: unless-stopped
5 | environment:
6 | - TZ=Europe/London
7 | - OIDC_ENABLED=1
8 | - OIDC_PROVIDER_METADATA_URL=https://auth.jakehoward.tech/.well-known/openid-configuration
9 | - OIDC_CLIENT_ID={{ vault_oidc_client_id }}
10 | - OIDC_CLIENT_SECRET={{ vault_oidc_client_secret }}
11 | - OIDC_SCOPES=openid profile email
12 | - OIDC_X_FORWARDED_HEADERS=X-Forwarded-Proto X-Forwarded-Host
13 | - CRON_MIN=*/15
14 | labels:
15 | - traefik.enable=true
16 | - traefik.http.routers.freshrss.rule=Host(`freshrss.jakehoward.tech`)
17 | - traefik.http.routers.freshrss.middlewares=tailscale-only@file
18 | volumes:
19 | - "{{ app_data_dir }}/freshrss:/var/www/FreshRSS/data"
20 | - ./extensions:/var/www/FreshRSS/extensions
21 | depends_on:
22 | - db
23 | networks:
24 | - default
25 | - traefik
26 |
27 | db:
28 | image: postgres:14-alpine
29 | restart: unless-stopped
30 | volumes:
31 | - /mnt/speed/dbs/postgres/freshrss:/var/lib/postgresql/data
32 | environment:
33 | - POSTGRES_PASSWORD=freshrss
34 | - POSTGRES_USER=freshrss
35 |
36 | networks:
37 | traefik:
38 | external: true
39 |
--------------------------------------------------------------------------------
/ansible/roles/freshrss/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart freshrss
2 | shell:
3 | chdir: /opt/freshrss
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/freshrss/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/freshrss
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Create extensions directory
12 | file:
13 | path: /opt/freshrss/extensions
14 | state: directory
15 | owner: www-data
16 | mode: "{{ docker_compose_directory_mask }}"
17 | register: extensions_dir
18 |
19 | - name: Install compose file
20 | template:
21 | src: files/docker-compose.yml
22 | dest: /opt/freshrss/docker-compose.yml
23 | mode: "{{ docker_compose_file_mask }}"
24 | owner: "{{ docker_user.name }}"
25 | validate: docker-compose -f %s config
26 | notify: restart freshrss
27 |
28 | - name: Install three panes extension
29 | git:
30 | repo: https://framagit.org/nicofrand/xextension-threepanesview
31 | dest: "{{ extensions_dir.path }}/xextension-threepanesview"
32 | depth: 1
33 | become: true
34 | become_user: www-data
35 | notify: restart freshrss
36 |
--------------------------------------------------------------------------------
/ansible/roles/freshrss/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 63323639343139643530393939613035623633336236326364653239386133386261623837346664
3 | 3562383630313232663036663730316237386662373130630a623532666465633161383832316535
4 | 30643939303966323962343331336565306665613831656232316339303862666332353938313239
5 | 6437666438333963300a393136646465633531643135313736313666613637303038313333396561
6 | 30313432646631623463376135613963326134363837343766623262336263613939616134373631
7 | 37336364323134656432376236613766636635373736656263666661323866306532656231306665
8 | 36333033663961303838363039363832663638633664333036306532386336353232386637393862
9 | 32613437303637333034343433396665323665663136396637303533663834633663663630323839
10 | 38323964663131306230376334323936326533316132316531303363303338383639616331383433
11 | 3033313234666632653663646230633766656339373866363735
12 |
--------------------------------------------------------------------------------
/ansible/roles/gateway/files/nginx-cdn.conf:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
3 | proxy_cache_path /var/lib/nginx/cache levels=1:2 keys_zone=cdncache:20m max_size=1g inactive=48h;
4 |
5 | {% for domain in cdn_domains %}
6 | server {
7 | listen 8800 ssl http2 proxy_protocol;
8 |
9 | server_name {{ domain }};
10 |
11 | ssl_certificate /etc/letsencrypt/live/{{ domain }}/fullchain.pem;
12 | ssl_certificate_key /etc/letsencrypt/live/{{ domain }}/privkey.pem;
13 | ssl_trusted_certificate /etc/letsencrypt/live/{{ domain }}/chain.pem;
14 |
15 | include includes/ssl.conf;
16 |
17 | real_ip_header proxy_protocol;
18 |
19 | set_real_ip_from 127.0.0.1;
20 |
21 | proxy_cache_use_stale error timeout http_500 http_502 http_503 http_504;
22 |
23 | location / {
24 | proxy_cache cdncache;
25 | add_header X-Cache-Status $upstream_cache_status;
26 | proxy_pass https://{{ wireguard.clients.ingress.ip }}:443;
27 | }
28 | }
29 | {% endfor %}
30 |
--------------------------------------------------------------------------------
/ansible/roles/gateway/files/nginx-fail2ban-filter.conf:
--------------------------------------------------------------------------------
1 | [Definition]
2 |
3 | failregex = ^ .*$
4 | ignoreregex =
5 |
--------------------------------------------------------------------------------
/ansible/roles/gateway/files/nginx-fail2ban-jail.conf:
--------------------------------------------------------------------------------
1 | [nginx]
2 | enabled = true
3 | bantime = 600
4 | findtime = 10
5 | maxretry = 100
6 | filter = nginx-tcp
7 | logpath = /var/log/nginx/ips.log
8 | port = http,https,8448
9 | ignoreip = {{ wireguard.cidr }},{{ pve_hosts.internal_cidr }},{{ pve_hosts.internal_cidr_ipv6 }},{{ vps_hosts.values()|sort|join(",") }},{{ tailscale_cidr }}
10 |
11 | [traefik]
12 | enabled = true
13 | port = http,https,8448
14 | ignoreip = {{ wireguard.cidr }},{{ pve_hosts.internal_cidr }},{{ pve_hosts.internal_cidr_ipv6 }},{{ vps_hosts.values()|sort|join(",") }},{{ tailscale_cidr }}
15 |
--------------------------------------------------------------------------------
/ansible/roles/gateway/files/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format gateway '$remote_addr [$time_local] '
2 | '$protocol $status $bytes_sent $bytes_received '
3 | '$session_time "$ssl_preread_server_name" '
4 | '"$upstream_bytes_sent" "$upstream_bytes_received" "$upstream_connect_time"';
5 |
6 | log_format ips '$remote_addr [$time_local] $ssl_preread_server_name';
7 |
8 | access_log /var/log/nginx/gateway.log gateway;
9 | access_log /var/log/nginx/ips.log ips;
10 |
11 | map $ssl_preread_server_name $gateway_destination {
12 | default {{ wireguard.clients.ingress.ip }}:8443;
13 |
14 | headscale.jakehoward.tech 127.0.0.1:8888;
15 |
16 | {% for domain in cdn_domains %}
17 | {{ domain }} 127.0.0.1:8800;
18 | {% endfor %}
19 | }
20 |
21 | server {
22 | listen 443;
23 | listen 8448;
24 | listen [::]:443;
25 | listen [::]:8448;
26 | proxy_pass $gateway_destination;
27 | proxy_protocol on;
28 | }
29 |
30 | server {
31 | listen [{{ vps_hosts.private_ipv6_marker }}]:443;
32 | listen [{{ vps_hosts.private_ipv6_marker }}]:8448;
33 |
34 | access_log off;
35 |
36 | deny all;
37 |
38 | # Never used, but required to keep nginx happy
39 | proxy_pass 127.0.0.1:80;
40 | }
41 |
--------------------------------------------------------------------------------
/ansible/roles/gateway/files/wireguard-client.conf:
--------------------------------------------------------------------------------
1 | [Interface]
2 | Address = {{ item.value.ip }}
3 | PrivateKey = {{ item.value.private_key }}
4 |
5 | [Peer]
6 | PublicKey = {{ wireguard.server.public_key }}
7 | Endpoint = {{ wireguard.public_ip }}:{{ wireguard.port }}
8 | AllowedIPs = {{ wireguard.cidr }}
9 |
10 | PersistentKeepalive = 25
11 |
--------------------------------------------------------------------------------
/ansible/roles/gateway/files/wireguard-server.conf:
--------------------------------------------------------------------------------
1 | [Interface]
2 | Address = {{ wireguard.server.ip }}
3 | PrivateKey = {{ wireguard.server.private_key }}
4 | ListenPort = {{ wireguard.port }}
5 |
6 | {% for name, config in wireguard.clients.items() %}
7 | [Peer]
8 | # {{ name }}
9 | PublicKey = {{ config.public_key }}
10 | AllowedIPs = {{ config.ip }}/32
11 | {% endfor %}
12 |
--------------------------------------------------------------------------------
/ansible/roles/gateway/tasks/fail2ban.yml:
--------------------------------------------------------------------------------
1 | - name: fail2ban filter
2 | template:
3 | src: files/nginx-fail2ban-filter.conf
4 | dest: /etc/fail2ban/filter.d/nginx-tcp.conf
5 | mode: "0600"
6 | register: fail2ban_filter
7 |
8 | - name: fail2ban jail
9 | template:
10 | src: files/nginx-fail2ban-jail.conf
11 | dest: /etc/fail2ban/jail.d/nginx.conf
12 | mode: "0600"
13 | register: fail2ban_jail
14 |
15 | - name: Restart fail2ban
16 | service:
17 | name: fail2ban
18 | state: restarted
19 | when: fail2ban_filter.changed or fail2ban_jail.changed
20 |
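Once deployed, the jail (named `nginx` in the jail file above) can be checked in place:

    # Show match counts and currently banned IPs
    fail2ban-client status nginx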
--------------------------------------------------------------------------------
/ansible/roles/gateway/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Configure Nginx
2 | include_tasks: nginx.yml
3 |
4 | - name: Configure wireguard
5 | include_tasks: wireguard.yml
6 |
7 | - name: Configure fail2ban
8 | include_tasks: fail2ban.yml
9 |
--------------------------------------------------------------------------------
/ansible/roles/gateway/tasks/nginx.yml:
--------------------------------------------------------------------------------
1 | - name: Nginx config
2 | template:
3 | src: files/nginx.conf
4 | dest: /etc/nginx/stream.d/gateway.conf
5 | mode: "0644"
6 | register: nginx_config
7 |
8 | - name: Install CDN config
9 | template:
10 | src: files/nginx-cdn.conf
11 | dest: /etc/nginx/http.d/cdn.conf
12 | mode: "0644"
13 | register: nginx_cdn_config
14 |
15 | - name: Reload Nginx
16 | service:
17 | name: nginx
18 | state: reloaded
19 | when: nginx_config.changed or nginx_cdn_config.changed
20 |
--------------------------------------------------------------------------------
/ansible/roles/gateway/tasks/wireguard.yml:
--------------------------------------------------------------------------------
1 | - name: Install wireguard tools
2 | package:
3 | name: "{{ item }}"
4 | loop:
5 | - wireguard-tools
6 | - qrencode
7 |
8 | - name: Wireguard server config
9 | template:
10 | src: files/wireguard-server.conf
11 | dest: /etc/wireguard/wg0.conf
12 | mode: "0600"
13 | backup: true
14 | register: wireguard_conf
15 |
16 | - name: Enable wireguard
17 | service:
18 | name: wg-quick@wg0
19 | enabled: true
20 |
21 | - name: Restart wireguard
22 | service:
23 | name: wg-quick@wg0
24 | state: restarted
25 | when: wireguard_conf.changed
26 |
27 | - name: Create wireguard client directory
28 | file:
29 | path: "{{ me.home }}/wireguard-clients"
30 | state: directory
31 | owner: "{{ me.user }}"
32 | mode: "700"
33 |
34 | - name: Wireguard client configuration
35 | template:
36 | src: files/wireguard-client.conf
37 | dest: "{{ me.home }}/wireguard-clients/{{ item.key }}.conf"
38 | owner: "{{ me.user }}"
39 | mode: "600"
40 | loop: "{{ wireguard.clients | dict2items }}"
41 | loop_control:
42 | label: "{{ item.key }}"
43 |
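Adding a peer means generating a keypair, adding it to the `wireguard.clients` dict, and re-running the role; the rendered client config can then be moved to a device via QR code (qrencode is installed by the first task). The filenames are illustrative:

    # Generate a keypair for the new client entry
    wg genkey | tee client.key | wg pubkey > client.pub
    # Render the templated client config as a terminal QR code
    qrencode -t ansiutf8 < ~/wireguard-clients/phone.conf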
--------------------------------------------------------------------------------
/ansible/roles/glinet_vpn/files/client.conf:
--------------------------------------------------------------------------------
1 | [Interface]
2 | Address = {{ client_cidr }}
3 | PrivateKey = {{ client_private_key }}
4 |
5 | [Peer]
6 | PublicKey = {{ server_public_key }}
7 | Endpoint = {{ server_public_ip }}:53
8 | AllowedIPs = 0.0.0.0/0, ::/0
9 |
10 | PersistentKeepalive = 25
11 |
--------------------------------------------------------------------------------
/ansible/roles/glinet_vpn/files/server.conf:
--------------------------------------------------------------------------------
1 | [Interface]
2 | Address = {{ server_ip }}
3 | PrivateKey = {{ server_private_key }}
4 | ListenPort = 53
5 |
6 | PostUp = iptables -A FORWARD -i %i -j ACCEPT; iptables -A FORWARD -o %i -j ACCEPT; iptables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
7 | PostDown = iptables -D FORWARD -i %i -j ACCEPT; iptables -D FORWARD -o %i -j ACCEPT; iptables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
8 |
9 | PostUp = ip6tables -A FORWARD -i %i -j ACCEPT; ip6tables -A FORWARD -o %i -j ACCEPT; ip6tables -t nat -A POSTROUTING -o eth0 -j MASQUERADE
10 | PostDown = ip6tables -D FORWARD -i %i -j ACCEPT; ip6tables -D FORWARD -o %i -j ACCEPT; ip6tables -t nat -D POSTROUTING -o eth0 -j MASQUERADE
11 |
12 | [Peer]
13 | PublicKey = {{ client_public_key }}
14 | AllowedIPs = {{ client_cidr }}
15 |
--------------------------------------------------------------------------------
/ansible/roles/glinet_vpn/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart wireguard
2 | service:
3 | name: wg-quick@glinet
4 | state: restarted
5 |
--------------------------------------------------------------------------------
/ansible/roles/glinet_vpn/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Install wireguard tools
5 | package:
6 | name: "{{ item }}"
7 | loop:
8 | - wireguard-tools
9 | - qrencode
10 |
11 | - name: Wireguard server config
12 | template:
13 | src: files/server.conf
14 | dest: /etc/wireguard/glinet.conf
15 | mode: "0600"
16 | backup: true
17 | notify: restart wireguard
18 |
19 | - name: Wireguard client config
20 | template:
21 | src: files/client.conf
22 | dest: "{{ me.home }}/glinet-vpn.conf"
23 | mode: "0600"
24 | owner: "{{ me.user }}"
25 | notify: restart wireguard
26 |
27 | - name: Enable wireguard
28 | service:
29 | name: wg-quick@glinet
30 | enabled: true
31 |
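Since the tunnel runs as a standard wg-quick unit, verification is the usual:

    systemctl status wg-quick@glinet
    # Shows the peer, endpoint and last handshake
    wg show glinet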
--------------------------------------------------------------------------------
/ansible/roles/glinet_vpn/vars/main.yml:
--------------------------------------------------------------------------------
1 | client_public_key: "{{ vault_client_public_key }}"
2 | client_private_key: "{{ vault_client_private_key }}"
3 | client_cidr: 10.23.4.2/24
4 |
5 | server_public_key: "{{ vault_server_public_key }}"
6 | server_private_key: "{{ vault_server_private_key }}"
7 | server_public_ip: "{{ ansible_default_ipv4.address }}"
8 | server_ip: 10.23.4.1
9 |
--------------------------------------------------------------------------------
/ansible/roles/glinet_vpn/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 35366163656631633636333937333238346539653236323463316333356637623263326436623130
3 | 3333616234643935306337386165623734333265663237610a326538636532643835373137316333
4 | 30363133343035353235616639613637353435303863393130396261623063633836383430326530
5 | 3634313639353264310a393266313230646132656561393737363834646566313765633235343139
6 | 36303834353039303134393061386634373735316135656564386464363863376265633239313037
7 | 62616535313239353233376163343437303933346264323266386533336138656135663664356164
8 | 65643262303436343164613133333361393438616234616566336131636461383538326130623264
9 | 62313134386430636665646539306661383039323339373838346164653836326536386332616634
10 | 34313331623166356137363131356130623863313339663938386138643538323666616239656662
11 | 36313534323237306631663931633830346565616139313864333762356330643131343630653535
12 | 62323939376163363436336633386433323435316535623462353138386430333332653966383262
13 | 33636534346466326631333362343638616332633163623533613364326665376565643739666261
14 | 34646533613133313034366636623134613336623134356562393335313337336336623634336633
15 | 66623365353866396564386536386330353537383866616665373762306530356333643265326537
16 | 38353138626331623433643636623130613766616638343034633536306232316133303133356463
17 | 36616665643264396137336234316466306238303461363531653461623834376361653334326235
18 | 31366530636565383062313562663639393534373737363465656538393266363936333136636161
19 | 3239303565613865633433313237393932306632633633373261
20 |
--------------------------------------------------------------------------------
/ansible/roles/headscale/files/acls.json:
--------------------------------------------------------------------------------
1 | {
2 | "tagOwners": {
3 | "tag:client": [],
4 | "tag:private-svcs": []
5 |
6 | },
7 | "acls": [
8 | {
9 | "action": "accept",
10 | "src": ["tag:client"],
11 | "dst": ["*:*"]
12 | },
13 | {
14 | "action": "accept",
15 | "src": ["tag:private-svcs"],
16 | "dst": ["{{ vps_hosts.private_ipv6_marker }}:80,443"]
17 | }
18 | ]
19 | }
20 |
--------------------------------------------------------------------------------
/ansible/roles/headscale/files/nginx.conf:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 |
3 | limit_req_zone $binary_remote_addr zone=headscale:10m rate=1r/m;
4 |
5 | server {
6 | listen 8888 ssl http2 proxy_protocol;
7 |
8 | server_name headscale.jakehoward.tech;
9 |
10 | ssl_certificate /etc/letsencrypt/live/headscale.jakehoward.tech/fullchain.pem;
11 | ssl_certificate_key /etc/letsencrypt/live/headscale.jakehoward.tech/privkey.pem;
12 | ssl_trusted_certificate /etc/letsencrypt/live/headscale.jakehoward.tech/chain.pem;
13 | include includes/ssl.conf;
14 |
15 | real_ip_header proxy_protocol;
16 |
17 | set_real_ip_from 127.0.0.1;
18 |
19 | location / {
20 | proxy_pass http://localhost:8416;
21 | }
22 |
23 | location /oidc {
24 | # A burst of 3 covers the redirect and callback, plus one error
25 | limit_req zone=headscale burst=3 nodelay;
26 | limit_req_status 429;
27 |
28 | proxy_pass http://localhost:8416;
29 | }
30 |
31 | # Block access to the API entirely - I'm not using it
32 | location /api {
33 | return 403;
34 | }
35 | }
36 |
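The /oidc rate limit above (1 r/m with a burst of 3, nodelay) can be exercised with repeated requests; within one minute the fifth should come back as 429:

    for i in 1 2 3 4 5; do
      curl -s -o /dev/null -w '%{http_code}\n' https://headscale.jakehoward.tech/oidc/
    done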
--------------------------------------------------------------------------------
/ansible/roles/headscale/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart headscale
2 | service:
3 | name: headscale
4 | state: restarted
5 | enabled: true
6 |
--------------------------------------------------------------------------------
/ansible/roles/headscale/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Install Headscale
5 | package:
6 | name: headscale
7 |
8 | - name: Install headscale config file
9 | template:
10 | src: files/headscale.yml
11 | dest: /etc/headscale/config.yaml
12 | owner: headscale
13 | mode: "0600"
14 | notify: restart headscale
15 |
16 | - name: Install ACLs
17 | template:
18 | src: files/acls.json
19 | dest: /etc/headscale/acls.json
20 | owner: headscale
21 | mode: "0600"
22 | notify: restart headscale
23 |
24 | - name: Install nginx config
25 | template:
26 | src: files/nginx.conf
27 | dest: /etc/nginx/http.d/headscale.conf
28 | mode: "0644"
29 | notify: reload nginx
30 |
--------------------------------------------------------------------------------
/ansible/roles/headscale/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 38616264313731363865383762393566306366653037373633393433626264646563353765316631
3 | 3366613332663439616266373566646435646237626465350a363731396436376262313831393632
4 | 37646330343763343732336239393364303664303562373937663662643162313863333363323534
5 | 6361333166363339390a356130633130663132393766636261346262363138656335646366643966
6 | 30383933303536353165343363386239316139346165613366323731666664303638613862303139
7 | 38353033633765633731656537626166316566613732633239356238393033386131626535383462
8 | 33343064306162393733643165343266623931643136623934303861353064363235303539353935
9 | 30636338613132323262626338623366393965316239616132346330646537636238363631643038
10 | 39306465616131343666353865336231643966313830386164336539626134323030353561636165
11 | 37623338656134316130653236643339636339303632653536366665653830386562313734626130
12 | 31663335323630343666386337363564313633323766623535303564633132346165303462353436
13 | 64303863303631613237343762653938646537646534343234656465316330356361643163623631
14 | 36396535343061323962386135633736333261323965646266366637666564623666306365356135
15 | 37346666343634306137393663646362333062303636616332333235313634633261333136303837
16 | 37363835313563323035313465626261353365653261326463313461616430643335316661386365
17 | 34333161373164306335646161346437643039663638353134613533383364363065373433383561
18 | 66653335393262333739376364356639316530626664656438353861303134383833393236656134
19 | 66353563313661393062656636393331386263333566303938303038643135646431653663363931
20 | 656663316137373831346432356438386639
21 |
--------------------------------------------------------------------------------
/ansible/roles/http_proxy/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart squid
2 | service:
3 | name: squid
4 | state: restarted
5 |
--------------------------------------------------------------------------------
/ansible/roles/http_proxy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install squid
2 | package:
3 | name: squid
4 |
5 | - name: Squid config
6 | template:
7 | src: files/squid.conf
8 | dest: /etc/squid/squid.conf
9 | mode: "0600"
10 | notify: restart squid
11 |
12 | - name: Enable squid
13 | service:
14 | name: squid
15 | enabled: true
16 |
--------------------------------------------------------------------------------
/ansible/roles/immich/files/ipp-config.json:
--------------------------------------------------------------------------------
1 | {
2 | "ipp": {
3 | "showHomePage": false,
4 | "allowDownloadAll": 1
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/ansible/roles/immich/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart immich
2 | shell:
3 | chdir: /opt/immich
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/immich/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create install directory
2 | file:
3 | path: /opt/immich
4 | state: directory
5 | owner: "{{ docker_user.name }}"
6 | mode: "{{ docker_compose_directory_mask }}"
7 |
8 | - name: Install compose file
9 | template:
10 | src: files/docker-compose.yml
11 | dest: /opt/immich/docker-compose.yml
12 | mode: "{{ docker_compose_file_mask }}"
13 | owner: "{{ docker_user.name }}"
14 | validate: docker-compose -f %s config
15 | notify: restart immich
16 |
17 | - name: Install IPP config
18 | template:
19 | src: files/ipp-config.json
20 | dest: /opt/immich/ipp-config.json
21 | mode: "{{ docker_compose_file_mask }}"
22 | owner: "{{ docker_user.name }}"
23 | notify: restart immich
24 |
--------------------------------------------------------------------------------
/ansible/roles/ingress/files/nftables.conf:
--------------------------------------------------------------------------------
1 | #!/usr/sbin/nft -f
2 |
3 | flush ruleset
4 |
5 | table inet filter {
6 | chain input {
7 | type filter hook input priority 0
8 | policy drop
9 |
10 | ct state {established, related} counter accept
11 |
12 | iif lo accept
13 |
14 | # Allow ICMP (pings)
15 | ip protocol icmp accept
16 | meta l4proto icmpv6 accept
17 |
18 | tcp dport {http, https, {{ ssh_port }}, 8443, 8448} accept
19 |
20 | # Allow Tailscale
21 | udp dport {{ tailscale_port }} accept;
22 | }
23 |
24 | chain POSTROUTING {
25 | type nat hook postrouting priority srcnat
26 | policy accept
27 |
28 | # NAT - because the Proxmox machines may not have routes back to the Tailscale network
29 | ip saddr {{ tailscale_cidr }} counter masquerade
30 | }
31 |
32 | chain FORWARD {
33 | type filter hook forward priority mangle
34 | policy drop
35 |
36 | # Allow monitoring of Tailscale network
37 | ip saddr {{ pve_hosts.forrest.ip }}/32 ip daddr {{ tailscale_cidr }} accept
38 |
39 | # Allow Tailscale exit-node traffic, but not access to the local LAN
40 | ip saddr {{ tailscale_cidr }} ip daddr 192.168.0.0/16 drop
41 | ip saddr {{ tailscale_cidr }} accept
42 | ip daddr {{ tailscale_cidr }} ct state related,established accept
43 | }
44 | }
45 |
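The same syntax check the role's validate step runs, plus live inspection of the counters:

    nft -c -f /etc/nftables.conf   # parse-only dry run, nothing applied
    nft list ruleset               # show live rules and counter values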
--------------------------------------------------------------------------------
/ansible/roles/ingress/files/nginx.conf:
--------------------------------------------------------------------------------
1 | log_format access '$remote_addr [$time_local] '
2 | '$protocol $status $bytes_sent $bytes_received '
3 | '$session_time "$ssl_preread_server_name" '
4 | '"$upstream_bytes_sent" "$upstream_bytes_received" "$upstream_connect_time"';
5 |
6 | access_log /var/log/nginx/access.log access;
7 |
8 | # Internal LAN route
9 | server {
10 | listen 443;
11 | listen 8448;
12 | listen [::]:443;
13 | listen [::]:8448;
14 | proxy_pass {{ pve_hosts.docker.ip }}:443;
15 | proxy_protocol on;
16 | proxy_socket_keepalive on;
17 | proxy_timeout 1h;
18 | }
19 |
20 | # External routes
21 | server {
22 | listen 8443 proxy_protocol;
23 | proxy_protocol on;
24 | proxy_pass {{ pve_hosts.docker.ip }}:443;
25 | set_real_ip_from {{ wireguard.server.ip }};
26 | proxy_socket_keepalive on;
27 | }
28 |
--------------------------------------------------------------------------------
/ansible/roles/ingress/files/wireguard.conf:
--------------------------------------------------------------------------------
1 | [Interface]
2 | Address = {{ ingress_wireguard.ip }}
3 | PrivateKey = {{ ingress_wireguard.private_key }}
4 |
5 | [Peer]
6 | PublicKey = {{ wireguard.server.public_key }}
7 | Endpoint = {{ wireguard.public_ip }}:{{ wireguard.port }}
8 | AllowedIPs = {{ wireguard.cidr }}
9 |
10 | PersistentKeepalive = 25
11 |
--------------------------------------------------------------------------------
/ansible/roles/ingress/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart wireguard
2 | service:
3 | name: wg-quick@wg0
4 | state: restarted
5 |
6 | - name: reload nginx
7 | service:
8 | name: nginx
9 | state: reloaded
10 |
11 | - name: reload nftables
12 | command:
13 | argv:
14 | - nft
15 | - -f
16 | - /etc/nftables.conf
17 |
--------------------------------------------------------------------------------
/ansible/roles/ingress/tasks/firewall.yml:
--------------------------------------------------------------------------------
1 | - name: Install nftables
2 | package:
3 | name: nftables
4 |
5 | - name: Copy firewall config
6 | template:
7 | src: files/nftables.conf
8 | dest: /etc/nftables.conf
9 | validate: nft -c -f %s
10 | mode: "644"
11 | notify: reload nftables
12 |
13 | - name: Enable nftables
14 | service:
15 | name: nftables
16 | enabled: true
17 | state: started
18 |
--------------------------------------------------------------------------------
/ansible/roles/ingress/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Configure wireguard
2 | include_tasks: wireguard.yml
3 |
4 | - name: Configure nginx
5 | include_tasks: nginx.yml
6 |
7 | - name: Configure firewall
8 | include_tasks: firewall.yml
9 |
--------------------------------------------------------------------------------
/ansible/roles/ingress/tasks/nginx.yml:
--------------------------------------------------------------------------------
1 | - name: Nginx config
2 | template:
3 | src: files/nginx.conf
4 | dest: /etc/nginx/stream.d/ingress.conf
5 | mode: "0644"
6 | notify: reload nginx
7 |
--------------------------------------------------------------------------------
/ansible/roles/ingress/tasks/wireguard.yml:
--------------------------------------------------------------------------------
1 | - name: Install Wireguard
2 | package:
3 | name: wireguard
4 |
5 | - name: Get wireguard credentials
6 | set_fact:
7 | ingress_wireguard: "{{ wireguard.clients.ingress }}"
8 |
9 | - name: Wireguard config
10 | template:
11 | src: files/wireguard.conf
12 | dest: /etc/wireguard/wg0.conf
13 | mode: "0600"
14 | backup: true
15 | notify: restart wireguard
16 |
17 | - name: Enable wireguard
18 | service:
19 | name: wg-quick@wg0
20 | enabled: true
21 |
22 | - name: Enable p2p communication
23 | sysctl:
24 | name: net.ipv4.ip_forward
25 | value: "1"
26 | sysctl_set: true
27 | state: present
28 | reload: true
29 | sysctl_file: /etc/sysctl.d/99-sysctl.conf
30 |
--------------------------------------------------------------------------------
/ansible/roles/jellyfin/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Add Jellyfin apt key
2 | ansible.builtin.apt_key:
3 | url: https://repo.jellyfin.org/jellyfin_team.gpg.key
4 | state: present
5 |
6 | - name: Add Jellyfin repository
7 | apt_repository:
8 | repo: deb [arch=amd64] https://repo.jellyfin.org/debian {{ ansible_distribution_release }} main
9 | filename: jellyfin
10 | state: present
11 |
12 | - name: Install jellyfin
13 | package:
14 | name: jellyfin
15 |
16 | - name: Set media dir permissions
17 | cron:
18 | name: Set media permissions
19 | special_time: daily
20 | job: chown -R jellyfin:jellyfin /mnt/media
21 |
--------------------------------------------------------------------------------
/ansible/roles/mastodon/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | mastodon:
3 | image: lscr.io/linuxserver/mastodon:4.3.8
4 | environment:
5 | - TZ={{ timezone }}
6 | - PUID={{ docker_user.id }}
7 | - PGID={{ docker_user.id }}
8 | - LOCAL_DOMAIN=theorangeone.net
9 | - WEB_DOMAIN=mastodon.theorangeone.net
10 | - DATABASE_URL=postgresql://mastodon:mastodon@db:5432/mastodon
11 | - REDIS_URL=redis://redis
12 | - SIDEKIQ_REDIS_URL=redis://redis/1
13 | - SECRET_KEY_BASE={{ vault_secret_key_base }}
14 | - OTP_SECRET={{ vault_otp_secret }}
15 | - VAPID_PRIVATE_KEY={{ vault_vapid_private_key }}
16 | - VAPID_PUBLIC_KEY={{ vault_vapid_public_key }}
17 | - ACTIVE_RECORD_ENCRYPTION_DETERMINISTIC_KEY={{ vault_active_record_encryption_deterministic_key }}
18 | - ACTIVE_RECORD_ENCRYPTION_KEY_DERIVATION_SALT={{ vault_active_record_encryption_key_derivation_salt }}
19 | - ACTIVE_RECORD_ENCRYPTION_PRIMARY_KEY={{ vault_active_record_encryption_primary_key }}
20 | - SINGLE_USER_MODE=true
21 | - DEFAULT_LOCALE=en
22 | - STREAMING_CLUSTER_NUM=1
23 | - WEB_CONCURRENCY=0 # 0 runs a single process, rather than cluster mode
24 | - SIDEKIQ_THREADS=1
25 | - SIDEKIQ_CONCURRENCY=1
26 | - HTTP_PROXY={{ pve_hosts.qbittorrent.ip }}:3128
27 | - HTTPS_PROXY={{ pve_hosts.qbittorrent.ip }}:3128
28 | restart: unless-stopped
29 | volumes:
30 | - "{{ app_data_dir }}/mastodon:/config/mastodon"
31 | depends_on:
32 | - db
33 | - redis
34 | networks:
35 | - default
36 | - traefik
37 | tmpfs:
38 | - /var/cache
39 | - /config/log
40 | labels:
41 | - traefik.enable=true
42 | - traefik.http.routers.mastodon.rule=Host(`mastodon.theorangeone.net`)
43 | - traefik.http.services.mastodon-mastodon.loadbalancer.server.port=443
44 | - traefik.http.services.mastodon-mastodon.loadbalancer.server.scheme=https
45 |
46 | db:
47 | image: postgres:14-alpine
48 | restart: unless-stopped
49 | volumes:
50 | - /mnt/speed/dbs/postgres/mastodon:/var/lib/postgresql/data
51 | environment:
52 | - POSTGRES_PASSWORD=mastodon
53 | - POSTGRES_USER=mastodon
54 |
55 | redis:
56 | image: redis:7-alpine
57 | restart: unless-stopped
58 | volumes:
59 | - /mnt/speed/dbs/redis/mastodon:/data
60 |
61 | networks:
62 | traefik:
63 | external: true
64 |
--------------------------------------------------------------------------------
/ansible/roles/mastodon/files/purge-media.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -ex
4 |
5 | cd /opt/mastodon
6 |
7 | docker-compose exec mastodon tootctl accounts prune
8 | docker-compose exec mastodon tootctl media remove --days=7
9 | docker-compose exec mastodon tootctl statuses remove --days=7
10 | docker-compose exec mastodon tootctl preview_cards remove --days=7
11 | docker-compose exec mastodon tootctl media remove-orphans
12 | docker-compose exec mastodon tootctl cache clear
13 |
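The role schedules this weekly via cron (see the tasks below), but it can equally be run by hand as a user with Docker access:

    /opt/mastodon/purge-media.sh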
--------------------------------------------------------------------------------
/ansible/roles/mastodon/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart mastodon
2 | shell:
3 | chdir: /opt/mastodon
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/mastodon/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/mastodon
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install compose file
12 | template:
13 | src: files/docker-compose.yml
14 | dest: /opt/mastodon/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | notify: restart mastodon
19 |
20 | - name: Install media cleanup script
21 | template:
22 | src: files/purge-media.sh
23 | dest: /opt/mastodon/purge-media.sh
24 | mode: "0755"
25 | owner: "{{ docker_user.name }}"
26 |
27 | - name: Schedule media cleanup
28 | cron:
29 | name: clean up mastodon media
30 | hour: 2
31 | minute: 0
32 | weekday: 1
33 | job: /opt/mastodon/purge-media.sh
34 | user: "{{ me.user }}"
35 |
--------------------------------------------------------------------------------
/ansible/roles/minio/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | minio:
3 | image: quay.io/minio/minio:RELEASE.2025-04-22T22-12-26Z
4 | command: server /data --console-address ":9090"
5 | user: "{{ docker_user.id }}"
6 | environment:
7 | - TZ=Europe/London
8 | - MINIO_ROOT_USER=jake
9 | - MINIO_ROOT_PASSWORD={{ vault_minio_root_password }}
10 | restart: unless-stopped
11 | labels:
12 | - traefik.enable=true
13 |
14 | - traefik.http.routers.minio-console.rule=Host(`minio.jakehoward.tech`)
15 | - traefik.http.routers.minio-console.service=minio-console
16 | - traefik.http.services.minio-console.loadbalancer.server.port=9090
17 |
18 | - traefik.http.routers.minio-s3.rule=Host(`s3.jakehoward.tech`)
19 | - traefik.http.routers.minio-s3.service=minio-s3
20 | - traefik.http.services.minio-s3.loadbalancer.server.port=9000
21 | volumes:
22 | - /mnt/tank/files/minio:/data
23 | networks:
24 | - default
25 | - traefik
26 |
27 | networks:
28 | traefik:
29 | external: true
30 |
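A hedged usage sketch with the upstream MinIO client (mc, not installed by this role); the alias name is illustrative:

    mc alias set homelab https://s3.jakehoward.tech jake "$MINIO_ROOT_PASSWORD"
    mc ls homelab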
--------------------------------------------------------------------------------
/ansible/roles/minio/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart minio
2 | shell:
3 | chdir: /opt/minio
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/minio/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/minio
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install compose file
12 | template:
13 | src: files/docker-compose.yml
14 | dest: /opt/minio/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | notify: restart minio
19 |
--------------------------------------------------------------------------------
/ansible/roles/minio/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 38666632613233313534666339373331396438323131643238356235323535303430373733353737
3 | 6330313565333032333461623361333232633836343163650a663762653233303832333936646364
4 | 66623566393464323537376666353631383464373030616263383536393735316336636636356332
5 | 6639383839666563330a323166336565636634306538633761333338366637643162633133353164
6 | 39306166373131303464373530373163626538623735393962306237663634326264323339643634
7 | 37323564373839356434343836373631323162663038393861383934306538313262326637653537
8 | 62653766623734343231633262636237366433363932316631393237633135636538623362373963
9 | 39303531656431623733
10 |
--------------------------------------------------------------------------------
/ansible/roles/nginx/defaults/main.yml:
--------------------------------------------------------------------------------
1 | nginx_https_redirect: false
2 | docker_resolver_address: "{{ private_ip }}:53053"
3 |
--------------------------------------------------------------------------------
/ansible/roles/nginx/files/includes/docker-resolver.conf:
--------------------------------------------------------------------------------
1 | resolver {{ docker_resolver_address }} valid=2s;
2 | resolver_timeout 5s;
3 |
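NOTE: the short valid=2s only has an effect when proxy_pass is given a variable, since that is what forces nginx to re-resolve the name at request time instead of once at startup. A hypothetical vhost using this include (names illustrative):

    include includes/docker-resolver.conf;

    server {
        listen 80;
        server_name example.internal;

        location / {
            # Using a variable forces a runtime DNS lookup via the
            # resolver above, so container IP changes are picked up.
            set $upstream http://example-app.docker:8000;
            proxy_pass $upstream;
        }
    }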
--------------------------------------------------------------------------------
/ansible/roles/nginx/files/includes/proxy.conf:
--------------------------------------------------------------------------------
1 | # Timeout if the real server is dead
2 | proxy_next_upstream error timeout invalid_header http_500 http_502 http_503;
3 |
4 | # Proxy Connection Settings
5 | proxy_buffers 32 4k;
6 | proxy_connect_timeout 240;
7 | proxy_headers_hash_bucket_size 128;
8 | proxy_headers_hash_max_size 1024;
9 | proxy_http_version 1.1;
10 | proxy_read_timeout 240;
11 | proxy_redirect http:// $scheme://;
12 | proxy_send_timeout 240;
13 |
14 | # Proxy Cache and Cookie Settings
15 | proxy_cache_bypass $cookie_session;
16 | proxy_no_cache $cookie_session;
17 |
18 | # Proxy Header Settings
19 | proxy_set_header Connection $connection_upgrade;
20 | proxy_set_header Early-Data $ssl_early_data;
21 | proxy_set_header Host $host;
22 | proxy_set_header Proxy "";
23 | proxy_set_header Upgrade $http_upgrade;
24 | proxy_set_header X-Forwarded-For $remote_addr;
25 | proxy_set_header X-Forwarded-Host $host;
26 | proxy_set_header X-Forwarded-Method $request_method;
27 | proxy_set_header X-Forwarded-Port $server_port;
28 | proxy_set_header X-Forwarded-Proto $scheme;
29 | proxy_set_header X-Forwarded-Server $host;
30 | proxy_set_header X-Forwarded-Ssl on;
31 | proxy_set_header X-Forwarded-Uri $request_uri;
32 | proxy_set_header X-Original-Method $request_method;
33 | proxy_set_header X-Original-URL $scheme://$http_host$request_uri;
34 | proxy_set_header X-Real-IP $remote_addr;
35 |
--------------------------------------------------------------------------------
/ansible/roles/nginx/files/includes/ssl.conf:
--------------------------------------------------------------------------------
1 | ssl_session_timeout 1d;
2 | ssl_session_cache shared:sslcache:10m; # about 40000 sessions
3 | ssl_session_tickets off;
4 |
5 | ssl_dhparam dhparams.pem;
6 |
7 | # intermediate configuration
8 | ssl_protocols TLSv1.2 TLSv1.3;
9 | ssl_ciphers ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-RSA-CHACHA20-POLY1305;
10 | ssl_prefer_server_ciphers off;
11 | more_set_headers "Strict-Transport-Security: max-age=2592000";
12 |
13 | # OCSP stapling
14 | ssl_stapling on;
15 | ssl_stapling_verify on;
16 |
--------------------------------------------------------------------------------
/ansible/roles/nginx/files/nginx-https-redirect.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 | listen [::]:80;
4 |
5 | server_name _;
6 | access_log off;
7 |
8 | location ^~ /.well-known/acme-challenge/ {
9 | default_type "text/plain";
10 | root {{ certbot_webroot }};
11 | }
12 |
13 | location / {
14 | return 308 https://$host$request_uri;
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/ansible/roles/nginx/files/nginx.conf:
--------------------------------------------------------------------------------
1 | worker_processes auto;
2 |
3 | error_log /var/log/nginx/error.log;
4 |
5 | load_module /usr/lib/nginx/modules/ngx_http_brotli_filter_module.so;
6 | load_module /usr/lib/nginx/modules/ngx_http_headers_more_filter_module.so;
7 | load_module /usr/lib/nginx/modules/ngx_stream_module.so;
8 |
9 | pcre_jit on;
10 |
11 | events {
12 | worker_connections 1024;
13 | }
14 |
15 | http {
16 | include /etc/nginx/mime.types;
17 | default_type application/octet-stream;
18 |
19 | server_tokens off;
20 | more_clear_headers "Server";
21 |
22 | add_header Permissions-Policy "interest-cohort=()";
23 |
24 | types_hash_max_size 2048;
25 | types_hash_bucket_size 128;
26 |
27 | include includes/proxy.conf;
28 |
29 | # Helper variable for proxying websockets.
30 | map $http_upgrade $connection_upgrade {
31 | default upgrade;
32 | '' close;
33 | }
34 |
35 | log_format main '$remote_addr - $remote_user [$time_local] "$request" '
36 | '$status $body_bytes_sent "$http_referer" '
37 | '"$http_user_agent" "$http_x_forwarded_for"';
38 |
39 | access_log /var/log/nginx/access.log main;
40 |
41 | sendfile on;
42 | tcp_nopush on;
43 |
44 | # Gzip basically everything if we can
45 | gzip on;
46 | gzip_vary on;
47 | gzip_types *;
48 | gzip_proxied any;
49 | gzip_comp_level 3;
50 | gzip_min_length 1024;
51 |
52 | brotli on;
53 | brotli_types *;
54 | brotli_comp_level 7;
55 | brotli_min_length 1024;
56 |
57 | keepalive_timeout 65;
58 |
59 | include /etc/nginx/http.d/*.conf;
60 | }
61 |
62 | stream {
63 | ssl_preread on;
64 |
65 | include /etc/nginx/stream.d/*.conf;
66 | }
67 |
--------------------------------------------------------------------------------
/ansible/roles/nginx/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: reload nginx
2 | service:
3 | name: nginx
4 | state: reloaded
5 |
--------------------------------------------------------------------------------
/ansible/roles/nginx/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install nginx
2 | package:
3 | name: nginx
4 |
5 | - name: Install nginx modules
6 | package:
7 | name: "{{ item }}"
8 | loop:
9 | - libnginx-mod-http-headers-more-filter
10 | - libnginx-mod-http-brotli-filter
11 | - libnginx-mod-stream
12 | when: ansible_os_family != 'Archlinux'
13 |
14 | - name: Install nginx modules (on Arch)
15 | kewlfft.aur.aur:
16 | name: "{{ item }}"
17 | loop:
18 | - nginx-mod-headers-more
19 | - nginx-mod-brotli
20 | - nginx-mod-stream
21 | when: ansible_os_family == 'Archlinux'
22 |
23 | - name: Generate Diffie-Hellman parameters
24 | community.crypto.openssl_dhparam:
25 | path: /etc/nginx/dhparams.pem
26 |
27 | - name: Create config directories
28 | file:
29 | path: /etc/nginx/{{ item }}
30 | state: directory
31 | mode: "0755"
32 | loop:
33 | - http.d
34 | - stream.d
35 | - includes
36 |
37 | - name: Copy config files
38 | template:
39 | src: "{{ item }}"
40 | dest: /etc/nginx/includes/{{ item | basename }}
41 | mode: "0644"
42 | with_fileglob: files/includes/*.conf
43 | notify: reload nginx
44 |
45 | - name: Install config
46 | template:
47 | src: files/nginx.conf
48 | dest: /etc/nginx/nginx.conf
49 | mode: "0644"
50 | notify: reload nginx
51 |
52 | - name: Install HTTPS redirect
53 | template:
54 | src: files/nginx-https-redirect.conf
55 | dest: /etc/nginx/http.d/https-redirect.conf
56 | mode: "0644"
57 | notify: reload nginx
58 | when: nginx_https_redirect
59 |
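NOTE: the reload handler assumes the rendered config parses. A manual spot-check on the host uses the standard nginx CLI:

    nginx -t                 # validate nginx.conf and everything it includes
    systemctl reload nginx   # what the "reload nginx" handler does via the service module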
--------------------------------------------------------------------------------
/ansible/roles/ntfy/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | ntfy:
3 | image: binwiederhier/ntfy:latest
4 | command: serve
5 | user: "{{ docker_user.id }}"
6 | environment:
7 | - TZ={{ timezone }}
8 | - NTFY_BASE_URL=https://ntfy.jakehoward.tech
9 | - NTFY_AUTH_FILE=/etc/ntfy/auth.db
10 | - NTFY_CACHE_FILE=/etc/ntfy/cache.db
11 | - NTFY_AUTH_DEFAULT_ACCESS=deny-all
12 | - NTFY_CACHE_DURATION=24h
13 | - NTFY_ATTACHMENT_CACHE_DIR=/etc/ntfy/attachments
14 | - NTFY_ATTACHMENT_EXPIRY_DURATION=24h
15 | - NTFY_WEB_PUSH_PUBLIC_KEY={{ vault_ntfy_web_push_public_key }}
16 | - NTFY_WEB_PUSH_PRIVATE_KEY={{ vault_ntfy_web_push_private_key }}
17 | - NTFY_WEB_PUSH_FILE=/etc/ntfy/webpush.db
18 | - NTFY_WEB_PUSH_EMAIL_ADDRESS={{ vault_ntfy_web_push_email }}
19 | restart: unless-stopped
20 | volumes:
21 | - "{{ app_data_dir }}/ntfy:/etc/ntfy"
22 | labels:
23 | - traefik.enable=true
24 | - traefik.http.routers.ntfy.rule=Host(`ntfy.jakehoward.tech`)
25 | tmpfs:
26 | - /var/cache/ntfy
27 | - /tmp
28 | networks:
29 | - default
30 | - traefik
31 |
32 | networks:
33 | traefik:
34 | external: true
35 |
--------------------------------------------------------------------------------
/ansible/roles/ntfy/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart ntfy
2 | shell:
3 | chdir: /opt/ntfy
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/ntfy/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/ntfy
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install compose file
12 | template:
13 | src: files/docker-compose.yml
14 | dest: /opt/ntfy/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | notify: restart ntfy
19 |
--------------------------------------------------------------------------------
/ansible/roles/ntfy/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 34623831653665313137333830663439373661363232373363363061346561393963643165313037
3 | 6561363436623761363564336564646266363062306135660a663235333338646430663263363732
4 | 61343565366365353435623032383933383162623037343833313539363666333666313338376635
5 | 6661363766613734610a666233396533353464666439346237326237316633633862323365336335
6 | 37633263386336623236396362663438663930636132313561353639343035643731633037363062
7 | 31323962633562616636326338353639306131343366343339666131373632616266313435313131
8 | 61663438656363633064653738393765633436313365633766376266626438353535303336616235
9 | 62333430316661393830646166383839383431313761613633366536336564363266623433336631
10 | 61376263663234333530333339333930396361326466653639393930633962316362643031656666
11 | 33633633366461323861333434316466623736343030396163323166313233373339336463383362
12 | 35613962393965636332343763313534366339646133636238626265393334643233346537376564
13 | 30393934323566383232333066633839316434306430323063323336346633346261313032646336
14 | 66373233356465646338313966386631376466323834353235663034656335373730373463333431
15 | 63366438393736343233623837383963663664303332396438373462633330323664656464363037
16 | 65333331616366316330313330643765383437666164376435383737346437656433643366383835
17 | 65346531346639623936643936373933306664656231626432343733393434303630363232333730
18 | 3530
19 |
--------------------------------------------------------------------------------
/ansible/roles/paccache/files/paccache.hook:
--------------------------------------------------------------------------------
1 | [Trigger]
2 | Operation = Upgrade
3 | Operation = Install
4 | Operation = Remove
5 | Type = Package
6 | Target = *
7 |
8 | [Action]
9 | Description = Cleaning pacman cache...
10 | When = PostTransaction
11 | Exec = /usr/bin/paccache --remove --keep 2
12 |
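NOTE: to preview what the hook would delete without removing anything, paccache has a dry-run mode:

    paccache -d -k 2   # dry run: report what --remove --keep 2 would prune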
--------------------------------------------------------------------------------
/ansible/roles/paccache/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install Pacman utils
2 | package:
3 | name: pacman-contrib
4 |
5 | - name: Create hooks directory
6 | file:
7 | path: /etc/pacman.d/hooks/
8 | state: directory
9 | mode: "0755"
10 |
11 | - name: Install pacman hook
12 | template:
13 | src: files/paccache.hook
14 | dest: /etc/pacman.d/hooks/clean_package_cache.hook
15 | mode: "0644"
16 |
--------------------------------------------------------------------------------
/ansible/roles/plausible/files/clickhouse-config.xml:
--------------------------------------------------------------------------------
1 | <clickhouse>
2 |     <listen_host>::</listen_host>
3 |     <listen_host>0.0.0.0</listen_host>
4 |     <listen_try>1</listen_try>
5 |     <logger>
6 |         <level>warning</level>
7 |         <console>true</console>
8 |     </logger>
9 |     <query_thread_log remove="remove"/>
10 |     <query_log remove="remove"/>
11 |     <text_log remove="remove"/>
12 |     <trace_log remove="remove"/>
13 |     <metric_log remove="remove"/>
14 |     <asynchronous_metric_log remove="remove"/>
15 |     <session_log remove="remove"/>
16 |     <part_log remove="remove"/>
17 | </clickhouse>
18 |
--------------------------------------------------------------------------------
/ansible/roles/plausible/files/clickhouse-user-config.xml:
--------------------------------------------------------------------------------
1 | <clickhouse>
2 |     <profiles>
3 |         <default>
4 |             <log_queries>0</log_queries>
5 |             <log_query_threads>0</log_query_threads>
6 |         </default>
7 |     </profiles>
8 | </clickhouse>
9 |
--------------------------------------------------------------------------------
/ansible/roles/plausible/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | plausible:
3 | image: ghcr.io/plausible/community-edition:v3.0
4 | restart: unless-stopped
5 | command: sh -c "/entrypoint.sh db migrate && /entrypoint.sh run"
6 | depends_on:
7 | - db
8 | - clickhouse
9 | networks:
10 | - default
11 | - coredns
12 | environment:
13 | - SECRET_KEY_BASE={{ vault_plausible_secret_key }}
14 | - SIGNING_SALT={{ vault_plausible_signing_salt }}
15 | - TOTP_VAULT_KEY={{ vault_plausible_totp_vault_key }}
16 | - DATABASE_URL=postgres://plausible:plausible@db:5432/plausible
17 | - DISABLE_REGISTRATION=true
18 | - DISABLE_SUBSCRIPTION=true
19 | - CLICKHOUSE_DATABASE_URL=http://clickhouse:8123/plausible
20 | - BASE_URL=https://plausible.theorangeone.net
21 | - GOOGLE_CLIENT_ID={{ vault_plausible_google_client_id }}
22 | - GOOGLE_CLIENT_SECRET={{ vault_plausible_google_client_secret }}
23 | - RELEASE_DISTRIBUTION=none
24 | - MAILER_EMAIL={{ vault_plausible_from_email }}
25 | - SMTP_HOST_ADDR=smtp.eu.mailgun.org
26 | - SMTP_HOST_PORT=465
27 | - SMTP_USER_NAME={{ vault_plausible_smtp_user }}
28 | - SMTP_USER_PWD={{ vault_plausible_smtp_password }}
29 | - SMTP_HOST_SSL_ENABLED=true
30 |
31 | clickhouse:
32 | image: clickhouse/clickhouse-server:24.12-alpine
33 | restart: unless-stopped
34 | environment:
35 | - CLICKHOUSE_SKIP_USER_SETUP=1
36 | volumes:
37 | - ./clickhouse:/var/lib/clickhouse
38 | - ./docker_related_config.xml:/etc/clickhouse-server/config.d/docker_related_config.xml:ro
39 | - ./docker_related_user_config.xml:/etc/clickhouse-server/users.d/docker_related_user_config.xml:ro
40 | tmpfs:
41 | - /var/log/clickhouse-server
42 | ulimits:
43 | nofile:
44 | soft: 262144
45 | hard: 262144
46 |
47 | db:
48 | image: postgres:14-alpine
49 | restart: unless-stopped
50 | volumes:
51 | - ./postgres:/var/lib/postgresql/data
52 | environment:
53 | - POSTGRES_PASSWORD=plausible
54 | - POSTGRES_USER=plausible
55 |
56 | networks:
57 | coredns:
58 | external: true
59 |
--------------------------------------------------------------------------------
/ansible/roles/plausible/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart plausible
2 | shell:
3 | chdir: /opt/plausible
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/plausible/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/plausible
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install clickhouse config
12 | template:
13 | src: files/clickhouse-config.xml
14 | dest: /opt/plausible/docker_related_config.xml
15 | mode: "0644"
16 | notify: restart plausible
17 |
18 | - name: Install clickhouse user config
19 | template:
20 | src: files/clickhouse-user-config.xml
21 | dest: /opt/plausible/docker_related_user_config.xml
22 | mode: "0644"
23 | notify: restart plausible
24 |
25 | - name: Install compose file
26 | template:
27 | src: files/docker-compose.yml
28 | dest: /opt/plausible/docker-compose.yml
29 | mode: "{{ docker_compose_file_mask }}"
30 | owner: "{{ docker_user.name }}"
31 | validate: docker-compose -f %s config
32 | notify: restart plausible
33 |
34 | - name: Install nginx config
35 | template:
36 | src: files/nginx-docker.conf
37 | dest: /etc/nginx/http.d/plausible.conf
38 | mode: "0644"
39 | notify: reload nginx
40 | vars:
41 | server_name: plausible.theorangeone.net elbisualp.theorangeone.net
42 | upstream: plausible-plausible-1.docker:8000
43 | ssl_cert_path: /etc/letsencrypt/live/plausible.theorangeone.net
44 | location_extra: |
45 | rewrite ^/js/index.js$ /js/plausible.js last;
46 |
--------------------------------------------------------------------------------
/ansible/roles/pocket_id/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | pocket-id:
3 | image: ghcr.io/pocket-id/pocket-id
4 | restart: unless-stopped
5 | user: "{{ docker_user.id }}"
6 | environment:
7 | - PUBLIC_APP_URL=https://auth.jakehoward.tech
8 | - TRUST_PROXY=true
9 | - DB_PROVIDER=postgres
10 | - DB_CONNECTION_STRING=postgres://pocketid:{{ vault_pocket_id_db_password }}@db/pocketid
11 | - UPDATE_CHECK_DISABLED=true
12 | - PUBLIC_UI_CONFIG_DISABLED=true
13 | - BACKGROUND_IMAGE_TYPE=png
14 | - APP_NAME=Orange ID
15 | - SESSION_DURATION=30
16 | - SMTP_HOST=smtp.eu.mailgun.org
17 | - SMTP_PORT=465
18 | - SMTP_FROM={{ vault_pocket_id_from_address }}
19 | - SMTP_USER={{ vault_pocket_id_smtp_user }}
20 | - SMTP_PASSWORD={{ vault_pocket_id_smtp_password }}
21 | - SMTP_TLS=tls
22 | - EMAIL_LOGIN_NOTIFICATION_ENABLED=true
23 | volumes:
24 | - "{{ app_data_dir }}/pocket-id:/app/backend/data"
25 | labels:
26 | - traefik.enable=true
27 | - traefik.http.routers.pocket-id.rule=Host(`auth.jakehoward.tech`)
28 | - traefik.http.middlewares.pocket-id-ratelimit.ratelimit.average=5
29 | - traefik.http.middlewares.pocket-id-ratelimit.ratelimit.burst=200
30 | - traefik.http.routers.pocket-id.middlewares=pocket-id-ratelimit
31 | depends_on:
32 | - db
33 | networks:
34 | - default
35 | - traefik
36 |
37 | db:
38 | image: postgres:15-alpine
39 | restart: unless-stopped
40 | volumes:
41 | - /mnt/speed/dbs/postgres/pocket-id:/var/lib/postgresql/data
42 | environment:
43 | - POSTGRES_PASSWORD={{ vault_pocket_id_db_password }}
44 | - POSTGRES_USER=pocketid
45 |
46 | networks:
47 | traefik:
48 | external: true
49 |
--------------------------------------------------------------------------------
/ansible/roles/pocket_id/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart pocket-id
2 | shell:
3 | chdir: /opt/pocket-id
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/pocket_id/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/pocket-id
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install compose file
12 | template:
13 | src: files/docker-compose.yml
14 | dest: /opt/pocket-id/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | notify: restart pocket-id
19 |
--------------------------------------------------------------------------------
/ansible/roles/pocket_id/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 61613231353938346635326165303531326232393334313261366561326633323836366334663634
3 | 3563306334306632383964643634653166373964666335360a363338323236323461316634333161
4 | 38303037373861326263353366653034646162653331616265313865613964666133326334666666
5 | 3038633037313437370a353865346537323135313632303439373535303366383437633135386333
6 | 65393434633037393535373331366265386663313334333664636565313331353332393530393231
7 | 64646135356265653662333637643461636539306138386263383062396666363264386535653438
8 | 30653634393337366363646364613032666631346662333435313931356639643962316666343939
9 | 36383563373733353437386262326638666533653232636363363636376131313661396136663261
10 | 30373732616665353137623561346666616361376563323764346536623734633737643736653238
11 | 62633663373863653663383635373537653337376338656433626163393666396139363038666461
12 | 37316537383566373239393831353632393135303831353866373339323831663038333733356333
13 | 30663137653539363162386362303266396365623936386335303536386239323739383436373462
14 | 34336566316332396166633735396532363238386531396361656666323233393763663335333038
15 | 65333939343066376365633138383364376136303431343333353835656332626563646530343766
16 | 64366466303038373661616136636530383366373365323062383836336530373035323466306135
17 | 34366438613438323133356138383566663165323837666435353435333539333862366630306132
18 | 66386265636139653739386630656566326131613231373639656264643232373134
19 |
--------------------------------------------------------------------------------
/ansible/roles/privatebin/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | privatebin:
3 | image: privatebin/nginx-fpm-alpine:latest
4 | environment:
5 | - TZ={{ timezone }}
6 | volumes:
7 | - "{{ app_data_dir }}/privatebin/:/srv/data"
8 | - "{{ app_data_dir }}/privatebin/conf.php:/srv/cfg/conf.php:ro"
9 | restart: unless-stopped
10 | labels:
11 | - traefik.enable=true
12 | - traefik.http.routers.privatebin.rule=Host(`bin.theorangeone.net`)
13 | networks:
14 | - default
15 | - traefik
16 |
17 | networks:
18 | traefik:
19 | external: true
20 |
--------------------------------------------------------------------------------
/ansible/roles/privatebin/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart privatebin
2 | shell:
3 | chdir: /opt/privatebin
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/privatebin/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create install directory
2 | file:
3 | path: /opt/privatebin
4 | state: directory
5 | owner: "{{ docker_user.name }}"
6 | mode: "{{ docker_compose_directory_mask }}"
7 |
8 | - name: Install compose file
9 | template:
10 | src: files/docker-compose.yml
11 | dest: /opt/privatebin/docker-compose.yml
12 | mode: "{{ docker_compose_file_mask }}"
13 | owner: "{{ docker_user.name }}"
14 | validate: docker-compose -f %s config
15 | notify: restart privatebin
16 |
17 | - name: Install config file
18 | template:
19 | src: files/config.ini
20 | dest: "{{ app_data_dir }}/privatebin/conf.php" # Yes, really
21 | mode: "{{ docker_compose_file_mask }}"
22 | owner: "{{ docker_user.name }}"
23 | notify: restart privatebin
24 |
--------------------------------------------------------------------------------
/ansible/roles/prometheus/files/grafana/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | grafana:
3 | image: grafana/grafana:latest
4 | environment:
5 | - TZ={{ timezone }}
6 | - GF_DATABASE_URL=postgres://grafana:grafana@db/grafana
7 | - GF_DATABASE_TYPE=postgres
8 | - GF_RENDERING_SERVER_URL=http://renderer:8081/render
9 | - GF_RENDERING_CALLBACK_URL=http://grafana:3000/
10 | - GF_SERVER_ROOT_URL=https://grafana.jakehoward.tech
11 | - GF_SERVER_ENABLE_GZIP=true
12 | - GF_DEFAULT_FORCE_MIGRATION=true
13 | - GF_FEATURE_TOGGLES_ENABLE=publicDashboards
14 |
15 | - GF_ANALYTICS_ENABLED=false
16 | - GF_ANALYTICS_REPORTING_ENABLED=false
17 | - GF_ANALYTICS_CHECK_FOR_UPDATES=false
18 | - GF_ANALYTICS_CHECK_FOR_PLUGIN_UPDATES=false
19 |
20 | - GF_SMTP_ENABLED=true
21 | - GF_SMTP_HOST=smtp.eu.mailgun.org:465
22 | - GF_SMTP_USER={{ vault_grafana_smtp_user }}
23 | - GF_SMTP_PASSWORD={{ vault_grafana_smtp_password }}
24 | - GF_SMTP_FROM_ADDRESS={{ vault_grafana_from_email }}
25 | - GF_SMTP_FROM_NAME=grafana
26 | volumes:
27 | - "{{ app_data_dir }}/grafana:/var/lib/grafana"
28 | networks:
29 | - default
30 | - grafana
31 | restart: unless-stopped
32 | ports:
33 | - "{{ pve_hosts.forrest.ip }}:3000:3000"
34 | depends_on:
35 | - db
36 | - renderer
37 |
38 | db:
39 | image: postgres:14-alpine
40 | restart: unless-stopped
41 | volumes:
42 | - /mnt/speed/dbs/postgres/grafana/:/var/lib/postgresql/data
43 | environment:
44 | - POSTGRES_PASSWORD=grafana
45 | - POSTGRES_USER=grafana
46 |
47 | renderer:
48 | image: grafana/grafana-image-renderer:latest
49 | restart: unless-stopped
50 | environment:
51 | - BROWSER_TZ={{ timezone }}
52 |
53 |
54 | networks:
55 | grafana:
56 | external: true
57 |
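NOTE: all of the GF_* settings above use Grafana's documented GF_<SECTION>_<KEY> convention for overriding its ini config from the environment, e.g.:

    GF_SMTP_HOST=smtp.eu.mailgun.org:465   # -> [smtp] host
    GF_SERVER_ENABLE_GZIP=true             # -> [server] enable_gzip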
--------------------------------------------------------------------------------
/ansible/roles/prometheus/files/prometheus/alert-rules.d/blackbox.yml:
--------------------------------------------------------------------------------
1 | groups:
2 | - name: blackbox
3 | rules:
4 | - alert: HttpSuccess
5 | expr: probe_success{job="blackbox_http_external"} == 0
6 | for: 5m
7 |
--------------------------------------------------------------------------------
/ansible/roles/prometheus/files/prometheus/alertmanager.yml:
--------------------------------------------------------------------------------
1 | global:
2 | resolve_timeout: 3m
3 | smtp_smarthost: smtp.eu.mailgun.org:465
4 | smtp_from: "{{ vault_alertmanager_from_address }}"
5 | smtp_auth_username: "{{ vault_alertmanager_from_address }}"
6 | smtp_auth_password: "{{ vault_alertmanager_smtp_password }}"
7 |
8 | route:
9 | receiver: default
10 |
11 | receivers:
12 | - name: default
13 | email_configs:
14 | - to: "{{ vault_alertmanager_to_address }}"
15 | send_resolved: true
16 |
--------------------------------------------------------------------------------
/ansible/roles/prometheus/files/prometheus/blackbox.yml:
--------------------------------------------------------------------------------
1 | modules:
2 | http:
3 | prober: http
4 | timeout: 10s
5 |
6 | https_redir:
7 | prober: http
8 | timeout: 10s
9 | http:
10 | method: GET
11 | valid_status_codes: [301, 302, 307, 308]
12 | follow_redirects: false
13 | fail_if_ssl: true
14 | fail_if_header_not_matches:
15 | - header: Location
16 | regexp: ^https
17 |
18 | icmp:
19 | prober: icmp
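NOTE: modules are selected by the scraper, not in this file. The role's prometheus.yml is templated separately; a minimal sketch of how a scrape job would wire the https_redir module to a target, assuming the exporter is reachable as blackbox:9115 (job and target names illustrative):

    scrape_configs:
      - job_name: blackbox_https_redir
        metrics_path: /probe
        params:
          module: [https_redir]
        static_configs:
          - targets:
              - http://example.com
        relabel_configs:
          - source_labels: [__address__]
            target_label: __param_target
          - source_labels: [__param_target]
            target_label: instance
          - target_label: __address__
            replacement: blackbox:9115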
20 |
--------------------------------------------------------------------------------
/ansible/roles/prometheus/files/prometheus/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | prometheus:
3 | image: prom/prometheus:latest
4 | restart: unless-stopped
5 | user: "{{ docker_user.id }}"
6 | volumes:
7 | - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
8 | - ./alert-rules.d:/etc/prometheus/alert-rules.d:ro
9 | - /mnt/speed/dbs/prometheus/forrest/:/prometheus/
10 | networks:
11 | - default
12 | - grafana
13 | ports:
14 | - "{{ pve_hosts.forrest.ip }}:9090:9090"
15 |
16 | blackbox:
17 | image: prom/blackbox-exporter:latest
18 | restart: unless-stopped
19 | user: "{{ docker_user.id }}"
20 | volumes:
21 | - ./blackbox.yml:/etc/blackbox_exporter/config.yml:ro
22 |
23 | alertmanager:
24 | image: prom/alertmanager:latest
25 | restart: unless-stopped
26 | volumes:
27 | - ./alertmanager.yml:/etc/alertmanager/alertmanager.yml:ro
28 |
29 | proxmox_exporter:
30 | image: prompve/prometheus-pve-exporter:latest
31 | restart: unless-stopped
32 | user: "{{ docker_user.id }}"
33 | environment:
34 | - PVE_USER=prometheus@pve
35 | - PVE_TOKEN_NAME=prometheus
36 | - PVE_TOKEN_VALUE={{ vault_prometheus_api_token }}
37 | - PVE_VERIFY_SSL=false
38 |
39 | speedtest_exporter:
40 | image: jraviles/prometheus_speedtest:latest
41 | restart: unless-stopped
42 | user: "{{ docker_user.id }}"
43 |
44 | networks:
45 | grafana:
46 | external: true
47 | default:
48 | enable_ipv6: true
49 |
--------------------------------------------------------------------------------
/ansible/roles/prometheus/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart grafana
2 | shell:
3 | chdir: /opt/grafana
4 | cmd: "{{ docker_update_command }}"
5 |
6 | - name: restart prometheus
7 | shell:
8 | chdir: /opt/prometheus
9 | cmd: "{{ docker_update_command }}"
10 |
11 | - name: reload prometheus
12 | shell:
13 | chdir: /opt/prometheus
14 | cmd: docker-compose exec prometheus kill -HUP 1
15 |
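NOTE: Prometheus re-reads its config and rule files on SIGHUP, which is what kill -HUP 1 relies on. Rule files can be sanity-checked before a reload with promtool, which ships in the same image:

    docker-compose exec prometheus promtool check rules /etc/prometheus/alert-rules.d/blackbox.yml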
--------------------------------------------------------------------------------
/ansible/roles/prometheus/tasks/grafana.yml:
--------------------------------------------------------------------------------
1 | - name: Create network
2 | docker_network:
3 | name: grafana
4 |
5 | - name: Create grafana install directory
6 | file:
7 | path: /opt/grafana
8 | state: directory
9 | owner: "{{ docker_user.name }}"
10 | mode: "{{ docker_compose_directory_mask }}"
11 |
12 | - name: Install grafana compose file
13 | template:
14 | src: files/grafana/docker-compose.yml
15 | dest: /opt/grafana/docker-compose.yml
16 | mode: "{{ docker_compose_file_mask }}"
17 | owner: "{{ docker_user.name }}"
18 | validate: docker-compose -f %s config
19 | notify: restart grafana
20 |
--------------------------------------------------------------------------------
/ansible/roles/prometheus/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Grafana
5 | include_tasks: grafana.yml
6 |
7 | - name: Prometheus
8 | include_tasks: prometheus.yml
9 |
10 | - name: Get routes
11 | command:
12 | argv:
13 | - ip
14 | - -6
15 | - route
16 | - show
17 | - "{{ vps_hosts.private_ipv6_range }}"
18 | register: routes
19 | changed_when: false
20 |
21 | - name: Add route to private services via ingress
22 | command:
23 | argv:
24 | - ip
25 | - -6
26 | - route
27 | - add
28 | - "{{ vps_hosts.private_ipv6_range }}"
29 | - via
30 | - "{{ pve_hosts.ingress.ipv6 }}"
31 | - dev
32 | - eth0
33 | when: vps_hosts.private_ipv6_marker not in routes.stdout
34 |
--------------------------------------------------------------------------------
/ansible/roles/prometheus/tasks/prometheus.yml:
--------------------------------------------------------------------------------
1 | - name: Create prometheus install directory
2 | file:
3 | path: /opt/prometheus
4 | state: directory
5 | owner: "{{ docker_user.name }}"
6 | mode: "{{ docker_compose_directory_mask }}"
7 |
8 | - name: Install prometheus config
9 | template:
10 | src: files/prometheus/prometheus.yml
11 | dest: /opt/prometheus/prometheus.yml
12 | mode: "{{ docker_compose_file_mask }}"
13 | owner: "{{ docker_user.name }}"
14 | notify: reload prometheus
15 |
16 | - name: Install prometheus compose file
17 | template:
18 | src: files/prometheus/docker-compose.yml
19 | dest: /opt/prometheus/docker-compose.yml
20 | mode: "{{ docker_compose_file_mask }}"
21 | owner: "{{ docker_user.name }}"
22 | validate: docker-compose -f %s config
23 | notify: restart prometheus
24 |
25 | - name: Install blackbox config
26 | template:
27 | src: files/prometheus/blackbox.yml
28 | dest: /opt/prometheus/blackbox.yml
29 | mode: "{{ docker_compose_file_mask }}"
30 | owner: "{{ docker_user.name }}"
31 | notify: restart prometheus
32 |
33 | - name: Install alertmanager config
34 | template:
35 | src: files/prometheus/alertmanager.yml
36 | dest: /opt/prometheus/alertmanager.yml
37 | mode: "{{ docker_compose_file_mask }}"
38 | owner: "{{ docker_user.name }}"
39 | notify: restart prometheus
40 |
41 | - name: Install prometheus alert rules
42 | copy:
43 | src: files/prometheus/alert-rules.d/
44 | dest: /opt/prometheus/alert-rules.d/
45 | mode: "{{ docker_compose_file_mask }}"
46 | owner: "{{ docker_user.name }}"
47 | notify: reload prometheus
48 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/files/calibre/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | calibre:
3 | image: lscr.io/linuxserver/calibre-web:latest
4 | environment:
5 | - PUID={{ docker_user.id }}
6 | - PGID={{ docker_user.id }}
7 | - TZ={{ timezone }}
8 | restart: unless-stopped
9 | volumes:
10 | - /mnt/tank/app-data/calibre:/config
11 | - /mnt/tank/files/ebooks:/books:ro
12 | labels:
13 | - traefik.enable=true
14 | - traefik.http.routers.calibre.rule=Host(`calibre.jakehoward.tech`)
15 | networks:
16 | - default
17 | - traefik
18 |
19 | networks:
20 | traefik:
21 | external: true
22 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/files/librespeed/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | librespeed:
3 | image: lscr.io/linuxserver/librespeed:latest
4 | environment:
5 | - PUID={{ docker_user.id }}
6 | - PGID={{ docker_user.id }}
7 | - TZ={{ timezone }}
8 | ports:
9 | - 33377:80
10 | restart: unless-stopped
11 | labels:
12 | - traefik.enable=true
13 | - traefik.http.routers.librespeed.rule=Host(`speed.jakehoward.tech`)
14 | - traefik.http.routers.librespeed.middlewares=librespeed-auth@docker
15 | - traefik.http.middlewares.librespeed-auth.basicauth.users={{ librespeed_basicauth }}
16 | networks:
17 | - default
18 | - traefik
19 |
20 | networks:
21 | traefik:
22 | external: true
23 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/files/nextcloud/config.php:
--------------------------------------------------------------------------------
1 | <?php
2 | $CONFIG = array (
3 |   'memcache.local' => '\\OC\\Memcache\\APCu',
4 | 'memcache.locking' => '\\OC\\Memcache\\Redis',
5 | 'memcache.distributed' => '\\OC\\Memcache\\Redis',
6 | 'filelocking.enabled' => true,
7 | 'redis' =>
8 | array (
9 | 'host' => 'redis',
10 | 'port' => 6379,
11 | 'timeout' => 0.0,
12 | ),
13 | 'datadirectory' => '/data',
14 | 'instanceid' => '{{ nextcloud.instance_id }}',
15 | 'passwordsalt' => '{{ nextcloud.passwordsalt }}',
16 | 'secret' => '{{ nextcloud.secret }}',
17 | 'trusted_domains' =>
18 | array (
19 | 0 => 'intersect.jakehoward.tech',
20 | ),
21 | 'dbtype' => 'mysql',
22 | 'version' => '31.0.5.1',
23 | 'overwrite.cli.url' => 'https://intersect.jakehoward.tech',
24 | 'dbname' => 'nextcloud',
25 | 'dbhost' => 'mariadb',
26 | 'dbport' => '3306',
27 | 'dbtableprefix' => 'oc_',
28 | 'mysql.utf8mb4' => true,
29 | 'dbuser' => 'nextcloud',
30 | 'dbpassword' => 'nextcloud',
31 | 'installed' => true,
32 | 'trusted_proxies' =>
33 | array (
34 | 0 => '172.17.0.1',
35 | ),
36 | 'maintenance' => false,
37 | 'theme' => '',
38 | 'loglevel' => 0,
39 | 'preview_max_x' => '2048',
40 | 'preview_max_y' => '2048',
41 | 'jpeg_quality' => '60',
42 | 'has_rebuilt_cache' => true,
43 | 'logfile' => '/config/log/nextcloud.log',
44 | 'default_phone_region' => 'GB',
45 | 'mail_smtpmode' => 'smtp',
46 | 'mail_smtpsecure' => 'ssl',
47 | 'mail_sendmailmode' => 'smtp',
48 | 'mail_from_address' => '{{ nextcloud.email_from_user }}',
49 | 'mail_domain' => '{{ nextcloud.email_from_domain }}',
50 | 'mail_smtpauthtype' => 'LOGIN',
51 | 'mail_smtpauth' => 1,
52 | 'mail_smtphost' => 'smtp.eu.mailgun.org',
53 | 'mail_smtpname' => '{{ nextcloud.email_username }}',
54 | 'mail_smtppassword' => '{{ nextcloud.email_password }}',
55 | 'mail_smtpport' => '465',
56 | 'upgrade.disable-web' => true,
57 | # Allow Nextcloud to talk to other services
58 | 'allow_local_remote_servers' => true,
59 | 'maintenance_window_start' => 18,
60 | );
61 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/files/nextcloud/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | nextcloud:
3 | image: lscr.io/linuxserver/nextcloud:31.0.5
4 | environment:
5 | - PUID={{ docker_user.id }}
6 | - PGID={{ docker_user.id }}
7 | - TZ={{ timezone }}
8 | - DOCKER_MODS=theorangeone/lsio-mod-more-processes:latest
9 | volumes:
10 | - "{{ app_data_dir }}/nextcloud/apps:/config/www/nextcloud/apps"
11 | - "{{ app_data_dir }}/nextcloud/config:/config/www/nextcloud/config"
12 | - /mnt/tank/files/nextcloud:/data
13 | - /mnt/tank/files:/mnt/files
14 | restart: unless-stopped
15 | depends_on:
16 | - mariadb
17 | - redis
18 | tmpfs:
19 | - /config/log
20 | labels:
21 | - traefik.enable=true
22 | - traefik.http.routers.nextcloud.rule=Host(`intersect.jakehoward.tech`)
23 | - traefik.http.services.nextcloud-nextcloud.loadbalancer.server.port=443
24 | - traefik.http.services.nextcloud-nextcloud.loadbalancer.server.scheme=https
25 | - traefik.http.middlewares.nextcloud-hsts.headers.stsseconds=15552000
26 | - traefik.http.routers.nextcloud.middlewares=nextcloud-hsts@docker
27 | networks:
28 | - default
29 | - traefik
30 |
31 | collabora:
32 | image: collabora/code:latest
33 | restart: unless-stopped
34 | tty: true
35 | environment:
36 | - domain=intersect.jakehoward.tech
37 | labels:
38 | - traefik.enable=true
39 | - traefik.http.routers.collabora.rule=Host(`collabora.jakehoward.tech`)
40 | - traefik.http.services.collabora-nextcloud.loadbalancer.server.scheme=https
41 | networks:
42 | - default
43 | - traefik
44 | tmpfs:
45 | - /tmp
46 |
47 | mariadb:
48 | image: mariadb:10.11
49 | restart: unless-stopped
50 | volumes:
51 | - /mnt/speed/dbs/mariadb/nextcloud:/var/lib/mysql
52 | environment:
53 | - MYSQL_ROOT_PASSWORD=nextcloud
54 | - MYSQL_DATABASE=nextcloud
55 | - MYSQL_USER=nextcloud
56 | - MYSQL_PASSWORD=nextcloud
57 |
58 | redis:
59 | image: redis:7-alpine
60 | restart: unless-stopped
61 | volumes:
62 | - /mnt/speed/dbs/redis/nextcloud:/data
63 |
64 | networks:
65 | traefik:
66 | external: true
67 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/files/nextcloud/occ:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | exec docker-compose -f /opt/nextcloud/docker-compose.yml exec nextcloud occ "$@"
6 |
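NOTE: the wrapper makes host-side admin one-liners possible, e.g.:

    /opt/nextcloud/occ maintenance:mode --on
    /opt/nextcloud/occ files:scan --all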
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/files/quassel/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | quassel-core:
3 | image: lscr.io/linuxserver/quassel-core:latest
4 | environment:
5 | - PUID={{ docker_user.id }}
6 | - PGID={{ docker_user.id }}
7 | - TZ={{ timezone }}
8 | - DB_BACKEND=PostgreSQL
9 | - DB_PGSQL_USERNAME=quassel
10 | - DB_PGSQL_PASSWORD=quassel
11 | - DB_PGSQL_HOSTNAME=db
12 | - DB_PGSQL_PORT=5432
13 | - AUTH_AUTHENTICATOR=Database
14 | - RUN_OPTS=--config-from-environment
15 | restart: unless-stopped
16 | depends_on:
17 | - db
18 | ports:
19 | - 4242:4242
20 |
21 | db:
22 | image: postgres:14-alpine
23 | restart: unless-stopped
24 | environment:
25 | - POSTGRES_USER=quassel
26 | - POSTGRES_PASSWORD=quassel
27 | volumes:
28 | - /mnt/speed/dbs/postgres/quassel:/var/lib/postgresql/data
29 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/files/synapse/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | synapse:
3 | image: ghcr.io/element-hq/synapse:latest
4 | restart: unless-stopped
5 | environment:
6 | - SYNAPSE_CONFIG_PATH=/etc/homeserver.yaml
7 | - SYNAPSE_REPORT_STATS=yes
8 | - UID={{ docker_user.id }}
9 | - GID={{ docker_user.id }}
10 | - HTTP_PROXY={{ pve_hosts.qbittorrent.ip }}:3128
11 | - HTTPS_PROXY={{ pve_hosts.qbittorrent.ip }}:3128
12 | volumes:
13 | - "{{ app_data_dir }}/synapse/homeserver.yaml:/etc/homeserver.yaml"
14 | - "{{ app_data_dir }}/synapse:/data"
15 | depends_on:
16 | - db
17 | labels:
18 | - traefik.enable=true
19 | - traefik.http.routers.synapse.rule=Host(`matrix.jakehoward.tech`) || Host(`matrix.theorangeone.net`)
20 | networks:
21 | - default
22 | - traefik
23 |
24 | db:
25 | image: postgres:14-alpine
26 | restart: unless-stopped
27 | environment:
28 | - POSTGRES_USER=synapse
29 | - POSTGRES_PASSWORD=synapse
30 | - POSTGRES_INITDB_ARGS=--encoding=UTF-8 --lc-collate=C --lc-ctype=C
31 | volumes:
32 | - /mnt/speed/dbs/postgres/synapse:/var/lib/postgresql/data
33 |
34 | redis:
35 | image: redis:7-alpine
36 | restart: unless-stopped
37 | volumes:
38 | - /mnt/speed/dbs/redis/synapse:/data
39 |
40 | admin:
41 | image: awesometechnologies/synapse-admin:latest
42 | restart: unless-stopped
43 | labels:
44 | - traefik.enable=true
45 | - traefik.http.routers.synapse-admin.rule=Host(`synapse-admin.jakehoward.tech`)
46 | networks:
47 | - default
48 | - traefik
49 |
50 | networks:
51 | traefik:
52 | external: true
53 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/files/synapse/homeserver.yml:
--------------------------------------------------------------------------------
1 | # Just the keys I need. See https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml
2 |
3 | ## Server ##
4 |
5 | server_name: theorangeone.net
6 |
7 | pid_file: /data/homeserver.pid
8 |
9 | listeners:
10 | - port: 8008
11 | tls: false
12 | type: http
13 | x_forwarded: true
14 |
15 | resources:
16 | - names: [client, federation]
17 | compress: false
18 |
19 | ## TLS ##
20 |
21 | acme:
22 | # ACME support is disabled by default. Set this to `true` and uncomment
23 | # tls_certificate_path and tls_private_key_path above to enable it.
24 | #
25 | enabled: false
26 |
27 | ## Database ##
28 |
29 | database:
30 | # The database engine name
31 | name: psycopg2
32 | # Arguments to pass to the engine
33 | args:
34 | user: synapse
35 | password: synapse
36 | database: synapse
37 | host: db
38 |
39 | ## Logging ##
40 |
41 | log_config: /data/theorangeone.net.log.config
42 |
43 | ## Media Store ##
44 |
45 | media_store_path: /data/media_store
46 |
47 | uploads_path: /data/uploads
48 |
49 | ## Registration ##
50 |
51 | registration_shared_secret: "{{ synapse.registration_shared_secret }}"
52 |
53 | ## Metrics ###
54 |
55 | report_stats: true
56 |
57 | ## API Configuration ##
58 |
59 | macaroon_secret_key: "{{ synapse.macaroon_secret_key }}"
60 |
61 | form_secret: "{{ synapse.form_secret }}"
62 |
63 | ## Signing Keys ##
64 |
65 | signing_key_path: /data/theorangeone.net.signing.key
66 |
67 | key_refresh_interval: 2w
68 |
69 | trusted_key_servers:
70 | - server_name: matrix.org
71 |
72 | suppress_key_server_warning: true
73 |
74 | ## Workers ##
75 |
76 | redis:
77 | enabled: true
78 | host: redis
79 | port: 6379
80 |
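NOTE: with open registration disabled, accounts are minted via registration_shared_secret using the helper synapse ships in its image; illustratively, from /opt/synapse:

    docker-compose exec synapse register_new_matrix_user -c /etc/homeserver.yaml http://localhost:8008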
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/files/wallabag/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | wallabag:
3 | image: wallabag/wallabag:2.6.12
4 | restart: unless-stopped
5 | environment:
6 | - SYMFONY__ENV__SECRET={{ wallabag_secret }}
7 | - SYMFONY__ENV__DOMAIN_NAME=https://wallabag.jakehoward.tech
8 | - SYMFONY__ENV__FOSUSER_REGISTRATION=false
9 | - POSTGRES_PASSWORD=wallabag
10 | - POSTGRES_USER=wallabag
11 | - SYMFONY__ENV__DATABASE_DRIVER=pdo_pgsql
12 | - SYMFONY__ENV__DATABASE_HOST=db
13 | - SYMFONY__ENV__DATABASE_PORT=5432
14 | - SYMFONY__ENV__DATABASE_NAME=wallabag
15 | - SYMFONY__ENV__DATABASE_USER=wallabag
16 | - SYMFONY__ENV__DATABASE_PASSWORD=wallabag
17 | - POPULATE_DATABASE=true
18 | volumes:
19 | - /mnt/tank/app-data/wallabag/data:/var/www/wallabag/data
20 | - /mnt/tank/app-data/wallabag/images:/var/www/wallabag/images
21 | labels:
22 | - traefik.enable=true
23 | - traefik.http.routers.wallabag.rule=Host(`wallabag.jakehoward.tech`)
24 | depends_on:
25 | - db
26 | - redis
27 | networks:
28 | - default
29 | - traefik
30 |
31 | redis:
32 | image: redis:7-alpine
33 | restart: unless-stopped
34 | volumes:
35 | - /mnt/speed/dbs/redis/wallabag:/data
36 |
37 | db:
38 | image: postgres:14-alpine
39 | restart: unless-stopped
40 | volumes:
41 | - /mnt/speed/dbs/postgres/wallabag/:/var/lib/postgresql/data
42 | environment:
43 | - POSTGRES_PASSWORD=wallabag
44 | - POSTGRES_USER=wallabag
45 |
46 | networks:
47 | traefik:
48 | external: true
49 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/files/whoami/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | whoami:
3 | image: traefik/whoami:latest
4 | restart: unless-stopped
5 | labels:
6 | - traefik.enable=true
7 | - traefik.http.routers.whoami.rule=Host(`whoami.theorangeone.net`) || Host(`whoami-cdn.theorangeone.net`) || Host(`who.0rng.one`)
8 |
9 | - traefik.http.routers.whoami-private.rule=Host(`whoami-private.theorangeone.net`)
10 | - traefik.http.routers.whoami-private.middlewares=tailscale-only@file
11 | networks:
12 | - default
13 | - traefik
14 |
15 | networks:
16 | traefik:
17 | external: true
18 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/tasks/calibre.yml:
--------------------------------------------------------------------------------
1 | - name: Create calibre directory
2 | file:
3 | path: /opt/calibre
4 | state: directory
5 | owner: "{{ docker_user.name }}"
6 | mode: "{{ docker_compose_directory_mask }}"
7 |
8 | - name: Install calibre compose file
9 | template:
10 | src: files/calibre/docker-compose.yml
11 | dest: /opt/calibre/docker-compose.yml
12 | mode: "{{ docker_compose_file_mask }}"
13 | owner: "{{ docker_user.name }}"
14 | validate: docker-compose -f %s config
15 | register: compose_file
16 |
17 | - name: restart calibre
18 | shell:
19 | chdir: /opt/calibre
20 | cmd: "{{ docker_update_command }}"
21 | when: compose_file.changed
22 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/tasks/librespeed.yml:
--------------------------------------------------------------------------------
1 | - name: Include librespeed variables
2 | include_vars: librespeed.yml
3 |
4 | - name: Create librespeed directory
5 | file:
6 | path: /opt/librespeed
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install librespeed compose file
12 | template:
13 | src: files/librespeed/docker-compose.yml
14 | dest: /opt/librespeed/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | register: compose_file
19 |
20 | - name: restart librespeed
21 | shell:
22 | chdir: /opt/librespeed
23 | cmd: "{{ docker_update_command }}"
24 | when: compose_file.changed
25 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install calibre
2 | include_tasks: calibre.yml
3 |
4 | - name: Install librespeed
5 | include_tasks: librespeed.yml
6 |
7 | - name: Install nextcloud
8 | include_tasks: nextcloud.yml
9 |
10 | - name: Install quassel
11 | include_tasks: quassel.yml
12 |
13 | - name: Install synapse
14 | include_tasks: synapse.yml
15 |
16 | - name: Install wallabag
17 | include_tasks: wallabag.yml
18 |
19 | - name: Install whoami
20 | include_tasks: whoami.yml
21 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/tasks/nextcloud.yml:
--------------------------------------------------------------------------------
1 | - name: Include nextcloud variables
2 | include_vars: nextcloud.yml
3 |
4 | - name: Create nextcloud directory
5 | file:
6 | path: /opt/nextcloud
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install nextcloud compose file
12 | template:
13 | src: files/nextcloud/docker-compose.yml
14 | dest: /opt/nextcloud/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | register: compose_file
19 |
20 | - name: Install nextcloud config
21 | template:
22 | src: files/nextcloud/config.php
23 | dest: "{{ app_data_dir }}/nextcloud/config/config.php"
24 | mode: "{{ docker_compose_file_mask }}"
25 | owner: "{{ docker_user.name }}"
26 | register: config_file
27 |
28 | - name: Install occ script
29 | template:
30 | src: files/nextcloud/occ
31 | dest: /opt/nextcloud/occ
32 | mode: "0755"
33 | owner: "{{ docker_user.name }}"
34 |
35 | - name: restart nextcloud
36 | shell:
37 | chdir: /opt/nextcloud
38 | cmd: "{{ docker_update_command }}"
39 | when: compose_file.changed or config_file.changed
40 |
41 | - name: Set data dir permissions
42 | cron:
43 | name: Set nextcloud data permissions
44 | special_time: daily
45 | job: chown -R {{ docker_user.name }}:{{ docker_user.name }} /mnt/tank/files/nextcloud
46 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/tasks/quassel.yml:
--------------------------------------------------------------------------------
1 | - name: Create quassel directory
2 | file:
3 | path: /opt/quassel
4 | state: directory
5 | owner: "{{ docker_user.name }}"
6 | mode: "{{ docker_compose_directory_mask }}"
7 |
8 | - name: Install quassel compose file
9 | template:
10 | src: files/quassel/docker-compose.yml
11 | dest: /opt/quassel/docker-compose.yml
12 | mode: "{{ docker_compose_file_mask }}"
13 | owner: "{{ docker_user.name }}"
14 | validate: docker-compose -f %s config
15 | register: compose_file
16 |
17 | - name: restart quassel
18 | shell:
19 | chdir: /opt/quassel
20 | cmd: "{{ docker_update_command }}"
21 | when: compose_file.changed
22 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/tasks/synapse.yml:
--------------------------------------------------------------------------------
1 | - name: Include synapse variables
2 | include_vars: synapse.yml
3 |
4 | - name: Create synapse directory
5 | file:
6 | path: /opt/synapse
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install synapse compose file
12 | template:
13 | src: files/synapse/docker-compose.yml
14 | dest: /opt/synapse/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | register: compose_file
19 |
20 | - name: Install synapse config
21 | template:
22 | src: files/synapse/homeserver.yml
23 | dest: "{{ app_data_dir }}/synapse/homeserver.yaml"
24 | mode: "{{ docker_compose_file_mask }}"
25 | owner: "{{ docker_user.name }}"
26 | register: homeserver_config
27 |
28 | - name: restart synapse
29 | shell:
30 | chdir: /opt/synapse
31 | cmd: "{{ docker_update_command }}"
32 | when: compose_file.changed or homeserver_config.changed
33 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/tasks/wallabag.yml:
--------------------------------------------------------------------------------
1 | - name: Include wallabag variables
2 | include_vars: wallabag.yml
3 |
4 | - name: Create wallabag directory
5 | file:
6 | path: /opt/wallabag
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install wallabag compose file
12 | template:
13 | src: files/wallabag/docker-compose.yml
14 | dest: /opt/wallabag/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | register: compose_file
19 |
20 | - name: restart wallabag
21 | shell:
22 | chdir: /opt/wallabag
23 | cmd: "{{ docker_update_command }}"
24 | when: compose_file.changed
25 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/tasks/whoami.yml:
--------------------------------------------------------------------------------
1 | - name: Create whoami directory
2 | file:
3 | path: /opt/whoami
4 | state: directory
5 | owner: "{{ docker_user.name }}"
6 | mode: "{{ docker_compose_directory_mask }}"
7 |
8 | - name: Install whoami compose file
9 | template:
10 | src: files/whoami/docker-compose.yml
11 | dest: /opt/whoami/docker-compose.yml
12 | mode: "{{ docker_compose_file_mask }}"
13 | owner: "{{ docker_user.name }}"
14 | validate: docker-compose -f %s config
15 | register: compose_file
16 |
17 | - name: restart whoami
18 | shell:
19 | chdir: /opt/whoami
20 | cmd: "{{ docker_update_command }}"
21 | when: compose_file.changed
22 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/vars/librespeed.yml:
--------------------------------------------------------------------------------
1 | librespeed_basicauth: !vault |
2 | $ANSIBLE_VAULT;1.1;AES256
3 | 35356563313534363433663038363934303165303033616366333965653939653430363065613832
4 | 6361303335363161393130383565346237613362326433630a343663366263626531326633626366
5 | 30313535643466306662626361326361623536353636333965326131626130613337323732643865
6 | 3265643930333535630a666362353034376364613731326236363136363562303163646266313265
7 | 63386138356164633365313239383365393638393738633461393536653935643665626562313835
8 | 61623635366362303462633432376436326638373339666561383434613364366237366666393332
9 | 643139616536666232346262386239663931
10 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/vars/synapse.yml:
--------------------------------------------------------------------------------
1 | synapse:
2 | registration_shared_secret: !vault |
3 | $ANSIBLE_VAULT;1.1;AES256
4 | 36363037303531333331356363393631623633636366653139346665316638623039323031633066
5 | 3262366439396362336338313933363539616262383461330a353638613364616134613130616665
6 | 66643239366363623536663163386138633535353066613633346131366333316538643031396537
7 | 3634356163353232620a663538393966353961353562376136303161336636663535376238383938
8 | 66623539643032353131313538313131326237313936313061616566643639623939313161633230
9 | 33663666633130373138393937323939383865623939623035373835626363386466663233333534
10 | 376362626664643664653833646162333863
11 | macaroon_secret_key: !vault |
12 | $ANSIBLE_VAULT;1.1;AES256
13 | 64643336366430326631643331316239646539363362383531313335396231306335623538333761
14 | 3565393330656363376233356631353665636139643261360a656362623264353931613936666531
15 | 64373437366364313862326564623135643838343834633364656238356565353463393230326631
16 | 6438383561666561390a323965353136383264376265633364356265346463316161383563663939
17 | 66653565623666353833353639386634633631366234643933323836633033613963373863303466
18 | 66326435393266326233613833616263623739346634613531346535346136373965336333373566
19 | 316535323861356438623065313530346461
20 | form_secret: !vault |
21 | $ANSIBLE_VAULT;1.1;AES256
22 | 36313462383034633133373938613161316436313631333061346664653861353062316539393536
23 | 3436373931363139653232353764393534303530366231310a643335333735656361333632346332
24 | 30393931366466666535363837663436633533366662373834643362636663386439356236393933
25 | 3532363432623637330a663263643263333764383637373339616665353631616130643537326263
26 | 38386538393032353365313733393835303630356536303635373764633139626162356165343539
27 | 64343135323833356130363262353638353533396563656566666635663263383065646630663063
28 | 366137356638386537376236623531326636
29 |
--------------------------------------------------------------------------------
/ansible/roles/pve_docker/vars/wallabag.yml:
--------------------------------------------------------------------------------
1 | wallabag_secret: !vault |
2 | $ANSIBLE_VAULT;1.1;AES256
3 | 64353939666265306238333239316631373533643030663638316338356330653539643837373637
4 | 6638323330373264363535316339616432373132613939360a653533393164386266646337663234
5 | 33306333663165303431343537336465383937646437643630313037326135643666656435373331
6 | 6563303734643532370a313361656434333537366636366265653861656636386164373261666633
7 | 31646263326539303862386261623938323338333839656135656663643231653361663438326136
8 | 65306537383931393432633561333131386138333132383737383539646233313735613566633537
9 | 61656630396333376635393264346266356238626134316331623638393234363161336439303939
10 | 31393361393432366638613865323663326630306432363364663266663264656339323939306266
11 | 62646464306363353032326638613361633433303433633361656662353237396631
12 |
--------------------------------------------------------------------------------
/ansible/roles/pve_tailscale_route/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Get routes
2 | command:
3 | argv:
4 | - ip
5 | - route
6 | - show
7 | - "{{ tailscale_cidr }}"
8 | register: routes
9 | changed_when: false
10 |
11 | - name: Add route to tailscale hosts via ingress
12 | command:
13 | argv:
14 | - ip
15 | - route
16 | - add
17 | - "{{ tailscale_cidr }}"
18 | - via
19 | - "{{ pve_hosts.ingress.ip }}"
20 | when: tailscale_cidr not in routes.stdout
21 |
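22 | # Note: `ip route add` is runtime-only (lost on reboot); the lookup above just keeps the task idempotent between runs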
--------------------------------------------------------------------------------
/ansible/roles/qbittorrent/files/nginx.conf:
--------------------------------------------------------------------------------
1 | server {
2 | listen 80;
3 |
4 | gzip off;
5 | gzip_static off;
6 |
7 | root /mnt/downloads;
8 |
9 | autoindex on;
10 | autoindex_exact_size off;
11 | }
12 |
--------------------------------------------------------------------------------
/ansible/roles/qbittorrent/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: reload nginx
2 | service:
3 | name: nginx
4 | state: reloaded
5 |
6 | - name: restart qbittorrent
7 | service:
8 | name: qbittorrent-nox@{{ qbittorrent_user.name }}
9 | state: restarted
10 |
--------------------------------------------------------------------------------
/ansible/roles/qbittorrent/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: qbittorrent
2 | include_tasks: qbittorrent.yml
3 |
4 | - name: nginx
5 | include_tasks: nginx.yml
6 |
--------------------------------------------------------------------------------
/ansible/roles/qbittorrent/tasks/nginx.yml:
--------------------------------------------------------------------------------
1 | - name: Nginx config
2 | template:
3 | src: files/nginx.conf
4 | dest: /etc/nginx/http.d/downloads.conf
5 | mode: "0644"
6 | notify: reload nginx
7 |
--------------------------------------------------------------------------------
/ansible/roles/qbittorrent/tasks/qbittorrent.yml:
--------------------------------------------------------------------------------
1 | - name: Install qbittorrent
2 | package:
3 | name: qbittorrent-nox
4 |
5 | - name: Create user
6 | user:
7 | name: qbittorrent
8 | system: true
9 | register: qbittorrent_user
10 |
11 | - name: Enable service
12 | service:
13 | name: qbittorrent-nox@{{ qbittorrent_user.name }}
14 | enabled: true
15 |
16 | - name: Set configuration
17 | ini_file:
18 | path: "{{ qbittorrent_user.home }}/.config/qBittorrent/qBittorrent.conf"
19 |     mode: "0700"
20 | owner: "{{ qbittorrent_user.name }}"
21 | section: "{{ item.section }}"
22 | option: "{{ item.option }}"
23 | value: "{{ item.value }}"
24 | no_extra_spaces: true
25 | loop_control:
26 | label: "{{ item.section }}.{{ item.option }}={{ item.value }}"
27 | loop:
28 | - {section: AutoRun, option: enabled, value: "false"}
29 | - {section: LegalNotice, option: Accepted, value: "true"}
30 | - {section: Preferences, option: Connection\UPnP, value: "false"}
31 | - {section: Preferences, option: Downloads\SavePath, value: /mnt/media/temp/downloads}
32 | - {section: Preferences, option: WebUI\Address, value: "*"}
33 | - {section: Preferences, option: WebUI\ServerDomains, value: "*"}
34 | - {section: Preferences, option: WebUI\Port, value: "8080"}
35 |     # WebUI auth is effectively disabled for every source address; access is expected to be gated upstream
36 |     - {section: Preferences, option: WebUI\AuthSubnetWhitelist, value: 0.0.0.0/0}
37 |     - {section: Preferences, option: WebUI\AuthSubnetWhitelistEnabled, value: "true"}
38 |     - {section: Preferences, option: WebUI\LocalHostAuth, value: "false"}
39 |     - {section: Preferences, option: Bittorrent\MaxConnecs, value: "-1"}
40 |     - {section: Preferences, option: Bittorrent\MaxConnecsPerTorrent, value: "-1"}
41 |     - {section: Preferences, option: Bittorrent\MaxUploads, value: "-1"}
42 |     - {section: Preferences, option: Bittorrent\MaxUploadsPerTorrent, value: "-1"}
43 |   notify: restart qbittorrent
44 |
--------------------------------------------------------------------------------
/ansible/roles/renovate/files/config.js:
--------------------------------------------------------------------------------
1 | module.exports = {
2 | endpoint: 'https://git.theorangeone.net/',
3 | token: '{{ vault_renovate_gitea_token }}',
4 | platform: 'gitea',
5 | //dryRun: true,
6 | autodiscover: true,
7 | onboarding: false,
8 | redisUrl: 'redis://redis',
9 | repositoryCache: 'enabled',
10 | persistRepoData: true,
11 | binarySource: "docker",
12 | dockerUser: "{{ docker_user.id }}",
13 | baseDir: "/mnt/data",
14 | cacheDir: "/mnt/data/cache"
15 | };
16 |
--------------------------------------------------------------------------------
/ansible/roles/renovate/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | renovate:
3 | image: renovate/renovate:37-slim
4 | command: /entrypoint.sh
5 | user: "{{ docker_user.id }}"
6 | environment:
7 | - TZ={{ timezone }}
8 | - GITHUB_COM_TOKEN={{ vault_renovate_github_token }}
9 | - DOCKER_HOST=tcp://docker_proxy:2375
10 | - LOG_LEVEL=debug # Noisy, but required for debugging
11 | restart: unless-stopped
12 | networks:
13 | - default
14 | - renovate_private
15 | depends_on:
16 | - redis
17 | - docker_proxy
18 | volumes:
19 | - ./config.js:/usr/src/app/config.js:ro
20 | - ./entrypoint.sh:/entrypoint.sh:ro
21 |       - /mnt/data:/mnt/data # Host and container paths must match: renovate spawns sibling containers that re-mount these paths
22 |
23 | redis:
24 | image: redis:7-alpine
25 | restart: unless-stopped
26 | volumes:
27 | - ./redis:/data
28 |
29 | docker_proxy:
30 | image: lscr.io/linuxserver/socket-proxy:latest
31 | restart: unless-stopped
32 | environment:
33 | - POST=1
34 | - CONTAINERS=1
35 | - INFO=1
36 | - IMAGES=1
37 | volumes:
38 | - /var/run/docker.sock:/var/run/docker.sock:ro
39 | networks:
40 | - renovate_private
41 | tmpfs:
42 | - /run
43 | logging:
44 | driver: none
45 |
46 | networks:
47 | renovate_private:
48 | internal: true
49 |
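50 | # renovate_private is internal-only, so the Docker socket proxy is unreachable from outside this compose project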
--------------------------------------------------------------------------------
/ansible/roles/renovate/files/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | curl -fsSL https://github.com/aptible/supercronic/releases/download/v0.2.1/supercronic-linux-amd64 -o ~/supercronic
6 |
7 | chmod +x ~/supercronic
8 |
9 | # Every 2 hours between 08:00 and 23:00
10 | # supercronic reads its crontab from a process substitution, so no crontab file is written
11 | ~/supercronic <(echo "0 8-23/2 * * *" renovate "$@")
12 |
--------------------------------------------------------------------------------
/ansible/roles/renovate/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart renovate
2 | shell:
3 | chdir: /opt/renovate
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/renovate/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/renovate
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install compose file
12 | template:
13 | src: files/docker-compose.yml
14 | dest: /opt/renovate/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | notify: restart renovate
19 |
20 | - name: Install config file
21 | template:
22 | src: files/config.js
23 | dest: /opt/renovate/config.js
24 | mode: "{{ docker_compose_file_mask }}"
25 | owner: "{{ docker_user.name }}"
26 | notify: restart renovate
27 |
28 | - name: Install custom entrypoint
29 | template:
30 | src: files/entrypoint.sh
31 | dest: /opt/renovate/entrypoint.sh
32 | mode: "0755"
33 | owner: "{{ docker_user.name }}"
34 | notify: restart renovate
35 |
--------------------------------------------------------------------------------
/ansible/roles/renovate/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 37366639643131363366396164326262323466373931336364636638333163613939373839653031
3 | 6639386366626634396466666335623935663462663632360a663532353265626536663162366463
4 | 66663864336436643739316638373934646134366639643762353338363637373831313764343164
5 | 3263303535386538640a636635643830383633653761613633393835396139343365333266373732
6 | 33363638356138663832373163363739663834653161653164323762313633616632643833623530
7 | 35336461653963623536646230326536666661636661366566653030363363373132633836376235
8 | 31626634613962356237623830653031626533663562663661616433373761373333323065313337
9 | 31616631653063326532623431356366663830343336396134303534373933363538653365313038
10 | 37353332653638643130653962643431613638383839323831623338643631666630663832303435
11 | 37306261333430366561386266383037626563626462663363396335663763313063393334383663
12 | 383936653136336266343539313536656139
13 |
--------------------------------------------------------------------------------
/ansible/roles/restic/defaults/main.yml:
--------------------------------------------------------------------------------
1 | restic_backup_locations: []
2 | restic_backup_excludes: []
3 | restic_dir: "{{ hostname_slug }}"
4 |
--------------------------------------------------------------------------------
/ansible/roles/restic/files/backrest.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | export AWS_ACCESS_KEY_ID="{{ vault_restic_b2_application_key_id }}"
6 | export AWS_SECRET_ACCESS_KEY="{{ vault_restic_b2_application_key }}"
7 | export RESTIC_PASSWORD="{{ vault_restic_key }}"
8 | export RESTIC_REPOSITORY="s3:{{ restic_b2_endpoint }}/{{ restic_b2_bucket }}/{{ restic_dir }}"
9 | export GOGC=20 # HACK: Workaround for restic's high memory usage https://github.com/restic/restic/issues/1988
10 |
11 | set -x
12 |
13 | # Hand everything off to restic, with the repository environment prepared above
14 | exec restic "$@"
15 |
--------------------------------------------------------------------------------
/ansible/roles/restic/files/restic-backup.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | set -e
4 |
5 | # No exec here: the forget/prune step below must still run after the backup
6 | $HOME/backrest.sh --verbose backup --files-from=$HOME/restic-include.txt --exclude-file=$HOME/restic-excludes.txt
7 |
8 | exec $HOME/backrest.sh forget --prune --keep-daily 30 --keep-monthly 3 --group-by host
9 |
--------------------------------------------------------------------------------
/ansible/roles/restic/files/restic-post.hook:
--------------------------------------------------------------------------------
1 | [Trigger]
2 | Operation = Upgrade
3 | Type = Package
4 | Target = restic
5 |
6 | [Action]
7 | Description = Set restic binary permissions
8 | When = PostTransaction
9 | Exec = /usr/share/libalpm/scripts/restic-post.sh
10 |
--------------------------------------------------------------------------------
/ansible/roles/restic/files/restic-post.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | # See https://restic.readthedocs.io/en/stable/080_examples.html#backing-up-your-system-without-running-restic-as-root
4 |
5 | set -e
6 |
7 | RESTIC_BIN=$(which restic)
8 |
9 | # Set owner
10 | chown root:restic $RESTIC_BIN
11 | chmod 750 $RESTIC_BIN
12 |
13 | # Let restic read root-only paths without running as root (cap_dac_read_search bypasses read permission checks)
14 | setcap cap_dac_read_search=+ep $RESTIC_BIN
15 |
--------------------------------------------------------------------------------
/ansible/roles/restic/tasks/homeassistant.yml:
--------------------------------------------------------------------------------
1 | - name: Install CIFS utils
2 | package:
3 | name: cifs-utils
4 |
5 | - name: Create dir for CIFS mount
6 | file:
7 | path: /mnt/home-assistant
8 | state: directory
9 | mode: "0755"
10 |
11 | - name: Create dir for each CIFS mount
12 | file:
13 | path: /mnt/home-assistant/{{ item }}
14 | state: directory
15 | mode: "0600"
16 | loop: "{{ restic_homeassistant_mounts }}"
17 |
18 | - name: Create mounts
19 | mount:
20 | path: /mnt/home-assistant/{{ item }}
21 | fstype: cifs
22 | opts: username=homeassistant,password={{ vault_homeassistant_smb_password }}
23 | src: //{{ pve_hosts.homeassistant.ip }}/{{ item }}
24 | state: mounted
25 | loop: "{{ restic_homeassistant_mounts }}"
26 |
--------------------------------------------------------------------------------
/ansible/roles/restic/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Install restic
2 | package:
3 | name: restic
4 |
5 | - name: Install runitor
6 | kewlfft.aur.aur:
7 | name: runitor-bin
8 |
9 | - name: Make user
10 | user:
11 | name: restic
12 | shell: /bin/nologin
13 | system: false
14 |
15 | - name: Install scripts
16 | template:
17 | src: files/{{ item }}
18 | dest: /home/restic/{{ item }}
19 | mode: "0700"
20 | owner: restic
21 | loop:
22 | - backrest.sh
23 | - restic-backup.sh
24 |
25 | - name: Install includes file
26 | copy:
27 | content: "{{ restic_backup_locations | join('\n') }}"
28 | dest: /home/restic/restic-include.txt
29 | mode: "0644"
30 | owner: restic
31 |
32 | - name: Install excludes file
33 | copy:
34 | content: "{{ restic_backup_excludes | join('\n') }}"
35 | dest: /home/restic/restic-excludes.txt
36 | mode: "0644"
37 | owner: restic
38 |
39 | - name: Set restic binary permissions
40 | file:
41 | path: /usr/bin/restic
42 | mode: "0750"
43 | owner: root
44 | group: restic
45 |
46 | - name: Set cap_dac_read_search=+ep on restic
47 | community.general.capabilities:
48 | path: /usr/bin/restic
49 | capability: cap_dac_read_search=+ep
50 |
51 | - name: Schedule backup
52 | cron:
53 | name: restic backup
54 | hour: 0
55 | minute: 0
56 | job: CHECK_UUID={{ vault_restic_healthchecks_id }} /usr/bin/runitor -- /home/restic/restic-backup.sh
57 | user: restic
58 |
59 | - name: Install pacman post script
60 | template:
61 | src: files/restic-post.sh
62 | dest: /usr/share/libalpm/scripts/restic-post.sh
63 | mode: "0700"
64 | when: ansible_os_family == 'Archlinux'
65 |
66 | - name: Install pacman post hook
67 | template:
68 | src: files/restic-post.hook
69 | dest: /usr/share/libalpm/hooks/restic-post.hook
70 | mode: "0600"
71 | when: ansible_os_family == 'Archlinux'
72 |
73 | - name: Install HomeAssistant mounts
74 | include_tasks: homeassistant.yml
75 | when: ansible_hostname == 'pve-restic'
76 |
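77 | # Net effect: the unprivileged "restic" user can read and back up root-owned files without sudo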
--------------------------------------------------------------------------------
/ansible/roles/restic/vars/main.yml:
--------------------------------------------------------------------------------
1 | restic_b2_bucket: 0rng-restic
2 | restic_b2_endpoint: s3.eu-central-003.backblazeb2.com
3 | healthchecks_host: https://hc-ping.com
4 |
5 | restic_homeassistant_mounts:
6 | - backup
7 | - config
8 |
--------------------------------------------------------------------------------
/ansible/roles/s3_sync/files/rclone.conf:
--------------------------------------------------------------------------------
1 | [s3]
2 | type = s3
3 | provider = AWS
4 | access_key_id = {{ vault_access_key_id }}
5 | secret_access_key = {{ vault_secret_access_key }}
6 | region = eu-west-2
7 | server_side_encryption = AES256
8 |
--------------------------------------------------------------------------------
/ansible/roles/s3_sync/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Install rclone
5 | package:
6 | name: rclone
7 |
8 | - name: Install runitor
9 | kewlfft.aur.aur:
10 | name: runitor-bin
11 |
12 | - name: Make user
13 | user:
14 | name: rclone
15 | shell: /bin/nologin
16 | system: false
17 | register: rclone_user
18 |
19 | - name: Create config directory
20 | file:
21 | path: "{{ rclone_user.home }}/.config/rclone"
22 | state: directory
23 | owner: rclone
24 | mode: "0700"
25 |
26 | - name: Install rclone config
27 | template:
28 | src: files/rclone.conf
29 | dest: "{{ rclone_user.home }}/.config/rclone/rclone.conf"
30 | owner: rclone
31 | mode: "0600"
32 |
33 | - name: Create sync directory
34 | file:
35 | path: "{{ rclone_user.home }}/sync"
36 | state: directory
37 | owner: rclone
38 | mode: "0700"
39 |
40 | - name: Schedule sync
41 | cron:
42 | name: Sync terraform state
43 | hour: 23
44 | minute: 0
45 | job: CHECK_UUID={{ vault_healthchecks_id }} /usr/bin/runitor -- /usr/bin/rclone sync s3:0rng-terraform {{ rclone_user.home }}/sync/0rng-terraform
46 | user: rclone
47 |
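48 | # runitor wraps the sync and pings Healthchecks using CHECK_UUID, so missed or failed runs alert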
--------------------------------------------------------------------------------
/ansible/roles/s3_sync/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 37613533316463396164343438656435336538386662303233363362336638323630386539663164
3 | 3935363961323436386232373537356262393736303161640a623636353634346162323764653133
4 | 63313838393764626436306161343934643237333733336465383235306163343561666234623337
5 | 3561623665643631310a656466663533313362346134333731613062653862316438373331386664
6 | 39303365633661356136396261616566343230386536336238336565386639613362326461666665
7 | 63316337623362663839376561323063633931326133303730653037306461376230613663663465
8 | 64613834316164363336383338643139366532336264646233323639646536326330663265356431
9 | 61623938653633636539663063636139666261326130323139623565303632633335633266376666
10 | 35363138396137336264386638613861313764383031373434646461613463386132303762383162
11 | 65393464343432646266663831626531613239303431326661336636303432323065323664373233
12 | 38333363346163356463386537393563346631343263323232633561313238663632393265316636
13 | 62643261336332346535393661383166623733396564303832373162316166326635616637396537
14 | 6661
15 |
--------------------------------------------------------------------------------
/ansible/roles/slides/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | slides:
3 | image: ghcr.io/realorangeone/slides:latest
4 | restart: unless-stopped
5 | environment:
6 | - TZ={{ timezone }}
7 | - PUID={{ docker_user.id }}
8 | volumes:
9 | - ./htpasswd:/etc/nginx/.htpasswd:ro
10 | - ./slides:/srv
11 | networks:
12 | - default
13 | - coredns
14 |
15 | networks:
16 | coredns:
17 | external: true
18 |
--------------------------------------------------------------------------------
/ansible/roles/slides/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart slides
2 | shell:
3 | chdir: /opt/slides
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/slides/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/slides
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install compose file
12 | template:
13 | src: files/docker-compose.yml
14 | dest: /opt/slides/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | notify: restart slides
19 |
20 | - name: Create credentials
21 | htpasswd:
22 | path: /opt/slides/htpasswd
23 | name: "{{ item.user }}"
24 | password: "{{ item.password }}"
25 | owner: "{{ docker_user.name }}"
26 | mode: "0600"
27 | loop: "{{ webdav_credentials }}"
28 | loop_control:
29 | label: "{{ item.user }}"
30 | notify: restart slides
31 |
32 | - name: Install nginx config
33 | template:
34 | src: files/nginx-docker.conf
35 | dest: /etc/nginx/http.d/slides.conf
36 | mode: "0644"
37 | notify: reload nginx
38 | vars:
39 | server_name: slides.jakehoward.tech
40 | upstream: slides-slides-1.docker:80
41 | ssl_cert_path: /etc/letsencrypt/live/slides.jakehoward.tech
42 | location_extra: |
43 | client_max_body_size 0;
44 |
--------------------------------------------------------------------------------
/ansible/roles/slides/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 39346133313638313030663139356637666666346665356161383332613836656131353830323530
3 | 6636613939346437633430316436363538623339643439300a363464383763613631333161613034
4 | 31336138386639306166313532633439343763363563616130633165323166376265303663643130
5 | 3634303836383737340a643834373666386261363533353936623335396633396366373230653932
6 | 38316662333932646636623839396630383339393135643533323832623330323666613465626431
7 | 36356663653861666362376265636162336531663266616432636635333537656661396263643631
8 | 36653462663365646338623434393738346566633266643634633430336235343531613631383562
9 | 30333165313438363966626264643732353833366662653164666631636465636538303961316465
10 | 62356132643837646638376334343935313338316266393261316538393561356264313932623236
11 | 62326235303139353034636365663434383439366163646635626563666434636564623336653634
12 | 35363834306534333531383131323830623438323736656234623263353930666130363132343464
13 | 32363433653066656364393732366366353033663332366166343139616433303439623631663537
14 | 65313539663333626333623966313864623639353031313131346635666138613032
15 |
--------------------------------------------------------------------------------
/ansible/roles/tandoor/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | tandoor:
3 | image: vabene1111/recipes:latest
4 | environment:
5 | - TIMEZONE={{ timezone }}
6 | - DEBUG=0
7 | - ALLOWED_HOSTS=recipes.jakehoward.tech
8 | - SECRET_KEY={{ vault_tandoor_secret_key }}
9 | - DATABASE_URL=postgres://tandoor:tandoor@db:5432/tandoor
10 | - DB_ENGINE=django.db.backends.postgresql
11 | - POSTGRES_HOST=db
12 | - POSTGRES_PORT=5432
13 | - POSTGRES_USER=tandoor
14 | - POSTGRES_PASSWORD=tandoor
15 | - GUNICORN_MEDIA=1
16 | - EMAIL_HOST=smtp.eu.mailgun.org
17 | - EMAIL_PORT=465
18 | - EMAIL_HOST_USER={{ vault_tandoor_email_user }}
19 | - EMAIL_HOST_PASSWORD={{ vault_tandoor_email_password }}
20 | - EMAIL_USE_TLS=1
21 | - DEFAULT_FROM_EMAIL={{ vault_tandoor_email_from }}
22 | restart: unless-stopped
23 | labels:
24 | - traefik.enable=true
25 | - traefik.http.routers.tandoor.rule=Host(`recipes.jakehoward.tech`)
26 | depends_on:
27 | - db
28 | networks:
29 | - default
30 | - traefik
31 | volumes:
32 | - "{{ app_data_dir }}/tandoor/media:/opt/recipes/mediafiles"
33 | tmpfs:
34 | - /opt/recipes/staticfiles
35 |
36 | db:
37 | image: postgres:14-alpine
38 | restart: unless-stopped
39 | volumes:
40 | - /mnt/speed/dbs/postgres/tandoor/:/var/lib/postgresql/data
41 | environment:
42 | - POSTGRES_PASSWORD=tandoor
43 | - POSTGRES_USER=tandoor
44 |
45 | networks:
46 | traefik:
47 | external: true
48 |
--------------------------------------------------------------------------------
/ansible/roles/tandoor/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart tandoor
2 | shell:
3 | chdir: /opt/tandoor
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/tandoor/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/tandoor
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install compose file
12 | template:
13 | src: files/docker-compose.yml
14 | dest: /opt/tandoor/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | notify: restart tandoor
19 |
--------------------------------------------------------------------------------
/ansible/roles/tandoor/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 32376533376133353135353163353539336133646561363930313164393935616638333166346137
3 | 3762663933633039346564646366333639313333323138330a633531343733326438363633656466
4 | 31376566653130633462613938333031636164346636393034373830356332333633396661613639
5 | 3364393366353631360a396638626435613936663536346531663834643939396433383337613031
6 | 37323463616465333234616532313330666362336431343462623833353961623030343135313362
7 | 34346161356137336632386533336131383166383038343664613962343838393134346332643936
8 | 62646132306364383232363933653664626434656562643539623163663763643032346535623035
9 | 36386236393631396235306533666134303166373839623831363461346238316132653936393738
10 | 36383362383232326531633165323431346139633864336364343030353335303261376537343163
11 | 34623362353233666631643638643235666465613965313064333565343431656566363561386636
12 | 62633635306166313238313365313134343539353232616461316330666561653765333866306133
13 | 35343137323636623565626634326364616665663432313434623231636231656335343332333632
14 | 32343664663363306237643532396231383838626361333533336432613930313232626536646331
15 | 39643632666661313534363431346138383933383861356232383639353939306336626465636436
16 | 64616663613561643066636466663663616137376136643538383139393437393134633264343139
17 | 64353430316630393231393636313164383336396535383834323239666535623130313561396635
18 | 30373865353734663131373339393163396664663135343330313761613530333062
19 |
--------------------------------------------------------------------------------
/ansible/roles/traefik/defaults/main.yml:
--------------------------------------------------------------------------------
1 | traefik_provider_jellyfin: false
2 | traefik_provider_homeassistant: false
3 | traefik_provider_grafana: false
4 | traefik_provider_uptime_kuma: false
5 |
--------------------------------------------------------------------------------
/ansible/roles/traefik/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | traefik:
3 | image: traefik:v3
4 | user: "{{ docker_user.id }}"
5 | environment:
6 | - GANDIV5_PERSONAL_ACCESS_TOKEN={{ vault_gandi_personal_access_token }}
7 | volumes:
8 | - ./traefik:/etc/traefik
9 | restart: unless-stopped
10 | ports:
11 | - 80:80
12 | - 443:443
13 | - "{{ private_ip }}:8080:8080"
14 | depends_on:
15 | - docker_proxy
16 | networks:
17 | - default
18 | - traefik
19 | - proxy_private
20 |
21 | docker_proxy:
22 | image: lscr.io/linuxserver/socket-proxy:latest
23 | restart: unless-stopped
24 | environment:
25 | - CONTAINERS=1
26 | - INFO=1
27 | volumes:
28 | - /var/run/docker.sock:/var/run/docker.sock:ro
29 | networks:
30 | - proxy_private
31 | tmpfs:
32 | - /run
33 | logging:
34 | driver: none
35 |
36 | networks:
37 | traefik:
38 | external: true
39 | proxy_private:
40 | internal: true
41 |
--------------------------------------------------------------------------------
/ansible/roles/traefik/files/file-provider-grafana.yml:
--------------------------------------------------------------------------------
1 | http:
2 | routers:
3 | router-grafana:
4 | rule: Host(`grafana.jakehoward.tech`)
5 | service: service-grafana
6 | services:
7 | service-grafana:
8 | loadBalancer:
9 | servers:
10 | - url: http://{{ pve_hosts.forrest.ip }}:3000
11 |
--------------------------------------------------------------------------------
/ansible/roles/traefik/files/file-provider-homeassistant.yml:
--------------------------------------------------------------------------------
1 | http:
2 | routers:
3 | router-homeassistant:
4 | rule: Host(`homeassistant.jakehoward.tech`)
5 | service: service-homeassistant
6 | middlewares:
7 | - tailscale-only@file
8 | services:
9 | service-homeassistant:
10 | loadBalancer:
11 | servers:
12 | - url: http://{{ pve_hosts.homeassistant.ip }}:8123
13 |
--------------------------------------------------------------------------------
/ansible/roles/traefik/files/file-provider-jellyfin.yml:
--------------------------------------------------------------------------------
1 | http:
2 | routers:
3 | router-jellyfin:
4 | rule: Host(`media.jakehoward.tech`)
5 | service: service-jellyfin
6 | services:
7 | service-jellyfin:
8 | loadBalancer:
9 | servers:
10 | - url: http://{{ pve_hosts.jellyfin.ip }}:8096
11 |
--------------------------------------------------------------------------------
/ansible/roles/traefik/files/file-provider-main.yml:
--------------------------------------------------------------------------------
1 | http:
2 | middlewares:
3 | compress:
4 | compress: {}
5 |
6 | secure-headers:
7 | headers:
8 | stsSeconds: 2592000
9 | contentTypeNosniff: true
10 | browserXssFilter: true
11 | customResponseHeaders:
12 | Server: ""
13 |
14 | # https://paramdeo.com/blog/opting-your-website-out-of-googles-floc-network
15 | floc-block:
16 | headers:
17 | customResponseHeaders:
18 | Permissions-Policy: interest-cohort=()
19 |
20 | tailscale-only:
21 | ipAllowList:
22 | sourceRange:
23 | - "{{ tailscale_cidr }}"
24 | - "{{ tailscale_cidr_ipv6 }}"
25 | - "{{ pve_hosts.forrest.ip }}"
26 | - "{{ pve_hosts.forrest.ipv6 }}"
27 |
28 | private-access:
29 | ipAllowList:
30 | sourceRange:
31 | - "{{ tailscale_cidr }}"
32 | - "{{ tailscale_cidr_ipv6 }}"
33 | - "{{ pve_hosts.internal_cidr }}"
34 | - "{{ pve_hosts.internal_cidr_ipv6 }}"
35 |
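36 | # Middlewares defined here are referenced elsewhere as <name>@file (e.g. tailscale-only@file)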
--------------------------------------------------------------------------------
/ansible/roles/traefik/files/file-provider-uptime-kuma.yml:
--------------------------------------------------------------------------------
1 | http:
2 | routers:
3 | router-uptime-kuma:
4 | rule: Host(`uptime.jakehoward.tech`)
5 | service: service-uptime-kuma
6 | services:
7 | service-uptime-kuma:
8 | loadBalancer:
9 | servers:
10 | - url: http://{{ pve_hosts.forrest.ip }}:3001
11 |
--------------------------------------------------------------------------------
/ansible/roles/traefik/files/traefik.yml:
--------------------------------------------------------------------------------
1 | entryPoints:
2 | web:
3 | address: :80
4 | http:
5 | redirections:
6 | entryPoint:
7 | to: web-secure
8 | scheme: https
9 | proxyProtocol:
10 | trustedIPs:
11 | - "{{ wireguard.cidr }}"
12 | - "{{ pve_hosts.internal_cidr }}"
13 | - "{{ tailscale_cidr }}"
14 | web-secure:
15 | address: :443
16 | http:
17 | middlewares:
18 | - floc-block@file
19 | - compress@file
20 | - secure-headers@file
21 | tls:
22 | certResolver: le
23 | domains:
24 | - main: theorangeone.net
25 | sans: ["*.theorangeone.net"]
26 | - main: jakehoward.tech
27 | sans: ["*.jakehoward.tech"]
28 | - main: 0rng.one
29 | sans: ["*.0rng.one"]
30 | proxyProtocol:
31 | trustedIPs:
32 | - "{{ pve_hosts.ingress.ip }}/32"
33 | forwardedHeaders:
34 | trustedIPs:
35 | - "{{ wireguard.server.ip }}/32" # This is obtained from the connecting `proxy_protocol`
36 | transport:
37 | respondingTimeouts:
38 | readTimeout: 180s
39 | traefik:
40 | address: :8080
41 |
42 | ping: {}
43 |
44 | providers:
45 | docker:
46 | endpoint: tcp://docker_proxy:2375
47 | watch: true
48 | exposedByDefault: false
49 | network: traefik
50 | file:
51 | directory: /etc/traefik/conf
52 |
53 | api:
54 | dashboard: true
55 | insecure: true
56 | disableDashboardAd: true
57 |
58 | certificatesResolvers:
59 | le:
60 | acme:
61 | email: "{{ vault_letsencrypt_email }}"
62 | storage: /etc/traefik/acme.json
63 | dnsChallenge:
64 | provider: gandiv5
65 | delayBeforeCheck: 0s
66 | resolvers:
67 | - 9.9.9.9:53
68 | - 149.112.112.112:53
69 |
70 | serversTransport:
71 | insecureSkipVerify: true
72 |
73 | metrics:
74 | prometheus:
75 | entryPoint: traefik
76 |
77 | tls:
78 | options:
79 | default:
80 | minVersion: VersionTLS12
81 |
--------------------------------------------------------------------------------
/ansible/roles/traefik/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart traefik
2 | shell:
3 | chdir: /opt/traefik
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/traefik/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 30393461663462666434333462386264383831333936633961636237616338303335393861626336
3 | 3566306338633735613431393736653061636536353335620a366335623630643137343863636161
4 | 37383436323439393965623436393465626362633134346239356463633936396236666164333762
5 | 3565623930353964620a303965626164396536646336313438346464663236633465353036303935
6 | 30373230393432643330663434313637396234306563336137653861333839623530636465653532
7 | 37363239663939303834633332656365363437356236633933313339656563343130383262626539
8 | 61363762663630366430326635386163613936653938303366636363363334643035396233646430
9 | 32636431616335326264343931343064646363393736303263633038623562623965393763636562
10 | 35316264636264366161326463343730613232663539306532303838656338343535376439343834
11 | 3234663334333866376233336538343264623930653662303835
12 |
--------------------------------------------------------------------------------
/ansible/roles/uptime_kuma/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | uptime-kuma:
3 | image: louislam/uptime-kuma:1.23.16-alpine
4 | environment:
5 | - TZ={{ timezone }}
6 | - PUID={{ docker_user.id }}
7 | - PGID={{ docker_user.id }}
8 | dns:
9 | - 9.9.9.9
10 | - 149.112.112.112
11 | ports:
12 | - "{{ pve_hosts.forrest.ip }}:3001:3001"
13 | volumes:
14 | - "{{ app_data_dir }}/uptime-kuma:/app/data"
15 | restart: unless-stopped
16 |
17 | networks:
18 | default:
19 | enable_ipv6: true
20 |
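21 | # enable_ipv6 lets uptime-kuma also probe targets over IPv6 (assuming the host provides IPv6 connectivity)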
--------------------------------------------------------------------------------
/ansible/roles/uptime_kuma/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart uptime-kuma
2 | shell:
3 | chdir: /opt/uptime-kuma
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/uptime_kuma/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create install directory
2 | file:
3 | path: /opt/uptime-kuma
4 | state: directory
5 | owner: "{{ docker_user.name }}"
6 | mode: "{{ docker_compose_directory_mask }}"
7 |
8 | - name: Install compose file
9 | template:
10 | src: files/docker-compose.yml
11 | dest: /opt/uptime-kuma/docker-compose.yml
12 | mode: "{{ docker_compose_file_mask }}"
13 | owner: "{{ docker_user.name }}"
14 | validate: docker-compose -f %s config
15 | notify: restart uptime-kuma
16 |
--------------------------------------------------------------------------------
/ansible/roles/vaultwarden/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | vaultwarden:
3 | image: vaultwarden/server:1.33.2-alpine
4 | restart: unless-stopped
5 | user: "{{ docker_user.id }}:{{ docker_user.id }}"
6 | volumes:
7 | - "{{ app_data_dir }}/vaultwarden/:/data"
8 | depends_on:
9 | - db
10 | dns:
11 | - 9.9.9.9
12 | - 149.112.112.112
13 | labels:
14 | - traefik.enable=true
15 |
16 | - traefik.http.routers.vaultwarden.rule=Host(`vaultwarden.jakehoward.tech`)
17 | - traefik.http.routers.vaultwarden.service=vaultwarden
18 | - traefik.http.services.vaultwarden.loadbalancer.server.port=80
19 |
20 | - traefik.http.middlewares.vaultwarden-ratelimit.ratelimit.average=5
21 | - traefik.http.middlewares.vaultwarden-ratelimit.ratelimit.burst=200
22 |
23 | - traefik.http.routers.vaultwarden.middlewares=vaultwarden-ratelimit,tailscale-only@file
24 | environment:
25 | - SIGNUPS_ALLOWED=false
26 | - DOMAIN=https://vaultwarden.jakehoward.tech
27 | - SHOW_PASSWORD_HINT=false
28 | - DATABASE_URL=postgres://vaultwarden:{{ vaultwarden_database_password }}@db/vaultwarden
29 | - INVITATIONS_ALLOWED=false
30 | - ROCKET_WORKERS=2
31 | - EMERGENCY_ACCESS_ALLOWED=false
32 | - AUTHENTICATOR_DISABLE_TIME_DRIFT=true
33 | - SENDS_ALLOWED=false
34 | - PASSWORD_HINTS_ALLOWED=false
35 | - EXPERIMENTAL_CLIENT_FEATURE_FLAGS=autofill-overlay,autofill-v2,extension-refresh,inline-menu-positioning-improvements,ssh-key-vault-item,ssh-agent
36 | networks:
37 | - default
38 | - traefik
39 |
40 | db:
41 | image: postgres:14-alpine
42 | restart: unless-stopped
43 | volumes:
44 | - /mnt/speed/dbs/postgres/vaultwarden/:/var/lib/postgresql/data
45 | environment:
46 | - POSTGRES_PASSWORD={{ vaultwarden_database_password }}
47 | - POSTGRES_USER=vaultwarden
48 |
49 | networks:
50 | traefik:
51 | external: true
52 |
--------------------------------------------------------------------------------
/ansible/roles/vaultwarden/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart vaultwarden
2 | shell:
3 | chdir: /opt/vaultwarden
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/vaultwarden/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create install directory
2 | file:
3 | path: /opt/vaultwarden
4 | state: directory
5 | owner: "{{ docker_user.name }}"
6 | mode: "{{ docker_compose_directory_mask }}"
7 |
8 | - name: Install compose file
9 | template:
10 | src: files/docker-compose.yml
11 | dest: /opt/vaultwarden/docker-compose.yml
12 | mode: "{{ docker_compose_file_mask }}"
13 | owner: "{{ docker_user.name }}"
14 | validate: docker-compose -f %s config
15 | notify: restart vaultwarden
16 |
--------------------------------------------------------------------------------
/ansible/roles/vaultwarden/vars/main.yml:
--------------------------------------------------------------------------------
1 | vaultwarden_database_password: !vault |
2 | $ANSIBLE_VAULT;1.1;AES256
3 | 37666163343038663763633038323938383665386463666239313431626334613432346462656366
4 | 3937363766396236326333353332393564623736336535630a333930613864396536366330633438
5 | 37376637646561636238646636356533343837376336636637646434383731316264353462383039
6 | 3138666164623437360a306538323263313966633631653739313435646435363236303066663938
7 | 34336366313439356434353333373963633666306463323662353033393832356462666163613161
8 | 3031623933363563343163376564373066613634356237643663
9 |
--------------------------------------------------------------------------------
/ansible/roles/vikunja/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | vikunja:
3 | image: vikunja/vikunja:latest
4 | restart: unless-stopped
5 | environment:
6 | - VIKUNJA_DATABASE_HOST=db
7 | - VIKUNJA_DATABASE_PASSWORD=vikunja
8 | - VIKUNJA_DATABASE_TYPE=postgres
9 | - VIKUNJA_DATABASE_USER=vikunja
10 | - VIKUNJA_DATABASE_DATABASE=vikunja
11 | - VIKUNJA_SERVICE_FRONTENDURL=https://tasks.jakehoward.tech
12 | - VIKUNJA_SERVICE_JWTSECRET="{{ vault_jwt_secret }}"
13 | - VIKUNJA_SERVICE_ENABLEREGISTRATION=false
14 | - VIKUNJA_SERVICE_TIMEZONE={{ timezone }}
15 | - VIKUNJA_REDIS_HOST=redis:6379
16 | - VIKUNJA_REDIS_ENABLED=true
17 | - VIKUNJA_LOG_PATH=/dev/stdout
18 | - VIKUNJA_KEYVALUE_TYPE=redis
19 | - VIKUNJA_MAILER_ENABLED=true
20 |       - VIKUNJA_MAILER_FROMEMAIL={{ vault_from_email }}
21 | - VIKUNJA_MAILER_USERNAME={{ vault_smtp_username }}
22 | - VIKUNJA_MAILER_PASSWORD={{ vault_smtp_password }}
23 | - VIKUNJA_MAILER_HOST=smtp.eu.mailgun.org
24 | - VIKUNJA_SENTRY_DSN=
25 | - TZ={{ timezone }}
26 | - PUID={{ docker_user.id }}
27 | - PGID={{ docker_user.id }}
28 | labels:
29 | - traefik.enable=true
30 | - traefik.http.routers.vikunja.rule=Host(`tasks.jakehoward.tech`)
31 | volumes:
32 | - /mnt/tank/app-data/vikunja/files:/app/vikunja/files
33 | depends_on:
34 | - db
35 | - redis
36 | networks:
37 | - default
38 | - traefik
39 |
40 | db:
41 | image: postgres:14-alpine
42 | restart: unless-stopped
43 | volumes:
44 | - /mnt/speed/dbs/postgres/vikunja/:/var/lib/postgresql/data
45 | environment:
46 | - POSTGRES_PASSWORD=vikunja
47 | - POSTGRES_USER=vikunja
48 |
49 | redis:
50 | image: redis:7-alpine
51 | restart: unless-stopped
52 | volumes:
53 | - /mnt/speed/dbs/redis/vikunja/:/data
54 |
55 | networks:
56 | traefik:
57 | external: true
58 |
--------------------------------------------------------------------------------
/ansible/roles/vikunja/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart vikunja
2 | shell:
3 | chdir: /opt/vikunja
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/vikunja/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/vikunja
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install compose file
12 | template:
13 | src: files/docker-compose.yml
14 | dest: /opt/vikunja/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | notify: restart vikunja
19 |
--------------------------------------------------------------------------------
/ansible/roles/vikunja/vars/vault.yml:
--------------------------------------------------------------------------------
1 | $ANSIBLE_VAULT;1.1;AES256
2 | 61316434313939373632643634633832396637666431346236316538323166333730306536646136
3 | 3436633738363136393035613232633666633064343435320a383665663266633933366563306461
4 | 30396666653162393963326562646534646566623230396531323138396330663835363436663132
5 | 6364343831396235390a376261366336656439396534383666663732316237646465303864643464
6 | 36623162363536336139326239636231343232383639353132643733336339383837613565323466
7 | 61373533623065393037393764636636393263666637646433396263376462393132623062336135
8 | 39613639343964373764373335623137376265393239393837623138656366653231313434326436
9 | 36326534646532636536653064623862626661626638643763313234316337393038386666373133
10 | 62666233653033326137633365373737356662616261666533386565643933336364373265376662
11 | 62616233343635346335333334353330656365316563636433363462326138616462333233323864
12 | 66353539643566323863366464343139323138353034306434613735666361343438393135356334
13 | 63356262646163626266643962343937346639396366343633616632643236356537393936643663
14 | 36323635336566313861366333356461396430336462363939386563313262616430626461303836
15 | 34373165666365313464656563383838363134623530383363343935323365316164353435313032
16 | 34653136336539386532633938386431613263356665396332313266316161653938393837653463
17 | 63333238633566636233
18 |
--------------------------------------------------------------------------------
/ansible/roles/website/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | website:
3 | image: registry.gitlab.com/realorangeone/website:latest
4 | restart: unless-stopped
5 | environment:
6 | - TZ={{ timezone }}
7 | - DEBUG=false
8 | - SECRET_KEY={{ vault_website_secret_key }}
9 | - DATABASE_URL=postgres://website:website@db/website?conn_max_age=600
10 | - CACHE_URL=redis://redis/0
11 | - QUEUE_STORE_URL=redis://redis/1
12 | - RENDITION_CACHE_URL=redis://redis/2
13 | - SPOTIFY_PROXY_URL=http://spotify_public_proxy
14 | - UNSPLASH_CLIENT_ID={{ vault_unsplash_client_id }}
15 | - SENTRY_DSN={{ vault_website_sentry_dsn }}
16 | - BASE_HOSTNAME=theorangeone.net
17 | - GRANIAN_WORKERS=2
18 | - GRANIAN_BLOCKING_THREADS=2
19 | - SEO_INDEX=true
20 | - ACTIVITYPUB_HOST=mastodon.theorangeone.net
21 | - S6_CMD_WAIT_FOR_SERVICES_MAXTIME=60000
22 | volumes:
23 | - ./media:/app/media
24 | - ./cache:/tmp/nginx_cache
25 | networks:
26 | - default
27 | - coredns
28 | depends_on:
29 | - db
30 | - redis
31 |
32 | db:
33 | image: postgres:14-alpine
34 | restart: unless-stopped
35 | volumes:
36 | - ./postgres:/var/lib/postgresql/data
37 | environment:
38 | - POSTGRES_PASSWORD=website
39 | - POSTGRES_USER=website
40 |
41 | redis:
42 | image: redis:7-alpine
43 | restart: unless-stopped
44 | volumes:
45 | - ./redis:/data
46 |
47 | spotify_public_proxy:
48 | image: ghcr.io/realorangeone/spotify-public-proxy:latest
49 | restart: unless-stopped
50 | environment:
51 | - PORT=80
52 | - SPOTIFY_CLIENT_ID={{ vault_spotify_client_id }}
53 | - SPOTIFY_CLIENT_SECRET={{ vault_spotify_client_secret }}
54 | - SENTRY_DSN={{ vault_spotify_sentry_dsn }}
55 |
56 | networks:
57 | coredns:
58 | external: true
59 |
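60 | # Joining the coredns network lets the host nginx resolve this container as website-website-1.docker (see the nginx config in tasks/main.yml)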
--------------------------------------------------------------------------------
/ansible/roles/website/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart website
2 | shell:
3 | chdir: /opt/website
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/website/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Include vault
2 | include_vars: vault.yml
3 |
4 | - name: Create install directory
5 | file:
6 | path: /opt/website
7 | state: directory
8 | owner: "{{ docker_user.name }}"
9 | mode: "{{ docker_compose_directory_mask }}"
10 |
11 | - name: Install compose file
12 | template:
13 | src: files/docker-compose.yml
14 | dest: /opt/website/docker-compose.yml
15 | mode: "{{ docker_compose_file_mask }}"
16 | owner: "{{ docker_user.name }}"
17 | validate: docker-compose -f %s config
18 | notify: restart website
19 |
20 | - name: Install nginx config
21 | template:
22 | src: files/nginx-docker.conf
23 | dest: /etc/nginx/http.d/website.conf
24 | mode: "0644"
25 | notify: reload nginx
26 | vars:
27 | server_name: theorangeone.net jakehoward.tech
28 | upstream: website-website-1.docker:8000
29 | ssl_cert_path: /etc/letsencrypt/live/theorangeone.net
30 | location_extra: |
31 | more_set_headers "Server: $upstream_http_server";
32 |
--------------------------------------------------------------------------------
/ansible/roles/yourls/files/docker-compose.yml:
--------------------------------------------------------------------------------
1 | services:
2 | yourls:
3 | image: yourls:apache
4 | restart: unless-stopped
5 | depends_on:
6 | - mariadb
7 | environment:
8 | - YOURLS_DB_PASS=yourls
9 | - YOURLS_DB_USER=yourls
10 | - YOURLS_DB_HOST=mariadb
11 | - YOURLS_USER=jake
12 | - YOURLS_PASS={{ yourls_password }}
13 | - YOURLS_SITE=https://0rng.one
14 | volumes:
15 | - ./index.html:/var/www/html/index.html:ro
16 | labels:
17 | - traefik.enable=true
18 | - traefik.http.routers.yourls.rule=Host(`0rng.one`)
19 | networks:
20 | - default
21 | - traefik
22 |
23 | mariadb:
24 | image: mariadb:10.10
25 | environment:
26 | - MYSQL_ROOT_PASSWORD=root
27 | - MYSQL_DATABASE=yourls
28 | - MYSQL_USER=yourls
29 | - MYSQL_PASSWORD=yourls
30 | volumes:
31 | - /mnt/speed/dbs/mariadb/yourls:/var/lib/mysql
32 | restart: unless-stopped
33 |
34 | networks:
35 | traefik:
36 | external: true
37 |
--------------------------------------------------------------------------------
/ansible/roles/yourls/files/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 | Redirecting to website...
8 |
9 |
10 |
--------------------------------------------------------------------------------
/ansible/roles/yourls/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: restart yourls
2 | shell:
3 | chdir: /opt/yourls
4 | cmd: "{{ docker_update_command }}"
5 |
--------------------------------------------------------------------------------
/ansible/roles/yourls/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create install directory
2 | file:
3 | path: /opt/yourls
4 | state: directory
5 | owner: "{{ docker_user.name }}"
6 | mode: "{{ docker_compose_directory_mask }}"
7 |
8 | - name: Install compose file
9 | template:
10 | src: files/docker-compose.yml
11 | dest: /opt/yourls/docker-compose.yml
12 | mode: "{{ docker_compose_file_mask }}"
13 | owner: "{{ docker_user.name }}"
14 | validate: docker-compose -f %s config
15 | notify: restart yourls
16 |
17 | - name: Install redirect file
18 | template:
19 | src: files/index.html
20 | dest: /opt/yourls/index.html
21 | mode: "{{ docker_compose_file_mask }}"
22 | owner: "{{ docker_user.name }}"
23 | notify: restart yourls
24 |
--------------------------------------------------------------------------------
/ansible/roles/yourls/vars/main.yml:
--------------------------------------------------------------------------------
1 | yourls_password: !vault |
2 | $ANSIBLE_VAULT;1.1;AES256
3 | 33643039353165363833306237306638636438623138343263666562356638333539376166363366
4 | 3861353462663436303638636562313236303238346235390a333438303537313966656337306138
5 | 36636530613837333631323135356334343639653761656132333531616230326332663366663865
6 | 3730363965303264620a663566353232363364613264636534343462323239633938343033366133
7 | 38313162626432393732383635623364343934303234323935353130373566343436323437636334
8 | 61383663653636373931326437636262306639616335613865366630313537613333393337353639
9 | 34613464323964633265363534323435353834333539323763623537373064303439353566373266
10 | 31636336663734326530623434633334663962663634653861383465343861316463386136373936
11 | 62393662343637313239396462643938393132366536666638623266376566616639
12 |
--------------------------------------------------------------------------------
/ansible/roles/zfs/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # Cap ARC size to 50% of RAM
2 | zfs_arc_size: "{{ ((ansible_memtotal_mb * 1024 * 1024) * 0.5) | int }}"
3 |
4 | sanoid_datasets: {}
5 |
6 | sanoid_templates: {}
7 |
--------------------------------------------------------------------------------
/ansible/roles/zfs/files/sanoid.conf:
--------------------------------------------------------------------------------
1 | ######################################
2 | # This is a sample sanoid.conf file. #
3 | # It should go in /etc/sanoid. #
4 | ######################################
5 |
6 | {% for name, config in sanoid_datasets.items() %}
7 | [{{ name }}]
8 | {% for key, value in config.items() %}
9 | {{ key }} = {{ value | lower }}
10 | {% endfor %}
11 | {% endfor %}
12 |
13 | #############################
14 | # templates below this line #
15 | #############################
16 |
17 | {% for name, config in sanoid_templates.items() %}
18 | [template_{{ name }}]
19 | {% for key, value in config.items() %}
20 | {{ key }} = {{ value | lower }}
21 | {% endfor %}
22 | {% endfor %}
23 |
--------------------------------------------------------------------------------
/ansible/roles/zfs/files/zfs-modprobe.conf:
--------------------------------------------------------------------------------
1 | options zfs zfs_arc_max={{ zfs_arc_size }}
2 |
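3 | # Applied when the zfs module loads; if zfs is loaded from the initramfs, it may need regenerating for this to take effect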
--------------------------------------------------------------------------------
/ansible/roles/zfs/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Set module options
2 | template:
3 | src: files/zfs-modprobe.conf
4 | dest: /etc/modprobe.d/zfs.conf
5 | mode: "0644"
6 |
7 | - name: ZFS Scrub
8 | cron:
9 | name: scrub {{ item }} ZFS pool
10 | hour: 2
11 | minute: 0
12 | weekday: 5
13 | job: zpool scrub {{ item }}
14 | loop: "{{ zpools_to_scrub }}"
15 |
16 | - name: Give user passwordless access to ZFS commands
17 | lineinfile:
18 | path: /etc/sudoers
19 |     line: "{{ me.user }} ALL=(ALL) NOPASSWD: /usr/sbin/zfs,/usr/sbin/zpool"
20 |     validate: /usr/sbin/visudo -cf %s
21 |
22 | - name: Sanoid
23 |   include_tasks: sanoid.yml
24 |
--------------------------------------------------------------------------------
/ansible/roles/zfs/tasks/sanoid.yml:
--------------------------------------------------------------------------------
1 | - name: Install dependencies for Arch
2 | package:
3 | name: "{{ item }}"
4 | loop:
5 | - perl
6 | - perl-capture-tiny
7 | - perl-config-inifiles
8 | - pv
9 | - lzop
10 | when: ansible_os_family == 'Archlinux'
11 |
12 | - name: Install dependencies for Debian-based distros
13 | package:
14 | name: "{{ item }}"
15 | loop:
16 | - libcapture-tiny-perl
17 | - libconfig-inifiles-perl
18 | - pv
19 | - lzop
20 | - mbuffer
21 | when: ansible_os_family == 'Debian'
22 |
23 | - name: Download sanoid
24 | git:
25 | repo: https://github.com/jimsalterjrs/sanoid.git
26 | dest: /opt/sanoid
27 | version: v2.1.0
28 |
29 | - name: Create config directory
30 | file:
31 | path: /etc/sanoid
32 | state: directory
33 | mode: "0755"
34 |
35 | - name: Install default config
36 | file:
37 | src: /opt/sanoid/sanoid.defaults.conf
38 | dest: /etc/sanoid/sanoid.defaults.conf
39 | state: link
40 |
41 | - name: Install executables
42 | file:
43 | src: /opt/sanoid/{{ item }}
44 | dest: /usr/sbin/{{ item }}
45 | state: link
46 | loop:
47 | - sanoid
48 | - syncoid
49 | - findoid
50 | - sleepymutex
51 |
52 | - name: Install config
53 | template:
54 | src: files/sanoid.conf
55 | dest: /etc/sanoid/sanoid.conf
56 | mode: "0755"
57 |
58 | - name: Install systemd services
59 | file:
60 | src: /opt/sanoid/packages/debian/{{ item }}
61 | dest: /lib/systemd/system/{{ item }}
62 | state: link
63 | loop: "{{ sanoid_services }}"
64 |
65 | - name: Enable systemd services
66 | systemd:
67 | name: "{{ item }}"
68 | enabled: true
69 | masked: false
70 | loop: "{{ sanoid_services }}"
71 |
72 | - name: Start sanoid timer
73 | systemd:
74 | name: sanoid.timer
75 | state: started
76 |
--------------------------------------------------------------------------------
/ansible/roles/zfs/vars/main.yml:
--------------------------------------------------------------------------------
1 | sanoid_services:
2 | - sanoid.timer
3 | - sanoid.service
4 | - sanoid-prune.service
5 |
--------------------------------------------------------------------------------
/ansible/vault-pass.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | bw get password infrastructure
4 |
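5 | # Requires an unlocked Bitwarden session (bw unlock) in the calling environment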
--------------------------------------------------------------------------------
/justfile:
--------------------------------------------------------------------------------
1 |
2 | export PATH := justfile_directory() + "/env/bin:" + env_var("PATH")
3 |
4 | # Recipes
5 | @default:
6 | just --list
7 |
8 | ansible-setup: ansible-install ansible-galaxy-install
9 |
10 | ansible-install:
11 | python -m venv env
12 | pip install -r ansible/dev-requirements.txt
13 |
14 | ansible-galaxy-install: ansible-install
15 | cd ansible/ && ansible-galaxy install -r galaxy-requirements.yml --force
16 |
17 | @ansible-facts HOST:
18 | cd ansible/ && ansible -m setup {{ HOST }} --vault-password-file=vault-pass.sh
19 |
20 | # Run terraform with required environment
21 | terraform +ARGS:
22 | #!/usr/bin/env bash
23 | cd terraform/
24 |
25 | # Load secrets from env file (if it exists)
26 | set -a
27 | source ./.env || true
28 | set +a
29 |
30 | terraform {{ ARGS }}
31 |
32 | # Download secrets
33 | update-secrets:
34 | bw sync
35 | cd terraform/ && bw get attachment .env --itemid c4f8b44e-ae62-442d-a9e0-02d0621c2454
36 |
37 | ansible-deploy *ARGS: ansible-galaxy-install
38 | cd ansible/ && ansible-playbook main.yml --vault-password-file=vault-pass.sh -K --diff {{ ARGS }}
39 |
40 | ansible-deploy-fast *ARGS:
41 | cd ansible/ && ansible-playbook main.yml --vault-password-file=vault-pass.sh -K --diff {{ ARGS }}
42 |
43 | ansible-vault ACTION *ARGS:
44 | cd ansible/ && ansible-vault {{ ACTION }} --vault-password-file=vault-pass.sh {{ ARGS }}
45 |
46 | terraform-lint:
47 | just terraform validate
48 | just terraform fmt -check -recursive
49 |
50 | yamllint:
51 | yamllint -s .
52 |
53 | ansible-lint: yamllint
54 | #!/usr/bin/env bash
55 | cd ansible/
56 |
57 | ansible-lint -p
58 | ansible-playbook main.yml --syntax-check
59 |
60 | lint: terraform-lint ansible-lint
61 |
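62 | # Typical flow (host name illustrative):
63 | #   just ansible-setup
64 | #   just ansible-deploy --limit pve
65 | #   just terraform plan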
--------------------------------------------------------------------------------
/renovate.json:
--------------------------------------------------------------------------------
1 | {
2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json",
3 | "extends": [
4 | "config:base"
5 | ],
6 | "prHourlyLimit": 0,
7 | "prConcurrentLimit": 0,
8 | "ansible-galaxy": {
9 | "fileMatch": [
10 | "(^|/)ansible/galaxy-requirements\\.ya?ml$"
11 | ]
12 | }
13 | }
14 |
--------------------------------------------------------------------------------
/terraform/0rng.one.tf:
--------------------------------------------------------------------------------
1 | resource "gandi_livedns_domain" "orngone" {
2 | name = "0rng.one"
3 | }
4 |
5 | resource "gandi_livedns_record" "orngone_apex" {
6 | zone = gandi_livedns_domain.orngone.id
7 | name = "@"
8 |   type = "ALIAS" # CNAME isn't valid at the apex and Gandi doesn't flatten, so use its ALIAS type
9 | ttl = 3600
10 | values = [
11 | "${gandi_livedns_record.sys_domain_pve.name}.${gandi_livedns_record.sys_domain_pve.zone}."
12 | ]
13 | }
14 |
15 | resource "gandi_livedns_record" "orngone_caa" {
16 | zone = gandi_livedns_domain.orngone.id
17 | name = "@"
18 | type = "CAA"
19 | ttl = 3600
20 | values = [
21 | "0 issue \"letsencrypt.org\""
22 | ]
23 | }
24 |
25 |
26 | resource "gandi_livedns_record" "orngone_who" {
27 | zone = gandi_livedns_domain.orngone.id
28 | name = "who"
29 | type = "CNAME"
30 | ttl = 3600
31 | values = [
32 | "${gandi_livedns_record.sys_domain_pve.name}.${gandi_livedns_record.sys_domain_pve.zone}."
33 | ]
34 | }
35 |
--------------------------------------------------------------------------------
/terraform/backends.tf:
--------------------------------------------------------------------------------
1 | terraform {
2 | backend "s3" {
3 | bucket = "0rng-terraform"
4 | key = "infrastructure/terraform.tfstate"
5 | encrypt = true
6 | region = "eu-west-2"
7 | }
8 | }
9 |
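10 | # No credentials here: the S3 backend reads AWS_ACCESS_KEY_ID /
11 | # AWS_SECRET_ACCESS_KEY from the environment, which the justfile's
12 | # `terraform` recipe loads from terraform/.env before each run.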
--------------------------------------------------------------------------------
/terraform/context.tf:
--------------------------------------------------------------------------------
1 | resource "local_file" "hosts" {
2 | content = yamlencode({
3 | vps_hosts : {
4 | casey_ip : linode_instance.casey.ip_address,
5 | private_ipv6_marker : local.private_ipv6_marker,
6 | private_ipv6_range : local.private_ipv6_range,
7 | walker_ip : hcloud_server.walker.ipv4_address,
8 | }
9 | })
10 | filename = "${path.module}/../ansible/group_vars/all/vps-hosts.yml"
11 | }
12 |
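13 | # This bridges terraform and ansible: instance IPs land in a group_vars
14 | # file the playbooks read. Rendered roughly as (addresses illustrative):
15 | #
16 | #   vps_hosts:
17 | #     casey_ip: 192.0.2.10
18 | #     private_ipv6_marker: ...
19 | #     private_ipv6_range: ...
20 | #     walker_ip: 192.0.2.20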
--------------------------------------------------------------------------------
/terraform/hetzner_firewall.tf:
--------------------------------------------------------------------------------
1 | resource "hcloud_firewall" "base" {
2 | name = "base"
3 |
4 | rule {
5 | direction = "in"
6 | protocol = "icmp"
7 | source_ips = [
8 | "0.0.0.0/0",
9 | "::/0"
10 | ]
11 | }
12 | }
13 |
14 | resource "hcloud_firewall" "tailscale" {
15 | name = "tailscale"
16 |
17 | rule {
18 | direction = "in"
19 | protocol = "udp"
20 | port = "41641"
21 | source_ips = [
22 | "0.0.0.0/0",
23 | "::/0"
24 | ]
25 | }
26 | }
27 |
28 | resource "hcloud_firewall" "web" {
29 | name = "web"
30 |
31 | # HTTP
32 | rule {
33 | direction = "in"
34 | protocol = "tcp"
35 | port = "80"
36 | source_ips = [
37 | "0.0.0.0/0",
38 | "::/0"
39 | ]
40 | }
41 |
42 | # HTTPS
43 | rule {
44 | direction = "in"
45 | protocol = "tcp"
46 | port = "443"
47 | source_ips = [
48 | "0.0.0.0/0",
49 | "::/0"
50 | ]
51 | }
52 | }
53 |
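54 | # Once attached, a Hetzner Cloud firewall drops any inbound traffic no
55 | # rule matches, so SSH (22) stays closed here; presumably the hosts are
56 | # reached over Tailscale instead (see the tailscale firewall above).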
--------------------------------------------------------------------------------
/terraform/providers.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "eu-west-2"
3 | }
4 |
5 | provider "linode" {
6 | token = var.linode_personal_access_token
7 | }
8 |
9 | provider "gandi" {
10 | personal_access_token = var.gandi_personal_access_token
11 | }
12 |
13 | provider "b2" {
14 | application_key = var.backblaze_application_key
15 | application_key_id = var.backblaze_application_key_id
16 | }
17 |
18 | provider "hcloud" {
19 | token = var.hetzner_token
20 | }
21 |
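22 | # The var.* tokens are expected to arrive as TF_VAR_* entries in
23 | # terraform/.env (fetched by `just update-secrets`, sourced by the
24 | # justfile's `terraform` recipe); variable declarations live elsewhere.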
--------------------------------------------------------------------------------
/terraform/rclone.tf:
--------------------------------------------------------------------------------
1 | resource "aws_iam_user" "rclone" {
2 | name = "rclone"
3 | }
4 |
5 | resource "aws_iam_user_policy" "read-terraform-state" {
6 | name = "read-terraform-state"
7 | user = aws_iam_user.rclone.name
8 |
9 | policy = <