├── ansible ├── .python-version ├── ansible.cfg ├── README.md ├── vars │ ├── setup.yml │ ├── proxmox-backup-cifs.yml │ └── media-server.yml ├── ee │ ├── requirements.txt │ ├── requirements.yml │ ├── custom_entrypoint.sh │ └── execution-environment.yml ├── inventory │ ├── host_vars │ │ ├── idp01.net.dbren.uk.yml │ │ ├── apps01.net.dbren.uk.yml │ │ ├── backup01.net.dbren.uk.yml │ │ ├── media01.net.dbren.uk.yml │ │ └── homeops.hetzner.dbren.uk.yml │ ├── group_vars │ │ ├── proxmox.yml │ │ ├── subnet_router.yml │ │ ├── docker.yml │ │ ├── tailscale.yml │ │ └── all.yml │ └── inventory.yml ├── playbooks │ ├── playbook-beszel-agent.yml │ ├── playbook-docker-prune.yml │ ├── playbook-tailscale.yml │ ├── playbook-proxmox-backup-cifs.yml │ ├── playbook-setup.yml │ ├── playbook-proxmox-storage.yml │ ├── playbook-media-server.yml │ └── playbook-awx.yml ├── pyproject.toml ├── templates │ └── setup │ │ └── ssh-discord-notify.sh.j2 └── ansible-navigator.yml ├── docs ├── CNAME ├── assets │ └── images │ │ ├── Beszel.png │ │ ├── Talos.png │ │ ├── OMVSMBShare.png │ │ ├── OMVFileSystems.png │ │ ├── TalosDeployed.png │ │ └── OMVSharedFolders.png ├── miscellaneous │ ├── python-uv.md │ └── nuc-app-config.md ├── infrastructure │ ├── dns.md │ ├── hetzner.md │ ├── secrets-management.md │ ├── backblaze.md │ ├── tailscale.md │ ├── beszel.md │ ├── raspberrypi3.md │ ├── opentofu.md │ ├── kubernetes │ │ ├── flux.md │ │ └── talos.md │ ├── media.md │ └── proxmox │ │ ├── backup.md │ │ └── ve.md ├── docker-compose.md ├── index.md └── ansible │ ├── awx.md │ ├── execution-environment.md │ └── minecraft.md ├── docker ├── beszel │ ├── .env │ ├── config │ │ └── serve.json │ └── docker-compose.yml ├── gatus │ ├── .env │ ├── config │ │ ├── serve.json │ │ └── config.yaml │ └── docker-compose.yml ├── caddy │ ├── .env │ ├── conf │ │ ├── Caddyfile │ │ └── Caddyfile.media01.net.dbren.uk │ └── docker-compose.yml ├── your-spotify │ ├── .env │ └── docker-compose.yml ├── pocket-id │ ├── .env │ └── docker-compose.yml ├── paperless-ngx │ ├── .env │ └── docker-compose.yml └── stirling-pdf │ └── docker-compose.yml ├── kubernetes ├── talos │ ├── clusterconfig │ │ └── .gitignore │ ├── talenv.sops.yaml │ ├── talconfig.yaml │ └── talsecret.sops.yaml ├── .sops.yaml ├── apps │ └── metallb-system │ │ ├── metallb │ │ ├── app │ │ │ ├── l2advertisement.yaml │ │ │ ├── ipaddresspool.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── helmrelease.yaml │ │ │ └── ocirepository.yaml │ │ └── ks.yaml │ │ ├── namespace.yaml │ │ └── kustomization.yaml └── flux │ └── flux-system │ ├── kustomization.yaml │ ├── gotk-sync.yaml │ └── ks.yaml ├── .gitignore ├── terraform ├── .env ├── modules │ ├── proxmox_lxc │ │ ├── README.md │ │ ├── LICENSE │ │ ├── variables.tf │ │ └── main.tf │ ├── proxmox_vm │ │ ├── README.md │ │ ├── LICENSE │ │ ├── variables.tf │ │ └── main.tf │ └── proxmox_cloud_init_config │ │ ├── README.md │ │ ├── variables.tf │ │ ├── LICENSE │ │ └── main.tf ├── op.tf ├── variables.tf ├── main.tf ├── hetzner.tf └── proxmox.tf ├── .ansible-lint ├── .github ├── workflows │ ├── ansible-lint.yml │ ├── cleanup-ee.yml │ ├── gh-pages.yml │ ├── tflint.yml │ └── build-ee.yml └── renovate.json ├── .devcontainer ├── devcontainer.json └── Dockerfile ├── nextdns-config.yml ├── LICENSE ├── Taskfile.yml ├── mkdocs.yml └── README.md /ansible/.python-version: -------------------------------------------------------------------------------- 1 | 3.14 2 | -------------------------------------------------------------------------------- /docs/CNAME: 
-------------------------------------------------------------------------------- 1 | homeops.dbren.uk 2 | -------------------------------------------------------------------------------- /docker/beszel/.env: -------------------------------------------------------------------------------- 1 | TS_AUTHKEY=op://Home-Ops/eiktj776yov7lf32copxlnkjcq/password 2 | -------------------------------------------------------------------------------- /docker/gatus/.env: -------------------------------------------------------------------------------- 1 | TS_AUTHKEY=op://Home-Ops/22xur5gm5fmibqhonmobjbsjbe/password 2 | -------------------------------------------------------------------------------- /kubernetes/talos/clusterconfig/.gitignore: -------------------------------------------------------------------------------- 1 | home-ops-talos01.yaml 2 | talosconfig 3 | -------------------------------------------------------------------------------- /docs/assets/images/Beszel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dbrennand/home-ops/HEAD/docs/assets/images/Beszel.png -------------------------------------------------------------------------------- /docs/assets/images/Talos.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dbrennand/home-ops/HEAD/docs/assets/images/Talos.png -------------------------------------------------------------------------------- /docs/assets/images/OMVSMBShare.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dbrennand/home-ops/HEAD/docs/assets/images/OMVSMBShare.png -------------------------------------------------------------------------------- /docs/assets/images/OMVFileSystems.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dbrennand/home-ops/HEAD/docs/assets/images/OMVFileSystems.png -------------------------------------------------------------------------------- /docs/assets/images/TalosDeployed.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dbrennand/home-ops/HEAD/docs/assets/images/TalosDeployed.png -------------------------------------------------------------------------------- /docs/assets/images/OMVSharedFolders.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/dbrennand/home-ops/HEAD/docs/assets/images/OMVSharedFolders.png -------------------------------------------------------------------------------- /docker/caddy/.env: -------------------------------------------------------------------------------- 1 | ACME_EMAIL=op://Home-Ops/Caddy/ACME_EMAIL 2 | CF_API_TOKEN=op://Home-Ops/Caddy/CF_API_TOKEN 3 | DOMAIN=net.dbren.uk 4 | -------------------------------------------------------------------------------- /kubernetes/.sops.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | creation_rules: 3 | - age: >- 4 | age1csxr93np9ynejzvt8jjjau97s29mayy447vlsf4mu9srmpjmr3uq56n27k 5 | -------------------------------------------------------------------------------- /ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = inventory/inventory.yml 3 | host_key_checking = False 4 | interpreter_python = auto_silent 5 | 
-------------------------------------------------------------------------------- 1 | # Ansible 2 | 3 | The Ansible content in this repository is used to configure my servers and deploy applications in my Homelab. 4 | -------------------------------------------------------------------------------- /docker/your-spotify/.env: -------------------------------------------------------------------------------- 1 | SPOTIFY_PUBLIC=op://Home-Ops/czbguufu4gifomyvbj7bnp62lm/username 2 | SPOTIFY_SECRET=op://Home-Ops/czbguufu4gifomyvbj7bnp62lm/password 3 | -------------------------------------------------------------------------------- /docker/caddy/conf/Caddyfile: -------------------------------------------------------------------------------- 1 | { 2 | email {env.ACME_EMAIL} 3 | } 4 | 5 | (cloudflare) { 6 | tls { 7 | dns cloudflare {env.CF_API_TOKEN} 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .vscode 2 | .terraform* 3 | *.tfstate* 4 | terraform.tfvars 5 | .DS_Store 6 | !docker/**/.env 7 | .venv 8 | .ansible/ 9 | ansible/ee/context 10 | ansible-navigator.log 11 | -------------------------------------------------------------------------------- /kubernetes/apps/metallb-system/metallb/app/l2advertisement.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: metallb.io/v1beta1 3 | kind: L2Advertisement 4 | metadata: 5 | name: main 6 | namespace: metallb-system 7 | -------------------------------------------------------------------------------- /kubernetes/flux/flux-system/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - gotk-components.yaml 5 | - gotk-sync.yaml 6 | - ks.yaml 7 | -------------------------------------------------------------------------------- /kubernetes/apps/metallb-system/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: metallb-system 6 | annotations: 7 | kustomize.toolkit.fluxcd.io/prune: disabled 8 | -------------------------------------------------------------------------------- /ansible/vars/setup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for playbook-setup.yml 3 | 4 | # List of packages to install 5 | packages: 6 | - tar 7 | - unzip 8 | - sudo 9 | - ncdu 10 | - btop 11 | - vim 12 | - tmux 13 | -------------------------------------------------------------------------------- /docker/pocket-id/.env: -------------------------------------------------------------------------------- 1 | # See the documentation for more information: https://pocket-id.org/docs/configuration/environment-variables 2 | APP_URL=https://idp.net.dbren.uk 3 | TRUST_PROXY=true 4 | ANALYTICS_DISABLED=true 5 | PUID=1000 6 | PGID=1000 7 | -------------------------------------------------------------------------------- /ansible/ee/requirements.txt: -------------------------------------------------------------------------------- 1 | certifi==2025.11.12 2 | cffi==2.0.0 3 | charset-normalizer==3.4.4 4 | idna==3.11 5 | jmespath==1.0.1 6 | passlib==1.7.4 7 | pycparser==2.23 ; implementation_name 
!= 'PyPy' 8 | requests==2.32.5 9 | urllib3==2.6.2 10 | -------------------------------------------------------------------------------- /kubernetes/apps/metallb-system/metallb/app/ipaddresspool.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: metallb.io/v1beta1 3 | kind: IPAddressPool 4 | metadata: 5 | name: main 6 | namespace: metallb-system 7 | spec: 8 | addresses: 9 | - 192.168.0.21-192.168.0.49 10 | -------------------------------------------------------------------------------- /kubernetes/apps/metallb-system/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://json.schemastore.org/kustomization 3 | apiVersion: kustomize.config.k8s.io/v1beta1 4 | kind: Kustomization 5 | namespace: metallb-system 6 | resources: 7 | - ./metallb/ks.yaml 8 | -------------------------------------------------------------------------------- /ansible/inventory/host_vars/idp01.net.dbren.uk.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # idp01.net.dbren.uk host_vars 3 | # artis3n.tailscale collection machine role vars 4 | # https://github.com/artis3n/ansible-collection-tailscale/blob/main/roles/machine/README.md#variables 5 | tailscale_args: --accept-dns=false --ssh 6 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/proxmox.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # proxmox group_vars 3 | ansible_user: root 4 | # artis3n.tailscale collection machine role vars 5 | # https://github.com/artis3n/ansible-collection-tailscale/blob/main/roles/machine/README.md#variables 6 | tailscale_args: --accept-dns=false --ssh 7 | -------------------------------------------------------------------------------- /ansible/inventory/host_vars/apps01.net.dbren.uk.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # apps01.net.dbren.uk host_vars 3 | # artis3n.tailscale collection machine role vars 4 | # https://github.com/artis3n/ansible-collection-tailscale/blob/main/roles/machine/README.md#variables 5 | tailscale_args: --accept-dns=false --ssh 6 | -------------------------------------------------------------------------------- /ansible/playbooks/playbook-beszel-agent.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deploy Beszel Binary Agent 3 | hosts: all 4 | roles: 5 | - role: community.beszel.agent 6 | vars: 7 | agent_public_key: "{{ lookup('community.general.onepassword', 'Beszel', field='PUBLIC_KEY', vault='Home-Ops') }}" 8 | -------------------------------------------------------------------------------- /ansible/inventory/host_vars/backup01.net.dbren.uk.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # backup01.net.dbren.uk host_vars 3 | ansible_user: root 4 | # artis3n.tailscale collection machine role vars 5 | # https://github.com/artis3n/ansible-collection-tailscale/blob/main/roles/machine/README.md#variables 6 | tailscale_args: --accept-dns=false --ssh 7 | -------------------------------------------------------------------------------- /ansible/inventory/host_vars/media01.net.dbren.uk.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # media01.net.dbren.uk host_vars 3 | ansible_user: root 4 | # 
artis3n.tailscale collection machine role vars 5 | # https://github.com/artis3n/ansible-collection-tailscale/blob/main/roles/machine/README.md#variables 6 | tailscale_args: --accept-dns=false --ssh 7 | -------------------------------------------------------------------------------- /docker/caddy/conf/Caddyfile.media01.net.dbren.uk: -------------------------------------------------------------------------------- 1 | { 2 | email {env.ACME_EMAIL} 3 | } 4 | 5 | (cloudflare) { 6 | tls { 7 | dns cloudflare {env.CF_API_TOKEN} 8 | } 9 | } 10 | # OpenMediaVault 11 | media01.{env.DOMAIN} { 12 | import cloudflare 13 | reverse_proxy host.docker.internal:1080 14 | } 15 | -------------------------------------------------------------------------------- /kubernetes/apps/metallb-system/metallb/app/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://json.schemastore.org/kustomization 3 | apiVersion: kustomize.config.k8s.io/v1beta1 4 | kind: Kustomization 5 | resources: 6 | - ./ocirepository.yaml 7 | - ./helmrelease.yaml 8 | - ./ipaddresspool.yaml 9 | - ./l2advertisement.yaml 10 | -------------------------------------------------------------------------------- /terraform/.env: -------------------------------------------------------------------------------- 1 | # OpenTofu variables 2 | TF_VAR_op_sa_token=op://Home-Ops/soyelbgbxvtfeiwch7vkbgrorq/credential 3 | HCLOUD_TOKEN=op://Home-Ops/62ykvsuohbajneji4bfnvhhouy/token 4 | AWS_ACCESS_KEY_ID=op://Home-Ops/TerraformState/username 5 | AWS_SECRET_ACCESS_KEY=op://Home-Ops/TerraformState/password 6 | AWS_SSE_CUSTOMER_KEY=op://Home-Ops/TerraformState/sse_customer_key 7 | -------------------------------------------------------------------------------- /docs/miscellaneous/python-uv.md: -------------------------------------------------------------------------------- 1 | # :simple-python: Python 2 | 3 | [uv](https://docs.astral.sh/uv/) is used to manage Python installations and virtual environments on my M1 Pro Max MacBook Pro. 
4 | 5 | Install `uv` via [homebrew](https://brew.sh/): 6 | 7 | ```bash 8 | brew install uv 9 | ``` 10 | 11 | Then, install Python using `uv`: 12 | 13 | ```bash 14 | uv python install 3.13 15 | ``` 16 | -------------------------------------------------------------------------------- /ansible/playbooks/playbook-docker-prune.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Docker | Prune System 3 | hosts: docker 4 | tasks: 5 | - name: Prune all objects (including non-dangling images) 6 | community.docker.docker_prune: 7 | containers: true 8 | images: true 9 | images_filters: 10 | dangling: false 11 | networks: true 12 | volumes: true 13 | builder_cache: true 14 | -------------------------------------------------------------------------------- /kubernetes/apps/metallb-system/metallb/app/helmrelease.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/helm.toolkit.fluxcd.io/helmrelease_v2.json 3 | apiVersion: helm.toolkit.fluxcd.io/v2 4 | kind: HelmRelease 5 | metadata: 6 | name: metallb 7 | spec: 8 | chartRef: 9 | kind: OCIRepository 10 | name: metallb 11 | interval: 1h 12 | values: 13 | crds: 14 | enabled: true 15 | -------------------------------------------------------------------------------- /docker/gatus/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8080" 12 | } 13 | } 14 | } 15 | }, 16 | "AllowFunnel": { 17 | "${TS_CERT_DOMAIN}:443": true 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /docker/beszel/config/serve.json: -------------------------------------------------------------------------------- 1 | { 2 | "TCP": { 3 | "443": { 4 | "HTTPS": true 5 | } 6 | }, 7 | "Web": { 8 | "${TS_CERT_DOMAIN}:443": { 9 | "Handlers": { 10 | "/": { 11 | "Proxy": "http://127.0.0.1:8090" 12 | } 13 | } 14 | } 15 | }, 16 | "AllowFunnel": { 17 | "${TS_CERT_DOMAIN}:443": false 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /ansible/vars/proxmox-backup-cifs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for playbook-proxmox-backup-cifs.yml 3 | cifs_share: "{{ lookup('community.general.onepassword', 'Hetzner StorageBox', field='CIFS_SHARE', vault='Home-Ops') }}" 4 | mountpoint: /mnt/storagebox 5 | smb_username: "{{ lookup('community.general.onepassword', 'Hetzner StorageBox', field='username', vault='Home-Ops') }}" 6 | smb_password: "{{ lookup('community.general.onepassword', 'Hetzner StorageBox', vault='Home-Ops') }}" 7 | -------------------------------------------------------------------------------- /ansible/inventory/host_vars/homeops.hetzner.dbren.uk.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # homeops.hetzner.dbren.uk host_vars 3 | # artis3n.tailscale collection machine role vars 4 | # https://github.com/artis3n/ansible-collection-tailscale/blob/main/roles/machine/README.md#variables 5 | tailscale_authkey: "{{ lookup('community.general.onepassword', 'Tailscale OAuth External Tag', vault='Home-Ops') }}" 6 | tailscale_args: --ssh --advertise-exit-node --accept-routes 7 | tailscale_tags: 8 | - 
external 9 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/subnet_router.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # subnet_router group vars 3 | # artis3n.tailscale collection machine role vars 4 | # https://github.com/artis3n/ansible-collection-tailscale/blob/main/roles/machine/README.md#variables 5 | tailscale_args: >- 6 | --ssh --advertise-exit-node --accept-dns=false 7 | --advertise-routes=192.168.0.2/32,192.168.0.3/32,192.168.0.4/32,192.168.0.5/32,192.168.0.6/32,192.168.0.7/32,192.168.0.8/32,192.168.0.9/32,192.168.0.10/32,192.168.0.11/32,192.168.0.12/32 8 | -------------------------------------------------------------------------------- /docker/paperless-ngx/.env: -------------------------------------------------------------------------------- 1 | PG_PASS=op://Home-Ops/Paperless-ngx/PG_PASS 2 | PAPERLESS_URL=https://paperless.net.dbren.uk 3 | PAPERLESS_SECRET_KEY=op://Home-Ops/Paperless-ngx/PAPERLESS_SECRET_KEY 4 | PAPERLESS_ADMIN_USER=op://Home-Ops/Paperless-ngx/PAPERLESS_ADMIN_USER 5 | PAPERLESS_ADMIN_PASSWORD=op://Home-Ops/Paperless-ngx/PAPERLESS_ADMIN_PASSWORD 6 | PAPERLESS_SOCIALACCOUNT_PROVIDERS=op://Home-Ops/Paperless-ngx/PAPERLESS_SOCIALACCOUNT_PROVIDERS 7 | PAPERLESS_TIME_ZONE=Europe/London 8 | PAPERLESS_OCR_LANGUAGE=eng 9 | -------------------------------------------------------------------------------- /kubernetes/apps/metallb-system/metallb/app/ocirepository.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/source.toolkit.fluxcd.io/ocirepository_v1.json 3 | apiVersion: source.toolkit.fluxcd.io/v1 4 | kind: OCIRepository 5 | metadata: 6 | name: metallb 7 | spec: 8 | interval: 15m 9 | layerSelector: 10 | mediaType: application/vnd.cncf.helm.chart.content.v1.tar+gzip 11 | operation: copy 12 | ref: 13 | tag: v1.19.2 14 | url: oci://quay.io/jetstack/charts/cert-manager 15 | -------------------------------------------------------------------------------- /.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | # .ansible-lint 3 | # https://ansible-lint.readthedocs.io/en/latest/configuring/#configuration-file 4 | # exclude_paths included in this file are parsed relative to this file's location 5 | # and not relative to the CWD of execution. CLI arguments passed to the --exclude 6 | # option will be parsed relative to the CWD of execution. 
7 | exclude_paths: 8 | - .ansible/ 9 | - .github/ 10 | - .git/ 11 | - mkdocs.yml 12 | - docker/ 13 | - kubernetes/ 14 | skip_list: 15 | - no-relative-paths 16 | -------------------------------------------------------------------------------- /.github/workflows/ansible-lint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Ansible Lint 3 | on: 4 | push: 5 | paths: 6 | - ansible/** 7 | 8 | jobs: 9 | ansible-lint: 10 | name: Ansible Lint 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v6 14 | - name: Install 1Password CLI 15 | uses: 1password/install-cli-action@v2 16 | - name: Run ansible-lint 17 | uses: ansible/ansible-lint@v25.12.1 18 | with: 19 | requirements_file: ansible/ee/requirements.yml 20 | -------------------------------------------------------------------------------- /kubernetes/apps/metallb-system/metallb/ks.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/kustomize.toolkit.fluxcd.io/kustomization_v1.json 3 | apiVersion: kustomize.toolkit.fluxcd.io/v1 4 | kind: Kustomization 5 | metadata: 6 | name: metallb 7 | spec: 8 | interval: 1h 9 | path: ./kubernetes/apps/metallb-system/metallb/app 10 | prune: true 11 | sourceRef: 12 | kind: GitRepository 13 | name: flux-system 14 | namespace: flux-system 15 | targetNamespace: metallb-system 16 | -------------------------------------------------------------------------------- /terraform/modules/proxmox_lxc/README.md: -------------------------------------------------------------------------------- 1 | # OpenTofu Module - Proxmox LXC (Deprecated) 2 | 3 | This module creates a Proxmox LXC based on the provided parameters. 4 | 5 | ## Usage 6 | 7 | 1. Initialise the module: 8 | 9 | ```bash 10 | tofu init 11 | ``` 12 | 13 | 2. Call the module from a `.tf` file: 14 | 15 | ```hcl 16 | module "proxmox_lxc_hostname" { 17 | source = "./modules/proxmox_lxc" 18 | # Variables 19 | ... 20 | } 21 | ``` 22 | 23 | ## License 24 | 25 | See [LICENSE](LICENSE). 26 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # docker group_vars 3 | # geerlingguy.pip role vars 4 | # https://github.com/geerlingguy/ansible-role-pip#role-variables 5 | pip_install_packages: 6 | - name: docker 7 | version: "7.1.0" 8 | 9 | # geerlingguy.docker role vars 10 | # https://github.com/geerlingguy/ansible-role-docker#role-variables 11 | docker_users: 12 | - "{{ ansible_user }}" 13 | docker_daemon_options: 14 | log-level: "info" 15 | log-driver: "json-file" 16 | log-opts: 17 | max-size: "10m" 18 | max-file: "3" 19 | -------------------------------------------------------------------------------- /terraform/modules/proxmox_vm/README.md: -------------------------------------------------------------------------------- 1 | # OpenTofu Module - Proxmox Virtual Machine (Deprecated) 2 | 3 | This module creates a Proxmox Virtual Machine (VM) based on the provided parameters. 4 | 5 | ## Usage 6 | 7 | 1. Initialise the module: 8 | 9 | ```bash 10 | tofu init 11 | ``` 12 | 13 | 2. Call the module from a `.tf` file: 14 | 15 | ```hcl 16 | module "proxmox_vm_hostname" { 17 | source = "./modules/proxmox_vm" 18 | # Variables 19 | ... 20 | } 21 | ``` 22 | 23 | ## License 24 | 25 | See [LICENSE](LICENSE). 
26 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/tailscale.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tailscale group_vars 3 | # artis3n.tailscale collection machine role vars 4 | # https://github.com/artis3n/ansible-collection-tailscale/blob/main/roles/machine/README.md#variables 5 | # Generated via: https://login.tailscale.com/admin/settings/oauth 6 | tailscale_authkey: "{{ lookup('community.general.onepassword', 'Tailscale OAuth Home-Ops Tag', vault='Home-Ops') }}" 7 | tailscale_oauth_ephemeral: false 8 | tailscale_oauth_preauthorized: false 9 | tailscale_args: --ssh 10 | tailscale_tags: 11 | - home-ops 12 | state: latest 13 | -------------------------------------------------------------------------------- /terraform/op.tf: -------------------------------------------------------------------------------- 1 | # https://registry.terraform.io/providers/1Password/onepassword/latest/docs 2 | provider "onepassword" { 3 | service_account_token = var.op_sa_token 4 | } 5 | 6 | # https://registry.terraform.io/providers/1Password/onepassword/latest/docs/data-sources/item 7 | data "onepassword_item" "proxmox_virtual_environment" { 8 | vault = var.op_proxmox_virtual_environment_vault_name 9 | title = var.op_proxmox_virtual_environment_item_name 10 | } 11 | 12 | data "onepassword_item" "ssh_key" { 13 | vault = var.op_ssh_vault_name 14 | title = var.op_ssh_key_name 15 | } 16 | -------------------------------------------------------------------------------- /terraform/modules/proxmox_cloud_init_config/README.md: -------------------------------------------------------------------------------- 1 | # OpenTofu Module - Proxmox Cloud Init Configuration 2 | 3 | Create a Proxmox Cloud Init Configuration file for a Virtual Machine. 4 | 5 | ## Usage 6 | 7 | 1. Initialise the module: 8 | 9 | ```bash 10 | tofu init 11 | ``` 12 | 13 | 2. Call the module from a `.tf` file: 14 | 15 | ```hcl 16 | module "proxmox_cloud_init_config" { 17 | source = "./modules/proxmox_cloud_init_config" 18 | # Variables 19 | ... 20 | } 21 | ``` 22 | 23 | ## License 24 | 25 | See [LICENSE](LICENSE). 26 | -------------------------------------------------------------------------------- /.github/workflows/cleanup-ee.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Clean up Execution Environments 3 | on: 4 | workflow_dispatch: # Allows manual execution 5 | schedule: 6 | # At 00:00 on day-of-month 1 7 | - cron: "0 0 1 * *" 8 | 9 | jobs: 10 | cleanup: 11 | runs-on: ubuntu-latest 12 | permissions: 13 | packages: write 14 | steps: 15 | - name: Delete Container Images older than the 10 latest versions 16 | uses: actions/delete-package-versions@v5 17 | with: 18 | package-name: home-ops 19 | package-type: container 20 | min-versions-to-keep: 10 21 | -------------------------------------------------------------------------------- /ansible/pyproject.toml: -------------------------------------------------------------------------------- 1 | [project] 2 | name = "ansible" 3 | version = "0.1.0" 4 | description = "Home-Ops Ansible Content." 
5 | readme = "README.md" 6 | requires-python = ">=3.14" 7 | dependencies = [ 8 | "jmespath==1.0.1", 9 | "passlib==1.7.4", 10 | "charset-normalizer==3.4.4", 11 | "requests==2.32.5", 12 | "urllib3==2.6.2", 13 | "certifi==2025.11.12", 14 | "idna==3.11", 15 | "cffi==2.0.0", 16 | "pycparser==2.23 ; implementation_name != 'PyPy'", 17 | ] 18 | 19 | [dependency-groups] 20 | dev = [ 21 | "ansible-core==2.20.1", 22 | "ansible-lint==25.12.1" 23 | ] 24 | docs = [ 25 | "mkdocs-material>=9.6.21", 26 | ] 27 | -------------------------------------------------------------------------------- /docs/infrastructure/dns.md: -------------------------------------------------------------------------------- 1 | # :material-dns-outline: DNS 2 | 3 | [NextDNS](https://nextdns.io/) is the upstream DNS provider in my Homelab. 4 | 5 | !!! quote "What is NextDNS?" 6 | 7 | NextDNS protects you from all kinds of security threats, blocks ads and trackers on websites and in apps. 8 | 9 | ## Rewrites (A Records) 10 | 11 | NextDNS has a rewrites feature which allows me to create DNS A records for my Homelab. I manage these rewrite records via the NextDNS REST API using a script I created called [NextDNS-Rewrites](https://github.com/dbrennand/NextDNS-Rewrites). My configuration file is located [here](https://github.com/dbrennand/home-ops/blob/main/nextdns-config.yml). 12 | -------------------------------------------------------------------------------- /kubernetes/flux/flux-system/gotk-sync.yaml: -------------------------------------------------------------------------------- 1 | # This manifest was generated by flux. DO NOT EDIT. 2 | --- 3 | apiVersion: source.toolkit.fluxcd.io/v1 4 | kind: GitRepository 5 | metadata: 6 | name: flux-system 7 | namespace: flux-system 8 | spec: 9 | interval: 1m0s 10 | ref: 11 | branch: main 12 | secretRef: 13 | name: flux-system 14 | url: https://github.com/dbrennand/home-ops.git 15 | --- 16 | apiVersion: kustomize.toolkit.fluxcd.io/v1 17 | kind: Kustomization 18 | metadata: 19 | name: flux-system 20 | namespace: flux-system 21 | spec: 22 | interval: 10m0s 23 | path: ./kubernetes/flux 24 | prune: true 25 | sourceRef: 26 | kind: GitRepository 27 | name: flux-system 28 | -------------------------------------------------------------------------------- /ansible/templates/setup/ssh-discord-notify.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Author: Daniel Brennand 3 | # License: MIT 4 | # This script sends an SSH login notification using a Discord webhook. 5 | # Requirements: 6 | # - curl 7 | 8 | DISCORD_WEBHOOK="{{ discord_webhook }}" 9 | HOSTNAME="$(hostname)" 10 | 11 | if [[ "${PAM_TYPE}" == "open_session" ]]; then 12 | MESSAGE="⚠️ ${HOSTNAME}: ${PAM_USER} logged in." 13 | elif [[ "${PAM_TYPE}" == "close_session" ]]; then 14 | MESSAGE="${HOSTNAME}: ${PAM_USER} logged out." 15 | fi 16 | 17 | # Send a request only if MESSAGE is populated 18 | if [[ ! 
-z "${MESSAGE}" ]]; then 19 | curl -X POST -H "Content-Type: application/json" -d "{\"content\": \"${MESSAGE}\"}" "${DISCORD_WEBHOOK}" 20 | fi 21 | -------------------------------------------------------------------------------- /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "build": { 3 | "dockerfile": "Dockerfile" 4 | }, 5 | "updateRemoteUserUID": true, 6 | "mounts": [ 7 | "source=/run/host-services/ssh-auth.sock,target=/run/host-services/ssh-auth.sock,type=bind" 8 | ], 9 | "remoteEnv": { 10 | "SSH_AUTH_SOCK": "/run/host-services/ssh-auth.sock" 11 | }, 12 | "customizations": { 13 | "vscode": { 14 | "extensions": [ 15 | "streetsidesoftware.code-spell-checker", 16 | "redhat.ansible", 17 | "yzhang.markdown-all-in-one", 18 | "hashicorp.terraform", 19 | "rebornix.toggle" 20 | ] 21 | } 22 | }, 23 | "postStartCommand": "uv sync --directory ansible --dev" 24 | } 25 | -------------------------------------------------------------------------------- /.github/workflows/gh-pages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: GitHub Pages 3 | on: 4 | push: 5 | branches: 6 | - main 7 | paths: 8 | - docs/** 9 | - mkdocs.yml 10 | permissions: 11 | contents: write 12 | jobs: 13 | deploy: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - uses: actions/checkout@v6 17 | 18 | - uses: actions/setup-python@v6 19 | with: 20 | python-version: 3.x 21 | 22 | - run: echo "cache_id=$(date --utc '+%V')" >> $GITHUB_ENV 23 | 24 | - uses: actions/cache@v5 25 | with: 26 | key: mkdocs-material-${{ env.cache_id }} 27 | path: .cache 28 | restore-keys: | 29 | mkdocs-material- 30 | 31 | - run: pip install mkdocs-material 32 | 33 | - run: mkdocs gh-deploy --force 34 | -------------------------------------------------------------------------------- /docs/infrastructure/hetzner.md: -------------------------------------------------------------------------------- 1 | # :simple-hetzner: Hetzner Cloud 2 | 3 | [Hetzner Cloud](https://www.hetzner.com/cloud) is a European based public cloud provider. Based in Germany, they also have datacenters in Finland, USA and Singapore. 4 | 5 | ## :material-server: Cloud VPS 6 | 7 | I have a single cloud VPS provisioned on Hetzner. The VPS is deployed using [OpenTofu](https://github.com/dbrennand/home-ops/blob/main/terraform/hetzner.tf) using the method documented [here](./opentofu.md). 8 | 9 | I'm currently using this VPS for: 10 | 11 | - Remotely monitoring via Tailscale all my Homelab devices using [Beszel](./beszel.md). 12 | - Hosting a [status](https://status.macaroni-beardie.ts.net/) page using [Tailscale Funnel](https://tailscale.com/kb/1223/funnel). All devices are accessed remotely via Tailscale. 
13 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # all group_vars 3 | ansible_user: daniel 4 | 5 | # Variables defined below are used in multiple playbooks 6 | # Network subdomain 7 | domain: net.dbren.uk 8 | # Cloudflare API token for DNS-01 challenge 9 | # https://github.com/dbrennand/ansible-role-caddy-docker/blob/main/README.md#example---cloudflare-dns-01-challenge 10 | cloudflare_api_token: "{{ lookup('community.general.onepassword', 'Caddy', field='CF_API_TOKEN', vault='Home-Ops') }}" 11 | # Email address used to receive notifications from Let's Encrypt for certificate expiry 12 | acme_email: "{{ lookup('community.general.onepassword', 'Caddy', field='ACME_EMAIL', vault='Home-Ops') }}" 13 | # Discord webhook URL for notifications 14 | discord_webhook: "{{ lookup('community.general.onepassword', 'Discord Webhook', vault='Home-Ops') }}" 15 | -------------------------------------------------------------------------------- /terraform/modules/proxmox_cloud_init_config/variables.tf: -------------------------------------------------------------------------------- 1 | # OpenTofu Module - Proxmox Cloud Init Configuration - Variables 2 | # LICENSE: MIT 3 | # Author: Daniel Brennand 4 | 5 | variable "proxmox_cloud_init_config_vm_name" { 6 | description = "Name of the Proxmox Virtual Machine." 7 | type = string 8 | } 9 | 10 | variable "proxmox_cloud_init_config_virtual_environment_node_name" { 11 | description = "Name of the Proxmox VE node." 12 | type = string 13 | default = "proxmox01" 14 | } 15 | 16 | variable "proxmox_cloud_init_config_ssh_authorized_keys" { 17 | description = "SSH public key used by Cloud-init." 18 | type = string 19 | } 20 | 21 | variable "proxmox_cloud_init_vm_username" { 22 | description = "Username to be provisioned by Cloud-init." 
23 | type = string 24 | default = "daniel" 25 | } 26 | -------------------------------------------------------------------------------- /ansible/ee/requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: community.docker 4 | version: 5.0.4 5 | - name: community.general 6 | version: 12.1.0 7 | - name: ansible.posix 8 | version: 2.1.0 9 | - name: artis3n.tailscale 10 | version: 1.1.0 11 | - name: awx.awx 12 | version: 24.6.1 13 | - name: community.beszel 14 | version: 0.6.0 15 | roles: 16 | - name: dbrennand.autorestic 17 | version: 1.4.0 18 | - name: geerlingguy.docker 19 | version: 7.9.0 20 | - name: geerlingguy.pip 21 | version: 3.1.1 22 | - name: geerlingguy.security 23 | src: https://github.com/dbrennand/ansible-role-security 24 | version: refactor/ssh-regexp 25 | - name: ironicbadger.figurine 26 | src: https://github.com/ironicbadger/ansible-role-figurine 27 | version: main 28 | - name: geerlingguy.repo-epel 29 | version: 3.1.1 30 | -------------------------------------------------------------------------------- /.devcontainer/Dockerfile: -------------------------------------------------------------------------------- 1 | ARG BASE_IMAGE=docker.io/library/python:3.14.2-slim 2 | FROM $BASE_IMAGE 3 | 4 | # https://github.com/opencontainers/image-spec/blob/main/annotations.md 5 | LABEL org.opencontainers.image.source "https://github.com/dbrennand/home-ops/blob/main/.devcontainer/Dockerfile" 6 | LABEL org.opencontainers.image.authors "Daniel Brennand" 7 | LABEL org.opencontainers.image.licenses "MIT" 8 | LABEL org.opencontainers.image.base.name $BASE_IMAGE 9 | LABEL org.opencontainers.image.title "home-ops" 10 | LABEL org.opencontainers.image.description "home-ops VSCode Dev Container" 11 | 12 | # Tooling 13 | RUN apt update && \ 14 | apt -y install git \ 15 | openssh-client \ 16 | locales-all 17 | 18 | # uv 19 | COPY --from=ghcr.io/astral-sh/uv:0.9.18 /uv /uvx /bin/ 20 | 21 | # OpenTofu 22 | COPY --from=ghcr.io/opentofu/opentofu:1.11.1 /usr/local/bin/tofu /bin/tofu 23 | -------------------------------------------------------------------------------- /docker/pocket-id/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://raw.githubusercontent.com/pocket-id/pocket-id/main/docker-compose.yml 3 | services: 4 | pocket-id: 5 | image: ghcr.io/pocket-id/pocket-id:v1.16.0 6 | container_name: pocket-id 7 | restart: unless-stopped 8 | env_file: .env 9 | expose: 10 | - 1411 11 | volumes: 12 | - "./data:/app/data" 13 | networks: 14 | - pocket-id 15 | - caddy 16 | # Optional healthcheck 17 | healthcheck: 18 | test: ["CMD", "/app/pocket-id", "healthcheck"] 19 | interval: 1m30s 20 | timeout: 5s 21 | retries: 2 22 | start_period: 10s 23 | labels: 24 | caddy: idp.{$$DOMAIN} 25 | caddy.import: cloudflare 26 | caddy.reverse_proxy: "{{ upstreams 1411 }}" 27 | 28 | networks: 29 | pocket-id: 30 | name: pocket-id 31 | caddy: 32 | external: true 33 | -------------------------------------------------------------------------------- /ansible/playbooks/playbook-tailscale.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Tailscale | Install & Update 3 | hosts: tailscale 4 | pre_tasks: 5 | - name: Tailscale | Enable IP forwarding 6 | when: inventory_hostname in groups["subnet_router"] 7 | become: true 8 | block: 9 | - name: Tailscale | Enable IPv4 forwarding 10 | ansible.posix.sysctl: 11 | name: net.ipv4.ip_forward 12 
| value: "1" 13 | sysctl_file: /etc/sysctl.d/99-tailscale.conf 14 | state: present 15 | reload: true 16 | 17 | - name: Tailscale | Enable IPv6 forwarding 18 | ansible.posix.sysctl: 19 | name: net.ipv6.conf.all.forwarding 20 | value: "1" 21 | sysctl_file: /etc/sysctl.d/99-tailscale.conf 22 | state: present 23 | reload: true 24 | roles: 25 | - role: artis3n.tailscale.machine 26 | -------------------------------------------------------------------------------- /terraform/variables.tf: -------------------------------------------------------------------------------- 1 | variable "op_sa_token" { 2 | description = "1Password Service Account token." 3 | type = string 4 | sensitive = true 5 | } 6 | 7 | variable "op_proxmox_virtual_environment_vault_name" { 8 | description = "1Password vault name containing the Proxmox virtual environment item." 9 | type = string 10 | default = "Home-Ops" 11 | } 12 | 13 | variable "op_proxmox_virtual_environment_item_name" { 14 | description = "1Password Proxmox virtual environment item name." 15 | type = string 16 | default = "proxmox01.net.dbren.uk" 17 | } 18 | 19 | variable "op_ssh_vault_name" { 20 | description = "1Password vault name containing the SSH key." 21 | type = string 22 | default = "Home-Ops" 23 | } 24 | 25 | variable "op_ssh_key_name" { 26 | description = "1Password SSH key name." 27 | type = string 28 | default = "Home-Ops SSH Key" 29 | } 30 | -------------------------------------------------------------------------------- /docker/stirling-pdf/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | stirling-pdf: 4 | image: ghcr.io/stirling-tools/stirling-pdf:2.1.3 5 | container_name: stirling-pdf 6 | restart: unless-stopped 7 | networks: 8 | - stirling-pdf 9 | - caddy 10 | expose: 11 | - 8080 12 | volumes: 13 | - ./StirlingPDF/trainingData:/usr/share/tessdata # Required for extra OCR languages 14 | - ./StirlingPDF/extraConfigs:/configs 15 | - ./StirlingPDF/customFiles:/customFiles/ 16 | - ./StirlingPDF/logs:/logs/ 17 | - ./StirlingPDF/pipeline:/pipeline/ 18 | environment: 19 | - DISABLE_ADDITIONAL_FEATURES=false 20 | - LANGS=en_GB 21 | labels: 22 | caddy: stirling.{$$DOMAIN} 23 | caddy.import: cloudflare 24 | caddy.reverse_proxy: "{{ upstreams 8080 }}" 25 | 26 | networks: 27 | stirling-pdf: 28 | name: stirling-pdf 29 | caddy: 30 | external: true 31 | -------------------------------------------------------------------------------- /terraform/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 1.6.2" 3 | required_providers { 4 | proxmox = { 5 | source = "bpg/proxmox" 6 | version = "0.89.1" 7 | } 8 | onepassword = { 9 | source = "1Password/onepassword" 10 | version = "3.0.1" 11 | } 12 | hcloud = { 13 | source = "hetznercloud/hcloud" 14 | version = "~> 1.45" 15 | } 16 | http = { 17 | source = "hashicorp/http" 18 | version = "3.5.0" 19 | } 20 | } 21 | backend "s3" { 22 | skip_region_validation = true 23 | skip_credentials_validation = true 24 | skip_s3_checksum = true 25 | encrypt = true 26 | key = "terraform.tfstate" 27 | region = "eu-central-003" 28 | bucket = "homeops" 29 | endpoints = { 30 | s3 = "s3.eu-central-003.backblazeb2.com" 31 | } 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /ansible/playbooks/playbook-proxmox-backup-cifs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Proxmox 
Backup Server | Mount CIFS Share 3 | hosts: backup01.net.dbren.uk 4 | become: true 5 | handlers: 6 | - name: Reload systemd daemon 7 | ansible.builtin.systemd: 8 | daemon_reload: true 9 | vars_files: 10 | - ../vars/proxmox-backup-cifs.yml 11 | tasks: 12 | - name: Create Samba credentials file 13 | ansible.builtin.copy: 14 | content: | 15 | username={{ smb_username }} 16 | password={{ smb_password }} 17 | dest: /etc/samba/.smbcreds 18 | mode: "0600" 19 | 20 | - name: Mount CIFS Share 21 | notify: Reload systemd daemon 22 | ansible.posix.mount: 23 | src: "{{ cifs_share }}" 24 | path: "{{ mountpoint }}" 25 | # backup user uid is 34 and gid is 34 26 | opts: vers=3.0,credentials=/etc/samba/.smbcreds,uid=34,gid=34,defaults 27 | fstype: cifs 28 | state: present 29 | -------------------------------------------------------------------------------- /docker/caddy/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | caddy: 4 | container_name: caddy 5 | image: ghcr.io/dbrennand/caddy-docker-proxy-cloudflare:2025.09.17 6 | restart: unless-stopped 7 | command: caddy docker-proxy 8 | ports: 9 | - "80:80" 10 | - "443:443" 11 | - "443:443/udp" 12 | environment: 13 | ACME_EMAIL: "${ACME_EMAIL}" 14 | CF_API_TOKEN: "${CF_API_TOKEN}" 15 | DOMAIN: "${DOMAIN}" 16 | # https://github.com/lucaslorentz/caddy-docker-proxy#caddy-cli 17 | CADDY_INGRESS_NETWORKS: caddy 18 | CADDY_DOCKER_CADDYFILE_PATH: /etc/caddy/Caddyfile 19 | volumes: 20 | - ./conf:/etc/caddy 21 | - ./data:/data 22 | - ./config:/config 23 | # Required for Caddy to read Docker container labels 24 | - /var/run/docker.sock:/var/run/docker.sock 25 | networks: 26 | - caddy 27 | extra_hosts: 28 | - "host.docker.internal:host-gateway" 29 | 30 | networks: 31 | caddy: 32 | name: caddy 33 | -------------------------------------------------------------------------------- /.github/workflows/tflint.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Terraform Lint 3 | on: 4 | push: 5 | paths: 6 | - terraform/** 7 | 8 | jobs: 9 | tflint: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v6 13 | name: Checkout source code 14 | 15 | - uses: actions/cache@v5 16 | name: Cache plugin dir 17 | with: 18 | path: ~/.tflint.d/plugins 19 | key: tflint-${{ hashFiles('.tflint.hcl') }} 20 | 21 | - uses: terraform-linters/setup-tflint@v6 22 | name: Setup TFLint 23 | with: 24 | tflint_version: v0.44.1 25 | 26 | - name: Show version 27 | run: tflint --version 28 | 29 | - name: Init TFLint 30 | run: tflint --init 31 | env: 32 | # https://github.com/terraform-linters/tflint/blob/master/docs/user-guide/plugins.md#avoiding-rate-limiting 33 | GITHUB_TOKEN: ${{ github.token }} 34 | 35 | - name: Run TFLint 36 | run: tflint -f compact --recursive 37 | -------------------------------------------------------------------------------- /docker/beszel/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | tailscale: 4 | image: ghcr.io/tailscale/tailscale:v1.92.4 5 | container_name: tailscale_beszel 6 | hostname: beszel 7 | restart: unless-stopped 8 | environment: 9 | - TS_AUTHKEY=${TS_AUTHKEY}?ephemeral=false 10 | - TS_EXTRA_ARGS=--accept-routes --advertise-tags=tag:beszel-hub 11 | - TS_ACCEPT_DNS=true 12 | - TS_STATE_DIR=/var/lib/tailscale 13 | - TS_USERSPACE=false 14 | - TS_SERVE_CONFIG=/config/serve.json 15 | volumes: 16 | - tailscale_beszel:/var/lib/tailscale 17 | - 
./config:/config 18 | devices: 19 | - /dev/net/tun:/dev/net/tun 20 | cap_add: 21 | - NET_ADMIN 22 | beszel: 23 | image: ghcr.io/henrygd/beszel/beszel:0.17.0 24 | container_name: beszel 25 | restart: unless-stopped 26 | depends_on: 27 | - tailscale 28 | network_mode: service:tailscale 29 | volumes: 30 | - ./beszel_data:/beszel_data 31 | 32 | volumes: 33 | tailscale_beszel: 34 | -------------------------------------------------------------------------------- /docker/gatus/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | services: 3 | tailscale: 4 | image: ghcr.io/tailscale/tailscale:v1.92.4 5 | container_name: tailscale_gatus 6 | hostname: status 7 | restart: unless-stopped 8 | environment: 9 | - TS_AUTHKEY=${TS_AUTHKEY}?ephemeral=false 10 | - TS_EXTRA_ARGS=--accept-routes --advertise-tags=tag:gatus 11 | - TS_ACCEPT_DNS=true 12 | - TS_STATE_DIR=/var/lib/tailscale 13 | - TS_USERSPACE=false 14 | - TS_SERVE_CONFIG=/config/serve.json 15 | volumes: 16 | - tailscale_gatus:/var/lib/tailscale 17 | - ./config:/config 18 | devices: 19 | - /dev/net/tun:/dev/net/tun 20 | cap_add: 21 | - NET_ADMIN 22 | 23 | gatus: 24 | image: ghcr.io/twin/gatus:v5.33.1 25 | container_name: gatus 26 | restart: unless-stopped 27 | depends_on: 28 | - tailscale 29 | network_mode: service:tailscale 30 | volumes: 31 | - ./config:/config 32 | - ./data:/data/ 33 | 34 | volumes: 35 | tailscale_gatus: 36 | -------------------------------------------------------------------------------- /docs/infrastructure/secrets-management.md: -------------------------------------------------------------------------------- 1 | # :simple-1password: Secrets Management 2 | 3 | [1Password](https://1password.com/) is used for managing secrets for devices and services deployed in my Homelab. 4 | 5 | ## Integrations 6 | 7 | 1Password has various [developer](https://developer.1password.com/) integrations which I use. I also use open source tooling developed by the community. These include: 8 | 9 | - [1Password SSH Agent](https://developer.1password.com/docs/ssh/agent/) 10 | - [1Password CLI (op)](https://developer.1password.com/docs/cli/get-started/) 11 | - [1Password Service Accounts](https://developer.1password.com/docs/service-accounts/) 12 | - [1Password Terraform Provider](https://search.opentofu.org/provider/1password/onepassword/latest) 13 | - [`community.general.onepassword` Ansible Lookup Plugin](https://docs.ansible.com/ansible/latest/collections/community/general/onepassword_lookup.html) 14 | 15 | The 1Password Service Accounts are used to authenticate these integrations and are restricted to only read from a specific vault. 16 | -------------------------------------------------------------------------------- /ansible/ee/custom_entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # License: MIT 3 | # This script is used as an entrypoint for the Execution Environment container image. 4 | # It authenticates the op CLI using a service account. 5 | # See: https://developer.1password.com/docs/service-accounts/use-with-1password-cli/ 6 | # We do not directly pass the OP_SERVICE_ACCOUNT_TOKEN environment variable because 7 | # then the terminal session on the host will only be authenticated as the service 8 | # account. We pass a slightly different environment variable named "ONEPASSWORD_SERVICE_ACCOUNT_TOKEN" 9 | # and then set this environment variable's value to OP_SERVICE_ACCOUNT_TOKEN. 
10 | # It also sets the SSH_AUTH_SOCK environment variable due to observations seen in: 11 | # https://github.com/ansible/ansible-navigator/issues/1591#issuecomment-2816701294 12 | # The contents of the default ansible-builder entrypoint: 13 | # https://github.com/ansible/ansible-builder/blob/devel/src/ansible_builder/_target_scripts/entrypoint 14 | # are appended to this script. 15 | export OP_SERVICE_ACCOUNT_TOKEN="${ONEPASSWORD_SERVICE_ACCOUNT_TOKEN}" 16 | export SSH_AUTH_SOCK="/agent.sock" 17 | -------------------------------------------------------------------------------- /nextdns-config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Config for https://github.com/dbrennand/NextDNS-Rewrites 3 | profile_name: "Main" 4 | 5 | rewrites: 6 | # A Records 7 | - name: subnet01.net.dbren.uk 8 | content: 192.168.0.2 9 | # 192.168.0.3 is free 10 | - name: proxmox01.net.dbren.uk 11 | content: 192.168.0.4 12 | # 192.168.0.5 is free 13 | - name: backup01.net.dbren.uk 14 | content: 192.168.0.6 15 | - name: exit01.net.dbren.uk 16 | content: 192.168.0.7 17 | # 192.168.0.8 is free 18 | - name: media01.net.dbren.uk 19 | content: 192.168.0.9 20 | - name: paperless.net.dbren.uk 21 | content: 192.168.0.9 22 | - name: stirling.net.dbren.uk 23 | content: 192.168.0.9 24 | - name: spotify-stats.net.dbren.uk 25 | content: 192.168.0.9 26 | - name: spotify-api.net.dbren.uk 27 | content: 192.168.0.9 28 | - name: talos01.net.dbren.uk 29 | content: 192.168.0.10 30 | - name: awx.net.dbren.uk 31 | content: 192.168.0.10 32 | - name: idp01.net.dbren.uk 33 | content: 192.168.0.11 34 | - name: idp.net.dbren.uk 35 | content: 192.168.0.11 36 | - name: apps01.net.dbren.uk 37 | content: 192.168.0.12 38 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Daniel Brennand 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "config:recommended", 4 | ":dependencyDashboard" 5 | ], 6 | "regexManagers": [ 7 | { 8 | "fileMatch": [ 9 | "(^|/)execution-environment\\.ya?ml$" 10 | ], 11 | "matchStrings": [ 12 | "# renovate: depName=(?[^\\s]+) datasource=(?docker)\\s*\\n\\s*name:\\s*(?[^:]+):(?[^\\s]+)" 13 | ], 14 | "datasourceTemplate": "{{datasource}}", 15 | "depNameTemplate": "{{depName}}", 16 | "versioningTemplate": "docker" 17 | }, 18 | { 19 | "fileMatch": [ 20 | "(^|/)execution-environment\\.ya?ml$" 21 | ], 22 | "matchStrings": [ 23 | "# renovate: depName=(?[^\\s]+) datasource=(?pypi)\\s*\\n\\s*package_pip:\\s*(?[^=]+)==(?[^\\s]+)" 24 | ], 25 | "datasourceTemplate": "{{datasource}}", 26 | "depNameTemplate": "{{depName}}", 27 | "versioningTemplate": "pep440" 28 | } 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /terraform/modules/proxmox_lxc/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Daniel Brennand 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /terraform/modules/proxmox_vm/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Daniel Brennand 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /ansible/inventory/inventory.yml: -------------------------------------------------------------------------------- 1 | --- 2 | all: 3 | hosts: 4 | subnet01.net.dbren.uk: 5 | proxmox01.net.dbren.uk: 6 | exit01.net.dbren.uk: 7 | backup01.net.dbren.uk: 8 | media01.net.dbren.uk: 9 | homeops.hetzner.dbren.uk: 10 | idp01.net.dbren.uk: 11 | apps01.net.dbren.uk: 12 | children: 13 | # Proxmox 14 | proxmox: 15 | hosts: 16 | proxmox01.net.dbren.uk: 17 | # Docker 18 | docker: 19 | hosts: 20 | media01.net.dbren.uk: 21 | homeops.hetzner.dbren.uk: 22 | idp01.net.dbren.uk: 23 | apps01.net.dbren.uk: 24 | # Tailscale subnet routers 25 | subnet_router: 26 | hosts: 27 | subnet01.net.dbren.uk: 28 | exit01.net.dbren.uk: 29 | # Tailscale hosts 30 | tailscale: 31 | vars: 32 | # Ensures other group_vars have higher precedence than tailscale group_vars 33 | ansible_group_priority: 0 34 | children: 35 | proxmox: 36 | subnet_router: 37 | hosts: 38 | media01.net.dbren.uk: 39 | backup01.net.dbren.uk: 40 | homeops.hetzner.dbren.uk: 41 | idp01.net.dbren.uk: 42 | apps01.net.dbren.uk: 43 | -------------------------------------------------------------------------------- /terraform/modules/proxmox_cloud_init_config/LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2025 Daniel Brennand 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /docs/docker-compose.md: -------------------------------------------------------------------------------- 1 | # Docker Compose 2 | 3 | [Docker Compose](https://docs.docker.com/compose/) is used to deploy certain applications in my Homelab. The [docker](https://github.com/dbrennand/home-ops/tree/main/docker) directory contains subdirectories for each application. 4 | 5 | ## Environment Variables 6 | 7 | Each directory contains a `.env` file which references secrets from a 1Password Vault. On the Docker host, the [`op`](https://developer.1password.com/docs/cli/get-started/) CLI is configured to authenticate with a 1Password Service Account. 
The secret references are then substituted at runtime when using the `op run` command. 8 | 9 | ## Deployment 10 | 11 | !!! info 12 | 13 | The example commands below are for Caddy, but the process is identical for the other application directories. 14 | 15 | 1. Copy the application directory to the Docker host: 16 | 17 | ```bash 18 | scp -r caddy daniel@pihole02.net.dbren.uk:~/ 19 | ``` 20 | 21 | 2. SSH to the Docker host: 22 | 23 | ```bash 24 | ssh daniel@pihole02.net.dbren.uk 25 | ``` 26 | 27 | 3. Deploy the application: 28 | 29 | ```bash 30 | cd caddy && op run --env-file=./.env -- docker compose up -d 31 | ``` 32 | -------------------------------------------------------------------------------- /kubernetes/talos/talenv.sops.yaml: -------------------------------------------------------------------------------- 1 | ts_authkey: ENC[AES256_GCM,data:0hXUw0riZ0wHZYAbk9NhwCBVxzTg3N+wJSXHLcz4o6/MCoOLSHB363xDLCFtslMUyx6F81wPD6Qr039dl2Q=,iv:LMqiGVn4pc9Xxz4Umi0oARITpFshMN43Wn/k29URcb8=,tag:JW3g+G2NTrvnbUNFrjYMmQ==,type:str] 2 | sops: 3 | age: 4 | - recipient: age1csxr93np9ynejzvt8jjjau97s29mayy447vlsf4mu9srmpjmr3uq56n27k 5 | enc: | 6 | -----BEGIN AGE ENCRYPTED FILE----- 7 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBaTVVNb3llWGpsZGVVRHNN 8 | L1Q3QVlIakZJOUtaUEpnQjljVmQrRURyWERZCk9rRWtvZEQ4ZnNTV0x3amc3eUR2 9 | Zkp1NzFZTFpXakxESDRrMXVhT0hmTUEKLS0tIHhEdU1ldWQrSFR2ZHNnenRkMG50 10 | OVQ3K0Z0VGR3bSt5aEovTTZ4cVQ0STgKBCH48R5ZkWeO7vicpI+8u1YVW9CR6YEA 11 | TBgx7++xO0lo+cwtDyWrTyGh1QaHLcGqUqMfys33xbe/Bx3538dtCw== 12 | -----END AGE ENCRYPTED FILE----- 13 | lastmodified: "2025-11-20T20:31:08Z" 14 | mac: ENC[AES256_GCM,data:PjHrg2rOZOYyl05oC98e9t3g8NQ+qEZTv4BH3K14ZdiUO9PIH3Ab9FwZSqo/+S1JkFE6EdIOCK8YLTqsWmrMZNl2pzs1rja5eS32IqDl9k8tGGL8pxsnAGkdV8WXzHbYPfN2PCnkX5POOFtzBdPiFiba0tgAb/W0D25v5byMass=,iv:F6nlIGO1YyRittNBErsDHLW4nEteDjuix1Z7fN9nWoM=,tag:30HL2kR+7K394WITb2WnvA==,type:str] 15 | unencrypted_suffix: _unencrypted 16 | version: 3.11.0 17 | -------------------------------------------------------------------------------- /Taskfile.yml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | vars: 4 | PROJECT_ROOT: 5 | sh: git rev-parse --show-toplevel 6 | VENV_PATH: "{{ .PROJECT_ROOT }}/ansible/.venv" 7 | ANSIBLE_PROJECT_PATH: "{{ .PROJECT_ROOT }}/ansible" 8 | 9 | tasks: 10 | venv: 11 | desc: Create the Python virtual environment using uv. 12 | dir: "{{ .ANSIBLE_PROJECT_PATH }}" 13 | cmd: "uv sync --dev {{ .CLI_ARGS }}" 14 | 15 | ansible:install:requirements: 16 | desc: Install Ansible requirements locally. 17 | dir: "{{ .ANSIBLE_PROJECT_PATH }}" 18 | cmds: 19 | - uv add -r ee/requirements.txt 20 | - "uv run ansible-galaxy install -r ee/requirements.yml {{ .CLI_ARGS }}" 21 | preconditions: 22 | - sh: command -v {{ .VENV_PATH }}/bin/ansible-galaxy 23 | msg: ansible-galaxy is not installed. Have you run 'task venv'? 24 | 25 | ansible:ee:requirements: 26 | desc: Generate Ansible EE Python requirements.txt. 27 | dir: "{{ .ANSIBLE_PROJECT_PATH }}" 28 | cmd: uv export --no-hashes --no-header --no-annotate --no-dev --format requirements.txt > ee/requirements.txt 29 | 30 | ansible:ee:build: 31 | desc: Build the Ansible Execution Environment.
32 | dir: "{{ .ANSIBLE_PROJECT_PATH }}/ee" 33 | cmd: uv run ansible-builder build --container-runtime docker --tag ghcr.io/dbrennand/home-ops:latest -vvv 34 | -------------------------------------------------------------------------------- /docs/infrastructure/backblaze.md: -------------------------------------------------------------------------------- 1 | # :simple-backblaze: Backblaze Object Storage (S3) 2 | 3 | [Backblaze B2 Cloud Storage](https://www.backblaze.com/cloud-storage) is used in my Homelab to store backups and provide object storage for various applications. 4 | 5 | ## :octicons-terminal-16: Backblaze B2 CLI 6 | 7 | The [Backblaze B2 CLI](https://www.backblaze.com/docs/cloud-storage-command-line-tools) is used to create buckets, upload and download files, and manage your account. 8 | 9 | Install the Backblaze B2 CLI on MacOS: 10 | 11 | ```bash 12 | brew install b2-tools 13 | ``` 14 | 15 | ## :fontawesome-solid-bucket: Creating a Backblaze B2 Bucket 16 | 17 | Create a new bucket keeping only the last version of files: 18 | 19 | ```bash 20 | BUCKET_NAME="my-bucket" 21 | b2 bucket create "${BUCKET_NAME}" allPrivate 22 | # To keep only the last version of files 23 | b2 bucket create --lifecycle-rule '{"daysFromHidingToDeleting": 1, "daysFromUploadingToHiding": null, "fileNamePrefix": ""}' "${BUCKET_NAME}" allPrivate 24 | ``` 25 | 26 | ## :octicons-key-16: Creating Application Keys 27 | 28 | Create a new application key with specific permissions for a bucket: 29 | 30 | ```bash 31 | BUCKET_NAME="my-bucket" 32 | KEY_NAME="my-key" 33 | b2 key create --bucket "${BUCKET_NAME}" "${KEY_NAME}" readFiles,writeFiles,listFiles,deleteFiles,readBuckets,listBuckets 34 | ``` 35 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://squidfunk.github.io/mkdocs-material/schema.json 2 | theme: 3 | features: 4 | - content.code.copy 5 | - navigation.tracking 6 | - navigation.expand 7 | - navigation.top 8 | icon: 9 | repo: fontawesome/brands/git-alt 10 | name: material 11 | palette: 12 | # Palette toggle for light mode 13 | - scheme: default 14 | media: "(prefers-color-scheme: light)" 15 | primary: blue 16 | toggle: 17 | icon: material/weather-night 18 | name: Switch to dark mode 19 | 20 | # Palette toggle for dark mode 21 | - scheme: slate 22 | media: "(prefers-color-scheme: dark)" 23 | primary: blue 24 | toggle: 25 | icon: material/weather-sunny 26 | name: Switch to light mode 27 | site_name: dbrennand | home-ops 28 | repo_url: https://github.com/dbrennand/home-ops 29 | repo_name: dbrennand/home-ops 30 | markdown_extensions: 31 | - pymdownx.highlight: 32 | anchor_linenums: true 33 | line_spans: __span 34 | pygments_lang_class: true 35 | - pymdownx.inlinehilite 36 | - pymdownx.snippets 37 | - pymdownx.superfences 38 | - admonition 39 | - pymdownx.details 40 | - attr_list 41 | - pymdownx.emoji: 42 | emoji_index: !!python/name:material.extensions.emoji.twemoji 43 | emoji_generator: !!python/name:material.extensions.emoji.to_svg 44 | -------------------------------------------------------------------------------- /ansible/ansible-navigator.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ansible-navigator: 3 | execution-environment: 4 | container-engine: docker 5 | image: ghcr.io/dbrennand/home-ops:latest 6 | container-options: 7 | # SSH keys from 1Password are added to the SSH agent. 
8 | # See: https://developer.1password.com/docs/ssh/agent/compatibility/#ssh-auth-sock 9 | # OrbStack creates the same path as Docker to bind the host's SSH agent socket into 10 | # the Execution Environment. See below links: 11 | # https://docs.docker.com/desktop/features/networking/#ssh-agent-forwarding 12 | # https://docs.orbstack.dev/docker/#ssh-agent-forwarding 13 | # This allows the Execution Environment to access the 1Password SSH agent socket. 14 | - "--volume=/run/host-services/ssh-auth.sock:/agent.sock" 15 | environment-variables: 16 | # We don't set the SSH_AUTH_SOCK to /agent.sock here because of observations seen in 17 | # https://github.com/ansible/ansible-navigator/issues/1591#issuecomment-2816701294 18 | # The variable precedence in this file is overridden by ansible-runner. 19 | # To workaround this issue, as we're already using a custom entrypoint script 20 | # (see ee/custom_entrypoint.sh), we override the SSH_AUTH_SOCK there instead. 21 | pass: 22 | # This environment variable is passed to the Execution Environment to authenticate 23 | # the op CLI. See ee/custom_entrypoint.sh for further details. 24 | - ONEPASSWORD_SERVICE_ACCOUNT_TOKEN 25 | -------------------------------------------------------------------------------- /docker/your-spotify/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://github.com/Yooooomi/your_spotify?tab=readme-ov-file#using-docker-compose 3 | 4 | networks: 5 | caddy: 6 | external: true 7 | your-spotify: 8 | name: your-spotify 9 | 10 | services: 11 | server: 12 | image: yooooomi/your_spotify_server:1.15.0 13 | container_name: your-spotify-server 14 | restart: unless-stopped 15 | expose: 16 | - 8080 17 | networks: 18 | - caddy 19 | - your-spotify 20 | depends_on: 21 | - mongo 22 | environment: 23 | API_ENDPOINT: https://spotify-api.net.dbren.uk 24 | CLIENT_ENDPOINT: https://spotify-stats.net.dbren.uk 25 | SPOTIFY_PUBLIC: ${SPOTIFY_PUBLIC} 26 | SPOTIFY_SECRET: ${SPOTIFY_SECRET} 27 | CORS: https://spotify-stats.net.dbren.uk 28 | labels: 29 | caddy: spotify-api.{$$DOMAIN} 30 | caddy.import: cloudflare 31 | caddy.reverse_proxy: "{{ upstreams 8080 }}" 32 | 33 | mongo: 34 | image: mongo:6 35 | networks: 36 | - your-spotify 37 | restart: unless-stopped 38 | volumes: 39 | - ./database:/data/db 40 | 41 | web: 42 | image: yooooomi/your_spotify_client:1.15.0 43 | container_name: your-spotify-client 44 | restart: unless-stopped 45 | networks: 46 | - caddy 47 | - your-spotify 48 | expose: 49 | - 3000 50 | environment: 51 | API_ENDPOINT: https://spotify-api.net.dbren.uk 52 | labels: 53 | caddy: spotify-stats.{$$DOMAIN} 54 | caddy.import: cloudflare 55 | caddy.reverse_proxy: "{{ upstreams 3000 }}" 56 | -------------------------------------------------------------------------------- /ansible/ee/execution-environment.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: 3 3 | images: 4 | base_image: 5 | # renovate: depName=ghcr.io/almalinux/10-minimal datasource=docker 6 | name: ghcr.io/almalinux/10-minimal:10 7 | dependencies: 8 | ansible_core: 9 | # renovate: depName=ansible-core datasource=pypi 10 | package_pip: ansible-core==2.20.1 11 | ansible_runner: 12 | # renovate: depName=ansible-runner datasource=pypi 13 | package_pip: ansible-runner==2.4.2 14 | python: requirements.txt 15 | galaxy: requirements.yml 16 | additional_build_files: 17 | - src: custom_entrypoint.sh 18 | dest: scripts 19 | additional_build_steps: 20 | prepend_base: 21 | 
- RUN rpm --import https://downloads.1password.com/linux/keys/1password.asc 22 | - RUN echo -e "[1password]\nname=1Password Stable Channel\nbaseurl=https://downloads.1password.com/linux/rpm/stable/\$basearch\nenabled=1\ngpgcheck=1\ngpgkey=\"https://downloads.1password.com/linux/keys/1password.asc\"" > /etc/yum.repos.d/1password.repo # noqa yaml[line-length] 23 | - RUN microdnf install -y python3 python3-pip openssh-clients git jq 1password-cli 24 | - COPY _build/scripts/custom_entrypoint.sh /bin/custom_entrypoint.sh 25 | - RUN chmod +x /bin/custom_entrypoint.sh 26 | append_final: 27 | - RUN cat /opt/builder/bin/entrypoint >> /bin/custom_entrypoint.sh 28 | # renovate: depName=dumb-init datasource=pypi 29 | - RUN $PYCMD -m pip install --no-cache-dir dumb-init==1.2.5 30 | options: 31 | package_manager_path: /usr/bin/microdnf 32 | container_init: 33 | entrypoint: '["/bin/custom_entrypoint.sh", "dumb-init"]' 34 | -------------------------------------------------------------------------------- /docs/infrastructure/tailscale.md: -------------------------------------------------------------------------------- 1 | # :simple-tailscale: Tailscale 2 | 3 | !!! quote "What is Tailscale?" 4 | 5 | [Tailscale](https://tailscale.com/) is a VPN service that makes the devices and applications you own accessible anywhere in the world, securely and effortlessly. It enables encrypted point-to-point connections using the open source WireGuard protocol, which means only devices on your private network can communicate with each other. 6 | 7 | Tailscale is used in my Homelab to remotely access services. I've written a little more about this [here](https://danielbrennand.com/blog/tailscale/). 8 | 9 | An [Ansible playbook](https://github.com/dbrennand/home-ops/blob/main/ansible/playbooks/playbook-tailscale.yml) is used to install and configure Tailscale on all devices in my Homelab. The configuration for each device is managed via Ansible [group](https://github.com/dbrennand/home-ops/tree/main/ansible/inventory/group_vars) and [host](https://github.com/dbrennand/home-ops/tree/main/ansible/inventory/host_vars) vars. 10 | 11 | ## Tailscale OAuth Client 12 | 13 | For devices to authenticate to the Tailnet, an [OAuth client](https://login.tailscale.com/admin/settings/oauth) is required. 14 | 15 | ## Tailscale Auth Key 16 | 17 | [Talos](kubernetes/talos.md) authenticates to Tailscale using an [auth key](https://tailscale.com/kb/1085/auth-keys#generate-an-auth-key). 18 | 19 | ## DNS 20 | 21 | My Tailnet is configured to use [NextDNS](./dns.md) as the upstream DNS provider. This blocks trackers and ads as well as providing DNS resolution for my Homelab, without the overhead of maintaining my own DNS server.
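For reference, the state the playbook converges on for a subnet router such as `subnet01` or `exit01` is roughly equivalent to the following `tailscale up` invocation (an illustrative sketch: the exact flags are derived from the group and host vars, and the route assumes my `192.168.0.0/24` LAN):

```bash
# Illustrative only: the playbook assembles these flags from group/host vars.
# --advertise-routes publishes the local LAN subnet to the Tailnet.
# --accept-dns makes the node use the Tailnet's DNS settings (NextDNS).
sudo tailscale up --advertise-routes=192.168.0.0/24 --accept-dns=true
```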
22 | -------------------------------------------------------------------------------- /terraform/hetzner.tf: -------------------------------------------------------------------------------- 1 | # https://registry.terraform.io/providers/hetznercloud/hcloud/latest/docs 2 | provider "hcloud" {} 3 | 4 | # Get public IP address to restrict SSH access on the firewall 5 | data "http" "public_ip" { 6 | url = "https://api.ipify.org" 7 | } 8 | 9 | resource "hcloud_firewall" "home_ops" { 10 | name = "Home-Ops" 11 | rule { 12 | direction = "in" 13 | protocol = "tcp" 14 | port = "22" 15 | source_ips = ["${chomp(data.http.public_ip.response_body)}/32"] 16 | } 17 | # https://tailscale.com/kb/1150/cloud-hetzner#step-2-allow-udp-port-41641 18 | rule { 19 | direction = "in" 20 | protocol = "udp" 21 | port = "41641" 22 | source_ips = [ 23 | "0.0.0.0/0", 24 | "::/0" 25 | ] 26 | } 27 | } 28 | 29 | resource "hcloud_server" "home_ops" { 30 | name = "Home-Ops" 31 | image = "alma-9" 32 | server_type = "cpx11" 33 | location = "hel1" 34 | backups = true 35 | user_data = <@subnet01.net.dbren.uk 31 | ``` 32 | 33 | 5. Run the following commands on the Raspberry Pi to set a static IP address, update the system and expand the filesystem: 34 | 35 | ```bash 36 | # Become root 37 | sudo -i 38 | raspi-config 39 | # Select "Advanced Options" > "Network Config" > 2 NetworkManager 40 | # Select "Advanced Options" > "Expand Filesystem" 41 | 42 | # Set static IP address 43 | nmcli connection modify eth0 ipv4.method manual ipv4.addresses 192.168.0.2/24 ipv4.gateway 192.168.0.1 ipv4.dns 45.90.28.138,45.90.30.138 connection.autoconnect yes 44 | 45 | # Enable connection to apply changes 46 | nmcli connection up eth0 47 | 48 | # Update system 49 | apt-get update && apt-get upgrade -y 50 | reboot 51 | ``` 52 | -------------------------------------------------------------------------------- /ansible/playbooks/playbook-media-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Media Server | Setup 3 | hosts: media01.net.dbren.uk 4 | vars_files: 5 | - ../vars/media-server.yml 6 | handlers: 7 | - name: Restart monit systemd service 8 | become: true 9 | ansible.builtin.systemd: 10 | name: monit 11 | state: restarted 12 | 13 | - name: Restart nginx systemd service 14 | become: true 15 | ansible.builtin.systemd: 16 | name: nginx 17 | state: restarted 18 | pre_tasks: 19 | # https://www.jeffgeerling.com/blog/2023/how-solve-error-externally-managed-environment-when-installing-pip3 20 | # Resolves error from stderr: error: externally-managed-environment 21 | - name: Ignore PEP 668 22 | tags: pip 23 | ansible.builtin.file: 24 | path: /usr/lib/python3.11/EXTERNALLY-MANAGED 25 | state: absent 26 | roles: 27 | - role: geerlingguy.pip 28 | tags: pip 29 | become: true 30 | - role: geerlingguy.docker 31 | tags: docker 32 | become: true 33 | tasks: 34 | - name: OpenMediaVault | Patches 35 | tags: omv 36 | block: 37 | - name: Patch OpenMediaVault monit nginx config to check on docker0 interface port 1080 38 | notify: Restart monit systemd service 39 | become: true 40 | ansible.builtin.lineinfile: 41 | path: /etc/monit/conf.d/openmediavault-nginx.conf 42 | regexp: "^if failed host 127.0.0.1.*" 43 | line: "if failed host 172.17.0.1 port 1080 protocol http timeout 15 seconds for 2 times within 3 cycles then restart" 44 | backrefs: true 45 | backup: true 46 | state: present 47 | 48 | - name: Patch OpenMediaVault nginx config to listen on docker0 interface port 1080 49 | notify: Restart nginx systemd 
service 50 | become: true 51 | ansible.builtin.lineinfile: 52 | path: /etc/nginx/sites-enabled/openmediavault-webgui 53 | regexp: "^(.*)listen(.*)default_server;" 54 | line: "listen 172.17.0.1:1080;" 55 | backrefs: true 56 | backup: true 57 | state: present 58 | post_tasks: 59 | - name: Include dbrennand.autorestic role 60 | tags: autorestic 61 | ansible.builtin.import_role: 62 | name: dbrennand.autorestic 63 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |
2 | 3 | # 🏠 🛠 Home Operations 4 | 5 | Home Operations repository for managing my Homelab infrastructure. 6 | 7 | [![AWX](https://img.shields.io/endpoint?url=https%3A%2F%2Fstatus.macaroni-beardie.ts.net%2Fapi%2Fv1%2Fendpoints%2Fhome-ops_awx%2Fhealth%2Fbadge.shields&style=for-the-badge&logo=ansible&logoColor=white&label=AWX)](https://status.macaroni-beardie.ts.net) 8 | [![Beszel Hub](https://img.shields.io/endpoint?url=https%3A%2F%2Fstatus.macaroni-beardie.ts.net%2Fapi%2Fv1%2Fendpoints%2Fhome-ops_beszel-hub%2Fhealth%2Fbadge.shields&style=for-the-badge&logo=statuspage&logoColor=white&label=Beszel%20Hub)](https://status.macaroni-beardie.ts.net) 9 | [![OpenMediaVault](https://img.shields.io/endpoint?url=https%3A%2F%2Fstatus.macaroni-beardie.ts.net%2Fapi%2Fv1%2Fendpoints%2Fhome-ops_openmediavault%2Fhealth%2Fbadge.shields&style=for-the-badge&logo=openmediavault&logoColor=white&label=OpenMediaVault)](https://status.macaroni-beardie.ts.net) 10 | [![Proxmox](https://img.shields.io/endpoint?url=https%3A%2F%2Fstatus.macaroni-beardie.ts.net%2Fapi%2Fv1%2Fendpoints%2Fhome-ops_proxmox%2Fhealth%2Fbadge.shields&style=for-the-badge&logo=proxmox&logoColor=white&label=Proxmox)](https://status.macaroni-beardie.ts.net) 11 | [![Pocket ID](https://img.shields.io/endpoint?url=https%3A%2F%2Fstatus.macaroni-beardie.ts.net%2Fapi%2Fv1%2Fendpoints%2Fhome-ops_pocket-id%2Fhealth%2Fbadge.shields&style=for-the-badge&logo=auth0&logoColor=white&label=Pocket%20ID)](https://status.macaroni-beardie.ts.net) 12 | 13 |
14 | 15 | ## 📝 Overview 16 | 17 | > [!NOTE] 18 | > This repository is a constant work in progress, and I will continue to update it as I learn more. 19 | 20 | The goals of this repository are: 21 | 22 | - Automate the configuration of my Homelab infrastructure and deployment of applications. 23 | - Adhere to best practices. 24 | - Learn and test new technologies and concepts. 25 | - Document my Homelab setup and configuration for future reference and in case of disaster recovery. 26 | - Share knowledge and learnings with others. 27 | 28 | ## Ansible Content 29 | 30 | Ansible content used to configure my Homelab infrastructure and deploy applications is located in the [ansible](ansible) directory. 31 | 32 | ## 📋 Taskfile 33 | 34 | This repository uses [Taskfile](https://taskfile.dev) to quickly perform repetitive [tasks](Taskfile.yml). 35 | 36 | ## License 37 | 38 | [MIT](LICENSE) 39 | -------------------------------------------------------------------------------- /docs/infrastructure/opentofu.md: -------------------------------------------------------------------------------- 1 | # :simple-opentofu: OpenTofu 2 | 3 | [OpenTofu](https://opentofu.org/) (a fork of Terraform) is used to deploy infrastructure in my Homelab. Resources located in [`terraform`](https://github.com/dbrennand/home-ops/tree/main/terraform/) are used to deploy VMs on my Proxmox VE node. 4 | 5 | ## Custom Modules 6 | 7 | My Home-Ops project contains three OpenTofu modules I've created. The only active one is [`proxmox_cloud_init_config`](https://github.com/dbrennand/home-ops/tree/main/terraform/modules/proxmox_cloud_init_config), which I use to create a Cloud-init configuration file for each VM. I previously used the [`proxmox_vm`](https://github.com/dbrennand/home-ops/tree/main/terraform/modules/proxmox_vm) module, but I found it no longer provides enough flexibility, so I've deprecated it. 8 | 9 | ## OpenTofu State 10 | 11 | The [OpenTofu state](https://opentofu.org/docs/language/state/) is stored in a Backblaze S3 bucket. 12 | 13 | ## :lock: Secrets 14 | 15 | The [1Password Terraform provider](https://search.opentofu.org/provider/1password/onepassword/latest) is used to retrieve credentials for Proxmox and an SSH key used during VM creation. 16 | 17 | ## Usage 18 | 19 | 1. Install OpenTofu: 20 | 21 | ```bash 22 | brew install opentofu 23 | ``` 24 | 25 | 2. Initialize OpenTofu providers and the S3 backend: 26 | 27 | ```bash 28 | cd terraform 29 | op run --env-file=./.env -- tofu init 30 | ``` 31 | 32 | 3. Plan the deployment: 33 | 34 | ```bash 35 | op run --env-file=./.env -- tofu plan 36 | ``` 37 | 38 | 4. If everything looks good, apply the deployment: 39 | 40 | ```bash 41 | op run --env-file=./.env -- tofu apply 42 | ``` 43 | 44 | ## Destroying Specific Resources 45 | 46 | To destroy specific resources, use the [`tofu destroy`](https://opentofu.org/docs/cli/commands/destroy/) command with the `-target` flag: 47 | 48 | ```bash 49 | op run --env-file=./.env -- tofu destroy -target module.proxmox_vm_control01 -target module.proxmox_vm_worker01 -target module.proxmox_vm_worker02 50 | ``` 51 | 52 | ## Removing State for Manually Destroyed Resources 53 | 54 | If resources are manually destroyed, the state file will need to be updated to reflect the changes to the infrastructure.
To do this, use the [`tofu state rm`](https://opentofu.org/docs/v1.6/cli/commands/state/rm/) command: 55 | 56 | ```bash 57 | op run --env-file=./.env -- tofu state rm 'module.proxmox_vm_worker02' 58 | ``` 59 | -------------------------------------------------------------------------------- /docs/infrastructure/kubernetes/flux.md: -------------------------------------------------------------------------------- 1 | # :simple-flux: Flux 2 | 3 | [FluxCD](https://fluxcd.io/) is deployed on my [Talos](./talos.md) Kubernetes node to adopt a [GitOps](https://fluxcd.io/flux/concepts/#gitops) approach to deploying applications on Kubernetes. My [GitHub repository](https://github.com/dbrennand/home-ops/tree/main/kubernetes) is the source of truth for Kubernetes applications I have deployed. 4 | 5 | !!! quote "What is GitOps?" 6 | 7 | GitOps is a way of managing your infrastructure and applications so that whole system is described declaratively and version controlled (most likely in a Git repository), and having an automated process that ensures that the deployed environment matches the state specified in a repository. 8 | 9 | [Source](https://fluxcd.io/flux/concepts/#gitops). 10 | 11 | ## Prerequisite 12 | 13 | Install the [`flux`](https://fluxcd.io/flux/installation/#install-the-flux-cli) CLI: 14 | 15 | ```bash 16 | brew install fluxcd/tap/flux 17 | ``` 18 | 19 | ## Deploying the Flux Controllers 20 | 21 | !!! abstract 22 | 23 | - [Reference Documentation - GitHub PAT](https://fluxcd.io/flux/installation/bootstrap/github/#github-pat). 24 | - [Reference Documentation - GitHub Personal Account](https://fluxcd.io/flux/installation/bootstrap/github/#github-personal-account). 25 | 26 | 1. Export the GitHub PAT: 27 | 28 | ```bash 29 | export GITHUB_TOKEN= 30 | ``` 31 | 32 | 2. Deploy the Flux controllers: 33 | 34 | ```bash 35 | flux bootstrap github \ 36 | --token-auth \ 37 | --cluster-domain=cluster.net.dbren.uk \ 38 | --owner=dbrennand \ 39 | --repository=home-ops \ 40 | --branch=main \ 41 | --path=kubernetes/flux \ 42 | --personal 43 | ``` 44 | 45 | 3. Verify that the Flux controllers are reconciled and deployed successfully: 46 | 47 | ```bash 48 | flux check 49 | ``` 50 | 51 | ## Upgrading Flux 52 | 53 | 1. Upgrade the `flux` CLI: 54 | 55 | ```bash 56 | brew install fluxcd/tap/flux 57 | ``` 58 | 59 | 2. Update the Flux manifest: 60 | 61 | ```bash 62 | flux install --export > ./kubernetes/flux/flux-system/gotk-components.yaml 63 | ``` 64 | 65 | 3. Commit and push the changes: 66 | 67 | ```bash 68 | git add ./kubernetes/flux/flux-system/gotk-components.yaml 69 | git push 70 | ``` 71 | 72 | 4. Force flux to upgrade immediately: 73 | 74 | ```bash 75 | flux reconcile ks flux-system --with-source 76 | ``` 77 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # 🏠 🛠 Home Operations 2 | 3 | Home Operations repository for managing my Homelab infrastructure. 
4 | 5 | [![AWX](https://img.shields.io/endpoint?url=https%3A%2F%2Fstatus.macaroni-beardie.ts.net%2Fapi%2Fv1%2Fendpoints%2Fhome-ops_awx%2Fhealth%2Fbadge.shields&style=for-the-badge&logo=ansible&logoColor=white&label=AWX)](https://status.macaroni-beardie.ts.net) 6 | [![Beszel Hub](https://img.shields.io/endpoint?url=https%3A%2F%2Fstatus.macaroni-beardie.ts.net%2Fapi%2Fv1%2Fendpoints%2Fhome-ops_beszel-hub%2Fhealth%2Fbadge.shields&style=for-the-badge&logo=statuspage&logoColor=white&label=Beszel%20Hub)](https://status.macaroni-beardie.ts.net) 7 | [![OpenMediaVault](https://img.shields.io/endpoint?url=https%3A%2F%2Fstatus.macaroni-beardie.ts.net%2Fapi%2Fv1%2Fendpoints%2Fhome-ops_openmediavault%2Fhealth%2Fbadge.shields&style=for-the-badge&logo=openmediavault&logoColor=white&label=OpenMediaVault)](https://status.macaroni-beardie.ts.net) 8 | [![Proxmox](https://img.shields.io/endpoint?url=https%3A%2F%2Fstatus.macaroni-beardie.ts.net%2Fapi%2Fv1%2Fendpoints%2Fhome-ops_proxmox%2Fhealth%2Fbadge.shields&style=for-the-badge&logo=proxmox&logoColor=white&label=Proxmox)](https://status.macaroni-beardie.ts.net) 9 | [![Pocket ID](https://img.shields.io/endpoint?url=https%3A%2F%2Fstatus.macaroni-beardie.ts.net%2Fapi%2Fv1%2Fendpoints%2Fhome-ops_pocket-id%2Fhealth%2Fbadge.shields&style=for-the-badge&logo=auth0&logoColor=white&label=Pocket%20ID)](https://status.macaroni-beardie.ts.net) 10 | 11 | ## 📝 Overview 12 | 13 | !!! note 14 | 15 | This repository is a constant work in progress, and I will continue to update it as I learn more. 16 | 17 | The goals of this repository are: 18 | 19 | - Automate the configuration of my Homelab infrastructure and deployment of applications. 20 | - Adhere to best practices. 21 | - Learn and test new technologies and concepts. 22 | - Document my Homelab setup and configuration for future reference and in case of disaster recovery. 23 | - Share knowledge and learnings with others. 24 | 25 | ## :simple-ansible: Ansible Content 26 | 27 | Ansible content used to configure my Homelab infrastructure and deploy applications is located in the [ansible](https://github.com/dbrennand/home-ops/tree/main/ansible) directory. 28 | 29 | ## 📋 Taskfile 30 | 31 | This repository uses [Taskfile](https://taskfile.dev) to quickly perform repetitive [tasks](https://github.com/dbrennand/home-ops/blob/main/Taskfile.yml).
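For example, a typical local bootstrap chains the tasks defined there:

```bash
# Task names come from Taskfile.yml in this repository
task venv                          # create the Python virtual environment with uv
task ansible:install:requirements  # install Ansible collections and roles locally
task ansible:ee:build              # build the Execution Environment image
```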
32 | 33 | ## :material-license: License 34 | 35 | [MIT](https://github.com/dbrennand/home-ops/blob/main/LICENSE) 36 | -------------------------------------------------------------------------------- /.github/workflows/build-ee.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Build Execution Environment 3 | on: 4 | workflow_dispatch: # Allows manual execution 5 | push: 6 | branches: 7 | - main 8 | paths: 9 | - ansible/ee/** 10 | 11 | jobs: 12 | build: 13 | runs-on: ubuntu-latest 14 | # Sets the permissions granted to the `GITHUB_TOKEN` for the actions in this job 15 | permissions: 16 | contents: write 17 | packages: write 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v6 21 | 22 | - name: Get short SHA commit hash 23 | id: short_sha 24 | run: echo "SHORT_SHA=$(git rev-parse --short HEAD)" >> $GITHUB_OUTPUT 25 | 26 | - name: Set up Docker 27 | uses: docker/setup-docker-action@v4 28 | with: 29 | daemon-config: | 30 | { 31 | "features": { 32 | "containerd-snapshotter": true 33 | } 34 | } 35 | 36 | - name: Set up Python 3.14 37 | uses: actions/setup-python@v6 38 | with: 39 | python-version: 3.14 40 | 41 | - name: Install dependencies 42 | run: python3 -m pip install ansible-builder==3.1.1 43 | 44 | - name: Login to GitHub Container Registry 45 | uses: docker/login-action@v3 46 | with: 47 | registry: ghcr.io 48 | username: ${{ github.actor }} 49 | password: ${{ secrets.GITHUB_TOKEN }} 50 | 51 | - name: Set up QEMU 52 | uses: docker/setup-qemu-action@v3 53 | 54 | - name: Set up Docker Buildx 55 | uses: docker/setup-buildx-action@v3 56 | id: docker_buildx 57 | with: 58 | platforms: linux/amd64,linux/arm64 59 | 60 | - name: Build Execution Environment image 61 | working-directory: ansible/ee 62 | run: ansible-builder build -vvv --container-runtime docker --tag "ghcr.io/dbrennand/home-ops:${{ steps.short_sha.outputs.SHORT_SHA }}" --extra-build-cli-args "--load --builder ${{ steps.docker_buildx.outputs.name }} --platform linux/amd64,linux/arm64" 63 | 64 | - name: Determine the ID of the Execution Environment image 65 | id: ee_id 66 | run: echo "IMAGE_ID=$(docker images -q "ghcr.io/dbrennand/home-ops:${{ steps.short_sha.outputs.SHORT_SHA }}")" >> $GITHUB_OUTPUT 67 | 68 | - name: Create the latest tag 69 | run: docker tag "${{ steps.ee_id.outputs.IMAGE_ID }}" "ghcr.io/dbrennand/home-ops:latest" 70 | 71 | - name: Push Execution Environment image to GitHub Container Registry 72 | run: | 73 | docker push ghcr.io/dbrennand/home-ops:latest 74 | docker push ghcr.io/dbrennand/home-ops:${{ steps.short_sha.outputs.SHORT_SHA }} 75 | -------------------------------------------------------------------------------- /docker/gatus/config/config.yaml: -------------------------------------------------------------------------------- 1 | storage: 2 | type: sqlite 3 | path: /data/data.db 4 | caching: true 5 | 6 | ui: 7 | title: Home-Ops | Status Page 8 | description: Home-Ops | Status Page 9 | header: Home-Ops | Status Page 10 | logo: https://avatars.githubusercontent.com/u/52419383 11 | link: https://github.com/dbrennand/home-ops 12 | buttons: 13 | - name: GitHub 14 | link: https://github.com/dbrennand/home-ops 15 | - name: Docs 16 | link: https://homeops.dbren.uk 17 | 18 | endpoints: 19 | - name: Proxmox 20 | group: Home-Ops 21 | url: "https://proxmox01.net.dbren.uk:8006" 22 | interval: 5m 23 | conditions: 24 | - "[STATUS] == 200" 25 | - "[CERTIFICATE_EXPIRATION] > 48h" 26 | - name: Proxmox Backup Server 27 | group: Home-Ops 28 | url: 
"https://backup01.net.dbren.uk:8007" 29 | interval: 5m 30 | conditions: 31 | - "[STATUS] == 200" 32 | - "[CERTIFICATE_EXPIRATION] > 48h" 33 | - name: OpenMediaVault 34 | group: Home-Ops 35 | url: "https://media01.net.dbren.uk" 36 | interval: 5m 37 | conditions: 38 | - "[STATUS] == 200" 39 | - "[CERTIFICATE_EXPIRATION] > 48h" 40 | - name: Pocket ID 41 | group: Home-Ops 42 | url: "https://idp.net.dbren.uk" 43 | interval: 5m 44 | conditions: 45 | - "[STATUS] == 200" 46 | - "[CERTIFICATE_EXPIRATION] > 48h" 47 | - name: Paperless-ngx 48 | group: Home-Ops 49 | url: "https://paperless.net.dbren.uk" 50 | interval: 5m 51 | conditions: 52 | - "[STATUS] == 200" 53 | - "[CERTIFICATE_EXPIRATION] > 48h" 54 | - name: Stirling PDF 55 | group: Home-Ops 56 | url: "https://stirling.net.dbren.uk" 57 | interval: 5m 58 | conditions: 59 | - "[STATUS] == 200" 60 | - "[CERTIFICATE_EXPIRATION] > 48h" 61 | - name: AWX 62 | group: Home-Ops 63 | url: "https://awx.net.dbren.uk" 64 | interval: 5m 65 | conditions: 66 | - "[STATUS] == 200" 67 | - "[CERTIFICATE_EXPIRATION] > 48h" 68 | - name: Beszel Hub 69 | group: Home-Ops 70 | url: "https://beszel.macaroni-beardie.ts.net" 71 | interval: 5m 72 | conditions: 73 | - "[STATUS] == 200" 74 | - "[CERTIFICATE_EXPIRATION] > 48h" 75 | - name: Blog 76 | group: External 77 | url: "https://dbren.uk" 78 | interval: 5m 79 | conditions: 80 | - "[STATUS] == 200" 81 | - "[CERTIFICATE_EXPIRATION] > 48h" 82 | - name: Home-Ops Docs 83 | group: External 84 | url: "https://homeops.dbren.uk" 85 | interval: 5m 86 | conditions: 87 | - "[STATUS] == 200" 88 | - "[CERTIFICATE_EXPIRATION] > 48h" 89 | - name: NextDNS 90 | group: External 91 | url: "https://ping.nextdns.io/" 92 | interval: 5m 93 | conditions: 94 | - "[STATUS] == 200" 95 | - "[CERTIFICATE_EXPIRATION] > 48h" 96 | -------------------------------------------------------------------------------- /kubernetes/talos/talconfig.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | clusterName: home-ops 3 | talosVersion: v1.11.5 4 | kubernetesVersion: v1.34.1 5 | endpoint: https://talos01.net.dbren.uk:6443 6 | domain: cluster.net.dbren.uk 7 | allowSchedulingOnControlPlanes: true 8 | nodes: 9 | - hostname: talos01 10 | ipAddress: 192.168.0.10 11 | controlPlane: true 12 | installDisk: /dev/sda 13 | machineSpec: 14 | mode: metal 15 | arch: amd64 16 | secureboot: true 17 | useUKI: true 18 | nameservers: 19 | # NextDNS 20 | - 45.90.28.138 21 | - 45.90.30.138 22 | networkInterfaces: 23 | - interface: ens18 24 | dhcp: false 25 | addresses: 26 | - 192.168.0.10/24 27 | routes: 28 | - network: 0.0.0.0/0 29 | gateway: 192.168.0.1 30 | certSANs: 31 | - 192.168.0.10 32 | - talos01.net.dbren.uk 33 | userVolumes: 34 | # Projects volume for AWX 35 | - name: awx-projects 36 | provisioning: 37 | diskSelector: 38 | match: disk.dev_path == "/dev/sdb" 39 | maxSize: "50GiB" 40 | # Postgres volume for AWX 41 | - name: awx-postgres 42 | provisioning: 43 | diskSelector: 44 | match: disk.dev_path == "/dev/sdb" 45 | maxSize: "50GiB" 46 | schematic: 47 | customization: 48 | systemExtensions: 49 | officialExtensions: 50 | - siderolabs/qemu-guest-agent 51 | - siderolabs/tailscale 52 | - siderolabs/intel-ucode 53 | patches: 54 | - |- 55 | machine: 56 | nodeLabels: 57 | node.kubernetes.io/exclude-from-external-load-balancers: 58 | $$patch: delete 59 | - |- 60 | machine: 61 | kubelet: 62 | extraMounts: 63 | - destination: /var/mnt/awx-projects 64 | type: bind 65 | source: /var/mnt/awx-projects 66 | options: 67 | - bind 68 | - rshared 
69 | - rw 70 | - destination: /var/mnt/awx-postgres 71 | type: bind 72 | source: /var/mnt/awx-postgres 73 | options: 74 | - bind 75 | - rshared 76 | - rw 77 | patches: 78 | # Disable cluster discovery as it is not required for a single node 79 | - |- 80 | cluster: 81 | discovery: 82 | enabled: false 83 | # Enable kubelet certificate rotation 84 | - |- 85 | machine: 86 | kubelet: 87 | extraArgs: 88 | rotate-server-certificates: "true" 89 | # Configure the Tailscale extension 90 | - |- 91 | --- 92 | apiVersion: v1alpha1 93 | kind: ExtensionServiceConfig 94 | name: tailscale 95 | environment: 96 | - TS_AUTHKEY=${ts_authkey} 97 | -------------------------------------------------------------------------------- /docs/ansible/awx.md: -------------------------------------------------------------------------------- 1 | # :simple-ansible: AWX 2 | 3 | [AWX](https://github.com/ansible/awx) is used in my Homelab to run Ansible content against devices. 4 | 5 | ## Deployment 6 | 7 | AWX is deployed via the [AWX Operator](https://github.com/ansible/awx-operator) on Kubernetes. I'm running version `2.19.1` of the operator. 8 | 9 | I have a single-node [K3s](https://k3s.io/) VM on my Proxmox VE node, which I deployed using [OpenTofu](../infrastructure/opentofu.md). The K3s deployment is done via an [Ansible Playbook](https://github.com/dbrennand/home-ops/blob/main/ansible/playbooks/playbook-k3s-deploy.yml). 10 | 11 | The [awx-on-k3s](https://github.com/kurokobo/awx-on-k3s) project is used to deploy the AWX Operator and AWX Custom Resource Definition (CRD) on the K3s cluster. I use an [Ansible playbook](https://github.com/dbrennand/home-ops/blob/main/ansible/playbooks/playbook-awx-deploy.yml) to prepare the K3s node for the AWX deployment. 12 | 13 | Next, I perform the following steps to deploy AWX: 14 | 15 | 1. SSH to the K3s node: 16 | 17 | ```bash 18 | ssh daniel@k3s01.net.dbren.uk 19 | ``` 20 | 21 | 2. Deploy the AWX Operator: 22 | 23 | ```bash 24 | cd awx-on-k3s && kubectl apply -k operator 25 | ``` 26 | 27 | 3. Configure the ingress in `base/awx.yml` to use the `ClusterIssuer` for Cloudflare: 28 | 29 | ```yaml 30 | --- 31 | apiVersion: awx.ansible.com/v1beta1 32 | kind: AWX 33 | metadata: 34 | name: awx 35 | spec: 36 | # ... 37 | ingress_type: ingress 38 | ingress_hosts: 39 | - hostname: awx.net.dbren.uk 40 | tls_secret: awx-secret-tls 41 | ingress_annotations: | 42 | cert-manager.io/cluster-issuer: letsencrypt-production 43 | ``` 44 | 45 | 4. Configure the PostgreSQL and AWX admin credentials in `base/kustomization.yaml`: 46 | 47 | ```yaml 48 | --- 49 | apiVersion: kustomize.config.k8s.io/v1beta1 50 | kind: Kustomization 51 | namespace: awx 52 | 53 | generatorOptions: 54 | disableNameSuffixHash: true 55 | 56 | secretGenerator: 57 | - name: awx-postgres-configuration 58 | type: Opaque 59 | literals: 60 | - host=awx-postgres-15 61 | - port=5432 62 | - database=awx 63 | - username=awx 64 | - password= 65 | - type=managed 66 | 67 | - name: awx-admin-password 68 | type: Opaque 69 | literals: 70 | - password= 71 | ``` 72 | 73 | 5. Deploy the AWX CRD: 74 | 75 | ```bash 76 | kubectl apply -k base 77 | ``` 78 | 79 | ## Configuration 80 | 81 | An [Ansible playbook](https://github.com/dbrennand/home-ops/blob/main/ansible/playbooks/playbook-awx.yml) is used to configure AWX with the [Execution Environment](execution-environment.md), credentials, project, inventories and Discord notification template.
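As a rough sketch of what that playbook does, registering the Execution Environment using the `awx.awx` collection looks something like the task below (the connection details and `awx_admin_token` variable are illustrative, not the playbook's actual values):

```yaml
# Illustrative sketch only: the real configuration lives in
# ansible/playbooks/playbook-awx.yml. awx_admin_token is an assumed variable.
- name: Register the home-ops Execution Environment in AWX
  awx.awx.execution_environment:
    name: home-ops
    image: ghcr.io/dbrennand/home-ops:latest
    controller_host: https://awx.net.dbren.uk
    controller_oauthtoken: "{{ awx_admin_token }}"
```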
82 | -------------------------------------------------------------------------------- /terraform/modules/proxmox_vm/variables.tf: -------------------------------------------------------------------------------- 1 | # OpenTofu Module - Proxmox Virtual Machine - Variables 2 | # LICENSE: MIT 3 | # Author: Daniel Brennand 4 | 5 | variable "proxmox_vm_id" { 6 | description = "ID of the Proxmox Virtual Machine." 7 | type = number 8 | } 9 | 10 | variable "proxmox_vm_name" { 11 | description = "Name of the Proxmox Virtual Machine." 12 | type = string 13 | } 14 | 15 | variable "proxmox_vm_tags" { 16 | description = "Tags for the Proxmox Virtual Machine." 17 | type = list(string) 18 | default = ["vm", "opentofu"] 19 | } 20 | 21 | variable "proxmox_vm_cores" { 22 | description = "Number of CPU cores for the Proxmox Virtual Machine." 23 | type = number 24 | default = 2 25 | } 26 | 27 | variable "proxmox_vm_cpu_type" { 28 | description = "Type of CPU for the Proxmox Virtual Machine." 29 | type = string 30 | default = "host" 31 | } 32 | 33 | variable "proxmox_vm_memory" { 34 | description = "Amount of memory for the Proxmox Virtual Machine in megabytes." 35 | type = number 36 | default = 2048 37 | } 38 | 39 | variable "proxmox_vm_ip" { 40 | description = "IP address for the Proxmox Virtual Machine." 41 | type = string 42 | } 43 | 44 | variable "proxmox_vm_gateway" { 45 | description = "Gateway IP address for the Proxmox Virtual Machine." 46 | type = string 47 | default = "192.168.0.1" 48 | } 49 | 50 | variable "proxmox_vm_virtual_environment_disk_datastore_id" { 51 | description = "ID of the Proxmox VE datastore used for the Virtual Machine disk." 52 | type = string 53 | default = "local-lvm" 54 | } 55 | 56 | variable "proxmox_vm_virtual_environment_node_name" { 57 | description = "Name of the Proxmox VE node." 58 | type = string 59 | default = "proxmox01" 60 | } 61 | 62 | variable "proxmox_vm_os_type" { 63 | description = "Type of operating system for the Proxmox Virtual Machine." 64 | type = string 65 | default = "l26" 66 | } 67 | 68 | variable "proxmox_vm_cloud_init_config_ssh_authorized_keys" { 69 | description = "SSH public key used by Cloud-init." 70 | type = string 71 | } 72 | 73 | variable "proxmox_vm_download_file_datastore_id" { 74 | description = "ID of the Proxmox VE datastore used for the Debian qcow2 image." 75 | type = string 76 | default = "local" 77 | } 78 | 79 | variable "proxmox_vm_on_boot" { 80 | description = "Start the Proxmox Virtual Machine when the Proxmox node boots." 81 | type = bool 82 | default = false 83 | } 84 | 85 | variable "proxmox_vm_started" { 86 | description = "Start the Proxmox Virtual Machine after the VM is created." 87 | type = bool 88 | default = false 89 | } 90 | 91 | variable "proxmox_vm_disk_size" { 92 | description = "Size of the Proxmox Virtual Machine disk in gigabytes." 
93 | type = number 94 | default = 50 95 | } 96 | -------------------------------------------------------------------------------- /docker/paperless-ngx/docker-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://github.com/paperless-ngx/paperless-ngx/tree/main/docker/compose 3 | services: 4 | broker: 5 | image: docker.io/library/redis:7 6 | container_name: paperless-ngx-redis 7 | restart: unless-stopped 8 | networks: 9 | - paperless-ngx 10 | volumes: 11 | - paperless-ngx-redis:/data 12 | 13 | db: 14 | image: docker.io/library/postgres:16 15 | container_name: paperless-ngx-postgres 16 | restart: unless-stopped 17 | networks: 18 | - paperless-ngx 19 | volumes: 20 | - ./database:/var/lib/postgresql/data 21 | environment: 22 | POSTGRES_DB: paperless 23 | POSTGRES_USER: paperless 24 | POSTGRES_PASSWORD: ${PG_PASS} 25 | 26 | webserver: 27 | image: ghcr.io/paperless-ngx/paperless-ngx:2.20 28 | container_name: paperless-ngx 29 | restart: unless-stopped 30 | depends_on: 31 | - db 32 | - broker 33 | - gotenberg 34 | - tika 35 | networks: 36 | - paperless-ngx 37 | - caddy 38 | expose: 39 | - 8000 40 | volumes: 41 | - ./data:/usr/src/paperless/data 42 | - ./export:/usr/src/paperless/export 43 | - /srv/dev-disk-by-uuid-8b39b057-49ce-42d6-90c9-2b138ef4dee3/media:/usr/src/paperless/media 44 | - /srv/dev-disk-by-uuid-8b39b057-49ce-42d6-90c9-2b138ef4dee3/consume:/usr/src/paperless/consume 45 | environment: 46 | PAPERLESS_REDIS: redis://paperless-ngx-redis:6379 47 | PAPERLESS_DBHOST: paperless-ngx-postgres 48 | PAPERLESS_DBPASS: ${PG_PASS} 49 | PAPERLESS_TIKA_ENABLED: 1 50 | PAPERLESS_TIKA_GOTENBERG_ENDPOINT: http://paperless-ngx-gotenberg:3000 51 | PAPERLESS_TIKA_ENDPOINT: http://paperless-ngx-tika:9998 52 | PAPERLESS_SECRET_KEY: ${PAPERLESS_SECRET_KEY} 53 | PAPERLESS_TIME_ZONE: ${PAPERLESS_TIME_ZONE} 54 | PAPERLESS_URL: ${PAPERLESS_URL} 55 | PAPERLESS_OCR_LANGUAGE: ${PAPERLESS_OCR_LANGUAGE} 56 | PAPERLESS_ADMIN_USER: ${PAPERLESS_ADMIN_USER} 57 | PAPERLESS_ADMIN_PASSWORD: ${PAPERLESS_ADMIN_PASSWORD} 58 | PAPERLESS_FILENAME_FORMAT: "{{ created_year }}/{{ document_type }}/{{ correspondent }}/{{ created_year }}_{{ created_month }}_{{ created_day }}_{{ title }}" 59 | PAPERLESS_APPS: "allauth.socialaccount.providers.openid_connect" 60 | PAPERLESS_SOCIALACCOUNT_PROVIDERS: ${PAPERLESS_SOCIALACCOUNT_PROVIDERS} 61 | labels: 62 | caddy: paperless.{$$DOMAIN} 63 | caddy.import: cloudflare 64 | caddy.reverse_proxy: "{{ upstreams 8000 }}" 65 | 66 | gotenberg: 67 | image: docker.io/gotenberg/gotenberg:8.25 68 | container_name: paperless-ngx-gotenberg 69 | restart: unless-stopped 70 | networks: 71 | - paperless-ngx 72 | # The gotenberg chromium route is used to convert .eml files. We do not 73 | # want to allow external content like tracking pixels or even javascript. 
74 | command: 75 | - "gotenberg" 76 | - "--chromium-disable-javascript=true" 77 | - "--chromium-allow-list=file:///tmp/.*" 78 | 79 | tika: 80 | image: docker.io/apache/tika:latest 81 | container_name: paperless-ngx-tika 82 | restart: unless-stopped 83 | networks: 84 | - paperless-ngx 85 | 86 | volumes: 87 | paperless-ngx-redis: 88 | driver: local 89 | 90 | networks: 91 | paperless-ngx: 92 | name: paperless-ngx 93 | caddy: 94 | external: true 95 | -------------------------------------------------------------------------------- /terraform/modules/proxmox_lxc/variables.tf: -------------------------------------------------------------------------------- 1 | # OpenTofu Module - Proxmox LXC - Variables 2 | # LICENSE: MIT 3 | # Author: Daniel Brennand 4 | 5 | variable "proxmox_container_download_file_datastore_id" { 6 | description = "Name of the Proxmox Datastore to save the tar.xz image to." 7 | type = string 8 | default = "local" 9 | } 10 | 11 | variable "proxmox_container_virtual_environment_node_name" { 12 | description = "Name of the Proxmox VE node." 13 | type = string 14 | default = "proxmox01" 15 | } 16 | 17 | variable "proxmox_container_id" { 18 | description = "ID of the Proxmox LXC." 19 | type = number 20 | } 21 | 22 | variable "proxmox_container_tags" { 23 | description = "Tags for the Proxmox LXC." 24 | type = list(string) 25 | default = ["lxc", "opentofu"] 26 | } 27 | 28 | variable "proxmox_container_cores" { 29 | description = "Number of CPU cores for the Proxmox LXC." 30 | type = number 31 | default = 1 32 | } 33 | 34 | variable "proxmox_container_memory_dedicated" { 35 | description = "Amount of memory for the Proxmox LXC." 36 | type = number 37 | default = 1024 38 | } 39 | 40 | variable "proxmox_container_memory_swap" { 41 | description = "Amount of swap memory for the Proxmox LXC." 42 | type = number 43 | default = 512 44 | } 45 | 46 | variable "proxmox_container_hostname" { 47 | description = "Hostname for the Proxmox LXC." 48 | type = string 49 | } 50 | 51 | variable "proxmox_container_dns_domain" { 52 | description = "DNS search domain for the Proxmox LXC." 53 | type = string 54 | } 55 | 56 | variable "proxmox_container_dns_servers" { 57 | description = "DNS servers for the Proxmox LXC." 58 | type = list(string) 59 | default = ["192.168.0.2", "192.168.0.5"] 60 | } 61 | 62 | variable "proxmox_container_ip" { 63 | description = "IP address for the Proxmox LXC." 64 | type = string 65 | } 66 | 67 | variable "proxmox_container_gateway" { 68 | description = "Gateway IP address for the Proxmox LXC." 69 | type = string 70 | default = "192.168.0.1" 71 | } 72 | 73 | variable "proxmox_container_keys" { 74 | description = "SSH keys for the Proxmox LXC." 75 | type = list(string) 76 | default = [] 77 | } 78 | 79 | variable "proxmox_container_password" { 80 | description = "Password for the Proxmox LXC." 81 | type = string 82 | sensitive = true 83 | } 84 | 85 | variable "proxmox_container_started" { 86 | description = "Boolean to start the Proxmox LXC once created." 87 | type = bool 88 | default = false 89 | } 90 | 91 | variable "proxmox_container_start_on_boot" { 92 | description = "Boolean to start the Proxmox LXC once the Proxmox node boots." 93 | type = bool 94 | default = false 95 | } 96 | 97 | variable "proxmox_container_unprivileged" { 98 | description = "Boolean to specify whether the Proxmox LXC is unprivileged or not." 
99 | type = bool 100 | default = true 101 | } 102 | 103 | variable "proxmox_container_disk_datastore_id" { 104 | description = "ID of the Proxmox datastore to store the Proxmox LXC disk." 105 | type = string 106 | default = "local-lvm" 107 | } 108 | 109 | variable "proxmox_container_disk_size" { 110 | description = "Size of the Proxmox LXC disk in gigabytes." 111 | type = number 112 | default = 4 113 | } 114 | -------------------------------------------------------------------------------- /docs/ansible/execution-environment.md: -------------------------------------------------------------------------------- 1 | # :simple-ansible: Execution Environment 2 | 3 | An Ansible [Execution Environment (EE)](https://docs.ansible.com/ansible/latest/getting_started_ee/index.html) is used to run Ansible content against devices in my Homelab. 4 | 5 | The EE is built using [`ansible-builder`](https://ansible.readthedocs.io/projects/builder/en/latest/). 6 | 7 | ## :simple-files: Execution Environment Files 8 | 9 | Files relating to the Execution Environment are located in [`ansible/ee`](https://github.com/dbrennand/home-ops/tree/main/ansible/ee). 10 | 11 | | File Path | Description | 12 | | --------- | ----------- | 13 | | [`ansible/ee/execution-environment.yml`](https://github.com/dbrennand/home-ops/blob/main/ansible/ee/execution-environment.yml) | Configuration file used by `ansible-builder` to create the EE. | 14 | | [`ansible/ee/requirements.txt`](https://github.com/dbrennand/home-ops/blob/main/ansible/ee/requirements.txt) | Extra Python dependencies to include in the EE. | 15 | | [`ansible/ee/requirements.yml`](https://github.com/dbrennand/home-ops/blob/main/ansible/ee/requirements.yml) | Ansible collections and roles to include in the EE. | 16 | | [`ansible/ee/custom_entrypoint.sh`](https://github.com/dbrennand/home-ops/blob/main/ansible/ee/custom_entrypoint.sh) | Entrypoint script used to configure specific environment variables for the 1Password CLI and SSH agent socket. | 17 | 18 | ## :simple-1password: Secrets 19 | 20 | The 1Password CLI is [installed](https://github.com/dbrennand/home-ops/blob/main/ansible/ee/execution-environment.yml#L18) in the EE to retrieve secrets for devices and services. 21 | 22 | ## :simple-githubactions: Automated Build 23 | 24 | A [GitHub Action](https://github.com/dbrennand/home-ops/blob/main/.github/workflows/build-ee.yml) is set up to automatically rebuild the EE when changes are made to files in [`ansible/ee`](https://github.com/dbrennand/home-ops/tree/main/ansible/ee). 25 | 26 | ## Using the Execution Environment 27 | 28 | ### Ansible Navigator 29 | 30 | [`ansible-navigator`](https://ansible.readthedocs.io/projects/navigator/) is used on my MacBook M1 Pro Max to run Ansible content against devices in my Homelab. Under the hood, `ansible-navigator` uses [`ansible-runner`](https://ansible.readthedocs.io/projects/runner/en/latest/) to interact with the container engine to launch the EE. I use [OrbStack](https://orbstack.dev/), which has a compatible [Docker engine](https://docs.orbstack.dev/docker/). 31 | 32 | `ansible-navigator` is configured using the [`ansible-navigator.yml`](https://github.com/dbrennand/home-ops/blob/main/ansible/ansible-navigator.yml) file.
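With that configuration in place, running a playbook inside the EE looks like the following (an illustrative invocation; any playbook under `playbooks/` works the same way):

```bash
# Run from the ansible/ directory so ansible-navigator.yml is picked up.
# --mode stdout streams plain ansible-playbook output instead of the TUI.
ansible-navigator run playbooks/playbook-tailscale.yml --mode stdout
```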
I use [specific configuration](https://github.com/dbrennand/home-ops/blob/main/ansible/ansible-navigator.yml#L7) so that the EE can access the [1Password SSH Agent](https://developer.1password.com/docs/ssh/agent/) running on my MacBook to connect to devices. Furthermore, as mentioned above, the EE has the 1Password CLI installed, which is used by the [`community.general.onepassword`](https://docs.ansible.com/ansible/latest/collections/community/general/onepassword_lookup.html) lookup plugin to retrieve secrets from a 1Password vault. 33 | 34 | ### AWX 35 | 36 | The EE is used by my AWX instance to run Ansible content against devices in my Homelab. See [AWX](awx.md) for more information. 37 | -------------------------------------------------------------------------------- /docs/ansible/minecraft.md: -------------------------------------------------------------------------------- 1 | # :material-minecraft: Minecraft 2 | 3 | !!! note 4 | 5 | This page has been archived and kept for reference. Some of the links on this page may no longer work. 6 | 7 | The Minecraft [playbook](https://github.com/dbrennand/home-ops/blob/main/ansible/playbooks/playbook-minecraft.yml) is used to deploy Minecraft servers on Ubuntu Server 22.04 LTS in my Homelab. 8 | 9 | ## :simple-ansible: Ansible Playbook 10 | 11 | The playbook configures two Minecraft servers, `minecraft01` and `minecraft02`, each with different configuration. Both Minecraft servers are deployed using the [itzg/minecraft-server](https://github.com/itzg/docker-minecraft-server) container image. 12 | 13 | ## Vanilla Server 14 | 15 | The `minecraft01` server is deployed with [Paper MC](https://papermc.io/). Server-specific settings are located in [`ansible/vars/paper-minecraft.yml`](https://github.com/dbrennand/home-ops/blob/main/ansible/vars/paper-minecraft.yml). 16 | 17 | ## Modded Server 18 | 19 | The `minecraft02` server is deployed with the [All the Mods 9 (ATM9)](https://www.curseforge.com/minecraft/modpacks/all-the-mods-9) modpack. Server-specific settings are located in [`ansible/vars/modded-minecraft.yml`](https://github.com/dbrennand/home-ops/blob/main/ansible/vars/modded-minecraft.yml). 20 | 21 | ### Staging the Modpack Server ZIP File 22 | 23 | To run ATM9 on the `minecraft02` server, the modpack server ZIP file must be staged on the server prior to running the Ansible playbook. 24 | 25 | 1. Download the modpack server ZIP file from [CurseForge](https://www.curseforge.com/minecraft/modpacks/all-the-mods-9/files/5125809/additional-files). 26 | 27 | 2. SCP the modpack server ZIP file to `minecraft02`: 28 | 29 | ```bash 30 | ssh daniel@minecraft02.net.dbren.uk mkdir -pv ~/modpacks 31 | scp /path/to/Server-Files-0.2.41.zip minecraft02.net.dbren.uk:~/modpacks/ 32 | ``` 33 | 34 | 3. Update the [`ansible/vars/modded-minecraft.yml`](https://github.com/dbrennand/home-ops/blob/main/ansible/vars/modded-minecraft.yml) file with the correct modpack server ZIP file name: 35 | 36 | ```yaml 37 | minecraft_options: 38 | CF_SERVER_MOD: /modpacks/Server-Files-0.2.41.zip 39 | ``` 40 | 41 | ## Server Files & World Backup 42 | 43 | The Ansible playbook is configured to deploy the [itzg/mc-backup](https://github.com/itzg/docker-mc-backup) container image, which backs up the Minecraft server files and world to a Backblaze B2 S3 bucket. This occurs every 24 hours. 44 | 45 | See [dbrennand | home-ops Backblaze](../infrastructure/backblaze.md) for more information on how to configure the Backblaze B2 S3 bucket.
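The backup service the playbook deploys boils down to something like the following Compose definition (a minimal sketch: the bucket name and secrets are placeholders, and the environment variable names follow itzg/mc-backup's restic method together with restic's native B2 support):

```yaml
# Illustrative sketch of the itzg/mc-backup service; bucket name and
# secrets are placeholders, not the playbook's actual values.
services:
  backup:
    image: itzg/mc-backup
    restart: unless-stopped
    environment:
      BACKUP_METHOD: restic
      BACKUP_INTERVAL: 24h
      RCON_HOST: minecraft                        # the itzg/minecraft-server container
      RCON_PASSWORD: ${RCON_PASSWORD}
      RESTIC_REPOSITORY: b2:my-minecraft-bucket   # hypothetical bucket name
      RESTIC_PASSWORD: ${RESTIC_PASSWORD}
      B2_ACCOUNT_ID: ${B2_ACCOUNT_ID}             # restic's native B2 credentials
      B2_ACCOUNT_KEY: ${B2_ACCOUNT_KEY}
    volumes:
      - ./data:/data:ro                           # Minecraft server data, read-only
```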
46 | 47 | ### itzg/mc-backup - Removing Stale Locks 48 | 49 | You may come across the following error in the logs. This occurs when the server is shut down unexpectedly during a backup and the restic lock file is not removed: 50 | 51 | ```bash 52 | docker logs minecraft-backup 53 | # ... 54 | ERROR the `unlock` command can be used to remove stale locks 55 | ``` 56 | 57 | Remove the lock file by running `restic unlock`: 58 | 59 | ```bash 60 | docker restart minecraft-backup; docker exec -it minecraft-backup restic -r b2: unlock 61 | ``` 62 | 63 | ## :simple-tailscale: Tailscale 64 | 65 | Tailscale is used to allow friends to connect to the server remotely. 66 | 67 | ## :simple-opentofu: OpenTofu 68 | 69 | Both servers are deployed using [OpenTofu](../infrastructure/opentofu.md). 70 | 71 | ## Usage 72 | 73 | 1. Clone the repository: 74 | 75 | ```bash 76 | git clone https://github.com/dbrennand/home-ops.git && cd home-ops/ansible 77 | ``` 78 | 79 | 2. Create the Python virtual environment and install Ansible dependencies: 80 | 81 | ```bash 82 | task venv 83 | task ansible:requirements 84 | ``` 85 | 86 | 3. Verify Ansible can connect to the server: 87 | 88 | ```bash 89 | task ansible:adhoc -- minecraft -m ping 90 | ``` 91 | 92 | 4. Run the playbook: 93 | 94 | ```bash 95 | task ansible:play -- playbooks/playbook-minecraft.yml 96 | # The following tags are supported: minecraft, backup 97 | # Example using tags: 98 | task ansible:play -- playbooks/playbook-minecraft.yml --tags minecraft 99 | ``` 100 | -------------------------------------------------------------------------------- /terraform/modules/proxmox_lxc/main.tf: -------------------------------------------------------------------------------- 1 | # OpenTofu Module - Proxmox LXC 2 | # LICENSE: MIT 3 | # Author: Daniel Brennand 4 | 5 | terraform { 6 | required_version = ">= 1.6.2" 7 | # https://discuss.hashicorp.com/t/using-a-non-hashicorp-provider-in-a-module/21841/2 8 | required_providers { 9 | proxmox = { 10 | source = "bpg/proxmox" 11 | version = "0.89.1" 12 | } 13 | } 14 | } 15 | 16 | locals { 17 | datetime = timestamp() 18 | proxmox_container_description = "Created by OpenTofu at ${local.datetime}" 19 | } 20 | 21 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_download_file 22 | resource "proxmox_virtual_environment_download_file" "latest_almalinux_vztmpl" { 23 | content_type = "vztmpl" 24 | datastore_id = var.proxmox_container_download_file_datastore_id 25 | node_name = var.proxmox_container_virtual_environment_node_name 26 | file_name = "almalinux-9-cloud_amd64.tar.xz" 27 | url = "https://images.linuxcontainers.org/images/almalinux/9/amd64/cloud/20250223_23:08/rootfs.tar.xz" 28 | checksum = "d593648a6a3a3ba6bd8a410e1d3e1688bf12f4fa9e62dcf31aedb8cf729313e5" 29 | checksum_algorithm = "sha256" 30 | } 31 | 32 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_container 33 | # Assumed default user is root 34 | resource "proxmox_virtual_environment_container" "container" { 35 | vm_id = var.proxmox_container_id 36 | description = local.proxmox_container_description 37 | tags = var.proxmox_container_tags 38 | node_name = var.proxmox_container_virtual_environment_node_name 39 | 40 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_container#cpu-2 41 | cpu { 42 | cores = var.proxmox_container_cores 43 | } 44 | 45 | #
https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_container#memory-4 46 | memory { 47 | dedicated = var.proxmox_container_memory_dedicated 48 | swap = var.proxmox_container_memory_swap 49 | } 50 | 51 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_container#initialization-2 52 | initialization { 53 | hostname = var.proxmox_container_hostname 54 | 55 | dns { 56 | domain = var.proxmox_container_dns_domain 57 | servers = var.proxmox_container_dns_servers 58 | } 59 | 60 | ip_config { 61 | ipv4 { 62 | address = var.proxmox_container_ip 63 | gateway = var.proxmox_container_gateway 64 | } 65 | } 66 | 67 | user_account { 68 | keys = var.proxmox_container_keys 69 | password = var.proxmox_container_password 70 | } 71 | } 72 | 73 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_container#operating_system-2 74 | operating_system { 75 | template_file_id = proxmox_virtual_environment_download_file.latest_almalinux_vztmpl.id 76 | type = "centos" 77 | } 78 | 79 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_container#started-1 80 | started = var.proxmox_container_started 81 | 82 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_container#start_on_boot-1 83 | start_on_boot = var.proxmox_container_start_on_boot 84 | 85 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_container#unprivileged-1 86 | unprivileged = var.proxmox_container_unprivileged 87 | 88 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_container#network_interface-1 89 | network_interface { 90 | name = "eth0" 91 | } 92 | 93 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_container#disk-1 94 | disk { 95 | datastore_id = var.proxmox_container_disk_datastore_id 96 | size = var.proxmox_container_disk_size 97 | } 98 | 99 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_container#features-2 100 | features { 101 | keyctl = true 102 | nesting = true 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /terraform/proxmox.tf: -------------------------------------------------------------------------------- 1 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs 2 | provider "proxmox" { 3 | endpoint = data.onepassword_item.proxmox_virtual_environment.url 4 | username = "${data.onepassword_item.proxmox_virtual_environment.username}@pam" 5 | password = data.onepassword_item.proxmox_virtual_environment.password 6 | ssh { 7 | agent = true 8 | } 9 | } 10 | 11 | resource "proxmox_virtual_environment_download_file" "almalinux_9_latest_cloud_image" { 12 | content_type = "iso" 13 | datastore_id = "local" 14 | node_name = "proxmox01" 15 | file_name = "AlmaLinux-9-GenericCloud-latest.x86_64.qcow2.img" 16 | url = "https://repo.almalinux.org/almalinux/9/cloud/x86_64/images/AlmaLinux-9-GenericCloud-latest.x86_64.qcow2" 17 | checksum = "b08cd5db79bf32860412f5837e8c7b8df9447e032376e3c622840b31aaf26bc6" 18 | checksum_algorithm = "sha256" 19 | } 20 | 21 | module "proxmox_cloud_init_config_idp01" { 22 | source = "./modules/proxmox_cloud_init_config" 23 | proxmox_cloud_init_config_vm_name = "idp01" 24 | proxmox_cloud_init_config_virtual_environment_node_name = "proxmox01" 25 | 
proxmox_cloud_init_config_ssh_authorized_keys = data.onepassword_item.ssh_key.public_key 26 | } 27 | 28 | resource "proxmox_virtual_environment_vm" "idp01" { 29 | name = "idp01" 30 | vm_id = 106 31 | description = "Created with OpenTofu." 32 | tags = ["vm", "opentofu", "tailscale", "192.168.0.11"] 33 | node_name = "proxmox01" 34 | on_boot = false 35 | started = true 36 | 37 | agent { 38 | enabled = true 39 | trim = true 40 | } 41 | 42 | cpu { 43 | cores = 2 44 | type = "host" 45 | } 46 | 47 | scsi_hardware = "virtio-scsi-single" 48 | 49 | disk { 50 | interface = "scsi0" 51 | datastore_id = "lv-ssd-crucial" 52 | file_id = proxmox_virtual_environment_download_file.almalinux_9_latest_cloud_image.id 53 | size = 50 54 | discard = "on" 55 | ssd = true 56 | iothread = true 57 | } 58 | 59 | memory { 60 | dedicated = 2048 61 | } 62 | 63 | initialization { 64 | datastore_id = "lv-ssd-crucial" 65 | ip_config { 66 | ipv4 { 67 | address = "192.168.0.11/24" 68 | gateway = "192.168.0.1" 69 | } 70 | } 71 | user_data_file_id = module.proxmox_cloud_init_config_idp01.cloud_init_config_id 72 | } 73 | 74 | operating_system { 75 | type = "l26" 76 | } 77 | 78 | network_device { 79 | bridge = "vmbr0" 80 | enabled = true 81 | firewall = true 82 | } 83 | } 84 | 85 | module "proxmox_cloud_init_config_apps01" { 86 | source = "./modules/proxmox_cloud_init_config" 87 | proxmox_cloud_init_config_vm_name = "apps01" 88 | proxmox_cloud_init_config_virtual_environment_node_name = "proxmox01" 89 | proxmox_cloud_init_config_ssh_authorized_keys = data.onepassword_item.ssh_key.public_key 90 | } 91 | 92 | resource "proxmox_virtual_environment_vm" "apps01" { 93 | name = "apps01" 94 | vm_id = 107 95 | description = "Created with OpenTofu." 96 | tags = ["vm", "opentofu", "tailscale", "192.168.0.12"] 97 | node_name = "proxmox01" 98 | on_boot = false 99 | started = true 100 | 101 | agent { 102 | enabled = true 103 | trim = true 104 | } 105 | 106 | cpu { 107 | cores = 2 108 | type = "host" 109 | } 110 | 111 | scsi_hardware = "virtio-scsi-single" 112 | 113 | disk { 114 | interface = "scsi0" 115 | datastore_id = "lv-ssd-crucial" 116 | file_id = proxmox_virtual_environment_download_file.almalinux_9_latest_cloud_image.id 117 | size = 100 118 | discard = "on" 119 | ssd = true 120 | iothread = true 121 | } 122 | 123 | memory { 124 | dedicated = 2048 125 | } 126 | 127 | initialization { 128 | datastore_id = "lv-ssd-crucial" 129 | ip_config { 130 | ipv4 { 131 | address = "192.168.0.12/24" 132 | gateway = "192.168.0.1" 133 | } 134 | } 135 | user_data_file_id = module.proxmox_cloud_init_config_apps01.cloud_init_config_id 136 | } 137 | 138 | operating_system { 139 | type = "l26" 140 | } 141 | 142 | network_device { 143 | bridge = "vmbr0" 144 | enabled = true 145 | firewall = true 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /terraform/modules/proxmox_vm/main.tf: -------------------------------------------------------------------------------- 1 | # OpenTofu Module - Proxmox Virtual Machine 2 | # LICENSE: MIT 3 | # Author: Daniel Brennand 4 | 5 | terraform { 6 | required_version = ">= 1.6.2" 7 | # https://discuss.hashicorp.com/t/using-a-non-hashicorp-provider-in-a-module/21841/2 8 | required_providers { 9 | proxmox = { 10 | source = "bpg/proxmox" 11 | version = "0.89.1" 12 | } 13 | } 14 | } 15 | 16 | locals { 17 | proxmox_vm_description = "Created by OpenTofu." 
18 | } 19 | 20 | # https://registry.terraform.io/providers/bpg/proxmox/latest/docs/resources/virtual_environment_file 21 | resource "proxmox_virtual_environment_file" "cloud_init_config" { 22 | content_type = "snippets" 23 | # Local is the only datastore in my Homelab which supports the snippets content type 24 | datastore_id = "local" 25 | node_name = var.proxmox_vm_virtual_environment_node_name 26 | source_raw { 27 | file_name = "cloud-init-config-${var.proxmox_vm_name}.yaml" 28 | data = <- 53 | # Get this in the keys.txt file from previous step 54 | ``` 55 | 56 | ## Talos ISO 57 | 58 | This is the [ISO image](https://github.com/siderolabs/talos/releases/download/v1.11.5/metal-amd64.iso) I used when originally deploying my Talos node. 59 | 60 | ## Deploy the Talos ISO on Proxmox 61 | 62 | 1. Download the ISO to the Node 1 `local` storage. 63 | 64 | 2. Navigate to `proxmox01` > `Create VM`. 65 | 66 | 3. Provide the following details for `General` and click **Next**: 67 | 68 | | Setting | Value | 69 | | ------------- | ----------- | 70 | | Name | `talos01` | 71 | | ID | 100 | 72 | | Node | `proxmox01` | 73 | | Start at boot | ❌ | 74 | 75 | 4. Under `OS`, select the storage where the ISO was downloaded to and choose the `metal-amd64.iso`. Click **Next**. 76 | 77 | 5. Under `System`, select the `VirtIO SCSI Single` controller, enable the `Qemu Agent` and click **Next**. 78 | 79 | 6. Provide the following details for `Disks` and click **Next**: 80 | 81 | | Setting | Value | 82 | | ------------- | ---------------------- | 83 | | Bus/Device | `SCSI` | 84 | | Storage | `lv-ssd-crucial` | 85 | | Size | `100GiB` | 86 | | Format | `Raw disk image (raw)` | 87 | | Discard | ✅ | 88 | | SSD Emulation | ✅ | 89 | | IO thread | ✅ | 90 | | Backup | ✅ | 91 | 92 | | Setting | Value | 93 | | ------------- | ---------------------- | 94 | | Bus/Device | `SCSI` | 95 | | Storage | `lv-ssd-crucial` | 96 | | Size | `200GiB` | 97 | | Format | `Raw disk image (raw)` | 98 | | Discard | ✅ | 99 | | SSD Emulation | ✅ | 100 | | IO thread | ✅ | 101 | | Backup | ✅ | 102 | 103 | 7. Provide the following details for `CPU` and click **Next**: 104 | 105 | | Setting | Value | 106 | | ------- | ------ | 107 | | Cores | `4` | 108 | | Type | `host` | 109 | 110 | 8. Provide the following details for `Memory` and click **Next**: 111 | 112 | | Setting | Value | 113 | | ----------------- | ------ | 114 | | Memory (MiB) | `4096` | 115 | | Ballooning Device | ✅ | 116 | | Minimum Memory | `4096` | 117 | 118 | 9. Leave `Network` as default, click **Next** and confirm deployment. 119 | 120 | 10. Start the `talos01` VM and open the console to begin the installation. 121 | 122 | 11. In the console, press E to edit the GRUB menu entry and append the following to the end of the kernel boot options: 123 | 124 | ``` 125 | ip=192.168.0.10::192.168.0.1:255.255.255.0::eth0:off 126 | ``` 127 | 128 | 12. Press CTRL + X to boot. 129 | 130 | Talos will now boot into maintenance mode and wait for a machine configuration to be applied. It should look like the screenshot below: 131 | 132 | ![Talos Maintenance Mode](../../assets/images/Talos.png) 133 | 134 | ## Deploying Talos 135 | 136 | ### Generating and applying the Machine Configuration 137 | 138 | !!! info 139 | 140 | [Referenced Documentation](https://budimanjojo.github.io/talhelper/latest/getting-started/#you-are-starting-from-scratch). 141 | 142 | These steps only need to be performed during the initial setup of Talos and assume you've already created the `talconfig.yaml`, `talenv.sops.yaml` and `.sops.yaml` files.
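For orientation, a minimal single-node `talconfig.yaml` might look roughly like the sketch below. The field names follow the talhelper schema, but the values are illustrative assumptions (the cluster name and install disk in particular) rather than a copy of my actual configuration:

```bash
# Illustrative sketch only: the cluster name and install disk are assumptions,
# not my actual configuration.
cat > talconfig.yaml <<'EOF'
clusterName: home-ops
talosVersion: v1.11.5
endpoint: https://192.168.0.10:6443
nodes:
  - hostname: talos01
    ipAddress: 192.168.0.10
    controlPlane: true
    installDisk: /dev/sda
EOF
```

With the three files in place, the workflow below generates the cluster secrets, encrypts them with sops, renders the machine configuration and applies it to the node in maintenance mode: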
143 | 144 | ```bash 145 | cd kubernetes/talos 146 | talhelper gensecret > talsecret.sops.yaml 147 | sops -e -i talsecret.sops.yaml 148 | sops -e -i talenv.sops.yaml 149 | talhelper genconfig 150 | talosctl apply-config --talosconfig=./clusterconfig/talosconfig --nodes=192.168.0.10 --file=./clusterconfig/home-ops-talos01.yaml --insecure 151 | ``` 152 | 153 | ### Bootstrap Talos 154 | 155 | 1. Bootstrap etcd: 156 | 157 | ```bash 158 | talosctl bootstrap --talosconfig=./clusterconfig/talosconfig --nodes=192.168.0.10 159 | ``` 160 | 161 | 2. Retrieve the `kubeconfig`: 162 | 163 | ```bash 164 | talosctl kubeconfig --talosconfig=./clusterconfig/talosconfig --nodes=192.168.0.10 165 | ``` 166 | 167 | Once the machine configuration has finished applying and `etcd` has been bootstrapped, the node should show as below: 168 | 169 | ![Talos Deployed](../../assets/images/TalosDeployed.png) 170 | -------------------------------------------------------------------------------- /docs/infrastructure/media.md: -------------------------------------------------------------------------------- 1 | # :material-folder: Media Server 2 | 3 | Media server for hosting files and various containerised services. 4 | 5 | !!! quote "What is OpenMediaVault?" 6 | 7 | [OpenMediaVault](https://www.openmediavault.org/) is the next generation network attached storage (NAS) solution based on Debian Linux. It contains services like SSH, (S)FTP, SMB/CIFS, RSync and many more ready to use. 8 | 9 | ## Deployment 10 | 11 | The media server is deployed as a VM on Proxmox using the [OpenMediaVault ISO](https://www.openmediavault.org/download.html). 12 | 13 | 1. Download the ISO to the Node 1 storage. 14 | 15 | 2. Navigate to `proxmox01` > `Create VM`. 16 | 17 | 3. Provide the following details for `General` and click **Next**: 18 | 19 | | Setting | Value | 20 | | ------------- | ----------- | 21 | | Name | `media01` | 22 | | Node | `proxmox01` | 23 | | Start at boot | ❌ | 24 | 25 | 4. Under `OS`, select the storage where the ISO was downloaded to and choose the OpenMediaVault ISO image. Click **Next**. 26 | 27 | 5. Under `System`, select the `VirtIO SCSI Single` controller and click **Next**. 28 | 29 | 6. Provide the following details for `Disks` and click **Next**: 30 | 31 | | Setting | Value | 32 | | ------------- | ---------------------- | 33 | | Bus/Device | `SCSI` | 34 | | Storage | `local-lvm` | 35 | | Size | `50GiB` | 36 | | Format | `Raw disk image (raw)` | 37 | | Discard | ✅ | 38 | | SSD Emulation | ✅ | 39 | 40 | | Setting | Value | 41 | | ------------- | ---------------------- | 42 | | Bus/Device | `SCSI` | 43 | | Storage | `lv-ssd-samsung` | 44 | | Size | `930GiB` | 45 | | Format | `Raw disk image (raw)` | 46 | | Discard | ✅ | 47 | | SSD Emulation | ✅ | 48 | 49 | | Setting | Value | 50 | | ------------- | ---------------------- | 51 | | Bus/Device | `SCSI` | 52 | | Storage | `lv-ssd-crucial` | 53 | | Size | `50GiB` | 54 | | Format | `Raw disk image (raw)` | 55 | | Discard | ✅ | 56 | | SSD Emulation | ✅ | 57 | 58 | 7. Provide the following details for `CPU` and click **Next**: 59 | 60 | | Setting | Value | 61 | | ------- | ------ | 62 | | Cores | `4` | 63 | | Type | `host` | 64 | 65 | 8. Provide the following details for `Memory` and click **Next**: 66 | 67 | | Setting | Value | 68 | | ----------------- | ------ | 69 | | Memory (MiB) | `8192` | 70 | | Ballooning Device | ✅ | 71 | | Minimum Memory | `1024` | 72 | 73 | 9. Leave `Network` as default, click **Next** and confirm deployment. 74 | 75 | 10.
Start the `media01` VM and open the console to begin the installation. 76 | 77 | 11. Follow the on-screen instructions to install OpenMediaVault; when prompted, enter the following details: 78 | 79 | | Setting | Value | 80 | | --------------- | --------------- | 81 | | Hostname | `media01` | 82 | | Domain Name | `net.dbren.uk` | 83 | | Email | Enter email | 84 | | Password | Enter password | 85 | | Default Gateway | `192.168.0.1` | 86 | | Subnet Mask | `255.255.255.0` | 87 | | IP Address | `192.168.0.9` | 88 | 89 | 12. Once installation has completed, log in to the web interface using the FQDN and credentials entered during installation. 90 | 91 | ## Post Installation 92 | 93 | !!! info 94 | 95 | Where required, make sure to apply changes before moving on to the next step. A yellow box will appear after certain operations if this is necessary. 96 | 97 | 1. Navigate to `System` > `Date & Time` and set the time zone to `Europe/London`. 98 | 99 | 2. Under `Storage` > `File Systems`, click the `+` symbol and use the following configuration, repeating for each file system: 100 | 101 | | Setting | Value | 102 | | ----------- | ---------- | 103 | | File System | `EXT4` | 104 | | Device | `/dev/sdb` | 105 | | Label | `apps` | 106 | 107 | | Setting | Value | 108 | | ----------- | --------------- | 109 | | File System | `EXT4` | 110 | | Device | `/dev/sdc` | 111 | | Label | `paperless-ngx` | 112 | 113 | Click **Save** when finished. 114 | 115 | The file system configuration should look like below: 116 | 117 | ![OMV File Systems](../assets/images/OMVFileSystems.png) 118 | 119 | 3. Navigate to `Storage` > `Shared Folders`, click the `+` symbol and use the following configuration, repeating for each shared folder: 120 | 121 | | Setting | Value | 122 | | ------------- | ----------------------------------------------------------------- | 123 | | Name | `apps` | 124 | | Device | `/dev/sdb1` | 125 | | Permissions | `Administrator: read/write, Users: read/write, Others: read-only` | 126 | | Relative Path | `apps/` | 127 | | Tags | `apps` | 128 | 129 | | Setting | Value | 130 | | ------------- | -------------------------- | 131 | | Name | `consume` | 132 | | Device | `/dev/sdc1` | 133 | | Permissions | `Everyone: read/write` | 134 | | Relative Path | `consume/` | 135 | | Tags | `consume`, `paperless-ngx` | 136 | 137 | | Setting | Value | 138 | | ------------- | ----------------------------------------------------------------- | 139 | | Name | `media` | 140 | | Device | `/dev/sdc1` | 141 | | Permissions | `Administrator: read/write, Users: read/write, Others: read-only` | 142 | | Relative Path | `media/` | 143 | | Tags | `media`, `paperless-ngx` | 144 | 145 | 146 | The shared folder configuration should look like below: 147 | 148 | ![OMV Shared Folders](../assets/images/OMVSharedFolders.png) 149 | 150 | 4. Navigate to `Services` > `SSH` and configure the following settings: 151 | 152 | | Setting | Value | 153 | | ------------------------- | ----- | 154 | | Password Authentication | ❌ | 155 | | Public Key Authentication | ✅ | 156 | 157 | ### SMB/CIFS 158 | 159 | The steps below can be used to create an SMB/CIFS share for the Paperless-ngx consume directory. 160 | 161 | 1. Navigate to `Services` > `SMB/CIFS` > `Settings` and configure the following settings: 162 | 163 | | Setting | Value | 164 | | ------------------------ | ------ | 165 | | Enabled | ✅ | 166 | | Browsable | ✅ | 167 | | Minimum protocol version | `SMB3` | 168 | 169 | 2.
Navigate to `Services` > `SMB/CIFS` > `Shares` and click the `+` symbol to add a new share with the following configuration: 170 | 171 | | Setting | Value | 172 | | ------------------- | ---------------------------------- | 173 | | Enabled | ✅ | 174 | | Shared Folder | `consume [consume, paperless-ngx]` | 175 | | Comment | `Paperless-ngx consume directory` | 176 | | Public | Guests only | 177 | | Browsable | ✅ | 178 | | Inherit ACLs | ✅ | 179 | | Inherit Permissions | ✅ | 180 | 181 | The share configuration should look like below: 182 | 183 | ![OMV SMB Share](../assets/images/OMVSMBShare.png) 184 | -------------------------------------------------------------------------------- /docs/infrastructure/proxmox/backup.md: -------------------------------------------------------------------------------- 1 | # :simple-proxmox: Proxmox Backup Server (PBS) 2 | 3 | !!! quote "What is Proxmox Backup Server?" 4 | 5 | [Proxmox Backup Server](https://proxmox.com/en/proxmox-backup-server/overview) is an enterprise backup solution, for backing up and restoring VMs, containers, and physical hosts. 6 | 7 | ## Deployment 8 | 9 | PBS is deployed as a virtual machine on Proxmox VE Node 1, from an [ISO](https://proxmox.com/en/downloads/proxmox-backup-server) image. 10 | 11 | 1. Download the ISO to the Node 1 storage. 12 | 13 | 2. Navigate to `proxmox01` > `Create VM`. 14 | 15 | 3. Provide the following details for `General` and click **Next**: 16 | 17 | | Setting | Value | 18 | | ------------- | ----------- | 19 | | Name | `backup01` | 20 | | Node | `proxmox01` | 21 | | Start at boot | ✅ | 22 | 23 | 4. Under `OS`, select the storage where the ISO was downloaded to and choose the Proxmox Backup Server ISO image. Click **Next**. 25 | 26 | 5. Under `System`, select the `VirtIO SCSI Single` controller and click **Next**. 27 | 28 | 6. Provide the following details for `Disks` and click **Next**: 29 | 30 | | Setting | Value | 31 | | ------------- | ---------------------- | 32 | | Bus/Device | `SCSI` | 33 | | Storage | `lv-ssd-crucial` | 34 | | Size | `32GiB` | 35 | | Format | `Raw disk image (raw)` | 36 | | Discard | ✅ | 37 | | SSD Emulation | ✅ | 38 | 39 | | Setting | Value | 40 | | ------------- | ---------------------- | 41 | | Bus/Device | `SCSI` | 42 | | Storage | `lv-ssd-crucial` | 43 | | Size | `150GiB` | 44 | | Format | `Raw disk image (raw)` | 45 | | Discard | ✅ | 46 | | SSD Emulation | ✅ | 47 | 48 | 7. Provide the following details for `CPU` and click **Next**: 49 | 50 | | Setting | Value | 51 | | ------- | ------ | 52 | | Cores | `4` | 53 | | Type | `host` | 54 | 55 | 8. Provide the following details for `Memory` and click **Next**: 56 | 57 | | Setting | Value | 58 | | ----------------- | ------ | 59 | | Memory (MiB) | `5120` | 60 | | Ballooning Device | ✅ | 61 | | Minimum Memory | `1024` | 62 | 63 | 9. Leave `Network` as default, click **Next** and confirm deployment. 64 | 65 | 10. Start the `backup01` VM and open the console to begin the installation. 66 | 67 | 11. Follow the on-screen instructions to install PBS; when prompted, enter the following details: 68 | 69 | | Setting | Value | 70 | | --------------- | ----------------------- | 71 | | FQDN | `backup01.net.dbren.uk` | 72 | | Email | Enter email | 73 | | Password | Enter password | 74 | | Default Gateway | `192.168.0.1` | 75 | | IP Address | `192.168.0.6/24` | 76 | 77 | 12. Once installation has completed, log in to the web interface listening on port `8007` using the FQDN and credentials entered during installation.
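Optionally, you can confirm PBS came up correctly before moving on to post-installation. A quick sanity check using standard PBS tooling (nothing below is specific to my setup; the commands can also be run directly in the VM console):

```bash
# Confirm the installed PBS version.
ssh root@backup01.net.dbren.uk proxmox-backup-manager version
# proxmox-backup-proxy is the service behind the web interface on port 8007.
ssh root@backup01.net.dbren.uk systemctl status proxmox-backup-proxy
```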
77 | 78 | ## Post Installation 79 | 80 | Below are the post installation steps for configuring the PBS. 81 | 82 | Copy the SSH public key to the PBS's `authorized_keys` file: 83 | 84 | ```bash 85 | ssh-copy-id root@backup01.net.dbren.uk 86 | ``` 87 | 88 | ### Datastore Creation 89 | 90 | PBS requires a datastore to store backups. In my setup I have two datastores: a [Hetzner Storagebox](https://docs.hetzner.com/robot/storage-box/general) mounted via CIFS on the PBS at `/mnt/storagebox`, and a local disk. 91 | 92 | !!! info "Documentation" 93 | 94 | [Proxmox Backup Server - Backup Storage](https://pbs.proxmox.com/docs/storage.html) 95 | 96 | #### Hetzner Storagebox Datastore 97 | 98 | 1. Use the [`playbook-proxmox-backup-cifs.yml`](https://github.com/dbrennand/home-ops/blob/main/ansible/playbooks/playbook-proxmox-backup-cifs.yml) to mount the CIFS share on the PBS. 99 | 100 | 2. SSH to the PBS and create the datastore: 101 | 102 | ```bash 103 | proxmox-backup-manager datastore create Remote /mnt/storagebox --gc-schedule "sun 04:00" 104 | ``` 105 | 106 | #### Local Datastore 107 | 108 | 1. SSH to the PBS and initialise the disk with a GPT: 109 | 110 | ```bash 111 | proxmox-backup-manager disk initialize sdb 112 | ``` 113 | 114 | 2. Create the datastore: 115 | 116 | ```bash 117 | proxmox-backup-manager disk fs create Local --disk sdb --filesystem ext4 --add-datastore true 118 | ``` 119 | 120 | 3. Configure the datastore: 121 | 122 | ```bash 123 | proxmox-backup-manager datastore update Local --gc-schedule "sun 04:00" 124 | ``` 125 | 126 | ### Verify Job Creation 127 | 128 | The verify job is used to verify the integrity of the backups. SSH to the PBS and use the following command to create the verify job: 129 | 130 | ```bash 131 | proxmox-backup-manager verify-job create verify-Local --store Local --schedule "03:00" --ignore-verified=true --outdated-after=30 132 | ``` 133 | 134 | ### Sync Job Creation 135 | 136 | !!! info "Local & Offsite Copy" 137 | 138 | This makes sure that I have a local copy of backups and an offsite copy on the Hetzner Storagebox. 139 | 140 | Configure the backups to sync from the `Local` datastore to the `Remote` datastore: 141 | 142 | ```bash 143 | proxmox-backup-manager sync-job create sync-pull-Local-Remote --owner 'root@pam' --store Remote --remote-store Local --schedule "02:00" --remove-vanished=true 144 | ``` 145 | 146 | ### :simple-letsencrypt: HTTPS - Web Interface with Let's Encrypt 147 | 148 | !!! info "Cloudflare API Token & Zone ID" 149 | 150 | See the following [instructions](https://github.com/dbrennand/ansible-role-caddy-docker#example---cloudflare-dns-01-challenge) for generating a Cloudflare API Token. 151 | 152 | Furthermore, you will need to obtain your domain's zone ID. This can be found in the Cloudflare dashboard page for the domain, on the right side under *API* > *Zone ID*. 153 | 154 | 1. Log in to the PBS GUI, go to `Configuration` > `Certificates` > `ACME Accounts` > *Accounts* and click **Add**: 155 | 156 | | Setting | Value | 157 | | -------------- | ------------------ | 158 | | Account Name | `default` | 159 | | Email | Enter email | 160 | | ACME Directory | `Let's Encrypt V2` | 161 | | Accept TOS | `True` | 162 | 163 | 2. Click **Register**. 164 | 165 | 3.
Under *Challenge Plugins*, click **Add** and enter the following details: 166 | 167 | | Setting | Value | 168 | | ---------- | -------------------------- | 169 | | Plugin ID | `cloudflare` | 170 | | DNS API | `Cloudflare Managed API` | 171 | | CF_Token | Enter Cloudflare API Token | 172 | | CF_Zone_ID | Enter Cloudflare Zone ID | 173 | 174 | 4. Click **Add**. 175 | 176 | 5. Navigate to `Certificates` and under *ACME* click **Add**: 177 | 178 | | Setting | Value | 179 | | -------------- | ----------------------- | 180 | | Challenge Type | `DNS` | 181 | | Plugin | `cloudflare` | 182 | | Domain | `backup01.net.dbren.uk` | 183 | 184 | 6. Click **Create**. 185 | 186 | 7. Under *ACME*, for `Using Account` click **Edit**, select the `default` account and click **Apply**. 187 | 188 | 8. Click **Order Certificates Now**. 189 | 190 | Once completed, the PBS web interface will reload and show the new certificate. 191 | 192 | ### :fontawesome-solid-terminal: Scripts 193 | 194 | !!! warning 195 | 196 | Proceed with caution: before running any of the scripts below, review the code to ensure it is safe to run. 197 | 198 | 1. Run the [PBS post install](https://github.com/tteck/Proxmox) script on PBS: 199 | 200 | !!! quote 201 | 202 | The script will give options to Disable the Enterprise Repo, Add/Correct PBS Sources, Enable the No-Subscription Repo, Add Test Repo, Disable Subscription Nag, Update Proxmox Backup Server and Reboot PBS. 203 | 204 | ```bash 205 | bash -c "$(wget -qLO - https://github.com/tteck/Proxmox/raw/main/misc/post-pbs-install.sh)" 206 | ``` 207 | 208 | ## Backup Operations 209 | 210 | Backup operations are performed using the [`proxmox-backup-client`](https://pbs.proxmox.com/docs/backup-client.html) command on the Proxmox VE nodes. Below are some common operations. 211 | 212 | ### View all snapshots for a datastore 213 | 214 | ```bash 215 | proxmox-backup-client snapshot list --repository backup01.net.dbren.uk:backup01 216 | ``` 217 | 218 | ### View snapshots for a VM 219 | 220 | ```bash 221 | proxmox-backup-client snapshot list vm/102 --repository backup01.net.dbren.uk:backup01 222 | ``` 223 | 224 | ### Delete a snapshot for a VM 225 | 226 | ```bash 227 | proxmox-backup-client snapshot forget vm/102/2024-06-27T16:57:49Z --repository backup01.net.dbren.uk:backup01 228 | ``` 229 | -------------------------------------------------------------------------------- /docs/miscellaneous/nuc-app-config.md: -------------------------------------------------------------------------------- 1 | # NUC - Application Configuration 2 | 3 | !!! note 4 | 5 | This page has been archived and kept for reference. 6 | 7 | ## :simple-sonarr: Sonarr & :simple-radarr: Radarr 8 | 9 | 1. Go to `Settings > Media Management` and click **Show Advanced**. 10 | 11 | 2. Configure the following settings: 12 | 13 | !!!
tip 14 | 15 | Shoutout to TRaSH Guides for the naming formats and other settings: 16 | 17 | * [Radarr](https://trash-guides.info/Radarr/Radarr-recommended-naming-scheme/) 18 | 19 | * [Sonarr](https://trash-guides.info/Sonarr/Sonarr-recommended-naming-scheme/) 20 | 21 | | Setting | Value | 22 | | ------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 23 | | Rename Episodes / Rename Movies | ✅ | 24 | | Replace Illegal Characters | ✅ | 25 | | Standard Movie Format | `{Movie CleanTitle} {(Release Year)} [imdbid-{ImdbId}] - {Edition Tags }{[Custom Formats]}{[Quality Full]}{[MediaInfo 3D]}{[MediaInfo VideoDynamicRangeType]}{[Mediainfo AudioCodec}{ Mediainfo AudioChannels}][{Mediainfo VideoCodec}]{-Release Group}` | 26 | | Movie Folder Format | `{Movie CleanTitle} ({Release Year}) [imdbid-{ImdbId}]` | 27 | | Standard Episode Format | `{Series TitleYear} - S{season:00}E{episode:00} - {Episode CleanTitle} [{Preferred Words }{Quality Full}]{[MediaInfo VideoDynamicRangeType]}{[Mediainfo AudioCodec}{ Mediainfo AudioChannels]}{[MediaInfo VideoCodec]}{-Release Group}` | 28 | | Daily Episode Format | `{Series TitleYear} - {Air-Date} - {Episode CleanTitle} [{Preferred Words }{Quality Full}]{[MediaInfo VideoDynamicRangeType]}{[Mediainfo AudioCodec}{ Mediainfo AudioChannels]}{[MediaInfo VideoCodec]}{-Release Group}` | 29 | | Anime Episode Format | `{Series TitleYear} - S{season:00}E{episode:00} - {absolute:000} - {Episode CleanTitle} [{Preferred Words }{Quality Full}]{[MediaInfo VideoDynamicRangeType]}[{MediaInfo VideoBitDepth}bit]{[MediaInfo VideoCodec]}[{Mediainfo AudioCodec} { Mediainfo AudioChannels}]{MediaInfo AudioLanguages}{-Release Group}` | 30 | | Series Folder Format | `{Series TitleYear} [tvdbid-{TvdbId}]` | 31 | | Season Folder Format | `Season {season:00}` | 32 | | Multi-Episode Style | Prefixed Range | 33 | | Delete empty folders | ✅ | 34 | | Use Hardlinks instead of Copy | ✅ | 35 | | Propers and Repacks | Do not Prefer | 36 | 37 | 3. Click **Add Root Folder** and configure the path to: 38 | 39 | Sonarr: `/data/media/tv` 40 | 41 | Radarr: `/data/media/movies` 42 | 43 | 4. Go to `Settings > Quality` and set the quality definitions from TRaSH guides: 44 | 45 | - [Radarr](https://trash-guides.info/Radarr/Radarr-Quality-Settings-File-Size/#radarr-quality-definitions) 46 | - [Sonarr](https://trash-guides.info/Sonarr/Sonarr-Quality-Settings-File-Size/#sonarr-quality-definitions) 47 | 48 | 5. Go to `Settings > Download Clients` and click the `+` button. 49 | 50 | 6. Under *Torrents* select **Transmission** and enter the following settings: 51 | 52 | | Setting | Value | 53 | | ---------------- | -------------- | 54 | | Name | `Transmission` | 55 | | Enable | ✅ | 56 | | Host | `transmission` | 57 | | Port | `9091` | 58 | | Remove Completed | ✅ | 59 | 60 | 7. Click **Save**. 61 | 62 | 8. Go to `Settings > General` and under *Analytics* disable the checkbox. 63 | 64 | ## Prowlarr 65 | 66 | 1. Go to `Settings > Indexers` and click **Show Advanced**. 67 | 68 | 2. Click the `+` button to add an indexer proxy. 69 | 70 | 3. 
Select `Http` and enter the following settings: 71 | 72 | | Setting | Value | 73 | | ------- | -------------- | 74 | | Name | `Privoxy` | 75 | | Tags | `privoxy` | 76 | | Host | `transmission` | 77 | | Port | `8118` | 78 | 79 | 4. Click **Save**. 80 | 81 | 5. Go to `Settings > Apps` and click the `+` button to add an application. 82 | 83 | 6. Add two applications for Sonarr and Radarr respectively: 84 | 85 | | Setting | Value | 86 | | --------------- | ---------------------------------------- | 87 | | Name | `Sonarr` | 88 | | Sync Level | `Full Sync` | 89 | | Prowlarr Server | `https://prowlarr.net.domain.tld` | 90 | | Sonarr Server | `http://sonarr:8989` | 91 | | ApiKey | `Sonarr API key from Settings > General` | 92 | 93 | | Setting | Value | 94 | | --------------- | ---------------------------------------- | 95 | | Name | `Radarr` | 96 | | Sync Level | `Full Sync` | 97 | | Prowlarr Server | `https://prowlarr.net.domain.tld` | 98 | | Radarr Server | `http://radarr:7878` | 99 | | ApiKey | `Radarr API key from Settings > General` | 100 | 101 | 7. Click **Save**. 102 | 103 | 8. Go to `Settings > Notifications` and click the `+` button to add a connection. 104 | 105 | 9. Select `Telegram` and enter the following settings: 106 | 107 | | Setting | Value | 108 | | --------------------- | ------------------------------------------------------ | 109 | | Name | `Telegram` | 110 | | Notification Triggers | `On Health Issue`, `On Application Update` | 111 | | Bot Token | `Enter Telegram bot token from https://t.me/BotFather` | 112 | | Chat ID | `Enter Telegram chat ID from https://t.me/userinfobot` | 113 | 114 | 10. Click **Save**. 115 | 116 | 11. Go to `Indexers` and click **Add Indexer**. 117 | 118 | 12. Select an indexer from the list and, when configuring, make sure to add the `privoxy` tag so traffic is routed through the proxy. 119 | 120 | ## :simple-jellyfin: Jellyfin 121 | 122 | 1. Log in to Jellyfin and under *Administration* go to `Dashboard > Libraries`. 123 | 124 | 2. Click **Add Media Library** and add one for movies and shows respectively: 125 | 126 | Movies: `/data/media/movies` 127 | 128 | Shows: `/data/media/tv` 129 | 130 | 3. Under `Playback` select `Intel QuickSync (QSV)` as the hardware acceleration option. 131 | 132 | 4. Check **Throttle Transcodes** and click **Save**. 133 | 134 | 5. Under `Networking` check `Allow remote connections to this server` and click **Save**. 135 | 136 | Enjoy! ✨🚀 137 | -------------------------------------------------------------------------------- /docs/infrastructure/proxmox/ve.md: -------------------------------------------------------------------------------- 1 | # :simple-proxmox: Proxmox Virtual Environment (VE) 2 | 3 | !!! quote "What is Proxmox VE?" 4 | 5 | [Proxmox Virtual Environment](https://proxmox.com/en/proxmox-virtual-environment/overview) is a complete open-source platform for enterprise virtualization. With the built-in web interface you can easily manage VMs and containers, software-defined storage and networking, high-availability clustering, and multiple out-of-the-box tools using a single solution.
6 | 7 | ## Proxmox VE Specs 8 | 9 | ### Node 1 10 | 11 | [Minisforum Venus Series UN1265](https://store.minisforum.uk/collections/intel/products/un1265) 12 | 13 | | Component | Details | 14 | | ------------------ | --------------------------------------------------------------------------------- | 15 | | CPU | Intel® Core™ i7-12650H Processor, 10 Cores/16 Threads (24M Cache, up to 4.70 GHz) | 16 | | Memory | 64GB DDR4 3200MHz SODIMM (2x32GB) | 17 | | Storage (Internal) | Samsung NVMe 970 EVO Plus 1TB | 18 | | Storage (External) | Crucial SSD MX500 2TB | 19 | | Storage (External) | Samsung SSD 870 QVO 1TB | 20 | | Storage (External) | 64GB USB | 21 | 22 | ## Deployment 23 | 24 | 1. Power on the node and enter the BIOS. 25 | 26 | 2. Go to `Advanced` > `System Devices Configuration` and set `VT-d` and `SR-IOV` to `Enabled`. 27 | 28 | 3. Download the [Proxmox VE ISO](https://www.proxmox.com/en/downloads/proxmox-virtual-environment/iso) and flash it to a USB drive using a tool such as [Etcher](https://etcher.balena.io/). 29 | 30 | 4. Insert the USB drive into the node and boot to the USB by pressing the `DELETE` key during boot. 31 | 32 | 5. Follow the on-screen instructions to install Proxmox VE; when prompted, enter the following details: 33 | 34 | | Setting | Value | 35 | | --------------- | ------------------------ | 36 | | Install Disk | `/dev/nvme0n1` | 37 | | FQDN | `proxmox01.net.dbren.uk` | 38 | | Email | Enter email | 39 | | Password | Enter password | 40 | | Default Gateway | `192.168.0.1` | 41 | | IP Address | `192.168.0.4/24` | 42 | 43 | 6. Once installation has completed, log in to the web interface listening on port `8006` using the FQDN and credentials entered during installation. 44 | 45 | ## Post Installation 46 | 47 | Below are the post installation steps for configuring the Proxmox VE node. 48 | 49 | Copy the SSH public key to the Proxmox VE node's `authorized_keys` file: 50 | 51 | ```bash 52 | ssh-copy-id root@proxmox01.net.dbren.uk 53 | ``` 54 | 55 | ### :material-harddisk: Storage 56 | 57 | 1. Extend the Proxmox `data` logical volume on each node to use the remaining space in the volume group: 58 | 59 | ```bash 60 | lvextend -l +100%FREE /dev/pve/data 61 | ``` 62 | 63 | 2. Use the [`playbook-proxmox-storage.yml`](https://github.com/dbrennand/home-ops/blob/main/ansible/playbooks/playbook-proxmox-storage.yml) to configure the Proxmox storage on Node 1. 64 | 65 | ### :simple-letsencrypt: HTTPS - Web Interface with Let's Encrypt 66 | 67 | !!! info "Cloudflare API Token & Zone ID" 68 | 69 | See the following [instructions](https://github.com/dbrennand/ansible-role-caddy-docker#example---cloudflare-dns-01-challenge) for generating a Cloudflare API Token. 70 | 71 | Furthermore, you will need to obtain your domain's zone ID. This can be found in the Cloudflare dashboard page for the domain, on the right side under *API* > *Zone ID*. 72 | 73 | 1. Log in to the Proxmox GUI on Node 1, go to `Datacenter` > `ACME` > *Accounts* and click **Add**: 74 | 75 | | Setting | Value | 76 | | -------------- | ------------------ | 77 | | Account Name | `default` | 78 | | Email | Enter email | 79 | | ACME Directory | `Let's Encrypt V2` | 80 | | Accept TOS | `True` | 81 | 82 | 2. Click **Register**. 83 | 84 | 3. Under *Challenge Plugins*, click **Add** and enter the following details: 85 | 86 | | Setting | Value | 87 | | ---------- | -------------------------- | 88 | | Plugin ID | `cloudflare` | 89 | | DNS API | `Cloudflare Managed API` | 90 | | CF_Token | Enter Cloudflare API Token | 91 | | CF_Zone_ID | Enter Cloudflare Zone ID | 92 | 93 | 4. Click **Add**.
94 | 95 | 5. Navigate to `Datacenter` > `proxmox01` > `Certificates` and under *ACME* click **Add**: 96 | 97 | | Setting | Value | 98 | | -------------- | ------------------------ | 99 | | Challenge Type | `DNS` | 100 | | Plugin | `cloudflare` | 101 | | Domain | `proxmox01.net.dbren.uk` | 102 | 103 | 6. Click **Create**. 104 | 105 | 7. Under *ACME*, for `Using Account` click **Edit**, select the `default` account and click **Apply**. 106 | 107 | 8. Click **Order Certificates Now**. 108 | 109 | Once completed, the `pveproxy.service` will reload the web interface and show the new certificate. 110 | 111 | ### :fontawesome-solid-terminal: Scripts 112 | 113 | !!! warning 114 | 115 | Proceed with caution: before running any of the scripts below, review the code to ensure it is safe to run. 116 | 117 | 1. Run the [Proxmox VE post install](https://github.com/tteck/Proxmox) script on both PVE nodes: 118 | 119 | !!! quote 120 | 121 | This script provides options for managing Proxmox VE repositories, including disabling the Enterprise Repo, adding or correcting PVE sources, enabling the No-Subscription Repo, adding the test Repo, disabling the subscription nag, updating Proxmox VE, and rebooting the system. 122 | 123 | ```bash 124 | bash -c "$(wget -qLO - https://github.com/tteck/Proxmox/raw/main/misc/post-pve-install.sh)" 125 | ``` 126 | 127 | 2. Run the [Proxmox Dark Theme](https://github.com/Weilbyte/PVEDiscordDark) script: 128 | 129 | !!! quote 130 | 131 | A dark theme for the Proxmox VE Web UI is a custom theme created by [Weilbyte](https://github.com/Weilbyte/PVEDiscordDark) that changes the look and feel of the Proxmox web-based interface to a dark color scheme. This theme can improve the visual experience and make the interface easier on the eyes, especially when used in low-light environments. 132 | 133 | ```bash 134 | bash <(curl -s https://raw.githubusercontent.com/Weilbyte/PVEDiscordDark/master/PVEDiscordDark.sh) install 135 | ``` 136 | 137 | ### :material-clock-time-nine: Configure Backup Schedules 138 | 139 | !!! note 140 | 141 | The following steps are to be completed once the [Proxmox Backup Server](https://homeops.danielbrennand.com/infrastructure/proxmox/backup/) has been deployed and configured. 142 | 143 | 1. Navigate to the Proxmox GUI on Node 1, go to `Datacenter` > `Storage` > `Add` and choose `Proxmox Backup Server`. 144 | 145 | 2. Enter the following details and click **Add**: 146 | 147 | !!! note 148 | 149 | The following Proxmox Backup Server datastore was called `backup01`. 150 | 151 | | Setting | Value | 152 | | ----------- | -------------------------------------------------------------- | 153 | | ID | `backup01-Remote` | 154 | | Server | `backup01.net.dbren.uk` | 155 | | Datastore | `Remote` | 156 | | Username | `root@pam` | 157 | | Password | Enter password | 158 | | Fingerprint | Copy from Proxmox Backup Server Dashboard > `Show Fingerprint` | 159 | | Encryption | Upload an existing client encryption key | 160 | 161 | 3. Repeat the steps above for `Local` (formerly `backup02`): 162 | 163 | | Setting | Value | 164 | | ----------- | -------------------------------------------------------------- | 165 | | ID | `backup01-Local` | 166 | | Server | `backup01.net.dbren.uk` | 167 | | Datastore | `Local` | 168 | | Username | `root@pam` | 169 | | Password | Enter password | 170 | | Fingerprint | Copy from Proxmox Backup Server Dashboard > `Show Fingerprint` | 171 | | Encryption | Upload an existing client encryption key | 172 | 173 | 4.
Navigate to `Datacenter` > `Backup` > `Add` and enter the following details: 174 | 175 | | Setting | Value | 176 | | ----------------------- | ---------------------- | 177 | | Storage | `backup01-Local` | 178 | | Schedule | `01:00` | 179 | | Selection Mode | `Exclude selected VMs` | 180 | | Mode | `Snapshot` | 181 | | Retention - Keep Daily | 10 | 182 | | Retention - Keep Weekly | 2 | 183 | 184 | Choose `backup01` to exclude from backups and click **OK**. 185 | 186 | ## Archived Steps 187 | 188 | !!! note 189 | 190 | The documentation under this heading covers old steps from when I had a 2-node Proxmox VE cluster. I've kept them here in case I ever need them again in the future. 191 | 192 | ### Create the Proxmox Cluster 193 | 194 | 1. Navigate to the Proxmox GUI on Node 1 (Primary) and go to `Datacenter` > `Cluster` > `Create Cluster`: 195 | 196 | | Setting | Value | 197 | | ------- | ---------- | 198 | | Name | `home-ops` | 199 | 200 | 2. Once the cluster has been created, click **Join Information** and copy the alphanumeric string to the clipboard. 201 | 202 | 3. Navigate to the Proxmox GUI on Node 2 (Secondary), go to `Datacenter` > `Cluster` > `Join Cluster` and paste the alphanumeric string into the text box. 203 | 204 | 4. Enter Node 1's root password for the *peer's root password* field and click **Join 'home-ops'**. 205 | 206 | 5. Wait for the cluster to establish. You will know this has completed when each node's GUI shows the other node listed under `Datacenter`. 207 | 208 | ### :material-vote: Create External Vote Server 209 | 210 | Because the Proxmox cluster consists of only two nodes, quorum is lost as soon as one node goes down. 211 | 212 | !!! quote "What's Quorum?" 213 | 214 | A [quorum](https://pve.proxmox.com/wiki/Cluster_Manager#_quorum) is the minimum number of votes that a distributed transaction has to obtain in order to be allowed to perform an operation in a distributed system. 215 | 216 | Without quorum, the remaining node cannot determine whether it is the only node left in the cluster or whether the other node is still running. This is often referred to as a *split-brain* scenario. Luckily, Proxmox's Corosync supports an external vote server (known as a Corosync Quorum Device (QDevice)) to act as a tie-breaker. This lightweight daemon can be run on a device such as a Raspberry Pi. 217 | 218 | 1. On each Proxmox VE cluster node, install `corosync-qdevice`: 219 | 220 | ```bash 221 | apt-get -y install corosync-qdevice 222 | ``` 223 | 224 | 2. On the Raspberry Pi, install `corosync-qnetd`: 225 | 226 | ```bash 227 | sudo apt-get -y install corosync-qnetd 228 | ``` 229 | 230 | 3. On Proxmox Node 1 (Primary), execute the following command to add the QDevice to the cluster: 231 | 232 | ```bash 233 | pvecm qdevice setup 192.168.0.3 234 | ``` 235 | 236 | 4.
Once added, verify the QDevice is online: 237 | 238 | ```bash 239 | pvecm status 240 | ``` 241 | 242 | If the QDevice is successfully added, you should see the following: 243 | 244 | ``` 245 | Membership information 246 | ---------------------- 247 | Nodeid Votes Qdevice Name 248 | 0x00000001 1 A,V,NMW 192.168.0.4 (local) 249 | 0x00000002 1 A,V,NMW 192.168.0.3 250 | 0x00000000 1 Qdevice 251 | ``` 252 | -------------------------------------------------------------------------------- /kubernetes/talos/talsecret.sops.yaml: -------------------------------------------------------------------------------- 1 | cluster: 2 | id: ENC[AES256_GCM,data:MuZTelcbsGIa/YDYEB2uQqwgnN/RhSTXuDvpGWABNRAvcXXhALN19YxeF3A=,iv:VpDk3teUtr2IEg5fdIikmDFdflfmkjZI43ICyBac9FI=,tag:VtdsqvYcc0N6nywlvoyFpA==,type:str] 3 | secret: ENC[AES256_GCM,data:sWqS3E7hPRmY99Kn/mYdtdx+6EbaIaHO1x0O9etehCrnwC3o5/qE1YtgkuQ=,iv:fAQSGNFt+g7bgXQbRfDsS15LgzSqn/8nr/GbwUR03S4=,tag:IEVNwXo8N2eL1p1HIACeog==,type:str] 4 | secrets: 5 | bootstraptoken: ENC[AES256_GCM,data:f3euSCE8YHnpI/zzbtvsoeVr/q2Rb6M=,iv:ovDO8qgA9uTyHTRO3uNz5tUX+JGhUtsJB/zx181bEpE=,tag:RREZpMYIXYwzOsVc4/VY4w==,type:str] 6 | secretboxencryptionsecret: ENC[AES256_GCM,data:v9j9WCLqiYAkOsni6NLMY566YiejFvh6Zfjn5syUZdSZlw7ksiyR4xC3nnc=,iv:/ilni0bR3dmNa5xepIvlWiGy/xDaDVrVYJUiq58RDnE=,tag:y/tuXqFwiFu+7DiHvy8RVw==,type:str] 7 | trustdinfo: 8 | token: ENC[AES256_GCM,data:VaZxGceycnHZ4l3f0OxdkVGqddW6Wxc=,iv:tSoVaURUoSupSq7fFWpvVxD45XeJiZ9l8hP+cexSY7U=,tag:Csu9H3uC5OAgLZYdr3hQDg==,type:str] 9 | certs: 10 | etcd: 11 | crt: ENC[AES256_GCM,data:YXvBm1I0arjvKD3+V9N4yFxjLjq3pMKLWBJ2Xk/A42BewyERxjnvXbF1CUWxdLAKkvBP71tijUoy7iVpV3XeTGYl5xkI7jUzRKpxFgduFRKXqP9Fy3kBftJjZwG99Zva9hK7NHsFGH657y1Fz839IK1vd8t1I7hL3XWnKPjzyz9hD3wCsitxY+IhXdSkG/CIFLDrXqMil7RpNVtYH0x7EQa5a3dqFzXow35ZoZ/q1BgYPtL40iejVAWvH6qHFJCO4y7jzeJTIoDac0JuJSZzjIwpelQN77S2Hbvo9wkXPlSX2onwqIkJTlE5nTo+iG59+ORlVF1B/jyuTmjgv+148deobR5bw/kLAc+2s5AsO3gVrvtULuvCOs0NrcGnoEEzJRsy4Yfs9SrmqDyb7yU9dn1QuWUdf2CzLSRbwsZjZm7sHlsdtDFfAp1yh80sxjpUoshKKkE2iKnqFgndYB+S6kdyRqQhdXc9VmBoojoW5tUbSo8f/Pl+803QrJa8JdP6D4DNr/5dmmYaadass4JzuKL/VIn/VYK7nSM4YAgfMvRXGAaesHxoOuO71l7kyviUWGjNSXvXPzjIqUHh5kMOmy9lEOB2sQpFQkcZqLhc6mdapwPOXH3IrC4rfndpMWO+m4x2CFgSd3/tWpmnKQceiXC8/Muxas1T5K1/TqjV92bpBiTpNVgJkgppgXSFbf3dag6ak7jqsofxBH6DApjUT/CloahNS8psVMBnO4mCtG+BW+h8LFum0Y5vK7iX7LfBNg/JBNJysF/mr6tTuVS1G5nYXdb/io33LtAaVJDP4XrcLb3un7HBfWX0kqMGSb/qDeGxTEIlCHXMclDSdGgb/WLATq4Kj4CI0P5t8CbL6fSnFm88Saq46UBT4WnhmrKpMrsyc3iQ9GGGFqT3RUrdGDbnjn+FYTBDvoisfI2Hi4WBGPFyID1u0qoPuvKAKC//bUOqdZ74xQXU4RYXP6zpAb5IPcV84JA3tNGh7CfeLpuAKtY6B7oVMp1gdNokAojwmsDZJg==,iv:AK8S/MsdOL1LopJK0l/ug0W2Tz4w3k1zDE+saVAXHBQ=,tag:ZlVrt0f3hvAF64tUvYIAAw==,type:str] 12 | key: ENC[AES256_GCM,data:XsZc3AuPd8NS4JRhZNoMDSixqSuOKx5yTreOpe40ZMtnpuXEYaCf426WcG38eJkepLKII8GhKPCu6v4kYQMBxSdrZQ/hhErrEIG9CZlqx+bUINc8fYOZG0kKRr/7ddvFwwscUyCdfda41BcYKlwN9xwYfZtnvDRK36DkoOcJOlZMPCDhUAggEUJ1v57V1oN3hECPWnmqft7jfXq7hrOVJsXWSFy8s3lj8207ILZ6eFtaAXbY8zIHOdEAM0ywNx5UCxWY/t88FVcfQJHOATsoCggKwCG9cHQ43SO2L3EBsj4MAoyF+2BTmIqav7ELcoiJQ5Eg5l2rms3NOIvLk2Cch0bbNy+khVZJRbmzZsHfyD0uTkw2majKPAskcKH3dA3sbYD3JMhuCb2HpCMjuBZofw==,iv:z0aevTrifWg9+YNLrd/VK7k8llnCtGYadLVlRJgsNsE=,tag:nd5ul1LWm7KUw3G81WyedQ==,type:str] 13 | k8s: 14 | crt: 
ENC[AES256_GCM,data:v7+C5F4ksBKIVbfcnANvocMsa0clUze0ykj2ohc1iLKcOqCZFPaIN46rJeyLnVUyUOpcfpBfb2uf+PR/95q9+e4lra4hXF3xCoK1n54JSwHGkmRMPbTSjFTeNArZ6AWNNxZB3exoqro/jY+riJsn0OsC9zJlR/ygVSx53m9UPrWSxuCAmYhGmOVIgXx/qxn/Jclh8iba27PIZz8aAcMGj2QLphl8tE8M0X/hGoY0PpBbFTKHYQ0UBQVKNzjoHofAkqxTR9r6TohA4Fyl+8yIkCek55z/Z69TFYO5Et21bC0kWFHQ2XsaPKlC+F8hJZA66WnStO6mIf9zxIVjL9HTY2flxSLhA+no9RxuDhDqFFON+XLyTAge3NxJLWceEWtjgKksIYEAhuMk7wIDWmojlEhZMcZYmyuPoiAaKrC5LNVR27Pi1evLpYQNjk+n3pMa25Ox5bxJGRNQZWJIYw/giiLeyf94OWOLHr9zpkXQpSr38HVi5A8huTUBRrIRbcB2Qw/NuWCYYwZjfoe1NlA3YYYo3m4CYLTttfRiJdmhlVi1CyCYY73YEUmFccYAq1//UY+FnftQzlkAT1yzj1gIzQTml6EzG1erYnFQ15aBr1PDQl8xj55CFydCyBaMLccC6/wdyBMbYSXVq/6S3ictCCZBKbCiSd6I4aUJZQpaUGJ/GlRwlK0FzrAEZZ0QjdIsmJtkBbQOKKN2/ySv8vA7IVslehiW3Yj3bBJfkWYhty5xIyRT1b9B3i/xCVDX+k8Ev9A/lecOy6RRWgOXrWB6C04x+ZFlBSJ9XYzj7kpWido67He7fKqYY9gK5TfOV7zaOUhtnBQjZPWCJ1cTvSTyKgb/OqaJa4gJKsJZPReZoL7hlCNUuCIhWz0Je+mkHsE48s/d1FKVMdCN17Y4ZdbEyLdSg0br8tbcj1JsfkbS1QjdgpQ6Rl9jnsnbAg7Wapn619+wN0kW/7LVj+xZpO7BUMZpUz4LG5FhMKgdfAOXbuS60tcHoP5yMMBwo59s/bKwie8EFWt59Xd1Ep1wilszS7UEQ0iTHgj5FRyHGw==,iv:kSU8p/KKqUU0TxUVHLVeWcG+UO66mv2yx/3YREko6NE=,tag:oDQbzOab6LEPrjZ/9uOpSw==,type:str] 15 | key: ENC[AES256_GCM,data:/MpAvqOVvqsOOeekFzrU2G8bgkqxdIRpbLLcUw5hinsUPJ3bLnBfoEVo8rILVwA6xNNFStcVh3j+XmqC/rvFuRnvo367mmJXELs+dY6d1AdiFY5VwprpEP0/vtXC5VmqUwHHH92Pf2ChcRLoFcS2kJOMwI4sseTmqkA+XCl3oBI+YwjY/FWswI0oSAoonlRVLHgjqQ6/Z4j10TlMmSe3AYZVaf9JhrpfBBtcqX7WoHQNo1Pppj1AGFfs7Q7JTc9QIKz4vuz6BRE89oay4djPtdML4znHLv5uWXY0Seawgkll3Sd24IS/52VsDlGcg0BrRUr7RM6zKGXP8QmSBZqzvjIssKQ6e9dop/SOLMy0njNUUDaP7xUnow+oLfT7de1Rhva+ttSCk3KqIN2lkv28WA==,iv:2FQbeN/kylHxBBJXCLDlnmcnbCkfDCxE48TE+Kya+/k=,tag:Dt7LApDs6TGhM842PcBKsw==,type:str] 16 | k8saggregator: 17 | crt: ENC[AES256_GCM,data:/A6u9vzVLN3V5Ljl35anuWlbkaJ4S63LzgsgfZDbuuKAGgQbyB+XIzfHd3r43hNq89uafiOuWclfybVXTdM1k3bat0J9vRrfFPNpqjUiV002GFKFV1ULTuITjLwsNNUoVCE4DaZb+gl1IgQBMepFHRZarfzkfAODA/HTs9O7sn1htlqTrc9d2f+vRqy2xtAa9qh6DeYblt4LdR4kwo55Ul8rfGTmqc4ktOKzMKlfShPM/bFStTrQPhgVmnMcN/wLjks9X4bx7wqZxpLaJiO0PfaPTSUJDYpKcJkfx2enIYRQ6uqq77iusFuLoy1q3HIwmgvssvwxWfJS749to6oxo4/pP1WH434q5wmVy97tyDPdX4uT/MeuZKQJWEvbQXQuxx84dN01Cq/PO4X4FxTAKubnQSAfAAf9cVllAAHNIs7mYQPQH4nU91m9KfxPtXShg1I9BtXBcw+NZPmBda4f06DobnKDyk8erzoaLKrm1fsVJTGP9Tit0HnBAPIKzlZeP9Efq496sXldIICX3P62o6t+aPdbvSoPp4k+X4FAeMTFW78MLnYtOrsRz4nCVHUN06n2yxyr26LPOOB3Um64gfHNYHmgXnMeTe+Sxrsw0DW3sEC9QENCovDLJviHXDXYa3oNv8UXDLQLAiTqwM6BLl7vowgeZMCQ/73qfzcGjuUtKsqX50d0dqz0fnLg37IpBADN7kJAHW4JSydayDTPY73RptCUS1vuDDOvUiprM1L8b+cd1ZMY/0b9FkBP6bUXwFZssDyoiiOPJ9iTGigflrSC7WWHuAjfw2KIcyeCK/qhQ8mA7vuu9nkcbdP71fPKDE9AeCsrWE+/36I4V131/iO2cHLPVbIWPNVnxKj7bFByn0VaoeFmL1YwEUEgeaSXALpgVRupfav+k2Mz2gLrbXREXF+KSZFJnQXa9shaDlhYOZHgWwvMX5hHtshKiVQx,iv:AOHwyYqWV/b4iCVCfC984RsrbgT56lm+5LJM3dtnG4U=,tag:p4tKSEOSNdczwyWaRR69aA==,type:str] 18 | key: ENC[AES256_GCM,data:UDytqNZOqAN22K36NknA0ewWJVvy+wZQtIz9jkz4ZWd4WhP4MujfkfH2B2yHoEWdjqmBBFZZ71WDDy0zIZeJsLS3bopBoga6w7uo8vy67WwEhzsZP+A0CHoost67Q/Nm1tMDonZTSQd606IhiqeM3lZFHhqUghGBmAFoVcYBO+5SiDWCVVu82x2sgl3u5BjsR0ocAjGh6SjhzR2+JOx2gxsV64/rmzxbvdrhimONNAbBKXiwu7eNq25B/y1cNqftypU6xKlE2qqn9k1iLEdOfkWQ1RUa0QuN7SUJ7KvCUGMcTNtgvVGR8n9Wclo5lmBbcPxHFZkvzNm6xsDgR73QEnnW9H9coyuBwhJyRlIVNMuwNojWXzvJhmGnmppNwbaLf6zMhq9M6Vbz3/abb7wVMg==,iv:WZZ+keWkNzIEGPlLC8R4cVUlDqd9Hic5VXwIOoiZjLQ=,tag:sdTfFBbKUlVue9iek2wgrw==,type:str] 19 | k8sserviceaccount: 20 | key: 
ENC[AES256_GCM,data:KJUNikMnrP6A6pZSc6wfUVw9UG8aUx0YmGX9fbDwiGO63tGd+LPy/n5pveUZD0RUnOXhmmw8fzFD5eZr84LP9LXP8PBRPfg5UlObMmtIWXf/ai5S9D5Z1cyc3/wAXztdeIeTiGEFub7mgrrzf7JoGCia/1iIiF5dDXv55twsZKzqyw1fqki0noRPYqGRkB/E1ch4ZiCEdkpyYE4b5PgRcTbt5m3E2cYgR6a6kP/uSNKZwAdm9qVgy4404BFDTinnO6sCDSkz4D2/nHO6RRBG5buzQbiwRP5mSeJe6hZxrFtebJ2+nDsFa79nXWwdvx8clLlm1ReeLKR9sjmCTKKLEDXjGDH+Cy/x2uk7xAzJlMXDHoDrusB2WrniISlvNRn5J5jOyWBNDFTtZTn1giNSBLHHXuTs4HEMSxBHMPWEAovCIKP3Y/4NWRcNjMVc+Du4uACKJB2kYgbDh58rPGk6he6EGAVgHUh/zISFmMf6kaclqtWRUa7+2BrcmhUDXEHarlP0KTFZQD9cB2AzDpWmbyy4GH3vhZBS5P6PtViqi0VHXmfjsxW0fiIvjmoszsL9iXmJbJfoc4CW7A+g1sxDAR9C+wO1O+wN+J+cVxnvvEiihPCDPN17rsRT7ETp/5v+0y90IA9RpRCcL4WAzw5I6N6qN3IyY6et7BMlQ/CdvqZtx97BZBWJK8RX339y/HVXi8dgkyVIwvAcFREK7VX4dhcrq9OgpLYpG6oWOfzWpvdIPc0xzRaRK2A+8YBHGFD1AhRFB1YdeJ+Hd/7w4zq5tyh7QgVkz9YSMqXOTuAEG7D23yIHyaJxrq3rPeRvUuYfKNaBxHxM8otfUUh9bhDmm3UoWSl9Dod9E1+0ycleLEpTkuqGTXlzK2axjp0Z2ttnhvl8ERnFNDmrHpqcdwfDDqm7luoTyqf/U1virfXulx+V6uuSwIA7Vn7BMqHlCbkTrnn5+47XCy6c3ReNsRpZz9QCmz8HFSLmxQogQsFPC+h4GHE40kdMq2WMCfeRRTwgT3wTMAEiMgDAcWUdmhJzhRaIndZueR95hYfWC/pfK3hZ4De2QJ5IhZ+V/R+uTr1odw/IUs00Sz1Jaj2fB7JK3RmiXB8qXz3f+DggSRtsE2WUXkgln4Bd3lgk/Iy3+TmTa7Uq+uITHm7JYM4PQTOS62uhsVhgtDEJIMJ629T0QttNpVU/HGf2B8BkCSdoOi9udYF8KlF4Vd5CpHjV9gEDIpl4GaWpfWEM/SJqmzjskPBpvNTNss6E+NWcGyr8E/aS5Q9YG2FNwvO8dQNmAUQea0VVMcG7nOJogSS4vecEfJ0HFwDPzBqroPZJ4rBo+pdeOhoj1ICXvyokhd5BmbiaQTgc8+AktLc74/ygR3grLz9x/9AR7bw/46TjPx5VG7jt2tQvhyfXso7K97PYkNHX69lVTOJmDswthqUr68kHE0JKqVRmYcfT4peyT9eEtADNIuAmOubM9RHIl1Nxy/zixK9RgRE6j9P7HYr4CkpU/drgkvFv98ssyIi7/3+swxDUYIH40araTQgwnJrLc45omB8/OXd6Z4XywXRw84nWXNC+D9l47wM0p1h3BVvX+9heONz+cD+aYgkkeZzFDkhhe37/OwX2NEtK2RzSb5EtSnXnu7CldYUQ18L5wJqveczHNc/SslYe7lwHMnCHmpTqAy/3Q/nj/1OHb+oKoanO6vSUwXWTI+N8YcOQQ6k7iSN49JDmRtds4DtiInA6jH/q2V7N9aMyjJ090B39j9eHkhQUFyvrVv6M0dPr84xwnIWksUtl17vo7ONiLkSKtbAEXMW3Pee6TSBb//XYx/0HyjKtUTw3iwQg42hF9YpHUA2UdoYBjL89y63+ftXafLYdHb685O7ht0sP46m1F1PugcVPV/H/bxqRTn+IPklG+Bmz5sf1oyyGHqlWWn8GK8FH61ZuQG1uvjDDGrIm9j/xOxsokQRElx9bJOddtZN+z5V9JY2Hqq9CFTSnMPcc68YDGSIXJdMNbBWLnuFEBrkBIhKYUSf2doceatR7vdsTL+IGiREOgD/SzJavWkaBzCMVQWF0kOoN1fXiGNGtjyY0zelOPJzJ1WLB6aXQ8KoIPmcq9uonDeHYbv9iLm6RboqXA514f5WAJkFBvmBK3GK9IhKxudUMQBFCYVwmxiJ04R6tLIr2EQDUnuVow2xU/US1qEPSpJFlw9AqdLHBfEfUHTfMFKiRRRJYE99AePiIaMJjuyfv/cvRSciI2dMitFurxcDN25eCtVq+m5fXBM6aI2DA9u9IPLBI3hLvejSbHqSIzmaUjq7+weoJlqXyy70N/43v7NJfdYTBgWCJHd/G+paz/yiAArvV37b3SzjzHU4VnAKvTsDqmQyRe0iBVjwMvkinZ7K8LGZ7+MlOINBh6VZrHlHcuSoOE6SdoqXdsw78KiY6COgjFDDxw9qH08lfh0kg48zxJui/zoXbdJP62a9IaAmIdPD3knPfOMvr1pExUhkIM3eWxLc4wq3YfJswiqHQB3hnCHE6IUCud1gH1Lb2I0550byTdbTTvHzFjk4VeQzsUXDt0/nS2emOmR/9G5f6gT2oFZUZHjwGaMs9Azub2suNPLboVmS2mXoZ6bYjiAuclgXn55MJgSlAHMMaXrwbLw32BaYgT7qi5EFp4w9M22H191DGpYR8Zfg0tuxlgRD2d5c2/em+T/SreyGNGmNJMeRuApE4Ga9f7bjSo+KXVbOSAJXexa638OaTaugI/l0YZoHQPSPTMxhG1x/IqrWQ0BBwB0QC09WvFOSPod9yEIi/+fb3nI2Nk8kgqY7yYwX2/FKJs5Vh60Y2zVmrbvrw72PjENbyXgYKFylzNScKRaV3OkHSvIBSRfHZDpNDmTaQr2zIu9FfvZh1kppbrUAHufhlTbhg8dft7SCC61Ejn1xUwIW2613T3Xd7OeaQxdMmtySbM+JJQ/H+U+rg/TFo33Wn0VKV5rRM6itWE4Dsb+ungI902elV4oz/ChwYoEYtZ7ncuM9sndkBM0C18Q6kuW76irrnR8R1jUHIvY4urvX1ySv3bL3iz6ymPcuOqnlgfXNcNrFZ5s0wV8tDax7vttVNCsnJ64GI5g0ma0pCHnkwQ/aUU1oNzW4dasJ1eCsmaa9d2GliNMzfvXUnqXCfmgu+xI+sNf1x/LO4Sre9OKsK8tzOKeLMthBCvvKnoWiEcF4HqdUbIYH+q0ODpfvJdr7Jmk/2JIkG1AyqVFOWEHhdBj2kQ96OYzLGTRVZVnGyX+bIE7+6q2qju5t5swPHrJsD47kW/04er+B+ZTujYXK7+XdINqPBgP0dCIRjeBpPVMHCnZTXzkKX5+DNGxDveCcpuwo42HoVQNd5HdLufN2LAMc3hNDHEEDlhOyKyuEjr1KMRFX4g6WD3uuGYvOUshTNJaepku7SMsfpyO3lIuh4Z//SNRNrv5Ol+Ue4ZZGe1QFwEsBsUCpM6VOXo0aWN3KzWu6BhdN4/5hCfqiXbf1FPJmu2+m1u93uK6OsfCJdWCwjbd0lvnB
R9/rb9nrCEv0hfgILiBuYW7DyoGMZjU7ZXW73DWCu7FdhTRJ3n3gh7u9T3irKmNySrwasilNySBiL0tkhQXk8zLenQo0uo5yFJuNELNlWSeohYcbHHYuyBt9ams0kEhDnF1WIR9pa+mKpyKb5HWjRoCuBD7UefzC5+gJGjYMZPJqE28fTy6brmr7p3rgVJZ97qnUI5e3D6+FzMUF+QckxJwVx7ejQDQD0KiQS7o2IzvnOg6bakpnFJcsm+JIz2PNxAynXFN0fAyQW+gYV3HxFbT1TAF9gC30gxQ6s0Dery9Rrd/AU/AUciG8Qm1A+4ITbAuo+0DCZBCfOssBlKuR2yaChGoGMHHZsZnFgpC+Si+uFDaVDkF+GYJS/0iaHzBm86HbQ3w0OikY+Zts6RDnSqLPeowq3Wlc0oYeQXmLRd16BBxHXB4vq7IO0AbhZluPe3tsyxYD7AM/7LxQlnyYcZmSQL1z7yXf6KsfdYBf9kkSpEthNcJTiSUb+92ZkHvlGDveQXJ30BeBNPl/2kPH9V9sIyK614zpUzhjhkOvkjUhqBU51szB6D/iPl1AWxy3uZAlpecYN+7ydfxXgiSvwYsPN/zC1Inilb3Ad+We246CdYJO8rApWLBZ4uVKOM3k1aXXM1kQ5MBu64YSdkU4ksaBoNzNNDM1LsT27jnP8j7BkyJhdo8VREzTVE3CGDnNsJs0Te63dji6XrCRXu1cjCKyLBUDH9dDWm0kimafjUd/Xg2QrpmCk2gT85HQQfqFVzmmPBA5LTGjsz+H9x79MGSiTTLAldO4qDn5dpaz8qRUAg+PS2LlnNIGz6RAipA0hhuemAJskMX8xE33rsW0UtSVNRoR7+bOPqdH3EFrf+z1QSrH0yk7nIIf/32osO+rSs8d60Khgr7moVgn3vJ64/JivPp/JyrxJIFO07iPjYl4jpr8agzJbl7GfguqXW3X3DWN4nDFY452cnbshfNIVGu5YxCCNaTQSmHyDbV5WI0EbRognfIA/7x/SYVIPWbfDp6Bc1c/E2wbi1Ve+olX6SjBjWnWqEXQAKEh9eFHjz7goXFs1Ai43KniUx877pOtgS2oullDvcvrrZSd80K8zMmNDt0Lu1rJquyRTJS41/Sz/Jhi7sofJ9mY+jSqJNhixXwu41IVGGS1waIAlMudbv5Yce37k+ORPPOBPbgqPtrmJOKddqjtOMyBjgrvWc5hnQ6DXZFBC9PMM44wynH2UsYxgo/0eG2F2KPSNYA5hg0l4J4C8vI2QuPenE2/zHMXPuZdr0ETuqr7D3x6ulfXbjcwGzdGfb+71GIh9YAkjtCv5YMzA5ATxmlNSDiyk4i9tCkr2LQ3rzJcXx7bZK2/8sHVhPIZJM3CmxfPdbKl29waKofBdsDyp2glw7Jc1aS9T135rbqv5E3cGBygsKo2hrT00TiLAgZRSTpWzIBMKkWkbtT1efvz43CF5L5wn+FqVLcZQ1WfJReN287o9S43CpHNOSpaKQStOIcYPEmJh/uUI/bzegYdY+cczY2LnAlIomWGFfPsyRXMCJyCo1kg85diRehBnNfRbe193Xkw0mRfBw58Vj1lUfbMUgjcgepKj7aaRpvb8SMtTGDupJFpXFpfjs/oa7gj11dCzeD77Kk1bnKxn04JE9EZdMyx0Jo9qtY2YG3AdiJ/9k15xtJ9EkvhzlXU7Vdv/1cr9t2/qAa9NYIwd6iYRvAEijRpfrO6+9gafmhmvF4/a6NSEvBoZ1U16+esJfnFBNUhwHH/HeGYvKBWjejUKvul2SOEZHD305X6pk6wYtGvkhWupMAX5YdMpKGVVq0vCeoxdC1EL7Zc2DmWrC+fsCbKtDjIA1NApQCKpOjozrh6C87QrB6aXpcrM5cntzWzgi/WeD603krLVrs3z4dtoyq1nELzkrSeRIirFeEYmLjArCRWhB94wq864/aUk38GhZnodltMv8eDB4gweFEheatWc0PpZqc+RgjbKleeZedSkBB4RcyBaGH6xRR5bZ2Q387jXsMKsjaNOl/bojQDmI8yir48bYGeZLIN4l6EYJ8motDm67i4iuzDdFmmbn53FSHnKJaP5bq0flM/z0PNn6nWKj1Xm4vUwZ4uGM8m0iBXpUTxPHKQ2FCzyWKIKUcdn+Vd7asVR2n1xK2cxm/bj0TuTQie/+WZVsWu38zV7MC9Rf8DhmVh2jC+nVa1sz1+5ayvYhplaMo2mGQ/fBIs1yA==,iv:9mKt75I+k1vYbPuACtZNTJhoarFqoSoSzrQeWBcEDAk=,tag:zvbiccomeH1i7Cl4F7SU/g==,type:str] 21 | os: 22 | crt: ENC[AES256_GCM,data:7809g4+opdwGXrh787QLVOAcERBXSC4QxioHWjUWsnXXk23b6Yz2WIrYy1dde4UfpYebdg61NgjhOlJyvDhrOx4MznQMHfqcmH57ZyrUUPLEK6uIOIvOS2w2xZX5zMzgEa6NHyTUxbSj/F40cwZqGyTzUp+DcWCkj1K09JYev4NBxfz45K7/bqyzpm6MQnUg2fRFNdv5Klud7kdIslTNLWpLAzMV44bMnxirFSg5LbQGW8bFBazvq4zihdou8ojee9Xr9jMAcxTfx0A+35U3ztpGy3WpTZpAaqzigIZhIMfFk1GZD2kNeeCt40a1U+mN+q0frLobQduM4zBWOJLWAExb+ib/cJctpQmnpeTmA5ScE+9cLvuAGUowyFTSgPdkBvGBY+KAAPrr0MvxeabBfN098gTHXSPYtXjf3u6x8W+T/49RkyL0BE+ls5ngQmjmodv7v8OaS5/ENPxx+0trdSmBiQvsd2uty67/h/9QqqiokAKe95p3uYw1X3Ws3c/By+R61pSXbBXIOW2yBsdeSi0qpE/TlzsH2eu/7HE7V+FTaq+hltdmKTeWAz+UgIIqyYMWfL9+1CJxKjKKm2HxPiStMvRd3fvg0DvBZOhAY1/F1SWYZXNBEMGyuaK0PEXPxrt0AWI23kjTR4FtAw66soWxAaYG+5BrWf7cH34CHp5cb6VYLrDyJZT4y/wdCY/QSB8n5qhyz2kf4jZNu2/KApDXenC0+tf5B1oPZGEPUORuBZiogh3CDqA6d521Y4SDErHznCbM5dB9+PTsBo6lbMqCgZvrSiAmO9CvwGBV+43hZZwDP1dSBIxQLHCN5yELyBNcpO04P9japFAjlN8MAcD59mecGSRbEGDaOIyuDr46CVsy,iv:ABIucDta6XTGWVj9aG76koTnAEIgx3dZ0fOYMkijsZw=,tag:26CvAE7Xn7XhT3bhFC4xqw==,type:str] 23 | key: 
ENC[AES256_GCM,data:np2Bd37W1+bvRPMx1bS+UnaGIzDe6G+AU1EhcaKo81WFNoyuXQ0o25nYS2WgH6T9bA4Ks5zDJVafkKWDW8mo/h24AR60vLwZXEzj2mBSxcUbB+MjcdtTSeVPEBXam/p+sBn7lrhdUjZWcYzEOc4zOA8IXPjVx7aIQjRkFdLjnRcE6MHSVE0oAOFEhSy4kUB5xivJdr0+aj8wsuRX3yftao65pxGuKm9ZOThOueW/WY+bqbNJ,iv:rEtoPUGH+DyIh1EhpNozJCZ+pXcRjKj/8rjfl9/olRM=,tag:zbj2JppWQEdc8vsGgWH9mA==,type:str] 24 | sops: 25 | age: 26 | - recipient: age1csxr93np9ynejzvt8jjjau97s29mayy447vlsf4mu9srmpjmr3uq56n27k 27 | enc: | 28 | -----BEGIN AGE ENCRYPTED FILE----- 29 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBwWngrOUdHQnY1dzMzTUR0 30 | UVovNVAvTnFFdDNVZjZGa3RBMkFKc04vUlFjCnR2SFFYdEdCbzdkMU44dHE4UFIv 31 | dXFkUjk1a2trZnBtenZESkFXOUc3MFEKLS0tIFd6dDVUUXE3d0Z0a3dKZmN3T1ZJ 32 | QmJ2ZXQxZ3dQem9TM2tyWWsxR0ptVEUK4nR4/0rWNCVGaCIFPQ90A7SxcDBh8USX 33 | 6r5nsHxDRUQqGDIKM3iq5QXJ+vAbu56qdsMRGiFHn87SVoAy0OtQjA== 34 | -----END AGE ENCRYPTED FILE----- 35 | lastmodified: "2025-11-15T20:47:41Z" 36 | mac: ENC[AES256_GCM,data:7EoXEwmLq/fwIOceCYqDDdgwBOusKNwiLbMmT35fHe+X7ojXwsHjiRve6aFQWoRUGMGrFXFu0Ffqtzx4W0yPTLvCspsIzhtSVbgDGsAa3skn+r9B2A3mo5bDJ+TNqhP5HB2ov47PYfEuFo9ee+8nByOM+znyFZSGkjgXZ+P9JKg=,iv:TaFp8RXERka+FOt62CYEFkL933ksP7AOe2oPO34QHJI=,tag:VHf/aA/t5fYjiykGYrcIBQ==,type:str] 37 | unencrypted_suffix: _unencrypted 38 | version: 3.11.0 39 | --------------------------------------------------------------------------------