├── apps
├── tanque
│ ├── values.yaml
│ ├── Chart.yaml
│ └── templates
│ │ ├── service.yaml
│ │ └── ingress.yaml
├── esphome
│ ├── .helmignore
│ ├── config
│ │ ├── requirements.txt
│ │ ├── irrigation-controller
│ │ │ ├── esp32.jpg
│ │ │ ├── esp32-box.jpg
│ │ │ ├── esp32_with_valve.jpg
│ │ │ ├── irrigation-controller-box.FCStd
│ │ │ ├── irrigation-controller-box-Box.stl
│ │ │ └── irrigation-controller-box-Cover.stl
│ │ ├── rack-controller
│ │ │ ├── rack-controller.jpg
│ │ │ ├── rack-controller-back.jpg
│ │ │ ├── rack-controller-box.FCStd
│ │ │ ├── rack-controller-front.jpg
│ │ │ ├── rack-controller-box-Box.stl
│ │ │ └── rack-controller-box-Cover.stl
│ │ ├── .gitignore
│ │ ├── garage-door-opener.yaml
│ │ ├── mini-switch-1.yaml
│ │ ├── mini-switch-2.yaml
│ │ ├── mini-switch-3.yaml
│ │ ├── mini-switch-4.yaml
│ │ ├── mini-switch-office-a.yaml
│ │ ├── smart-plug-v2-terrace.yaml
│ │ ├── irrigation-controller-mid.yaml
│ │ ├── smart-plug-v2-snapmaker.yaml
│ │ ├── irrigation-controller-corner.yaml
│ │ ├── lilygo-higrow-1.yaml
│ │ ├── lilygo-higrow-2.yaml
│ │ └── packages
│ │ │ └── connection.yaml
│ ├── templates
│ │ ├── configmap.yaml
│ │ ├── configmap-packages.yaml
│ │ └── prometheus-rules.yaml
│ ├── Chart.yaml
│ └── rules
│ │ └── rack-controller.yaml
├── home-assistant
│ ├── .helmignore
│ ├── appdaemon
│ │ ├── requirements.txt
│ │ ├── apps
│ │ │ ├── tests
│ │ │ │ └── __init__.py
│ │ │ └── last_connection_sensor.py
│ │ ├── requirements_test.txt
│ │ ├── .gitignore
│ │ └── appdaemon.yaml
│ ├── config
│ │ ├── integrations
│ │ │ ├── appdaemon_dhw.yaml
│ │ │ ├── appdaemon_climate.yaml
│ │ │ ├── tuya.yaml
│ │ │ └── database_error.yaml
│ │ └── configuration.yaml
│ ├── templates
│ │ ├── appdaemon
│ │ │ ├── configmap.yaml
│ │ │ ├── configmap-apps.yaml
│ │ │ └── external-secrets.yaml
│ │ ├── configmap.yaml
│ │ ├── configmap-scripts.yaml
│ │ ├── home-assistant-dashboard.yaml
│ │ ├── configmap-integrations.yaml
│ │ ├── snapshots.yaml
│ │ ├── snapshots-postgres.yaml
│ │ ├── pvc-models.yaml
│ │ ├── prometheus-rules.yaml
│ │ ├── pvc.yaml
│ │ └── external-secrets.yaml
│ └── Chart.yaml
├── antdroid
│ ├── templates
│ │ ├── configmap.yaml
│ │ ├── snapshots.yaml
│ │ └── pvc.yaml
│ ├── Chart.yaml
│ └── config
│ │ └── nginx.conf
├── mosquitto
│ ├── templates
│ │ ├── configmap.yaml
│ │ ├── snapshots.yaml
│ │ ├── prometheus-rules.yaml
│ │ ├── pvc.yaml
│ │ └── external-secret.yaml
│ ├── Chart.yaml
│ ├── config
│ │ └── mosquitto.conf
│ └── rules
│ │ └── custom.yaml
├── special-web
│ ├── templates
│ │ ├── configmap.yaml
│ │ ├── snapshots.yaml
│ │ └── pvc.yaml
│ ├── Chart.yaml
│ └── config
│ │ └── nginx.conf
├── cross-backups
│ ├── Chart.yaml
│ └── templates
│ │ ├── pvc.yaml
│ │ └── external-secrets-users.yaml
├── mosquitto-tls
│ ├── templates
│ │ ├── configmap.yaml
│ │ ├── snapshots.yaml
│ │ ├── certificate.yaml
│ │ ├── pvc.yaml
│ │ └── external-secret.yaml
│ ├── Chart.yaml
│ └── config
│ │ └── mosquitto.conf
├── ollama
│ ├── Chart.yaml
│ └── templates
│ │ └── pvc.yaml
├── open-webui
│ ├── Chart.yaml
│ └── templates
│ │ ├── kanidm-group.yaml
│ │ ├── pvc.yaml
│ │ ├── pvc-pipelines.yaml
│ │ ├── external-secrets.yaml
│ │ └── kanidm-oauth2-client.yaml
├── atuin
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots-postgres.yaml
│ │ ├── k8s-secret-store
│ │ ├── secret-store.yaml
│ │ └── rbac.yaml
│ │ └── postgres-secret.yaml
├── bazarr
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots.yaml
│ │ └── pvc.yaml
├── immich
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots.yaml
│ │ ├── pvc-models.yaml
│ │ ├── kanidm-groups.yaml
│ │ ├── external-secrets.yaml
│ │ ├── pvc.yaml
│ │ └── kanidm-oauth2-client.yaml
├── kroki
│ └── Chart.yaml
├── lidarr
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots.yaml
│ │ └── pvc.yaml
├── radarr
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots.yaml
│ │ └── pvc.yaml
├── sonarr
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots.yaml
│ │ └── pvc.yaml
├── stump
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots.yaml
│ │ └── pvc.yaml
├── unifi
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots.yaml
│ │ └── pvc.yaml
├── dawarich
│ ├── Chart.yaml
│ └── templates
│ │ ├── external-secrets.yaml
│ │ └── pvc.yaml
├── freshrss
│ ├── Chart.yaml
│ └── templates
│ │ ├── kanidm-groups.yaml
│ │ ├── snapshots.yaml
│ │ ├── snapshots-postgres.yaml
│ │ ├── pvc.yaml
│ │ ├── k8s-secret-store
│ │ ├── secret-store.yaml
│ │ └── rbac.yaml
│ │ ├── kanidm-oauth2-client.yaml
│ │ ├── postgresql.yaml
│ │ └── external-secret.yaml
├── http-echo
│ └── Chart.yaml
├── jellyfin
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots.yaml
│ │ ├── kanidm-groups.yaml
│ │ ├── pvc.yaml
│ │ └── kanidm-oauth2-client.yaml
├── navidrome
│ ├── Chart.yaml
│ └── templates
│ │ ├── dashboards.yaml
│ │ ├── snapshots.yaml
│ │ ├── pvc-data.yaml
│ │ ├── pvc-music.yaml
│ │ └── external-secrets.yaml
├── prowlarr
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots.yaml
│ │ └── pvc.yaml
├── unpackerr
│ ├── Chart.yaml
│ └── templates
│ │ └── external-secret.yaml
├── wallabag
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots.yaml
│ │ ├── snapshots-postgres.yaml
│ │ ├── external-secrets.yaml
│ │ ├── pvc.yaml
│ │ └── postgresql.yaml
├── jellyseerr
│ ├── Chart.yaml
│ └── templates
│ │ ├── kanidm-groups.yaml
│ │ ├── snapshots.yaml
│ │ ├── pvc.yaml
│ │ └── kanidm-oauth2-client.yaml
├── m-rajoy-api
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots.yaml
│ │ └── pvc.yaml
├── mintpsicologia
│ ├── Chart.yaml
│ └── templates
│ │ ├── snapshots.yaml
│ │ ├── snapshots-mariadb.yaml
│ │ ├── external-secret.yaml
│ │ ├── external-secret-mariadb.yaml
│ │ └── external-ingress.yaml
├── qbittorrent
│ ├── Chart.yaml
│ ├── templates
│ │ ├── dashboards.yaml
│ │ ├── snapshots.yaml
│ │ ├── prometheus-rules.yaml
│ │ └── pvc.yaml
│ └── rules
│ │ └── custom.yaml
├── transcoder
│ ├── Chart.yaml
│ └── templates
│ │ ├── rabbit-external-secret.yaml
│ │ └── postgresql.yaml
├── m-rajoy-front
│ └── Chart.yaml
├── telegram-bot
│ ├── Chart.yaml
│ └── templates
│ │ ├── mongo
│ │ ├── snapshots.yaml
│ │ └── service.yaml
│ │ ├── external-secrets-database.yaml
│ │ └── external-secrets.yaml
├── github-exporter
│ ├── Chart.yaml
│ └── templates
│ │ ├── dashboards.yaml
│ │ └── external-secret.yaml
├── nextcloud
│ ├── templates
│ │ ├── kanidm-groups.yaml
│ │ ├── snapshots.yaml
│ │ ├── snapshots-postgresql.yaml
│ │ ├── pvc.yaml
│ │ ├── kanidm-oauth2-client.yaml
│ │ └── postgresql.yaml
│ └── Chart.yaml
└── flaresolverr
│ ├── kustomization.yaml
│ └── values.yaml
├── platform
├── vault
│ ├── values.yaml
│ ├── Chart.yaml
│ └── templates
│ │ ├── kanidm-groups.yaml
│ │ ├── snapshots.yaml
│ │ └── kanidm-oauth2-client.yaml
├── reloader
│ ├── values.yaml
│ └── Chart.yaml
├── minio
│ ├── Chart.yaml
│ └── templates
│ │ ├── kanidm-groups.yaml
│ │ ├── pvc.yaml
│ │ ├── external-secrets-users.yaml
│ │ └── kanidm-oauth2-client.yaml
├── git
│ ├── Chart.yaml
│ └── templates
│ │ ├── kanidm-groups.yaml
│ │ ├── snapshots.yaml
│ │ ├── snapshots-postgres.yaml
│ │ ├── pvc.yaml
│ │ ├── oidc-secret.yaml
│ │ ├── admin-secret.yaml
│ │ ├── k8s-secret-store
│ │ ├── secret-store.yaml
│ │ └── rbac.yaml
│ │ ├── postgres-secret.yaml
│ │ ├── kanidm-oauth2-client.yaml
│ │ └── postgresql.yaml
├── velero
│ ├── Chart.yaml
│ ├── templates
│ │ ├── dashboards.yaml
│ │ ├── schedule-retain-weekly.yaml
│ │ ├── schedule-retain-quaterly.yaml
│ │ └── external-secret.yaml
│ └── tests
│ │ ├── pvc.yaml
│ │ ├── pod.yaml
│ │ └── schedule-test.yaml
├── postgres-operator
│ └── Chart.yaml
└── external-secrets
│ ├── values.yaml
│ ├── resources
│ └── clustersecretstore.yaml
│ └── kustomization.yaml
├── system
├── kanidm
│ ├── values.yaml
│ ├── Chart.yaml
│ └── templates
│ │ ├── dbcloud
│ │ ├── kanidm-groups.yaml
│ │ └── kanidm-oauth2-client.yaml
│ │ ├── snapshots.yaml
│ │ └── service-ldaps.yaml
├── snapscheduler
│ ├── values.yaml
│ └── kustomization.yaml
├── zfs-exporter
│ ├── values.yaml
│ ├── Chart.yaml
│ └── templates
│ │ ├── dashboards.yaml
│ │ ├── scrape-config.yaml
│ │ └── prometheus-rule.yaml
├── kube-system
│ ├── resources
│ │ ├── runtime-class.yaml
│ │ ├── nodelocaldns
│ │ │ ├── serviceaccount.yaml
│ │ │ ├── service.yaml
│ │ │ └── cilium-local-redirect-policy.yaml
│ │ ├── cilium
│ │ │ ├── load-balancer-ip-pool.yaml
│ │ │ └── bgp-peering-policy.yaml
│ │ └── priority-class-high.yaml
│ ├── test
│ │ ├── priority-class.yaml
│ │ ├── cilium
│ │ │ └── pod-test-limit-egress.yaml
│ │ └── nvidia-device-plugin
│ │ │ └── pod.yaml
│ └── nvidia-device-plugin-values.yaml
├── kured
│ ├── Chart.yaml
│ ├── templates
│ │ └── external-secret.yaml
│ └── values.yaml
├── cert-manager
│ ├── Chart.yaml
│ ├── templates
│ │ ├── clusterissuer-selfsigned-for-k8s-webhooks.yaml
│ │ ├── configmap-dashboards.yaml
│ │ ├── clusterissuer-internal.yaml
│ │ ├── prometheus-rules.yaml
│ │ ├── external-secret-vault-ca.yaml
│ │ ├── external-secret-external.yaml
│ │ ├── clusterissuer-external.yaml
│ │ ├── external-secret-internal-ca.yaml
│ │ ├── vault-rbac.yaml
│ │ └── clusterissuer-iot.yaml
│ └── values.yaml
├── zfs-localpv
│ ├── Chart.yaml
│ ├── tests
│ │ ├── snapshot.yaml
│ │ ├── read.yaml
│ │ ├── write.yaml
│ │ └── clone.yaml
│ ├── templates
│ │ ├── snapshot-class.yaml
│ │ └── storage-class-openebs-zfspv.yaml
│ └── values.yaml
├── pod-cleaner
│ ├── Chart.yaml
│ ├── templates
│ │ └── rbac.yaml
│ └── values.yaml
├── external-dns
│ ├── Chart.yaml
│ ├── templates
│ │ └── external-secret.yaml
│ └── values.yaml
├── ingress-nginx
│ ├── Chart.yaml
│ └── templates
│ │ └── configmap-dashboards.yaml
├── ingress-nginx-external
│ └── Chart.yaml
├── oauth2-proxy
│ ├── templates
│ │ ├── kanidm-groups.yaml
│ │ ├── k8s-secret-store
│ │ │ ├── secret-store.yaml
│ │ │ └── rbac.yaml
│ │ ├── kanidm-oauth2-client.yaml
│ │ └── external-secrets.yaml
│ └── Chart.yaml
├── system-upgrade
│ ├── kustomization.yaml
│ └── k3s
│ │ ├── kustomization.yaml
│ │ ├── server.yaml
│ │ └── agent.yaml
├── monitoring
│ ├── resources
│ │ ├── kanidm-groups-grafana.yaml
│ │ ├── grafana-admin-secret.yaml
│ │ ├── long-term-metrics
│ │ │ ├── service.yaml
│ │ │ ├── datasource-configmap.yaml
│ │ │ ├── ingress.yaml
│ │ │ └── prometheus-rules.yaml
│ │ └── kanidm-oauth2-client-grafana.yaml
│ └── smartctl-exporter-values.yaml
├── kaniop
│ └── kustomization.yaml
└── loki
│ ├── kustomization.yaml
│ └── resources
│ └── service-monitor.yaml
├── docs
├── .gitignore
├── index.md
├── requirements.txt
├── images
│ ├── rack.jpg
│ └── k8s-amd64-1.jpg
├── user-guide
│ ├── wallabag.md
│ ├── kubernetes-upgrade.md
│ ├── grigri.md
│ ├── wan-phone-failover.md
│ ├── vpn.md
│ ├── install-pre-commit-hooks.md
│ ├── expand-longhorn-volume.md
│ ├── upgrades.md
│ ├── cpu-optimize-tuning.md
│ ├── run-commands-on-multiple-nodes.md
│ └── clone-data.md
└── troubleshooting
│ └── mqtt.md
├── metal
├── roles
│ ├── .gitignore
│ ├── prepare
│ │ ├── files
│ │ │ ├── 02-armbian-periodic
│ │ │ ├── 20auto-upgrades
│ │ │ ├── truncate-logs.sh
│ │ │ └── journald.conf
│ │ ├── README.md
│ │ ├── templates
│ │ │ ├── hosts.j2
│ │ │ └── netplan.j2
│ │ └── tasks
│ │ │ ├── logs2ram.yml
│ │ │ ├── python.yml
│ │ │ ├── sshd.yml
│ │ │ ├── unattended-upgrades.yml
│ │ │ └── user.yml
│ ├── nvidia-container-runtime
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── files
│ │ │ └── nvidia-container-toolkit.list
│ ├── setup
│ │ ├── templates
│ │ │ ├── sasl_passwd.j2
│ │ │ └── zfs.conf.j2
│ │ ├── files
│ │ │ └── telegram-notification.service
│ │ ├── handlers
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ ├── zfs.yml
│ │ │ └── backup-user.yml
│ ├── k3s
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── config.yaml.j2
│ │ └── defaults
│ │ │ └── main.yml
│ ├── zfs_exporter
│ │ ├── handlers
│ │ │ └── main.yml
│ │ ├── templates
│ │ │ └── service.j2
│ │ └── defaults
│ │ │ └── main.yml
│ └── pikvm
│ │ ├── files
│ │ ├── kvmd-fan.service
│ │ └── edid.hex
│ │ └── handlers
│ │ └── main.yml
├── inventory
│ ├── group_vars
│ │ ├── all
│ │ │ ├── backup.yml
│ │ │ ├── k3s.yml
│ │ │ ├── ssh.yml
│ │ │ └── ntp.yml
│ │ ├── arm.yml
│ │ └── kube_node.yml
│ ├── host_vars
│ │ ├── k8s-odroid-hc4-3.yml
│ │ ├── k8s-odroid-hc4-1.yml
│ │ ├── k8s-odroid-hc4-2.yml
│ │ ├── grigri.yml
│ │ └── prusik.yml
│ └── hosts.ini
├── requirements.txt
├── requirements.yml
├── playbooks
│ ├── install
│ │ ├── prepare.yml
│ │ └── cluster.yml
│ └── uninstall
│ │ └── k3s.yml
└── ansible.cfg
├── bootstrap
├── root
│ ├── Chart.yaml
│ ├── apply.sh
│ └── values.yaml
├── argocd
│ ├── Chart.yaml
│ ├── templates
│ │ ├── kanidm-groups.yaml
│ │ ├── argocd-dashboard.yaml
│ │ ├── k8s-secret-store
│ │ │ ├── secret-store.yaml
│ │ │ └── rbac.yaml
│ │ ├── kanidm-oauth2-client.yaml
│ │ └── ingress-external.yaml
│ └── apply.sh
└── Makefile
├── .yamllint.yaml
├── .github
├── renovate-config.json
└── workflows
│ └── docs.yaml
├── .gitignore
├── commitlint.config.js
├── .vscode
└── settings.json
├── scripts
└── prepare_sdcard.sh
└── test
└── cluster.yaml

/apps/tanque/values.yaml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/platform/vault/values.yaml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/system/kanidm/values.yaml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/docs/.gitignore:
--------------------------------------------------------------------------------
mermaid*.js
--------------------------------------------------------------------------------
/system/snapscheduler/values.yaml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/system/zfs-exporter/values.yaml:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/apps/esphome/.helmignore:
--------------------------------------------------------------------------------
.direnv/
--------------------------------------------------------------------------------
/metal/roles/.gitignore:
--------------------------------------------------------------------------------
geerlingguy.ntp
--------------------------------------------------------------------------------
/apps/home-assistant/.helmignore:
--------------------------------------------------------------------------------
.direnv/
--------------------------------------------------------------------------------
/apps/home-assistant/appdaemon/requirements.txt:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/metal/roles/prepare/files/02-armbian-periodic:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/apps/home-assistant/appdaemon/apps/tests/__init__.py:
--------------------------------------------------------------------------------

--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
--8<--
README.md
--8<--
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
mkdocs-include-dir-to-nav==1.2.0
--------------------------------------------------------------------------------
/apps/esphome/config/requirements.txt:
--------------------------------------------------------------------------------
esphome
pillow
--------------------------------------------------------------------------------
/apps/tanque/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: tanque
version: 0.0.0
--------------------------------------------------------------------------------
/bootstrap/root/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: root
version: 0.0.0
--------------------------------------------------------------------------------
/apps/home-assistant/appdaemon/requirements_test.txt:
--------------------------------------------------------------------------------
pytest
pytest-asyncio
--------------------------------------------------------------------------------
/metal/inventory/group_vars/all/backup.yml:
--------------------------------------------------------------------------------
backup_target_dir: /datasets/backups
--------------------------------------------------------------------------------
/system/kanidm/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: kanidm
version: 0.0.0
--------------------------------------------------------------------------------
/docs/images/rack.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/docs/images/rack.jpg
--------------------------------------------------------------------------------
/metal/inventory/group_vars/all/k3s.yml:
--------------------------------------------------------------------------------
---
k3s_arch: "amd64"
k3s_binary: "k3s"
--------------------------------------------------------------------------------
/metal/inventory/group_vars/all/ssh.yml:
--------------------------------------------------------------------------------
---
ansible_user: "{{ prepare_username }}"
--------------------------------------------------------------------------------
/system/zfs-exporter/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: zfs-exporter
version: 0.0.0
--------------------------------------------------------------------------------
/metal/inventory/group_vars/arm.yml:
--------------------------------------------------------------------------------
---
k3s_arch: "arm64"
k3s_binary: "k3s-{{ k3s_arch }}"
--------------------------------------------------------------------------------
/docs/images/k8s-amd64-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/docs/images/k8s-amd64-1.jpg
--------------------------------------------------------------------------------
/metal/roles/prepare/README.md:
--------------------------------------------------------------------------------
# Prepare

Role intended to set up servers: login, packages, disks...
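
A minimal invocation sketch: the playbook and inventory paths below come from this
repository's `metal/` tree, while running from `metal/` so `ansible.cfg` is picked up
is an assumption, not something this README specifies.

```
cd metal
ansible-playbook -i inventory/hosts.ini playbooks/install/prepare.yml
```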
--------------------------------------------------------------------------------
/metal/roles/nvidia-container-runtime/handlers/main.yml:
--------------------------------------------------------------------------------
- name: restart server # noqa: name[casing]
  reboot:
--------------------------------------------------------------------------------
/metal/roles/setup/templates/sasl_passwd.j2:
--------------------------------------------------------------------------------
[smtp.gmail.com]:587 grigriserver@gmail.com:{{ lookup('passwordstore', 'grigri/smtp') }}
--------------------------------------------------------------------------------
/metal/roles/k3s/handlers/main.yml:
--------------------------------------------------------------------------------
- name: Restart k3s
  systemd:
    name: k3s
    daemon_reload: true
    state: restarted
--------------------------------------------------------------------------------
/.yamllint.yaml:
--------------------------------------------------------------------------------
ignore: |
  templates/

extends: default

rules:
  document-start: disable
  line-length: disable
--------------------------------------------------------------------------------
/.github/renovate-config.json:
--------------------------------------------------------------------------------
{
  "username": "renovate85-bot[bot]",
  "repositories": [
    "pando85/homelab"
  ]
}
--------------------------------------------------------------------------------
/apps/esphome/config/irrigation-controller/esp32.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/apps/esphome/config/irrigation-controller/esp32.jpg
--------------------------------------------------------------------------------
/metal/inventory/group_vars/all/ntp.yml:
--------------------------------------------------------------------------------
---
ntp_servers:
  - pfsense.grigri

ntp_timezone: Europe/Madrid
ntp_manage_config: true
--------------------------------------------------------------------------------
/platform/reloader/values.yaml:
--------------------------------------------------------------------------------
reloader:
  reloader:
    podMonitor:
      enabled: true
      labels:
        release: monitoring
--------------------------------------------------------------------------------
/system/kube-system/resources/runtime-class.yaml:
--------------------------------------------------------------------------------
apiVersion: node.k8s.io/v1
kind: RuntimeClass
metadata:
  name: nvidia
handler: nvidia
--------------------------------------------------------------------------------
/apps/esphome/config/irrigation-controller/esp32-box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/apps/esphome/config/irrigation-controller/esp32-box.jpg
--------------------------------------------------------------------------------
/apps/esphome/config/rack-controller/rack-controller.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/apps/esphome/config/rack-controller/rack-controller.jpg
--------------------------------------------------------------------------------
/apps/esphome/config/rack-controller/rack-controller-back.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/apps/esphome/config/rack-controller/rack-controller-back.jpg
--------------------------------------------------------------------------------
/apps/esphome/config/rack-controller/rack-controller-box.FCStd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/apps/esphome/config/rack-controller/rack-controller-box.FCStd
--------------------------------------------------------------------------------
/apps/esphome/config/rack-controller/rack-controller-front.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/apps/esphome/config/rack-controller/rack-controller-front.jpg
--------------------------------------------------------------------------------
/apps/esphome/config/irrigation-controller/esp32_with_valve.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/apps/esphome/config/irrigation-controller/esp32_with_valve.jpg
--------------------------------------------------------------------------------
/apps/esphome/config/rack-controller/rack-controller-box-Box.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/apps/esphome/config/rack-controller/rack-controller-box-Box.stl
--------------------------------------------------------------------------------
/apps/tanque/templates/service.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: tanque
spec:
  type: ExternalName
  externalName: tanque.iot.grigri
--------------------------------------------------------------------------------
/bootstrap/root/apply.sh:
--------------------------------------------------------------------------------
#!/bin/sh

helm template \
  --include-crds \
  --namespace argocd \
  argocd . \
  | kubectl -n argocd apply -f -
--------------------------------------------------------------------------------
/metal/requirements.txt:
--------------------------------------------------------------------------------
ansible~=9.4
ansible-core~=2.16
netaddr~=1.2
pbr~=6.0
jmespath~=1.0
ruamel.yaml~=0.18
openshift~=0.13
ansible-lint~=24.2
--------------------------------------------------------------------------------
/system/kube-system/resources/nodelocaldns/serviceaccount.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ServiceAccount
metadata:
  name: node-local-dns
  namespace: kube-system
--------------------------------------------------------------------------------
/apps/esphome/config/rack-controller/rack-controller-box-Cover.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/apps/esphome/config/rack-controller/rack-controller-box-Cover.stl
--------------------------------------------------------------------------------
/apps/home-assistant/config/integrations/appdaemon_dhw.yaml:
--------------------------------------------------------------------------------
input_boolean:
  appdaemon_dhw_enable:
    name: AppDaemon DHW Control Enable
    icon: mdi:thermometer-auto
--------------------------------------------------------------------------------
/platform/minio/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: minio
version: 0.0.0
dependencies:
  - name: minio
    version: 5.4.0
    repository: https://charts.min.io/
--------------------------------------------------------------------------------
/apps/antdroid/templates/configmap.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
{{ (.Files.Glob "config/*.conf").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/apps/esphome/templates/configmap.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: esphome-config
data:
{{ (.Files.Glob "config/*.yaml").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/apps/mosquitto/templates/configmap.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: mosquitto-config
data:
{{ (.Files.Glob "config/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/apps/special-web/templates/configmap.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: nginx-config
data:
{{ (.Files.Glob "config/*.conf").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Environment
.env
.envrc
.venv/
.direnv/

book/

*.iso
*.log
*.png
*.tgz
*kubeconfig.yaml
Chart.lock
charts
--------------------------------------------------------------------------------
/apps/cross-backups/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: cross-backups
version: 0.0.0
dependencies:
  - name: minio
    version: 5.4.0
    repository: https://charts.min.io/
--------------------------------------------------------------------------------
/apps/esphome/config/irrigation-controller/irrigation-controller-box.FCStd:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/apps/esphome/config/irrigation-controller/irrigation-controller-box.FCStd
--------------------------------------------------------------------------------
/apps/mosquitto-tls/templates/configmap.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: mosquitto-tls-config
data:
{{ (.Files.Glob "config/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/apps/ollama/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: ollama
version: 0.0.0
dependencies:
  - name: ollama
    version: 1.36.0
    repository: https://otwld.github.io/ollama-helm/
--------------------------------------------------------------------------------
/platform/git/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: gitea
version: 0.0.0
dependencies:
  - name: forgejo
    version: 15.0.3
    repository: oci://code.forgejo.org/forgejo-helm
--------------------------------------------------------------------------------
/system/kured/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: kured
version: 0.0.0
dependencies:
  - name: kured
    version: 5.10.0
    repository: https://kubereboot.github.io/charts
--------------------------------------------------------------------------------
/apps/esphome/config/irrigation-controller/irrigation-controller-box-Box.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/apps/esphome/config/irrigation-controller/irrigation-controller-box-Box.stl
--------------------------------------------------------------------------------
/apps/home-assistant/templates/appdaemon/configmap.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: hass-appdaemon
data:
{{ (.Files.Glob "appdaemon/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/apps/open-webui/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: open-webui
version: 0.0.0
dependencies:
  - name: open-webui
    version: 8.21.0
    repository: https://helm.openwebui.com/
--------------------------------------------------------------------------------
/bootstrap/argocd/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: argocd
version: 0.0.0
dependencies:
  - name: argo-cd
    version: 9.1.9
    repository: https://argoproj.github.io/argo-helm
--------------------------------------------------------------------------------
/apps/atuin/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: atuin
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/bazarr/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: bazarr
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/esphome/config/irrigation-controller/irrigation-controller-box-Cover.stl:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/pando85/homelab/HEAD/apps/esphome/config/irrigation-controller/irrigation-controller-box-Cover.stl
--------------------------------------------------------------------------------
/apps/immich/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: immich
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/kroki/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: kroki
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/lidarr/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: lidarr
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/radarr/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: radarr
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/sonarr/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: sonarr
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/stump/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: stump
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/unifi/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: unifi
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/platform/vault/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: vault
version: 0.0.0
dependencies:
  - name: vault-operator
    version: 1.23.4
    repository: oci://ghcr.io/bank-vaults/helm-charts
--------------------------------------------------------------------------------
/platform/velero/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: velero
version: 0.0.0
dependencies:
  - name: velero
    version: 11.2.0
    repository: https://vmware-tanzu.github.io/helm-charts
--------------------------------------------------------------------------------
/system/cert-manager/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: cert-manager
version: 0.0.0
dependencies:
  - name: cert-manager
    version: v1.19.2
    repository: https://charts.jetstack.io
--------------------------------------------------------------------------------
/apps/antdroid/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: antdroid
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/dawarich/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: dawarich
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/esphome/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: esphome
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/freshrss/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: freshrss
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/http-echo/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: http-echo
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/jellyfin/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: jellyfin
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/mosquitto/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: mosquitto
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/navidrome/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: navidrome
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/prowlarr/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: prowlarr
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/unpackerr/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: unpackerr
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/wallabag/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: wallabag
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/platform/reloader/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: reloader
version: 0.0.0
dependencies:
  - name: reloader
    version: 2.2.7
    repository: https://stakater.github.io/stakater-charts
--------------------------------------------------------------------------------
/apps/esphome/templates/configmap-packages.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: esphome-config-packages
data:
{{ (.Files.Glob "config/packages/*.yaml").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/apps/home-assistant/templates/appdaemon/configmap-apps.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: hass-appdaemon-apps
data:
{{ (.Files.Glob "appdaemon/apps/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/apps/jellyseerr/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: jellyseerr
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/m-rajoy-api/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: m-rajoy-api
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/mintpsicologia/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: mintpsicologia
version: 0.0.0
dependencies:
  - name: wordpress
    version: 28.1.0
    repository: https://charts.bitnami.com/bitnami
--------------------------------------------------------------------------------
/apps/qbittorrent/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: qbittorrent
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/special-web/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: special-web
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/transcoder/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: transcorder
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/metal/inventory/host_vars/k8s-odroid-hc4-3.yml:
--------------------------------------------------------------------------------
prepare_additional_disks:
  - dev_id: /dev/disk/by-id/ata-TOSHIBA_HDWQ140_Y8I5K0D2FAYG
  - dev_id: /dev/disk/by-id/ata-SanDisk_SD6SB1M-128G-1006_141924401021
--------------------------------------------------------------------------------
/system/zfs-localpv/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: zfs-localpv
version: 0.0.0
dependencies:
  - name: zfs-localpv
    version: 2.9.0
    repository: https://openebs.github.io/zfs-localpv
--------------------------------------------------------------------------------
/apps/home-assistant/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: home-assistant
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/m-rajoy-front/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: m-rajoy-front
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/mosquitto-tls/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: mosquitto-tls
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/apps/telegram-bot/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: telegram-bot
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/metal/inventory/host_vars/k8s-odroid-hc4-1.yml:
--------------------------------------------------------------------------------
prepare_additional_disks:
  - dev_id: /dev/disk/by-id/ata-WDC_WD30EZRX-00DC0B0_WD-WMC1T2457392
  - dev_id: /dev/disk/by-id/ata-SanDisk_SDSSDHII240G_170234400122
--------------------------------------------------------------------------------
/metal/inventory/host_vars/k8s-odroid-hc4-2.yml:
--------------------------------------------------------------------------------
prepare_additional_disks:
  - dev_id: /dev/disk/by-id/ata-WDC_WD30EZRX-00DC0B0_WD-WMC1T2292099
  - dev_id: /dev/disk/by-id/ata-SanDisk_SDSSDHII240G_170235401310
--------------------------------------------------------------------------------
/metal/roles/zfs_exporter/handlers/main.yml:
--------------------------------------------------------------------------------
---
- name: Restart zfs-exporter
  become: true
  systemd:
    name: zfs-exporter
    enabled: true
    daemon_reload: true
    state: restarted
--------------------------------------------------------------------------------
/system/cert-manager/templates/clusterissuer-selfsigned-for-k8s-webhooks.yaml:
--------------------------------------------------------------------------------
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: selfsigned-for-k8s-webhooks
spec:
  selfSigned: {}
--------------------------------------------------------------------------------
/system/pod-cleaner/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: pod-cleaner
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/system/zfs-localpv/tests/snapshot.yaml:
--------------------------------------------------------------------------------
apiVersion: snapshot.storage.k8s.io/v1
kind: VolumeSnapshot
metadata:
  name: new-snapshot-test
spec:
  source:
    persistentVolumeClaimName: test-claim
--------------------------------------------------------------------------------
/apps/github-exporter/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: github-exporter
version: 0.0.0
dependencies:
  - name: app-template
    version: 4.5.0
    repository: https://bjw-s-labs.github.io/helm-charts/
--------------------------------------------------------------------------------
/platform/git/templates/kanidm-groups.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kaniop.rs/v1beta1
kind: KanidmGroup
metadata:
  name: git-users
spec:
  kanidmRef:
    name: kanidm
    namespace: kanidm
--------------------------------------------------------------------------------
/system/external-dns/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: external-dns
version: 0.0.0
dependencies:
  - name: external-dns
    version: 1.19.0
    repository: https://kubernetes-sigs.github.io/external-dns/
--------------------------------------------------------------------------------
/system/ingress-nginx/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: ingress-nginx
version: 0.0.0
dependencies:
  - name: ingress-nginx
    version: 4.14.1
    repository: https://kubernetes.github.io/ingress-nginx
--------------------------------------------------------------------------------
/apps/freshrss/templates/kanidm-groups.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kaniop.rs/v1beta1
kind: KanidmGroup
metadata:
  name: freshrss-users
spec:
  kanidmRef:
    name: kanidm
    namespace: kanidm
--------------------------------------------------------------------------------
/apps/nextcloud/templates/kanidm-groups.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kaniop.rs/v1beta1
kind: KanidmGroup
metadata:
  name: nextcloud-users
spec:
  kanidmRef:
    name: kanidm
    namespace: kanidm
--------------------------------------------------------------------------------
/bootstrap/argocd/templates/kanidm-groups.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kaniop.rs/v1beta1
kind: KanidmGroup
metadata:
  name: argocd-users
spec:
  kanidmRef:
    name: kanidm
    namespace: kanidm
--------------------------------------------------------------------------------
/platform/minio/templates/kanidm-groups.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kaniop.rs/v1beta1
kind: KanidmGroup
metadata:
  name: minio-users
spec:
  kanidmRef:
    name: kanidm
    namespace: kanidm
--------------------------------------------------------------------------------
/platform/vault/templates/kanidm-groups.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kaniop.rs/v1beta1
kind: KanidmGroup
metadata:
  name: vault-admins
spec:
  kanidmRef:
    name: kanidm
    namespace: kanidm
--------------------------------------------------------------------------------
/apps/jellyseerr/templates/kanidm-groups.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kaniop.rs/v1beta1
kind: KanidmGroup
metadata:
  name: jellyseerr-users
spec:
  kanidmRef:
    name: kanidm
    namespace: kanidm
--------------------------------------------------------------------------------
/apps/open-webui/templates/kanidm-group.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kaniop.rs/v1beta1
kind: KanidmGroup
metadata:
  name: open-webui-users
spec:
  kanidmRef:
    name: kanidm
    namespace: kanidm
--------------------------------------------------------------------------------
/platform/velero/templates/dashboards.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: dashboards-velero
  labels:
    grafana_dashboard: "1"
data:
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/system/ingress-nginx-external/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2
name: ingress-nginx
version: 0.0.0
dependencies:
  - name: ingress-nginx
    version: 4.14.1
    repository: https://kubernetes.github.io/ingress-nginx
--------------------------------------------------------------------------------
/system/kanidm/templates/dbcloud/kanidm-groups.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kaniop.rs/v1beta1
kind: KanidmGroup
metadata:
  name: dbcloud-users
spec:
  kanidmRef:
    name: kanidm
    namespace: kanidm
--------------------------------------------------------------------------------
/apps/esphome/config/.gitignore:
--------------------------------------------------------------------------------
# Gitignore settings for ESPHome
# This is an example and may include too much for your use-case.
# You can modify this file to suit your needs.
/.esphome/
/secrets.yaml
.envrc
.direnv
--------------------------------------------------------------------------------
/apps/home-assistant/templates/configmap.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: home-assistant-config
data:
  # this removes the subdirectory hierarchy
{{ (.Files.Glob "config/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/apps/navidrome/templates/dashboards.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: dashboards-navidrome
  labels:
    grafana_dashboard: "1"
data:
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/metal/requirements.yml:
--------------------------------------------------------------------------------
---
roles:
  - src: geerlingguy.ntp
    version: 2.0.0

collections:
  - name: community.general
    version: 8.5.0

  - name: community.crypto
    version: 2.18.0
--------------------------------------------------------------------------------
/bootstrap/argocd/templates/argocd-dashboard.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: dashboards-argocd
  labels:
    grafana_dashboard: "1"
data:
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/system/oauth2-proxy/templates/kanidm-groups.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kaniop.rs/v1beta1
kind: KanidmGroup
metadata:
  name: k8s-oauth2-proxy-users
spec:
  kanidmRef:
    name: kanidm
    namespace: kanidm
--------------------------------------------------------------------------------
/system/zfs-exporter/templates/dashboards.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: dashboards-zfs-exporter
  labels:
    grafana_dashboard: "1"
data:
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/apps/github-exporter/templates/dashboards.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: dashboards-github-exporter
  labels:
    grafana_dashboard: "1"
data:
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/apps/qbittorrent/templates/dashboards.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: dashboards-qbittorrent-exporter
  labels:
    grafana_dashboard: "1"
data:
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/system/ingress-nginx/templates/configmap-dashboards.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: dashboards-nginx
  labels:
    grafana_dashboard: "1"
data:
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/system/cert-manager/templates/configmap-dashboards.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: dashboards-cert-manager
  labels:
    grafana_dashboard: "1"
data:
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/system/kube-system/resources/cilium/load-balancer-ip-pool.yaml:
--------------------------------------------------------------------------------
apiVersion: "cilium.io/v2alpha1"
kind: CiliumLoadBalancerIPPool
metadata:
  name: default
spec:
  allowFirstLastIPs: "Yes"
  blocks:
    - cidr: "192.168.193.0/24"
--------------------------------------------------------------------------------
/apps/home-assistant/templates/configmap-scripts.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: home-assistant-config-script
data:
  # this removes the subdirectory hierarchy
{{ (.Files.Glob "config/script/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/apps/home-assistant/templates/home-assistant-dashboard.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: ConfigMap
metadata:
  name: dashboards-home-assistants
  labels:
    grafana_dashboard: "1"
data:
{{ (.Files.Glob "dashboards/*").AsConfig | indent 2 }}
--------------------------------------------------------------------------------
/platform/postgres-operator/Chart.yaml:
--------------------------------------------------------------------------------
apiVersion: v2 2 | name: postgres-operator 3 | version: 0.0.0 4 | dependencies: 5 | - name: postgres-operator 6 | version: 1.15.1 7 | repository: https://opensource.zalando.com/postgres-operator/charts/postgres-operator/ 8 | -------------------------------------------------------------------------------- /system/cert-manager/templates/clusterissuer-internal.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: internal 6 | namespace: cert-manager 7 | spec: 8 | ca: 9 | secretName: kubernetes-internal-ca-key-pair 10 | -------------------------------------------------------------------------------- /apps/home-assistant/templates/configmap-integrations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: home-assistant-config-integrations 5 | data: 6 | # this removes the subdirectory hierarchy 7 | {{ (.Files.Glob "config/integrations/*").AsConfig | indent 2 }} 8 | -------------------------------------------------------------------------------- /platform/velero/tests/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | backup/retain: test 6 | name: test 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 500Mi 13 | -------------------------------------------------------------------------------- /apps/home-assistant/appdaemon/.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # Env 7 | .envrc 8 | .direnv 9 | .env.list 10 | 11 | # AppDaemon 12 | dashboards/ 13 | compiled/ 14 | namespaces/ 15 | www/ 16 | appdaemon.yaml 17 | -------------------------------------------------------------------------------- /metal/roles/nvidia-container-runtime/files/nvidia-container-toolkit.list: -------------------------------------------------------------------------------- 1 | deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://nvidia.github.io/libnvidia-container/stable/deb/$(ARCH) / 2 | #deb https://nvidia.github.io/libnvidia-container/experimental/deb/$(ARCH) / 3 | -------------------------------------------------------------------------------- /docs/user-guide/wallabag.md: -------------------------------------------------------------------------------- 1 | # Wallabag 2 | 3 | ## Fix user login failure 4 | 5 | ```bash 6 | cd /var/www/wallabag 7 | su -c "php bin/console cache:clear --env=prod" -s /bin/sh nobody 8 | su -c "php bin/console doctrine:migrations:migrate --no-interaction --env=prod" -s /bin/sh nobody 9 | ``` 10 | -------------------------------------------------------------------------------- /apps/stump/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: stump-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: stump-zfs 12 | -------------------------------------------------------------------------------- /system/zfs-localpv/templates/snapshot-class.yaml: -------------------------------------------------------------------------------- 1 | kind:
VolumeSnapshotClass 2 | apiVersion: snapshot.storage.k8s.io/v1 3 | metadata: 4 | name: zfspv-snapclass 5 | annotations: 6 | snapshot.storage.kubernetes.io/is-default-class: "true" 7 | driver: zfs.csi.openebs.io 8 | deletionPolicy: Delete 9 | -------------------------------------------------------------------------------- /apps/lidarr/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: lidarr-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: lidarr-zfs 12 | -------------------------------------------------------------------------------- /apps/radarr/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: radarr-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: radarr-zfs 12 | -------------------------------------------------------------------------------- /apps/unifi/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: unifi-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: unifi-zfs 12 | -------------------------------------------------------------------------------- /platform/git/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: gitea-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: gitea-zfs 12 | -------------------------------------------------------------------------------- /platform/vault/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: vault-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | vault_cr: vault 12 | -------------------------------------------------------------------------------- /apps/bazarr/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: bazarr-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: bazarr-zfs 12 | -------------------------------------------------------------------------------- /apps/immich/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: immich-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: immich-zfs 12 | -------------------------------------------------------------------------------- /apps/jellyfin/templates/snapshots.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: jellyfin-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: jellyfin-zfs 12 | -------------------------------------------------------------------------------- /apps/prowlarr/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: prowlarr-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: prowlarr-zfs 12 | -------------------------------------------------------------------------------- /apps/sonarr/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: sonarr-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: sonarr-zfs 12 | -------------------------------------------------------------------------------- /apps/wallabag/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: wallabag-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: wallabag-zfs 12 | -------------------------------------------------------------------------------- /metal/roles/prepare/files/20auto-upgrades: -------------------------------------------------------------------------------- 1 | // config doc: https://debian-handbook.info/browse/stable/sect.regular-upgrades.html 2 | APT::Periodic::Update-Package-Lists "30"; 3 | APT::Periodic::Download-Upgradeable-Packages "30"; 4 | APT::Periodic::AutocleanInterval "7"; 5 | APT::Periodic::Unattended-Upgrade "1"; 6 | -------------------------------------------------------------------------------- /system/kanidm/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: kanidm-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: kanidm-zfs 12 | -------------------------------------------------------------------------------- /apps/antdroid/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: antdroid-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 2 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: antdroid-zfs 12 | -------------------------------------------------------------------------------- /apps/freshrss/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: freshrss-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: freshrss-zfs 12 | 
-------------------------------------------------------------------------------- /apps/mosquitto/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: mosquitto-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: mosquitto-zfs 12 | -------------------------------------------------------------------------------- /apps/navidrome/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: navidrome-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: navidrome-zfs 12 | -------------------------------------------------------------------------------- /apps/nextcloud/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: nextcloud-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: nextcloud-zfs 12 | -------------------------------------------------------------------------------- /docs/user-guide/kubernetes-upgrade.md: -------------------------------------------------------------------------------- 1 | # Kubernetes upgrade 2 | 3 | Update `k3s_version` to the desired version and then run: 4 | 5 | ```bash 6 | cd metal 7 | ANSIBLE_EXTRA_ARGS="-t k3s-upgrade -e serial=1" make cluster 8 | ``` 9 | 10 | *Note*: it also worked perfectly with `serial=100%` or when run with the defaults.
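After the playbook finishes, you can confirm that every node reports the expected version; a minimal check, assuming your kubeconfig already points at the cluster:

```bash
# The VERSION column should show the new k3s version on every node
kubectl get nodes -o wide
```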
11 | -------------------------------------------------------------------------------- /apps/nextcloud/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: nextcloud 3 | version: 0.0.0 4 | dependencies: 5 | - name: nextcloud 6 | version: 8.7.0 7 | repository: https://nextcloud.github.io/helm/ 8 | - name: app-template 9 | version: 4.5.0 10 | repository: https://bjw-s-labs.github.io/helm-charts/ 11 | -------------------------------------------------------------------------------- /apps/qbittorrent/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: qbittorrent-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: qbittorrent-zfs 12 | -------------------------------------------------------------------------------- /apps/special-web/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: special-web-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 2 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: special-web-zfs 12 | -------------------------------------------------------------------------------- /apps/jellyseerr/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: jellyseerr-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | snapshots: jellyseerr-zfs 12 | -------------------------------------------------------------------------------- /apps/m-rajoy-api/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: m-rajoy-api-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: m-rajoy-api-zfs 12 | -------------------------------------------------------------------------------- /docs/user-guide/grigri.md: -------------------------------------------------------------------------------- 1 | # grigri 2 | 3 | Motherboard: Supermicro A1SRi-2758F 4 | 5 | ## Remote access 6 | 7 | Go to [https://grigri-ipmi.grigri](https://grigri-ipmi.grigri): `Remote Control -> Console Redirection` 8 | 9 | ```bash 10 | sudo archlinux-java set java-8-openjdk 11 | javaws /tmp/launch.jnlp 12 | ``` 13 | -------------------------------------------------------------------------------- /metal/roles/pikvm/files/kvmd-fan.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=PiKVM - A small fan controller daemon 3 | After=systemd-modules-load.service 4 | 5 | [Service] 6 | Type=simple 7 | Restart=always 8 | RestartSec=3 9 | ExecStart=/usr/bin/kvmd-fan 10 | TimeoutStopSec=3 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /apps/atuin/templates/snapshots-postgres.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: atuin-postgres-backups 5 | spec: 6 | retention: 7 | maxCount: 1 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | cluster-name: atuin-postgres 12 | -------------------------------------------------------------------------------- /apps/esphome/config/garage-door-opener.yaml: -------------------------------------------------------------------------------- 1 | substitutions: 2 | name: "garage-door-opener" 3 | friendly_name: "Garage Door Opener" 4 | project_name: "garage-door-opener" 5 | project_version: "1.1" 6 | 7 | packages: 8 | connection: !include ./packages/connection.yaml 9 | cc1101: !include ./packages/cc1101.yaml 10 | -------------------------------------------------------------------------------- /apps/home-assistant/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: home-assistant-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: home-assistant-zfs 12 | -------------------------------------------------------------------------------- /apps/mintpsicologia/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: wordpress-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | app.kubernetes.io/name: wordpress 12 | -------------------------------------------------------------------------------- /apps/mosquitto-tls/templates/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: mosquitto-tls-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: mosquitto-tls-zfs 12 | -------------------------------------------------------------------------------- /apps/telegram-bot/templates/mongo/snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: telegram-bot-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | backup: telegram-bot-zfs 12 | -------------------------------------------------------------------------------- /platform/git/templates/snapshots-postgres.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: gitea-postgres-backups 5 | spec: 6 | retention: 7 | maxCount: 1 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | cluster-name: gitea-postgres 12 | -------------------------------------------------------------------------------- /system/kube-system/test/priority-class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: test-priority-class-high 5 | spec: 6 | restartPolicy: Never 7 | priorityClassName: high-priority 8 | containers: 9 | - name: test-priority-class 10 | image: busybox 11 | command: ["sh", 
"-c", "sleep 1h"] 12 | -------------------------------------------------------------------------------- /apps/ollama/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: ollama 6 | app.kubernetes.io/name: ollama 7 | name: ollama-data 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 10Gi 14 | -------------------------------------------------------------------------------- /apps/freshrss/templates/snapshots-postgres.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: freshrss-postgres-backups 5 | spec: 6 | retention: 7 | maxCount: 1 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | cluster-name: freshrss-postgres 12 | -------------------------------------------------------------------------------- /apps/wallabag/templates/snapshots-postgres.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: wallabag-postgres-backups 5 | spec: 6 | retention: 7 | maxCount: 1 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | cluster-name: wallabag-postgres 12 | -------------------------------------------------------------------------------- /system/kube-system/resources/priority-class-high.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scheduling.k8s.io/v1 2 | kind: PriorityClass 3 | metadata: 4 | name: high-priority 5 | # Default value: 0 6 | # Value range: -2147483648 to 1000000000 7 | value: 1000 8 | globalDefault: false 9 | description: "This priority class is used for high priority pods." 
10 | -------------------------------------------------------------------------------- /system/oauth2-proxy/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: oauth2-proxy 3 | version: 0.0.0 4 | dependencies: 5 | - name: oauth2-proxy 6 | version: 10.0.0 7 | repository: https://oauth2-proxy.github.io/manifests 8 | - name: app-template 9 | version: 4.5.0 10 | repository: https://bjw-s-labs.github.io/helm-charts/ 11 | -------------------------------------------------------------------------------- /apps/immich/templates/pvc-models.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: immich 6 | app.kubernetes.io/name: immich 7 | name: immich-models 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 20Gi 14 | -------------------------------------------------------------------------------- /apps/nextcloud/templates/snapshots-postgresql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: nextcloud-postgres-backups 5 | spec: 6 | retention: 7 | maxCount: 1 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | cluster-name: nextcloud-postgres 12 | -------------------------------------------------------------------------------- /bootstrap/argocd/apply.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | helm template \ 4 | --include-crds \ 5 | --namespace argocd \ 6 | argocd . \ 7 | | kubectl -n argocd apply -f - 8 | 9 | kubectl -n argocd wait --timeout=60s --for condition=Established \ 10 | crd/applications.argoproj.io \ 11 | crd/applicationsets.argoproj.io 12 | -------------------------------------------------------------------------------- /apps/mintpsicologia/templates/snapshots-mariadb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: wordpress-mariadb-zfs-backups 5 | spec: 6 | retention: 7 | maxCount: 20 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | app.kubernetes.io/name: mariadb 12 | -------------------------------------------------------------------------------- /apps/open-webui/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: open-webui 6 | app.kubernetes.io/name: open-webui 7 | name: open-webui-data 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 10Gi 14 | -------------------------------------------------------------------------------- /metal/roles/zfs_exporter/templates/service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=zfs-exporter 3 | After=network-online.target 4 | 5 | [Service] 6 | Restart=always 7 | RestartSec=5 8 | TimeoutSec=5 9 | User=root 10 | Group=root 11 | ExecStart=/usr/local/bin/zfs_exporter {{ zfs_exporter_arguments }} 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- 
/apps/home-assistant/templates/snapshots-postgres.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: snapscheduler.backube/v1 2 | kind: SnapshotSchedule 3 | metadata: 4 | name: home-assistant-postgres-backups 5 | spec: 6 | retention: 7 | maxCount: 1 8 | schedule: "0 1 * * *" # UTC 9 | claimSelector: 10 | matchLabels: 11 | cluster-name: home-assistant-postgres 12 | -------------------------------------------------------------------------------- /system/system-upgrade/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | # renovate: url-match: datasource=github-releases depName=rancher/system-upgrade-controller 6 | - https://github.com/rancher/system-upgrade-controller/releases/download/v0.18.0/system-upgrade-controller.yaml 7 | - k3s 8 | -------------------------------------------------------------------------------- /apps/flaresolverr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: flaresolverr 4 | 5 | helmCharts: 6 | - name: app-template 7 | namespace: flaresolverr 8 | releaseName: flaresolverr 9 | repo: https://bjw-s-labs.github.io/helm-charts/ 10 | valuesFile: values.yaml 11 | version: 4.5.0 12 | -------------------------------------------------------------------------------- /apps/mosquitto/config/mosquitto.conf: -------------------------------------------------------------------------------- 1 | per_listener_settings false 2 | 3 | listener 1883 4 | protocol mqtt 5 | 6 | listener 8083 7 | protocol websockets 8 | 9 | allow_anonymous false 10 | persistence true 11 | persistence_location /data 12 | autosave_interval 1800 13 | connection_messages false 14 | password_file /mosquitto/external_config/mosquitto_pwd 15 | -------------------------------------------------------------------------------- /docs/user-guide/wan-phone-failover.md: -------------------------------------------------------------------------------- 1 | # WAN phone failover 2 | 3 | Use a phone as a WAN failover mechanism. 4 | 5 | ## Steps 6 | 7 | Connect the phone to the router. On the phone: 8 | 9 | ``` 10 | Settings -> Portable hotspot -> USB tethering 11 | ``` 12 | 13 | On the router: 14 | 15 | ``` 16 | Interfaces -> Add ue8 -> DHCP 17 | System -> Routing ...
18 | ``` 19 | -------------------------------------------------------------------------------- /metal/inventory/group_vars/kube_node.yml: -------------------------------------------------------------------------------- 1 | k3s_kubelet_extra_args_node_default: 2 | - kube-reserved=cpu=300m,memory=300Mi,ephemeral-storage=1Gi 3 | - system-reserved=cpu=100m,memory=50Mi,ephemeral-storage=1Gi 4 | - eviction-hard=memory.available<300Mi,nodefs.available<10% 5 | 6 | k3s_kubelet_extra_args: "{{ k3s_kubelet_extra_args_node_default | default([]) }}" 7 | -------------------------------------------------------------------------------- /apps/esphome/config/mini-switch-1.yaml: -------------------------------------------------------------------------------- 1 | substitutions: 2 | name: "mini-switch-1" 3 | friendly_name: "Mini Switch 1" 4 | project_name: "athom.mini-switch-1" 5 | project_version: "1.1" 6 | light_restore_mode: RESTORE_DEFAULT_OFF 7 | 8 | packages: 9 | connection: !include ./packages/connection.yaml 10 | mini-switch: !include ./packages/mini-switch.yaml 11 | -------------------------------------------------------------------------------- /apps/esphome/config/mini-switch-2.yaml: -------------------------------------------------------------------------------- 1 | substitutions: 2 | name: "mini-switch-2" 3 | friendly_name: "Mini Switch 2" 4 | project_name: "athom.mini-switch-2" 5 | project_version: "1.1" 6 | light_restore_mode: RESTORE_DEFAULT_OFF 7 | 8 | packages: 9 | connection: !include ./packages/connection.yaml 10 | mini-switch: !include ./packages/mini-switch.yaml 11 | -------------------------------------------------------------------------------- /apps/esphome/config/mini-switch-3.yaml: -------------------------------------------------------------------------------- 1 | substitutions: 2 | name: "mini-switch-3" 3 | friendly_name: "Mini Switch 3" 4 | project_name: "athom.mini-switch-3" 5 | project_version: "1.1" 6 | light_restore_mode: RESTORE_DEFAULT_OFF 7 | 8 | packages: 9 | connection: !include ./packages/connection.yaml 10 | mini-switch: !include ./packages/mini-switch.yaml 11 | -------------------------------------------------------------------------------- /apps/esphome/config/mini-switch-4.yaml: -------------------------------------------------------------------------------- 1 | substitutions: 2 | name: "mini-switch-4" 3 | friendly_name: "Mini Switch 4" 4 | project_name: "athom.mini-switch-4" 5 | project_version: "1.1" 6 | light_restore_mode: RESTORE_DEFAULT_OFF 7 | 8 | packages: 9 | connection: !include ./packages/connection.yaml 10 | mini-switch: !include ./packages/mini-switch.yaml 11 | -------------------------------------------------------------------------------- /apps/home-assistant/templates/pvc-models.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: home-assistant 6 | app.kubernetes.io/name: home-assistant 7 | name: home-assistant-models 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 20Gi 14 | -------------------------------------------------------------------------------- /apps/open-webui/templates/pvc-pipelines.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: open-webui-pipelines 6 | app.kubernetes.io/instance: open-webui 7 | name: 
pipelines-data 8 | spec: 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 2Gi 14 | -------------------------------------------------------------------------------- /metal/inventory/host_vars/grigri.yml: -------------------------------------------------------------------------------- 1 | k3s_kubelet_extra_args: 2 | - kube-reserved=cpu=0.5,memory=1Gi,ephemeral-storage=1Gi 3 | - system-reserved=cpu=2,memory=10Gi,ephemeral-storage=1Gi 4 | - eviction-hard=memory.available<300Mi,nodefs.available<10% 5 | 6 | zfs_arc_min_gb: 1 7 | zfs_arc_max_gb: 8 8 | 9 | l2arc_write_max_mb: 120 10 | l2arc_write_boost_mb: 160 11 | -------------------------------------------------------------------------------- /system/zfs-exporter/templates/scrape-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1alpha1 2 | kind: ScrapeConfig 3 | metadata: 4 | name: zfs-exporter 5 | labels: 6 | release: monitoring 7 | spec: 8 | staticConfigs: 9 | - targets: 10 | - grigri.grigri:9134 11 | - prusik.grigri:9134 12 | scrapeInterval: 30s 13 | scrapeTimeout: 15s 14 | -------------------------------------------------------------------------------- /apps/mosquitto/templates/prometheus-rules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: mosquitto 5 | labels: 6 | release: monitoring 7 | spec: 8 | groups: 9 | - name: mosquitto.rules 10 | rules: 11 | {{- range $path, $_ := .Files.Glob "rules/*.yaml" }} 12 | {{ $.Files.Get $path | indent 8 }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /docs/user-guide/vpn.md: -------------------------------------------------------------------------------- 1 | # VPN 2 | 3 | Add a new user to the VPN group to allow them to connect to the VPN. 4 | 5 | ```bash 6 | export USER= 7 | kanidm group add-members vpn-users ${USER} 8 | kanidm person posix set ${USER} 9 | ``` 10 | 11 | You can then download the VPN configuration file from the web interface at 12 | `https://pfsense.grigri/vpn_openvpn_export.php`. 
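To verify the membership change before the user connects, you can list the group from the kanidm CLI; a quick sketch, assuming an authenticated session with permission to read the group (the exact subcommand name may vary between kanidm versions):

```bash
# Show current members of the VPN group; the new user should be listed
kanidm group list-members vpn-users
```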
13 | -------------------------------------------------------------------------------- /apps/qbittorrent/rules/custom.yaml: -------------------------------------------------------------------------------- 1 | - alert: QBittorrentSlowDownloading 2 | annotations: 3 | description: | 4 | {{ $value }} torrents from the {{ $labels.category }} category have been in downloading status 5 | for longer than 2 days 6 | expr: qbittorrent_torrents_count{status=~"(downloading|stalledDL)"} > 0 7 | for: 2d 8 | labels: 9 | severity: warning 10 | -------------------------------------------------------------------------------- /system/cert-manager/templates/prometheus-rules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: cert-manager 5 | labels: 6 | release: monitoring 7 | spec: 8 | groups: 9 | - name: cert-manager 10 | rules: 11 | {{- range $path, $_ := .Files.Glob "rules/*.yaml" }} 12 | {{ $.Files.Get $path | indent 8 }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /apps/qbittorrent/templates/prometheus-rules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: qbittorrent 5 | labels: 6 | release: monitoring 7 | spec: 8 | groups: 9 | - name: qbittorrent.rules 10 | rules: 11 | {{- range $path, $_ := .Files.Glob "rules/*.yaml" }} 12 | {{ $.Files.Get $path | indent 8 }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /system/zfs-exporter/templates/prometheus-rule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: zfs-exporter 5 | labels: 6 | release: monitoring 7 | spec: 8 | groups: 9 | - name: zfs-exporter.rules 10 | rules: 11 | {{- range $path, $_ := .Files.Glob "rules/*.yaml" }} 12 | {{ $.Files.Get $path | indent 8 }} 13 | {{ end }} 14 | -------------------------------------------------------------------------------- /apps/esphome/config/mini-switch-office-a.yaml: -------------------------------------------------------------------------------- 1 | substitutions: 2 | name: "mini-switch-office-a" 3 | friendly_name: "Mini Switch Office A" 4 | project_name: "athom.mini-switch-office-a" 5 | project_version: "1.1" 6 | light_restore_mode: RESTORE_DEFAULT_OFF 7 | 8 | packages: 9 | connection: !include ./packages/connection.yaml 10 | mini-switch: !include ./packages/mini-switch.yaml 11 | -------------------------------------------------------------------------------- /apps/telegram-bot/templates/mongo/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mongo 5 | labels: 6 | name: mongo 7 | spec: 8 | ports: 9 | - port: 27017 10 | targetPort: 27017 11 | clusterIP: None 12 | selector: 13 | app.kubernetes.io/component: database 14 | app.kubernetes.io/instance: mongo 15 | app.kubernetes.io/name: telegram-bot 16 | -------------------------------------------------------------------------------- /apps/home-assistant/templates/prometheus-rules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: hass 5 | labels: 6 |
operator.prometheus.io/instance: prometheus-long-term 7 | spec: 8 | groups: 9 | - name: hass.rules 10 | rules: 11 | {{- range $path, $_ := .Files.Glob "rules/*.yaml" }} 12 | {{ $.Files.Get $path | indent 8 }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /apps/wallabag/templates/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: wallabag-secret 5 | namespace: wallabag 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | data: 11 | - secretKey: SYMFONY__ENV__SECRET 12 | remoteRef: 13 | key: /wallabag/secret 14 | property: secret 15 | -------------------------------------------------------------------------------- /apps/esphome/config/smart-plug-v2-terrace.yaml: -------------------------------------------------------------------------------- 1 | substitutions: 2 | device_name: "smart-plug-v2-terrace" 3 | friendly_name: "Smart Plug V2 Terrace" 4 | project_name: "athom.smart-plug-v2-terrace" 5 | project_version: "1.1" 6 | relay_restore_mode: RESTORE_DEFAULT_OFF 7 | 8 | packages: 9 | connection: !include ./packages/connection.yaml 10 | smart-plug-v2: !include ./packages/smart-plug-v2.yaml 11 | -------------------------------------------------------------------------------- /apps/immich/templates/kanidm-groups.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmGroup 4 | metadata: 5 | name: immich-users 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | --- 12 | apiVersion: kaniop.rs/v1beta1 13 | kind: KanidmGroup 14 | metadata: 15 | name: immich-admins 16 | spec: 17 | kanidmRef: 18 | name: kanidm 19 | namespace: kanidm 20 | -------------------------------------------------------------------------------- /docs/user-guide/install-pre-commit-hooks.md: -------------------------------------------------------------------------------- 1 | # Install pre-commit hooks 2 | 3 | Git hook scripts are useful for identifying simple issues before committing changes. 4 | 5 | Install [pre-commit](https://pre-commit.com/#install) first; one-liner for Arch users: 6 | 7 | ```sh 8 | sudo pacman -S python-pre-commit 9 | ``` 10 | 11 | Then install git hook scripts: 12 | 13 | ```sh 14 | make git-hooks 15 | ``` 16 | -------------------------------------------------------------------------------- /metal/roles/prepare/files/truncate-logs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eo pipefail 3 | 4 | LINES_TO_KEEP=1000 5 | LOGS_DIR=/var/log/ 6 | 7 | cd $LOGS_DIR 8 | find .
-type f -print | grep -E -v "(\.gz|\.xz|\.[0-9])" | while IFS= read -r file 9 | do 10 | echo "Truncating ${LOGS_DIR}${file}" 11 | # shellcheck disable=SC2005,SC2086 12 | echo "$(tail -n ${LINES_TO_KEEP} ${file})" > ${file} 13 | done 14 | -------------------------------------------------------------------------------- /platform/minio/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: minio 6 | app.kubernetes.io/name: minio 7 | annotations: 8 | argocd.argoproj.io/sync-options: Prune=false 9 | name: minio-backup 10 | spec: 11 | accessModes: 12 | - ReadWriteOnce 13 | resources: 14 | requests: 15 | storage: 4000Gi 16 | -------------------------------------------------------------------------------- /apps/esphome/config/irrigation-controller-mid.yaml: -------------------------------------------------------------------------------- 1 | substitutions: 2 | name: "irrigation-controller-mid" 3 | friendly_name: "Irrigation Controller Mid" 4 | id_prefix: "ic_mid_" 5 | project_name: "pando85.irrigation-controller" 6 | project_version: "0.0.0" 7 | 8 | packages: 9 | connection: !include ./packages/connection.yaml 10 | irrigation-controller: !include ./packages/irrigation-controller.yaml 11 | -------------------------------------------------------------------------------- /apps/esphome/templates/prometheus-rules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: esphome 5 | labels: 6 | operator.prometheus.io/instance: prometheus-long-term 7 | spec: 8 | groups: 9 | - name: rack-controller.rules 10 | rules: 11 | {{- range $path, $_ := .Files.Glob "rules/*.yaml" }} 12 | {{ $.Files.Get $path | indent 8 }} 13 | {{- end }} 14 | -------------------------------------------------------------------------------- /apps/immich/templates/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: immich-secrets 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: immich-secrets 12 | data: 13 | - secretKey: JWT_SECRET 14 | remoteRef: 15 | key: /immich/jwt 16 | property: secret 17 | -------------------------------------------------------------------------------- /apps/jellyfin/templates/kanidm-groups.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmGroup 4 | metadata: 5 | name: jellyfin-users 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | --- 12 | apiVersion: kaniop.rs/v1beta1 13 | kind: KanidmGroup 14 | metadata: 15 | name: jellyfin-admins 16 | spec: 17 | kanidmRef: 18 | name: kanidm 19 | namespace: kanidm 20 | -------------------------------------------------------------------------------- /apps/esphome/config/smart-plug-v2-snapmaker.yaml: -------------------------------------------------------------------------------- 1 | substitutions: 2 | device_name: "smart-plug-v2-snapmaker" 3 | friendly_name: "Smart Plug V2 Snapmaker" 4 | project_name: "athom.smart-plug-v2-snapmaker" 5 | project_version: "1.1" 6 | relay_restore_mode: RESTORE_DEFAULT_OFF 7 | 8 | packages: 9 | connection: !include ./packages/connection.yaml 10 | smart-plug-v2:
!include ./packages/smart-plug-v2.yaml 11 | -------------------------------------------------------------------------------- /metal/roles/prepare/templates/hosts.j2: -------------------------------------------------------------------------------- 1 | 127.0.0.1 localhost localhost.localdomain 2 | 127.0.1.1 {{ inventory_hostname }}.{{ prepare_domain }} {{ inventory_hostname }} 3 | ::1 {{ inventory_hostname }}.{{ prepare_domain }} {{ inventory_hostname }} ip6-localhost ip6-loopback localhost6 localhost6.localdomain 4 | fe00::0 ip6-localnet 5 | ff00::0 ip6-mcastprefix 6 | ff02::1 ip6-allnodes 7 | ff02::2 ip6-allrouters 8 | -------------------------------------------------------------------------------- /metal/roles/prepare/templates/netplan.j2: -------------------------------------------------------------------------------- 1 | network: 2 | version: 2 3 | renderer: networkd 4 | ethernets: 5 | {{ ansible_default_ipv4.alias }}: 6 | dhcp4: true 7 | dhcp4-overrides: 8 | use-mtu: false 9 | dhcp6-overrides: 10 | use-mtu: false 11 | dhcp6: true 12 | match: 13 | macaddress: {{ ansible_default_ipv4.macaddress }} 14 | mtu: {{ prepare_mtu }} 15 | -------------------------------------------------------------------------------- /system/cert-manager/templates/external-secret-vault-ca.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: vault-ca 5 | spec: 6 | secretStoreRef: 7 | kind: ClusterSecretStore 8 | name: vault 9 | target: 10 | name: vault-ca 11 | data: 12 | - secretKey: api-token 13 | remoteRef: 14 | key: /cert-manager/cloudflare 15 | property: token 16 | -------------------------------------------------------------------------------- /apps/cross-backups/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: cross-backup 6 | app.kubernetes.io/name: cross-backup 7 | annotations: 8 | argocd.argoproj.io/sync-options: Prune=false 9 | name: cross-backups 10 | spec: 11 | accessModes: 12 | - ReadWriteOnce 13 | resources: 14 | requests: 15 | storage: 10Ti 16 | -------------------------------------------------------------------------------- /metal/roles/setup/files/telegram-notification.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Telegram notification server service 3 | Wants=network-online.target 4 | After=network-online.target 5 | 6 | [Service] 7 | Type=oneshot 8 | ExecStart=/usr/local/bin/telegram-notification start 9 | ExecStop=/usr/local/bin/telegram-notification stop 10 | RemainAfterExit=yes 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /system/monitoring/resources/kanidm-groups-grafana.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmGroup 4 | metadata: 5 | name: grafana-users 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | --- 12 | apiVersion: kaniop.rs/v1beta1 13 | kind: KanidmGroup 14 | metadata: 15 | name: grafana-admins 16 | spec: 17 | kanidmRef: 18 | name: kanidm 19 | namespace: kanidm 20 | -------------------------------------------------------------------------------- /apps/esphome/config/irrigation-controller-corner.yaml:
-------------------------------------------------------------------------------- 1 | substitutions: 2 | name: "irrigation-controller-corner" 3 | friendly_name: "Irrigation Controller Corner" 4 | id_prefix: "ic_corner_" 5 | project_name: "pando85.irrigation-controller" 6 | project_version: "0.0.0" 7 | 8 | packages: 9 | connection: !include ./packages/connection.yaml 10 | irrigation-controller: !include ./packages/irrigation-controller.yaml 11 | -------------------------------------------------------------------------------- /metal/roles/pikvm/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart kvmd-oled 2 | systemd: 3 | name: kvmd-oled 4 | state: restarted 5 | enabled: yes 6 | daemon_reload: yes 7 | 8 | - name: restart kvmd-fan 9 | systemd: 10 | name: kvmd-fan 11 | state: restarted 12 | enabled: yes 13 | daemon_reload: yes 14 | 15 | - name: make edid configuration permanent 16 | shell: kvmd-edidconf --import=/root/edid.hex 17 | -------------------------------------------------------------------------------- /apps/dawarich/templates/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: dawarich-secrets 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: dawarich-secrets 12 | data: 13 | - secretKey: geoapify_api_key 14 | remoteRef: 15 | key: /dawarich/geoapify 16 | property: api-key 17 | -------------------------------------------------------------------------------- /apps/github-exporter/templates/external-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: github-exporter 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: github-exporter 12 | data: 13 | - secretKey: GITHUB_TOKEN 14 | remoteRef: 15 | key: /github-exporter/github 16 | property: token 17 | -------------------------------------------------------------------------------- /system/external-dns/templates/external-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: external-dns-cloudflare 5 | spec: 6 | secretStoreRef: 7 | kind: ClusterSecretStore 8 | name: vault 9 | target: 10 | name: external-dns-cloudflare 11 | data: 12 | - secretKey: token 13 | remoteRef: 14 | key: /external-dns/cloudflare 15 | property: token 16 | -------------------------------------------------------------------------------- /system/monitoring/smartctl-exporter-values.yaml: -------------------------------------------------------------------------------- 1 | serviceMonitor: 2 | enabled: true 3 | extraLabels: 4 | release: monitoring 5 | relabelings: 6 | - action: replace 7 | sourceLabels: 8 | - __meta_kubernetes_pod_node_name 9 | targetLabel: instance 10 | 11 | prometheusRules: 12 | enabled: true 13 | extraLabels: 14 | release: monitoring 15 | 16 | nodeSelector: 17 | kubernetes.io/arch: amd64 18 | -------------------------------------------------------------------------------- /bootstrap/Makefile: -------------------------------------------------------------------------------- 1 | .POSIX: 2 | 3 | default: namespace argocd root 4 | 5 | argocd/charts: argocd/Chart.yaml 6 | cd argocd \ 7 | && helm 
dependency update 8 | 9 | namespace: 10 | kubectl create namespace argocd --dry-run=client --output=yaml \ 11 | | kubectl apply -f - 12 | 13 | .PHONY: argocd 14 | argocd: argocd/charts 15 | cd argocd && \ 16 | ./apply.sh 17 | 18 | .PHONY: root 19 | root: 20 | cd root && \ 21 | ./apply.sh 22 | -------------------------------------------------------------------------------- /platform/velero/tests/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: write 5 | spec: 6 | containers: 7 | - name: write 8 | image: debian 9 | command: 10 | - sleep 11 | - "3600" 12 | volumeMounts: 13 | - mountPath: /mnt/pv 14 | name: test 15 | volumes: 16 | - name: test 17 | persistentVolumeClaim: 18 | claimName: test 19 | restartPolicy: Never 20 | -------------------------------------------------------------------------------- /system/cert-manager/templates/external-secret-external.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: cloudflare-api-token 5 | spec: 6 | secretStoreRef: 7 | kind: ClusterSecretStore 8 | name: vault 9 | target: 10 | name: cloudflare-api-token 11 | data: 12 | - secretKey: api-token 13 | remoteRef: 14 | key: /cert-manager/cloudflare 15 | property: token 16 | -------------------------------------------------------------------------------- /system/kube-system/test/cilium/pod-test-limit-egress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: test-limit-egress 5 | annotations: 6 | kubernetes.io/egress-bandwidth: "25M" 7 | # this annotation is ignored by cilium, it will be supported in 1.18.0 8 | kubernetes.io/ingress-bandwidth: "100M" 9 | spec: 10 | restartPolicy: Never 11 | containers: 12 | - name: speedtest 13 | image: gists/speedtest-cli 14 | -------------------------------------------------------------------------------- /docs/user-guide/expand-longhorn-volume.md: -------------------------------------------------------------------------------- 1 | # Expand Longhorn volume 2 | 3 | Longhorn requires volumes to be detached in order to expand them. 
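Before patching, it can help to confirm the volume actually shows as detached; a possible check, assuming Longhorn runs in the usual `longhorn-system` namespace:

```bash
# The STATE column should read "detached" once no pod mounts the PVC
kubectl -n longhorn-system get volumes.longhorn.io
```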
4 | 5 | An example of volume expansion: 6 | 7 | ```bash 8 | kubectl -n gitea delete --cascade=orphan sts gitea-postgres 9 | kubectl -n gitea delete pod gitea-postgres-0 10 | kubectl -n gitea patch pvc pgdata-gitea-postgres-0 -p '{ "spec": { "resources": { "requests": { "storage": "1.5Gi" }}}}' 11 | ``` 12 | -------------------------------------------------------------------------------- /metal/roles/k3s/templates/config.yaml.j2: -------------------------------------------------------------------------------- 1 | {% if inventory_hostname == groups['kube_control_plane'][0] %} 2 | cluster-init: true 3 | {% else %} 4 | server: https://{{ hostvars[groups['kube_control_plane'][0]].ansible_hostname }}:6443 5 | {% endif %} 6 | token-file: {{ k3s_token_file }} 7 | {% if 'kube_control_plane' in group_names %} 8 | {{ k3s_server_config | to_nice_yaml }} 9 | {% endif %} 10 | kubelet-arg: 11 | - "cluster-dns=169.254.25.10" 12 | -------------------------------------------------------------------------------- /system/kube-system/resources/nodelocaldns/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: node-local-dns 5 | namespace: kube-system 6 | labels: 7 | k8s-app: node-local-dns 8 | spec: 9 | ports: 10 | - name: dns 11 | port: 53 12 | protocol: UDP 13 | targetPort: 53 14 | - name: dns-tcp 15 | port: 53 16 | protocol: TCP 17 | targetPort: 53 18 | selector: 19 | k8s-app: node-local-dns 20 | -------------------------------------------------------------------------------- /docs/troubleshooting/mqtt.md: -------------------------------------------------------------------------------- 1 | # MQTT 2 | 3 | Connect to the mosquitto server with a web client: 4 | 5 | ```bash 6 | docker run --rm --name mqttx-web -p 80:80 emqx/mqttx-web 7 | ``` 8 | 9 | ```yaml 10 | name: random 11 | client_id: random 12 | host: mosquitto.internal.grigri.cloud 13 | port: 8083 14 | path: /mqtt 15 | username: vault:mosquitto/user#username 16 | password: vault:mosquitto/user#password 17 | ``` 18 | 19 | Subscribe to all topics with `#`.
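The same check also works from the command line with the standard mosquitto clients; a minimal sketch, assuming the plain MQTT listener on port 1883 is reachable from your machine and that `MQTT_USER`/`MQTT_PASSWORD` hold the credentials from vault:

```bash
# -t '#' subscribes to every topic; -v prints the topic next to each payload
mosquitto_sub -h mosquitto.internal.grigri.cloud -p 1883 \
  -u "$MQTT_USER" -P "$MQTT_PASSWORD" -t '#' -v
```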
20 | -------------------------------------------------------------------------------- /apps/mintpsicologia/templates/external-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: wordpress-secret 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: wordpress-secret 12 | data: 13 | - secretKey: wordpress-password 14 | remoteRef: 15 | key: /mintpsicologia/wordpress 16 | property: wordpress-password 17 | -------------------------------------------------------------------------------- /apps/bazarr/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: bazarr 6 | app.kubernetes.io/name: bazarr 7 | backup: bazarr-zfs 8 | backup/retain: weekly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: config-bazarr 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 1Gi 18 | -------------------------------------------------------------------------------- /apps/immich/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: immich 6 | app.kubernetes.io/name: immich 7 | backup: immich-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: immich-data 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 300Gi 18 | -------------------------------------------------------------------------------- /apps/lidarr/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: lidarr 6 | app.kubernetes.io/name: lidarr 7 | backup: lidarr-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: config-lidarr 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 10Gi 18 | -------------------------------------------------------------------------------- /apps/radarr/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: radarr 6 | app.kubernetes.io/name: radarr 7 | backup: radarr-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: config-radarr 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 10Gi 18 | -------------------------------------------------------------------------------- /apps/sonarr/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: sonarr 6 | app.kubernetes.io/name: sonarr 7 | backup: sonarr-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: config-sonarr 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 5Gi 18 | 
-------------------------------------------------------------------------------- /apps/stump/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: stump 6 | app.kubernetes.io/name: stump 7 | backup: stump-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: config-stump 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 500Mi 18 | -------------------------------------------------------------------------------- /apps/unifi/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: unifi 6 | app.kubernetes.io/name: unifi 7 | backup: unifi-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: config-unifi 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 5.5Gi 18 | -------------------------------------------------------------------------------- /system/kube-system/test/nvidia-device-plugin/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: gpu-nvidia-class 5 | spec: 6 | restartPolicy: Never 7 | runtimeClassName: nvidia 8 | containers: 9 | - name: gpu 10 | image: "nvidia/cuda:11.5.2-base-ubuntu20.04" 11 | command: ["/bin/bash", "-c", "--"] 12 | args: ["while true; do sleep 30; done;"] 13 | resources: 14 | limits: 15 | nvidia.com/gpu: 1 16 | -------------------------------------------------------------------------------- /platform/git/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: gitea 6 | app.kubernetes.io/name: gitea 7 | backup: gitea-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: data-gitea-zfs-0 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 10Gi 18 | -------------------------------------------------------------------------------- /apps/antdroid/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: antdroid 6 | app.kubernetes.io/name: antdroid 7 | backup: antdroid-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: antdroid-data 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 0.1Gi 18 | -------------------------------------------------------------------------------- /apps/dawarich/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: dawarich 6 | app.kubernetes.io/name: dawarich 7 | backup: dawarich-zfs 8 | backup/retain: weekly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: dawarich-imports 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 
1Gi 18 | -------------------------------------------------------------------------------- /apps/freshrss/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: freshrss 6 | app.kubernetes.io/name: freshrss 7 | backup: freshrss-zfs 8 | backup/retain: weekly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: freshrss-config 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 2Gi 18 | -------------------------------------------------------------------------------- /apps/mosquitto-tls/templates/certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: mosquitto-tls-crt 5 | spec: 6 | secretName: mosquitto-tls-crt-secret 7 | commonName: mosquitto-tls.internal.grigri.cloud 8 | dnsNames: 9 | - mosquitto-tls.internal.grigri.cloud 10 | privateKey: 11 | algorithm: RSA 12 | size: 4096 13 | issuerRef: 14 | name: iot 15 | kind: ClusterIssuer 16 | group: cert-manager.io 17 | -------------------------------------------------------------------------------- /apps/mosquitto/rules/custom.yaml: -------------------------------------------------------------------------------- 1 | - alert: MQTTMissingClient 2 | annotations: 3 | description: | 4 | There are only {{ $value }} clients connected to the MQTT server, but there should be at least 5. 5 | Expected clients: HASS, tanque, valetudopng, goHeishamon, and the exporter itself. 6 | This condition has persisted for longer than 1 hour. 7 | expr: broker_clients_connected < 5 8 | for: 1h 9 | labels: 10 | severity: warning 11 | -------------------------------------------------------------------------------- /apps/mosquitto/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: mosquitto 6 | app.kubernetes.io/name: mosquitto 7 | backup: mosquitto-zfs 8 | backup/retain: weekly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: mosquitto-data 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 2Gi 18 | -------------------------------------------------------------------------------- /apps/prowlarr/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: prowlarr 6 | app.kubernetes.io/name: prowlarr 7 | backup: prowlarr-zfs 8 | backup/retain: weekly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: prowlarr-config 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 2Gi 18 | -------------------------------------------------------------------------------- /apps/wallabag/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: wallabag 6 | app.kubernetes.io/name: wallabag 7 | backup: wallabag-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: wallabag-images 12 | spec: 13 |
accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 4Gi 18 | -------------------------------------------------------------------------------- /platform/external-secrets/values.yaml: -------------------------------------------------------------------------------- 1 | resources: 2 | requests: 3 | cpu: 10m 4 | memory: 38Mi 5 | limits: 6 | memory: 64Mi 7 | 8 | extraArgs: 9 | loglevel: debug 10 | 11 | webhook: 12 | resources: 13 | requests: 14 | cpu: 10m 15 | memory: 21Mi 16 | limits: 17 | memory: 64Mi 18 | 19 | certController: 20 | resources: 21 | requests: 22 | cpu: 10m 23 | memory: 64Mi 24 | limits: 25 | memory: 128Mi 26 | -------------------------------------------------------------------------------- /platform/git/templates/oidc-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: oidc-client 5 | spec: 6 | secretStoreRef: 7 | kind: SecretStore 8 | name: k8s-store 9 | target: 10 | name: oidc-client 11 | template: 12 | data: 13 | key: {{`"{{ .CLIENT_ID }}"`}} 14 | secret: {{`"{{ .CLIENT_SECRET }}"`}} 15 | dataFrom: 16 | - extract: 17 | key: git-kanidm-oauth2-credentials 18 | -------------------------------------------------------------------------------- /apps/jellyfin/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: jellyfin 6 | app.kubernetes.io/name: jellyfin 7 | backup: jellyfin-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: config-jellyfin 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 100Gi 18 | -------------------------------------------------------------------------------- /apps/navidrome/templates/pvc-data.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: navidrome 6 | app.kubernetes.io/name: navidrome 7 | backup: navidrome-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: data-navidrome 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 5Gi 18 | -------------------------------------------------------------------------------- /docs/user-guide/upgrades.md: -------------------------------------------------------------------------------- 1 | # Upgrades 2 | 3 | ## OS upgrades 4 | 5 | Managed by `unattended-upgrade` in Debian-based distributions and rebooted by `kured` when needed. 6 | 7 | Review the update history in `/var/log/unattended-upgrades/unattended-upgrades-dpkg.log`. 8 | 9 | ## k3s upgrades 10 | 11 | Managed by [system-upgrade-controller](https://github.com/rancher/system-upgrade-controller). 12 | Bump the K3s version in the `system/system-upgrade/k3s/kustomization.yaml` file.
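The controller then rolls the new version out node by node. Progress can be followed with something like this (a sketch, assuming the controller runs in its default `system-upgrade` namespace):

```bash
# Watch the upgrade plans and the per-node upgrade jobs they spawn
kubectl -n system-upgrade get plans.upgrade.cattle.io
kubectl -n system-upgrade get jobs -w
```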
13 | -------------------------------------------------------------------------------- /metal/inventory/hosts.ini: -------------------------------------------------------------------------------- 1 | [kube_control_plane] 2 | prusik 3 | 4 | [kube_node:children] 5 | amd64_node 6 | odroid_hc4 7 | # odroid_c4 8 | # rock64 9 | 10 | [arm:children] 11 | odroid_hc4 12 | # odroid_c4 13 | # rock64 14 | 15 | [amd64_node] 16 | grigri 17 | 18 | [amd64] 19 | prusik 20 | grigri 21 | 22 | [ipmi] 23 | prusik-ipmi 24 | 25 | [odroid_hc4] 26 | k8s-odroid-hc4-3 27 | 28 | [nvidia] 29 | prusik 30 | 31 | [k3s_cluster:children] 32 | kube_control_plane 33 | kube_node 34 | -------------------------------------------------------------------------------- /system/cert-manager/templates/clusterissuer-external.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-prod-dns 5 | spec: 6 | acme: 7 | server: https://acme-v02.api.letsencrypt.org/directory 8 | privateKeySecretRef: 9 | name: letsencrypt-prod-dns 10 | solvers: 11 | - dns01: 12 | cloudflare: 13 | apiTokenSecretRef: 14 | name: cloudflare-api-token 15 | key: api-token 16 | -------------------------------------------------------------------------------- /apps/jellyseerr/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: jellyseerr 6 | app.kubernetes.io/name: jellyseerr 7 | snapshots: jellyseerr-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: jellyseerr-config 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 1Gi 18 | -------------------------------------------------------------------------------- /apps/m-rajoy-api/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: m-rajoy-api 6 | app.kubernetes.io/name: m-rajoy-api 7 | backup: m-rajoy-api-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: m-rajoy-api-data 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 0.1Gi 18 | -------------------------------------------------------------------------------- /apps/mosquitto-tls/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: mosquitto-tls 6 | app.kubernetes.io/name: mosquitto-tls 7 | backup: mosquitto-tls-zfs 8 | backup/retain: weekly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: mosquitto-tls 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 2Gi 18 | -------------------------------------------------------------------------------- /apps/navidrome/templates/pvc-music.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: navidrome 6 | app.kubernetes.io/name: navidrome 7 | backup: navidrome-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | 
argocd.argoproj.io/sync-options: Prune=false 11 | name: music-navidrome 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 200Gi 18 | -------------------------------------------------------------------------------- /apps/special-web/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: special-web 6 | app.kubernetes.io/name: special-web 7 | backup: special-web-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: special-web-data 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 0.1Gi 18 | -------------------------------------------------------------------------------- /apps/home-assistant/appdaemon/appdaemon.yaml: -------------------------------------------------------------------------------- 1 | appdaemon: 2 | latitude: 3 | longitude: 4 | elevation: 5 | time_zone: 6 | plugins: 7 | HASS: 8 | type: hass 9 | ha_url: 10 | token: 11 | cert_verify: True 12 | http: 13 | url: 14 | admin: 15 | api: 16 | hadashboard: 17 | 18 | total_threads: 0 19 | -------------------------------------------------------------------------------- /apps/home-assistant/config/integrations/appdaemon_climate.yaml: -------------------------------------------------------------------------------- 1 | input_boolean: 2 | appdaemon_climate_enable: 3 | name: AppDaemon Climate Enable 4 | icon: mdi:thermometer-auto 5 | 6 | input_number: 7 | appdaemon_climate_min_hours_per_day: 8 | name: AppDaemon Climate Min Hours Per Day 9 | min: 0 10 | max: 24 11 | step: 1 12 | 13 | input_select: 14 | appdaemon_climate_heat_mode: 15 | name: Set Heat Mode 16 | options: 17 | - Heat+DHW 18 | - Cool+DHW 19 | -------------------------------------------------------------------------------- /apps/home-assistant/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: home-assistant 6 | app.kubernetes.io/name: home-assistant 7 | backup: home-assistant-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: home-assistant-config 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 4Gi 18 | -------------------------------------------------------------------------------- /apps/mosquitto-tls/config/mosquitto.conf: -------------------------------------------------------------------------------- 1 | per_listener_settings false 2 | 3 | listener 8883 4 | protocol mqtt 5 | certfile /mosquitto/tls/tls.crt 6 | keyfile /mosquitto/tls/tls.key 7 | 8 | listener 8884 9 | protocol websockets 10 | certfile /mosquitto/tls/tls.crt 11 | keyfile /mosquitto/tls/tls.key 12 | 13 | allow_anonymous false 14 | persistence true 15 | persistence_location /data 16 | autosave_interval 1800 17 | connection_messages false 18 | password_file /mosquitto/external_config/mosquitto_pwd 19 | -------------------------------------------------------------------------------- /apps/transcoder/templates/rabbit-external-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: rabbit-credentials 6 | spec: 7 | secretStoreRef: 8 
| kind: ClusterSecretStore 9 | name: vault 10 | data: 11 | - secretKey: username 12 | remoteRef: 13 | key: /transcoder/rabbit 14 | property: username 15 | - secretKey: password 16 | remoteRef: 17 | key: /transcoder/rabbit 18 | property: password 19 | -------------------------------------------------------------------------------- /platform/git/templates/admin-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: gitea-admin 5 | spec: 6 | secretStoreRef: 7 | kind: ClusterSecretStore 8 | name: vault 9 | target: 10 | name: gitea-admin 11 | data: 12 | - secretKey: username 13 | remoteRef: 14 | key: /git/admin 15 | property: username 16 | - secretKey: password 17 | remoteRef: 18 | key: /git/admin 19 | property: password 20 | -------------------------------------------------------------------------------- /apps/telegram-bot/templates/external-secrets-database.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: telegram-bot-database 5 | spec: 6 | secretStoreRef: 7 | kind: ClusterSecretStore 8 | name: vault 9 | data: 10 | - secretKey: username 11 | remoteRef: 12 | key: /telegram-bot/mongodb 13 | property: username 14 | - secretKey: password 15 | remoteRef: 16 | key: /telegram-bot/mongodb 17 | property: password 18 | -------------------------------------------------------------------------------- /platform/velero/tests/schedule-test.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: velero.io/v1 2 | kind: Schedule 3 | metadata: 4 | name: retain-test 5 | namespace: velero 6 | spec: 7 | schedule: '* * * * *' 8 | template: 9 | includedNamespaces: 10 | - "*" 11 | orLabelSelectors: 12 | - matchLabels: 13 | backup/retain: test 14 | snapshotVolumes: true 15 | storageLocation: default 16 | ttl: 12m30s 17 | volumeSnapshotLocations: 18 | - zfspv-incr 19 | useOwnerReferencesInBackup: true 20 | -------------------------------------------------------------------------------- /apps/atuin/templates/k8s-secret-store/secret-store.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: SecretStore 3 | metadata: 4 | name: k8s-store 5 | spec: 6 | provider: 7 | kubernetes: 8 | remoteNamespace: atuin 9 | server: 10 | url: https://kubernetes.default.svc.cluster.local 11 | caProvider: 12 | type: ConfigMap 13 | name: kube-root-ca.crt 14 | key: ca.crt 15 | auth: 16 | serviceAccount: 17 | name: external-secrets-k8s-store 18 | -------------------------------------------------------------------------------- /apps/atuin/templates/postgres-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: atuin-database 5 | spec: 6 | secretStoreRef: 7 | kind: SecretStore 8 | name: k8s-store 9 | target: 10 | name: atuin-database 11 | template: 12 | data: 13 | ATUIN_DB_URI: postgresql://{{`{{ .username }}`}}:{{`{{ .password }}`}}@atuin-postgres/atuin 14 | dataFrom: 15 | - extract: 16 | key: atuin.atuin-postgres.credentials.postgresql.acid.zalan.do 17 | -------------------------------------------------------------------------------- /apps/unpackerr/templates/external-secret.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: unpackerr 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: unpackerr 12 | data: 13 | - secretKey: UN_SONARR_0_API_KEY 14 | remoteRef: 15 | key: /sonarr/api 16 | property: key 17 | - secretKey: UN_RADARR_0_API_KEY 18 | remoteRef: 19 | key: /radarr/api 20 | property: key 21 | -------------------------------------------------------------------------------- /system/kube-system/nvidia-device-plugin-values.yaml: -------------------------------------------------------------------------------- 1 | config: 2 | map: 3 | default: |- 4 | version: v1 5 | flags: 6 | migStrategy: none 7 | sharing: 8 | timeSlicing: 9 | renameByDefault: false 10 | failRequestsGreaterThanOne: false 11 | resources: 12 | - name: nvidia.com/gpu 13 | replicas: 8 14 | runtimeClassName: nvidia 15 | nodeSelector: 16 | feature.node.kubernetes.io/pci-10de.present: "true" 17 | 18 | gfd: 19 | enabled: true 20 | -------------------------------------------------------------------------------- /apps/home-assistant/config/integrations/tuya.yaml: -------------------------------------------------------------------------------- 1 | automation: 2 | alias: Reboot Tuya when disconnected 3 | trigger: 4 | - platform: event 5 | event_type: system_log_event 6 | event_data: 7 | name: tuya_iot 8 | level: ERROR 9 | condition: 10 | condition: template 11 | value_template: "{{ 'error while get mqtt config' in trigger.event.data.message[0] }}" 12 | action: 13 | - service: homeassistant.reload_config_entry 14 | data: 15 | entity_id: climate.thermostat_bedroom 16 | -------------------------------------------------------------------------------- /platform/git/templates/k8s-secret-store/secret-store.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: SecretStore 3 | metadata: 4 | name: k8s-store 5 | spec: 6 | provider: 7 | kubernetes: 8 | remoteNamespace: {{ .Release.Namespace }} 9 | server: 10 | url: https://kubernetes.default.svc.cluster.local 11 | caProvider: 12 | type: ConfigMap 13 | name: kube-root-ca.crt 14 | key: ca.crt 15 | auth: 16 | serviceAccount: 17 | name: external-secrets-k8s-store 18 | -------------------------------------------------------------------------------- /platform/velero/templates/schedule-retain-weekly.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: velero.io/v1 2 | kind: Schedule 3 | metadata: 4 | name: retain-weekly 5 | namespace: velero 6 | spec: 7 | schedule: 30 2 * * 2 8 | template: 9 | includedNamespaces: 10 | - "*" 11 | orLabelSelectors: 12 | - matchLabels: 13 | backup/retain: weekly 14 | snapshotVolumes: true 15 | storageLocation: default 16 | ttl: 240h0m0s # 10d 17 | volumeSnapshotLocations: 18 | - zfspv-full 19 | useOwnerReferencesInBackup: true 20 | -------------------------------------------------------------------------------- /apps/freshrss/templates/k8s-secret-store/secret-store.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: SecretStore 3 | metadata: 4 | name: k8s-store 5 | spec: 6 | provider: 7 | kubernetes: 8 | remoteNamespace: {{ .Release.Namespace }} 9 | server: 10 | url: https://kubernetes.default.svc.cluster.local 11 | caProvider: 12 | type: ConfigMap 13 | name: 
kube-root-ca.crt 14 | key: ca.crt 15 | auth: 16 | serviceAccount: 17 | name: external-secrets-k8s-store 18 | -------------------------------------------------------------------------------- /bootstrap/argocd/templates/k8s-secret-store/secret-store.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: SecretStore 3 | metadata: 4 | name: k8s-store 5 | spec: 6 | provider: 7 | kubernetes: 8 | remoteNamespace: {{ .Release.Namespace }} 9 | server: 10 | url: https://kubernetes.default.svc.cluster.local 11 | caProvider: 12 | type: ConfigMap 13 | name: kube-root-ca.crt 14 | key: ca.crt 15 | auth: 16 | serviceAccount: 17 | name: external-secrets-k8s-store 18 | -------------------------------------------------------------------------------- /platform/velero/templates/schedule-retain-quaterly.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: velero.io/v1 2 | kind: Schedule 3 | metadata: 4 | name: retain-quaterly 5 | namespace: velero 6 | spec: 7 | schedule: 30 2 * * 2 8 | template: 9 | includedNamespaces: 10 | - "*" 11 | orLabelSelectors: 12 | - matchLabels: 13 | backup/retain: quaterly 14 | snapshotVolumes: true 15 | storageLocation: default 16 | ttl: 2160h0m0s # 90d 17 | volumeSnapshotLocations: 18 | - zfspv-incr 19 | useOwnerReferencesInBackup: true 20 | -------------------------------------------------------------------------------- /apps/open-webui/templates/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: open-webui 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: open-webui 12 | data: 13 | - secretKey: OPENAI_API_KEY 14 | remoteRef: 15 | key: /open-webui/pipelines 16 | property: api-key 17 | - secretKey: WEBUI_SECRET_KEY 18 | remoteRef: 19 | key: /open-webui/webui 20 | property: secret-key 21 | -------------------------------------------------------------------------------- /platform/git/templates/postgres-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: gitea-app-ini-database 5 | spec: 6 | secretStoreRef: 7 | kind: SecretStore 8 | name: k8s-store 9 | target: 10 | name: gitea-app-ini-database 11 | template: 12 | data: 13 | database: | 14 | USER={{`"{{ .username }}"`}} 15 | PASSWD={{`"{{ .password }}"`}} 16 | dataFrom: 17 | - extract: 18 | key: gitea.gitea-postgres.credentials.postgresql.acid.zalan.do 19 | -------------------------------------------------------------------------------- /system/oauth2-proxy/templates/k8s-secret-store/secret-store.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: SecretStore 3 | metadata: 4 | name: k8s-store 5 | spec: 6 | provider: 7 | kubernetes: 8 | remoteNamespace: {{ .Release.Namespace }} 9 | server: 10 | url: https://kubernetes.default.svc.cluster.local 11 | caProvider: 12 | type: ConfigMap 13 | name: kube-root-ca.crt 14 | key: ca.crt 15 | auth: 16 | serviceAccount: 17 | name: external-secrets-k8s-store 18 | -------------------------------------------------------------------------------- /metal/roles/zfs_exporter/defaults/main.yml: -------------------------------------------------------------------------------- 1 | 
--- 2 | # apply with `ANSIBLE_EXTRA_ARGS='-t zfs-exporter' make cluster` in metal dir 3 | # renovate: datasource=github-releases depName=pdf/zfs_exporter 4 | zfs_exporter_version: 2.3.11 5 | # deadline: Maximum duration that a collection should run before returning cached data. 6 | # Should be set to a value shorter than your scrape timeout duration. The current collection 7 | # run will continue and update the cache when complete (default: 8s) 8 | zfs_exporter_arguments: --collector.dataset-snapshot --deadline=8s 9 | -------------------------------------------------------------------------------- /metal/roles/k3s/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # renovate: datasource=github-releases depName=k3s-io/k3s versioning=regex:^v(?<major>\d+)(\.(?<minor>\d+))?(\.(?<patch>\d+))\+k3s?((?<build>\d+))$ 2 | k3s_version: v1.33.6+k3s1 3 | k3s_config_file: /etc/rancher/k3s/config.yaml 4 | k3s_token_file: /etc/rancher/node/password 5 | k3s_service_file: /etc/systemd/system/k3s.service 6 | k3s_kubelet_extra_args: [] 7 | k3s_server_config: 8 | disable: 9 | - local-storage 10 | - servicelb 11 | - traefik 12 | disable-cloud-controller: true 13 | secrets-encryption: true 14 | -------------------------------------------------------------------------------- /apps/home-assistant/appdaemon/apps/last_connection_sensor.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime 2 | 3 | import appdaemon.plugins.hass.hassapi as hass 4 | 5 | 6 | class LastConnectionSensor(hass.Hass): 7 | async def initialize(self): 8 | time = datetime.now(self.get_timezone()) 9 | self.run_minutely(self.update_last_connection, time) 10 | 11 | async def update_last_connection(self, cb_args): 12 | now = datetime.now(self.get_timezone()) 13 | self.set_state("sensor.last_appdaemon_connection", state=now.strftime("%Y-%m-%d %H:%M:%S")) 14 | -------------------------------------------------------------------------------- /metal/inventory/host_vars/prusik.yml: -------------------------------------------------------------------------------- 1 | # ntp is replaced by ntpsec in Ubuntu 24.04 and Debian 12 2 | ntp_config_file: /etc/ntpsec/ntp.conf 3 | ntp_package: ntpsec 4 | ntp_driftfile: /var/lib/ntpsec/ntp.drift 5 | ntp_daemon: ntpsec 6 | 7 | k3s_kubelet_extra_args: 8 | - kube-reserved=cpu=0.5,memory=1Gi,ephemeral-storage=1Gi 9 | - system-reserved=cpu=2,memory=10Gi,ephemeral-storage=1Gi 10 | - eviction-hard=memory.available<300Mi,nodefs.available<10% 11 | 12 | zfs_arc_min_gb: 4 13 | zfs_arc_max_gb: 24 14 | 15 | l2arc_write_max_mb: 300 16 | l2arc_write_boost_mb: 500 17 | -------------------------------------------------------------------------------- /system/monitoring/resources/grafana-admin-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: grafana-admin-secret 5 | namespace: monitoring 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: grafana-admin-secret 12 | data: 13 | - secretKey: username 14 | remoteRef: 15 | key: /grafana/admin 16 | property: username 17 | - secretKey: password 18 | remoteRef: 19 | key: /grafana/admin 20 | property: password 21 | -------------------------------------------------------------------------------- /apps/flaresolverr/values.yaml: -------------------------------------------------------------------------------- 1 | controllers: 2 |
flaresolverr: 3 | replicas: 1 4 | containers: 5 | flaresolverr: 6 | image: 7 | repository: ghcr.io/flaresolverr/flaresolverr 8 | tag: v3.4.6 9 | env: 10 | TZ: Europe/Madrid 11 | resources: 12 | requests: 13 | cpu: 10m 14 | memory: 128Mi 15 | limits: 16 | memory: 2Gi 17 | 18 | service: 19 | main: 20 | controller: flaresolverr 21 | type: ClusterIP 22 | ports: 23 | http: 24 | port: 8191 25 | -------------------------------------------------------------------------------- /system/cert-manager/templates/external-secret-internal-ca.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: kubernetes-internal-ca-key-pair 5 | spec: 6 | secretStoreRef: 7 | kind: ClusterSecretStore 8 | name: vault 9 | target: 10 | name: kubernetes-internal-ca-key-pair 11 | data: 12 | - secretKey: tls.crt 13 | remoteRef: 14 | key: /cert-manager/internal-ca 15 | property: crt 16 | - secretKey: tls.key 17 | remoteRef: 18 | key: /cert-manager/internal-ca 19 | property: key 20 | -------------------------------------------------------------------------------- /apps/qbittorrent/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/instance: qbittorrent 6 | app.kubernetes.io/name: qbittorrent 7 | backup: qbittorrent-zfs 8 | backup/retain: quaterly 9 | annotations: 10 | argocd.argoproj.io/sync-options: Prune=false 11 | name: config-qbittorrent 12 | spec: 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 1Gi 18 | storageClassName: openebs-zfspv 19 | volumeMode: Filesystem 20 | volumeName: config-qbittorrent 21 | -------------------------------------------------------------------------------- /system/monitoring/resources/long-term-metrics/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: prometheus-long-term 5 | namespace: monitoring 6 | labels: 7 | app: long-term-prometheus 8 | spec: 9 | ports: 10 | - name: http-web 11 | port: 9090 12 | protocol: TCP 13 | targetPort: 9090 14 | - appProtocol: http 15 | name: reloader-web 16 | port: 8080 17 | protocol: TCP 18 | targetPort: reloader-web 19 | selector: 20 | app.kubernetes.io/name: prometheus 21 | operator.prometheus.io/name: long-term-prometheus 22 | -------------------------------------------------------------------------------- /apps/home-assistant/config/integrations/database_error.yaml: -------------------------------------------------------------------------------- 1 | automation: 2 | alias: Reboot HASS when database disconnected 3 | trigger: 4 | - platform: event 5 | event_type: system_log_event 6 | event_data: 7 | name: homeassistant.components.websocket_api.http.connection 8 | level: ERROR 9 | condition: 10 | condition: template 11 | value_template: "{{ 'The database connection has not been established' in trigger.event.data.exception[0] }}" 12 | action: 13 | - service: homeassistant.restart 14 | -------------------------------------------------------------------------------- /apps/freshrss/templates/kanidm-oauth2-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: freshrss 6 | spec: 7 |
kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | displayname: freshrss 12 | 13 | origin: https://freshrss.grigri.cloud/i/oidc/ 14 | 15 | redirectUrl: 16 | - https://freshrss.grigri.cloud/i/oidc/ 17 | 18 | scopeMap: 19 | - group: freshrss-users 20 | scopes: 21 | - openid 22 | - profile 23 | - email 24 | 25 | strictRedirectUrl: true 26 | allowInsecureClientDisablePkce: true 27 | -------------------------------------------------------------------------------- /platform/git/templates/kanidm-oauth2-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: git 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | displayname: git 12 | 13 | origin: https://git.grigri.cloud/user/oauth2/kanidm/callback 14 | 15 | redirectUrl: 16 | - https://git.grigri.cloud/user/oauth2/kanidm/callback 17 | 18 | scopeMap: 19 | - group: git-users 20 | scopes: 21 | - openid 22 | - profile 23 | - email 24 | 25 | preferShortUsername: true 26 | strictRedirectUrl: true 27 | -------------------------------------------------------------------------------- /system/kaniop/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: kaniop 4 | 5 | helmCharts: 6 | - includeCRDs: true 7 | name: kaniop 8 | namespace: kaniop 9 | releaseName: kaniop 10 | repo: oci://ghcr.io/pando85/helm-charts 11 | valuesFile: values.yaml 12 | version: 0.1.9 13 | 14 | patches: 15 | - patch: |- 16 | - op: add 17 | path: "/metadata/annotations/argocd.argoproj.io~1sync-options" 18 | value: "Replace=true" 19 | target: 20 | group: apiextensions.k8s.io 21 | kind: CustomResourceDefinition 22 | -------------------------------------------------------------------------------- /system/cert-manager/templates/vault-rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: Role 4 | metadata: 5 | name: vault-issuer 6 | rules: 7 | - apiGroups: [''] 8 | resources: ['serviceaccounts/token'] 9 | resourceNames: ['cert-manager'] 10 | verbs: ['create'] 11 | --- 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: RoleBinding 14 | metadata: 15 | name: vault-issuer 16 | subjects: 17 | - kind: ServiceAccount 18 | name: cert-manager 19 | namespace: cert-manager 20 | roleRef: 21 | apiGroup: rbac.authorization.k8s.io 22 | kind: Role 23 | name: vault-issuer 24 | -------------------------------------------------------------------------------- /metal/roles/pikvm/files/edid.hex: -------------------------------------------------------------------------------- 1 | 00ffffffffffff005262888800888888 2 | 1c150103800000780aEE91A3544C9926 3 | 0F505400000001010101010101010101 4 | 010101010101011d007251d01e206e28 5 | 5500c48e2100001e8c0ad08a20e02d10 6 | 103e9600138e2100001e000000fc0054 7 | 6f73686962612d4832430a20000000FD 8 | 003b3d0f2e0f1e0a202020202020014f 9 | 020321434e041303021211012021a23c 10 | 3d3e1f2309070766030c00300080E300 11 | 7F8c0ad08a20e02d10103e9600c48e21 12 | 0000188c0ad08a20e02d10103e960013 13 | 8e210000188c0aa01451f01600267c43 14 | 00138e21000098000000000000000000 15 | 00000000000000000000000000000000 16 | 00000000000000000000000000000028 17 | -------------------------------------------------------------------------------- /platform/external-secrets/resources/clustersecretstore.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ClusterSecretStore 3 | metadata: 4 | name: vault 5 | spec: 6 | provider: 7 | vault: 8 | server: https://vault.vault:8200 9 | path: secret 10 | caProvider: 11 | type: Secret 12 | name: vault-tls 13 | namespace: vault 14 | key: ca.crt 15 | auth: 16 | kubernetes: 17 | mountPath: kubernetes 18 | role: allow-secrets 19 | serviceAccountRef: 20 | name: external-secrets 21 | namespace: external-secrets 22 | -------------------------------------------------------------------------------- /system/monitoring/resources/long-term-metrics/datasource-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: long-term-prometheus-grafana-datasource 5 | labels: 6 | grafana_datasource: "1" 7 | namespace: monitoring 8 | data: 9 | datasource.yaml: |- 10 | apiVersion: 1 11 | datasources: 12 | - name: Prometheus long term 13 | type: prometheus 14 | uid: prometheus-long-term 15 | url: http://prometheus-long-term.monitoring:9090/ 16 | access: proxy 17 | isDefault: false 18 | jsonData: 19 | httpMethod: POST 20 | timeInterval: 1m 21 | -------------------------------------------------------------------------------- /commitlint.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | extends: [ 3 | "@commitlint/config-conventional" 4 | ], 5 | parserPreset: { 6 | parserOpts: { headerPattern: /^([^\(\):]*)(?:\((.*)\))?!?: (.*)$/ } 7 | }, 8 | rules: { 9 | "type-enum": [ 10 | 0, 11 | "always" 12 | ], 13 | "subject-case": [ 14 | 2, 15 | "always", 16 | [ 17 | "sentence-case", 18 | "lower-case" 19 | ] 20 | ], 21 | "body-leading-blank": [ 22 | 2, 23 | "always" 24 | ], 25 | "footer-leading-blank": [ 26 | 2, 27 | "always" 28 | ] 29 | } 30 | }; 31 | -------------------------------------------------------------------------------- /metal/roles/prepare/tasks/logs2ram.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: logs2ram | Mount /var/log in RAM for arm64 devices 3 | mount: 4 | path: /var/log 5 | src: tmpfs 6 | fstype: tmpfs 7 | opts: nosuid,noexec,nodev,mode=0755,size=50m 8 | state: present 9 | 10 | - name: logs2ram | Copy truncate-logs script 11 | copy: 12 | src: truncate-logs.sh 13 | dest: /usr/local/bin/truncate-logs 14 | mode: "0755" 15 | 16 | - name: logs2ram | Create cron job for truncating logs 17 | ansible.builtin.cron: 18 | name: "truncate logs" 19 | minute: "0" 20 | hour: "0" 21 | job: "/usr/local/bin/truncate-logs" 22 | -------------------------------------------------------------------------------- /system/kanidm/templates/dbcloud/kanidm-oauth2-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: dbcloud 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | displayname: dbcloud 12 | 13 | origin: https://keycloak.dbcloud.org/realms/master/broker/grigri/endpoint 14 | 15 | redirectUrl: 16 | - https://keycloak.dbcloud.org/realms/master/broker/grigri/endpoint 17 | 18 | scopeMap: 19 | - group: dbcloud-users 20 | scopes: 21 | - openid 22 | - profile 23 | - email 24 | 25 | strictRedirectUrl: true 26 | -------------------------------------------------------------------------------- /system/zfs-localpv/values.yaml: 
-------------------------------------------------------------------------------- 1 | zfs-localpv: 2 | zfsNode: 3 | resources: {} 4 | # limits: 5 | # cpu: 10m 6 | # memory: 32Mi 7 | # requests: 8 | # cpu: 10m 9 | # memory: 32Mi 10 | nodeSelector: 11 | kubernetes.io/arch: amd64 12 | zfsController: 13 | resources: {} 14 | # limits: 15 | # cpu: 10m 16 | # memory: 32Mi 17 | # requests: 18 | # cpu: 10m 19 | # memory: 32Mi 20 | snapshotter: 21 | extraArgs: 22 | - -v=1 23 | snapshotController: 24 | extraArgs: 25 | - -v=1 26 | 27 | backupGC: 28 | enabled: true 29 | -------------------------------------------------------------------------------- /apps/nextcloud/templates/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | labels: 5 | app.kubernetes.io/component: app 6 | app.kubernetes.io/instance: nextcloud 7 | app.kubernetes.io/name: nextcloud 8 | backup: nextcloud-zfs 9 | backup/retain: quaterly 10 | annotations: 11 | argocd.argoproj.io/sync-options: Prune=false 12 | name: nextcloud-nextcloud-data 13 | namespace: nextcloud 14 | spec: 15 | accessModes: 16 | - ReadWriteOnce 17 | resources: 18 | requests: 19 | storage: 1000Gi 20 | storageClassName: openebs-zfspv 21 | volumeName: nextcloud-nextcloud-data 22 | -------------------------------------------------------------------------------- /metal/playbooks/install/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # ansible-playbook -i inventory/hosts.ini -K --limit {{ hosts }} -e 'serial=100%' playbooks/install/os.yml 3 | 4 | - name: Prepare 5 | hosts: all 6 | serial: "{{ serial | default('100%') }}" 7 | roles: 8 | - prepare 9 | 10 | - name: Setup amd64 11 | hosts: 12 | - grigri 13 | - prusik 14 | roles: 15 | - setup 16 | 17 | - name: Setup NVIDIA drivers 18 | hosts: nvidia 19 | roles: 20 | - name: nvidia-container-runtime 21 | tags: 22 | - nvidia-container-runtime 23 | 24 | - name: Setup pikvm 25 | hosts: prusik-ipmi 26 | roles: 27 | - pikvm 28 | -------------------------------------------------------------------------------- /apps/home-assistant/config/configuration.yaml: -------------------------------------------------------------------------------- 1 | # Loads default set of integrations. Do not remove.
2 | default_config: 3 | 4 | homeassistant: 5 | # Load packages 6 | packages: !include_dir_named integrations 7 | 8 | script: !include_dir_merge_named script 9 | 10 | # Load frontend themes from the themes folder 11 | frontend: 12 | themes: !include_dir_merge_named themes 13 | 14 | http: 15 | use_x_forwarded_for: true 16 | trusted_proxies: 17 | - 0.0.0.0/0 18 | 19 | recorder: 20 | db_url: !env_var ZZ_DB_URL 21 | 22 | prometheus: 23 | namespace: hass 24 | requires_auth: false 25 | 26 | notify: !include secrets/notify.yaml 27 | -------------------------------------------------------------------------------- /metal/roles/prepare/tasks/python.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: python | Check if python is needed 3 | raw: which python 4 | register: need_python 5 | failed_when: false 6 | changed_when: false 7 | check_mode: false 8 | 9 | - name: python | Check if python3 is installed 10 | raw: which python3 11 | register: need_python3 12 | failed_when: false 13 | changed_when: false 14 | check_mode: false 15 | 16 | - name: python | Create a symbolic link 17 | file: 18 | src: "{{ need_python3.stdout_lines | first }}" 19 | dest: /usr/bin/python 20 | state: link 21 | when: 22 | - need_python.rc != 0 23 | - need_python3.rc == 0 24 | -------------------------------------------------------------------------------- /platform/velero/templates/external-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: velero-bucket-credentials 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: velero-bucket-credentials 12 | template: 13 | engineVersion: v2 14 | data: 15 | cloud: |- 16 | [default] 17 | aws_access_key_id=velero 18 | aws_secret_access_key={{`{{ .password }}`}} 19 | data: 20 | - secretKey: password 21 | remoteRef: 22 | key: /minio/users 23 | property: veleroPassword 24 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "files.associations": { 3 | "**/tasks/**/*.yml": "ansible", 4 | "**/handler/*.yml": "ansible", 5 | "**/*_vars/**/*.yml": "ansible", 6 | "**/roles/**/*.yml": "ansible", 7 | "**/playbooks/**/*.yml": "ansible", 8 | "**/*ansible*/**/*.yml": "ansible", 9 | "**/vars/**/*.yml": "ansible", 10 | "**/inventory/*/*": "ansible", 11 | "*.rh": "ansible", 12 | "*.yaml": "yaml", 13 | "**/templates/**/*.yaml": "helm", 14 | "*.yaml.gotmpl": "helm", 15 | "*.tf": "terraform", 16 | "50unattended-upgrades": "cpp" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /metal/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | callback_whitelist = profile_tasks 3 | deprecation_warnings = False 4 | fact_caching = jsonfile 5 | fact_caching_connection = /tmp 6 | force_color=true 7 | gathering = smart 8 | host_key_checking = False 9 | inventory = inventory/hosts.ini 10 | inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo, .creds 11 | retry_files_enabled = False 12 | roles_path = roles 13 | stdout_callback = skippy 14 | 15 | [ssh_connection] 16 | pipelining = True 17 | ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o ConnectionAttempts=3 -o UserKnownHostsFile=/dev/null 18 | 19 | 
[privilege_escalation] 20 | become=true 21 | -------------------------------------------------------------------------------- /platform/minio/templates/external-secrets-users.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: minio-users 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: minio-users 12 | data: 13 | - secretKey: rootUser 14 | remoteRef: 15 | key: /minio/users 16 | property: rootUser 17 | - secretKey: rootPassword 18 | remoteRef: 19 | key: /minio/users 20 | property: rootPassword 21 | - secretKey: veleroPassword 22 | remoteRef: 23 | key: /minio/users 24 | property: veleroPassword 25 | -------------------------------------------------------------------------------- /platform/minio/templates/kanidm-oauth2-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: minio 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | displayname: minio 12 | 13 | origin: https://mc-s3.internal.grigri.cloud/oauth_callback 14 | 15 | redirectUrl: 16 | - https://mc-s3.internal.grigri.cloud/oauth_callback 17 | 18 | scopeMap: 19 | - group: minio-users 20 | scopes: 21 | - openid 22 | - profile 23 | - email 24 | 25 | preferShortUsername: true 26 | strictRedirectUrl: true 27 | allowInsecureClientDisablePkce: true 28 | -------------------------------------------------------------------------------- /system/cert-manager/templates/clusterissuer-iot.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: iot 6 | namespace: cert-manager 7 | spec: 8 | vault: 9 | path: pki/iot/sign/internal-certificates-grigri-iot 10 | # The Vault internal service has a custom CA that would have to be replicated here as a secret.
11 | # This adds an extra hop through the ingress, but it is the cleanest option: 12 | server: https://vault.internal.grigri.cloud 13 | auth: 14 | kubernetes: 15 | role: allow-iot-pki 16 | mountPath: /v1/auth/kubernetes 17 | serviceAccountRef: 18 | name: cert-manager 19 | -------------------------------------------------------------------------------- /system/oauth2-proxy/templates/kanidm-oauth2-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: k8s-oauth2-proxy 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | displayname: k8s-oauth2-proxy 12 | 13 | origin: https://auth.grigri.cloud/oauth2/callback 14 | 15 | redirectUrl: 16 | - https://auth.grigri.cloud/oauth2/callback 17 | 18 | scopeMap: 19 | - group: k8s-oauth2-proxy-users 20 | scopes: 21 | - openid 22 | - profile 23 | - email 24 | - groups 25 | 26 | preferShortUsername: true 27 | strictRedirectUrl: true 28 | -------------------------------------------------------------------------------- /apps/nextcloud/templates/kanidm-oauth2-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: nextcloud 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | displayname: nextcloud 12 | 13 | origin: https://nextcloud.grigri.cloud/apps/user_oidc/code 14 | 15 | redirectUrl: 16 | - https://nextcloud.grigri.cloud/apps/user_oidc/code 17 | 18 | scopeMap: 19 | - group: nextcloud-users 20 | scopes: 21 | - openid 22 | - profile 23 | - email 24 | 25 | preferShortUsername: true 26 | strictRedirectUrl: true 27 | jwtLegacyCryptoEnable: true 28 | -------------------------------------------------------------------------------- /apps/esphome/rules/rack-controller.yaml: -------------------------------------------------------------------------------- 1 | - alert: RackHighTemperature 2 | annotations: 3 | description: | 4 | Entity {{ $labels.entity }} has a temperature of {{ $value }} degrees Celsius. 5 | expr: hass_sensor_temperature_celsius{entity="sensor.rack_controller_rack_temperature"} > 35 6 | for: 5m 7 | labels: 8 | severity: warning 9 | - alert: RackHighTemperature 10 | annotations: 11 | description: | 12 | Entity {{ $labels.entity }} has a temperature of {{ $value }} degrees Celsius.
13 | expr: hass_sensor_temperature_celsius{entity="sensor.rack_controller_rack_temperature"} > 40 14 | for: 5m 15 | labels: 16 | severity: critical 17 | -------------------------------------------------------------------------------- /apps/jellyseerr/templates/kanidm-oauth2-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: jellyseerr 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | displayname: jellyseerr 12 | 13 | origin: http://requests.grigri.cloud/login/oidc/callback 14 | 15 | redirectUrl: 16 | - http://requests.grigri.cloud/login/oidc/callback 17 | 18 | scopeMap: 19 | - group: jellyseerr-users 20 | scopes: 21 | - openid 22 | - profile 23 | - email 24 | 25 | preferShortUsername: true 26 | strictRedirectUrl: true 27 | allowInsecureClientDisablePkce: true 28 | -------------------------------------------------------------------------------- /apps/open-webui/templates/kanidm-oauth2-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: open-webui 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | displayname: open-webui 12 | 13 | origin: https://open-webui.grigri.cloud/oauth/oidc/callback 14 | 15 | redirectUrl: 16 | - https://open-webui.grigri.cloud/oauth/oidc/callback 17 | 18 | scopeMap: 19 | - group: open-webui-users 20 | scopes: 21 | - openid 22 | - profile 23 | - email 24 | 25 | preferShortUsername: true 26 | strictRedirectUrl: true 27 | jwtLegacyCryptoEnable: true 28 | -------------------------------------------------------------------------------- /platform/vault/templates/kanidm-oauth2-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: vault 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | displayname: vault 11 | origin: https://vault.internal.grigri.cloud/ui/vault/auth/oidc/oidc/callback 12 | redirectUrl: 13 | - http://localhost:8250/oidc/callback 14 | - https://vault.internal.grigri.cloud/ui/vault/auth/oidc/oidc/callback 15 | scopeMap: 16 | - group: vault-admins 17 | scopes: 18 | - openid 19 | - profile 20 | - email 21 | preferShortUsername: true 22 | strictRedirectUrl: true 23 | -------------------------------------------------------------------------------- /system/zfs-localpv/tests/read.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: read 6 | spec: 7 | template: 8 | metadata: 9 | name: read 10 | spec: 11 | containers: 12 | - name: read 13 | image: ubuntu:xenial 14 | command: 15 | - dd 16 | - if=/mnt/pv/test.img 17 | - of=/dev/null 18 | - bs=8k 19 | volumeMounts: 20 | - mountPath: "/mnt/pv" 21 | name: test-volume 22 | volumes: 23 | - name: test-volume 24 | persistentVolumeClaim: 25 | claimName: test-claim 26 | restartPolicy: Never 27 | -------------------------------------------------------------------------------- /bootstrap/argocd/templates/kanidm-oauth2-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: argocd 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | 
namespace: kanidm 10 | 11 | displayname: argocd 12 | 13 | origin: https://argocd.internal.grigri.cloud/auth/callback 14 | 15 | redirectUrl: 16 | - https://argocd.internal.grigri.cloud/pkce/verify 17 | - https://argocd.internal.grigri.cloud/auth/callback 18 | 19 | scopeMap: 20 | - group: argocd-users 21 | scopes: 22 | - openid 23 | - profile 24 | - email 25 | 26 | strictRedirectUrl: true 27 | allowInsecureClientDisablePkce: false 28 | -------------------------------------------------------------------------------- /system/monitoring/resources/kanidm-oauth2-client-grafana.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: grafana 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | displayname: grafana 11 | origin: https://grafana.grigri.cloud/login/generic_oauth 12 | redirectUrl: 13 | - https://grafana.grigri.cloud/login/generic_oauth 14 | scopeMap: 15 | - group: grafana-users 16 | scopes: 17 | - openid 18 | - profile 19 | - email 20 | supScopeMap: 21 | - group: grafana-admins 22 | scopes: 23 | - admin 24 | 25 | preferShortUsername: true 26 | strictRedirectUrl: true 27 | -------------------------------------------------------------------------------- /metal/roles/setup/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add postfix password # noqa: name[casing] 3 | command: postmap /etc/postfix/sasl_passwd 4 | 5 | - name: restart postfix # noqa: name[casing] 6 | systemd: 7 | name: postfix 8 | state: restarted 9 | 10 | - name: mount zfs datasets # noqa: name[casing] 11 | command: zfs mount -a 12 | 13 | - name: reload zfs module config # noqa: name[casing] 14 | shell: | 15 | set -o pipefail 16 | grep -E -v '^#|^\s*$' /etc/modprobe.d/zfs.conf | while read L; do 17 | M=($L) 18 | N=${M[2]} 19 | P=(${N/=/ }) 20 | echo "${P[1]}" > /sys/module/zfs/parameters/${P[0]} 21 | done 22 | args: 23 | executable: /bin/bash 24 | -------------------------------------------------------------------------------- /system/loki/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: loki 4 | 5 | resources: 6 | - resources/prometheus-rules-alerts.yaml 7 | - resources/prometheus-rules-rules.yaml 8 | - resources/service-monitor.yaml 9 | 10 | helmCharts: 11 | - includeCRDs: true 12 | name: loki 13 | namespace: loki 14 | releaseName: loki 15 | repo: https://grafana.github.io/helm-charts 16 | valuesFile: values.yaml 17 | version: 6.49.0 18 | - includeCRDs: true 19 | name: promtail 20 | namespace: loki 21 | releaseName: loki 22 | repo: https://grafana.github.io/helm-charts 23 | valuesFile: promtail.yaml 24 | version: 6.17.1 25 | -------------------------------------------------------------------------------- /system/kured/templates/external-secret.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: kured-notifications-secret 5 | spec: 6 | secretStoreRef: 7 | kind: ClusterSecretStore 8 | name: vault 9 | target: 10 | name: kured-notifications-secret 11 | template: 12 | data: 13 | notifyUrl: 'telegram://{{`{{ .token }}`}}@telegram?chats={{`{{ .chat_id }}`}}' 14 | data: 15 | - secretKey: token 16 | remoteRef: 17 | key: /alertmanager-telegram-forwarder/telegram 18 | 
property: token 19 | - secretKey: chat_id 20 | remoteRef: 21 | key: /alertmanager-telegram-forwarder/telegram 22 | property: chat_id 23 | -------------------------------------------------------------------------------- /system/snapscheduler/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: snapscheduler 4 | 5 | helmCharts: 6 | - includeCRDs: true 7 | name: snapscheduler 8 | namespace: snapscheduler 9 | releaseName: snapscheduler 10 | repo: https://backube.github.io/helm-charts/ 11 | valuesFile: values.yaml 12 | version: 3.5.0 13 | 14 | patches: 15 | - target: 16 | kind: Deployment 17 | name: snapscheduler 18 | patch: |- 19 | - op: test 20 | path: /spec/template/spec/containers/1/name 21 | value: manager 22 | - op: add 23 | path: /spec/template/spec/containers/1/args/- 24 | value: -zap-log-level=error 25 | -------------------------------------------------------------------------------- /apps/immich/templates/kanidm-oauth2-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: immich 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | displayname: immich 12 | 13 | origin: https://photos.grigri.cloud/auth/login 14 | 15 | redirectUrl: 16 | - https://photos.grigri.cloud/auth/login 17 | - app.immich:///oauth-callback 18 | 19 | scopeMap: 20 | - group: immich-users 21 | scopes: 22 | - openid 23 | - profile 24 | - email 25 | 26 | supScopeMap: 27 | - group: immich-admins 28 | scopes: 29 | - admin 30 | 31 | preferShortUsername: true 32 | strictRedirectUrl: true 33 | -------------------------------------------------------------------------------- /platform/external-secrets/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: external-secrets 4 | 5 | resources: 6 | - resources/clustersecretstore.yaml 7 | 8 | helmCharts: 9 | - includeCRDs: true 10 | name: external-secrets 11 | namespace: external-secrets 12 | releaseName: external-secrets 13 | repo: https://charts.external-secrets.io 14 | valuesFile: values.yaml 15 | version: 1.2.0 16 | 17 | patches: 18 | - patch: |- 19 | - op: add 20 | path: "/metadata/annotations/argocd.argoproj.io~1sync-options" 21 | value: "Replace=true" 22 | target: 23 | group: apiextensions.k8s.io 24 | kind: CustomResourceDefinition 25 | -------------------------------------------------------------------------------- /apps/esphome/config/lilygo-higrow-1.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | substitutions: 3 | name: "lilygo-higrow-1" 4 | friendly_name: "Lilygo Higrow 1" 5 | project_name: "lilygo.higrow-1" 6 | project_version: "1.1" 7 | update_interval: 1min 8 | loglevel: DEBUG 9 | moisture_min: "2.833" 10 | moisture_max: "1.56" 11 | conductivity_min: "0.075" 12 | conductivity_max: "0.086" 13 | # Uncomment run_duration and sleep_duration if you want to use deep sleep 14 | # set how long to stay awake - NOT less than 10sec 15 | # run_duration: 11s 16 | # set how long to sleep in minutes 17 | # sleep_duration: 60min 18 | 19 | packages: 20 | connection: !include ./packages/connection.yaml 21 | lilygo-higrow: !include ./packages/lilygo-higrow.yaml 22 |
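A quick way to sanity-check one of these ESPHome device configs before flashing is the `esphome` CLI. A minimal sketch, assuming the CLI is installed (e.g. from `config/requirements.txt`) and a `secrets.yaml` defining the `!secret` keys used by `packages/connection.yaml` sits next to the configs:

```bash
cd apps/esphome/config
# validate substitutions, packages and secrets without building anything
esphome config lilygo-higrow-1.yaml
# build the firmware; `esphome run lilygo-higrow-1.yaml` would also flash it over USB/OTA
esphome compile lilygo-higrow-1.yaml
```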
-------------------------------------------------------------------------------- /apps/esphome/config/lilygo-higrow-2.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | substitutions: 3 | name: "lilygo-higrow-2" 4 | friendly_name: "Lilygo Higrow 2" 5 | project_name: "lilygo.higrow-2" 6 | project_version: "1.1" 7 | update_interval: 1min 8 | loglevel: DEBUG 9 | moisture_min: "2.833" 10 | moisture_max: "1.562" 11 | conductivity_min: "0.075" 12 | conductivity_max: "0.086" 13 | # Uncomment run_duration and sleep_duration if you want to use deep sleep 14 | # set how long to stay awake - NOT less than 10sec 15 | # run_duration: 11s 16 | # set how long to sleep in minutes 17 | # sleep_duration: 60min 18 | 19 | packages: 20 | connection: !include ./packages/connection.yaml 21 | lilygo-higrow: !include ./packages/lilygo-higrow.yaml 22 | -------------------------------------------------------------------------------- /docs/user-guide/cpu-optimize-tuning.md: -------------------------------------------------------------------------------- 1 | # CPU optimize tuning 2 | 3 | ## AMD Ryzen 7950X 4 | 5 | Go to the BIOS, turn on overclocking, and change 6 | `Advanced -> AMD Overclocking -> AMD Overclocking -> Precision Boost Overdrive` from `auto` to 7 | `advanced`. Then in `Curve Optimizer` select `all cores` and change the sign from `positive` to `negative`. 8 | Start testing with a magnitude of 30 and reduce it if the system is not stable. 9 | 10 | E.g. for testing the CPU: 11 | 12 | ```bash 13 | sysbench --threads="$(nproc)" cpu run --cpu-max-prime=2000000000 14 | ``` 15 | 16 | Additionally, you can change `Platform Thermal Throttle Ctrl` to manual and set the power limit to 85W. 17 | I didn't apply this. 18 | 19 | Ref: https://www.youtube.com/watch?v=FaOYYHNGlLs 20 | -------------------------------------------------------------------------------- /system/zfs-localpv/templates/storage-class-openebs-zfspv.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: openebs-zfspv 5 | annotations: 6 | storageclass.kubernetes.io/is-default-class: "true" 7 | allowVolumeExpansion: true 8 | parameters: 9 | fstype: "zfs" 10 | shared: "yes" 11 | poolname: "datasets/openebs" 12 | provisioner: zfs.csi.openebs.io 13 | allowedTopologies: 14 | - matchLabelExpressions: 15 | - key: kubernetes.io/hostname 16 | values: 17 | - grigri 18 | - prusik 19 | # with this we take the k8s scheduler into account instead of CapacityWeighted (default) or VolumeWeighted.
20 | volumeBindingMode: WaitForFirstConsumer 21 | reclaimPolicy: Retain 22 | -------------------------------------------------------------------------------- /apps/jellyfin/templates/kanidm-oauth2-client.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kaniop.rs/v1beta1 3 | kind: KanidmOAuth2Client 4 | metadata: 5 | name: jellyfin 6 | spec: 7 | kanidmRef: 8 | name: kanidm 9 | namespace: kanidm 10 | 11 | displayname: jellyfin 12 | 13 | origin: https://jellyfin.grigri.cloud/sso/OID/redirect/idm.grigri.cloud 14 | 15 | redirectUrl: 16 | - https://jellyfin.grigri.cloud/sso/OID/redirect/idm.grigri.cloud 17 | 18 | scopeMap: 19 | - group: jellyfin-users 20 | scopes: 21 | - openid 22 | - profile 23 | - email 24 | 25 | supScopeMap: 26 | - group: jellyfin-admins 27 | scopes: 28 | - admin 29 | 30 | preferShortUsername: true 31 | strictRedirectUrl: true 32 | -------------------------------------------------------------------------------- /system/system-upgrade/k3s/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - server.yaml 6 | - agent.yaml 7 | 8 | commonAnnotations: 9 | # TODO: https://github.com/rancher/system-upgrade-controller/issues/172 10 | argocd.argoproj.io/sync-options: SkipDryRunOnMissingResource=true 11 | 12 | patches: 13 | - patch: |- 14 | - op: add 15 | path: /spec/version 16 | # renovate: datasource=github-releases depName=k3s-io/k3s versioning=regex:^v(?<major>\d+)(\.(?<minor>\d+))?(\.(?<patch>\d+))\+k3s?((?<build>\d+))$ 17 | value: v1.33.6+k3s1 18 | target: 19 | group: upgrade.cattle.io 20 | version: v1 21 | kind: Plan 22 | name: .* 23 | -------------------------------------------------------------------------------- /system/kube-system/resources/nodelocaldns/cilium-local-redirect-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cilium.io/v2 2 | kind: CiliumLocalRedirectPolicy 3 | metadata: 4 | name: nodelocaldns 5 | namespace: kube-system 6 | spec: 7 | redirectFrontend: 8 | addressMatcher: 9 | ip: 169.254.25.10 10 | toPorts: 11 | - name: dns-tcp 12 | port: "53" 13 | protocol: TCP 14 | - name: dns 15 | port: "53" 16 | protocol: UDP 17 | redirectBackend: 18 | localEndpointSelector: 19 | matchLabels: 20 | k8s-app: node-local-dns 21 | toPorts: 22 | - port: "53" 23 | name: dns 24 | protocol: UDP 25 | - port: "53" 26 | name: dns-tcp 27 | protocol: TCP 28 | -------------------------------------------------------------------------------- /metal/playbooks/install/cluster.yml: -------------------------------------------------------------------------------- 1 | - name: Create Kubernetes cluster 2 | hosts: k3s_cluster 3 | serial: "{{ serial | default('100%') }}" 4 | vars: 5 | ansible_path: "{{ playbook_dir }}/../.."
6 | roles: 7 | - name: geerlingguy.ntp 8 | tags: 9 | - ntp 10 | - name: k3s 11 | tags: 12 | - k3s 13 | 14 | - name: Label Kubernetes nodes 15 | ansible.builtin.import_playbook: k8s-node-labels.yml 16 | 17 | - name: Setup cron backups 18 | ansible.builtin.import_playbook: backups.yml 19 | 20 | - name: Setup democratic-csi user 21 | ansible.builtin.import_playbook: democratic-csi-user.yml 22 | 23 | - name: Install zfs-exporter 24 | hosts: 25 | - grigri 26 | - prusik 27 | roles: 28 | - zfs_exporter 29 | -------------------------------------------------------------------------------- /bootstrap/argocd/templates/ingress-external.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | external-dns.alpha.kubernetes.io/enabled: "true" 6 | external-dns.alpha.kubernetes.io/target: grigri.cloud 7 | cert-manager.io/cluster-issuer: letsencrypt-prod-dns 8 | name: argocd-external 9 | spec: 10 | ingressClassName: nginx-external 11 | rules: 12 | - host: &host argocd.grigri.cloud 13 | http: 14 | paths: 15 | - backend: 16 | service: 17 | name: argocd-server 18 | port: 19 | number: 80 20 | path: /api/webhook 21 | pathType: Prefix 22 | tls: 23 | - hosts: 24 | - *host 25 | secretName: argocd-external-tls-certificate 26 | -------------------------------------------------------------------------------- /apps/mintpsicologia/templates/external-secret-mariadb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: wordpress-mariadb-secret 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: wordpress-mariadb-secret 12 | data: 13 | - secretKey: mariadb-root-password 14 | remoteRef: 15 | key: /mintpsicologia/mariadb 16 | property: mariadb-root-password 17 | - secretKey: mariadb-replication-password 18 | remoteRef: 19 | key: /mintpsicologia/mariadb 20 | property: mariadb-replication-password 21 | - secretKey: mariadb-password 22 | remoteRef: 23 | key: /mintpsicologia/mariadb 24 | property: mariadb-password 25 | -------------------------------------------------------------------------------- /system/external-dns/values.yaml: -------------------------------------------------------------------------------- 1 | external-dns: 2 | interval: 2m 3 | # logLevel: debug 4 | provider: 5 | name: cloudflare 6 | env: 7 | - name: CF_API_TOKEN 8 | valueFrom: 9 | secretKeyRef: 10 | name: external-dns-cloudflare 11 | key: token 12 | extraArgs: 13 | - --annotation-filter=external-dns.alpha.kubernetes.io/enabled in (true) 14 | # `policy: sync` would fully handle the domain 15 | policy: upsert-only 16 | 17 | sources: 18 | - service 19 | - ingress 20 | domainFilters: 21 | - grigri.cloud 22 | serviceMonitor: 23 | enabled: true 24 | additionalLabels: 25 | release: monitoring 26 | resources: 27 | requests: 28 | cpu: 10m 29 | memory: 14Mi 30 | limits: 31 | memory: 64Mi 32 | -------------------------------------------------------------------------------- /system/pod-cleaner/templates/rbac.yaml: -------------------------------------------------------------------------------- 1 | kind: ServiceAccount 2 | apiVersion: v1 3 | metadata: 4 | name: pod-cleaner 5 | namespace: pod-cleaner 6 | 7 | --- 8 | 9 | kind: ClusterRole 10 | apiVersion: rbac.authorization.k8s.io/v1 11 | metadata: 12 | name: pod-watcher 13 | rules: 14 | - apiGroups: 15 | - "" 16 | resources: 17 | -
pods 18 | verbs: 19 | - list 20 | - watch 21 | - get 22 | - delete 23 | --- 24 | 25 | kind: ClusterRoleBinding 26 | apiVersion: rbac.authorization.k8s.io/v1 27 | metadata: 28 | name: pod-cleaner-pod-watcher 29 | roleRef: 30 | kind: ClusterRole 31 | name: pod-watcher 32 | apiGroup: rbac.authorization.k8s.io 33 | subjects: 34 | - kind: ServiceAccount 35 | name: pod-cleaner 36 | namespace: pod-cleaner 37 | -------------------------------------------------------------------------------- /.github/workflows/docs.yaml: -------------------------------------------------------------------------------- 1 | name: Publish docs via GitHub Pages 2 | 3 | # yamllint disable-line rule:truthy 4 | on: 5 | push: 6 | branches: 7 | - master 8 | 9 | jobs: 10 | build: 11 | name: Deploy docs 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout main 15 | uses: actions/checkout@v6 16 | 17 | - name: Deploy docs 18 | uses: mhausenblas/mkdocs-deploy-gh-pages@master 19 | # Or use mhausenblas/mkdocs-deploy-gh-pages@nomaterial to build without the mkdocs-material theme 20 | env: 21 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 22 | CONFIG_FILE: mkdocs.yml 23 | EXTRA_PACKAGES: build-base 24 | # GITHUB_DOMAIN: github.myenterprise.com 25 | REQUIREMENTS: docs/requirements.txt 26 | -------------------------------------------------------------------------------- /metal/roles/prepare/tasks/sshd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: sshd | Disable sshd options 3 | lineinfile: 4 | path: /etc/ssh/sshd_config 5 | regex: "^(# *)?{{ item }}" 6 | line: "{{ item }} no" 7 | notify: restart sshd 8 | loop: 9 | - PermitRootLogin 10 | - UseDNS 11 | 12 | - name: "sshd | Workaround sshd halt: disable pam" 13 | lineinfile: 14 | path: /etc/pam.d/common-session 15 | regex: '^session optional pam_systemd.so' 16 | state: absent 17 | 18 | - name: "sshd | Workaround sshd halt: disable security options in systemd-login" 19 | lineinfile: 20 | path: /lib/systemd/system/systemd-logind.service 21 | regex: '^{{ item }}.*' 22 | state: absent 23 | loop: 24 | - MemoryDenyWriteExecute 25 | - SystemCallFilter 26 | notify: restart systemd-logind 27 | -------------------------------------------------------------------------------- /apps/antdroid/config/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes 1; 3 | 4 | error_log /var/log/nginx/error.log notice; 5 | pid /var/run/nginx.pid; 6 | 7 | 8 | events { 9 | worker_connections 1024; 10 | } 11 | 12 | 13 | http { 14 | include /etc/nginx/mime.types; 15 | default_type application/octet-stream; 16 | 17 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 18 | '$status $body_bytes_sent "$http_referer" ' 19 | '"$http_user_agent" "$http_x_forwarded_for"'; 20 | 21 | access_log /var/log/nginx/access.log main; 22 | 23 | sendfile on; 24 | #tcp_nopush on; 25 | 26 | keepalive_timeout 65; 27 | 28 | #gzip on; 29 | 30 | include /etc/nginx/conf.d/*.conf; 31 | } 32 | -------------------------------------------------------------------------------- /docs/user-guide/run-commands-on-multiple-nodes.md: -------------------------------------------------------------------------------- 1 | # Run commands on multiple nodes 2 | 3 | Use [ansible-console](https://docs.ansible.com/ansible/latest/cli/ansible-console.html): 4 | 5 | ```sh 6 | cd metal 7 | make console 8 | ``` 9 | 10 | Then enter the command(s) you want to run. 11 | 12 | !!! 
example 13 | 14 | `root@all (4)[f:5]$ uptime` 15 | 16 | ```console 17 | metal0 | CHANGED | rc=0 >> 18 | 10:52:02 up 2 min, 1 user, load average: 0.17, 0.15, 0.06 19 | metal1 | CHANGED | rc=0 >> 20 | 10:52:02 up 2 min, 1 user, load average: 0.14, 0.11, 0.04 21 | metal3 | CHANGED | rc=0 >> 22 | 10:52:02 up 2 min, 1 user, load average: 0.03, 0.02, 0.00 23 | metal2 | CHANGED | rc=0 >> 24 | 10:52:02 up 2 min, 1 user, load average: 0.06, 0.06, 0.02 25 | ``` 26 | -------------------------------------------------------------------------------- /apps/special-web/config/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes 1; 3 | 4 | error_log /var/log/nginx/error.log notice; 5 | pid /var/run/nginx.pid; 6 | 7 | 8 | events { 9 | worker_connections 1024; 10 | } 11 | 12 | 13 | http { 14 | include /etc/nginx/mime.types; 15 | default_type application/octet-stream; 16 | 17 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 18 | '$status $body_bytes_sent "$http_referer" ' 19 | '"$http_user_agent" "$http_x_forwarded_for"'; 20 | 21 | access_log /var/log/nginx/access.log main; 22 | 23 | sendfile on; 24 | #tcp_nopush on; 25 | 26 | keepalive_timeout 65; 27 | 28 | #gzip on; 29 | 30 | include /etc/nginx/conf.d/*.conf; 31 | } 32 | -------------------------------------------------------------------------------- /apps/cross-backups/templates/external-secrets-users.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: cross-backups-users 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: cross-backups-users 12 | data: 13 | - secretKey: rootUser 14 | remoteRef: 15 | key: /cross-backup/users 16 | property: rootUser 17 | - secretKey: rootPassword 18 | remoteRef: 19 | key: /cross-backup/users 20 | property: rootPassword 21 | - secretKey: millaPassword 22 | remoteRef: 23 | key: /cross-backup/users 24 | property: millaPassword 25 | - secretKey: dabolPassword 26 | remoteRef: 27 | key: /cross-backup/users 28 | property: dabolPassword 29 | -------------------------------------------------------------------------------- /apps/telegram-bot/templates/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: telegram-bot-secret 5 | spec: 6 | secretStoreRef: 7 | kind: ClusterSecretStore 8 | name: vault 9 | target: 10 | name: telegram-bot-secret 11 | template: 12 | data: 13 | BOT_TOKEN: {{`"{{ .token }}"`}} 14 | MONGO_URI: 'mongodb://{{`{{ .username }}`}}:{{`{{ .password }}`}}@mongo' 15 | data: 16 | - secretKey: token 17 | remoteRef: 18 | key: /telegram-bot/bot-token 19 | property: token 20 | - secretKey: password 21 | remoteRef: 22 | key: /telegram-bot/mongodb 23 | property: password 24 | - secretKey: username 25 | remoteRef: 26 | key: /telegram-bot/mongodb 27 | property: username 28 | -------------------------------------------------------------------------------- /metal/roles/prepare/tasks/unattended-upgrades.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: unattended-upgrades | Install basic packages 3 | apt: 4 | name: unattended-upgrades 5 | state: present 6 | force_apt_get: true 7 | update_cache: true 8 | 9 | - name: unattended-upgrades | Config 10 | copy: 11 | src: 
"{{ item }}" 12 | dest: /etc/apt/apt.conf.d/{{ item }} 13 | mode: "0644" 14 | loop: 15 | - 20auto-upgrades 16 | - 50unattended-upgrades 17 | - 02-armbian-periodic 18 | notify: 19 | - unattended-upgrades | restart systemd service 20 | - unattended-upgrades | ignore armbian changes 21 | 22 | - name: unattended-upgrades | Start systemd service 23 | systemd: 24 | name: unattended-upgrades 25 | enabled: true 26 | state: started 27 | notify: unattended-upgrades | restart systemd service 28 | -------------------------------------------------------------------------------- /scripts/prepare_sdcard.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | IMAGE=$1 4 | K8S_HOSTNAME=$2 5 | 6 | if [[ $EUID -ne 0 ]]; then 7 | echo "This script must be run as root" 8 | exit 1 9 | fi 10 | 11 | if [[ -z $IMAGE ]] || [[ -z $K8S_HOSTNAME ]]; then 12 | echo "Usage: sudo ./prepare_sdcard.sh distros/Armbian_20.05.4_Odroidc4_focal_current_5.6.18.img k8s-1 " 13 | exit 1 14 | fi 15 | 16 | bash -c "dd if=$IMAGE of=/dev/mmcblk0 bs=1M conv=sync" 17 | sleep 2 18 | 19 | mount /dev/mmcblk0p1 /mnt 20 | HOSTNAME=$(cat /mnt/etc/hostname) 21 | sed -i "s/$HOSTNAME/$K8S_HOSTNAME/g" /mnt/etc/hostname 22 | sed -i "s/$HOSTNAME/$K8S_HOSTNAME/g" /mnt/etc/hosts 23 | rm /mnt/root/.not_logged_in_yet || true 24 | rm /mnt/etc/profile.d/armbian-check-first-login.sh || true 25 | rm /mnt/etc/profile.d/armbian-check-first-login-reboot.sh || true 26 | umount /mnt 27 | -------------------------------------------------------------------------------- /system/loki/resources/service-monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: loki 5 | labels: 6 | release: monitoring 7 | spec: 8 | endpoints: 9 | - interval: 15s 10 | path: /metrics 11 | port: http-metrics 12 | relabelings: 13 | - action: replace 14 | replacement: loki/$1 15 | sourceLabels: 16 | - job 17 | targetLabel: job 18 | - action: replace 19 | replacement: loki 20 | targetLabel: cluster 21 | scheme: http 22 | selector: 23 | matchExpressions: 24 | - key: prometheus.io/service-monitor 25 | operator: NotIn 26 | values: 27 | - "false" 28 | matchLabels: 29 | app.kubernetes.io/instance: loki 30 | app.kubernetes.io/name: loki 31 | -------------------------------------------------------------------------------- /bootstrap/root/values.yaml: -------------------------------------------------------------------------------- 1 | gitops: 2 | repo: https://github.com/pando85/homelab 3 | revision: master 4 | stacks: 5 | - name: bootstrap 6 | namespace: argocd 7 | - name: system 8 | ignoreDifferences: 9 | - group: '*' 10 | kind: CustomResourceDefinition 11 | name: 'addresspools.metallb.io' 12 | jsonPointers: 13 | - /spec/conversion/webhook/clientConfig/caBundle 14 | - group: '*' 15 | kind: CustomResourceDefinition 16 | name: 'bgppeers.metallb.io' 17 | jsonPointers: 18 | - /spec/conversion/webhook/clientConfig/caBundle 19 | - group: '*' 20 | kind: Secret 21 | jsonPointers: 22 | - /data/ca.crt 23 | - /data/ca.key 24 | - /data/tls.crt 25 | - /data/tls.key 26 | - name: platform 27 | - name: apps 28 | -------------------------------------------------------------------------------- /system/kanidm/templates/service-ldaps.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: kanidm-ldaps 5 | labels: 6 | 
app.kubernetes.io/instance: kanidm 7 | app.kubernetes.io/name: kanidm 8 | app.kubernetes.io/service: kanidm-ldaps 9 | kanidm.kaniop.rs/cluster: kanidm 10 | annotations: 11 | lbipam.cilium.io/ips: "192.168.193.8" 12 | external-dns.alpha.kubernetes.io/enabled: "true" 13 | external-dns.alpha.kubernetes.io/hostname: ldaps.idm.internal.grigri.cloud 14 | spec: 15 | type: LoadBalancer 16 | externalTrafficPolicy: Local 17 | sessionAffinity: ClientIP 18 | ports: 19 | - name: ldaps 20 | port: 636 21 | protocol: TCP 22 | targetPort: 3636 23 | selector: 24 | app.kubernetes.io/instance: kanidm 25 | app.kubernetes.io/name: kanidm 26 | kanidm.kaniop.rs/cluster: kanidm 27 | -------------------------------------------------------------------------------- /system/system-upgrade/k3s/server.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: upgrade.cattle.io/v1 2 | kind: Plan 3 | metadata: 4 | name: k3s-server 5 | namespace: system-upgrade 6 | labels: 7 | k3s-upgrade: server 8 | spec: 9 | nodeSelector: 10 | matchExpressions: 11 | - key: node-role.kubernetes.io/control-plane 12 | operator: Exists 13 | serviceAccountName: system-upgrade 14 | concurrency: 1 15 | # If left unspecified, no drain will be performed. 16 | # See: 17 | # - https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/ 18 | # - https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#drain 19 | # drain: 20 | # force: true 21 | # skipWaitForDeleteTimeout: 300 # Honor PodDisruptionBudgets 22 | # disableEviction: true 23 | cordon: true 24 | upgrade: 25 | image: rancher/k3s-upgrade 26 | -------------------------------------------------------------------------------- /apps/mosquitto/templates/external-secret.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/external-secrets.io/externalsecret_v1beta1.json 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: mosquitto-user 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: mosquitto-user 12 | template: 13 | data: 14 | username: {{`"{{ .username }}"`}} 15 | password: {{`"{{ .password }}"`}} 16 | mosquitto_pwd: | 17 | {{`{{ .username }}`}}:{{`{{ .password }}`}} 18 | data: 19 | - secretKey: username 20 | remoteRef: 21 | key: /mosquitto/user 22 | property: username 23 | - secretKey: password 24 | remoteRef: 25 | key: /mosquitto/user 26 | property: password 27 | -------------------------------------------------------------------------------- /system/kube-system/resources/cilium/bgp-peering-policy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "cilium.io/v2alpha1" 2 | kind: CiliumBGPPeeringPolicy 3 | metadata: 4 | name: default 5 | spec: 6 | nodeSelector: 7 | matchLabels: 8 | kubernetes.io/os: linux 9 | virtualRouters: 10 | - localASN: 64513 11 | # this allows routingMode native without the need for autoDirectNodeRoutes: true (not tested) 12 | # exportPodCIDR: true 13 | # Advertisement of services to BGP peers 14 | # https://docs.cilium.io/en/v1.15/network/bgp-control-plane/#service-announcements 15 | serviceSelector: 16 | matchExpressions: 17 | - key: advertise-bgp 18 | operator: NotIn 19 | values: 20 | - "never-used-value" 21 | neighbors: 22 | - peerAddress: "192.168.192.1/32" 23 | peerASN: 64512 24 | --------------------------------------------------------------------------------
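To check that the peering policy above actually brought up sessions with the router, the `cilium` CLI can be queried. A hedged sketch, assuming a recent cilium-cli is installed and pointed at this cluster:

```bash
# per-node BGP session state; the session towards 192.168.192.1 should be established
cilium bgp peers
# confirm the policy object itself was accepted (the CRD is cluster-scoped)
kubectl get ciliumbgppeeringpolicies.cilium.io default -o yaml
```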
/system/monitoring/resources/long-term-metrics/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | annotations: 5 | cert-manager.io/cluster-issuer: letsencrypt-prod-dns 6 | external-dns.alpha.kubernetes.io/enabled: "true" 7 | name: prometheus-long-term 8 | namespace: monitoring 9 | spec: 10 | ingressClassName: nginx-internal 11 | rules: 12 | - host: prometheus-long-term.internal.grigri.cloud 13 | http: 14 | paths: 15 | - backend: 16 | service: 17 | name: prometheus-long-term 18 | port: 19 | number: 9090 20 | path: / 21 | pathType: ImplementationSpecific 22 | tls: 23 | - hosts: 24 | - prometheus-long-term.internal.grigri.cloud 25 | secretName: prometheus-long-term-general-tls 26 | -------------------------------------------------------------------------------- /apps/mosquitto-tls/templates/external-secret.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/external-secrets.io/externalsecret_v1beta1.json 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: mosquitto-tls-user 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: mosquitto-tls-user 12 | template: 13 | data: 14 | username: {{`"{{ .username }}"`}} 15 | password: {{`"{{ .password }}"`}} 16 | mosquitto_pwd: | 17 | {{`{{ .username }}`}}:{{`{{ .password }}`}} 18 | data: 19 | - secretKey: username 20 | remoteRef: 21 | key: /mosquitto-tls/user 22 | property: username 23 | - secretKey: password 24 | remoteRef: 25 | key: /mosquitto-tls/user 26 | property: password 27 | -------------------------------------------------------------------------------- /apps/atuin/templates/k8s-secret-store/rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ServiceAccount 3 | apiVersion: v1 4 | metadata: 5 | name: external-secrets-k8s-store 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: Role 9 | metadata: 10 | name: external-secrets-k8s-store 11 | rules: 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - secrets 16 | verbs: 17 | - get 18 | - list 19 | - watch 20 | - apiGroups: 21 | - authorization.k8s.io 22 | resources: 23 | - selfsubjectrulesreviews 24 | verbs: 25 | - create 26 | --- 27 | apiVersion: rbac.authorization.k8s.io/v1 28 | kind: RoleBinding 29 | metadata: 30 | name: external-secrets-k8s-store 31 | roleRef: 32 | apiGroup: rbac.authorization.k8s.io 33 | kind: Role 34 | name: external-secrets-k8s-store 35 | subjects: 36 | - kind: ServiceAccount 37 | name: external-secrets-k8s-store 38 | -------------------------------------------------------------------------------- /apps/freshrss/templates/k8s-secret-store/rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ServiceAccount 3 | apiVersion: v1 4 | metadata: 5 | name: external-secrets-k8s-store 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: Role 9 | metadata: 10 | name: external-secrets-k8s-store 11 | rules: 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - secrets 16 | verbs: 17 | - get 18 | - list 19 | - watch 20 | - apiGroups: 21 | - authorization.k8s.io 22 | resources: 23 | - selfsubjectrulesreviews 24 | verbs: 25 | - create 26 | --- 27 | apiVersion: rbac.authorization.k8s.io/v1 28 | kind: RoleBinding 29 | metadata: 30 | name: external-secrets-k8s-store 
31 | roleRef: 32 | apiGroup: rbac.authorization.k8s.io 33 | kind: Role 34 | name: external-secrets-k8s-store 35 | subjects: 36 | - kind: ServiceAccount 37 | name: external-secrets-k8s-store 38 | -------------------------------------------------------------------------------- /platform/git/templates/k8s-secret-store/rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ServiceAccount 3 | apiVersion: v1 4 | metadata: 5 | name: external-secrets-k8s-store 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: Role 9 | metadata: 10 | name: external-secrets-k8s-store 11 | rules: 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - secrets 16 | verbs: 17 | - get 18 | - list 19 | - watch 20 | - apiGroups: 21 | - authorization.k8s.io 22 | resources: 23 | - selfsubjectrulesreviews 24 | verbs: 25 | - create 26 | --- 27 | apiVersion: rbac.authorization.k8s.io/v1 28 | kind: RoleBinding 29 | metadata: 30 | name: external-secrets-k8s-store 31 | roleRef: 32 | apiGroup: rbac.authorization.k8s.io 33 | kind: Role 34 | name: external-secrets-k8s-store 35 | subjects: 36 | - kind: ServiceAccount 37 | name: external-secrets-k8s-store 38 | -------------------------------------------------------------------------------- /apps/esphome/config/packages/connection.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | api: 3 | encryption: 4 | key: !secret home_assistant_api_key 5 | 6 | ota: 7 | - platform: esphome 8 | password: !secret ota_password 9 | 10 | safe_mode: 11 | 12 | mdns: 13 | disabled: false 14 | 15 | wifi: 16 | ssid: !secret wifi_ssid 17 | password: !secret wifi_password 18 | domain: !secret domain_devices 19 | 20 | # Enable scanning to connect to the AP with the best signal 21 | # Issue related: https://github.com/esphome/feature-requests/issues/731 22 | fast_connect: false 23 | 24 | reboot_timeout: 3h 25 | power_save_mode: none 26 | ap: 27 | ap_timeout: 5min 28 | ssid: "${friendly_name}" 29 | password: !secret wifi_fallback_ap_password 30 | 31 | captive_portal: 32 | 33 | time: 34 | - platform: sntp 35 | timezone: Europe/Madrid 36 | id: sntp_time 37 | servers: 38 | - pfsense.grigri 39 | -------------------------------------------------------------------------------- /bootstrap/argocd/templates/k8s-secret-store/rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ServiceAccount 3 | apiVersion: v1 4 | metadata: 5 | name: external-secrets-k8s-store 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: Role 9 | metadata: 10 | name: external-secrets-k8s-store 11 | rules: 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - secrets 16 | verbs: 17 | - get 18 | - list 19 | - watch 20 | - apiGroups: 21 | - authorization.k8s.io 22 | resources: 23 | - selfsubjectrulesreviews 24 | verbs: 25 | - create 26 | --- 27 | apiVersion: rbac.authorization.k8s.io/v1 28 | kind: RoleBinding 29 | metadata: 30 | name: external-secrets-k8s-store 31 | roleRef: 32 | apiGroup: rbac.authorization.k8s.io 33 | kind: Role 34 | name: external-secrets-k8s-store 35 | subjects: 36 | - kind: ServiceAccount 37 | name: external-secrets-k8s-store 38 | -------------------------------------------------------------------------------- /apps/navidrome/templates/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: navidrome-secret 5 | namespace: 
navidrome 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | data: 11 | - secretKey: ND_PASSWORDENCRYPTIONKEY 12 | remoteRef: 13 | key: /navidrome/password 14 | property: encryption_key 15 | - secretKey: ND_SPOTIFY_ID 16 | remoteRef: 17 | key: /navidrome/spotify 18 | property: id 19 | - secretKey: ND_SPOTIFY_SECRET 20 | remoteRef: 21 | key: /navidrome/spotify 22 | property: secret 23 | - secretKey: ND_LASTFM_APIKEY 24 | remoteRef: 25 | key: /navidrome/lastfm 26 | property: api_key 27 | - secretKey: ND_LASTFM_SECRET 28 | remoteRef: 29 | key: /navidrome/lastfm 30 | property: secret 31 | -------------------------------------------------------------------------------- /system/oauth2-proxy/templates/k8s-secret-store/rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: ServiceAccount 3 | apiVersion: v1 4 | metadata: 5 | name: external-secrets-k8s-store 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: Role 9 | metadata: 10 | name: external-secrets-k8s-store 11 | rules: 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - secrets 16 | verbs: 17 | - get 18 | - list 19 | - watch 20 | - apiGroups: 21 | - authorization.k8s.io 22 | resources: 23 | - selfsubjectrulesreviews 24 | verbs: 25 | - create 26 | --- 27 | apiVersion: rbac.authorization.k8s.io/v1 28 | kind: RoleBinding 29 | metadata: 30 | name: external-secrets-k8s-store 31 | roleRef: 32 | apiGroup: rbac.authorization.k8s.io 33 | kind: Role 34 | name: external-secrets-k8s-store 35 | subjects: 36 | - kind: ServiceAccount 37 | name: external-secrets-k8s-store 38 | -------------------------------------------------------------------------------- /apps/mintpsicologia/templates/external-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mintpsicologia-external 5 | annotations: 6 | cert-manager.io/cluster-issuer: letsencrypt-prod-dns 7 | external-dns.alpha.kubernetes.io/enabled: "true" 8 | external-dns.alpha.kubernetes.io/target: grigri.cloud 9 | nginx.ingress.kubernetes.io/proxy-buffer-size: "16k" 10 | spec: 11 | ingressClassName: nginx-external 12 | rules: 13 | - host: mintpsicologia.grigri.cloud 14 | http: 15 | paths: 16 | - backend: 17 | service: 18 | name: mintpsicologia-wordpress 19 | port: 20 | name: http 21 | path: / 22 | pathType: ImplementationSpecific 23 | tls: 24 | - hosts: 25 | - mintpsicologia.grigri.cloud 26 | secretName: mintpsicologia-external-tls 27 | -------------------------------------------------------------------------------- /system/cert-manager/values.yaml: -------------------------------------------------------------------------------- 1 | cert-manager: 2 | installCRDs: true 3 | 4 | 5 | resources: 6 | requests: 7 | cpu: 10m 8 | memory: 36Mi 9 | limits: 10 | memory: 128Mi 11 | 12 | extraArgs: 13 | # When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted 14 | - --enable-certificate-owner-ref=true 15 | 16 | global: 17 | leaderElection: 18 | namespace: cert-manager 19 | 20 | prometheus: 21 | enabled: true 22 | servicemonitor: 23 | enabled: true 24 | labels: 25 | release: monitoring 26 | 27 | webhook: 28 | resources: 29 | requests: 30 | cpu: 10m 31 | memory: 18Mi 32 | limits: 33 | memory: 128Mi 34 | 35 | cainjector: 36 | resources: 37 | requests: 38 | cpu: 20m 39 | memory: 72Mi 40 | limits: 41 | memory: 172Mi 42 | 
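After changing these cert-manager values, it is worth confirming that the deployed controller picked up the extra flag and that certificates still converge. A minimal sketch, assuming the chart's default deployment name and namespace (`cert-manager`):

```bash
# all Certificate resources across the cluster should reach Ready=True
kubectl get certificates -A
# verify --enable-certificate-owner-ref landed on the controller container
kubectl -n cert-manager get deploy cert-manager \
  -o jsonpath='{.spec.template.spec.containers[0].args}'
```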
-------------------------------------------------------------------------------- /system/monitoring/resources/long-term-metrics/prometheus-rules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: long-term-prometheus-rules 5 | labels: 6 | operator.prometheus.io/instance: prometheus-long-term 7 | spec: 8 | groups: 9 | - name: general.rules 10 | rules: 11 | - alert: TargetDown 12 | annotations: 13 | description: '{{ printf "%.4g" $value }}% of the {{ $labels.job }}/{{ $labels.service }} targets in {{ $labels.namespace }} namespace are down.' 14 | runbook_url: https://runbooks.prometheus-operator.dev/runbooks/general/targetdown 15 | summary: One or more targets are unreachable. 16 | expr: "100 * (count(up == 0) BY (cluster, job, namespace, service) / count(up) BY (cluster, job, namespace, service)) > 10" 17 | for: 10m 18 | labels: 19 | severity: warning 20 | -------------------------------------------------------------------------------- /system/system-upgrade/k3s/agent.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: upgrade.cattle.io/v1 2 | kind: Plan 3 | metadata: 4 | name: k3s-agent 5 | namespace: system-upgrade 6 | labels: 7 | k3s-upgrade: agent 8 | spec: 9 | nodeSelector: 10 | matchExpressions: 11 | - key: node-role.kubernetes.io/control-plane 12 | operator: DoesNotExist 13 | serviceAccountName: system-upgrade 14 | prepare: 15 | image: rancher/k3s-upgrade 16 | args: 17 | - prepare 18 | - k3s-server 19 | concurrency: 1 20 | # If left unspecified, no drain will be performed. 21 | # See: 22 | # - https://kubernetes.io/docs/tasks/administer-cluster/safely-drain-node/ 23 | # - https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands#drain 24 | # drain: 25 | # force: true 26 | # skipWaitForDeleteTimeout: 300 # Honor PodDisruptionBudgets 27 | cordon: true 28 | upgrade: 29 | image: rancher/k3s-upgrade 30 | -------------------------------------------------------------------------------- /metal/roles/setup/templates/zfs.conf.j2: -------------------------------------------------------------------------------- 1 | options zfs zfs_arc_min={{ zfs_arc_min_gb | int * 1024 * 1024 * 1024 }} 2 | options zfs zfs_arc_max={{ zfs_arc_max_gb | int * 1024 * 1024 * 1024 }} 3 | 4 | # Increase throughput 5 | # https://openzfs.github.io/openzfs-docs/Performance%20and%20Tuning/ZIO%20Scheduler.html 6 | # https://openzfs.github.io/openzfs-docs/Performance%20and%20Tuning/Module%20Parameters.html#zfs-vdev-sync-read-min-active 7 | options zfs zfs_vdev_sync_read_max_active=20 8 | options zfs zfs_vdev_sync_read_min_active=20 9 | options zfs zfs_vdev_sync_write_max_active=20 10 | options zfs zfs_vdev_sync_write_min_active=20 11 | 12 | options zfs l2arc_write_max={{ l2arc_write_max_mb | int * 1024 * 1024 }} 13 | options zfs l2arc_write_boost={{ l2arc_write_boost_mb | int * 1024 * 1024 }} 14 | options zfs l2arc_noprefetch=0 15 | options zfs l2arc_headroom=4 16 | options zfs l2arc_headroom_boost=200 17 | options zfs l2arc_norw=0 18 | -------------------------------------------------------------------------------- /apps/home-assistant/templates/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/external-secrets.io/externalsecret_v1beta1.json 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | 
metadata: 5 | name: hass-secrets 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: hass-secrets 12 | template: 13 | data: 14 | notify.yaml: | 15 | - platform: telegram 16 | name: agil 17 | chat_id: {{`"{{ .chat_id }}"`}} 18 | - platform: telegram 19 | name: all 20 | chat_id: {{`"{{ .chat_id_all }}"`}} 21 | data: 22 | - secretKey: chat_id 23 | remoteRef: 24 | key: /home-assistant/telegram 25 | property: chat_id 26 | - secretKey: chat_id_all 27 | remoteRef: 28 | key: /home-assistant/telegram 29 | property: chat_id_all 30 | -------------------------------------------------------------------------------- /metal/playbooks/uninstall/k3s.yml: -------------------------------------------------------------------------------- 1 | - name: Remove k3s 2 | hosts: all 3 | tasks: 4 | - name: Disable k3s 5 | systemd: 6 | name: k3s 7 | enabled: false 8 | state: stopped 9 | 10 | - name: Clean all running processes 11 | shell: | 12 | set -o pipefail 13 | if [ -e /sys/fs/cgroup/systemd/system.slice/k3s.service/cgroup.procs ]; then 14 | kill -9 `cat /sys/fs/cgroup/systemd/system.slice/k3s.service/cgroup.procs` 15 | fi 16 | umount `cat /proc/self/mounts | awk '{print $2}' | grep '^/run/k3s'` 17 | umount `cat /proc/self/mounts | awk '{print $2}' | grep '^/var/lib/rancher/k3s'` 18 | 19 | - name: Remove all directories 20 | file: 21 | path: "{{ item }}" 22 | state: absent 23 | loop: 24 | - /etc/systemd/system/k3s.service 25 | - /usr/local/bin/k3s 26 | - /var/lib/rancher/k3s 27 | - /etc/rancher/k3s 28 | -------------------------------------------------------------------------------- /test/cluster.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: k3d.io/v1alpha4 2 | kind: Simple 3 | metadata: 4 | name: homelab-dev 5 | image: docker.io/rancher/k3s:v1.24.3-k3s1 6 | servers: 1 7 | agents: 3 8 | options: 9 | k3s: 10 | nodeLabels: 11 | - label: storage-node=true 12 | nodeFilters: 13 | - agent:* 14 | extraArgs: 15 | # add when longhorn is available 16 | # - arg: --disable=local-storage 17 | # nodeFilters: 18 | # - server:* 19 | - arg: --disable=traefik 20 | nodeFilters: 21 | - server:* 22 | - arg: --disable-cloud-controller 23 | nodeFilters: 24 | - server:* 25 | - arg: "--kubelet-arg=eviction-hard=imagefs.available<1%,nodefs.available<1%" 26 | nodeFilters: 27 | - agent:* 28 | - arg: "--kubelet-arg=eviction-minimum-reclaim=imagefs.available=1%,nodefs.available=1%" 29 | nodeFilters: 30 | - agent:* 31 | subnet: 172.28.0.0/16 32 | -------------------------------------------------------------------------------- /system/zfs-localpv/tests/write.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: test-claim 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 5Gi 12 | storageClassName: backup 13 | --- 14 | apiVersion: batch/v1 15 | kind: Job 16 | metadata: 17 | name: write 18 | spec: 19 | template: 20 | metadata: 21 | name: write 22 | spec: 23 | containers: 24 | - name: write 25 | image: debian 26 | command: 27 | - dd 28 | - if=/dev/zero 29 | - of=/mnt/pv/test.img 30 | - bs=1G 31 | - count=1 32 | - oflag=dsync 33 | volumeMounts: 34 | - mountPath: /mnt/pv 35 | name: test-volume 36 | volumes: 37 | - name: test-volume 38 | persistentVolumeClaim: 39 | claimName: test-claim 40 | restartPolicy: Never 41 | -------------------------------------------------------------------------------- 
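The write test above doubles as a crude benchmark: `dd` with `oflag=dsync` reports the synchronous write speed of the underlying ZFS dataset in the pod logs. A sketch of running it, assuming the `backup` storage class is also backed by the `datasets` pool:

```bash
kubectl apply -f system/zfs-localpv/tests/write.yaml
kubectl wait --for=condition=complete job/write --timeout=10m
# dd prints its MB/s summary to stderr, which ends up in the logs
kubectl logs job/write
# on the node: the PVC should appear as a dataset under the pool
zfs list -r datasets
```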
/apps/home-assistant/templates/appdaemon/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/external-secrets.io/externalsecret_v1beta1.json 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: hass-appdaemon 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: hass-appdaemon 12 | data: 13 | - secretKey: TOKEN 14 | remoteRef: 15 | key: /home-assistant/appdaemon/climate/hass 16 | property: token 17 | - secretKey: LATITUDE 18 | remoteRef: 19 | key: /home-assistant/appdaemon/climate/hass 20 | property: latitude 21 | - secretKey: LONGITUDE 22 | remoteRef: 23 | key: /home-assistant/appdaemon/climate/hass 24 | property: longitude 25 | - secretKey: ELEVATION 26 | remoteRef: 27 | key: /home-assistant/appdaemon/climate/hass 28 | property: elevation 29 | -------------------------------------------------------------------------------- /system/pod-cleaner/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | controllers: 3 | pod-cleaner: 4 | containers: 5 | pod-cleaner: 6 | image: 7 | # code from: https://github.com/Daniel-Boluda/homelab/tree/6396459b75dc2af9914ef58a5e01da06c0820179/system/nvidia-watcher/src 8 | repository: bolferdocker/nvidia-watcher 9 | tag: 0.0.6 10 | env: 11 | LABEL_SELECTOR: pod-cleaner.dbcloud.org/watch=true 12 | POD_STATUS_REASON: UnexpectedAdmissionError 13 | POD_STATUS_PHASE: Failed 14 | ENABLE_ALLOCATABLE: false 15 | ENABLE_SMOKE: false 16 | 17 | resources: 18 | requests: 19 | cpu: 10m 20 | memory: 54Mi 21 | limits: 22 | memory: 128Mi 23 | 24 | serviceAccount: 25 | create: false 26 | name: pod-cleaner 27 | 28 | defaultPodOptions: 29 | nodeSelector: 30 | kubernetes.io/arch: amd64 31 | -------------------------------------------------------------------------------- /metal/roles/setup/tasks/zfs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: zfs | Ensure variables are defined 3 | assert: 4 | that: 5 | - zfs_arc_min_gb is defined 6 | - zfs_arc_max_gb is defined 7 | - l2arc_write_max_mb is defined 8 | - l2arc_write_boost_mb is defined 9 | 10 | - name: zfs | Enable smartd service 11 | systemd: 12 | name: smartd 13 | daemon_reload: true 14 | enabled: true 15 | 16 | - name: zfs | Install ZFS tools 17 | apt: 18 | name: zfsutils-linux 19 | state: present 20 | force_apt_get: true 21 | update_cache: true 22 | 23 | # To check creation command: `zpool history ` 24 | - name: zfs | ZFS import datasets 25 | command: zpool import datasets 26 | args: 27 | creates: /datasets 28 | notify: mount zfs datasets 29 | 30 | - name: zfs | Configure ZFS module 31 | template: 32 | src: zfs.conf.j2 33 | dest: /etc/modprobe.d/zfs.conf 34 | mode: "0644" 35 | notify: reload zfs module config 36 | tags: 37 | - zfs-config 38 | -------------------------------------------------------------------------------- /metal/roles/prepare/tasks/user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "user | Add group {{ prepare_username }}" 3 | group: 4 | name: "{{ prepare_username }}" 5 | gid: 1000 6 | 7 | - name: "user | Add group sudo" 8 | group: 9 | name: "sudo" 10 | gid: 27 11 | 12 | - name: "user | Add user {{ prepare_username }}" 13 | user: 14 | name: "{{ prepare_username }}" 15 | shell: /bin/bash 16 | uid: 1000 17 | groups: "sudo,{{ 
prepare_username }}" 18 | append: true 19 | 20 | - name: user | Configure vim as default editor 21 | copy: 22 | content: "export EDITOR=vi" 23 | dest: /etc/profile.d/editor.sh 24 | mode: "0644" 25 | 26 | - name: user | Allow sudo without password 27 | lineinfile: 28 | path: /etc/sudoers 29 | regexp: "^%sudo" 30 | line: "%sudo ALL=(ALL:ALL) NOPASSWD:ALL" 31 | 32 | - name: user | Set authorized keys 33 | authorized_key: 34 | user: "{{ prepare_username }}" 35 | state: present 36 | key: "{{ item }}" 37 | loop: "{{ prepare_public_keys }}" 38 | -------------------------------------------------------------------------------- /apps/nextcloud/templates/postgresql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "acid.zalan.do/v1" 2 | kind: postgresql 3 | metadata: 4 | name: nextcloud-postgres 5 | labels: 6 | backup/retain: weekly 7 | spec: 8 | teamId: nextcloud 9 | 10 | numberOfInstances: 1 11 | 12 | resources: 13 | requests: 14 | cpu: 10m 15 | memory: 128Mi 16 | limits: 17 | memory: 1Gi 18 | 19 | volume: 20 | size: 6Gi 21 | 22 | users: 23 | nextcloud: 24 | - superuser 25 | - createdb 26 | databases: 27 | nextcloud: nextcloud 28 | 29 | postgresql: 30 | version: "17" 31 | parameters: 32 | archive_mode: "off" 33 | # minimal value. If not default to 100 34 | max_connections: "25" 35 | shared_buffers: 32MB 36 | log_checkpoints: "off" 37 | log_connections: "off" 38 | log_disconnections: "off" 39 | log_lock_waits: "off" 40 | log_min_duration_statement: "-1" 41 | log_statement: none 42 | # ZFS settings: 43 | full_page_writes: "off" 44 | -------------------------------------------------------------------------------- /metal/roles/setup/tasks/backup-user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: backup-user | Add backup group 3 | group: 4 | name: backup 5 | gid: 987 6 | 7 | - name: backup-user | Add backup user 8 | user: 9 | name: backup 10 | shell: /bin/bash 11 | uid: 990 12 | home: /datasets/backups 13 | create_home: false 14 | groups: backup 15 | append: true 16 | 17 | - name: backup-user | Create .ssh directory if it doesn't exist 18 | file: 19 | path: /datasets/backups/.ssh 20 | state: directory 21 | owner: backup 22 | group: backup 23 | mode: '0700' 24 | 25 | - name: backup-user | Generate RSA SSH key for backup user 26 | openssh_keypair: 27 | path: /datasets/backups/.ssh/id_rsa 28 | type: rsa 29 | size: 4096 30 | owner: backup 31 | group: backup 32 | mode: '0600' 33 | state: present 34 | comment: "backup@{{ ansible_hostname }}" 35 | # bug related with ZFS not supporting chattr: https://github.com/ansible/ansible/issues/77217 36 | changed_when: false 37 | -------------------------------------------------------------------------------- /platform/git/templates/postgresql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "acid.zalan.do/v1" 2 | kind: postgresql 3 | metadata: 4 | name: gitea-postgres 5 | labels: 6 | backup/retain: quaterly 7 | spec: 8 | teamId: gitea 9 | 10 | numberOfInstances: 1 11 | 12 | resources: 13 | requests: 14 | cpu: 10m 15 | memory: 128Mi 16 | limits: 17 | cpu: "1" 18 | memory: 256Mi 19 | 20 | volume: 21 | size: 2Gi 22 | 23 | users: 24 | gitea: 25 | - superuser 26 | - createdb 27 | databases: 28 | gitea: gitea 29 | 30 | postgresql: 31 | version: "17" 32 | parameters: 33 | archive_mode: "off" 34 | # minimal value. 
if unset, defaults to 100 35 | max_connections: "25" 36 | shared_buffers: 32MB 37 | log_checkpoints: "off" 38 | log_connections: "off" 39 | log_disconnections: "off" 40 | log_lock_waits: "off" 41 | log_min_duration_statement: "-1" 42 | log_statement: none 43 | # ZFS settings: 44 | full_page_writes: "off" 45 | -------------------------------------------------------------------------------- /docs/user-guide/clone-data.md: -------------------------------------------------------------------------------- 1 | # Clone data between volumes 2 | 3 | ## rsync 4 | 5 | No dependencies: 6 | 7 | ```yaml 8 | apiVersion: batch/v1 9 | kind: Job 10 | metadata: 11 | name: rsync 12 | namespace: plex 13 | spec: 14 | template: 15 | metadata: 16 | name: rsync 17 | spec: 18 | containers: 19 | - name: rsync 20 | image: instrumentisto/rsync-ssh 21 | command: 22 | - rsync 23 | - -av 24 | - --numeric-ids 25 | - /src/ 26 | - /dest/ 27 | volumeMounts: 28 | - name: src 29 | mountPath: "/src/" 30 | - name: dest 31 | mountPath: "/dest/" 32 | volumes: 33 | - name: src 34 | persistentVolumeClaim: 35 | claimName: config-plex-0 36 | - name: dest 37 | persistentVolumeClaim: 38 | claimName: config-plex-0-zfs 39 | restartPolicy: Never 40 | nodeSelector: 41 | name: grigri 42 | ``` 43 | -------------------------------------------------------------------------------- /metal/roles/prepare/files/journald.conf: -------------------------------------------------------------------------------- 1 | # Managed by Ansible 2 | # This file is part of systemd. 3 | # 4 | # systemd is free software; you can redistribute it and/or modify it under the 5 | # terms of the GNU Lesser General Public License as published by the Free 6 | # Software Foundation; either version 2.1 of the License, or (at your option) 7 | # any later version. 8 | # 9 | # Entries in this file show the compile time defaults. Local configuration 10 | # should be created by either modifying this file, or by creating "drop-ins" in 11 | # the journald.conf.d/ subdirectory. The latter is generally recommended. 12 | # Defaults can be restored by simply deleting this file and all drop-ins. 13 | # 14 | # Use 'systemd-analyze cat-config systemd/journald.conf' to display the full config. 15 | # 16 | # See journald.conf(5) for details.
17 | 18 | [Journal] 19 | Storage=volatile 20 | Compress=yes 21 | #Seal=yes 22 | #SplitMode=uid 23 | #SyncIntervalSec=5m 24 | RateLimitIntervalSec=30s 25 | RateLimitBurst=10000 26 | SystemMaxUse=10M 27 | -------------------------------------------------------------------------------- /system/zfs-localpv/tests/clone.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: clone-of-test-claim 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: backup 10 | resources: 11 | requests: 12 | storage: 5Gi 13 | dataSource: 14 | name: test-claim 15 | kind: PersistentVolumeClaim 16 | --- 17 | apiVersion: batch/v1 18 | kind: Job 19 | metadata: 20 | name: clone-read 21 | spec: 22 | template: 23 | metadata: 24 | name: clone-read 25 | spec: 26 | containers: 27 | - name: read 28 | image: ubuntu:xenial 29 | command: 30 | - dd 31 | - if=/mnt/pv/test.img 32 | - of=/dev/null 33 | - bs=8k 34 | volumeMounts: 35 | - mountPath: "/mnt/pv" 36 | name: test-volume 37 | volumes: 38 | - name: test-volume 39 | persistentVolumeClaim: 40 | claimName: clone-of-test-claim 41 | restartPolicy: Never 42 | -------------------------------------------------------------------------------- /apps/transcoder/templates/postgresql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "acid.zalan.do/v1" 2 | kind: postgresql 3 | metadata: 4 | name: transcoder-postgres 5 | labels: 6 | backup/retain: quaterly 7 | spec: 8 | teamId: transcoder 9 | numberOfInstances: 1 10 | resources: 11 | requests: 12 | cpu: 10m 13 | memory: 128Mi 14 | limits: 15 | cpu: 500m 16 | memory: 256Mi 17 | 18 | volume: 19 | size: 2Gi 20 | 21 | users: 22 | transcoder: 23 | - superuser 24 | - createdb 25 | databases: 26 | transcoder: transcoder 27 | 28 | postgresql: 29 | version: "17" 30 | parameters: 31 | archive_mode: 'off' 32 | # minimal value; if unset, defaults to 100 33 | max_connections: "25" 34 | shared_buffers: 32MB 35 | log_checkpoints: 'off' 36 | log_connections: 'off' 37 | log_disconnections: 'off' 38 | log_lock_waits: 'off' 39 | log_min_duration_statement: "-1" 40 | log_statement: none 41 | # ZFS settings: 42 | full_page_writes: "off" 43 | -------------------------------------------------------------------------------- /apps/freshrss/templates/postgresql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "acid.zalan.do/v1" 2 | kind: postgresql 3 | metadata: 4 | name: freshrss-postgres 5 | labels: 6 | backup/retain: weekly 7 | spec: 8 | teamId: freshrss 9 | 10 | numberOfInstances: 1 11 | 12 | resources: 13 | requests: 14 | cpu: 10m 15 | memory: 128Mi 16 | limits: 17 | cpu: 500m 18 | memory: 256Mi 19 | 20 | volume: 21 | size: 5Gi 22 | 23 | users: 24 | freshrss: 25 | - superuser 26 | - createdb 27 | databases: 28 | freshrss: freshrss 29 | 30 | postgresql: 31 | version: "17" 32 | parameters: 33 | archive_mode: "off" 34 | # minimal value;
if unset, defaults to 100 35 | max_connections: "25" 36 | shared_buffers: 32MB 37 | log_checkpoints: "off" 38 | log_connections: "off" 39 | log_disconnections: "off" 40 | log_lock_waits: "off" 41 | log_min_duration_statement: "-1" 42 | log_statement: none 43 | # ZFS settings: 44 | full_page_writes: "off" 45 | -------------------------------------------------------------------------------- /apps/wallabag/templates/postgresql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: "acid.zalan.do/v1" 2 | kind: postgresql 3 | metadata: 4 | name: wallabag-postgres 5 | labels: 6 | backup/retain: weekly 7 | spec: 8 | teamId: wallabag 9 | 10 | numberOfInstances: 1 11 | 12 | resources: 13 | requests: 14 | cpu: 10m 15 | memory: 128Mi 16 | limits: 17 | cpu: 500m 18 | memory: 256Mi 19 | 20 | volume: 21 | size: 5Gi 22 | 23 | users: 24 | wallabag: 25 | - superuser 26 | - createdb 27 | databases: 28 | wallabag: wallabag 29 | 30 | postgresql: 31 | version: "17" 32 | parameters: 33 | archive_mode: "off" 34 | # minimal value; if unset, defaults to 100 35 | max_connections: "25" 36 | shared_buffers: 32MB 37 | log_checkpoints: "off" 38 | log_connections: "off" 39 | log_disconnections: "off" 40 | log_lock_waits: "off" 41 | log_min_duration_statement: "-1" 42 | log_statement: none 43 | # ZFS settings: 44 | full_page_writes: "off" 45 | -------------------------------------------------------------------------------- /apps/tanque/templates/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: tanque 5 | annotations: 6 | cert-manager.io/cluster-issuer: letsencrypt-prod-dns 7 | external-dns.alpha.kubernetes.io/enabled: "true" 8 | external-dns.alpha.kubernetes.io/target: grigri.cloud 9 | nginx.ingress.kubernetes.io/auth-signin: https://auth.grigri.cloud/oauth2/start 10 | nginx.ingress.kubernetes.io/auth-url: https://auth.grigri.cloud/oauth2/auth?allowed_groups=tanque-users@idm.grigri.cloud 11 | nginx.ingress.kubernetes.io/proxy-buffer-size: "16k" 12 | spec: 13 | ingressClassName: nginx-external 14 | rules: 15 | - host: tanque.grigri.cloud 16 | http: 17 | paths: 18 | - backend: 19 | service: 20 | name: tanque 21 | port: 22 | number: 80 23 | path: / 24 | pathType: ImplementationSpecific 25 | tls: 26 | - hosts: 27 | - tanque.grigri.cloud 28 | secretName: tanque-external-tls 29 | -------------------------------------------------------------------------------- /system/kured/values.yaml: -------------------------------------------------------------------------------- 1 | kured: 2 | extraEnvVars: 3 | - name: KURED_NOTIFY_URL 4 | valueFrom: 5 | secretKeyRef: 6 | name: kured-notifications-secret 7 | key: notifyUrl 8 | 9 | configuration: 10 | # reboot check period (default 1h0m0s) 11 | period: "15m" 12 | startTime: "5:00" 13 | endTime: "8:00" 14 | timeZone: Europe/Madrid 15 | rebootCommand: "/usr/bin/systemctl reboot" 16 | drainPodSelector: "kured.dev/drain=true" 17 | # time in seconds given to each pod to terminate gracefully, if negative, the default value specified in the pod will be used (default: -1) 18 | drainGracePeriod: "180" 19 | # when time is greater than zero, skip waiting for the pods whose deletion timestamp is older than N seconds while draining a node (default: 0) 20 | skipWaitForDeleteTimeout: "200" 21 | # timeout after which the drain is aborted (default: 0, infinite time) 22 | drainTimeout: "361s" 23 | # avoid Tuesday and Wednesday because long-running
backups may still be in progress 24 | rebootDays: [mo, th, fr, sa, su] 25 | -------------------------------------------------------------------------------- /system/oauth2-proxy/templates/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | kind: ExternalSecret 3 | metadata: 4 | name: oauth2-proxy-vault-secret 5 | spec: 6 | secretStoreRef: 7 | kind: ClusterSecretStore 8 | name: vault 9 | target: 10 | name: oauth2-proxy-vault-secret 11 | data: 12 | - secretKey: cookie-secret 13 | remoteRef: 14 | key: /oauth2-proxy/cookie 15 | property: secret 16 | --- 17 | apiVersion: external-secrets.io/v1 18 | kind: ExternalSecret 19 | metadata: 20 | name: oauth2-proxy 21 | spec: 22 | secretStoreRef: 23 | kind: SecretStore 24 | name: k8s-store 25 | target: 26 | name: oauth2-proxy 27 | template: 28 | data: 29 | cookie-secret: {{ `{{ index . "cookie-secret" | toString }}` | quote }} 30 | client-id: {{ `{{ .CLIENT_ID | toString }}` | quote }} 31 | client-secret: {{ `{{ .CLIENT_SECRET | toString }}` | quote }} 32 | dataFrom: 33 | - extract: 34 | key: oauth2-proxy-vault-secret 35 | - extract: 36 | key: k8s-oauth2-proxy-kanidm-oauth2-credentials 37 | -------------------------------------------------------------------------------- /apps/freshrss/templates/external-secret.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: external-secrets.io/v1 3 | kind: ExternalSecret 4 | metadata: 5 | name: freshrss-vault-sso 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: vault 10 | target: 11 | name: freshrss-vault-sso 12 | data: 13 | - secretKey: OIDC_CLIENT_CRYPTO_KEY 14 | remoteRef: 15 | key: /freshrss/sso 16 | property: crypto_passphrase 17 | --- 18 | apiVersion: external-secrets.io/v1 19 | kind: ExternalSecret 20 | metadata: 21 | name: freshrss-sso 22 | spec: 23 | secretStoreRef: 24 | kind: SecretStore 25 | name: k8s-store 26 | target: 27 | name: freshrss-sso 28 | template: 29 | data: 30 | OIDC_CLIENT_CRYPTO_KEY: {{ `{{ .OIDC_CLIENT_CRYPTO_KEY | toString }}` | quote }} 31 | OIDC_CLIENT_ID: {{ `{{ .CLIENT_ID | toString }}` | quote }} 32 | OIDC_CLIENT_SECRET: {{ `{{ .CLIENT_SECRET | toString }}` | quote }} 33 | dataFrom: 34 | - extract: 35 | key: freshrss-vault-sso 36 | - extract: 37 | key: freshrss-kanidm-oauth2-credentials 38 | --------------------------------------------------------------------------------
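The two-stage pattern above (a vault-backed ClusterSecretStore plus a namespace-local `k8s-store` SecretStore reading the kaniop-generated client credentials) ends with a single merged secret. A hedged way to verify the chain, assuming the release runs in a `freshrss` namespace:

```bash
# both ExternalSecrets should report SecretSynced / Ready
kubectl -n freshrss get externalsecrets
# describe lists the merged keys (crypto key plus kanidm client id/secret) without printing values
kubectl -n freshrss describe secret freshrss-sso
```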