├── .gitignore ├── .pre-commit-config.yaml ├── .sops.yaml ├── README.md ├── cluster ├── apps │ ├── authentication │ │ ├── authelia │ │ │ ├── authelia-configmap.yaml │ │ │ ├── authelia-helmrelease.yaml │ │ │ ├── authelia-ingressroute.yaml │ │ │ ├── authelia-middleware.yaml │ │ │ ├── authelia-values.yaml │ │ │ ├── kustomization.yaml │ │ │ └── values │ │ │ │ ├── authelia-values_0.6.3.yaml │ │ │ │ ├── authelia-values_0.7.6.yaml │ │ │ │ ├── authelia-values_0.7.7.yaml │ │ │ │ ├── authelia-values_0.8.1.yaml │ │ │ │ ├── authelia-values_0.8.2.yaml │ │ │ │ └── authelia-values_0.8.38.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ └── postgres │ │ │ ├── kustomization.yaml │ │ │ ├── postgres-helmrelease.yaml │ │ │ ├── postgres-pvc.yaml │ │ │ ├── postgres-values.yaml │ │ │ └── values │ │ │ └── postgres-values_0.3.10.yaml │ ├── blogs │ │ ├── kustomization.yaml │ │ └── simone │ │ │ ├── ingressroutes.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── mariadb │ │ │ ├── kustomization.yaml │ │ │ ├── mariadb-helmrelease.yaml │ │ │ ├── mariadb-pvc.yaml │ │ │ └── mariadb-values.yaml │ │ │ ├── namespace.yaml │ │ │ └── wordpress │ │ │ ├── kustomization.yaml │ │ │ ├── wordpress-helmrelease.yaml │ │ │ ├── wordpress-pvc.yaml │ │ │ └── wordpress-values.yaml │ ├── development │ │ ├── adminer │ │ │ ├── adminer-deployment.yaml │ │ │ ├── adminer-ingressroute.yaml │ │ │ ├── adminer-service.yaml │ │ │ └── kustomization.yaml │ │ ├── docker-registry │ │ │ ├── docker-registry-helmrelease.yaml │ │ │ ├── docker-registry-ingressroute.yaml │ │ │ ├── docker-registry-middleware.yaml │ │ │ ├── docker-registry-pvc.yaml │ │ │ ├── docker-registry-ui │ │ │ │ ├── docker-registry-ui-deployment.yaml │ │ │ │ ├── docker-registry-ui-middleware.yaml │ │ │ │ ├── docker-registry-ui-secret.sops.yaml │ │ │ │ ├── docker-regsitry-ui-ingressroute.yaml │ │ │ │ └── kustomization.yaml │ │ │ ├── docker-registry-values.yaml │ │ │ ├── kustomization.yaml │ │ │ └── namespace.yaml │ │ ├── gitea │ │ │ ├── gitea-config │ │ │ │ └── 
app.example.ini │ │ │ ├── gitea-helmrelease.yaml │ │ │ ├── gitea-ingressroute.yaml │ │ │ ├── gitea-pvc.yaml │ │ │ ├── gitea-values.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── namespace.yaml │ │ │ ├── postgres │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── postgres-helmrelease.yaml │ │ │ │ ├── postgres-pvc.yaml │ │ │ │ └── postgres-values.yaml │ │ │ ├── redis │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── redis-helmrelease.yaml │ │ │ │ ├── redis-pvc.yaml │ │ │ │ └── redis-values.yaml │ │ │ └── values │ │ │ │ ├── gitea-values_0.5.13.yaml │ │ │ │ └── gitea-values_0.5.2.yaml │ │ ├── kustomization.yaml │ │ └── namespace.yaml │ ├── dns │ │ ├── do-dns-updater │ │ │ ├── digitalocean-dns-updater.sh │ │ │ ├── do-dns-updater-cron-job.yaml │ │ │ ├── do-dns-updater-pvc.yaml │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── production │ │ │ ├── kustomization.yaml │ │ │ ├── pihole │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── pihole-config │ │ │ │ │ ├── adlists.list │ │ │ │ │ ├── custom.list │ │ │ │ │ ├── kustomization.yaml │ │ │ │ │ ├── pihole-env-vars.env │ │ │ │ │ ├── regex.list │ │ │ │ │ └── whitelist.txt │ │ │ │ ├── pihole-deployment.yaml │ │ │ │ ├── pihole-ingressroute.yaml │ │ │ │ ├── pihole-tcp-service.yaml │ │ │ │ └── pihole-udp-service.yaml │ │ │ └── unbound │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── unbound-config │ │ │ │ ├── a-records.conf │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── root.hints │ │ │ │ ├── root.key │ │ │ │ └── unbound.conf │ │ │ │ ├── unbound-deployment.yaml │ │ │ │ ├── unbound-tcp-service.yaml │ │ │ │ └── unbound-udp-service.yaml │ │ └── staging │ │ │ ├── blocky-staging │ │ │ ├── blocky-config │ │ │ │ ├── config.yml │ │ │ │ └── kustomization.yaml │ │ │ ├── blocky-configmap.yaml │ │ │ ├── blocky-deployment.yaml │ │ │ ├── blocky-ingressroute.yaml │ │ │ ├── blocky-tcp-service.yaml │ │ │ ├── blocky-udp-service.yaml │ │ │ └── kustomization.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── namespace.yaml │ │ │ ├── pihole-staging │ │ │ ├── 
kustomization.yaml │ │ │ ├── pihole-config │ │ │ │ ├── adlists.list │ │ │ │ ├── custom.list │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── pihole-env-vars.env │ │ │ │ ├── regex.list │ │ │ │ └── whitelist.txt │ │ │ ├── pihole-configmap.yaml │ │ │ ├── pihole-deployment.yaml │ │ │ ├── pihole-tcp-service.yaml │ │ │ └── pihole-udp-service.yaml │ │ │ └── unbound-staging │ │ │ ├── kustomization.yaml │ │ │ ├── unbound-config │ │ │ ├── a-records.conf │ │ │ ├── kustomization.yaml │ │ │ ├── root.hints │ │ │ ├── root.key │ │ │ └── unbound.conf │ │ │ ├── unbound-deployment.yaml │ │ │ ├── unbound-tcp-service.yaml │ │ │ └── unbound-udp-service.yaml │ ├── documentation │ │ ├── documentation-deployment.yaml │ │ ├── documentation-ingressroute.yaml │ │ ├── documentation-pvc.yaml │ │ ├── kustomization.yaml │ │ └── namespace.yaml │ ├── kube-system │ │ ├── kubernetes-dashboard │ │ │ ├── kubernetes-dashboard-adminuser.yaml │ │ │ ├── kubernetes-dashboard-ingressroute.yaml │ │ │ └── kustomization.yaml │ │ ├── kured │ │ │ ├── kured-helmrelease.yaml │ │ │ ├── kured-values.yaml │ │ │ ├── kustomization.yaml │ │ │ └── values │ │ │ │ ├── kured-values_2.11.0.yaml │ │ │ │ ├── kured-values_2.11.1.yaml │ │ │ │ └── kured-values_2.11.2.yaml │ │ ├── kustomization.yaml │ │ ├── reloader │ │ │ ├── kustomization.yaml │ │ │ ├── reloader-helmrelease.yaml │ │ │ ├── reloader-values.yaml │ │ │ └── values │ │ │ │ ├── reloader-values_0.0.102.yaml │ │ │ │ ├── reloader-values_0.0.103.yaml │ │ │ │ ├── reloader-values_0.0.104.yaml │ │ │ │ ├── reloader-values_0.0.105.yaml │ │ │ │ ├── reloader-values_0.0.117.yaml │ │ │ │ └── reloader-values_0.0.99.yaml │ │ └── system-upgrade-controller │ │ │ ├── k3s-upgrade-plan.yaml │ │ │ └── kustomization.yaml │ ├── kustomization.yaml │ ├── monitoring │ │ ├── grafana │ │ │ ├── grafana-dashboards │ │ │ │ ├── k8s-system-api-server.json │ │ │ │ ├── k8s-system-coredns.json │ │ │ │ ├── k8s-views-global.json │ │ │ │ ├── k8s-views-namespaces.json │ │ │ │ ├── k8s-views-nodes.json │ │ │ │ ├── 
k8s-views-pods.json │ │ │ │ ├── kustomization.yaml │ │ │ │ └── old │ │ │ │ │ ├── Raspberry Pi K3S Cluster - Cronjobs-1629135293439.json │ │ │ │ │ ├── Raspberry Pi K3S Cluster - Pi-Hole Dashboard-1629135371441.json │ │ │ │ │ ├── Raspberry Pi K3S Cluster - Speedtest Exporter-1629135450920.json │ │ │ │ │ ├── Raspberry Pi K3S Cluster - Traefik Dashboard-1629135490869.json │ │ │ │ │ ├── fritzbox-router-status.json │ │ │ │ │ ├── grafana-node-exporter-full-dashboard.json │ │ │ │ │ ├── grafana-simple-cluster-dashboard.json │ │ │ │ │ └── speedtest.json │ │ │ ├── grafana-datasources │ │ │ │ ├── datasources.yaml │ │ │ │ └── kustomization.yaml │ │ │ ├── grafana-helmrelease.yaml │ │ │ ├── grafana-ingressroute.yaml │ │ │ ├── grafana-pvc.yaml │ │ │ ├── grafana-values.yaml │ │ │ ├── kustomization.yaml │ │ │ └── values │ │ │ │ ├── grafana-values_6.17.9.yaml │ │ │ │ ├── grafana-values_6.19.0.yaml │ │ │ │ ├── grafana-values_6.20.3.yaml │ │ │ │ ├── grafana-values_6.20.5.yaml │ │ │ │ ├── grafana-values_6.21.5.yaml │ │ │ │ ├── grafana-values_6.21.8.yaml │ │ │ │ ├── grafana-values_6.32.1.yaml │ │ │ │ └── grafana-values_6.32.2.yaml │ │ ├── influxdata │ │ │ ├── chronograf-helmrelease.yaml │ │ │ ├── chronograf-ingressroute.yaml │ │ │ ├── chronograf-values.yaml │ │ │ ├── enc_influxdb-auth-secret.yaml │ │ │ ├── influxdb-helmrelease.yaml │ │ │ ├── influxdb-ingressroute.yaml │ │ │ ├── influxdb-pvc.yaml │ │ │ ├── influxdb-values.yaml │ │ │ └── kustomization.yaml │ │ ├── kube-prometheus-stack │ │ │ ├── kube-prometheus-stack-helmrelease.yaml │ │ │ ├── kube-prometheus-stack-ingressroute.yaml │ │ │ ├── kube-prometheus-stack-values.yaml │ │ │ ├── kube-prometheus-stack-values_36.2.1.yaml │ │ │ ├── kube-prometheus-stack-values_40.0.2.yaml │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── tools │ │ │ ├── fritzinfluxdb │ │ │ │ ├── fritzinfluxdb-deployment.yaml │ │ │ │ ├── fritzinfluxdb.ini │ │ │ │ └── kustomization.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── 
prometheus-exporter │ │ │ │ ├── fastcom-exporter │ │ │ │ │ ├── fastcom-exporter-deployment.yaml │ │ │ │ │ ├── fastcom-exporter-ingressroute.yaml │ │ │ │ │ └── kustomization.yaml │ │ │ │ ├── fritzbox-exporter │ │ │ │ │ ├── fritzbox-exporter-deployment.yaml │ │ │ │ │ ├── fritzbox-exporter-ingressroute.yaml │ │ │ │ │ └── kustomization.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── pihole-exporter │ │ │ │ │ ├── enc_pihole-exporter-secrets.yaml │ │ │ │ │ ├── kustomization.yaml │ │ │ │ │ └── pihole-exporter-deployment.yaml │ │ │ │ └── speedtest-exporter │ │ │ │ │ ├── kustomization.yaml │ │ │ │ │ ├── speedtest-exporter-deployment.yaml │ │ │ │ │ └── speedtest-exporter-ingressroute.yaml │ │ │ └── speedtest-tracker │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── speedtest-tracker-configmap.yaml │ │ │ │ ├── speedtest-tracker-deployment.yaml │ │ │ │ ├── speedtest-tracker-ingressroute.yaml │ │ │ │ └── speedtest-tracker-pvc.yaml │ │ └── uptime-kuma │ │ │ ├── kustomization.yaml │ │ │ ├── uptime-kuma-deployment.yaml │ │ │ ├── uptime-kuma-ingressroute.yaml │ │ │ ├── uptime-kuma-pvc.yaml │ │ │ └── uptime-kuma-service.yaml │ ├── nextcloud │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── nextcloud-helmrelease.yaml │ │ ├── nextcloud-ingressroute.yaml │ │ ├── nextcloud-middleware.yaml │ │ ├── nextcloud-pvcs.yaml │ │ ├── nextcloud-values.yaml │ │ ├── postgres │ │ │ ├── kustomization.yaml │ │ │ ├── postgres-helmrelease.yaml │ │ │ ├── postgres-pvc.yaml │ │ │ └── postgres-values.yaml │ │ ├── redis │ │ │ ├── kustomization.yaml │ │ │ ├── redis-helmrelease.yaml │ │ │ ├── redis-pvc.yaml │ │ │ └── redis-values.yaml │ │ └── values │ │ │ ├── nextcloud-values_0.10.0.yaml │ │ │ ├── nextcloud-values_0.10.1.yaml │ │ │ ├── nextcloud-values_0.7.0.yaml │ │ │ ├── nextcloud-values_0.7.1.yaml │ │ │ ├── nextcloud-values_0.8.0.yaml │ │ │ ├── nextcloud-values_0.8.1.yaml │ │ │ ├── nextcloud-values_0.8.4.yaml │ │ │ ├── nextcloud-values_0.8.4_new.yaml │ │ │ └── nextcloud-values_0.9.0.yaml │ ├── utils │ │ ├── 
kustomization.yaml │ │ ├── namespace.yaml │ │ ├── network-multitool │ │ │ ├── kustomization.yaml │ │ │ └── network-multitool.yaml │ │ └── whoami │ │ │ ├── kustomization.yaml │ │ │ ├── whoami-deployment.yaml │ │ │ └── whoami-ingressroute.yaml │ └── vaultwarden │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── postgres │ │ ├── kustomization.yaml │ │ ├── postgres-helmrelease.yaml │ │ ├── postgres-pvc.yaml │ │ └── postgres-values.yaml │ │ ├── vaultwarden-config │ │ ├── kustomization.yaml │ │ ├── vaultwarden-configmap_old.yaml │ │ └── vaultwarden.env │ │ ├── vaultwarden-deployment.yaml │ │ ├── vaultwarden-ingressroute.yaml │ │ ├── vaultwarden-middleware.yaml │ │ ├── vaultwarden-pvc.yaml │ │ ├── vaultwarden-rbac.yaml │ │ └── vaultwarden-service.yaml ├── base │ ├── apps.yaml │ ├── cluster-secrets.sops.yaml │ ├── cluster-settings.yaml │ ├── core.yaml │ ├── crds.yaml │ └── flux-system │ │ ├── charts │ │ ├── helm │ │ │ ├── authelia-charts.yaml │ │ │ ├── grafana-charts.yaml │ │ │ ├── groundhog2k-charts.yaml │ │ │ ├── influxdata-charts.yaml │ │ │ ├── jetstack-charts.yaml │ │ │ ├── k8s-at-home-charts.yaml │ │ │ ├── kured-charts.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── longhorn-charts.yaml │ │ │ ├── metallb-charts.yaml │ │ │ ├── nfs-provisioner-charts.yaml │ │ │ ├── prometheus-community-charts.yaml │ │ │ ├── stakater-charts.yaml │ │ │ ├── traefik-charts.yaml │ │ │ └── twuni-charts.yaml │ │ └── kustomization.yaml │ │ ├── gotk-components.yaml │ │ ├── gotk-patches.yaml │ │ ├── gotk-sync.yaml │ │ ├── kustomization.yaml │ │ └── notifications │ │ ├── enc_slack-webhook-url-secret.yaml │ │ ├── flux-alert.yaml │ │ ├── flux-slack-notification-provider.yaml │ │ └── kustomization.yaml ├── core │ ├── cert-manager │ │ ├── controller │ │ │ ├── cert-manager-helmrelease.yaml │ │ │ ├── cert-manager-values.yaml │ │ │ ├── digitalocean-access-token.sops.yaml │ │ │ ├── kustomization.yaml │ │ │ └── values │ │ │ │ ├── cert-manager-values_1.8.1.yaml │ │ │ │ └── cert-manager-values_1.8.2.yaml 
│ │ ├── issuer │ │ │ ├── kustomization.yaml │ │ │ ├── letsencrypt-production-clusterissuer.yaml │ │ │ └── letsencrypt-staging-clusterissuer.yaml │ │ └── kustomization.yaml │ ├── ingress │ │ ├── kustomization.yaml │ │ ├── traefik-additions │ │ │ ├── basicauth-middleware.yaml │ │ │ ├── basicauth-secret.sops.yaml │ │ │ ├── error-pages │ │ │ │ ├── error-pages-deployment.yaml │ │ │ │ ├── error-pages-ingressroute.yaml │ │ │ │ ├── error-pages-middleware.yaml │ │ │ │ ├── error-pages-service.yaml │ │ │ │ └── kustomization.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── traefik-ingressroute.yaml │ │ │ ├── traefik-middleware.yaml │ │ │ └── traefik-tlsoption.yaml │ │ └── traefik │ │ │ ├── kustomization.yaml │ │ │ ├── traefik-helmrelease.yaml │ │ │ ├── traefik-values.yaml │ │ │ ├── values │ │ │ ├── traefik-values_10.14.1.yaml │ │ │ ├── traefik-values_10.21.1.yaml │ │ │ └── traefik-values_10.24.0.yaml │ │ │ └── wildcard-certificate │ │ │ ├── kustomization.yaml │ │ │ ├── traefik-letsencrypt-certificate.yaml │ │ │ └── wildcard-certificate.yaml │ ├── kustomization.yaml │ ├── namespaces │ │ ├── cert-manager.yaml │ │ ├── kustomization.yaml │ │ ├── longhorn-system.yaml │ │ ├── metallb-system.yaml │ │ ├── rook-ceph.yaml │ │ ├── storage.yaml │ │ └── traefik.yaml │ ├── network │ │ ├── kustomization.yaml │ │ └── metallb-system │ │ │ ├── kustomization.yaml │ │ │ ├── metallb-configmap.yaml │ │ │ ├── metallb-helmrelease.yaml │ │ │ ├── metallb-values.yaml │ │ │ └── values │ │ │ └── metallb-values_0.12.1.yaml │ └── storage │ │ ├── kustomization.yaml │ │ ├── longhorn │ │ ├── kustomization.yaml │ │ ├── longhorn-helmrelease.yaml │ │ ├── longhorn-ingressroute.yaml │ │ ├── longhorn-middleware.yaml │ │ ├── longhorn-values.yaml │ │ ├── storageclasses │ │ │ ├── kustomization.yaml │ │ │ ├── longhorn-all-node-storageclass.yaml │ │ │ ├── longhorn-master-node-storageclass.yaml │ │ │ ├── longhorn-test-pvc.yaml │ │ │ └── longhorn-worker-node-storageclass.yaml │ │ ├── tools │ │ │ ├── kustomization.yaml │ │ │ ├── 
longhorn-volume-migration-job.yaml │ │ │ └── longhorn-volume-mounter.yaml │ │ └── values │ │ │ ├── longhorn-values_1.1.2.yaml │ │ │ ├── longhorn-values_1.2.2.yaml │ │ │ ├── longhorn-values_1.2.3.yaml │ │ │ ├── longhorn-values_1.2.4.yaml │ │ │ └── longhorn-values_1.3.0.yaml │ │ ├── nfs-test │ │ ├── kustomization.yaml │ │ ├── nfs-test-provisioner-helmrelease.yaml │ │ └── nfs-test-provisioner-values.yaml │ │ └── nfs │ │ ├── kustomization.yaml │ │ ├── nfs-provisioner-helmrelease.yaml │ │ └── nfs-provisioner-values.yaml └── crds │ ├── cert-manager │ └── kustomization.yaml │ ├── kube-prometheus-stack │ └── crds.yaml │ ├── kustomization.yaml │ └── traefik │ ├── crds.yaml │ └── kustomization.yaml ├── docs ├── assets │ └── images │ │ ├── logo-32.png │ │ ├── logo.png │ │ └── logo.svg ├── flux.md ├── gpg-sops.md ├── helm-charts.md ├── index.md ├── k3s-install.md ├── kubectl.md ├── neofetch.md ├── netboot.md ├── nodes-settings.md ├── os-settings.md ├── rpi-setup.md ├── ssh.md ├── storage-settings.md └── system-maintenance.md ├── mkdocs.yml └── setup ├── 01-bootstrap-cluster.sh ├── 02-taint-label-nodes.sh └── 03-bootstrap-flux.sh /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/k8s-at-home/sops-pre-commit 3 | rev: v2.1.0 4 | hooks: 5 | - id: forbid-secrets 6 | - repo: https://github.com/pre-commit/pre-commit-hooks 7 | rev: v4.3.0 8 | hooks: 9 | - id: trailing-whitespace 10 | -------------------------------------------------------------------------------- /.sops.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | creation_rules: 3 | - encrypted_regex: '^(data|stringData)$' 4 | pgp: >- 5 | DA7188353C2127DE69D06E476F3C872D898F7421, 6 | 00087A22E06D3978D1255FCB46B174AB7866AE7D, 7 | 3FD9A842B65D82EE208E0C417A3E40C1328162FA 8 | -------------------------------------------------------------------------------- /README.md: 
-------------------------------------------------------------------------------- 1 |
2 | 3 | 4 | 5 |
6 | 7 |
8 | 9 | ### My Kubernetes Cluster (k3s) managed by GitOps (Flux2) 10 | 11 |
12 | 13 |
14 | 15 | [![k3s](https://img.shields.io/badge/k3s-v1.23.8-yellow?style=for-the-badge&logo=kubernetes)](https://k3s.io/) 16 | [![flux2](https://img.shields.io/badge/flux2-v0.31.3-blue?style=for-the-badge)](https://fluxcd.io/) 17 | [![raspberrypi](https://img.shields.io/badge/Raspberry_Pi-8x_Model_4B_(4GB)-A22846?logo=raspberrypi&logoColor=A22846&style=for-the-badge)](https://www.raspberrypi.org/) 18 | [![ubuntu-server](https://img.shields.io/badge/ubuntu_server-22.04_LTS-E95420?logo=ubuntu&logoColor=E95420&style=for-the-badge)](https://ubuntu.com/download/raspberry-pi) 19 | [![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&style=for-the-badge)](https://github.com/pre-commit/pre-commit) 20 | 21 |
22 | -------------------------------------------------------------------------------- /cluster/apps/authentication/authelia/authelia-configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: users-database 6 | namespace: authentication 7 | data: 8 | users_database.yml: | 9 | --- 10 | ############################################################### 11 | # Users Database # 12 | ############################################################### 13 | 14 | # This file can be used if you do not have an LDAP set up. 15 | 16 | users: 17 | ${SECRET_AUTHELIA_USER_01}: 18 | displayname: "Alexander Untch" 19 | password: "${SECRET_AUTHELIA_PASSWORD_01}" 20 | email: ${SECRET_EMAIL} 21 | groups: 22 | - admins 23 | -------------------------------------------------------------------------------- /cluster/apps/authentication/authelia/authelia-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # authelia-http-ingressroute 2 | # authelia-https-ingressroute 3 | 4 | --- 5 | apiVersion: traefik.containo.us/v1alpha1 6 | kind: IngressRoute 7 | metadata: 8 | name: authelia-http-ingressroute 9 | namespace: authentication 10 | spec: 11 | entryPoints: 12 | - web 13 | routes: 14 | - match: Host(`auth.${SECRET_DOMAIN}`) 15 | kind: Rule 16 | middlewares: 17 | - name: https-redirect-scheme-middleware 18 | namespace: traefik 19 | services: 20 | - name: authentication-authelia 21 | port: 80 22 | 23 | --- 24 | apiVersion: traefik.containo.us/v1alpha1 25 | kind: IngressRoute 26 | metadata: 27 | name: authelia-https-ingressroute 28 | namespace: traefik 29 | annotations: 30 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 31 | spec: 32 | entryPoints: 33 | - websecure 34 | routes: 35 | - match: Host(`auth.${SECRET_DOMAIN}`) 36 | kind: Rule 37 | middlewares: 38 | - name: secure-headers-middleware 39 | namespace: traefik 40 | 
services: 41 | - name: authentication-authelia 42 | namespace: authentication 43 | port: 80 44 | tls: 45 | secretName: "${SECRET_DOMAIN/./-}-tls" 46 | options: 47 | name: default-tlsoption 48 | namespace: traefik 49 | -------------------------------------------------------------------------------- /cluster/apps/authentication/authelia/authelia-middleware.yaml: -------------------------------------------------------------------------------- 1 | # authelia-middleware 2 | 3 | --- 4 | apiVersion: traefik.containo.us/v1alpha1 5 | kind: Middleware 6 | metadata: 7 | name: authelia-middleware 8 | namespace: authentication 9 | spec: 10 | forwardAuth: 11 | address: http://authentication-authelia.authentication.svc.cluster.local/api/verify?rd=https://auth.${SECRET_DOMAIN} 12 | trustForwardHeader: true 13 | authResponseHeaders: 14 | - Remote-User 15 | - Remote-Name 16 | - Remote-Email 17 | - Remote-Groups 18 | -------------------------------------------------------------------------------- /cluster/apps/authentication/authelia/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - authelia-configmap.yaml 6 | - authelia-helmrelease.yaml 7 | - authelia-ingressroute.yaml 8 | - authelia-middleware.yaml 9 | -------------------------------------------------------------------------------- /cluster/apps/authentication/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - namespace.yaml 7 | - postgres 8 | - authelia 9 | -------------------------------------------------------------------------------- /cluster/apps/authentication/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: 
authentication 6 | -------------------------------------------------------------------------------- /cluster/apps/authentication/postgres/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - postgres-pvc.yaml 7 | - postgres-helmrelease.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/authentication/postgres/postgres-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: authelia-postgres-pvc 6 | namespace: authentication 7 | spec: 8 | storageClassName: longhorn-worker-node-storageclass 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 1Gi 14 | -------------------------------------------------------------------------------- /cluster/apps/blogs/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - simone 7 | -------------------------------------------------------------------------------- /cluster/apps/blogs/simone/ingressroutes.yaml: -------------------------------------------------------------------------------- 1 | # wp-simone-local-ingressroute 2 | # wp-simone-http-ingressroute 3 | # wp-simone-https-ingressroute 4 | 5 | --- 6 | apiVersion: traefik.containo.us/v1alpha1 7 | kind: IngressRoute 8 | metadata: 9 | name: wp-simone-local-ingressroute 10 | namespace: wp-simone 11 | spec: 12 | entryPoints: 13 | - web 14 | routes: 15 | - match: Host(`simone.${SECRET_DOMAIN_LOCAL}`) 16 | kind: Rule 17 | services: 18 | - name: wp-simone-wp-simone-wordpress 19 | port: 80 20 | 21 | --- 22 | apiVersion: traefik.containo.us/v1alpha1 23 | kind: IngressRoute 24 | metadata: 25 | 
name: wp-simone-http-ingressroute 26 | namespace: wp-simone 27 | spec: 28 | entryPoints: 29 | - web 30 | routes: 31 | - match: Host(`simone.${SECRET_DOMAIN}`) 32 | kind: Rule 33 | middlewares: 34 | - name: https-redirect-scheme-middleware 35 | namespace: traefik 36 | services: 37 | - name: wp-simone-wp-simone-wordpress 38 | port: 80 39 | 40 | --- 41 | apiVersion: traefik.containo.us/v1alpha1 42 | kind: IngressRoute 43 | metadata: 44 | name: wp-simone-https-ingressroute 45 | namespace: traefik 46 | annotations: 47 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 48 | spec: 49 | entryPoints: 50 | - websecure 51 | routes: 52 | - match: Host(`simone.${SECRET_DOMAIN}`) 53 | kind: Rule 54 | middlewares: 55 | # - name: authelia-middleware 56 | # namespace: authentication 57 | - name: secure-headers-middleware 58 | namespace: traefik 59 | services: 60 | - name: wp-simone-wp-simone-wordpress 61 | namespace: wp-simone 62 | port: 80 63 | tls: 64 | secretName: "${SECRET_DOMAIN/./-}-tls" 65 | options: 66 | name: default-tlsoption 67 | namespace: traefik 68 | -------------------------------------------------------------------------------- /cluster/apps/blogs/simone/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - namespace.yaml 7 | - mariadb 8 | - wordpress 9 | # - ingressroutes.yaml 10 | -------------------------------------------------------------------------------- /cluster/apps/blogs/simone/mariadb/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - mariadb-pvc.yaml 7 | # - mariadb-helmrelease.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/blogs/simone/mariadb/mariadb-helmrelease.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: wp-simone-mariadb 6 | namespace: flux-system 7 | spec: 8 | chart: 9 | spec: 10 | chart: mariadb 11 | reconcileStrategy: ChartVersion 12 | sourceRef: 13 | kind: HelmRepository 14 | name: groundhog2k 15 | version: 0.5.0 16 | interval: 1m0s 17 | targetNamespace: wp-simone 18 | values: 19 | affinity: {} 20 | customConfig: "" 21 | customLivenessProbe: {} 22 | customReadinessProbe: {} 23 | customStartupProbe: {} 24 | env: [] 25 | extraContainers: [] 26 | extraEnvSecrets: [] 27 | extraInitContainers: [] 28 | extraScripts: null 29 | extraSecretConfigs: null 30 | extraSecrets: [] 31 | fullnameOverride: "" 32 | image: 33 | pullPolicy: IfNotPresent 34 | repository: mariadb 35 | tag: "" 36 | imagePullSecrets: [] 37 | livenessProbe: 38 | enabled: true 39 | failureThreshold: 3 40 | initialDelaySeconds: 120 41 | periodSeconds: 10 42 | successThreshold: 1 43 | timeoutSeconds: 5 44 | nameOverride: "" 45 | nodeSelector: 46 | node-type: worker 47 | podAnnotations: {} 48 | podManagementPolicy: OrderedReady 49 | podSecurityContext: 50 | fsGroup: 999 51 | readinessProbe: 52 | enabled: true 53 | failureThreshold: 3 54 | initialDelaySeconds: 30 55 | periodSeconds: 10 56 | successThreshold: 1 57 | timeoutSeconds: 5 58 | resources: {} 59 | revisionHistoryLimit: null 60 | securityContext: 61 | allowPrivilegeEscalation: false 62 | privileged: false 63 | readOnlyRootFilesystem: true 64 | runAsGroup: 999 65 | runAsNonRoot: true 66 | runAsUser: 999 67 | service: 68 | annotations: {} 69 | clusterIP: null 70 | loadBalancerIP: null 71 | nodePort: null 72 | port: 3306 73 | type: ClusterIP 74 | serviceAccount: 75 | annotations: {} 76 | create: false 77 | name: "" 78 | settings: 79 | allowEmptyRootPassword: false 80 | arguments: [] 81 | rootPassword: ${SECRET_GLOBAL_MARIADB_ROOT_PASSWORD} 82 | skipTZInfo: false 83 | 
startupProbe: 84 | enabled: true 85 | failureThreshold: 30 86 | initialDelaySeconds: 10 87 | periodSeconds: 10 88 | successThreshold: 1 89 | timeoutSeconds: 5 90 | storage: 91 | accessModes: 92 | - ReadWriteOnce 93 | className: null 94 | persistentVolumeClaimName: mariadb-data-pvc 95 | requestedSize: null 96 | volumeName: db-volume 97 | tolerations: [] 98 | updateStrategyType: RollingUpdate 99 | userDatabase: 100 | name: wordpress 101 | password: ${SECRET_WORDPRESS_SIMONE_MARIADB_PASSWORD} 102 | user: wordpress 103 | 104 | -------------------------------------------------------------------------------- /cluster/apps/blogs/simone/mariadb/mariadb-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: mariadb-data-pvc 6 | namespace: wp-simone 7 | labels: 8 | app: mariadb 9 | annotations: 10 | nfs.io/storage-path: "/mariadb" 11 | spec: 12 | storageClassName: nfs-provisioner 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 1Gi 18 | -------------------------------------------------------------------------------- /cluster/apps/blogs/simone/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: wp-simone 6 | -------------------------------------------------------------------------------- /cluster/apps/blogs/simone/wordpress/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - wordpress-pvc.yaml 7 | # - wordpress-helmrelease.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/blogs/simone/wordpress/wordpress-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 
apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: wordpress-data-pvc 6 | namespace: wp-simone 7 | labels: 8 | app: wordpress 9 | annotations: 10 | nfs.io/storage-path: "/wordpress" 11 | spec: 12 | storageClassName: nfs-provisioner 13 | accessModes: 14 | - ReadWriteOnce 15 | resources: 16 | requests: 17 | storage: 1Gi 18 | -------------------------------------------------------------------------------- /cluster/apps/development/adminer/adminer-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: adminer 6 | namespace: development 7 | labels: 8 | app: adminer 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: adminer 14 | strategy: 15 | type: Recreate 16 | template: 17 | metadata: 18 | labels: 19 | app: adminer 20 | name: adminer 21 | spec: 22 | nodeSelector: 23 | node-type: worker 24 | containers: 25 | - name: adminer 26 | image: adminer:4.8.1 27 | imagePullPolicy: IfNotPresent 28 | resources: 29 | requests: 30 | cpu: 10m 31 | memory: 100Mi 32 | ports: 33 | - name: http 34 | containerPort: 8080 35 | protocol: TCP 36 | env: 37 | - name: TZ 38 | value: ${TIMEZONE} 39 | -------------------------------------------------------------------------------- /cluster/apps/development/adminer/adminer-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # adminer-local-ingressroute 2 | # adminer-http-ingressroute 3 | # adminer-https-ingressroute 4 | 5 | # --- 6 | # apiVersion: traefik.containo.us/v1alpha1 7 | # kind: IngressRoute 8 | # metadata: 9 | # name: adminer-local-ingressroute 10 | # namespace: development 11 | # spec: 12 | # entryPoints: 13 | # - web 14 | # routes: 15 | # - match: Host(`adminer.${SECRET_DOMAIN_LOCAL_02}`) 16 | # kind: Rule 17 | # services: 18 | # - name: adminer-svc 19 | # port: 8080 20 | 21 | --- 22 | apiVersion: traefik.containo.us/v1alpha1 23 
| kind: IngressRoute 24 | metadata: 25 | name: adminer-http-ingressroute 26 | namespace: development 27 | spec: 28 | entryPoints: 29 | - web 30 | routes: 31 | - match: Host(`adminer.${SECRET_DOMAIN}`) 32 | kind: Rule 33 | middlewares: 34 | - name: https-redirect-scheme-middleware 35 | namespace: traefik 36 | services: 37 | - name: adminer-svc 38 | port: 8080 39 | 40 | --- 41 | apiVersion: traefik.containo.us/v1alpha1 42 | kind: IngressRoute 43 | metadata: 44 | name: adminer-https-ingressroute 45 | namespace: traefik 46 | annotations: 47 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 48 | spec: 49 | entryPoints: 50 | - websecure 51 | routes: 52 | - match: Host(`adminer.${SECRET_DOMAIN}`) 53 | kind: Rule 54 | middlewares: 55 | - name: secure-headers-middleware 56 | namespace: traefik 57 | services: 58 | - name: adminer-svc 59 | namespace: development 60 | port: 8080 61 | tls: 62 | secretName: "${SECRET_DOMAIN/./-}-tls" 63 | options: 64 | name: default-tlsoption 65 | namespace: traefik 66 | -------------------------------------------------------------------------------- /cluster/apps/development/adminer/adminer-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: adminer-svc 6 | namespace: development 7 | labels: 8 | app: adminer 9 | spec: 10 | selector: 11 | app: adminer 12 | ports: 13 | - name: http 14 | port: 8080 15 | protocol: TCP 16 | targetPort: http 17 | type: ClusterIP 18 | -------------------------------------------------------------------------------- /cluster/apps/development/adminer/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - adminer-service.yaml 7 | - adminer-deployment.yaml 8 | - adminer-ingressroute.yaml 9 | 
-------------------------------------------------------------------------------- /cluster/apps/development/docker-registry/docker-registry-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # docker-registry-http-ingressroute 2 | # docker-registry-https-ingressroute 3 | 4 | --- 5 | apiVersion: traefik.containo.us/v1alpha1 6 | kind: IngressRoute 7 | metadata: 8 | name: docker-registry-http-ingressroute 9 | namespace: docker-registry 10 | spec: 11 | entryPoints: 12 | - web 13 | routes: 14 | - match: Host(`registry.${SECRET_DOMAIN}`) 15 | kind: Rule 16 | middlewares: 17 | - name: https-redirect-scheme-middleware 18 | namespace: traefik 19 | services: 20 | - name: docker-registry-docker-registry 21 | port: 5000 22 | 23 | --- 24 | apiVersion: traefik.containo.us/v1alpha1 25 | kind: IngressRoute 26 | metadata: 27 | name: docker-registry-https-ingressroute 28 | namespace: traefik 29 | annotations: 30 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 31 | spec: 32 | entryPoints: 33 | - websecure 34 | routes: 35 | - match: Host(`registry.${SECRET_DOMAIN}`) 36 | kind: Rule 37 | middlewares: 38 | - name: docker-registry-cors-middleware 39 | namespace: docker-registry 40 | services: 41 | - name: docker-registry-docker-registry 42 | namespace: docker-registry 43 | port: 5000 44 | tls: 45 | secretName: "${SECRET_DOMAIN/./-}-tls" 46 | options: 47 | name: default-tlsoption 48 | namespace: traefik 49 | -------------------------------------------------------------------------------- /cluster/apps/development/docker-registry/docker-registry-middleware.yaml: -------------------------------------------------------------------------------- 1 | # docker-registry-cors-middleware 2 | 3 | --- 4 | apiVersion: traefik.containo.us/v1alpha1 5 | kind: Middleware 6 | metadata: 7 | name: docker-registry-cors-middleware 8 | namespace: docker-registry 9 | spec: 10 | headers: 11 | accessControlAllowMethods: 12 | - GET 13 | - OPTIONS
14 | # - HEAD 15 | - PUT 16 | - POST 17 | - DELETE 18 | accessControlAllowOriginList: 19 | - https://registry-ui.${SECRET_DOMAIN} 20 | accessControlAllowCredentials: true 21 | accessControlMaxAge: 100 22 | addVaryHeader: true 23 | accessControlAllowHeaders: 24 | - "Authorization" 25 | - "Accept" 26 | -------------------------------------------------------------------------------- /cluster/apps/development/docker-registry/docker-registry-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: docker-registry-pvc 6 | namespace: docker-registry 7 | annotations: 8 | nfs.io/storage-path: "/" 9 | spec: 10 | storageClassName: nfs-provisioner 11 | accessModes: 12 | - ReadWriteOnce 13 | resources: 14 | requests: 15 | storage: 10Gi 16 | -------------------------------------------------------------------------------- /cluster/apps/development/docker-registry/docker-registry-ui/docker-registry-ui-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | labels: 6 | app: docker-registry-ui 7 | name: docker-registry-ui-http 8 | namespace: docker-registry 9 | spec: 10 | selector: 11 | app: docker-registry-ui 12 | type: ClusterIP 13 | ports: 14 | - name: http 15 | port: 80 16 | protocol: TCP 17 | targetPort: http 18 | 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | labels: 24 | app: docker-registry-ui 25 | name: docker-registry-ui 26 | namespace: docker-registry 27 | spec: 28 | replicas: 1 29 | revisionHistoryLimit: 1 30 | selector: 31 | matchLabels: 32 | app: docker-registry-ui 33 | strategy: 34 | rollingUpdate: 35 | maxSurge: 1 36 | maxUnavailable: 0 37 | type: RollingUpdate 38 | template: 39 | metadata: 40 | labels: 41 | app: docker-registry-ui 42 | name: docker-registry-ui 43 | spec: 44 | containers: 45 | - image: 
"docker.io/joxit/docker-registry-ui:latest" 46 | imagePullPolicy: IfNotPresent 47 | livenessProbe: 48 | httpGet: 49 | path: / 50 | port: http 51 | name: docker-registry-ui 52 | ports: 53 | - containerPort: 80 54 | name: http 55 | protocol: TCP 56 | env: 57 | - name: REGISTRY_URL 58 | value: https://registry.${SECRET_DOMAIN} 59 | - name: REGISTRY_TITLE 60 | value: "Docker registry UI" 61 | - name: DELETE_IMAGES 62 | value: "true" 63 | - name: SHOW_CONTENT_DIGEST 64 | value: "true" 65 | - name: SINGLE_REGISTRY 66 | value: "true" 67 | - name: NGINX_PROXY_PASS_URL 68 | value: "http://registry.${SECRET_DOMAIN}" 69 | readinessProbe: 70 | httpGet: 71 | path: / 72 | port: http 73 | nodeSelector: 74 | node-type: worker 75 | resources: 76 | limits: 77 | cpu: 200m 78 | memory: 512Mi 79 | requests: 80 | cpu: 100m 81 | memory: 256Mi 82 | restartPolicy: Always 83 | -------------------------------------------------------------------------------- /cluster/apps/development/docker-registry/docker-registry-ui/docker-registry-ui-middleware.yaml: -------------------------------------------------------------------------------- 1 | # docker-registry-ui-auth-middleware 2 | 3 | --- 4 | apiVersion: traefik.containo.us/v1alpha1 5 | kind: Middleware 6 | metadata: 7 | name: docker-registry-ui-auth-middleware 8 | namespace: docker-registry 9 | spec: 10 | basicAuth: 11 | secret: docker-registry-ui-secret 12 | -------------------------------------------------------------------------------- /cluster/apps/development/docker-registry/docker-registry-ui/docker-regsitry-ui-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # docker-registry-ui-https-ingressroute 2 | 3 | --- 4 | apiVersion: traefik.containo.us/v1alpha1 5 | kind: IngressRoute 6 | metadata: 7 | name: docker-registry-ui-https-ingressroute 8 | namespace: traefik 9 | annotations: 10 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 11 | spec: 12 | entryPoints: 13 | - 
websecure 14 | routes: 15 | - match: Host(`registry-ui.${SECRET_DOMAIN}`) 16 | kind: Rule 17 | middlewares: 18 | # - name: secure-headers-middleware 19 | # namespace: traefik 20 | # - name: authelia-middleware 21 | # namespace: authentication 22 | - name: docker-registry-ui-auth-middleware 23 | namespace: docker-registry 24 | services: 25 | - name: docker-registry-ui-http 26 | namespace: docker-registry 27 | port: 80 28 | tls: 29 | secretName: "${SECRET_DOMAIN/./-}-tls" 30 | options: 31 | name: default-tlsoption 32 | namespace: traefik 33 | -------------------------------------------------------------------------------- /cluster/apps/development/docker-registry/docker-registry-ui/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - docker-registry-ui-secret.sops.yaml 7 | # - docker-registry-ui-deployment.yaml 8 | # - docker-regsitry-ui-ingressroute.yaml 9 | # - docker-registry-ui-middleware.yaml 10 | -------------------------------------------------------------------------------- /cluster/apps/development/docker-registry/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - namespace.yaml 7 | - docker-registry-pvc.yaml 8 | # - docker-registry-helmrelease.yaml 9 | # - docker-registry-ingressroute.yaml 10 | # - docker-registry-middleware.yaml 11 | - docker-registry-ui 12 | -------------------------------------------------------------------------------- /cluster/apps/development/docker-registry/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: docker-registry 6 | -------------------------------------------------------------------------------- 
/cluster/apps/development/gitea/gitea-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # git-local-ingressroute 2 | # git-http-ingressroute 3 | # git-https-ingressroute 4 | # git-ssh-ingressroute 5 | 6 | # --- 7 | # apiVersion: traefik.containo.us/v1alpha1 8 | # kind: IngressRoute 9 | # metadata: 10 | # name: git-local-ingressroute 11 | # namespace: gitea 12 | # spec: 13 | # entryPoints: 14 | # - web 15 | # routes: 16 | # - match: Host(`git.${SECRET_DOMAIN_LOCAL}`) 17 | # kind: Rule 18 | # services: 19 | # - name: gitea-gitea-http 20 | # port: 80 21 | 22 | --- 23 | apiVersion: traefik.containo.us/v1alpha1 24 | kind: IngressRoute 25 | metadata: 26 | name: git-http-ingressroute 27 | namespace: gitea 28 | spec: 29 | entryPoints: 30 | - web 31 | routes: 32 | - match: Host(`git.${SECRET_DOMAIN}`) 33 | kind: Rule 34 | middlewares: 35 | - name: https-redirect-scheme-middleware 36 | namespace: traefik 37 | services: 38 | - name: gitea-gitea-http 39 | port: 80 40 | 41 | --- 42 | apiVersion: traefik.containo.us/v1alpha1 43 | kind: IngressRoute 44 | metadata: 45 | name: git-https-ingressroute 46 | namespace: traefik 47 | annotations: 48 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 49 | spec: 50 | entryPoints: 51 | - websecure 52 | routes: 53 | - match: Host(`git.${SECRET_DOMAIN}`) 54 | kind: Rule 55 | middlewares: 56 | - name: secure-headers-middleware 57 | namespace: traefik 58 | services: 59 | - name: gitea-gitea-http 60 | namespace: gitea 61 | port: 80 62 | tls: 63 | secretName: "${SECRET_DOMAIN/./-}-tls" 64 | options: 65 | name: default-tlsoption 66 | namespace: traefik 67 | 68 | --- 69 | apiVersion: traefik.containo.us/v1alpha1 70 | kind: IngressRouteTCP 71 | metadata: 72 | name: git-ssh-ingressroute 73 | namespace: gitea 74 | spec: 75 | entryPoints: 76 | - ssh 77 | routes: 78 | - match: HostSNI(`*`) 79 | services: 80 | - name: gitea-gitea-ssh 81 | port: 22 82 | 
-------------------------------------------------------------------------------- /cluster/apps/development/gitea/gitea-pvc.yaml: -------------------------------------------------------------------------------- 1 | # --- 2 | # apiVersion: v1 3 | # kind: PersistentVolumeClaim 4 | # metadata: 5 | # name: gitea-data-pvc 6 | # namespace: gitea 7 | # spec: 8 | # storageClassName: longhorn-worker-node-storageclass 9 | # accessModes: 10 | # - ReadWriteOnce 11 | # resources: 12 | # requests: 13 | # storage: 8Gi 14 | 15 | --- 16 | apiVersion: v1 17 | kind: PersistentVolumeClaim 18 | metadata: 19 | name: gitea-data-pvc 20 | namespace: gitea 21 | annotations: 22 | nfs.io/storage-path: "/data" 23 | spec: 24 | storageClassName: nfs-provisioner 25 | accessModes: 26 | - ReadWriteOnce 27 | resources: 28 | requests: 29 | storage: 8Gi 30 | -------------------------------------------------------------------------------- /cluster/apps/development/gitea/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - namespace.yaml 7 | - postgres 8 | - redis 9 | - gitea-pvc.yaml 10 | # - gitea-helmrelease.yaml 11 | # - gitea-ingressroute.yaml 12 | -------------------------------------------------------------------------------- /cluster/apps/development/gitea/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: gitea 6 | -------------------------------------------------------------------------------- /cluster/apps/development/gitea/postgres/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - postgres-pvc.yaml 7 | # - postgres-helmrelease.yaml 8 | 
-------------------------------------------------------------------------------- /cluster/apps/development/gitea/postgres/postgres-helmrelease.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: gitea-postgres 6 | namespace: flux-system 7 | spec: 8 | chart: 9 | spec: 10 | chart: postgres 11 | reconcileStrategy: ChartVersion 12 | sourceRef: 13 | kind: HelmRepository 14 | name: groundhog2k 15 | version: 0.3.10 16 | interval: 1m0s 17 | targetNamespace: gitea 18 | values: 19 | affinity: {} 20 | args: [] 21 | customConfig: "" 22 | customLivenessProbe: {} 23 | customReadinessProbe: {} 24 | customStartupProbe: {} 25 | env: 26 | - name: PGTZ 27 | value: ${TIMEZONE} 28 | - name: TZ 29 | value: ${TIMEZONE} 30 | extraContainers: [] 31 | extraEnvSecrets: [] 32 | extraInitContainers: [] 33 | extraScripts: null 34 | extraSecretConfigs: null 35 | extraSecrets: [] 36 | fullnameOverride: "" 37 | image: 38 | pullPolicy: IfNotPresent 39 | repository: postgres 40 | tag: "" 41 | imagePullSecrets: [] 42 | livenessProbe: 43 | enabled: true 44 | failureThreshold: 3 45 | initialDelaySeconds: 10 46 | periodSeconds: 10 47 | successThreshold: 1 48 | timeoutSeconds: 5 49 | nameOverride: "" 50 | nodeSelector: 51 | node-type: worker 52 | podAnnotations: {} 53 | podManagementPolicy: OrderedReady 54 | podSecurityContext: 55 | fsGroup: 999 56 | readinessProbe: 57 | enabled: true 58 | failureThreshold: 3 59 | initialDelaySeconds: 10 60 | periodSeconds: 10 61 | successThreshold: 1 62 | timeoutSeconds: 5 63 | resources: {} 64 | revisionHistoryLimit: null 65 | securityContext: 66 | allowPrivilegeEscalation: false 67 | privileged: false 68 | readOnlyRootFilesystem: true 69 | runAsGroup: 999 70 | runAsNonRoot: true 71 | runAsUser: 999 72 | service: 73 | annotations: {} 74 | clusterIP: null 75 | loadBalancerIP: null 76 | nodePort: null 77 | port: 5432 78 | type: ClusterIP 
79 | serviceAccount: 80 | annotations: {} 81 | create: false 82 | name: "" 83 | settings: 84 | authMethod: md5 85 | initDbArgs: null 86 | superuser: postgres 87 | superuserPassword: ${SECRET_GLOBAL_POSTGRES_SUPERUSER_PASSWORD} 88 | startupProbe: 89 | enabled: true 90 | failureThreshold: 30 91 | initialDelaySeconds: 10 92 | periodSeconds: 10 93 | successThreshold: 1 94 | timeoutSeconds: 5 95 | storage: 96 | accessModes: 97 | - ReadWriteOnce 98 | className: null 99 | persistentVolumeClaimName: gitea-postgres-pvc 100 | requestedSize: null 101 | volumeName: postgres-data 102 | tolerations: [] 103 | updateStrategyType: RollingUpdate 104 | userDatabase: 105 | name: ${SECRET_GITEA_DB_NAME} 106 | password: ${SECRET_GITEA_DB_PASSWORD} 107 | user: ${SECRET_GITEA_DB_USER} 108 | 109 | -------------------------------------------------------------------------------- /cluster/apps/development/gitea/postgres/postgres-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: gitea-postgres-pvc 6 | namespace: gitea 7 | spec: 8 | storageClassName: longhorn-worker-node-storageclass 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 5Gi 14 | -------------------------------------------------------------------------------- /cluster/apps/development/gitea/redis/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - redis-pvc.yaml 7 | # - redis-helmrelease.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/development/gitea/redis/redis-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: gitea-redis-pvc 6 | namespace: gitea 7 
| spec: 8 | storageClassName: longhorn-worker-node-storageclass 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 2Gi 14 | -------------------------------------------------------------------------------- /cluster/apps/development/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - namespace.yaml 7 | # - adminer 8 | - gitea 9 | - docker-registry 10 | -------------------------------------------------------------------------------- /cluster/apps/development/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: development 6 | -------------------------------------------------------------------------------- /cluster/apps/dns/do-dns-updater/digitalocean-dns-updater.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | #################### CHANGE THE FOLLOWING VARIABLES #################### 4 | TOKEN="${SECRET_DIGITALOCEAN_API_TOKEN}" 5 | DOMAIN="${SECRET_DOMAIN}" 6 | RECORD_ID="${SECRET_DIGITALOCEAN_RECORD_ID}" 7 | LOG_FILE="/data/digital-ocean-dns-updater-log.txt" 8 | ######################################################################## 9 | 10 | CURRENT_IPV4="$(dig +short myip.opendns.com @resolver1.opendns.com)" 11 | LAST_IPV4="$(tail -1 "$LOG_FILE" | awk -F, '{print $2}')" 12 | 13 | if [ "$CURRENT_IPV4" = "$LAST_IPV4" ]; then 14 | echo "IP has not changed ($CURRENT_IPV4)" 15 | else 16 | echo "IP has changed: $CURRENT_IPV4" 17 | echo "$(date),$CURRENT_IPV4" >> "$LOG_FILE" 18 | curl -X PUT -H "Content-Type: application/json" -H "Authorization: Bearer $TOKEN" -d '{"data":"'"$CURRENT_IPV4"'"}' "https://api.digitalocean.com/v2/domains/$DOMAIN/records/$RECORD_ID" 19 | fi 20 | 21 | #
https://salvatorelab.com/2020/10/how-to-point-a-domain-to-your-dynamic-home-ip-address/ 22 | 23 | # curl GET -H "Content-Type: application/json" \ 24 | # -H "Authorization: Bearer " \ 25 | # https://api.digitalocean.com/v2/domains//records | json_pp 26 | -------------------------------------------------------------------------------- /cluster/apps/dns/do-dns-updater/do-dns-updater-cron-job.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: CronJob 4 | metadata: 5 | name: do-dns-updater-cron-job 6 | namespace: dns 7 | spec: 8 | schedule: "*/15 * * * *" 9 | jobTemplate: 10 | spec: 11 | template: 12 | spec: 13 | nodeSelector: 14 | node-type: worker 15 | containers: 16 | - name: digitalocean-dns-updater-tool 17 | image: praqma/network-multitool 18 | command: 19 | - /bin/sh 20 | - -c 21 | - "/script/digitalocean-dns-updater.sh" 22 | volumeMounts: 23 | - name: do-dns-updater-script-volume 24 | mountPath: /script 25 | - name: do-dns-updater-log-volume 26 | mountPath: /data 27 | restartPolicy: Never 28 | automountServiceAccountToken: false 29 | volumes: 30 | - name: do-dns-updater-script-volume 31 | configMap: 32 | name: do-dns-updater-script-configmap 33 | defaultMode: 0777 34 | # defaultMode: 0744 35 | - name: do-dns-updater-log-volume 36 | persistentVolumeClaim: 37 | claimName: do-dns-updater-log-pvc 38 | -------------------------------------------------------------------------------- /cluster/apps/dns/do-dns-updater/do-dns-updater-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: do-dns-updater-log-pvc 6 | namespace: dns 7 | spec: 8 | storageClassName: longhorn-worker-node-storageclass 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 512Mi 14 | -------------------------------------------------------------------------------- 
/cluster/apps/dns/do-dns-updater/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - do-dns-updater-pvc.yaml 7 | - do-dns-updater-cron-job.yaml 8 | 9 | configMapGenerator: 10 | - name: do-dns-updater-script-configmap 11 | namespace: dns 12 | files: 13 | - digitalocean-dns-updater.sh 14 | generatorOptions: 15 | disableNameSuffixHash: true 16 | -------------------------------------------------------------------------------- /cluster/apps/dns/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - namespace.yaml 7 | - do-dns-updater 8 | # - staging 9 | - production 10 | -------------------------------------------------------------------------------- /cluster/apps/dns/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: dns 6 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - unbound 7 | - pihole 8 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/pihole/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - pihole-config 7 | - pihole-tcp-service.yaml 8 | - pihole-udp-service.yaml 9 | - pihole-deployment.yaml 10 | 
-------------------------------------------------------------------------------- /cluster/apps/dns/production/pihole/pihole-config/adlists.list: -------------------------------------------------------------------------------- 1 | https://dbl.oisd.nl/ 2 | https://raw.githubusercontent.com/PolishFiltersTeam/KADhosts/master/KADhosts.txt 3 | https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Spam/hosts 4 | https://v.firebog.net/hosts/static/w3kbl.txt 5 | https://adaway.org/hosts.txt 6 | https://v.firebog.net/hosts/AdguardDNS.txt 7 | https://v.firebog.net/hosts/Admiral.txt 8 | https://raw.githubusercontent.com/anudeepND/blacklist/master/adservers.txt 9 | https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt 10 | https://v.firebog.net/hosts/Easylist.txt 11 | https://pgl.yoyo.org/adservers/serverlist.php?hostformat=hosts&showintro=0&mimetype=plaintext 12 | https://raw.githubusercontent.com/FadeMind/hosts.extras/master/UncheckyAds/hosts 13 | https://raw.githubusercontent.com/bigdargon/hostsVN/master/hosts 14 | https://v.firebog.net/hosts/Easyprivacy.txt 15 | https://v.firebog.net/hosts/Prigent-Ads.txt 16 | https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.2o7Net/hosts 17 | https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt 18 | https://hostfiles.frogeye.fr/firstparty-trackers-hosts.txt 19 | https://raw.githubusercontent.com/DandelionSprout/adfilt/master/Alternate%20versions%20Anti-Malware%20List/AntiMalwareHosts.txt 20 | https://osint.digitalside.it/Threat-Intel/lists/latestdomains.txt 21 | https://s3.amazonaws.com/lists.disconnect.me/simple_malvertising.txt 22 | https://v.firebog.net/hosts/Prigent-Crypto.txt 23 | https://bitbucket.org/ethanr/dns-blacklists/raw/8575c9f96e5b4a1308f2f12394abd86d0927a4a0/bad_lists/Mandiant_APT1_Report_Appendix_D.txt 24 | https://phishing.army/download/phishing_army_blocklist_extended.txt 25 | https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-malware.txt 
26 | https://raw.githubusercontent.com/Spam404/lists/master/main-blacklist.txt 27 | https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Risk/hosts 28 | https://urlhaus.abuse.ch/downloads/hostfile/ 29 | https://zerodot1.gitlab.io/CoinBlockerLists/hosts_browser 30 | https://raw.githubusercontent.com/anudeepND/blacklist/master/adservers.txt 31 | https://raw.githubusercontent.com/anudeepND/blacklist/master/facebook.txt 32 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/pihole/pihole-config/custom.list: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/pihole/pihole-config/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | configMapGenerator: 6 | - name: pihole-env-vars 7 | namespace: dns 8 | envs: 9 | - pihole-env-vars.env 10 | - name: custom.list 11 | namespace: dns 12 | files: 13 | - custom.list 14 | - name: regex.list 15 | namespace: dns 16 | files: 17 | - regex.list 18 | - name: whitelist.txt 19 | namespace: dns 20 | files: 21 | - whitelist.txt 22 | - name: adlists.list 23 | namespace: dns 24 | files: 25 | - adlists.list 26 | generatorOptions: 27 | disableNameSuffixHash: true 28 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/pihole/pihole-config/pihole-env-vars.env: -------------------------------------------------------------------------------- 1 | WEBPASSWORD=${SECRET_PIHOLE_WEBPASSWORD} 2 | ADMIN_EMAIL=${SECRET_EMAIL} 3 | TZ=${TIMEZONE} 4 | DNS1=${UNBOUND_SVC_LB_IP} 5 | DNS2=${UNBOUND_SVC_LB_IP} 6 | ServerIP=${PIHOLE_SVC_LB_IP} 7 | IPv6=false 8 | TEMPERATUREUNIT=c 9 | WEBUIBOXEDLAYOUT=boxed 10 | WEBTHEME=default-dark 11 | 
FTLCONF_REPLY_ADDR4=${PIHOLE_SVC_LB_IP} 12 | FTLCONF_MAXDBDAYS=14 13 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/pihole/pihole-config/regex.list: -------------------------------------------------------------------------------- 1 | (\.|^)trbo\.com$ 2 | # https://raw.githubusercontent.com/mmotti/pihole-regex/master/regex.list 3 | ^ad([sxv]?[0-9]*|system)[_.-]([^.[:space:]]+\.){1,}|[_.-]ad([sxv]?[0-9]*|system)[_.-] 4 | ^(.+[_.-])?adse?rv(er?|ice)?s?[0-9]*[_.-] 5 | ^(.+[_.-])?telemetry[_.-] 6 | ^adim(age|g)s?[0-9]*[_.-] 7 | ^adtrack(er|ing)?[0-9]*[_.-] 8 | ^advert(s|is(ing|ements?))?[0-9]*[_.-] 9 | ^aff(iliat(es?|ion))?[_.-] 10 | ^analytics?[_.-] 11 | ^banners?[_.-] 12 | ^beacons?[0-9]*[_.-] 13 | ^count(ers?)?[0-9]*[_.-] 14 | ^mads\. 15 | ^pixels?[-.] 16 | ^stat(s|istics)?[0-9]*[_.-] 17 | # block crappy speedtest.net server 18 | (\.|^)contabo\.net$ 19 | (\.|^)net-d-sign\.de$ 20 | (\.|^)sitnetworks\.de$ 21 | (\.|^)goingnet\.at$ 22 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/pihole/pihole-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # pihole-local-ingressroute 2 | # pihole-metrics-local-ingressroute 3 | # pihole-http-ingressroute 4 | # pihole-https-ingressroute 5 | # pihole-middleware-headers 6 | 7 | --- 8 | apiVersion: traefik.containo.us/v1alpha1 9 | kind: IngressRoute 10 | metadata: 11 | name: pihole-local-ingressroute 12 | namespace: dns 13 | spec: 14 | entryPoints: 15 | - web 16 | routes: 17 | - match: Host(`pihole.${SECRET_DOMAIN_LOCAL_02}`) && PathPrefix(`/admin`) 18 | kind: Rule 19 | services: 20 | - name: pihole-tcp 21 | port: 80 22 | 23 | --- 24 | apiVersion: traefik.containo.us/v1alpha1 25 | kind: IngressRoute 26 | metadata: 27 | name: pihole-metrics-local-ingressroute 28 | namespace: dns 29 | spec: 30 | entryPoints: 31 | - web 32 | routes: 33 | - match: 
Host(`pihole-metrics.${SECRET_DOMAIN_LOCAL_02}`) && PathPrefix(`/metrics`) 34 | kind: Rule 35 | services: 36 | - name: pihole-exporter 37 | port: 9617 38 | 39 | --- 40 | apiVersion: traefik.containo.us/v1alpha1 41 | kind: IngressRoute 42 | metadata: 43 | name: pihole-http-ingressroute 44 | namespace: dns 45 | spec: 46 | entryPoints: 47 | - web 48 | routes: 49 | - match: Host(`pihole.${SECRET_DIGITALOCEAN_DOMAIN_02}`) && PathPrefix(`/admin`) 50 | kind: Rule 51 | middlewares: 52 | - name: https-redirect-scheme-middleware 53 | namespace: traefik 54 | services: 55 | - name: pihole-tcp 56 | port: 80 57 | 58 | --- 59 | apiVersion: traefik.containo.us/v1alpha1 60 | kind: IngressRoute 61 | metadata: 62 | name: pihole-https-ingressroute 63 | namespace: dns 64 | annotations: 65 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 66 | spec: 67 | entryPoints: 68 | - websecure 69 | routes: 70 | - match: Host(`pihole.${SECRET_DIGITALOCEAN_DOMAIN_02}`) && PathPrefix(`/admin`) 71 | kind: Rule 72 | middlewares: 73 | - name: pihole-middleware-headers 74 | services: 75 | - name: pihole-tcp 76 | port: 80 77 | tls: 78 | secretName: pihole-letsencrypt-production-certificate 79 | 80 | --- 81 | apiVersion: traefik.containo.us/v1alpha1 82 | kind: Middleware 83 | metadata: 84 | name: pihole-middleware-headers 85 | namespace: dns 86 | spec: 87 | headers: 88 | browserXssFilter: true 89 | contentTypeNosniff: true 90 | frameDeny: true 91 | sslRedirect: true 92 | forceSTSHeader: true 93 | stsIncludeSubdomains: true 94 | stsPreload: true 95 | stsSeconds: 315360000 96 | # stsSeconds: 63072000 97 | contentSecurityPolicy: | 98 | default-src 'none';form-action 'none';frame-ancestors 'none';base-uri 'none' 99 | accessControlAllowMethods: 100 | - "GET" 101 | - "POST" 102 | accessControlAllowOriginList: 103 | - "https://pihole.${SECRET_DIGITALOCEAN_DOMAIN_02}" 104 | accessControlMaxAge: 100 105 | addVaryHeader: true 106 | referrerPolicy: "same-origin" 107 | customFrameOptionsValue: 
SAMEORIGIN 108 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/pihole/pihole-tcp-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: pihole-production-tcp-service 6 | namespace: dns 7 | labels: 8 | app: pihole-production 9 | environment: production 10 | annotations: 11 | metallb.universe.tf/address-pool: dns 12 | metallb.universe.tf/allow-shared-ip: pihole-production-svc 13 | spec: 14 | selector: 15 | app: pihole-production 16 | environment: production 17 | ports: 18 | - name: http 19 | port: 80 20 | protocol: TCP 21 | targetPort: http 22 | - name: https 23 | port: 443 24 | protocol: TCP 25 | targetPort: https 26 | - name: dns-tcp 27 | port: 53 28 | protocol: TCP 29 | targetPort: dns-tcp 30 | sessionAffinity: None 31 | externalTrafficPolicy: Local 32 | type: LoadBalancer 33 | loadBalancerIP: ${PIHOLE_SVC_LB_IP} 34 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/pihole/pihole-udp-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: pihole-production-udp-service 6 | namespace: dns 7 | labels: 8 | app: pihole-production 9 | environment: production 10 | annotations: 11 | metallb.universe.tf/address-pool: dns 12 | metallb.universe.tf/allow-shared-ip: pihole-production-svc 13 | spec: 14 | selector: 15 | app: pihole-production 16 | environment: production 17 | ports: 18 | - name: dns-udp 19 | port: 53 20 | protocol: UDP 21 | targetPort: dns-udp 22 | - name: client-udp 23 | port: 67 24 | protocol: UDP 25 | targetPort: client-udp 26 | sessionAffinity: None 27 | externalTrafficPolicy: Local 28 | type: LoadBalancer 29 | loadBalancerIP: ${PIHOLE_SVC_LB_IP} 30 | 
-------------------------------------------------------------------------------- /cluster/apps/dns/production/unbound/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - unbound-config 7 | - unbound-tcp-service.yaml 8 | - unbound-udp-service.yaml 9 | - unbound-deployment.yaml 10 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/unbound/unbound-config/a-records.conf: -------------------------------------------------------------------------------- 1 | # A Record 2 | local-data: "adminer.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 3 | local-data: "chronograf.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 4 | local-data: "cloud.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 5 | local-data: "doc.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 6 | local-data: "git.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 7 | local-data: "grafana.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 8 | local-data: "homer.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 9 | local-data: "longhorn.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 10 | local-data: "pihole-metrics.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 11 | local-data: "pihole.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 12 | local-data: "portainer.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 13 | local-data: "prometheus-alertmanager.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 14 | local-data: "prometheus.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 15 | local-data: "simone.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 16 | local-data: "speedtest.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 17 | local-data: "test.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 18 | local-data: "traefik.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 19 | local-data: "vault.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 20 | local-data: "whoami.${SECRET_DOMAIN_LOCAL}. 
A 192.168.178.240" 21 | 22 | # PTR Record 23 | local-data-ptr: "192.168.178.240 adminer.${SECRET_DOMAIN_LOCAL}." 24 | local-data-ptr: "192.168.178.240 chronograf.${SECRET_DOMAIN_LOCAL}." 25 | local-data-ptr: "192.168.178.240 cloud.${SECRET_DOMAIN_LOCAL}." 26 | local-data-ptr: "192.168.178.240 doc.${SECRET_DOMAIN_LOCAL}." 27 | local-data-ptr: "192.168.178.240 git.${SECRET_DOMAIN_LOCAL}." 28 | local-data-ptr: "192.168.178.240 grafana.${SECRET_DOMAIN_LOCAL}." 29 | local-data-ptr: "192.168.178.240 homer.${SECRET_DOMAIN_LOCAL}." 30 | local-data-ptr: "192.168.178.240 longhorn.${SECRET_DOMAIN_LOCAL}." 31 | local-data-ptr: "192.168.178.240 pihole-metrics.${SECRET_DOMAIN_LOCAL}." 32 | local-data-ptr: "192.168.178.240 pihole.${SECRET_DOMAIN_LOCAL}." 33 | local-data-ptr: "192.168.178.240 portainer.${SECRET_DOMAIN_LOCAL}." 34 | local-data-ptr: "192.168.178.240 prometheus-alertmanager.${SECRET_DOMAIN_LOCAL}." 35 | local-data-ptr: "192.168.178.240 prometheus.${SECRET_DOMAIN_LOCAL}." 36 | local-data-ptr: "192.168.178.240 simone.${SECRET_DOMAIN_LOCAL}." 37 | local-data-ptr: "192.168.178.240 speedtest.${SECRET_DOMAIN_LOCAL}." 38 | local-data-ptr: "192.168.178.240 test.${SECRET_DOMAIN_LOCAL}." 39 | local-data-ptr: "192.168.178.240 traefik.${SECRET_DOMAIN_LOCAL}." 40 | local-data-ptr: "192.168.178.240 vault.${SECRET_DOMAIN_LOCAL}." 41 | local-data-ptr: "192.168.178.240 whoami.${SECRET_DOMAIN_LOCAL}." 
42 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/unbound/unbound-config/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | configMapGenerator: 6 | - name: unbound-config 7 | namespace: dns 8 | files: 9 | - unbound.conf 10 | - name: unbound-root-hints 11 | namespace: dns 12 | files: 13 | - root.hints 14 | - name: unbound-root-key-initial 15 | namespace: dns 16 | files: 17 | - root.key 18 | - name: unbound-a-records 19 | namespace: dns 20 | files: 21 | - a-records.conf 22 | generatorOptions: 23 | disableNameSuffixHash: true 24 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/unbound/unbound-config/root.key: -------------------------------------------------------------------------------- 1 | ; autotrust trust anchor file 2 | ;;id: . 1 3 | ;;last_queried: 1607451082 ;;Tue Dec 8 18:11:22 2020 4 | ;;last_success: 1607451082 ;;Tue Dec 8 18:11:22 2020 5 | ;;next_probe_time: 1607491905 ;;Wed Dec 9 05:31:45 2020 6 | ;;query_failed: 0 7 | ;;query_interval: 43200 8 | ;;retry_time: 8640 9 | . 
86400 IN DNSKEY 257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU= ;{id = 20326 (ksk), size = 2048b} ;;state=2 [ VALID ] ;;count=0 ;;lastchange=1607451082 ;;Tue Dec 8 18:11:22 2020 10 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/unbound/unbound-tcp-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: unbound-production-tcp-service 6 | namespace: dns 7 | labels: 8 | app: unbound-production 9 | environment: production 10 | annotations: 11 | metallb.universe.tf/address-pool: dns 12 | metallb.universe.tf/allow-shared-ip: unbound-production-svc 13 | spec: 14 | selector: 15 | app: unbound-production 16 | environment: production 17 | ports: 18 | - name: dns-tcp 19 | port: 53 20 | protocol: TCP 21 | targetPort: 53 22 | type: LoadBalancer 23 | loadBalancerIP: ${UNBOUND_SVC_LB_IP} 24 | -------------------------------------------------------------------------------- /cluster/apps/dns/production/unbound/unbound-udp-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: unbound-production-udp-service 6 | namespace: dns 7 | labels: 8 | app: unbound-production 9 | environment: production 10 | annotations: 11 | metallb.universe.tf/address-pool: dns 12 | metallb.universe.tf/allow-shared-ip: unbound-production-svc 13 | spec: 14 | selector: 15 | app: unbound-production 16 | environment: production 17 | ports: 18 | - name: dns-udp 19 | port: 53 20 | protocol: UDP 21 | targetPort: 53 22 | type: 
LoadBalancer 23 | loadBalancerIP: ${UNBOUND_SVC_LB_IP} 24 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/blocky-staging/blocky-config/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | configMapGenerator: 6 | - name: blocky-config 7 | namespace: dns-staging 8 | files: 9 | - config.yml 10 | generatorOptions: 11 | disableNameSuffixHash: true 12 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/blocky-staging/blocky-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: blocky 6 | namespace: dns-staging 7 | labels: 8 | app: blocky 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: blocky 14 | strategy: 15 | rollingUpdate: 16 | maxSurge: 1 17 | maxUnavailable: 0 18 | type: RollingUpdate 19 | template: 20 | metadata: 21 | labels: 22 | app: blocky 23 | name: blocky 24 | spec: 25 | containers: 26 | - name: blocky 27 | image: spx01/blocky:v0.16 28 | imagePullPolicy: IfNotPresent 29 | env: 30 | - name: TZ 31 | value: ${TIMEZONE} 32 | ports: 33 | - name: http 34 | containerPort: 4000 35 | protocol: TCP 36 | - name: dns-tcp 37 | containerPort: 53 38 | protocol: TCP 39 | - name: dns-udp 40 | containerPort: 53 41 | protocol: UDP 42 | livenessProbe: 43 | tcpSocket: 44 | port: 4000 45 | timeoutSeconds: 1 46 | periodSeconds: 10 47 | successThreshold: 1 48 | failureThreshold: 3 49 | readinessProbe: 50 | tcpSocket: 51 | port: 4000 52 | timeoutSeconds: 1 53 | periodSeconds: 10 54 | successThreshold: 1 55 | failureThreshold: 3 56 | startupProbe: 57 | tcpSocket: 58 | port: 4000 59 | timeoutSeconds: 1 60 | periodSeconds: 5 61 | successThreshold: 1 62 | failureThreshold: 30 63 | volumeMounts: 64 | 
- name: blocky-config 65 | mountPath: /app/config.yml 66 | subPath: config.yml 67 | restartPolicy: Always 68 | volumes: 69 | - name: blocky-config 70 | configMap: 71 | name: blocky-config 72 | defaultMode: 420 73 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/blocky-staging/blocky-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.containo.us/v1alpha1 3 | kind: IngressRoute 4 | metadata: 5 | name: blocky-local-ingressroute 6 | namespace: dns-staging 7 | spec: 8 | entryPoints: 9 | - web 10 | routes: 11 | - match: Host(`blocky.${SECRET_DOMAIN_LOCAL_02}`) && (PathPrefix(`/metrics`) || PathPrefix(`/api`)) 12 | kind: Rule 13 | services: 14 | - name: blocky-tcp 15 | port: 4000 16 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/blocky-staging/blocky-tcp-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: blocky-tcp 6 | namespace: dns-staging 7 | labels: 8 | app: blocky 9 | annotations: 10 | metallb.universe.tf/address-pool: services 11 | metallb.universe.tf/allow-shared-ip: blocky-svc 12 | spec: 13 | selector: 14 | app: blocky 15 | ports: 16 | - name: http 17 | port: 4000 18 | protocol: TCP 19 | targetPort: http 20 | - name: dns-tcp 21 | port: 53 22 | protocol: TCP 23 | targetPort: dns-tcp 24 | # sessionAffinity: None 25 | externalTrafficPolicy: Local 26 | type: LoadBalancer 27 | loadBalancerIP: ${BLOCKY_SVC_LB_IP} 28 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/blocky-staging/blocky-udp-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: blocky-udp 6 | namespace: dns-staging 7 | labels: 8 
| app: blocky 9 | annotations: 10 | metallb.universe.tf/address-pool: services 11 | metallb.universe.tf/allow-shared-ip: blocky-svc 12 | spec: 13 | selector: 14 | app: blocky 15 | ports: 16 | - name: dns-udp 17 | port: 53 18 | protocol: UDP 19 | targetPort: dns-udp 20 | # sessionAffinity: None 21 | externalTrafficPolicy: Local 22 | type: LoadBalancer 23 | loadBalancerIP: ${BLOCKY_SVC_LB_IP} 24 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/blocky-staging/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - blocky-config 6 | # - blocky-configmap.yaml 7 | # - blocky-tcp-service.yaml 8 | # - blocky-udp-service.yaml 9 | # - blocky-deployment.yaml 10 | # - blocky-ingressroute.yaml 11 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - namespace.yaml 6 | - unbound-staging 7 | - pihole-staging 8 | # - blocky-staging 9 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: dns-staging 6 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/pihole-staging/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - pihole-config 7 | # - pihole-configmap.yaml 8 | - pihole-tcp-service.yaml 9 | - 
pihole-udp-service.yaml 10 | - pihole-deployment.yaml 11 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/pihole-staging/pihole-config/adlists.list: -------------------------------------------------------------------------------- 1 | https://dbl.oisd.nl/ 2 | https://raw.githubusercontent.com/PolishFiltersTeam/KADhosts/master/KADhosts.txt 3 | https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Spam/hosts 4 | https://v.firebog.net/hosts/static/w3kbl.txt 5 | https://adaway.org/hosts.txt 6 | https://v.firebog.net/hosts/AdguardDNS.txt 7 | https://v.firebog.net/hosts/Admiral.txt 8 | https://raw.githubusercontent.com/anudeepND/blacklist/master/adservers.txt 9 | https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt 10 | https://v.firebog.net/hosts/Easylist.txt 11 | https://pgl.yoyo.org/adservers/serverlist.php?hostformat=hosts&showintro=0&mimetype=plaintext 12 | https://raw.githubusercontent.com/FadeMind/hosts.extras/master/UncheckyAds/hosts 13 | https://raw.githubusercontent.com/bigdargon/hostsVN/master/hosts 14 | https://v.firebog.net/hosts/Easyprivacy.txt 15 | https://v.firebog.net/hosts/Prigent-Ads.txt 16 | https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.2o7Net/hosts 17 | https://raw.githubusercontent.com/crazy-max/WindowsSpyBlocker/master/data/hosts/spy.txt 18 | https://hostfiles.frogeye.fr/firstparty-trackers-hosts.txt 19 | https://raw.githubusercontent.com/DandelionSprout/adfilt/master/Alternate%20versions%20Anti-Malware%20List/AntiMalwareHosts.txt 20 | https://osint.digitalside.it/Threat-Intel/lists/latestdomains.txt 21 | https://s3.amazonaws.com/lists.disconnect.me/simple_malvertising.txt 22 | https://v.firebog.net/hosts/Prigent-Crypto.txt 23 | https://bitbucket.org/ethanr/dns-blacklists/raw/8575c9f96e5b4a1308f2f12394abd86d0927a4a0/bad_lists/Mandiant_APT1_Report_Appendix_D.txt 24 | https://phishing.army/download/phishing_army_blocklist_extended.txt 25 | 
https://gitlab.com/quidsup/notrack-blocklists/raw/master/notrack-malware.txt 26 | https://raw.githubusercontent.com/Spam404/lists/master/main-blacklist.txt 27 | https://raw.githubusercontent.com/FadeMind/hosts.extras/master/add.Risk/hosts 28 | https://urlhaus.abuse.ch/downloads/hostfile/ 29 | https://zerodot1.gitlab.io/CoinBlockerLists/hosts_browser 30 | https://raw.githubusercontent.com/anudeepND/blacklist/master/adservers.txt 31 | https://raw.githubusercontent.com/anudeepND/blacklist/master/facebook.txt 32 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/pihole-staging/pihole-config/custom.list: -------------------------------------------------------------------------------- 1 | ${SECRET_PIHOLE_DNS_RECORD_01} 2 | ${SECRET_PIHOLE_DNS_RECORD_02} 3 | ${SECRET_PIHOLE_DNS_RECORD_03} 4 | ${SECRET_PIHOLE_DNS_RECORD_04} 5 | ${SECRET_PIHOLE_DNS_RECORD_05} 6 | ${SECRET_PIHOLE_DNS_RECORD_06} 7 | ${SECRET_PIHOLE_DNS_RECORD_07} 8 | ${SECRET_PIHOLE_DNS_RECORD_08} 9 | ${SECRET_PIHOLE_DNS_RECORD_09} 10 | ${SECRET_PIHOLE_DNS_RECORD_10} 11 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/pihole-staging/pihole-config/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | configMapGenerator: 6 | - name: pihole-env-vars 7 | namespace: dns-staging 8 | envs: 9 | - pihole-env-vars.env 10 | - name: custom.list 11 | namespace: dns-staging 12 | files: 13 | - custom.list 14 | - name: regex.list 15 | namespace: dns-staging 16 | files: 17 | - regex.list 18 | - name: whitelist.txt 19 | namespace: dns-staging 20 | files: 21 | - whitelist.txt 22 | - name: adlists.list 23 | namespace: dns-staging 24 | files: 25 | - adlists.list 26 | generatorOptions: 27 | disableNameSuffixHash: true 28 | 
-------------------------------------------------------------------------------- /cluster/apps/dns/staging/pihole-staging/pihole-config/pihole-env-vars.env: -------------------------------------------------------------------------------- 1 | WEBPASSWORD=${SECRET_PIHOLE_WEBPASSWORD} 2 | ADMIN_EMAIL=${SECRET_EMAIL} 3 | TZ=${TIMEZONE} 4 | DNS1=${UNBOUND_STAGING_SVC_LB_IP} 5 | DNS2=${UNBOUND_STAGING_SVC_LB_IP} 6 | ServerIP=${PIHOLE_STAGING_SVC_LB_IP} 7 | IPv6=false 8 | TEMPERATUREUNIT=c 9 | WEBUIBOXEDLAYOUT=boxed 10 | WEBTHEME=default-dark 11 | FTLCONF_REPLY_ADDR4=${PIHOLE_STAGING_SVC_LB_IP} 12 | FTLCONF_MAXDBDAYS=14 13 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/pihole-staging/pihole-config/regex.list: -------------------------------------------------------------------------------- 1 | (\.|^)trbo\.com$ 2 | # https://raw.githubusercontent.com/mmotti/pihole-regex/master/regex.list 3 | ^ad([sxv]?[0-9]*|system)[_.-]([^.[:space:]]+\.){1,}|[_.-]ad([sxv]?[0-9]*|system)[_.-] 4 | ^(.+[_.-])?adse?rv(er?|ice)?s?[0-9]*[_.-] 5 | ^(.+[_.-])?telemetry[_.-] 6 | ^adim(age|g)s?[0-9]*[_.-] 7 | ^adtrack(er|ing)?[0-9]*[_.-] 8 | ^advert(s|is(ing|ements?))?[0-9]*[_.-] 9 | ^aff(iliat(es?|ion))?[_.-] 10 | ^analytics?[_.-] 11 | ^banners?[_.-] 12 | ^beacons?[0-9]*[_.-] 13 | ^count(ers?)?[0-9]*[_.-] 14 | ^mads\. 15 | ^pixels?[-.] 
16 | ^stat(s|istics)?[0-9]*[_.-] 17 | # block crappy speedtest.net server 18 | (\.|^)contabo\.net$ 19 | (\.|^)net-d-sign\.de$ 20 | (\.|^)sitnetworks\.de$ 21 | (\.|^)goingnet\.at$ 22 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/pihole-staging/pihole-tcp-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: pihole-staging-tcp-service 6 | namespace: dns-staging 7 | labels: 8 | app: pihole-staging 9 | environment: staging 10 | annotations: 11 | metallb.universe.tf/address-pool: dns 12 | metallb.universe.tf/allow-shared-ip: pihole-staging-svc 13 | spec: 14 | selector: 15 | app: pihole-staging 16 | environment: staging 17 | ports: 18 | - name: http 19 | port: 80 20 | protocol: TCP 21 | targetPort: http 22 | - name: https 23 | port: 443 24 | protocol: TCP 25 | targetPort: https 26 | - name: dns-tcp 27 | port: 53 28 | protocol: TCP 29 | targetPort: dns-tcp 30 | sessionAffinity: None 31 | externalTrafficPolicy: Local 32 | type: LoadBalancer 33 | loadBalancerIP: ${PIHOLE_STAGING_SVC_LB_IP} 34 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/pihole-staging/pihole-udp-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: pihole-staging-udp-service 6 | namespace: dns-staging 7 | labels: 8 | app: pihole-staging 9 | environment: staging 10 | annotations: 11 | metallb.universe.tf/address-pool: dns 12 | metallb.universe.tf/allow-shared-ip: pihole-staging-svc 13 | spec: 14 | selector: 15 | app: pihole-staging 16 | environment: staging 17 | ports: 18 | - name: dns-udp 19 | port: 53 20 | protocol: UDP 21 | targetPort: dns-udp 22 | - name: client-udp 23 | port: 67 24 | protocol: UDP 25 | targetPort: client-udp 26 | sessionAffinity: None 27 | 
externalTrafficPolicy: Local 28 | type: LoadBalancer 29 | loadBalancerIP: ${PIHOLE_STAGING_SVC_LB_IP} 30 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/unbound-staging/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - unbound-config 7 | - unbound-tcp-service.yaml 8 | - unbound-udp-service.yaml 9 | - unbound-deployment.yaml 10 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/unbound-staging/unbound-config/a-records.conf: -------------------------------------------------------------------------------- 1 | # A Record 2 | local-data: "adminer.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 3 | local-data: "chronograf.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 4 | local-data: "cloud.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 5 | local-data: "doc.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 6 | local-data: "git.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 7 | local-data: "grafana.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 8 | local-data: "homer.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 9 | local-data: "longhorn.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 10 | local-data: "pihole-metrics.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 11 | local-data: "pihole.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 12 | local-data: "portainer.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 13 | local-data: "prometheus-alertmanager.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 14 | local-data: "prometheus.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 15 | local-data: "simone.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 16 | local-data: "speedtest.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 17 | local-data: "test.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 18 | local-data: "traefik.${SECRET_DOMAIN_LOCAL}. 
A 192.168.178.240" 19 | local-data: "vault.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 20 | local-data: "whoami.${SECRET_DOMAIN_LOCAL}. A 192.168.178.240" 21 | 22 | # PTR Record 23 | local-data-ptr: "192.168.178.240 adminer.${SECRET_DOMAIN_LOCAL}." 24 | local-data-ptr: "192.168.178.240 chronograf.${SECRET_DOMAIN_LOCAL}." 25 | local-data-ptr: "192.168.178.240 cloud.${SECRET_DOMAIN_LOCAL}." 26 | local-data-ptr: "192.168.178.240 doc.${SECRET_DOMAIN_LOCAL}." 27 | local-data-ptr: "192.168.178.240 git.${SECRET_DOMAIN_LOCAL}." 28 | local-data-ptr: "192.168.178.240 grafana.${SECRET_DOMAIN_LOCAL}." 29 | local-data-ptr: "192.168.178.240 homer.${SECRET_DOMAIN_LOCAL}." 30 | local-data-ptr: "192.168.178.240 longhorn.${SECRET_DOMAIN_LOCAL}." 31 | local-data-ptr: "192.168.178.240 pihole-metrics.${SECRET_DOMAIN_LOCAL}." 32 | local-data-ptr: "192.168.178.240 pihole.${SECRET_DOMAIN_LOCAL}." 33 | local-data-ptr: "192.168.178.240 portainer.${SECRET_DOMAIN_LOCAL}." 34 | local-data-ptr: "192.168.178.240 prometheus-alertmanager.${SECRET_DOMAIN_LOCAL}." 35 | local-data-ptr: "192.168.178.240 prometheus.${SECRET_DOMAIN_LOCAL}." 36 | local-data-ptr: "192.168.178.240 simone.${SECRET_DOMAIN_LOCAL}." 37 | local-data-ptr: "192.168.178.240 speedtest.${SECRET_DOMAIN_LOCAL}." 38 | local-data-ptr: "192.168.178.240 test.${SECRET_DOMAIN_LOCAL}." 39 | local-data-ptr: "192.168.178.240 traefik.${SECRET_DOMAIN_LOCAL}." 40 | local-data-ptr: "192.168.178.240 vault.${SECRET_DOMAIN_LOCAL}." 41 | local-data-ptr: "192.168.178.240 whoami.${SECRET_DOMAIN_LOCAL}." 
42 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/unbound-staging/unbound-config/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | configMapGenerator: 6 | - name: unbound-config 7 | namespace: dns-staging 8 | files: 9 | - unbound.conf 10 | - name: unbound-root-hints 11 | namespace: dns-staging 12 | files: 13 | - root.hints 14 | - name: unbound-root-key-initial 15 | namespace: dns-staging 16 | files: 17 | - root.key 18 | - name: unbound-a-records 19 | namespace: dns-staging 20 | files: 21 | - a-records.conf 22 | generatorOptions: 23 | disableNameSuffixHash: true 24 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/unbound-staging/unbound-config/root.key: -------------------------------------------------------------------------------- 1 | ; autotrust trust anchor file 2 | ;;id: . 1 3 | ;;last_queried: 1607451082 ;;Tue Dec 8 18:11:22 2020 4 | ;;last_success: 1607451082 ;;Tue Dec 8 18:11:22 2020 5 | ;;next_probe_time: 1607491905 ;;Wed Dec 9 05:31:45 2020 6 | ;;query_failed: 0 7 | ;;query_interval: 43200 8 | ;;retry_time: 8640 9 | . 
86400 IN DNSKEY 257 3 8 AwEAAaz/tAm8yTn4Mfeh5eyI96WSVexTBAvkMgJzkKTOiW1vkIbzxeF3+/4RgWOq7HrxRixHlFlExOLAJr5emLvN7SWXgnLh4+B5xQlNVz8Og8kvArMtNROxVQuCaSnIDdD5LKyWbRd2n9WGe2R8PzgCmr3EgVLrjyBxWezF0jLHwVN8efS3rCj/EWgvIWgb9tarpVUDK/b58Da+sqqls3eNbuv7pr+eoZG+SrDK6nWeL3c6H5Apxz7LjVc1uTIdsIXxuOLYA4/ilBmSVIzuDWfdRUfhHdY6+cn8HFRm+2hM8AnXGXws9555KrUB5qihylGa8subX2Nn6UwNR1AkUTV74bU= ;{id = 20326 (ksk), size = 2048b} ;;state=2 [ VALID ] ;;count=0 ;;lastchange=1607451082 ;;Tue Dec 8 18:11:22 2020 10 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/unbound-staging/unbound-tcp-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: unbound-staging-tcp-service 6 | namespace: dns-staging 7 | labels: 8 | app: unbound-staging 9 | environment: staging 10 | annotations: 11 | metallb.universe.tf/address-pool: dns 12 | metallb.universe.tf/allow-shared-ip: unbound-staging-svc 13 | spec: 14 | selector: 15 | app: unbound-staging 16 | environment: staging 17 | ports: 18 | - name: dns-tcp 19 | port: 53 20 | protocol: TCP 21 | targetPort: 53 22 | type: LoadBalancer 23 | loadBalancerIP: ${UNBOUND_STAGING_SVC_LB_IP} 24 | -------------------------------------------------------------------------------- /cluster/apps/dns/staging/unbound-staging/unbound-udp-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: unbound-staging-udp-service 6 | namespace: dns-staging 7 | labels: 8 | app: unbound-staging 9 | environment: staging 10 | annotations: 11 | metallb.universe.tf/address-pool: dns 12 | metallb.universe.tf/allow-shared-ip: unbound-staging-svc 13 | spec: 14 | selector: 15 | app: unbound-staging 16 | environment: staging 17 | ports: 18 | - name: dns-udp 19 | port: 53 20 | protocol: UDP 21 | targetPort: 53 22 | type: 
LoadBalancer 23 | loadBalancerIP: ${UNBOUND_STAGING_SVC_LB_IP} 24 | -------------------------------------------------------------------------------- /cluster/apps/documentation/documentation-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: documentation-static-nginx 6 | namespace: documentation 7 | spec: 8 | selector: 9 | run: documentation-static-nginx 10 | ports: 11 | - name: http 12 | port: 80 13 | 14 | --- 15 | apiVersion: apps/v1 16 | kind: Deployment 17 | metadata: 18 | labels: 19 | run: documentation-static-nginx 20 | name: documentation-static-nginx 21 | namespace: documentation 22 | spec: 23 | replicas: 1 24 | selector: 25 | matchLabels: 26 | run: documentation-static-nginx 27 | template: 28 | metadata: 29 | labels: 30 | run: documentation-static-nginx 31 | spec: 32 | nodeSelector: 33 | node-type: worker 34 | containers: 35 | - image: nginx 36 | name: nginx 37 | volumeMounts: 38 | - mountPath: /usr/share/nginx/html 39 | name: static-content 40 | volumes: 41 | - name: static-content 42 | persistentVolumeClaim: 43 | claimName: documentation-static-nginx-pvc 44 | -------------------------------------------------------------------------------- /cluster/apps/documentation/documentation-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # documentation-local-ingressroute 2 | # documentation-http-ingressroute 3 | # documentation-https-ingressroute 4 | 5 | --- 6 | apiVersion: traefik.containo.us/v1alpha1 7 | kind: IngressRoute 8 | metadata: 9 | name: documentation-local-ingressroute 10 | namespace: documentation 11 | spec: 12 | entryPoints: 13 | - web 14 | routes: 15 | - match: Host(`doc.${SECRET_DOMAIN_LOCAL_02}`) 16 | kind: Rule 17 | services: 18 | - name: documentation-static-nginx 19 | port: 80 20 | 21 | --- 22 | apiVersion: traefik.containo.us/v1alpha1 23 | kind: IngressRoute 24 | metadata: 
25 | name: documentation-http-ingressroute 26 | namespace: documentation 27 | spec: 28 | entryPoints: 29 | - web 30 | routes: 31 | - match: Host(`doc.${SECRET_DIGITALOCEAN_DOMAIN_02}`) 32 | kind: Rule 33 | middlewares: 34 | - name: https-redirect-scheme-middleware 35 | namespace: traefik 36 | services: 37 | - name: documentation-static-nginx 38 | port: 80 39 | 40 | --- 41 | apiVersion: traefik.containo.us/v1alpha1 42 | kind: IngressRoute 43 | metadata: 44 | name: documentation-https-ingressroute 45 | namespace: traefik 46 | annotations: 47 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 48 | spec: 49 | entryPoints: 50 | - websecure 51 | routes: 52 | - match: Host(`doc.${SECRET_DIGITALOCEAN_DOMAIN_02}`) 53 | kind: Rule 54 | middlewares: 55 | - name: secure-headers-middleware 56 | namespace: traefik 57 | services: 58 | - name: documentation-static-nginx 59 | namespace: documentation 60 | port: 80 61 | tls: 62 | secretName: "${SECRET_DIGITALOCEAN_DOMAIN_02/./-}-tls"  # must match the Host() domain above 63 | -------------------------------------------------------------------------------- /cluster/apps/documentation/documentation-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: documentation-static-nginx-pvc 6 | namespace: documentation 7 | annotations: 8 | nfs.io/storage-path: "html" 9 | spec: 10 | storageClassName: storage-nfs-provisioner 11 | accessModes: 12 | - ReadWriteOnce 13 | resources: 14 | requests: 15 | storage: 128Mi 16 | -------------------------------------------------------------------------------- /cluster/apps/documentation/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - namespace.yaml 7 | - documentation-pvc.yaml 8 | - documentation-deployment.yaml 9 | - documentation-ingressroute.yaml 10 | 
-------------------------------------------------------------------------------- /cluster/apps/documentation/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: documentation 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/kubernetes-dashboard/kubernetes-dashboard-adminuser.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: admin-user 6 | namespace: kubernetes-dashboard 7 | 8 | --- 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | kind: ClusterRoleBinding 11 | metadata: 12 | name: admin-user 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: ClusterRole 16 | name: cluster-admin 17 | subjects: 18 | - kind: ServiceAccount 19 | name: admin-user 20 | namespace: kubernetes-dashboard 21 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/kubernetes-dashboard/kubernetes-dashboard-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # kubernetes-dashboard-https-ingressroute 2 | # kubernetes-dashboard-transport 3 | 4 | --- 5 | apiVersion: traefik.containo.us/v1alpha1 6 | kind: IngressRoute 7 | metadata: 8 | name: kubernetes-dashboard-https-ingressroute 9 | namespace: traefik 10 | annotations: 11 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 12 | spec: 13 | entryPoints: 14 | - websecure 15 | routes: 16 | - match: Host(`kube.${SECRET_DOMAIN}`) 17 | kind: Rule 18 | middlewares: 19 | - name: authelia-middleware 20 | namespace: authentication 21 | - name: secure-headers-middleware 22 | namespace: traefik 23 | services: 24 | - name: kubernetes-dashboard 25 | namespace: kubernetes-dashboard 26 | port: 443 27 | serversTransport: 
kubernetes-dashboard-transport 28 | tls: 29 | secretName: "${SECRET_DOMAIN/./-}-tls" 30 | options: 31 | name: default-tlsoption 32 | namespace: traefik 33 | 34 | --- 35 | apiVersion: traefik.containo.us/v1alpha1 36 | kind: ServersTransport 37 | metadata: 38 | name: kubernetes-dashboard-transport 39 | namespace: kubernetes-dashboard 40 | spec: 41 | serverName: "kube.${SECRET_DOMAIN}" 42 | insecureSkipVerify: true 43 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/kubernetes-dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - https://raw.githubusercontent.com/kubernetes/dashboard/v2.7.0/aio/deploy/recommended.yaml 7 | - kubernetes-dashboard-adminuser.yaml 8 | - kubernetes-dashboard-ingressroute.yaml 9 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/kured/kured-helmrelease.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: kured 6 | namespace: flux-system 7 | spec: 8 | chart: 9 | spec: 10 | chart: kured 11 | sourceRef: 12 | kind: HelmRepository 13 | name: kured 14 | version: 2.11.2 15 | interval: 1m0s 16 | targetNamespace: kube-system 17 | values: 18 | affinity: {} 19 | configuration: 20 | alertFilterRegexp: "" 21 | alertFiringOnly: false 22 | annotateNodes: false 23 | blockingPodSelector: [] 24 | drainGracePeriod: "" 25 | drainTimeout: "" 26 | endTime: "08:00" 27 | forceReboot: false 28 | lockAnnotation: "" 29 | lockReleaseDelay: 0 30 | lockTtl: 0 31 | logFormat: text 32 | messageTemplateDrain: "" 33 | messageTemplateReboot: "" 34 | notifyUrl: ${SECRET_SLACK_SHOUTRRR_URL_KURED} 35 | period: "" 36 | preferNoScheduleTaint: "" 37 | prometheusUrl: "" 38 | 
rebootCommand: /bin/systemctl reboot 39 | rebootDays: [] 40 | rebootDelay: "" 41 | rebootSentinel: "" 42 | rebootSentinelCommand: "" 43 | skipWaitForDeleteTimeout: "" 44 | slackChannel: "" 45 | slackHookUrl: "" 46 | slackUsername: "" 47 | startTime: "03:00" 48 | timeZone: ${TIMEZONE} 49 | dsAnnotations: {} 50 | extraArgs: {} 51 | extraEnvVars: null 52 | image: 53 | pullPolicy: IfNotPresent 54 | pullSecrets: [] 55 | repository: raspbernetes/kured 56 | tag: 1.9.1 57 | maxUnavailable: 1 58 | metrics: 59 | create: false 60 | interval: 60s 61 | labels: {} 62 | namespace: "" 63 | scrapeTimeout: "" 64 | nodeSelector: {} 65 | podAnnotations: {} 66 | podLabels: {} 67 | podSecurityPolicy: 68 | create: false 69 | priorityClassName: "" 70 | rbac: 71 | create: true 72 | resources: {} 73 | service: 74 | annotations: {} 75 | create: false 76 | name: "" 77 | port: 8080 78 | type: ClusterIP 79 | serviceAccount: 80 | create: true 81 | name: null 82 | tolerations: 83 | - effect: NoSchedule 84 | key: node-role.kubernetes.io/master 85 | updateStrategy: RollingUpdate 86 | volumeMounts: [] 87 | volumes: [] 88 | 89 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/kured/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - kured-helmrelease.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - kubernetes-dashboard 7 | # - kured 8 | - reloader 9 | # - system-upgrade-controller 10 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/reloader/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - reloader-helmrelease.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/reloader/reloader-helmrelease.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: reloader 6 | namespace: flux-system 7 | spec: 8 | chart: 9 | spec: 10 | chart: reloader 11 | reconcileStrategy: ChartVersion 12 | sourceRef: 13 | kind: HelmRepository 14 | name: stakater 15 | version: v0.0.117 16 | interval: 1m0s 17 | targetNamespace: kube-system 18 | values: 19 | global: 20 | imagePullSecrets: [] 21 | kubernetes: 22 | host: https://kubernetes.default 23 | reloader: 24 | custom_annotations: {} 25 | deployment: 26 | affinity: {} 27 | annotations: {} 28 | env: 29 | field: null 30 | open: null 31 | secret: null 32 | image: 33 | name: stakater/reloader 34 | pullPolicy: IfNotPresent 35 | tag: v0.0.105 36 | labels: 37 | group: com.stakater.platform 38 | provider: stakater 39 | version: v0.0.105 40 | livenessProbe: {} 41 | nodeSelector: 42 | node-type: worker 43 | pod: 44 | annotations: {} 45 | priorityClassName: "" 46 | readinessProbe: {} 47 | replicas: 1 48 | resources: {} 49 | securityContext: 50 | runAsNonRoot: true 51 | runAsUser: 65534 52 | tolerations: [] 53 | ignoreConfigMaps: false 54 | ignoreNamespaces: "" 55 | ignoreSecrets: false 56 | isArgoRollouts: false 57 | isOpenshift: false 58 | legacy: 59 | rbac: false 60 | logFormat: "" 61 | matchLabels: {} 62 | podMonitor: 63 | enabled: false 64 | rbac: 65 | enabled: true 66 | labels: {} 67 | readOnlyRootFileSystem: false 68 | reloadStrategy: default 69 | service: {} 70 | serviceAccount: 71 | annotations: {} 72 | create: true 73 | labels: {} 74 | name: null 75 | 
serviceMonitor: 76 | enabled: false 77 | watchGlobally: true 78 | 79 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/system-upgrade-controller/k3s-upgrade-plan.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: upgrade.cattle.io/v1 3 | kind: Plan 4 | metadata: 5 | name: k3s-server 6 | namespace: kube-system 7 | labels: 8 | k3s-upgrade: server 9 | spec: 10 | concurrency: 1 11 | version: ${NEXT_K3S_VERSION} 12 | nodeSelector: 13 | matchExpressions: 14 | - {key: k3s-upgrade, operator: Exists} 15 | - {key: k3s-upgrade, operator: NotIn, values: ["disabled", "false"]} 16 | # - {key: k3s.io/hostname, operator: Exists} 17 | # - {key: k3os.io/mode, operator: DoesNotExist} 18 | - {key: node-role.kubernetes.io/master, operator: In, values: ["true"]} 19 | serviceAccountName: system-upgrade 20 | cordon: true 21 | drain: 22 | deleteLocalData: true 23 | ignoreDaemonSets: true 24 | force: true 25 | upgrade: 26 | image: rancher/k3s-upgrade 27 | 28 | --- 29 | apiVersion: upgrade.cattle.io/v1 30 | kind: Plan 31 | metadata: 32 | name: k3s-agent 33 | namespace: kube-system 34 | labels: 35 | k3s-upgrade: agent 36 | spec: 37 | concurrency: 1 38 | version: ${NEXT_K3S_VERSION} 39 | nodeSelector: 40 | matchExpressions: 41 | - {key: k3s-upgrade, operator: Exists} 42 | - {key: k3s-upgrade, operator: NotIn, values: ["disabled", "false"]} 43 | # - {key: k3s.io/hostname, operator: Exists} 44 | # - {key: k3os.io/mode, operator: DoesNotExist} 45 | - {key: node-role.kubernetes.io/master, operator: NotIn, values: ["true"]} 46 | serviceAccountName: system-upgrade 47 | prepare: 48 | # Since v0.5.0-m1 SUC will use the resolved version of the plan for the tag on the prepare container. 
49 | # image: rancher/k3s-upgrade:v1.17.4-k3s1 50 | image: rancher/k3s-upgrade 51 | args: ["prepare", "k3s-server"] 52 | drain: 53 | deleteLocalData: true 54 | ignoreDaemonSets: true 55 | force: true 56 | skipWaitForDeleteTimeout: 60 57 | upgrade: 58 | image: rancher/k3s-upgrade 59 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/system-upgrade-controller/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | namespace: kube-system 6 | 7 | resources: 8 | - github.com/rancher/system-upgrade-controller?ref=v0.8.1 9 | - k3s-upgrade-plan.yaml 10 | 11 | images: 12 | - name: rancher/system-upgrade-controller 13 | newTag: v0.8.1 14 | -------------------------------------------------------------------------------- /cluster/apps/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - authentication 7 | # authelia 8 | - blogs 9 | # simone 10 | - development 11 | # adminer 12 | # gitea 13 | - dns 14 | # do-dns-updater 15 | # unbound 16 | # pihole 17 | # - documentation 18 | - kube-system 19 | # reloader 20 | # system-upgrade-controller 21 | # kured 22 | - monitoring 23 | # grafana 24 | # 25 | # kube-prometheus-stack 26 | - nextcloud 27 | - utils 28 | # network-multitool 29 | # whoami 30 | - vaultwarden 31 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/grafana/grafana-dashboards/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | generatorOptions: 6 | disableNameSuffixHash: true 7 | labels: 8 | grafana_dashboard: "1" 9 | 10 | namespace: 
monitoring 11 | 12 | configMapGenerator: 13 | - name: dashboards-k8s-views-global 14 | files: 15 | - k8s-views-global.json 16 | 17 | - name: dashboards-k8s-views-namespaces 18 | files: 19 | - k8s-views-namespaces.json 20 | 21 | - name: dashboards-k8s-views-nodes 22 | files: 23 | - k8s-views-nodes.json 24 | 25 | - name: dashboards-k8s-views-pods 26 | files: 27 | - k8s-views-pods.json 28 | 29 | - name: dashboards-k8s-system-api-server 30 | files: 31 | - k8s-system-api-server.json 32 | 33 | - name: dashboards-k8s-system-coredns 34 | files: 35 | - k8s-system-coredns.json 36 | 37 | # configMapGenerator: 38 | # - name: grafana-simple-cluster-dashboard 39 | # namespace: monitoring 40 | # files: 41 | # - grafana-simple-cluster-dashboard.json 42 | # - name: grafana-node-exporter-full-dashboard 43 | # namespace: monitoring 44 | # files: 45 | # - grafana-node-exporter-full-dashboard.json 46 | 47 | # commonLabels: 48 | # grafana_dashboard: "1" 49 | # generatorOptions: 50 | # disableNameSuffixHash: true 51 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/grafana/grafana-datasources/datasources.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: 1 3 | datasources: 4 | - name: Prometheus 5 | type: prometheus 6 | url: http://${PROMETHEUS_SVC_LB_IP}:9090 7 | access: proxy 8 | isDefault: true 9 | # - name: InfluxDB_fritzboxstats 10 | # type: influxdb 11 | # access: proxy 12 | # database: fritzboxstats 13 | # user: ${SECRET_FRITZINFLUXDB_INFLUXDB_USERNAME} 14 | # url: http://${INFLUXDB_SVC_LB_IP}:8086 15 | # jsonData: 16 | # httpMode: GET 17 | # secureJsonData: 18 | # password: ${SECRET_FRITZINFLUXDB_INFLUXDB_PASSWORD} 19 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/grafana/grafana-datasources/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 
apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | configMapGenerator: 6 | - name: grafana-datasources 7 | namespace: monitoring 8 | files: 9 | - datasources.yaml 10 | 11 | commonLabels: 12 | grafana_datasource: "1" 13 | generatorOptions: 14 | disableNameSuffixHash: true 15 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/grafana/grafana-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # grafana-local-ingressroute 2 | # grafana-http-ingressroute 3 | # grafana-https-ingressroute 4 | 5 | # --- 6 | # apiVersion: traefik.containo.us/v1alpha1 7 | # kind: IngressRoute 8 | # metadata: 9 | # name: grafana-local-ingressroute 10 | # namespace: monitoring 11 | # spec: 12 | # entryPoints: 13 | # - web 14 | # routes: 15 | # - match: Host(`grafana.${SECRET_DOMAIN_LOCAL_02}`) 16 | # kind: Rule 17 | # services: 18 | # - name: monitoring-grafana 19 | # port: 80 20 | 21 | --- 22 | apiVersion: traefik.containo.us/v1alpha1 23 | kind: IngressRoute 24 | metadata: 25 | name: grafana-http-ingressroute 26 | namespace: monitoring 27 | spec: 28 | entryPoints: 29 | - web 30 | routes: 31 | - match: Host(`grafana.${SECRET_DOMAIN}`) 32 | kind: Rule 33 | middlewares: 34 | - name: https-redirect-scheme-middleware 35 | namespace: traefik 36 | services: 37 | - name: monitoring-grafana 38 | port: 80 39 | 40 | --- 41 | apiVersion: traefik.containo.us/v1alpha1 42 | kind: IngressRoute 43 | metadata: 44 | name: grafana-https-ingressroute 45 | namespace: traefik 46 | annotations: 47 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 48 | spec: 49 | entryPoints: 50 | - websecure 51 | routes: 52 | - match: Host(`grafana.${SECRET_DOMAIN}`) 53 | kind: Rule 54 | middlewares: 55 | - name: authelia-middleware 56 | namespace: authentication 57 | - name: secure-headers-middleware 58 | namespace: traefik 59 | services: 60 | - name: monitoring-grafana 61 | 
namespace: monitoring 62 | port: 80 63 | tls: 64 | secretName: "${SECRET_DOMAIN/./-}-tls" 65 | options: 66 | name: default-tlsoption 67 | namespace: traefik 68 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/grafana/grafana-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: grafana-data-pvc 6 | namespace: monitoring 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | storageClassName: longhorn-worker-node-storageclass 11 | resources: 12 | requests: 13 | storage: 8Gi 14 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/grafana/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - grafana-pvc.yaml 7 | - grafana-helmrelease.yaml 8 | - grafana-ingressroute.yaml 9 | - grafana-datasources 10 | - grafana-dashboards 11 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/influxdata/chronograf-helmrelease.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: chronograf 6 | namespace: flux-system 7 | spec: 8 | chart: 9 | spec: 10 | chart: chronograf 11 | sourceRef: 12 | kind: HelmRepository 13 | name: influxdata 14 | version: 1.2.3 15 | interval: 1m0s 16 | targetNamespace: monitoring 17 | values: 18 | affinity: {} 19 | env: 20 | HOST_PAGE_DISABLED: true 21 | envFromSecret: "" 22 | image: 23 | pullPolicy: IfNotPresent 24 | repository: chronograf 25 | tag: 1.9.3 26 | influxdb: {} 27 | ingress: 28 | annotations: {} 29 | className: null 30 | enabled: false 31 | hostname: chronograf.foobar.com 32 | path: / 33 
| tls: false 34 | nodeSelector: 35 | node-type: worker 36 | oauth: 37 | enabled: false 38 | generic: 39 | api_key: "" 40 | api_url: "" 41 | auth_url: "" 42 | enabled: false 43 | public_url: "" 44 | scopes: "" 45 | token_url: "" 46 | github: 47 | enabled: false 48 | gh_orgs: "" 49 | google: 50 | domains: "" 51 | enabled: false 52 | public_url: "" 53 | heroku: 54 | enabled: false 55 | he_orgs: "" 56 | persistence: 57 | accessMode: ReadWriteOnce 58 | enabled: true 59 | size: 2Gi 60 | storageClass: longhorn-worker-node-storageclass 61 | resources: 62 | limits: 63 | cpu: 2 64 | memory: 2Gi 65 | requests: 66 | cpu: 0.1 67 | memory: 256Mi 68 | service: 69 | replicas: 1 70 | type: ClusterIP 71 | tolerations: [] 72 | updateStrategy: 73 | type: RollingUpdate 74 | 75 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/influxdata/chronograf-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # chronograf-local-ingressroute 2 | # chronograf-http-ingressroute 3 | # chronograf-https-ingressroute 4 | 5 | --- 6 | apiVersion: traefik.containo.us/v1alpha1 7 | kind: IngressRoute 8 | metadata: 9 | name: chronograf-local-ingressroute 10 | namespace: monitoring 11 | spec: 12 | entryPoints: 13 | - web 14 | routes: 15 | - match: Host(`chronograf.${SECRET_DOMAIN_LOCAL_02}`) 16 | kind: Rule 17 | services: 18 | - name: monitoring-chronograf-chronograf 19 | port: 80 20 | 21 | --- 22 | apiVersion: traefik.containo.us/v1alpha1 23 | kind: IngressRoute 24 | metadata: 25 | name: chronograf-http-ingressroute 26 | namespace: monitoring 27 | spec: 28 | entryPoints: 29 | - web 30 | routes: 31 | - match: Host(`chronograf.${SECRET_DIGITALOCEAN_DOMAIN_02}`) 32 | kind: Rule 33 | middlewares: 34 | - name: https-redirect-scheme-middleware 35 | namespace: traefik 36 | services: 37 | - name: monitoring-chronograf-chronograf 38 | port: 80 39 | 40 | --- 41 | apiVersion: traefik.containo.us/v1alpha1 42 | 
kind: IngressRoute 43 | metadata: 44 | name: chronograf-https-ingressroute 45 | namespace: traefik 46 | annotations: 47 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 48 | spec: 49 | entryPoints: 50 | - websecure 51 | routes: 52 | - match: Host(`chronograf.${SECRET_DIGITALOCEAN_DOMAIN_02}`) 53 | kind: Rule 54 | middlewares: 55 | - name: authelia-middleware 56 | namespace: authentication 57 | - name: secure-headers-middleware 58 | namespace: traefik 59 | services: 60 | - name: monitoring-chronograf-chronograf 61 | namespace: monitoring 62 | port: 80 63 | tls: 64 | secretName: "${SECRET_DOMAIN/./-}-tls" 65 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/influxdata/influxdb-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # influxdb-tcp-ingressroute 2 | 3 | --- 4 | apiVersion: traefik.containo.us/v1alpha1 5 | kind: IngressRouteTCP 6 | metadata: 7 | name: influxdb-tcp-ingressroute 8 | namespace: monitoring 9 | spec: 10 | entryPoints: 11 | - web 12 | routes: 13 | - match: HostSNI(`*`) 14 | services: 15 | - name: monitoring-influxdb 16 | port: 8086 17 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/influxdata/influxdb-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: influxdb-data-pvc 6 | namespace: monitoring 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | storageClassName: longhorn-worker-node-storageclass 11 | resources: 12 | requests: 13 | storage: 8Gi 14 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/influxdata/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 
4 | resources: 5 | # - enc_influxdb-auth-secret.yaml 6 | - influxdb-pvc.yaml 7 | - influxdb-helmrelease.yaml 8 | - chronograf-helmrelease.yaml 9 | - chronograf-ingressroute.yaml 10 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/kube-prometheus-stack/kube-prometheus-stack-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # prometheus-https-ingressroute 2 | # prometheus-alertmanager-https-ingressroute 3 | 4 | --- 5 | apiVersion: traefik.containo.us/v1alpha1 6 | kind: IngressRoute 7 | metadata: 8 | name: prometheus-https-ingressroute 9 | namespace: traefik 10 | annotations: 11 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 12 | spec: 13 | entryPoints: 14 | - websecure 15 | routes: 16 | - match: Host(`prometheus.${SECRET_DOMAIN}`) 17 | kind: Rule 18 | middlewares: 19 | - name: authelia-middleware 20 | namespace: authentication 21 | - name: secure-headers-middleware 22 | namespace: traefik 23 | services: 24 | - name: prometheus-operated 25 | namespace: monitoring 26 | port: 9090 27 | tls: 28 | secretName: "${SECRET_DOMAIN/./-}-tls" 29 | options: 30 | name: default-tlsoption 31 | namespace: traefik 32 | 33 | --- 34 | apiVersion: traefik.containo.us/v1alpha1 35 | kind: IngressRoute 36 | metadata: 37 | name: prometheus-alertmanager-https-ingressroute 38 | namespace: traefik 39 | annotations: 40 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 41 | spec: 42 | entryPoints: 43 | - websecure 44 | routes: 45 | - match: Host(`prometheus-alertmanager.${SECRET_DOMAIN}`) 46 | kind: Rule 47 | middlewares: 48 | - name: authelia-middleware 49 | namespace: authentication 50 | - name: secure-headers-middleware 51 | namespace: traefik 52 | services: 53 | - name: alertmanager-operated 54 | namespace: monitoring 55 | port: 9093 56 | tls: 57 | secretName: "${SECRET_DOMAIN/./-}-tls" 58 | options: 59 | name: default-tlsoption 60 | 
namespace: traefik 61 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/kube-prometheus-stack/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - kube-prometheus-stack-helmrelease.yaml 7 | - kube-prometheus-stack-ingressroute.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - namespace.yaml 6 | - kube-prometheus-stack 7 | # - prometheus 8 | # - prometheus-exporter 9 | # - influxdata 10 | - grafana 11 | # - tools 12 | - uptime-kuma 13 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: monitoring 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/fritzinfluxdb/fritzinfluxdb-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: fritzinfluxdb-deployment 6 | namespace: monitoring 7 | labels: 8 | app: fritzinfluxdb 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: fritzinfluxdb 14 | strategy: 15 | rollingUpdate: 16 | maxSurge: 1 17 | maxUnavailable: 0 18 | type: RollingUpdate 19 | template: 20 | metadata: 21 | labels: 22 | app: fritzinfluxdb 23 | annotations: 24 | configmap.reloader.stakater.com/reload: "fritzinfluxdb-ini" 25 | spec: 26 | nodeSelector: 27 | node-type: worker 28 | 
containers: 29 | - name: fritzinfluxdb-container 30 | image: banafo/fritzinfluxdb:1.5.0 31 | # image: volschin/fritzinfluxdb:latest 32 | imagePullPolicy: IfNotPresent 33 | volumeMounts: 34 | - name: fritzinfluxdb-ini 35 | mountPath: /app/./fritzinfluxdb.ini 36 | subPath: fritzinfluxdb.ini 37 | restartPolicy: Always 38 | volumes: 39 | - name: fritzinfluxdb-ini 40 | configMap: 41 | name: fritzinfluxdb-ini 42 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/fritzinfluxdb/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | # - fritzinfluxdb-deployment.yaml 7 | 8 | configMapGenerator: 9 | - name: fritzinfluxdb-ini 10 | namespace: monitoring 11 | files: 12 | - fritzinfluxdb.ini 13 | generatorOptions: 14 | disableNameSuffixHash: true 15 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - speedtest-tracker 7 | - fritzinfluxdb 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/prometheus-exporter/fastcom-exporter/fastcom-exporter-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: fastcom-exporter-service 6 | namespace: monitoring 7 | labels: 8 | app: fastcom-exporter 9 | spec: 10 | selector: 11 | app: fastcom-exporter 12 | type: ClusterIP 13 | ports: 14 | - name: http-exporter 15 | port: 9877 16 | targetPort: http-exporter 17 | protocol: TCP 18 | 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | 
name: fastcom-exporter-deployment 24 | namespace: monitoring 25 | labels: 26 | app: fastcom-exporter 27 | spec: 28 | replicas: 1 29 | selector: 30 | matchLabels: 31 | app: fastcom-exporter 32 | template: 33 | metadata: 34 | labels: 35 | app: fastcom-exporter 36 | spec: 37 | nodeSelector: 38 | node-type: worker 39 | containers: 40 | - name: fastcom-exporter-container 41 | image: caarlos0/fastcom-exporter:v1.3.3-arm64 42 | ports: 43 | - name: http-exporter 44 | containerPort: 9877 45 | protocol: TCP 46 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/prometheus-exporter/fastcom-exporter/fastcom-exporter-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # fastcom-exporter-metrics-local-ingressroute 2 | 3 | --- 4 | apiVersion: traefik.containo.us/v1alpha1 5 | kind: IngressRoute 6 | metadata: 7 | name: fastcom-exporter-metrics-local-ingressroute 8 | namespace: monitoring 9 | spec: 10 | entryPoints: 11 | - web 12 | routes: 13 | - match: Host(`fast.${SECRET_DOMAIN_LOCAL_02}`) && PathPrefix(`/metrics`) 14 | kind: Rule 15 | services: 16 | - name: fastcom-exporter-service 17 | port: 9877 18 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/prometheus-exporter/fastcom-exporter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - fastcom-exporter-deployment.yaml 7 | - fastcom-exporter-ingressroute.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/prometheus-exporter/fritzbox-exporter/fritzbox-exporter-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: 
fritzbox-exporter 6 | namespace: monitoring 7 | labels: 8 | app: fritzbox-exporter 9 | spec: 10 | selector: 11 | app: fritzbox-exporter 12 | type: ClusterIP 13 | ports: 14 | - name: http-exporter 15 | port: 9042 16 | protocol: TCP 17 | targetPort: http-exporter 18 | 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | name: fritzbox-exporter 24 | namespace: monitoring 25 | labels: 26 | app: fritzbox-exporter 27 | spec: 28 | replicas: 1 29 | selector: 30 | matchLabels: 31 | app: fritzbox-exporter 32 | template: 33 | metadata: 34 | labels: 35 | app: fritzbox-exporter 36 | spec: 37 | nodeSelector: 38 | node-type: worker 39 | containers: 40 | - name: fritzbox-exporter 41 | image: alexxanddr/fritzbox-exporter:latest 42 | env: 43 | - name: GWURL 44 | value: 'http://192.168.178.1:49000' 45 | - name: USERNAME 46 | value: ${SECRET_FRITZBOX_USERNAME} 47 | - name: PASSWORD 48 | value: ${SECRET_FRITZBOX_PASSWORD} 49 | ports: 50 | - name: http-exporter 51 | containerPort: 9042 52 | protocol: TCP 53 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/prometheus-exporter/fritzbox-exporter/fritzbox-exporter-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # fritzbox-metrics-local-ingressroute 2 | 3 | --- 4 | apiVersion: traefik.containo.us/v1alpha1 5 | kind: IngressRoute 6 | metadata: 7 | name: fritzbox-metrics-local-ingressroute 8 | namespace: monitoring 9 | spec: 10 | entryPoints: 11 | - web 12 | routes: 13 | - match: Host(`fritzbox-metrics.${SECRET_DOMAIN_LOCAL_02}`) && PathPrefix(`/metrics`) 14 | kind: Rule 15 | services: 16 | - name: fritzbox-exporter 17 | port: 9042 18 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/prometheus-exporter/fritzbox-exporter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 
apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - fritzbox-exporter-deployment.yaml 6 | - fritzbox-exporter-ingressroute.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/prometheus-exporter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | # - speedtest-exporter 6 | # - fastcom-exporter 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/prometheus-exporter/pihole-exporter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - enc_pihole-exporter-secrets.yaml 6 | - pihole-exporter-deployment.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/prometheus-exporter/pihole-exporter/pihole-exporter-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: pihole-exporter 6 | namespace: dns 7 | labels: 8 | app: pihole-exporter 9 | spec: 10 | selector: 11 | app: pihole-exporter 12 | type: ClusterIP 13 | ports: 14 | - name: http-exporter 15 | port: 9617 16 | protocol: TCP 17 | targetPort: http-exporter 18 | 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | name: pihole-exporter 24 | namespace: dns 25 | labels: 26 | app: pihole-exporter 27 | spec: 28 | replicas: 1 29 | selector: 30 | matchLabels: 31 | app: pihole-exporter 32 | template: 33 | metadata: 34 | labels: 35 | app: pihole-exporter 36 | spec: 37 | nodeSelector: 38 | node-type: worker 39 | containers: 40 | - name: pihole-exporter 41 | image: 
ekofr/pihole-exporter:v0.0.11 42 | env: 43 | - name: INTERVAL 44 | value: "10s" 45 | - name: PIHOLE_HOSTNAME 46 | value: "pihole-tcp.dns.svc.cluster.local" 47 | # value: ${PIHOLE_SVC_LB_IP} 48 | envFrom: 49 | - secretRef: 50 | name: pihole-exporter-secrets 51 | ports: 52 | - name: http-exporter 53 | containerPort: 9617 54 | protocol: TCP 55 | livenessProbe: 56 | httpGet: 57 | path: /liveness 58 | port: http-exporter 59 | readinessProbe: 60 | httpGet: 61 | path: /readiness 62 | port: http-exporter 63 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/prometheus-exporter/speedtest-exporter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - speedtest-exporter-deployment.yaml 6 | - speedtest-exporter-ingressroute.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/prometheus-exporter/speedtest-exporter/speedtest-exporter-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: speedtest-exporter-service 6 | namespace: monitoring 7 | labels: 8 | app: speedtest-exporter 9 | spec: 10 | selector: 11 | app: speedtest-exporter 12 | type: ClusterIP 13 | ports: 14 | - name: http-exporter 15 | port: 9090 16 | targetPort: http-exporter 17 | protocol: TCP 18 | 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | name: speedtest-exporter-deployment 24 | namespace: monitoring 25 | labels: 26 | app: speedtest-exporter 27 | spec: 28 | replicas: 1 29 | selector: 30 | matchLabels: 31 | app: speedtest-exporter 32 | template: 33 | metadata: 34 | labels: 35 | app: speedtest-exporter 36 | spec: 37 | nodeSelector: 38 | node-type: worker 39 | containers: 40 | - name: 
speedtest-exporter-container 41 | image: danopstech/speedtest_exporter:latest 42 | command: ["/speedtest_exporter", "-server_id", "27345", "-server_fallback"] 43 | # env: 44 | # - name: SERVER_ID 45 | # value: "27345" 46 | ports: 47 | - name: http-exporter 48 | containerPort: 9090 49 | protocol: TCP 50 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/prometheus-exporter/speedtest-exporter/speedtest-exporter-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # speedtest-exporter-metrics-local-ingressroute 2 | 3 | --- 4 | apiVersion: traefik.containo.us/v1alpha1 5 | kind: IngressRoute 6 | metadata: 7 | name: speedtest-exporter-metrics-local-ingressroute 8 | namespace: monitoring 9 | spec: 10 | entryPoints: 11 | - web 12 | routes: 13 | - match: Host(`speedtest.${SECRET_DOMAIN_LOCAL_02}`) && PathPrefix(`/metrics`) 14 | kind: Rule 15 | services: 16 | - name: speedtest-exporter-service 17 | port: 9090 18 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/speedtest-tracker/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - speedtest-tracker-configmap.yaml 7 | - speedtest-tracker-pvc.yaml 8 | # - speedtest-tracker-deployment.yaml 9 | - speedtest-tracker-ingressroute.yaml 10 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/speedtest-tracker/speedtest-tracker-configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: speedtest-tracker-env-vars 6 | namespace: monitoring 7 | data: 8 | TZ: ${TIMEZONE} 9 | OOKLA_EULA_GDPR: "true" 10 | SLACK_WEBHOOK: 
${SECRET_SPEEDTEST_TRACKER_SLACK_WEBHOOK} 11 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/tools/speedtest-tracker/speedtest-tracker-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: speedtest-tracker-service 6 | namespace: monitoring 7 | labels: 8 | app: speedtest-tracker 9 | spec: 10 | selector: 11 | app: speedtest-tracker 12 | type: ClusterIP 13 | ports: 14 | - name: http-web 15 | port: 80 16 | targetPort: 80 17 | protocol: TCP 18 | 19 | --- 20 | apiVersion: apps/v1 21 | kind: Deployment 22 | metadata: 23 | name: speedtest-tracker-deployment 24 | namespace: monitoring 25 | labels: 26 | app: speedtest-tracker 27 | spec: 28 | replicas: 1 29 | selector: 30 | matchLabels: 31 | app: speedtest-tracker 32 | strategy: 33 | type: Recreate 34 | # rollingUpdate: 35 | # maxSurge: 1 36 | # maxUnavailable: 0 37 | # type: RollingUpdate 38 | template: 39 | metadata: 40 | labels: 41 | app: speedtest-tracker 42 | spec: 43 | nodeSelector: 44 | node-type: worker 45 | containers: 46 | - name: speedtest-tracker-container 47 | image: henrywhitaker3/speedtest-tracker:latest-arm 48 | imagePullPolicy: IfNotPresent 49 | envFrom: 50 | - configMapRef: 51 | name: speedtest-tracker-env-vars 52 | # env: 53 | # - name: TZ 54 | # value: ${TIMEZONE} 55 | # - name: OOKLA_EULA_GDPR 56 | # value: "true" 57 | # - name: SLACK_WEBHOOK 58 | # value: ${SECRET_SPEEDTEST_TRACKER_SLACK_WEBHOOK} 59 | ports: 60 | - name: http-web 61 | containerPort: 80 62 | protocol: TCP 63 | volumeMounts: 64 | - name: config 65 | mountPath: /config 66 | restartPolicy: Always 67 | volumes: 68 | - name: config 69 | persistentVolumeClaim: 70 | claimName: speedtest-tracker-config-pvc 71 | # emptyDir: 72 | # medium: Memory 73 | -------------------------------------------------------------------------------- 
/cluster/apps/monitoring/tools/speedtest-tracker/speedtest-tracker-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # speedtest-tracker-local-ingressroute 2 | # speedtest-tracker-http-ingressroute 3 | # speedtest-tracker-https-ingressroute 4 | 5 | --- 6 | apiVersion: traefik.containo.us/v1alpha1 7 | kind: IngressRoute 8 | metadata: 9 | name: speedtest-tracker-local-ingressroute 10 | namespace: monitoring 11 | spec: 12 | entryPoints: 13 | - web 14 | routes: 15 | - match: Host(`speedtest-tracker.${SECRET_DOMAIN_LOCAL_02}`) 16 | kind: Rule 17 | services: 18 | - name: speedtest-tracker-service 19 | port: 80 20 | 21 | --- 22 | apiVersion: traefik.containo.us/v1alpha1 23 | kind: IngressRoute 24 | metadata: 25 | name: speedtest-tracker-http-ingressroute 26 | namespace: monitoring 27 | spec: 28 | entryPoints: 29 | - web 30 | routes: 31 | - match: Host(`speedtest-tracker.${SECRET_DIGITALOCEAN_DOMAIN_02}`) 32 | kind: Rule 33 | middlewares: 34 | - name: https-redirect-scheme-middleware 35 | namespace: traefik 36 | services: 37 | - name: speedtest-tracker-service 38 | port: 80 39 | 40 | --- 41 | apiVersion: traefik.containo.us/v1alpha1 42 | kind: IngressRoute 43 | metadata: 44 | name: speedtest-tracker-https-ingressroute 45 | namespace: traefik 46 | annotations: 47 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 48 | spec: 49 | entryPoints: 50 | - websecure 51 | routes: 52 | - match: Host(`speedtest-tracker.${SECRET_DIGITALOCEAN_DOMAIN_02}`) 53 | kind: Rule 54 | middlewares: 55 | - name: authelia-middleware 56 | namespace: authentication 57 | - name: secure-headers-middleware 58 | namespace: traefik 59 | services: 60 | - name: speedtest-tracker-service 61 | namespace: monitoring 62 | port: 80 63 | tls: 64 | secretName: "${SECRET_DOMAIN/./-}-tls" 65 | -------------------------------------------------------------------------------- 
/cluster/apps/monitoring/tools/speedtest-tracker/speedtest-tracker-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: speedtest-tracker-config-pvc 6 | namespace: monitoring 7 | spec: 8 | storageClassName: longhorn-worker-node-storageclass 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 1Gi 14 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/uptime-kuma/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - uptime-kuma-service.yaml 6 | - uptime-kuma-pvc.yaml 7 | - uptime-kuma-deployment.yaml 8 | - uptime-kuma-ingressroute.yaml 9 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/uptime-kuma/uptime-kuma-deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | labels: 5 | app: uptime-kuma 6 | environment: production 7 | name: uptime-kuma 8 | namespace: monitoring 9 | spec: 10 | replicas: 1 11 | revisionHistoryLimit: 1 12 | selector: 13 | matchLabels: 14 | app: uptime-kuma 15 | environment: production 16 | strategy: 17 | rollingUpdate: 18 | maxSurge: 1 19 | maxUnavailable: 0 20 | type: RollingUpdate 21 | template: 22 | metadata: 23 | labels: 24 | app: uptime-kuma 25 | environment: production 26 | name: uptime-kuma 27 | spec: 28 | containers: 29 | - name: uptime-kuma 30 | image: louislam/uptime-kuma:1.19.6 31 | imagePullPolicy: IfNotPresent 32 | env: 33 | - name: UPTIME_KUMA_PORT 34 | value: "3001" 35 | - name: PORT 36 | value: "3001" 37 | ports: 38 | - containerPort: 3001 39 | name: http 40 | protocol: TCP 41 | resources: 42 | limits: 43 | cpu: 200m 44 | 
memory: 512Mi 45 | requests: 46 | cpu: 100m 47 | memory: 256Mi 48 | # livenessProbe: 49 | # failureThreshold: 3 50 | # exec: 51 | # command: 52 | # - node 53 | # - extra/healthcheck.js 54 | # initialDelaySeconds: 15 55 | # periodSeconds: 10 56 | # successThreshold: 1 57 | # timeoutSeconds: 5 58 | # readinessProbe: 59 | # failureThreshold: 3 60 | # httpGet: 61 | # path: / 62 | # port: 3001 63 | # scheme: HTTP 64 | # initialDelaySeconds: 15 65 | # periodSeconds: 10 66 | # successThreshold: 1 67 | # timeoutSeconds: 5 68 | securityContext: 69 | allowPrivilegeEscalation: true 70 | privileged: false 71 | volumeMounts: 72 | - mountPath: /app/data 73 | name: uptime-kuma-data 74 | nodeSelector: 75 | node-type: worker 76 | restartPolicy: Always 77 | volumes: 78 | - name: uptime-kuma-data 79 | persistentVolumeClaim: 80 | claimName: uptime-kuma-data-pvc 81 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/uptime-kuma/uptime-kuma-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: traefik.containo.us/v1alpha1 2 | kind: IngressRoute 3 | metadata: 4 | name: uptime-kuma-http-ingressroute 5 | namespace: monitoring 6 | spec: 7 | entryPoints: 8 | - web 9 | routes: 10 | - match: Host(`uptime.${SECRET_DOMAIN}`) 11 | kind: Rule 12 | middlewares: 13 | - name: https-redirect-scheme-middleware 14 | namespace: traefik 15 | services: 16 | - name: uptime-kuma-service 17 | port: 3001 18 | 19 | --- 20 | apiVersion: traefik.containo.us/v1alpha1 21 | kind: IngressRoute 22 | metadata: 23 | name: uptime-kuma-https-ingressroute 24 | namespace: traefik 25 | annotations: 26 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 27 | spec: 28 | entryPoints: 29 | - websecure 30 | routes: 31 | - match: Host(`uptime.${SECRET_DOMAIN}`) 32 | kind: Rule 33 | middlewares: 34 | - name: secure-headers-middleware 35 | namespace: traefik 36 | services: 37 | - name: 
uptime-kuma-service 38 | namespace: monitoring 39 | port: 3001 40 | tls: 41 | secretName: "${SECRET_DOMAIN/./-}-tls" 42 | options: 43 | name: default-tlsoption 44 | namespace: traefik 45 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/uptime-kuma/uptime-kuma-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: uptime-kuma-data-pvc 5 | namespace: monitoring 6 | spec: 7 | storageClassName: longhorn-worker-node-storageclass 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 1Gi 13 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/uptime-kuma/uptime-kuma-service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: uptime-kuma-service 5 | namespace: monitoring 6 | labels: 7 | app: uptime-kuma 8 | environment: production 9 | spec: 10 | selector: 11 | app: uptime-kuma 12 | environment: production 13 | ports: 14 | - name: http 15 | port: 3001 16 | protocol: TCP 17 | targetPort: http 18 | type: ClusterIP 19 | -------------------------------------------------------------------------------- /cluster/apps/nextcloud/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - namespace.yaml 6 | # - postgres 7 | # - redis 8 | # - nextcloud-pvcs.yaml 9 | # - nextcloud-helmrelease.yaml 10 | # - nextcloud-ingressroute.yaml 11 | # - nextcloud-middleware.yaml 12 | -------------------------------------------------------------------------------- /cluster/apps/nextcloud/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | 
kind: Namespace 4 | metadata: 5 | name: nextcloud 6 | -------------------------------------------------------------------------------- /cluster/apps/nextcloud/nextcloud-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # nextcloud-local-ingressroute 2 | # nextcloud-http-ingressroute 3 | # nextcloud-https-ingressroute 4 | 5 | --- 6 | apiVersion: traefik.containo.us/v1alpha1 7 | kind: IngressRoute 8 | metadata: 9 | name: nextcloud-local-ingressroute 10 | namespace: nextcloud 11 | spec: 12 | entryPoints: 13 | - web 14 | routes: 15 | - match: Host(`cloud.${SECRET_DOMAIN_LOCAL_02}`) 16 | kind: Rule 17 | services: 18 | - name: nextcloud-nextcloud 19 | port: 80 20 | 21 | --- 22 | apiVersion: traefik.containo.us/v1alpha1 23 | kind: IngressRoute 24 | metadata: 25 | name: nextcloud-http-ingressroute 26 | namespace: nextcloud 27 | spec: 28 | entryPoints: 29 | - web 30 | routes: 31 | - match: Host(`cloud.${SECRET_DIGITALOCEAN_DOMAIN_02}`) 32 | kind: Rule 33 | middlewares: 34 | - name: https-redirect-scheme-middleware 35 | namespace: traefik 36 | services: 37 | - name: nextcloud-nextcloud 38 | port: 80 39 | 40 | --- 41 | apiVersion: traefik.containo.us/v1alpha1 42 | kind: IngressRoute 43 | metadata: 44 | name: nextcloud-https-ingressroute 45 | namespace: traefik 46 | annotations: 47 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 48 | spec: 49 | entryPoints: 50 | - websecure 51 | routes: 52 | - match: Host(`cloud.${SECRET_DIGITALOCEAN_DOMAIN_02}`) 53 | kind: Rule 54 | middlewares: 55 | - name: nextcloud-middleware-regex 56 | namespace: nextcloud 57 | - name: secure-headers-middleware 58 | namespace: traefik 59 | services: 60 | - name: nextcloud-nextcloud 61 | namespace: nextcloud 62 | port: 80 63 | tls: 64 | secretName: "${SECRET_DOMAIN/./-}-tls" 65 | -------------------------------------------------------------------------------- /cluster/apps/nextcloud/nextcloud-middleware.yaml: 
-------------------------------------------------------------------------------- 1 | # nextcloud-middleware-regex 2 | # nextcloud-middleware-headers 3 | 4 | --- 5 | apiVersion: traefik.containo.us/v1alpha1 6 | kind: Middleware 7 | metadata: 8 | name: nextcloud-middleware-regex 9 | namespace: nextcloud 10 | spec: 11 | redirectRegex: 12 | permanent: true 13 | regex: "https://(.*)/.well-known/(card|cal)dav" 14 | replacement: "https://$1/remote.php/dav/" 15 | 16 | # --- 17 | # apiVersion: traefik.containo.us/v1alpha1 18 | # kind: Middleware 19 | # metadata: 20 | # name: nextcloud-middleware-headers 21 | # namespace: nextcloud 22 | # spec: 23 | # headers: 24 | # browserXssFilter: true 25 | # contentTypeNosniff: true 26 | # frameDeny: true 27 | # sslRedirect: true 28 | # forceSTSHeader: true 29 | # stsIncludeSubdomains: true 30 | # stsPreload: true 31 | # stsSeconds: 63072000 32 | # #customRequestHeaders: 33 | # #X-Frame-Options: "SAMEORIGIN" 34 | # #customFrameOptionsValue: "SAMEORIGIN" 35 | # #contentSecurityPolicy: "frame-ancestors 'self' nextcloud.${SECRET_DIGITALOCEAN_DOMAIN_02}" 36 | # #contentSecurityPolicy: "default-src 'self'; script-src https://nextcloud.${SECRET_DIGITALOCEAN_DOMAIN_02}" 37 | # contentSecurityPolicy: | 38 | # default-src 'none';form-action 'none';frame-ancestors 'none';base-uri 'none' 39 | # accessControlAllowMethods: 40 | # - "GET" 41 | # - "POST" 42 | # accessControlAllowOriginList: 43 | # #- "https://*.${SECRET_DIGITALOCEAN_DOMAIN_02}" 44 | # - "https://nextcloud.${SECRET_DIGITALOCEAN_DOMAIN_02}" 45 | # accessControlMaxAge: 100 46 | # addVaryHeader: true 47 | # referrerPolicy: "same-origin" 48 | -------------------------------------------------------------------------------- /cluster/apps/nextcloud/nextcloud-pvcs.yaml: -------------------------------------------------------------------------------- 1 | # --- 2 | # apiVersion: v1 3 | # kind: PersistentVolumeClaim 4 | # metadata: 5 | # name: nextcloud-data-pvc 6 | # namespace: nextcloud 7 | # 
annotations: 8 | # nfs.io/storage-path: "/data" 9 | # spec: 10 | # storageClassName: storage-nfs-provisioner 11 | # accessModes: 12 | # - ReadWriteOnce 13 | # resources: 14 | # requests: 15 | # storage: 1Gi 16 | 17 | # --- 18 | # apiVersion: v1 19 | # kind: PersistentVolumeClaim 20 | # metadata: 21 | # name: nextcloud-user-data-pvc 22 | # namespace: nextcloud 23 | # annotations: 24 | # nfs.io/storage-path: "/user" 25 | # spec: 26 | # storageClassName: storage-nfs-provisioner 27 | # accessModes: 28 | # - ReadWriteOnce 29 | # resources: 30 | # requests: 31 | # storage: 6Ti 32 | 33 | --- 34 | apiVersion: v1 35 | kind: PersistentVolumeClaim 36 | metadata: 37 | name: nextcloud-data-pvc 38 | namespace: nextcloud 39 | annotations: 40 | nfs.io/storage-path: "/test" 41 | spec: 42 | storageClassName: nfs-provisioner 43 | accessModes: 44 | - ReadWriteOnce 45 | resources: 46 | requests: 47 | storage: 1Gi 48 | -------------------------------------------------------------------------------- /cluster/apps/nextcloud/postgres/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - postgres-pvc.yaml 6 | # - postgres-helmrelease.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/nextcloud/postgres/postgres-helmrelease.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: nextcloud-postgres 6 | namespace: flux-system 7 | spec: 8 | chart: 9 | spec: 10 | chart: postgres 11 | sourceRef: 12 | kind: HelmRepository 13 | name: groundhog2k 14 | version: 0.3.3 15 | interval: 1m0s 16 | targetNamespace: nextcloud 17 | values: 18 | affinity: {} 19 | args: [] 20 | customConfig: "" 21 | customLivenessProbe: {} 22 | customReadinessProbe: {} 23 | customStartupProbe: {} 24 | 
env: 25 | - name: PGTZ 26 | value: ${TIMEZONE} 27 | - name: TZ 28 | value: ${TIMEZONE} 29 | fullnameOverride: "" 30 | image: 31 | pullPolicy: IfNotPresent 32 | repository: postgres 33 | tag: "" 34 | imagePullSecrets: [] 35 | livenessProbe: 36 | enabled: true 37 | failureThreshold: 3 38 | initialDelaySeconds: 10 39 | periodSeconds: 10 40 | successThreshold: 1 41 | timeoutSeconds: 5 42 | nameOverride: "" 43 | nodeSelector: 44 | node-type: worker 45 | podAnnotations: {} 46 | podManagementPolicy: OrderedReady 47 | podSecurityContext: 48 | fsGroup: 999 49 | readinessProbe: 50 | enabled: true 51 | failureThreshold: 3 52 | initialDelaySeconds: 10 53 | periodSeconds: 10 54 | successThreshold: 1 55 | timeoutSeconds: 5 56 | resources: {} 57 | revisionHistoryLimit: null 58 | securityContext: 59 | allowPrivilegeEscalation: false 60 | privileged: false 61 | readOnlyRootFilesystem: true 62 | runAsGroup: 999 63 | runAsNonRoot: true 64 | runAsUser: 999 65 | service: 66 | annotations: {} 67 | clusterIP: null 68 | loadBalancerIP: null 69 | nodePort: null 70 | port: 5432 71 | type: ClusterIP 72 | serviceAccount: 73 | annotations: {} 74 | create: false 75 | name: "" 76 | settings: 77 | authMethod: md5 78 | initDbArgs: null 79 | superuserPassword: ${SECRET_GLOBAL_POSTGRES_SUPERUSER_PASSWORD} 80 | startupProbe: 81 | enabled: true 82 | failureThreshold: 30 83 | initialDelaySeconds: 10 84 | periodSeconds: 10 85 | successThreshold: 1 86 | timeoutSeconds: 5 87 | storage: 88 | accessModes: 89 | - ReadWriteOnce 90 | className: null 91 | persistentVolumeClaimName: nextcloud-postgres-pvc 92 | requestedSize: null 93 | tolerations: [] 94 | updateStrategyType: RollingUpdate 95 | userDatabase: 96 | name: ${SECRET_NEXTCLOUD_DB_NAME} 97 | password: ${SECRET_NEXTCLOUD_DB_PASSWORD} 98 | user: ${SECRET_NEXTCLOUD_DB_USER} 99 | 100 | -------------------------------------------------------------------------------- /cluster/apps/nextcloud/postgres/postgres-pvc.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: nextcloud-postgres-pvc 6 | namespace: nextcloud 7 | spec: 8 | storageClassName: longhorn-worker-node-storageclass 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 5Gi 14 | -------------------------------------------------------------------------------- /cluster/apps/nextcloud/redis/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - redis-pvc.yaml 6 | # - redis-helmrelease.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/nextcloud/redis/redis-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: nextcloud-redis-pvc 6 | namespace: nextcloud 7 | spec: 8 | storageClassName: longhorn-worker-node-storageclass 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 2Gi 14 | -------------------------------------------------------------------------------- /cluster/apps/utils/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - namespace.yaml 7 | # - network-multitool 8 | - whoami 9 | -------------------------------------------------------------------------------- /cluster/apps/utils/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: utils 6 | -------------------------------------------------------------------------------- 
/cluster/apps/utils/network-multitool/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - network-multitool.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/utils/network-multitool/network-multitool.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: DaemonSet 4 | metadata: 5 | name: network-multitool 6 | namespace: utils 7 | labels: 8 | # tier: node 9 | app: network-multitool 10 | spec: 11 | selector: 12 | matchLabels: 13 | # tier: node 14 | app: network-multitool 15 | template: 16 | metadata: 17 | labels: 18 | tier: node 19 | app: network-multitool 20 | spec: 21 | nodeSelector: 22 | node-type: worker 23 | hostNetwork: true 24 | tolerations: 25 | - operator: Exists 26 | effect: NoSchedule 27 | containers: 28 | - name: network-multitool 29 | image: praqma/network-multitool 30 | env: 31 | - name: HTTP_PORT 32 | value: "1180" 33 | - name: HTTPS_PORT 34 | value: "11443" 35 | ports: 36 | - containerPort: 1180 37 | name: http-port 38 | - containerPort: 11443 39 | name: https-port 40 | resources: 41 | requests: 42 | cpu: "1m" 43 | memory: "20Mi" 44 | limits: 45 | cpu: "10m" 46 | memory: "20Mi" 47 | securityContext: 48 | runAsUser: 0 49 | capabilities: 50 | add: ["NET_ADMIN"] 51 | -------------------------------------------------------------------------------- /cluster/apps/utils/whoami/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - whoami-deployment.yaml 7 | - whoami-ingressroute.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/utils/whoami/whoami-deployment.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | kind: Service 3 | apiVersion: v1 4 | metadata: 5 | name: whoami 6 | namespace: utils 7 | spec: 8 | selector: 9 | app: whoami 10 | ports: 11 | - name: http 12 | port: 80 13 | 14 | --- 15 | kind: Deployment 16 | apiVersion: apps/v1 17 | metadata: 18 | name: whoami 19 | namespace: utils 20 | labels: 21 | app: whoami 22 | spec: 23 | replicas: 2 24 | selector: 25 | matchLabels: 26 | app: whoami 27 | template: 28 | metadata: 29 | labels: 30 | app: whoami 31 | spec: 32 | nodeSelector: 33 | node-type: worker 34 | containers: 35 | - name: whoami 36 | image: traefik/whoami 37 | env: 38 | - name: WHOAMI_NAME 39 | value: "just-a-test-name" 40 | ports: 41 | - name: web 42 | containerPort: 80 43 | -------------------------------------------------------------------------------- /cluster/apps/utils/whoami/whoami-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # whoami-local-ingressroute 2 | # whoami-http-ingressroute 3 | # whoami-https-ingressroute 4 | 5 | # --- 6 | # apiVersion: traefik.containo.us/v1alpha1 7 | # kind: IngressRoute 8 | # metadata: 9 | # name: whoami-local-ingressroute 10 | # namespace: utils 11 | # spec: 12 | # entryPoints: 13 | # - web 14 | # routes: 15 | # - match: Host(`whoami.${SECRET_DOMAIN_LOCAL}`) 16 | # kind: Rule 17 | # services: 18 | # - name: whoami 19 | # port: 80 20 | 21 | --- 22 | apiVersion: traefik.containo.us/v1alpha1 23 | kind: IngressRoute 24 | metadata: 25 | name: whoami-http-ingressroute 26 | namespace: utils 27 | spec: 28 | entryPoints: 29 | - web 30 | routes: 31 | - match: Host(`whoami.${SECRET_DOMAIN}`) 32 | kind: Rule 33 | middlewares: 34 | - name: https-redirect-scheme-middleware 35 | namespace: traefik 36 | services: 37 | - name: whoami 38 | port: 80 39 | 40 | --- 41 | apiVersion: traefik.containo.us/v1alpha1 42 | kind: IngressRoute 43 | metadata: 44 | name: whoami-https-ingressroute 45 | 
namespace: traefik 46 | annotations: 47 | # cert-manager.io/cluster-issuer: "letsencrypt-dns01-staging-do" 48 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 49 | spec: 50 | entryPoints: 51 | - websecure 52 | routes: 53 | - match: Host(`whoami.${SECRET_DOMAIN}`) 54 | kind: Rule 55 | middlewares: 56 | - name: authelia-middleware 57 | namespace: authentication 58 | - name: secure-headers-middleware 59 | namespace: traefik 60 | services: 61 | - name: whoami 62 | namespace: utils 63 | port: 80 64 | tls: 65 | secretName: "${SECRET_DOMAIN/./-}-tls" 66 | options: 67 | name: default-tlsoption 68 | namespace: traefik 69 | -------------------------------------------------------------------------------- /cluster/apps/vaultwarden/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - namespace.yaml 7 | - postgres 8 | - vaultwarden-config 9 | - vaultwarden-pvc.yaml 10 | # - vaultwarden-rbac.yaml 11 | # - vaultwarden-service.yaml 12 | # - vaultwarden-deployment.yaml 13 | # - vaultwarden-ingressroute.yaml 14 | # - vaultwarden-middleware.yaml 15 | -------------------------------------------------------------------------------- /cluster/apps/vaultwarden/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: vaultwarden 6 | -------------------------------------------------------------------------------- /cluster/apps/vaultwarden/postgres/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - postgres-pvc.yaml 7 | # - postgres-helmrelease.yaml 8 | -------------------------------------------------------------------------------- 
/cluster/apps/vaultwarden/postgres/postgres-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: vaultwarden-postgres-pvc 6 | namespace: vaultwarden 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | storageClassName: longhorn-worker-node-storageclass 11 | resources: 12 | requests: 13 | storage: 5Gi 14 | -------------------------------------------------------------------------------- /cluster/apps/vaultwarden/vaultwarden-config/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | configMapGenerator: 6 | - name: vaultwarden-config 7 | namespace: vaultwarden 8 | envs: 9 | - vaultwarden.env 10 | 11 | generatorOptions: 12 | disableNameSuffixHash: true 13 | labels: 14 | app: vaultwarden 15 | -------------------------------------------------------------------------------- /cluster/apps/vaultwarden/vaultwarden-config/vaultwarden-configmap_old.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: vaultwarden-config-test 6 | namespace: vaultwarden 7 | labels: 8 | app: vaultwarden 9 | data: 10 | DATABASE_URL: postgresql://${SECRET_VAULTWARDEN_DB_USER}:${SECRET_VAULTWARDEN_DB_PASSWORD}@vaultwarden-vaultwarden-postgres:5432/${SECRET_VAULTWARDEN_DB_NAME} 11 | # openssl rand -base64 48 12 | ADMIN_TOKEN: ${SECRET_VAULTWARDEN_ADMIN_TOKEN} 13 | SMTP_HOST: ${SECRET_GLOBAL_SMTP_HOST} 14 | SMTP_FROM: ${SECRET_GLOBAL_SMTP_FROM} 15 | SMTP_USERNAME: ${SECRET_GLOBAL_SMTP_USERNAME} 16 | SMTP_PASSWORD: ${SECRET_GLOBAL_SMTP_PASSWORD} 17 | SMTP_PORT: "587" 18 | SMTP_SSL: "true" 19 | TZ: ${TIMEZONE} 20 | # nginx-ingress-controller has built in support for Websockets 21 | # Project: https://github.com/kubernetes/ingress-nginx 22 | 
WEBSOCKET_ENABLED: "true" 23 | DATA_FOLDER: "/data" 24 | DOMAIN: "https://vault.${SECRET_DIGITALOCEAN_DOMAIN_02}" 25 | ROCKET_WORKERS: "5" 26 | SHOW_PASSWORD_HINT: "false" 27 | WEB_VAULT_ENABLED: "true" 28 | ROCKET_PORT: "8080" 29 | 30 | # Bitwarden RS settings 31 | SIGNUPS_ALLOWED: "true" 32 | LOG_FILE: "/data/bitwarden.log" 33 | -------------------------------------------------------------------------------- /cluster/apps/vaultwarden/vaultwarden-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: vaultwarden 6 | namespace: vaultwarden 7 | labels: 8 | app: vaultwarden 9 | spec: 10 | replicas: 1 11 | strategy: 12 | # rollingUpdate: 13 | # maxSurge: 1 14 | # maxUnavailable: 0 15 | # type: RollingUpdate 16 | type: Recreate 17 | selector: 18 | matchLabels: 19 | app: vaultwarden 20 | template: 21 | metadata: 22 | labels: 23 | app: vaultwarden 24 | name: vaultwarden 25 | annotations: 26 | configmap.reloader.stakater.com/reload: "vaultwarden-config" 27 | spec: 28 | nodeSelector: 29 | node-type: worker 30 | serviceAccountName: vaultwarden 31 | containers: 32 | - name: vaultwarden 33 | image: vaultwarden/server:1.25.0 34 | imagePullPolicy: IfNotPresent 35 | envFrom: 36 | - configMapRef: 37 | name: vaultwarden-config 38 | ports: 39 | - name: http 40 | containerPort: 8080 41 | protocol: TCP 42 | - name: websocket 43 | containerPort: 3012 44 | protocol: TCP 45 | startupProbe: 46 | tcpSocket: 47 | port: 8080 48 | initialDelaySeconds: 10 49 | timeoutSeconds: 5 50 | failureThreshold: 30 51 | successThreshold: 1 52 | periodSeconds: 10 53 | livenessProbe: 54 | tcpSocket: 55 | port: 8080 56 | initialDelaySeconds: 10 57 | timeoutSeconds: 5 58 | failureThreshold: 3 59 | successThreshold: 1 60 | periodSeconds: 10 61 | readinessProbe: 62 | tcpSocket: 63 | port: 8080 64 | initialDelaySeconds: 10 65 | timeoutSeconds: 5 66 | failureThreshold: 3 67 | successThreshold: 
1 68 | periodSeconds: 10 69 | resources: 70 | limits: 71 | cpu: 200m 72 | memory: 512Mi 73 | requests: 74 | cpu: 50m 75 | memory: 256Mi 76 | volumeMounts: 77 | - name: vaultwarden-data 78 | mountPath: /data 79 | restartPolicy: Always 80 | volumes: 81 | - name: vaultwarden-data 82 | persistentVolumeClaim: 83 | claimName: vaultwarden-data-pvc 84 | -------------------------------------------------------------------------------- /cluster/apps/vaultwarden/vaultwarden-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # vaultwarden-local-ingressroute 2 | # vaultwarden-http-ingressroute 3 | # vaultwarden-https-ingressroute 4 | 5 | # --- 6 | # apiVersion: traefik.containo.us/v1alpha1 7 | # kind: IngressRoute 8 | # metadata: 9 | # name: vaultwarden-local-ingressroute 10 | # namespace: vaultwarden 11 | # spec: 12 | # entryPoints: 13 | # - web 14 | # routes: 15 | # - match: Host(`vault.${SECRET_DOMAIN_LOCAL}`) 16 | # kind: Rule 17 | # services: 18 | # - name: vaultwarden 19 | # port: 80 20 | 21 | --- 22 | apiVersion: traefik.containo.us/v1alpha1 23 | kind: IngressRoute 24 | metadata: 25 | name: vaultwarden-http-ingressroute 26 | namespace: vaultwarden 27 | spec: 28 | entryPoints: 29 | - web 30 | routes: 31 | - match: Host(`vault.${SECRET_DOMAIN}`) 32 | kind: Rule 33 | middlewares: 34 | - name: https-redirect-scheme-middleware 35 | namespace: traefik 36 | services: 37 | - name: vaultwarden 38 | port: 80 39 | 40 | --- 41 | apiVersion: traefik.containo.us/v1alpha1 42 | kind: IngressRoute 43 | metadata: 44 | name: vaultwarden-https-ingressroute 45 | namespace: traefik 46 | annotations: 47 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 48 | spec: 49 | entryPoints: 50 | - websecure 51 | routes: 52 | - match: Host(`vault.${SECRET_DOMAIN}`) 53 | kind: Rule 54 | middlewares: 55 | - name: secure-headers-middleware 56 | namespace: traefik 57 | services: 58 | - name: vaultwarden 59 | namespace: vaultwarden 60 | 
port: 80 61 | - match: Host(`vault.${SECRET_DOMAIN}`) && Path(`/notifications/hub`) 62 | kind: Rule 63 | services: 64 | - name: vaultwarden 65 | namespace: vaultwarden 66 | port: 3012 67 | tls: 68 | secretName: "${SECRET_DOMAIN/./-}-tls" 69 | options: 70 | name: default-tlsoption 71 | namespace: traefik 72 | -------------------------------------------------------------------------------- /cluster/apps/vaultwarden/vaultwarden-middleware.yaml: -------------------------------------------------------------------------------- 1 | # vaultwarden-middleware-headers 2 | 3 | --- 4 | apiVersion: traefik.containo.us/v1alpha1 5 | kind: Middleware 6 | metadata: 7 | name: vaultwarden-middleware-headers 8 | namespace: vaultwarden 9 | spec: 10 | headers: 11 | browserXssFilter: true 12 | contentTypeNosniff: true 13 | frameDeny: true 14 | sslRedirect: true 15 | forceSTSHeader: true 16 | stsIncludeSubdomains: true 17 | stsPreload: true 18 | stsSeconds: 315360000 19 | # stsSeconds: 63072000 20 | # contentSecurityPolicy: | 21 | # default-src 'none';form-action 'none';frame-ancestors 'none';base-uri 'none' 22 | # script-src 'strict-dynamic' 'nonce-rAnd0m123' 'unsafe-inline' http: https:; 23 | contentSecurityPolicy: | 24 | default-src 'none'; 25 | style-src 'none'; 26 | form-action 'none'; 27 | frame-ancestors 'none'; 28 | script-src 'strict-dynamic' 'nonce-rAnd0m123' 'unsafe-inline' http: https:; 29 | object-src 'none'; 30 | base-uri 'none'; 31 | require-trusted-types-for 'script'; 32 | accessControlAllowMethods: 33 | - "GET" 34 | - "POST" 35 | accessControlAllowOriginList: 36 | - "https://vault.${SECRET_DIGITALOCEAN_DOMAIN_02}" 37 | accessControlMaxAge: 100 38 | addVaryHeader: true 39 | referrerPolicy: "same-origin" 40 | customFrameOptionsValue: SAMEORIGIN 41 | -------------------------------------------------------------------------------- /cluster/apps/vaultwarden/vaultwarden-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | 
apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: vaultwarden-data-pvc 6 | namespace: vaultwarden 7 | spec: 8 | storageClassName: longhorn-worker-node-storageclass 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 5Gi 14 | -------------------------------------------------------------------------------- /cluster/apps/vaultwarden/vaultwarden-rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: vaultwarden 6 | namespace: vaultwarden 7 | labels: 8 | app: vaultwarden 9 | 10 | --- 11 | apiVersion: rbac.authorization.k8s.io/v1 12 | kind: Role 13 | metadata: 14 | name: vaultwarden 15 | namespace: vaultwarden 16 | rules: 17 | - apiGroups: 18 | - "" 19 | resources: 20 | - configmaps 21 | resourceNames: 22 | - "vaultwarden-config" 23 | verbs: 24 | - get 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - secrets 29 | resourceNames: 30 | - "vaultwarden-admin-token" 31 | verbs: 32 | - get 33 | 34 | --- 35 | apiVersion: rbac.authorization.k8s.io/v1 36 | kind: RoleBinding 37 | metadata: 38 | name: vaultwarden 39 | namespace: vaultwarden 40 | roleRef: 41 | apiGroup: rbac.authorization.k8s.io 42 | kind: Role 43 | name: vaultwarden 44 | subjects: 45 | - kind: ServiceAccount 46 | name: vaultwarden 47 | namespace: vaultwarden 48 | -------------------------------------------------------------------------------- /cluster/apps/vaultwarden/vaultwarden-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: vaultwarden 6 | namespace: vaultwarden 7 | labels: 8 | app: vaultwarden 9 | spec: 10 | selector: 11 | app: vaultwarden 12 | ports: 13 | - name: http 14 | port: 80 15 | protocol: TCP 16 | targetPort: 8080 17 | - name: websocket 18 | port: 3012 19 | protocol: TCP 20 | targetPort: 3012 21 | sessionAffinity: None 22 | type: ClusterIP 23 | 
-------------------------------------------------------------------------------- /cluster/base/apps.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 3 | kind: Kustomization 4 | metadata: 5 | name: apps 6 | namespace: flux-system 7 | spec: 8 | timeout: 5m0s 9 | interval: 10m0s 10 | # dependsOn: 11 | # - name: core-ingress-controller 12 | path: ./cluster/apps 13 | prune: true 14 | sourceRef: 15 | kind: GitRepository 16 | name: flux-system 17 | decryption: 18 | provider: sops 19 | secretRef: 20 | name: sops-gpg 21 | postBuild: 22 | substitute: {} 23 | substituteFrom: 24 | - kind: ConfigMap 25 | name: cluster-settings 26 | - kind: Secret 27 | name: cluster-secrets 28 | -------------------------------------------------------------------------------- /cluster/base/cluster-settings.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | namespace: flux-system 6 | name: cluster-settings 7 | data: 8 | # CORE 9 | TRAEFIK_SVC_LB_IP: "192.168.178.240" 10 | # DNS 11 | UNBOUND_SVC_LB_IP: "192.168.178.122" 12 | PIHOLE_SVC_LB_IP: "192.168.178.124" 13 | BLOCKY_SVC_LB_IP: "192.168.178.125" 14 | # DNS STAGING 15 | UNBOUND_STAGING_SVC_LB_IP: "192.168.178.126" 16 | PIHOLE_STAGING_SVC_LB_IP: "192.168.178.127" 17 | BLOCKY_STAGING_SVC_LB_IP: "192.168.178.128" 18 | # SERVICES 19 | PROMETHEUS_SVC_LB_IP: "192.168.178.141" 20 | INFLUXDB_SVC_LB_IP: "192.168.178.142" 21 | # 22 | NEXT_K3S_VERSION: "v1.23.8+k3s2" 23 | NODE_MASTER_01_IP: "192.168.178.210" 24 | NODE_MASTER_02_IP: "192.168.178.211" 25 | NODE_MASTER_03_IP: "192.168.178.212" 26 | TIMEZONE: "Europe/Berlin" 27 | GLOBAL_SMTP_PORT: "587" 28 | # 29 | LONGHORN_DEFAULT_DATAPATH: "/storage/" 30 | LONGHORN_BACKUP_TARGET: "nfs://192.168.178.41:/longhorn" 31 | # 32 | GOOGLE_DNS1: "8.8.8.8" 33 | GOOGLE_DNS2: "8.8.4.4" 34 | # 35 | NFS_SERVER_IP: "192.168.178.110" 
36 | NFS_SERVER_PATH: "/KubeData" 37 | FRITZBOX_IP: "192.168.178.1" 38 | -------------------------------------------------------------------------------- /cluster/base/crds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 3 | kind: Kustomization 4 | metadata: 5 | name: crds 6 | namespace: flux-system 7 | spec: 8 | interval: 10m0s 9 | path: ./cluster/crds 10 | prune: true 11 | sourceRef: 12 | kind: GitRepository 13 | name: flux-system 14 | # healthChecks: 15 | # - apiVersion: apiextensions.k8s.io/v1 16 | # kind: CustomResourceDefinition 17 | # name: certificaterequests.cert-manager.io 18 | # - apiVersion: apiextensions.k8s.io/v1 19 | # kind: CustomResourceDefinition 20 | # name: certificates.cert-manager.io 21 | # - apiVersion: apiextensions.k8s.io/v1 22 | # kind: CustomResourceDefinition 23 | # name: challenges.acme.cert-manager.io 24 | # - apiVersion: apiextensions.k8s.io/v1 25 | # kind: CustomResourceDefinition 26 | # name: clusterissuers.cert-manager.io 27 | # - apiVersion: apiextensions.k8s.io/v1 28 | # kind: CustomResourceDefinition 29 | # name: issuers.cert-manager.io 30 | # - apiVersion: apiextensions.k8s.io/v1 31 | # kind: CustomResourceDefinition 32 | # name: orders.acme.cert-manager.io 33 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/authelia-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: authelia 6 | namespace: flux-system 7 | spec: 8 | interval: 1m0s 9 | url: https://charts.authelia.com 10 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/grafana-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: 
source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: grafana 6 | namespace: flux-system 7 | spec: 8 | interval: 1m0s 9 | url: https://grafana.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/groundhog2k-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: groundhog2k 6 | namespace: flux-system 7 | spec: 8 | interval: 1m0s 9 | url: https://groundhog2k.github.io/helm-charts/ 10 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/influxdata-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: influxdata 6 | namespace: flux-system 7 | spec: 8 | interval: 1m0s 9 | url: https://helm.influxdata.com 10 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/jetstack-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: jetstack 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | url: https://charts.jetstack.io 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/k8s-at-home-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: k8s-at-home 6 | namespace: flux-system 7 | spec: 8 | interval: 1m0s 9 | url: https://k8s-at-home.com/charts/ 10 | 
-------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/kured-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: kured 6 | namespace: flux-system 7 | spec: 8 | interval: 1m0s 9 | url: https://weaveworks.github.io/kured 10 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - metallb-charts.yaml 7 | - jetstack-charts.yaml 8 | - traefik-charts.yaml 9 | - nfs-provisioner-charts.yaml 10 | - longhorn-charts.yaml 11 | - stakater-charts.yaml 12 | - groundhog2k-charts.yaml 13 | - authelia-charts.yaml 14 | - prometheus-community-charts.yaml 15 | - grafana-charts.yaml 16 | - twuni-charts.yaml 17 | # - influxdata-charts.yaml 18 | # - k8s-at-home-charts.yaml 19 | # - kured-charts.yaml 20 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/longhorn-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: longhorn 6 | namespace: flux-system 7 | spec: 8 | interval: 1m0s 9 | url: https://charts.longhorn.io 10 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/metallb-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: metallb 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | 
url: https://metallb.github.io/metallb 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/nfs-provisioner-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: nfs-provisioner 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/ 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/prometheus-community-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: prometheus-community 6 | namespace: flux-system 7 | spec: 8 | interval: 1m0s 9 | url: https://prometheus-community.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/stakater-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: stakater 6 | namespace: flux-system 7 | spec: 8 | interval: 1m0s 9 | url: https://stakater.github.io/stakater-charts 10 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/traefik-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: traefik 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | url: https://helm.traefik.io/traefik 10 | timeout: 3m 11 | 
-------------------------------------------------------------------------------- /cluster/base/flux-system/charts/helm/twuni-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: twuni 6 | namespace: flux-system 7 | spec: 8 | interval: 1m0s 9 | url: https://helm.twun.io 10 | -------------------------------------------------------------------------------- /cluster/base/flux-system/charts/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - helm 6 | -------------------------------------------------------------------------------- /cluster/base/flux-system/gotk-patches.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 3 | kind: Kustomization 4 | metadata: 5 | name: flux-system 6 | namespace: flux-system 7 | spec: 8 | decryption: 9 | provider: sops 10 | secretRef: 11 | name: sops-gpg 12 | -------------------------------------------------------------------------------- /cluster/base/flux-system/gotk-sync.yaml: -------------------------------------------------------------------------------- 1 | # This manifest was generated by flux. DO NOT EDIT. 
2 | --- 3 | apiVersion: source.toolkit.fluxcd.io/v1beta2 4 | kind: GitRepository 5 | metadata: 6 | name: flux-system 7 | namespace: flux-system 8 | spec: 9 | interval: 1m0s 10 | ref: 11 | branch: main 12 | secretRef: 13 | name: flux-system 14 | url: ssh://git@github.com/untcha/rpi-k3s-cluster 15 | --- 16 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 17 | kind: Kustomization 18 | metadata: 19 | name: flux-system 20 | namespace: flux-system 21 | spec: 22 | interval: 10m0s 23 | path: ./cluster/base 24 | prune: true 25 | sourceRef: 26 | kind: GitRepository 27 | name: flux-system 28 | -------------------------------------------------------------------------------- /cluster/base/flux-system/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - gotk-components.yaml 6 | - gotk-sync.yaml 7 | - charts 8 | # - notifications 9 | 10 | patchesStrategicMerge: 11 | - gotk-patches.yaml 12 | -------------------------------------------------------------------------------- /cluster/base/flux-system/notifications/flux-alert.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: notification.toolkit.fluxcd.io/v1beta1 3 | kind: Alert 4 | metadata: 5 | name: flux-alert 6 | namespace: flux-system 7 | spec: 8 | providerRef: 9 | name: slack 10 | eventSeverity: info 11 | eventSources: 12 | - kind: GitRepository 13 | name: '*' 14 | - kind: Kustomization 15 | name: '*' 16 | -------------------------------------------------------------------------------- /cluster/base/flux-system/notifications/flux-slack-notification-provider.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: notification.toolkit.fluxcd.io/v1beta1 3 | kind: Provider 4 | metadata: 5 | name: slack 6 | namespace: flux-system 7 | spec: 8 | type: slack 9 | channel: 
flux-notifications 10 | secretRef: 11 | name: slack-webhook-url 12 | -------------------------------------------------------------------------------- /cluster/base/flux-system/notifications/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - enc_slack-webhook-url-secret.yaml 6 | - flux-slack-notification-provider.yaml 7 | - flux-alert.yaml 8 | -------------------------------------------------------------------------------- /cluster/core/cert-manager/controller/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - digitalocean-access-token.sops.yaml 7 | - cert-manager-helmrelease.yaml 8 | -------------------------------------------------------------------------------- /cluster/core/cert-manager/issuer/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - letsencrypt-staging-clusterissuer.yaml 7 | - letsencrypt-production-clusterissuer.yaml 8 | -------------------------------------------------------------------------------- /cluster/core/cert-manager/issuer/letsencrypt-production-clusterissuer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-dns01-production-do 6 | spec: 7 | acme: 8 | # The ACME server URL 9 | server: https://acme-v02.api.letsencrypt.org/directory 10 | # Email address used for ACME registration 11 | email: ${SECRET_EMAIL} 12 | # Name of a secret used to store the ACME account private key 13 | privateKeySecretRef: 14 | name: letsencrypt-production 15 | # Enable the 
DNS-01 challenge provider 16 | solvers: 17 | - dns01: 18 | digitalocean: 19 | tokenSecretRef: 20 | name: digitalocean-dns 21 | key: access-token 22 | -------------------------------------------------------------------------------- /cluster/core/cert-manager/issuer/letsencrypt-staging-clusterissuer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-dns01-staging-do 6 | spec: 7 | acme: 8 | # The ACME server URL 9 | server: https://acme-staging-v02.api.letsencrypt.org/directory 10 | # Email address used for ACME registration 11 | email: ${SECRET_EMAIL} 12 | # Name of a secret used to store the ACME account private key 13 | privateKeySecretRef: 14 | name: letsencrypt-staging 15 | # Enable the DNS-01 challenge provider 16 | solvers: 17 | - dns01: 18 | digitalocean: 19 | tokenSecretRef: 20 | name: digitalocean-dns 21 | key: access-token 22 | -------------------------------------------------------------------------------- /cluster/core/cert-manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - controller 7 | - issuer 8 | -------------------------------------------------------------------------------- /cluster/core/ingress/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - traefik 7 | - traefik-additions 8 | -------------------------------------------------------------------------------- /cluster/core/ingress/traefik-additions/basicauth-middleware.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.containo.us/v1alpha1 3 | kind: Middleware 4 | metadata: 5 | 
name: basicauth-middleware 6 | namespace: traefik 7 | spec: 8 | basicAuth: 9 | secret: basicauth-secret 10 | -------------------------------------------------------------------------------- /cluster/core/ingress/traefik-additions/error-pages/error-pages-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | name: error-pages 6 | namespace: traefik 7 | labels: 8 | app: error-pages 9 | spec: 10 | replicas: 1 11 | strategy: 12 | rollingUpdate: 13 | maxSurge: 0 14 | maxUnavailable: 1 15 | type: RollingUpdate 16 | selector: 17 | matchLabels: 18 | app: error-pages 19 | template: 20 | metadata: 21 | labels: 22 | app: error-pages 23 | spec: 24 | nodeSelector: 25 | node-type: worker 26 | # affinity: 27 | # podAffinity: 28 | # preferredDuringSchedulingIgnoredDuringExecution: 29 | # - weight: 100 30 | # podAffinityTerm: 31 | # labelSelector: 32 | # matchExpressions: 33 | # - key: app.kubernetes.io/name 34 | # operator: In 35 | # values: 36 | # - traefik 37 | # topologyKey: kubernetes.io/hostname 38 | # podAntiAffinity: 39 | # requiredDuringSchedulingIgnoredDuringExecution: 40 | # - labelSelector: 41 | # matchExpressions: 42 | # - key: app.kubernetes.io/name 43 | # operator: In 44 | # values: 45 | # - error-pages 46 | # topologyKey: kubernetes.io/hostname 47 | containers: 48 | - name: error-pages 49 | image: ghcr.io/tarampampam/error-pages:2.16.0 50 | imagePullPolicy: IfNotPresent 51 | env: 52 | - name: TEMPLATE_NAME 53 | value: "app-down" 54 | ports: 55 | - containerPort: 8080 56 | protocol: TCP 57 | securityContext: 58 | readOnlyRootFilesystem: true 59 | runAsNonRoot: true 60 | allowPrivilegeEscalation: false 61 | # Run the container as nobody:nogroup 62 | runAsUser: 65534 63 | runAsGroup: 65534 64 | capabilities: 65 | drop: 66 | - NET_RAW 67 | resources: 68 | requests: 69 | cpu: 50m 70 | memory: 16Mi 71 | limits: 72 | memory: 32Mi 73 | 
-------------------------------------------------------------------------------- /cluster/core/ingress/traefik-additions/error-pages/error-pages-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # error-pages-http-ingressroute 2 | # error-pages-https-ingressroute 3 | 4 | --- 5 | apiVersion: traefik.containo.us/v1alpha1 6 | kind: IngressRoute 7 | metadata: 8 | name: error-pages-http-ingressroute 9 | namespace: traefik 10 | spec: 11 | entryPoints: 12 | - web 13 | routes: 14 | - kind: Rule 15 | match: HostRegexp(`{host:.+}`) 16 | priority: 1 17 | services: 18 | - kind: Service 19 | name: error-pages 20 | port: 8080 21 | 22 | --- 23 | apiVersion: traefik.containo.us/v1alpha1 24 | kind: IngressRoute 25 | metadata: 26 | name: error-pages-https-ingressroute 27 | namespace: traefik 28 | spec: 29 | entryPoints: 30 | - websecure 31 | routes: 32 | - kind: Rule 33 | match: HostRegexp(`{host:.+}`) 34 | priority: 1 35 | services: 36 | - kind: Service 37 | name: error-pages 38 | port: 8080 39 | tls: 40 | secretName: "${SECRET_DOMAIN/./-}-tls" 41 | -------------------------------------------------------------------------------- /cluster/core/ingress/traefik-additions/error-pages/error-pages-middleware.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.containo.us/v1alpha1 3 | kind: Middleware 4 | metadata: 5 | name: error-pages-middleware 6 | namespace: traefik 7 | spec: 8 | errors: 9 | status: 10 | - "400-599" 11 | query: /{status}.html 12 | service: 13 | name: error-pages 14 | port: 8080 15 | -------------------------------------------------------------------------------- /cluster/core/ingress/traefik-additions/error-pages/error-pages-service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: error-pages 6 | namespace: traefik 7 | spec: 8 | selector: 9 | 
app: error-pages 10 | ports: 11 | - name: http 12 | port: 8080 13 | protocol: TCP 14 | type: ClusterIP 15 | -------------------------------------------------------------------------------- /cluster/core/ingress/traefik-additions/error-pages/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - error-pages-service.yaml 6 | - error-pages-deployment.yaml 7 | - error-pages-ingressroute.yaml 8 | - error-pages-middleware.yaml 9 | -------------------------------------------------------------------------------- /cluster/core/ingress/traefik-additions/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - basicauth-secret.sops.yaml 7 | - basicauth-middleware.yaml 8 | - traefik-ingressroute.yaml 9 | - traefik-middleware.yaml 10 | - traefik-tlsoption.yaml 11 | - error-pages 12 | -------------------------------------------------------------------------------- /cluster/core/ingress/traefik-additions/traefik-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # traefik-local-ingressroute 2 | # traefik-metrics-local-ingressroute 3 | # traefik-http-ingressroute 4 | # traefik-https-ingressroute 5 | 6 | # --- 7 | # apiVersion: traefik.containo.us/v1alpha1 8 | # kind: IngressRoute 9 | # metadata: 10 | # name: traefik-local-ingressroute 11 | # namespace: traefik 12 | # spec: 13 | # entryPoints: 14 | # - web 15 | # routes: 16 | # - match: Host(`traefik.${SECRET_DOMAIN_LOCAL}`) && (PathPrefix(`/dashboard`) || PathPrefix(`/api`)) 17 | # kind: Rule 18 | # services: 19 | # - name: api@internal 20 | # kind: TraefikService 21 | 22 | # --- 23 | # apiVersion: traefik.containo.us/v1alpha1 24 | # kind: IngressRoute 25 | # metadata: 26 | # name: 
traefik-metrics-local-ingressroute 27 | # namespace: traefik 28 | # spec: 29 | # entryPoints: 30 | # - web 31 | # routes: 32 | # - match: Host(`traefik.${SECRET_DOMAIN_LOCAL}`) && PathPrefix(`/metrics`) 33 | # kind: Rule 34 | # services: 35 | # - name: prometheus@internal 36 | # kind: TraefikService 37 | 38 | --- 39 | apiVersion: traefik.containo.us/v1alpha1 40 | kind: IngressRoute 41 | metadata: 42 | name: traefik-http-ingressroute 43 | namespace: traefik 44 | spec: 45 | entryPoints: 46 | - web 47 | routes: 48 | - match: Host(`traefik.${SECRET_DOMAIN}`) && (PathPrefix(`/dashboard`) || PathPrefix(`/api`)) 49 | kind: Rule 50 | middlewares: 51 | - name: https-redirect-scheme-middleware 52 | services: 53 | - name: api@internal 54 | kind: TraefikService 55 | 56 | --- 57 | apiVersion: traefik.containo.us/v1alpha1 58 | kind: IngressRoute 59 | metadata: 60 | name: traefik-https-ingressroute 61 | namespace: traefik 62 | annotations: 63 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 64 | spec: 65 | entryPoints: 66 | - websecure 67 | routes: 68 | - match: Host(`traefik.${SECRET_DOMAIN}`) && (PathPrefix(`/dashboard`) || PathPrefix(`/api`)) 69 | kind: Rule 70 | middlewares: 71 | # - name: basicauth-middleware 72 | # namespace: traefik 73 | - name: authelia-middleware 74 | namespace: authentication 75 | - name: secure-headers-middleware 76 | namespace: traefik 77 | services: 78 | - name: api@internal 79 | kind: TraefikService 80 | tls: 81 | secretName: "${SECRET_DOMAIN/./-}-tls" 82 | options: 83 | name: default-tlsoption 84 | namespace: traefik 85 | -------------------------------------------------------------------------------- /cluster/core/ingress/traefik-additions/traefik-middleware.yaml: -------------------------------------------------------------------------------- 1 | # https-redirect-scheme-middleware 2 | # secure-headers-middleware 3 | # traefik-middleware-headers 4 | 5 | --- 6 | apiVersion: traefik.containo.us/v1alpha1 7 | kind: Middleware 8 | 
metadata: 9 | name: https-redirect-scheme-middleware 10 | namespace: traefik 11 | spec: 12 | redirectScheme: 13 | scheme: https 14 | permanent: true 15 | port: "443" 16 | 17 | --- 18 | apiVersion: traefik.containo.us/v1alpha1 19 | kind: Middleware 20 | metadata: 21 | name: secure-headers-middleware 22 | namespace: traefik 23 | spec: 24 | headers: 25 | browserXssFilter: true 26 | contentTypeNosniff: true 27 | frameDeny: true 28 | sslRedirect: true 29 | forceSTSHeader: true 30 | stsIncludeSubdomains: true 31 | stsPreload: true 32 | stsSeconds: 315360000 33 | contentSecurityPolicy: | 34 | default-src 'none'; 35 | style-src 'none'; 36 | form-action 'none'; 37 | frame-ancestors 'none'; 38 | script-src 'strict-dynamic' 'nonce-rAnd0m123' 'unsafe-inline' http: https:; 39 | object-src 'none'; 40 | base-uri 'none'; 41 | require-trusted-types-for 'script'; 42 | accessControlAllowMethods: 43 | - "GET" 44 | - "POST" 45 | accessControlAllowOriginList: 46 | - "https://${SECRET_DOMAIN}" 47 | accessControlMaxAge: 100 48 | addVaryHeader: true 49 | referrerPolicy: "same-origin" 50 | customFrameOptionsValue: SAMEORIGIN 51 | 52 | # --- 53 | # apiVersion: traefik.containo.us/v1alpha1 54 | # kind: Middleware 55 | # metadata: 56 | # name: traefik-middleware-headers 57 | # namespace: traefik 58 | # spec: 59 | # headers: 60 | # browserXssFilter: true 61 | # contentTypeNosniff: true 62 | # frameDeny: true 63 | # sslRedirect: true 64 | # forceSTSHeader: true 65 | # stsIncludeSubdomains: true 66 | # stsPreload: true 67 | # stsSeconds: 315360000 68 | # # stsSeconds: 63072000 69 | # contentSecurityPolicy: | 70 | # default-src 'none';form-action 'none';frame-ancestors 'none';base-uri 'none' 71 | # accessControlAllowMethods: 72 | # - "GET" 73 | # - "POST" 74 | # accessControlAllowOriginList: 75 | # - "https://traefik.${SECRET_DIGITALOCEAN_DOMAIN_02}" 76 | # accessControlMaxAge: 100 77 | # addVaryHeader: true 78 | # referrerPolicy: "same-origin" 79 | # customFrameOptionsValue: SAMEORIGIN 80 | 
-------------------------------------------------------------------------------- /cluster/core/ingress/traefik-additions/traefik-tlsoption.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: traefik.containo.us/v1alpha1 3 | kind: TLSOption 4 | metadata: 5 | name: default-tlsoption 6 | namespace: traefik 7 | spec: 8 | minVersion: VersionTLS12 9 | maxVersion: VersionTLS13 10 | cipherSuites: 11 | - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 12 | - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 13 | - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 14 | - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 15 | - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 16 | - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 17 | curvePreferences: 18 | - CurveP521 19 | - CurveP384 20 | sniStrict: true 21 | preferServerCipherSuites: true 22 | -------------------------------------------------------------------------------- /cluster/core/ingress/traefik/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - traefik-helmrelease.yaml 7 | - wildcard-certificate 8 | 9 | # configMapGenerator: 10 | # - name: traefik-values 11 | # files: 12 | # - values.yaml=traefik-values.yaml 13 | -------------------------------------------------------------------------------- /cluster/core/ingress/traefik/wildcard-certificate/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - wildcard-certificate.yaml 7 | -------------------------------------------------------------------------------- /cluster/core/ingress/traefik/wildcard-certificate/traefik-letsencrypt-certificate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | 
kind: Certificate 4 | metadata: 5 | name: traefik-letsencrypt-production-certificate 6 | namespace: traefik 7 | spec: 8 | commonName: traefik.${SECRET_DIGITALOCEAN_DOMAIN_02} 9 | secretName: traefik-letsencrypt-production-certificate 10 | dnsNames: 11 | - traefik.${SECRET_DIGITALOCEAN_DOMAIN_02} 12 | issuerRef: 13 | name: letsencrypt-dns01-production-do 14 | kind: ClusterIssuer 15 | -------------------------------------------------------------------------------- /cluster/core/ingress/traefik/wildcard-certificate/wildcard-certificate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: Certificate 4 | metadata: 5 | name: "${SECRET_DOMAIN/./-}" 6 | namespace: traefik 7 | spec: 8 | secretName: "${SECRET_DOMAIN/./-}-tls" 9 | issuerRef: 10 | name: letsencrypt-dns01-production-do 11 | kind: ClusterIssuer 12 | commonName: "${SECRET_DOMAIN}" 13 | dnsNames: 14 | - "${SECRET_DOMAIN}" 15 | - "*.${SECRET_DOMAIN}" 16 | -------------------------------------------------------------------------------- /cluster/core/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - namespaces 7 | - network 8 | - cert-manager 9 | - ingress 10 | - storage 11 | -------------------------------------------------------------------------------- /cluster/core/namespaces/cert-manager.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: cert-manager 6 | -------------------------------------------------------------------------------- /cluster/core/namespaces/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - 
metallb-system.yaml 7 | - cert-manager.yaml 8 | - traefik.yaml 9 | - storage.yaml 10 | # - rook-ceph.yaml 11 | - longhorn-system.yaml 12 | -------------------------------------------------------------------------------- /cluster/core/namespaces/longhorn-system.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: longhorn-system 6 | -------------------------------------------------------------------------------- /cluster/core/namespaces/metallb-system.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: metallb-system 6 | -------------------------------------------------------------------------------- /cluster/core/namespaces/rook-ceph.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: rook-ceph 6 | -------------------------------------------------------------------------------- /cluster/core/namespaces/storage.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: storage 6 | -------------------------------------------------------------------------------- /cluster/core/namespaces/traefik.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: traefik 6 | -------------------------------------------------------------------------------- /cluster/core/network/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - metallb-system 7 | 
-------------------------------------------------------------------------------- /cluster/core/network/metallb-system/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | #- github.com/metallb/metallb//manifests?ref=v0.10.2 7 | - metallb-configmap.yaml 8 | - metallb-helmrelease.yaml 9 | -------------------------------------------------------------------------------- /cluster/core/network/metallb-system/metallb-configmap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: metallb-config 6 | namespace: metallb-system 7 | data: 8 | config: | 9 | address-pools: 10 | - name: core 11 | protocol: layer2 12 | addresses: 13 | - 192.168.178.240-192.168.178.250 14 | - name: dns 15 | protocol: layer2 16 | addresses: 17 | - 192.168.178.120-192.168.178.140 18 | - name: services 19 | protocol: layer2 20 | addresses: 21 | - 192.168.178.141-192.168.178.200 22 | -------------------------------------------------------------------------------- /cluster/core/storage/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - nfs 7 | - nfs-test 8 | - longhorn 9 | -------------------------------------------------------------------------------- /cluster/core/storage/longhorn/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - longhorn-helmrelease.yaml 7 | - storageclasses 8 | - longhorn-middleware.yaml 9 | - longhorn-ingressroute.yaml 10 | - tools 11 | -------------------------------------------------------------------------------- 
/cluster/core/storage/longhorn/longhorn-ingressroute.yaml: -------------------------------------------------------------------------------- 1 | # longhorn-local-ingressroute 2 | # longhorn-http-ingressroute 3 | # longhorn-https-ingressroute 4 | 5 | # --- 6 | # apiVersion: traefik.containo.us/v1alpha1 7 | # kind: IngressRoute 8 | # metadata: 9 | # name: longhorn-local-ingressroute 10 | # namespace: longhorn-system 11 | # spec: 12 | # entryPoints: 13 | # - web 14 | # routes: 15 | # - match: Host(`longhorn.${SECRET_DOMAIN_LOCAL}`) 16 | # kind: Rule 17 | # services: 18 | # - name: longhorn-frontend 19 | # port: 80 20 | 21 | --- 22 | apiVersion: traefik.containo.us/v1alpha1 23 | kind: IngressRoute 24 | metadata: 25 | name: longhorn-http-ingressroute 26 | namespace: longhorn-system 27 | spec: 28 | entryPoints: 29 | - web 30 | routes: 31 | - match: Host(`longhorn.${SECRET_DOMAIN}`) 32 | kind: Rule 33 | middlewares: 34 | - name: https-redirect-scheme-middleware 35 | namespace: traefik 36 | services: 37 | - name: longhorn-frontend 38 | port: 80 39 | 40 | --- 41 | apiVersion: traefik.containo.us/v1alpha1 42 | kind: IngressRoute 43 | metadata: 44 | name: longhorn-https-ingressroute 45 | namespace: traefik 46 | annotations: 47 | cert-manager.io/cluster-issuer: "letsencrypt-dns01-production-do" 48 | spec: 49 | entryPoints: 50 | - websecure 51 | routes: 52 | - match: Host(`longhorn.${SECRET_DOMAIN}`) 53 | kind: Rule 54 | middlewares: 55 | # - name: basicauth-middleware 56 | # namespace: traefik 57 | - name: authelia-middleware 58 | namespace: authentication 59 | - name: secure-headers-middleware 60 | namespace: traefik 61 | - name: longhorn-headers-middleware # avoid CORS problem 62 | namespace: longhorn-system 63 | services: 64 | - name: longhorn-frontend 65 | namespace: longhorn-system 66 | port: 80 67 | tls: 68 | secretName: "${SECRET_DOMAIN/./-}-tls" 69 | options: 70 | name: default-tlsoption 71 | namespace: traefik 72 | 
-------------------------------------------------------------------------------- /cluster/core/storage/longhorn/longhorn-middleware.yaml: -------------------------------------------------------------------------------- 1 | # longhorn-middleware-headers 2 | 3 | # https://longhorn.io/kb/troubleshooting-traefik-2.x-as-ingress-controller/ 4 | # Avoid CORS problem 5 | --- 6 | apiVersion: traefik.containo.us/v1alpha1 7 | kind: Middleware 8 | metadata: 9 | name: longhorn-headers-middleware 10 | namespace: longhorn-system 11 | spec: 12 | headers: 13 | customRequestHeaders: 14 | X-Forwarded-Proto: "https" 15 | -------------------------------------------------------------------------------- /cluster/core/storage/longhorn/storageclasses/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | # - longhorn-all-node-storageclass.yaml # storage class for volumes that can be created on all nodes (master or worker) 7 | # - longhorn-master-node-storageclass.yaml # storage class for volumes that can be created ONLY on low-capacity master nodes 8 | - longhorn-worker-node-storageclass.yaml # storage class for volumes that can be created ONLY on high-capacity worker nodes 9 | 10 | # - longhorn-test-pvc.yaml 11 | -------------------------------------------------------------------------------- /cluster/core/storage/longhorn/storageclasses/longhorn-all-node-storageclass.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: longhorn-all-node-storageclass 6 | annotations: 7 | storageclass.kubernetes.io/is-default-class: "false" 8 | provisioner: driver.longhorn.io 9 | allowVolumeExpansion: true 10 | reclaimPolicy: "Delete" 11 | volumeBindingMode: Immediate 12 | parameters: 13 | numberOfReplicas: "3" 14 | replicaAutoBalance: 
"best-effort" 15 | # dataLocality: "best-effort" 16 | staleReplicaTimeout: "30" 17 | fromBackup: "" 18 | fsType: "ext4" 19 | nodeSelector: "all" 20 | -------------------------------------------------------------------------------- /cluster/core/storage/longhorn/storageclasses/longhorn-master-node-storageclass.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: longhorn-master-node-storageclass 6 | annotations: 7 | storageclass.kubernetes.io/is-default-class: "false" 8 | provisioner: driver.longhorn.io 9 | allowVolumeExpansion: true 10 | reclaimPolicy: "Delete" 11 | volumeBindingMode: Immediate 12 | parameters: 13 | numberOfReplicas: "3" 14 | # replicaAutoBalance: "best-effort" 15 | # dataLocality: "best-effort" 16 | staleReplicaTimeout: "30" 17 | fromBackup: "" 18 | fsType: "ext4" 19 | # diskSelector: "low-capacity" 20 | nodeSelector: "master" 21 | -------------------------------------------------------------------------------- /cluster/core/storage/longhorn/storageclasses/longhorn-test-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: longhorn-test-all-pvc 6 | namespace: longhorn-system 7 | spec: 8 | storageClassName: longhorn-all-node-storageclass 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 1Gi 14 | 15 | --- 16 | apiVersion: v1 17 | kind: PersistentVolumeClaim 18 | metadata: 19 | name: longhorn-test-master-pvc 20 | namespace: longhorn-system 21 | spec: 22 | storageClassName: longhorn-master-node-storageclass 23 | accessModes: 24 | - ReadWriteOnce 25 | resources: 26 | requests: 27 | storage: 1Gi 28 | 29 | --- 30 | apiVersion: v1 31 | kind: PersistentVolumeClaim 32 | metadata: 33 | name: longhorn-test-worker-pvc 34 | namespace: longhorn-system 35 | spec: 36 | storageClassName: 
longhorn-worker-node-storageclass 37 | accessModes: 38 | - ReadWriteOnce 39 | resources: 40 | requests: 41 | storage: 1Gi 42 | -------------------------------------------------------------------------------- /cluster/core/storage/longhorn/storageclasses/longhorn-worker-node-storageclass.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: longhorn-worker-node-storageclass 6 | annotations: 7 | storageclass.kubernetes.io/is-default-class: "false" 8 | provisioner: driver.longhorn.io 9 | allowVolumeExpansion: true 10 | reclaimPolicy: "Delete" 11 | volumeBindingMode: Immediate 12 | parameters: 13 | numberOfReplicas: "3" 14 | # replicaAutoBalance: "best-effort" 15 | # dataLocality: "best-effort" 16 | staleReplicaTimeout: "30" 17 | fromBackup: "" 18 | fsType: "ext4" 19 | # diskSelector: "high-capacity" 20 | nodeSelector: "worker" 21 | -------------------------------------------------------------------------------- /cluster/core/storage/longhorn/tools/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | # - longhorn-volume-mounter.yaml 7 | # - longhorn-volume-migration-job.yaml 8 | -------------------------------------------------------------------------------- /cluster/core/storage/longhorn/tools/longhorn-volume-migration-job.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | namespace: dev-gitea # namespace where the pvc's exist 5 | name: longhorn-volume-migration-job 6 | spec: 7 | completions: 1 8 | parallelism: 1 9 | backoffLimit: 3 10 | template: 11 | metadata: 12 | name: longhorn-volume-migration-job 13 | labels: 14 | name: longhorn-volume-migration-job 15 | spec: 16 | restartPolicy: Never 17 | containers: 
18 | - name: longhorn-volume-migration-job 19 | image: praqma/network-multitool:extra 20 | tty: true 21 | command: [ "/bin/sh" ] 22 | args: [ "-c", "rsync -a --exclude 'lost+found' /mnt/old/ /mnt/new" ] 23 | # args: [ "-c", "cp -r -v /mnt/old /mnt/new" ] 24 | volumeMounts: 25 | - name: old-vol 26 | mountPath: /mnt/old 27 | - name: new-vol 28 | mountPath: /mnt/new 29 | volumes: 30 | - name: old-vol 31 | persistentVolumeClaim: 32 | claimName: postgres-pvc # change to data source pvc 33 | - name: new-vol 34 | persistentVolumeClaim: 35 | claimName: postgres-pvc-new # change to data target pvc 36 | -------------------------------------------------------------------------------- /cluster/core/storage/longhorn/tools/longhorn-volume-mounter.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Deployment 3 | apiVersion: apps/v1 4 | metadata: 5 | name: longhorn-volume-mounter 6 | namespace: longhorn-system 7 | labels: 8 | app: longhorn-volume-mounter 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: longhorn-volume-mounter 14 | template: 15 | metadata: 16 | labels: 17 | app: longhorn-volume-mounter 18 | spec: 19 | nodeSelector: 20 | node-type: worker 21 | containers: 22 | - name: longhorn-volume-mounter 23 | # image: praqma/network-multitool:extra 24 | image: busybox:1.35.0 25 | tty: true 26 | # volumeMounts: 27 | # - name: old-vol 28 | # mountPath: /mnt/old 29 | # - name: new-vol 30 | # mountPath: /mnt/new 31 | # resources: 32 | # requests: 33 | # cpu: "1m" 34 | # memory: "20Mi" 35 | # limits: 36 | # cpu: "10m" 37 | # memory: "20Mi" 38 | # securityContext: 39 | # runAsUser: 0 40 | # capabilities: 41 | # add: ["NET_ADMIN"] 42 | # volumes: 43 | # - name: old-vol 44 | # persistentVolumeClaim: 45 | # claimName: postgres-test-pvc # change to data source pvc 46 | # - name: new-vol 47 | # persistentVolumeClaim: 48 | # claimName: postgres-test-2-pvc # change to data target pvc 49 | 
-------------------------------------------------------------------------------- /cluster/core/storage/nfs-test/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - nfs-test-provisioner-helmrelease.yaml 7 | -------------------------------------------------------------------------------- /cluster/core/storage/nfs-test/nfs-test-provisioner-helmrelease.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: nfs-test-provisioner 6 | namespace: flux-system 7 | spec: 8 | chart: 9 | spec: 10 | chart: nfs-subdir-external-provisioner 11 | reconcileStrategy: ChartVersion 12 | sourceRef: 13 | kind: HelmRepository 14 | name: nfs-provisioner 15 | version: 4.0.16 16 | interval: 1m0s 17 | targetNamespace: storage 18 | values: 19 | affinity: {} 20 | image: 21 | pullPolicy: IfNotPresent 22 | repository: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner 23 | tag: v4.0.2 24 | imagePullSecrets: [] 25 | labels: {} 26 | leaderElection: 27 | enabled: true 28 | nfs: 29 | mountOptions: 30 | - nfsvers=4.0 31 | path: /nfs-test 32 | reclaimPolicy: Retain 33 | server: 192.168.178.41 34 | volumeName: nfs-test-storage-root 35 | nodeSelector: 36 | node-type: worker 37 | podAnnotations: {} 38 | podSecurityContext: {} 39 | podSecurityPolicy: 40 | enabled: false 41 | rbac: 42 | create: true 43 | replicaCount: 1 44 | resources: {} 45 | securityContext: {} 46 | serviceAccount: 47 | annotations: {} 48 | create: true 49 | name: null 50 | storageClass: 51 | accessModes: ReadWriteOnce 52 | allowVolumeExpansion: true 53 | annotations: {} 54 | archiveOnDelete: true 55 | create: true 56 | defaultClass: false 57 | name: nfs-test-provisioner 58 | onDelete: null 59 | pathPattern: 
${.PVC.namespace}/${.PVC.annotations.nfs.io/storage-path} 60 | reclaimPolicy: Delete 61 | strategyType: Recreate 62 | tolerations: [] 63 | 64 | -------------------------------------------------------------------------------- /cluster/core/storage/nfs/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - nfs-provisioner-helmrelease.yaml 7 | -------------------------------------------------------------------------------- /cluster/core/storage/nfs/nfs-provisioner-helmrelease.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: nfs-provisioner 6 | namespace: flux-system 7 | spec: 8 | chart: 9 | spec: 10 | chart: nfs-subdir-external-provisioner 11 | reconcileStrategy: ChartVersion 12 | sourceRef: 13 | kind: HelmRepository 14 | name: nfs-provisioner 15 | version: 4.0.16 16 | interval: 1m0s 17 | targetNamespace: storage 18 | values: 19 | affinity: {} 20 | image: 21 | pullPolicy: IfNotPresent 22 | repository: k8s.gcr.io/sig-storage/nfs-subdir-external-provisioner 23 | tag: v4.0.2 24 | imagePullSecrets: [] 25 | labels: {} 26 | leaderElection: 27 | enabled: true 28 | nfs: 29 | mountOptions: 30 | - nfsvers=4.0 31 | path: /KubeData 32 | reclaimPolicy: Retain 33 | server: 192.168.178.110 34 | volumeName: nfs-storage-root 35 | nodeSelector: 36 | node-type: worker 37 | podAnnotations: {} 38 | podSecurityContext: {} 39 | podSecurityPolicy: 40 | enabled: false 41 | rbac: 42 | create: true 43 | replicaCount: 1 44 | resources: {} 45 | securityContext: {} 46 | serviceAccount: 47 | annotations: {} 48 | create: true 49 | name: null 50 | storageClass: 51 | accessModes: ReadWriteOnce 52 | allowVolumeExpansion: true 53 | annotations: {} 54 | archiveOnDelete: true 55 | create: true 56 | defaultClass: false 57 
| name: nfs-provisioner 58 | onDelete: null 59 | pathPattern: ${.PVC.namespace}/${.PVC.annotations.nfs.io/storage-path} 60 | reclaimPolicy: Delete 61 | strategyType: Recreate 62 | tolerations: [] 63 | 64 | -------------------------------------------------------------------------------- /cluster/crds/cert-manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - https://github.com/cert-manager/cert-manager/releases/download/v1.8.2/cert-manager.crds.yaml 7 | -------------------------------------------------------------------------------- /cluster/crds/kube-prometheus-stack/crds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: GitRepository 4 | metadata: 5 | name: kube-prometheus-stack-source 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | url: https://github.com/prometheus-community/helm-charts.git 10 | ref: 11 | # renovate: registryUrl=https://prometheus-community.github.io/helm-charts 12 | tag: kube-prometheus-stack-16.10.0 13 | ignore: | 14 | # exclude all 15 | /* 16 | # include deploy crds dir 17 | !/charts/kube-prometheus-stack/crds 18 | --- 19 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 20 | kind: Kustomization 21 | metadata: 22 | name: kube-prometheus-stack-crds 23 | namespace: flux-system 24 | spec: 25 | interval: 15m 26 | prune: false 27 | sourceRef: 28 | kind: GitRepository 29 | name: kube-prometheus-stack-source 30 | healthChecks: 31 | - apiVersion: apiextensions.k8s.io/v1 32 | kind: CustomResourceDefinition 33 | name: alertmanagerconfigs.monitoring.coreos.com 34 | - apiVersion: apiextensions.k8s.io/v1 35 | kind: CustomResourceDefinition 36 | name: alertmanagers.monitoring.coreos.com 37 | - apiVersion: apiextensions.k8s.io/v1 38 | kind: CustomResourceDefinition 39 | name: 
podmonitors.monitoring.coreos.com 40 | - apiVersion: apiextensions.k8s.io/v1 41 | kind: CustomResourceDefinition 42 | name: probes.monitoring.coreos.com 43 | - apiVersion: apiextensions.k8s.io/v1 44 | kind: CustomResourceDefinition 45 | name: prometheuses.monitoring.coreos.com 46 | - apiVersion: apiextensions.k8s.io/v1 47 | kind: CustomResourceDefinition 48 | name: prometheusrules.monitoring.coreos.com 49 | - apiVersion: apiextensions.k8s.io/v1 50 | kind: CustomResourceDefinition 51 | name: servicemonitors.monitoring.coreos.com 52 | - apiVersion: apiextensions.k8s.io/v1 53 | kind: CustomResourceDefinition 54 | name: thanosrulers.monitoring.coreos.com 55 | -------------------------------------------------------------------------------- /cluster/crds/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - cert-manager 7 | - traefik 8 | # TODO: - kube-prometheus-stack 9 | -------------------------------------------------------------------------------- /cluster/crds/traefik/crds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: GitRepository 4 | metadata: 5 | name: traefik-crd-source 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | url: https://github.com/traefik/traefik-helm-chart.git 10 | ref: 11 | tag: v10.24.0 12 | ignore: | 13 | # exclude all 14 | /* 15 | # path to crds 16 | !/traefik/crds/ 17 | 18 | --- 19 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 20 | kind: Kustomization 21 | metadata: 22 | name: crds-traefik 23 | namespace: flux-system 24 | spec: 25 | interval: 30m 26 | prune: true 27 | sourceRef: 28 | kind: GitRepository 29 | name: traefik-crd-source 30 | healthChecks: 31 | - apiVersion: apiextensions.k8s.io/v1 32 | kind: CustomResourceDefinition 33 | name: ingressroutes.traefik.containo.us 34 
| - apiVersion: apiextensions.k8s.io/v1 35 | kind: CustomResourceDefinition 36 | name: ingressroutetcps.traefik.containo.us 37 | - apiVersion: apiextensions.k8s.io/v1 38 | kind: CustomResourceDefinition 39 | name: ingressrouteudps.traefik.containo.us 40 | - apiVersion: apiextensions.k8s.io/v1 41 | kind: CustomResourceDefinition 42 | name: middlewares.traefik.containo.us 43 | - apiVersion: apiextensions.k8s.io/v1 44 | kind: CustomResourceDefinition 45 | name: middlewaretcps.traefik.containo.us 46 | - apiVersion: apiextensions.k8s.io/v1 47 | kind: CustomResourceDefinition 48 | name: serverstransports.traefik.containo.us 49 | - apiVersion: apiextensions.k8s.io/v1 50 | kind: CustomResourceDefinition 51 | name: tlsoptions.traefik.containo.us 52 | - apiVersion: apiextensions.k8s.io/v1 53 | kind: CustomResourceDefinition 54 | name: tlsstores.traefik.containo.us 55 | - apiVersion: apiextensions.k8s.io/v1 56 | kind: CustomResourceDefinition 57 | name: traefikservices.traefik.containo.us 58 | -------------------------------------------------------------------------------- /cluster/crds/traefik/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 5 | resources: 6 | - crds.yaml 7 | -------------------------------------------------------------------------------- /docs/assets/images/logo-32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/untcha/rpi-k3s-cluster/0193c1e4a980a16b163b7dfbf7cca3fd97ecf2e5/docs/assets/images/logo-32.png -------------------------------------------------------------------------------- /docs/assets/images/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/untcha/rpi-k3s-cluster/0193c1e4a980a16b163b7dfbf7cca3fd97ecf2e5/docs/assets/images/logo.png 
-------------------------------------------------------------------------------- /docs/gpg-sops.md: -------------------------------------------------------------------------------- 1 | ## Installation 2 | 3 | Install `gnupg` and `sops` 4 | 5 | ```bash 6 | brew install gnupg sops 7 | ``` 8 | 9 | ## Import existing keys 10 | 11 | ```bash 12 | gpg --import public.key 13 | gpg --allow-secret-key-import --import private.key 14 | ``` 15 | 16 | ## Change trust 17 | 18 | ```bash 19 | gpg --edit-key [key-id] 20 | ``` 21 | 22 | run the `trust`command and change the trust. Or just reimport the trustlevel (if available): 23 | 24 | ```bash 25 | gpg --import-ownertrust < trustlevel.txt 26 | ``` 27 | 28 | ```bash 29 | gpg --export-ownertrust > trustlevel.txt 30 | ``` 31 | 32 | ## List keys 33 | 34 | ```bash 35 | gpg --list-keys 36 | ``` 37 | 38 | ## Export keys 39 | 40 | ```bash 41 | # public key 42 | gpg --output public-key.asc --armor --export [key-id] 43 | 44 | # private key 45 | gpg --output private-key.asc --armor --export-secret-key [key-id] 46 | 47 | # create backup key 48 | gpg --output backup.asc --armor --export-secret-keys --export-options export-backup [key-id] 49 | 50 | # This will export all necessary information to restore the secrets keys including the trust database information. 51 | # https://www.jabberwocky.com/software/paperkey/ 52 | ``` 53 | 54 | ## Encrypt and decrypt with `gpg` 55 | 56 | Encrypt: 57 | 58 | ```bash 59 | gpg -e -o file.txt.encrypted -r [key-id] file.txt 60 | ``` 61 | 62 | Decrypt: 63 | 64 | ```bash 65 | gpg -d -o file.txt.decrypted file.txt.encrypted 66 | ``` 67 | 68 | ## Encrypt secrets in config files with `SOPS` 69 | 70 | Find the public fingerprint for the key: 71 | 72 | ```bash 73 | gpg --list-keys "[key-id]" | grep pub -A 1 | grep -v pub 74 | ``` 75 | 76 | Use sops to encrypt the sensitive fields in a yaml file (e.g. 
my kubeconfig): 77 | 78 | ```bash 79 | # --in-place: encrypts in the same file no new file with the encrypted fields will be created 80 | # --encrypted-regex: encrypt only the fields mentioned in the regex 81 | # $KEY_FP is the public fingerprint for the key 82 | 83 | sops --encrypt --in-place --encrypted-regex 'certificate-authority-data|client-certificate-data|client-key-data' --pgp $KEY_FP config.yaml 84 | ``` 85 | 86 | ```bash 87 | # same creating a new file 88 | 89 | sops --encrypted-regex 'certificate-authority-data|client-certificate-data|client-key-data' --pgp $KEY_FP --encrypt config.yaml > config_encrypted.yaml 90 | ``` 91 | 92 | ```bash 93 | # encrypts ALL fields in the given yaml 94 | 95 | sops --pgp $KEY_FP --encrypt config.yaml > config_encrypted.yaml 96 | ``` 97 | 98 | ## Decrypt secrets in config files with `SOPS` 99 | 100 | For decryption `gpg-agent` needs to be unlocked with the private key password before. First run: 101 | 102 | ```bash 103 | gpgconf --reload gpg-agent 104 | ``` 105 | 106 | And then append the following to your `.zshrc` (or `.bash_profile`): 107 | 108 | ```bash 109 | GPG_TTY=$(tty) 110 | export GPG_TTY 111 | ``` 112 | 113 | Now decrypt your config file: 114 | 115 | ```bash 116 | sops --decrypt config_encrypted.yaml > config_decrypted.yaml 117 | ``` 118 | -------------------------------------------------------------------------------- /docs/helm-charts.md: -------------------------------------------------------------------------------- 1 | ## Helm Repositories 2 | 3 | ```bash 4 | helm repo add authelia https://charts.authelia.com 5 | helm repo add grafana https://grafana.github.io/helm-charts 6 | helm repo add groundhog2k https://groundhog2k.github.io/helm-charts/ 7 | helm repo add influxdata https://helm.influxdata.com 8 | helm repo add jetstack https://charts.jetstack.io 9 | helm repo add k8s-at-home https://k8s-at-home.com/charts/ 10 | helm repo add longhorn https://charts.longhorn.io 11 | helm repo add metallb 
https://metallb.github.io/metallb 12 | helm repo add portainer https://portainer.github.io/k8s/ 13 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 14 | helm repo add stakater https://stakater.github.io/stakater-charts 15 | helm repo add traefik https://helm.traefik.io/traefik 16 | ``` 17 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | # Home 2 | 3 | 4 | 5 | 6 | 7 | ### My Kubernetes Cluster (k3s) managed by GitOps (Flux2) 8 | 9 |
10 |
11 |
12 | 13 | [![k3s](https://img.shields.io/badge/k3s-v1.23.8-yellow?style=for-the-badge&logo=kubernetes)](https://k3s.io/) 14 | [![flux2](https://img.shields.io/badge/flux2-v0.31.3-blue?style=for-the-badge)](https://fluxcd.io/) 15 | [![raspberrypi](https://img.shields.io/badge/Raspberry_Pi-8x_Model_4B_(4GB)-A22846?logo=raspberrypi&logoColor=A22846&style=for-the-badge)](https://www.raspberrypi.org/) 16 | [![ubuntu-server](https://img.shields.io/badge/ubuntu_server_22.04_LTS-E95420?logo=ubuntu&logoColor=E95420&style=for-the-badge)](https://ubuntu.com/download/raspberry-pi) 17 | [![pre-commit](https://img.shields.io/badge/pre--commit-enabled-brightgreen?logo=pre-commit&style=for-the-badge)](https://github.com/pre-commit/pre-commit) 18 | -------------------------------------------------------------------------------- /docs/k3s-install.md: -------------------------------------------------------------------------------- 1 | Intro blabla :-) 2 | 3 | -------------------------------------------------------------------------------- /docs/kubectl.md: -------------------------------------------------------------------------------- 1 | ## Collection of kubectl commands 2 | 3 | ```bash 4 | kubectl scale -n deployment --replicas=1 5 | ``` 6 | 7 | ```bash 8 | kubectl -n rollout restart deployment 9 | ``` 10 | -------------------------------------------------------------------------------- /docs/neofetch.md: -------------------------------------------------------------------------------- 1 | ```bash 2 | sudo wget -O /usr/local/bin/neofetch https://raw.githubusercontent.com/dylanaraps/neofetch/master/neofetch 3 | ``` 4 | 5 | ```bash 6 | sudo chmod a+x /usr/local/bin/neofetch 7 | ``` 8 | 9 | ```bash 10 | neofetch --version 11 | ``` 12 | 13 | ```bash 14 | neofetch 15 | ``` 16 | 17 | ```bash 18 | sudo rm -rf /usr/local/bin/neofetch 19 | ``` 20 | -------------------------------------------------------------------------------- /docs/storage-settings.md: 
-------------------------------------------------------------------------------- 1 | Intro blabla :-) 2 | 3 | ## Identifying disks for storage 4 | 5 | ``` bash 6 | ansible all -b -m shell -a "lsblk -f" 7 | ``` 8 | 9 | ``` bash 10 | ansible all -b -m shell -a "wipefs -a /dev/{{ var_disk }}" 11 | ``` 12 | 13 | ``` bash 14 | ansible all -b -m shell -a "mkfs.ext4 /dev/{{ var_disk }}" 15 | ``` 16 | 17 | ``` bash 18 | ansible all -b -m shell -a "mkdir /storage" 19 | ``` 20 | 21 | ``` bash 22 | ansible all -b -m shell -a "mount /dev/{{ var_disk }} /storage" 23 | ``` 24 | 25 | ``` bash 26 | ansible all -b -m shell -a "blkid -s UUID -o value /dev/{{ var_disk }}" 27 | ``` 28 | 29 | ``` bash 30 | ansible all -b -m shell -a "echo 'UUID={{ var_uuid }} /storage ext4 defaults 0 2' | tee -a /etc/fstab" 31 | ``` 32 | 33 | ``` bash 34 | ansible all -b -m shell -a "grep UUID /etc/fstab" 35 | ``` 36 | 37 | ``` bash 38 | # Make sure mount have no issues 39 | ansible all -b -m shell -a "mount -a" 40 | ``` 41 | 42 | ``` bash 43 | ansible all -b -m apt -a "name=open-iscsi state=present" 44 | ``` 45 | 46 | ``` bash 47 | ansible all -b -m shell -a "systemctl start open-iscsi" 48 | ``` 49 | 50 | ``` bash 51 | ansible all -b -m shell -a "apt install -y nfs-common" 52 | ansible all -b -m shell -a "apt autoremove -y" 53 | ``` 54 | 55 | ``` bash 56 | ansible all -b -m reboot 57 | ansible masters -b -m reboot 58 | ansible workers -b -m reboot 59 | ``` 60 | -------------------------------------------------------------------------------- /mkdocs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | site_name: untcha | Raspberry Pi K3S Cluster 3 | site_description: >- 4 | My home Kubernetes (k3s) cluster managed by GitOps (Flux2) 5 | site_author: Alexander Untch 6 | site_url: https://example.com 7 | copyright: Copyright © 2021 Alexander Untch 8 | 9 | # Repository 10 | 11 | # Page tree 12 | nav: 13 | - Home: index.md 14 | - Nodes Settings: nodes-settings.md 
15 | - OS Settings: os-settings.md 16 | - Storage Settings: storage-settings.md 17 | - Install K3S: k3s-install.md 18 | - Helm Charts: helm-charts.md 19 | - Flux: flux.md 20 | - Netboot (PXE + iSCSI): netboot.md 21 | - Encryption with GPG and Mozilla SOPS: gpg-sops.md 22 | - System Maintenance: system-maintenance.md 23 | - kubectl: kubectl.md 24 | - Misc Guides: 25 | - Raspberry Setup: rpi-setup.md 26 | - SSH Guide: ssh.md 27 | - Neofetch: neofetch.md 28 | 29 | # Theme settings 30 | theme: 31 | name: material 32 | font: 33 | text: Roboto 34 | code: Roboto Mono 35 | logo: assets/images/logo.png 36 | favicon: assets/images/logo.png 37 | features: 38 | - navigation.top 39 | palette: 40 | - scheme: default 41 | primary: indigo 42 | accent: blue 43 | toggle: 44 | icon: material/toggle-switch-off-outline 45 | name: Switch to dark mode 46 | - scheme: slate 47 | primary: blue 48 | accent: indigo 49 | toggle: 50 | icon: material/toggle-switch 51 | name: Switch to light mode 52 | 53 | # Extensions 54 | markdown_extensions: 55 | - admonition 56 | - abbr 57 | - attr_list 58 | - def_list 59 | - footnotes 60 | - meta 61 | - md_in_html 62 | - toc: 63 | permalink: true 64 | - pymdownx.arithmatex: 65 | generic: true 66 | - pymdownx.betterem: 67 | smart_enable: all 68 | - pymdownx.highlight 70 | - pymdownx.emoji: 71 | emoji_index: !!python/name:materialx.emoji.twemoji 72 | emoji_generator: !!python/name:materialx.emoji.to_svg 74 | - pymdownx.inlinehilite 75 | - pymdownx.keys 76 | - pymdownx.mark 77 | - pymdownx.smartsymbols 78 | - pymdownx.superfences: 79 | custom_fences: 80 | - name: mermaid 81 | class: mermaid-experimental 82 | format: !!python/name:pymdownx.superfences.fence_code_format 83 | - pymdownx.tabbed 84 | - pymdownx.tasklist: 85 | custom_checkbox: true 86 | - pymdownx.tilde 87 | -------------------------------------------------------------------------------- /setup/01-bootstrap-cluster.sh: 
--------------------------------------------------------------------------------
#!/bin/bash

# https://github.com/alexellis/k3sup
#
# Bootstrap the HA k3s cluster: install the first control-plane node, join two
# more server nodes, then join the NUC and Raspberry Pi agent nodes.
# Required environment: K3S_CONTROL_0{1,2,3}_IP, K3S_NODE_NUC_0{1,2},
# K3S_NODE_RPI_0{1,2}, K3S_CLUSTER_USER, K3S_CLUSTER_VERSION,
# K3S_CLUSTER_CONTEXT, K3S_CLUSTER_KUBECONFIG_NAME, K3S_CLUSTER_SSH_KEY_PATH.

# Fail fast: abort on the first failed k3sup call or unset variable instead of
# silently continuing to join nodes against a broken control plane.
set -euo pipefail

# Server flags shared by every control-plane node: the embedded cloud
# controller, traefik, servicelb and local-storage are disabled (the cluster
# provides its own replacements).
K3S_SERVER_EXTRA_ARGS="--disable-cloud-controller --disable traefik --disable servicelb --disable local-storage"

# Join an additional control-plane (server) node to the cluster.
# $1 - IP address of the node to join
join_server() {
  k3sup join \
    --ip "$1" \
    --user "$K3S_CLUSTER_USER" \
    --server-ip "$K3S_CONTROL_01_IP" \
    --server-user "$K3S_CLUSTER_USER" \
    --server \
    --k3s-extra-args "$K3S_SERVER_EXTRA_ARGS" \
    --k3s-version "$K3S_CLUSTER_VERSION" \
    --ssh-key "$K3S_CLUSTER_SSH_KEY_PATH"
}

# Join a worker (agent) node to the cluster.
# $1 - IP address of the node to join
join_agent() {
  k3sup join \
    --ip "$1" \
    --user "$K3S_CLUSTER_USER" \
    --server-ip "$K3S_CONTROL_01_IP" \
    --k3s-version "$K3S_CLUSTER_VERSION" \
    --ssh-key "$K3S_CLUSTER_SSH_KEY_PATH"
}

echo "Installing Control Node 1"
k3sup install \
  --ip "$K3S_CONTROL_01_IP" \
  --user "$K3S_CLUSTER_USER" \
  --cluster \
  --merge --local-path "$HOME/.kube/config-files/$K3S_CLUSTER_KUBECONFIG_NAME" \
  --k3s-extra-args "$K3S_SERVER_EXTRA_ARGS" \
  --context "$K3S_CLUSTER_CONTEXT" \
  --k3s-version "$K3S_CLUSTER_VERSION" \
  --ssh-key "$K3S_CLUSTER_SSH_KEY_PATH"

echo "Installing Control Node 2"
join_server "$K3S_CONTROL_02_IP"

echo "Installing Control Node 3"
join_server "$K3S_CONTROL_03_IP"

echo "Installing Node NUC 1"
join_agent "$K3S_NODE_NUC_01"

echo "Installing Node NUC 2"
join_agent "$K3S_NODE_NUC_02"

echo "Installing Node RPi 1"
join_agent "$K3S_NODE_RPI_01"

echo "Installing Node RPi 2"
join_agent "$K3S_NODE_RPI_02"
-------------------------------------------------------------------------------- /setup/03-bootstrap-flux.sh: --------------------------------------------------------------------------------
#!/bin/bash

# Bootstrap Flux (GitOps) on the freshly installed cluster: create the
# flux-system namespace, store the SOPS GPG private key as a secret so Flux
# can decrypt sops-encrypted manifests, then bootstrap against GitHub.
# Required environment: K3S_CLUSTER_GPG_KEY_NAME, GITHUB_USER,
# GITHUB_REPOSITORY, FLUX_VERSION.

# Fail fast on errors, unset variables and broken pipes (e.g. a failing gpg
# export must not create an empty sops-gpg secret).
set -euo pipefail

echo "Create flux-system namespace"
# Idempotent: render with a client-side dry run and apply, so re-running this
# script does not fail when the namespace already exists.
kubectl create namespace flux-system --dry-run=client -o yaml | kubectl apply -f -

echo "Create flux secret"
gpg --export-secret-keys --armor "${K3S_CLUSTER_GPG_KEY_NAME}" |
kubectl create secret generic sops-gpg \
  --namespace=flux-system \
  --from-file=sops.asc=/dev/stdin

echo "Bootstrap GitHub and deploy workload"
flux bootstrap github \
  --owner="$GITHUB_USER" \
  --repository="$GITHUB_REPOSITORY" \
  --private=true \
  --personal=true \
  --branch=main \
  --path=./cluster/base \
  --version="$FLUX_VERSION" \
  --network-policy=false
--------------------------------------------------------------------------------