├── docs ├── CNAME ├── cluster_setup │ ├── adding_sops_support_to_flux.md │ ├── setting_up_flux.md │ └── installing_k3s.md ├── index.md ├── miscellaneous │ ├── cloudflare_dns_challenge.md │ ├── metallb_not_working_raspberry_wifi.md │ ├── enabling_WakeOnLan_on_NIC.md │ ├── cloudflare_ddns_not_working_with_openwrt.md │ ├── tips_and_tricks.md │ └── cloudflare_port_forwarding_openwrt.md ├── _static │ └── custom.css └── apps │ └── oauth2-proxy.md ├── ansible ├── poetry.toml ├── .ansible-lint ├── requirements.yaml ├── inventory │ └── hosts.yaml ├── pyproject.toml └── playbooks │ ├── setup-laptop.yaml │ ├── files │ └── sshd_config │ └── setup-worker.yaml ├── cluster ├── apps │ ├── vpn │ │ ├── namespace.yaml │ │ ├── v2ray │ │ │ ├── pvc.yaml │ │ │ ├── svc.yaml │ │ │ ├── ingress.yaml │ │ │ ├── deployment.yaml │ │ │ └── configmap.yaml │ │ ├── wireguard │ │ │ ├── pvc.yaml │ │ │ ├── svc.yaml │ │ │ └── deployment.yaml │ │ └── wstunnel │ │ │ ├── svc.yaml │ │ │ ├── ingress.yaml │ │ │ ├── deployment.yaml │ │ │ └── secret.enc.yaml │ ├── monitoring │ │ ├── namespace.yml │ │ ├── kube-prometheus-stack │ │ │ ├── scrapeconfigs │ │ │ │ └── crowdsec.yaml │ │ │ └── prometheus-rules │ │ │ │ ├── dockerhub-rate-limit-rule.yaml │ │ │ │ ├── oom-rule.yaml │ │ │ │ └── flux.yaml │ │ ├── ntfy-alertmanager │ │ │ ├── svc.yaml │ │ │ └── deployment.yaml │ │ ├── ntfy │ │ │ ├── stunnel │ │ │ │ ├── certificate.yaml │ │ │ │ └── stunnel-configmap.yaml │ │ │ ├── pvc.yaml │ │ │ └── configmap.yaml │ │ ├── promtail │ │ │ └── promtail-helm-release.yaml │ │ └── grafana │ │ │ └── secrets.enc.yaml │ ├── networking │ │ ├── namespace.yaml │ │ ├── nginx │ │ │ ├── udp-services.yaml │ │ │ ├── tcp-services.yaml │ │ │ ├── configmap.yaml │ │ │ └── svc.yaml │ │ ├── wildcard-certificate │ │ │ └── certificate.yaml │ │ ├── crowdsec │ │ │ ├── acquis-configmap.yaml │ │ │ ├── secrets.enc.yaml │ │ │ └── parsers-configmap.yaml │ │ ├── metallb │ │ │ ├── metallb-helm-release.yaml │ │ │ └── ip-address-pool.yaml │ │ ├── external-dns │ │ │ ├── external-dns-helm-release.yaml │ │ │ └── secret.enc.yaml │ │ └── oauth2-proxy │ │ │ ├── secret.enc.yaml │ │ │ └── oauth2-proxy-helm-release.yaml │ ├── cert-manager │ │ ├── namespace.yaml │ │ └── cert-manager │ │ │ ├── cert-manager-helm-release.yaml │ │ │ ├── cert-manager-letsencrypt.yaml │ │ │ └── secret.enc.yaml │ ├── kube-system │ │ ├── namespace.yaml │ │ ├── node-feature-discovery │ │ │ ├── intel-gpu-device.yaml │ │ │ └── node-feature-discovery-helm-release.yaml │ │ ├── intel-device-plugins │ │ │ ├── gpu-plugin-helm-release.yaml │ │ │ └── operator-helm-release.yaml │ │ └── kured │ │ │ ├── kured-helmrelease.yaml │ │ │ └── secrets.enc.yaml │ ├── mb-scheduler │ │ ├── namespace.yaml │ │ ├── mb-scheduler-backend │ │ │ ├── svc.yaml │ │ │ ├── ingress.yaml │ │ │ ├── configmap.yaml │ │ │ └── deployment.yaml │ │ ├── postgresql │ │ │ ├── pvc.yaml │ │ │ ├── postgresql-helm-release.yaml │ │ │ └── secrets.enc.yaml │ │ ├── mb-scheduler-frontend │ │ │ ├── svc.yaml │ │ │ ├── ingress.yaml │ │ │ └── deployment.yaml │ │ ├── redis │ │ │ └── redis-helm-release.yaml │ │ ├── secret.enc.yaml │ │ └── mb-scheduler-worker │ │ │ └── deployment.yaml │ ├── wazuh-system │ │ ├── namespace.yaml │ │ ├── wazuh-dashboard │ │ │ ├── svc.yaml │ │ │ ├── configmap.yaml │ │ │ ├── ingress.yaml │ │ │ ├── certificate.yaml │ │ │ ├── indexer-secret.enc.yaml │ │ │ └── secret.enc.yaml │ │ ├── indexer │ │ │ ├── indexer-api-svc.yaml │ │ │ └── discovery-svc.yaml │ │ ├── manager │ │ │ ├── wazuh-cluster-svc.yaml │ │ │ ├── worker │ │ │ │ └── svc.yaml │ │ │ ├── master │ │ │ │ 
└── svc.yaml │ │ │ ├── api-secrets.enc.yaml │ │ │ └── secrets.enc.yaml │ │ └── certificates │ │ │ ├── admin-certificate.yaml │ │ │ ├── filebeat-certificate.yaml │ │ │ ├── node-certificate.yaml │ │ │ └── wazuh-ca-certificate.yaml │ ├── default │ │ ├── profilarr │ │ │ ├── svc.yaml │ │ │ ├── pvc.yaml │ │ │ ├── ingress.yaml │ │ │ └── deployment.yaml │ │ ├── hyperion │ │ │ ├── pvc.yaml │ │ │ └── hyperion-helm-release.yaml │ │ ├── mealie │ │ │ ├── pvc.yaml │ │ │ └── mealie-helm-release.yaml │ │ ├── atuin │ │ │ ├── pvc.yaml │ │ │ ├── svc.yaml │ │ │ ├── postgresql │ │ │ │ ├── pvc.yaml │ │ │ │ ├── svc.yaml │ │ │ │ ├── deployment.yaml │ │ │ │ └── secrets.enc.yaml │ │ │ ├── ingress.yaml │ │ │ ├── deployment.yaml │ │ │ └── secrets.enc.yaml │ │ ├── n8n │ │ │ ├── pvc.yaml │ │ │ ├── postgresql │ │ │ │ ├── n8n-postgresql-helm-release.yaml │ │ │ │ └── secrets.enc.yaml │ │ │ ├── secrets.enc.yaml │ │ │ └── n8n-helm-release.yaml │ │ ├── radarr │ │ │ └── pvc.yaml │ │ ├── sonarr │ │ │ └── pvc.yaml │ │ ├── syncthing │ │ │ ├── pvc.yaml │ │ │ └── syncthing-helm-release.yaml │ │ ├── changedetection │ │ │ ├── pvc.yaml │ │ │ ├── service.yaml │ │ │ ├── browserless-chrome │ │ │ │ ├── service.yml │ │ │ │ └── deployment.yaml │ │ │ ├── ingress.yaml │ │ │ └── deployment.yaml │ │ ├── prowlarr │ │ │ ├── pvc.yaml │ │ │ └── prowlarr-helm-release.yaml │ │ ├── qbittorent │ │ │ └── pvc.yaml │ │ ├── flood │ │ │ ├── pvc.yaml │ │ │ └── flood-helm-release.yaml │ │ ├── readarr │ │ │ └── pvc.yaml │ │ ├── v-rising │ │ │ ├── pvc.yaml │ │ │ ├── svc.yaml │ │ │ ├── configmap.yaml │ │ │ ├── secrets.enc.yaml │ │ │ └── deployment.yaml │ │ ├── jellyfin │ │ │ └── pvc.yaml │ │ ├── actual-budget │ │ │ ├── svc.yaml │ │ │ ├── pvc.yaml │ │ │ ├── sms-proxy │ │ │ │ ├── svc.yaml │ │ │ │ ├── ingress.yaml │ │ │ │ ├── deployment.yaml │ │ │ │ └── secrets.enc.yaml │ │ │ ├── deployment.yaml │ │ │ └── ingress.yaml │ │ ├── nfs-pv │ │ │ ├── nfs-media.yaml │ │ │ ├── nfs-big-media.yaml │ │ │ └── nfs-media-downloads.yaml │ │ ├── paperless │ │ │ ├── pvc.yaml │ │ │ ├── redis-helm-release.yaml │ │ │ └── secrets.enc.yaml │ │ ├── plex │ │ │ ├── pvc.yaml │ │ │ └── secrets.enc.yaml │ │ ├── yourls │ │ │ ├── secrets.enc.yaml │ │ │ ├── yourls-helm-release.yaml │ │ │ └── mariadb-secrets.enc.yaml │ │ └── flaresolverr │ │ │ └── flaresolverr-helm-release.yaml │ └── longhorn-system │ │ ├── longhorn │ │ ├── certificate.yaml │ │ └── longhorn-helm-release.yaml │ │ └── namespace.yml ├── base │ ├── flux-system-extras │ │ ├── helm-chart-repositories │ │ │ ├── intel-charts.yaml │ │ │ ├── jellyfin-charts.yaml │ │ │ ├── bjw-s-charts.yaml │ │ │ ├── jetstack-charts.yaml │ │ │ ├── longhorn-charts.yaml │ │ │ ├── k8s-at-home-charts.yaml │ │ │ ├── kubereboot-charts.yaml │ │ │ ├── yourls-charts.yaml │ │ │ ├── metallb-charts.yaml │ │ │ ├── grafana-charts.yaml │ │ │ ├── ingress-nginx-charts.yaml │ │ │ ├── external-dns-charts.yaml │ │ │ ├── kubernetes-sigs-descheduler-charts.yaml │ │ │ ├── prometheus-community-charts.yaml │ │ │ ├── kubernetes-sigs-nfd-charts.yaml │ │ │ └── oauth2-proxy-charts.yaml │ │ ├── oci-repositories │ │ │ ├── bitnami-charts.yaml │ │ │ └── open-8gears-charts.yaml │ │ └── monitoring │ │ │ └── flux-podmonitors.yaml │ ├── crds.yaml │ ├── flux-system │ │ ├── gotk-patches.yaml │ │ ├── gotk-sync.yaml │ │ └── kustomization.yaml │ ├── apps.yaml │ └── cluster-secrets.yaml └── crds │ ├── cert-manager │ └── kustomization.yaml │ └── external-dns │ └── kustomization.yaml ├── .gitignore ├── .github ├── CODEOWNERS └── workflows │ ├── mkdocs.yaml │ └── flux-schedule.yaml ├── .sops.yaml ├── 
.pre-commit-config.yaml └── pyproject.toml /docs/CNAME: -------------------------------------------------------------------------------- 1 | https://homelab.shadyf.com -------------------------------------------------------------------------------- /docs/cluster_setup/adding_sops_support_to_flux.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /ansible/poetry.toml: -------------------------------------------------------------------------------- 1 | [virtualenvs] 2 | in-project = true 3 | -------------------------------------------------------------------------------- /cluster/apps/vpn/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: vpn -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Jetbrains stuff 2 | .idea 3 | 4 | # Python venv 5 | venv/ 6 | .aider* 7 | /.pki/ 8 | /.cache/ 9 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/namespace.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: monitoring -------------------------------------------------------------------------------- /cluster/apps/networking/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: networking -------------------------------------------------------------------------------- /cluster/apps/cert-manager/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: cert-manager -------------------------------------------------------------------------------- /cluster/apps/kube-system/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: kube-system -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: mb-scheduler -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/namespace.yaml: -------------------------------------------------------------------------------- 1 | #--- 2 | #apiVersion: v1 3 | #kind: Namespace 4 | #metadata: 5 | # name: wazuh-system -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners 2 | * @ShadyF -------------------------------------------------------------------------------- /.sops.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | creation_rules: 3 | - encrypted_regex: '^(data|stringData)$' 4 | pgp: >- 5 | 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 
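This creation rule is what makes it safe to commit the repo's `*.enc.yaml` files: `sops` encrypts only the fields matching `encrypted_regex` (`data`/`stringData`) with the listed PGP key, and Flux decrypts them in-cluster through the `sops-gpg` secret referenced in `gotk-patches.yaml`. A minimal sketch of a secret as it would be authored before running `sops --encrypt --in-place` on it (the name and value below are hypothetical):

```yaml
# Hypothetical example, not a file from this repo. After `sops --encrypt --in-place`
# only the values under data/stringData are replaced with ENC[AES256_GCM,...] blobs
# (per encrypted_regex) and a `sops:` metadata block is appended; apiVersion, kind
# and metadata stay readable in Git.
apiVersion: v1
kind: Secret
metadata:
  name: example-credentials   # hypothetical name
  namespace: default
type: Opaque
stringData:
  API_TOKEN: "changeme"       # encrypted by sops before committing
```

The `forbid-secrets` pre-commit hook below acts as the guard rail, rejecting any Kubernetes Secret whose sensitive fields have not been encrypted this way.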
-------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | fail_fast: false 2 | repos: 3 | - repo: https://github.com/k8s-at-home/sops-pre-commit 4 | rev: v2.0.3 5 | hooks: 6 | - id: forbid-secrets 7 | -------------------------------------------------------------------------------- /ansible/.ansible-lint: -------------------------------------------------------------------------------- 1 | skip_list: 2 | - yaml[line-length] 3 | - var-naming 4 | warn_list: 5 | - command-instead-of-shell 6 | - deprecated-command-syntax 7 | - experimental 8 | - no-changed-when -------------------------------------------------------------------------------- /cluster/apps/networking/nginx/udp-services.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: ConfigMap 3 | #metadata: 4 | # name: udp-services 5 | # namespace: networking 6 | #data: 7 | # "51820": "vpn/wg-svc:51820" 8 | -------------------------------------------------------------------------------- /cluster/apps/networking/nginx/tcp-services.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: ConfigMap 3 | #metadata: 4 | # name: tcp-services 5 | # namespace: networking 6 | #data: 7 | # "8388": "vpn/shadowsocks-rust-svc:8388" -------------------------------------------------------------------------------- /ansible/requirements.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: vandot.k3sup 4 | version: 0.1.9 5 | #roles: 6 | # - name: xanmanning.k3s 7 | # src: https://github.com/PyratLabs/ansible-role-k3s 8 | # version: v3.4.2 -------------------------------------------------------------------------------- /cluster/apps/vpn/v2ray/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: v2ray-conf-pvc 5 | namespace: vpn 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 50Mi -------------------------------------------------------------------------------- /cluster/apps/vpn/wireguard/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: wg-conf-pvc 5 | namespace: vpn 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 50Mi -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/intel-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: intel-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 2h 9 | url: https://intel.github.io/helm-charts -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/jellyfin-charts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1beta2 2 | kind: HelmRepository 3 | metadata: 4 | name: jellyfin-charts 5 | namespace: flux-system 6 | spec: 7 | interval: 1h 
8 | url: https://jellyfin.github.io/jellyfin-helm -------------------------------------------------------------------------------- /cluster/apps/default/profilarr/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: profilarr-svc 5 | namespace: default 6 | spec: 7 | type: ClusterIP 8 | selector: 9 | app: profilarr 10 | ports: 11 | - protocol: TCP 12 | port: 6868 13 | name: http -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/bjw-s-charts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1beta2 2 | kind: HelmRepository 3 | metadata: 4 | name: bjw-s 5 | namespace: flux-system 6 | spec: 7 | interval: 30m 8 | url: https://bjw-s-labs.github.io/helm-charts 9 | timeout: 3m -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/jetstack-charts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1beta2 2 | kind: HelmRepository 3 | metadata: 4 | name: jetstack-charts 5 | namespace: flux-system 6 | spec: 7 | interval: 30m 8 | url: https://charts.jetstack.io/ 9 | timeout: 3m -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/longhorn-charts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1beta2 2 | kind: HelmRepository 3 | metadata: 4 | name: longhorn-charts 5 | namespace: flux-system 6 | spec: 7 | interval: 30m 8 | url: https://charts.longhorn.io 9 | timeout: 3m -------------------------------------------------------------------------------- /cluster/apps/default/hyperion/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: hyperion-pvc 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 200Mi -------------------------------------------------------------------------------- /cluster/apps/default/mealie/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: mealie-pvc 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 500Mi 13 | -------------------------------------------------------------------------------- /cluster/apps/default/profilarr/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: profilarr-pvc 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 400Mi -------------------------------------------------------------------------------- /cluster/apps/vpn/v2ray/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: v2ray-svc 5 | namespace: vpn 6 | spec: 7 | type: ClusterIP 8 | selector: 9 | app: v2ray 10 | 
ports: 11 | - name: tcp 12 | protocol: TCP 13 | port: 10086 14 | targetPort: 10086 -------------------------------------------------------------------------------- /cluster/apps/default/atuin/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: atuin-config 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 100Mi 12 | storageClassName: longhorn 13 | -------------------------------------------------------------------------------- /cluster/apps/default/n8n/pvc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: PersistentVolumeClaim 3 | #metadata: 4 | # name: n8n-pvc 5 | # namespace: default 6 | #spec: 7 | # accessModes: 8 | # - ReadWriteOnce 9 | # storageClassName: longhorn 10 | # resources: 11 | # requests: 12 | # storage: 2Gi 13 | -------------------------------------------------------------------------------- /cluster/apps/default/radarr/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: radarr-config-pvc 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 3Gi 13 | -------------------------------------------------------------------------------- /cluster/apps/default/sonarr/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: sonarr-config-pvc 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 2Gi 13 | -------------------------------------------------------------------------------- /cluster/apps/default/syncthing/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: syncthing-pvc 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 6Gi 13 | -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/k8s-at-home-charts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1beta2 2 | kind: HelmRepository 3 | metadata: 4 | name: k8s-at-home-charts 5 | namespace: flux-system 6 | spec: 7 | interval: 30m 8 | url: https://k8s-at-home.com/charts/ 9 | timeout: 3m -------------------------------------------------------------------------------- /cluster/apps/default/atuin/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: atuin 5 | namespace: default 6 | spec: 7 | type: ClusterIP 8 | ports: 9 | - name: http 10 | port: 8888 11 | targetPort: 8888 12 | protocol: TCP 13 | selector: 14 | app: atuin 15 | -------------------------------------------------------------------------------- /cluster/apps/default/changedetection/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: changedetection-pvc 5 | 
namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteMany 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 1Gi -------------------------------------------------------------------------------- /cluster/apps/default/prowlarr/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: prowlarr-config-pvc 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 5Gi 13 | -------------------------------------------------------------------------------- /cluster/apps/default/qbittorent/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: qb-config-pvc2 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 200Mi 13 | -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/kubereboot-charts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1beta2 2 | kind: HelmRepository 3 | metadata: 4 | name: kubereboot-charts 5 | namespace: flux-system 6 | spec: 7 | interval: 10m 8 | url: https://kubereboot.github.io/charts 9 | timeout: 3m -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/yourls-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: yourls-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | url: https://charts.yourls.org/ 10 | timeout: 3m -------------------------------------------------------------------------------- /cluster/base/crds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 3 | kind: Kustomization 4 | metadata: 5 | name: crds 6 | namespace: flux-system 7 | spec: 8 | interval: 10m0s 9 | path: ./cluster/crds 10 | prune: true 11 | sourceRef: 12 | kind: GitRepository 13 | name: flux-system -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/metallb-charts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1beta2 2 | kind: HelmRepository 3 | metadata: 4 | name: metallb-charts 5 | namespace: flux-system 6 | spec: 7 | interval: 30m 8 | url: https://metallb.github.io/metallb 9 | timeout: 3m 10 | -------------------------------------------------------------------------------- /cluster/apps/default/flood/pvc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: PersistentVolumeClaim 3 | #metadata: 4 | # name: flood-config-pvc-2 5 | # namespace: default 6 | #spec: 7 | # accessModes: 8 | # - ReadWriteOnce 9 | # storageClassName: longhorn 10 | # resources: 11 | # requests: 12 | # storage: 200Mi 13 | -------------------------------------------------------------------------------- /cluster/apps/default/readarr/pvc.yaml: 
-------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: PersistentVolumeClaim 3 | #metadata: 4 | # name: readarr-config-pvc 5 | # namespace: default 6 | #spec: 7 | # accessModes: 8 | # - ReadWriteOnce 9 | # storageClassName: longhorn 10 | # resources: 11 | # requests: 12 | # storage: 1Gi 13 | -------------------------------------------------------------------------------- /cluster/apps/default/v-rising/pvc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: PersistentVolumeClaim 3 | #metadata: 4 | # name: v-rising-saves-pvc-v3 5 | # namespace: default 6 | #spec: 7 | # accessModes: 8 | # - ReadWriteOnce 9 | # storageClassName: longhorn 10 | # resources: 11 | # requests: 12 | # storage: 5Gi -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/mb-scheduler-backend/svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: mb-scheduler-backend-svc 6 | namespace: mb-scheduler 7 | spec: 8 | type: ClusterIP 9 | selector: 10 | app: mb-scheduler-backend 11 | ports: 12 | - protocol: TCP 13 | port: 80 -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/postgresql/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: mb-scheduler-db-pvc 5 | namespace: mb-scheduler 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 512Mi -------------------------------------------------------------------------------- /cluster/apps/monitoring/kube-prometheus-stack/scrapeconfigs/crowdsec.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1alpha1 3 | kind: ScrapeConfig 4 | metadata: 5 | name: crowdsec 6 | namespace: monitoring 7 | spec: 8 | staticConfigs: 9 | - targets: ["opnsense.home:6060"] 10 | metricsPath: /metrics 11 | -------------------------------------------------------------------------------- /cluster/apps/vpn/wireguard/svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: wg-svc 6 | namespace: vpn 7 | spec: 8 | type: ClusterIP 9 | selector: 10 | app: wireguard 11 | ports: 12 | - name: wireguard 13 | protocol: UDP 14 | port: 51820 15 | targetPort: 51820 -------------------------------------------------------------------------------- /cluster/apps/vpn/wstunnel/svc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: Service 3 | #metadata: 4 | # name: wstunnel-svc 5 | # namespace: vpn 6 | #spec: 7 | # selector: 8 | # app: wstunnel 9 | # ports: 10 | # - protocol: TCP 11 | # port: 48513 12 | # name: http 13 | # clusterIP: None 14 | # type: ClusterIP -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/grafana-charts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1beta2 2 | kind: HelmRepository 3 | metadata: 4 | name: grafana-charts 5 | namespace: flux-system 6 | spec: 7 | interval: 30m 8 | url: 
https://grafana.github.io/helm-charts 9 | timeout: 3m 10 | -------------------------------------------------------------------------------- /cluster/crds/cert-manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: cert-manager 4 | resources: 5 | # renovate: registryUrl=https://charts.jetstack.io chart=cert-manager 6 | - https://github.com/jetstack/cert-manager/releases/download/v1.19.2/cert-manager.crds.yaml -------------------------------------------------------------------------------- /cluster/apps/default/jellyfin/pvc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: PersistentVolumeClaim 3 | #metadata: 4 | # name: jellyfin-config-pvc 5 | # namespace: default 6 | #spec: 7 | # accessModes: 8 | # - ReadWriteOnce 9 | # storageClassName: longhorn 10 | # resources: 11 | # requests: 12 | # storage: 5Gi 13 | -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/mb-scheduler-frontend/svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: mb-scheduler-frontend-svc 6 | namespace: mb-scheduler 7 | spec: 8 | type: ClusterIP 9 | selector: 10 | app: mb-scheduler-frontend 11 | ports: 12 | - protocol: TCP 13 | port: 80 -------------------------------------------------------------------------------- /cluster/apps/default/actual-budget/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: actual-budget-svc 5 | namespace: default 6 | spec: 7 | selector: 8 | app: actual-budget 9 | ports: 10 | - protocol: TCP 11 | port: 5006 12 | name: http 13 | clusterIP: None 14 | type: ClusterIP -------------------------------------------------------------------------------- /cluster/apps/default/atuin/postgresql/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: atuin-postgresql-data 6 | namespace: default 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 1Gi 13 | storageClassName: longhorn 14 | -------------------------------------------------------------------------------- /cluster/apps/default/atuin/postgresql/svc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: atuin-postgresql 6 | namespace: default 7 | spec: 8 | type: ClusterIP 9 | selector: 10 | app: atuin-postgresql 11 | ports: 12 | - protocol: TCP 13 | port: 5432 14 | targetPort: 5432 15 | -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/ingress-nginx-charts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1beta2 2 | kind: HelmRepository 3 | metadata: 4 | name: ingress-nginx-charts 5 | namespace: flux-system 6 | spec: 7 | interval: 30m 8 | url: https://kubernetes.github.io/ingress-nginx 9 | timeout: 3m 10 | -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/oci-repositories/bitnami-charts.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: bitnami-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | type: oci 10 | url: oci://registry-1.docker.io/bitnamicharts 11 | timeout: 3m -------------------------------------------------------------------------------- /cluster/apps/default/changedetection/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: changedetection-svc 5 | namespace: default 6 | spec: 7 | selector: 8 | app: changedetection 9 | ports: 10 | - protocol: TCP 11 | port: 5000 12 | name: http 13 | clusterIP: None 14 | type: ClusterIP -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/external-dns-charts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1beta2 2 | kind: HelmRepository 3 | metadata: 4 | name: external-dns-charts 5 | namespace: flux-system 6 | spec: 7 | interval: 10m 8 | url: https://kubernetes-sigs.github.io/external-dns/ 9 | timeout: 3m 10 | -------------------------------------------------------------------------------- /cluster/apps/default/actual-budget/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: actual-pvc 5 | namespace: default 6 | spec: 7 | # TODO: Should be ReadWriteOnce 8 | accessModes: 9 | - ReadWriteMany 10 | storageClassName: longhorn 11 | resources: 12 | requests: 13 | storage: 200Mi 14 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/ntfy-alertmanager/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: ntfy-alertmanager-svc 5 | namespace: monitoring 6 | spec: 7 | selector: 8 | app: ntfy-alertmanager 9 | type: ClusterIP 10 | ports: 11 | - protocol: TCP 12 | port: 8080 13 | name: http 14 | targetPort: http 15 | -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/kubernetes-sigs-descheduler-charts.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: source.toolkit.fluxcd.io/v1beta2 2 | kind: HelmRepository 3 | metadata: 4 | name: kubernetes-sigs-descheduler-charts 5 | namespace: flux-system 6 | spec: 7 | interval: 10m 8 | url: https://kubernetes-sigs.github.io/descheduler/ 9 | timeout: 3m -------------------------------------------------------------------------------- /cluster/apps/default/actual-budget/sms-proxy/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: actual-budget-sms-proxy-svc 5 | namespace: default 6 | spec: 7 | selector: 8 | app: actual-budget-sms-proxy 9 | ports: 10 | - protocol: TCP 11 | port: 8080 12 | name: http 13 | clusterIP: None 14 | type: ClusterIP -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/prometheus-community-charts.yaml: -------------------------------------------------------------------------------- 1 | 
--- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: prometheus-community-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | url: https://prometheus-community.github.io/helm-charts 10 | timeout: 3m -------------------------------------------------------------------------------- /cluster/apps/default/changedetection/browserless-chrome/service.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: browserless-chrome-svc 5 | namespace: default 6 | labels: 7 | app: browserless-chrome 8 | spec: 9 | ports: 10 | - port: 3000 11 | targetPort: 3000 12 | protocol: TCP 13 | selector: 14 | app: browserless-chrome -------------------------------------------------------------------------------- /cluster/crds/external-dns/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | namespace: networking 4 | resources: 5 | # renovate: registryUrl=https://kubernetes-sigs.github.io/external-dns/ chart=external-dns 6 | - https://raw.githubusercontent.com/kubernetes-sigs/external-dns/v0.10.2/docs/contributing/crd-source/crd-manifest.yaml -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/kubernetes-sigs-nfd-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: kubernetes-sigs-nfd-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 10m 9 | url: https://kubernetes-sigs.github.io/node-feature-discovery/charts 10 | timeout: 3m -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/helm-chart-repositories/oauth2-proxy-charts.yaml: -------------------------------------------------------------------------------- 1 | # https://artifacthub.io/packages/helm/oauth2-proxy/oauth2-proxy 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: oauth2-proxy-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | url: https://oauth2-proxy.github.io/manifests 10 | timeout: 3m -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/oci-repositories/open-8gears-charts.yaml: -------------------------------------------------------------------------------- 1 | # https://fluxcd.io/flux/cheatsheets/oci-artifacts/ 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: open-8gears-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | type: oci 10 | url: oci://8gears.container-registry.com/library 11 | timeout: 3m -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Welcome! 3 | --- 4 | 5 | # Welcome! 6 | 7 | Hey there! :wave: 8 | 9 | This website serves as **documentation** for my k8s homelab. 10 | 11 | Here, you'll find _guides_, useful _links_ and _solutions_ to any issues I might have faced. 12 | 13 | If you have any questions or just want to chat, feel free to hit me up on any of my socials located in the footer! 
:point_down: 14 | -------------------------------------------------------------------------------- /cluster/apps/longhorn-system/longhorn/certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: longhorn-tls-certificate 5 | namespace: longhorn-system 6 | spec: 7 | secretName: longhorn-tls 8 | issuerRef: 9 | name: letsencrypt-production 10 | kind: ClusterIssuer 11 | commonName: "longhorn.${SECRET_DOMAIN}" 12 | dnsNames: 13 | - "longhorn.${SECRET_DOMAIN}" -------------------------------------------------------------------------------- /cluster/apps/networking/wildcard-certificate/certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: "${SECRET_DOMAIN/./-}" 5 | namespace: networking 6 | spec: 7 | secretName: "${SECRET_DOMAIN/./-}-tls" 8 | issuerRef: 9 | name: letsencrypt-production 10 | kind: ClusterIssuer 11 | commonName: "*.${SECRET_DOMAIN}" 12 | dnsNames: 13 | - "*.${SECRET_DOMAIN}" -------------------------------------------------------------------------------- /ansible/inventory/hosts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kubernetes: 3 | vars: 4 | ansible_user: worker 5 | ansible_ssh_port: 22 6 | github_username: ShadyF 7 | children: 8 | master: 9 | hosts: 10 | k8-m1: 11 | ansible_host: 192.168.1.200 12 | workers: 13 | hosts: 14 | k8-w1: 15 | ansible_host: 192.168.1.201 16 | k8-w2: 17 | ansible_host: 192.168.1.202 -------------------------------------------------------------------------------- /ansible/pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "k8s-homelab-ansible" 3 | version = "0.1.0" 4 | description = "ansible scripts for my k8s-homelab" 5 | readme = "README.md" 6 | packages = [{include = "k8s_homelab_ansible"}] 7 | package-mode = false 8 | 9 | [tool.poetry.dependencies] 10 | python = "^3.10" 11 | ansible = "^12.0.0" 12 | 13 | 14 | [build-system] 15 | requires = ["poetry-core"] 16 | build-backend = "poetry.core.masonry.api" 17 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/ntfy/stunnel/certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: "ntfy-stunnel-${SECRET_DOMAIN/./-}" 5 | namespace: monitoring 6 | spec: 7 | secretName: "ntfy-stunnel-${SECRET_DOMAIN/./-}-tls" 8 | issuerRef: 9 | name: letsencrypt-production 10 | kind: ClusterIssuer 11 | commonName: "ntfy-smtp.${SECRET_DOMAIN}" 12 | dnsNames: 13 | - "ntfy-smtp.${SECRET_DOMAIN}" -------------------------------------------------------------------------------- /cluster/apps/longhorn-system/namespace.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: longhorn-system 6 | labels: 7 | pod-security.kubernetes.io/enforce: privileged 8 | pod-security.kubernetes.io/enforce-version: latest 9 | pod-security.kubernetes.io/audit: privileged 10 | pod-security.kubernetes.io/audit-version: latest 11 | pod-security.kubernetes.io/warn: privileged 12 | pod-security.kubernetes.io/warn-version: latest -------------------------------------------------------------------------------- 
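The `Certificate` resources above (Longhorn, the `*.${SECRET_DOMAIN}` wildcard, ntfy-stunnel) all reference a `letsencrypt-production` `ClusterIssuer`, which is defined in `cluster/apps/cert-manager/cert-manager/cert-manager-letsencrypt.yaml` and not included in this excerpt. A minimal sketch of what such an issuer could look like, assuming the Cloudflare DNS-01 solver described in `docs/miscellaneous/cloudflare_dns_challenge.md` (the email and token secret name below are placeholders):

```yaml
# Hedged sketch only -- the real manifest lives in
# cluster/apps/cert-manager/cert-manager/cert-manager-letsencrypt.yaml and may differ.
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-production
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: admin@example.com                # placeholder address
    privateKeySecretRef:
      name: letsencrypt-production-account  # ACME account key secret (hypothetical name)
    solvers:
      - dns01:
          cloudflare:
            apiTokenSecretRef:
              name: cloudflare-api-token    # hypothetical secret name
              key: api-token
```

Issuing via DNS-01 is also what makes the wildcard `*.${SECRET_DOMAIN}` certificate possible, since Let's Encrypt will not issue wildcard certificates over HTTP-01.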
/cluster/apps/networking/crowdsec/acquis-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: crowdsec-acquis-configmap 5 | namespace: networking 6 | data: 7 | acquis.yaml: | 8 | filenames: 9 | - /var/log/containers/nginx-internal-controller-*_networking_*.log 10 | - /var/log/containers/nginx-external-controller-*_networking_*.log 11 | force_inotify: true 12 | poll_without_inotify: false 13 | labels: 14 | type: containerd 15 | program: nginx 16 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.poetry] 2 | name = "k8s-homelab" 3 | version = "0.1.0" 4 | description = "My Kubernetes (k3s) homelab. Synced using Flux v2, automatically updated using Renovate." 5 | authors = ["Shady Fanous "] 6 | license = "Apache License 2.0" 7 | 8 | [tool.poetry.dependencies] 9 | python = "^3.10" 10 | mkdocs-material = "^9.0.0" 11 | 12 | [tool.poetry.dev-dependencies] 13 | 14 | [build-system] 15 | requires = ["poetry-core>=1.0.0"] 16 | build-backend = "poetry.core.masonry.api" 17 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/ntfy/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: ntfy-cache-pvc 5 | namespace: monitoring 6 | spec: 7 | accessModes: 8 | - ReadWriteOnce 9 | storageClassName: longhorn 10 | resources: 11 | requests: 12 | storage: 1Gi 13 | --- 14 | apiVersion: v1 15 | kind: PersistentVolumeClaim 16 | metadata: 17 | name: ntfy-auth-pvc 18 | namespace: monitoring 19 | spec: 20 | accessModes: 21 | - ReadWriteOnce 22 | storageClassName: longhorn 23 | resources: 24 | requests: 25 | storage: 256Mi -------------------------------------------------------------------------------- /cluster/apps/default/profilarr/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: profilarr-ingress 5 | namespace: default 6 | spec: 7 | ingressClassName: "internal" 8 | rules: 9 | - host: "profilarr.${SECRET_DOMAIN}" 10 | http: 11 | paths: 12 | - pathType: Prefix 13 | path: / 14 | backend: 15 | service: 16 | name: profilarr-svc 17 | port: 18 | number: 6868 19 | tls: 20 | - hosts: 21 | - "profilarr.${SECRET_DOMAIN}" -------------------------------------------------------------------------------- /cluster/apps/kube-system/node-feature-discovery/intel-gpu-device.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/nfd.k8s-sigs.io/nodefeaturerule_v1alpha1.json 3 | apiVersion: nfd.k8s-sigs.io/v1alpha1 4 | kind: NodeFeatureRule 5 | metadata: 6 | name: intel-gpu-device 7 | spec: 8 | rules: 9 | - name: intel.gpu 10 | labels: 11 | intel.feature.node.kubernetes.io/gpu: "true" 12 | matchFeatures: 13 | - feature: pci.device 14 | matchExpressions: 15 | class: { op: In, value: ["0300", "0380"] } 16 | vendor: { op: In, value: ["8086"] } -------------------------------------------------------------------------------- /ansible/playbooks/setup-laptop.yaml: -------------------------------------------------------------------------------- 1 | - name: Setup Laptop 2 | hosts: k8-w2 3 | become: true 4 | 
gather_facts: true 5 | any_errors_fatal: true 6 | pre_tasks: 7 | - name: Pausing for 2 seconds... 8 | ansible.builtin.pause: 9 | seconds: 2 10 | tasks: 11 | - name: Enable fstrim 12 | service: 13 | name: fstrim 14 | state: started 15 | enabled: true 16 | - name: Disable suspend on laptop lid close 17 | lineinfile: 18 | path: "/etc/systemd/logind.conf" 19 | state: present 20 | regexp: "^HandleLidSwitch=" 21 | line: "HandleLidSwitch=ignore" 22 | -------------------------------------------------------------------------------- /cluster/apps/networking/nginx/configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: upstream-custom-headers 5 | namespace: networking 6 | labels: 7 | app.kubernetes.io/name: ingress-nginx 8 | app.kubernetes.io/part-of: ingress-nginx 9 | data: 10 | X-Correlation-ID: "$request_id" 11 | # X-GeoIP-Country-Code: "$geoip2_city_country_code" 12 | # X-GeoIP-Country-Name: "$geoip2_city_country_name" 13 | # X-GeoIP-Region: "$geoip2_region_name" 14 | # X-GeoIP-City: "$geoip2_city" 15 | # X-GeoIP-Postal-Code: "$geoip2_postal_code" 16 | # X-GeoIP-Latitude: "$geoip2_latitude" 17 | # X-GeoIP-Longitude: "$geoip2_longitude" -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/wazuh-dashboard/svc.yaml: -------------------------------------------------------------------------------- 1 | ## Copyright (C) 2019, Wazuh Inc. 2 | ## 3 | ## This program is a free software; you can redistribute it 4 | ## and/or modify it under the terms of the GNU General Public 5 | ## License (version 2) as published by the FSF - Free Software 6 | ## Foundation. 7 | # 8 | ## Dashboard service 9 | # 10 | #apiVersion: v1 11 | #kind: Service 12 | #metadata: 13 | # name: wazuh-dashboard-svc 14 | # namespace: wazuh-system 15 | # labels: 16 | # app: wazuh-dashboard 17 | #spec: 18 | # type: ClusterIP 19 | # selector: 20 | # app: wazuh-dashboard 21 | # ports: 22 | # - protocol: TCP 23 | # port: 80 24 | # targetPort: 5601 -------------------------------------------------------------------------------- /cluster/apps/monitoring/ntfy/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: ntfy-configmap 5 | namespace: monitoring 6 | data: 7 | server.yml: |- 8 | behind-proxy: true 9 | base-url: "https://ntfy.${SECRET_DOMAIN}" 10 | listen-http: ":80" 11 | cache-file: "/var/cache/ntfy/cache.db" 12 | attachment-cache-dir: "/var/cache/ntfy/attachments" 13 | auth-file: "/var/lib/ntfy/user.db" 14 | auth-default-access: "deny-all" 15 | enable-login: true 16 | smtp-server-listen: ":25" 17 | smtp-server-domain: "ntfy.${SECRET_DOMAIN}" 18 | #smtp-server-addr-prefix: "ntfy-" # optional 19 | web-root: disable 20 | 21 | -------------------------------------------------------------------------------- /cluster/base/flux-system/gotk-patches.yaml: -------------------------------------------------------------------------------- 1 | # Patches are done so that flux bootstrap does not overwrite them 2 | # See https://github.com/fluxcd/flux2/issues/524 3 | # Don't forget to include this file in the kustomization.yaml 4 | apiVersion: source.toolkit.fluxcd.io/v1 5 | kind: GitRepository 6 | metadata: 7 | name: flux-system 8 | namespace: flux-system 9 | spec: 10 | ignore: | 11 | *.md 12 | cluster/apps/mb-scheduler/ 13 | --- 14 | apiVersion: kustomize.toolkit.fluxcd.io/v1 15 | kind: 
Kustomization 16 | metadata: 17 | name: flux-system 18 | namespace: flux-system 19 | spec: 20 | decryption: 21 | provider: sops 22 | secretRef: 23 | name: sops-gpg 24 | -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/indexer/indexer-api-svc.yaml: -------------------------------------------------------------------------------- 1 | ## Copyright (C) 2019, Wazuh Inc. 2 | ## 3 | ## This program is a free software; you can redistribute it 4 | ## and/or modify it under the terms of the GNU General Public 5 | ## License (version 2) as published by the FSF - Free Software 6 | ## Foundation. 7 | # 8 | ## Indexer service: API 9 | # 10 | #apiVersion: v1 11 | #kind: Service 12 | #metadata: 13 | # name: wazuh-indexer-api-svc 14 | # namespace: wazuh-system 15 | # labels: 16 | # app: wazuh-indexer 17 | #spec: 18 | # type: ClusterIP 19 | # selector: 20 | # app: wazuh-indexer 21 | # ports: 22 | # - name: indexer-rest 23 | # port: 9200 24 | # targetPort: 9200 25 | -------------------------------------------------------------------------------- /cluster/base/flux-system/gotk-sync.yaml: -------------------------------------------------------------------------------- 1 | # This manifest was generated by flux. DO NOT EDIT. 2 | --- 3 | apiVersion: source.toolkit.fluxcd.io/v1 4 | kind: GitRepository 5 | metadata: 6 | name: flux-system 7 | namespace: flux-system 8 | spec: 9 | interval: 1m0s 10 | ref: 11 | branch: master 12 | secretRef: 13 | name: flux-system 14 | url: ssh://git@github.com/ShadyF/k8s-homelab 15 | --- 16 | apiVersion: kustomize.toolkit.fluxcd.io/v1 17 | kind: Kustomization 18 | metadata: 19 | name: flux-system 20 | namespace: flux-system 21 | spec: 22 | interval: 10m0s 23 | path: ./cluster/base 24 | prune: true 25 | sourceRef: 26 | kind: GitRepository 27 | name: flux-system 28 | -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/indexer/discovery-svc.yaml: -------------------------------------------------------------------------------- 1 | ## Copyright (C) 2019, Wazuh Inc. 2 | ## 3 | ## This program is a free software; you can redistribute it 4 | ## and/or modify it under the terms of the GNU General Public 5 | ## License (version 2) as published by the FSF - Free Software 6 | ## Foundation. 
7 | # 8 | ## Indexer service: Communications 9 | # 10 | #apiVersion: v1 11 | #kind: Service 12 | #metadata: 13 | # name: wazuh-indexer-discovery-svc 14 | # namespace: wazuh-system 15 | # labels: 16 | # app: wazuh-indexer 17 | #spec: 18 | # selector: 19 | # app: wazuh-indexer 20 | # ports: 21 | # - name: indexer-nodes 22 | # port: 9300 23 | # targetPort: 9300 24 | # clusterIP: None 25 | -------------------------------------------------------------------------------- /ansible/playbooks/files/sshd_config: -------------------------------------------------------------------------------- 1 | HostKey /etc/ssh/ssh_host_rsa_key 2 | HostKey /etc/ssh/ssh_host_ecdsa_key 3 | HostKey /etc/ssh/ssh_host_ed25519_key 4 | SyslogFacility AUTHPRIV 5 | AuthorizedKeysFile .ssh/authorized_keys 6 | PasswordAuthentication no 7 | ChallengeResponseAuthentication no 8 | GSSAPIAuthentication yes 9 | GSSAPICleanupCredentials no 10 | UsePAM yes 11 | X11Forwarding no 12 | Banner /etc/issue.net 13 | AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES 14 | AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT 15 | AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE 16 | AcceptEnv XMODIFIERS 17 | Subsystem sftp /usr/libexec/openssh/sftp-server 18 | PermitRootLogin no -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/manager/wazuh-cluster-svc.yaml: -------------------------------------------------------------------------------- 1 | ## Copyright (C) 2019, Wazuh Inc. 2 | ## 3 | ## This program is a free software; you can redistribute it 4 | ## and/or modify it under the terms of the GNU General Public 5 | ## License (version 2) as published by the FSF - Free Software 6 | ## Foundation. 7 | # 8 | ## Wazuh cluster Service: Manager nodes communication 9 | # 10 | #apiVersion: v1 11 | #kind: Service 12 | #metadata: 13 | # name: wazuh-cluster-svc 14 | # namespace: wazuh-system 15 | # labels: 16 | # app: wazuh-manager 17 | #spec: 18 | # selector: 19 | # app: wazuh-manager 20 | # ports: 21 | # - name: cluster 22 | # port: 1516 23 | # targetPort: 1516 24 | # clusterIP: None 25 | -------------------------------------------------------------------------------- /docs/miscellaneous/cloudflare_dns_challenge.md: -------------------------------------------------------------------------------- 1 | # Using the Cloudflare DNS-01 challenge instead of the basic ACME HTTP-01 challenge 2 | 3 | First things first, you'll need to move your domain's DNS over to Cloudflare: 4 | 5 | 1. Switch the domain's nameservers from Namecheap to Cloudflare: https://www.namecheap.com/support/knowledgebase/article.aspx/9607/2210/how-to-set-up-dns-records-for-your-domain-in-cloudflare-account/ 6 | 7 | Then configure cert-manager's Cloudflare DNS-01 solver as described in https://cert-manager.io/docs/configuration/acme/dns01/cloudflare/ 8 | 9 | ## Generating a Cloudflare API token 10 | 11 | See https://github.com/k8s-at-home/template-cluster-k3s#cloud-cloudflare-api-token 12 | 13 | For background on why the DNS-01 challenge is preferable to HTTP-01, see https://www.reddit.com/r/selfhosted/comments/ga02px/you_should_probably_know_about_letsencrypt_dns/ 14 | -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/manager/worker/svc.yaml: -------------------------------------------------------------------------------- 1 | ## Copyright (C) 2019, Wazuh Inc. 2 | ## 3 | ## This program is a free software; you can redistribute it 4 | ## and/or modify it under the terms of the GNU General Public 5 | ## License (version 2) as published by the FSF - Free Software 6 | ## Foundation.
7 | # 8 | ## Wazuh workers service: Agent reporting 9 | # 10 | #apiVersion: v1 11 | #kind: Service 12 | #metadata: 13 | # name: wazuh-workers 14 | # namespace: wazuh-system 15 | # labels: 16 | # app: wazuh-manager 17 | # # dns: route53 18 | #spec: 19 | # type: ClusterIP 20 | # selector: 21 | # app: wazuh-manager 22 | # node-type: worker 23 | # ports: 24 | # - name: agents-events 25 | # port: 1514 26 | # targetPort: 1514 27 | -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/mb-scheduler-backend/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mb-scheduler-backend-ingress 5 | namespace: mb-scheduler 6 | annotations: 7 | external-dns/is-public: "true" 8 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 9 | spec: 10 | ingressClassName: "external" 11 | rules: 12 | - host: "api-mb-scheduler.${SECRET_DOMAIN}" 13 | http: 14 | paths: 15 | - pathType: Prefix 16 | path: / 17 | backend: 18 | service: 19 | name: mb-scheduler-backend-svc 20 | port: 21 | number: 80 22 | tls: 23 | - hosts: 24 | - "api-mb-scheduler.${SECRET_DOMAIN}" -------------------------------------------------------------------------------- /docs/miscellaneous/metallb_not_working_raspberry_wifi.md: -------------------------------------------------------------------------------- 1 | # Getting MetalLB to work on Raspberry Pi 4's Wifi 2 | 3 | ## Problem 4 | MetalLB in ARP mode doesn't work on Raspberry Pi if the Wifi interface is used. 5 | 6 | The exact details of the issue can be found [here](https://github.com/raspberrypi/linux/issues/2677) 7 | ## Solution 8 | We'll need to change the Wifi adapter to use promiscuous mode. This can be done by running the following command: 9 | 10 | ```bash 11 | ip link set wlan0 promisc on 12 | ``` 13 | 14 | However, you'll have to run this command after each reboot. To have this run automatically on startup, we can use `crontab`. 
15 | 16 | ```bash 17 | crontab -e 18 | 19 | # Add the following line to crontab 20 | @reboot ip link set wlan0 promisc on 21 | ``` -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/mb-scheduler-frontend/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: mb-scheduler-frontend-ingress 5 | namespace: mb-scheduler 6 | annotations: 7 | external-dns/is-public: "true" 8 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 9 | spec: 10 | ingressClassName: "external" 11 | rules: 12 | - host: "app-mb-scheduler.${SECRET_DOMAIN}" 13 | http: 14 | paths: 15 | - pathType: Prefix 16 | path: / 17 | backend: 18 | service: 19 | name: mb-scheduler-frontend-svc 20 | port: 21 | number: 80 22 | tls: 23 | - hosts: 24 | - "app-mb-scheduler.${SECRET_DOMAIN}" -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/certificates/admin-certificate.yaml: -------------------------------------------------------------------------------- 1 | #--- 2 | ## Source: api/templates/virtual_service.yaml 3 | #apiVersion: cert-manager.io/v1 4 | #kind: Certificate 5 | #metadata: 6 | # name: wazuh-admin-certificate 7 | # namespace: wazuh-system 8 | #spec: 9 | # issuerRef: 10 | # group: cert-manager.io 11 | # kind: Issuer 12 | # name: wazuh-issuer 13 | # secretName: wazuh-admin-tls 14 | # commonName: admin 15 | # dnsNames: 16 | # - "admin" 17 | # usages: 18 | # - "signing" 19 | # - "key encipherment" 20 | # - "server auth" 21 | # - "client auth" 22 | # duration: 2160h 23 | # renewBefore: 360h 24 | # isCA: false 25 | # privateKey: 26 | # algorithm: RSA 27 | # encoding: PKCS1 28 | # size: 2048 29 | # rotationPolicy: Always -------------------------------------------------------------------------------- /cluster/apps/default/atuin/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: atuin 5 | namespace: default 6 | annotations: 7 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 8 | external-dns/is-public: "true" 9 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 10 | spec: 11 | ingressClassName: "external" 12 | rules: 13 | - host: "atuin.${SECRET_DOMAIN}" 14 | http: 15 | paths: 16 | - path: / 17 | pathType: Prefix 18 | backend: 19 | service: 20 | name: atuin 21 | port: 22 | number: 8888 23 | tls: 24 | - hosts: 25 | - "atuin.${SECRET_DOMAIN}" 26 | secretName: atuin-tls 27 | -------------------------------------------------------------------------------- /cluster/apps/networking/nginx/svc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: Service 3 | #metadata: 4 | # name: nginx-external-controller 5 | # namespace: networking 6 | #spec: 7 | # type: LoadBalancer 8 | # ports: 9 | # - name: http 10 | # port: 80 11 | # targetPort: 80 12 | # protocol: TCP 13 | # - name: https 14 | # port: 443 15 | # targetPort: 443 16 | # protocol: TCP 17 | # - name: wireguard 18 | # port: 51820 19 | # targetPort: 51820 20 | # protocol: UDP 21 | # - name: shadowsocks 22 | # port: 8388 23 | # targetPort: 8388 24 | # protocol: TCP 25 | # selector: 26 | # app.kubernetes.io/name: ingress-nginx 27 | # app.kubernetes.io/instance: nginx-external 28 | # app.kubernetes.io/component: controller 29 | 
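The commented-out `nginx-external-controller` Service above reserves raw WireGuard (UDP 51820) and Shadowsocks (TCP 8388) ports on the load balancer, but ingress-nginx only proxies non-HTTP streams that are also declared in the ConfigMaps referenced by its `--tcp-services-configmap` and `--udp-services-configmap` flags. A minimal sketch of those entries follows; the ConfigMap names and the backend Services (`vpn/wg-svc`, `vpn/shadowsocks-svc`) are assumptions and must match the controller's flags and the real Service names in the `vpn` namespace.

```yaml
# Sketch only: map each exposed port to a backend using the ingress-nginx
# "<namespace>/<service>:<port>" value format.
apiVersion: v1
kind: ConfigMap
metadata:
  name: udp-services
  namespace: networking
data:
  "51820": "vpn/wg-svc:51820" # assumed WireGuard Service name
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: tcp-services
  namespace: networking
data:
  "8388": "vpn/shadowsocks-svc:8388" # assumed Shadowsocks Service name
```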
-------------------------------------------------------------------------------- /cluster/apps/wazuh-system/certificates/filebeat-certificate.yaml: -------------------------------------------------------------------------------- 1 | ## Source: api/templates/virtual_service.yaml 2 | #apiVersion: cert-manager.io/v1 3 | #kind: Certificate 4 | #metadata: 5 | # name: filebeat-certificate 6 | # namespace: wazuh-system 7 | #spec: 8 | # issuerRef: 9 | # group: cert-manager.io 10 | # kind: Issuer 11 | # name: wazuh-issuer 12 | # secretName: wazuh-filebeat-tls 13 | # commonName: filebeat 14 | # dnsNames: 15 | # - "filebeat" 16 | # usages: 17 | # - "signing" 18 | # - "key encipherment" 19 | # - "server auth" 20 | # - "client auth" 21 | # - digital signature 22 | # duration: 2160h 23 | # renewBefore: 360h 24 | # isCA: false 25 | # privateKey: 26 | # algorithm: RSA 27 | # encoding: PKCS1 28 | # size: 2048 29 | # rotationPolicy: Always -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/wazuh-dashboard/configmap.yaml: -------------------------------------------------------------------------------- 1 | #kind: ConfigMap 2 | #apiVersion: v1 3 | #metadata: 4 | # name: wazuh-dashboard-configmap 5 | # namespace: wazuh-system 6 | # annotations: 7 | # kustomize.toolkit.fluxcd.io/substitute: disabled 8 | #data: 9 | # opensearch_dashboards.yml: |- 10 | # server.host: 0.0.0.0 11 | # server.port: 5601 12 | # opensearch.hosts: ${INDEXER_URL} 13 | # opensearch.ssl.verificationMode: none 14 | # opensearch.requestHeadersWhitelist: [ authorization,securitytenant ] 15 | # opensearch_security.multitenancy.enabled: false 16 | # opensearch_security.readonly_mode.roles: [ "kibana_read_only" ] 17 | # opensearch.ssl.certificateAuthorities: [ "/usr/share/wazuh-dashboard/certs/ca.crt" ] 18 | # uiSettings.overrides.defaultRoute: /app/wz-home 19 | -------------------------------------------------------------------------------- /.github/workflows/mkdocs.yaml: -------------------------------------------------------------------------------- 1 | # Adapted from https://squidfunk.github.io/mkdocs-material/publishing-your-site/ 2 | name: Deploy MkDocs to Github Pages 3 | on: 4 | push: 5 | branches: 6 | - master 7 | - main 8 | # Only run when docs specific files have been changed 9 | paths: 10 | - ".github/workflows/mkdocs.yaml" 11 | - "mkdocs.yml" 12 | - "docs/**" 13 | 14 | jobs: 15 | deploy: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v6 19 | - uses: actions/setup-python@v6 20 | with: 21 | python-version: '3.14' 22 | - uses: abatilo/actions-poetry@v4.0.0 23 | with: 24 | poetry-version: '1.1.13' 25 | - run: poetry config virtualenvs.create false && poetry install 26 | - run: poetry run mkdocs gh-deploy --force -------------------------------------------------------------------------------- /cluster/apps/kube-system/intel-device-plugins/gpu-plugin-helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2 3 | kind: HelmRelease 4 | metadata: 5 | name: intel-device-plugin-gpu 6 | namespace: kube-system 7 | spec: 8 | interval: 15m 9 | chart: 10 | spec: 11 | # renovate: registryUrl=https://intel.github.io/helm-charts 12 | chart: intel-device-plugins-gpu 13 | version: 0.34.1 14 | sourceRef: 15 | kind: HelmRepository 16 | name: intel-charts 17 | namespace: flux-system 18 | maxHistory: 3 19 | install: 20 | remediation: 21 | retries: 3 22 | upgrade: 23 | cleanupOnFail: true 24 
| remediation: 25 | retries: 3 26 | uninstall: 27 | keepHistory: false 28 | values: 29 | name: intel-gpu-plugin 30 | sharedDevNum: 2 31 | nodeFeatureRule: true -------------------------------------------------------------------------------- /cluster/apps/default/nfs-pv/nfs-media.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolume 4 | metadata: 5 | name: nfs-media-v2 6 | spec: 7 | storageClassName: media 8 | capacity: 9 | storage: 1Mi 10 | accessModes: 11 | - ReadWriteMany 12 | persistentVolumeReclaimPolicy: Retain 13 | nfs: 14 | server: 192.168.1.211 15 | path: /srv/nfs 16 | # Explanation of mount options https://www.thegeekdiary.com/common-nfs-mount-options-in-linux/ 17 | mountOptions: 18 | - nfsvers=3 19 | - tcp 20 | - intr 21 | - hard 22 | - noatime 23 | - nodiratime 24 | --- 25 | apiVersion: v1 26 | kind: PersistentVolumeClaim 27 | metadata: 28 | name: nfs-media-pvc 29 | namespace: default 30 | spec: 31 | accessModes: 32 | - ReadWriteMany 33 | storageClassName: media 34 | resources: 35 | requests: 36 | storage: 1Mi -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/certificates/node-certificate.yaml: -------------------------------------------------------------------------------- 1 | ## Source: api/templates/virtual_service.yaml 2 | #apiVersion: cert-manager.io/v1 3 | #kind: Certificate 4 | #metadata: 5 | # name: wazuh-node-certificate 6 | # namespace: wazuh-system 7 | #spec: 8 | # issuerRef: 9 | # group: cert-manager.io 10 | # kind: Issuer 11 | # name: wazuh-issuer 12 | # secretName: wazuh-node-tls 13 | # commonName: "wazuh-indexer-0.wazuh-indexer-discovery-svc" 14 | # dnsNames: 15 | # - "wazuh-indexer-0.wazuh-indexer-discovery-svc" 16 | # usages: 17 | # - "signing" 18 | # - "key encipherment" 19 | # - "server auth" 20 | # - "client auth" 21 | # - digital signature 22 | # duration: 2160h 23 | # renewBefore: 360h 24 | # isCA: false 25 | # privateKey: 26 | # algorithm: RSA 27 | # encoding: PKCS1 28 | # size: 2048 29 | # rotationPolicy: Always -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/redis/redis-helm-release.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: mb-scheduler-redis 5 | namespace: mb-scheduler 6 | spec: 7 | interval: 15m 8 | chart: 9 | spec: 10 | chart: app-template 11 | version: 0.2.1 12 | interval: 15m 13 | sourceRef: 14 | kind: HelmRepository 15 | name: bjw-s 16 | namespace: flux-system 17 | 18 | values: 19 | image: 20 | repository: docker.io/library/redis 21 | tag: 8.4.0 22 | 23 | service: 24 | main: 25 | ports: 26 | http: 27 | enabled: false 28 | redis: 29 | enabled: true 30 | port: 6379 31 | 32 | resources: 33 | requests: 34 | cpu: 10m 35 | memory: 100Mi 36 | limits: 37 | memory: 100Mi -------------------------------------------------------------------------------- /cluster/apps/default/nfs-pv/nfs-big-media.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolume 4 | metadata: 5 | name: nfs-big-media-v1 6 | spec: 7 | storageClassName: big-media 8 | capacity: 9 | storage: 1Mi 10 | accessModes: 11 | - ReadWriteMany 12 | persistentVolumeReclaimPolicy: Retain 13 | nfs: 14 | server: 192.168.1.213 15 | path: /srv/nfs 16 | # Explanation of mount options 
https://www.thegeekdiary.com/common-nfs-mount-options-in-linux/ 17 | mountOptions: 18 | - nfsvers=3 19 | - tcp 20 | - intr 21 | - hard 22 | - noatime 23 | - nodiratime 24 | --- 25 | apiVersion: v1 26 | kind: PersistentVolumeClaim 27 | metadata: 28 | name: nfs-big-media-pvc 29 | namespace: default 30 | spec: 31 | accessModes: 32 | - ReadWriteMany 33 | storageClassName: big-media 34 | resources: 35 | requests: 36 | storage: 1Mi -------------------------------------------------------------------------------- /cluster/apps/default/v-rising/svc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: Service 3 | #metadata: 4 | # name: v-rising-svc 5 | # namespace: default 6 | # 7 | # # Needed if sharing UDP and TCP services, not sure if this is actually needed here though 8 | # annotations: 9 | # metallb.universe.tf/allow-shared-ip: v-rising 10 | #spec: 11 | # # Not sure why this service is needed. See https://www.debontonline.com/2021/01/part-14-deploy-plexserver-yaml-with.html 12 | # type: LoadBalancer 13 | # # Needed since plex only considers clients as local if they're on the same subnet 14 | # loadBalancerIP: 192.168.1.242 15 | # externalTrafficPolicy: Cluster 16 | # selector: 17 | # app: v-rising 18 | # ports: 19 | # - name: udp1 20 | # protocol: UDP 21 | # port: 9876 22 | # targetPort: 9876 23 | # - name: udp2 24 | # protocol: UDP 25 | # port: 9877 26 | # targetPort: 9877 -------------------------------------------------------------------------------- /docs/cluster_setup/setting_up_flux.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Setting up Flux 3 | --- 4 | 5 | # Setting up Flux 6 | 7 | ## Installing Flux 8 | 9 | 1. Create a personal access token on GitHub with repo privileges 10 | 2. Install the Flux CLI locally (`flux-bin` in the AUR if using Arch) 11 | 3. Export `GITHUB_TOKEN` to your shell environment 12 | 4. 
Run the following command 13 | 14 | ```bash 15 | flux bootstrap github --owner=ShadyF --repository=homelab --branch=master --path=cluster/base --personal 16 | ``` 17 | 18 | ## Reconciling using flux 19 | 20 | ```bash 21 | # Reconcile gitcontroller (given that flux-system is the name of the gitcontroller) 22 | flux reconcile source git flux-system 23 | 24 | # Reconcile kustomization (given that apps is the name of the kustomization controller) 25 | flux reconcile kustomization apps 26 | 27 | # Reconcile a helm release 28 | flux reconcile helmrelease oauth2-proxy -n networking 29 | ``` -------------------------------------------------------------------------------- /cluster/apps/kube-system/intel-device-plugins/operator-helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2 3 | kind: HelmRelease 4 | metadata: 5 | name: intel-device-plugin-operator 6 | namespace: kube-system 7 | spec: 8 | interval: 15m 9 | chart: 10 | spec: 11 | # renovate: registryUrl=https://intel.github.io/helm-charts 12 | chart: intel-device-plugins-operator 13 | version: 0.34.1 14 | sourceRef: 15 | kind: HelmRepository 16 | name: intel-charts 17 | namespace: flux-system 18 | maxHistory: 3 19 | install: 20 | crds: CreateReplace 21 | remediation: 22 | retries: 3 23 | upgrade: 24 | cleanupOnFail: true 25 | crds: CreateReplace 26 | remediation: 27 | retries: 3 28 | uninstall: 29 | keepHistory: false 30 | dependsOn: 31 | - name: node-feature-discovery 32 | namespace: kube-system -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/manager/master/svc.yaml: -------------------------------------------------------------------------------- 1 | ## Copyright (C) 2019, Wazuh Inc. 2 | ## 3 | ## This program is a free software; you can redistribute it 4 | ## and/or modify it under the terms of the GNU General Public 5 | ## License (version 2) as published by the FSF - Free Software 6 | ## Foundation. 
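As a follow-up to the "Setting up Flux" notes above, a few read-only commands are useful for confirming that the bootstrap and subsequent reconciliations are healthy. This is only a sketch and assumes the Flux CLI and a working kubeconfig are available locally.

```bash
# Verify the Flux CLI version, controllers and CRDs in the cluster
flux check

# Confirm the GitRepository source and Kustomizations are reconciling
flux get sources git
flux get kustomizations

# Inspect HelmRelease status across all namespaces
flux get helmreleases -A
```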
7 | # 8 | ## Wazuh master Service: API and registration (authd) 9 | # 10 | #apiVersion: v1 11 | #kind: Service 12 | #metadata: 13 | # name: wazuh-manager-svc # Don't change, unless you update the Wazuh dashboard app config 14 | # namespace: wazuh-system 15 | # labels: 16 | # app: wazuh-manager 17 | # # dns: route53 18 | #spec: 19 | # type: ClusterIP 20 | # selector: 21 | # app: wazuh-manager 22 | # node-type: master 23 | # ports: 24 | # - name: registration 25 | # port: 1515 26 | # targetPort: 1515 27 | # - name: api 28 | # port: 55000 29 | # targetPort: 55000 30 | -------------------------------------------------------------------------------- /cluster/apps/vpn/wstunnel/ingress.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: networking.k8s.io/v1 2 | #kind: Ingress 3 | #metadata: 4 | # name: wstunnel-ingress 5 | # namespace: vpn 6 | # annotations: 7 | # nginx.ingress.kubernetes.io/ssl-redirect: "true" 8 | # nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" 9 | # nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" 10 | # external-dns/is-public: "true" 11 | # external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 12 | #spec: 13 | # ingressClassName: "external" 14 | # rules: 15 | # - host: "wg.${SECRET_DOMAIN}" 16 | # http: 17 | # paths: 18 | # - pathType: Prefix 19 | # path: / 20 | # backend: 21 | # service: 22 | # name: wstunnel-svc 23 | # port: 24 | # number: 48513 25 | # tls: 26 | # - hosts: 27 | # - "wg.${SECRET_DOMAIN}" 28 | # # secretName: wstunnel-tls -------------------------------------------------------------------------------- /cluster/apps/default/actual-budget/sms-proxy/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: actual-budget-sms-proxy-ingress 5 | namespace: default 6 | annotations: 7 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 8 | nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" 9 | nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" 10 | external-dns/is-public: "true" 11 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 12 | spec: 13 | ingressClassName: "external" 14 | rules: 15 | - host: "actual-sms-proxy.${SECRET_DOMAIN}" 16 | http: 17 | paths: 18 | - pathType: Prefix 19 | path: / 20 | backend: 21 | service: 22 | name: actual-budget-sms-proxy-svc 23 | port: 24 | number: 8080 25 | tls: 26 | - hosts: 27 | - "actual-sms-proxy.${SECRET_DOMAIN}" -------------------------------------------------------------------------------- /cluster/apps/monitoring/ntfy/stunnel/stunnel-configmap.yaml: -------------------------------------------------------------------------------- 1 | kind: ConfigMap 2 | apiVersion: v1 3 | metadata: 4 | name: stunnel-configmap 5 | namespace: monitoring 6 | data: 7 | start.sh: |- 8 | #!/bin/sh 9 | apk update 10 | apk add --no-cache ca-certificates stunnel 11 | 12 | cat << EOF > /etc/stunnel/stunnel.conf 13 | CApath = /conf/ssl/ 14 | pid = /var/run/stunnel.pid 15 | foreground = yes 16 | debug = 2 17 | cert = /conf/ssl/tls.crt 18 | key = /conf/ssl/tls.key 19 | 20 | [service-tls-tcp] 21 | sslVersionMin = TLSv1.1 22 | accept = 0.0.0.0:587 23 | connect = 127.0.0.1:25 24 | TIMEOUTbusy = 300 25 | TIMEOUTclose = 60 26 | TIMEOUTconnect = 10 27 | TIMEOUTidle = 300 28 | socket = l:SO_LINGER=1:60 29 | retry = no 30 | EOF 31 | 32 | echo 'Starting Stunnel TLS termination for TCP Services...' 
33 | stunnel /etc/stunnel/stunnel.conf 34 | -------------------------------------------------------------------------------- /cluster/apps/default/nfs-pv/nfs-media-downloads.yaml: -------------------------------------------------------------------------------- 1 | #--- 2 | #apiVersion: v1 3 | #kind: PersistentVolume 4 | #metadata: 5 | # name: nfs-media-downloads-pv-v1 6 | #spec: 7 | # storageClassName: media-downloads 8 | # capacity: 9 | # storage: 1Mi 10 | # accessModes: 11 | # - ReadWriteMany 12 | # persistentVolumeReclaimPolicy: Retain 13 | # nfs: 14 | # server: 192.168.1.200 15 | # path: /srv/nfs/torrents 16 | # # Explanation of mount options https://www.thegeekdiary.com/common-nfs-mount-options-in-linux/ 17 | # mountOptions: 18 | # - nfsvers=3 19 | # - tcp 20 | # - intr 21 | # - hard 22 | # - noatime 23 | # - nodiratime 24 | #--- 25 | #apiVersion: v1 26 | #kind: PersistentVolumeClaim 27 | #metadata: 28 | # name: nfs-media-downloads-pvc 29 | # namespace: default 30 | #spec: 31 | # accessModes: 32 | # - ReadWriteMany 33 | # storageClassName: media-downloads 34 | # resources: 35 | # requests: 36 | # storage: 1Mi -------------------------------------------------------------------------------- /cluster/apps/monitoring/kube-prometheus-stack/prometheus-rules/dockerhub-rate-limit-rule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: PrometheusRule 5 | metadata: 6 | name: dockerhub-rate-limit-rule 7 | namespace: monitoring 8 | labels: 9 | prometheus: k8s 10 | role: alert-rules 11 | spec: 12 | groups: 13 | - name: dockerhub 14 | rules: 15 | - alert: BootstrapRateLimitRisk 16 | annotations: 17 | summary: Kubernetes cluster at risk of being rate limited by dockerhub on bootstrap 18 | # https://docs.docker.com/docker-hub/download-rate-limit/ 19 | # 100 Pulls / 6 hours 20 | expr: count(time() - container_last_seen{image=~"(docker.io).*",container!=""} < 30) > 100 21 | for: 15m 22 | labels: 23 | severity: critical -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/mb-scheduler-frontend/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mb-scheduler-frontend 5 | namespace: mb-scheduler 6 | labels: 7 | app: mb-scheduler-frontend 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | app: mb-scheduler-frontend 15 | template: 16 | metadata: 17 | labels: 18 | app: mb-scheduler-frontend 19 | spec: 20 | containers: 21 | - name: frontend 22 | image: ghcr.io/shadyf/mb-frontend:sha-5272e50 23 | imagePullPolicy: IfNotPresent 24 | ports: 25 | - protocol: TCP 26 | containerPort: 80 27 | resources: 28 | requests: 29 | cpu: 10m 30 | memory: 25Mi 31 | limits: 32 | memory: 25Mi 33 | imagePullSecrets: 34 | - name: ghcr-login-secret -------------------------------------------------------------------------------- /cluster/apps/monitoring/kube-prometheus-stack/prometheus-rules/oom-rule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://kubernetes-schemas.pages.dev/monitoring.coreos.com/prometheusrule_v1.json 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: PrometheusRule 5 | metadata: 6 | name: 
oomkilled-rule 7 | namespace: monitoring 8 | labels: 9 | prometheus: k8s 10 | role: alert-rules 11 | spec: 12 | groups: 13 | - name: oom 14 | rules: 15 | - alert: OOMKilled 16 | annotations: 17 | summary: Container {{ $labels.container }} in pod {{ $labels.namespace }}/{{ $labels.pod }} has been OOMKilled {{ $value }} times in the last 10 minutes. 18 | expr: (kube_pod_container_status_restarts_total - kube_pod_container_status_restarts_total offset 10m >= 1) and ignoring (reason) min_over_time(kube_pod_container_status_last_terminated_reason{reason="OOMKilled"}[10m]) == 1 19 | labels: 20 | severity: critical -------------------------------------------------------------------------------- /cluster/base/flux-system/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - gotk-components.yaml 5 | - gotk-sync.yaml 6 | # https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-helm-near-oom-detection 7 | patches: 8 | - patch: | 9 | # Enable OOM watch feature 10 | - op: add 11 | path: /spec/template/spec/containers/0/args/- 12 | value: --feature-gates=OOMWatch=true 13 | # Threshold at which to trigger a graceful shutdown (optional, default 95%) 14 | - op: add 15 | path: /spec/template/spec/containers/0/args/- 16 | value: --oom-watch-memory-threshold=95 17 | # Interval at which to check memory usage (optional, default 500ms) 18 | - op: add 19 | path: /spec/template/spec/containers/0/args/- 20 | value: --oom-watch-interval=500ms 21 | target: 22 | kind: Deployment 23 | name: helm-controller 24 | - path: gotk-patches.yaml 25 | -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/wazuh-dashboard/ingress.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: networking.k8s.io/v1 2 | #kind: Ingress 3 | #metadata: 4 | # name: wazuh-dashboard-ingress 5 | # namespace: wazuh-system 6 | # annotations: 7 | ## nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.networking.svc.cluster.local/oauth2/auth" 8 | ## nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_DOMAIN}/oauth2/sign_in" 9 | # external-dns/is-public: "false" 10 | # external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 11 | #spec: 12 | # ingressClassName: "internal" 13 | # rules: 14 | # - host: "wazuh-dashboard.${SECRET_DOMAIN}" 15 | # http: 16 | # paths: 17 | # - pathType: Prefix 18 | # path: / 19 | # backend: 20 | # service: 21 | # name: wazuh-dashboard-svc 22 | # port: 23 | # number: 80 24 | # tls: 25 | # - hosts: 26 | # - "wazuh-dashboard.${SECRET_DOMAIN}" -------------------------------------------------------------------------------- /cluster/apps/vpn/v2ray/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: v2ray-ingress 5 | namespace: vpn 6 | annotations: 7 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 8 | nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" 9 | nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" 10 | external-dns/is-public: "true" 11 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 12 | nginx.ingress.kubernetes.io/proxy-http-version: "1.1" 13 | nginx.ingress.kubernetes.io/enable-websocket-upgrade: "true" 14 | spec: 15 | ingressClassName: "external" 16 | rules: 17 | - host: "v2ray.${SECRET_DOMAIN}" 18 | http: 19 | 
paths: 20 | - pathType: Prefix 21 | path: / 22 | backend: 23 | service: 24 | name: v2ray-svc 25 | port: 26 | number: 10086 27 | tls: 28 | - hosts: 29 | - "v2ray.${SECRET_DOMAIN}" -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/certificates/wazuh-ca-certificate.yaml: -------------------------------------------------------------------------------- 1 | #--- 2 | #apiVersion: cert-manager.io/v1 3 | #kind: Issuer 4 | #metadata: 5 | # name: selfsigning-issuer 6 | # namespace: wazuh-system 7 | #spec: 8 | # selfSigned: { } 9 | #--- 10 | #apiVersion: cert-manager.io/v1 11 | #kind: Certificate 12 | #metadata: 13 | # name: wazuh-pki-ca 14 | # namespace: wazuh-system 15 | #spec: 16 | # issuerRef: 17 | # group: cert-manager.io 18 | # kind: Issuer 19 | # name: selfsigning-issuer 20 | # secretName: wazuh-pki-ca 21 | # commonName: "wazuh-ca" 22 | # subject: 23 | # organizations: 24 | # - "Wazuh, Inc." 25 | # usages: 26 | # - "signing" 27 | # - "key encipherment" 28 | # - "cert sign" 29 | # duration: 87600h # 3650d 30 | # renewBefore: 8760h # 365d 31 | # isCA: true 32 | #--- 33 | #apiVersion: cert-manager.io/v1 34 | #kind: Issuer 35 | #metadata: 36 | # name: wazuh-issuer 37 | # namespace: wazuh-system 38 | #spec: 39 | # ca: 40 | # secretName: "wazuh-pki-ca" -------------------------------------------------------------------------------- /cluster/apps/kube-system/node-feature-discovery/node-feature-discovery-helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2 3 | kind: HelmRelease 4 | metadata: 5 | name: node-feature-discovery 6 | namespace: kube-system 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | # renovate: registryUrl=https://kubernetes-sigs.github.io/node-feature-discovery/charts 12 | chart: node-feature-discovery 13 | version: 0.18.3 14 | sourceRef: 15 | kind: HelmRepository 16 | name: kubernetes-sigs-nfd-charts 17 | namespace: flux-system 18 | install: 19 | crds: CreateReplace 20 | remediation: 21 | retries: 3 22 | upgrade: 23 | cleanupOnFail: true 24 | crds: CreateReplace 25 | remediation: 26 | strategy: rollback 27 | retries: 3 28 | values: 29 | master: 30 | replicaCount: 1 31 | worker: 32 | config: 33 | core: 34 | labelSources: ["pci", "system", "usb"] 35 | prometheus: 36 | enable: true -------------------------------------------------------------------------------- /cluster/apps/cert-manager/cert-manager/cert-manager-helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2 3 | kind: HelmRelease 4 | metadata: 5 | name: cert-manager 6 | namespace: cert-manager 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | # renovate: registryUrl=https://charts.jetstack.io 12 | chart: cert-manager 13 | version: v1.19.2 14 | sourceRef: 15 | kind: HelmRepository 16 | name: jetstack-charts 17 | namespace: flux-system 18 | interval: 5m 19 | values: 20 | installCRDs: false 21 | webhook: 22 | replicaCount: 1 23 | extraArgs: 24 | - --dns01-recursive-nameservers=1.1.1.1:53 25 | - --dns01-recursive-nameservers-only 26 | cainjector: 27 | replicaCount: 1 28 | podDnsPolicy: "None" 29 | podDnsConfig: 30 | nameservers: 31 | - "1.1.1.1" 32 | - "8.8.8.8" 33 | prometheus: 34 | enabled: true 35 | servicemonitor: 36 | enabled: true 37 | prometheusInstance: monitoring 38 | -------------------------------------------------------------------------------- 
/cluster/apps/default/v-rising/configmap.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: ConfigMap 3 | #metadata: 4 | # name: v-rising-host-settings 5 | # namespace: default 6 | # annotations: 7 | # kustomize.toolkit.fluxcd.io/substitute: disabled 8 | #data: 9 | # ServerHostSetting.templ: |- 10 | # { 11 | # "Name": "${V_RISING_NAME}", 12 | # "Description": "${V_RISING_DESC}", 13 | # "Port": ${V_RISING_PORT}, 14 | # "QueryPort": ${V_RISING_QUERY_PORT}, 15 | # "MaxConnectedUsers": ${V_RISING_MAX_USER}, 16 | # "MaxConnectedAdmins": ${V_RISING_MAX_ADMIN}, 17 | # "ServerFps": 30, 18 | # "SaveName": "${V_RISING_SAVE_NAME}", 19 | # "Password": "${V_RISING_PASSW}", 20 | # "Secure": true, 21 | # "ListOnMasterServer": ${V_RISING_PUBLIC_LIST}, 22 | # "AutoSaveCount": "${V_RISING_AUTOSAVE_COUNT}", 23 | # "AutoSaveInterval": "${V_RISING_AUTOSAVE_INTERVAL}", 24 | # "GameSettingsPreset": "${V_RISING_SETTING_PRESET}", 25 | # "AdminOnlyDebugEvents": true, 26 | # "DisableDebugEvents": false 27 | # } -------------------------------------------------------------------------------- /cluster/apps/monitoring/kube-prometheus-stack/prometheus-rules/flux.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | name: flux-rules 6 | namespace: flux-system 7 | spec: 8 | groups: 9 | - name: flux.rules 10 | rules: 11 | - alert: FluxComponentAbsent 12 | annotations: 13 | summary: Flux component has disappeared from Prometheus target discovery. 14 | expr: | 15 | absent(up{job=~".*flux-system.*"} == 1) 16 | for: 15m 17 | labels: 18 | severity: critical 19 | - alert: FluxReconciliationFailure 20 | annotations: 21 | summary: >- 22 | {{ $labels.kind }} {{ $labels.namespace }}/{{ $labels.name }} reconciliation 23 | has been failing for more than 15 minutes. 24 | expr: | 25 | max by (namespace, name, kind) (gotk_reconcile_condition{status="False",type="Ready"}) == 1 26 | for: 15m 27 | labels: 28 | severity: critical -------------------------------------------------------------------------------- /docs/miscellaneous/enabling_WakeOnLan_on_NIC.md: -------------------------------------------------------------------------------- 1 | # Enabling WakeOnLan on NIC 2 | 3 | !!! info 4 | Adapted from [https://www.techrepublic.com/article/how-to-enable-wake-on-lan-in-ubuntu-server-18-04/](https://www.techrepublic.com/article/how-to-enable-wake-on-lan-in-ubuntu-server-18-04/) 5 | 6 | To enable WOL on a NIC in ubuntu, we're going to have to create a systemd service. 
Start by running the following command 7 | ```shell 8 | sudo nano /etc/systemd/system/wol.service 9 | ``` 10 | 11 | In that file, paste the following 12 | ``` 13 | [Unit] 14 | Description=Configure Wake On LAN 15 | 16 | [Service] 17 | Type=oneshot 18 | ExecStart=/sbin/ethtool -s INTERFACE wol g 19 | 20 | [Install] 21 | WantedBy=basic.target 22 | ``` 23 | 24 | Afterwards, we need to start the systemd service to take effect immediately and enable it so that it runs on each startup 25 | ```shell 26 | # start the service for the current session 27 | sudo systemctl start wol.service 28 | 29 | # enable the service to run on each startup 30 | sudo systemctl enable wol.service 31 | ``` -------------------------------------------------------------------------------- /cluster/apps/default/actual-budget/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: actual-budget 5 | namespace: default 6 | labels: 7 | app: actual-budget 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: RollingUpdate 12 | selector: 13 | matchLabels: 14 | app: actual-budget 15 | template: 16 | metadata: 17 | labels: 18 | app: actual-budget 19 | spec: 20 | containers: 21 | - name: actual-budget 22 | image: actualbudget/actual-server:25.12.0 23 | imagePullPolicy: IfNotPresent 24 | ports: 25 | - containerPort: 5006 26 | volumeMounts: 27 | - mountPath: "/data" 28 | name: actual-data-volume 29 | resources: 30 | requests: 31 | cpu: 10m 32 | memory: 100Mi 33 | limits: 34 | memory: 100Mi 35 | volumes: 36 | - name: actual-data-volume 37 | persistentVolumeClaim: 38 | claimName: actual-pvc -------------------------------------------------------------------------------- /cluster/apps/networking/metallb/metallb-helm-release.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: metallb 5 | namespace: networking 6 | spec: 7 | interval: 5m 8 | chart: 9 | spec: 10 | # renovate: registryUrl=https://metallb.github.io/metallb 11 | chart: metallb 12 | version: 0.15.3 13 | sourceRef: 14 | kind: HelmRepository 15 | name: metallb-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | dependsOn: 26 | - name: kube-prometheus-stack 27 | namespace: monitoring 28 | # https://github.com/metallb/metallb/blob/main/charts/metallb/values.yaml 29 | values: 30 | crds: 31 | enabled: true 32 | prometheus: 33 | namespace: monitoring 34 | serviceAccount: kube-prometheus-stack-prometheus 35 | podMonitor: 36 | enabled: true 37 | prometheusRule: 38 | enabled: true -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/postgresql/postgresql-helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2 3 | kind: HelmRelease 4 | metadata: 5 | name: postgresql 6 | namespace: mb-scheduler 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | # renovate: registryUrl=https://charts.bitnami.com/bitnami 12 | chart: postgresql 13 | version: 16.7.27 14 | sourceRef: 15 | kind: HelmRepository 16 | name: bitnami-charts 17 | namespace: flux-system 18 | interval: 5m 19 | # https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml 20 | values: 21 | image: 22 | repository: 
bitnami/postgresql 23 | tag: 15.4.0-debian-11-r39 24 | auth: 25 | enablePostgresUser: false 26 | username: mb-scheduler 27 | existingSecret: postgresql-secrets 28 | primary: 29 | persistence: 30 | enabled: true 31 | existingClaim: mb-scheduler-db-pvc 32 | resources: 33 | requests: 34 | memory: 50Mi 35 | cpu: 25m 36 | limits: 37 | memory: 150Mi -------------------------------------------------------------------------------- /ansible/playbooks/setup-worker.yaml: -------------------------------------------------------------------------------- 1 | # Does not work, works locally only 2 | --- 3 | - name: Prepare System 4 | hosts: testing 5 | gather_facts: true 6 | any_errors_fatal: true 7 | vars: 8 | local_user: shadyf 9 | server_user: master 10 | pre_tasks: 11 | - name: Pausing for 2 seconds... 12 | ansible.builtin.pause: 13 | seconds: 2 14 | roles: 15 | - role: vandot.k3sup.k3sup 16 | tasks: 17 | - name: Install Python 18 | become: true 19 | block: 20 | - name: Packages | Install 21 | apt: 22 | name: python3,python-is-python3 23 | install_recommends: false 24 | 25 | - name: Install k3s agent on k3s-agents 26 | hosts: localhost 27 | connection: local 28 | tasks: 29 | - name: Install k3s on agent 30 | become: false 31 | vandot.k3sup.k3sup: 32 | action: agent 33 | ip: "{{ ansible_host }}" 34 | user: "{{ local_user }}" 35 | server_user: "{{ server_user }}" 36 | server_ip: 192.168.1.200 37 | ssh_key: "~/.ssh/id_ed25519" 38 | k3s_channel: "latest" -------------------------------------------------------------------------------- /cluster/apps/default/actual-budget/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: actual-budget-ingress 5 | namespace: default 6 | annotations: 7 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 8 | nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" 9 | nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" 10 | nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.networking.svc.cluster.local/oauth2/auth" 11 | nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_DOMAIN}/oauth2/sign_in" 12 | external-dns/is-public: "true" 13 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 14 | spec: 15 | ingressClassName: "external" 16 | rules: 17 | - host: "actual.${SECRET_DOMAIN}" 18 | http: 19 | paths: 20 | - pathType: Prefix 21 | path: / 22 | backend: 23 | service: 24 | name: actual-budget-svc 25 | port: 26 | number: 5006 27 | tls: 28 | - hosts: 29 | - "actual.${SECRET_DOMAIN}" -------------------------------------------------------------------------------- /cluster/apps/default/n8n/postgresql/n8n-postgresql-helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2 3 | kind: HelmRelease 4 | metadata: 5 | name: n8n-postgresql 6 | namespace: default 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | # renovate: registryUrl=https://charts.bitnami.com/bitnami 12 | chart: postgresql 13 | version: 16.7.27 14 | sourceRef: 15 | kind: HelmRepository 16 | name: bitnami-charts 17 | namespace: flux-system 18 | interval: 5m 19 | # https://github.com/bitnami/charts/blob/main/bitnami/postgresql/values.yaml 20 | values: 21 | image: 22 | repository: bitnami/postgresql 23 | tag: 15.4.0-debian-11-r39 24 | auth: 25 | enablePostgresUser: false 26 | username: n8n 27 | existingSecret: n8n-postgresql-secrets 28 | primary: 29 | persistence: 30 | enabled: 
true 31 | storageClass: longhorn 32 | size: 1Gi 33 | resources: 34 | requests: 35 | cpu: 26m 36 | memory: 100Mi 37 | limits: 38 | memory: 100Mi -------------------------------------------------------------------------------- /cluster/apps/networking/metallb/ip-address-pool.yaml: -------------------------------------------------------------------------------- 1 | # https://metallb.universe.tf/configuration/ 2 | apiVersion: metallb.io/v1beta1 3 | kind: IPAddressPool 4 | metadata: 5 | # A name for the address pool. Services can request allocation 6 | # from a specific address pool using this name, by listing this 7 | # name under the 'metallb.universe.tf/address-pool' annotation. 8 | name: metallb-cluster-pool 9 | namespace: networking 10 | spec: 11 | # A list of IP address ranges over which MetalLB has 12 | # authority. You can list multiple ranges in a single pool, they 13 | # will all share the same settings. Each range can be either a 14 | # CIDR prefix, or an explicit start-end range of IPs. 15 | addresses: 16 | - 192.168.1.240-192.168.1.249 17 | --- 18 | # Protocol can be used to select how the announcement is done. 19 | # Setting no IPAddressPool selector in an L2Advertisement instance is interpreted as that instance being associated to all the IPAddressPools available. 20 | apiVersion: metallb.io/v1beta1 21 | kind: L2Advertisement 22 | metadata: 23 | name: metallb-l2advertiser 24 | namespace: networking -------------------------------------------------------------------------------- /cluster/apps/default/changedetection/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: changedetection-ingress 5 | namespace: default 6 | annotations: 7 | nginx.ingress.kubernetes.io/ssl-redirect: "true" 8 | nginx.ingress.kubernetes.io/proxy-read-timeout: "1800" 9 | nginx.ingress.kubernetes.io/proxy-send-timeout: "1800" 10 | nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.networking.svc.cluster.local/oauth2/auth" 11 | nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_DOMAIN}/oauth2/sign_in" 12 | external-dns/is-public: "true" 13 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 14 | spec: 15 | ingressClassName: "external" 16 | rules: 17 | - host: "changedetection.${SECRET_DOMAIN}" 18 | http: 19 | paths: 20 | - pathType: Prefix 21 | path: / 22 | backend: 23 | service: 24 | name: changedetection-svc 25 | port: 26 | number: 5000 27 | tls: 28 | - hosts: 29 | - "changedetection.${SECRET_DOMAIN}" -------------------------------------------------------------------------------- /cluster/base/flux-system-extras/monitoring/flux-podmonitors.yaml: -------------------------------------------------------------------------------- 1 | # Needed to be able to make kube-prometheus-stack scrape flux controller 2 | # See https://fluxcd.io/flux/monitoring/metrics/#monitoring-setup 3 | apiVersion: monitoring.coreos.com/v1 4 | kind: PodMonitor 5 | metadata: 6 | name: flux-system 7 | namespace: flux-system 8 | labels: 9 | app.kubernetes.io/part-of: flux 10 | app.kubernetes.io/component: monitoring 11 | spec: 12 | namespaceSelector: 13 | matchNames: 14 | - flux-system 15 | selector: 16 | matchExpressions: 17 | - key: app 18 | operator: In 19 | values: 20 | - helm-controller 21 | - source-controller 22 | - kustomize-controller 23 | - notification-controller 24 | - image-automation-controller 25 | - image-reflector-controller 26 | podMetricsEndpoints: 27 | - port: 
http-prom 28 | relabelings: 29 | # https://github.com/prometheus-operator/prometheus-operator/issues/4816 30 | - sourceLabels: [ __meta_kubernetes_pod_phase ] 31 | action: keep 32 | regex: Running -------------------------------------------------------------------------------- /cluster/apps/default/paperless/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: paperless-data-pvc 6 | namespace: default 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | storageClassName: longhorn 11 | resources: 12 | requests: 13 | storage: 300Mi 14 | --- 15 | apiVersion: v1 16 | kind: PersistentVolumeClaim 17 | metadata: 18 | name: paperless-media-pvc 19 | namespace: default 20 | spec: 21 | accessModes: 22 | - ReadWriteOnce 23 | storageClassName: longhorn 24 | resources: 25 | requests: 26 | storage: 200Mi 27 | --- 28 | apiVersion: v1 29 | kind: PersistentVolumeClaim 30 | metadata: 31 | name: paperless-consume-pvc 32 | namespace: default 33 | spec: 34 | accessModes: 35 | - ReadWriteOnce 36 | storageClassName: longhorn 37 | resources: 38 | requests: 39 | storage: 200Mi 40 | --- 41 | apiVersion: v1 42 | kind: PersistentVolumeClaim 43 | metadata: 44 | name: paperless-export-pvc 45 | namespace: default 46 | spec: 47 | accessModes: 48 | - ReadWriteOnce 49 | storageClassName: longhorn 50 | resources: 51 | requests: 52 | storage: 300Mi 53 | -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/wazuh-dashboard/certificate.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: cert-manager.io/v1 2 | #kind: Certificate 3 | #metadata: 4 | # name: wazuh-dashboard-tls-certificate 5 | # namespace: wazuh-system 6 | #spec: 7 | # secretName: wazuh-dashboard-tls 8 | # issuerRef: 9 | # name: letsencrypt-production 10 | # kind: ClusterIssuer 11 | # commonName: "wazuh-dashboard.${SECRET_DOMAIN}" 12 | # dnsNames: 13 | # - "wazuh-dashboard.${SECRET_DOMAIN}" 14 | #--- 15 | ## Source: api/templates/virtual_service.yaml 16 | #apiVersion: cert-manager.io/v1 17 | #kind: Certificate 18 | #metadata: 19 | # name: dashboard 20 | # namespace: wazuh-system 21 | #spec: 22 | # issuerRef: 23 | # group: cert-manager.io 24 | # kind: Issuer 25 | # name: wazuh-issuer 26 | # secretName: dashboard-tls 27 | # commonName: dashboard 28 | # dnsNames: 29 | # - "dashboard" 30 | # usages: 31 | # - "signing" 32 | # - "key encipherment" 33 | # - "server auth" 34 | # - "client auth" 35 | # - digital signature 36 | # duration: 2160h 37 | # renewBefore: 360h 38 | # isCA: false 39 | # privateKey: 40 | # algorithm: RSA 41 | # encoding: PKCS1 42 | # size: 2048 43 | # rotationPolicy: Always -------------------------------------------------------------------------------- /cluster/apps/cert-manager/cert-manager/cert-manager-letsencrypt.yaml: -------------------------------------------------------------------------------- 1 | # This manifest will give a kustomization error if the cert-manager crds are not intalled before hand 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-staging 6 | spec: 7 | acme: 8 | server: https://acme-staging-v02.api.letsencrypt.org/directory 9 | email: "${SECRET_EMAIL}" 10 | privateKeySecretRef: 11 | name: letsencrypt-staging 12 | solvers: 13 | - dns01: 14 | cloudflare: 15 | email: "${SECRET_EMAIL}" 16 | apiTokenSecretRef: 17 | name: cloudflare-token-secret 18 | key: 
cloudflare-token 19 | --- 20 | apiVersion: cert-manager.io/v1 21 | kind: ClusterIssuer 22 | metadata: 23 | name: letsencrypt-production 24 | spec: 25 | acme: 26 | server: https://acme-v02.api.letsencrypt.org/directory 27 | email: "${SECRET_EMAIL}" 28 | privateKeySecretRef: 29 | name: letsencrypt-production 30 | solvers: 31 | - dns01: 32 | cloudflare: 33 | email: "${SECRET_EMAIL}" 34 | apiTokenSecretRef: 35 | name: cloudflare-token-secret 36 | key: cloudflare-token 37 | -------------------------------------------------------------------------------- /cluster/apps/default/changedetection/browserless-chrome/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: browserless-chrome 5 | namespace: default 6 | labels: 7 | app: browserless-chrome 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | app: browserless-chrome 15 | template: 16 | metadata: 17 | labels: 18 | app: browserless-chrome 19 | spec: 20 | containers: 21 | - name: playwright 22 | image: ghcr.io/browserless/chrome:v2.38.2 23 | imagePullPolicy: IfNotPresent 24 | ports: 25 | - containerPort: 3000 26 | resources: 27 | requests: 28 | cpu: 500m 29 | memory: 1Gi 30 | limits: 31 | cpu: 1000m 32 | memory: 2Gi 33 | # Disallow running on arm64 34 | affinity: 35 | nodeAffinity: 36 | requiredDuringSchedulingIgnoredDuringExecution: 37 | nodeSelectorTerms: 38 | - matchExpressions: 39 | - key: kubernetes.io/arch 40 | operator: NotIn 41 | values: 42 | - arm64 -------------------------------------------------------------------------------- /cluster/apps/default/plex/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: plex-csi-config-pv 5 | namespace: default 6 | spec: 7 | capacity: 8 | storage: 5Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | persistentVolumeReclaimPolicy: Delete 13 | storageClassName: longhorn 14 | csi: 15 | driver: driver.longhorn.io 16 | fsType: ext4 17 | volumeAttributes: 18 | numberOfReplicas: '2' 19 | staleReplicaTimeout: '2880' 20 | # Need to create longhorn volume from UI 21 | # Will probably need to change permissions on the config folder using chmod 777 /config 22 | # See https://github.com/longhorn/longhorn/issues/475 23 | 24 | # Create a new volume in longhorn's UI with the following name (Don't forget to set it as a block storage). 
25 | # After that, everything should be taken care of automatically 26 | volumeHandle: plex-config-csi 27 | --- 28 | apiVersion: v1 29 | kind: PersistentVolumeClaim 30 | metadata: 31 | name: plex-csi-config-pvc 32 | namespace: default 33 | spec: 34 | accessModes: 35 | - ReadWriteOnce 36 | resources: 37 | requests: 38 | storage: 5Gi 39 | volumeName: plex-csi-config-pv 40 | storageClassName: longhorn -------------------------------------------------------------------------------- /docs/miscellaneous/cloudflare_ddns_not_working_with_openwrt.md: -------------------------------------------------------------------------------- 1 | # Cloudflare DDNS not working with OpenWRT based routers 2 | 3 | Solution to the problem can be 4 | found [in this forum post](https://community.cloudflare.com/t/ddns-api-not-working/22409 ) 5 | 6 | TLDR - Should be `ip@domain.com` rather than `ip.domain.com` 7 | 8 | ## `proxied` parameter not working when using [Gargoyle](https://www.gargoyle-router.com/) 9 | 10 | Using [Gargoyle](https://www.gargoyle-router.com/) v1.12.0, you'll encounter another issue, the DDNS record won't be 11 | proxied. This is because the cloudflare-dns script doesn't send the `proxied` parameter which defaults to `false` 12 | 13 | To fix this, we're going to have to edit the Cloudflare DDNS script 14 | 15 | ```bash 16 | # ssh into gargoyle router 17 | ssh root@192.168.1.1 -i gargoyle 18 | 19 | # edit cloudflare ddns script with vim 20 | vim /plugin_root/usr/lib/ddns-gargoyle/cloudflare-ddns-helper.sh 21 | 22 | # Go down to the end of the file and you should find this line 23 | {"id":"$ZONEID","type":"A","name":"$HOST","content":"$LOCAL_IP"} 24 | 25 | # Add to it the proxied parameter so that it would be like this 26 | {"id":"$ZONEID","type":"A","name":"$HOST","content":"$LOCAL_IP","proxied":true} 27 | ``` -------------------------------------------------------------------------------- /cluster/apps/default/syncthing/syncthing-helm-release.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: syncthing 5 | namespace: default 6 | spec: 7 | interval: 5m 8 | chart: 9 | spec: 10 | # renovate: registryUrl=https://k8s-at-home.com/charts/ 11 | chart: syncthing 12 | version: 3.5.2 13 | sourceRef: 14 | kind: HelmRepository 15 | name: k8s-at-home-charts 16 | namespace: flux-system 17 | interval: 5m 18 | values: 19 | # https://github.com/k8s-at-home/charts/blob/master/charts/stable/syncthing/values.yaml 20 | image: 21 | repository: syncthing/syncthing 22 | tag: 2.0.12 23 | 24 | ingress: 25 | main: 26 | enabled: true 27 | ingressClassName: "internal" 28 | annotations: 29 | external-dns/is-public: "false" 30 | hosts: 31 | - host: "syncthing.${SECRET_DOMAIN}" 32 | paths: 33 | - path: / 34 | pathType: Prefix 35 | tls: 36 | - hosts: 37 | - "syncthing.${SECRET_DOMAIN}" 38 | 39 | persistence: 40 | data: 41 | enabled: true 42 | existingClaim: syncthing-pvc 43 | mountPath: /var/syncthing 44 | -------------------------------------------------------------------------------- /cluster/apps/vpn/wstunnel/deployment.yaml: -------------------------------------------------------------------------------- 1 | ## TODO: Replaace this with a shadowsocks server 2 | #apiVersion: apps/v1 3 | #kind: Deployment 4 | #metadata: 5 | # name: wstunnel-deployment 6 | # namespace: vpn 7 | # labels: 8 | # app: wstunnel 9 | #spec: 10 | # replicas: 1 11 | # strategy: 12 | # type: Recreate 13 | # selector: 14 | # 
matchLabels: 15 | # app: wstunnel 16 | # template: 17 | # metadata: 18 | # labels: 19 | # app: wstunnel 20 | # spec: 21 | # containers: 22 | # - name: wstunnel 23 | # image: ghcr.io/erebe/wstunnel:v10.1.11 24 | # imagePullPolicy: IfNotPresent 25 | # # Got this from source code's dockerfile 26 | # command: ["/bin/sh", "-c", "exec /home/app/wstunnel server ws://[::]:48513 --restrict-to wg-svc:51820"] 27 | # ports: 28 | # - containerPort: 48513 29 | # imagePullSecrets: 30 | # - name: regcred 31 | ## affinity: 32 | ## nodeAffinity: 33 | ## requiredDuringSchedulingIgnoredDuringExecution: 34 | ## nodeSelectorTerms: 35 | ## - matchExpressions: 36 | ## - key: beta.kubernetes.io/arch 37 | ## operator: In 38 | ## values: 39 | ## - arm64 -------------------------------------------------------------------------------- /docs/_static/custom.css: -------------------------------------------------------------------------------- 1 | [data-md-color-scheme="default"] { 2 | --md-typeset-a-color: var(--md-accent-fg-color) 3 | } 4 | 5 | /* 6 | text rgb(167, 167, 168) 7 | background rgb(33, 33, 33) 8 | */ 9 | [data-md-color-scheme="slate"] { 10 | 11 | --md-hue: 0; 12 | 13 | --md-default-fg-color: hsla(var(--md-hue), 0%, 80%, 1); 14 | --md-default-fg-color--light: hsla(var(--md-hue), 0%, 80%, 0.62); 15 | --md-default-fg-color--lighter: hsla(var(--md-hue), 0%, 80%, 0.32); 16 | --md-default-fg-color--lightest: hsla(var(--md-hue), 0%, 80%, 0.12); 17 | 18 | 19 | --md-default-bg-color: hsla(var(--md-hue), 0%, 12%, 1); 20 | --md-default-bg-color--light: hsla(var(--md-hue), 0%, 12%, 0.54); 21 | --md-default-bg-color--lighter: hsla(var(--md-hue), 0%, 12%, 0.26); 22 | --md-default-bg-color--lightest: hsla(var(--md-hue), 0%, 12%, 0.07); 23 | 24 | /* Code Blocks Color Shades */ 25 | --md-code-fg-color: rgb(191, 199, 213); 26 | --md-code-bg-color: hsl(0, 0%, 18%); 27 | 28 | --md-typeset-mark-color: hsla(13, 74%, 100%, 0.3); 29 | 30 | --md-typeset-a-color: var(--md-accent-fg-color) !important; 31 | 32 | /* Footer Color Shades */ 33 | --md-footer-bg-color: hsla(var(--md-hue), 0%, 12%, 0.87); 34 | --md-footer-bg-color--dark: hsla(var(--md-hue), 0%, 10%, 1); 35 | } -------------------------------------------------------------------------------- /cluster/apps/monitoring/promtail/promtail-helm-release.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: promtail 5 | namespace: monitoring 6 | spec: 7 | interval: 5m 8 | chart: 9 | spec: 10 | # renovate: registryUrl=https://grafana.github.io/helm-charts 11 | chart: promtail 12 | version: 6.17.1 13 | sourceRef: 14 | kind: HelmRepository 15 | name: grafana-charts 16 | namespace: flux-system 17 | interval: 5m 18 | values: 19 | config: 20 | lokiAddress: http://loki-gateway/loki/api/v1/push 21 | # TODO: Look into syslog 22 | # extraScrapeConfigs: 23 | # - job_name: syslog 24 | # syslog: 25 | # listen_address: 0.0.0.0:1514 26 | # label_structured_data: true 27 | # labels: 28 | # job: "syslog" 29 | # relabel_configs: 30 | # - source_labels: ['__syslog_message_hostname'] 31 | # target_label: 'host' 32 | # - source_labels: ['__syslog_message_app_name'] 33 | # target_label: 'app' 34 | # syslogService: 35 | # enabled: true 36 | # type: LoadBalancer 37 | # port: 1514 38 | # loadBalancerIP: 10.0.6.51 39 | # Enable when prometheus is enabled 40 | # serviceMonitor: 41 | # enabled: true 42 | 43 | -------------------------------------------------------------------------------- 
/cluster/apps/default/paperless/redis-helm-release.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s-labs/helm-charts/app-template-4.1.1/charts/other/app-template/values.schema.json 2 | 3 | apiVersion: helm.toolkit.fluxcd.io/v2 4 | kind: HelmRelease 5 | metadata: 6 | name: paperless-redis 7 | namespace: default 8 | spec: 9 | interval: 15m 10 | chart: 11 | spec: 12 | # renovate: registryUrl=https://bjw-s-labs.github.io/helm-charts 13 | chart: app-template 14 | version: 4.5.0 15 | interval: 15m 16 | sourceRef: 17 | kind: HelmRepository 18 | name: bjw-s 19 | namespace: flux-system 20 | 21 | values: 22 | controllers: 23 | main: 24 | containers: 25 | main: 26 | image: 27 | repository: docker.io/library/redis 28 | tag: 8.4.0 29 | 30 | resources: 31 | requests: 32 | cpu: 23m 33 | memory: 64M 34 | limits: 35 | memory: 64M 36 | 37 | service: 38 | main: 39 | controller: main 40 | ports: 41 | http: 42 | enabled: false 43 | port: 8080 # Not really the port but just want schema validation to pass 44 | redis: 45 | enabled: true 46 | port: 6379 -------------------------------------------------------------------------------- /cluster/apps/kube-system/kured/kured-helmrelease.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2 3 | kind: HelmRelease 4 | metadata: 5 | name: kured 6 | namespace: kube-system 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | # renovate: registryUrl=https://kubereboot.github.io/charts 12 | chart: kured 13 | version: 5.10.0 14 | sourceRef: 15 | kind: HelmRepository 16 | name: kubereboot-charts 17 | namespace: flux-system 18 | interval: 10m 19 | install: 20 | timeout: 10m 21 | replace: true 22 | crds: CreateReplace 23 | remediation: 24 | retries: 3 25 | upgrade: 26 | remediation: 27 | remediateLastFailure: true 28 | retries: 3 29 | strategy: rollback 30 | cleanupOnFail: true 31 | crds: CreateReplace 32 | test: 33 | enable: true 34 | rollback: 35 | recreate: true 36 | force: true 37 | cleanupOnFail: true 38 | uninstall: 39 | keepHistory: false 40 | maxHistory: 3 41 | values: 42 | configuration: 43 | startTime: "3:00" 44 | endTime: "6:00" 45 | timeZone: "Africa/Cairo" 46 | rebootDays: 47 | - fr 48 | - sa 49 | rebootCommand: "/usr/bin/systemctl reboot" 50 | metrics: 51 | create: true 52 | service: 53 | create: true 54 | valuesFrom: 55 | - targetPath: configuration.notifyUrl 56 | kind: Secret 57 | name: kured-secret 58 | valuesKey: NOTIFY_URL -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/mb-scheduler-backend/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: mb-scheduler-backend-nginx-conf 5 | namespace: mb-scheduler 6 | data: 7 | default.conf: |- 8 | upstream mb-scheduler-backend { 9 | server 127.0.0.1:8000; 10 | } 11 | 12 | server { 13 | listen 80; 14 | listen [::]:80; 15 | server_name _; 16 | 17 | # Increase timeout to 5 minutes 18 | proxy_read_timeout 300; 19 | proxy_connect_timeout 300; 20 | proxy_send_timeout 300; 21 | 22 | client_max_body_size 0; 23 | 24 | location /static { 25 | #autoindex on; 26 | alias /static/; 27 | } 28 | 29 | location / { 30 | try_files $uri @proxy_to_app; 31 | } 32 | 33 | location @proxy_to_app { 34 | proxy_pass http://mb-scheduler-backend; 35 | 36 | proxy_http_version 1.1; 37 | 
proxy_set_header Upgrade $http_upgrade; 38 | proxy_set_header Connection "upgrade"; 39 | 40 | proxy_redirect off; 41 | proxy_set_header Host $host; 42 | proxy_set_header X-Real-IP $remote_addr; 43 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 44 | proxy_set_header X-Forwarded-Host $server_name; 45 | } 46 | 47 | error_page 497 https://$host:$server_port$request_uri; 48 | } 49 | 50 | -------------------------------------------------------------------------------- /cluster/base/apps.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.toolkit.fluxcd.io/v1 2 | kind: Kustomization 3 | metadata: 4 | name: apps 5 | namespace: flux-system 6 | spec: 7 | interval: 10m0s 8 | dependsOn: 9 | - name: crds 10 | path: ./cluster/apps 11 | prune: true 12 | sourceRef: 13 | kind: GitRepository 14 | name: flux-system 15 | decryption: 16 | provider: sops 17 | secretRef: 18 | name: sops-gpg 19 | postBuild: 20 | substitute: { } 21 | substituteFrom: 22 | # - kind: ConfigMap 23 | # name: cluster-settings 24 | - kind: Secret 25 | name: cluster-secrets 26 | # Needed so other kustomizations have these added to them 27 | # https://github.com/fluxcd/kustomize-controller/issues/707 28 | # https://fluxcd.io/flux/components/kustomize/kustomizations/#patches 29 | # DIDN'T WORK FOR WHATEVER REASON 30 | # patches: 31 | # - patch: |- 32 | # apiVersion: kustomize.toolkit.fluxcd.io/v1 33 | # kind: Kustomization 34 | # metadata: 35 | # name: not-used 36 | # spec: 37 | # decryption: 38 | # provider: sops 39 | # secretRef: 40 | # name: sops-gpg 41 | # postBuild: 42 | # substituteFrom: 43 | # - kind: Secret 44 | # name: cluster-secrets 45 | # target: 46 | # group: kustomize.toolkit.fluxcd.io 47 | # kind: Kustomization 48 | # labelSelector: substitution.flux.home.arpa/disabled notin (true) 49 | -------------------------------------------------------------------------------- /cluster/apps/default/changedetection/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: changedetection 5 | namespace: default 6 | labels: 7 | app: changedetection 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | app: changedetection 15 | template: 16 | metadata: 17 | labels: 18 | app: changedetection 19 | spec: 20 | containers: 21 | - name: changedetection 22 | image: ghcr.io/dgtlmoon/changedetection.io:0.51.4 23 | imagePullPolicy: IfNotPresent 24 | env: 25 | - name: PLAYWRIGHT_DRIVER_URL 26 | value: 'ws://browserless-chrome-svc:3000/chrome?launch={"defaultViewport":{"height":720,"width":1280},"headless":false,"stealth":true}&blockAds=true' 27 | - name: BASE_URL 28 | value: "changedetection.${SECRET_DOMAIN}" 29 | - name: HIDE_REFERER 30 | value: "true" # Hide referer when visiting sites 31 | ports: 32 | - containerPort: 5000 33 | volumeMounts: 34 | - mountPath: "/datastore" 35 | name: changedetection-data-volume 36 | resources: 37 | requests: 38 | cpu: 100m 39 | memory: 64Mi 40 | limits: 41 | cpu: 400m 42 | memory: 512Mi 43 | volumes: 44 | - name: changedetection-data-volume 45 | persistentVolumeClaim: 46 | claimName: changedetection-pvc -------------------------------------------------------------------------------- /cluster/apps/vpn/wireguard/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: wg-deployment 5 
| namespace: vpn 6 | labels: 7 | app: wireguard 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | app: wireguard 15 | template: 16 | metadata: 17 | labels: 18 | app: wireguard 19 | spec: 20 | containers: 21 | - name: wireguard 22 | image: ghcr.io/linuxserver/wireguard:version-v1.0.20210914 23 | imagePullPolicy: IfNotPresent 24 | securityContext: 25 | capabilities: 26 | add: [ "NET_ADMIN", "SYS_MODULE" ] 27 | ports: 28 | - name: wireguard 29 | protocol: UDP 30 | containerPort: 51820 31 | env: 32 | - name: SERVERURL 33 | value: 0.0.0.0 34 | - name: TZ 35 | value: Africa/Cairo 36 | - name: SERVERPORT 37 | value: "51820" 38 | - name: PEERS 39 | value: Laptop 40 | volumeMounts: 41 | - mountPath: "/config" 42 | name: wg-volume 43 | - mountPath: "/lib/modules" 44 | name: lib-modules-volume 45 | volumes: 46 | - name: wg-volume 47 | persistentVolumeClaim: 48 | claimName: wg-conf-pvc 49 | - name: lib-modules-volume 50 | hostPath: 51 | path: /lib/modules 52 | type: Directory 53 | 54 | 55 | -------------------------------------------------------------------------------- /cluster/apps/default/profilarr/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: profilarr 5 | namespace: default 6 | labels: 7 | app: profilarr 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | app: profilarr 15 | template: 16 | metadata: 17 | labels: 18 | app: profilarr 19 | spec: 20 | containers: 21 | - name: profilarr 22 | image: santiagosayshey/profilarr:v1.1.3 23 | imagePullPolicy: IfNotPresent 24 | ports: 25 | - containerPort: 6868 26 | name: http 27 | env: 28 | - name: TZ 29 | value: "Africa/Cairo" 30 | - name: URL_BASE 31 | value: "" 32 | - name: BASE_URL 33 | value: "https://profilarr.${SECRET_DOMAIN}" 34 | volumeMounts: 35 | - mountPath: "/config" 36 | name: profilarr-data-volume 37 | resources: 38 | requests: 39 | cpu: 10m 40 | memory: 100Mi 41 | limits: 42 | memory: 200Mi 43 | # livenessProbe: 44 | # httpGet: 45 | # path: / 46 | # port: http 47 | # readinessProbe: 48 | # httpGet: 49 | # path: / 50 | # port: http 51 | volumes: 52 | - name: profilarr-data-volume 53 | persistentVolumeClaim: 54 | claimName: profilarr-pvc -------------------------------------------------------------------------------- /.github/workflows/flux-schedule.yaml: -------------------------------------------------------------------------------- 1 | name: Schedule - Update Flux 2 | 3 | on: 4 | workflow_dispatch: 5 | schedule: 6 | - cron: '0 */12 * * *' 7 | 8 | jobs: 9 | flux-upgrade: 10 | runs-on: ubuntu-24.04 11 | steps: 12 | - uses: actions/checkout@v6 13 | with: 14 | fetch-depth: 1 15 | 16 | - name: Setup Flux CLI 17 | uses: fluxcd/flux2/action@main 18 | 19 | - name: Upgrade Flux 20 | id: upgrade 21 | run: | 22 | UGLY_VERSION="$(flux -v)" 23 | VERSION="v${UGLY_VERSION#*flux version }" 24 | flux install --version="${VERSION}" \ 25 | --network-policy=false \ 26 | --export > ./cluster/base/flux-system/gotk-components.yaml 27 | echo "::set-output name=flux_version::$VERSION" 28 | - name: Create pull request for Flux upgrade 29 | uses: peter-evans/create-pull-request@v8 30 | with: 31 | token: ${{ secrets.GITHUB_TOKEN }} 32 | branch: "flux/upgrade-${{ steps.upgrade.outputs.flux_version }}" 33 | delete-branch: true 34 | title: "chore(deps): upgrade flux components to ${{ steps.upgrade.outputs.flux_version }}" 35 | signoff: true 36 | committer: 
"Shady Fanous " 37 | author: "Shady Fanous " 38 | assignees: "shadyf" 39 | commit-message: "chore(deps): upgrade flux components to ${{ steps.upgrade.outputs.flux_version }}" 40 | body: | 41 | Release notes: https://github.com/fluxcd/flux2/releases/tag/${{ steps.upgrade.outputs.flux_version }} 42 | labels: flux/upgrade -------------------------------------------------------------------------------- /cluster/apps/vpn/v2ray/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: v2ray-deployment 5 | namespace: vpn 6 | labels: 7 | app: v2ray 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | app: v2ray 15 | template: 16 | metadata: 17 | labels: 18 | app: v2ray 19 | spec: 20 | containers: 21 | - name: v2ray 22 | image: v2fly/v2fly-core:v5.41.0 23 | imagePullPolicy: IfNotPresent 24 | securityContext: 25 | runAsUser: 1000 26 | runAsGroup: 1000 27 | allowPrivilegeEscalation: false 28 | capabilities: 29 | drop: 30 | - ALL 31 | readOnlyRootFilesystem: true 32 | ports: 33 | - name: tcp 34 | protocol: TCP 35 | containerPort: 10086 36 | env: 37 | - name: TZ 38 | value: Africa/Cairo 39 | volumeMounts: 40 | - mountPath: "/etc/v2ray/config.json" 41 | name: v2ray-config 42 | subPath: config.json 43 | - mountPath: "/tmp" 44 | name: tmp-volume 45 | command: ["/usr/bin/v2ray", "run", "-c", "/etc/v2ray/config.json"] 46 | resources: 47 | limits: 48 | memory: "256Mi" 49 | cpu: "200m" 50 | requests: 51 | memory: "128Mi" 52 | cpu: "100m" 53 | volumes: 54 | - name: v2ray-config 55 | configMap: 56 | name: v2ray-server-config 57 | - name: tmp-volume 58 | emptyDir: {} -------------------------------------------------------------------------------- /docs/cluster_setup/installing_k3s.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Installing k3s 3 | --- 4 | 5 | # Installing k3s 6 | 7 | [k3s](https://rancher.com/docs/k3s/latest/en/) is a lightweight version of Kubernetes, meant to be used on edge devices, 8 | in CI, ARM boards and so on. 9 | 10 | It's basically a single binary that contains everything to get kubernetes up and running. 11 | 12 | To help us with installing `k3s`, we're going to be using a utility package 13 | called [k3sup](https://github.com/alexellis/k3sup) 14 | 15 | ## Creating your cluster 16 | 17 | Getting your cluster up and running is as simple as running this command from your local machine 18 | 19 | ```sh 20 | k3sup install --ip --user master --local-path ~/.kube/config --merge --context homelab --ssh-key --k3s-channel latest --no-extras 21 | ``` 22 | 23 | This will command will attempt to create a k3s master node on the machine you pointed to (via the `--ip` flag). After 24 | that's done, the newly created cluster will be added to your `KUBECONFIG`, which would could then switch on over to 25 | using `kubectl config use-context homelab` 26 | 27 | If you just want to get the kubeconfig without creating the cluster again, simply add the `--skip-install` flag at the 28 | end of the command above. 29 | 30 | ## Adding nodes to your cluster 31 | 32 | Once you've created your master node, it's time to add some workers! 33 | 34 | Run the following from your local machine 35 | 36 | !!! warning Not 100% sure whether `ssh-key` in below command should be the ssh key of the master node or the worker 37 | node. 
38 | 39 | ```sh 40 | k3sup join --ip --server-ip --user master --ssh-key --k3s-channel latest 41 | ``` -------------------------------------------------------------------------------- /docs/miscellaneous/tips_and_tricks.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: Tips and Tricks 3 | --- 4 | 5 | Here are some tips and tricks that might be of use. 6 | 7 | ## Change kubectl context 8 | 9 | ```shell 10 | kubectl config use-context homelab 11 | ``` 12 | 13 | ## Change default shell 14 | 15 | ```shell 16 | chsh --shell /usr/bin/fish 17 | ``` 18 | 19 | ## Retrieve Raspberry pi's CPU temp in Ubuntu 20 | 21 | ```shell 22 | cat /sys/class/thermal/thermal_zone0/temp 23 | ``` 24 | 25 | ## Debug pod stuck in crashloop 26 | 27 | Force the pod to run the sleep command rather than what it has as an entrypoint, allowing you to SSH into it and debug 28 | what's going on 29 | 30 | Add the following to your pod's definition 31 | 32 | ```yaml 33 | command: [ 'sleep' ] 34 | args: [ 'infinity' ] 35 | ``` 36 | 37 | ## Exporting GPG key from one machine to another 38 | 39 | Follow the steps provided 40 | in [this guide](https://makandracards.com/makandra-orga/37763-gpg-extract-private-key-and-import-on-different-machine) 41 | 42 | ## Creating a docker registry secret yaml file 43 | 44 | ```bash 45 | kubectl create secret docker-registry regcred --docker-server="https://index.docker.io/v1/" --docker-username= --docker-password= --docker-email= --dry-run=client -oyaml > regcred.yaml 46 | ``` 47 | 48 | ## Remove useless ubuntu stuff 49 | 50 | ```bash 51 | # Remove snapd, takes up CPU and not needed on a kube node 52 | sudo apt autoremove --purge snapd 53 | ``` 54 | 55 | ## To see the data created in a longhorn volume 56 | 57 | 1. use `lsblk -f` or `df -H` to find your desired PVC path 58 | 2. 
`cd` into it 59 | 60 | ## Setting up log2ram to reduce SD card strain / SSD writes 61 | https://github.com/azlux/log2ram -------------------------------------------------------------------------------- /cluster/apps/monitoring/ntfy-alertmanager/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: ntfy-alertmanager 5 | namespace: monitoring 6 | labels: 7 | app: ntfy-alertmanager 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | app: ntfy-alertmanager 15 | template: 16 | metadata: 17 | labels: 18 | app: ntfy-alertmanager 19 | spec: 20 | containers: 21 | - name: main 22 | image: xenrox/ntfy-alertmanager:0.5.0 23 | imagePullPolicy: IfNotPresent 24 | ports: 25 | - name: http 26 | protocol: TCP 27 | containerPort: 8080 28 | # livenessProbe: 29 | # httpGet: 30 | # path: / 31 | # port: http 32 | # readinessProbe: 33 | # httpGet: 34 | # path: / 35 | # port: http 36 | volumeMounts: 37 | - name: config 38 | mountPath: "/etc/ntfy-alertmanager" 39 | readOnly: true 40 | resources: 41 | requests: 42 | cpu: 5m 43 | memory: 50Mi 44 | limits: 45 | memory: 50Mi 46 | affinity: 47 | nodeAffinity: 48 | requiredDuringSchedulingIgnoredDuringExecution: 49 | nodeSelectorTerms: 50 | - matchExpressions: 51 | - key: kubernetes.io/arch 52 | operator: In 53 | values: 54 | - amd64 55 | volumes: 56 | - name: config 57 | secret: 58 | secretName: ntfy-alertmanager-secrets 59 | 60 | 61 | -------------------------------------------------------------------------------- /cluster/apps/networking/external-dns/external-dns-helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2 3 | kind: HelmRelease 4 | metadata: 5 | name: external-dns 6 | namespace: networking 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | # renovate: registryUrl=https://kubernetes-sigs.github.io/external-dns/ 12 | chart: external-dns 13 | version: 1.19.0 14 | sourceRef: 15 | kind: HelmRepository 16 | name: external-dns-charts 17 | namespace: flux-system 18 | interval: 5m 19 | # https://github.com/kubernetes-sigs/external-dns/tree/master/charts/external-dns 20 | values: 21 | logLevel: debug 22 | domainFilters: 23 | - "${SECRET_DOMAIN}" 24 | sources: 25 | - ingress 26 | provider: 27 | name: cloudflare 28 | env: 29 | - name: CF_API_KEY 30 | valueFrom: 31 | secretKeyRef: 32 | name: cloudflare-api-key 33 | key: cloudflare_api_key 34 | - name: CF_API_EMAIL 35 | value: "${SECRET_EMAIL}" 36 | policy: sync 37 | txtOwnerId: default 38 | txtPrefix: "k8s." 
39 | annotationFilter: "external-dns/is-public in (true)" 40 | resources: 41 | requests: 42 | cpu: 10m 43 | memory: 100Mi 44 | limits: 45 | memory: 100Mi 46 | serviceMonitor: 47 | enabled: true 48 | extraArgs: 49 | - "--cloudflare-proxied" 50 | # affinity: 51 | # nodeAffinity: 52 | # requiredDuringSchedulingIgnoredDuringExecution: 53 | # nodeSelectorTerms: 54 | # - matchExpressions: 55 | # - key: beta.kubernetes.io/arch 56 | # operator: In 57 | # values: 58 | # - arm64 -------------------------------------------------------------------------------- /cluster/apps/default/plex/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: plex-secrets 5 | namespace: default 6 | stringData: 7 | PLEX_CLAIM: ENC[AES256_GCM,data:lcd4tLihCHYF9evXmCpYEiSKyodN4kioxQ8=,iv:2pnIclGbVMMAKGD19zKk7foWxEbSGNQwR1fFgA1cO8k=,tag:AAW/HG3B6ZH0qSvWCEAYHw==,type:str] 8 | sops: 9 | lastmodified: "2025-09-15T21:12:14Z" 10 | mac: ENC[AES256_GCM,data:24pdz0JuhMIE9aLwJfUaGkuA//ZTROUje/3VKB780/a34A5joHPJ5nLyMYNl0Jy5hN4TKqBuIxrHzNUAXO1Sp67sZo+dlQIKjb48F6sL5UvjchcstmoRzJF6oEOweryMxJ6/mcdvfd8Bsxu5QHtp45+rkclMipA/jm0Vct5FOT8=,iv:XpDyKSfIv7GARHnTYCQ/fxXHYwhnlazQBc2QBjUPWuw=,tag:H4hodyBBLXWkH8xv5yNjRQ==,type:str] 11 | pgp: 12 | - created_at: "2025-09-15T21:12:14Z" 13 | enc: |- 14 | -----BEGIN PGP MESSAGE----- 15 | 16 | hQGMA71EESEn7N2iAQv8C5Nocw6Prbl0mfzdwLLqHkk3cDhxsiA2FKPBKk2w26Ws 17 | 4BiXyRWgNPyNspS7zJmuEDEPk6MdzThHTG6RQTT+CAs0drQtHlcBdc5fqYuNCWtl 18 | jJZP3Zu62/zHh1U9vEOuoD9Fg3vfA+dTABk7d/yLrx+aqfpvgKNCzpC003Hz/Wxn 19 | X4rCyNL2KjbuOzEb9pjCCLQZAygOECYUBtdXUx81UY/1raKjvd+iI50+fX9sk+3b 20 | H/rc69lCcWGPkir5RtzWFnJJe0gGAPp4WP0LU+FMnY1FYhB2eXWINh95NijGDWb3 21 | e2nxGL3Z5stvZ/rH54YuAB71dBSpGyAScxOJVKSZdkQ71zZ90CDxT8b1TvcSMsrA 22 | cLkL3+oDFU5ieEeAlPWUQCkjnOtPnqzzML3FIOWTH60jQ5RHd7D1/TMV5k8XqmaH 23 | VivgwVk4pjSItvwxId6mIdUORrjzCsiBulHUTZX8z2MgunaLRl7R43GYnwDlrqP0 24 | Ya3u9dyl8etaUd+STu360lwBnZ0YRgUeM4S2Y0LnpEJbUTVXsx+tdzC6ZabgLzA/ 25 | PgOlnDbR4P/eHWlaP/RcZQCJyr0MTBEwCACMgDeKTU/7gp31we2Cci9KIJeyJkbB 26 | gnAJd3Iv+GZoxT8Qvw== 27 | =nmUp 28 | -----END PGP MESSAGE----- 29 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 30 | encrypted_regex: ^(data|stringData)$ 31 | version: 3.10.2 32 | -------------------------------------------------------------------------------- /cluster/apps/default/atuin/postgresql/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: atuin-postgresql 5 | namespace: default 6 | spec: 7 | replicas: 1 8 | strategy: 9 | type: Recreate 10 | selector: 11 | matchLabels: 12 | app: atuin-postgresql 13 | template: 14 | metadata: 15 | labels: 16 | app: atuin-postgresql 17 | spec: 18 | containers: 19 | - name: postgresql 20 | image: postgres:18 21 | ports: 22 | - containerPort: 5432 23 | env: 24 | - name: POSTGRES_DB 25 | value: atuin 26 | - name: POSTGRES_PASSWORD 27 | valueFrom: 28 | secretKeyRef: 29 | name: atuin-postgresql-secrets 30 | key: ATUIN_DB_PASSWORD 31 | - name: POSTGRES_USER 32 | valueFrom: 33 | secretKeyRef: 34 | name: atuin-postgresql-secrets 35 | key: ATUIN_DB_USERNAME 36 | lifecycle: 37 | preStop: 38 | exec: 39 | # This ensures graceful shutdown see: https://stackoverflow.com/a/75829325/3437018 40 | # Potentially consider using a `StatefulSet` instead of a `Deployment` 41 | command: [ "/usr/local/bin/pg_ctl", "stop", "-D", "/var/lib/postgresql/data", "-w", "-t", "60", "-m", "fast" ] 42 | 
resources: 43 | requests: 44 | cpu: 100m 45 | memory: 100Mi 46 | limits: 47 | cpu: 250m 48 | memory: 600Mi 49 | volumeMounts: 50 | - mountPath: /var/lib/postgresql 51 | name: database 52 | volumes: 53 | - name: database 54 | persistentVolumeClaim: 55 | claimName: atuin-postgresql-data 56 | -------------------------------------------------------------------------------- /cluster/apps/default/n8n/postgresql/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: n8n-postgresql-secrets 5 | namespace: default 6 | stringData: 7 | password: ENC[AES256_GCM,data:MVuFyXgVOuvwV2V39Uru1vPZ7hywYLvzVMNN+Ia/,iv:WkWDTfr6/ts1A4DSTPwThE97SLQx816UxbwUB2HLrzM=,tag:1otMiwtEtOYNLlA1PQdufA==,type:str] 8 | sops: 9 | kms: [] 10 | gcp_kms: [] 11 | azure_kv: [] 12 | hc_vault: [] 13 | age: [] 14 | lastmodified: "2023-09-28T12:46:31Z" 15 | mac: ENC[AES256_GCM,data:vjCTPNGFp7P65wzPuGwXZSv/hEWiN7hdG6poTVIpWgfgCQWopAQxixGAyT+s6pk+0NCHTR409DI3UbNDkA2Jw53CGO0WkS0HPMGBQmxYAsJ77K38vS2+WgyzMo7UqToDTEy66DfGi15g90vARIlui0eWq//4sNM44PFENhU+SX4=,iv:KjBqWIKS8tYlcP1a/uP1fr/EUB2wuKruR577svueCtw=,tag:/JaKVyGjuJUdlaXFis/PHA==,type:str] 16 | pgp: 17 | - created_at: "2023-09-28T12:46:31Z" 18 | enc: |- 19 | -----BEGIN PGP MESSAGE----- 20 | 21 | hQGMA71EESEn7N2iAQv+LJQ2cEhsYpgS9qa81EpDy4X26B+f3DbRh+4cK12W8BZr 22 | hqfYYOBsaZI77ejmCLQfWQc04XMQTAWDz5SAlwxPfd+Nqy4Oeryx6tXn9YGEH+nc 23 | GCsp3fL0YSkLkOC0aPlA0lv9b6GgDebSPaG1XkO8wbpplGThznk7BoGpKvH5ICja 24 | eHe/qPSyZRHcWDXhKCzjVaRE2juIFPhXa8QCdNXEFBXQd3PSGIRyK/96EYtUuBcA 25 | QYtjM1CLboN7uZuqkcTurxTTU67bGlpo+qhyt9YYg33dsLDeSEL7z9J2Pat1cBGy 26 | 66hZhz+q1dYB5EpxcOWXHV/LyNo/dUDa5LdH3LHJlPyZUKxRDsgAcNHG+8mpLk3c 27 | 98Pd99nrKSgsOo/S0ZLg2mkpEy79tlIi5vjlsdhRjgi0cEWcbtSRBFO58k7h0XCy 28 | 8QxIW7hewUAVPRwoby3H9r1r+MmpC/LeoXHD3F/SJyRdy7CuJ+PL/cy7F6UMw63G 29 | Kwd0bsu5HMzXOUdySAw60lwBoTV4mFgELtf6qboESX/0bPJWrs205t+iXMmEwOTN 30 | JoAhoJToF2o5DderOHgjrsQ8ivhICH0jPGxQ78CAOb+iCgbXOkc81USqp4kJvWm8 31 | FVORavpoJcZj7JSzzQ== 32 | =/R+o 33 | -----END PGP MESSAGE----- 34 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 35 | encrypted_regex: ^(data|stringData)$ 36 | version: 3.8.0 37 | -------------------------------------------------------------------------------- /cluster/apps/default/v-rising/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: Secret 3 | #metadata: 4 | # name: v-rising-secrets 5 | # namespace: default 6 | #stringData: 7 | # V_RISING_PASSW: ENC[AES256_GCM,data:8sBNz+kiyQ==,iv:y7oSf97xreaLJfeWanqQoQ40pTLgSYcpWx0UDXIC4bs=,tag:Fph6OKTl2QVAu9FPmoKYZw==,type:str] 8 | #sops: 9 | # kms: [] 10 | # gcp_kms: [] 11 | # azure_kv: [] 12 | # hc_vault: [] 13 | # age: [] 14 | # lastmodified: "2022-05-25T19:24:22Z" 15 | # mac: ENC[AES256_GCM,data:bNDeiycjWOfetlEWUQMCBeO4qj9ibsH/lPCBiO+yajfJRO1UZs6roqM/ScWfFp7VRhQJB1d9vmlwUe+xI1UrsiqGZt9Ur8EkHEgyR/3xNQ/U/EOfOZCrkGlZ6FRAIqcLmijcD63Ep7990I8sLVsDRYBOXed/7Vpe1q4nUoKJryI=,iv:VUon5hdBIE5ZuvmE22d4glxJRzhpXYm6klO9NJ10oGk=,tag:+4MnwFIlemAJ4dzXQbzkRQ==,type:str] 16 | # pgp: 17 | # - created_at: "2022-05-25T19:24:21Z" 18 | # enc: | 19 | # -----BEGIN PGP MESSAGE----- 20 | # 21 | # hQGMA71EESEn7N2iAQv/WX1JwuoXLRF7kFieEsI5goTsXvZTdankd4Gy4AwuuUqO 22 | # dkR3WR3mQb5N5hnY1twa9zejbpmDx/Nt+qNlGFx4l2cUCL0t5k87dzyKU+n1NNTF 23 | # vB4vnMYouB6xVuKN0+4yikMWFbr24yi9HoCT8Ite0fPpCNxTfa/rRZcXMmMD5YSV 24 | # 1rHoOgpw/Y7nUyCuzmNyQ5u6gLmv9YTY5trjK69a7Zml762Quk8hjevmhXlezgIF 25 | # 
e0h7osAhE0vw8lfer6m07Vb9Z0wu+o2wP2Hdc7ESv6tax/1IwUDm1lFWSCbF0plO 26 | # ViRugj54H6wrkMWSCcSYkpTaCELKv6rk4hN5cltMLtsRZVtab+W2QZoG0/jJYDQC 27 | # 8BlTmNGyZ9LTuHRdyA9II/yAd+7tNcjA5O/vBjZWaM3MQ9zIXKY22ra0gvgSr7jC 28 | # TUNdyooxL9CXjBabXgqiE2T1zLVSGi8CKBo9EuJs7/lk+Z3Lq1bLL6SnhAl3vnPS 29 | # dgw/oqIbKUy6mP9INHFa0lwBbCoa9b81ZyfwnoExvGnScgjYxJb9JkP7IkqWMprx 30 | # Vt93hrdq+3iSnpyVAuOii04d2Bdk+R/dRNVTBfJgWy2w4ho4eGZJ/LuINAPVFZB9 31 | # TAIOrbOrXqdlbf/Viw== 32 | # =RHD7 33 | # -----END PGP MESSAGE----- 34 | # fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 35 | # encrypted_regex: ^(data|stringData)$ 36 | # version: 3.7.2 37 | -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/postgresql/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: postgresql-secrets 5 | namespace: mb-scheduler 6 | stringData: 7 | password: ENC[AES256_GCM,data:Dcn/XeylI8d+aIaehkdwdS781OsXoELMqPEP71Yu,iv:pZzUsO0JLJAG4TliswE36aK+AEUNKYfrt8ZLgv4hzhs=,tag:20wh05rGkG7q7fRDOpOeeQ==,type:str] 8 | sops: 9 | kms: [] 10 | gcp_kms: [] 11 | azure_kv: [] 12 | hc_vault: [] 13 | age: [] 14 | lastmodified: "2023-09-21T20:33:11Z" 15 | mac: ENC[AES256_GCM,data:Y+0vvPN/ZU2Tf8RZOAt2Eh2EQY4tr7gX2gsLO/hZYjwlryLlUOo+1r6K+9hTXYdh5Ud6XoKFaE0I3OALOi3ympO0u9H7YubWWJaFrlg+/+n+maqXR3+9zX/+0tKccFny+1Wdq3jxOVnGuTOFlgcPnU3mVP3qqOq5a1n+SvntkCI=,iv:73hACV2GC2Ur7ziF9wjVOad9KyGAedJDN4zHtvLdtAQ=,tag:P4fEFE9vHgV4cJ5Bs9Vo7w==,type:str] 16 | pgp: 17 | - created_at: "2023-09-21T20:33:11Z" 18 | enc: |- 19 | -----BEGIN PGP MESSAGE----- 20 | 21 | hQGMA71EESEn7N2iAQv+P/BZwA0VRbHlCmuAC6O3K3RDAdl1smAn5uN0q3ncIAvL 22 | tkyTdLkhA9yH78pauarGPXbP1iQHDW/smKo2R4FyNNdk0gc8z4B4pcoAjMe7xfvk 23 | fhwpuiUrb3Cf1BDZcezn092PqJNbbeafFCLxX/jpQankmyrbQZ+OX8aU78qErPns 24 | 0rFf6WjO86W5ZZ8NRnlLREYA9ugQA+5esuYfKQz3k9vt3wrIjrlRaIAWVrQB+iBT 25 | r4ouzezDENCmFISoxPv4XPXki/GuSXqm1VmK4A8V4srEKPm7qU56+3gzGys+oQZH 26 | abyhet4tFOd/dFn4JGoJW7vvg7tmZmJTKmG5tFq3K5XkDZK6vIGzQnaiX0sSK1Xh 27 | TKESlRHK+HjtdzeLArXilJS8/t4LjdViZttuSmzC+hz98gOqpMpjd5ebg18DqeSq 28 | E+3Qqo5ISLfCTc9LbouMVXLfAEA0GYSnkrKxRjbelibILG6sHhO9oR1up3HNAAiG 29 | 0lBqpFW0fFvuXsv+BWtl0l4BJFMS7UPNHj7i9j5o6tZvR717DfLi6ZY8LRmiZtQ/ 30 | 2/jD0Q0ilF6DgvnN6dJve5/R94qsOqiCbev0V1W1MkkW19Pv22/1Nvjqy5I2mGNL 31 | trTLIz9HmJL1Knl5ZXh6 32 | =nmgl 33 | -----END PGP MESSAGE----- 34 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 35 | encrypted_regex: ^(data|stringData)$ 36 | version: 3.8.0 37 | -------------------------------------------------------------------------------- /cluster/apps/cert-manager/cert-manager/secret.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: cloudflare-token-secret 5 | namespace: cert-manager 6 | stringData: 7 | cloudflare-token: ENC[AES256_GCM,data:oMQzkqFxBEPy/i1zqxuV5qqJrC7hf7V9fh7jjWg+KnXLDZCN9Us+fw==,iv:n5rycsrIgUyf3KYRkwSw3W+UD7tKM54Ln+OWypRxlHk=,tag:KGobgu5u+LIhv2KdkFxNfQ==,type:str] 8 | sops: 9 | kms: [] 10 | gcp_kms: [] 11 | azure_kv: [] 12 | hc_vault: [] 13 | age: [] 14 | lastmodified: "2021-06-11T12:50:24Z" 15 | mac: ENC[AES256_GCM,data:OZyGfKLlJSwLy7tEfQGjoH/M0I3ItbdGQalctoUBG0iCydgoBd4ZyNy2eBqAQB5wlQsN9BuKKeQkRvPwlL5yttT60y1WqjAVmDtFGxoYGmAkQlIqzgB8rgIG+lt+B9kbg97zr7+oB5YAUctnv4OKXQ6ZMUhxaY1wg5Vwz65Z0Ws=,iv:5s/HQYrC8uukbQoEaUeBHVL5aG8HPJAYO7jvLkpEgeE=,tag:o3TffSgjwZMuozLl4War8w==,type:str] 16 | pgp: 17 | - created_at: "2021-06-11T12:50:24Z" 18 | enc: | 19 | 
-----BEGIN PGP MESSAGE----- 20 | 21 | hQGLA71EESEn7N2iAQv4txc60sZeoExOF9sN7eqF0+Z0GdeHNmeS/AKVTNwC7AZF 22 | JrquuBBnYR2D3EGkoLAPy3YKE/rsIyY9tdHznIyMz+5V5MVFNm0KoFhYI+RHKd+l 23 | dhrfoRw+Pdjde+Ku394O6F0vPBUioH+PbKfuj/Lop2Uyck3hgATydJaLWgc9Y1mK 24 | cr7DDR5wLZ/rMuIzRx6qqlWS/cbVn0ArbfQ4ZbWQp5PcKyGvmh5PoSxoE5tZPHZz 25 | Vg2ezKjCIsd9zCa3Qt16t5QoJmvoogkrMLKwCzH/IMpf4rMcloPKjpVS7yxn4edN 26 | at1ZnMGJHijiClzEtzJnjJcukFQF5axuBSj2xIt0E0RBs8ygpiavB6OjJa7iz8yD 27 | UdECwYnoDpJVuFUNcs3NshJMzw8sPh0Z33cUkvfRpNuEWWGkQ+wEV4YVDAneYBC+ 28 | S3UY3C0GVVe9AoOlTFZj8MsNWeJvzgw5V4aYSdkmmI3vJT6r6zyAJDbanzcA3bQe 29 | lyG7trargOikVsg4UBTSXAFENzC6bFrHSOttFKcntjnrSh/bW5tL8WFSsPcpFDQE 30 | bSE2vIB2UdVeXknIYlVvDVj2PKVXQrGv9LHzFd6bnoEemH5M5O/7wyxbZSWA4aS+ 31 | TzLEprtO1RSgvWVP 32 | =Gd41 33 | -----END PGP MESSAGE----- 34 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 35 | encrypted_regex: ^(data|stringData)$ 36 | version: 3.7.1 37 | -------------------------------------------------------------------------------- /cluster/apps/networking/external-dns/secret.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | type: Opaque 4 | metadata: 5 | name: cloudflare-api-key 6 | namespace: networking 7 | stringData: 8 | cloudflare_api_key: ENC[AES256_GCM,data:tqARd4hKwwq0FNB/GIQQGxKboPpyhMIPQK7F4XqS7jKxvhIcCA==,iv:m0Cnbx08d8qh6r0m58GatUqjc4wrf26ruF0wWzOWDSQ=,tag:h9jWc5tYgWrk0cupYtF+hQ==,type:str] 9 | sops: 10 | kms: [] 11 | gcp_kms: [] 12 | azure_kv: [] 13 | hc_vault: [] 14 | age: [] 15 | lastmodified: "2021-06-18T10:47:11Z" 16 | mac: ENC[AES256_GCM,data:JDkOkZXfNY8Y/5DUs2GsKMXRfSchl29w/9kpp2OEarmEGh+coi9WQgyG/IiqX64onOtUuAsQWer7C5BkTJ5nqgItUC+MqvY2xpvOME2nBB6LjLEIePuIGVHL8P5TnXrEvVPIcK9ui3PfbaMiJ5V/+hocPe1ocrIq8Zb77RJNkbI=,iv:lx4KiC/9V+zk/x25i7OV7siLA9vammA7FRkxUeGmfc4=,tag:iLj1jakukqfkAeHBuv0Fqg==,type:str] 17 | pgp: 18 | - created_at: "2021-06-18T10:47:10Z" 19 | enc: | 20 | -----BEGIN PGP MESSAGE----- 21 | 22 | hQGMA71EESEn7N2iAQv/aLcD/kipX9RE52WeWb9N/GhuQ67fJcDbYSJqm6hzpFXq 23 | r6vb8SVQhPX2ZbCB7qMvE9bEL20P5vB7/lh3BzXrbKlCOYukAnAhez9J3nF6oSqo 24 | 6dmynpiKLdu8XZrcnrQEbFANZiyWaL3T/1NK10ES32aRTC0tDNTvoRf238aV9S68 25 | cH0YS1HgiOMvxEMxvh9wlDMXavMuAFusHg6bI3WsiM03cdYjVFsePkt49Dg5xfMC 26 | /V+mrh6C3fQrs/ISwkgqgXLKM3M5ik3p6CC5ssV4jNk+ZTRCdO294kd84h8yG7WB 27 | DGu5+pHnUI9FB3bd+GLXUKX3hXiguZA2x9AztwfyMb3u8+bJhznq95hf8+ha04D4 28 | MsdSODc2O/1IhhGIJrvyAvEXU4XP/S7H8ImLXWPakyjwsz8pWhWLJ9ckYcRoR+W6 29 | k61vLKLxw+21FdJpwzkd/fM39kav4qk8qQzOvJlFouJK4XMUcMlRnPaYQv1c9dNa 30 | bM4UxbxfWaU+Ow21KR7C0lwBEZ7smvu/oIKhVTy4vaZ3K98tepHzBbXTTEhBlKWu 31 | OVY6O16xZFHUh426Mx/eoZNuVYeVKaoHvM6CgamsAlVw/yU4NYigSIwedre5NH2B 32 | uVkQRADpu1VYmVfvag== 33 | =ylNu 34 | -----END PGP MESSAGE----- 35 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 36 | encrypted_regex: ^(data|stringData)$ 37 | version: 3.7.1 38 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/kured/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: kured-secret 5 | namespace: kube-system 6 | stringData: 7 | NOTIFY_URL: ENC[AES256_GCM,data:C+NgykRATzOVIAxgl10XfbqHJ7BeK7JfpyCHHZMb3t2v1dY/+SZM3XbAE+EsAH2SDhiR34QRU0b1Bez83+OcKUZ2T5FBoC4SfpHTdcXWI70NoSmV8nL1RMlA1tw=,iv:oE2cgjS38QTyqTLsRPTGuOSAZjs/Na0ellL4C9b0F8w=,tag:XteEERYyN16sRuhG1alOvw==,type:str] 8 | sops: 9 | kms: [] 10 | gcp_kms: [] 11 | azure_kv: [] 12 | hc_vault: [] 13 | age: [] 14 | lastmodified: 
"2023-12-29T14:09:58Z" 15 | mac: ENC[AES256_GCM,data:AZdlIri597qhM3BCNjpLKPuG5R5YBHHpeAv72yBCCiRL/BN+yDj4XP04Nd73+Lfp2LBaenpn/b3dBO2idnnB7jKuAtVu8y1HVouBnQu67cyQUdHjzzYVyx/jDE2ZzAVzqD9DyoNzljavZ1BPL3eJEn43cNGn2ULmVQ9TBYH9DFQ=,iv:r3orMq+O97qah/j7GfArk5pPB+Ej6DKMce5YHsS2WqU=,tag:E+w3GNOsXhnPOd8a2aDpPQ==,type:str] 16 | pgp: 17 | - created_at: "2023-12-29T14:09:58Z" 18 | enc: |- 19 | -----BEGIN PGP MESSAGE----- 20 | 21 | hQGMA71EESEn7N2iAQv+OJJ/2E4koVb66kH4RQ2ohLzuDwU2qWvTiRyzhmfHNjQV 22 | lkb2GYoog96NR5ys2f0TONerBmkFYa/Hahprb5Jxv4JMO2nu6ef3CCsJ2hyGappS 23 | +ZrpRQPgds4a+C2vbi73oBjU9Z3moKaH6Bw+2PzoVX8238DoQq0ijmO2wr268JtI 24 | LhjQakH7w0dtuEZsY1goTls7czQpFTsKTfit3A8RAw03WZ97rMHRt2WtVwBXwOju 25 | G/pRMAOUtWPLhCLXfGT7GxlyzOprI4ipmyxRkb73TxPiMs5AuohdDdKy9Sei722Y 26 | OwMsgJKhl5SBCSY4NwXq4ZsiQoVP69XI519tdjYLkjduG0fXFIUmzSJYkeUR6jQK 27 | 2miUNRsYsAsk3XCN4Cz0Y1dU5JCrWL/U9v6r732Hta0JKNceOwtNpKxEQuaEGhRr 28 | iKDmpN/l5/l+128rdDwXVWA+YuLfJiPG7BHQVMILqCZ+UVvgQ7zp2VWxtgdVzSI9 29 | 6FiEETwqs4I4ZGcA3Wk10l4BMymCzPxU9ybuBNiGgtxygfIDrQciM+JFooFqpu7F 30 | M39UwLYUCikDSCoKTHC4qPih4rs9tKNf8eNOi3SrgczFO+JO6U+3+eqV8x4ksjWR 31 | 8ouohxbOYpITuj4Ks0No 32 | =HEfZ 33 | -----END PGP MESSAGE----- 34 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 35 | encrypted_regex: ^(data|stringData)$ 36 | version: 3.8.1 37 | -------------------------------------------------------------------------------- /cluster/apps/default/actual-budget/sms-proxy/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: actual-budget-sms-proxy 5 | namespace: default 6 | labels: 7 | app: actual-budget-sms-proxy 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | app: actual-budget-sms-proxy 15 | template: 16 | metadata: 17 | labels: 18 | app: actual-budget-sms-proxy 19 | spec: 20 | # TODO: Add requests and limits 21 | # TODO: Add readiness and liveness probes 22 | containers: 23 | - name: proxy 24 | image: ghcr.io/shadyf/actual-budget-sms-proxy:v1.3.20 25 | imagePullPolicy: IfNotPresent 26 | ports: 27 | - containerPort: 8080 28 | env: 29 | - name: NODE_ENV 30 | value: "production" 31 | - name: SERVER_PORT 32 | value: "8080" 33 | - name: ACTUAL_SERVER_PROTOCOL 34 | value: "http" 35 | - name: ACTUAL_SERVER_HOST 36 | value: "actual-budget-svc.default.svc.cluster.local" 37 | - name: ACTUAL_SERVER_PORT 38 | value: "5006" 39 | - name: FX_FEE_PERCENT 40 | value: "0.1" 41 | - name: MAIN_CURRENCY 42 | value: "egp" 43 | envFrom: 44 | - secretRef: 45 | name: actual-budget-sms-proxy-secrets 46 | volumeMounts: 47 | - name: config-volume 48 | mountPath: /usr/src/app/config.json 49 | subPath: config.json 50 | resources: 51 | requests: 52 | cpu: 10m 53 | memory: 138Mi 54 | limits: 55 | memory: 138Mi 56 | volumes: 57 | - name: config-volume 58 | configMap: 59 | name: actual-budget-sms-proxy-configmap 60 | items: 61 | - key: config.json 62 | path: config.json -------------------------------------------------------------------------------- /cluster/apps/default/prowlarr/prowlarr-helm-release.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: prowlarr 5 | namespace: default 6 | spec: 7 | interval: 5m 8 | chart: 9 | spec: 10 | # renovate: registryUrl=https://k8s-at-home.com/charts/ 11 | chart: prowlarr 12 | version: 4.5.2 13 | sourceRef: 14 | kind: HelmRepository 15 | name: k8s-at-home-charts 16 
| namespace: flux-system 17 | interval: 5m 18 | dependsOn: 19 | - name: longhorn 20 | namespace: longhorn-system 21 | install: 22 | remediation: 23 | retries: 3 24 | upgrade: 25 | cleanupOnFail: true 26 | remediation: 27 | retries: 3 28 | values: 29 | image: 30 | repository: ghcr.io/home-operations/prowlarr 31 | tag: 2.3.1.5238 32 | env: 33 | TZ: "Africa/Cairo" 34 | ingress: 35 | main: 36 | enabled: true 37 | ingressClassName: "external" 38 | annotations: 39 | nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.networking.svc.cluster.local/oauth2/auth" 40 | nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_DOMAIN}/oauth2/sign_in" 41 | external-dns/is-public: "true" 42 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 43 | hosts: 44 | - host: "prowlarr.${SECRET_DOMAIN}" 45 | paths: 46 | - path: / 47 | pathType: Prefix 48 | tls: 49 | - hosts: 50 | - "prowlarr.${SECRET_DOMAIN}" 51 | persistence: 52 | config: 53 | enabled: true 54 | existingClaim: prowlarr-config-pvc 55 | media: 56 | enabled: true 57 | existingClaim: nfs-big-media-pvc 58 | resources: 59 | requests: 60 | cpu: 300m 61 | memory: 250Mi 62 | limits: 63 | memory: 250Mi 64 | podSecurityContext: 65 | runAsUser: 1001 66 | runAsGroup: 1001 67 | fsGroup: 1001 -------------------------------------------------------------------------------- /cluster/apps/default/yourls/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: yourls-secrets 5 | namespace: default 6 | stringData: 7 | username: ENC[AES256_GCM,data:PdgrwpKK,iv:lP39cHNBIU2tKw1qeLUxTrexJW2x8yOWVYtWSTIEsCQ=,tag:wkhLXIhLJCW+GPb/PEW4uw==,type:str] 8 | password: ENC[AES256_GCM,data:L/W9WiGJhc7Q5C/+hMYlw4N0MtFYyNYGcqEAUHn7Ii1bs/cc6+MNaw==,iv:MbfejL5DyQXfFE2+jzCJGJkUf8g2G8cED8RHdC5ue/M=,tag:bofHG+zZCFJsUHdMfWVyxQ==,type:str] 9 | sops: 10 | kms: [] 11 | gcp_kms: [] 12 | azure_kv: [] 13 | hc_vault: [] 14 | age: [] 15 | lastmodified: "2023-01-18T20:16:29Z" 16 | mac: ENC[AES256_GCM,data:LTMPsZg2vUQ1/NXGKQJ0ZLospElOMnvq7l6PsZpAg1yOMQtZNXjjMI0QsDQ0M1x7l+2s8qw9PuuptNXVOJIZo4emG7o+4/dgEasBHN0UTb4mMR5UOEDd+LhrrlW0q7HoDL19pht7TiC//1gwp8hISG5+kCZIlzzgJUC6eaU0kGE=,iv:LpHkhvKE8ul3rnb/v/3k/5bWCOE0a0Oq86KZETig8mw=,tag:EczNWjP6loyqruBu9YZMTg==,type:str] 17 | pgp: 18 | - created_at: "2023-01-18T20:16:28Z" 19 | enc: | 20 | -----BEGIN PGP MESSAGE----- 21 | 22 | hQGMA71EESEn7N2iAQv/YEFdKvi6D/3x5roqWQkRbAA6aZQvzJ3dgxm5D45NyO7X 23 | qTv6f1RZQv6ueIGBcJS8aD6mGJqJzv8hxBwE1c9rvMqx299t4RZBA4iSwlAeubW1 24 | I8ZyvzSaDbmZ3e15CtYCSEEZA6CMoK9cWcCyEgSjUl9a8qLnKL1Ci1YbqWlyVIkW 25 | mmiJRt4JehFAsypvJeSKNPluJWH5yabK1EV/TfxC7LzXhqSoqdz2DWNbt+4/UYcg 26 | dR9kF4WEERVXP/lesu4qUnMz8yqhb0K7iEQCNS/EXApZo+dHQeglWquQaiHC3OcJ 27 | uWO1Dw1a0AxICzFjoewitj6i5jtlgRWJkOGg+c9d/9bAjjBYd6EXRkjCMfrO4F1b 28 | 3eBOsxwNvzaa1MfnSKCHu/EI5pd3J3jf2bUkrQ6hCmhZr53kZQrPViXdhND0VTqP 29 | 1PUQ8I+saKwIWs7wTV39A57YuPoJYBg57YB4PyGcAfZdc5T/kubtB6/9eXBfWtLP 30 | p6ZoN2PG6zjLT7toGLex0l4B8YModsr5sFiqeo5BO6g7nDN7SZTHrsswhPBt5qfw 31 | Y8IpJk1xh7SOlIMh5CkGSjVObwZ5vIETpspNXlB5XH88YDiCE3lJ9ZKK1o/wU0wO 32 | 6Bsdje6RiDHyDBIKQZ5U 33 | =gcZz 34 | -----END PGP MESSAGE----- 35 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 36 | encrypted_regex: ^(data|stringData)$ 37 | version: 3.7.3 38 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/grafana/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: 
Secret 3 | metadata: 4 | name: grafana-secrets 5 | namespace: monitoring 6 | stringData: 7 | admin-user: ENC[AES256_GCM,data:ZcZ64UHp,iv:pp0+A4FOah2HYngY3V94MWu+Ppnb9N1yYxrDA3fX/bQ=,tag:YqScd5rvrs556j3ieVzwhw==,type:str] 8 | admin-password: ENC[AES256_GCM,data:zJNnxYKkr9zQnrm8AqG3Alv9VMplGC2x7tnVqPT/,iv:0DZdNHseJfmtE22PSp//VlPIC8uCyBWj14eK3S5DL2Q=,tag:U7IzL0VBJB1hgX1ni7I7yw==,type:str] 9 | sops: 10 | kms: [] 11 | gcp_kms: [] 12 | azure_kv: [] 13 | hc_vault: [] 14 | age: [] 15 | lastmodified: "2021-07-18T11:23:52Z" 16 | mac: ENC[AES256_GCM,data:P6z/GE8Hgh+n/Ng2ZfdCFEp1E+heS8y0VKjSxiUB8UgvfdqzJhHIyxPKZwRZ5ANhEX61O2iTRsrk1lBdnqrY8diCtbVcXGrd4TRX/+BrrnBv8qGjkSysReW5jdaHBez4SXYKvvWCrypcEyM4vUK+ZjkZKvD+MQt1iOQW6IQw3lU=,iv:ZincdMv8MSWDvxOc1tzs7n+rLPlATYJbCuqH11WJuu8=,tag:neRarfsH+95JWTr65OZmbA==,type:str] 17 | pgp: 18 | - created_at: "2021-07-18T11:23:47Z" 19 | enc: | 20 | -----BEGIN PGP MESSAGE----- 21 | 22 | hQGMA71EESEn7N2iAQwAjfXk7pM8t9H8bWxDRdDBnQ/WnP6L9A7eZkqDOntoIP3S 23 | 2/GmnBK41EXtJZbkGBiJ38UHYjRExZ8R9RjBDzuVzuQuNAI2zcqSjXsHblQC4h1o 24 | 8oBWy37MGvG+HOY7LfjmwHnmBSspXmZKGaig6y1lrNYBDmcPLQ9oLEHhOJ71cuRc 25 | wg2X5F19+FhZ4aQhpAsnu4w+cVIj4IYkkoMXEyDNq8hvC0/YUPIDYMHYulHgDB7k 26 | jt3b7zPwjh9xHAG7ifUXmFLkpbv2CMajgkzx+It4lVqBx04sH5GX8fdMkcURH0M8 27 | ADwrNrmIGZ2sb5fWVknYroQTk5pIyHzQQWs7U044EYlRV9Op2TR7adEDEH3ZXmZF 28 | AU4lT47qGZYq32FAU5IAJ97VEGqC7fpY3lTqzwXEvR24pmDU4zu3eaZtFmPt99KR 29 | CSTnKM5TVjXl5us6nh74jLZjnaUlqd6+RHD13SdhG9BNgmvzAAOOKqZXSi/ZmGJL 30 | MJsaWFb+EjBQ9RJohrH/0lwB1Ah3nMlyZf6bont/XwwLA/loOsTGHSJnfPh8ZOTr 31 | 0ObqNjMZDKJRHx8iEvL+r422D+a9bCyZEQD1qdqmOqA+OqpFP2Gn03mlUi8q2NB3 32 | 6tOozSLBORf+fBeBPQ== 33 | =jKIR 34 | -----END PGP MESSAGE----- 35 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 36 | encrypted_regex: ^(data|stringData)$ 37 | version: 3.7.1 38 | -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/wazuh-dashboard/indexer-secret.enc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: Secret 3 | #type: Opaque 4 | #metadata: 5 | # name: wazuh-indexer-secrets 6 | # namespace: wazuh-system 7 | #stringData: 8 | # username: ENC[AES256_GCM,data:pKvk+jI=,iv:Z5cpOK68Y0jHOF9U71lHbt9AW4aAdSp2Rc7hAXtIBmw=,tag:ZWzu4Lu+C8ZM6nTcNXs3Yw==,type:str] 9 | # password: ENC[AES256_GCM,data:gxKBOQND/qnlVkBDs14=,iv:PrmAqjfeo7J13/GxeDaEfTY9nInCfx26cloVSNJ5zgQ=,tag:49+ILeHIpaS1TATtsinTAw==,type:str] 10 | #sops: 11 | # kms: [] 12 | # gcp_kms: [] 13 | # azure_kv: [] 14 | # hc_vault: [] 15 | # age: [] 16 | # lastmodified: "2024-09-09T17:53:18Z" 17 | # mac: ENC[AES256_GCM,data:mnxpYRwS086K50jpmJgsq/JW3ibAbPPWoPuVUgSQTbG5VHZxXinIwkmmkk3GKCfuA6ARzaKsqHGFioqlrLjEnH49l68knuJ/z+ximHSENwZSRiKxbUQGbiNfBgestaNEvLJV8TZBjqKH2hcC3IyAxaFCxrhgAHEeMSX1FvvemDc=,iv:rOuFKuWd7JxXGUvJJkF2qp14dn6yt4yeRbP5TQIkDcM=,tag:i/77HD8YvvrCpOW8b1+/gw==,type:str] 18 | # pgp: 19 | # - created_at: "2024-09-09T17:53:18Z" 20 | # enc: |- 21 | # -----BEGIN PGP MESSAGE----- 22 | # 23 | # hQGMA71EESEn7N2iAQv/fpTspvr+4lCzNP6nxGzSg7Pf9kb7yF6TJoGDLEUXjksC 24 | # 81BCo8vguXuIscOLqjv/C7y3xJjcHwJ3RS6mrPMcVaP633lri4JlEHexbCyLoDj7 25 | # jd73EUuPsdA+CqdUvN21cUl7ltQL8O35g6Ob9y29EHs0dI0VvEmRm7Nr98+bhVHK 26 | # b39EOIH/edIVEAtYQW1ahPyi3kWe8rs7FDdb+yCyn69iZ6OW46WWSf+JHuugnHdr 27 | # rK1FYIoAadPkBX1rojzVJkWDNGy7PTCes91buidSxbc44yYQrlgOM7laVoyqeHqf 28 | # TZWoHRZnuyQ4Gzp01KY3p2Nuh7T03a0dM0SXBgkkhkqdBaMcAnO8lhqFyBImCLl8 29 | # Ffct0tI3g6kwtyPXybAuo/42OSb63VylKdhvC1GbMxKg2SCgNPPORz1koi3axjkW 30 | # 
D9DW7F6sG9prJuyuH9bzGDbXXaZsWryp34QmqcCuXZfA0c+aWzDJEfJnJCSAhaP3 31 | # PG6KBzvkZVYaqWIaq7DM0lwBz2zNGhTy6IdKOlK5xaqmryyPTveJg4lyoiE/7Vhi 32 | # FEWPuoT70K6ggvr+NTNfLCIgGrUx46eaU25JOmVFvbLUCF1SZfO6/wzc+jLSEFH+ 33 | # L+XAtHu8ahp6JbLXEQ== 34 | # =F+iw 35 | # -----END PGP MESSAGE----- 36 | # fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 37 | # encrypted_regex: ^(data|stringData)$ 38 | # version: 3.9.0 39 | -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/wazuh-dashboard/secret.enc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: Secret 3 | #type: Opaque 4 | #metadata: 5 | # name: wazuh-dashboard-secrets 6 | # namespace: wazuh-system 7 | #stringData: 8 | # username: ENC[AES256_GCM,data:usRM16SfOqGifc6r,iv:SyJu272uo/HckfgCZ0zQX/nN0f+D2U7IJLyb0iHZa14=,tag:w0mWHMKb/9SSk4XT8p9FNw==,type:str] 9 | # password: ENC[AES256_GCM,data:hyYDidUnsjwmtb4Y,iv:KfjLH9PyrYUykI2BJqMZEsMuQHxiU13J2hwCCIjQE3M=,tag:YUFeiwP2P16YqwUSSIg0WQ==,type:str] 10 | #sops: 11 | # kms: [] 12 | # gcp_kms: [] 13 | # azure_kv: [] 14 | # hc_vault: [] 15 | # age: [] 16 | # lastmodified: "2024-09-09T17:57:02Z" 17 | # mac: ENC[AES256_GCM,data:eb94X4VlZJhwjVpavKKdgk54EDjs6vqNUaFR7YqA4eYYq51Mypu/5Zu6Dzg8+VZ5WpJ28uIokD6rFxPAnzVNo0p+Ix4C9MQOonPyIKa9wXk0Jjrsp/bonoGJsP2nWagBuJXvBOO5xWFKEDnplWsp/pKsFSRwcXkgcD5cLnETk8c=,iv:SjrdCu5h4jWaMf8y1Adi0J0iB/ZQHN6te94lQXC1FJY=,tag:D8vB8+dRrPasxQJ7EM7rHw==,type:str] 18 | # pgp: 19 | # - created_at: "2024-09-09T17:57:02Z" 20 | # enc: |- 21 | # -----BEGIN PGP MESSAGE----- 22 | # 23 | # hQGMA71EESEn7N2iAQwAiqcLq/gaVeAoBzLRLmdO/ZSVmkYLHgBinIfwgqAZx2Jh 24 | # 49VJZG/cv5yDqNF5xEamC86WGlHNB4Tq7qDCPCrXs8JyKhzf9mEp6YHybyFS5IwP 25 | # qidbK95lBT+LuOT6CntiTc6KqNnU6X7qvOKGoDklwZ27kc2J8CEpU4VfZIa8Kv6K 26 | # NrFM5z92F/ggnkxQwniQAlQTmk7wAxX1TKF/T6p/4Ws467vmkTB8ZCtXhCoDLcLG 27 | # qQKdiI5mzofWWVfCz6hl0tk9qWWrEAqEwdH+nxUfvjTsx+qJUZ8AgojLAZfWSmNk 28 | # U2KbonSEPCCJtT3F+z2pFRZV6SVb/zDnBSPm9Qa9B4RV+MgI6BHoV0coPlIKMO6/ 29 | # DBQuyru5jAarWysfDCr55iolaEOPpnvsjAQBYs5l+sGnapdGVr4Kfkn5t6Ae6hQW 30 | # RPFiKWy9hcRAh2Ly54BDzs6sWIflK4Iy5FFmCpfhLQ028V/zlS2+FqZdd/Q18hrD 31 | # cZ8eZfXpkTTVmVazjjWZ0l4Bn+1hUyFd/FSKufU/qP+uJzOP/2ZeEoIMxM2B8Z6y 32 | # KzhRV0yKGscvl9m1IYbGWQS+HYN4DifnrBSAJHuGNlTrz1frvFltTBRtHXkgIFxb 33 | # 7+sPi6nrJtUk5zWUTeB+ 34 | # =bpfB 35 | # -----END PGP MESSAGE----- 36 | # fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 37 | # encrypted_regex: ^(data|stringData)$ 38 | # version: 3.9.0 39 | -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/manager/api-secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: Secret 3 | #type: Opaque 4 | #metadata: 5 | # name: wazuh-api-secrets 6 | # namespace: wazuh-system 7 | #stringData: 8 | # username: ENC[AES256_GCM,data:nU4HoHK3c7n+,iv:UApcAIM5tBJ8j+1BoYvbB7sgyXFqwpHoewbhxmN0IeI=,tag:LW27cqFe+LQChOGckEhKBA==,type:str] 9 | # password: ENC[AES256_GCM,data:8zbkr43pFkgT8H+rp0IcLkOso9vDoFljf7ahtg==,iv:ZYIIhynrOvJ/6ecKPPwa9Oie4nRiva/8O8nT7Lom5nk=,tag:Z88UfHPy3mL3j893odYG9Q==,type:str] 10 | #sops: 11 | # kms: [] 12 | # gcp_kms: [] 13 | # azure_kv: [] 14 | # hc_vault: [] 15 | # age: [] 16 | # lastmodified: "2024-09-09T18:19:54Z" 17 | # mac: 
ENC[AES256_GCM,data:4BTorO5D44GWl150IdRTFNwvDzwM4V60Po1r7zTaTHrimPQjJDQhXBbg0ugn8nfSrwSB66Sxd8C/udz80au4Nr0sAiHTPuggFXB54QXcZgRP/YQDd+rIMjaneM+SMh5o6sIlJEUiEBtlnBfhd7CTj2T88jXTaD2yAICScj/deeg=,iv:PsJf/plvy++vRrfKwX30ZeZ7M5jVm4MfVc72yvPvY1U=,tag:8pbxttOCXXojIzkNaejmWg==,type:str] 18 | # pgp: 19 | # - created_at: "2024-09-09T18:19:54Z" 20 | # enc: |- 21 | # -----BEGIN PGP MESSAGE----- 22 | # 23 | # hQGMA71EESEn7N2iAQv/ZPTkvhA5wZcEcpQpQInUWZys4F7IyaAd2sD5dRbbS21e 24 | # lDDxB+spywtMoQ9doJ5zBzF6NDeBxASTgeBj0ielnWaRYCDJssj8Up0CcFA623qW 25 | # 1DfniGyxTL/+AOJ028whIXHy2G2fyiUsX3UL4/ZrktSPbQS4IbF9oFw1jvvUlbWq 26 | # op3Rg1d2OaFN2HeKSEH5PrEUtzFuqPjGOwTedNAVj3MLL6vhRvCiXyiNZBsHpEpy 27 | # LcEjyYU8MyiDR5kc1CaKB1PcGnteN/eFLLgRQXFg8z6fEtP19soFGYs+OdBvveax 28 | # lWGe+zVX23kSrPzdJmY+1+0zDYhsLe9nK0Bn3p5WDqT/2E4giljWkNSGRPeNEufK 29 | # LJ0XD6tN8NF34Uxt3+32RHB+cH183jSagPXdXU3zzpJueoyepe+vNwMRP5dmLL/c 30 | # niSD7SfC6sDjHtPJSdcxPbGh2B1z09Ym2h2SUIppOt+KH0oyBHKXm9uLvB2EQB87 31 | # QK10BSRC58UNqhe+rK980l4BrUtK4bey7Fq+uaBDda0HDRoDuOpvGB2vaKzEOzJd 32 | # fKCzMV5e4G8CARhkhij/Q4mLnevZwcaoqgL4AOGZl8ArBePbwvq8FyXMwQ+vT4Is 33 | # QAIvpMmoKvPRd5MUgrAx 34 | # =01m9 35 | # -----END PGP MESSAGE----- 36 | # fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 37 | # encrypted_regex: ^(data|stringData)$ 38 | # version: 3.9.0 39 | -------------------------------------------------------------------------------- /cluster/apps/default/hyperion/hyperion-helm-release.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: hyperion 5 | namespace: default 6 | spec: 7 | interval: 5m 8 | chart: 9 | spec: 10 | # renovate: registryUrl=https://k8s-at-home.com/charts/ 11 | chart: hyperion-ng 12 | version: 5.4.2 13 | sourceRef: 14 | kind: HelmRepository 15 | name: k8s-at-home-charts 16 | namespace: flux-system 17 | interval: 5m 18 | dependsOn: 19 | - name: longhorn 20 | namespace: longhorn-system 21 | values: 22 | # https://github.com/k8s-at-home/charts/blob/master/charts/stable/syncthing/values.yaml 23 | image: 24 | repository: sirfragalot/hyperion.ng 25 | tag: 2.1.1 26 | 27 | service: 28 | flatbuffer: 29 | enabled: true 30 | type: LoadBalancer 31 | loadBalancerIP: 192.168.1.243 32 | externalTrafficPolicy: Local 33 | ports: 34 | listen: 35 | enabled: true 36 | port: 19400 37 | protocol: TCP 38 | targetPort: 19400 39 | 40 | ingress: 41 | main: 42 | enabled: true 43 | ingressClassName: "internal" 44 | annotations: 45 | nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.networking.svc.cluster.local/oauth2/auth" 46 | nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_DOMAIN}/oauth2/sign_in" 47 | hosts: 48 | - host: "hyperion.${SECRET_DOMAIN}" 49 | paths: 50 | - path: / 51 | pathType: Prefix 52 | tls: 53 | - hosts: 54 | - "hyperion.${SECRET_DOMAIN}" 55 | 56 | persistence: 57 | config: 58 | enabled: true 59 | existingClaim: hyperion-pvc 60 | 61 | # affinity: 62 | # nodeAffinity: 63 | # requiredDuringSchedulingIgnoredDuringExecution: 64 | # nodeSelectorTerms: 65 | # - matchExpressions: 66 | # - key: beta.kubernetes.io/arch 67 | # operator: In 68 | # values: 69 | # - arm64 70 | -------------------------------------------------------------------------------- /cluster/apps/wazuh-system/manager/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: Secret 3 | #type: Opaque 4 | #metadata: 5 | # name: wazuh-manager-secrets 6 | # namespace: 
wazuh-system 7 | #stringData: 8 | # authd.pass: ENC[AES256_GCM,data:eWi/KLR5ZTsGAhrUddZo/r7Yo2Hs45QUYg7605Dm,iv:4Tk24LzuYX17aJBylY+pfduLh4/L5IWX/CrZHsCRzRc=,tag:8Ikx/OQr8zXlCOMqaOYwig==,type:str] 9 | # cluster_key: ENC[AES256_GCM,data:2GTkcLhKxSW2j4OTTt7he6RMdmEnTkhKurdwYkk3tdM=,iv:7Xq/SCoLHA6fFgcdFDvdmFfxVgmXShc+f8VbGICtB/8=,tag:CkkgZ6aVdDLw7JIJWvvQPw==,type:str] 10 | #sops: 11 | # kms: [] 12 | # gcp_kms: [] 13 | # azure_kv: [] 14 | # hc_vault: [] 15 | # age: [] 16 | # lastmodified: "2024-09-10T12:40:28Z" 17 | # mac: ENC[AES256_GCM,data:SwEyku16iXnsnGUdGbTP0JnIdbQsnD3/roQ+E/0hE2HU71cfnl9IY0ZVY0DUHlwEWp9HOe6oOcBYS0tRPec727XDVPzAi/gJRA1EF4kYeAWylWgW3HZL1RKzkkracoXxiT37EEoUDEhvE14UsJkWsAF8cOAGEJzyt2j4AKqaSds=,iv:q/OPAz4W2ui/VhC9w/Gpziz4rQ87TnL8bSr4l6p4Qec=,tag:Z5DF0U4tgV07OcPSgawvPg==,type:str] 18 | # pgp: 19 | # - created_at: "2024-09-10T12:40:28Z" 20 | # enc: |- 21 | # -----BEGIN PGP MESSAGE----- 22 | # 23 | # hQGMA71EESEn7N2iAQv/TVnq9YZGsHVobcX9C0+bK8zLB4jkMWoRGZ1inNf3hfTQ 24 | # IAqYyzNtg0NoEtTtibbaP0LDdjsKEiwnv/Otbb/44kgbmvlK9KuWAvnlyyg03grq 25 | # Fcc89w07cG2778lZugmykhNrXiVQ5Xxc67vpQRHL2lbYPISiDkteZhsI1cMkV1Wm 26 | # +ivBe2PY25uXH16yKHbsLF/bphf+htbFJPczZHI1p7zYSk5nDiuKstNYIUN2l8oz 27 | # BbiGwUMZcqGfBUK883lD257fV33WwKDZJOhd+ySXsdbfybglEyPCE71SnhIqnnx9 28 | # emxzwpTVj1Q+askyhmEh67ALLDP4ZEyAl0S/NOpBAmFaudsIuo/VeJhZNP6aU51h 29 | # SyXIiaYv52xXBM6cZrxE0VR7HC8lyV82HwY1s+VKDiGVKGqdlbp3P/q+8i05N7Eo 30 | # wuymZcyNNw4FMfqUf4SYk7oZGtYTVpn1KlKX2x09xgAzP8fdlJfFlW5N5ocFjQy/ 31 | # Qhjee24MVZS0eJAzdiRE0l4BuXDGVKEAPH3IGbMrQE6ceeO9gfqM2xkRT2wJj2u4 32 | # zne9qbK2md9Zr83Fs++DVxImT4/EOOO+uP2PjqEvJO555xrNRCuhvR3zW7t02QfU 33 | # MFNlhIJlao6gvYoEDOZx 34 | # =HlJE 35 | # -----END PGP MESSAGE----- 36 | # fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 37 | # encrypted_regex: ^(data|stringData)$ 38 | # version: 3.9.0 39 | -------------------------------------------------------------------------------- /cluster/apps/default/atuin/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: atuin 5 | namespace: default 6 | spec: 7 | replicas: 1 8 | strategy: 9 | type: Recreate 10 | selector: 11 | matchLabels: 12 | app: atuin 13 | template: 14 | metadata: 15 | labels: 16 | app: atuin 17 | spec: 18 | securityContext: 19 | runAsUser: 1000 20 | runAsGroup: 1000 21 | fsGroup: 1000 22 | containers: 23 | - name: atuin 24 | image: ghcr.io/atuinsh/atuin:v18.10.0 25 | args: 26 | - server 27 | - start 28 | env: 29 | - name: ATUIN_DB_URI 30 | valueFrom: 31 | secretKeyRef: 32 | name: atuin-secrets 33 | key: ATUIN_DB_URI 34 | - name: ATUIN_HOST 35 | value: 0.0.0.0 36 | - name: ATUIN_PORT 37 | value: "8888" 38 | - name: ATUIN_OPEN_REGISTRATION 39 | valueFrom: 40 | secretKeyRef: 41 | name: atuin-secrets 42 | key: ATUIN_OPEN_REGISTRATION 43 | ports: 44 | - containerPort: &port 8888 45 | name: http 46 | resources: 47 | limits: 48 | cpu: 250m 49 | memory: 256Mi 50 | requests: 51 | cpu: 100m 52 | memory: 128Mi 53 | startupProbe: 54 | httpGet: 55 | path: /healthz 56 | port: *port 57 | failureThreshold: 30 58 | periodSeconds: 10 59 | livenessProbe: 60 | httpGet: 61 | path: /healthz 62 | port: *port 63 | initialDelaySeconds: 3 64 | periodSeconds: 3 65 | readinessProbe: 66 | tcpSocket: 67 | port: *port 68 | initialDelaySeconds: 15 69 | periodSeconds: 10 70 | volumeMounts: 71 | - mountPath: /config 72 | name: atuin-config 73 | volumes: 74 | - name: atuin-config 75 | persistentVolumeClaim: 76 | claimName: atuin-config 77 | 
-------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/secret.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | type: kubernetes.io/dockerconfigjson 4 | metadata: 5 | name: ghcr-login-secret 6 | namespace: mb-scheduler 7 | data: 8 | .dockerconfigjson: ENC[AES256_GCM,data:EtrzIvQx3Gh8fhJqp0aw2o7XvmUei/sVQd2FF5XhM1n8xiFvD0UXjVdhpjoNp61+lzxJxAe63+vyY6QE3sZTYr8C9dHw0dIotInbgSApw5/cEO44jK8I9eURTrLDXQdzgtAdLQPCOLwmiaXf89DNBChZbhZ1BzljJDvPPu80umceC92N0QQ8+RGRslMCjH0mUx8qo6Ii83KSGzouzZbEx/Y7pX1u660vuclYeR8ic2/ftjXvbx14Rpqi3mqdIiw81VHauwigRHqYN+fDYqoUtAtNnYXeDwVTOfeXp6l4X/DWQi009MHnx38xg7GGHPXD,iv:ldNwgezYiIUI6cMwW21p+ZZwYIVqnkaCTAn2s52CQcw=,tag:NxfW8uwD20Bwpm+HyNzDOQ==,type:str] 9 | sops: 10 | kms: [] 11 | gcp_kms: [] 12 | azure_kv: [] 13 | hc_vault: [] 14 | age: [] 15 | lastmodified: "2023-09-21T20:08:47Z" 16 | mac: ENC[AES256_GCM,data:7+xBdqWe0zXBB7Nj/9nXvrQzCNm9LQKhByVKk7ARixikGiWN/4I9AHG6dFfMeOk0mAc0szRzOvKDsaN7X7mewYwocQQbxB346OHahngG0i5GsytdcQLN3+BltUlAo66yjCoS0veDInQrSwoWniYaBlbiE3dUP7gMNNZYD2H2s4U=,iv:t40Q9ui3BBlk1551KGoPYjAk5hUz/AuqGtzr4ghUb5M=,tag:fcNg1WzOeJR5FcMhnADtFA==,type:str] 17 | pgp: 18 | - created_at: "2023-09-21T20:08:47Z" 19 | enc: |- 20 | -----BEGIN PGP MESSAGE----- 21 | 22 | hQGMA71EESEn7N2iAQv/RzLYAJTdGeR7QtU2j7xjl/zdrnpbrEfdYrbcrct1N+uh 23 | 3PhRnDXfcJ5c5g6NkqwXZE6kpA8WO+z7ovftrAFnTQsOib1VvkhFz9mmM4VmpH4j 24 | /5IcspFMw1XH9XVnZnauwipzbPEpz5YrHZCZgvEcerQtdV5HlpbYjIcP20DHAhf8 25 | Ox5718Ig46ShMrgtFkU9wyk0cqPEs8KOQT7VW8VlPso6j0FtooOfg6HokIdA3VeT 26 | GKVJmaedU9KBAmtjKuZvQ1QznjOpNlWB7AQZeoHLqkF2T41NpmER5Qtf9TwoeslY 27 | wWLFCEXoQUnjehZ/mJ9Kj2Hl9NtgqIc9Zy/aQSkA53A3NrEdjHfsCU4AGFPYnjHw 28 | bFY5ejX0ROKWRU0rZ74eTI2YX/WVt5DgionIPXSQ6YO2kB5oTjPOwATUwSHGbW0J 29 | xmKnVKnuu1NlSKGC+31HctYWZXT5gZEIsR0YUXHyrsAsExKly4oTTptjoz9myl2w 30 | jXNcUDFnO9arV37/+F2x0l4BZGyslu39WXdOebOOM76FRJuddovMUJyJD99+rSGE 31 | 1HOtG2kDKmF9rTUoZgWPyJKNmEXyV5ieRfV9QamY/6/W0z6wvPSIHij1V3QG237z 32 | bKt/fBvp3Xz/iucU/Y2G 33 | =dzxP 34 | -----END PGP MESSAGE----- 35 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 36 | encrypted_regex: ^(data|stringData)$ 37 | version: 3.8.0 38 | -------------------------------------------------------------------------------- /cluster/base/cluster-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: cluster-secrets 5 | namespace: flux-system 6 | stringData: 7 | SECRET_DOMAIN: ENC[AES256_GCM,data:OEkLcpNOnQP30w==,iv:FJDecDTIG4a1mtW+UjSRmlxNa4Y22Wak494rxIYMJqM=,tag:rCz8lohaqd0DKYAr7tK/9A==,type:str] 8 | SECRET_EMAIL: ENC[AES256_GCM,data:LXChUSmsJosbnoOeCYkFjouHPpQ=,iv:2f9wIrQ0X756pSt8NWc83/5R1BtznVl/oZ1rIo9tvJY=,tag:A1+kBxRd4rFiTsWlIYqSjw==,type:str] 9 | SECRET_GEOIP2_LICENSE: ENC[AES256_GCM,data:THKEX5DSVfxU4kfW1SXd1J/ZFG5My/rP3OKDn39rvZeN/aALx9A6iQ==,iv:5X3uNrKO8CdF4ICXsi4yIMKzfHvv/udMk1vrlJYVnEE=,tag:/Mi39HesLRiaXSVe+foJmw==,type:str] 10 | sops: 11 | kms: [] 12 | gcp_kms: [] 13 | azure_kv: [] 14 | hc_vault: [] 15 | age: [] 16 | lastmodified: "2024-07-22T19:57:41Z" 17 | mac: ENC[AES256_GCM,data:34RUyOt247YVszO5iKW7BjUCqrLTzPbTGQkQR81z42whnghcjQcf7+2E6EpXYuXMtU+yvNs/xAMxAv9+YaZy/PP8QwmqfHXVqaXdsu4L+zg3uDl7I53T1LzQ62zu/orju6caT26zFWJtMXEvpGZmJL13MuWYqozU/iHfJNnTMQ4=,iv:DqZtbR5ka5NnEcYyePzOI8JR5+y5QgVxDUC51iqMC7M=,tag:h9liJdLoEgLWNwyM2vnsxQ==,type:str] 18 | pgp: 19 | - created_at: "2024-07-22T19:57:41Z" 20 | enc: |- 21 | -----BEGIN PGP MESSAGE----- 22 | 
23 | hQGMA71EESEn7N2iAQv/UD6IJafdgBcRtEOC/blUqpilvmdlqApiQsKA/XL/Yph/ 24 | s+m7FOgzFY4Ph641TdelY6x8yEdHjkzyiaOxcW7fGaC9CNGIB6h45Yk0ChzfrMpe 25 | WNFEsSGIqxM/4rIkxhDR7SoUAz0f5mZE/tN5+Q6Ci5yT1+4jsjPFOvoAcisc1H0H 26 | glUOug/yz5hgZRY+uyRugMHoMjvUsrA8bnNiEjg8L6N2OSKasKG7pZvBNAQYoYyk 27 | yhUg++7k0LXH0yuyJsowQSNbZWOghb5XqaklFRGXIjvKyEaUfwOEtacrrVbb6KJs 28 | 14suuj0E+pYFGtCE81S12L3LoT04R3D/ADTdDEYxa/ohp04C+hACb/MhDClhQ+oA 29 | o1vNZhFqheLaxbt8GZrpTOm1ahSIRj7PB4+gJRA4/7xExMc7p8lWNvNU7xoPIgeb 30 | JvPiRo5K/F1i9hsaD4NMb6/1VHcYLa7JXGbek+0eRuHA9nptZPAQwWG40vh9QZUx 31 | zdWzaTskO5CVDAZ4a1AH0l4Bk9I14nsdvxuDE7VPf9UKiQBLbuuxStbF5eDApqzk 32 | KrPSomNi/8L4aHhuJxTn1jE6ThfotLpoy3UBzwvFKf55CQxYBvCYMLXKO96gmugI 33 | XuSIIhmshQV8CpnfYnjl 34 | =lNXN 35 | -----END PGP MESSAGE----- 36 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 37 | encrypted_regex: ^(data|stringData)$ 38 | version: 3.8.1 39 | -------------------------------------------------------------------------------- /cluster/apps/default/atuin/postgresql/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | # IMPORTANT: This file contains secrets and should be encrypted with SOPS. 2 | # After replacing placeholders, run: 3 | # sops --encrypt --in-place cluster/apps/default/atuin/postgresql/secrets.enc.yaml 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: atuin-postgresql-secrets 8 | namespace: default 9 | stringData: 10 | ATUIN_DB_PASSWORD: ENC[AES256_GCM,data:yT39hmkVnLwM+1WZ4WKRBXuBm1xUoxB/mggjxo4+JCnKGSS+6h/IzjALrZVu64SiaGnxO0mjtX0cxwBYZn2Slw==,iv:FLF1sms+y1IvQMyFXw762xhCCxJr24ZWJo4+3nYVFXc=,tag:FXUpYcJtXUhqeYFJEStAjw==,type:str] 11 | ATUIN_DB_USERNAME: ENC[AES256_GCM,data:eeI8ebM=,iv:UfoTcpjwORvlcqri/0JRP0OfUfYhsevl7ABB9olS2q8=,tag:VtB9N4ldL26LGKXCeAgM8g==,type:str] 12 | sops: 13 | lastmodified: "2025-11-06T15:07:25Z" 14 | mac: ENC[AES256_GCM,data:oIZF+qM7eJwWIrL9WN6Z1SLLB9O/LRPXTpTOMVAUT9r8KeHF7nzNiC6FGjq0Pz/n/rlpL0VWBYzowshvNfeR6dn7400lCiTBokEZPzeEdUwu+VPjdiNhcW7/EZO5YLAueVvmosgpUT2PXpTHeaDgdMRdIZWj2t5UKNMlwxq5XrM=,iv:FJ6ywKWhHMP5ghm82b5jV0+4VWA3Boq0Iroz8Xu5Rxw=,tag:KYU+CeL6kF9vst8MGr9uew==,type:str] 15 | pgp: 16 | - created_at: "2025-11-06T15:07:25Z" 17 | enc: |- 18 | -----BEGIN PGP MESSAGE----- 19 | 20 | hQGMA71EESEn7N2iAQwAk/Tn9n76N+c2oEcCqR52P+W0NxOYY0ec62u+HT0fbuh0 21 | QUMacjVq8I4SdtD1XAl5ETsQn7UlKOZ6hw+eUZq7DaIMP3y2GlToURDiueezq530 22 | zrBCw0vikUK4Iv8+GBSSxU6/YwPxLG7lu3xfw51wV44UzJegZh8mhiNYg5Wi9/WD 23 | sm89eDZaJOczXPoKn0KUJVtNs+paSBY0Py3no13OpAHlbKNzPHcWcBQ0lDlTZ/LT 24 | Vzux1P5vzSUN7SDIBOD9TalwZJq6eHzOtauh42RxvfXuLC2NpejfZroJFX57xidb 25 | zupr4m/4hFEht+KKvf4BdxKN5l9u1ocJBxnviHgk01JgXT1kQfMTREMbwc0eSZeA 26 | tgL1KkHd1wJ4Fm1sEiWj9V6grZNTOIIRrsI5xtN2lXvCBavgdTYP9dhJmkjurDQ3 27 | GGW5eG5YqJ/PvNHsC+bdq+3kbqwwrsXBDJRv9QJKIZky/of7Bc794WObwb2q4a8e 28 | RL6d/1yNQqr4LR4Sa+MZ0l4BqnR91v2vmJu51duTFRGSDLxm9DGnyp2a2eaCaDbA 29 | r3tsFCCTb1+4m9gfqh1ceQyweaPNiO2/1maEH3BhmPDOcR9GQ8VudanhcmO7JyYd 30 | dA+lufabkC0MbbWLgty8 31 | =iyek 32 | -----END PGP MESSAGE----- 33 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 34 | encrypted_regex: ^(data|stringData)$ 35 | version: 3.11.0 36 | -------------------------------------------------------------------------------- /cluster/apps/default/yourls/yourls-helm-release.yaml: -------------------------------------------------------------------------------- 1 | #--- 2 | #apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | #kind: HelmRelease 4 | #metadata: 5 | # name: yourls 6 | # namespace: default 7 | #spec: 8 | # interval: 5m 9 | # chart: 10 | # spec: 11 | # # 
renovate: registryUrl=https://charts.yourls.org/ 12 | # chart: yourls 13 | # version: 5.13.0 14 | # sourceRef: 15 | # kind: HelmRepository 16 | # name: yourls-charts 17 | # namespace: flux-system 18 | # interval: 5m 19 | # values: 20 | # # https://github.com/YOURLS/charts/blob/main/charts/yourls/values.yaml 21 | # image: 22 | # registry: ghcr.io 23 | # repository: yourls/yourls 24 | # tag: 1.9.2 25 | # 26 | # replicaCount: 1 27 | # updateStrategy: 28 | # type: Recreate 29 | # 30 | # yourls: 31 | # domain: "yourls.${SECRET_DOMAIN}" 32 | # scheme: http 33 | # existingSecret: yourls-secrets 34 | # 35 | # ingress: 36 | # enabled: true 37 | # ingressClassName: "internal" 38 | # hostname: "yourls.${SECRET_DOMAIN}" 39 | # annotations: 40 | # external-dns/is-public: "false" 41 | # nginx.ingress.kubernetes.io/connection-proxy-header: "upgrade" 42 | # nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.networking.svc.cluster.local/oauth2/auth" 43 | # nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_DOMAIN}/oauth2/sign_in" 44 | # tls: true 45 | # 46 | # service: 47 | # type: ClusterIP 48 | # port: 80 49 | # 50 | # persistence: 51 | # enabled: true 52 | # storageClass: longhorn 53 | # accessModes: 54 | # - ReadWriteOnce 55 | # size: 10Gi 56 | # 57 | # resources: 58 | # requests: 59 | # cpu: 10m 60 | # memory: 100Mi 61 | # limits: 62 | # memory: 100Mi 63 | # 64 | # mariadb: 65 | # # https://github.com/bitnami/charts/blob/master/bitnami/mariadb/values.yaml 66 | # auth: 67 | # database: yourls 68 | # username: yourls 69 | # existingSecret: yourls-mariadb 70 | # primary: 71 | # persistence: 72 | # enabled: true 73 | # storageClass: longhorn 74 | # accessModes: 75 | # - ReadWriteOnce 76 | # size: 8Gi -------------------------------------------------------------------------------- /cluster/apps/default/atuin/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | # IMPORTANT: This file contains secrets and should be encrypted with SOPS. 
2 | # After replacing placeholders, run: 3 | # sops --encrypt --in-place cluster/apps/default/atuin/secrets.enc.yaml 4 | apiVersion: v1 5 | kind: Secret 6 | metadata: 7 | name: atuin-secrets 8 | namespace: default 9 | stringData: 10 | ATUIN_DB_URI: ENC[AES256_GCM,data:tq5HCnHWhcx8Vp7WxJVYSP+NGS2zrjiRpMpwmbfplDznecku2A8W2OM6HBCqOJ53gXNJxEtPrLdQGWKd2PS4dPGAlca5B9MmLZ6PuL9QCIfnx5u4CQXANm3Cpdh6d/Gt8HLu3iJW4fI=,iv:gMXsRHiPG3PTRrskq8kJXHz42hcU25SSpBQ8xZ0wR2A=,tag:OsyKPLRpZLEHt4nVGsATRg==,type:str] 11 | ATUIN_OPEN_REGISTRATION: ENC[AES256_GCM,data:5PSC3NM=,iv:WbjOlAwTMFUjRJFcQPR2/9IIMuO5WewzRH8+ANxQIfU=,tag:YT98C1/sof60c2fwFfRl7A==,type:str] 12 | sops: 13 | lastmodified: "2025-11-06T15:37:29Z" 14 | mac: ENC[AES256_GCM,data:CQO+ovkbPOGCL0dmR6Wa2luAz0SVj9OwXIlgiF9QORg15wADsR4H5MMkJUlk8uN0l+LN2Xmwjag8CCE7dyxdHQQ6HKGg+MdH021k4NkXzYLJ15SJLZWJW4TEnesT/eVR3JKMfPCt6ErjqJEvj3FLe/XHnBd1chtU0L+kRDJ7G00=,iv:zw2S+m/5UrxJvro1BOdrpQbdECaEHTVePJMSw/s7vk0=,tag:JdRjnvk0Iw/SODjTu5Wkkg==,type:str] 15 | pgp: 16 | - created_at: "2025-11-06T15:37:29Z" 17 | enc: |- 18 | -----BEGIN PGP MESSAGE----- 19 | 20 | hQGMA71EESEn7N2iAQwAjyn/Qu1vGUxdcY5UZdl/cZ2WvYfdTpH/M37dMTSRYrFY 21 | LI5x6tsxCrNf4jt2F+JN4mFR0XN7PyQipGrMa/Uln4jlj7RXE6+iqknrZ9Y/UzTM 22 | euqGuVRhqCxa+FaWMDZp7+4OspYXjKIH4BYNlJHjY001TFPxL4aO6lfeo5vDKOo7 23 | cTKwWyYxne+qtNnOzwVe45zTHNVKr34iF4HtBMa1RePjFQAmEAcKoNf1R2s9PU7J 24 | NT6ZhXMqzeTlZZnvDQBYsRtEk7K7tjebOtE8f5tevo+QZy8lYP0+OGdMYQ3bvYF+ 25 | EhZTFkO3NZAQP7lujaS+LFX6z4qwO6QtIDwGGgdbMREVFg0+I1RTR43tz+vp8d3c 26 | +gsJTYqHKkXzd7KjXbYJXzXOmY8l+DTS57YlGoPc6nLqod/a3/nUzt8y2NEeqHGW 27 | shceypg20CFjrO2Kn0xXuzgaH9Wn0zzhfgor9qXgBJZms+FMG/UrHvOTSMRgnbSH 28 | JryaPXIhMnqEeo0J1L0S0l4BvIt5tBEo6H6WJ1b01wdnEL22j032ISghGKUwSU9f 29 | 28iyAyTDWOTcgl4nbKllld+covjMzR21M/FJFHqUy/UWcMErAVXVXTJDYmoB5lB3 30 | H2z/pOY0V/UluTuOQjlP 31 | =/EVO 32 | -----END PGP MESSAGE----- 33 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 34 | encrypted_regex: ^(data|stringData)$ 35 | version: 3.11.0 36 | -------------------------------------------------------------------------------- /cluster/apps/default/paperless/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: paperless-secrets 5 | namespace: default 6 | stringData: 7 | PAPERLESS_ADMIN_USER: ENC[AES256_GCM,data:3Av+5vpo,iv:Qi7gzRj0KzUMynusDMezvfl/HNUPhpJfv6eFldax/Fo=,tag:DbqqiZsrn1Yqp76urBKXgw==,type:str] 8 | PAPERLESS_ADMIN_PASSWORD: ENC[AES256_GCM,data:XIlqrKPHVzLQYTHgzG8+XJYapTAyE1e0hJ0jBTHHlN2R434/J9leew==,iv:BxvPpxKfU0oQsfPpqlUa6A9f+5ZtshAv3Ye/ao+sCTE=,tag:yzgS0yrgGRV4s1NssAQBVw==,type:str] 9 | PAPERLESS_SECRET_KEY: ENC[AES256_GCM,data:TFVmyU5uf5dsyiT7GFxnU8zU+oJ5g9aTfPzDpinVbSCgIEKn9IY01g==,iv:zh+9TDlQRCfwZsmpLLBDZrLy0gnt7z4/HaEgIUpz4pE=,tag:m/0mcsqEBEyd6Hsxj/4o6g==,type:str] 10 | sops: 11 | kms: [] 12 | gcp_kms: [] 13 | azure_kv: [] 14 | hc_vault: [] 15 | age: [] 16 | lastmodified: "2022-09-28T20:18:06Z" 17 | mac: ENC[AES256_GCM,data:EY5VZpkKIHSwb/goyBFXjc/cir45an63ojh5lJWMwHRR8n9hlr9f6pntUbvirtZeDjW2WHvtgukmC+bpRBYdHvXfeLsE+wx/p9I64PdAdTisFPOSqrtA+GJNd/RHDd3Dd77IYRb88AXjlLqilssjnhVUG/sl90gUZ0NEex+gzBI=,iv:xvunCnJkq8DSndeMYPfDiK0DxHhgdEn+33EI1I7y5n0=,tag:zDoBpmfWZ1g2Nd2HP0eyig==,type:str] 18 | pgp: 19 | - created_at: "2022-09-28T20:18:05Z" 20 | enc: | 21 | -----BEGIN PGP MESSAGE----- 22 | 23 | hQGMA71EESEn7N2iAQv+M4mN2tzMNrYnfEqL21hrgq1W9EPun//zMiZCc6iPVvRO 24 | dl5OVCB9Ghe5SbPoLxvcXkZfxIYrVarCqzUa46dBaqdjk9OtGHec+mFAwbprNLB1 25 | 
7DbCfA8eqFJBIaHKmSr7tLN1eyi3drxgU0L0+FkFX8yznrZIoUb6EVXLfP3oI113 26 | U4p2HxTMg91zIdw4n2ve4nFqzOM+qBUM6h0yCqgIOR/2AIOGnJWoTkxxd5GKjSWQ 27 | tx7wgex14NFza1b8JgFS68xjlI0AxjjN3sAVeDDytIsj8FwXkHL80zt1HcYLK2fX 28 | Yrs3ydM2ZdRyyzMmKcJY+4zuNObR0gPMoA0WwcWnmaxmYnaLY6DriGCummBeRbuF 29 | uPFRXZ1pd0EWF6PQ7v0uzvGB2UVAYiDGgwmGnJ+zOtzXYIxjdtH4VMG5myBDjCQN 30 | Q32L6yBhjloxNQxYuColeURjV4h84R0O118Q4Gd9gy8FkdRrggfEIcQvA9IwvVKa 31 | uSFxp1ld2yO/1xQIwl1Y0l4BVxqTLhfBexj5Y6H3z+OA+4GbWEH4dAjZ262Za91o 32 | RCpYjbD/voSUFUVt3IM7YTzttnH6GnZyGWYP6AmIcNhhN7+F8W6NEVKqQP5wNH2b 33 | ua/8KjXpUj5ZHvvtY/P5 34 | =8NJ6 35 | -----END PGP MESSAGE----- 36 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 37 | encrypted_regex: ^(data|stringData)$ 38 | version: 3.7.3 39 | -------------------------------------------------------------------------------- /cluster/apps/default/actual-budget/sms-proxy/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: actual-budget-sms-proxy-secrets 5 | namespace: default 6 | stringData: 7 | API_KEY: ENC[AES256_GCM,data:wqY+6wPQdLRh0BOf/VRigODAuCL07yOEyYKcwBMW,iv:/1sr3Pw5ZdsC0bsR53rssi9p6lLHznx88oquugpbzJ4=,tag:cGIZz3xwgAlDOPDU1hhdPQ==,type:str] 8 | ACTUAL_SERVER_PASSWORD: ENC[AES256_GCM,data:HuF+5wofSfBIAf4a+pu8X+KqMrbKTqRt1M+pIYIY,iv:i4I7OlTIRezdPav6gauuaxKWfRk8ZuTIGqVipxg8ZEU=,tag:CiIWGpupPVUKJpw6SNjBZQ==,type:str] 9 | ACTUAL_SERVER_BUDGET_ID: ENC[AES256_GCM,data:y7Ip9U0WxFqR9dJU1Ym4YI31pcKm5dRAfyTWvyV1ezz1vssR,iv:TsPIzoZZDijZhvHwMUm3C7cY5cLGOtiikVFjrtDnv0E=,tag:kIQzxPYHbl7dUr5IUvlBzg==,type:str] 10 | sops: 11 | kms: [] 12 | gcp_kms: [] 13 | azure_kv: [] 14 | hc_vault: [] 15 | age: [] 16 | lastmodified: "2024-01-17T21:35:13Z" 17 | mac: ENC[AES256_GCM,data:IBAWZBFM9oEi4F+BjN0gcgjtG9hqmlWxWOPic2MAoKLAmLBw5AOLJB15yLWbmzhM85mTzXYJam80kB1yUmvPBQMVUxy8jRyh3pDNqG1H4eChD5aIaqt1oQr9sgfG3vZmM9cAuQkvNaA6n8E1OYWMB9BlgYiEXgFo5TomT0ibutc=,iv:qpLLXejMAZeABPzzL17NkxuNuj+GPIt6FYed6Ia6VBg=,tag:YfS72ihn3GfTGjS++CcdGQ==,type:str] 18 | pgp: 19 | - created_at: "2024-01-17T21:35:13Z" 20 | enc: |- 21 | -----BEGIN PGP MESSAGE----- 22 | 23 | hQGMA71EESEn7N2iAQv/YQWz3teOeZdQmNjkp4qfwb5sQohxdbw57aPK6jODksI7 24 | 0g+2tRtppLRHtlRuTDmblKCzRgjNi5Ozq7COc/BZ6L2WB+ewqKsaHem7kFY/boyQ 25 | P/TlCe/CAv/fyR/BiQSJZw76M/73cp+M38QqMUF2nqilVVkI8qPfNehm47f7Lbb/ 26 | LSOdWuXy4spUxLG/jmVgOj1gUKh24DG39Q/eIY8sscmUL50wsVsfajW9zf9Z9fbu 27 | IGCsjkBVrLTQMNkVan1fcJw2ha8uYUL3NfqbOXJkpbURP09I9BoPZlMo+cyy07Ox 28 | e85RVo3Nu9hrd79R+kC6ZeOa0FyBRTAYbn/IU3qqIkLncfYBZa4jAm28LzpNCg6i 29 | wQKmuAzEdwENerZBXbolF263Ka7EEpvuLJi/EcEsveQ/gr2wz56fa4l5iNC1JPFT 30 | FQZbVYjgm0Xylfx1eE/EmQ1NRewC8U1fRgnEMPC4ZhfBMQ5tfmWvOG2Tjrl9ya+r 31 | 026/AmFguTvsGUqfq1rh0lEB9XJeuCWEI9qAwo2NL3gs104DELqU69+Yao0GEmK+ 32 | N1v7d5byGjLS4f+3Zwn5zQgQjZ5iXNFoVVFhsd8NhoUQiZrgMSOdPkQmrDr4TWX5 33 | S0o= 34 | =LfXL 35 | -----END PGP MESSAGE----- 36 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 37 | encrypted_regex: ^(data|stringData)$ 38 | version: 3.8.1 39 | -------------------------------------------------------------------------------- /cluster/apps/vpn/wstunnel/secret.enc.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: v1 2 | #kind: Secret 3 | #type: kubernetes.io/dockerconfigjson 4 | #metadata: 5 | # name: regcred 6 | # namespace: vpn 7 | #data: 8 | # .dockerconfigjson: 
ENC[AES256_GCM,data:2ijbZc3KCHN5hagmdgqJRpRO1QEnaeH3hdUZQ8S234WBvHCCvLTdaoSHSKCTJb4audS+1NjQ8FCCbB383b107Qcm8WIj/W6TrszinZBGk2FQaqaQnjMQ9vi5oTtiW8dZwd7X3sXym6LSut7noFF+Ef8aflBQUV6Noy8PqdBZ6cm5rNeNxvPhS+gHVaO4HY/+yVk2dnAPluOAhnCTegZCJMUfAfv1hCwaKrAIqqRAYuBxxaHQ1dXRPwaG6Nj2b4H6J8l3V5o1rKY3Dd62rieznWQnjstsaN0MEUzysavCPQ9mBQQ+ZZKuUcQwpZtA2X7iYa6fp//KvD4FoalPk20E0TudvxR3bUT6tJUjmw==,iv:mILjSyAeuUfwinkqrLmuIAo3dACOREQU71EV3URQhOk=,tag:m7FHVHiC9R0CpqYF2YzCqQ==,type:str] 9 | #sops: 10 | # kms: [] 11 | # gcp_kms: [] 12 | # azure_kv: [] 13 | # hc_vault: [] 14 | # age: [] 15 | # lastmodified: "2021-05-29T09:44:01Z" 16 | # mac: ENC[AES256_GCM,data:+9NHEtrA4SSGSJAWe/WEBLiZnou72eV8OAkV2BHSNfWiuJMFO+HaHbQeFLkh6MpAY50c0YWadEq8WtBYAYHGUTEl12dFQM6QLooSKwufgKaXrlbYbxmPyLZmrkR4E1+bWVGx50PXASHVwMo7iFPzvN16sZoo5uat/mB3s0JFdcA=,iv:rG4vx1n7qg6MKshp2gSWLikAVFzj6yfOTH5oMqPEl9c=,tag:F9EOT0BSrKMaWkqCmfKepQ==,type:str] 17 | # pgp: 18 | # - created_at: "2021-05-29T09:44:01Z" 19 | # enc: | 20 | # -----BEGIN PGP MESSAGE----- 21 | # 22 | # hQGMA71EESEn7N2iAQv/SqzLvx8X5sy2VF34H889ZxbKbNegO8W+bnIlzW9UGOWg 23 | # RfHX2Meo+UXOAFKs5qmVfE7rsCIL/EGac7Oq1iCz+Bfbsi3P+OUl6E9OFTWldVh2 24 | # jcnAr2CzCi2slC692jxWHhl1MHMDJj4WJ3QLDDaPPcr/rGE8JgN7wGFBprBzxtxh 25 | # 5NS5gWgRb2DzwuK8JIeGPI1NYr9eGsecvk80KUEhOzOaXSCghUPy1KrcVV8JdEqY 26 | # R3NNbdVx+dKwhLUnIxphuBAokW8HZaHHsPBTj+bbCu3d6is0czowKeTYPYvgcN/b 27 | # pWQqS5e/2xOoKHJztO7WXDN9Ki6OQyZpbNXMPkU1YoK1qxo66Oc+aXhifSTtYmdd 28 | # VPzc8uupuIU++c/PZ4vMcD7JYWu08JcCzvFPXcROrBl2VgQFraZ25PjJX0PQqmXl 29 | # 8lSNUA7WQVYV/eit5S3b13p/TlhKiuimz/8ehskOqtHWgddJt4ScLbg7/gHp8/uK 30 | # jbmIPiaGD30b543jRas00l4BydcAif0ZnJTZyQP4umVan2b9o+6F/Cy5ckDA5S78 31 | # lTUJNRbAHq9r+vYZP5NWQnISaC4ct1vva3YsnLITtzDNGJUX7h5bUZ6c1JE2VhQ1 32 | # Bn9DceV7RRFkULbVvnye 33 | # =tESE 34 | # -----END PGP MESSAGE----- 35 | # fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 36 | # encrypted_regex: ^(data|stringData)$ 37 | # version: 3.7.1 38 | -------------------------------------------------------------------------------- /cluster/apps/default/yourls/mariadb-secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: yourls-mariadb 5 | namespace: default 6 | stringData: 7 | mariadb-root-password: ENC[AES256_GCM,data:bhxrnwlDQG+xF4anQx/LIMusQE5I/dyJvx+E9vMFxSYrbXRKz+lqEw==,iv:qluD3+UPkoImp3Cx5TZTcDS3yaWDvng8mQoFxqCZF+Y=,tag:vefpI3EDzo0O+Luq4iAS2Q==,type:str] 8 | mariadb-replication-password: ENC[AES256_GCM,data:6vTls99qp47m/Lfsc/tlq07G51Zf31WSwF2Xl3VyXWS8Jro8PFBWfQ==,iv:tlKriA08jyUXtZQ3VK1c6iK9X6I45Iush4CAs0LXZ+8=,tag:Vn9BADiuZc/xP6cPINnS/A==,type:str] 9 | mariadb-password: ENC[AES256_GCM,data:7xoYFiy3COgxak6ew3UTryluB3tuW9v9Dj4fCtnWKojGDKjUBeGbug==,iv:e99PX2QlJoupMLhDIJ+T/Qf30/UQftEb92ufONGO5Og=,tag:duFRgMZeTGcRXBfTRLqAVQ==,type:str] 10 | sops: 11 | kms: [] 12 | gcp_kms: [] 13 | azure_kv: [] 14 | hc_vault: [] 15 | age: [] 16 | lastmodified: "2023-01-18T20:38:48Z" 17 | mac: ENC[AES256_GCM,data:BXwPV3wbHInUjcOx46wikLIXwYve3wQDVoQriFTrpR+bDizVBgxjA00XKcLnLQlJ/z8qlEhaJnXp/LKhxgOdXqRJECenctOqgc3zJ0uDbaN3tyDu92bmC9edzRvwfhKzp5Wsc25pqJqFXDEggkl5fIgRK+o13YIwPW2Rf2RY7r4=,iv:y862ygm8Vis+kpHmPdL20TdewHFmyj6VNPURD2o9JIA=,tag:D6FcdDpceksL4MGqh6pK2Q==,type:str] 18 | pgp: 19 | - created_at: "2023-01-18T20:38:47Z" 20 | enc: | 21 | -----BEGIN PGP MESSAGE----- 22 | 23 | hQGMA71EESEn7N2iAQwAi5wtPrYonww2Cb0NsGLikLsN+DOqmTeNcObpXGmvTB+G 24 | igdTM6VMqOh7W35xic6YpJkwu573tQcV/2YPvY2CLO1buan1xMkcStPkl+kDtq6H 25 | 
0RZop2mO2WuQ1kpCRMwQLNMTv1i983zTc4FPDAtgovydd5gg/SpGzYV9eFs8ThdP 26 | EJErAsHxYb2sHA/qxC+e42qFKxKBZocJQpvRcJ5ht+fAi6jLBj+GMBJoGehzz00u 27 | OvPLp0fkIWokvQpTummkoQXVfOwqcDyZvX4a3bdt6xwKOm01ae/YYR6L/YZXmnFF 28 | /1uVuF54xxDaw/mRBTLwClw1G74Qq4hq0TIqorB+w1F589JfgGAIsSCyaoWR/uZT 29 | SnK3LoXm+NLEWgWvb/rwXelLhv6789y3kcoS8esj4MPfRpMxbwVp2WBaUmJKn8hY 30 | HC/qrS9CnRVVuoH3rUQ62mq5YMwJcfZp3+1E2q6USTETute7L2zE4P/EFr56DnmT 31 | yUANKx/EXAeIjBQ2dh7w0l4B5BISPNFcUIWoONydX1F1X50apE5a3E+Ft726Dw+2 32 | 0SGatvYuNoClYufnxRdqL5AM0tcWBsscFdcyOHLRbZIVBK/w+JF1GQ+a5xwzGspa 33 | IIdSNlj00hcHlmYRLBjL 34 | =/yzh 35 | -----END PGP MESSAGE----- 36 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 37 | encrypted_regex: ^(data|stringData)$ 38 | version: 3.7.3 39 | -------------------------------------------------------------------------------- /cluster/apps/networking/oauth2-proxy/secret.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | type: Opaque 4 | metadata: 5 | name: oauth2-proxy-client-secrets 6 | namespace: networking 7 | stringData: 8 | client-id: ENC[AES256_GCM,data:jfHyoLxtgmdRMYMenHC+K3bFJJKmpa7fhX/uBjPTkiqsp4eAhE6+gly226vmAlQFzDuuss1pGMdkij2gw7DA4/8T42ey7l7A,iv:IopfgLNXagWTlXcvRE3V4cVXCg/A/AfyiEdpmK5ZM9I=,tag:UuZKPo0+yZuBwi8v6YQgnw==,type:str] 9 | client-secret: ENC[AES256_GCM,data:RGo/VtMUFk3PpoGpAZvdl9RQZkWbNAFs,iv:qLqW6SyNnRq/v25sL8I4UyQxTTBasKqM5S2alwudHqo=,tag:XE8CE+sabkV8g01mT8Vx/w==,type:str] 10 | cookie-secret: ENC[AES256_GCM,data:e09m7HMbIQGXm7fXJSkV5zMkdDMHjsQgeY1vihwL+OPjEdSouWoiHubcBzs=,iv:BZpEEVEMSe3Mxi9NyHkVKHZM5QGWEOl0kCcKIkh3kHo=,tag:3Li44t9E1njxFxXtkJO2TQ==,type:str] 11 | sops: 12 | kms: [] 13 | gcp_kms: [] 14 | azure_kv: [] 15 | hc_vault: [] 16 | age: [] 17 | lastmodified: "2021-06-12T16:45:04Z" 18 | mac: ENC[AES256_GCM,data:4xUibOIJXTrkYKSlLzA8VmsTQcuUpj8t9zcecdQyC3c9eRSxVPuqUx1kvuj6bCnJRu4erzIkxfe4+S/0as6vXpFc7REMGOe5gmXY7ItPUq2FKPftKHmN8NVAvbP2sTMSscvy7soz7cHU/gp7cgk/lj2slWlHrl2/gUvqVGRXbFI=,iv:J0by8nL7MNXRc4wDdiCS6g2wiezx9Ry7B86xCej8HBk=,tag:SEAhGlIbfSIICcw38Y4JEA==,type:str] 19 | pgp: 20 | - created_at: "2021-06-12T16:45:03Z" 21 | enc: | 22 | -----BEGIN PGP MESSAGE----- 23 | 24 | hQGMA71EESEn7N2iAQv/SMgK5a0w/n3WnTxvgjKduukpQO3+nsZfCgR8vQXWIi6b 25 | XP9tNFP+MIn/WJdsb0a1kzmZFOdlSwZ+OVxHV9qVLD8a6gC4iX8AcvdVW4f2ANx2 26 | 5z/lxF/muZzJ4PgbIOZflNWAVa+/7SVBHEPhopqA3SnwMlM9CtlwAOuTsd7ACS34 27 | +ZiOIeG3wFx+vy3Er5UQyz89mz3hTuyarMIB4x3eozdGIMe996vmdESb1ZRBUyzi 28 | 09t8CmqrveflOw1f+Hb6G04mAjJ1Ran3bHX9xBcPD+FWJJ5Hw6Zdt6kwwFTULA7G 29 | FRzSgTipwPvxomn+Up1SZi+/iDPlNodchIPIH+REjioeWZDBbeA6w8G96LM3Pof3 30 | tkNaM++dthEOWTNc/ndXyGjKeFUwp83rYhxqV/8HXtqbSjr52hAH63gkLOc123qL 31 | T0g2j34aIybCVLucgnPtrS/QqGp/oQ9/6CbJ8j6kLRqZrGoflMd/iFxPgF7EeS4S 32 | Fl71CBgJwLKrqX7kxnXJ0l4BEuAu3z8phlg6DUL5taolrAqJHllz2LFxP4IN2Fww 33 | Up6dvyKq14mPAJJH6miwaDzkbEGnMDK1QsEaPnNCkz5AuY8i0SIirJJ7VtOHb6Ax 34 | eymT26pLGUGEAhZmxUzo 35 | =4kbD 36 | -----END PGP MESSAGE----- 37 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 38 | encrypted_regex: ^(data|stringData)$ 39 | version: 3.7.1 40 | -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/mb-scheduler-backend/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mb-scheduler-backend 5 | namespace: mb-scheduler 6 | labels: 7 | app: mb-scheduler-backend 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | selector: 13 | 
matchLabels: 14 | app: mb-scheduler-backend 15 | template: 16 | metadata: 17 | labels: 18 | app: mb-scheduler-backend 19 | spec: 20 | initContainers: 21 | - name: wait-for-database 22 | image: ghcr.io/shadyf/mb-backend:sha-42ba302 23 | envFrom: 24 | - secretRef: 25 | name: mb-scheduler-backend-secrets 26 | command: [ 'python', 'manage.py', 'wait_for_database' ] 27 | containers: 28 | - name: django 29 | image: ghcr.io/shadyf/mb-backend:sha-42ba302 30 | imagePullPolicy: IfNotPresent 31 | # ports: 32 | # - protocol: TCP 33 | # containerPort: 8000 34 | env: 35 | - name: DEBUG 36 | value: "False" 37 | envFrom: 38 | - secretRef: 39 | name: mb-scheduler-backend-secrets 40 | volumeMounts: 41 | - name: static-folder 42 | mountPath: /app/staticfiles 43 | resources: 44 | requests: 45 | cpu: 10m 46 | memory: 300Mi 47 | limits: 48 | memory: 300Mi 49 | # NGINX container to server static files 50 | - name: nginx 51 | image: nginx:1.29.4 52 | ports: 53 | - protocol: TCP 54 | containerPort: 80 55 | volumeMounts: 56 | - name: static-folder 57 | mountPath: /static/ 58 | - name: nginx-config 59 | mountPath: /etc/nginx/conf.d/ 60 | resources: 61 | requests: 62 | cpu: 10m 63 | memory: 100Mi 64 | limits: 65 | memory: 100Mi 66 | 67 | imagePullSecrets: 68 | - name: ghcr-login-secret 69 | volumes: 70 | - name: static-folder 71 | emptyDir: 72 | sizeLimit: 500Mi 73 | - name: nginx-config 74 | configMap: 75 | name: mb-scheduler-backend-nginx-conf 76 | 77 | 78 | -------------------------------------------------------------------------------- /cluster/apps/networking/oauth2-proxy/oauth2-proxy-helm-release.yaml: -------------------------------------------------------------------------------- 1 | # If using Auth0, https://auth.domain.com/oauth2/callback needs to be added as callback in auth0 2 | # TODO: Add toleration to ARM devices since the image in this release will only work on ARM devices 3 | apiVersion: helm.toolkit.fluxcd.io/v2 4 | kind: HelmRelease 5 | metadata: 6 | name: oauth2-proxy 7 | namespace: networking 8 | spec: 9 | interval: 5m 10 | chart: 11 | spec: 12 | # renovate: registryUrl=https://oauth2-proxy.github.io/manifests 13 | chart: oauth2-proxy 14 | version: 10.0.0 15 | sourceRef: 16 | kind: HelmRepository 17 | name: oauth2-proxy-charts 18 | namespace: flux-system 19 | interval: 5m 20 | install: 21 | remediation: 22 | retries: 3 23 | upgrade: 24 | cleanupOnFail: true 25 | remediation: 26 | retries: 3 27 | values: 28 | config: 29 | existingSecret: oauth2-proxy-client-secrets 30 | # Override configFile to disable all email domains to be used 31 | configFile: |- 32 | email_domains = [] 33 | upstreams = [ "file:///dev/null" ] 34 | 35 | replicaCount: 1 36 | 37 | # Won't work if email_domains = "*" 38 | authenticatedEmailsFile: 39 | enabled: true 40 | # One email per line 41 | restricted_access: | 42 | ${SECRET_EMAIL} 43 | 44 | ingress: 45 | enabled: true 46 | className: external 47 | path: /oauth2 48 | hosts: [ "auth.${SECRET_DOMAIN}" ] 49 | tls: 50 | - hosts: 51 | - "auth.${SECRET_DOMAIN}" 52 | annotations: 53 | external-dns/is-public: "true" 54 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 55 | 56 | # TODO: Check if --reverse-proxy flag is useful or not 57 | extraArgs: 58 | provider: google 59 | provider-display-name: "an authenticated email" 60 | skip-provider-button: "false" 61 | pass-basic-auth: "false" 62 | cookie-domain: ".${SECRET_DOMAIN}" 63 | whitelist-domain: ".${SECRET_DOMAIN}" 64 | exclude-logging-path: "/ping" 65 | 66 | # affinity: 67 | # nodeAffinity: 68 | # 
requiredDuringSchedulingIgnoredDuringExecution: 69 | # nodeSelectorTerms: 70 | # - matchExpressions: 71 | # - key: beta.kubernetes.io/arch 72 | # operator: In 73 | # values: 74 | # - arm64 75 | -------------------------------------------------------------------------------- /cluster/apps/networking/crowdsec/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | type: Opaque 4 | metadata: 5 | name: crowdsec-secrets 6 | namespace: networking 7 | stringData: 8 | CROWDSEC_API_URL: ENC[AES256_GCM,data:HmW5umQcCSuMx8FTOMDzNyeJpbUetUs=,iv:USnOufFTvfLfZdF1Bw/C+aOxp3gWJ2CtleIPz4s2Pk0=,tag:28jF+7e3UDTKB0tsMEBLaA==,type:str] 9 | CROWDSEC_INGRESS_NGINX_BOUNCER_API_KEY: ENC[AES256_GCM,data:tXQ4N/PMogtICVAMZSp0XY+ns+2y/g==,iv:FpVxP2tRLcZmdVKVMmt8LBQ+Y4p/ohAXBFFZQ3mBKis=,tag:KwUgBfU8aGSiBa5NsZVhGg==,type:str] 10 | CROWDSEC_AGENT_USERNAME: ENC[AES256_GCM,data:3ehL,iv:fE4bHS6+WYBjegfvK88tKXnt0hwDU57LMg1ddSnA46A=,tag:8dr10KzuNSugjgtYUp0edg==,type:str] 11 | CROWDSEC_AGENT_PASSWORD: ENC[AES256_GCM,data:NwoqxpeucbUHvBoaf/8+M/77ZDTUlP1MF1IsSu/8n25Gr3NSVtYFh7RR7WF5BEpy4zPN571GQffYuDpjwu4WFA==,iv:0jwfT9cCYb+n+RqoJ+kW19kg7YGrQy6qIgvUl8vfTuQ=,tag:9Yptp1RxoZAY3DrVQQeahw==,type:str] 12 | sops: 13 | kms: [] 14 | gcp_kms: [] 15 | azure_kv: [] 16 | hc_vault: [] 17 | age: [] 18 | lastmodified: "2023-10-06T17:55:23Z" 19 | mac: ENC[AES256_GCM,data:pc1ZqJkaBTBEqq88uoyGiKFEn08RYsqZ4yn8zqoGW+laeHCqMtlqHOgr6gG9z9lXEacYCGPXq4EO8SS+7F9vMCE78RQCNXl9xblAqho2KZUHQNaclXVQVfWVD3O2lPDi6W2rWfDS9eSDf5kjbPJbFRKfqgjm3tg9yWiXt9l96G4=,iv:mU/lA/Yo0vgMlc8mhXajYnVjgPVPrLdkxL+8ggXevHo=,tag:jYPi5nSV62loHJsGVZK5Ag==,type:str] 20 | pgp: 21 | - created_at: "2023-10-06T17:55:23Z" 22 | enc: |- 23 | -----BEGIN PGP MESSAGE----- 24 | 25 | hQGMA71EESEn7N2iAQv/dF8dvSkQWDdlIi4vFyeynwWyGWEfx4Q2pZAdkPEPsrE3 26 | 6yZqnqVQTNZ3eucDKo2sry0LzGk9KwxkeL86SXJR4MHzJMp9jHR6WPPVZKZNUn1h 27 | RF930CGRsWFehnSBDkw2wo9jz6e0QTVRa0p4gWUrjRj8Mjpvqwxbo/Wrg3bL+IJS 28 | Q+RxqsbV2PGrIdgoFSYXJP3voMJgK7BYH+ENx++SJrJagxPKYElespvQQTR/xy9U 29 | vhffP9Pd31D//lkYTJo9QqlnRBeRDTy7zBLtCmBtuAP3/O4NXVcLVLFY4T7rHr0/ 30 | iWgkGuLMKw5HUkDgxb5d581mCUiCqd4kJRL2ZffXMlHA0xkQcY93+ptrwEd0bgwu 31 | uWdhacgJBNbHGBFc1tjv4kJBLGFJGky5uwgKd4SgwUnrItSTYYXasf3YJB7/KYWa 32 | WWniR/h3sAonX1Ga/r0QwMvCoPNUbIQBalDdCYt+Ed4/Hq8p+NpyrxCHCnqYOHtd 33 | 0diolV+2b2XAOgHfB2js0lwBVgUfquZ/K9x+Xh0NAhAmBa6LKioSP/soeM0LA3Tx 34 | 2P9tJPU9dzpb+pOoYX2IK3UWogbXdQ+d9IPAQruBeOwKx9boqWsj53/qBbTdDfYc 35 | NUJZEuqYJMwHsM/SvA== 36 | =g+jp 37 | -----END PGP MESSAGE----- 38 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 39 | encrypted_regex: ^(data|stringData)$ 40 | version: 3.8.0 41 | -------------------------------------------------------------------------------- /cluster/apps/longhorn-system/longhorn/longhorn-helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2 3 | kind: HelmRelease 4 | metadata: 5 | name: longhorn 6 | namespace: longhorn-system 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | # renovate: registryUrl=https://charts.longhorn.io 12 | chart: longhorn 13 | version: 1.10.1 14 | sourceRef: 15 | kind: HelmRepository 16 | name: longhorn-charts 17 | namespace: flux-system 18 | interval: 5m 19 | install: 20 | remediation: 21 | retries: 3 22 | upgrade: 23 | cleanupOnFail: true 24 | remediation: 25 | retries: 3 26 | # https://github.com/longhorn/charts/blob/v1.10.x/charts/longhorn/values.yaml 27 | values: 28 | # 
https://github.com/longhorn/longhorn/issues/1861#issuecomment-716459507 29 | csi: 30 | kubeletRootDir: /var/lib/kubelet 31 | attacherReplicaCount: 2 32 | provisionerReplicaCount: 2 33 | resizerReplicaCount: 2 34 | snapshotterReplicaCount: 2 35 | longhornUI: 36 | replicas: 1 37 | # Needed when using longhorn v1.4.0 to work with kubernetes <=1.24 38 | enablePSP: false 39 | persistence: 40 | defaultClassReplicaCount: 2 41 | # Prevents PVC and daata being lost when helm release is uninstalled 42 | reclaimPolicy: Retain 43 | defaultDataLocality: best-effort 44 | defaultSettings: 45 | defaultReplicaCount: "2" 46 | defaultDataLocality: best-effort 47 | replicaAutoBalance: best-effort 48 | # Prevents pods from being rescheduled on healthy nodes 49 | nodeDownPodDeletionPolicy: delete-both-statefulset-and-deployment-pod 50 | # Allows kured to be able to work with longhorn 51 | nodeDrainPolicy: block-if-contains-last-replica 52 | # Make the daily backup job the default for all volumes via group selector 53 | defaultRecurringJobSelector: 54 | - name: "default" 55 | isGroup: true 56 | defaultBackupStore: 57 | backupTarget: "nfs://192.168.1.213:/srv/nfs/longhorn-backups" 58 | ingress: 59 | enabled: true 60 | host: "longhorn.${SECRET_DOMAIN}" 61 | tls: true 62 | ingressClassName: "internal" 63 | tlsSecret: longhorn-tls 64 | annotations: 65 | external-dns/is-public: "false" 66 | nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.networking.svc.cluster.local/oauth2/auth" 67 | nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_DOMAIN}/oauth2/sign_in" -------------------------------------------------------------------------------- /cluster/apps/mb-scheduler/mb-scheduler-worker/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: mb-scheduler-worker 5 | namespace: mb-scheduler 6 | labels: 7 | app: mb-scheduler-worker 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: 14 | app: mb-scheduler-worker 15 | template: 16 | metadata: 17 | labels: 18 | app: mb-scheduler-worker 19 | spec: 20 | initContainers: 21 | - name: wait-for-database 22 | image: ghcr.io/shadyf/mb-backend:sha-42ba302 23 | envFrom: 24 | - secretRef: 25 | name: mb-scheduler-backend-secrets 26 | command: [ 'python', 'manage.py', 'wait_for_database' ] 27 | containers: 28 | # By default, this will run 4 workers in the same pod. 
Use the --concurrency flag to reduce that to 1 and scale 29 | # using kubernetes 30 | - name: celery-worker 31 | image: ghcr.io/shadyf/mb-backend:sha-42ba302 32 | imagePullPolicy: IfNotPresent 33 | command: [ 'celery', '-A', 'mb_heba_backend', 'worker' , '--loglevel=INFO' ] 34 | env: 35 | - name: DEBUG 36 | value: "False" 37 | - name: CELERY_BROKER_URL 38 | value: redis://mb-scheduler-redis.mb-scheduler.svc.cluster.local:6379/0 39 | envFrom: 40 | - secretRef: 41 | name: mb-scheduler-backend-secrets 42 | resources: 43 | requests: 44 | cpu: 10m 45 | memory: 350Mi 46 | limits: 47 | memory: 350Mi 48 | # Consider using redbeat if going to scale workers and want a highly available celery-beat 49 | - name: celery-beat 50 | image: ghcr.io/shadyf/mb-backend:sha-42ba302 51 | imagePullPolicy: IfNotPresent 52 | command: [ 'celery', '-A' ,'mb_heba_backend','beat','-l','INFO', '--scheduler','django_celery_beat.schedulers:DatabaseScheduler' ] 53 | env: 54 | - name: DEBUG 55 | value: "False" 56 | - name: CELERY_BROKER_URL 57 | value: redis://mb-scheduler-redis.mb-scheduler.svc.cluster.local:6379/0 58 | envFrom: 59 | - secretRef: 60 | name: mb-scheduler-backend-secrets 61 | resources: 62 | requests: 63 | cpu: 10m 64 | memory: 120Mi 65 | limits: 66 | memory: 120Mi 67 | 68 | imagePullSecrets: 69 | - name: ghcr-login-secret 70 | 71 | 72 | -------------------------------------------------------------------------------- /cluster/apps/default/flaresolverr/flaresolverr-helm-release.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/app-template-3.0.2/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json 2 | apiVersion: helm.toolkit.fluxcd.io/v2 3 | kind: HelmRelease 4 | metadata: 5 | name: flaresolverr 6 | namespace: default 7 | spec: 8 | interval: 15m 9 | chart: 10 | spec: 11 | # renovate: registryUrl=https://bjw-s-labs.github.io/helm-charts 12 | chart: app-template 13 | version: 4.5.0 14 | interval: 15m 15 | sourceRef: 16 | kind: HelmRepository 17 | name: bjw-s 18 | namespace: flux-system 19 | install: 20 | remediation: 21 | retries: 3 22 | upgrade: 23 | cleanupOnFail: true 24 | remediation: 25 | retries: 3 26 | values: 27 | # https://github.com/bjw-s/helm-charts/blob/main/charts/library/common/values.yaml 28 | controllers: 29 | main: 30 | strategy: Recreate 31 | 32 | containers: 33 | main: 34 | image: 35 | repository: ghcr.io/flaresolverr/flaresolverr 36 | tag: v3.4.6 37 | pullPolicy: IfNotPresent 38 | env: 39 | TZ: Africa/Cairo 40 | # LOG_LEVEL: debug 41 | PROMETHEUS_ENABLED: true 42 | probes: 43 | liveness: &probes 44 | enabled: true 45 | custom: true 46 | spec: 47 | httpGet: 48 | path: /health 49 | port: &port 8191 50 | initialDelaySeconds: 0 51 | periodSeconds: 10 52 | timeoutSeconds: 1 53 | failureThreshold: 3 54 | readiness: *probes 55 | startup: 56 | enabled: false 57 | resources: 58 | requests: 59 | cpu: 15m 60 | memory: 1024Mi 61 | limits: 62 | memory: 1024Mi 63 | 64 | service: 65 | main: 66 | controller: main 67 | ports: 68 | http: 69 | port: *port 70 | http-metrics: 71 | port: 8192 72 | protocol: TCP 73 | targetPort: 8192 74 | 75 | serviceMonitor: 76 | main: 77 | serviceName: flaresolverr 78 | enabled: true 79 | 80 | endpoints: 81 | - port: http-metrics 82 | scheme: http 83 | path: /metrics 84 | interval: 1m 85 | scrapeTimeout: 10s -------------------------------------------------------------------------------- /docs/apps/oauth2-proxy.md: 
-------------------------------------------------------------------------------- 1 | --- 2 | title: OAuth2 Proxy 3 | --- 4 | 5 | # OAuth2 Proxy slows down k8s cluster 6 | 7 | ## The Problem 8 | 9 | When `ingress-nginx` and `oauth2-proxy` are used together in a k8s cluster, as described in 10 | this [tutorial](https://kubernetes.github.io/ingress-nginx/examples/auth/oauth-external-auth/), the cluster immediately 11 | starts slowing down when accessing any application served over HTTP / HTTPS that goes through the ingress. 12 | 13 | The issue is described further 14 | in [this Stack Overflow post](https://stackoverflow.com/questions/58997958/oauth2-proxy-authentication-calls-slow-on-kubernetes-cluster-with-auth-annotatio) 15 | 16 | ## The Cause 17 | 18 | Setting the following annotations on an application's ingress causes this issue: 19 | 20 | ```yaml 21 | annotations: 22 | nginx.ingress.kubernetes.io/auth-url: "http://auth.domain.com/oauth2/auth" 23 | nginx.ingress.kubernetes.io/auth-signin: "https://auth.domain.com/oauth2/sign_in" 24 | ``` 25 | 26 | ### Why does adding these annotations cause the issue? 27 | 28 | When `auth-url` is set to `auth.domain.com`, every auth request goes outside the cluster (so-called hairpin 29 | mode) and comes back in via the external IP of the ingress, which then routes to the internal ClusterIP Service (adding extra network 30 | hops), instead of going directly to the ClusterIP / Service DNS name and staying within the Kubernetes cluster.[^1] 31 | 32 | If a request is made to `auth.domain.com` from **inside** the internal network, the **internal** DNS resolves this to an 33 | internal IP. 34 | 35 | If a request is made to `auth.domain.com` from **outside** the network, the **external** DNS (Cloudflare, Google, 36 | etc.) resolves this to the external IP set in the DNS records. 37 | 38 | !!! note 39 | This doesn't happen with other repos (the ones 40 | at [awesome-home-kubernetes](https://github.com/k8s-at-home/awesome-home-kubernetes)) because they use 41 | a [split-horizon DNS](https://en.wikipedia.org/wiki/Split-horizon_DNS), meaning they have a DNS server internal to their 42 | network that resolves queries to internal IPs and an external one that resolves queries to external IPs. 43 | 44 | ## The Solution 45 | 46 | Set the `auth-url` to the cluster-internal `oauth2-proxy` Service (its `<service>.<namespace>.svc.cluster.local` DNS name) so that the auth request never resolves to an 47 | external IP.
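In this repository the same annotations are applied per app inside each HelmRelease's `values` (the Mealie and Longhorn releases use exactly this pattern), with the hostname injected via the `${SECRET_DOMAIN}` substitution variable; the generic form with a literal domain follows below. A sketch in the repo's style:

```yaml
ingress:
  main:
    enabled: true
    className: "internal"
    annotations:
      # Auth check stays on the cluster network (no hairpin out and back in)
      nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.networking.svc.cluster.local/oauth2/auth"
      # The browser redirect for the login flow still uses the public hostname
      nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_DOMAIN}/oauth2/sign_in"
      external-dns/is-public: "false"
      external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}"
```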
48 | 49 | ```yaml 50 | annotations: 51 | nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.networking.svc.cluster.local/oauth2/auth" 52 | nginx.ingress.kubernetes.io/auth-signin: "https://auth.domain.com/oauth2/sign_in" 53 | ``` 54 | 55 | [^1]: [https://stackoverflow.com/a/60280114](https://stackoverflow.com/a/60280114) 56 | -------------------------------------------------------------------------------- /cluster/apps/default/v-rising/deployment.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: apps/v1 2 | #kind: Deployment 3 | #metadata: 4 | # name: v-rising-deployment 5 | # namespace: default 6 | # labels: 7 | # app: v-rising 8 | #spec: 9 | # replicas: 0 10 | # strategy: 11 | # type: Recreate 12 | # selector: 13 | # matchLabels: 14 | # app: v-rising 15 | # template: 16 | # metadata: 17 | # labels: 18 | # app: v-rising 19 | # spec: 20 | # containers: 21 | # - name: v-rising 22 | # # https://hub.docker.com/r/mephi00/v-rising-wine 23 | # image: mephi00/v-rising-wine 24 | # imagePullPolicy: IfNotPresent 25 | # ports: 26 | # - protocol: UDP 27 | # containerPort: 9876 28 | # - protocol: UDP 29 | # containerPort: 9877 30 | # env: 31 | # - name: V_RISING_NAME 32 | # value: "Orange Red Yellow" 33 | # - name: V_RISING_SAVE_NAME 34 | # value: "save1" 35 | # - name: V_RISING_PUBLIC_LIST 36 | # value: "true" 37 | # - name: V_RISING_GAME_MODE 38 | # value: "PvE" 39 | # - name: V_RISING_MAX_USER 40 | # value: "4" 41 | # - name: V_RISING_AUTOSAVE_COUNT 42 | # value: "15" 43 | # - name: V_RISING_AUTOSAVE_INTERVAL 44 | # value: "120" 45 | # envFrom: 46 | # - secretRef: 47 | # name: v-rising-secrets 48 | # volumeMounts: 49 | # - mountPath: "/saves" 50 | # name: v-rising-volume 51 | # - name: v-rising-host-settings-volume 52 | # mountPath: /templates/ServerHostSetting.templ 53 | # subPath: ServerHostSetting.templ 54 | # resources: 55 | # requests: 56 | # memory: 2048Mi 57 | # cpu: 500m 58 | # volumes: 59 | # - name: v-rising-volume 60 | # persistentVolumeClaim: 61 | # claimName: v-rising-saves-pvc-v3 62 | # - name: v-rising-host-settings-volume 63 | # configMap: 64 | # name: v-rising-host-settings 65 | # # Only run on k8-w1, non ARM 66 | # affinity: 67 | # nodeAffinity: 68 | # requiredDuringSchedulingIgnoredDuringExecution: 69 | # nodeSelectorTerms: 70 | # - matchExpressions: 71 | # - key: kubernetes.io/hostname 72 | # operator: In 73 | # values: 74 | # - k8-w1 75 | # # id of "steam" user in image 76 | # securityContext: 77 | # runAsUser: 1000 78 | # runAsGroup: 1000 79 | # fsGroup: 1000 -------------------------------------------------------------------------------- /cluster/apps/default/n8n/secrets.enc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: n8n-secrets 5 | namespace: default 6 | stringData: 7 | DB_POSTGRESDB_DATABASE: ENC[AES256_GCM,data:C/Nw2HKG1ok=,iv:tTnV26zvItmWpBFt+M7wNDNTCmGezJPAcfsSmMIrpTU=,tag:wj5g9w+31Y1+mO/zDmbvgg==,type:str] 8 | DB_POSTGRESDB_HOST: ENC[AES256_GCM,data:bUZmjUPv3NZeGpvNXlHl6dqk/yc3k3twEg8w2kwPMTAqXOQFI4zujA==,iv:zngiwWGZ/h6fwFQE1xA90OaTDnW6TIZ/vRdTUssCVJI=,tag:CfrtQhpAO19ZPRi4WL5K/w==,type:str] 9 | DB_POSTGRESDB_PASSWORD: ENC[AES256_GCM,data:IabWP1o6LSXtQzltXRatcKuhcFpCS9MzLztXK5fA,iv:LTELYbDRZd5iF/wz9N723uxg0YMSar791FBG+shXMnE=,tag:NgXiZBS6gHY00wFqkp8ldg==,type:str] 10 | DB_POSTGRESDB_USER: ENC[AES256_GCM,data:0jha,iv:qbi54pep/F8HpnjTZi7+UhLqNxjjRW/+Fu6fcNasJ60=,tag:5hcl2QijyOsnIvxWz1dJVA==,type:str] 11 | 
DB_POSTGRESDB_SCHEMA: ENC[AES256_GCM,data:R7+cqKZ6,iv:niH1isF3BiR9MSyMGoIeUQtrvdJzliV+/0Tfw5AgWy0=,tag:sYXYZcqpql+KAkyuDda1tg==,type:str] 12 | N8N_HOST: ENC[AES256_GCM,data:/XEs0MYdPF/+7K5ORuQ=,iv:TaFVFalsrVfBQG/V+2FHBvcFHApxX+Y5KExLlpNliKM=,tag:OQqmK0epS7n2+n96MI4nrg==,type:str] 13 | sops: 14 | kms: [] 15 | gcp_kms: [] 16 | azure_kv: [] 17 | hc_vault: [] 18 | age: [] 19 | lastmodified: "2023-10-05T13:12:34Z" 20 | mac: ENC[AES256_GCM,data:G2k4lTcA9JIBqINzVIF9tu2YjkbzYiehx78rC5IAwtFYF1iu5xs/u02qlJ3m84uapLKDj7wvPIOslj7pa2A6Qo00tT23N2GqtZsaYzGywl4R/CHau3NRgfhscGyTl8FGQvL/jYH4ziiUimjFbXMvoRCLgF/d6K4FkkbZ0FxoXsg=,iv:PhaGBF8d4CDUKceEZaJKRlm/4ysaXrv8bdLwddNoHcM=,tag:AjcVO3xfn+3W80RT85Z+nw==,type:str] 21 | pgp: 22 | - created_at: "2023-10-05T13:12:34Z" 23 | enc: |- 24 | -----BEGIN PGP MESSAGE----- 25 | 26 | hQGMA71EESEn7N2iAQwAmRE6FRebzND8U2TfYPnQKDmg32Rj1LcVWyBH9CK1kxp7 27 | SNiqlFckIb+No+EFu9cA+cQuzkypKJigDzECmLAKFkg0EvBUjAixz8psCKU+7NcJ 28 | nWYO8Gj7+2oMHP5+9Bfh4aa/Y+B4RS3oh7j7Oq6bxZDTjv5WMxwsS4oVNhMkQXFl 29 | 1e8nAMao5MlaZAbtpUELNxHRkQ7sVM8UBgMF6qvEIHfGsTJuDBznCnoSGTJ1chI0 30 | zD3LzOCIX6HgoUj+4K7DQf4/w2BDcWp55nIMEXyUHEh+btFQi9cljJ/6XmM+FEXj 31 | mZHhitW0PtEah6pxkbdw49Dv2nqV0ntBo8Uph/R/KYOFJoSkVMKmGFy+4Iiu/k1I 32 | id3fWzAMj+dgjw4foviGKLcI00yPrwsmPQBVJ/pkViC1vWph1qEH7D4t+//vje19 33 | tZirtcow/wxY9PByqvoAv88urJ8U8oBU1FCPAAlGMmt7m/OzkhcXuf80onaETs2O 34 | h29V4iYeDvDuT5uO2N1L0lwBkaxeuZA3frynmkLwZBPHNvW8JcXJFufxL4MBuKLe 35 | yeqm6Y0sI5YqNCL5COia/cr9GL4fQqtMdR5byDZxy0214SK+3lUcgrwFZzO+CpEQ 36 | 4pfnfp8xSXrTiNqmnw== 37 | =k8vQ 38 | -----END PGP MESSAGE----- 39 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 40 | encrypted_regex: ^(data|stringData)$ 41 | version: 3.8.0 42 | -------------------------------------------------------------------------------- /cluster/apps/networking/crowdsec/parsers-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: crowdsec-parsers-confimap 5 | namespace: networking 6 | # JSON formatting of parsers adapted from https://discourse.crowdsec.net/t/nginx-logs-in-gelf-json-format/53/14 7 | data: 8 | nginx-json-logs.yaml: | 9 | filter: "evt.Parsed.program == 'nginx'" 10 | #debug: true 11 | name: crowdsecurity/nginx-logs 12 | description: "Parse nginx access and error logs" 13 | statics: 14 | - target: evt.StrTime 15 | expression: JsonExtract(evt.Parsed.message, "time_iso8601") 16 | - parsed: "logsource" 17 | value: "nginx-json" 18 | - parsed: remote_addr 19 | expression: JsonExtract(evt.Parsed.message, "remote_addr") 20 | - parsed: remote_user 21 | expression: JsonExtract(evt.Parsed.message, "remote_user") 22 | - meta: source_ip 23 | expression: JsonExtract(evt.Parsed.message, "remote_addr") 24 | - meta: http_status 25 | expression: JsonExtract(evt.Parsed.message, "status") 26 | - meta: http_path 27 | expression: JsonExtract(evt.Parsed.message, "request") 28 | - meta: log_type 29 | value: http_access-log 30 | - meta: service 31 | value: http 32 | - parsed: http_user_agent 33 | expression: JsonExtract(evt.Parsed.message, "http_user_agent") 34 | - parsed: http_referer 35 | expression: JsonExtract(evt.Parsed.message, "http_referrer") 36 | - parsed: target_fqdn 37 | expression: JsonExtract(evt.Parsed.message, "http_host") 38 | - parsed: method 39 | expression: JsonExtract(evt.Parsed.message, "request_method") 40 | - parsed: body_bytes_sent 41 | expression: JsonExtract(evt.Parsed.message, "body_bytes_sent") 42 | - parsed: http_version 43 | expression: 
JsonExtract(evt.Parsed.message, "server_protocol") 44 | - parsed: status 45 | expression: JsonExtract(evt.Parsed.message, "status") 46 | - parsed: full_request 47 | expression: JsonExtract(evt.Parsed.message, "request") 48 | --- 49 | filter: "evt.Meta.service == 'http' && evt.Meta.log_type in ['http_access-log', 'http_error-log']" 50 | onsuccess: next_stage 51 | # debug: true 52 | name: local/nginx-json 53 | grok: 54 | pattern: '%{WORD:method} %{URIPATHPARAM:request} HTTP/%{NUMBER:http_version}' 55 | apply_on: full_request 56 | whitelist.yaml: | 57 | name: crowdsecurity/whitelists 58 | description: "Whitelist events actual budget" 59 | whitelist: 60 | reason: "actual budget" 61 | expression: 62 | - evt.Parsed.target_fqdn == 'actual.${SECRET_DOMAIN}' 63 | -------------------------------------------------------------------------------- /docs/miscellaneous/cloudflare_port_forwarding_openwrt.md: -------------------------------------------------------------------------------- 1 | # Restrict Port Forwarding to only allow Cloudflare IPs using OpenWRT 2 | 3 | Default firewall rules only allow for one source IP to be defined when created a rule. Hence, there's two options when 4 | you want to create a rule that uses multiple Source IPs: 5 | 6 | 1. Repeat the firewall multiple times 7 | 2. Use `ipset` (See [here](https://openwrt.org/docs/guide-user/firewall/fw3_configurations/fw3_config_ipset)) 8 | 9 | We're going with the second option as it's much easier. 10 | 11 | In `/etc/config/firewall` add the following 12 | 13 | ``` 14 | config ipset 15 | option name 'cloudflareips' 16 | option match 'src_net' 17 | option storage 'hash' 18 | option enabled '1' 19 | list entry '103.21.244.0/22' 20 | list entry '103.22.200.0/22' 21 | list entry '103.31.4.0/22' 22 | list entry '104.16.0.0/13' 23 | list entry '104.24.0.0/14' 24 | list entry '108.162.192.0/18' 25 | list entry '131.0.72.0/22' 26 | list entry '141.101.64.0/18' 27 | list entry '162.158.0.0/15' 28 | list entry '172.64.0.0/13' 29 | list entry '173.245.48.0/20' 30 | list entry '188.114.96.0/20' 31 | list entry '190.93.240.0/20' 32 | list entry '197.234.240.0/22' 33 | list entry '198.41.128.0/17' 34 | ``` 35 | 36 | Now edit your port forwardings (called `redirect` in `/etc/config/firewall`) to utilize the newly created `ipset` 37 | 38 | Again, in `/etc/config/firewall` 39 | 40 | ``` 41 | config redirect 42 | option target 'DNAT' 43 | option name 'KubeHTTP' 44 | option src 'wan' 45 | option ipset 'cloudflareips' # <- This like here 46 | option src_dport '80' 47 | option dest 'lan' 48 | option dest_ip '' 49 | option dest_port '80' 50 | list proto 'tcp' 51 | 52 | config redirect 53 | option target 'DNAT' 54 | option name 'KubeHTTPS' 55 | list proto 'tcp' 56 | option src 'wan' 57 | option ipset 'cloudflareips' # <- This like here 58 | option src_dport '443' 59 | option dest 'lan' 60 | option dest_ip '' 61 | option dest_port '443' 62 | ``` 63 | 64 | Finally, reload the firewall by running `/etc/init.d/firewall reload` 65 | 66 | [^1]: [https://openwrt.org/docs/guide-user/firewall/firewall_configuration](https://openwrt.org/docs/guide-user/firewall/firewall_configuration) -------------------------------------------------------------------------------- /cluster/apps/default/flood/flood-helm-release.yaml: -------------------------------------------------------------------------------- 1 | #apiVersion: helm.toolkit.fluxcd.io/v2 2 | #kind: HelmRelease 3 | #metadata: 4 | # name: flood 5 | # namespace: default 6 | #spec: 7 | # interval: 5m 8 | # chart: 9 | # spec: 
10 | # # renovate: registryUrl=https://k8s-at-home.com/charts/ 11 | # chart: flood 12 | # version: 6.4.2 13 | # sourceRef: 14 | # kind: HelmRepository 15 | # name: k8s-at-home-charts 16 | # namespace: flux-system 17 | # interval: 5m 18 | # dependsOn: 19 | # - name: longhorn 20 | # namespace: longhorn-system 21 | # values: 22 | # image: 23 | # repository: jesec/flood 24 | # tag: master 25 | # 26 | # env: 27 | # - name: FLOOD_OPTION_AUTH 28 | # value: "none" 29 | # - name: FLOOD_OPTION_QBURL 30 | # value: "http://qbittorrent.default.svc.cluster.local:8080" 31 | # - name: FLOOD_OPTION_RUNDIR 32 | # value: "/config" 33 | # - name: FLOOD_OPTION_QBUSER 34 | # value: dummy 35 | # - name: FLOOD_OPTION_QBPASS 36 | # value: dummy2 37 | # 38 | # ingress: 39 | # main: 40 | # enabled: true 41 | # ingressClassName: "external" 42 | # annotations: 43 | # nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.networking.svc.cluster.local/oauth2/auth" 44 | # nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_DOMAIN}/oauth2/sign_in" 45 | # external-dns/is-public: "true" 46 | # external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 47 | # hosts: 48 | # - host: "flood.${SECRET_DOMAIN}" 49 | # paths: 50 | # - path: / 51 | # pathType: Prefix 52 | # tls: 53 | # - hosts: 54 | # - "flood.${SECRET_DOMAIN}" 55 | # 56 | # persistence: 57 | # config: 58 | # enabled: true 59 | # existingClaim: flood-config-pvc-2 60 | # # PVC from qbittorrent 61 | # downloads: 62 | # enabled: true 63 | # existingClaim: nfs-media-downloads-pvc 64 | # readOnly: true 65 | # 66 | # probes: 67 | # liveness: 68 | # enabled: true 69 | # initialDelaySeconds: 30 70 | # failureThreshold: 5 71 | # timeoutSeconds: 10 72 | # readiness: 73 | # enabled: true 74 | # initialDelaySeconds: 30 75 | # failureThreshold: 5 76 | # timeoutSeconds: 10 77 | # startup: 78 | # enabled: true 79 | # initialDelaySeconds: 5 80 | # failureThreshold: 30 81 | # periodSeconds: 10 82 | # 83 | # resources: 84 | # requests: 85 | # cpu: 10m 86 | # memory: 120Mi 87 | # limits: 88 | # memory: 120Mi 89 | # 90 | # podSecurityContext: 91 | # runAsUser: 1001 92 | # runAsGroup: 1001 93 | # fsGroup: 1001 94 | -------------------------------------------------------------------------------- /cluster/apps/default/n8n/n8n-helm-release.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2 2 | kind: HelmRelease 3 | metadata: 4 | name: n8n 5 | namespace: default 6 | spec: 7 | interval: 15m 8 | chart: 9 | spec: 10 | chart: n8n 11 | version: 0.13.0 12 | interval: 15m 13 | sourceRef: 14 | kind: HelmRepository 15 | name: open-8gears-charts 16 | namespace: flux-system 17 | install: 18 | remediation: 19 | retries: 3 20 | upgrade: 21 | cleanupOnFail: true 22 | remediation: 23 | retries: 3 24 | dependsOn: 25 | - name: longhorn 26 | namespace: longhorn-system 27 | values: 28 | image: 29 | repository: n8nio/n8n 30 | pullPolicy: IfNotPresent 31 | tag: 2.1.1 32 | 33 | deploymentStrategy: 34 | type: "Recreate" 35 | 36 | replicaCount: 1 37 | 38 | ingress: 39 | enabled: true 40 | className: internal 41 | annotations: 42 | external-dns/is-public: "false" 43 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 44 | hosts: 45 | - host: "n8n.${SECRET_DOMAIN}" 46 | paths: 47 | - / 48 | 49 | tls: 50 | - hosts: 51 | - "n8n.${SECRET_DOMAIN}" 52 | 53 | config: 54 | database: 55 | type: postgresdb 56 | generic: 57 | timezone: Africa/Cairo 58 | executions: 59 | pruneData: "true" # prune executions by 
default 60 | pruneDataMaxAge: 3760 # Per defaut we store 1 year of history 61 | ai: 62 | enabled: true 63 | port: 443 64 | protocol: https 65 | 66 | extraEnvSecrets: 67 | DB_POSTGRESDB_USER: 68 | name: n8n-secrets 69 | key: DB_POSTGRESDB_USER 70 | DB_POSTGRESDB_PASSWORD: 71 | name: n8n-secrets 72 | key: DB_POSTGRESDB_PASSWORD 73 | DB_POSTGRESDB_DATABASE: 74 | name: n8n-secrets 75 | key: DB_POSTGRESDB_DATABASE 76 | DB_POSTGRESDB_HOST: 77 | name: n8n-secrets 78 | key: DB_POSTGRESDB_HOST 79 | DB_POSTGRESDB_SCHEMA: 80 | name: n8n-secrets 81 | key: DB_POSTGRESDB_SCHEMA 82 | N8N_HOST: 83 | name: n8n-secrets 84 | key: N8N_HOST 85 | 86 | extraEnv: 87 | N8N_ENFORCE_SETTINGS_FILE_PERMISSIONS: "true" 88 | 89 | # Removed persistence because n8n-pvc was not even used. 90 | # Have a hunch it isn't used because we're using a PSQL database for persistence 91 | # persistence: 92 | # enabled: true 93 | # existingClaim: n8n-pvc 94 | 95 | resources: 96 | requests: 97 | cpu: 14m 98 | memory: 500Mi 99 | limits: 100 | memory: 500Mi 101 | 102 | scaling: 103 | enabled: false -------------------------------------------------------------------------------- /cluster/apps/default/mealie/mealie-helm-release.yaml: -------------------------------------------------------------------------------- 1 | # yaml-language-server: $schema=https://raw.githubusercontent.com/bjw-s/helm-charts/app-template-3.0.2/charts/other/app-template/schemas/helmrelease-helm-v2beta2.schema.json 2 | apiVersion: helm.toolkit.fluxcd.io/v2 3 | kind: HelmRelease 4 | metadata: 5 | name: mealie 6 | namespace: default 7 | spec: 8 | interval: 15m 9 | chart: 10 | spec: 11 | # renovate: registryUrl=https://bjw-s-labs.github.io/helm-charts 12 | chart: app-template 13 | version: 4.5.0 14 | interval: 15m 15 | sourceRef: 16 | kind: HelmRepository 17 | name: bjw-s 18 | namespace: flux-system 19 | install: 20 | remediation: 21 | retries: 3 22 | upgrade: 23 | cleanupOnFail: true 24 | remediation: 25 | retries: 3 26 | dependsOn: 27 | - name: longhorn 28 | namespace: longhorn-system 29 | values: 30 | # https://github.com/bjw-s/helm-charts/blob/main/charts/library/common/values.yaml 31 | controllers: 32 | main: 33 | strategy: Recreate 34 | 35 | containers: 36 | main: 37 | image: 38 | repository: ghcr.io/mealie-recipes/mealie 39 | tag: v1.0.0-RC1.1 40 | pullPolicy: IfNotPresent 41 | 42 | env: 43 | BASE_URL: https://mealie.${SECRET_DOMAIN} 44 | ALLOW_SIGNUP: false 45 | API_DOCS: false 46 | TZ: Africa/Cairo 47 | # Default Values 48 | WORKERS_PER_CORE: 1 49 | MAX_WORKERS: 1 50 | WEB_CONCURRENCY: 1 51 | # resources: 52 | # limits: 53 | # cpu: 100m 54 | # memory: 128Mi 55 | # requests: 56 | # cpu: 100m 57 | # memory: 128Mi 58 | 59 | service: 60 | main: 61 | controller: main 62 | ports: 63 | http: 64 | port: 9000 65 | 66 | ingress: 67 | main: 68 | enabled: true 69 | className: "internal" 70 | annotations: 71 | nginx.ingress.kubernetes.io/auth-url: "http://oauth2-proxy.networking.svc.cluster.local/oauth2/auth" 72 | nginx.ingress.kubernetes.io/auth-signin: "https://auth.${SECRET_DOMAIN}/oauth2/sign_in" 73 | external-dns/is-public: "false" 74 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 75 | hosts: 76 | - host: "mealie.${SECRET_DOMAIN}" 77 | paths: 78 | - path: / 79 | pathType: Prefix 80 | service: 81 | identifier: main 82 | port: http 83 | tls: 84 | - hosts: 85 | - "mealie.${SECRET_DOMAIN}" 86 | 87 | persistence: 88 | data: 89 | enabled: true 90 | type: persistentVolumeClaim 91 | existingClaim: mealie-pvc 
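The `${SECRET_DOMAIN}` placeholders used throughout the releases above are resolved by Flux's post-build variable substitution from the `cluster-secrets` Secret shown earlier, and the `*.enc.yaml` files are decrypted in-cluster by Flux's SOPS integration. The Flux `Kustomization` that wires both of these up is not part of this excerpt; a minimal sketch, assuming conventional names and paths:

```yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: apps                    # assumed name, not shown in this excerpt
  namespace: flux-system
spec:
  interval: 10m
  path: ./cluster/apps          # assumed path
  prune: true
  sourceRef:
    kind: GitRepository
    name: flux-system
  decryption:
    provider: sops
    secretRef:
      name: sops-gpg            # assumed name of the Secret holding the PGP private key
  postBuild:
    substituteFrom:
      - kind: Secret
        name: cluster-secrets   # provides SECRET_DOMAIN, SECRET_EMAIL, ...
```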
-------------------------------------------------------------------------------- /cluster/apps/vpn/v2ray/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: v2ray-server-config 5 | namespace: vpn 6 | data: 7 | config.json: ENC[AES256_GCM,data:KsXFFSx5UMQfSDxCjzTRh1i+Yndh44FqM8jx45PKIswLGvte8eZxVdHiPszsxlGvPrKliF6bQT2x10qAU8MeqvohwD790H1QheB9K7GxsAj1fh9unMqtpGTHK3Su3GeHqSXjJcGq79vtYgcw9aqYoeqr5LyvXe+E9nYSUP/nwvP50ttuit+B6ku3mcqrTiesge/YssZdxQslbjK8mE+OQahwoZTWkOpv9etOSAz0NWmcDyvxzm5wpM/8DDMeI7zs6ZPH8864ISbMQQne5S3rh1v6KpcpSaqO2cnMa0mqtLrLBg3g9j/vK3SYVsSrJvKQBuNOPj4a98vT20LiMrUYCd0zUn+cTB59uAOHMTAJDcL5j/zjxkqeS2rZN2dOD4E0kiw7qoe2y3x/MhXxTZCt+0MKAIlUvuQJOT1OIrMtKVP/2Wk0BLVDdC+cow1/am18z8kTYIVt+gVeX3X3t0rTREWtf+X+rsEokbbsxbl6YAD5e6FaqxBDGY8dYNgxpA9b/QqIsSNOl750hHsXnhC1kslHPVP4tdSu1OnqYKifU6F5XhD4LB/Drz2kShQ6yIkxIZZiYCI1DybbdrAeqX1ZjbDB6UusbmfR/2Q4h0J1VB/ka6KhuFfLsKLx6mTp6k4c4KlN4Nn1OtsdPLc6wi9ZESpjyFEF1HvuJ/mehM7i47cPl/qi/1OgRhy3hWwpuOAMyvBi8D6OtUu+VXCONId6Jx6G12iV1EGiql8s0zr+SvnI2P4XfJUz2EGzsFGbgBfEFZSnVtn/Y1fjTD71NMd0jDaL2coQGd0v9GMN5Vvg5gWNd1yzqpy0+EornZVwOqGmO816lhx59kEaZ3f9fW1zffxnO6nrpJQA7/0R5kd7FM6VMOQSrpSHqZ8A1M0mk+sRovrnTLz6192z9qNOV0s9j61w7wJM2+BfE1QlaCwWBx3+gNKmQZXijSiwCJrHn2FZ9LBHMWIiWKiXfAbl73VLpEI6BkYRE2tPjuF/knXH7/sPKftmpmh4cSZtMOozQl94UWmRLRYMfBqm5RVbWRoFBZbdsv2UvOVioPRj/gO0zgA2GCBvtRxNyiByJtYrpBgT0JuUziSsyolg6e3IqkQbtC0aFCwaac5FhY5LFqC6g+8qgjiMMVnmQLeNnCS+7tyENyhR8z4bO34HTEBwoLFrTCg6S9Lvi8CISCiK0M/I1WkWCe99vg2mhMtR8hQavG0a,iv:KdCa7eb6ZswmiH0keOQMcmwzcVhzHHA8YMsADYegtQc=,tag:EEwkxUVYPIJcJssCoECEOA==,type:str] 8 | sops: 9 | lastmodified: "2025-04-22T20:22:39Z" 10 | mac: ENC[AES256_GCM,data:tQyOs7t+OxPbQz/Q7Dmt+CrqlhQrc4sFMxwCtDSNSr9YvMEQZdcEVzcLXOE+Sjk81anY0bSyOjyVBZSX3zFx1U8+oG2aP1MRemhXuzoz5SIexxsHgLaogDqsZ7CWZdmZURgGGFDLM1zBPkBs2I0CPBPO8u8kEb5ibsB9h8LDdE4=,iv:XNuv9KCvpUMox94xyY0UH/Ar6wMpFFryHI0/Zuasfgk=,tag:A7IlN8cSSUYJ52k0kkOuyA==,type:str] 11 | pgp: 12 | - created_at: "2025-04-22T20:22:39Z" 13 | enc: |- 14 | -----BEGIN PGP MESSAGE----- 15 | 16 | hQGMA71EESEn7N2iAQv+Jo7+h13Z7R5hKDz3PbVqU/CMTCHBFURQQKboStl65efg 17 | qp2BmzJvwIETMVxqPQZ6OB6TPfJG8WEPJ3Be4FLV/VBpXLRtyr/NlG2eZQ/nHlSu 18 | 95Upwynd0Y8KKwCVH+TYwSRwVKL4GeGxaj4+4oaw6FuZtjN8sOJ1EaIgpFLbqI2o 19 | 9r62eswiigXdPuZI0uzqmdtg7oYhq88qmZ3SKRalo4EK8P6m+MeMyQxd29/8fZIM 20 | Iztm6KX6PgSO9SZ1SotcMYXA1FbzYaBFepblpC35pZLjhEleEcPbkvgduYWu1SFK 21 | 1oFyi7LWHBASNDbJ4KsrhaXpMcReSrhxz+wQLXMqcU/7hHwJbD+NDsWlFtUe1rPS 22 | fh5kzPxPlkP0NCz/bPDiNbQtHdiDHgYtkImZb2mibzL/wUNdDTCRKDnIY4qdvuKZ 23 | 2XAqL5hheNQKu32n2Lbu8lRtRGqTqVx6ks267aUWqG4KKATfIg6eCvHss3spyPns 24 | HCzgW9pqwIuCVTtn4hZF0l4BAE1kEVGCoJcArc1WjKwpP+DoN50jT1ge45KWOTwF 25 | fMNtuVmzJRy5lo30vMFlo0L3lFb6Ky5kuVYmBitXRy/86nJAhh+Q9z4bBc1dIgFl 26 | aGv4mOcDQTCENx75WaZ0 27 | =Uk1k 28 | -----END PGP MESSAGE----- 29 | fp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED 30 | encrypted_regex: ^(data|stringData)$ 31 | version: 3.10.1 32 | --------------------------------------------------------------------------------
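All of the SOPS-encrypted files above share the same settings: the PGP fingerprint `2D47B9C25AAD87860CC1778F7022B8F414F4AEED` and `encrypted_regex: ^(data|stringData)$`, so only the `data`/`stringData` values are encrypted while the rest of each manifest stays readable. These settings normally come from a `.sops.yaml` at the repository root, which is not included in this excerpt; a minimal sketch, with the `path_regex` as an assumption:

```yaml
creation_rules:
  - path_regex: cluster/.*\.yaml$            # assumption; the real rule is not shown here
    encrypted_regex: ^(data|stringData)$
    pgp: 2D47B9C25AAD87860CC1778F7022B8F414F4AEED
```

With such a rule in place, `sops --encrypt --in-place <file>` (as noted in the atuin secrets' header comments) encrypts only the `data`/`stringData` values and records the metadata seen in each file's `sops:` block.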