├── .envrc ├── .gitattributes ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug-report.md │ ├── config.yml │ ├── feature-request.md │ └── question.md ├── PULL_REQUEST_TEMPLATE.md ├── labeler.yaml ├── labels.yaml ├── linters │ ├── .ansible-lint │ ├── .markdownlint.yaml │ ├── .prettierignore │ ├── .prettierrc.yaml │ ├── .tflint.hcl │ └── .yamllint.yaml ├── renovate.json5 ├── renovate │ ├── allowedVersions.json5 │ ├── autoMerge.json5 │ ├── groups.json5 │ ├── labels.json5 │ └── semanticCommits.json5 ├── scripts │ ├── cloudflare-proxied-networks.sh │ ├── container-parser.sh │ ├── helm-release-differ.sh │ └── lib │ │ └── functions.sh └── workflows │ ├── helm-release-differ.yaml │ ├── megalinter.yaml │ ├── meta-label-size.yaml │ ├── meta-labeler.yml │ ├── meta-sync-labels.yaml │ ├── scan-containers.yaml │ ├── schedule-cloudflare-proxied-networks-update.yaml │ ├── schedule-link-checker.yaml │ └── schedule-renovate.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── .sops.yaml ├── .sourceignore ├── .taskfiles ├── AnsibleTasks.yml ├── ClusterTasks.yml ├── FormatTasks.yml ├── LintTasks.yml ├── PreCommitTasks.yml └── TerraformTasks.yml ├── .vscode ├── extensions.json └── settings.json ├── LICENSE ├── README.md ├── Taskfile.yml ├── ansible ├── kubernetes │ ├── .envrc │ ├── ansible.cfg │ ├── inventory │ │ ├── group_vars │ │ │ ├── all │ │ │ │ └── k3s.sops.yml │ │ │ ├── master │ │ │ │ └── k3s.yml │ │ │ └── worker │ │ │ │ └── k3s.yml │ │ ├── host_vars │ │ │ ├── k8s-0.sops.yml │ │ │ ├── k8s-1.sops.yml │ │ │ ├── k8s-2.sops.yml │ │ │ ├── k8s-3.sops.yml │ │ │ ├── k8s-4.sops.yml │ │ │ └── k8s-5.sops.yml │ │ └── hosts.yml │ ├── playbooks │ │ ├── k3s.yml │ │ ├── nuke-k3s.yml │ │ ├── nuke-rook-ceph.yml │ │ └── os.yml │ └── roles │ │ ├── k3s.kubernetes │ │ ├── defaults │ │ │ └── main.yml │ │ ├── files │ │ │ └── containerd-hack.service │ │ ├── tasks │ │ │ ├── home-dns.yml │ │ │ ├── k3s.yml │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── calico │ │ │ │ ├── calico-bgpconfiguration.yaml.j2 │ │ │ │ ├── calico-bgppeer.yaml.j2 │ │ │ │ └── calico-installation.yaml.j2 │ │ │ ├── home-dns │ │ │ │ └── home-dns-rbac.yaml.j2 │ │ │ └── k3s │ │ │ │ └── 10-etcd-snapshots.yaml.j2 │ │ └── vars │ │ │ └── main │ │ │ ├── calico.yml │ │ │ ├── etcd-snapshots.yml │ │ │ └── k3s.yml │ │ └── os.kubernetes │ │ ├── handlers │ │ └── main.yml │ │ ├── tasks │ │ ├── filesystem.yml │ │ ├── kernel.yml │ │ ├── locale.yml │ │ ├── main.yml │ │ ├── network.yml │ │ ├── packages.yml │ │ ├── security.yml │ │ ├── unattended-upgrades.yml │ │ └── user.yml │ │ └── vars │ │ └── main.yml ├── kvm │ ├── .envrc │ ├── ansible.cfg │ ├── inventory │ │ ├── host_vars │ │ │ └── pikvm.sops.yml │ │ └── hosts.yml │ ├── playbooks │ │ └── pikvm.yml │ └── roles │ │ ├── acme.kvm │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── vars │ │ │ └── main.yml │ │ ├── node-exporter.kvm │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ ├── templates │ │ │ └── node-exporter.service.j2 │ │ └── vars │ │ │ └── main.yml │ │ ├── os.kvm │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── override.yaml.j2 │ │ │ └── tc358743-edid.hex.j2 │ │ └── vars │ │ │ └── main.yml │ │ └── vector.kvm │ │ ├── defaults │ │ └── main.yml │ │ ├── handlers │ │ └── main.yml │ │ ├── tasks │ │ └── main.yml │ │ ├── templates │ │ ├── vector.service.j2 │ │ └── vector.yaml.j2 │ │ └── vars │ │ └── main.yml ├── requirements.yml ├── router │ ├── .envrc │ ├── 
ansible.cfg │ ├── inventory │ │ ├── host_vars │ │ │ └── opnsense.sops.yml │ │ └── hosts.yml │ ├── playbooks │ │ ├── apps.yml │ │ └── os.yml │ └── roles │ │ ├── adguardhome.router │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── 99-adguardhome.j2 │ │ │ ├── actions_adguardhome.conf.j2 │ │ │ ├── adguardhome-newsyslog.conf.j2 │ │ │ ├── adguardhome-rc.conf.j2 │ │ │ └── adguardhome-rc.d.j2 │ │ └── vars │ │ │ └── main.yml │ │ ├── coredns.router │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── 99-coredns.j2 │ │ │ ├── Corefile.j2 │ │ │ ├── actions_coredns.conf.j2 │ │ │ ├── coredns-newsyslog.conf.j2 │ │ │ ├── coredns-rc.conf.j2 │ │ │ └── coredns-rc.d.j2 │ │ └── vars │ │ │ └── main.yml │ │ └── os.router │ │ ├── defaults │ │ └── main.yml │ │ ├── tasks │ │ ├── filesystem.yml │ │ ├── main.yml │ │ └── packages.yml │ │ └── vars │ │ └── main.yml └── storage │ ├── .envrc │ ├── ansible.cfg │ ├── inventory │ ├── group_vars │ │ └── storage │ │ │ ├── docker.yml │ │ │ └── pip.yml │ ├── host_vars │ │ └── expanse.sops.yml │ └── hosts.yml │ ├── playbooks │ ├── apps.yml │ └── os.yml │ └── roles │ ├── apps.storage │ ├── defaults │ │ └── main.yml │ ├── files │ │ ├── docker-cleanup.service │ │ ├── docker-cleanup.timer │ │ └── docker-override.conf │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ ├── docker.yml │ │ ├── kopia.yml │ │ ├── main.yml │ │ ├── nexus.yml │ │ ├── node-exporter.yml │ │ ├── time-machine.yml │ │ ├── traefik.yml │ │ └── vector.yml │ ├── templates │ │ ├── docker-compose@.service.j2 │ │ ├── kopia │ │ │ └── docker-compose.yml.j2 │ │ ├── nexus │ │ │ └── docker-compose.yml.j2 │ │ ├── node-exporter │ │ │ └── docker-compose.yml.j2 │ │ ├── time-machine │ │ │ └── docker-compose.yml.j2 │ │ ├── traefik │ │ │ └── docker-compose.yml.j2 │ │ └── vector │ │ │ ├── docker-compose.yml.j2 │ │ │ └── vector.yaml.j2 │ └── vars │ │ └── main.yml │ └── os.storage │ ├── defaults │ └── main.yml │ ├── handlers │ └── main.yml │ ├── tasks │ ├── filesystem.yml │ ├── locale.yml │ ├── main.yml │ ├── network.yml │ ├── notifications.yml │ ├── packages.yml │ ├── security.yml │ └── user.yml │ ├── templates │ ├── aliases.j2 │ ├── msmtprc.j2 │ ├── smartd.conf.j2 │ └── zed.rc.j2 │ └── vars │ └── main.yml ├── cluster ├── apps │ ├── cert-manager │ │ ├── dashboard │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ └── prometheus-rule.yaml │ ├── default │ │ ├── discord-support-threads-bot │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── messages.json │ │ │ └── secret.sops.yaml │ │ ├── gitea │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── secret.sops.yaml │ │ ├── kustomization.yaml │ │ ├── minio │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── nfs-claim.yaml │ │ │ └── secret.sops.yaml │ │ └── theme-park │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ ├── flux-system │ │ ├── dashboard │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ ├── monitoring │ │ │ ├── kustomization.yaml │ │ │ ├── pod-monitor.yaml │ │ │ └── prometheus-rule.yaml │ │ ├── notifications │ │ │ ├── alert-manager │ │ │ │ ├── kustomization.yaml │ │ │ │ └── notification.yaml │ │ │ ├── github │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── notification.yaml │ │ │ │ └── secret.sops.yaml │ │ │ └── kustomization.yaml │ │ └── webhook │ │ │ ├── github │ │ │ ├── ingress.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── 
receiver.yaml │ │ │ └── secret.sops.yaml │ │ │ └── kustomization.yaml │ ├── home │ │ ├── emqx │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── secret.sops.yaml │ │ ├── frigate │ │ │ ├── config-pvc.yaml │ │ │ ├── config.yaml │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── secret.sops.yaml │ │ ├── home-assistant │ │ │ ├── code-server-ssh-key.sops.yaml │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ ├── home-assistant.sops.yaml │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ ├── mosquitto │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── secret.sops.yaml │ │ ├── node-red │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── zigbee2mqtt │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ └── zwavejs2mqtt │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ ├── kasten-io │ │ ├── k10 │ │ │ ├── blueprints │ │ │ │ ├── home.yaml │ │ │ │ ├── k10-disaster-recovery.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── media.yaml │ │ │ │ ├── pod-spec-override.yaml │ │ │ │ └── secret.sops.yaml │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── monitoring │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── prometheus-rule.yaml │ │ │ │ └── service-monitor.yaml │ │ │ └── profiles │ │ │ │ ├── home.yaml │ │ │ │ ├── k10-backups-pvc.yaml │ │ │ │ ├── k10-disaster-recovery.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ └── media.yaml │ │ └── kustomization.yaml │ ├── kube-system │ │ ├── descheduler │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── external-snapshotter │ │ │ └── kustomization.yaml │ │ ├── home-dns │ │ │ ├── kustomization.yaml │ │ │ └── rbac.yaml │ │ ├── intel-gpu-plugin │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ ├── metrics-server │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── nfs-subdir-external-provisioner │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── node-feature-discovery │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── reflector │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ └── reloader │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ ├── kustomization.yaml │ ├── media │ │ ├── jellyfin │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ ├── lidarr │ │ │ ├── config-pvc.yaml │ │ │ ├── dashboard │ │ │ │ ├── dashboard.json │ │ │ │ └── kustomization.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── media-browser │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── overseerr │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── plex │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── prowlarr │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── qbittorrent │ │ │ ├── config-pvc.yaml │ │ │ ├── dashboard │ │ │ │ ├── dashboard.json │ │ │ │ └── kustomization.yaml │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── tag-tracker-errors │ │ │ │ ├── cron-job.yaml │ │ │ │ └── kustomization.yaml │ │ ├── radarr-uhd │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── radarr │ │ │ ├── config-pvc.yaml │ │ │ ├── dashboard │ │ │ │ ├── dashboard.json │ │ │ │ └── kustomization.yaml │ │ │ ├── 
helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── readarr │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── sabnzbd │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── sonarr-uhd │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── sonarr │ │ │ ├── config-pvc.yaml │ │ │ ├── dashboard │ │ │ │ ├── dashboard.json │ │ │ │ └── kustomization.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── tautulli │ │ │ ├── config-pvc.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── trash-updater │ │ │ ├── cron-job.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── secret.sops.yaml │ │ │ └── trash.yaml │ │ └── unpackerr │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── secret.sops.yaml │ ├── monitoring │ │ ├── blackbox-exporter │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── generic-rules │ │ │ ├── kustomization.yaml │ │ │ └── zfs │ │ │ │ ├── kustomization.yaml │ │ │ │ └── prometheus-rule.yaml │ │ ├── goldilocks │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── grafana │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── secret.sops.yaml │ │ ├── kube-prometheus-stack │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ ├── loki │ │ │ ├── ceph-buckets │ │ │ │ ├── chunks.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ └── ruler.yaml │ │ │ ├── config-map.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── node-problem-detector │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── snmp-exporter │ │ │ ├── apc-ups │ │ │ │ ├── config-map.yaml │ │ │ │ ├── dashboard │ │ │ │ │ ├── dashboard.json │ │ │ │ │ └── kustomization.yaml │ │ │ │ ├── helm-release.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ └── prometheus-rule.yaml │ │ │ ├── cyberpower-pdu │ │ │ │ ├── config-map.yaml │ │ │ │ ├── dashboard │ │ │ │ │ ├── dashboard.json │ │ │ │ │ └── kustomization.yaml │ │ │ │ ├── helm-release.yaml │ │ │ │ └── kustomization.yaml │ │ │ ├── dell-idrac │ │ │ │ ├── config-map.yaml │ │ │ │ ├── dashboard │ │ │ │ │ ├── dashboard.json │ │ │ │ │ └── kustomization.yaml │ │ │ │ ├── helm-release.yaml │ │ │ │ └── kustomization.yaml │ │ │ └── kustomization.yaml │ │ ├── thanos │ │ │ ├── dashboard │ │ │ │ └── kustomization.yaml │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── object-bucket-claim.yaml │ │ ├── unifi-poller │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── uptimerobot-heartbeat │ │ │ ├── cron-job.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── secret.sops.yaml │ │ │ └── uptimerobot-heartbeat.sh │ │ ├── vector │ │ │ ├── agent │ │ │ │ ├── helm-release.yaml │ │ │ │ └── kustomization.yaml │ │ │ ├── aggregator │ │ │ │ ├── filterlog-regex.txt │ │ │ │ ├── helm-release.yaml │ │ │ │ └── kustomization.yaml │ │ │ ├── geoipupdate │ │ │ │ ├── config-pvc.yaml │ │ │ │ ├── cron-job.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ └── secret.sops.yaml │ │ │ └── kustomization.yaml │ │ └── vpa │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ ├── networking │ │ ├── cloudflare-ddns │ │ │ ├── cloudflare-ddns.sh │ │ │ ├── cron-job.yaml │ │ │ ├── kustomization.yaml │ │ │ └── secret.sops.yaml │ │ ├── echo-server │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── external-dns │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── secret.sops.yaml │ │ ├── ingress-nginx │ │ │ ├── cloudflare-proxied-networks.txt │ │ │ ├── dashboard 
│ │ │ │ └── kustomization.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ ├── opnsense │ │ │ ├── dashboard │ │ │ │ ├── dashboard.json │ │ │ │ └── kustomization.yaml │ │ │ └── kustomization.yaml │ │ └── wildcard-certificate │ │ │ ├── certificate.yaml │ │ │ └── kustomization.yaml │ ├── rook-ceph │ │ ├── dashboard │ │ │ ├── ceph-cluster-dashboard.json │ │ │ ├── ceph-osd-dashboard.json │ │ │ ├── ceph-pools-dashboard.json │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ └── rook-direct-mount │ │ │ ├── backup.sh │ │ │ ├── deployment.yaml │ │ │ └── kustomization.yaml │ └── system-upgrade │ │ ├── kustomization.yaml │ │ └── system-upgrade-controller │ │ ├── kustomization.yaml │ │ ├── plans │ │ ├── agent.yaml │ │ ├── kustomization.yaml │ │ └── server.yaml │ │ └── system-upgrade-patches.yaml ├── base │ ├── apps.yaml │ ├── charts.yaml │ ├── config.yaml │ ├── core.yaml │ ├── crds.yaml │ └── flux-system │ │ ├── github-deploy-key.sops.yaml │ │ ├── gotk-components.yaml │ │ ├── gotk-patches.yaml │ │ ├── gotk-sync.yaml │ │ └── kustomization.yaml ├── charts │ ├── aqua-charts.yaml │ ├── bitnami-charts.yaml │ ├── deliveryhero-charts.yaml │ ├── descheduler-charts.yaml │ ├── emberstack-charts.yaml │ ├── emqx-charts.yaml │ ├── external-dns-charts.yaml │ ├── fairwinds-charts.yaml │ ├── falco-security-charts.yaml │ ├── gitea-charts.yaml │ ├── grafana-charts.yaml │ ├── ingress-nginx-charts.yaml │ ├── jetstack-charts.yaml │ ├── k8s-at-home-charts.yaml │ ├── kasten-charts.yaml │ ├── kustomization.yaml │ ├── kyverno-charts.yaml │ ├── metrics-server-charts.yaml │ ├── nfs-subdir-external-provisioner-charts.yaml │ ├── node-feature-discovery-charts.yaml │ ├── oauth2-proxy-charts.yaml │ ├── prometheus-community-charts.yaml │ ├── rook-ceph-charts.yaml │ ├── stakater-charts.yaml │ └── vector-charts.yaml ├── config │ ├── cluster-secrets.sops.yaml │ ├── cluster-settings.yaml │ └── kustomization.yaml ├── core │ ├── cert-manager │ │ ├── helm-release.yaml │ │ ├── issuers │ │ │ ├── kustomization.yaml │ │ │ ├── letsencrypt-production.yaml │ │ │ ├── letsencrypt-staging.yaml │ │ │ └── secret.sops.yaml │ │ └── kustomization.yaml │ ├── kustomization.yaml │ ├── kyverno │ │ ├── helm-release.yaml │ │ ├── kustomization.yaml │ │ └── policies │ │ │ ├── ingress.yaml │ │ │ ├── kustomization.yaml │ │ │ └── resources.yaml │ ├── namespaces │ │ ├── calico-system.yaml │ │ ├── cert-manager.yaml │ │ ├── flux-system.yaml │ │ ├── home.yaml │ │ ├── kasten-io.yaml │ │ ├── kube-system.yaml │ │ ├── kustomization.yaml │ │ ├── kyverno.yaml │ │ ├── media.yaml │ │ ├── monitoring.yaml │ │ ├── networking.yaml │ │ ├── rook-ceph.yaml │ │ ├── starboard-system.yaml │ │ └── system-upgrade.yaml │ ├── rook-ceph │ │ ├── cluster │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ └── operator │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ └── starboard-system │ │ ├── kustomization.yaml │ │ └── starboard-operator │ │ ├── helm-release.yaml │ │ └── kustomization.yaml └── crds │ ├── cert-manager │ ├── crds.yaml │ └── kustomization.yaml │ ├── external-snapshotter │ └── kustomization.yaml │ ├── kube-prometheus-stack │ ├── crds.yaml │ └── kustomization.yaml │ ├── kustomization.yaml │ ├── kyverno │ ├── crds.yaml │ └── kustomization.yaml │ ├── rook-ceph │ ├── crds.yaml │ └── kustomization.yaml │ └── system-upgrade-controller │ └── kustomization.yaml ├── hack ├── delete-stuck-ns.sh ├── delete-stuck-snapshots.sh └── valetudo │ ├── S11node_exporter │ ├── S11vector │ ├── 
vacuum-updater.sh │ └── vector-config.yml └── terraform ├── cloudflare ├── .terraform.lock.hcl ├── dns_records_casa.tf ├── dns_records_io.tf ├── firewall_rules_io.tf ├── main.tf ├── page_rules_io.tf ├── secret.sops.yaml ├── zone_settings_casa.tf └── zone_settings_io.tf └── nexus ├── .terraform.lock.hcl ├── main.tf └── secret.sops.yaml /.envrc: -------------------------------------------------------------------------------- 1 | #shellcheck disable=SC2148,SC2155 2 | export KUBECONFIG=$(expand_path ./cluster/kubeconfig) 3 | export SOPS_AGE_KEY_FILE=$(expand_path ~/.config/sops/age/keys.txt) 4 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.sops.* diff=sopsdiffer 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners 2 | * @onedr0p 3 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "" 5 | labels: kind/bug 6 | assignees: "" 7 | --- 8 | 9 | # Details 10 | 11 | **What steps did you take and what happened:** 12 | 13 | 14 | 15 | **What did you expect to happen:** 16 | 17 | 18 | 19 | **Anything else you would like to add:** 20 | 21 | 22 | 23 | **Additional Information:** 24 | 25 | 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | blank_issues_enabled: false 3 | contact_links: 4 | - name: Discuss on Discord 5 | url: https://discord.gg/k8s-at-home 6 | about: Join our Discord community 7 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "" 5 | labels: kind/enhancement 6 | assignees: "" 7 | --- 8 | 9 | # Details 10 | 11 | **Describe the solution you'd like:** 12 | 13 | 14 | 15 | **Anything else you would like to add:** 16 | 17 | 18 | 19 | **Additional Information:** 20 | 21 | 22 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Question 3 | about: Ask a question to the maintainer 4 | title: "" 5 | labels: kind/question 6 | assignees: "" 7 | --- 8 | 9 | # Details 10 | 11 | **Ask your question:** 12 | 13 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | **Description of the change** 2 | 3 | 4 | 5 | **Benefits** 6 | 7 | 8 | 9 | **Possible drawbacks** 10 | 11 | 12 | 13 | **Applicable issues** 14 | 15 | 16 | 17 | - fixes # 18 | 19 | **Additional information** 20 | 21 | 22 | -------------------------------------------------------------------------------- 
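The *.sops.* diff=sopsdiffer attribute in .gitattributes above only takes effect once git is told what "sopsdiffer" means; that mapping is per-clone and is not tracked in the repository. A minimal local setup, following the textconv pattern from the SOPS documentation, is:

    git config diff.sopsdiffer.textconv "sops -d"

With that in place (and the age key referenced by SOPS_AGE_KEY_FILE in .envrc available), git diff compares the decrypted content of *.sops.* files instead of their ciphertext.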
/.github/labeler.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | area/ansible: 3 | - "ansible/**/*" 4 | area/github: 5 | - ".github/**/*" 6 | area/cluster: 7 | - "cluster/**/*" 8 | area/hack: 9 | - "hack/**/*" 10 | area/terraform: 11 | - "terraform/**/*" 12 | -------------------------------------------------------------------------------- /.github/linters/.ansible-lint: -------------------------------------------------------------------------------- 1 | # .ansible-lint 2 | warn_list: 3 | - unnamed-task 4 | -------------------------------------------------------------------------------- /.github/linters/.markdownlint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | default: true 3 | 4 | # MD013/line-length - Line length 5 | MD013: 6 | # Number of characters 7 | line_length: 240 8 | # Number of characters for headings 9 | heading_line_length: 80 10 | # Number of characters for code blocks 11 | code_block_line_length: 80 12 | # Include code blocks 13 | code_blocks: true 14 | # Include tables 15 | tables: true 16 | # Include headings 17 | headings: true 18 | # Include headings 19 | headers: true 20 | # Strict length checking 21 | strict: false 22 | # Stern length checking 23 | stern: false 24 | -------------------------------------------------------------------------------- /.github/linters/.prettierignore: -------------------------------------------------------------------------------- 1 | charts/ 2 | docs/ 3 | .private/ 4 | .terraform/ 5 | .vscode/ 6 | *.sops.* 7 | gotk-components.yaml 8 | -------------------------------------------------------------------------------- /.github/linters/.prettierrc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | trailingComma: "es5" 3 | tabWidth: 2 4 | semi: false 5 | singleQuote: false 6 | bracketSpacing: false 7 | useTabs: false 8 | -------------------------------------------------------------------------------- /.github/linters/.tflint.hcl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/timothystewart6/home-ops-1/4e970a9e11fa4921a192599478e9c6199b9078cc/.github/linters/.tflint.hcl -------------------------------------------------------------------------------- /.github/linters/.yamllint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ignore: | 3 | charts/ 4 | docs/ 5 | .private/ 6 | .terraform/ 7 | .vscode/ 8 | *.sops.* 9 | gotk-components.yaml 10 | extends: default 11 | rules: 12 | truthy: 13 | allowed-values: ["true", "false", "on"] 14 | comments: 15 | min-spaces-from-content: 1 16 | line-length: disable 17 | braces: 18 | min-spaces-inside: 0 19 | max-spaces-inside: 1 20 | brackets: 21 | min-spaces-inside: 0 22 | max-spaces-inside: 0 23 | indentation: enable 24 | -------------------------------------------------------------------------------- /.github/renovate/allowedVersions.json5: -------------------------------------------------------------------------------- 1 | { 2 | "packageRules": [ 3 | { 4 | "matchDatasources": ["docker"], 5 | "matchPackageNames": ["docker.io/kopia/kopia"], 6 | "allowedVersions": "<10" 7 | } 8 | ] 9 | } 10 | -------------------------------------------------------------------------------- /.github/renovate/autoMerge.json5: -------------------------------------------------------------------------------- 1 | { 2 | "packageRules": [ 3 | { 4 | "matchDatasources": [ 5 | 
"docker", 6 | "github-actions", 7 | "github-releases", 8 | "github-tags", 9 | "helm" 10 | ], 11 | "automerge": true, 12 | "automergeType": "branch", 13 | "requiredStatusChecks": null, 14 | "matchUpdateTypes": [ 15 | "minor", 16 | "patch" 17 | ], 18 | "matchPackageNames": [ 19 | "ghcr.io/k8s-at-home/prowlarr-nightly", 20 | "lycheeverse/lychee-action", 21 | "minio", 22 | "renovatebot/github-action" 23 | ] 24 | } 25 | ] 26 | } 27 | -------------------------------------------------------------------------------- /.github/renovate/groups.json5: -------------------------------------------------------------------------------- 1 | { 2 | "packageRules": [ 3 | { 4 | "description": "Rook-Ceph image and chart", 5 | "groupName": "Rook Ceph", 6 | "matchPackagePatterns": ["rook.ceph"], 7 | "matchDatasources": ["docker", "helm"], 8 | "group": { "commitMessageTopic": "{{{groupName}}} group" }, 9 | "separateMinorPatch": true 10 | }, 11 | { 12 | "description": "Thanos image and chart versions do not match", 13 | "groupName": "Thanos", 14 | "matchPackagePatterns": ["thanos"], 15 | "matchDatasources": ["docker", "github-releases", "helm"], 16 | "matchUpdateTypes": ["minor", "patch"], 17 | "group": { "commitMessageTopic": "{{{groupName}}} group" }, 18 | "separateMinorPatch": false 19 | }, 20 | { 21 | "description": "Vector image and chart versions do not match", 22 | "groupName": "Vector", 23 | "matchPackagePatterns": ["vector"], 24 | "matchDatasources": ["docker", "github-releases", "helm"], 25 | "matchUpdateTypes": ["minor", "patch"], 26 | "group": { "commitMessageTopic": "{{{groupName}}} group" }, 27 | "separateMinorPatch": false 28 | } 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /.github/renovate/labels.json5: -------------------------------------------------------------------------------- 1 | { 2 | "packageRules": [ 3 | { 4 | "matchUpdateTypes": ["major"], 5 | "labels": ["type/major"] 6 | }, 7 | { 8 | "matchUpdateTypes": ["minor"], 9 | "labels": ["type/minor"] 10 | }, 11 | { 12 | "matchUpdateTypes": ["patch"], 13 | "labels": ["type/patch"] 14 | }, 15 | { 16 | "matchDatasources": ["docker"], 17 | "addLabels": ["renovate/container"] 18 | }, 19 | { 20 | "matchDatasources": ["helm"], 21 | "addLabels": ["renovate/helm"] 22 | }, 23 | { 24 | "matchDatasources": ["galaxy", "galaxy-collection"], 25 | "addLabels": ["renovate/ansible"] 26 | }, 27 | { 28 | "matchDatasources": ["terraform-provider"], 29 | "addLabels": ["renovate/terraform"] 30 | }, 31 | { 32 | "matchDatasources": ["github-releases", "github-tags"], 33 | "addLabels": ["renovate/github-release"] 34 | }, 35 | { 36 | "matchManagers": ["github-actions"], 37 | "addLabels": ["renovate/github-action"] 38 | }, 39 | { 40 | "matchDatasources": ["pypi"], 41 | "addLabels": ["renovate/pip"] 42 | } 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /.github/scripts/cloudflare-proxied-networks.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Get all local networks 4 | # ipv4_rfc1918='[ "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16" ]' 5 | 6 | # Get all cloudflare ipv4 ranges in an array 7 | ipv4_cloudflare="$(curl -sL https://www.cloudflare.com/ips-v4 | jq --raw-input --slurp 'split("\n")')" 8 | if [[ -z "${ipv4_cloudflare}" ]]; then 9 | exit 1 10 | fi 11 | 12 | # Get all cloudflare ipv6 ranges in an array 13 | ipv6_cloudflare="$(curl -sL https://www.cloudflare.com/ips-v6 | jq --raw-input 
--slurp 'split("\n")')" 14 | if [[ -z "${ipv6_cloudflare}" ]]; then 15 | exit 1 16 | fi 17 | 18 | # Merge rfc1918 ipv4, cloudflare ipv4, and cloudflare ipv6 ranges into one array 19 | combined=$(jq \ 20 | --argjson ipv4_cloudflare "${ipv4_cloudflare}" \ 21 | --argjson ipv6_cloudflare "${ipv6_cloudflare}" \ 22 | -n '$ipv4_cloudflare + $ipv6_cloudflare' \ 23 | ) 24 | 25 | # Output array as a string with \, as delimiter 26 | echo "${combined}" | jq --raw-output '. | join("\\,")' 27 | -------------------------------------------------------------------------------- /.github/workflows/meta-label-size.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Meta - Label Size 3 | 4 | on: # yamllint disable-line rule:truthy 5 | pull_request: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | label-size: 11 | name: Label Size 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Generate Token 15 | uses: tibdex/github-app-token@v1 16 | id: generate-token 17 | with: 18 | app_id: "${{ secrets.BOT_APP_ID }}" 19 | private_key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" 20 | - name: Label Size 21 | uses: pascalgn/size-label-action@v0.4.3 22 | env: 23 | GITHUB_TOKEN: "${{ steps.generate-token.outputs.token }}" 24 | with: 25 | sizes: > 26 | { 27 | "0": "XS", 28 | "20": "S", 29 | "50": "M", 30 | "200": "L", 31 | "800": "XL", 32 | "2000": "XXL" 33 | } 34 | -------------------------------------------------------------------------------- /.github/workflows/meta-labeler.yml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Meta - Labeler 3 | 4 | on: # yamllint disable-line rule:truthy 5 | pull_request: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | labeler: 11 | name: Labeler 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Generate Token 15 | uses: tibdex/github-app-token@v1 16 | id: generate-token 17 | with: 18 | app_id: "${{ secrets.BOT_APP_ID }}" 19 | private_key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" 20 | - name: Labeler 21 | uses: actions/labeler@v4 22 | with: 23 | configuration-path: .github/labeler.yaml 24 | repo-token: "${{ steps.generate-token.outputs.token }}" 25 | -------------------------------------------------------------------------------- /.github/workflows/meta-sync-labels.yaml: -------------------------------------------------------------------------------- 1 | name: Meta - Sync labels 2 | 3 | on: # yamllint disable-line rule:truthy 4 | workflow_dispatch: 5 | push: 6 | branches: 7 | - main 8 | paths: 9 | - ".github/labels.yaml" 10 | 11 | jobs: 12 | labels: 13 | name: Sync Labels 14 | runs-on: ubuntu-latest 15 | steps: 16 | - name: Checkout 17 | uses: actions/checkout@v3 18 | - name: Generate Token 19 | uses: tibdex/github-app-token@v1 20 | id: generate-token 21 | with: 22 | app_id: "${{ secrets.BOT_APP_ID }}" 23 | private_key: "${{ secrets.BOT_APP_PRIVATE_KEY }}" 24 | - name: Sync Labels 25 | uses: EndBug/label-sync@v2 26 | with: 27 | config-file: .github/labels.yaml 28 | token: "${{ steps.generate-token.outputs.token }}" 29 | delete-other-labels: true 30 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Trash 2 | .DS_Store 3 | Thumbs.db 4 | # Binaries 5 | bin 6 | /flux 7 | *.iso 8 | # Temp folders 9 | .temp* 10 | .private/ 11 | .logs/ 12 | .task/ 13 | # Ansible 14 | xanmanning.k3s* 15 | mrlesmithjr.zfs* 16 | geerlingguy.docker* 17 | geerlingguy.pip* 18 | # Terraform 19 | 
.terraform 20 | .terraform.tfstate* 21 | terraform.tfstate* 22 | # Sops 23 | .decrypted~* 24 | *.agekey 25 | # Kubernetes 26 | kubeconfig* 27 | talosconfig* 28 | *.pub 29 | *.key 30 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fail_fast: false 3 | repos: 4 | - repo: https://github.com/adrienverge/yamllint 5 | rev: v1.26.3 6 | hooks: 7 | - args: 8 | - --config-file 9 | - .github/linters/.yamllint.yaml 10 | id: yamllint 11 | - repo: https://github.com/pre-commit/pre-commit-hooks 12 | rev: v4.2.0 13 | hooks: 14 | - id: trailing-whitespace 15 | - id: end-of-file-fixer 16 | - id: mixed-line-ending 17 | - repo: https://github.com/Lucas-C/pre-commit-hooks 18 | rev: v1.1.14 19 | hooks: 20 | - id: remove-crlf 21 | - id: remove-tabs 22 | - repo: https://github.com/sirosen/texthooks 23 | rev: 0.3.1 24 | hooks: 25 | - id: fix-smartquotes 26 | - repo: https://github.com/k8s-at-home/sops-pre-commit 27 | rev: v2.1.0 28 | hooks: 29 | - id: forbid-secrets 30 | - repo: https://github.com/zricethezav/gitleaks 31 | rev: v8.8.4 32 | hooks: 33 | - id: gitleaks 34 | -------------------------------------------------------------------------------- /.sops.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | creation_rules: 3 | - path_regex: cluster/.*\.ya?ml 4 | encrypted_regex: "^(data|stringData)$" 5 | age: >- 6 | age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta 7 | - path_regex: ansible/.*\.sops\.ya?ml 8 | unencrypted_regex: "^(kind)$" 9 | age: >- 10 | age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta 11 | - path_regex: terraform/.*\.sops\.ya?ml 12 | unencrypted_regex: "^(kind)$" 13 | age: >- 14 | age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta 15 | -------------------------------------------------------------------------------- /.sourceignore: -------------------------------------------------------------------------------- 1 | # See https://toolkit.fluxcd.io/components/source/gitrepositories/#excluding-files 2 | .github/ 3 | .taskfiles/ 4 | .vscode/ 5 | ansible/ 6 | hack/ 7 | terraform/ 8 | .envrc 9 | .gitattributes 10 | .gitignore 11 | .pre-commit-config.yaml 12 | .sops.yaml 13 | LICENSE 14 | README.md 15 | Taskfile.yaml 16 | -------------------------------------------------------------------------------- /.taskfiles/AnsibleTasks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | tasks: 5 | 6 | init: 7 | desc: install/upgrade ansible deps 8 | dir: ansible 9 | cmds: 10 | - "ansible-galaxy install -r requirements.yml --roles-path ~/.ansible/roles --force" 11 | - "ansible-galaxy collection install -r requirements.yml --collections-path ~/.ansible/collections --force" 12 | -------------------------------------------------------------------------------- /.taskfiles/FormatTasks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | tasks: 5 | all: 6 | - task: json 7 | - task: md 8 | - task: yaml 9 | 10 | md: 11 | desc: Format Markdown 12 | cmds: 13 | - >- 14 | prettier 15 | --ignore-path '.github/linters/.prettierignore' 16 | --config '.github/linters/.prettierrc.yaml' 17 | --list-different 18 | --ignore-unknown 19 | --parser=markdown 20 | --write '*.md' '**/*.md' 21 | 22 | yaml: 23 | desc: Format YAML 24 | cmds: 25 | - >- 26 | prettier 27 | 
--ignore-path '.github/linters/.prettierignore' 28 | --config 29 | '.github/linters/.prettierrc.yaml' 30 | --list-different 31 | --ignore-unknown 32 | --parser=yaml 33 | --write '*.y*ml' '**/*.y*ml' '**/*.y*ml.j2' 34 | ignore_error: true 35 | 36 | json: 37 | desc: Format JSON 38 | cmds: 39 | - >- 40 | prettier 41 | --ignore-path '.github/linters/.prettierignore' 42 | --config 43 | '.github/linters/.prettierrc.yaml' 44 | --list-different 45 | --ignore-unknown 46 | --parser=json 47 | --write '*.json' '**/*.json' 48 | ignore_error: true 49 | -------------------------------------------------------------------------------- /.taskfiles/LintTasks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | tasks: 5 | all: 6 | - task: json 7 | - task: md 8 | - task: tf 9 | - task: yaml 10 | 11 | json: 12 | desc: Lint JSON 13 | cmds: 14 | - >- 15 | prettier 16 | --ignore-path '.github/linters/.prettierignore' 17 | --config 18 | '.github/linters/.prettierrc.yaml' 19 | --ignore-unknown 20 | --parser=json 21 | --check '*.json' '**/*.json' 22 | ignore_error: true 23 | 24 | md: 25 | desc: Lint Markdown 26 | cmds: 27 | - markdownlint -c '.github/linters/.markdownlint.yaml' *.md **/*.md 28 | ignore_error: true 29 | 30 | tf: 31 | desc: Lint Terraform 32 | cmds: 33 | - tflint --config .github/linters/.tflint.hcl --loglevel info terraform/b2 34 | - tflint --config .github/linters/.tflint.hcl --loglevel info terraform/cloudflare 35 | - tflint --config .github/linters/.tflint.hcl --loglevel info terraform/minio 36 | - tflint --config .github/linters/.tflint.hcl --loglevel info terraform/nexus 37 | ignore_error: true 38 | 39 | yaml: 40 | desc: Lint YAML 41 | cmds: 42 | - yamllint -c '.github/linters/.yamllint.yaml' . 43 | ignore_error: true 44 | -------------------------------------------------------------------------------- /.taskfiles/PreCommitTasks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | tasks: 5 | 6 | init: 7 | desc: Install pre-commit hooks 8 | cmds: 9 | - pre-commit install --install-hooks 10 | 11 | update: 12 | desc: Update pre-commit hooks 13 | cmds: 14 | - pre-commit autoupdate 15 | 16 | run: 17 | desc: Run pre-commit on all files 18 | cmds: 19 | - pre-commit run --all-files 20 | -------------------------------------------------------------------------------- /.taskfiles/TerraformTasks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | tasks: 5 | 6 | init: 7 | desc: Initialize terraform dependencies 8 | dir: terraform 9 | cmds: 10 | - find . -maxdepth 1 -type d \( ! -name . \) -exec bash -c "cd '{}' && terraform init {{.CLI_ARGS}}" \; 11 | 12 | plan: 13 | desc: Show the terraform plan 14 | dir: terraform 15 | cmds: 16 | - find . -maxdepth 1 -type d \( ! -name . \) -exec bash -c "cd '{}' && terraform plan {{.CLI_ARGS}}" \; 17 | 18 | apply: 19 | desc: Apply the terraform changes 20 | dir: terraform 21 | interactive: true 22 | cmds: 23 | - find . -maxdepth 1 -type d \( ! -name . 
\) -exec bash -c "cd '{}' && terraform apply {{.CLI_ARGS}}" \; 24 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "HashiCorp.terraform", 4 | "britesnow.vscode-toggle-quotes", 5 | "mitchdenny.ecdc", 6 | "ms-kubernetes-tools.vscode-kubernetes-tools", 7 | "oderwat.indent-rainbow", 8 | "redhat.ansible", 9 | "signageos.signageos-vscode-sops", 10 | "usernamehw.errorlens", 11 | "fcrespo82.markdown-table-formatter" 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "ansible.ansibleLint.arguments": "-c .github/linters/.ansible-lint", 3 | "discord.enabled": true, 4 | "files.associations": { 5 | "*.json5": "jsonc", 6 | "**/ansible/**/*.yml": "ansible", 7 | "**/ansible/**/*.sops.yml": "yaml", 8 | "**/ansible/**/inventory/**/*.yml": "yaml", 9 | "**/terraform/**/*.tf": "terraform" 10 | }, 11 | "material-icon-theme.folders.associations": { 12 | ".taskfiles": "utils", 13 | "hack": "scripts" 14 | }, 15 | "prettier.configPath": ".github/linters/.prettierrc.yaml", 16 | "prettier.ignorePath": ".github/linters/.prettierignore", 17 | "yaml.schemas": { 18 | "Kubernetes": "cluster/*.yaml" 19 | }, 20 | "editor.fontFamily": "FiraCode Nerd Font", 21 | "editor.fontLigatures": true, 22 | "editor.bracketPairColorization.enabled": true, 23 | "editor.guides.bracketPairs":"active", 24 | "editor.hover.delay": 1500, 25 | "explorer.autoReveal": false 26 | } 27 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE 2 | Version 2, December 2004 3 | 4 | Copyright (C) 2020 Devin Buhl 5 | 6 | Everyone is permitted to copy and distribute verbatim or modified 7 | copies of this license document, and changing it is allowed as long 8 | as the name is changed. 9 | 10 | DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE 11 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 12 | 13 | 0. You just DO WHAT THE FUCK YOU WANT TO. 
14 | -------------------------------------------------------------------------------- /Taskfile.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | vars: 5 | PROJECT_DIR: 6 | sh: "git rev-parse --show-toplevel" 7 | CLUSTER_DIR: "{{.PROJECT_DIR}}/cluster" 8 | 9 | env: 10 | KUBECONFIG: "{{.CLUSTER_DIR}}/kubeconfig" 11 | 12 | includes: 13 | ansible: .taskfiles/AnsibleTasks.yml 14 | cluster: .taskfiles/ClusterTasks.yml 15 | format: .taskfiles/FormatTasks.yml 16 | linter: .taskfiles/LintTasks.yml 17 | precommit: .taskfiles/PreCommitTasks.yml 18 | terraform: .taskfiles/TerraformTasks.yml 19 | 20 | tasks: 21 | 22 | kubeconfig: 23 | desc: Remotely fetch kubeconfig from Kubernetes 24 | cmds: 25 | - rsync --verbose --progress --partial --rsync-path="sudo rsync" {{.K3S_PRIMARY_MASTER_NODE_USERNAME}}@{{.K3S_PRIMARY_MASTER_NODE_ADDR}}:/etc/rancher/k3s/k3s.yaml "{{.CLUSTER_DIR}}/kubeconfig" 26 | - sed -i '' 's/127.0.0.1/{{.K3S_LB_ADDR}}/g' "{{.CLUSTER_DIR}}/kubeconfig" 27 | - chmod go-r "{{.CLUSTER_DIR}}/kubeconfig" 28 | vars: 29 | K3S_PRIMARY_MASTER_NODE_USERNAME: "ubuntu" 30 | K3S_PRIMARY_MASTER_NODE_ADDR: "192.168.42.10" 31 | K3S_LB_ADDR: "192.168.1.1" 32 | -------------------------------------------------------------------------------- /ansible/kubernetes/.envrc: -------------------------------------------------------------------------------- 1 | #shellcheck disable=SC2148,SC2155 2 | export ANSIBLE_CONFIG=$(expand_path ./ansible.cfg) 3 | -------------------------------------------------------------------------------- /ansible/kubernetes/inventory/group_vars/worker/k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/ 3 | # https://github.com/PyratLabs/ansible-role-k3s#agent-worker-configuration 4 | 5 | # Don't define the host as control plane nodes 6 | k3s_control_node: false 7 | 8 | # k3s settings for all worker nodes 9 | k3s_agent: 10 | node-ip: "{{ ansible_host }}" 11 | kubelet-arg: 12 | # Enable Alpha/Beta features 13 | - "feature-gates=EphemeralContainers=true,MixedProtocolLBService=true,ReadWriteOncePod=true" 14 | # Allow pods to be rescheduled quicker in the case of a node failure 15 | # https://github.com/k3s-io/k3s/issues/1264 16 | - "node-status-update-frequency=4s" 17 | -------------------------------------------------------------------------------- /ansible/kubernetes/inventory/host_vars/k8s-0.sops.yml: -------------------------------------------------------------------------------- 1 | kind: Secret 2 | ansible_become_pass: ENC[AES256_GCM,data:gE6ifQ9Z,iv:WwAZEUctSOo5u1SJKlNFqWVCOGFoTw6mf5b/W0d968A=,tag:dLL2KCmN+WmRCIzyLgoTtA==,type:str] 3 | sops: 4 | kms: [] 5 | gcp_kms: [] 6 | azure_kv: [] 7 | hc_vault: [] 8 | age: 9 | - recipient: age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta 10 | enc: | 11 | -----BEGIN AGE ENCRYPTED FILE----- 12 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBOeTVmalh5S3grTHpVekth 13 | Z2hEUjZHWFJ0anRhbmx5RXhiSXpnU1NKSEhZClpyQ1Q4Z1RobW1nSHpWNEFyRzRR 14 | QTV3S0dDc1BqN0NpalNwSmpZVTUvSUEKLS0tIEtKVjErS241SHgybHlSWldsbE4z 15 | L0tDTys2UldBT0NnK1hTTnArSEgySFEKPXcDa9SNLgRcAzIOlgwHvsYgoeSvLWxi 16 | WqMNaALSOPzWQZxXm1TOh8EsCykLo4fmdSBLdEfwNapf1BBHdGV3WQ== 17 | -----END AGE ENCRYPTED FILE----- 18 | lastmodified: "2021-11-24T15:58:16Z" 19 | mac: 
ENC[AES256_GCM,data:4iiXfowinku/182ndIK3AmF3+ttawShr22VSjSHX+pZnLmXvnviEAyTJ659K8BbjErLP6APZmLtCD4MeO0TfSe+ovh5FlqDTOWsrMScbVA/MZ5AyRXLJg5iffc8rZlAtZg8LsAeSSeWpIjm/GJt3P3Q22sn8QW7JaTGhFq3+oA0=,iv:M9w2H4VxIH8rG7AUDAWvtApdJ8awx7KaFTeTJgOhe4U=,tag:BEkTid4ixpiaiLzbMKx1yw==,type:str] 20 | pgp: [] 21 | unencrypted_regex: ^(kind)$ 22 | version: 3.7.1 23 | -------------------------------------------------------------------------------- /ansible/kubernetes/inventory/host_vars/k8s-1.sops.yml: -------------------------------------------------------------------------------- 1 | kind: Secret 2 | ansible_become_pass: ENC[AES256_GCM,data:Gyt6WJOm,iv:ewddizdq13hJ/JwHPjCyquxt3Cki7iabQUVYPshux8M=,tag:uoEPWFhgv5jeI+8YdEBC+w==,type:str] 3 | sops: 4 | kms: [] 5 | gcp_kms: [] 6 | azure_kv: [] 7 | hc_vault: [] 8 | age: 9 | - recipient: age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta 10 | enc: | 11 | -----BEGIN AGE ENCRYPTED FILE----- 12 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBMdnJ5ZklWaHVEemN4bG00 13 | c0VnaG9ZYnZWU2hxNVdGbmoxYmpwNEtXRjIwCks2SjJzR2ZyS2NBcEwyZVJ3QlZL 14 | eklQaDdsVWVoRno0TElqNld6Sk5MQnMKLS0tIFR5NStVd0F1T1l0c1VJWkdwdWRH 15 | WHRQbldLNFRVZ1ZYRlNnVFAySWhBbG8KJl1l20oG+mBGSTIXkrbOP8W+8GozQmvT 16 | 3f85La25Ijvnx78Ji7nqZMZDx53jdXTuk3DbvRuLvo6fJhwXBu1MwA== 17 | -----END AGE ENCRYPTED FILE----- 18 | lastmodified: "2021-11-24T15:58:19Z" 19 | mac: ENC[AES256_GCM,data:O6zao4iS785HbUhAk9jA1Hq5SXYkwg/7ejmnJq/+e9c2kjWKycUJSjvkYOdZd9kYDMD2CvoK4wsh4S1v6ZpWW44i5FkcDRwpvlGJx2zgitpagFOk9IZrEu4YqU1lhRM1kIEjNJH4H8ZNAMNKZlLUVv0RfkORCKfP5iYBK1yaJkc=,iv:HcBUEAk4wljV2Yw5fGfAJCKJ3GpTRjAHSJG9RYWHtrA=,tag:nPLjlIVtoRwcNDLd6RpP0A==,type:str] 20 | pgp: [] 21 | unencrypted_regex: ^(kind)$ 22 | version: 3.7.1 23 | -------------------------------------------------------------------------------- /ansible/kubernetes/inventory/host_vars/k8s-2.sops.yml: -------------------------------------------------------------------------------- 1 | kind: Secret 2 | ansible_become_pass: ENC[AES256_GCM,data:pP/cf4GD,iv:6QSD48jFBeuKhpkficlJxfjHJsTnRVSmmeSSyfpY/j0=,tag:rKlKZygt5KJ2NYmVLgdM3w==,type:str] 3 | sops: 4 | kms: [] 5 | gcp_kms: [] 6 | azure_kv: [] 7 | hc_vault: [] 8 | age: 9 | - recipient: age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta 10 | enc: | 11 | -----BEGIN AGE ENCRYPTED FILE----- 12 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBRdHhGc0dCcURGQ0I3Lzlk 13 | d2hma1pIaEJmd0drZWtza3JkU21BeWZIWW1nCkZ6QWdzQnBzaHk2cFkrU0ZDMG9Z 14 | MUNnaG5mQU95UzRPWkRLc3p5NC82QTgKLS0tIE5sc3BpNTJ6dWxJVFJ3WXFNcG9h 15 | dHFmTEJsanFYRmd1bmNVSmUwa2VJa2MKByAjtTCiqH5nKkW/4VZ0Ra/4jgtwKpQG 16 | T4XhlWgruWQmPZuhRuZEA5uJIhsNl5TcJ3ur22W1Am4dVysHBxmBCg== 17 | -----END AGE ENCRYPTED FILE----- 18 | lastmodified: "2021-11-24T15:58:22Z" 19 | mac: ENC[AES256_GCM,data:I08vA+8gBy1eSVtW95H174/rZqEyms5pRNznvVS+qAyMSMI9rzAmnSsU8WDfrtYUMVdoVI1SjGNhsNtV+HPlsxlAqNOtb00WHxkX8vWaR/1seFBarenvkjMq6NFn/E2W3yhne2QFhvEN3YnWRo6WJzL2AdXlPShRPJM7kC8SdB4=,iv:OipA7eLqUBAReu347z5qns+/V3NeDI99YiVl+hEzMsY=,tag:OH1iD7hhjfjM+cruUuyRZA==,type:str] 20 | pgp: [] 21 | unencrypted_regex: ^(kind)$ 22 | version: 3.7.1 23 | -------------------------------------------------------------------------------- /ansible/kubernetes/inventory/host_vars/k8s-3.sops.yml: -------------------------------------------------------------------------------- 1 | kind: Secret 2 | ansible_become_pass: ENC[AES256_GCM,data:myZbvtbW,iv:DvrRat6TzILxoU1RzlY+QtVxQUfHxWJ3IHuqSaFEUOw=,tag:g2eYpUeiPFYs45tQEJWBfQ==,type:str] 3 | sops: 4 | kms: [] 5 | gcp_kms: [] 6 | azure_kv: [] 7 | hc_vault: [] 8 | age: 9 
| - recipient: age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta 10 | enc: | 11 | -----BEGIN AGE ENCRYPTED FILE----- 12 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBaOGNlQWdwRk9SQ1dNNGJL 13 | Y3lMTHk5U2pHNjg1MllVems3SUc1TzZiVVJ3ClphUnVncFJGMzNqNGJVZFBhUjgy 14 | a0VFdFVNQkl4MmZHbEFML3RMMzhjU0kKLS0tIG1sdmZSNE5rdGFxdlVLd0ErT1Jp 15 | dGNKdURVVngzZk0wdTBHYXZwaU9rTkkK7FGYYrwzoWKsnCmxJ2Ca6q4EdPFPOfhD 16 | D59yr5mRu/hEbPE4lK0qmDzXr5cpI6cojfjJ+N2b3eCMeuiplk6z1A== 17 | -----END AGE ENCRYPTED FILE----- 18 | lastmodified: "2021-11-24T15:58:24Z" 19 | mac: ENC[AES256_GCM,data:sujaVr7bNPmkZ12hYns6o8vSdpIhiyaNzdiSD/cPuDzc21sNUTzlbppg7sHopY4/wGqDoaqTMNowCi+ykrFgxgl2GaHSXV2YkxfjKLrUtdvD6r9BmpBAEfGz9NzJbXjlzNJfNPGIIsokEYiie/22WoGn5bhQxopoteFZI7Wshvo=,iv:fX5qYSHuYUmpRhRdetnZDnnnA5HhVO2j0kmbsOyr3qw=,tag:fLMPJYm0WHynBKn4hV+z5A==,type:str] 20 | pgp: [] 21 | unencrypted_regex: ^(kind)$ 22 | version: 3.7.1 23 | -------------------------------------------------------------------------------- /ansible/kubernetes/inventory/host_vars/k8s-4.sops.yml: -------------------------------------------------------------------------------- 1 | kind: Secret 2 | ansible_become_pass: ENC[AES256_GCM,data:2pQCF5xf,iv:52YMBfp3Nn/EKGcMq9DP/3QV/nTaXwhQ7ZwZURsdP9Y=,tag:dJq8PRM3YlWWJ+KdkAdjRA==,type:str] 3 | sops: 4 | kms: [] 5 | gcp_kms: [] 6 | azure_kv: [] 7 | hc_vault: [] 8 | age: 9 | - recipient: age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta 10 | enc: | 11 | -----BEGIN AGE ENCRYPTED FILE----- 12 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBCalU4YjZIeVNjanFDaWVR 13 | anhHc3FYVDJHVzFmQ3FVNjYvdk9lVE4rNXlJCkh4cnpicFM2YitZOTlCVGtFQVpv 14 | cUw5ODN3MVk3SWVRM3E3aVNab1BIdmsKLS0tIHdWQWdYRXRXQ0JvekxVZm1VeEVp 15 | VGc4Yi9Pdm5QVG1ZbURjN1J5bmp0azgKJv+W9xBuq5eaytxfqnmyFcOxyZvfjpnf 16 | 6TcXS2rz+I5netLpCSUadyJzfyTmw7/gQwEi6pmNEaqFLwtiFYunMw== 17 | -----END AGE ENCRYPTED FILE----- 18 | lastmodified: "2021-11-24T15:58:26Z" 19 | mac: ENC[AES256_GCM,data:WuIRB7jlLMI214za+ED9E3Gvuo5lpeKlpGlaTMS3A3iC+Lr8CEuAmYvdwnUItp6PteOOpY802tNjbhy08CK4GJQIBCRb7leiceeJiOiLb135W3T5+wnRCHqKQMvEWMC6ppH2wa48U0Zdsosx/EGC3fgc6x4lih4vSHe7lgYKTxA=,iv:lQlA0J2Co+9yAot56Fdlq1X6cJluqO8U5/PE95Df+ZA=,tag:ObOo8iSyba3Ru6FCvOix0g==,type:str] 20 | pgp: [] 21 | unencrypted_regex: ^(kind)$ 22 | version: 3.7.1 23 | -------------------------------------------------------------------------------- /ansible/kubernetes/inventory/host_vars/k8s-5.sops.yml: -------------------------------------------------------------------------------- 1 | kind: Secret 2 | ansible_become_pass: ENC[AES256_GCM,data:cGBpAAgt,iv:kNnM10JusYoCLstqEi0fzgMkKBaNAavI4Lh2xbVOSA0=,tag:ADKE88f9Q8rrBvHW4IgCNA==,type:str] 3 | sops: 4 | kms: [] 5 | gcp_kms: [] 6 | azure_kv: [] 7 | hc_vault: [] 8 | age: 9 | - recipient: age15uzrw396e67z9wdzsxzdk7ka0g2gr3l460e0slaea563zll3hdfqwqxdta 10 | enc: | 11 | -----BEGIN AGE ENCRYPTED FILE----- 12 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBXYThIQWFSRXNxc3FmT0JB 13 | ZHRZaG9YRjM4TW1oQTNVMnVYdDg2a0hOUUF3CnZVbHNmWlY4ckhsa2p2bWh0RTl0 14 | Ui9nbmg1YnJYWWowNXhaSnFvdk9EeWcKLS0tIE5qRU55OGxPYTBBR3lMT3JKYkdR 15 | QklDejg4UUhSUFVtL1o0WE9JM1dDQlEKNPufKYIWMPciD0UcvIbMRKNieh8TV+kC 16 | lHJ+4NilirdVvihmzuAaviOoMtfEI4L8GDQC79vmBSFWcaGG4ACFmA== 17 | -----END AGE ENCRYPTED FILE----- 18 | lastmodified: "2021-11-24T15:58:55Z" 19 | mac: 
ENC[AES256_GCM,data:sRTeRsd6HialqMcmF/FAcYNiFcmkot+D8Lr8KGMkzqQLmNR28O/aMu0JGt/drdBNTTvcr+xEBed43rTaSyNs786ApgiVvcH0pAYxil/VnLDuOB9vnQB7O7Lnlrgo3U9Tu0O26HzrtDZAaFrxOVldakUGaF1+iTOmLlPophGv8lA=,iv:RUf9l0g+vJyuJBJc39+YKU8OfDacHHUqVvgOL7CSZhM=,tag:zsjT0zV/8aAMAi8KrffGZA==,type:str] 20 | pgp: [] 21 | unencrypted_regex: ^(kind)$ 22 | version: 3.7.1 23 | -------------------------------------------------------------------------------- /ansible/kubernetes/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubernetes: 3 | children: 4 | master: 5 | hosts: 6 | k8s-0: 7 | ansible_host: 192.168.42.10 8 | k8s-1: 9 | ansible_host: 192.168.42.11 10 | k8s-2: 11 | ansible_host: 192.168.42.12 12 | vars: 13 | ansible_user: ubuntu 14 | worker: 15 | hosts: 16 | k8s-3: 17 | ansible_host: 192.168.42.13 18 | rook_devices: 19 | - /dev/nvme0n1 20 | k8s-4: 21 | ansible_host: 192.168.42.14 22 | rook_devices: 23 | - /dev/nvme0n1 24 | k8s-5: 25 | ansible_host: 192.168.42.15 26 | rook_devices: 27 | - /dev/nvme0n1 28 | vars: 29 | ansible_user: ubuntu 30 | -------------------------------------------------------------------------------- /ansible/kubernetes/playbooks/k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - worker 5 | become: true 6 | gather_facts: true 7 | any_errors_fatal: true 8 | pre_tasks: 9 | - name: Pausing for 5 seconds... 10 | pause: 11 | seconds: 5 12 | roles: 13 | - k3s.kubernetes 14 | -------------------------------------------------------------------------------- /ansible/kubernetes/playbooks/nuke-k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - worker 5 | become: true 6 | gather_facts: true 7 | any_errors_fatal: true 8 | pre_tasks: 9 | - name: Pausing for 5 seconds... 10 | pause: 11 | seconds: 5 12 | roles: 13 | - xanmanning.k3s 14 | vars: 15 | k3s_state: uninstalled 16 | tasks: 17 | - name: Remove containerd-hack 18 | ansible.builtin.file: 19 | path: /etc/systemd/system/containerd-hack.service 20 | state: absent 21 | notify: 22 | - Reload systemd daemon 23 | - name: Gather list of CNI files 24 | ansible.builtin.find: 25 | paths: /etc/cni/net.d 26 | patterns: "*" 27 | hidden: true 28 | register: directory_contents 29 | - name: Delete CNI files 30 | ansible.builtin.file: 31 | path: "{{ item.path }}" 32 | state: absent 33 | loop: "{{ directory_contents.files }}" 34 | handlers: 35 | - name: Reload systemd daemon 36 | systemd: 37 | daemon-reload: true 38 | -------------------------------------------------------------------------------- /ansible/kubernetes/playbooks/nuke-rook-ceph.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - worker 4 | become: true 5 | gather_facts: true 6 | any_errors_fatal: true 7 | pre_tasks: 8 | - name: Pausing for 5 seconds... 
9 | pause: 10 | seconds: 5 11 | tasks: 12 | - name: Reset disks 13 | block: 14 | - name: Remove /var/lib/rook 15 | ansible.builtin.file: 16 | state: absent 17 | path: "/var/lib/rook" 18 | - name: Zap the drives 19 | ansible.builtin.shell: > 20 | sgdisk --zap-all {{ item }} || true 21 | with_items: 22 | - "{{ rook_devices | default([]) }}" 23 | - name: Remove lvm partitions 24 | ansible.builtin.shell: "{{ item }}" 25 | loop: 26 | - ls /dev/mapper/ceph--* | xargs -I% -- fuser --kill % 27 | - ls /dev/mapper/ceph--* | xargs -I% -- dmsetup clear % 28 | - ls /dev/mapper/ceph--* | xargs -I% -- dmsetup remove -f % 29 | - ls /dev/mapper/ceph--* | xargs -I% -- rm -rf % 30 | - name: Wipe the block device 31 | ansible.builtin.command: "wipefs -af {{ item }}" 32 | with_items: 33 | - "{{ rook_devices | default([]) }}" 34 | -------------------------------------------------------------------------------- /ansible/kubernetes/playbooks/os.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - worker 5 | become: true 6 | gather_facts: true 7 | any_errors_fatal: true 8 | pre_tasks: 9 | - name: Pausing for 5 seconds... 10 | pause: 11 | seconds: 5 12 | roles: 13 | - os.kubernetes 14 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/k3s.kubernetes/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | k3s_etcd_s3: false 3 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/k3s.kubernetes/files/containerd-hack.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Gracefully shutdown k3s workloads 3 | DefaultDependencies=no 4 | Before=shutdown.target 5 | 6 | [Service] 7 | Type=oneshot 8 | ExecStart=/usr/local/bin/k3s-killall.sh 9 | TimeoutStartSec=0 10 | 11 | [Install] 12 | WantedBy=shutdown.target 13 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/k3s.kubernetes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: k3s.yml 3 | tags: 4 | - k3s 5 | 6 | - import_tasks: home-dns.yml 7 | when: "'k8s-0' in inventory_hostname" 8 | tags: 9 | - home-dns 10 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/k3s.kubernetes/templates/calico/calico-bgpconfiguration.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: crd.projectcalico.org/v1 3 | kind: BGPConfiguration 4 | metadata: 5 | name: default 6 | spec: 7 | asNumber: {{ calico_bgp_as_number }} 8 | nodeToNodeMeshEnabled: true 9 | serviceClusterIPs: 10 | - cidr: "{{ k3s_server['service-cidr'] }}" 11 | serviceExternalIPs: 12 | - cidr: "{{ calico_bgp_external_ips }}" 13 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/k3s.kubernetes/templates/calico/calico-bgppeer.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: crd.projectcalico.org/v1 3 | kind: BGPPeer 4 | metadata: 5 | name: global 6 | spec: 7 | peerIP: {{ calico_bgp_peer_ip }} 8 | asNumber: {{ calico_bgp_as_number }} 9 | --------------------------------------------------------------------------------
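For reference, substituting the values defined later in roles/k3s.kubernetes/vars/main/calico.yml (calico_bgp_peer_ip: 192.168.42.1, calico_bgp_as_number: 64512), the BGPPeer template above renders to:

    ---
    apiVersion: crd.projectcalico.org/v1
    kind: BGPPeer
    metadata:
      name: global
    spec:
      peerIP: 192.168.42.1
      asNumber: 64512

so every node peers with the router at 192.168.42.1 within the shared AS 64512, and the calico_bgp_external_ips range (192.168.69.0/24) advertised by the BGPConfiguration becomes routable from the LAN.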
/ansible/kubernetes/roles/k3s.kubernetes/templates/calico/calico-installation.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operator.tigera.io/v1 3 | kind: Installation 4 | metadata: 5 | name: default 6 | spec: 7 | registry: quay.io 8 | imagePath: calico 9 | calicoNetwork: 10 | # Note: The ipPools section cannot be modified post-install. 11 | ipPools: 12 | - blockSize: 26 13 | cidr: "{{ k3s_server['cluster-cidr'] }}" 14 | encapsulation: "{{ calico_encapsulation }}" 15 | natOutgoing: Enabled 16 | nodeSelector: all() 17 | nodeAddressAutodetectionV4: 18 | cidrs: 19 | - "{{ calico_node_cidr }}" 20 | nodeMetricsPort: 9091 21 | typhaMetricsPort: 9093 22 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/k3s.kubernetes/templates/home-dns/home-dns-rbac.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: home-dns 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRole 9 | metadata: 10 | name: home-dns 11 | rules: 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - services 16 | - namespaces 17 | verbs: 18 | - list 19 | - watch 20 | - apiGroups: 21 | - extensions 22 | - networking.k8s.io 23 | resources: 24 | - ingresses 25 | verbs: 26 | - list 27 | - watch 28 | --- 29 | apiVersion: rbac.authorization.k8s.io/v1 30 | kind: ClusterRoleBinding 31 | metadata: 32 | name: home-dns 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: ClusterRole 36 | name: home-dns 37 | subjects: 38 | - kind: ServiceAccount 39 | name: home-dns 40 | namespace: kube-system 41 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/k3s.kubernetes/templates/k3s/10-etcd-snapshots.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | etcd-s3: {{ k3s_etcd_s3 | bool | lower }} 3 | {% if k3s_etcd_s3 %} 4 | etcd-snapshot-schedule-cron: "{{ k3s_etcd_snapshot_schedule_cron }}" 5 | etcd-snapshot-retention: {{ k3s_etcd_snapshot_retention }} 6 | etcd-s3-endpoint: "s3.{{ SECRET_PRIVATE_DOMAIN }}" 7 | etcd-s3-bucket: "{{ k3s_etcd_s3_bucket }}" 8 | etcd-s3-access-key: "{{ SECRET_MINIO_ACCESS_KEY }}" 9 | etcd-s3-secret-key: "{{ SECRET_MINIO_SECRET_KEY }}" 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/k3s.kubernetes/vars/main/calico.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # -- Encapsulation type 3 | calico_encapsulation: "None" 4 | # -- BGP Peer IP 5 | # -- (usually your router IP address) 6 | calico_bgp_peer_ip: 192.168.42.1 7 | # -- BGP Autonomous System Number 8 | # -- (must be the same across all BGP peers) 9 | calico_bgp_as_number: 64512 10 | # -- BGP Network you want services to consume 11 | # -- (this network should not exist or be defined anywhere in your network) 12 | calico_bgp_external_ips: 192.168.69.0/24 13 | # -- CIDR of the host node interface Calico should use 14 | calico_node_cidr: 192.168.42.0/24 15 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/k3s.kubernetes/vars/main/etcd-snapshots.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # -- enable exporting etcd backups to s3 3 | k3s_etcd_s3: 
"true" 4 | # -- snapshot bucket name 5 | k3s_etcd_s3_bucket: k3s 6 | # -- snapshot schedule 7 | k3s_etcd_snapshot_schedule_cron: "0 */6 * * *" 8 | # -- snapshot retention 9 | k3s_etcd_snapshot_retention: 28 10 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/os.kubernetes/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart smartd 3 | ansible.builtin.service: 4 | name: smartd.service 5 | daemon_reload: true 6 | enabled: true 7 | state: restarted 8 | 9 | - name: Restart unattended-upgrades 10 | ansible.builtin.service: 11 | name: unattended-upgrades.service 12 | daemon_reload: true 13 | enabled: true 14 | state: restarted 15 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/os.kubernetes/tasks/kernel.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Enable kernel modules runtime 3 | community.general.modprobe: 4 | name: "{{ item }}" 5 | state: present 6 | loop: 7 | - br_netfilter 8 | - overlay 9 | - rbd 10 | 11 | - name: Enable kernel modules on boot 12 | ansible.builtin.copy: 13 | mode: 0644 14 | content: "{{ item }}" 15 | dest: "/etc/modules-load.d/{{ item }}.conf" 16 | loop: 17 | - br_netfilter 18 | - overlay 19 | - rbd 20 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/os.kubernetes/tasks/locale.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set timezone 3 | community.general.timezone: 4 | name: "{{ os_timezone | default('America/New_York') }}" 5 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/os.kubernetes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: locale.yml 3 | tags: 4 | - locale 5 | 6 | - import_tasks: packages.yml 7 | tags: 8 | - packages 9 | 10 | - import_tasks: kernel.yml 11 | tags: 12 | - kernel 13 | 14 | - import_tasks: network.yml 15 | tags: 16 | - network 17 | 18 | - import_tasks: filesystem.yml 19 | tags: 20 | - filesystem 21 | 22 | - import_tasks: security.yml 23 | tags: 24 | - security 25 | 26 | - import_tasks: unattended-upgrades.yml 27 | tags: 28 | - unattended-upgrades 29 | 30 | - import_tasks: user.yml 31 | tags: 32 | - user 33 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/os.kubernetes/tasks/security.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check for existence of grub 3 | ansible.builtin.stat: 4 | path: /etc/default/grub 5 | register: grub_result 6 | 7 | - name: Disable apparmor and mitigations in grub 8 | ansible.builtin.replace: 9 | path: /etc/default/grub 10 | regexp: '^(GRUB_CMDLINE_LINUX=(?:(?![" ]{{ item.key | regex_escape }}=).)*)(?:[" ]{{ item.key | regex_escape }}=\S+)?(.*")$' 11 | replace: '\1 {{ item.key }}={{ item.value }}\2' 12 | with_dict: "{{ grub_config }}" 13 | vars: 14 | grub_config: 15 | apparmor: "0" 16 | mitigations: "off" 17 | register: grub_status 18 | when: 19 | - grub_result.stat.exists 20 | 21 | - name: Run grub-mkconfig 22 | ansible.builtin.command: update-grub 23 | when: 24 | - grub_result.stat.exists 25 | - grub_status.changed 26 | -------------------------------------------------------------------------------- 
/ansible/kubernetes/roles/os.kubernetes/tasks/unattended-upgrades.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install unattended-upgrades 3 | ansible.builtin.apt: 4 | name: unattended-upgrades 5 | update_cache: true 6 | 7 | - block: 8 | - name: Copy 20auto-upgrades unattended-upgrades config 9 | ansible.builtin.blockinfile: 10 | path: /etc/apt/apt.conf.d/20auto-upgrades 11 | mode: 0644 12 | create: true 13 | block: | 14 | APT::Periodic::Update-Package-Lists "1"; 15 | APT::Periodic::Download-Upgradeable-Packages "1"; 16 | APT::Periodic::AutocleanInterval "7"; 17 | APT::Periodic::Unattended-Upgrade "1"; 18 | 19 | - name: Copy 50unattended-upgrades unattended-upgrades config 20 | ansible.builtin.blockinfile: 21 | path: /etc/apt/apt.conf.d/50unattended-upgrades 22 | mode: 0644 23 | create: true 24 | block: | 25 | Unattended-Upgrade::Automatic-Reboot "false"; 26 | Unattended-Upgrade::Remove-Unused-Dependencies "true"; 27 | Unattended-Upgrade::Allowed-Origins { 28 | "${distro_id} stable"; 29 | "${distro_id} ${distro_codename}-security"; 30 | "${distro_id} ${distro_codename}-updates"; 31 | }; 32 | notify: Restart unattended-upgrades 33 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/os.kubernetes/tasks/user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add user to sudoers 3 | ansible.builtin.copy: 4 | content: "{{ ansible_user }} ALL=(ALL:ALL) NOPASSWD:ALL" 5 | dest: "/etc/sudoers.d/{{ ansible_user }}_nopasswd" 6 | mode: "0440" 7 | 8 | - name: Add additional user SSH public keys 9 | ansible.posix.authorized_key: 10 | user: "{{ ansible_user }}" 11 | key: "{{ item }}" 12 | loop: "{{ os_ssh_authorized_keys | default([]) }}" 13 | 14 | - name: Check if hushlogin exists 15 | ansible.builtin.stat: 16 | path: "/home/{{ ansible_user }}/.hushlogin" 17 | register: hushlogin_status 18 | 19 | - name: Silence the login prompt 20 | ansible.builtin.file: 21 | dest: "/home/{{ ansible_user }}/.hushlogin" 22 | state: touch 23 | owner: "{{ ansible_user }}" 24 | mode: "0775" 25 | when: 26 | - not hushlogin_status.stat.exists 27 | -------------------------------------------------------------------------------- /ansible/kubernetes/roles/os.kubernetes/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # SECRET* vars are encrypted with sops 4 | # 5 | 6 | os_timezone: "America/New_York" 7 | os_ssh_authorized_keys: 8 | - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnRHFdQWqxfTRuioNM4G3vZyWQy18Xty1+vQV0qm/6G devin@macbook" 9 | - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB75L4ZEN17wcsK765dWzqlf0lY+sbK25QCIvebB/+1x devin@phone" 10 | os_packages_install: 11 | - apt-transport-https 12 | - ca-certificates 13 | - curl 14 | - htop 15 | - nano 16 | - ntpdate 17 | - psmisc 18 | - software-properties-common 19 | - unzip 20 | os_packages_remove: 21 | - apparmor 22 | - apport 23 | - byobu 24 | - cloud-init 25 | - cloud-guest-utils 26 | - cloud-initramfs-copymods 27 | - cloud-initramfs-dyn-netconf 28 | - friendly-recovery 29 | - landscape-common 30 | - lxd-agent-loader 31 | - ntfs-3g 32 | - plymouth 33 | - plymouth-theme-ubuntu-text 34 | - popularity-contest 35 | - snapd 36 | - sosreport 37 | - ubuntu-advantage-tools 38 | - ufw 39 | -------------------------------------------------------------------------------- /ansible/kvm/.envrc: 
-------------------------------------------------------------------------------- 1 | #shellcheck disable=SC2148,SC2155 2 | export ANSIBLE_CONFIG=$(expand_path ./ansible.cfg) 3 | -------------------------------------------------------------------------------- /ansible/kvm/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kvm: 3 | hosts: 4 | pikvm: 5 | ansible_host: 192.168.1.80 6 | ansible_user: root 7 | -------------------------------------------------------------------------------- /ansible/kvm/playbooks/pikvm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - pikvm 4 | become: true 5 | gather_facts: true 6 | any_errors_fatal: true 7 | pre_tasks: 8 | - name: Pausing for 5 seconds... 9 | pause: 10 | seconds: 5 11 | - name: Mount read-write 12 | ansible.builtin.command: /usr/local/bin/rw 13 | tags: always 14 | roles: 15 | - role: os.kvm 16 | tags: os 17 | - role: acme.kvm 18 | tags: acme 19 | - role: node-exporter.kvm 20 | tags: node-exporter 21 | - role: vector.kvm 22 | tags: vector 23 | post_tasks: 24 | - name: Mount read-only 25 | ansible.builtin.command: /usr/local/bin/ro 26 | tags: always 27 | -------------------------------------------------------------------------------- /ansible/kvm/roles/acme.kvm/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /ansible/kvm/roles/acme.kvm/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # SECRET* vars are encrypted with sops 4 | # 5 | -------------------------------------------------------------------------------- /ansible/kvm/roles/node-exporter.kvm/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /ansible/kvm/roles/node-exporter.kvm/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart node-exporter 3 | ansible.builtin.systemd: 4 | name: node-exporter.service 5 | state: restarted 6 | enabled: true 7 | daemon_reload: true 8 | -------------------------------------------------------------------------------- /ansible/kvm/roles/node-exporter.kvm/templates/node-exporter.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=NodeExporter 3 | 4 | [Service] 5 | TimeoutStartSec=0 6 | User=root 7 | ExecStart=/usr/local/bin/node-exporter 8 | 9 | [Install] 10 | WantedBy=multi-user.target 11 | -------------------------------------------------------------------------------- /ansible/kvm/roles/node-exporter.kvm/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # SECRET* vars are encrypted with sops 4 | # 5 | 6 | # renovate: datasource=github-releases depName=prometheus/node_exporter 7 | node_exporter_version: "v1.3.1" 8 | node_exporter_arch: armv7 9 | node_exporter_download_url: https://github.com/prometheus/node_exporter/releases/download/{{ node_exporter_version }}/node_exporter-{{ node_exporter_version[1:] }}.linux-{{ node_exporter_arch }}.tar.gz 10 | -------------------------------------------------------------------------------- /ansible/kvm/roles/os.kvm/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | ansible_python_interpreter: /usr/bin/python3 3 | -------------------------------------------------------------------------------- /ansible/kvm/roles/os.kvm/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart kvmd 3 | ansible.builtin.systemd: 4 | name: "kvmd.service" 5 | state: restarted 6 | enabled: true 7 | daemon_reload: true 8 | -------------------------------------------------------------------------------- /ansible/kvm/roles/os.kvm/templates/tc358743-edid.hex.j2: -------------------------------------------------------------------------------- 1 | 00FFFFFFFFFFFF0052628888008888881C150103800000780AEE91A3544C99260F505425400001000100010001000100010001010101D32C80A070381A403020350040442100001E7E1D00A0500019403020370080001000001E000000FC0050492D4B564D20566964656F0A000000FD00323D0F2E0F000000000000000001C402030400DE0D20A03058122030203400F0B400000018E01500A04000163030203400000000000018B41400A050D011203020350080D810000018AB22A0A050841A3030203600B00E1100001800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000045 2 | -------------------------------------------------------------------------------- /ansible/kvm/roles/os.kvm/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # SECRET* vars are encrypted with sops 4 | # 5 | 6 | os_timezone: "America/New_York" 7 | os_ssh_authorized_keys: 8 | - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnRHFdQWqxfTRuioNM4G3vZyWQy18Xty1+vQV0qm/6G devin@macbook" 9 | - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB75L4ZEN17wcsK765dWzqlf0lY+sbK25QCIvebB/+1x devin@phone" 10 | pikvm_tesmart_kvm_addr: 192.168.1.10 11 | pikvm_tesmart_kvm_port: 5000 12 | -------------------------------------------------------------------------------- /ansible/kvm/roles/vector.kvm/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /ansible/kvm/roles/vector.kvm/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart vector 3 | ansible.builtin.systemd: 4 | name: vector.service 5 | state: restarted 6 | enabled: true 7 | daemon_reload: true 8 | -------------------------------------------------------------------------------- /ansible/kvm/roles/vector.kvm/templates/vector.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Vector 3 | Documentation=https://vector.dev 4 | After=network-online.target 5 | Wants=network-online.target 6 | 7 | [Service] 8 | User=root 9 | ExecStart=/usr/local/bin/vector --config /etc/vector/vector.yaml 10 | ExecReload=/bin/kill -HUP $MAINPID 11 | Restart=no 12 | AmbientCapabilities=CAP_NET_BIND_SERVICE 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /ansible/kvm/roles/vector.kvm/templates/vector.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | data_dir: /tmp 3 | sources: 4 | journal_logs: 5 | type: journald 6 | journal_directory: /run/log/journal 7 | sinks: 8 | vector_sink: 9 | type: vector 10 | inputs: 11 | - journal_logs 12 | address: "{{ vector_aggregator_addr }}:{{ vector_aggregator_port 
}}" 13 | version: "2" 14 | -------------------------------------------------------------------------------- /ansible/kvm/roles/vector.kvm/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # SECRET* vars are encrypted with sops 4 | # 5 | 6 | # renovate: datasource=github-releases depName=vectordotdev/vector 7 | vector_version: "v0.21.2" 8 | vector_download_url: "https://github.com/vectordotdev/vector/releases/download/{{ vector_version }}/vector-{{ vector_version[1:] }}-armv7-unknown-linux-gnueabihf.tar.gz" 9 | 10 | vector_aggregator_addr: "192.168.69.111" 11 | vector_aggregator_port: "6000" 12 | -------------------------------------------------------------------------------- /ansible/requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: ansible.posix 4 | version: 1.3.0 5 | - name: community.general 6 | version: 4.8.0 7 | - name: kubernetes.core 8 | version: 2.3.1 9 | - name: community.sops 10 | version: 1.2.1 11 | roles: 12 | - src: geerlingguy.pip 13 | version: 2.1.0 14 | - src: geerlingguy.docker 15 | version: 4.2.2 16 | - src: xanmanning.k3s 17 | version: v3.1.2 18 | - src: https://github.com/mrlesmithjr/ansible-zfs.git 19 | scm: git 20 | name: mrlesmithjr.zfs 21 | version: b80e84d6938eb7730faeffc7ff7d8aa06ad5bef7 22 | -------------------------------------------------------------------------------- /ansible/router/.envrc: -------------------------------------------------------------------------------- 1 | #shellcheck disable=SC2148,SC2155 2 | export ANSIBLE_CONFIG=$(expand_path ./ansible.cfg) 3 | -------------------------------------------------------------------------------- /ansible/router/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | router: 3 | hosts: 4 | opnsense: 5 | ansible_host: 192.168.1.1 6 | ansible_user: root 7 | -------------------------------------------------------------------------------- /ansible/router/playbooks/apps.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - opnsense 4 | become: false 5 | gather_facts: false 6 | any_errors_fatal: true 7 | roles: 8 | - role: adguardhome.router 9 | - role: coredns.router 10 | -------------------------------------------------------------------------------- /ansible/router/playbooks/os.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - opnsense 4 | become: false 5 | gather_facts: false 6 | any_errors_fatal: true 7 | roles: 8 | - role: os.router 9 | -------------------------------------------------------------------------------- /ansible/router/roles/adguardhome.router/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_shell_executable: /usr/local/bin/bash 3 | ansible_python_interpreter: /usr/local/bin/python3 4 | ansible_perl_interpreter: /usr/local/bin/perl 5 | -------------------------------------------------------------------------------- /ansible/router/roles/adguardhome.router/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart adguardhome 3 | ansible.builtin.shell: | 4 | /usr/local/etc/rc.d/adguardhome restart 5 | -------------------------------------------------------------------------------- 
/ansible/router/roles/adguardhome.router/templates/99-adguardhome.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | /usr/local/etc/rc.d/adguardhome start 4 | -------------------------------------------------------------------------------- /ansible/router/roles/adguardhome.router/templates/actions_adguardhome.conf.j2: -------------------------------------------------------------------------------- 1 | [start] 2 | command:/usr/local/etc/rc.d/adguardhome start 3 | parameters: 4 | type:script 5 | message:starting adguardhome 6 | 7 | [stop] 8 | command:/usr/local/etc/rc.d/adguardhome stop 9 | parameters: 10 | type:script 11 | message:stopping adguardhome 12 | 13 | [restart] 14 | command:/usr/local/etc/rc.d/adguardhome restart 15 | parameters: 16 | type:script 17 | message:restarting adguardhome 18 | -------------------------------------------------------------------------------- /ansible/router/roles/adguardhome.router/templates/adguardhome-newsyslog.conf.j2: -------------------------------------------------------------------------------- 1 | # logfilename [owner:group] mode count size when flags [/pid_file] [sig_num] 2 | /var/log/adguardhome/adguardhome.log root:wheel 640 7 * @T00 B /var/run/adguardhome.pid 1 3 | -------------------------------------------------------------------------------- /ansible/router/roles/adguardhome.router/templates/adguardhome-rc.conf.j2: -------------------------------------------------------------------------------- 1 | adguardhome_enable="YES" 2 | -------------------------------------------------------------------------------- /ansible/router/roles/adguardhome.router/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # SECRET* vars are encrypted with sops 4 | # 5 | 6 | # renovate: datasource=github-releases depName=AdguardTeam/AdGuardHome 7 | adguardhome_version: "v0.107.6" 8 | adguardhome_download_url: https://github.com/AdguardTeam/AdGuardHome/releases/download/{{ adguardhome_version }}/AdGuardHome_freebsd_amd64.tar.gz 9 | -------------------------------------------------------------------------------- /ansible/router/roles/coredns.router/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_shell_executable: /usr/local/bin/bash 3 | ansible_python_interpreter: /usr/local/bin/python3 4 | ansible_perl_interpreter: /usr/local/bin/perl 5 | -------------------------------------------------------------------------------- /ansible/router/roles/coredns.router/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart coredns 3 | ansible.builtin.shell: | 4 | /usr/local/etc/rc.d/coredns restart 5 | -------------------------------------------------------------------------------- /ansible/router/roles/coredns.router/templates/99-coredns.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | /usr/local/etc/rc.d/coredns start 4 | -------------------------------------------------------------------------------- /ansible/router/roles/coredns.router/templates/Corefile.j2: -------------------------------------------------------------------------------- 1 | (common) { 2 | # AdGuardHome is bound to port 53 on other interfaces 3 | bind 127.0.0.1 ::1 4 | errors 5 | log 6 | reload 7 | loadbalance 8 | cache 300 9 | loop 10 | local 11 | # dnsmasq is only enabled for setting hostnames 
in the Opnsense UI. 12 | # In the dnsmasq settings: 13 | # 1) Set 'Listen Port' to 6363 14 | # 2) Network Interfaces is set to one local interface 15 | # 3) Make sure 'Register DHCP static mappings' is enabled 16 | # 4) Adding an 'Host Overrides' also appends to this host file 17 | hosts /var/etc/dnsmasq-hosts { 18 | ttl 600 19 | reload 5s 20 | fallthrough 21 | } 22 | prometheus 192.168.1.1:9153 23 | } 24 | 25 | . { 26 | import common 27 | k8s_gateway {{ SECRET_PUBLIC_DOMAIN }} { 28 | resources Ingress 29 | ttl 1 30 | kubeconfig /usr/local/etc/coredns/kubeconfig 31 | fallthrough 32 | } 33 | forward . tls://1.1.1.1 tls://1.0.0.1 { 34 | tls_servername cloudflare-dns.com 35 | } 36 | } 37 | 38 | {{ SECRET_PRIVATE_DOMAIN }} { 39 | import common 40 | k8s_gateway . { 41 | resources Ingress 42 | ttl 30 43 | kubeconfig /usr/local/etc/coredns/kubeconfig 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /ansible/router/roles/coredns.router/templates/actions_coredns.conf.j2: -------------------------------------------------------------------------------- 1 | [start] 2 | command:/usr/local/etc/rc.d/coredns start 3 | parameters: 4 | type:script 5 | message:starting coredns 6 | 7 | [stop] 8 | command:/usr/local/etc/rc.d/coredns stop 9 | parameters: 10 | type:script 11 | message:stopping coredns 12 | 13 | [restart] 14 | command:/usr/local/etc/rc.d/coredns restart 15 | parameters: 16 | type:script 17 | message:restarting coredns 18 | -------------------------------------------------------------------------------- /ansible/router/roles/coredns.router/templates/coredns-newsyslog.conf.j2: -------------------------------------------------------------------------------- 1 | # logfilename [owner:group] mode count size when flags [/pid_file] [sig_num] 2 | /var/log/coredns/coredns.log root:wheel 640 7 * @T00 B /var/run/coredns.pid 1 3 | -------------------------------------------------------------------------------- /ansible/router/roles/coredns.router/templates/coredns-rc.conf.j2: -------------------------------------------------------------------------------- 1 | coredns_enable="YES" 2 | -------------------------------------------------------------------------------- /ansible/router/roles/coredns.router/templates/coredns-rc.d.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # PROVIDE: coredns 4 | # REQUIRE: DAEMON NETWORKING 5 | # KEYWORD: shutdown 6 | # 7 | # Add the following to /etc/rc.conf[.local] to enable this service 8 | # 9 | # coredns_enable (bool): Set to NO by default. 10 | # Set it to YES to enable coredns. 11 | # coredns_config (str): Set to /usr/local/etc/coredns/Corefile by default. 12 | # 13 | # coredns_logfile (str): Set to /var/log/coredns/coredns.log by default. 14 | # 15 | 16 | . /etc/rc.subr 17 | 18 | name=coredns 19 | rcvar=coredns_enable 20 | 21 | load_rc_config ${name} 22 | 23 | : ${coredns_enable:=NO} 24 | : ${coredns_config:="/usr/local/etc/coredns/Corefile"} 25 | : ${coredns_flags:=} 26 | : ${coredns_logfile:="/var/log/coredns/coredns.log"} 27 | 28 | pidfile=/var/run/coredns.pid 29 | command="/usr/local/sbin/coredns" 30 | 31 | start_cmd="${name}_start" 32 | 33 | coredns_start() 34 | { 35 | echo -n "Starting ${name}." 
36 | /usr/sbin/daemon -p ${pidfile} -H -m 3 -f -o ${coredns_logfile} \ 37 | ${command} \ 38 | -conf ${coredns_config} \ 39 | ${coredns_flags} 40 | } 41 | 42 | run_rc_command "$1" 43 | -------------------------------------------------------------------------------- /ansible/router/roles/coredns.router/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # SECRET* vars are encrypted with sops 4 | # 5 | 6 | # renovate: datasource=github-releases depName=ori-edge/k8s_gateway 7 | coredns_version: "v0.3.0" 8 | coredns_download_url: https://github.com/ori-edge/k8s_gateway/releases/download/{{ coredns_version }}/k8s_gateway_{{ coredns_version[1:] }}_freebsd_amd64.tar.gz 9 | -------------------------------------------------------------------------------- /ansible/router/roles/os.router/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_shell_executable: /usr/local/bin/bash 3 | ansible_python_interpreter: /usr/local/bin/python3 4 | ansible_perl_interpreter: /usr/local/bin/perl 5 | -------------------------------------------------------------------------------- /ansible/router/roles/os.router/tasks/filesystem.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Enable ZFS autotrim on zroot 3 | ansible.builtin.command: zpool set autotrim=on zroot 4 | -------------------------------------------------------------------------------- /ansible/router/roles/os.router/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: packages.yml 3 | tags: 4 | - packages 5 | 6 | - import_tasks: filesystem.yml 7 | tags: 8 | - filesystem 9 | -------------------------------------------------------------------------------- /ansible/router/roles/os.router/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # SECRET* vars are encrypted with sops 4 | # 5 | -------------------------------------------------------------------------------- /ansible/storage/.envrc: -------------------------------------------------------------------------------- 1 | #shellcheck disable=SC2148,SC2155 2 | export ANSIBLE_CONFIG=$(expand_path ./ansible.cfg) 3 | -------------------------------------------------------------------------------- /ansible/storage/inventory/group_vars/storage/docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | docker_edition: ce 4 | docker_package: "docker-{{ docker_edition }}" 5 | docker_users: "{{ ansible_user }}" 6 | docker_install_compose: true 7 | # renovate: datasource=github-releases depName=docker/compose 8 | docker_compose_version: "v2.5.0" 9 | docker_daemon_options: 10 | log-driver: journald 11 | dns: ["192.168.1.1"] 12 | -------------------------------------------------------------------------------- /ansible/storage/inventory/group_vars/storage/pip.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | pip_package: python3-pip 4 | 5 | # renovate: datasource=pypi depName=docker versioning=pep440 6 | pip_docker_version: "5.0.3" 7 | 8 | # renovate: datasource=pypi depName=docker-compose versioning=pep440 9 | pip_docker_compose_version: "1.29.2" 10 | 11 | pip_install_packages: 12 | - docker=={{ pip_docker_version }} 13 | - docker-compose=={{ pip_docker_compose_version }} 14 | 
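These two group_vars files feed geerlingguy.pip and geerlingguy.docker on the storage host; after the apps play has run, the pins can be spot-checked on the host itself. A small sketch (the journald expectation comes from docker_daemon_options above):

# Daemon options from docker_daemon_options should be live
docker info --format '{{ .LoggingDriver }}'   # expect: journald

# Compose binary and python libraries pinned above
docker-compose version
pip3 show docker docker-compose | grep -E '^(Name|Version)'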
-------------------------------------------------------------------------------- /ansible/storage/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | storage: 3 | hosts: 4 | expanse: 5 | ansible_host: 192.168.1.81 6 | ansible_user: devin 7 | -------------------------------------------------------------------------------- /ansible/storage/playbooks/apps.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - storage 4 | become: true 5 | gather_facts: true 6 | any_errors_fatal: true 7 | pre_tasks: 8 | - name: Pausing for 5 seconds... 9 | pause: 10 | seconds: 5 11 | roles: 12 | - role: geerlingguy.pip 13 | - role: geerlingguy.docker 14 | - role: apps.storage 15 | tags: 16 | - apps.storage 17 | -------------------------------------------------------------------------------- /ansible/storage/playbooks/os.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - storage 4 | become: true 5 | gather_facts: true 6 | any_errors_fatal: true 7 | pre_tasks: 8 | - name: Pausing for 5 seconds... 9 | pause: 10 | seconds: 5 11 | roles: 12 | - os.storage 13 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/files/docker-cleanup.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Docker cleanup 3 | Requires=docker.service 4 | After=docker.service 5 | 6 | [Service] 7 | Type=oneshot 8 | WorkingDirectory=/tmp 9 | User=root 10 | Group=root 11 | ExecStart=/usr/bin/docker system prune -af 12 | 13 | [Install] 14 | WantedBy=multi-user.target 15 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/files/docker-cleanup.timer: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Docker cleanup timer 3 | 4 | [Timer] 5 | OnUnitInactiveSec=12h 6 | 7 | [Install] 8 | WantedBy=timers.target 9 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/files/docker-override.conf: -------------------------------------------------------------------------------- 1 | [Unit] 2 | After=zfs-mount.service 3 | Requires=zfs-mount.service 4 | Wants=zfs-mount.service 5 | BindsTo=zfs-mount.service 6 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/tasks/docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create docker override systemd file 3 | ansible.builtin.copy: 4 | src: "files/docker-override.conf" 5 | dest: "/etc/systemd/system/docker.service.d/override.conf" 6 | mode: 0644 7 | remote_src: false 8 | 9 | - name: Create docker clean up systemd files 10 | ansible.builtin.copy: 11 | src: "files/{{ item }}" 12 | dest: "/etc/systemd/system/{{ item }}" 13 | mode: 0644 14 | remote_src: false 15 | loop: 16 | - docker-cleanup.service 17 | - docker-cleanup.timer 18 | notify: Restart docker-cleanup 19 | -------------------------------------------------------------------------------- 
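The docker.yml tasks above install a prune service/timer pair and an override that binds docker.service to zfs-mount.service, so containers never start before the pool is imported; the notified Restart docker-cleanup handler lives elsewhere in the role and is not shown here. A minimal sketch for enabling and inspecting the pieces by hand on the host:

# Enable the cleanup timer and confirm when it will next fire
sudo systemctl daemon-reload
sudo systemctl enable --now docker-cleanup.timer
systemctl list-timers 'docker-cleanup*'

# Confirm docker.service picked up the zfs-mount override
systemctl show docker.service -p After -p BindsTo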
/ansible/storage/roles/apps.storage/tasks/kopia.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create kopia directories 3 | ansible.builtin.file: 4 | path: "{{ item }}" 5 | state: directory 6 | owner: "{{ ansible_user }}" 7 | group: users 8 | mode: 0775 9 | loop: 10 | - "{{ docker_base_dir }}/kopia" 11 | 12 | - block: 13 | - name: Create kopia docker compose file 14 | ansible.builtin.template: 15 | src: kopia/docker-compose.yml.j2 16 | dest: "{{ docker_base_dir }}/kopia/docker-compose.yml" 17 | owner: "{{ ansible_user }}" 18 | group: users 19 | mode: 0775 20 | - name: Create kopia systemd service file 21 | ansible.builtin.template: 22 | src: "docker-compose@.service.j2" 23 | dest: "/etc/systemd/system/docker-compose@kopia.service" 24 | mode: 0644 25 | vars: 26 | condition_path_is_mount_point: "{{ kopia_data_dir }}" 27 | notify: Restart kopia 28 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create default directories 3 | ansible.builtin.file: 4 | path: "{{ item }}" 5 | state: directory 6 | owner: "{{ ansible_user }}" 7 | group: users 8 | mode: 0775 9 | loop: 10 | - "{{ docker_base_dir }}" 11 | tags: 12 | - always 13 | 14 | # Manage Docker installation 15 | - import_tasks: docker.yml 16 | 17 | # Manage applications 18 | - import_tasks: kopia.yml 19 | - import_tasks: nexus.yml 20 | - import_tasks: node-exporter.yml 21 | - import_tasks: time-machine.yml 22 | - import_tasks: traefik.yml 23 | - import_tasks: vector.yml 24 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/tasks/nexus.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create nexus directories 3 | ansible.builtin.file: 4 | path: "{{ item }}" 5 | state: directory 6 | owner: "{{ ansible_user }}" 7 | group: users 8 | mode: 0775 9 | loop: 10 | - "{{ docker_base_dir }}/nexus" 11 | 12 | - block: 13 | - name: Create nexus docker compose file 14 | ansible.builtin.template: 15 | src: nexus/docker-compose.yml.j2 16 | dest: "{{ docker_base_dir }}/nexus/docker-compose.yml" 17 | owner: "{{ ansible_user }}" 18 | group: users 19 | mode: 0755 20 | - name: Create nexus systemd service file 21 | ansible.builtin.template: 22 | src: "docker-compose@.service.j2" 23 | dest: "/etc/systemd/system/docker-compose@nexus.service" 24 | mode: 0644 25 | vars: 26 | condition_path_is_mount_point: "{{ nexus_data_dir }}" 27 | notify: Restart nexus 28 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/tasks/node-exporter.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create node-exporter directories 3 | ansible.builtin.file: 4 | path: "{{ item }}" 5 | state: directory 6 | owner: "{{ ansible_user }}" 7 | group: users 8 | mode: 0775 9 | loop: 10 | - "{{ docker_base_dir }}/node-exporter" 11 | 12 | - block: 13 | - name: Create node-exporter docker compose file 14 | ansible.builtin.template: 15 | src: node-exporter/docker-compose.yml.j2 16 | dest: "{{ docker_base_dir }}/node-exporter/docker-compose.yml" 17 | owner: "{{ ansible_user }}" 18 | group: users 19 | mode: 0775 20 | - name: Create node-exporter systemd service file 21 | ansible.builtin.template: 22 | src: 
"docker-compose@.service.j2" 23 | dest: "/etc/systemd/system/docker-compose@node-exporter.service" 24 | mode: 0644 25 | vars: 26 | condition_path_is_mount_point: "{{ zfs_zpool_base_dir }}" 27 | notify: Restart node-exporter 28 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/tasks/time-machine.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create time-machine directories 3 | ansible.builtin.file: 4 | path: "{{ item }}" 5 | state: directory 6 | owner: "{{ ansible_user }}" 7 | group: users 8 | mode: 0775 9 | loop: 10 | - "{{ docker_base_dir }}/time-machine" 11 | 12 | - block: 13 | - name: Create time-machine docker compose file 14 | ansible.builtin.template: 15 | src: time-machine/docker-compose.yml.j2 16 | dest: "{{ docker_base_dir }}/time-machine/docker-compose.yml" 17 | owner: "{{ ansible_user }}" 18 | group: users 19 | mode: 0775 20 | - name: Create time-machine systemd service file 21 | ansible.builtin.template: 22 | src: "docker-compose@.service.j2" 23 | dest: "/etc/systemd/system/docker-compose@time-machine.service" 24 | mode: 0644 25 | vars: 26 | condition_path_is_mount_point: "{{ time_machine_data_dir }}" 27 | notify: Restart time-machine 28 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/tasks/traefik.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create traefik directories 3 | ansible.builtin.file: 4 | path: "{{ item }}" 5 | state: directory 6 | owner: "{{ ansible_user }}" 7 | group: users 8 | mode: 0775 9 | loop: 10 | - "{{ docker_base_dir }}/traefik" 11 | 12 | - block: 13 | - name: Create traefik docker compose file 14 | ansible.builtin.template: 15 | src: traefik/docker-compose.yml.j2 16 | dest: "{{ docker_base_dir }}/traefik/docker-compose.yml" 17 | owner: "{{ ansible_user }}" 18 | group: users 19 | mode: 0775 20 | - name: Create traefik systemd service file 21 | ansible.builtin.template: 22 | src: "docker-compose@.service.j2" 23 | dest: "/etc/systemd/system/docker-compose@traefik.service" 24 | mode: 0644 25 | vars: 26 | condition_path_is_mount_point: "{{ traefik_data_dir }}" 27 | notify: Restart traefik 28 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/tasks/vector.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create vector directories 3 | ansible.builtin.file: 4 | path: "{{ item }}" 5 | state: directory 6 | owner: "{{ ansible_user }}" 7 | group: users 8 | mode: 0775 9 | loop: 10 | - "{{ docker_base_dir }}/vector" 11 | 12 | - block: 13 | - name: Create vector docker compose file 14 | ansible.builtin.template: 15 | src: vector/docker-compose.yml.j2 16 | dest: "{{ docker_base_dir }}/vector/docker-compose.yml" 17 | owner: "{{ ansible_user }}" 18 | group: users 19 | mode: 0775 20 | - name: Create vector config 21 | ansible.builtin.template: 22 | src: vector/vector.yaml.j2 23 | dest: "{{ vector_data_dir }}/config/vector.yaml" 24 | owner: "{{ ansible_user }}" 25 | group: users 26 | mode: 0775 27 | - name: Create vector systemd service file 28 | ansible.builtin.template: 29 | src: "docker-compose@.service.j2" 30 | dest: "/etc/systemd/system/docker-compose@vector.service" 31 | mode: 0644 32 | vars: 33 | condition_path_is_mount_point: "{{ vector_data_dir }}" 34 | notify: Restart vector 35 | 
-------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/templates/docker-compose@.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=%i service with docker compose 3 | Requires=docker.service 4 | After=docker.service 5 | ConditionPathIsMountPoint={{ condition_path_is_mount_point }} 6 | 7 | [Service] 8 | Type=oneshot 9 | RemainAfterExit=true 10 | WorkingDirectory={{ docker_base_dir }}/%i 11 | ExecStart=/usr/local/bin/docker-compose up -d --remove-orphans 12 | ExecStop=/usr/local/bin/docker-compose down 13 | 14 | [Install] 15 | WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/templates/node-exporter/docker-compose.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3.8" 3 | 4 | services: 5 | node-exporter: 6 | image: quay.io/prometheus/node-exporter:v1.3.1 7 | container_name: node-exporter 8 | restart: unless-stopped 9 | network_mode: host 10 | pid: host 11 | cap_add: 12 | - SYS_TIME 13 | - SYS_NICE 14 | command: 15 | - --path.procfs=/host/proc 16 | - --path.rootfs=/rootfs 17 | - --path.sysfs=/host/sys 18 | volumes: 19 | - /proc:/host/proc:ro 20 | - /sys:/host/sys:ro 21 | - /:/rootfs:ro,rslave 22 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/templates/time-machine/docker-compose.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3.8" 3 | 4 | services: 5 | time-machine: 6 | image: docker.io/mbentley/timemachine:smb-20220407 7 | container_name: time-machine 8 | restart: unless-stopped 9 | network_mode: host 10 | environment: 11 | PASSWORD: "{{ SECRET_TIMEMACHINE_PASSWORD }}" 12 | TM_UID: 568 13 | TM_GID: 568 14 | volumes: 15 | - time-machine-data:/opt/timemachine 16 | - time-machine-var-lib-smb:/var/lib/samba 17 | - time-machine-var-cache-smb:/var/cache/samba 18 | - time-machine-run-smb:/run/samba 19 | 20 | volumes: 21 | time-machine-data: 22 | driver: local 23 | driver_opts: 24 | type: none 25 | o: bind 26 | device: "{{ time_machine_data_dir }}/data" 27 | time-machine-var-lib-smb: 28 | driver: local 29 | driver_opts: 30 | type: none 31 | o: bind 32 | device: "{{ time_machine_data_dir }}/var-lib-smb" 33 | time-machine-var-cache-smb: 34 | driver: local 35 | driver_opts: 36 | type: none 37 | o: bind 38 | device: "{{ time_machine_data_dir }}/var-cache-smb" 39 | time-machine-run-smb: 40 | driver: local 41 | driver_opts: 42 | type: none 43 | o: bind 44 | device: "{{ time_machine_data_dir }}/run-smb" 45 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/templates/vector/docker-compose.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3.8" 3 | 4 | services: 5 | vector: 6 | image: docker.io/timberio/vector:0.21.2-debian 7 | container_name: vector 8 | restart: unless-stopped 9 | network_mode: host 10 | command: 11 | - --config=/etc/vector/vector.yaml 12 | volumes: 13 | - "{{ vector_data_dir }}/config/vector.yaml:/etc/vector/vector.yaml:ro" 14 | - vector-data:/vector-data-dir 15 | - /var/log:/var/log:ro 16 | 17 | volumes: 18 | vector-data: 19 | driver: local 20 | driver_opts: 21 | type: none 22 | o: bind 23 | device: "{{ vector_data_dir }}/data" 24 | 
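The compose projects above all run with network_mode: host, so their listeners can be checked straight from the storage host once the units are up. A small sketch, assuming the images' default ports (9100 for node-exporter, 445 for the Samba-based Time Machine share):

# node-exporter metrics on the host network
curl -sf http://localhost:9100/metrics | head -n 5

# Samba / Time Machine listening on the default SMB port
ss -tlnp | grep ':445'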
-------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/templates/vector/vector.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | data_dir: /vector-data-dir 3 | sources: 4 | journal_logs: 5 | type: journald 6 | journal_directory: /var/log/journal 7 | sinks: 8 | vector_sink: 9 | type: vector 10 | inputs: 11 | - journal_logs 12 | address: "{{ vector_aggregator_addr }}:{{ vector_aggregator_port }}" 13 | version: "2" 14 | -------------------------------------------------------------------------------- /ansible/storage/roles/apps.storage/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # SECRET* vars are encrypted with sops 4 | # 5 | 6 | # -- Base Docker Directory 7 | docker_base_dir: /opt/docker 8 | 9 | # -- Base ZFS Dataset 10 | zfs_zpool_base_dir: /tycho 11 | 12 | # -- Kopia Dataset Directory 13 | kopia_data_dir: "{{ zfs_zpool_base_dir }}/Apps/Internal/Kopia" 14 | 15 | # -- Nexus Dataset Directory 16 | nexus_data_dir: "{{ zfs_zpool_base_dir }}/Apps/Internal/Nexus" 17 | 18 | # -- Time Machine Dataset Directory 19 | time_machine_data_dir: "{{ zfs_zpool_base_dir }}/Apps/Internal/TimeMachine" 20 | 21 | # -- Traefik Dataset Directory 22 | traefik_data_dir: "{{ zfs_zpool_base_dir }}/Apps/Internal/Traefik" 23 | 24 | # -- Vector Dataset Directory 25 | vector_data_dir: "{{ zfs_zpool_base_dir }}/Apps/Internal/Vector" 26 | 27 | # -- Vector Aggregator Address 28 | vector_aggregator_addr: "192.168.69.111" 29 | 30 | # -- Vector Aggregator Port 31 | vector_aggregator_port: "6000" 32 | -------------------------------------------------------------------------------- /ansible/storage/roles/os.storage/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /ansible/storage/roles/os.storage/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart smartd 3 | ansible.builtin.service: 4 | name: smartd.service 5 | daemon_reload: true 6 | enabled: true 7 | state: restarted 8 | -------------------------------------------------------------------------------- /ansible/storage/roles/os.storage/tasks/filesystem.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install filesystem tools 3 | ansible.builtin.apt: 4 | name: "{{ item }}" 5 | update_cache: true 6 | loop: 7 | - dmraid 8 | - gdisk 9 | - hdparm 10 | - lvm2 11 | - nfs-common 12 | - nfs-kernel-server 13 | - nvme-cli 14 | - open-iscsi 15 | - samba 16 | - smartmontools 17 | - socat 18 | 19 | - name: Configure smartd 20 | ansible.builtin.copy: 21 | dest: /etc/smartd.conf 22 | mode: 0644 23 | content: | 24 | DEVICESCAN -a -o on -S on -n standby,q -s (S/../.././02|L/../../6/03) -W 4,35,40 25 | notify: Restart smartd 26 | 27 | - name: Disable swap at runtime 28 | ansible.builtin.command: swapoff -a 29 | when: ansible_swaptotal_mb > 0 30 | 31 | - name: Disable swap on boot 32 | ansible.posix.mount: 33 | name: "{{ item }}" 34 | fstype: swap 35 | state: absent 36 | loop: 37 | - swap 38 | - none 39 | -------------------------------------------------------------------------------- /ansible/storage/roles/os.storage/tasks/locale.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set timezone
3 | community.general.timezone: 4 | name: "{{ os_timezone | default('America/New_York') }}" 5 | -------------------------------------------------------------------------------- /ansible/storage/roles/os.storage/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: user.yml 3 | tags: 4 | - user 5 | 6 | - import_tasks: locale.yml 7 | tags: 8 | - locale 9 | 10 | - import_tasks: packages.yml 11 | tags: 12 | - packages 13 | 14 | - import_tasks: network.yml 15 | tags: 16 | - network 17 | 18 | - import_tasks: filesystem.yml 19 | tags: 20 | - filesystem 21 | 22 | # - import_tasks: notifications.yml 23 | # tags: 24 | # - notifications 25 | 26 | - import_tasks: security.yml 27 | tags: 28 | - security 29 | -------------------------------------------------------------------------------- /ansible/storage/roles/os.storage/tasks/network.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install network tools 3 | ansible.builtin.apt: 4 | name: "{{ item }}" 5 | update_cache: true 6 | loop: 7 | - iputils-ping 8 | - net-tools 9 | 10 | - name: Set hostname to inventory hostname 11 | ansible.builtin.hostname: 12 | name: "{{ inventory_hostname }}" 13 | when: 14 | - ansible_hostname != inventory_hostname 15 | 16 | - name: Update /etc/hosts to include hostname 17 | ansible.builtin.blockinfile: 18 | path: /etc/hosts 19 | mode: 0644 20 | create: true 21 | block: | 22 | 127.0.0.1 localhost 23 | 127.0.1.1 {{ inventory_hostname }} 24 | 25 | # The following lines are desirable for IPv6 capable hosts 26 | ::1 ip6-localhost ip6-loopback 27 | fe00::0 ip6-localnet 28 | ff00::0 ip6-mcastprefix 29 | ff02::1 ip6-allnodes 30 | ff02::2 ip6-allrouters 31 | -------------------------------------------------------------------------------- /ansible/storage/roles/os.storage/tasks/notifications.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if msmtp log exists 3 | ansible.builtin.stat: 4 | path: "/var/log/msmtp" 5 | register: msmtp_status 6 | 7 | - name: Create msmtp log file 8 | ansible.builtin.file: 9 | dest: /var/log/msmtp 10 | state: touch 11 | owner: msmtp 12 | group: msmtp 13 | mode: 0660 14 | when: 15 | - not msmtp_status.stat.exists 16 | 17 | # /etc/zfs/zed.d/zed.rc 18 | # /etc/smartd.conf 19 | # /etc/msmtprc 20 | # /etc/aliases 21 | # systemctl enable zfs-zed 22 | # systemctl restart zfs-zed 23 | -------------------------------------------------------------------------------- /ansible/storage/roles/os.storage/templates/aliases.j2: -------------------------------------------------------------------------------- 1 | # /etc/aliases 2 | mailer-daemon: postmaster@ 3 | postmaster: root@ 4 | nobody: root@ 5 | hostmaster: root@ 6 | usenet: root@ 7 | news: root@ 8 | webmaster: root@ 9 | www: root@ 10 | ftp: root@ 11 | abuse: root@ 12 | noc: root@ 13 | security: root@ 14 | root: {{ ansible_user }}@, {{ email }} 15 | -------------------------------------------------------------------------------- /ansible/storage/roles/os.storage/templates/msmtprc.j2: -------------------------------------------------------------------------------- 1 | # /etc/msmtprc 2 | defaults 3 | auth on 4 | tls on 5 | tls_trust_file /etc/ssl/certs/ca-certificates.crt 6 | logfile /var/log/msmtp 7 | 8 | account {{ smtp_account_name }} 9 | host {{ smtp_address }} 10 | port {{ smtp_port }} 11 | from {{ smtp_from }} 12 | user {{ smtp_username }} 13 | password {{ smtp_password }} 14 | 
15 | account default: {{ smtp_account_name }} 16 | 17 | aliases /etc/aliases 18 | -------------------------------------------------------------------------------- /ansible/storage/roles/os.storage/templates/smartd.conf.j2: -------------------------------------------------------------------------------- 1 | # /etc/smartd.conf 2 | DEVICESCAN -a -o on -S on -n standby,q -s (S/../.././02|L/../../6/03) -W 4,35,55 -m {{ email }} 3 | -------------------------------------------------------------------------------- /ansible/storage/roles/os.storage/templates/zed.rc.j2: -------------------------------------------------------------------------------- 1 | # /etc/zfs/zed.d/zed.rc 2 | ZED_DEBUG_LOG="/var/log/zed.debug.log" 3 | ZED_EMAIL_ADDR="{{ email }}" 4 | ZED_EMAIL_OPTS="-s '@SUBJECT@' @ADDRESS@ -r {{ smtp_from }}" 5 | ZED_NOTIFY_VERBOSE=1 6 | ZED_NOTIFY_DATA=1 7 | -------------------------------------------------------------------------------- /ansible/storage/roles/os.storage/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # SECRET* vars are encrypted with sops 4 | # 5 | 6 | os_smtp_account_name: mailgun 7 | os_smtp_port: 587 8 | os_timezone: "America/New_York" 9 | os_ssh_authorized_keys: 10 | - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJnRHFdQWqxfTRuioNM4G3vZyWQy18Xty1+vQV0qm/6G devin@macbook" 11 | - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIB75L4ZEN17wcsK765dWzqlf0lY+sbK25QCIvebB/+1x devin@phone" 12 | os_packages_repositories: 13 | - ppa:fish-shell/release-3 14 | os_packages_install: 15 | - apt-transport-https 16 | - ca-certificates 17 | - containernetworking-plugins 18 | - curl 19 | - ffmpeg 20 | - figlet 21 | - fish 22 | - fzf 23 | - gettext 24 | - git 25 | - htop 26 | - lolcat 27 | - msmtp 28 | - msmtp-mta 29 | - nano 30 | - neofetch 31 | - ntpdate 32 | - podman 33 | - psmisc 34 | - rclone 35 | - software-properties-common 36 | - tmux 37 | - tree 38 | - uidmap 39 | - unzip 40 | - zfs-zed 41 | - zfsutils-linux 42 | os_packages_remove: 43 | - apparmor 44 | - apport 45 | - byobu 46 | - friendly-recovery 47 | - landscape-common 48 | - lxd-agent-loader 49 | - plymouth 50 | - plymouth-theme-ubuntu-text 51 | - popularity-contest 52 | - snapd 53 | - sosreport 54 | - ubuntu-advantage-tools 55 | - ufw 56 | -------------------------------------------------------------------------------- /cluster/apps/cert-manager/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: cert-manager 5 | configMapGenerator: 6 | - name: cert-manager-dashboard 7 | files: 8 | - cert-manager-dashboard.json=https://raw.githubusercontent.com/monitoring-mixins/website/master/assets/cert-manager/dashboards/cert-manager.json 9 | generatorOptions: 10 | disableNameSuffixHash: true 11 | annotations: 12 | kustomize.toolkit.fluxcd.io/substitute: disabled 13 | labels: 14 | grafana_dashboard: "true" 15 | -------------------------------------------------------------------------------- /cluster/apps/cert-manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - dashboard 6 | - prometheus-rule.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/default/discord-support-threads-bot/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - helm-release.yaml 7 | namespace: default 8 | configMapGenerator: 9 | - name: discord-support-threads-bot 10 | files: 11 | - messages.json 12 | generatorOptions: 13 | disableNameSuffixHash: true 14 | -------------------------------------------------------------------------------- /cluster/apps/default/discord-support-threads-bot/messages.json: -------------------------------------------------------------------------------- 1 | { 2 | "threadCreated": "Hi there! I have created this support thread for you. While awaiting a response, please be sure to review our support guidelines.", 3 | "threadResolveHint": "If you no longer need assistance, please use the `/resolve` command to archive this thread.", 4 | "threadResolved": "It appears that your problem has been resolved, so I've archived this thread. If you require further assistance with this issue, simply reply to unarchive the thread. If you have a question unrelated to your original inquiry, please open a new thread by posting in #support!", 5 | "errorNotThread": "You can only use this command inside of a support thread.", 6 | "errorNoPermission": "You do not have permission to perform this action. Is this your support thread?" 7 | } 8 | -------------------------------------------------------------------------------- /cluster/apps/default/gitea/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: gitea-config-v1 6 | namespace: default 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 10Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/default/gitea/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - secret.sops.yaml 7 | - helm-release.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/default/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - gitea 6 | - minio 7 | - discord-support-threads-bot 8 | - theme-park 9 | -------------------------------------------------------------------------------- /cluster/apps/default/minio/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - nfs-claim.yaml 7 | - helm-release.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/default/minio/nfs-claim.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolume 4 | metadata: 5 | name: minio-nfs 6 | spec: 7 | capacity: 8 | storage: 1Mi 9 | accessModes: 10 | - ReadWriteMany 11 | storageClassName: nfs 12 | persistentVolumeReclaimPolicy: Retain 13 | nfs: 14 | server: 
"expanse.${SECRET_PRIVATE_DOMAIN}" 15 | path: /tycho/Apps/External/Minio 16 | --- 17 | apiVersion: v1 18 | kind: PersistentVolumeClaim 19 | metadata: 20 | name: minio-nfs 21 | namespace: default 22 | spec: 23 | accessModes: 24 | - ReadWriteMany 25 | storageClassName: nfs 26 | resources: 27 | requests: 28 | storage: 1Mi 29 | -------------------------------------------------------------------------------- /cluster/apps/default/theme-park/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: flux-system 5 | configMapGenerator: 6 | - name: flux-cluster-dashboard 7 | files: 8 | - flux-cluster-dashboard.json=https://raw.githubusercontent.com/fluxcd/flux2/main/manifests/monitoring/grafana/dashboards/cluster.json 9 | - name: flux-control-plane-dashboard 10 | files: 11 | - flux-control-plane-dashboard.json=https://raw.githubusercontent.com/fluxcd/flux2/main/manifests/monitoring/grafana/dashboards/control-plane.json 12 | generatorOptions: 13 | disableNameSuffixHash: true 14 | annotations: 15 | kustomize.toolkit.fluxcd.io/substitute: disabled 16 | labels: 17 | grafana_dashboard: "true" 18 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - dashboard 6 | - monitoring 7 | - notifications 8 | - webhook 9 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/monitoring/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - pod-monitor.yaml 6 | - prometheus-rule.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/monitoring/pod-monitor.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PodMonitor 4 | metadata: 5 | name: flux-system 6 | namespace: flux-system 7 | labels: 8 | app.kubernetes.io/part-of: flux 9 | spec: 10 | namespaceSelector: 11 | matchNames: 12 | - flux-system 13 | selector: 14 | matchExpressions: 15 | - key: app 16 | operator: Exists 17 | podMetricsEndpoints: 18 | - port: http-prom 19 | honorLabels: true 20 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/monitoring/prometheus-rule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | name: flux 6 | namespace: flux-system 7 | spec: 8 | groups: 9 | - name: flux 10 | rules: 11 | - alert: FluxComponentAbsent 12 | annotations: 13 | description: Flux component has disappeared from Prometheus target discovery. 14 | summary: Flux component is down. 
15 | expr: | 16 | absent(up{job=~".*flux-system.*"} == 1) 17 | for: 15m 18 | labels: 19 | severity: critical 20 | - alert: FluxReconciliationFailure 21 | annotations: 22 | description: 23 | "{{ $labels.kind }} {{ $labels.namespace }}/{{ $labels.name }} reconciliation has been failing 24 | for more than fifteen minutes." 25 | summary: Flux reconciliation failure. 26 | expr: | 27 | max(gotk_reconcile_condition{status="False",type="Ready"}) by (namespace, name, kind) 28 | + 29 | on(namespace, name, kind) (max(gotk_reconcile_condition{status="Deleted"}) 30 | by (namespace, name, kind)) * 2 == 1 31 | for: 15m 32 | labels: 33 | severity: critical 34 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/notifications/alert-manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - notification.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/notifications/alert-manager/notification.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: notification.toolkit.fluxcd.io/v1beta1 3 | kind: Provider 4 | metadata: 5 | name: alert-manager 6 | namespace: flux-system 7 | spec: 8 | type: alertmanager 9 | address: http://kube-prometheus-stack-alertmanager.monitoring:9093/api/v2/alerts/ 10 | --- 11 | apiVersion: notification.toolkit.fluxcd.io/v1beta1 12 | kind: Alert 13 | metadata: 14 | name: alert-manager 15 | namespace: flux-system 16 | spec: 17 | providerRef: 18 | name: alert-manager 19 | eventSeverity: error 20 | eventSources: 21 | - kind: GitRepository 22 | name: "*" 23 | - kind: HelmRelease 24 | name: "*" 25 | - kind: HelmRepository 26 | name: "*" 27 | - kind: Kustomization 28 | name: "*" 29 | exclusionList: 30 | - "error.*lookup github\\.com" 31 | - "waiting.*socket" 32 | - "dial.*tcp.*timeout" 33 | suspend: false 34 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/notifications/github/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - notification.yaml 6 | - secret.sops.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/notifications/github/notification.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: notification.toolkit.fluxcd.io/v1beta1 3 | kind: Provider 4 | metadata: 5 | name: github 6 | namespace: flux-system 7 | spec: 8 | type: github 9 | address: https://github.com/onedr0p/home-ops 10 | secretRef: 11 | name: github-token 12 | --- 13 | apiVersion: notification.toolkit.fluxcd.io/v1beta1 14 | kind: Alert 15 | metadata: 16 | name: github 17 | namespace: flux-system 18 | spec: 19 | providerRef: 20 | name: github 21 | eventSeverity: info 22 | eventSources: 23 | - kind: Kustomization 24 | name: "*" 25 | - kind: HelmRelease 26 | name: "*" 27 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/notifications/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 |
resources: 5 | - alert-manager 6 | - github 7 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/webhook/github/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: webhook-receiver 6 | namespace: flux-system 7 | annotations: 8 | external-dns/is-public: "true" 9 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_PUBLIC_DOMAIN}" 10 | spec: 11 | ingressClassName: "nginx" 12 | rules: 13 | - host: "flux-webhook.${SECRET_PUBLIC_DOMAIN}" 14 | http: 15 | paths: 16 | - path: /hook/ 17 | pathType: Prefix 18 | backend: 19 | service: 20 | name: webhook-receiver 21 | port: 22 | number: 80 23 | tls: 24 | - hosts: 25 | - "flux-webhook.${SECRET_PUBLIC_DOMAIN}" 26 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/webhook/github/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - ingress.yaml 7 | - receiver.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/flux-system/webhook/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - github 6 | -------------------------------------------------------------------------------- /cluster/apps/home/emqx/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/home/frigate/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: frigate-config-v2 6 | namespace: home 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 5Gi 13 | storageClassName: ceph-block 14 | -------------------------------------------------------------------------------- /cluster/apps/home/frigate/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - helm-release.yaml 7 | - secret.sops.yaml 8 | namespace: home 9 | configMapGenerator: 10 | - name: frigate 11 | files: 12 | - config.yaml 13 | generatorOptions: 14 | disableNameSuffixHash: true 15 | annotations: 16 | kustomize.toolkit.fluxcd.io/substitute: disabled 17 | -------------------------------------------------------------------------------- /cluster/apps/home/home-assistant/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: home-assistant-config-v1 6 | namespace: home 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 5Gi 15 | storageClassName: ceph-block 16 | 
-------------------------------------------------------------------------------- /cluster/apps/home/home-assistant/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - code-server-ssh-key.sops.yaml 7 | - home-assistant.sops.yaml 8 | - helm-release.yaml 9 | -------------------------------------------------------------------------------- /cluster/apps/home/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - emqx 6 | - frigate 7 | - home-assistant 8 | # - mosquitto 9 | - node-red 10 | - zigbee2mqtt 11 | - zwavejs2mqtt 12 | -------------------------------------------------------------------------------- /cluster/apps/home/mosquitto/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: mosquitto-config-v1 6 | namespace: home 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 100Mi 13 | storageClassName: ceph-block 14 | -------------------------------------------------------------------------------- /cluster/apps/home/mosquitto/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: mosquitto 6 | namespace: home 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: mosquitto 12 | version: 4.3.2 13 | sourceRef: 14 | kind: HelmRepository 15 | name: k8s-at-home-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | createNamespace: true 20 | values: 21 | image: 22 | repository: ghcr.io/k8s-at-home/eclipse-mosquitto 23 | tag: v2.0.14 24 | env: 25 | TZ: "${TIMEZONE}" 26 | service: 27 | main: 28 | type: LoadBalancer 29 | externalIPs: 30 | - "${SVC_MOSQUITTO_ADDR}" 31 | externalTrafficPolicy: Local 32 | auth: 33 | enabled: true 34 | podAnnotations: 35 | secret.reloader.stakater.com/reload: mosquitto 36 | persistence: 37 | data: 38 | enabled: true 39 | existingClaim: mosquitto-config-v1 40 | configinc: 41 | enabled: true 42 | type: custom 43 | volumeSpec: 44 | secret: 45 | secretName: mosquitto 46 | resources: 47 | requests: 48 | cpu: 100m 49 | memory: 250Mi 50 | limits: 51 | memory: 500Mi 52 | -------------------------------------------------------------------------------- /cluster/apps/home/mosquitto/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - secret.sops.yaml 7 | - helm-release.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/home/node-red/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: node-red-config-v1 6 | namespace: home 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- 
/cluster/apps/home/node-red/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/home/zigbee2mqtt/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: zigbee2mqtt-config-v2 6 | namespace: home 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/home/zigbee2mqtt/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/home/zwavejs2mqtt/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: zwavejs2mqtt-config-v1 6 | namespace: home 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/home/zwavejs2mqtt/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/k10/blueprints/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - pod-spec-override.yaml 7 | - home.yaml 8 | - media.yaml 9 | - k10-disaster-recovery.yaml 10 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/k10/blueprints/pod-spec-override.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: pod-spec-override 6 | namespace: kasten-io 7 | data: 8 | override: | 9 | kind: Pod 10 | spec: 11 | nodeSelector: 12 | node-role.kubernetes.io/worker: "true" 13 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/k10/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: k10 6 | namespace: kasten-io 7 | spec: 8 | releaseName: k10 9 | interval: 5m 10 | chart: 11 | spec: 12 | chart: k10 13 | version: 4.5.15 14 | sourceRef: 15 | kind: HelmRepository 16 | name: kasten-charts 17 | namespace: flux-system 18 | interval: 5m 19 | install: 20 | createNamespace: true 21 | crds: 
CreateReplace 22 | remediation: 23 | retries: 5 24 | upgrade: 25 | crds: CreateReplace 26 | remediation: 27 | retries: 5 28 | values: 29 | eula: 30 | accept: true 31 | company: onedr0p 32 | email: onedr0p@users.noreply.github.com 33 | global: 34 | persistence: 35 | storageClass: ceph-block 36 | auth: 37 | tokenAuth: 38 | enabled: true 39 | clusterName: k8s 40 | ingress: 41 | create: true 42 | class: nginx 43 | host: &host "k10.${SECRET_PUBLIC_DOMAIN}" 44 | urlPath: k10 45 | hosts: 46 | - *host 47 | tls: 48 | enabled: true 49 | secretName: "${SECRET_PUBLIC_DOMAIN/./-}-tls" 50 | grafana: 51 | enabled: false 52 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/k10/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - monitoring 7 | - blueprints 8 | - profiles 9 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/k10/monitoring/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - prometheus-rule.yaml 6 | - service-monitor.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/k10/monitoring/prometheus-rule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | labels: 6 | prometheus: k8s 7 | role: alert-rules 8 | name: kasten-io 9 | namespace: kasten-io 10 | spec: 11 | groups: 12 | - name: kasten.rules 13 | rules: 14 | - alert: JobsFailing 15 | annotations: 16 | summary: One or more failed K10 jobs occurred for the {{ $labels.policy }} policy in the last 10 minutes 17 | expr: increase(catalog_actions_count{status="failed"}[10m]) > 0 18 | for: 15m 19 | labels: 20 | severity: critical 21 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/k10/monitoring/service-monitor.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: ServiceMonitor 4 | metadata: 5 | name: k10 6 | namespace: kasten-io 7 | spec: 8 | namespaceSelector: 9 | matchNames: 10 | - kasten-io 11 | selector: 12 | matchLabels: 13 | app: prometheus 14 | endpoints: 15 | - port: http 16 | scheme: http 17 | path: /k10/prometheus/federate 18 | honorLabels: true 19 | interval: 1m 20 | params: 21 | "match[]": 22 | - '{__name__=~"jobs.*"}' 23 | - '{__name__=~"catalog.*"}' 24 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/k10/profiles/home.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Profile 3 | apiVersion: config.kio.kasten.io/v1alpha1 4 | metadata: 5 | name: home 6 | namespace: kasten-io 7 | spec: 8 | locationSpec: 9 | type: FileStore 10 | fileStore: 11 | claimName: k10-backups 12 | path: home 13 | credential: 14 | secretType: "" 15 | secret: 16 | apiVersion: "" 17 | kind: "" 18 | name: "" 19 | namespace: "" 20 | type: Location 21 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/k10/profiles/k10-backups-pvc.yaml:
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolume 4 | metadata: 5 | name: k10-backups 6 | spec: 7 | storageClassName: k10-backup-nfs 8 | capacity: 9 | storage: 1Mi 10 | accessModes: 11 | - ReadWriteMany 12 | persistentVolumeReclaimPolicy: Retain 13 | nfs: 14 | server: "expanse.${SECRET_PRIVATE_DOMAIN}" 15 | path: /tycho/Apps/External/Kasten 16 | --- 17 | apiVersion: v1 18 | kind: PersistentVolumeClaim 19 | metadata: 20 | name: k10-backups 21 | namespace: kasten-io 22 | spec: 23 | accessModes: 24 | - ReadWriteMany 25 | storageClassName: k10-backup-nfs 26 | resources: 27 | requests: 28 | storage: 1Mi 29 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/k10/profiles/k10-disaster-recovery.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Profile 3 | apiVersion: config.kio.kasten.io/v1alpha1 4 | metadata: 5 | name: k10-disaster-recovery 6 | namespace: kasten-io 7 | spec: 8 | locationSpec: 9 | type: FileStore 10 | fileStore: 11 | claimName: k10-backups 12 | path: k10-disaster-recovery 13 | credential: 14 | secretType: "" 15 | secret: 16 | apiVersion: "" 17 | kind: "" 18 | name: "" 19 | namespace: "" 20 | type: Location 21 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/k10/profiles/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - k10-backups-pvc.yaml 6 | - home.yaml 7 | - media.yaml 8 | - k10-disaster-recovery.yaml 9 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/k10/profiles/media.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Profile 3 | apiVersion: config.kio.kasten.io/v1alpha1 4 | metadata: 5 | name: media 6 | namespace: kasten-io 7 | spec: 8 | locationSpec: 9 | type: FileStore 10 | fileStore: 11 | claimName: k10-backups 12 | path: media 13 | credential: 14 | secretType: "" 15 | secret: 16 | apiVersion: "" 17 | kind: "" 18 | name: "" 19 | namespace: "" 20 | type: Location 21 | -------------------------------------------------------------------------------- /cluster/apps/kasten-io/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - k10 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/descheduler/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/external-snapshotter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - github.com/kubernetes-csi/external-snapshotter//deploy/kubernetes/snapshot-controller?ref=v5.0.1 6 | images: 7 | - name: gcr.io/k8s-staging-sig-storage/snapshot-controller 8 | newTag: v5.0.1 9 | patchesStrategicMerge: 10 | - |- 11 | 
kind: Deployment 12 | apiVersion: apps/v1 13 | metadata: 14 | name: snapshot-controller 15 | namespace: kube-system 16 | spec: 17 | replicas: 3 18 | template: 19 | spec: 20 | containers: 21 | - name: snapshot-controller 22 | topologySpreadConstraints: 23 | - maxSkew: 1 24 | topologyKey: kubernetes.io/hostname 25 | whenUnsatisfiable: DoNotSchedule 26 | labelSelector: 27 | matchLabels: 28 | app: snapshot-controller 29 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/home-dns/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - rbac.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/home-dns/rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: home-dns 6 | namespace: kube-system 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: ClusterRole 10 | metadata: 11 | name: home-dns 12 | rules: 13 | - apiGroups: 14 | - "" 15 | resources: 16 | - services 17 | - namespaces 18 | verbs: 19 | - list 20 | - watch 21 | - apiGroups: 22 | - extensions 23 | - networking.k8s.io 24 | resources: 25 | - ingresses 26 | verbs: 27 | - list 28 | - watch 29 | --- 30 | apiVersion: rbac.authorization.k8s.io/v1 31 | kind: ClusterRoleBinding 32 | metadata: 33 | name: home-dns 34 | roleRef: 35 | apiGroup: rbac.authorization.k8s.io 36 | kind: ClusterRole 37 | name: home-dns 38 | subjects: 39 | - kind: ServiceAccount 40 | name: home-dns 41 | namespace: kube-system 42 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/intel-gpu-plugin/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - descheduler 6 | - external-snapshotter 7 | - home-dns 8 | - intel-gpu-plugin 9 | - metrics-server 10 | - nfs-subdir-external-provisioner 11 | - node-feature-discovery 12 | - reflector 13 | - reloader 14 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/metrics-server/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: metrics-server 6 | namespace: kube-system 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: metrics-server 12 | version: 3.8.2 13 | sourceRef: 14 | kind: HelmRepository 15 | name: metrics-server-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | createNamespace: true 20 | values: 21 | args: 22 | - --kubelet-insecure-tls 23 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 24 | - --kubelet-use-node-status-port 25 | - --metric-resolution=15s 26 | -------------------------------------------------------------------------------- 
/cluster/apps/kube-system/metrics-server/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/nfs-subdir-external-provisioner/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: nfs-subdir-external-provisioner 6 | namespace: kube-system 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: nfs-subdir-external-provisioner 12 | version: 4.0.16 13 | sourceRef: 14 | kind: HelmRepository 15 | name: nfs-subdir-external-provisioner-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | createNamespace: true 20 | values: 21 | replicaCount: 3 22 | nfs: 23 | server: "expanse.${SECRET_PRIVATE_DOMAIN}" 24 | path: /tycho/Apps/External/PVCs 25 | mountOptions: 26 | - noatime 27 | storageClass: 28 | defaultClass: false 29 | affinity: 30 | podAntiAffinity: 31 | preferredDuringSchedulingIgnoredDuringExecution: 32 | - weight: 100 33 | podAffinityTerm: 34 | labelSelector: 35 | matchExpressions: 36 | - key: app 37 | operator: In 38 | values: 39 | - nfs-subdir-external-provisioner 40 | topologyKey: kubernetes.io/hostname 41 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/nfs-subdir-external-provisioner/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/node-feature-discovery/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/reflector/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: reflector 6 | namespace: kube-system 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: reflector 12 | version: 6.1.47 13 | sourceRef: 14 | kind: HelmRepository 15 | name: emberstack-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | createNamespace: true 20 | values: 21 | configuration: 22 | logging: 23 | minimumLevel: Debug 24 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/reflector/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/reloader/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: reloader 6 | namespace: 
kube-system 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: reloader 12 | version: v0.0.110 13 | sourceRef: 14 | kind: HelmRepository 15 | name: stakater-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | createNamespace: true 20 | values: 21 | fullnameOverride: reloader 22 | reloader: 23 | podMonitor: 24 | enabled: true 25 | namespace: kube-system 26 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/reloader/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - cert-manager 6 | - default 7 | - flux-system 8 | - home 9 | - kasten-io 10 | - kube-system 11 | - media 12 | - monitoring 13 | - networking 14 | - rook-ceph 15 | - system-upgrade 16 | -------------------------------------------------------------------------------- /cluster/apps/media/jellyfin/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: jellyfin-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 30Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/media/jellyfin/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - config-pvc.yaml 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/media/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - jellyfin 6 | - lidarr 7 | - media-browser 8 | - overseerr 9 | - plex 10 | - prowlarr 11 | - qbittorrent 12 | - radarr 13 | - radarr-uhd 14 | - readarr 15 | - sabnzbd 16 | - sonarr 17 | - sonarr-uhd 18 | - tautulli 19 | - trash-updater 20 | - unpackerr 21 | -------------------------------------------------------------------------------- /cluster/apps/media/lidarr/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: lidarr-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 10Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/media/lidarr/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: media 5 | configMapGenerator: 6 | - name: lidarr-dashboard 7 | files: 8 | - 
lidarr-dashboard.json=dashboard.json 9 | generatorOptions: 10 | disableNameSuffixHash: true 11 | annotations: 12 | kustomize.toolkit.fluxcd.io/substitute: disabled 13 | labels: 14 | grafana_dashboard: "true" 15 | -------------------------------------------------------------------------------- /cluster/apps/media/lidarr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - dashboard 6 | - config-pvc.yaml 7 | - helm-release.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/media/media-browser/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: media-browser-config-v1 6 | namespace: media 7 | spec: 8 | accessModes: 9 | - ReadWriteOnce 10 | resources: 11 | requests: 12 | storage: 1Gi 13 | storageClassName: ceph-block 14 | -------------------------------------------------------------------------------- /cluster/apps/media/media-browser/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/overseerr/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: overseerr-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/media/overseerr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/plex/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: plex-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 100Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/media/plex/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/prowlarr/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: prowlarr-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 
10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 2Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/media/prowlarr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/qbittorrent/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: qbittorrent-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/media/qbittorrent/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: media 5 | configMapGenerator: 6 | - name: qbittorrent-dashboard 7 | files: 8 | - qbittorrent-dashboard.json=dashboard.json 9 | generatorOptions: 10 | disableNameSuffixHash: true 11 | annotations: 12 | kustomize.toolkit.fluxcd.io/substitute: disabled 13 | labels: 14 | grafana_dashboard: "true" 15 | -------------------------------------------------------------------------------- /cluster/apps/media/qbittorrent/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - dashboard 6 | - config-pvc.yaml 7 | - helm-release.yaml 8 | - tag-tracker-errors 9 | -------------------------------------------------------------------------------- /cluster/apps/media/qbittorrent/tag-tracker-errors/cron-job.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: CronJob 4 | metadata: 5 | name: tag-tracker-errors 6 | namespace: media 7 | spec: 8 | schedule: "0 * * * *" 9 | concurrencyPolicy: "Forbid" 10 | successfulJobsHistoryLimit: 3 11 | failedJobsHistoryLimit: 5 12 | jobTemplate: 13 | spec: 14 | backoffLimit: 3 15 | ttlSecondsAfterFinished: 300 16 | template: 17 | spec: 18 | automountServiceAccountToken: false 19 | restartPolicy: Never 20 | containers: 21 | - name: qbittorrent-scripts 22 | image: ghcr.io/k8s-at-home/qbittorrent-scripts:v1.0.2 23 | env: 24 | - name: QB_HOST 25 | value: http://qbittorrent:8080 26 | - name: QB_TAG 27 | value: "Tracker Errors" 28 | args: 29 | - "/app/tag-tracker-errors.py" 30 | -------------------------------------------------------------------------------- /cluster/apps/media/qbittorrent/tag-tracker-errors/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - cron-job.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/media/radarr-uhd/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | 
kind: PersistentVolumeClaim 4 | metadata: 5 | name: radarr-uhd-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 5Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/media/radarr-uhd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/radarr/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: radarr-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 15Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/media/radarr/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: media 5 | configMapGenerator: 6 | - name: radarr-dashboard 7 | files: 8 | - radarr-dashboard.json=dashboard.json 9 | generatorOptions: 10 | disableNameSuffixHash: true 11 | annotations: 12 | kustomize.toolkit.fluxcd.io/substitute: disabled 13 | labels: 14 | grafana_dashboard: "true" 15 | -------------------------------------------------------------------------------- /cluster/apps/media/radarr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - dashboard 6 | - config-pvc.yaml 7 | - helm-release.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/media/readarr/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: readarr-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/media/readarr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/sabnzbd/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: sabnzbd-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | storageClassName: ceph-block 16 | 
-------------------------------------------------------------------------------- /cluster/apps/media/sabnzbd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/sonarr-uhd/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: sonarr-uhd-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 5Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/media/sonarr-uhd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/sonarr/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: sonarr-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 10Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/media/sonarr/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: media 5 | configMapGenerator: 6 | - name: sonarr-dashboard 7 | files: 8 | - sonarr-dashboard.json=dashboard.json 9 | generatorOptions: 10 | disableNameSuffixHash: true 11 | annotations: 12 | kustomize.toolkit.fluxcd.io/substitute: disabled 13 | labels: 14 | grafana_dashboard: "true" 15 | -------------------------------------------------------------------------------- /cluster/apps/media/sonarr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - dashboard 6 | - config-pvc.yaml 7 | - helm-release.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/media/tautulli/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: tautulli-config-v1 6 | namespace: media 7 | labels: 8 | kasten.io/backup-volume: "enabled" 9 | spec: 10 | accessModes: 11 | - ReadWriteOnce 12 | resources: 13 | requests: 14 | storage: 10Gi 15 | storageClassName: ceph-block 16 | -------------------------------------------------------------------------------- /cluster/apps/media/tautulli/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | 
- config-pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/trash-updater/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - cron-job.yaml 7 | namespace: media 8 | configMapGenerator: 9 | - name: trash-updater-config 10 | files: 11 | - trash.yaml 12 | generatorOptions: 13 | disableNameSuffixHash: true 14 | annotations: 15 | kustomize.toolkit.fluxcd.io/substitute: disabled 16 | -------------------------------------------------------------------------------- /cluster/apps/media/unpackerr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/blackbox-exporter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/generic-rules/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - zfs 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/generic-rules/zfs/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - prometheus-rule.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/generic-rules/zfs/prometheus-rule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | labels: 6 | prometheus: k8s 7 | role: alert-rules 8 | name: zfs-rules 9 | namespace: monitoring 10 | spec: 11 | groups: 12 | - name: zfs.rules 13 | rules: 14 | - alert: ZFSPoolNotHealthy 15 | annotations: 16 | summary: ZFS Pool {{$labels.zpool}} on {{$labels.instance}} 17 | is currently {{$labels.state}} 18 | expr: | 19 | node_zfs_zpool_state{state="online"} != 1 20 | for: 15m 21 | labels: 22 | severity: critical 23 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/goldilocks/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: goldilocks 6 | namespace: monitoring 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: goldilocks 12 | version: 6.1.2 13 | sourceRef: 14 | kind: HelmRepository 15 | name: fairwinds-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | createNamespace: true 20 | remediation: 21 | retries: 5 22 | upgrade: 23 | remediation: 24 | retries: 5 25 | values: 26 | controller: 27 | resources: 28 | requests: 29 | cpu: 100m 30 | memory: 64M 31 | 
limits: 32 | memory: 250M 33 | dashboard: 34 | replicaCount: 1 35 | ingress: 36 | enabled: true 37 | ingressClassName: "nginx" 38 | hosts: 39 | - host: &host "goldilocks.${SECRET_PUBLIC_DOMAIN}" 40 | paths: 41 | - path: / 42 | type: Prefix 43 | tls: 44 | - hosts: 45 | - *host 46 | resources: 47 | requests: 48 | cpu: 100m 49 | memory: 64Mi 50 | limits: 51 | memory: 100M 52 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/goldilocks/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/grafana/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/kube-prometheus-stack/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - blackbox-exporter 6 | - generic-rules 7 | - goldilocks 8 | - grafana 9 | - kube-prometheus-stack 10 | - loki 11 | - node-problem-detector 12 | - snmp-exporter 13 | - thanos 14 | - unifi-poller 15 | - uptimerobot-heartbeat 16 | - vector 17 | - vpa 18 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/loki/ceph-buckets/chunks.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: objectbucket.io/v1alpha1 3 | kind: ObjectBucketClaim 4 | metadata: 5 | name: loki-chunks-bucket 6 | namespace: monitoring 7 | spec: 8 | bucketName: loki-chunks 9 | storageClassName: ceph-bucket 10 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/loki/ceph-buckets/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - chunks.yaml 6 | - ruler.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/loki/ceph-buckets/ruler.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: objectbucket.io/v1alpha1 3 | kind: ObjectBucketClaim 4 | metadata: 5 | name: loki-ruler-bucket 6 | namespace: monitoring 7 | spec: 8 | bucketName: loki-ruler 9 | storageClassName: ceph-bucket 10 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/loki/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - ceph-buckets 
6 | - config-map.yaml 7 | - helm-release.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/node-problem-detector/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: node-problem-detector 6 | namespace: monitoring 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: node-problem-detector 12 | version: 2.2.1 13 | sourceRef: 14 | kind: HelmRepository 15 | name: deliveryhero-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | createNamespace: true 20 | values: 21 | metrics: 22 | enabled: true 23 | serviceMonitor: 24 | enabled: true 25 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/node-problem-detector/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/snmp-exporter/apc-ups/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: monitoring 5 | configMapGenerator: 6 | - name: apc-ups-dashboard 7 | files: 8 | - apc-ups-dashboard.json=dashboard.json 9 | generatorOptions: 10 | disableNameSuffixHash: true 11 | annotations: 12 | kustomize.toolkit.fluxcd.io/substitute: disabled 13 | labels: 14 | grafana_dashboard: "true" 15 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/snmp-exporter/apc-ups/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: snmp-exporter-apc-ups 6 | namespace: monitoring 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: prometheus-snmp-exporter 12 | version: 1.1.0 13 | sourceRef: 14 | kind: HelmRepository 15 | name: prometheus-community-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | createNamespace: true 20 | values: 21 | fullnameOverride: snmp-exporter-apc-ups 22 | image: 23 | repository: quay.io/prometheus/snmp-exporter 24 | extraArgs: 25 | - "--config.file=/config/snmp.yaml" 26 | extraConfigmapMounts: 27 | - name: snmp-exporter-apc-ups 28 | mountPath: /config/snmp.yaml 29 | subPath: snmp.yaml 30 | configMap: snmp-exporter-apc-ups 31 | readOnly: true 32 | defaultMode: 420 33 | serviceMonitor: 34 | enabled: true 35 | namespace: monitoring 36 | params: 37 | - name: apc-ups-0 38 | module: 39 | - apcups 40 | target: apc-ups-0 41 | path: /snmp 42 | scrapeTimeout: 10s 43 | relabelings: 44 | - sourceLabels: [__param_target] 45 | targetLabel: instance 46 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/snmp-exporter/apc-ups/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-map.yaml 6 | - helm-release.yaml 7 | - prometheus-rule.yaml 8 | -------------------------------------------------------------------------------- 
/cluster/apps/monitoring/snmp-exporter/apc-ups/prometheus-rule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | labels: 6 | prometheus: k8s 7 | role: alert-rules 8 | name: ups-rules 9 | namespace: monitoring 10 | spec: 11 | groups: 12 | - name: ups.rules 13 | rules: 14 | - alert: UPSOnBattery 15 | annotations: 16 | summary: ZPM {{$labels.instance}} is running on batteries 17 | and has less than 20 minutes of battery left 18 | expr: | 19 | ( 20 | upsAdvBatteryRunTimeRemaining/60/100 <= 20 21 | and 22 | upsBasicBatteryTimeOnBattery > 0 23 | ) 24 | for: 1m 25 | labels: 26 | severity: critical 27 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/snmp-exporter/cyberpower-pdu/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: monitoring 5 | configMapGenerator: 6 | - name: cyberpower-pdu-dashboard 7 | files: 8 | - cyberpower-pdu-dashboard.json=dashboard.json 9 | generatorOptions: 10 | disableNameSuffixHash: true 11 | annotations: 12 | kustomize.toolkit.fluxcd.io/substitute: disabled 13 | labels: 14 | grafana_dashboard: "true" 15 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/snmp-exporter/cyberpower-pdu/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - dashboard 6 | - config-map.yaml 7 | - helm-release.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/snmp-exporter/dell-idrac/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: monitoring 5 | configMapGenerator: 6 | - name: dell-idrac-dashboard 7 | files: 8 | - dell-idrac-dashboard.json=dashboard.json 9 | generatorOptions: 10 | disableNameSuffixHash: true 11 | annotations: 12 | kustomize.toolkit.fluxcd.io/substitute: disabled 13 | labels: 14 | grafana_dashboard: "true" 15 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/snmp-exporter/dell-idrac/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - dashboard 6 | - config-map.yaml 7 | - helm-release.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/snmp-exporter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - apc-ups 6 | - cyberpower-pdu 7 | - dell-idrac 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - dashboard 6 | - object-bucket-claim.yaml 7 | - 
helm-release.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/object-bucket-claim.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: objectbucket.io/v1alpha1 3 | kind: ObjectBucketClaim 4 | metadata: 5 | name: thanos-ceph-bucket 6 | namespace: monitoring 7 | spec: 8 | bucketName: thanos 9 | storageClassName: ceph-bucket 10 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/unifi-poller/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: unifi-poller 6 | namespace: monitoring 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: unifi-poller 12 | version: 10.3.3 13 | sourceRef: 14 | kind: HelmRepository 15 | name: k8s-at-home-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | createNamespace: true 20 | values: 21 | image: 22 | repository: ghcr.io/k8s-at-home/unpoller 23 | tag: 2.1.3 24 | env: 25 | TZ: "${TIMEZONE}" 26 | UP_UNIFI_DEFAULT_ROLE: "homelab-controller" 27 | UP_UNIFI_DEFAULT_URL: "https://unifi.${SECRET_PRIVATE_DOMAIN}" 28 | UP_UNIFI_DEFAULT_VERIFY_SSL: false 29 | UP_UNIFI_DEFAULT_USER: "unifipoller" 30 | UP_UNIFI_DEFAULT_PASS: "unifipoller" 31 | UP_UNIFI_DEFAULT_SAVE_SITES: true 32 | UP_UNIFI_DEFAULT_SAVE_DPI: false 33 | UP_INFLUXDB_DISABLE: true 34 | UP_PROMETHEUS_DISABLE: false 35 | UP_PROMETHEUS_NAMESPACE: "unifipoller" 36 | metrics: 37 | enabled: true 38 | serviceMonitor: 39 | interval: 2m 40 | scrapeTimeout: 30s 41 | prometheusRule: 42 | enabled: false 43 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/unifi-poller/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/uptimerobot-heartbeat/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - cron-job.yaml 6 | - secret.sops.yaml 7 | namespace: monitoring 8 | configMapGenerator: 9 | - name: uptimerobot-heartbeat 10 | files: 11 | - uptimerobot-heartbeat.sh 12 | generatorOptions: 13 | disableNameSuffixHash: true 14 | annotations: 15 | kustomize.toolkit.fluxcd.io/substitute: disabled 16 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/uptimerobot-heartbeat/uptimerobot-heartbeat.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [[ -z "${UPTIMEROBOT_HEARTBEAT_URL}" ]]; then 4 | printf "%s - Yikes - Missing UPTIMEROBOT_HEARTBEAT_URL environment variable" "$(date -u)" 5 | exit 0 6 | fi 7 | 8 | status_code=$(curl --connect-timeout 10 --max-time 30 -I -s -o /dev/null -w '%{http_code}' "${UPTIMEROBOT_HEARTBEAT_URL}") 9 | if [[ ! 
${status_code} =~ ^[2|3][0-9]{2}$ ]]; then 10 | printf "%s - Yikes - Heartbeat request failed, http code: %s" "$(date -u)" "${status_code}" 11 | exit 0 12 | fi 13 | 14 | printf "%s - Success - Heartbeat request received and processed successfully" "$(date -u)" 15 | exit 0 16 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/vector/agent/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/vector/aggregator/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/vector/geoipupdate/config-pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolumeClaim 4 | metadata: 5 | name: vector-geoipupdate-config-v1 6 | namespace: monitoring 7 | labels: 8 | excluded_from_alerts: "true" 9 | spec: 10 | accessModes: 11 | - ReadWriteMany 12 | resources: 13 | requests: 14 | storage: 1Gi 15 | storageClassName: ceph-filesystem 16 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/vector/geoipupdate/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - cron-job.yaml 6 | - config-pvc.yaml 7 | - secret.sops.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/vector/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - geoipupdate 6 | - agent 7 | - aggregator 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/vpa/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: vpa 6 | namespace: monitoring 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: vpa 12 | version: 1.3.1 13 | sourceRef: 14 | kind: HelmRepository 15 | name: fairwinds-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | createNamespace: true 20 | remediation: 21 | retries: 5 22 | upgrade: 23 | remediation: 24 | retries: 5 25 | values: 26 | recommender: 27 | enabled: true 28 | extraArgs: 29 | storage: prometheus 30 | prometheus-address: |- 31 | http://thanos-query.monitoring.svc.cluster.local:9090 32 | updater: 33 | enabled: false 34 | admissionController: 35 | enabled: false 36 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/vpa/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | 
-------------------------------------------------------------------------------- /cluster/apps/networking/cloudflare-ddns/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - cron-job.yaml 6 | - secret.sops.yaml 7 | namespace: networking 8 | configMapGenerator: 9 | - name: cloudflare-ddns 10 | files: 11 | - cloudflare-ddns.sh 12 | generatorOptions: 13 | disableNameSuffixHash: true 14 | annotations: 15 | kustomize.toolkit.fluxcd.io/substitute: disabled 16 | -------------------------------------------------------------------------------- /cluster/apps/networking/echo-server/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/networking/external-dns/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: external-dns 6 | namespace: networking 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: external-dns 12 | version: 1.9.0 13 | sourceRef: 14 | kind: HelmRepository 15 | name: external-dns-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | createNamespace: true 20 | values: 21 | interval: 5m 22 | logLevel: debug 23 | provider: cloudflare 24 | env: 25 | - name: CF_API_EMAIL 26 | valueFrom: 27 | secretKeyRef: 28 | name: cloudflare-creds 29 | key: email 30 | - name: CF_API_KEY 31 | valueFrom: 32 | secretKeyRef: 33 | name: cloudflare-creds 34 | key: api-key 35 | extraArgs: 36 | - --cloudflare-proxied 37 | - --annotation-filter=external-dns/is-public in (true) 38 | policy: sync 39 | sources: 40 | - ingress 41 | txtPrefix: "k8s." 
42 | txtOwnerId: "default" 43 | domainFilters: 44 | - "${SECRET_PUBLIC_DOMAIN}" 45 | resources: 46 | requests: 47 | memory: 100Mi 48 | cpu: 25m 49 | limits: 50 | memory: 250Mi 51 | -------------------------------------------------------------------------------- /cluster/apps/networking/external-dns/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/networking/ingress-nginx/cloudflare-proxied-networks.txt: -------------------------------------------------------------------------------- 1 | 173.245.48.0/20\,103.21.244.0/22\,103.22.200.0/22\,103.31.4.0/22\,141.101.64.0/18\,108.162.192.0/18\,190.93.240.0/20\,188.114.96.0/20\,197.234.240.0/22\,198.41.128.0/17\,162.158.0.0/15\,104.16.0.0/13\,104.24.0.0/14\,172.64.0.0/13\,131.0.72.0/22\,2400:cb00::/32\,2606:4700::/32\,2803:f800::/32\,2405:b500::/32\,2405:8100::/32\,2a06:98c0::/29\,2c0f:f248::/32 2 | -------------------------------------------------------------------------------- /cluster/apps/networking/ingress-nginx/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: networking 5 | configMapGenerator: 6 | - name: nginx-dashboard 7 | files: 8 | - nginx-dashboard.json=https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/grafana/dashboards/nginx.json 9 | generatorOptions: 10 | disableNameSuffixHash: true 11 | annotations: 12 | kustomize.toolkit.fluxcd.io/substitute: disabled 13 | labels: 14 | grafana_dashboard: "true" 15 | -------------------------------------------------------------------------------- /cluster/apps/networking/ingress-nginx/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: networking 5 | resources: 6 | - dashboard 7 | - helm-release.yaml 8 | configMapGenerator: 9 | - name: cloudflare-proxied-networks 10 | files: 11 | - cloudflare-proxied-networks.txt 12 | generatorOptions: 13 | disableNameSuffixHash: true 14 | -------------------------------------------------------------------------------- /cluster/apps/networking/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - cloudflare-ddns 6 | - echo-server 7 | - external-dns 8 | - ingress-nginx 9 | - opnsense 10 | - wildcard-certificate 11 | -------------------------------------------------------------------------------- /cluster/apps/networking/opnsense/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: networking 5 | configMapGenerator: 6 | - name: opnsense-dashboard 7 | files: 8 | - opnsense-dashboard.json=dashboard.json 9 | generatorOptions: 10 | disableNameSuffixHash: true 11 | annotations: 12 | kustomize.toolkit.fluxcd.io/substitute: disabled 13 | labels: 14 | grafana_dashboard: "true" 15 | -------------------------------------------------------------------------------- 
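The dashboard kustomizations above (nginx, opnsense, and the earlier monitoring ones) generate ConfigMaps labeled grafana_dashboard: "true". A quick spot-check sketch, assuming a Grafana dashboard sidecar is configured elsewhere in this repo to watch that label across namespaces:

# List generated dashboard ConfigMaps by the label the Grafana sidecar is assumed to watch.
kubectl get configmaps -A -l grafana_dashboard=true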
/cluster/apps/networking/opnsense/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - dashboard 6 | -------------------------------------------------------------------------------- /cluster/apps/networking/wildcard-certificate/certificate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: Certificate 4 | metadata: 5 | name: "${SECRET_PUBLIC_DOMAIN/./-}" 6 | namespace: networking 7 | spec: 8 | secretTemplate: 9 | annotations: 10 | reflector.v1.k8s.emberstack.com/reflection-auto-enabled: "true" 11 | reflector.v1.k8s.emberstack.com/reflection-auto-namespaces: "kasten-io" 12 | secretName: "${SECRET_PUBLIC_DOMAIN/./-}-tls" 13 | issuerRef: 14 | name: letsencrypt-production 15 | kind: ClusterIssuer 16 | commonName: "${SECRET_PUBLIC_DOMAIN}" 17 | dnsNames: 18 | - "${SECRET_PUBLIC_DOMAIN}" 19 | - "*.${SECRET_PUBLIC_DOMAIN}" 20 | -------------------------------------------------------------------------------- /cluster/apps/networking/wildcard-certificate/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - certificate.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/rook-ceph/dashboard/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: rook-ceph 5 | # TODO(rook-ceph): Make sure ceph dashboards are using the latest grafana components 6 | # Some have problems displaying certain panels 7 | configMapGenerator: 8 | - name: ceph-cluster-dashboard 9 | files: 10 | - ceph-cluster-dashboard.json 11 | - name: ceph-osd-dashboard 12 | files: 13 | - ceph-osd-dashboard.json 14 | - name: ceph-pools-dashboard 15 | files: 16 | - ceph-pools-dashboard.json 17 | generatorOptions: 18 | disableNameSuffixHash: true 19 | annotations: 20 | kustomize.toolkit.fluxcd.io/substitute: disabled 21 | labels: 22 | grafana_dashboard: "true" 23 | -------------------------------------------------------------------------------- /cluster/apps/rook-ceph/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - dashboard 6 | - rook-direct-mount 7 | -------------------------------------------------------------------------------- /cluster/apps/rook-ceph/rook-direct-mount/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: rook-ceph 5 | resources: 6 | - deployment.yaml 7 | configMapGenerator: 8 | - name: direct-mount-backup-script 9 | files: 10 | - backup.sh 11 | generatorOptions: 12 | disableNameSuffixHash: true 13 | annotations: 14 | kustomize.toolkit.fluxcd.io/substitute: disabled 15 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | 
resources: 5 | - system-upgrade-controller 6 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/system-upgrade-controller/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - github.com/rancher/system-upgrade-controller?ref=v0.9.1 6 | - plans 7 | images: 8 | - name: rancher/system-upgrade-controller 9 | newTag: v0.9.1 10 | patchesStrategicMerge: 11 | # Delete namespace resource 12 | - ./system-upgrade-patches.yaml 13 | # Add labels 14 | - |- 15 | apiVersion: apps/v1 16 | kind: Deployment 17 | metadata: 18 | name: system-upgrade-controller 19 | namespace: system-upgrade 20 | labels: 21 | app.kubernetes.io/name: system-upgrade-controller 22 | app.kubernetes.io/instance: system-upgrade-controller 23 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/system-upgrade-controller/plans/agent.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: upgrade.cattle.io/v1 3 | kind: Plan 4 | metadata: 5 | name: k3s-agent 6 | namespace: system-upgrade 7 | labels: 8 | k3s-upgrade: agent 9 | spec: 10 | serviceAccountName: system-upgrade 11 | concurrency: 1 12 | channel: https://update.k3s.io/v1-release/channels/v1.23 13 | nodeSelector: 14 | matchExpressions: 15 | - key: node-role.kubernetes.io/master 16 | operator: NotIn 17 | values: 18 | - "true" 19 | - key: k3s-upgrade 20 | operator: In 21 | values: 22 | - "true" 23 | prepare: 24 | image: rancher/k3s-upgrade 25 | args: 26 | - "prepare" 27 | - "k3s-server" 28 | upgrade: 29 | image: rancher/k3s-upgrade 30 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - server.yaml 6 | - agent.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/system-upgrade-controller/plans/server.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: upgrade.cattle.io/v1 3 | kind: Plan 4 | metadata: 5 | name: k3s-server 6 | namespace: system-upgrade 7 | spec: 8 | serviceAccountName: system-upgrade 9 | concurrency: 1 10 | channel: https://update.k3s.io/v1-release/channels/v1.23 11 | cordon: true 12 | nodeSelector: 13 | matchExpressions: 14 | - key: node-role.kubernetes.io/master 15 | operator: In 16 | values: 17 | - "true" 18 | - key: k3s-upgrade 19 | operator: In 20 | values: 21 | - "true" 22 | tolerations: 23 | - key: "node-role.kubernetes.io/master" 24 | operator: "Exists" 25 | upgrade: 26 | image: rancher/k3s-upgrade 27 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/system-upgrade-controller/system-upgrade-patches.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Namespace should already exist 3 | # Delete the system-upgrade namespace 4 | # from the kustomization 5 | $patch: delete 6 | apiVersion: v1 7 | kind: Namespace 8 | metadata: 9 | name: system-upgrade 10 | 
-------------------------------------------------------------------------------- /cluster/base/apps.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 3 | kind: Kustomization 4 | metadata: 5 | name: apps 6 | namespace: flux-system 7 | spec: 8 | interval: 10m0s 9 | dependsOn: 10 | - name: charts 11 | - name: config 12 | - name: core 13 | - name: crds 14 | path: ./cluster/apps 15 | prune: true 16 | sourceRef: 17 | kind: GitRepository 18 | name: flux-system 19 | decryption: 20 | provider: sops 21 | secretRef: 22 | name: sops-age 23 | postBuild: 24 | substitute: {} 25 | substituteFrom: 26 | - kind: ConfigMap 27 | name: cluster-settings 28 | - kind: Secret 29 | name: cluster-secrets 30 | -------------------------------------------------------------------------------- /cluster/base/charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 3 | kind: Kustomization 4 | metadata: 5 | name: charts 6 | namespace: flux-system 7 | spec: 8 | interval: 10m0s 9 | path: ./cluster/charts 10 | prune: true 11 | sourceRef: 12 | kind: GitRepository 13 | name: flux-system 14 | -------------------------------------------------------------------------------- /cluster/base/config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 3 | kind: Kustomization 4 | metadata: 5 | name: config 6 | namespace: flux-system 7 | spec: 8 | interval: 10m0s 9 | path: ./cluster/config 10 | prune: true 11 | sourceRef: 12 | kind: GitRepository 13 | name: flux-system 14 | decryption: 15 | provider: sops 16 | secretRef: 17 | name: sops-age 18 | -------------------------------------------------------------------------------- /cluster/base/core.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 3 | kind: Kustomization 4 | metadata: 5 | name: core 6 | namespace: flux-system 7 | spec: 8 | interval: 10m0s 9 | dependsOn: 10 | - name: charts 11 | - name: config 12 | - name: crds 13 | path: ./cluster/core 14 | prune: false 15 | sourceRef: 16 | kind: GitRepository 17 | name: flux-system 18 | decryption: 19 | provider: sops 20 | secretRef: 21 | name: sops-age 22 | postBuild: 23 | substitute: {} 24 | substituteFrom: 25 | - kind: ConfigMap 26 | name: cluster-settings 27 | - kind: Secret 28 | name: cluster-secrets 29 | -------------------------------------------------------------------------------- /cluster/base/crds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 3 | kind: Kustomization 4 | metadata: 5 | name: crds 6 | namespace: flux-system 7 | spec: 8 | interval: 10m0s 9 | path: ./cluster/crds 10 | prune: false 11 | sourceRef: 12 | kind: GitRepository 13 | name: flux-system 14 | -------------------------------------------------------------------------------- /cluster/base/flux-system/gotk-patches.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Delete the allow-egress NetworkPolicy 3 | $patch: delete 4 | apiVersion: networking.k8s.io/v1 5 | kind: NetworkPolicy 6 | metadata: 7 | name: allow-egress 8 | namespace: flux-system 9 | --- 10 | # Delete the allow-scraping NetworkPolicy 11 | $patch: delete 12 | apiVersion: 
networking.k8s.io/v1 13 | kind: NetworkPolicy 14 | metadata: 15 | name: allow-scraping 16 | namespace: flux-system 17 | --- 18 | # Delete the allow-webhooks NetworkPolicy 19 | $patch: delete 20 | apiVersion: networking.k8s.io/v1 21 | kind: NetworkPolicy 22 | metadata: 23 | name: allow-webhooks 24 | namespace: flux-system 25 | -------------------------------------------------------------------------------- /cluster/base/flux-system/gotk-sync.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: GitRepository 4 | metadata: 5 | name: flux-system 6 | namespace: flux-system 7 | spec: 8 | interval: 5m0s 9 | # https://github.com/k8s-at-home/template-cluster-k3s/issues/324 10 | url: ssh://git@github.com/onedr0p/home-ops 11 | ref: 12 | branch: main 13 | secretRef: 14 | name: github-deploy-key 15 | --- 16 | apiVersion: kustomize.toolkit.fluxcd.io/v1beta2 17 | kind: Kustomization 18 | metadata: 19 | name: flux-system 20 | namespace: flux-system 21 | spec: 22 | interval: 10m0s 23 | path: ./cluster/base 24 | prune: true 25 | sourceRef: 26 | kind: GitRepository 27 | name: flux-system 28 | decryption: 29 | provider: sops 30 | secretRef: 31 | name: sops-age 32 | -------------------------------------------------------------------------------- /cluster/base/flux-system/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - gotk-components.yaml 6 | - gotk-sync.yaml 7 | patchesStrategicMerge: 8 | - gotk-patches.yaml 9 | -------------------------------------------------------------------------------- /cluster/charts/aqua-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: aqua-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://aquasecurity.github.io/helm-charts 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/bitnami-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: bitnami-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://charts.bitnami.com/bitnami 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/deliveryhero-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: deliveryhero-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://charts.deliveryhero.io/ 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/descheduler-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: descheduler-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://kubernetes-sigs.github.io/descheduler 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- 
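Each chart file in this directory declares a Flux HelmRepository whose index is refreshed every hour. A short sketch for confirming the sources are being fetched, assuming the flux CLI is installed and pointed at this cluster:

# Show HelmRepository sources and whether their index was fetched successfully.
flux get sources helm -n flux-system

# The same information without the flux CLI.
kubectl get helmrepositories.source.toolkit.fluxcd.io -n flux-system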
/cluster/charts/emberstack-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: emberstack-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://emberstack.github.io/helm-charts/ 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/emqx-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: emqx-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://repos.emqx.io/charts 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/external-dns-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: external-dns-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://kubernetes-sigs.github.io/external-dns 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/fairwinds-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta1 3 | kind: HelmRepository 4 | metadata: 5 | name: fairwinds-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://charts.fairwinds.com/stable 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/falco-security-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: falco-security-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://falcosecurity.github.io/charts 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/gitea-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: gitea-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://dl.gitea.io/charts/ 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/grafana-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: grafana-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://grafana.github.io/helm-charts 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/ingress-nginx-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: ingress-nginx-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://kubernetes.github.io/ingress-nginx 10 | timeout: 3m 11 | 
-------------------------------------------------------------------------------- /cluster/charts/jetstack-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: jetstack-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://charts.jetstack.io/ 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/k8s-at-home-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: k8s-at-home-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://k8s-at-home.com/charts/ 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/kasten-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: kasten-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://charts.kasten.io/ 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - aqua-charts.yaml 6 | - bitnami-charts.yaml 7 | - deliveryhero-charts.yaml 8 | - descheduler-charts.yaml 9 | - emberstack-charts.yaml 10 | - emqx-charts.yaml 11 | - external-dns-charts.yaml 12 | - fairwinds-charts.yaml 13 | - falco-security-charts.yaml 14 | - gitea-charts.yaml 15 | - grafana-charts.yaml 16 | - ingress-nginx-charts.yaml 17 | - jetstack-charts.yaml 18 | - k8s-at-home-charts.yaml 19 | - kasten-charts.yaml 20 | - kyverno-charts.yaml 21 | - metrics-server-charts.yaml 22 | - nfs-subdir-external-provisioner-charts.yaml 23 | - node-feature-discovery-charts.yaml 24 | - oauth2-proxy-charts.yaml 25 | - prometheus-community-charts.yaml 26 | - rook-ceph-charts.yaml 27 | - stakater-charts.yaml 28 | - vector-charts.yaml 29 | -------------------------------------------------------------------------------- /cluster/charts/kyverno-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: kyverno-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://kyverno.github.io/kyverno/ 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/metrics-server-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: metrics-server-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://kubernetes-sigs.github.io/metrics-server 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/nfs-subdir-external-provisioner-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: 
HelmRepository 4 | metadata: 5 | name: nfs-subdir-external-provisioner-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner/ 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/node-feature-discovery-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: node-feature-discovery-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://kubernetes-sigs.github.io/node-feature-discovery/charts 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/oauth2-proxy-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: oauth2-proxy-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://oauth2-proxy.github.io/manifests 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/prometheus-community-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: prometheus-community-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://prometheus-community.github.io/helm-charts 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/rook-ceph-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: rook-ceph-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://charts.rook.io/release 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/stakater-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: stakater-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://stakater.github.io/stakater-charts 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/charts/vector-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: vector-charts 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://helm.vector.dev 10 | timeout: 3m 11 | -------------------------------------------------------------------------------- /cluster/config/cluster-settings.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | namespace: flux-system 6 | name: cluster-settings 7 | data: 8 | NAS_ADDR: "192.168.42.60" 9 | TIMEZONE: "America/New_York" 10 | # 11 | SVC_MOSQUITTO_ADDR: "192.168.69.110" # deprecated 12 | SVC_EMQX_ADDR: "192.168.69.110" 13 | SVC_HOME_ASSISTANT_ADDR: "192.168.69.103" 14 | SVC_PLEX_ADDR: 
"192.168.69.104" 15 | SVC_JELLYFIN_ADDR: "192.168.69.105" 16 | SVC_QBITTORRENT_ADDR: "192.168.69.106" 17 | SVC_SYSLOG_ADDR: "192.168.69.111" 18 | SVC_NGINX_ADDR: "192.168.69.101" 19 | SVC_GITEA_ADDR: "192.168.69.116" 20 | -------------------------------------------------------------------------------- /cluster/config/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - cluster-secrets.sops.yaml 6 | - cluster-settings.yaml 7 | -------------------------------------------------------------------------------- /cluster/core/cert-manager/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: cert-manager 6 | namespace: cert-manager 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: cert-manager 12 | version: v1.8.0 13 | sourceRef: 14 | kind: HelmRepository 15 | name: jetstack-charts 16 | namespace: flux-system 17 | install: 18 | createNamespace: true 19 | remediation: 20 | retries: 5 21 | upgrade: 22 | remediation: 23 | retries: 5 24 | values: 25 | installCRDs: false 26 | webhook: 27 | enabled: true 28 | extraArgs: 29 | - --dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53 30 | - --dns01-recursive-nameservers-only 31 | replicaCount: 1 32 | podDnsPolicy: "None" 33 | podDnsConfig: 34 | nameservers: 35 | - "1.1.1.1" 36 | - "9.9.9.9" 37 | prometheus: 38 | enabled: true 39 | servicemonitor: 40 | enabled: true 41 | prometheusInstance: monitoring 42 | -------------------------------------------------------------------------------- /cluster/core/cert-manager/issuers/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - letsencrypt-staging.yaml 7 | - letsencrypt-production.yaml 8 | -------------------------------------------------------------------------------- /cluster/core/cert-manager/issuers/letsencrypt-production.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-production 6 | spec: 7 | acme: 8 | server: https://acme-v02.api.letsencrypt.org/directory 9 | email: "${SECRET_EMAIL}" 10 | privateKeySecretRef: 11 | name: letsencrypt-production 12 | solvers: 13 | - dns01: 14 | cloudflare: 15 | email: "${SECRET_EMAIL}" 16 | apiKeySecretRef: 17 | name: cloudflare-token-secret 18 | key: cloudflare-token 19 | selector: 20 | dnsZones: 21 | - ${SECRET_PUBLIC_DOMAIN} 22 | -------------------------------------------------------------------------------- /cluster/core/cert-manager/issuers/letsencrypt-staging.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-staging 6 | spec: 7 | acme: 8 | server: https://acme-staging-v02.api.letsencrypt.org/directory 9 | email: "${SECRET_EMAIL}" 10 | privateKeySecretRef: 11 | name: letsencrypt-staging 12 | solvers: 13 | - dns01: 14 | cloudflare: 15 | email: "${SECRET_EMAIL}" 16 | apiKeySecretRef: 17 | name: cloudflare-token-secret 18 | key: cloudflare-token 19 | selector: 20 | dnsZones: 21 | - ${SECRET_PUBLIC_DOMAIN} 22 | 
-------------------------------------------------------------------------------- /cluster/core/cert-manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - issuers 7 | -------------------------------------------------------------------------------- /cluster/core/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - cert-manager 6 | - kyverno 7 | - namespaces 8 | - rook-ceph 9 | - starboard-system 10 | -------------------------------------------------------------------------------- /cluster/core/kyverno/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: kyverno 6 | namespace: kyverno 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: kyverno 12 | version: v2.3.3 13 | sourceRef: 14 | kind: HelmRepository 15 | name: kyverno-charts 16 | namespace: flux-system 17 | interval: 5m 18 | install: 19 | createNamespace: true 20 | remediation: 21 | retries: 5 22 | upgrade: 23 | remediation: 24 | retries: 5 25 | values: 26 | installCRDs: false 27 | mode: standalone 28 | serviceMonitor: 29 | enabled: true 30 | -------------------------------------------------------------------------------- /cluster/core/kyverno/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - policies 7 | -------------------------------------------------------------------------------- /cluster/core/kyverno/policies/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kyverno.io/v1 3 | kind: ClusterPolicy 4 | metadata: 5 | name: ingress 6 | spec: 7 | rules: 8 | - name: add-whitelist-source-range-annotation 9 | match: 10 | any: 11 | - resources: 12 | kinds: 13 | - Ingress 14 | exclude: 15 | any: 16 | - resources: 17 | annotations: 18 | external-dns/is-public: "true" 19 | mutate: 20 | patchStrategicMerge: 21 | metadata: 22 | annotations: 23 | +(nginx.ingress.kubernetes.io/whitelist-source-range): "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" 24 | -------------------------------------------------------------------------------- /cluster/core/kyverno/policies/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - ingress.yaml 6 | - resources.yaml 7 | -------------------------------------------------------------------------------- /cluster/core/kyverno/policies/resources.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kyverno.io/v1 3 | kind: ClusterPolicy 4 | metadata: 5 | name: resources 6 | spec: 7 | rules: 8 | - name: remove-cpu-limits 9 | match: 10 | any: 11 | - resources: 12 | kinds: 13 | - Pod 14 | mutate: 15 | patchStrategicMerge: 16 | spec: 17 | initContainers: 18 | - (name): "*" 19 | resources: 20 | limits: 21 | cpu: null 22 | containers: 23 | - (name): "*" 24 | resources: 25 | limits: 26 | cpu: null 27 | 
-------------------------------------------------------------------------------- /cluster/core/namespaces/calico-system.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: calico-system 6 | labels: 7 | goldilocks.fairwinds.com/enabled: "true" 8 | k10.kasten.io/ignorebackuppolicy: "true" 9 | -------------------------------------------------------------------------------- /cluster/core/namespaces/cert-manager.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: cert-manager 6 | labels: 7 | goldilocks.fairwinds.com/enabled: "true" 8 | k10.kasten.io/ignorebackuppolicy: "true" 9 | -------------------------------------------------------------------------------- /cluster/core/namespaces/flux-system.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: flux-system 6 | labels: 7 | goldilocks.fairwinds.com/enabled: "true" 8 | k10.kasten.io/ignorebackuppolicy: "true" 9 | -------------------------------------------------------------------------------- /cluster/core/namespaces/home.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: home 6 | labels: 7 | goldilocks.fairwinds.com/enabled: "true" 8 | -------------------------------------------------------------------------------- /cluster/core/namespaces/kasten-io.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: kasten-io 6 | labels: 7 | goldilocks.fairwinds.com/enabled: "true" 8 | -------------------------------------------------------------------------------- /cluster/core/namespaces/kube-system.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: kube-system 6 | labels: 7 | goldilocks.fairwinds.com/enabled: "true" 8 | k10.kasten.io/ignorebackuppolicy: "true" 9 | -------------------------------------------------------------------------------- /cluster/core/namespaces/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - calico-system.yaml 6 | - cert-manager.yaml 7 | - flux-system.yaml 8 | - home.yaml 9 | - kasten-io.yaml 10 | - kube-system.yaml 11 | - kyverno.yaml 12 | - media.yaml 13 | - monitoring.yaml 14 | - networking.yaml 15 | - rook-ceph.yaml 16 | - starboard-system.yaml 17 | - system-upgrade.yaml 18 | -------------------------------------------------------------------------------- /cluster/core/namespaces/kyverno.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: kyverno 6 | labels: 7 | goldilocks.fairwinds.com/enabled: "true" 8 | k10.kasten.io/ignorebackuppolicy: "true" 9 | -------------------------------------------------------------------------------- /cluster/core/namespaces/media.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: media 6 | labels: 7 | 
goldilocks.fairwinds.com/enabled: "true" 8 | -------------------------------------------------------------------------------- /cluster/core/namespaces/monitoring.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: monitoring 6 | labels: 7 | goldilocks.fairwinds.com/enabled: "true" 8 | -------------------------------------------------------------------------------- /cluster/core/namespaces/networking.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: networking 6 | labels: 7 | goldilocks.fairwinds.com/enabled: "true" 8 | k10.kasten.io/ignorebackuppolicy: "true" 9 | -------------------------------------------------------------------------------- /cluster/core/namespaces/rook-ceph.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: rook-ceph 6 | labels: 7 | goldilocks.fairwinds.com/enabled: "true" 8 | k10.kasten.io/ignorebackuppolicy: "true" 9 | -------------------------------------------------------------------------------- /cluster/core/namespaces/starboard-system.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: starboard-system 6 | labels: 7 | goldilocks.fairwinds.com/enabled: "true" 8 | k10.kasten.io/ignorebackuppolicy: "true" 9 | -------------------------------------------------------------------------------- /cluster/core/namespaces/system-upgrade.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: system-upgrade 6 | labels: 7 | goldilocks.fairwinds.com/enabled: "true" 8 | k10.kasten.io/ignorebackuppolicy: "true" 9 | -------------------------------------------------------------------------------- /cluster/core/rook-ceph/cluster/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/core/rook-ceph/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - cluster 6 | - operator 7 | -------------------------------------------------------------------------------- /cluster/core/rook-ceph/operator/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | # TODO(rook-ceph): Rename HR to rook-ceph-operator 6 | name: rook-ceph 7 | namespace: rook-ceph 8 | spec: 9 | interval: 5m 10 | chart: 11 | spec: 12 | chart: rook-ceph 13 | version: v1.9.3 14 | sourceRef: 15 | kind: HelmRepository 16 | name: rook-ceph-charts 17 | namespace: flux-system 18 | install: 19 | createNamespace: true 20 | # TODO(rook-ceph): On rebuild, switch CRDs to be managed outside helm release 21 | crds: CreateReplace 22 | remediation: 23 | retries: 5 24 | upgrade: 25 | # TODO(rook-ceph): On rebuild, switch CRDs to be managed outside helm release 26 | crds: CreateReplace 27 | 
remediation: 28 | retries: 5 29 | values: 30 | # TODO(rook-ceph): On rebuild, switch CRDs to be managed outside helm release 31 | crds: 32 | enabled: true 33 | monitoring: 34 | enabled: true 35 | resources: 36 | requests: 37 | cpu: 100m 38 | memory: 128Mi 39 | limits: 40 | memory: 256Mi 41 | -------------------------------------------------------------------------------- /cluster/core/rook-ceph/operator/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/core/starboard-system/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - starboard-operator 6 | -------------------------------------------------------------------------------- /cluster/core/starboard-system/starboard-operator/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta1 3 | kind: HelmRelease 4 | metadata: 5 | name: starboard-operator 6 | namespace: starboard-system 7 | spec: 8 | interval: 5m 9 | chart: 10 | spec: 11 | chart: starboard-operator 12 | version: 0.10.4 13 | sourceRef: 14 | kind: HelmRepository 15 | name: aqua-charts 16 | namespace: flux-system 17 | install: 18 | createNamespace: true 19 | crds: CreateReplace 20 | remediation: 21 | retries: 5 22 | upgrade: 23 | crds: CreateReplace 24 | remediation: 25 | retries: 5 26 | values: 27 | operator: 28 | replicas: 3 29 | scanJobsConcurrentLimit: 3 30 | vulnerabilityScannerScanOnlyCurrentRevisions: true 31 | trivy: 32 | ignoreUnfixed: true 33 | -------------------------------------------------------------------------------- /cluster/core/starboard-system/starboard-operator/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/crds/cert-manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - crds.yaml 6 | -------------------------------------------------------------------------------- /cluster/crds/external-snapshotter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - github.com/kubernetes-csi/external-snapshotter//client/config/crd?ref=v5.0.1 6 | -------------------------------------------------------------------------------- /cluster/crds/kube-prometheus-stack/crds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: GitRepository 4 | metadata: 5 | name: kube-prometheus-stack-source 6 | namespace: flux-system 7 | spec: 8 | interval: 12h 9 | url: https://github.com/prometheus-community/helm-charts.git 10 | ref: 11 | # renovate: registryUrl=https://prometheus-community.github.io/helm-charts chart=kube-prometheus-stack 12 
--------------------------------------------------------------------------------
/cluster/crds/kube-prometheus-stack/kustomization.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - crds.yaml
--------------------------------------------------------------------------------
/cluster/crds/kustomization.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - cert-manager
  - external-snapshotter
  - kube-prometheus-stack
  - kyverno
  # TODO(rook-ceph): On rebuild, switch CRDs to be managed outside helm release
  # - rook-ceph
  - system-upgrade-controller
--------------------------------------------------------------------------------
/cluster/crds/kyverno/crds.yaml:
--------------------------------------------------------------------------------
---
apiVersion: source.toolkit.fluxcd.io/v1beta2
kind: GitRepository
metadata:
  name: kyverno-source
  namespace: flux-system
spec:
  interval: 12h
  url: https://github.com/kyverno/kyverno.git
  ref:
    # renovate: registryUrl=https://kyverno.github.io/kyverno chart=kyverno
    tag: helm-chart-v2.3.3
  ignore: |
    # exclude all
    /*
    # include crd directory
    !/config/crds
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: crds-kyverno
  namespace: flux-system
spec:
  interval: 30m
  prune: false
  wait: true
  sourceRef:
    kind: GitRepository
    name: kyverno-source
--------------------------------------------------------------------------------
/cluster/crds/kyverno/kustomization.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - crds.yaml
--------------------------------------------------------------------------------
/cluster/crds/rook-ceph/crds.yaml:
--------------------------------------------------------------------------------
---
apiVersion: source.toolkit.fluxcd.io/v1beta1
kind: GitRepository
metadata:
  name: rook-ceph-source
  namespace: flux-system
spec:
  interval: 12h
  url: https://github.com/rook/rook.git
  ref:
    # renovate: registryUrl=https://charts.rook.io/release chart=rook-ceph
    tag: v1.9.3
  ignore: |
    # exclude all
    /*
    # include crd directory
    !/deploy/examples/crds.yaml
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
  name: crds-rook-ceph
  namespace: flux-system
spec:
  interval: 30m
  prune: false
  wait: true
  sourceRef:
    kind: GitRepository
    name: rook-ceph-source
--------------------------------------------------------------------------------
/cluster/crds/rook-ceph/kustomization.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - crds.yaml
--------------------------------------------------------------------------------
/cluster/crds/system-upgrade-controller/kustomization.yaml:
--------------------------------------------------------------------------------
---
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # renovate: datasource=docker image=rancher/system-upgrade-controller
  - https://github.com/rancher/system-upgrade-controller/releases/download/v0.9.1/crd.yaml
--------------------------------------------------------------------------------
/hack/delete-stuck-ns.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Force-delete namespaces stuck in the Terminating phase by clearing the
# "kubernetes" finalizer through the namespace finalize sub-resource.

function delete_namespace () {
    echo "Deleting namespace $1"
    kubectl get namespace "$1" -o json > tmp.json
    # strip the "kubernetes" finalizer so the API server can finish deletion
    sed -i 's/"kubernetes"//g' tmp.json
    kubectl replace --raw "/api/v1/namespaces/$1/finalize" -f ./tmp.json
    rm ./tmp.json
}

TERMINATING_NS=$(kubectl get ns | awk '$2=="Terminating" {print $1}')

for ns in $TERMINATING_NS
do
    delete_namespace "$ns"
done
--------------------------------------------------------------------------------
/hack/delete-stuck-snapshots.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Clear finalizers from VolumeSnapshotContents and VolumeSnapshots that are stuck in deletion.

volumesnapshotcontents=$(kubectl get --no-headers volumesnapshotcontents | awk '{print $1}')
for volumesnapshotcontent in $volumesnapshotcontents
do
    kubectl patch volumesnapshotcontents "${volumesnapshotcontent}" -p '{"metadata":{"finalizers":null}}' --type=merge
done

volumesnapshots=$(kubectl get --no-headers volumesnapshots -A | awk '{print $1","$2}')
for item in $volumesnapshots
do
    namespace="$(echo "${item}" | awk -F',' '{print $1}')"
    volumesnapshot="$(echo "${item}" | awk -F',' '{print $2}')"
    kubectl patch volumesnapshots "${volumesnapshot}" -n "${namespace}" -p '{"metadata":{"finalizers":null}}' --type=merge
done
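Both cleanup scripts act on whatever cluster the current kubeconfig context points at, so it is worth previewing what they would touch before running them; a small sketch that reuses the same filters the scripts themselves rely on:

    # namespaces currently stuck in Terminating (what delete-stuck-ns.sh would finalize)
    kubectl get ns | awk '$2=="Terminating" {print $1}'
    # snapshot objects whose finalizers delete-stuck-snapshots.sh would clear
    kubectl get volumesnapshotcontents --no-headers | awk '{print $1}'
    kubectl get volumesnapshots -A --no-headers | awk '{print $1"/"$2}'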
--------------------------------------------------------------------------------
/hack/valetudo/S11node_exporter:
--------------------------------------------------------------------------------
#!/bin/sh

load() {
    echo "starting node_exporter"
    /sbin/start-stop-daemon -S -b -q -m -p /var/run/node_exporter.pid -x /mnt/data/node_exporter/node_exporter
}

unload() {
    echo "stopping node_exporter"
    /sbin/start-stop-daemon -K -q -p /var/run/node_exporter.pid
}

case "$1" in
    start)
        load
        ;;
    stop)
        unload
        ;;
    restart)
        unload
        load
        ;;
    *)
        echo "$0 <start|stop|restart>"
        ;;
esac
--------------------------------------------------------------------------------
/hack/valetudo/S11vector:
--------------------------------------------------------------------------------
#!/bin/sh

load() {
    echo "starting vector"
    /sbin/start-stop-daemon -S -b -q -m -p /var/run/vector.pid -x /mnt/data/vector/vector -- --config /mnt/data/vector/config.yml
}

unload() {
    echo "stopping vector"
    /sbin/start-stop-daemon -K -q -p /var/run/vector.pid
}

case "$1" in
    start)
        load
        ;;
    stop)
        unload
        ;;
    restart)
        unload
        load
        ;;
    *)
        echo "$0 <start|stop|restart>"
        ;;
esac
--------------------------------------------------------------------------------
/hack/valetudo/vector-config.yml:
--------------------------------------------------------------------------------
---
data_dir: /tmp
sources:
  valetudo_logs:
    type: file
    include:
      - /tmp/valetudo.log
transforms:
  valetudo_remap:
    type: remap
    inputs:
      - valetudo_logs
    # Parse the valetudo logs e.g.
    # [2021-09-23T14:30:20.403Z] [INFO] MQTT configured
    source: |
      . |= parse_regex!(.message, r'^\[(?P<timestamp>.+)\] \[(?P<level>(TRACE|DEBUG|INFO|WARNING|ERROR))\] (?P<message>.*)$')
      .timestamp = to_timestamp!(.timestamp)
sinks:
  loki_valetudo_sink:
    type: loki
    inputs:
      - valetudo_remap
    endpoint: https://loki.${SECRET_DOMAIN}
    encoding:
      codec: json
    batch:
      max_bytes: 400000
    out_of_order_action: rewrite_timestamp
    tls:
      verify_certificate: false
    labels:
      hostname: "{{ name }}"
--------------------------------------------------------------------------------
/terraform/cloudflare/main.tf:
--------------------------------------------------------------------------------
terraform {

  backend "remote" {
    organization = "onedr0p"
    workspaces {
      name = "home-cloudflare"
    }
  }

  required_providers {
    cloudflare = {
      source  = "cloudflare/cloudflare"
      version = "3.14.0"
    }
    http = {
      source  = "hashicorp/http"
      version = "2.1.0"
    }
    sops = {
      source  = "carlpett/sops"
      version = "0.7.0"
    }
  }
}

data "sops_file" "cloudflare_secrets" {
  source_file = "secret.sops.yaml"
}

provider "cloudflare" {
  email   = data.sops_file.cloudflare_secrets.data["cloudflare_email"]
  api_key = data.sops_file.cloudflare_secrets.data["cloudflare_apikey"]
}

data "cloudflare_zones" "domain_io" {
  filter {
    name = data.sops_file.cloudflare_secrets.data["cloudflare_domain_io"]
  }
}

data "cloudflare_zones" "domain_ac" {
  filter {
    name = data.sops_file.cloudflare_secrets.data["cloudflare_domain_ac"]
  }
}

data "cloudflare_zones" "domain_casa" {
  filter {
    name = data.sops_file.cloudflare_secrets.data["cloudflare_domain_casa"]
  }
}
--------------------------------------------------------------------------------
/terraform/cloudflare/page_rules_io.tf:
--------------------------------------------------------------------------------
resource "cloudflare_page_rule" "plex_bypass_cache" {
  zone_id = lookup(data.cloudflare_zones.domain_io.zones[0], "id")
  target  = "plex.${data.sops_file.cloudflare_secrets.data["cloudflare_domain_io"]}/*"
  status  = "active"

  actions {
    cache_level = "bypass"
  }
}
--------------------------------------------------------------------------------
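The Cloudflare workspace is driven with the usual Terraform workflow. A minimal sketch, assuming the sops-encrypted secret.sops.yaml can be decrypted locally (the matching key is available) and the Terraform Cloud workspace uses local execution:

    cd terraform/cloudflare
    terraform init   # connects to the "remote" backend / home-cloudflare workspace declared in main.tf
    terraform plan   # the carlpett/sops provider decrypts secret.sops.yaml to resolve credentials and zone names
    terraform apply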