├── .editorconfig ├── .gitignore ├── .pre-commit-config.yaml ├── .taskfiles ├── Ansible │ └── Tasks.yaml ├── Kubernetes │ └── Tasks.yaml ├── Pre-Commit │ └── Tasks.yaml ├── Rook │ └── Tasks.yaml ├── Terraform │ └── Tasks.yaml └── Volsync │ ├── ListJob.tmpl.yaml │ ├── ReplicationDestination.tmpl.yaml │ ├── Tasks.yaml │ ├── WipeJob.tmpl.yaml │ └── wait-for-job.sh ├── .vscode └── settings.json ├── .yamllint.yaml ├── LICENSE ├── README.md ├── Taskfile.yaml ├── ansible.cfg ├── ansible ├── inventory │ ├── examples │ │ ├── all.yaml │ │ └── hosts.yaml │ ├── group_vars │ │ ├── kubernetes │ │ │ ├── k3s.yaml │ │ │ └── os.yaml │ │ ├── master │ │ │ └── k3s.yaml │ │ └── worker │ │ │ └── k3s.yaml │ └── host_vars │ │ └── .gitkeep ├── playbooks │ ├── cluster-installation.yaml │ ├── cluster-nuke.yaml │ ├── cluster-prepare.yaml │ ├── cluster-reboot.yaml │ ├── cluster-sudoers.yaml │ ├── configs │ │ └── kubelet.config │ └── templates │ │ ├── calico-installation.yml.j2 │ │ └── kube-vip-daemonset.yml.j2 ├── requirements.txt └── requirements.yaml ├── docs ├── dr-ceph.md ├── dr-cloudnative-pg.md ├── logo.png ├── setup-ceph-dashboard.md ├── setup-jellyfin.md ├── setup-unifi.md └── startup.md ├── kubernetes ├── helm │ ├── argocd │ │ └── argocd │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ └── public-ingress.yaml │ │ │ └── values.yaml │ ├── home-automation │ │ └── hassio │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ ├── kyverno │ │ └── kyverno │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ ├── add-online-snapshot-label-to-pvc.yaml │ │ │ ├── add-snapshot-label-to-pvc.yaml │ │ │ ├── create-replication-for-pvc.yaml │ │ │ ├── generate-monthly-snapshots.yaml │ │ │ ├── ingress-apply-internal-annotations.yaml │ │ │ ├── init-inject-tz.yaml │ │ │ ├── namespace-apply-annotations.yaml │ │ │ └── set-resources.yaml │ │ │ └── values.yaml │ ├── media │ │ ├── bazarr │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── flaresolverr │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── foundryvtt-ddb-proxy │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── foundryvtt │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ │ ├── foundry-admin-key.yaml │ │ │ │ └── password.yaml │ │ │ └── values.yaml │ │ ├── jellyfin │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── lazylibrarian │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── plex │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ │ └── plex-secrets.yaml │ │ │ └── values.yaml │ │ ├── prowlarr │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── qbittorrent │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── radarr │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── samba │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ │ └── password.yaml │ │ │ └── values.yaml │ │ ├── sonarr │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ └── tylercash-dev-api │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ ├── monitoring │ │ ├── descheduler │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── gotify │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── health-all │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── health-public │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ │ ├── gatus-rules.yaml │ │ │ │ └── gatus-servicemonitor.yaml │ │ │ └── values.yaml │ │ ├── loki │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── node-feature-discovery │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── prometheus-crds │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── prometheus │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ │ ├── ceph-cephfs.yaml │ │ │ │ ├── ceph-cluster-advanced.yaml │ │ │ │ ├── ceph-cluster.yaml │ │ │ │ ├── 
ceph-host-details.yaml │ │ │ │ ├── ceph-host-overview.yaml │ │ │ │ ├── ceph-osd-device-details.yaml │ │ │ │ ├── ceph-osd-overview.yaml │ │ │ │ ├── ceph-pool-detail.yaml │ │ │ │ ├── ceph-pool-overview.yaml │ │ │ │ ├── ceph-radosgw-detail.yaml │ │ │ │ ├── ceph-radosgw-overview.yaml │ │ │ │ ├── ceph-radosgw-sync-overview.yaml │ │ │ │ ├── ceph-rbd-details.yaml │ │ │ │ ├── ceph-rbd-overview.yaml │ │ │ │ ├── ceph-rgw-s3-analytics.yaml │ │ │ │ ├── cnpg-dashboard.yaml │ │ │ │ ├── peepbot-dashboard.yaml │ │ │ │ └── volsync-dashboard.yaml │ │ │ └── values.yaml │ │ └── promtail │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ ├── networking │ │ ├── coredns │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ │ ├── cron.yaml │ │ │ │ ├── pvc.yaml │ │ │ │ └── scripts │ │ │ │ │ ├── download_blacklists.sh │ │ │ │ │ └── lancache.txt │ │ │ └── values.yaml │ │ ├── external-dns │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── ingress-nginx │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── k8s-gateway │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── metallb │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ │ └── config.yaml │ │ │ └── values.yaml │ │ └── unifi │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ ├── security │ │ ├── authentik │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ │ ├── authentik-database.yaml │ │ │ │ ├── authentik-email.yaml │ │ │ │ ├── authentik-redis-password.yaml │ │ │ │ └── authentik-secrets.yaml │ │ │ └── values.yaml │ │ ├── bitwarden │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── cert-manager │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ │ ├── prod-issuer.yaml │ │ │ │ └── staging-issuer.yaml │ │ │ └── values.yaml │ │ ├── external-secrets │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ │ ├── monitoring-secrets.yaml │ │ │ │ └── storage-secrets.yaml │ │ │ └── values.yaml │ │ ├── intel-device-plugins-gpu │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── intel-device-plugins-operator │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ │ └── node-feature-rule.yaml │ │ │ └── values.yaml │ │ ├── kubed │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── node-feature-discovery │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ └── wg-easy │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ ├── storage │ │ ├── cloudnative-pg │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── lancache │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── rook-ceph │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ │ ├── objectstore-ingress.yaml │ │ │ │ └── post-sync-hook.yaml │ │ │ └── values.yaml │ │ ├── rook-operator │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── snap-scheduler │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ ├── syncthing │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ │ └── volsync │ │ │ ├── Chart.yaml │ │ │ ├── manifests │ │ │ ├── volsync-rules.yaml │ │ │ └── volsync-servicemonitor.yaml │ │ │ └── values.yaml │ └── tylerbot │ │ ├── tyler-bot-backend │ │ ├── Chart.yaml │ │ ├── manifests │ │ │ ├── ingress.yaml │ │ │ ├── tylerbot-database.yaml │ │ │ ├── tylerbot-secrets.yaml │ │ │ └── tylerbot-service-monitor.yaml │ │ └── values.yaml │ │ └── tyler-bot-frontend │ │ ├── Chart.yaml │ │ └── values.yaml └── manifests │ ├── argocd │ └── orchestrator │ │ ├── helm-apps.yaml │ │ ├── local-path-storage.yaml │ │ ├── manifest-apps.yaml │ │ ├── manifest-helm-apps.yaml │ │ └── system-upgrade-controller.yaml │ ├── csi-addons-system │ └── kubernetes-csi-addons │ │ ├── crds.yaml │ │ ├── rbac.yaml │ │ └── setup-controller.yaml │ ├── kube-system │ ├── csi-snapshotter │ │ ├── csi-snapshotter-controller.yaml │ │ └── csi-snapshotter-crds.yaml │ 
└── scheduling │ │ └── normal-pc.yaml │ └── system-upgrade │ └── system-upgrade-controller │ └── upgrade.yaml ├── renovate.json └── terraform ├── .terraform.lock.hcl ├── backups ├── account.tf ├── bucket.tf ├── k8s.tf ├── main.tf └── vars.tf ├── dns ├── main.tf └── vars.tf ├── idp ├── auth_argocd.tf ├── auth_ceph.tf ├── auth_grafana.tf ├── auth_jellyfin.tf ├── email_auth.tf ├── groups.tf ├── main.tf ├── mappings.tf ├── policies.tf ├── stage_flow.tf ├── users.tf └── vars.tf ├── main.tf ├── secrets_storage ├── account.tf ├── iam.tf ├── k8s.tf ├── main.tf ├── project.tf ├── secret-manager.tf ├── secrets-all.tf └── vars.tf └── vars.tf /.editorconfig: -------------------------------------------------------------------------------- 1 | # editorconfig.org 2 | root = true 3 | 4 | [*] 5 | indent_style = space 6 | indent_size = 2 7 | end_of_line = lf 8 | charset = utf-8 9 | trim_trailing_whitespace = true 10 | insert_final_newline = true 11 | 12 | [Makefile] 13 | indent_style = space 14 | indent_size = 4 15 | 16 | [*.{bash,sh}] 17 | indent_style = space 18 | indent_size = 4 19 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Trash 2 | .DS_Store 3 | Thumbs.db 4 | # k8s 5 | kubeconfig 6 | # vscode-sops 7 | .decrypted~*.yaml 8 | *.envrc 9 | *.agekey 10 | *.pub 11 | *.key 12 | # Ansible 13 | xanmanning.k3s* 14 | hosts.yaml 15 | all.yaml 16 | # Terraform 17 | .terraform 18 | .terraform.tfstate* 19 | *.tfvars 20 | terraform.tfstate* 21 | .idea 22 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fail_fast: false 3 | repos: 4 | - repo: https://github.com/adrienverge/yamllint 5 | rev: v1.29.0 6 | hooks: 7 | - args: 8 | - --config-file 9 | - .yamllint.yaml 10 | id: yamllint 11 | - repo: https://github.com/gruntwork-io/pre-commit 12 | rev: v0.1.18 13 | hooks: 14 | - id: terraform-fmt 15 | -------------------------------------------------------------------------------- /.taskfiles/Ansible/Tasks.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | env: 5 | ANSIBLE_CONFIG: "{{.ROOT_DIR}}/ansible.cfg" 6 | K8S_AUTH_KUBECONFIG: "{{.ROOT_DIR}}/kubeconfig" 7 | 8 | vars: 9 | ANSIBLE_PLAYBOOK_DIR: "{{.ANSIBLE_DIR}}/playbooks" 10 | ANSIBLE_INVENTORY_DIR: "{{.ANSIBLE_DIR}}/inventory" 11 | 12 | tasks: 13 | init: 14 | desc: Install / Upgrade Ansible galaxy deps 15 | dir: "{{.ANSIBLE_DIR}}" 16 | cmds: 17 | - pip3 install --user --requirement requirements.txt 18 | - ansible-galaxy install -r requirements.yaml --roles-path ~/.ansible/roles --force 19 | - ansible-galaxy collection install -r requirements.yaml --collections-path ~/.ansible/collections --force 20 | 21 | list: 22 | desc: List all the hosts 23 | dir: "{{.ANSIBLE_DIR}}" 24 | cmds: 25 | - ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml --list-hosts 26 | 27 | prepare: 28 | desc: Prepare all the k8s nodes for running k3s 29 | dir: "{{.ANSIBLE_DIR}}" 30 | cmds: 31 | - ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-prepare.yaml {{.CLI_ARGS}} 32 | 33 | install: 34 | desc: Install Kubernetes on the nodes 35 | dir: "{{.ANSIBLE_DIR}}" 36 | cmds: 37 | - ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-installation.yaml {{.CLI_ARGS}} 38 | 
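# Note: anything after "--" on the task command line is forwarded to
# ansible-playbook via {{.CLI_ARGS}}, e.g. `task ansible:install -- --check --limit k8s-0`
# (the host name here is illustrative).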
39 | nuke: 40 | desc: Uninstall Kubernetes on the nodes 41 | dir: "{{.ANSIBLE_DIR}}" 42 | interactive: true 43 | cmds: 44 | - ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-nuke.yaml {{.CLI_ARGS}} 45 | - task: force-reboot 46 | 47 | ping: 48 | desc: Ping all the hosts 49 | dir: "{{.ANSIBLE_DIR}}" 50 | cmds: 51 | - ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml --one-line -m 'ping' 52 | 53 | uptime: 54 | desc: Uptime of all the hosts 55 | dir: "{{.ANSIBLE_DIR}}" 56 | cmds: 57 | - ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml --one-line -a 'uptime' 58 | 59 | rollout-reboot: 60 | desc: Rollout a reboot across all the k8s nodes 61 | dir: "{{.ANSIBLE_DIR}}" 62 | cmds: 63 | - ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-rollout-reboot.yaml {{.CLI_ARGS}} 64 | 65 | force-reboot: 66 | desc: Reboot all the k8s nodes 67 | dir: "{{.ANSIBLE_DIR}}" 68 | cmds: 69 | - ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-reboot.yaml {{.CLI_ARGS}} 70 | 71 | force-poweroff: 72 | desc: Shutdown all the k8s nodes 73 | dir: "{{.ANSIBLE_DIR}}" 74 | cmds: 75 | - ansible kubernetes -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml -a '/usr/bin/systemctl poweroff' --become 76 | 77 | sudoers: 78 | desc: Enable passwordless SSH to root 79 | dir: "{{.ANSIBLE_DIR}}" 80 | cmds: 81 | - ansible-playbook --ask-become-pass -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yaml {{.ANSIBLE_PLAYBOOK_DIR}}/cluster-sudoers.yaml {{.CLI_ARGS}} 82 | -------------------------------------------------------------------------------- /.taskfiles/Kubernetes/Tasks.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | tasks: 5 | # https://github.com/kubernetes/kubernetes/issues/100485 6 | unbind-stuck-pvcs: 7 | desc: Sometimes the PVCs will become stuck due to the linked issues. This force unsticks them. 8 | cmds: 9 | - | 10 | declare -A nodes 11 | while read node; do 12 | nodes["${node#node/}"]=exists 13 | done < <(kubectl get nodes -o name) 14 | 15 | kubectl get pvc -A -o json | 16 | jq '.items[].metadata | [.namespace, .name, .annotations["volume.kubernetes.io/selected-node"]] | @tsv' -r | 17 | while read namespace name node; do 18 | test -n "$node" || continue 19 | if ! 
[[ ${nodes[$node]-} == "exists" ]]; then 20 | kubectl annotate -n "${namespace}" "pvc/${name}" volume.kubernetes.io/selected-node- 21 | fi 22 | done 23 | -------------------------------------------------------------------------------- /.taskfiles/Pre-Commit/Tasks.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | tasks: 5 | 6 | init: 7 | desc: Initialize pre-commit hooks 8 | cmds: 9 | - pre-commit install --install-hooks 10 | 11 | run: 12 | desc: Run pre-commit 13 | cmds: 14 | - pre-commit run --all-files 15 | 16 | update: 17 | desc: Update pre-commit hooks 18 | cmds: 19 | - pre-commit autoupdate 20 | -------------------------------------------------------------------------------- /.taskfiles/Rook/Tasks.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | tasks: 5 | toolbox: 6 | desc: Exec into the Rook Ceph toolbox 7 | interactive: true 8 | cmds: 9 | - kubectl -n storage exec -it deploy/rook-ceph-tools -- bash 10 | 11 | del-toolbox: 12 | desc: Delete the Rook Ceph toolbox 13 | cmds: 14 | - kubectl -n storage delete pod -l app=rook-ceph-tools 15 | 16 | password: 17 | desc: Retrieve the rook-ceph password 18 | cmds: 19 | - kubectl -n storage get secret rook-ceph-dashboard-password -o jsonpath="{['data']['password']}" | base64 --decode 20 | 21 | clear: 22 | desc: Clear all Ceph health warnings 23 | cmds: 24 | - kubectl -n storage exec -it deploy/rook-ceph-tools -- bash -c "ceph crash archive-all" 25 | -------------------------------------------------------------------------------- /.taskfiles/Terraform/Tasks.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | tasks: 5 | 6 | init: 7 | desc: Initialize terraform dependencies 8 | dir: "{{.TERRAFORM_DIR}}/" 9 | cmds: 10 | - terraform init {{.CLI_ARGS}} 11 | 12 | plan: 13 | desc: Show the changes terraform will make 14 | dir: "{{.TERRAFORM_DIR}}/" 15 | cmds: 16 | - terraform plan {{.CLI_ARGS}} 17 | 18 | apply: 19 | desc: Apply the changes to Cloudflare 20 | dir: "{{.TERRAFORM_DIR}}/" 21 | cmds: 22 | - terraform apply {{.CLI_ARGS}} 23 | passwords: 24 | desc: Get passwords for all secrets that are needed 25 | dir: "{{.TERRAFORM_DIR}}/" 26 | cmds: 27 | - terraform output -raw authentik_password && printf '\n' 28 | -------------------------------------------------------------------------------- /.taskfiles/Volsync/ListJob.tmpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: "list-${rsrc}-${ts}" 6 | namespace: "${namespace}" 7 | spec: 8 | ttlSecondsAfterFinished: 3600 9 | template: 10 | spec: 11 | automountServiceAccountToken: false 12 | restartPolicy: OnFailure 13 | containers: 14 | - name: list 15 | image: docker.io/restic/restic:0.14.0 16 | args: ["snapshots"] 17 | envFrom: 18 | - secretRef: 19 | name: "${rsrc}" 20 | -------------------------------------------------------------------------------- /.taskfiles/Volsync/ReplicationDestination.tmpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: volsync.backube/v1alpha1 3 | kind: ReplicationDestination 4 | metadata: 5 | name: "${rsrc}-${claim}-${ts}" 6 | namespace: "${namespace}" 7 | spec: 8 | trigger: 9 | manual: restore-once 10 | restic: 11 | repository: "restic-${rsrc}" 12 | destinationPVC: "${claim}" 13 | copyMethod: Direct 14 | 
cleanupTempPVC: true 15 | restoreAsOf: "2024-11-16T00:00:00-00:00" 16 | storageClassName: "${storageClassName}" 17 | moverSecurityContext: 18 | runAsUser: 0 19 | runAsGroup: 0 20 | fsGroup: 0 21 | -------------------------------------------------------------------------------- /.taskfiles/Volsync/WipeJob.tmpl.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: Job 4 | metadata: 5 | name: "wipe-${rsrc}-${claim}-${ts}" 6 | namespace: "${namespace}" 7 | spec: 8 | ttlSecondsAfterFinished: 3600 9 | template: 10 | spec: 11 | automountServiceAccountToken: false 12 | restartPolicy: OnFailure 13 | containers: 14 | - name: wipe 15 | image: ubuntu 16 | command: ["/bin/bash", "-c", "cd /config; find . -delete"] 17 | volumeMounts: 18 | - name: config 19 | mountPath: /config 20 | securityContext: 21 | privileged: true 22 | volumes: 23 | - name: config 24 | persistentVolumeClaim: 25 | claimName: "${claim}" 26 | -------------------------------------------------------------------------------- /.taskfiles/Volsync/wait-for-job.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | JOB_NAME=$1 3 | NAMESPACE="${2:-default}" 4 | 5 | [[ -z "${JOB_NAME}" ]] && echo "Job name not specified" && exit 1 6 | 7 | while true; do 8 | STATUS="$(kubectl -n "${NAMESPACE}" get pod -l job-name="${JOB_NAME}" -o jsonpath='{.items[*].status.phase}')" 9 | if [ "${STATUS}" == "Pending" ]; then 10 | break 11 | fi 12 | sleep 1 13 | done 14 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "workbench.colorCustomizations": { 3 | "activityBar.background": "#103502", 4 | "titleBar.activeBackground": "#164A02", 5 | "titleBar.activeForeground": "#F2FEED" 6 | } 7 | } -------------------------------------------------------------------------------- /.yamllint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ignore: | 3 | *.sops.* 4 | extends: default 5 | rules: 6 | truthy: 7 | allowed-values: ["true", "false", "on"] 8 | comments: 9 | min-spaces-from-content: 1 10 | line-length: disable 11 | braces: 12 | min-spaces-inside: 0 13 | max-spaces-inside: 1 14 | brackets: 15 | min-spaces-inside: 0 16 | max-spaces-inside: 0 17 | indentation: enable 18 | -------------------------------------------------------------------------------- /Taskfile.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: '3' 3 | 4 | vars: 5 | PROJECT_DIR: '{{.ROOT_DIR}}' 6 | KUBERNETES_DIR: "{{.ROOT_DIR}}/kubernetes" 7 | ANSIBLE_DIR: "{{.ROOT_DIR}}/ansible" 8 | TERRAFORM_DIR: "{{.ROOT_DIR}}/terraform" 9 | 10 | dotenv: [".config.env"] 11 | 12 | env: 13 | KUBECONFIG: "{{.ROOT_DIR}}/kubeconfig" 14 | 15 | includes: 16 | ro: .taskfiles/Rook/Tasks.yaml 17 | ansible: .taskfiles/Ansible/Tasks.yaml 18 | k8s: .taskfiles/Kubernetes/Tasks.yaml 19 | terraform: .taskfiles/Terraform/Tasks.yaml 20 | precommit: .taskfiles/Pre-Commit/Tasks.yaml 21 | volsync: .taskfiles/Volsync/Tasks.yaml 22 | 23 | tasks: 24 | init: 25 | desc: Initialize workstation dependencies with Brew 26 | cmds: 27 | - brew install {{.DEPS}} {{.CLI_ARGS}} 28 | - task: install-gcloud 29 | preconditions: 30 | - sh: command -v brew 31 | msg: | 32 | Homebrew is not installed. Using MacOS, Linux or WSL? 
33 | Head over to https://brew.sh to get up and running. 34 | vars: 35 | DEPS: >- 36 | ansible 37 | direnv 38 | go-task/tap/go-task 39 | helm 40 | ipcalc 41 | jq 42 | kubernetes-cli 43 | kustomize 44 | kyverno 45 | pre-commit 46 | prettier 47 | stern 48 | terraform 49 | tflint 50 | yamllint 51 | yq 52 | 53 | install-gcloud: 54 | desc: Install gcloud CLI (on Ubuntu) 55 | cmds: 56 | - sudo apt-get install apt-transport-https ca-certificates gnupg 57 | - echo "deb https://packages.cloud.google.com/apt cloud-sdk main" | sudo tee -a /etc/apt/sources.list.d/google-cloud-sdk.list 58 | - curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - 59 | - sudo apt-get update && sudo apt-get install google-cloud-cli 60 | status: 61 | - command -v gcloud 62 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | # General settings 3 | nocows = True 4 | executable = /bin/bash 5 | stdout_callback = yaml 6 | force_valid_group_names = ignore 7 | # File/Directory settings 8 | log_path = ~/.ansible/ansible.log 9 | inventory = ./ansible/inventory 10 | roles_path = ~/.ansible/roles:./ansible/roles 11 | collections_path = ~/.ansible/collections 12 | remote_tmp = /tmp 13 | local_tmp = ~/.ansible/tmp 14 | # Fact Caching settings 15 | fact_caching = jsonfile 16 | fact_caching_connection = ~/.ansible/facts_cache 17 | # SSH settings 18 | remote_port = 22 19 | timeout = 60 20 | host_key_checking = False 21 | # Plugin settings 22 | vars_plugins_enabled = host_group_vars,community.sops.sops 23 | 24 | [inventory] 25 | unparsed_is_failed = true 26 | 27 | [privilege_escalation] 28 | become = True 29 | 30 | [ssh_connection] 31 | scp_if_ssh = smart 32 | retries = 3 33 | ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15s 34 | pipelining = True 35 | control_path = %(directory)s/%%h-%%r 36 | -------------------------------------------------------------------------------- /ansible/inventory/examples/all.yaml: -------------------------------------------------------------------------------- 1 | kubevip_address: 0.0.0.0 2 | dns: 1.1.1.1 3 | -------------------------------------------------------------------------------- /ansible/inventory/examples/hosts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kubernetes: 3 | children: 4 | master: 5 | hosts: 6 | k8s-0: 7 | ansible_host: 192.168.42.10 8 | k8s-1: 9 | ansible_host: 192.168.42.11 10 | k8s-2: 11 | ansible_host: 192.168.42.12 12 | worker: 13 | hosts: 14 | k8s-3: 15 | ansible_host: 192.168.42.13 16 | k8s-4: 17 | ansible_host: 192.168.42.14 18 | k8s-5: 19 | ansible_host: 192.168.42.15 20 | disks: 21 | - /dev/disk/by-id/tests 22 | vars: 23 | ansible_user: tests 24 | ansible_ssh_common_args: "-o StrictHostKeyChecking=no" 25 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/kubernetes/k3s.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Below vars are for the xanmanning.k3s role 4 | # ...see https://github.com/PyratLabs/ansible-role-k3s 5 | # 6 | 7 | # (string) Use a specific version of k3s 8 | # renovate: datasource=github-releases depName=k3s-io/k3s 9 | k3s_release_version: "v1.29.6+k3s2" 10 | 11 | # (bool) Install using hard links rather than symbolic links.
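# (per the PyratLabs role README, hard links are needed when k3s upgrades are
# handled by the system-upgrade-controller, which this repo deploys)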
12 | k3s_install_hard_links: true 13 | 14 | # (bool) Escalate user privileges for all tasks 15 | k3s_become: true 16 | 17 | # (bool) Enable debug logging on the k3s service 18 | k3s_debug: false 19 | 20 | # (bool) Enable etcd embedded datastore 21 | k3s_etcd_datastore: true 22 | 23 | # (bool) Allow the use of unsupported configurations in k3s 24 | k3s_use_unsupported_config: true 25 | 26 | k3s_use_experimental: true 27 | 28 | # (string) Control Plane registration address 29 | k3s_registration_address: "{{ kubevip_address }}" 30 | 31 | # (list) A list of URLs to deploy on the primary control plane. Read notes below. 32 | k3s_server_manifests_urls: 33 | # Kube-vip 34 | - url: https://raw.githubusercontent.com/kube-vip/kube-vip/main/docs/manifests/rbac.yaml 35 | filename: kube-vip-rbac.yaml 36 | # Tigera Operator 37 | - url: https://raw.githubusercontent.com/projectcalico/calico/v3.25.0/manifests/tigera-operator.yaml 38 | filename: calico-tigera-operator.yaml 39 | # ArgoCD 40 | - url: https://raw.githubusercontent.com/Tyler-Cash/homelab/master/kubernetes/manifests/argocd/orchestrator/manifest-apps.yaml 41 | filename: argocd-app-of-apps.yaml 42 | # Prometheus Operator 43 | - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml 44 | filename: prometheus-alertmanagerconfigs.yaml 45 | - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml 46 | filename: prometheus-alertmanagers.yaml 47 | - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml 48 | filename: prometheus-podmonitors.yaml 49 | - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml 50 | filename: prometheus-probes.yaml 51 | - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml 52 | filename: prometheus-prometheuses.yaml 53 | - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml 54 | filename: prometheus-prometheusrules.yaml 55 | - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml 56 | filename: prometheus-servicemonitors.yaml 57 | - url: https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.61.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml 58 | filename: prometheus-thanosrulers.yaml 59 | 60 | # (list) A flat list of templates to deploy on the primary control plane 61 | # /var/lib/rancher/k3s/server/manifests 62 | k3s_server_manifests_templates: 63 | - calico-installation.yml.j2 64 | - kube-vip-daemonset.yml.j2 65 | 66 | 67 | k3s_registries: 68 | mirrors: 69 | "*": 70 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/kubernetes/os.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # (string) Timezone for the servers 3 | timezone: "Australia/Sydney" 4 | 5 | # (list) Additional ssh public keys to add to the nodes 6 | # ssh_authorized_keys: 7 
| 8 | fedora: 9 | packages: 10 | - dnf-plugin-system-upgrade 11 | - dnf-utils 12 | - hdparm 13 | - htop 14 | - ipvsadm 15 | - lm_sensors 16 | - nano 17 | - nvme-cli 18 | - socat 19 | - python3-kubernetes 20 | - python3-libselinux 21 | - python3-pyyaml 22 | 23 | ubuntu: 24 | packages: 25 | - hdparm 26 | - htop 27 | - ipvsadm 28 | - lm-sensors 29 | - nano 30 | - nfs-common 31 | - nvme-cli 32 | - socat 33 | - python3-kubernetes 34 | - python3-yaml 35 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/master/k3s.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://rancher.com/docs/k3s/latest/en/installation/install-options/server-config/ 3 | # https://github.com/PyratLabs/ansible-role-k3s 4 | 5 | # (bool) Specify if a host (or host group) are part of the control plane 6 | k3s_control_node: true 7 | 8 | # (dict) k3s settings for all control-plane nodes 9 | k3s_server: 10 | node-ip: "{{ ansible_host }}" 11 | tls-san: 12 | - "{{ kubevip_address }}" 13 | # Disable Docker - this will use the default containerd CRI 14 | docker: false 15 | flannel-backend: "none" # This needs to be in quotes 16 | disable: 17 | # Disable flannel - replaced with Calico 18 | - flannel 19 | # Disable local-path-provisioner - installed with Flux 20 | - local-storage 21 | # Disable metrics-server - installed with Flux 22 | - metrics-server 23 | # Disable servicelb - replaced with metallb and installed with Flux 24 | - servicelb 25 | # Disable traefik - replaced with ingress-nginx and installed with Flux 26 | - traefik 27 | disable-network-policy: true 28 | disable-cloud-controller: true 29 | write-kubeconfig-mode: "644" 30 | # Network CIDR to use for pod IPs 31 | cluster-cidr: "10.42.0.0/16" 32 | # Network CIDR to use for service IPs 33 | service-cidr: "10.43.0.0/16" 34 | embedded-registry: true 35 | kubelet-arg: 36 | # Don't pull /etc/resolv.conf from host 37 | - "resolv-conf=/etc/resolv.conf" 38 | - "config=/etc/rancher/k3s/kubelet.config" 39 | - "v=0" 40 | kube-controller-manager-arg: 41 | # Required to monitor kube-controller-manager with kube-prometheus-stack 42 | - "bind-address=0.0.0.0" 43 | - "terminated-pod-gc-threshold=10" 44 | kube-proxy-arg: 45 | # Required to monitor kube-proxy with kube-prometheus-stack 46 | - "metrics-bind-address=0.0.0.0" 47 | kube-scheduler-arg: 48 | # Required to monitor kube-scheduler with kube-prometheus-stack 49 | - "bind-address=0.0.0.0" 50 | # Required to monitor etcd with kube-prometheus-stack 51 | etcd-expose-metrics: true 52 | kube-apiserver-arg: 53 | # Required for HAProxy health-checks 54 | - "anonymous-auth=true" 55 | etcd-arg: [ 'heartbeat-interval=500', 'election-timeout=5000', 'snapshot-count=5000', 'log-level=warn', 'auto-compaction-retention=24h', 'auto-compaction-mode=periodic' ] 56 | -------------------------------------------------------------------------------- /ansible/inventory/group_vars/worker/k3s.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/ 3 | # https://github.com/PyratLabs/ansible-role-k3s 4 | 5 | # (bool) Specify if a host (or host group) are part of the control plane 6 | k3s_control_node: false 7 | 8 | # (dict) k3s settings for all worker nodes 9 | k3s_agent: 10 | node-ip: "{{ ansible_host }}" 11 | embedded-registry: true 12 | kubelet-arg: 13 | # Don't pull /etc/resolv.conf from host 14 | - 
"resolv-conf=/etc/resolv.conf" 15 | - "eviction-hard=memory.available<500Mi" 16 | -------------------------------------------------------------------------------- /ansible/inventory/host_vars/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tyler-Cash/homelab/1f00eb07f19e805bb9047efc2d0ad2e9053e08ac/ansible/inventory/host_vars/.gitkeep -------------------------------------------------------------------------------- /ansible/playbooks/cluster-nuke.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - worker 5 | become: true 6 | gather_facts: true 7 | any_errors_fatal: true 8 | vars_prompt: 9 | - name: nuke 10 | prompt: |- 11 | Are you sure you want to nuke this cluster? 12 | Type YES I WANT TO DESTROY THIS CLUSTER to proceed 13 | default: "n" 14 | private: false 15 | pre_tasks: 16 | - name: Check for confirmation 17 | ansible.builtin.fail: 18 | msg: Aborted nuking the cluster 19 | when: nuke != 'YES I WANT TO DESTROY THIS CLUSTER' 20 | 21 | - name: Pausing for 5 seconds... 22 | ansible.builtin.pause: 23 | seconds: 5 24 | tasks: 25 | - name: Uninstall k3s 26 | ansible.builtin.include_role: 27 | name: xanmanning.k3s 28 | public: true 29 | vars: 30 | k3s_state: uninstalled 31 | - name: Gather list of CNI files 32 | ansible.builtin.find: 33 | paths: /etc/cni/net.d 34 | patterns: "*" 35 | hidden: true 36 | register: directory_contents 37 | - name: Delete CNI files 38 | ansible.builtin.file: 39 | path: "{{ item.path }}" 40 | state: absent 41 | loop: "{{ directory_contents.files }}" 42 | - name: Delete rook directory 43 | file: 44 | state: absent 45 | path: /var/lib/rook 46 | - name: wipe disks as needed 47 | ansible.builtin.shell: | 48 | wipefs -a {{ item }} 49 | dd if=/dev/zero of={{ item }} bs=512 count=1 conv=notrunc 50 | loop: "{{ disks }}" 51 | register: wipe 52 | changed_when: wipe.stdout | length > 0 -------------------------------------------------------------------------------- /ansible/playbooks/cluster-reboot.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - worker 5 | become: true 6 | gather_facts: true 7 | any_errors_fatal: true 8 | pre_tasks: 9 | - name: Pausing for 5 seconds... 10 | ansible.builtin.pause: 11 | seconds: 5 12 | tasks: 13 | - name: Reboot 14 | ansible.builtin.reboot: 15 | msg: Rebooting nodes 16 | reboot_timeout: 3600 17 | -------------------------------------------------------------------------------- /ansible/playbooks/cluster-sudoers.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master 4 | - worker 5 | become: true 6 | gather_facts: true 7 | any_errors_fatal: true 8 | pre_tasks: 9 | - name: Pausing for 5 seconds... 
10 | ansible.builtin.pause: 11 | seconds: 5 12 | tasks: 13 | - name: Make sure we have a 'wheel' group 14 | group: 15 | name: wheel 16 | state: present 17 | 18 | - name: Allow 'wheel' group to have passwordless sudo 19 | lineinfile: 20 | dest: /etc/sudoers 21 | state: present 22 | regexp: '^%wheel' 23 | line: '%wheel ALL=(ALL:ALL) NOPASSWD:ALL' 24 | validate: 'visudo -cf %s' 25 | 26 | - name: Add sudoers users to wheel group 27 | user: 28 | name={{ ansible_user }} 29 | groups=wheel 30 | append=yes 31 | state=present 32 | createhome=yes 33 | 34 | - name: Creating a file with content 35 | copy: 36 | dest: "/etc/sudoers.d/{{ ansible_user }}" 37 | content: "{{ ansible_user }} ALL=(ALL) NOPASSWD:ALL" -------------------------------------------------------------------------------- /ansible/playbooks/configs/kubelet.config: -------------------------------------------------------------------------------- 1 | apiVersion: kubelet.config.k8s.io/v1beta1 2 | kind: KubeletConfiguration 3 | shutdownGracePeriod: 60s 4 | shutdownGracePeriodCriticalPods: 60s 5 | -------------------------------------------------------------------------------- /ansible/playbooks/templates/calico-installation.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: operator.tigera.io/v1 3 | kind: Installation 4 | metadata: 5 | name: default 6 | namespace: networking 7 | spec: 8 | registry: quay.io 9 | imagePath: calico 10 | calicoNetwork: 11 | # https://projectcalico.docs.tigera.io/networking/ip-autodetection 12 | nodeAddressAutodetectionV4: 13 | cidrs: 14 | - "{{ (ansible_default_ipv4.network + '/' + ansible_default_ipv4.netmask) | ansible.utils.ipaddr('network/prefix') }}" 15 | # Note: The ipPools section cannot be modified post-install. 
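# The cidr below renders from k3s_server['cluster-cidr'], which is set to
# 10.42.0.0/16 in ansible/inventory/group_vars/master/k3s.yaml.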
16 | ipPools: 17 | - blockSize: 26 18 | cidr: "{{ k3s_server['cluster-cidr'] }}" 19 | encapsulation: "VXLANCrossSubnet" 20 | natOutgoing: Enabled 21 | nodeSelector: all() 22 | nodeMetricsPort: 9091 23 | typhaMetricsPort: 9093 24 | template: 25 | spec: 26 | containers: 27 | resources: 28 | requests: 29 | cpu: 4m 30 | -------------------------------------------------------------------------------- /ansible/playbooks/templates/kube-vip-daemonset.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: DaemonSet 4 | metadata: 5 | name: kube-vip 6 | namespace: kube-system 7 | labels: 8 | app.kubernetes.io/instance: kube-vip 9 | app.kubernetes.io/name: kube-vip 10 | spec: 11 | selector: 12 | matchLabels: 13 | app.kubernetes.io/instance: kube-vip 14 | app.kubernetes.io/name: kube-vip 15 | template: 16 | metadata: 17 | labels: 18 | app.kubernetes.io/instance: kube-vip 19 | app.kubernetes.io/name: kube-vip 20 | spec: 21 | containers: 22 | - name: kube-vip 23 | image: ghcr.io/kube-vip/kube-vip:v0.5.9 24 | imagePullPolicy: IfNotPresent 25 | args: ["manager"] 26 | env: 27 | - name: vip_arp 28 | value: "true" 29 | - name: port 30 | value: "6443" 31 | - name: vip_cidr 32 | value: "32" 33 | - name: cp_enable 34 | value: "true" 35 | - name: cp_namespace 36 | value: kube-system 37 | - name: svc_enable 38 | value: "false" 39 | - name: vip_leaderelection 40 | value: "true" 41 | - name: vip_leaseduration 42 | value: "15" 43 | - name: vip_renewdeadline 44 | value: "10" 45 | - name: vip_retryperiod 46 | value: "2" 47 | - name: address 48 | value: "{{ k3s_registration_address }}" 49 | securityContext: 50 | capabilities: 51 | add: ["NET_ADMIN", "NET_RAW"] 52 | hostAliases: 53 | - hostnames: 54 | - kubernetes 55 | ip: 127.0.0.1 56 | hostNetwork: true 57 | serviceAccountName: kube-vip 58 | affinity: 59 | nodeAffinity: 60 | requiredDuringSchedulingIgnoredDuringExecution: 61 | nodeSelectorTerms: 62 | - matchExpressions: 63 | - key: node-role.kubernetes.io/master 64 | operator: Exists 65 | - matchExpressions: 66 | - key: node-role.kubernetes.io/control-plane 67 | operator: Exists 68 | tolerations: 69 | - effect: NoSchedule 70 | operator: Exists 71 | - effect: NoExecute 72 | operator: Exists 73 | -------------------------------------------------------------------------------- /ansible/requirements.txt: -------------------------------------------------------------------------------- 1 | openshift==0.13.2 2 | netaddr==1.3.0 3 | -------------------------------------------------------------------------------- /ansible/requirements.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: community.general 4 | version: 9.5.8 5 | - name: ansible.posix 6 | version: 1.6.2 7 | - name: ansible.utils 8 | version: 5.1.2 9 | - name: kubernetes.core 10 | version: 5.3.0 11 | roles: 12 | - name: xanmanning.k3s 13 | src: https://github.com/PyratLabs/ansible-role-k3s.git 14 | version: v3.4.4 15 | -------------------------------------------------------------------------------- /docs/dr-ceph.md: -------------------------------------------------------------------------------- 1 | ## Ceph disaster recovery 2 | Something has gone terribly wrong and Ceph needs to be restored from backup! 3 | 4 | #### Steps for restoring from S3 5 | 1. Update [.taskfiles/Volsync/ReplicationDestination.tmpl.yaml](.taskfiles/Volsync/ReplicationDestination.tmpl.yaml) with the correct `restoreAsOf` time 6 | 2. Run the below command and wait for all volumes to restore from backup (see the sketch after these steps to watch progress) 7 | ```shell 8 | k get replicationsources --all-namespaces --no-headers | awk '{print $2, $1}' | xargs --max-procs=20 -l bash -c 'task volsync:restore rsrc=$0 namespace=$1' 9 | ``` 10 | 3. Most services should recover within 10-30 minutes 11 | 12 |
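A minimal sketch for watching the restores complete, assuming the `ReplicationDestination` template above (whose manual trigger is named `restore-once`); VolSync records a finished manual sync in `.status.lastManualSync`:
```shell
kubectl get replicationdestinations -A \
  -o custom-columns='NAMESPACE:.metadata.namespace,NAME:.metadata.name,LAST:.status.lastManualSync'
```
A restore is done once its `LAST` column shows `restore-once`.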
-------------------------------------------------------------------------------- /docs/dr-cloudnative-pg.md: -------------------------------------------------------------------------------- 1 | ## CloudNativePG disaster recovery 2 | Something has gone terribly wrong and CloudNativePG needs to be restored from backup! 3 | 4 | #### Steps for restoring from S3 5 | 1. Remove the `spec.backup.barmanObjectStore` field 6 | 2. Apply the manifest 7 | 3. Reconfigure `spec.backup.barmanObjectStore` 8 | 4. Validate backups are working again (see the sketch below) 9 |
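A hedged sketch for step 4 (the cluster name `authentik` and namespace `security` are illustrative; the second command assumes the `cnpg` kubectl plugin is installed):
```shell
# List the CNPG clusters, then confirm continuous backup / WAL archiving is healthy again
kubectl get clusters.postgresql.cnpg.io -A
kubectl cnpg status authentik -n security
```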
-------------------------------------------------------------------------------- /docs/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tyler-Cash/homelab/1f00eb07f19e805bb9047efc2d0ad2e9053e08ac/docs/logo.png -------------------------------------------------------------------------------- /docs/setup-ceph-dashboard.md: -------------------------------------------------------------------------------- 1 | ## Ceph dashboard configuration 2 | The current config should set up the Ceph dashboard completely automatically. 3 | 4 | ### WARNING 5 | If a login loop occurs with SSO (i.e., bouncing over and over between Ceph and Authentik), this is caused by the user not having been created in the dashboard. Refer to the below excerpt from the Ceph docs 6 | > The Ceph Dashboard supports external authentication of users via the SAML 2.0 protocol. You need to first create user accounts and associate them with desired roles, as authorization is performed by the Dashboard. However, the authentication process can be performed by an existing Identity Provider (IdP). 7 | -------------------------------------------------------------------------------- /docs/setup-jellyfin.md: -------------------------------------------------------------------------------- 1 | ## Jellyfin configuration 2 | I should probably investigate just putting this in an init container, but that would require modifying the internal DB state, as plugins don't really have much of an interface outside of the API. In the meantime, I'll just refer to this. 3 | 4 | Run the Terraform onboarding to initialize Authentik as needed. 5 | Once that's onboarded, go to the UI -> Admin interface -> Applications -> Outposts and save the LDAP outpost (Why is this needed???) 6 | Generate a very long password and update the Jellyfin user with that password 7 | 8 | #### Configure LDAP server 9 | LDAP server: ak-outpost-jellyfin-outpost.security.svc.cluster.local 10 | LDAP Port: 636 11 | 12 | Secure LDAP: tick 13 | StartTLS: 14 | Skip SSL/TLS Verification: tick 15 | 16 | Allow Password Change: 17 | Password Reset Url: 18 | LDAP Bind User: cn=jellyfin,dc=ldap,dc=tylercash,dc=dev 19 | 20 | LDAP Base DN: dc=ldap,dc=tylercash,dc=dev 21 | LDAP User Filter: (|(memberOf=cn=jellyfin,ou=groups,dc=ldap,dc=tylercash,dc=dev)(memberOf=cn=jellyfin_admin,ou=groups,dc=ldap,dc=tylercash,dc=dev)(memberOf=cn=admin,ou=groups,dc=ldap,dc=tylercash,dc=dev)) 22 | LDAP Admin Base DN: 23 | LDAP Admin Filter: (|(memberOf=cn=jellyfin_admin,ou=groups,dc=ldap,dc=tylercash,dc=dev)(memberOf=cn=admin,ou=groups,dc=ldap,dc=tylercash,dc=dev)) 24 | 25 | Enable Case Insensitive Username: tick 26 | 27 | LDAP Name Attribute: mail -------------------------------------------------------------------------------- /docs/setup-unifi.md: -------------------------------------------------------------------------------- 1 | ## Unifi configuration 2 | Run the below command to get the IP of the service 3 | ```sh 4 | homelab|git:(master*)⇒ k get svc -n networking unifi 5 | NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 6 | unifi LoadBalancer 10.43.70.17 10.0.90.124 8080:31833/TCP,10001:30487/UDP,8443:31675/TCP,${many_ports} 16m 7 | ``` 8 | 9 | Take the external IP and log into the [controller](https://unifi.k8s.tylercash.dev). 10 | 11 | Navigate to Settings -> System -> Override Inform Host and input the value (from the example above). Note: this IP is usually different from the IP the UI is served on. 12 | ```sh 13 | 10.0.90.124 14 | ``` 15 | 16 | Now that the controller is set up, it's time to onboard the APs. Access the APs like so 17 | ```sh 18 | homelab|git:(master*)⇒ ssh ubnt@${AP_IP_ADDRESS} 19 | ubnt@${AP_IP_ADDRESS}'s password: ubnt 20 | 21 | U6-Pro-BZ.6.0.15# set-inform http://10.0.90.124:8080/inform 22 | ``` 23 | 24 | Now that the APs are configured, go to the [controller](https://unifi.k8s.tylercash.dev) and adopt them; everything should spin up and be auto-configured from the backup.
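A small convenience sketch: the same external IP can be pulled straight from the Service status instead of read off the table above:
```sh
kubectl get svc -n networking unifi -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
```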
-------------------------------------------------------------------------------- /docs/startup.md: -------------------------------------------------------------------------------- 1 | # Startup guide for starting the cluster 2 | 3 | Follow onedr0p's guide on installing k3s. It's reproduced here because this project will mostly be branching off of it, so upstream reorgs may not be reflected here. 4 | 5 | Note: although the steps are listed in sequence, it's possible to run terraform:apply in parallel once ansible:install is underway. 6 | 7 | ### ⚡ Preparing Fedora or Ubuntu Server with Ansible 8 | 9 | 📍 Here we will be running an Ansible playbook to prepare Fedora or Ubuntu Server for running a Kubernetes cluster. 10 | 11 | 📍 Nodes are not security hardened by default; you can do this with [dev-sec/ansible-collection-hardening](https://github.com/dev-sec/ansible-collection-hardening) or similar if supported. This is an advanced configuration and generally not recommended unless you want to [DevSecOps](https://www.ibm.com/topics/devsecops) your cluster and nodes. 12 | 13 | 1. Install the Ansible deps 14 | 15 | ```sh 16 | task ansible:init 17 | ``` 18 | 19 | 2. Verify Ansible can view your config 20 | 21 | ```sh 22 | task ansible:list 23 | ``` 24 | 3. Set up nodes to be accessed with only the configured SSH keys 25 | ```sh 26 | task ansible:sudoers 27 | ``` 28 | 4. Verify Ansible can ping your nodes (also validates root access) 29 | 30 | ```sh 31 | task ansible:ping 32 | ``` 33 | 34 | 5. Run the Fedora/Ubuntu Server Ansible prepare playbook 35 | 36 | ```sh 37 | task ansible:prepare 38 | ``` 39 | 40 | 6. Reboot the nodes (if not done in step 5) 41 | 42 | ```sh 43 | task ansible:force-reboot 44 | ``` 45 | 46 | ### ⛵ Installing k3s with Ansible 47 | 48 | 📍 Here we will be running an Ansible playbook to install [k3s](https://k3s.io/) with [this](https://galaxy.ansible.com/xanmanning/k3s) wonderful k3s Ansible galaxy role. After completion, Ansible will drop a `kubeconfig` in `./kubeconfig` for interacting with your cluster via `kubectl`. 49 | 50 | ☢️ If you run into problems, you can run `task ansible:nuke` to destroy the k3s cluster and start over. 51 | 52 | 1. Verify Ansible can view your config 53 | 54 | ```sh 55 | task ansible:list 56 | ``` 57 | 58 | 2. Verify Ansible can ping your nodes 59 | 60 | ```sh 61 | task ansible:ping 62 | ``` 63 | 64 | 3. Install k3s with Ansible 65 | 66 | ```sh 67 | task ansible:install 68 | ``` 69 | 4. Move the kubeconfig 70 | ```sh 71 | ``` 72 | 5. Verify the nodes are online 73 | 74 | ```sh 75 | kubectl get nodes 76 | # NAME STATUS ROLES AGE VERSION 77 | # k8s-0 Ready control-plane,master 4d20h v1.21.5+k3s1 78 | # k8s-1 Ready worker 4d20h v1.21.5+k3s1 79 | ``` 80 | 81 | ### Setup cloud components 82 | 83 | 📍 The cluster requires some cloud-based components to be created. GCP is used for secret encryption, so we can store secrets and decrypt them with a single credential that can be regenerated. Cloudflare is used for DNS components; we'll also set up this information here. The necessary configs need to be put into the Ansible configs to allow this to be auto-configured. 84 | 85 | 1. Log in to gcloud 86 | ```sh 87 | gcloud auth application-default login 88 | ``` 89 | 90 | 2. Plan the Terraform changes 91 | ```sh 92 | task terraform:plan 93 | ``` 94 | 95 | 3. Onboard all cloud infrastructure 96 | ```sh 97 | task terraform:apply -- -auto-approve -target=module.dns 98 | ``` 99 | 100 | ### Start syncing Argo components 101 | 102 | 📍 Once the cluster is up, Argo will start syncing through everything gradually. To speed things up, we can sync the 'core' services first, which will allow everything to sync through successfully. These were the ones I manually synced to speed things along.
103 | 104 | ```sh 105 | argocd app sync argocd 106 | argocd app sync metallb 107 | argocd app sync metallb-manifests 108 | argocd app sync rook-operator 109 | argocd app sync rook-ceph 110 | argocd app sync cert-manager 111 | argocd app sync kubed 112 | task terraform:apply -- -auto-approve -target=module.secrets_storage 113 | ``` 114 | 115 | Wait for Storage to come up, Authentik (Aka all UIs) depend on the Rook cluster coming up 116 | ```shell 117 | kubectl get -n storage pod -w 118 | ``` 119 | -------------------------------------------------------------------------------- /kubernetes/helm/argocd/argocd/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: argo-cd 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: argo-cd 8 | version: 8.0.14 9 | repository: https://argoproj.github.io/argo-helm -------------------------------------------------------------------------------- /kubernetes/helm/argocd/argocd/manifests/public-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: argocd-public-ingress 5 | annotations: 6 | nginx.ingress.kubernetes.io/use-regex: "true" 7 | external-dns.home.arpa/enabled: "true" 8 | nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" 9 | nginx.org/websocket-services: "argocd-server" 10 | spec: 11 | tls: 12 | - hosts: 13 | - argocd-public.k8s.tylercash.dev 14 | secretName: argocd-public-tylercash-dev 15 | rules: 16 | - host: argocd-public.k8s.tylercash.dev 17 | http: 18 | paths: 19 | - path: /api/webhook 20 | pathType: Prefix 21 | backend: 22 | service: 23 | name: argocd-server 24 | port: 25 | name: https 26 | -------------------------------------------------------------------------------- /kubernetes/helm/argocd/argocd/values.yaml: -------------------------------------------------------------------------------- 1 | argo-cd: 2 | global: 3 | domain: &host argocd.k8s.tylercash.dev 4 | addPrometheusAnnotations: true 5 | priorityClassName: system-cluster-critical 6 | 7 | redis-ha: 8 | enabled: true 9 | topologySpreadConstraints: 10 | enabled: true 11 | maxSkew: 1 12 | topologyKey: kubernetes.io/hostname 13 | pdb: 14 | enabled: true 15 | minAvailable: 1 16 | resources: 17 | requests: 18 | cpu: 100m 19 | memory: 300Mi 20 | 21 | certificate: 22 | enabled: true 23 | 24 | server: 25 | metrics: 26 | enabled: true 27 | replicas: 2 28 | pdb: 29 | enabled: true 30 | minAvailable: 1 31 | ingress: 32 | enabled: true 33 | annotations: 34 | nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" 35 | nginx.org/websocket-services: "argocd-server" 36 | extraTls: 37 | - hosts: 38 | - *host 39 | secretName: argocd-tls 40 | 41 | configs: 42 | params: 43 | applicationsetcontroller.policy: "create-only" 44 | controller.diff.server.side: "true" 45 | cm: 46 | admin.enabled: "false" 47 | url: https://argocd.k8s.tylercash.dev 48 | exec.enabled: "true" 49 | resource: 50 | customizations: 51 | ignoreDifferences: 52 | all: | 53 | jqPathExpressions: 54 | - .spec.template.spec.labels[] | select(.name == "*kyverno.io/*") 55 | - .spec.template.spec.annotations[] | select(.name == "*kyverno.io/*") 56 | - .metadata.labels[] | select(.name == "policies.kyverno.io/*") 57 | - .metadata.annotations[] | select(.name == "policies.kyverno.io/*") 58 | admissionregistration.k8s.io_MutatingWebhookConfiguration: | 59 | jqPathExpressions: 60 | - '.webhooks[]?.clientConfig.caBundle' 61 
| ClusterPolicy.kyverno.io: | 62 | jqPathExpressions: 63 | - .spec.rules[] | select(.name|test("autogen-.")) 64 | Deployment.apps: &depoymentExpressions | 65 | jqPathExpressions: 66 | - .spec.template.spec.containers[].volumeMounts | select(.name == "timezone") 67 | - .spec.template.volumes[] | select(.name == "timezone") 68 | 69 | exclusions: | 70 | - apiGroups: 71 | - kyverno.io 72 | kinds: 73 | - AdmissionReport 74 | - BackgroundScanReport 75 | - ClusterAdmissionReport 76 | - ClusterBackgroundScanReport 77 | clusters: 78 | - '*' 79 | compareoptions: | 80 | ignoreAggregatedRoles: true 81 | ignoreResourceStatusField: crd 82 | oidc.config: | 83 | name: Authentik 84 | issuer: "https://authentik.k8s.tylercash.dev/application/o/argocd/" 85 | clientID: $argocd-oidc-secrets:oidc.authentik.client_id 86 | clientSecret: $argocd-oidc-secrets:oidc.authentik.client_secret 87 | requestedScopes: 88 | - openid 89 | - profile 90 | - email 91 | oidc.tls.insecure.skip.verify: true 92 | 93 | rbac: 94 | policy.csv: | 95 | g, argo-admin, role:admin 96 | 97 | controller: 98 | metrics: 99 | enabled: true 100 | replicas: 2 101 | pdb: 102 | enabled: true 103 | minAvailable: 1 104 | 105 | repoServer: 106 | metrics: 107 | enabled: true 108 | replicas: 2 109 | 110 | applicationSet: 111 | replicaCount: 2 112 | 113 | dex: 114 | metrics: 115 | enabled: true 116 | enabled: true 117 | pdb: 118 | enabled: true 119 | minAvailable: 1 120 | -------------------------------------------------------------------------------- /kubernetes/helm/home-automation/hassio/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: hassio 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/home-automation/hassio/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: ghcr.io/home-operations/home-assistant 4 | tag: 2025.5.3@sha256:a480637f5064050f27e053a756ef2083b4346656e7c15713b574cfb1a9bbf3af 5 | 6 | env: 7 | TZ: &timezone Australia/Sydney 8 | 9 | service: 10 | main: 11 | type: LoadBalancer 12 | ports: 13 | http: 14 | port: 8123 15 | 16 | ingress: 17 | main: 18 | enabled: true 19 | annotations: 20 | nginx.org/websocket-services: hassio 21 | external-dns.home.arpa/enabled: "true" 22 | hosts: 23 | - host: &host "hassio.k8s.tylercash.dev" 24 | paths: 25 | - path: / 26 | pathType: Prefix 27 | tls: 28 | - secretName: hassio-letsencrypt-certificate 29 | hosts: 30 | - *host 31 | addons: 32 | codeserver: 33 | enabled: true 34 | image: 35 | repository: ghcr.io/coder/code-server 36 | tag: 4.100.3 37 | env: 38 | TZ: *timezone 39 | workingDir: "/config" 40 | args: 41 | - --auth 42 | - "none" 43 | - --user-data-dir 44 | - "/config/.vscode" 45 | - --extensions-dir 46 | - "/config/.vscode" 47 | ingress: 48 | enabled: true 49 | annotations: 50 | kubernetes.io/ingress.class: nginx 51 | nginx.ingress.kubernetes.io/proxy-read-timeout: "360000" 52 | nginx.ingress.kubernetes.io/proxy-send-timeout: "360000" 53 | nginx.org/websocket-services: hassio-codeserver 54 | nginx.ingress.kubernetes.io/whitelist-source-range: "10.0.0.0/8" 55 | hosts: 56 | - host: &vs-host "hassio-code.k8s.tylercash.dev" 57 | paths: 58 | - path: / 59 | pathType: Prefix 60 | tls: 61 | - secretName: 
hassio-code-letsencrypt-certificate 62 | hosts: 63 | - *vs-host 64 | volumeMounts: 65 | - name: config 66 | mountPath: /config 67 | 68 | probes: 69 | liveness: 70 | enabled: false 71 | readiness: 72 | enabled: false 73 | startup: 74 | enabled: false 75 | 76 | podSecurityContext: 77 | runAsUser: 568 78 | runAsGroup: 568 79 | fsGroup: 568 80 | fsGroupChangePolicy: "OnRootMismatch" 81 | supplementalGroups: 82 | - 100 83 | 84 | persistence: 85 | config: 86 | enabled: true 87 | storageClass: ceph-block 88 | accessMode: ReadWriteOnce 89 | size: 10Gi 90 | 91 | resources: 92 | requests: 93 | cpu: 10m 94 | memory: 500Mi 95 | limits: 96 | memory: 750Mi 97 | -------------------------------------------------------------------------------- /kubernetes/helm/kyverno/kyverno/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: kyverno 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: kyverno 8 | version: 3.4.2 9 | repository: https://kyverno.github.io/kyverno/ -------------------------------------------------------------------------------- /kubernetes/helm/kyverno/kyverno/manifests/add-online-snapshot-label-to-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: add-online-snapshot-labels 5 | annotations: 6 | policies.kyverno.io/title: Add online snapshot label 7 | policies.kyverno.io/severity: medium 8 | policies.kyverno.io/subject: Label 9 | policies.kyverno.io/description: >- 10 | Automatically enable online snapshots unless volume has been excluded 11 | spec: 12 | rules: 13 | - name: whitelist 14 | match: 15 | any: 16 | - resources: 17 | kinds: ["PersistentVolumeClaim"] 18 | exclude: 19 | any: 20 | - resources: 21 | selector: 22 | matchLabels: 23 | snapshot.home.arpa/enabled: "false" 24 | - resources: 25 | selector: 26 | matchLabels: 27 | online-snapshot.home.arpa/enabled: "false" 28 | - resources: 29 | selector: 30 | matchLabels: 31 | app.kubernetes.io/created-by: "volsync" 32 | - resources: 33 | selector: 34 | matchLabels: 35 | release: "loki" 36 | - resources: 37 | selector: 38 | matchLabels: 39 | app.kubernetes.io/name: loki 40 | mutate: 41 | patchStrategicMerge: 42 | metadata: 43 | labels: 44 | +(online-snapshot.home.arpa/enabled): "true" 45 | -------------------------------------------------------------------------------- /kubernetes/helm/kyverno/kyverno/manifests/add-snapshot-label-to-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: add-local-snapshot-labels 5 | annotations: 6 | policies.kyverno.io/title: Add local snapshot label 7 | policies.kyverno.io/category: Sample 8 | policies.kyverno.io/minversion: 1.6.0 9 | policies.kyverno.io/severity: medium 10 | policies.kyverno.io/subject: Label 11 | policies.kyverno.io/description: >- 12 | Automatically enable local snapshots unless volume has been excluded 13 | spec: 14 | rules: 15 | - name: whitelist 16 | match: 17 | any: 18 | - resources: 19 | kinds: ["PersistentVolumeClaim"] 20 | exclude: 21 | any: 22 | - resources: 23 | selector: 24 | matchLabels: 25 | snapshot.home.arpa/enabled: "false" 26 | - resources: 27 | selector: 28 | matchLabels: 29 | app.kubernetes.io/created-by: "volsync" 30 | - resources: 31 | selector: 32 | matchLabels: 33 | release: "loki" 34 | - resources: 35 | selector: 36 | 
matchLabels: 37 | app.kubernetes.io/name: loki 38 | mutate: 39 | patchStrategicMerge: 40 | metadata: 41 | labels: 42 | +(snapshot.home.arpa/enabled): "true" 43 | -------------------------------------------------------------------------------- /kubernetes/helm/kyverno/kyverno/manifests/create-replication-for-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: create-replication-for-pvc 5 | spec: 6 | rules: 7 | - name: create-replication-for-pvc 8 | context: 9 | - name: storageClassName 10 | apiCall: 11 | method: GET 12 | urlPath: "/api/v1/namespaces/{{request.object.metadata.namespace}}/persistentvolumeclaims/{{request.object.metadata.name}}" 13 | jmesPath: "spec.storageClassName" 14 | match: &match 15 | any: 16 | - resources: 17 | kinds: 18 | - PersistentVolumeClaim 19 | selector: 20 | matchLabels: 21 | online-snapshot.home.arpa/enabled: "true" 22 | exclude: &exclude 23 | any: 24 | - resources: 25 | kinds: 26 | - PersistentVolumeClaim 27 | selector: 28 | matchLabels: 29 | cnpg.io/pvcRole: "PG_DATA" 30 | - resources: 31 | kinds: 32 | - PersistentVolumeClaim 33 | selector: 34 | matchLabels: 35 | online-snapshot.home.arpa/enabled: "false" 36 | - resources: 37 | kinds: 38 | - PersistentVolumeClaim 39 | selector: 40 | matchLabels: 41 | app.kubernetes.io/name: prometheus 42 | - resources: 43 | kinds: 44 | - PersistentVolumeClaim 45 | selector: 46 | matchLabels: 47 | app.kubernetes.io/name: alertmanager 48 | - resources: 49 | kinds: 50 | - PersistentVolumeClaim 51 | selector: 52 | matchLabels: 53 | app.kubernetes.io/name: redis 54 | generate: 55 | synchronize: true 56 | generateExisting: true 57 | orphanDownstreamOnPolicyDelete: false 58 | namespace: &repl_namespace "{{request.object.metadata.namespace}}" 59 | name: &repl_name "{{request.object.metadata.name}}" 60 | apiVersion: &repl_api volsync.backube/v1alpha1 61 | kind: &repl_kind ReplicationSource 62 | data: 63 | apiVersion: *repl_api 64 | kind: *repl_kind 65 | metadata: 66 | name: *repl_name 67 | namespace: *repl_namespace 68 | spec: 69 | sourcePVC: "{{request.object.metadata.name}}" 70 | trigger: 71 | schedule: "0 4 * * *" 72 | restic: 73 | repository: &restic_name "restic-{{request.object.metadata.name}}" 74 | pruneIntervalDays: 30 75 | retain: 76 | daily: 1 77 | weekly: 1 78 | monthly: 1 79 | yearly: 1 80 | storageClassName: "{{storageClassName}}" 81 | copyMethod: Clone 82 | cacheCapacity: "{{request.object.spec.resources.requests.storage}}" 83 | moverSecurityContext: 84 | runAsUser: 0 85 | runAsGroup: 0 86 | fsGroup: 0 87 | - name: create-restic-config 88 | context: 89 | - name: restic 90 | configMap: 91 | name: restic-config 92 | namespace: kyverno 93 | match: *match 94 | exclude: *exclude 95 | generate: 96 | synchronize: true 97 | generateExisting: true 98 | orphanDownstreamOnPolicyDelete: false 99 | namespace: &restic_namespace "{{request.object.metadata.namespace}}" 100 | name: *restic_name 101 | apiVersion: &restic_api v1 102 | kind: &restic_kind Secret 103 | data: 104 | apiVersion: *restic_api 105 | kind: *restic_kind 106 | metadata: 107 | name: *restic_name 108 | namespace: *restic_namespace 109 | stringData: 110 | RESTIC_REPOSITORY: "{{restic.data.RESTIC_REPOSITORY}}:{{request.object.metadata.name}}" 111 | RESTIC_PASSWORD: "{{restic.data.RESTIC_PASSWORD}}" 112 | GOOGLE_PROJECT_ID: "{{restic.data.GOOGLE_PROJECT_ID}}" 113 | GOOGLE_APPLICATION_CREDENTIALS: "{{restic.data.GOOGLE_APPLICATION_CREDENTIALS}}" 114 | 
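For reference, here is roughly what the first rule above generates once a PVC carries the `online-snapshot.home.arpa/enabled: "true"` label. The PVC name (`app-config`), namespace (`media`), size (`10Gi`), and storage class (`ceph-block`) are hypothetical stand-ins; everything else follows the policy's `data` template, and the second rule would generate the matching `restic-app-config` Secret from the `restic-config` ConfigMap in the kyverno namespace:

```yaml
# Hypothetical ReplicationSource produced by create-replication-for-pvc for a
# 10Gi ceph-block PVC named "app-config" in the "media" namespace.
apiVersion: volsync.backube/v1alpha1
kind: ReplicationSource
metadata:
  name: app-config
  namespace: media
spec:
  sourcePVC: app-config
  trigger:
    schedule: "0 4 * * *"            # daily restic backup at 04:00
  restic:
    repository: restic-app-config    # Secret created by the second rule
    pruneIntervalDays: 30
    retain:
      daily: 1
      weekly: 1
      monthly: 1
      yearly: 1
    storageClassName: ceph-block
    copyMethod: Clone                # back up a clone, not the live volume
    cacheCapacity: 10Gi              # sized from the PVC's storage request
    moverSecurityContext:
      runAsUser: 0
      runAsGroup: 0
      fsGroup: 0
```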
-------------------------------------------------------------------------------- /kubernetes/helm/kyverno/kyverno/manifests/generate-monthly-snapshots.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: generate-monthly-snapshots 5 | spec: 6 | rules: 7 | - name: generate-monthly-snapshots 8 | match: 9 | any: 10 | - resources: 11 | kinds: 12 | - Namespace 13 | generate: 14 | namespace: "{{request.object.metadata.name}}" 15 | synchronize: true 16 | orphanDownstreamOnPolicyDelete: false 17 | apiVersion: snapscheduler.backube/v1 18 | kind: SnapshotSchedule 19 | name: monthly-snapshot-schedule 20 | data: 21 | apiVersion: snapscheduler.backube/v1 22 | kind: SnapshotSchedule 23 | metadata: 24 | name: monthly-snapshot-schedule 25 | spec: 26 | disabled: false 27 | claimSelector: 28 | matchLabels: 29 | snapshot.home.arpa/enabled: "true" 30 | retention: 31 | maxCount: 2 32 | schedule: "@monthly" 33 | snapshotTemplate: 34 | snapshotClassName: ceph-block-snapshot 35 | -------------------------------------------------------------------------------- /kubernetes/helm/kyverno/kyverno/manifests/ingress-apply-internal-annotations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: update-ingress-annotations 5 | annotations: 6 | policies.kyverno.io/title: Update ingress annotations 7 | policies.kyverno.io/subject: Ingress 8 | policies.kyverno.io/description: >- 9 | This policy applies common annotations to ingresses, enables 10 | external-dns where requested, and sets an explicit RFC1918 whitelist 11 | for any ingresses that are not exposed to the internet. 12 | spec: 13 | rules: 14 | - name: apply-common-ingress-configs 15 | match: 16 | any: 17 | - resources: 18 | kinds: ["Ingress"] 19 | mutate: 20 | patchStrategicMerge: 21 | metadata: 22 | annotations: 23 | +(kubernetes.io/tls-acme): "true" 24 | +(cert-manager.io/cluster-issuer): "prod-issuer" 25 | +(kubernetes.io/ingress.class): "nginx" 26 | +(kubernetes.io/ingress-allow-http): "false" 27 | +(nginx.ingress.kubernetes.io/proxy-read-timeout): "36000" 28 | +(nginx.ingress.kubernetes.io/proxy-send-timeout): "36000" 29 | +(nginx.ingress.kubernetes.io/proxy-buffering): "on" 30 | +(nginx.ingress.kubernetes.io/proxy-buffers-number): "4" 31 | +(nginx.ingress.kubernetes.io/proxy-buffer-size): "512k" 32 | +(nginx.ingress.kubernetes.io/proxy-max-temp-file-size): "512m" 33 | 34 | - name: only-access-locally 35 | match: 36 | any: 37 | - resources: 38 | kinds: ["Ingress"] 39 | exclude: 40 | any: 41 | - resources: 42 | annotations: 43 | external-dns.home.arpa/enabled: "true" 44 | - resources: 45 | annotations: 46 | external-dns.home.arpa/proxied: "true" 47 | mutate: 48 | patchStrategicMerge: 49 | metadata: 50 | annotations: 51 | +(nginx.ingress.kubernetes.io/whitelist-source-range): "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16" 52 | 53 | - name: external-dns 54 | match: 55 | any: 56 | - resources: 57 | kinds: 58 | - Ingress 59 | - Service 60 | annotations: 61 | external-dns.home.arpa/enabled: "true" 62 | mutate: 63 | patchStrategicMerge: 64 | metadata: 65 | annotations: 66 | +(external-dns.alpha.kubernetes.io/target): "ingress.k8s.tylercash.dev" 67 | - name: external-dns-proxied 68 | match: 69 | any: 70 | - resources: 71 | kinds: ["Ingress"] 72 | annotations: 73 | external-dns.home.arpa/proxied: "true" 74 | mutate: 75 | patchStrategicMerge: 76 | metadata: 77 | annotations: 78
| +(external-dns.home.arpa/enabled): "true" 79 | +(external-dns.alpha.kubernetes.io/cloudflare-proxied): "true" 80 | -------------------------------------------------------------------------------- /kubernetes/helm/kyverno/kyverno/manifests/init-inject-tz.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: init-inject-tz 5 | annotations: 6 | policies.kyverno.io/title: Inject timezone 7 | policies.kyverno.io/category: Other 8 | policies.kyverno.io/subject: Pod 9 | policies.kyverno.io/description: >- 10 | This policy injects an initContainer into Pods and sets the TZ 11 | environment variable so the container will have the correct 12 | time. 13 | spec: 14 | rules: 15 | - name: inject-timezone 16 | match: 17 | any: 18 | - resources: 19 | kinds: 20 | - Pod 21 | mutate: 22 | patchStrategicMerge: 23 | spec: 24 | initContainers: 25 | - name: provide-timezone 26 | image: quay.io/k8tz/k8tz:0.8.0@sha256:7bba4420c8decfad816cc77a180f121cfc1e0e51058dec7662837b4ba41812b7 27 | imagePullPolicy: IfNotPresent 28 | resources: 29 | requests: 30 | cpu: 10m 31 | memory: 100Mi 32 | volumeMounts: 33 | - mountPath: /mnt/zoneinfo 34 | name: timezone 35 | args: 36 | - bootstrap 37 | containers: 38 | - (name): "*" 39 | volumeMounts: 40 | - mountPath: /etc/localtime 41 | name: timezone 42 | readOnly: true 43 | subPath: &timezone Australia/Sydney 44 | - mountPath: /usr/share/zoneinfo 45 | name: timezone 46 | readOnly: true 47 | env: 48 | - name: TZ 49 | value: *timezone 50 | volumes: 51 | - name: timezone 52 | emptyDir: 53 | sizeLimit: 100Mi 54 | 55 | -------------------------------------------------------------------------------- /kubernetes/helm/kyverno/kyverno/manifests/namespace-apply-annotations.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: update-namespaces-annotations 5 | annotations: 6 | policies.kyverno.io/title: Update namespaces annotations 7 | policies.kyverno.io/subject: Namespace 8 | policies.kyverno.io/description: >- 9 | This policy applies common annotations to all namespaces 10 | spec: 11 | mutateExistingOnPolicyUpdate: true 12 | rules: 13 | - name: apply-common-namespace-configs 14 | match: 15 | any: 16 | - resources: 17 | kinds: ["Namespace"] 18 | mutate: 19 | targets: 20 | - apiVersion: v1 21 | kind: Namespace 22 | patchStrategicMerge: 23 | metadata: 24 | annotations: 25 | +(volsync.backube/privileged-movers): "true" 26 | -------------------------------------------------------------------------------- /kubernetes/helm/kyverno/kyverno/manifests/set-resources.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kyverno.io/v1 2 | kind: ClusterPolicy 3 | metadata: 4 | name: add-default-resources 5 | annotations: 6 | policies.kyverno.io/title: Add Default Resources 7 | policies.kyverno.io/category: Other 8 | policies.kyverno.io/severity: medium 9 | kyverno.io/kyverno-version: 1.6.0 10 | policies.kyverno.io/minversion: 1.6.0 11 | kyverno.io/kubernetes-version: "1.23" 12 | policies.kyverno.io/subject: Pod 13 | policies.kyverno.io/description: >- 14 | Pods which don't specify at least resource requests are assigned a QoS class 15 | of BestEffort, which can starve other Pods on Nodes of resources. At a minimum, 16 | all Pods should specify resource requests in order to be labeled as the QoS 17 | class Burstable.
This sample mutates any container in a Pod which doesn't 18 | specify memory or cpu requests, applying some sane defaults. 19 | spec: 20 | background: false 21 | rules: 22 | - name: add-default-requests 23 | match: 24 | any: 25 | - resources: 26 | kinds: 27 | - Pod 28 | preconditions: 29 | any: 30 | - key: "{{request.operation || 'BACKGROUND'}}" 31 | operator: AnyIn 32 | value: 33 | - CREATE 34 | - UPDATE 35 | mutate: 36 | patchStrategicMerge: 37 | spec: 38 | containers: 39 | - (name): "*" 40 | resources: 41 | requests: 42 | +(memory): "100Mi" 43 | +(cpu): "10m" 44 | -------------------------------------------------------------------------------- /kubernetes/helm/kyverno/kyverno/values.yaml: -------------------------------------------------------------------------------- 1 | kyverno: 2 | crds: 3 | install: true 4 | 5 | # Caused by https://github.com/kyverno/kyverno/issues/11561 6 | features: 7 | reporting: 8 | validate: false 9 | mutate: false 10 | mutateExisting: false 11 | imageVerify: false 12 | generate: false 13 | 14 | admissionController: 15 | container: 16 | resources: 17 | limits: 18 | memory: 4196Mi 19 | requests: 20 | cpu: 300m 21 | memory: 1024Mi 22 | 23 | replicas: 3 24 | 25 | updateStrategy: 26 | rollingUpdate: 27 | maxSurge: 1 28 | maxUnavailable: 1 29 | type: RollingUpdate 30 | 31 | podDisruptionBudget: 32 | enabled: true 33 | minAvailable: 2 34 | 35 | readinessProbe: 36 | failureThreshold: 20 37 | 38 | rbac: 39 | clusterRole: 40 | extraResources: &extraPermissions 41 | - apiGroups: [ "*" ] 42 | resources: [ "*" ] 43 | verbs: 44 | - "*" 45 | - apiGroups: [ "volsync.backube" ] 46 | resources: [ "replicationsources" ] 47 | verbs: 48 | - create 49 | - apiGroups: ["networking.k8s.io"] 50 | resources: ["ingresses"] 51 | verbs: ["*"] 52 | - apiGroups: [""] 53 | resources: ["secrets"] 54 | verbs: ["*"] 55 | 56 | serviceMonitor: 57 | enabled: true 58 | namespace: monitoring 59 | 60 | backgroundController: 61 | resources: 62 | limits: 63 | memory: 2048Mi 64 | requests: 65 | cpu: 100m 66 | memory: 512Mi 67 | rbac: 68 | clusterRole: 69 | extraResources: *extraPermissions 70 | 71 | replicas: 3 72 | 73 | updateStrategy: 74 | rollingUpdate: 75 | maxSurge: 1 76 | maxUnavailable: 1 77 | type: RollingUpdate 78 | 79 | podDisruptionBudget: 80 | enabled: true 81 | minAvailable: 2 82 | -------------------------------------------------------------------------------- /kubernetes/helm/media/bazarr/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: bazarr 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/media/bazarr/values.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | app-template: 3 | image: 4 | repository: ghcr.io/home-operations/bazarr 5 | tag: 1.5.2 6 | pullPolicy: IfNotPresent 7 | 8 | service: 9 | main: 10 | ports: 11 | http: 12 | port: 6767 13 | 14 | ingress: 15 | main: 16 | enabled: true 17 | hosts: 18 | - host: bazarr.k8s.tylercash.dev 19 | paths: 20 | - path: / 21 | pathType: Prefix 22 | service: 23 | port: 6767 24 | tls: 25 | - secretName: bazarr-letsencrypt-certificate 26 | hosts: 27 | - bazarr.k8s.tylercash.dev 28 | 29 | podSecurityContext: 30 | runAsUser: 568 31 | runAsGroup: 568 32 | fsGroup: 568 33 | fsGroupChangePolicy: "Always" 34
| 35 | persistence: 36 | config: 37 | enabled: true 38 | storageClass: ceph-block 39 | size: 10Gi 40 | labels: 41 | online-snapshot.home.arpa/enabled: "false" 42 | tv: 43 | enabled: true 44 | type: pvc 45 | existingClaim: jellyfin-tv 46 | movies: 47 | enabled: true 48 | type: pvc 49 | existingClaim: jellyfin-movies 50 | backups: 51 | enabled: true 52 | type: pvc 53 | existingClaim: sonarr-backups 54 | 55 | resources: 56 | requests: 57 | cpu: 100m 58 | memory: 256Mi 59 | 60 | initContainers: 61 | get-clean-subs-script: 62 | name: get-clean-subs-script 63 | image: curlimages/curl:8.13.0 64 | command: ['sh', '-c', "curl -o /config/sub-clean.sh https://gist.githubusercontent.com/Tyler-Cash/f77c2bda3e17a1d098543de216454812/raw/8071fc8d7fb9cb61ea4ffac7118c4a9614e6d764/sub-clean.sh && chmod 755 /config/sub-clean.sh"] 65 | volumeMounts: 66 | - name: config 67 | mountPath: "/config" 68 | -------------------------------------------------------------------------------- /kubernetes/helm/media/flaresolverr/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: flaresolverr 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/media/flaresolverr/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: ghcr.io/flaresolverr/flaresolverr 4 | tag: v3.3.24 5 | 6 | service: 7 | main: 8 | ports: 9 | http: 10 | port: 8191 11 | 12 | podSecurityContext: 13 | runAsUser: 1000 14 | 15 | probes: 16 | liveness: 17 | spec: 18 | initialDelaySeconds: 10 19 | periodSeconds: 10 20 | timeoutSeconds: 5 21 | failureThreshold: 6 22 | 23 | resources: 24 | requests: 25 | cpu: 10m 26 | memory: 500Mi 27 | -------------------------------------------------------------------------------- /kubernetes/helm/media/foundryvtt-ddb-proxy/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: foundryvtt-ddb-proxy 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/media/foundryvtt-ddb-proxy/values.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | app-template: 3 | image: 4 | repository: ghcr.io/mrprimate/ddb-proxy 5 | tag: 0.0.25 6 | pullPolicy: IfNotPresent 7 | 8 | service: 9 | main: 10 | ports: 11 | http: 12 | port: 3000 13 | 14 | ingress: 15 | main: 16 | enabled: true 17 | hosts: 18 | - host: &host ddb-proxy.k8s.tylercash.dev 19 | paths: 20 | - path: / 21 | pathType: Prefix 22 | service: 23 | port: 3000 24 | tls: 25 | - secretName: ddb-proxy-letsencrypt-certificate 26 | hosts: 27 | - *host 28 | 29 | resources: 30 | requests: 31 | cpu: 100m 32 | memory: 256Mi 33 | -------------------------------------------------------------------------------- /kubernetes/helm/media/foundryvtt/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: foundryvtt 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version:
3.7.3 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/media/foundryvtt/manifests/foundry-admin-key.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: generators.external-secrets.io/v1alpha1 2 | kind: Password 3 | metadata: 4 | name: foundry-admin-key 5 | spec: 6 | length: 20 7 | allowRepeat: true 8 | symbolCharacters: "" 9 | symbols: 0 10 | 11 | --- 12 | apiVersion: external-secrets.io/v1 13 | 14 | kind: ExternalSecret 15 | metadata: 16 | name: foundry-admin-key 17 | spec: 18 | refreshInterval: "87600h" 19 | target: 20 | name: foundry-admin-key-secret 21 | template: 22 | data: 23 | FOUNDRY_ADMIN_KEY: "{{ .password }}" 24 | dataFrom: 25 | - sourceRef: 26 | generatorRef: 27 | apiVersion: generators.external-secrets.io/v1alpha1 28 | kind: Password 29 | name: "foundry-admin-key" 30 | -------------------------------------------------------------------------------- /kubernetes/helm/media/foundryvtt/manifests/password.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | 3 | kind: ExternalSecret 4 | metadata: 5 | name: media-foundryvtt-es 6 | spec: 7 | refreshInterval: "1h" 8 | secretStoreRef: 9 | kind: ClusterSecretStore 10 | name: gcp-clusterstore 11 | target: 12 | name: media-foundryvtt-secrets 13 | deletionPolicy: Delete 14 | creationPolicy: Owner 15 | template: 16 | engineVersion: v2 17 | templateFrom: 18 | - target: Data 19 | literal: | 20 | {{- .foundry }} 21 | dataFrom: 22 | - extract: 23 | key: "all_secrets" 24 | -------------------------------------------------------------------------------- /kubernetes/helm/media/foundryvtt/values.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | app-template: 3 | controllers: 4 | foundry: 5 | containers: 6 | app: 7 | image: 8 | repository: felddy/foundryvtt 9 | tag: 13 10 | pullPolicy: IfNotPresent 11 | 12 | env: 13 | CONTAINER_CACHE: /cache/container_cache/ 14 | CONTAINER_PATCHES: /cache/container_patches/ 15 | CONTAINER_PRESERVE_OWNER: /* 16 | CONTAINER_URL_FETCH_RETRY: "50" 17 | FOUNDRY_COMPRESS_WEBSOCKET: true 18 | FOUNDRY_VERSION: 12.331 19 | FOUNDRY_HOSTNAME: &host tabletop.k8s.tylercash.dev 20 | FOUNDRY_LOCAL_HOSTNAME: *host 21 | FOUNDRY_MINIFY_STATIC_FILES: true 22 | FOUNDRY_PROTOCOL: "4" 23 | FOUNDRY_PROXY_PORT: "443" 24 | FOUNDRY_PROXY_SSL: true 25 | FOUNDRY_TELEMETRY: false 26 | 27 | 28 | 29 | envFrom: 30 | - secretRef: 31 | name: media-foundryvtt-secrets 32 | - secretRef: 33 | name: foundry-admin-key-secret 34 | 35 | resources: 36 | requests: 37 | cpu: 100m 38 | memory: 256Mi 39 | 40 | probes: 41 | startup: 42 | enabled: true 43 | spec: 44 | failureThreshold: 70 45 | periodSeconds: 5 46 | 47 | service: 48 | app: 49 | controller: foundry 50 | ports: 51 | http: 52 | port: 30000 53 | 54 | ingress: 55 | app: 56 | enabled: true 57 | annotations: 58 | nginx.ingress.kubernetes.io/proxy-body-size: "0" 59 | hosts: 60 | - host: *host 61 | paths: 62 | - path: / 63 | pathType: Prefix 64 | service: 65 | identifier: app 66 | port: 30000 67 | tls: 68 | - secretName: foundry-letsencrypt-certificate 69 | hosts: 70 | - *host 71 | 72 | persistence: 73 | data: 74 | enabled: true 75 | storageClass: ceph-block 76 | accessMode: ReadWriteOnce 77 | size: 10Gi 78 | cache: 79 | enabled: true 80 | storageClass: ceph-block 81 | accessMode: ReadWriteOnce 82 | size: 10Gi 83 | labels: 84 
| snapshot.home.arpa/enabled: "false" 85 | 86 | defaultPodOptions: 87 | securityContext: 88 | runAsUser: 421 89 | runAsGroup: 421 90 | fsGroup: 421 91 | fsGroupChangePolicy: "Always" 92 | -------------------------------------------------------------------------------- /kubernetes/helm/media/jellyfin/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: jellyfin 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/media/jellyfin/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: jellyfin/jellyfin 4 | tag: 10.10.7 5 | 6 | service: 7 | main: 8 | ports: 9 | http: 10 | port: 8096 11 | 12 | affinity: 13 | nodeAffinity: 14 | requiredDuringSchedulingIgnoredDuringExecution: 15 | nodeSelectorTerms: 16 | - matchExpressions: 17 | - key: intel.feature.node.kubernetes.io/gpu 18 | operator: In 19 | values: 20 | - "true" 21 | 22 | ingress: 23 | main: 24 | enabled: true 25 | annotations: 26 | external-dns.home.arpa/enabled: "true" 27 | nginx.org/websocket-services: "jellyfin" 28 | hosts: 29 | - host: &host jellyfin.k8s.tylercash.dev 30 | paths: 31 | - path: / 32 | pathType: Prefix 33 | service: 34 | port: 8096 35 | tls: 36 | - secretName: jellyfin-letsencrypt-certificate 37 | hosts: [*host] 38 | 39 | securityContext: 40 | privileged: true # Required for GPU passthrough 41 | 42 | podSecurityContext: 43 | runAsUser: 568 44 | runAsGroup: 568 45 | fsGroup: 568 46 | fsGroupChangePolicy: "Always" 47 | 48 | persistence: 49 | config: 50 | enabled: true 51 | storageClass: ceph-block 52 | size: 16Gi 53 | cache: 54 | enabled: true 55 | storageClass: ceph-block 56 | accessMode: ReadWriteOnce 57 | size: 50Gi 58 | labels: 59 | snapshot.home.arpa/enabled: "false" 60 | transcodes: 61 | enabled: true 62 | storageClass: ceph-block 63 | accessMode: ReadWriteOnce 64 | size: 50Gi 65 | labels: 66 | snapshot.home.arpa/enabled: "false" 67 | tv: 68 | enabled: true 69 | storageClass: ceph-filesystem-rust 70 | accessMode: ReadWriteMany 71 | size: 30Ti 72 | retain: true 73 | labels: 74 | snapshot.home.arpa/enabled: "false" 75 | movies: 76 | enabled: true 77 | storageClass: ceph-filesystem-rust 78 | accessMode: ReadWriteMany 79 | size: 30Ti 80 | retain: true 81 | labels: 82 | snapshot.home.arpa/enabled: "false" 83 | 84 | metrics: 85 | enabled: true 86 | 87 | probes: 88 | liveness: 89 | spec: 90 | initialDelaySeconds: 10 91 | periodSeconds: 10 92 | timeoutSeconds: 5 93 | failureThreshold: 6 94 | 95 | resources: 96 | limits: 97 | memory: 8Gi 98 | cpu: 10000m 99 | gpu.intel.com/i915: 1 100 | requests: 101 | cpu: 1000m 102 | memory: 1Gi 103 | gpu.intel.com/i915: 1 104 | 105 | nodeSelector: 106 | cpu: fast 107 | -------------------------------------------------------------------------------- /kubernetes/helm/media/lazylibrarian/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: lazylibrarian 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | --------------------------------------------------------------------------------
/kubernetes/helm/media/lazylibrarian/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: linuxserver/lazylibrarian 4 | tag: amd64-version-37322235 5 | pullPolicy: IfNotPresent 6 | 7 | service: 8 | main: 9 | ports: 10 | http: 11 | port: &servicePort 5299 12 | 13 | ingress: 14 | main: 15 | enabled: true 16 | hosts: 17 | - host: &hostname books.k8s.tylercash.dev 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | service: 22 | port: *servicePort 23 | tls: 24 | - secretName: books-letsencrypt-certificate 25 | hosts: 26 | - *hostname 27 | 28 | 29 | podSecurityContext: 30 | fsGroup: 568 31 | fsGroupChangePolicy: "Always" 32 | 33 | persistence: 34 | config: 35 | enabled: true 36 | accessMode: ReadWriteOnce 37 | storageClass: ceph-block 38 | size: 5Gi 39 | downloads: 40 | enabled: true 41 | type: pvc 42 | existingClaim: qbittorrent-share 43 | books: 44 | enabled: true 45 | storageClass: ceph-filesystem-rust 46 | accessMode: ReadWriteMany 47 | size: 1Gi 48 | retain: true 49 | labels: 50 | snapshot.home.arpa/enabled: "false" 51 | 52 | 53 | resources: 54 | limits: 55 | cpu: 500m 56 | memory: 606Mi 57 | requests: 58 | cpu: 100m 59 | memory: 500Mi 60 | -------------------------------------------------------------------------------- /kubernetes/helm/media/plex/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: plex 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/media/plex/manifests/plex-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | 3 | kind: ExternalSecret 4 | metadata: 5 | name: media-plex-es 6 | spec: 7 | refreshInterval: "1h" 8 | secretStoreRef: 9 | kind: ClusterSecretStore 10 | name: gcp-clusterstore 11 | target: 12 | name: media-plex-secrets 13 | deletionPolicy: Delete 14 | creationPolicy: Owner 15 | template: 16 | engineVersion: v2 17 | templateFrom: 18 | - target: Data 19 | literal: | 20 | {{- .plex }} 21 | dataFrom: 22 | - extract: 23 | key: "all_secrets" 24 | -------------------------------------------------------------------------------- /kubernetes/helm/media/plex/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: ghcr.io/home-operations/plex 4 | tag: 1.41.7.9823 5 | pullPolicy: IfNotPresent 6 | 7 | # uncomment for initial claiming of server 8 | # envFrom: 9 | # - secretRef: 10 | # name: media-plex-secrets 11 | env: 12 | PLEX_UID: 568 13 | PLEX_GID: 568 14 | CHANGE_CONFIG_DIR_OWNERSHIP: false 15 | PLEX_ADVERTISE_URL: https://plex.k8s.tylercash.dev:443 16 | 17 | service: 18 | main: 19 | ports: 20 | http: 21 | port: 32400 22 | 23 | ingress: 24 | main: 25 | enabled: true 26 | annotations: 27 | external-dns.home.arpa/enabled: "true" 28 | hosts: 29 | - host: plex.k8s.tylercash.dev 30 | paths: 31 | - path: / 32 | pathType: Prefix 33 | service: 34 | port: 32400 35 | tls: 36 | - secretName: plex-letsencrypt-certificate 37 | hosts: 38 | - plex.k8s.tylercash.dev 39 | 40 | podSecurityContext: 41 | runAsUser: 568 42 | runAsGroup: 568 43 | fsGroup: 568 44 | fsGroupChangePolicy: "Always" 45 | 46 | persistence: 47 | config: 48 |
enabled: true 49 | storageClass: ceph-block 50 | accessMode: ReadWriteOnce 51 | size: 5Gi 52 | transcode: 53 | enabled: true 54 | storageClass: ceph-block 55 | accessMode: ReadWriteOnce 56 | size: 50Gi 57 | tv: 58 | enabled: true 59 | type: pvc 60 | existingClaim: jellyfin-tv 61 | movies: 62 | enabled: true 63 | type: pvc 64 | existingClaim: jellyfin-movies 65 | 66 | resources: 67 | limits: {} 68 | requests: 69 | cpu: 100m 70 | memory: 700Mi 71 | -------------------------------------------------------------------------------- /kubernetes/helm/media/prowlarr/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: prowlarr 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/media/prowlarr/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: ghcr.io/home-operations/prowlarr 4 | tag: 1.36.3.5071 5 | pullPolicy: IfNotPresent 6 | 7 | service: 8 | main: 9 | ports: 10 | http: 11 | port: 9696 12 | 13 | ingress: 14 | main: 15 | enabled: true 16 | hosts: 17 | - host: prowlarr.k8s.tylercash.dev 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | service: 22 | port: 9696 23 | tls: 24 | - secretName: prowlarr-letsencrypt-certificate 25 | hosts: 26 | - prowlarr.k8s.tylercash.dev 27 | 28 | persistence: 29 | config: 30 | enabled: true 31 | storageClass: ceph-block 32 | labels: 33 | online-snapshot.home.arpa/enabled: "false" 34 | backups: 35 | enabled: true 36 | type: pvc 37 | existingClaim: sonarr-backups 38 | 39 | podSecurityContext: 40 | runAsUser: 568 41 | runAsGroup: 568 42 | fsGroup: 568 43 | fsGroupChangePolicy: "Always" 44 | 45 | metrics: 46 | enabled: true 47 | 48 | probes: 49 | liveness: 50 | spec: 51 | exec: 52 | command: 53 | - /usr/bin/env 54 | - bash 55 | - -c 56 | - test -w /config/config.xml && 57 | test -w /backups/prowlarr/scheduled/ 58 | 59 | resources: 60 | limits: 61 | cpu: 500m 62 | memory: 256Mi 63 | requests: 64 | cpu: 100m 65 | memory: 200Mi 66 | -------------------------------------------------------------------------------- /kubernetes/helm/media/qbittorrent/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: qbittorrent 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/media/radarr/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: radarr 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/media/radarr/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: ghcr.io/home-operations/radarr 4 | tag: 5.26.0.10051 5 | pullPolicy: IfNotPresent 6 | 7 | service: 8 | main: 9 | ports: 10 | http: 11 | port: 7878 12 | 13 | 
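# The share/backups/movies mounts below reuse PVCs owned by the qbittorrent,
# sonarr and jellyfin charts (via existingClaim), so downloads and the media
# library sit on one shared filesystem rather than per-app copies.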
ingress: 14 | main: 15 | enabled: true 16 | hosts: 17 | - host: radarr.k8s.tylercash.dev 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | service: 22 | port: 7878 23 | tls: 24 | - secretName: radarr-letsencrypt-certificate 25 | hosts: 26 | - radarr.k8s.tylercash.dev 27 | 28 | podSecurityContext: 29 | runAsUser: 568 30 | runAsGroup: 568 31 | fsGroup: 568 32 | fsGroupChangePolicy: "Always" 33 | 34 | persistence: 35 | config: 36 | enabled: true 37 | storageClass: ceph-block 38 | size: 5Gi 39 | labels: 40 | snapshot.home.arpa/enabled: "false" 41 | share: 42 | enabled: true 43 | type: pvc 44 | existingClaim: qbittorrent-share 45 | backups: 46 | enabled: true 47 | type: pvc 48 | existingClaim: sonarr-backups 49 | movies: 50 | enabled: true 51 | type: pvc 52 | existingClaim: jellyfin-movies 53 | 54 | resources: 55 | limits: 56 | cpu: 500m 57 | memory: 606Mi 58 | requests: 59 | cpu: 100m 60 | memory: 500Mi 61 | -------------------------------------------------------------------------------- /kubernetes/helm/media/samba/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: samba 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/media/samba/manifests/password.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: generators.external-secrets.io/v1alpha1 2 | kind: Password 3 | metadata: 4 | name: samba-password 5 | spec: 6 | length: 20 7 | allowRepeat: true 8 | symbolCharacters: "" 9 | symbols: 0 10 | 11 | --- 12 | apiVersion: external-secrets.io/v1 13 | 14 | kind: ExternalSecret 15 | metadata: 16 | name: samba-password 17 | spec: 18 | refreshInterval: "87600h" 19 | target: 20 | name: samba-password-secret 21 | template: 22 | data: 23 | ACCOUNT_tcash: "{{ .password }}" 24 | dataFrom: 25 | - sourceRef: 26 | generatorRef: 27 | apiVersion: generators.external-secrets.io/v1alpha1 28 | kind: Password 29 | name: "samba-password" 30 | -------------------------------------------------------------------------------- /kubernetes/helm/media/samba/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: ghcr.io/servercontainers/samba 4 | tag: smbd-only-a3.18.4-s4.18.5-r0 5 | 6 | env: 7 | WSDD2_DISABLE: 1 8 | AVAHI_DISABLE: 1 9 | SAMBA_VOLUME_CONFIG_replays: "[Replays]; path=/shares/replays; valid users = tcash; guest ok = no; read only = no; browseable = yes" 10 | SAMBA_VOLUME_CONFIG_photos: "[Photos]; path=/shares/photos; valid users = tcash; guest ok = no; read only = no; browseable = yes" 11 | SAMBA_VOLUME_CONFIG_photos_ssd: "[Photos_ssd]; path=/shares/photos-ssd; valid users = tcash; guest ok = no; read only = no; browseable = yes" 12 | SAMBA_VOLUME_CONFIG_photos_hdd: "[Photos_hdd]; path=/shares/photos-hdd; valid users = tcash; guest ok = no; read only = no; browseable = yes" 13 | SAMBA_CONF_LOG_LEVEL: 1 passdb:5 auth:5 14 | UID_tcash: 568 15 | 16 | envFrom: 17 | - secretRef: 18 | name: samba-password-secret 19 | 20 | service: 21 | main: 22 | enabled: false 23 | samba: 24 | enabled: true 25 | type: LoadBalancer 26 | annotations: 27 | coredns.io/hostname: samba.k8s.tylercash.dev 28 | ports: 29 | samba: 30 | enabled: true 31 | port: 445 32 | protocol: TCP 33 | targetPort: 445 34 
| 35 | podSecurityContext: 36 | fsGroup: 568 37 | fsGroupChangePolicy: "OnRootMismatch" 38 | 39 | persistence: 40 | photos: 41 | enabled: true 42 | storageClass: ceph-filesystem-rust 43 | accessMode: ReadWriteMany 44 | mountPath: /shares/photos 45 | retain: true 46 | size: 5Ti 47 | labels: 48 | online-snapshot.home.arpa/enabled: "false" 49 | photos-hdd: 50 | enabled: true 51 | storageClass: ceph-filesystem-rust 52 | accessMode: ReadWriteMany 53 | mountPath: /shares/photos-hdd 54 | retain: true 55 | size: 5Ti 56 | labels: 57 | online-snapshot.home.arpa/enabled: "false" 58 | photos-ssd: 59 | enabled: true 60 | storageClass: ceph-block 61 | accessMode: ReadWriteOnce 62 | mountPath: /shares/photos-ssd 63 | retain: true 64 | size: 1.5Ti 65 | replays: 66 | enabled: true 67 | storageClass: ceph-filesystem-rust 68 | mountPath: /shares/replays 69 | accessMode: ReadWriteMany 70 | retain: true 71 | size: 5Ti 72 | -------------------------------------------------------------------------------- /kubernetes/helm/media/sonarr/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: sonarr 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/media/sonarr/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: ghcr.io/home-operations/sonarr 4 | tag: 4.0.14.2938 5 | pullPolicy: IfNotPresent 6 | 7 | service: 8 | main: 9 | ports: 10 | http: 11 | port: 8989 12 | 13 | ingress: 14 | main: 15 | enabled: true 16 | hosts: 17 | - host: sonarr.k8s.tylercash.dev 18 | paths: 19 | - path: / 20 | pathType: Prefix 21 | service: 22 | port: 8989 23 | tls: 24 | - secretName: sonarr-letsencrypt-certificate 25 | hosts: 26 | - sonarr.k8s.tylercash.dev 27 | 28 | podSecurityContext: 29 | runAsUser: 568 30 | runAsGroup: 568 31 | fsGroup: 568 32 | fsGroupChangePolicy: "Always" 33 | 34 | persistence: 35 | config: 36 | enabled: true 37 | storageClass: ceph-block 38 | size: 5Gi 39 | labels: 40 | online-snapshot.home.arpa/enabled: "false" 41 | tv: 42 | enabled: true 43 | type: pvc 44 | existingClaim: jellyfin-tv 45 | share: 46 | enabled: true 47 | type: pvc 48 | existingClaim: qbittorrent-share 49 | backups: 50 | enabled: true 51 | storageClass: ceph-filesystem-rust 52 | accessMode: ReadWriteMany 53 | size: 10Gi 54 | retain: true 55 | 56 | resources: 57 | limits: {} 58 | requests: 59 | cpu: 100m 60 | memory: 700Mi 61 | -------------------------------------------------------------------------------- /kubernetes/helm/media/tylercash-dev-api/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: tylercash-dev-api 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/media/tylercash-dev-api/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: ghcr.io/tyler-cash/tylercash-api 4 | tag: 1.0.0-SNAPSHOT 5 | pullPolicy: Always 6 | 7 | service: 8 | main: 9 | ports: 10 | http: 11 | port: 8080 12 | 13 |
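# The external-dns.home.arpa/enabled annotation below opts this ingress into
# the Kyverno external-dns rule, which points the public DNS record at
# ingress.k8s.tylercash.dev and skips the RFC1918 whitelist.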
ingress: 14 | main: 15 | enabled: true 16 | annotations: 17 | external-dns.home.arpa/enabled: "true" 18 | hosts: 19 | - host: api.k8s.tylercash.dev 20 | paths: 21 | - path: / 22 | pathType: Prefix 23 | service: 24 | port: 8080 25 | tls: 26 | - secretName: api-letsencrypt-certificate 27 | hosts: 28 | - api.k8s.tylercash.dev 29 | 30 | resources: 31 | requests: 32 | cpu: 10m 33 | memory: 100Mi 34 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/descheduler/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: descheduler 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: descheduler 8 | version: 0.33.0 9 | repository: https://kubernetes-sigs.github.io/descheduler/ 10 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/descheduler/values.yaml: -------------------------------------------------------------------------------- 1 | descheduler: 2 | kind: Deployment 3 | schedule: "*/5 * * * *" 4 | deschedulerPolicy: 5 | strategies: 6 | RemovePodsHavingTooManyRestarts: 7 | enabled: true 8 | params: 9 | podsHavingTooManyRestarts: 10 | podRestartThreshold: 10 11 | includingInitContainers: true 12 | RemovePodsViolatingTopologySpreadConstraint: 13 | enabled: true 14 | params: 15 | includeSoftConstraints: true 16 | PodLifeTime: 17 | enabled: true 18 | params: 19 | podLifeTime: 20 | states: 21 | - Pending 22 | - PodInitializing 23 | - ContainerCreating 24 | maxPodLifeTimeSeconds: 3600 25 | RemoveFailedPods: 26 | enabled: true 27 | params: 28 | failedPods: 29 | reasons: 30 | - NodeShutdown 31 | - ImagePullBackOff 32 | - CreateContainerConfigError 33 | includingInitContainers: true 34 | excludeOwnerKinds: 35 | - Job 36 | minPodLifetimeSeconds: 3600 37 | RemoveDuplicates: 38 | enabled: true 39 | RemovePodsViolatingNodeTaints: 40 | enabled: true 41 | RemovePodsViolatingNodeAffinity: 42 | enabled: true 43 | params: 44 | nodeAffinityType: 45 | - requiredDuringSchedulingIgnoredDuringExecution 46 | RemovePodsViolatingInterPodAntiAffinity: 47 | enabled: true 48 | LowNodeUtilization: 49 | enabled: true 50 | params: 51 | nodeResourceUtilizationThresholds: 52 | thresholds: 53 | cpu: 40 54 | memory: 40 55 | targetThresholds: 56 | cpu: 70 57 | memory: 70 58 | service: 59 | enabled: true 60 | serviceMonitor: 61 | enabled: true 62 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/gotify/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: gotify 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/gotify/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: gotify/server 4 | pullPolicy: IfNotPresent 5 | tag: "2.6.3" 6 | 7 | # See more environment variables in the gotify documentation 8 | # https://gotify.net/docs/config#environment-variables 9 | env: 10 | GOTIFY_SERVER_PORT: "80" 11 | GOTIFY_SERVER_KEEPALIVEPERIODSECONDS: "0" 12 | GOTIFY_SERVER_STREAM_PINGPERIODSECONDS: "45" 13 | GOTIFY_DATABASE_DIALECT: "sqlite3" 14 | 
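# sqlite3 keeps gotify self-contained; the database and uploaded images live
# on the /config PVC declared under persistence below.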
GOTIFY_DATABASE_CONNECTION: "/config/gotify.db" 15 | GOTIFY_PASSSTRENGTH: "10" 16 | GOTIFY_UPLOADEDIMAGESDIR: "/config/images" 17 | GOTIFY_PLUGINSDIR: "/config/plugins" 18 | 19 | service: 20 | main: 21 | ports: 22 | http: 23 | port: &port 80 24 | 25 | ingress: 26 | main: 27 | enabled: true 28 | annotations: 29 | nginx.org/websocket-services: "gotify" 30 | hosts: 31 | - host: gotify.k8s.tylercash.dev 32 | paths: 33 | - path: / 34 | pathType: Prefix 35 | service: 36 | port: *port 37 | tls: 38 | - secretName: gotify-letsencrypt-certificate 39 | hosts: 40 | - gotify.k8s.tylercash.dev 41 | 42 | persistence: 43 | data: 44 | enabled: true 45 | storageClass: ceph-block 46 | accessMode: ReadWriteOnce 47 | mountPath: /config 48 | size: 5Gi 49 | resources: 50 | limits: 51 | cpu: 80m 52 | memory: 64Mi 53 | requests: 54 | cpu: 50m 55 | memory: 32Mi 56 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/health-all/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: gatus 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: gatus 8 | version: "1.2.0" 9 | repository: https://twin.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/health-all/values.yaml: -------------------------------------------------------------------------------- 1 | gatus: 2 | deployment: 3 | strategy: Recreate 4 | 5 | config: 6 | metrics: true 7 | storage: 8 | type: sqlite 9 | path: /data/data.db 10 | endpoints: 11 | - name: bazarr 12 | interval: &interval 20s 13 | url: http://bazarr.media.svc.cluster.local:6767/ 14 | conditions: &default_conditions 15 | - "[STATUS] == 200" 16 | - "[CONNECTED] == true" 17 | - "[RESPONSE_TIME] < 400" 18 | 19 | - name: bitwarden 20 | url: http://bitwarden.security.svc.cluster.local/ 21 | interval: *interval 22 | conditions: *default_conditions 23 | 24 | - name: gotify 25 | url: http://gotify.monitoring.svc.cluster.local/ 26 | interval: *interval 27 | conditions: *default_conditions 28 | 29 | 30 | - name: jellyfin 31 | url: http://jellyfin.media.svc.cluster.local:8096/ 32 | interval: *interval 33 | conditions: *default_conditions 34 | 35 | 36 | - name: prowlarr 37 | url: http://prowlarr.media.svc.cluster.local:9696/ 38 | interval: *interval 39 | conditions: *default_conditions 40 | 41 | 42 | - name: radarr 43 | url: http://radarr.media.svc.cluster.local:7878/ 44 | interval: *interval 45 | conditions: *default_conditions 46 | 47 | 48 | - name: sonarr 49 | url: http://sonarr.media.svc.cluster.local:8989/ 50 | interval: *interval 51 | conditions: *default_conditions 52 | 53 | 54 | - name: k8s-master 55 | url: tcp://10.0.90.100:6443 56 | interval: *interval 57 | conditions: 58 | - "[CONNECTED] == true" 59 | - "[RESPONSE_TIME] < 1000" 60 | 61 | 62 | - name: argo 63 | url: https://argocd.k8s.tylercash.dev/ 64 | interval: *interval 65 | conditions: *default_conditions 66 | 67 | - name: prometheus 68 | url: http://prometheus-prometheus.monitoring.svc.cluster.local:9090/ 69 | interval: *interval 70 | conditions: *default_conditions 71 | 72 | 73 | - name: alert-manager 74 | url: http://prometheus-alertmanager.monitoring.svc.cluster.local:9093/ 75 | interval: *interval 76 | conditions: *default_conditions 77 | 78 | 79 | - name: grafana 80 | url: http://grafana.k8s.tylercash.dev/ 81 | interval: *interval 82 | conditions: *default_conditions 83 | 84 | - name: ceph
85 | url: https://ceph.k8s.tylercash.dev/ 86 | interval: *interval 87 | conditions: *default_conditions 88 | 89 | - name: qbittorrent 90 | url: http://qbittorrent.media.svc.cluster.local:8080/ 91 | interval: *interval 92 | conditions: *default_conditions 93 | 94 | - name: mealie 95 | url: http://mealie.media.svc.cluster.local:3000/ 96 | interval: *interval 97 | conditions: *default_conditions 98 | 99 | 100 | ingress: 101 | enabled: true 102 | annotations: 103 | nginx.org/websocket-services: gatus 104 | hosts: 105 | - &host health-all.k8s.tylercash.dev 106 | tls: 107 | - secretName: health-letsencrypt-certificate 108 | hosts: 109 | - *host 110 | 111 | persistence: 112 | enabled: true 113 | storageClass: ceph-block 114 | accessMode: ReadWriteOnce 115 | size: 5Gi 116 | 117 | resources: 118 | limits: 119 | cpu: 100m 120 | memory: 64Mi 121 | requests: 122 | cpu: 50m 123 | memory: 32Mi 124 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/health-public/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: gatus 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: gatus 8 | version: "1.2.0" 9 | repository: https://twin.github.io/helm-charts 10 | 11 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/health-public/manifests/gatus-rules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: gatus-rules 5 | spec: 6 | groups: 7 | - name: gatus.rules 8 | rules: 9 | - alert: CertAlmostExpired 10 | annotations: 11 | description: | 12 | {{ $labels.name }} certificate will expire within a week. 13 | expr: | 14 | min_over_time(gatus_results_certificate_expiration_seconds[5m]) < 604800 15 | for: 10m 16 | labels: 17 | severity: critical 18 | 19 | - alert: GatusServiceDown 20 | annotations: 21 | summary: Service with less than 70% availability over the last 30m 22 | description: | 23 | {{ $labels.key }} is down.
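# Success ratio over the trailing 30m window; fires when availability stays
# below 70% for a full hour.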
24 | expr: | 25 | (sum(rate(gatus_results_total{success="true"}[30m])) by (key) / sum(rate(gatus_results_total[30m])) by (key)) < 0.7 26 | for: 60m 27 | labels: 28 | severity: critical 29 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/health-public/manifests/gatus-servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: gatus-servicemonitor 5 | namespace: monitoring 6 | spec: 7 | endpoints: 8 | - honorLabels: true 9 | interval: 1m 10 | path: /metrics 11 | port: http 12 | scheme: http 13 | scrapeTimeout: 30s 14 | jobLabel: health-public 15 | namespaceSelector: 16 | matchNames: 17 | - monitoring 18 | selector: 19 | matchLabels: 20 | app.kubernetes.io/name: gatus 21 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/health-public/values.yaml: -------------------------------------------------------------------------------- 1 | gatus: 2 | deployment: 3 | strategy: Recreate 4 | 5 | config: 6 | metrics: true 7 | storage: 8 | type: sqlite 9 | path: /data/data.db 10 | endpoints: 11 | - name: tyler-bot-backend 12 | url: https://event.k8s.tylercash.dev/api/auth/is-logged-in 13 | conditions: 14 | - "[STATUS] < 400" 15 | - "[CONNECTED] == true" 16 | - "[RESPONSE_TIME] < 2000" 17 | 18 | - name: tyler-bot-frontend 19 | url: https://event.k8s.tylercash.dev/ 20 | conditions: &default_conditions 21 | - "[STATUS] == 200" 22 | - "[CONNECTED] == true" 23 | - "[RESPONSE_TIME] < 400" 24 | 25 | - name: Jellyfin 26 | url: https://jellyfin.k8s.tylercash.dev/ 27 | conditions: *default_conditions 28 | 29 | - name: Authentik 30 | url: https://authentik.k8s.tylercash.dev/ 31 | conditions: *default_conditions 32 | 33 | - name: Kubernetes Cluster 34 | url: tcp://10.0.90.100:6443 35 | conditions: 36 | - "[CONNECTED] == true" 37 | - "[RESPONSE_TIME] < 1000" 38 | 39 | - name: Domain expiry 40 | url: https://tylercash.dev/ 41 | interval: 1h 42 | conditions: 43 | - "[DOMAIN_EXPIRATION] > 720h" 44 | 45 | ingress: 46 | enabled: true 47 | annotations: 48 | nginx.org/websocket-services: gatus 49 | external-dns.home.arpa/enabled: "true" 50 | hosts: 51 | - &host health.k8s.tylercash.dev 52 | tls: 53 | - secretName: status-letsencrypt-certificate 54 | hosts: 55 | - *host 56 | 57 | persistence: 58 | enabled: true 59 | storageClass: ceph-block 60 | accessMode: ReadWriteOnce 61 | size: 5Gi 62 | 63 | resources: 64 | requests: 65 | cpu: 50m 66 | memory: 32Mi 67 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/loki/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: loki 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: loki 8 | version: 6.30.1 9 | repository: https://grafana.github.io/helm-charts 10 | 11 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/loki/values.yaml: -------------------------------------------------------------------------------- 1 | loki: 2 | loki: 3 | auth_enabled: false 4 | commonConfig: 5 | replication_factor: 1 6 | schemaConfig: 7 | configs: 8 | - from: "2024-04-01" 9 | store: tsdb 10 | object_store: s3 11 | schema: v13 12 | index: 13 | prefix: loki_index_ 14 | period: 24h 15 | pattern_ingester: 16 | enabled: true 17 | 
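# allow_structured_metadata below depends on the v13 schema configured above;
# the ingestion_rate/burst values override Loki's defaults for this
# single-node setup.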
limits_config: 18 | allow_structured_metadata: true 19 | volume_enabled: true 20 | ingestion_rate_mb: 3 21 | ingestion_burst_size_mb: 16 22 | ruler: 23 | enable_api: true 24 | resources: 25 | requests: 26 | cpu: 100m 27 | memory: 256Mi 28 | limits: 29 | cpu: 500m 30 | memory: 512Mi 31 | 32 | minio: 33 | enabled: true 34 | persistence: 35 | enabled: true 36 | storageClassName: "ceph-block" 37 | size: 10Gi 38 | 39 | deploymentMode: SingleBinary 40 | 41 | singleBinary: 42 | replicas: 1 43 | persistence: 44 | enabled: true 45 | storageClassName: "ceph-block" 46 | size: 10Gi 47 | 48 | # Zero out replica counts of other deployment modes 49 | backend: 50 | replicas: 0 51 | read: 52 | replicas: 0 53 | write: 54 | replicas: 0 55 | 56 | ingester: 57 | replicas: 0 58 | querier: 59 | replicas: 0 60 | queryFrontend: 61 | replicas: 0 62 | queryScheduler: 63 | replicas: 0 64 | distributor: 65 | replicas: 0 66 | compactor: 67 | replicas: 0 68 | indexGateway: 69 | replicas: 0 70 | bloomCompactor: 71 | replicas: 0 72 | bloomGateway: 73 | replicas: 0 74 | 75 | service: 76 | type: ClusterIP 77 | port: 3100 78 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/node-feature-discovery/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: node-feature-discovery 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: node-feature-discovery 8 | version: 0.17.3 9 | repository: https://kubernetes-sigs.github.io/node-feature-discovery/charts -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/node-feature-discovery/values.yaml: -------------------------------------------------------------------------------- 1 | node-feature-discovery: 2 | worker: 3 | resources: 4 | requests: 5 | cpu: 15m 6 | memory: 64M 7 | limits: 8 | memory: 64M 9 | config: 10 | core: 11 | sources: 12 | - custom 13 | - pci 14 | - usb 15 | sources: 16 | usb: 17 | deviceClassWhitelist: 18 | - "02" 19 | - "0e" 20 | - "ef" 21 | - "fe" 22 | - "ff" 23 | deviceLabelFields: 24 | - "class" 25 | - "vendor" 26 | - "device" 27 | custom: 28 | - name: "intel-gpu" 29 | matchOn: 30 | - pciId: 31 | class: ["0300"] 32 | vendor: ["8086"] 33 | tls: 34 | enable: true 35 | certManager: true -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/prometheus-crds/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: prometheus-crds 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: prometheus-operator-crds 8 | version: 20.0.1 9 | repository: https://prometheus-community.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/prometheus-crds/values.yaml: -------------------------------------------------------------------------------- 1 | prometheus-operator-crds: {} 2 | 3 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/prometheus/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: prometheus 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: kube-prometheus-stack 8 | version: 72.9.1 9 | repository: 
https://prometheus-community.github.io/helm-charts -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/promtail/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: promtail 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: promtail 8 | version: 6.16.6 9 | repository: https://grafana.github.io/helm-charts 10 | 11 | -------------------------------------------------------------------------------- /kubernetes/helm/monitoring/promtail/values.yaml: -------------------------------------------------------------------------------- 1 | promtail: 2 | serviceName: loki 3 | config: 4 | lokiAddress: http://loki:3100/loki/api/v1/push 5 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/coredns/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: coredns 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: coredns 8 | version: 1.42.2 9 | repository: https://coredns.github.io/helm 10 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/coredns/manifests/cron.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | name: adblock-updater 5 | spec: 6 | schedule: "0 * * * *" 7 | jobTemplate: 8 | spec: 9 | template: 10 | spec: 11 | containers: 12 | - name: adblock-updater 13 | image: centos 14 | imagePullPolicy: IfNotPresent 15 | volumeMounts: 16 | - name: coredns-blacklist 17 | mountPath: /blacklist/ 18 | command: 19 | - /bin/sh 20 | - -c 21 | - curl https://raw.githubusercontent.com/Tyler-Cash/homelab/master/kubernetes/helm/networking/coredns/manifests/scripts/download_blacklists.sh | /bin/sh 22 | restartPolicy: OnFailure 23 | volumes: 24 | - name: coredns-blacklist 25 | persistentVolumeClaim: 26 | claimName: coredns-blacklist 27 | readOnly: false 28 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/coredns/manifests/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: coredns-blacklist 5 | labels: 6 | online-snapshot.home.arpa/enabled: "false" 7 | spec: 8 | accessModes: 9 | - ReadWriteMany 10 | resources: 11 | requests: 12 | storage: 100Mi 13 | storageClassName: ceph-filesystem 14 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/coredns/manifests/scripts/download_blacklists.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | set -v 4 | # Taken from https://news.ycombinator.com/item?id=21238213 5 | HOSTS_FILE="/tmp/hosts.blacklist" 6 | HOSTS_FILES="$HOSTS_FILE.d" 7 | destination="/blacklist" 8 | mkdir -p "${HOSTS_FILES}" 9 | download() { 10 | echo "download($1)" 11 | curl \ 12 | --location --max-redirs 3 \ 13 | --max-time 600 --retry 3 --retry-delay 0 --retry-max-time 1000 \ 14 | "$1" > "$(mktemp "${HOSTS_FILES}"/XXXXXX)" 15 | } 16 | 17 | 18 | download "https://cdn.jsdelivr.net/gh/hagezi/dns-blocklists@latest/hosts/pro.txt" 19 | 20 | 21 | cat "${HOSTS_FILES}"/* > "${HOSTS_FILE}" 22 | 23 | cat "${HOSTS_FILE}" 24 | 25 | mv 
"${HOSTS_FILE}" "${destination}" 26 | 27 | curl --location --max-redirs 3 \ 28 | --max-time 600 --retry 3 --retry-delay 0 --retry-max-time 1000 \ 29 | "https://github.com/Tyler-Cash/homelab/raw/master/kubernetes/helm/networking/coredns/manifests/scripts/lancache.txt" >> "/blacklist/hosts.blacklist" 30 | 31 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/coredns/manifests/scripts/lancache.txt: -------------------------------------------------------------------------------- 1 | 10.0.90.148 assetcdn.101.arenanetworks.com 2 | 10.0.90.148 assetcdn.102.arenanetworks.com 3 | 10.0.90.148 assetcdn.103.arenanetworks.com 4 | 10.0.90.148 blizzard.vo.llnwd.net 5 | 10.0.90.148 blzddist1-a.akamaihd.net 6 | 10.0.90.148 blzddist2-a.akamaihd.net 7 | 10.0.90.148 blzddist3-a.akamaihd.net 8 | 10.0.90.148 cdn.blizzard.com 9 | 10.0.90.148 dist.blizzard.com 10 | 10.0.90.148 dist.blizzard.com.edgesuite.net 11 | 10.0.90.148 edge.blizzard.top.comcast.net 12 | 10.0.90.148 level3.blizzard.com 13 | 10.0.90.148 nydus.battle.net 14 | 10.0.90.148 cdn-11.eft-store.com 15 | 10.0.90.148 cl-453343cd.gcdn.co 16 | 10.0.90.148 cdn-eu1.homecomingservers.com 17 | 10.0.90.148 cdn-na1.homecomingservers.com 18 | 10.0.90.148 cdn-na2.homecomingservers.com 19 | 10.0.90.148 cdn-na3.homecomingservers.com 20 | 10.0.90.148 pls.patch.daybreakgames.com 21 | 10.0.90.148 cdn.unrealengine.com 22 | 10.0.90.148 cdn1.epicgames.com 23 | 10.0.90.148 cdn1.unrealengine.com 24 | 10.0.90.148 cdn2.unrealengine.com 25 | 10.0.90.148 cdn3.unrealengine.com 26 | 10.0.90.148 download.epicgames.com 27 | 10.0.90.148 download2.epicgames.com 28 | 10.0.90.148 download3.epicgames.com 29 | 10.0.90.148 download4.epicgames.com 30 | 10.0.90.148 epicgames-download1.akamaized.net 31 | 10.0.90.148 fastly-download.epicgames.com 32 | 10.0.90.148 cdn.zaonce.net 33 | 10.0.90.148 filedelivery.nexusmods.com 34 | 10.0.90.148 level3.nwhttppatch.crypticstudios.com 35 | 10.0.90.148 .hac.lp1.d4c.nintendo.net 36 | 10.0.90.148 .hac.lp1.eshop.nintendo.net 37 | 10.0.90.148 .wup.eshop.nintendo.net 38 | 10.0.90.148 ccs.cdn.wup.shop.nintendo.net 39 | 10.0.90.148 ccs.cdn.wup.shop.nintendo.net.edgesuite.net 40 | 10.0.90.148 ecs-lp1.hac.shop.nintendo.net 41 | 10.0.90.148 geisha-wup.cdn.nintendo.net 42 | 10.0.90.148 geisha-wup.cdn.nintendo.net.edgekey.net 43 | 10.0.90.148 idbe-wup.cdn.nintendo.net 44 | 10.0.90.148 idbe-wup.cdn.nintendo.net.edgekey.net 45 | 10.0.90.148 receive-lp1.dg.srv.nintendo.net 46 | 10.0.90.148 cdn-patch.swtor.com 47 | 10.0.90.148 lvlt.cdn.ea.com 48 | 10.0.90.148 origin-a.akamaihd.net 49 | 10.0.90.148 patchcdn.pathofexile.com 50 | 10.0.90.148 apac-sg.nexusbytes.ren-x.com 51 | 10.0.90.148 apac-tyo.nexusbytes.ren-x.com 52 | 10.0.90.148 eu-lux.buyvm.ren-x.com 53 | 10.0.90.148 eu-nl.nexusbytes.ren-x.com 54 | 10.0.90.148 eu-uk.nexusbytes.ren-x.com 55 | 10.0.90.148 rxp-de1.ts3-server.ch 56 | 10.0.90.148 us-chi.racknerd.ren-x.com 57 | 10.0.90.148 us-chi2.cncirc.net 58 | 10.0.90.148 us-dal.boomerhost.ren-x.com 59 | 10.0.90.148 us-la.cncfps.com 60 | 10.0.90.148 us-lv.buyvm.ren-x.com 61 | 10.0.90.148 us-lv2.cncirc.net 62 | 10.0.90.148 us-mia.buyvm.ren-x.com 63 | 10.0.90.148 us-nj.shockhosting.ctgamehosts.uk 64 | 10.0.90.148 us-ny.buyvm.ren-x.com 65 | 10.0.90.148 us-phx.speedy.ren-x.com 66 | 10.0.90.148 us-va.ovh.ctgamehosts.uk 67 | 10.0.90.148 .dyn.riotcdn.net 68 | 10.0.90.148 l3cdn.riotgames.com 69 | 10.0.90.148 riotgamespatcher-a.akamaihd.net 70 | 10.0.90.148 riotgamespatcher-a.akamaihd.net.edgesuite.net 71 | 
10.0.90.148 worldwide.l3cdn.riotgames.com 72 | 10.0.90.148 patches.rockstargames.com 73 | 10.0.90.148 .gs2.sonycoment.loris-e.llnwd.net 74 | 10.0.90.148 gs-sec.ww.np.dl.playstation.net 75 | 10.0.90.148 gs2-ww-prod.psn.akadns.net 76 | 10.0.90.148 gs2.ww.prod.dl.playstation.net 77 | 10.0.90.148 gs2.ww.prod.dl.playstation.net.edgesuite.net 78 | 10.0.90.148 playstation4.sony.akadns.net 79 | 10.0.90.148 theia.dl.playstation.net 80 | 10.0.90.148 tmdb.np.dl.playstation.net 81 | 10.0.90.148 patch-dl.ffxiv.com 82 | 10.0.90.148 lancache.steamcontent.com 83 | 10.0.90.148 .cdn.ubi.com 84 | 10.0.90.148 live.patcher.elderscrollsonline.com 85 | 10.0.90.148 content.warframe.com 86 | 10.0.90.148 dl-wot-ak.wargaming.net 87 | 10.0.90.148 dl-wot-cdx.wargaming.net 88 | 10.0.90.148 dl-wot-gc.wargaming.net 89 | 10.0.90.148 dl-wot-se.wargaming.net 90 | 10.0.90.148 dl-wowp-ak.wargaming.net 91 | 10.0.90.148 dl-wowp-cdx.wargaming.net 92 | 10.0.90.148 dl-wowp-gc.wargaming.net 93 | 10.0.90.148 dl-wowp-se.wargaming.net 94 | 10.0.90.148 dl-wows-ak.wargaming.net 95 | 10.0.90.148 dl-wows-cdx.wargaming.net 96 | 10.0.90.148 dl-wows-gc.wargaming.net 97 | 10.0.90.148 dl-wows-se.wargaming.net 98 | 10.0.90.148 dl2.wargaming.net 99 | 10.0.90.148 wg.gcdn.co 100 | 10.0.90.148 wgus-wotasia.wargaming.net 101 | 10.0.90.148 wgus-woteu.wargaming.net 102 | 10.0.90.148 .do.dsp.mp.microsoft.com 103 | 10.0.90.148 .microsoft.com.edgesuite.net 104 | 10.0.90.148 .update.microsoft.com 105 | 10.0.90.148 .windowsupdate.com 106 | 10.0.90.148 amupdatedl.microsoft.com 107 | 10.0.90.148 amupdatedl2.microsoft.com 108 | 10.0.90.148 amupdatedl3.microsoft.com 109 | 10.0.90.148 amupdatedl4.microsoft.com 110 | 10.0.90.148 amupdatedl5.microsoft.com 111 | 10.0.90.148 dl.delivery.mp.microsoft.com 112 | 10.0.90.148 assets1.xboxlive.com 113 | 10.0.90.148 assets1.xboxlive.com.nsatc.net 114 | 10.0.90.148 assets2.xboxlive.com 115 | 10.0.90.148 d1.xboxlive.com 116 | 10.0.90.148 xbox-mbr.xboxlive.com 117 | 10.0.90.148 xvcf1.xboxlive.com 118 | 10.0.90.148 xvcf2.xboxlive.com 119 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/coredns/values.yaml: -------------------------------------------------------------------------------- 1 | coredns: 2 | extraVolumeMounts: 3 | - name: coredns-blacklist 4 | mountPath: /blacklist 5 | 6 | extraVolumes: 7 | - name: coredns-blacklist 8 | persistentVolumeClaim: 9 | claimName: coredns-blacklist 10 | podSecurityContext: 11 | runAsUser: 3000 12 | runAsGroup: 3000 13 | fsGroup: 3000 14 | fsGroupChangePolicy: "Always" 15 | 16 | resources: 17 | limits: 18 | cpu: 400m 19 | memory: 700Mi 20 | requests: 21 | cpu: 250m 22 | memory: 400Mi 23 | 24 | prometheus: 25 | service: 26 | enabled: true 27 | monitor: 28 | enabled: true 29 | namespace: "monitoring" 30 | 31 | isClusterService: false 32 | serviceType: LoadBalancer 33 | 34 | service: 35 | type: udp 36 | LoadBalancerIp: "10.0.90.120" 37 | 38 | autoscaler: 39 | enabled: true 40 | min: 3 41 | preventSinglePointFailure: true 42 | resources: 43 | requests: 44 | cpu: "100m" 45 | memory: "50Mi" 46 | limits: 47 | cpu: "500m" 48 | memory: "50Mi" 49 | 50 | podDisruptionBudget: 51 | minAvailable: 2 52 | 53 | 54 | servers: 55 | - zones: 56 | - zone: . 57 | scheme: dns:// 58 | port: 53 59 | plugins: 60 | - name: log 61 | parameters: . 
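# Editor's note (hedged): the configBlock below scopes the log plugin to the
# "denial" class, so only NXDOMAIN/NODATA responses are logged - useful for
# auditing what the hosts-file blocklist is rejecting without recording
# every successful lookup.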
62 | configBlock: |- 63 | class denial 64 | - name: errors 65 | - name: health 66 | configBlock: |- 67 | lameduck 5s 68 | - name: ready 69 | - name: kubernetes 70 | parameters: cluster.local in-addr.arpa ip6.arpa 71 | configBlock: |- 72 | pods insecure 73 | fallthrough in-addr.arpa ip6.arpa 74 | ttl 30 75 | - name: prometheus 76 | parameters: 0.0.0.0:9153 77 | - name: forward 78 | parameters: . /etc/resolv.conf tls://1.0.0.1 tls://1.1.1.1 79 | - name: k8s_external 80 | parameters: home.tylercash.dev 81 | - name: hosts 82 | parameters: /blacklist/hosts.blacklist 83 | configBlock: |- 84 | reload 3600s 85 | no_reverse 86 | fallthrough 87 | - name: cache 88 | parameters: 30 89 | - name: loop 90 | - name: reload 91 | - name: loadbalance 92 | - zones: 93 | - zone: k8s.tylercash.dev 94 | scheme: dns:// 95 | port: 53 96 | plugins: 97 | - name: errors 98 | - name: forward 99 | parameters: k8s.tylercash.dev dns://10.0.90.149 100 | - name: hosts 101 | parameters: /blacklist/hosts.blacklist 102 | configBlock: |- 103 | reload 3600s 104 | no_reverse 105 | fallthrough 106 | - name: loop 107 | - name: reload 108 | - name: loadbalance 109 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/external-dns/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: external-dns 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: external-dns 8 | version: 1.16.1 9 | repository: https://kubernetes-sigs.github.io/external-dns/ -------------------------------------------------------------------------------- /kubernetes/helm/networking/external-dns/values.yaml: -------------------------------------------------------------------------------- 1 | external-dns: 2 | serviceMonitor: 3 | enabled: true 4 | provider: cloudflare 5 | txtPrefix: prefix.k8s. 
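With `provider: cloudflare` and the `txtPrefix` above, external-dns writes a `prefix.k8s.`-prefixed TXT ownership record next to every DNS record it manages, and the `--annotation-filter` in the extraArgs that follow means only objects that explicitly opt in get published. A minimal sketch of an opting-in Ingress; the hostname and resource names here are invented for illustration:

```yaml
# Illustrative opt-in: the annotation must satisfy the filter expression
# "external-dns.home.arpa/enabled in (true)" configured in extraArgs below.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example                # hypothetical
  annotations:
    external-dns.home.arpa/enabled: "true"
spec:
  rules:
    - host: example.k8s.tylercash.dev   # published to Cloudflare, alongside a TXT ownership record
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: example           # hypothetical backend Service
                port:
                  number: 80
```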
6 | extraArgs: 7 | - --annotation-filter=external-dns.home.arpa/enabled in (true) 8 | env: 9 | - name: CF_API_EMAIL 10 | valueFrom: 11 | secretKeyRef: 12 | name: cloudflare-secret 13 | key: email 14 | - name: CF_API_TOKEN 15 | valueFrom: 16 | secretKeyRef: 17 | name: cloudflare-secret 18 | key: api-token 19 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/ingress-nginx/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: ingress-nginx 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: ingress-nginx 8 | version: "4.12.2" 9 | repository: https://kubernetes.github.io/ingress-nginx -------------------------------------------------------------------------------- /kubernetes/helm/networking/ingress-nginx/values.yaml: -------------------------------------------------------------------------------- 1 | ingress-nginx: 2 | controller: 3 | allowSnippetAnnotations: true 4 | config: 5 | client-max-body-size: 8m 6 | proxy-read-timeout: "300" 7 | proxy-send-timeout: "300" 8 | proxy_buffer_size: "128k" 9 | proxy_buffers: 4 256k 10 | proxy_busy_buffers_size: 256k 11 | annotations-risk-level: Critical 12 | 13 | watchIngressWithoutClass: true 14 | ingressClassByName: true 15 | 16 | service: 17 | externalTrafficPolicy: Local 18 | internal: 19 | enabled: true 20 | externalTrafficPolicy: "Local" 21 | 22 | updateStrategy: 23 | rollingUpdate: 24 | maxUnavailable: 1 25 | type: RollingUpdate 26 | 27 | replicaCount: 2 28 | 29 | minAvailable: 2 30 | 31 | podAnnotations: 32 | prometheus.io/scrape: "true" 33 | prometheus.io/port: "10254" 34 | 35 | metrics: 36 | port: 10254 37 | portName: metrics 38 | enabled: true 39 | 40 | serviceMonitor: 41 | enabled: true 42 | namespaceSelector: 43 | any: true 44 | scrapeInterval: 30s 45 | honorLabels: true 46 | prometheusRule: 47 | enabled: true 48 | rules: 49 | # These are just example rules, please adapt them to your needs 50 | - alert: NGINXConfigFailed 51 | expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 52 | for: 1s 53 | labels: 54 | severity: critical 55 | annotations: 56 | description: bad ingress config - nginx config test failed 57 | summary: uninstall the latest ingress changes to allow config reloads to resume 58 | - alert: NGINXCertificateExpiry 59 | expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds{host!="_"}) by (host) - time()) < 604800 60 | for: 1s 61 | labels: 62 | severity: critical 63 | annotations: 64 | description: ssl certificate(s) will expire in less than a week 65 | summary: renew expiring certificates to avoid downtime 66 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/k8s-gateway/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: k8s-gateway 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: k8s-gateway 8 | version: 2.4.0 9 | repository: https://ori-edge.github.io/k8s_gateway/ -------------------------------------------------------------------------------- /kubernetes/helm/networking/k8s-gateway/values.yaml: -------------------------------------------------------------------------------- 1 | k8s-gateway: 2 | replicaCount: 2 3 | domain: k8s.tylercash.dev 4 | service: 5 | loadBalancerIP: "10.0.90.149" 6 | extraZonePlugins: 7 | - name: log 8 | - name: errors 9 | # Serves a
/health endpoint on :8080, required for livenessProbe 10 | - name: health 11 | configBlock: |- 12 | lameduck 5s 13 | # Serves a /ready endpoint on :8181, required for readinessProbe 14 | - name: ready 15 | # Serves a /metrics endpoint on :9153, required for serviceMonitor 16 | - name: prometheus 17 | parameters: 0.0.0.0:9153 18 | - name: forward 19 | parameters: . 1.1.1.1 8.8.8.8 20 | - name: loop 21 | - name: reload 22 | - name: loadbalance 23 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/metallb/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: metallb 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: metallb 8 | version: 0.15.2 9 | repository: https://metallb.github.io/metallb -------------------------------------------------------------------------------- /kubernetes/helm/networking/metallb/manifests/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: default-pool 5 | spec: 6 | addresses: 7 | - 10.0.90.120-10.0.90.150 8 | 9 | --- 10 | apiVersion: metallb.io/v1beta1 11 | kind: L2Advertisement 12 | metadata: 13 | name: default 14 | 15 | --- 16 | apiVersion: v1 17 | kind: Namespace 18 | metadata: 19 | labels: 20 | pod-security.kubernetes.io/audit: privileged 21 | pod-security.kubernetes.io/enforce: privileged 22 | pod-security.kubernetes.io/warn: privileged 23 | name: networking 24 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/metallb/values.yaml: -------------------------------------------------------------------------------- 1 | metallb: 2 | controller: 3 | priorityClassName: system-cluster-critical 4 | 5 | speaker: 6 | priorityClassName: system-cluster-critical 7 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/unifi/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: unifi 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/networking/unifi/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: jacobalberty/unifi 4 | tag: v9.1.120 5 | 6 | env: 7 | RUNAS_UID0: "false" 8 | UNIFI_UID: "999" 9 | UNIFI_GID: "999" 10 | UNIFI_STDOUT: "true" 11 | JVM_INIT_HEAP_SIZE: 12 | JVM_MAX_HEAP_SIZE: 1024M 13 | 14 | ingress: 15 | main: 16 | enabled: true 17 | annotations: 18 | nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" 19 | hosts: 20 | - host: &host "unifi.k8s.tylercash.dev" 21 | paths: 22 | - path: / 23 | pathType: Prefix 24 | tls: 25 | - secretName: "tls.{{ .Release.Name }}" 26 | hosts: 27 | - *host 28 | 29 | service: 30 | main: 31 | annotations: 32 | metallb.universe.tf/allow-shared-ip: unifi 33 | type: LoadBalancer 34 | externalTrafficPolicy: Local 35 | ports: 36 | http: 37 | port: 8443 38 | protocol: HTTPS 39 | controller: 40 | enabled: true 41 | port: 8080 42 | protocol: TCP 43 | portal-http: 44 | enabled: false 45 | port: 8880 46 | protocol: HTTP 47 | portal-https: 48 | enabled: 
false 49 | port: 8843 50 | protocol: HTTPS 51 | speedtest: 52 | enabled: true 53 | port: 6789 54 | protocol: TCP 55 | stun: 56 | enabled: true 57 | port: 3478 58 | protocol: UDP 59 | syslog: 60 | enabled: true 61 | port: 5514 62 | protocol: UDP 63 | discovery: 64 | enabled: true 65 | port: 10001 66 | protocol: UDP 67 | 68 | persistence: 69 | data: 70 | enabled: true 71 | size: 5Gi 72 | storageClass: ceph-block 73 | accessMode: ReadWriteOnce 74 | mountPath: /unifi/data 75 | 76 | resources: 77 | requests: 78 | cpu: 23m 79 | memory: 1390M 80 | limits: 81 | memory: 1390M 82 | -------------------------------------------------------------------------------- /kubernetes/helm/security/authentik/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: authentik 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: authentik 8 | version: 2025.2.2 9 | repository: https://charts.goauthentik.io/ 10 | - name: redis 11 | version: 21.0.2 12 | repository: oci://registry-1.docker.io/bitnamicharts 13 | 14 | -------------------------------------------------------------------------------- /kubernetes/helm/security/authentik/manifests/authentik-database.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: postgresql.cnpg.io/v1 2 | kind: Cluster 3 | metadata: 4 | name: &name authentik-database 5 | spec: 6 | instances: 3 7 | primaryUpdateStrategy: unsupervised 8 | bootstrap: 9 | initdb: 10 | database: authentik 11 | owner: authentik 12 | storage: 13 | storageClass: local-path 14 | size: 5Gi 15 | 16 | monitoring: 17 | enablePodMonitor: true 18 | 19 | affinity: 20 | enablePodAntiAffinity: true 21 | topologyKey: kubernetes.io/hostname 22 | podAntiAffinityType: preferred 23 | 24 | --- 25 | apiVersion: postgresql.cnpg.io/v1 26 | kind: ScheduledBackup 27 | metadata: 28 | name: authentik-backup 29 | spec: 30 | immediate: true 31 | schedule: "@weekly" 32 | backupOwnerReference: self 33 | cluster: 34 | name: authentik-database 35 | -------------------------------------------------------------------------------- /kubernetes/helm/security/authentik/manifests/authentik-email.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: authentik-email 5 | data: 6 | from: "authentik@tylercash.dev" 7 | host: "in-v3.mailjet.com" 8 | -------------------------------------------------------------------------------- /kubernetes/helm/security/authentik/manifests/authentik-redis-password.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: generators.external-secrets.io/v1alpha1 2 | kind: Password 3 | metadata: 4 | name: redis-password 5 | spec: 6 | length: 20 7 | allowRepeat: true 8 | symbolCharacters: "" 9 | symbols: 0 10 | 11 | --- 12 | apiVersion: external-secrets.io/v1 13 | 14 | kind: ExternalSecret 15 | metadata: 16 | name: redis-password 17 | spec: 18 | refreshInterval: "87600h" 19 | target: 20 | name: redis-password-secret 21 | template: 22 | data: 23 | redis-password: "{{ .password }}" 24 | dataFrom: 25 | - sourceRef: 26 | generatorRef: 27 | apiVersion: generators.external-secrets.io/v1alpha1 28 | kind: Password 29 | name: "redis-password" 30 | -------------------------------------------------------------------------------- /kubernetes/helm/security/authentik/manifests/authentik-secrets.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | 3 | kind: ExternalSecret 4 | metadata: 5 | name: security-authentik-es 6 | spec: 7 | refreshInterval: "1h" 8 | secretStoreRef: 9 | kind: ClusterSecretStore 10 | name: gcp-clusterstore 11 | target: 12 | name: security-authentik-secrets 13 | deletionPolicy: Delete 14 | creationPolicy: Owner 15 | template: 16 | engineVersion: v2 17 | templateFrom: 18 | - target: Data 19 | literal: | 20 | {{- .authentik }} 21 | dataFrom: 22 | - extract: 23 | key: "all_secrets" 24 | -------------------------------------------------------------------------------- /kubernetes/helm/security/authentik/values.yaml: -------------------------------------------------------------------------------- 1 | authentik: 2 | worker: 3 | replicas: 2 4 | pdb: 5 | enabled: true 6 | 7 | server: 8 | replicas: 2 9 | priorityClassName: system-cluster-critical 10 | 11 | pdb: 12 | enabled: true 13 | 14 | metrics: 15 | enabled: true 16 | serviceMonitor: 17 | enabled: true 18 | rules: 19 | enabled: true 20 | 21 | ingress: 22 | enabled: true 23 | annotations: 24 | external-dns.home.arpa/enabled: "true" 25 | hosts: 26 | - authentik.k8s.tylercash.dev 27 | tls: 28 | - secretName: authentik-letsencrypt-certificate 29 | hosts: 30 | - authentik.k8s.tylercash.dev 31 | 32 | global: 33 | # Check that env variable isn't provided in normal env map. Sync will fail if it's duplicated 34 | env: 35 | - name: AUTHENTIK_POSTGRESQL__PASSWORD 36 | valueFrom: 37 | secretKeyRef: 38 | name: authentik-database-app 39 | key: password 40 | - name: AUTHENTIK_BOOTSTRAP_TOKEN 41 | valueFrom: 42 | secretKeyRef: 43 | name: security-authentik-secrets 44 | key: bootstrap-token 45 | - name: AUTHENTIK_BOOTSTRAP_PASSWORD 46 | valueFrom: 47 | secretKeyRef: 48 | name: security-authentik-secrets 49 | key: bootstrap-password 50 | - name: AUTHENTIK_SECRET_KEY 51 | valueFrom: 52 | secretKeyRef: 53 | name: security-authentik-secrets 54 | key: secret-key 55 | - name: AUTHENTIK_EMAIL__HOST 56 | valueFrom: 57 | configMapKeyRef: 58 | name: authentik-email 59 | key: host 60 | - name: AUTHENTIK_EMAIL__FROM 61 | valueFrom: 62 | configMapKeyRef: 63 | name: authentik-email 64 | key: from 65 | - name: AUTHENTIK_EMAIL__USERNAME 66 | valueFrom: 67 | secretKeyRef: 68 | name: security-authentik-secrets 69 | key: email-username 70 | - name: AUTHENTIK_EMAIL__PASSWORD 71 | valueFrom: 72 | secretKeyRef: 73 | name: security-authentik-secrets 74 | key: email-password 75 | - name: AUTHENTIK_REDIS__PASSWORD 76 | valueFrom: 77 | secretKeyRef: 78 | name: &redis-secret redis-password-secret 79 | key: redis-password 80 | - name: AUTHENTIK_COOKIE_DOMAIN 81 | value: tylercash.dev 82 | - name: AUTHENTIK_POSTGRESQL__HOST 83 | value: authentik-database-rw 84 | 85 | redis: 86 | enabled: true 87 | architecture: replication 88 | global: 89 | defaultStorageClass: local-path 90 | auth: 91 | enabled: true 92 | existingSecret: *redis-secret 93 | 94 | 95 | postgresql: 96 | enabled: false 97 | -------------------------------------------------------------------------------- /kubernetes/helm/security/bitwarden/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: bitwarden 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | 
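For the authentik ExternalSecret above to work, the `all_secrets` entry in the GCP ClusterSecretStore has to be a YAML map of per-app blocks: `dataFrom.extract` pulls the whole entry, and the `{{- .authentik }}` template pastes just the `authentik` block in as the generated Secret's data. A hypothetical sketch of that payload; key names are taken from the secretKeyRef consumers in the authentik values, placeholder values are invented:

```yaml
# Hypothetical layout of the "all_secrets" entry in GCP Secret Manager.
# Each nested block must itself be a valid YAML map, because it is rendered
# verbatim as the data of the target Secret.
authentik:
  bootstrap-token: "<token>"        # consumed as AUTHENTIK_BOOTSTRAP_TOKEN
  bootstrap-password: "<password>"  # consumed as AUTHENTIK_BOOTSTRAP_PASSWORD
  secret-key: "<key>"               # consumed as AUTHENTIK_SECRET_KEY
  email-username: "<smtp-user>"     # consumed as AUTHENTIK_EMAIL__USERNAME
  email-password: "<smtp-pass>"     # consumed as AUTHENTIK_EMAIL__PASSWORD
alertmanager:
  # keys consumed by the monitoring-secrets ExternalSecret (not shown here)
```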
-------------------------------------------------------------------------------- /kubernetes/helm/security/bitwarden/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: vaultwarden/server 4 | pullPolicy: IfNotPresent 5 | tag: 1.33.2 6 | env: 7 | DATA_FOLDER: "config" 8 | 9 | service: 10 | main: 11 | ports: 12 | http: 13 | port: 80 14 | websocket: 15 | enabled: true 16 | port: 3012 17 | 18 | ingress: 19 | main: 20 | enabled: true 21 | hosts: 22 | - host: bitwarden.k8s.tylercash.dev 23 | paths: 24 | - path: / 25 | pathType: Prefix 26 | service: 27 | port: 80 28 | - path: /notifications/hub/negotiate 29 | pathType: Prefix 30 | service: 31 | port: 80 32 | - path: /notifications/hub 33 | pathType: Prefix 34 | service: 35 | port: 3012 36 | tls: 37 | - secretName: bitwarden-letsencrypt-certificate 38 | hosts: 39 | - bitwarden.k8s.tylercash.dev 40 | 41 | persistence: 42 | config: 43 | enabled: true 44 | storageClass: ceph-block 45 | accessMode: ReadWriteOnce 46 | size: 10Gi 47 | 48 | resources: 49 | requests: 50 | cpu: 1m 51 | memory: 32Mi 52 | -------------------------------------------------------------------------------- /kubernetes/helm/security/cert-manager/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: cert-manager 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: cert-manager 8 | version: v1.17.2 9 | repository: https://charts.jetstack.io -------------------------------------------------------------------------------- /kubernetes/helm/security/cert-manager/manifests/prod-issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: prod-issuer 5 | namespace: cert-manager 6 | spec: 7 | acme: 8 | email: certs@tylercash.dev 9 | server: https://acme-v02.api.letsencrypt.org/directory 10 | privateKeySecretRef: 11 | name: certs-issuer-account-key 12 | solvers: 13 | - dns01: 14 | cloudflare: 15 | email: tyler@tylercash.dev 16 | apiTokenSecretRef: 17 | name: cloudflare-secret 18 | key: api-token 19 | -------------------------------------------------------------------------------- /kubernetes/helm/security/cert-manager/manifests/staging-issuer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: staging-issuer 5 | namespace: cert-manager 6 | spec: 7 | acme: 8 | email: certs@tylercash.dev 9 | server: https://acme-staging-v02.api.letsencrypt.org/directory 10 | privateKeySecretRef: 11 | name: certs-issuer-staging-account-key 12 | solvers: 13 | - dns01: 14 | cloudflare: 15 | email: tyler@tylercash.dev 16 | apiTokenSecretRef: 17 | name: cloudflare-secret 18 | key: api-token -------------------------------------------------------------------------------- /kubernetes/helm/security/cert-manager/values.yaml: -------------------------------------------------------------------------------- 1 | cert-manager: 2 | crds: 3 | enabled: true 4 | podDnsPolicy: "None" 5 | podDnsConfig: 6 | nameservers: 7 | - "1.1.1.1" 8 | - "8.8.8.8" 9 | 10 | resources: 11 | requests: 12 | cpu: "1m" 13 | memory: "32Mi" 14 | 15 | 16 | ingressShim: 17 | defaultIssuerName: "prod-issuer" 18 | defaultIssuerKind: "ClusterIssuer" 19 | 
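Because `ingressShim` pins a default ClusterIssuer above, Ingresses do not need a `cert-manager.io/cluster-issuer` annotation; the shim's stock trigger annotation plus a `tls` block is enough for cert-manager to mint a Certificate from `prod-issuer`. A minimal hedged sketch, with host and resource names invented, shown as an illustration of the shim mechanism rather than this repo's exact usage:

```yaml
# Illustrative: with ingressShim.defaultIssuerName/Kind set, this single
# annotation asks cert-manager to create a Certificate from prod-issuer
# for the hosts listed under tls.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: example                          # hypothetical
  annotations:
    kubernetes.io/tls-acme: "true"
spec:
  tls:
    - hosts:
        - example.k8s.tylercash.dev      # hypothetical host
      secretName: example-letsencrypt-certificate
  rules:
    - host: example.k8s.tylercash.dev
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: example            # hypothetical backend Service
                port:
                  number: 80
```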
-------------------------------------------------------------------------------- /kubernetes/helm/security/external-secrets/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: external-secrets 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: external-secrets 8 | version: 0.17.0 9 | repository: https://charts.external-secrets.io/ -------------------------------------------------------------------------------- /kubernetes/helm/security/external-secrets/manifests/monitoring-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | 3 | kind: ExternalSecret 4 | metadata: 5 | name: monitoring-secrets 6 | namespace: monitoring 7 | 8 | spec: 9 | refreshInterval: "1h" 10 | secretStoreRef: 11 | kind: ClusterSecretStore 12 | name: gcp-clusterstore 13 | target: 14 | name: alertmanager-secrets 15 | deletionPolicy: Delete 16 | creationPolicy: Owner 17 | template: 18 | engineVersion: v2 19 | templateFrom: 20 | - target: Data 21 | literal: | 22 | {{- .alertmanager }} 23 | dataFrom: 24 | - extract: 25 | key: "all_secrets" 26 | -------------------------------------------------------------------------------- /kubernetes/helm/security/external-secrets/manifests/storage-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | 3 | kind: ExternalSecret 4 | metadata: 5 | name: storage-secrets 6 | namespace: storage 7 | spec: 8 | refreshInterval: "1h" 9 | secretStoreRef: 10 | kind: ClusterSecretStore 11 | name: gcp-clusterstore 12 | target: 13 | name: storage-secrets 14 | deletionPolicy: Delete 15 | dataFrom: 16 | - find: 17 | conversionStrategy: Default 18 | decodingStrategy: None 19 | name: 20 | regexp: storage- 21 | -------------------------------------------------------------------------------- /kubernetes/helm/security/external-secrets/values.yaml: -------------------------------------------------------------------------------- 1 | external-secrets: 2 | installCRDs: true 3 | 4 | serviceMonitor: 5 | enabled: true 6 | 7 | webhook: 8 | serviceMonitor: 9 | enabled: true 10 | 11 | resources: 12 | requests: 13 | cpu: "10m" 14 | -------------------------------------------------------------------------------- /kubernetes/helm/security/intel-device-plugins-gpu/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: intel-device-plugins-gpu 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: intel-device-plugins-gpu 8 | version: 0.32.1 9 | repository: https://intel.github.io/helm-charts/ 10 | -------------------------------------------------------------------------------- /kubernetes/helm/security/intel-device-plugins-gpu/values.yaml: -------------------------------------------------------------------------------- 1 | intel-device-plugins-gpu: {} 2 | -------------------------------------------------------------------------------- /kubernetes/helm/security/intel-device-plugins-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: intel-device-plugins-operator 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: intel-device-plugins-operator 8 | version: 0.32.1 9 | repository: https://intel.github.io/helm-charts/ 10 | 
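The operator chart above installs the CRDs and controller; the NodeFeatureRule manifest that follows labels matching nodes (for example `intel.feature.node.kubernetes.io/gpu: "true"`), and the separate intel-device-plugins-gpu chart then exposes the hardware as a schedulable extended resource. A hedged sketch of a consumer pod; `gpu.intel.com/i915` is the resource name the Intel GPU plugin conventionally advertises, so verify it against the deployed plugin version:

```yaml
# Illustrative GPU consumer (not from this repo). Scheduling is gated on the
# label created by the NodeFeatureRule below, and the extended resource is
# assumed to be gpu.intel.com/i915 as advertised by the Intel GPU plugin.
apiVersion: v1
kind: Pod
metadata:
  name: gpu-example                       # hypothetical
spec:
  nodeSelector:
    intel.feature.node.kubernetes.io/gpu: "true"
  containers:
    - name: transcode
      image: jellyfin/jellyfin            # e.g. a VAAPI transcoding workload
      resources:
        limits:
          gpu.intel.com/i915: 1           # extended resource from the device plugin
```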
-------------------------------------------------------------------------------- /kubernetes/helm/security/intel-device-plugins-operator/manifests/node-feature-rule.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: nfd.k8s-sigs.io/v1alpha1 2 | kind: NodeFeatureRule 3 | metadata: 4 | name: intel-dp-devices 5 | spec: 6 | rules: 7 | - name: "intel.dlb" 8 | labels: 9 | "intel.feature.node.kubernetes.io/dlb": "true" 10 | matchFeatures: 11 | - feature: pci.device 12 | matchExpressions: 13 | vendor: {op: In, value: ["8086"]} 14 | device: {op: In, value: ["2710"]} 15 | class: {op: In, value: ["0b40"]} 16 | - feature: kernel.loadedmodule 17 | matchExpressions: 18 | dlb2: {op: Exists} 19 | 20 | - name: "intel.dsa" 21 | labels: 22 | "intel.feature.node.kubernetes.io/dsa": "true" 23 | matchFeatures: 24 | - feature: pci.device 25 | matchExpressions: 26 | vendor: {op: In, value: ["8086"]} 27 | device: {op: In, value: ["0b25", "11fb", "1212"]} 28 | class: {op: In, value: ["0880"]} 29 | - feature: kernel.loadedmodule 30 | matchExpressions: 31 | idxd: {op: Exists} 32 | 33 | - name: "intel.fpga-arria10" 34 | labels: 35 | "intel.feature.node.kubernetes.io/fpga-arria10": "true" 36 | matchFeatures: 37 | - feature: pci.device 38 | matchExpressions: 39 | vendor: {op: In, value: ["8086"]} 40 | device: {op: In, value: ["09c4"]} 41 | class: {op: In, value: ["1200"]} 42 | matchAny: 43 | - matchFeatures: 44 | - feature: kernel.loadedmodule 45 | matchExpressions: 46 | dfl_pci: {op: Exists} 47 | - matchFeatures: 48 | - feature: kernel.loadedmodule 49 | matchExpressions: 50 | intel_fpga_pci: {op: Exists} 51 | 52 | - name: "intel.gpu" 53 | labels: 54 | "intel.feature.node.kubernetes.io/gpu": "true" 55 | matchFeatures: 56 | - feature: pci.device 57 | matchExpressions: 58 | vendor: {op: In, value: ["8086"]} 59 | class: {op: In, value: ["0300", "0380"]} 60 | matchAny: 61 | - matchFeatures: 62 | - feature: kernel.loadedmodule 63 | matchExpressions: 64 | i915: {op: Exists} 65 | - matchFeatures: 66 | - feature: kernel.enabledmodule 67 | matchExpressions: 68 | i915: {op: Exists} 69 | - matchFeatures: 70 | - feature: kernel.loadedmodule 71 | matchExpressions: 72 | xe: {op: Exists} 73 | - matchFeatures: 74 | - feature: kernel.enabledmodule 75 | matchExpressions: 76 | xe: {op: Exists} 77 | 78 | - name: "intel.iaa" 79 | labels: 80 | "intel.feature.node.kubernetes.io/iaa": "true" 81 | matchFeatures: 82 | - feature: pci.device 83 | matchExpressions: 84 | vendor: {op: In, value: ["8086"]} 85 | device: {op: In, value: ["0cfe", "1216"]} 86 | class: {op: In, value: ["0880"]} 87 | - feature: kernel.loadedmodule 88 | matchExpressions: 89 | idxd: {op: Exists} 90 | 91 | - name: "intel.qat" 92 | labels: 93 | "intel.feature.node.kubernetes.io/qat": "true" 94 | matchFeatures: 95 | - feature: pci.device 96 | matchExpressions: 97 | vendor: {op: In, value: ["8086"]} 98 | device: {op: In, value: ["37c8", "4940", "4942", "4944", "4946"]} 99 | class: {op: In, value: ["0b40"]} 100 | - feature: kernel.loadedmodule 101 | matchExpressions: 102 | intel_qat: {op: Exists} 103 | matchAny: 104 | - matchFeatures: 105 | - feature: kernel.loadedmodule 106 | matchExpressions: 107 | vfio_pci: {op: Exists} 108 | - matchFeatures: 109 | - feature: kernel.enabledmodule 110 | matchExpressions: 111 | vfio-pci: {op: Exists} 112 | 113 | - name: "intel.sgx" 114 | labels: 115 | "intel.feature.node.kubernetes.io/sgx": "true" 116 | extendedResources: 117 | sgx.intel.com/epc: "@cpu.security.sgx.epc" 118 | matchFeatures: 119 
| - feature: cpu.cpuid 120 | matchExpressions: 121 | SGX: {op: Exists} 122 | SGXLC: {op: Exists} 123 | - feature: cpu.security 124 | matchExpressions: 125 | sgx.enabled: {op: IsTrue} 126 | - feature: kernel.config 127 | matchExpressions: 128 | X86_SGX: {op: Exists} 129 | -------------------------------------------------------------------------------- /kubernetes/helm/security/intel-device-plugins-operator/values.yaml: -------------------------------------------------------------------------------- 1 | intel-device-plugins-operator: {} 2 | -------------------------------------------------------------------------------- /kubernetes/helm/security/kubed/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: kubed 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: kubed 8 | version: v0.13.2 9 | repository: https://charts.appscode.com/stable/ 10 | -------------------------------------------------------------------------------- /kubernetes/helm/security/kubed/values.yaml: -------------------------------------------------------------------------------- 1 | kubed: 2 | operator: 3 | registry: rancher 4 | repository: mirrored-appscode-kubed 5 | tag: v0.13.2 6 | resources: 7 | requests: 8 | cpu: "1m" 9 | -------------------------------------------------------------------------------- /kubernetes/helm/security/node-feature-discovery/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: node-feature-discovery 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: node-feature-discovery 8 | version: 0.17.3 9 | repository: https://kubernetes-sigs.github.io/node-feature-discovery/charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/security/node-feature-discovery/values.yaml: -------------------------------------------------------------------------------- 1 | node-feature-discovery: 2 | master: 3 | tolerations: 4 | - key: "node-role.kubernetes.io/master" 5 | operator: "Exists" 6 | effect: "NoSchedule" 7 | - key: "node-role.kubernetes.io/control-plane" 8 | operator: "Exists" 9 | effect: "NoSchedule" 10 | -------------------------------------------------------------------------------- /kubernetes/helm/security/wg-easy/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: wg-easy 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/security/wg-easy/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: ghcr.io/wg-easy/wg-easy 4 | pullPolicy: IfNotPresent 5 | tag: 14@sha256:5f26407fd2ede54df76d63304ef184576a6c1bb73f934a58a11abdd852fab549 6 | env: 7 | WG_DEFAULT_DNS: '10.43.0.10' 8 | WG_HOST: wg.k8s.tylercash.dev 9 | WG_ALLOWED_IPS: '0.0.0.0/0, ::/0' 10 | WG_PERSISTENT_KEEPALIVE: 20 11 | 12 | service: 13 | main: 14 | ports: 15 | http: 16 | port: '51821' 17 | wg: 18 | enabled: true 19 | type: LoadBalancer 20 | annotations: 21 | external-dns.home.arpa/enabled: 'true' 22 | ports: 23 | wg: 24 | port: '51820' 25 | protocol: UDP 26 | 27 | initContainers: 28 | 
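# Editor's note (hedged): wg-easy routes peer traffic through this pod, so
# IPv4 forwarding has to be switched on in the pod's network namespace before
# the server starts - that is all the privileged busybox init container below
# does. These are namespaced sysctls, so a pod-level securityContext.sysctls
# entry could replace it if the kubelet's unsafe-sysctl allowlist permits.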
setup-wg: 29 | image: busybox:1.37.0 30 | command: 31 | - sh 32 | - -c 33 | - sysctl -w net.ipv4.ip_forward=1 && sysctl -w net.ipv4.conf.all.forwarding=1 34 | securityContext: 35 | privileged: true 36 | capabilities: 37 | add: 38 | - NET_ADMIN 39 | 40 | ingress: 41 | main: 42 | enabled: true 43 | hosts: 44 | - host: &host wg-ui.k8s.tylercash.dev 45 | paths: 46 | - path: / 47 | pathType: Prefix 48 | service: 49 | port: 51821 50 | tls: 51 | - secretName: wg-letsencrypt-certificate 52 | hosts: 53 | - *host 54 | 55 | persistence: 56 | config: 57 | enabled: true 58 | storageClass: ceph-block 59 | accessMode: ReadWriteOnce 60 | size: 1Gi 61 | mountPath: /etc/wireguard/ 62 | 63 | securityContext: 64 | privileged: true 65 | capabilities: 66 | add: 67 | - NET_ADMIN 68 | 69 | resources: 70 | requests: 71 | cpu: 1m 72 | memory: 32Mi 73 | -------------------------------------------------------------------------------- /kubernetes/helm/storage/cloudnative-pg/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: cloudnative-pg 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: cloudnative-pg 8 | version: 0.24.0 9 | repository: https://cloudnative-pg.github.io/charts -------------------------------------------------------------------------------- /kubernetes/helm/storage/cloudnative-pg/values.yaml: -------------------------------------------------------------------------------- 1 | cloudnative-pg: 2 | replicaCount: 3 3 | config: 4 | INHERITED_LABELS: instance, name 5 | 6 | priorityClassName: system-cluster-critical 7 | -------------------------------------------------------------------------------- /kubernetes/helm/storage/lancache/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: lancache 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/storage/lancache/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | image: 3 | repository: lancachenet/monolithic 4 | tag: latest 5 | pullPolicy: IfNotPresent 6 | 7 | env: 8 | CACHE_DISK_SIZE: 5000g 9 | CACHE_INDEX_SIZE: 1000m 10 | 11 | service: 12 | main: 13 | type: LoadBalancer 14 | loadBalancerIP: "10.0.90.148" 15 | ports: 16 | http: 17 | port: 80 18 | https: 19 | enabled: true 20 | port: 443 21 | 22 | probes: 23 | readiness: 24 | custom: true 25 | spec: 26 | httpGet: 27 | path: /lancache-heartbeat 28 | port: 80 29 | liveness: 30 | custom: true 31 | spec: 32 | httpGet: 33 | path: /lancache-heartbeat 34 | port: 80 35 | startup: 36 | custom: true 37 | spec: 38 | httpGet: 39 | path: /lancache-heartbeat 40 | port: 80 41 | 42 | persistence: 43 | cache: 44 | enabled: true 45 | storageClass: ceph-filesystem-rust 46 | accessMode: ReadWriteOnce 47 | size: 5Ti 48 | mountPath: /data/cache 49 | labels: 50 | snapshot.home.arpa/enabled: "false" 51 | logs: 52 | enabled: false 53 | mountPath: /data/logs 54 | 55 | nodeSelector: 56 | kubernetes.io/arch: amd64 57 | -------------------------------------------------------------------------------- /kubernetes/helm/storage/rook-ceph/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: 
rook-ceph-cluster 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: rook-ceph-cluster 8 | version: v1.17.3 9 | repository: https://charts.rook.io/release -------------------------------------------------------------------------------- /kubernetes/helm/storage/rook-ceph/manifests/objectstore-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: rook-ceph-rgw-ceph-objectstore 5 | spec: 6 | rules: 7 | - host: &host s3.k8s.tylercash.dev 8 | http: 9 | paths: 10 | - backend: 11 | service: 12 | name: rook-ceph-rgw-ceph-objectstore 13 | port: 14 | number: 80 15 | path: / 16 | pathType: Prefix 17 | tls: 18 | - hosts: 19 | - *host 20 | secretName: s3-letsencrypt-certificate -------------------------------------------------------------------------------- /kubernetes/helm/storage/rook-ceph/manifests/post-sync-hook.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | name: ceph-configurer 5 | spec: 6 | schedule: "@hourly" 7 | jobTemplate: 8 | spec: 9 | template: 10 | spec: 11 | initContainers: 12 | - name: config-init 13 | image: rook/ceph:master 14 | command: ["/usr/local/bin/toolbox.sh"] 15 | args: ["--skip-watch"] 16 | imagePullPolicy: IfNotPresent 17 | env: 18 | - name: ROOK_CEPH_USERNAME 19 | valueFrom: 20 | secretKeyRef: 21 | name: rook-ceph-mon 22 | key: ceph-username 23 | volumeMounts: 24 | - mountPath: /etc/ceph 25 | name: ceph-config 26 | - name: mon-endpoint-volume 27 | mountPath: /etc/rook 28 | - name: ceph-admin-secret 29 | mountPath: /var/lib/rook-ceph-mon 30 | containers: 31 | - name: script 32 | image: rook/ceph:master 33 | volumeMounts: 34 | - mountPath: /etc/ceph 35 | name: ceph-config 36 | readOnly: true 37 | command: 38 | - "bash" 39 | - "-c" 40 | - | 41 | set -e 42 | 43 | echo "Enable modules" 44 | ceph mgr module enable rook 45 | ceph orch set backend rook 46 | ceph mgr module enable insights 47 | ceph mgr module disable diskprediction_local 48 | ceph dashboard set-grafana-api-url https://grafana.k8s.tylercash.dev 49 | ceph dashboard set-alertmanager-api-host https://alert-manager.k8s.tylercash.dev 50 | ceph dashboard set-prometheus-api-host https://prometheus.k8s.tylercash.dev 51 | ceph config set global osd_scrub_load_threshold 6.0 52 | ceph config set global osd_scrub_during_recovery true 53 | ceph config set global osd_scrub_auto_repair true 54 | 55 | echo "Configure PG autoscaler" 56 | ceph osd pool set ceph-filesystem-rust-data0 pg_num 8 57 | ceph osd pool set ceph-blockpool pg_num 8 58 | ceph config set global mon_target_pg_per_osd 100 59 | ceph config set mon mon_max_pg_per_osd 500 60 | 61 | echo "Configure disk prediction" 62 | ceph config set global device_failure_prediction_mode local 63 | # if ceph dashboard sso status | grep "disabled"; then 64 | # echo "Configuring SAML" 65 | # ceph dashboard sso setup saml2 https://ceph.k8s.tylercash.dev https://authentik.k8s.tylercash.dev/api/v3/providers/saml/3/metadata/?download http://schemas.goauthentik.io/2021/02/saml/username 66 | # ceph dashboard sso enable saml2 67 | # ceph mgr module disable dashboard 68 | # ceph mgr module enable dashboard 69 | # ceph dashboard sso status 70 | # fi 71 | if ! 
ceph dashboard ac-user-show tcash ; then # Not 100% foolproof comparison, but good enough for home 72 | echo "Creating users" 73 | cat /dev/urandom | tr -dc '[:alpha:]' | fold -w ${1:-100} | head -n 1 > /tmp/pass 74 | ceph dashboard ac-user-create tcash -i /tmp/pass administrator > /dev/null 2>&1 75 | rm -f /tmp/pass 76 | fi 77 | volumes: 78 | - name: ceph-admin-secret 79 | secret: 80 | secretName: rook-ceph-mon 81 | optional: false 82 | items: 83 | - key: ceph-secret 84 | path: secret.keyring 85 | - name: mon-endpoint-volume 86 | configMap: 87 | name: rook-ceph-mon-endpoints 88 | items: 89 | - key: data 90 | path: mon-endpoints 91 | - name: ceph-config 92 | emptyDir: 93 | sizeLimit: 100Mi 94 | restartPolicy: Never 95 | -------------------------------------------------------------------------------- /kubernetes/helm/storage/rook-operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: rook-ceph 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: rook-ceph 8 | version: v1.17.3 9 | repository: https://charts.rook.io/release -------------------------------------------------------------------------------- /kubernetes/helm/storage/snap-scheduler/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: snap-scheduler 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: snapscheduler 8 | version: 3.4.0 9 | repository: https://backube.github.io/helm-charts/ -------------------------------------------------------------------------------- /kubernetes/helm/storage/snap-scheduler/values.yaml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Tyler-Cash/homelab/1f00eb07f19e805bb9047efc2d0ad2e9053e08ac/kubernetes/helm/storage/snap-scheduler/values.yaml -------------------------------------------------------------------------------- /kubernetes/helm/storage/syncthing/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: syncthing 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 1.5.1 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/storage/syncthing/values.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | app-template: 3 | image: 4 | repository: syncthing/syncthing 5 | tag: 1.29 6 | pullPolicy: IfNotPresent 7 | 8 | service: 9 | main: 10 | ports: 11 | http: 12 | port: 8384 13 | tcp: 14 | port: 22000 15 | udp: 16 | port: 22000 17 | protocol: UDP 18 | broadcast: 19 | port: 21027 20 | protocol: UDP 21 | 22 | 23 | ingress: 24 | main: 25 | enabled: true 26 | hosts: 27 | - host: syncthing.k8s.tylercash.dev 28 | paths: 29 | - path: / 30 | pathType: Prefix 31 | service: 32 | port: 8384 33 | tls: 34 | - secretName: syncthing-letsencrypt-certificate 35 | hosts: 36 | - syncthing.k8s.tylercash.dev 37 | 38 | podSecurityContext: 39 | runAsUser: 568 40 | runAsGroup: 568 41 | fsGroup: 568 42 | fsGroupChangePolicy: "Always" 43 | 44 | persistence: 45 | data: 46 | enabled: true 47 | storageClass: ceph-block 48 | accessMode: ReadWriteOnce 49 | mountPath: /var/syncthing 50 | size: 50Gi 51 | photos: 52 | enabled: true 53 | storageClass:
ceph-filesystem-rust 54 | accessMode: ReadWriteMany 55 | size: 5Ti 56 | 57 | resources: 58 | requests: 59 | cpu: 100m 60 | memory: 256Mi 61 | -------------------------------------------------------------------------------- /kubernetes/helm/storage/volsync/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: volsync 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: volsync 8 | version: 0.12.1 9 | repository: https://backube.github.io/helm-charts/ 10 | -------------------------------------------------------------------------------- /kubernetes/helm/storage/volsync/manifests/volsync-rules.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: PrometheusRule 3 | metadata: 4 | name: volsync-rules 5 | spec: 6 | groups: 7 | - name: volsync.rules 8 | rules: 9 | - alert: VolsyncBackupFailed 10 | annotations: 11 | description: | 12 | {{ $labels.obj_name }} backup has failed. 13 | summary: Volsync backup failed for a specific PVC. 14 | expr: | 15 | max_over_time(volsync_volume_out_of_sync{job="volsync-metrics",namespace="storage"}[5m]) > 0 16 | for: 1s 17 | labels: 18 | severity: critical 19 | -------------------------------------------------------------------------------- /kubernetes/helm/storage/volsync/manifests/volsync-servicemonitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: volsync-monitor 5 | labels: 6 | control-plane: volsync-controller 7 | spec: 8 | endpoints: 9 | - interval: 30s 10 | path: /metrics 11 | port: https 12 | scheme: https 13 | tlsConfig: 14 | # Using self-signed cert for connection 15 | insecureSkipVerify: true 16 | selector: 17 | matchLabels: 18 | control-plane: volsync-controller 19 | -------------------------------------------------------------------------------- /kubernetes/helm/storage/volsync/values.yaml: -------------------------------------------------------------------------------- 1 | volsync: 2 | manageCRDs: true 3 | 4 | metrics: 5 | disableAuth: true 6 | 7 | securityContext: 8 | allowPrivilegeEscalation: true 9 | 10 | resources: 11 | limits: {} 12 | requests: 13 | cpu: 500m 14 | memory: 500Mi 15 | -------------------------------------------------------------------------------- /kubernetes/helm/tylerbot/tyler-bot-backend/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: backend 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 3.7.3 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/tylerbot/tyler-bot-backend/manifests/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: event-ingress 5 | annotations: 6 | nginx.ingress.kubernetes.io/use-regex: "true" 7 | external-dns.home.arpa/enabled: "true" 8 | spec: 9 | tls: 10 | - hosts: 11 | - event.k8s.tylercash.dev 12 | secretName: event-tylercash-dev 13 | rules: 14 | - host: event.k8s.tylercash.dev 15 | http: 16 | paths: 17 | - path: / 18 | pathType: Prefix 19 | backend: 20 | service: 21 | name: tyler-bot-frontend 22 | port: 23 | 
number: 80 24 | - path: /api(/|$)(.*) 25 | pathType: ImplementationSpecific 26 | backend: 27 | service: 28 | name: tyler-bot-backend 29 | port: 30 | number: 8080 31 | -------------------------------------------------------------------------------- /kubernetes/helm/tylerbot/tyler-bot-backend/manifests/tylerbot-database.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: postgresql.cnpg.io/v1 2 | kind: Cluster 3 | metadata: 4 | name: &name tylerbot-database 5 | spec: 6 | instances: 3 7 | primaryUpdateStrategy: unsupervised 8 | bootstrap: 9 | initdb: 10 | database: tylerbot 11 | owner: tylerbot 12 | storage: 13 | storageClass: local-path 14 | size: 5Gi 15 | 16 | monitoring: 17 | enablePodMonitor: true 18 | 19 | affinity: 20 | enablePodAntiAffinity: true 21 | topologyKey: kubernetes.io/hostname 22 | podAntiAffinityType: preferred 23 | 24 | --- 25 | apiVersion: postgresql.cnpg.io/v1 26 | kind: ScheduledBackup 27 | metadata: 28 | name: tylerbot-backup 29 | spec: 30 | immediate: true 31 | schedule: "@weekly" 32 | backupOwnerReference: self 33 | cluster: 34 | name: tylerbot-database 35 | -------------------------------------------------------------------------------- /kubernetes/helm/tylerbot/tyler-bot-backend/manifests/tylerbot-secrets.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1 2 | 3 | kind: ExternalSecret 4 | metadata: 5 | name: tylerbot-es 6 | spec: 7 | refreshInterval: "1h" 8 | secretStoreRef: 9 | kind: ClusterSecretStore 10 | name: gcp-clusterstore 11 | target: 12 | name: tylerbot-secrets 13 | deletionPolicy: Delete 14 | creationPolicy: Owner 15 | template: 16 | engineVersion: v2 17 | templateFrom: 18 | - target: Data 19 | literal: | 20 | {{- .tylerbot }} 21 | dataFrom: 22 | - extract: 23 | key: "all_secrets" 24 | -------------------------------------------------------------------------------- /kubernetes/helm/tylerbot/tyler-bot-backend/manifests/tylerbot-service-monitor.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: monitoring.coreos.com/v1 2 | kind: ServiceMonitor 3 | metadata: 4 | name: tylerbot-backend 5 | spec: 6 | endpoints: 7 | - interval: 30s 8 | port: http 9 | path: /api/actuator/prometheus 10 | namespaceSelector: 11 | matchNames: 12 | - tylerbot 13 | selector: 14 | matchLabels: 15 | app.kubernetes.io/instance: tyler-bot-backend 16 | -------------------------------------------------------------------------------- /kubernetes/helm/tylerbot/tyler-bot-backend/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | controllers: 3 | main: 4 | # type: deployment 5 | # replicas: 3 6 | # # -- Set the controller upgrade strategy 7 | # # For Deployments, valid values are Recreate (default) and RollingUpdate. 8 | # # For StatefulSets, valid values are OnDelete and RollingUpdate (default). 9 | # # DaemonSets/CronJobs/Jobs ignore this.
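# Editor's note (hedged): with these overrides left commented out the chart
# defaults apply - per the note above that means Recreate for Deployments and
# a single replica, so the backend briefly drops during each image rollout.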
10 | # strategy: RollingUpdate 11 | # 12 | # rollingUpdate: 13 | # unavailable: 2 14 | # surge: 5 15 | containers: 16 | main: 17 | image: 18 | repository: ghcr.io/tyler-cash/peep-bot-backend 19 | tag: latest@sha256:3307d8d89b3a4056b4afbb24f0c926600e947ec93aea60b8be3689afba9df33e 20 | env: 21 | TZ: Australia/Sydney 22 | SPRING_CONFIG_ADDITIONAL-LOCATION: &secret_folder /secrets/config/ 23 | SPRING_PROFILES_ACTIVE: prod 24 | SPRING_DATASOURCE_URL: 25 | valueFrom: 26 | secretKeyRef: 27 | name: tylerbot-database-app 28 | key: jdbc-uri 29 | SPRING_DATASOURCE_USERNAME: 30 | valueFrom: 31 | secretKeyRef: 32 | name: tylerbot-database-app 33 | key: username 34 | SPRING_DATASOURCE_PASSWORD: 35 | valueFrom: 36 | secretKeyRef: 37 | name: tylerbot-database-app 38 | key: password 39 | probes: 40 | liveness: &probe 41 | enabled: true 42 | custom: true 43 | spec: 44 | httpGet: 45 | path: /api/actuator/health 46 | port: &port 8080 47 | initialDelaySeconds: 10 48 | periodSeconds: 10 49 | timeoutSeconds: 1 50 | failureThreshold: 3 51 | readiness: *probe 52 | 53 | service: 54 | main: 55 | type: LoadBalancer 56 | controller: main 57 | ports: 58 | http: 59 | port: *port 60 | 61 | persistence: 62 | secrets: 63 | enabled: true 64 | type: secret 65 | name: tylerbot-secrets 66 | globalMounts: 67 | - path: *secret_folder 68 | 69 | resources: 70 | requests: 71 | cpu: 50m 72 | memory: 1Gi 73 | limits: 74 | memory: 2Gi 75 | -------------------------------------------------------------------------------- /kubernetes/helm/tylerbot/tyler-bot-frontend/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: frontend 3 | type: application 4 | version: 1.0.0 5 | appVersion: "1.0.0" 6 | dependencies: 7 | - name: app-template 8 | version: 3.7.3 9 | repository: https://bjw-s-labs.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /kubernetes/helm/tylerbot/tyler-bot-frontend/values.yaml: -------------------------------------------------------------------------------- 1 | app-template: 2 | controllers: 3 | main: 4 | containers: 5 | main: 6 | image: 7 | repository: ghcr.io/tyler-cash/peep-bot-frontend 8 | tag: latest@sha256:95f24ac34b73a4acd637f50ae64f9077138ba2418ae89d2c815805be8bd05e8b 9 | env: 10 | TZ: Australia/Sydney 11 | 12 | service: 13 | main: 14 | type: LoadBalancer 15 | controller: main 16 | ports: 17 | http: 18 | port: 80 19 | 20 | resources: 21 | requests: 22 | cpu: 50m 23 | memory: 100Mi 24 | limits: 25 | memory: 500Mi 26 | 27 | -------------------------------------------------------------------------------- /kubernetes/manifests/argocd/orchestrator/helm-apps.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: ApplicationSet 3 | metadata: 4 | name: helm-apps 5 | namespace: argocd 6 | spec: 7 | generators: 8 | - git: 9 | repoURL: https://github.com/Tyler-Cash/homelab.git 10 | revision: HEAD 11 | directories: 12 | - path: "kubernetes/helm/*/*" 13 | template: 14 | metadata: 15 | name: "{{path.basenameNormalized}}" 16 | spec: 17 | project: default 18 | source: 19 | repoURL: https://github.com/Tyler-Cash/homelab.git 20 | targetRevision: HEAD 21 | path: "{{path}}" 22 | helm: 23 | releaseName: "{{path.basename}}" 24 | valueFiles: 25 | - values.yaml 26 | destination: 27 | server: https://kubernetes.default.svc 28 | namespace: "{{path[2]}}" 29 | syncPolicy: 30 | syncOptions: 31 | - RespectIgnoreDifferences=true 32 | -
CreateNamespace=true 33 | - ServerSideApply=true 34 | automated: 35 | prune: true 36 | selfHeal: true 37 | -------------------------------------------------------------------------------- /kubernetes/manifests/argocd/orchestrator/local-path-storage.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: local-path-storage 5 | namespace: argocd 6 | spec: 7 | project: default 8 | source: 9 | repoURL: https://github.com/rancher/local-path-provisioner.git 10 | targetRevision: v0.0.23 11 | path: deploy/chart/local-path-provisioner/ 12 | helm: 13 | releaseName: "local-path-storage" 14 | destination: 15 | server: https://kubernetes.default.svc 16 | namespace: storage 17 | syncPolicy: 18 | syncOptions: 19 | - CreateNamespace=true 20 | - ServerSideApply=true 21 | automated: 22 | prune: true 23 | selfHeal: true 24 | -------------------------------------------------------------------------------- /kubernetes/manifests/argocd/orchestrator/manifest-apps.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: ApplicationSet 3 | metadata: 4 | name: manifest-apps 5 | namespace: argocd 6 | spec: 7 | generators: 8 | - git: 9 | repoURL: https://github.com/Tyler-Cash/homelab.git 10 | revision: HEAD 11 | directories: 12 | - path: "kubernetes/manifests/*/*" 13 | syncPolicy: 14 | preserveResourcesOnDeletion: true 15 | template: 16 | metadata: 17 | name: "{{path.basenameNormalized}}" 18 | spec: 19 | project: default 20 | source: 21 | repoURL: https://github.com/Tyler-Cash/homelab.git 22 | targetRevision: HEAD 23 | path: "{{path}}" 24 | destination: 25 | server: https://kubernetes.default.svc 26 | namespace: "{{path[2]}}" 27 | syncPolicy: 28 | syncOptions: 29 | - RespectIgnoreDifferences=true 30 | - CreateNamespace=true 31 | - ServerSideApply=true 32 | automated: 33 | prune: true 34 | selfHeal: true 35 | -------------------------------------------------------------------------------- /kubernetes/manifests/argocd/orchestrator/manifest-helm-apps.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: ApplicationSet 3 | metadata: 4 | name: manifest-helm-apps 5 | namespace: argocd 6 | spec: 7 | generators: 8 | - git: 9 | repoURL: https://github.com/Tyler-Cash/homelab.git 10 | revision: HEAD 11 | directories: 12 | - path: "kubernetes/helm/*/*/manifests" 13 | syncPolicy: 14 | preserveResourcesOnDeletion: true 15 | template: 16 | metadata: 17 | name: "{{path[3]}}-manifests" 18 | spec: 19 | project: default 20 | source: 21 | repoURL: https://github.com/Tyler-Cash/homelab.git 22 | targetRevision: HEAD 23 | path: "{{path}}" 24 | destination: 25 | server: https://kubernetes.default.svc 26 | namespace: "{{path[2]}}" 27 | syncPolicy: 28 | syncOptions: 29 | - RespectIgnoreDifferences=true 30 | - CreateNamespace=true 31 | - ServerSideApply=true 32 | automated: 33 | prune: true 34 | selfHeal: true 35 | -------------------------------------------------------------------------------- /kubernetes/manifests/argocd/orchestrator/system-upgrade-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: system-upgrade-controller-git 5 | namespace: argocd 6 | spec: 7 | project: default 8 | source: 9 | repoURL: 
https://github.com/rancher/system-upgrade-controller.git 10 | path: manifests/ 11 | targetRevision: v0.13.2 12 | 13 | destination: 14 | server: https://kubernetes.default.svc 15 | namespace: security 16 | syncPolicy: 17 | syncOptions: 18 | - RespectIgnoreDifferences=true 19 | - CreateNamespace=true 20 | - ServerSideApply=true 21 | automated: 22 | prune: true 23 | selfHeal: true 24 | -------------------------------------------------------------------------------- /kubernetes/manifests/csi-addons-system/kubernetes-csi-addons/setup-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | labels: 5 | app.kubernetes.io/managed-by: kustomize 6 | app.kubernetes.io/name: csi-addons 7 | control-plane: controller-manager 8 | name: csi-addons-system 9 | --- 10 | apiVersion: v1 11 | data: 12 | controller_manager_config.yaml: | 13 | --- 14 | apiVersion: controller-runtime.sigs.k8s.io/v1alpha1 15 | kind: ControllerManagerConfig 16 | health: 17 | healthProbeBindAddress: :8081 18 | metrics: 19 | bindAddress: 127.0.0.1:8080 20 | webhook: 21 | port: 9443 22 | leaderElection: 23 | leaderElect: true 24 | resourceName: e8cd140a.openshift.io 25 | kind: ConfigMap 26 | metadata: 27 | name: csi-addons-manager-config 28 | namespace: csi-addons-system 29 | --- 30 | apiVersion: apps/v1 31 | kind: Deployment 32 | metadata: 33 | labels: 34 | app.kubernetes.io/name: csi-addons 35 | name: csi-addons-controller-manager 36 | namespace: csi-addons-system 37 | spec: 38 | replicas: 1 39 | selector: 40 | matchLabels: 41 | app.kubernetes.io/name: csi-addons 42 | template: 43 | metadata: 44 | annotations: 45 | kubectl.kubernetes.io/default-container: manager 46 | labels: 47 | app.kubernetes.io/name: csi-addons 48 | spec: 49 | containers: 50 | - args: 51 | - --secure-listen-address=0.0.0.0:8443 52 | - --upstream=http://127.0.0.1:8080/ 53 | - --logtostderr=true 54 | - --v=10 55 | image: quay.io/brancz/kube-rbac-proxy:v0.18.0 56 | name: kube-rbac-proxy 57 | ports: 58 | - containerPort: 8443 59 | name: https 60 | protocol: TCP 61 | resources: 62 | limits: 63 | cpu: 500m 64 | memory: 128Mi 65 | requests: 66 | cpu: 10m 67 | memory: 64Mi 68 | securityContext: 69 | allowPrivilegeEscalation: false 70 | readOnlyRootFilesystem: true 71 | - args: 72 | - --namespace=$(POD_NAMESPACE) 73 | - --health-probe-bind-address=:8081 74 | - --metrics-bind-address=127.0.0.1:8080 75 | - --leader-elect 76 | command: 77 | - /csi-addons-manager 78 | env: 79 | - name: POD_NAMESPACE 80 | valueFrom: 81 | fieldRef: 82 | fieldPath: metadata.namespace 83 | image: quay.io/csiaddons/k8s-controller:v0.10.0 84 | livenessProbe: 85 | httpGet: 86 | path: /healthz 87 | port: 8081 88 | initialDelaySeconds: 15 89 | periodSeconds: 20 90 | name: manager 91 | readinessProbe: 92 | httpGet: 93 | path: /readyz 94 | port: 8081 95 | initialDelaySeconds: 5 96 | periodSeconds: 10 97 | resources: 98 | limits: 99 | cpu: 1000m 100 | memory: 512Mi 101 | requests: 102 | cpu: 10m 103 | memory: 64Mi 104 | securityContext: 105 | allowPrivilegeEscalation: false 106 | readOnlyRootFilesystem: true 107 | securityContext: 108 | runAsNonRoot: true 109 | serviceAccountName: csi-addons-controller-manager 110 | terminationGracePeriodSeconds: 10 111 | -------------------------------------------------------------------------------- /kubernetes/manifests/kube-system/csi-snapshotter/csi-snapshotter-controller.yaml: -------------------------------------------------------------------------------- 1 | 
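# (Hedged note: Replace=true below makes Argo CD swap the upstream objects
# wholesale via kubectl replace rather than apply; for external-snapshotter
# this is commonly needed because the CRDs are too large for the
# last-applied-configuration annotation that apply would add.)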
apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: csi-snapshotter-controller 5 | namespace: argocd 6 | spec: 7 | project: default 8 | source: 9 | path: deploy/kubernetes/snapshot-controller 10 | repoURL: https://github.com/kubernetes-csi/external-snapshotter.git 11 | targetRevision: v6.1.0 12 | destination: 13 | server: https://kubernetes.default.svc 14 | namespace: kube-system 15 | syncPolicy: 16 | syncOptions: 17 | - CreateNamespace=true 18 | - Replace=true 19 | automated: 20 | prune: true 21 | selfHeal: true 22 |
-------------------------------------------------------------------------------- /kubernetes/manifests/kube-system/csi-snapshotter/csi-snapshotter-crds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: Application 3 | metadata: 4 | name: csi-snapshotter-crds 5 | namespace: argocd 6 | spec: 7 | project: default 8 | source: 9 | path: client/config/crd/ 10 | repoURL: https://github.com/kubernetes-csi/external-snapshotter.git 11 | targetRevision: v6.1.0 12 | destination: 13 | server: https://kubernetes.default.svc 14 | namespace: kube-system 15 | syncPolicy: 16 | syncOptions: 17 | - CreateNamespace=true 18 | - Replace=true 19 | automated: 20 | prune: true 21 | selfHeal: true 22 |
-------------------------------------------------------------------------------- /kubernetes/manifests/kube-system/scheduling/normal-pc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: scheduling.k8s.io/v1 2 | kind: PriorityClass 3 | metadata: 4 | name: normal-priority 5 | value: 100 6 | preemptionPolicy: PreemptLowerPriority 7 | globalDefault: true 8 | description: "Normal priority for all pods." 9 |
-------------------------------------------------------------------------------- /kubernetes/manifests/system-upgrade/system-upgrade-controller/upgrade.yaml: -------------------------------------------------------------------------------- 1 | # Server plan 2 | apiVersion: upgrade.cattle.io/v1 3 | kind: Plan 4 | metadata: 5 | name: server-plan 6 | spec: 7 | concurrency: 1 8 | cordon: true 9 | drain: 10 | force: true 11 | skipWaitForDeleteTimeout: 30 12 | nodeSelector: 13 | matchExpressions: 14 | - key: node-role.kubernetes.io/master 15 | operator: In 16 | values: 17 | - "true" 18 | # (a separate agent plan would instead select workers with 19 | # operator: DoesNotExist on the same label) 20 | serviceAccountName: system-upgrade 21 | upgrade: 22 | image: rancher/k3s-upgrade 23 | channel: https://update.k3s.io/v1-release/channels/stable
-------------------------------------------------------------------------------- /renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "automergeType": "branch", 4 | "extends": [ 5 | "config:recommended" 6 | ], 7 | "timezone": "Australia/Sydney", 8 | "ignoreTests": true, 9 | "schedule": [ 10 | "before 7am every weekday" 11 | ], 12 | "automergeSchedule": [ 13 | "before 9am every weekday" 14 | ], 15 | "prHourlyLimit": 0, 16 | "branchConcurrentLimit": 0, 17 | "prConcurrentLimit": 0, 18 | "packageRules": [ 19 | { 20 | "matchPackageNames": [ 21 | "rook-ceph", 22 | "rook-ceph-cluster" 23 | ], 24 | "groupName": "rook" 25 | }, 26 | { 27 | "matchPackageNames": [ 28 | "felddy/foundryvtt" 29 | ], 30 | "automerge": false 31 | }, 32 | { 33 | "matchPackageNames": [ 34 | "ghcr.io/tyler-cash/peep-bot-backend", 35 |
"ghcr.io/tyler-cash/peep-bot-frontend" 36 | ], 37 | "automerge": false 38 | }, 39 | { 40 | "matchCurrentVersion": "/^0\\./", 41 | "automerge": false 42 | }, 43 | { 44 | "matchUpdateTypes": [ 45 | "minor", 46 | "patch", 47 | "pin", 48 | "digest" 49 | ], 50 | "automerge": false 51 | } 52 | ] 53 | } 54 |
-------------------------------------------------------------------------------- /terraform/backups/account.tf: -------------------------------------------------------------------------------- 1 | resource "google_service_account" "backup_operator" { 2 | account_id = "homelab-backup-k8s-account" 3 | display_name = "homelab-backup-k8s-account" 4 | project = google_project.homelab_backups.project_id 5 | } 6 | resource "google_service_account_key" "backup_operator_key" { 7 | service_account_id = google_service_account.backup_operator.email 8 | } 9 | 10 | data "google_iam_policy" "secrets_k8s_policy" { 11 | binding { 12 | role = "roles/editor" 13 | members = [ 14 | "serviceAccount:${google_service_account.backup_operator.email}" 15 | ] 16 | } 17 | binding { 18 | role = "roles/browser" 19 | members = [ 20 | "serviceAccount:${google_service_account.backup_operator.email}" 21 | ] 22 | } 23 | } 24 | 25 | resource "google_project_iam_policy" "project" { 26 | project = google_project.homelab_backups.project_id 27 | policy_data = data.google_iam_policy.secrets_k8s_policy.policy_data 28 | } 29 |
-------------------------------------------------------------------------------- /terraform/backups/bucket.tf: -------------------------------------------------------------------------------- 1 | resource "google_storage_bucket" "backup-bucket" { 2 | name = "homelab-backups-k8s" 3 | location = "ASIA-SOUTHEAST1" 4 | storage_class = "STANDARD" 5 | uniform_bucket_level_access = true 6 | project = google_project.homelab_backups.project_id 7 | } 8 |
-------------------------------------------------------------------------------- /terraform/backups/k8s.tf: -------------------------------------------------------------------------------- 1 | resource "kubernetes_config_map" "restic-config" { 2 | metadata { 3 | name = "restic-config" 4 | # Create in Kyverno's namespace so kyverno can propagate it as needed 5 | namespace = "kyverno" 6 | } 7 | 8 | data = { 9 | RESTIC_REPOSITORY = "gs:${google_storage_bucket.backup-bucket.name}" 10 | # A hardcoded password only deters casual, unauthorized access to the backup files; 11 | # a targeted attacker with access to this ConfigMap or the bucket could still read the data 12 | RESTIC_PASSWORD = "H8%G3SN!MJb^65rBNk4@Ug4ZASRfsD*JKwQPi8aehh^2tq*@gyUJ@W2z4T#o&cQD5ry*GdYHJ&" 13 | GOOGLE_PROJECT_ID = google_project.homelab_backups.project_id 14 | GOOGLE_APPLICATION_CREDENTIALS = base64decode(google_service_account_key.backup_operator_key.private_key) 15 | } 16 | } 17 | 18 | 19 | resource "kubernetes_secret" "cloudnativepg-secrets" { 20 | metadata { 21 | name = "cloudnativepg-secrets" 22 | namespace = "security" 23 | annotations = { 24 | "kubed.appscode.com/sync" = "" 25 | } 26 | } 27 | 28 | data = { 29 | destination = "${google_storage_bucket.backup-bucket.url}" 30 | GOOGLE_PROJECT_ID = google_project.homelab_backups.project_id 31 | GOOGLE_APPLICATION_CREDENTIALS = base64decode(google_service_account_key.backup_operator_key.private_key) 32 | } 33 | } 34 |
-------------------------------------------------------------------------------- /terraform/backups/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | b2 = { 4 | source = "Backblaze/b2" 5 | } 6 | } 7 | } 8 | 9 | provider "b2" { 10 |
application_key = var.b2_app_key 11 | application_key_id = var.b2_app_key_id 12 | } 13 | 14 | provider "google" { 15 | project = var.homelab_project_id 16 | region = "australia-southeast1" 17 | } 18 | 19 | provider "google-beta" { 20 | project = var.homelab_project_id 21 | region = "australia-southeast1" 22 | } 23 | 24 | module "project-services" { 25 | source = "terraform-google-modules/project-factory/google//modules/project_services" 26 | version = "17.1" 27 | 28 | project_id = google_project.homelab_backups.project_id 29 | 30 | activate_apis = [ 31 | "storage-component.googleapis.com", 32 | "cloudresourcemanager.googleapis.com" 33 | ] 34 | } 35 | 36 | 37 | resource "google_project" "homelab_backups" { 38 | lifecycle { 39 | ignore_changes = [org_id] 40 | } 41 | name = "Homelab Backups" 42 | project_id = var.homelab_project_id 43 | billing_account = data.google_billing_account.homelab_billing.id 44 | } 45 | 46 | data "google_billing_account" "homelab_billing" { 47 | display_name = "Homelab" 48 | open = true 49 | } 50 | -------------------------------------------------------------------------------- /terraform/backups/vars.tf: -------------------------------------------------------------------------------- 1 | variable "storage_namespace" { 2 | description = "namespace where external secrets operator will be deployed" 3 | type = string 4 | default = "storage" 5 | } 6 | 7 | variable "b2_app_key_id" { 8 | type = string 9 | sensitive = true 10 | 11 | } 12 | 13 | variable "b2_app_key_name" { 14 | type = string 15 | } 16 | 17 | variable "b2_app_key" { 18 | type = string 19 | sensitive = true 20 | } 21 | 22 | variable "homelab_project_id" {} 23 | 24 | variable "email_username" { 25 | sensitive = true # Sensitive as value is a key, not a username 26 | } 27 | variable "email_password" { 28 | sensitive = true 29 | } 30 | -------------------------------------------------------------------------------- /terraform/dns/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | cloudflare = { 4 | source = "cloudflare/cloudflare" 5 | } 6 | } 7 | } 8 | 9 | resource "kubernetes_secret" "cloudflare_secret" { 10 | metadata { 11 | name = "cloudflare-secret" 12 | namespace = "security" 13 | annotations = { 14 | "kubed.appscode.com/sync"= "" 15 | } 16 | } 17 | 18 | data = { 19 | "email" = var.cloudflare_email 20 | "api-token" = var.cloudflare_api_token 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /terraform/dns/vars.tf: -------------------------------------------------------------------------------- 1 | variable "homelab_account_id" {} 2 | variable "homelab_domain" {} 3 | variable "cloudflare_email" {} 4 | variable "cloudflare_api_token" { 5 | sensitive = true 6 | } -------------------------------------------------------------------------------- /terraform/idp/auth_argocd.tf: -------------------------------------------------------------------------------- 1 | resource "authentik_group" "argocd_admin" { 2 | name = "argocd_admin" 3 | is_superuser = false 4 | } 5 | 6 | resource "authentik_group" "argocd" { 7 | name = "argocd" 8 | is_superuser = false 9 | } 10 | 11 | resource "authentik_provider_oauth2" "argocd_ouath2" { 12 | name = "argocd" 13 | client_id = "argocd" 14 | authorization_flow = data.authentik_flow.default-authorization-flow.id 15 | access_code_validity = "minutes=10" 16 | redirect_uris = [ "https://argocd.k8s.tylercash.dev/auth/callback"] 17 | signing_key = 
data.authentik_certificate_key_pair.generated.id 18 | property_mappings = [ 19 | data.authentik_scope_mapping.openid.id, 20 | data.authentik_scope_mapping.email.id, 21 | data.authentik_scope_mapping.profile.id 22 | ] 23 | } 24 | 25 | resource "authentik_application" "argocd_application" { 26 | name = authentik_provider_oauth2.argocd_ouath2.name 27 | slug = authentik_provider_oauth2.argocd_ouath2.name 28 | protocol_provider = authentik_provider_oauth2.argocd_ouath2.id 29 | meta_icon = "https://cncf-branding.netlify.app/img/projects/argo/icon/white/argo-icon-white.svg" 30 | meta_launch_url = "https://argocd.k8s.tylercash.dev/applications" 31 | policy_engine_mode = "all" 32 | } 33 | 34 | resource "kubernetes_secret" "argocd-oidc-secrets" { 35 | metadata { 36 | name = "argocd-oidc-secrets" 37 | namespace = "argocd" 38 | labels = { 39 | "app.kubernetes.io/part-of": "argocd" 40 | } 41 | } 42 | 43 | data = { 44 | "oidc.authentik.client_id" = authentik_provider_oauth2.argocd_ouath2.client_id 45 | "oidc.authentik.client_secret" = authentik_provider_oauth2.argocd_ouath2.client_secret 46 | } 47 | } 48 | 49 | resource "authentik_policy_binding" "argocd_stop_brute_force_username" { 50 | target = authentik_application.argocd_application.uuid 51 | policy = authentik_policy_reputation.minimum_username_reputation.id 52 | order = 0 53 | negate = true 54 | } 55 | resource "authentik_policy_binding" "argocd_stop_brute_force_ip" { 56 | target = authentik_application.argocd_application.uuid 57 | policy = authentik_policy_reputation.minimum_ip_reputation.id 58 | order = authentik_policy_binding.argocd_stop_brute_force_username.order + 1 59 | negate = true 60 | } 61 | 62 | resource "authentik_policy_binding" "argocd_is_enabled" { 63 | target = authentik_application.argocd_application.uuid 64 | policy = authentik_policy_expression.account_enabled.id 65 | order = authentik_policy_binding.argocd_stop_brute_force_ip.order + 1 66 | } 67 | -------------------------------------------------------------------------------- /terraform/idp/auth_ceph.tf: -------------------------------------------------------------------------------- 1 | resource "authentik_group" "ceph" { 2 | name = "ceph" 3 | is_superuser = false 4 | } 5 | 6 | resource "authentik_provider_saml" "ceph_saml" { 7 | name = "ceph" 8 | authorization_flow = data.authentik_flow.default-authorization-flow.id 9 | acs_url = "https://ceph.k8s.tylercash.dev/auth/saml2" 10 | signature_algorithm = "http://www.w3.org/2000/09/xmldsig#rsa-sha1" 11 | digest_algorithm = "http://www.w3.org/2000/09/xmldsig#sha1" 12 | sp_binding = "post" 13 | signing_kp = data.authentik_certificate_key_pair.generated.id 14 | property_mappings = [ 15 | data.authentik_property_mapping_saml.username.id, 16 | data.authentik_property_mapping_saml.name.id, 17 | data.authentik_property_mapping_saml.groups.id, 18 | data.authentik_property_mapping_saml.upn.id, 19 | data.authentik_property_mapping_saml.email.id 20 | ] 21 | } 22 | 23 | resource "authentik_application" "ceph_application" { 24 | name = authentik_provider_saml.ceph_saml.name 25 | slug = authentik_provider_saml.ceph_saml.name 26 | protocol_provider = authentik_provider_saml.ceph_saml.id 27 | meta_icon = "https://cncf-branding.netlify.app/img/projects/rook/icon/color/rook-icon-color.svg" 28 | meta_launch_url = "https://ceph.k8s.tylercash.dev/" 29 | policy_engine_mode = "all" 30 | } 31 | 32 | resource "authentik_policy_binding" "ceph_stop_brute_force_username" { 33 | target = authentik_application.ceph_application.uuid 34 | policy = 
authentik_policy_reputation.minimum_username_reputation.id 35 | order = 0 36 | negate = true 37 | } 38 | resource "authentik_policy_binding" "ceph_stop_brute_force_ip" { 39 | target = authentik_application.ceph_application.uuid 40 | policy = authentik_policy_reputation.minimum_ip_reputation.id 41 | order = authentik_policy_binding.ceph_stop_brute_force_username.order + 1 42 | negate = true 43 | } 44 | 45 | resource "authentik_policy_binding" "ceph_is_enabled" { 46 | target = authentik_application.ceph_application.uuid 47 | policy = authentik_policy_expression.account_enabled.id 48 | order = authentik_policy_binding.ceph_stop_brute_force_ip.order + 1 49 | } 50 | -------------------------------------------------------------------------------- /terraform/idp/auth_grafana.tf: -------------------------------------------------------------------------------- 1 | resource "authentik_group" "grafana_admin" { 2 | name = "grafana_admin" 3 | is_superuser = false 4 | } 5 | 6 | resource "authentik_group" "grafana" { 7 | name = "grafana" 8 | is_superuser = false 9 | } 10 | 11 | resource "authentik_provider_oauth2" "grafana_ouath2" { 12 | name = "grafana" 13 | client_id = "grafana" 14 | authorization_flow = data.authentik_flow.default-authorization-flow.id 15 | access_code_validity = "minutes=10" 16 | redirect_uris = [ "https://grafana.k8s.tylercash.dev/login/generic_oauth"] 17 | signing_key = data.authentik_certificate_key_pair.generated.id 18 | property_mappings = [ 19 | data.authentik_scope_mapping.openid.id, 20 | data.authentik_scope_mapping.email.id, 21 | data.authentik_scope_mapping.profile.id 22 | ] 23 | } 24 | 25 | resource "authentik_application" "grafana_application" { 26 | name = authentik_provider_oauth2.grafana_ouath2.name 27 | slug = authentik_provider_oauth2.grafana_ouath2.name 28 | protocol_provider = authentik_provider_oauth2.grafana_ouath2.id 29 | meta_icon = "https://upload.wikimedia.org/wikipedia/commons/thumb/a/a1/Grafana_logo.svg/768px-Grafana_logo.svg.png" 30 | meta_launch_url = "https://grafana.k8s.tylercash.dev" 31 | policy_engine_mode = "all" 32 | } 33 | 34 | resource "kubernetes_secret" "grafana-oidc-secrets" { 35 | metadata { 36 | name = "grafana-oidc-secrets" 37 | namespace = "monitoring" 38 | } 39 | 40 | data = { 41 | "client_id" = authentik_provider_oauth2.grafana_ouath2.client_id 42 | "client_secret" = authentik_provider_oauth2.grafana_ouath2.client_secret 43 | } 44 | } 45 | 46 | resource "authentik_policy_binding" "grafana_stop_brute_force_username" { 47 | target = authentik_application.grafana_application.uuid 48 | policy = authentik_policy_reputation.minimum_username_reputation.id 49 | order = 0 50 | negate = true 51 | } 52 | resource "authentik_policy_binding" "grafana_stop_brute_force_ip" { 53 | target = authentik_application.grafana_application.uuid 54 | policy = authentik_policy_reputation.minimum_ip_reputation.id 55 | order = authentik_policy_binding.grafana_stop_brute_force_username.order + 1 56 | negate = true 57 | } 58 | 59 | resource "authentik_policy_binding" "grafana_is_enabled" { 60 | target = authentik_application.grafana_application.uuid 61 | policy = authentik_policy_expression.account_enabled.id 62 | order = authentik_policy_binding.grafana_stop_brute_force_ip.order + 1 63 | } 64 | -------------------------------------------------------------------------------- /terraform/idp/auth_jellyfin.tf: -------------------------------------------------------------------------------- 1 | resource "authentik_group" "jellyfin_admin" { 2 | name = "jellyfin_admin" 3 | 
is_superuser = false 4 | } 5 | 6 | resource "authentik_group" "jellyfin" { 7 | name = "jellyfin" 8 | is_superuser = false 9 | } 10 | 11 | resource "authentik_provider_ldap" "jellyfin_ldap" { 12 | name = "jellyfin" 13 | base_dn = "dc=ldap,dc=tylercash,dc=dev" 14 | bind_flow = data.authentik_flow.default-authentication-flow.id 15 | search_group = authentik_group.bind.id 16 | } 17 | 18 | resource "authentik_application" "jellyfin_application" { 19 | name = authentik_provider_ldap.jellyfin_ldap.name 20 | slug = authentik_provider_ldap.jellyfin_ldap.name 21 | protocol_provider = authentik_provider_ldap.jellyfin_ldap.id 22 | meta_icon = "https://developer.asustor.com/uploadIcons/0020_999_1568614457_Jellyfin_256.png" 23 | meta_launch_url = "https://jellyfin.k8s.tylercash.dev" 24 | policy_engine_mode = "all" 25 | } 26 | 27 | resource "authentik_outpost" "jellyfin_outpost" { 28 | name = "jellyfin-outpost" 29 | type = "ldap" 30 | protocol_providers = [ 31 | authentik_provider_ldap.jellyfin_ldap.id 32 | ] 33 | } 34 | 35 | resource "authentik_policy_binding" "jellyfin_stop_brute_force_username" { 36 | target = authentik_application.jellyfin_application.uuid 37 | policy = authentik_policy_reputation.minimum_username_reputation.id 38 | order = 0 39 | negate = true 40 | } 41 | resource "authentik_policy_binding" "jellyfin_stop_brute_force_ip" { 42 | target = authentik_application.jellyfin_application.uuid 43 | policy = authentik_policy_reputation.minimum_ip_reputation.id 44 | order = authentik_policy_binding.jellyfin_stop_brute_force_username.order + 1 45 | negate = true 46 | } 47 | 48 | resource "authentik_policy_binding" "jellyfin_is_enabled" { 49 | target = authentik_application.jellyfin_application.uuid 50 | policy = authentik_policy_expression.account_enabled.id 51 | order = authentik_policy_binding.jellyfin_stop_brute_force_ip.order + 1 52 | } 53 | -------------------------------------------------------------------------------- /terraform/idp/email_auth.tf: -------------------------------------------------------------------------------- 1 | resource "authentik_stage_user_write" "write_user" { 2 | name = "write_user" 3 | create_users_group = authentik_group.jellyfin.id 4 | } 5 | 6 | resource "authentik_stage_email" "email_confirmation" { 7 | from_address = var.authentik_from_address 8 | name = "email-verification" 9 | subject = "Account Confirmation" 10 | template = "email/account_confirmation.html" 11 | activate_user_on_success = true 12 | } 13 | 14 | resource "authentik_stage_invitation" "invite_link" { 15 | name = "invite_link" 16 | continue_flow_without_invitation = false 17 | } 18 | 19 | resource "authentik_stage_prompt_field" "username" { 20 | field_key = "username" 21 | name = "username" 22 | label = "Username" 23 | type = "username" 24 | } 25 | 26 | resource "authentik_stage_prompt_field" "password" { 27 | field_key = "password" 28 | name = "password" 29 | label = "Password" 30 | type = "password" 31 | } 32 | 33 | resource "authentik_stage_prompt_field" "email" { 34 | field_key = "email" 35 | name = "email" 36 | label = "Email" 37 | type = "email" 38 | } 39 | 40 | resource "authentik_stage_prompt_field" "name" { 41 | field_key = "name" 42 | name = "name" 43 | label = "Name" 44 | type = "text" 45 | } 46 | 47 | data "authentik_stage" "auto_login" { 48 | name = "default-source-enrollment-login" 49 | } 50 | 51 | resource "authentik_policy_password" "password_length" { 52 | name = "password is minimum length" 53 | length_min = 8 54 | error_message = "Password must be at least 8 characters long" 
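# Both password policies are attached below via
# authentik_stage_prompt.user_information.validation_policies, so enrollment
# rejects any password that fails either check.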
55 | } 56 | resource "authentik_policy_password" "password_hibp" { 57 | name = "Have I been pwned check" 58 | check_have_i_been_pwned = true 59 | hibp_allowed_count = 0 60 | error_message = "Password is in public list of compromised passwords" 61 | } 62 | 63 | resource "authentik_stage_prompt" "user_information" { 64 | name = "user_information" 65 | fields = [ 66 | authentik_stage_prompt_field.name.id, 67 | authentik_stage_prompt_field.email.id, 68 | authentik_stage_prompt_field.username.id, 69 | authentik_stage_prompt_field.password.id, 70 | ] 71 | 72 | validation_policies = [ 73 | authentik_policy_password.password_length.id, 74 | authentik_policy_password.password_hibp.id 75 | ] 76 | } 77 | 78 | resource "authentik_flow_stage_binding" "invite_link_binding" { 79 | target = authentik_flow.email_enrollment.uuid 80 | stage = authentik_stage_invitation.invite_link.id 81 | order = 10 82 | policy_engine_mode = "all" 83 | } 84 | 85 | resource "authentik_flow_stage_binding" "user_information_binding" { 86 | target = authentik_flow.email_enrollment.uuid 87 | stage = authentik_stage_prompt.user_information.id 88 | order = authentik_flow_stage_binding.invite_link_binding.order + 1 89 | policy_engine_mode = "all" 90 | } 91 | 92 | resource "authentik_flow_stage_binding" "create_user_binding" { 93 | target = authentik_flow.email_enrollment.uuid 94 | stage = authentik_stage_user_write.write_user.id 95 | order = authentik_flow_stage_binding.user_information_binding.order + 1 96 | policy_engine_mode = "all" 97 | } 98 | resource "authentik_flow_stage_binding" "verify_email_binding" { 99 | target = authentik_flow.email_enrollment.uuid 100 | stage = authentik_stage_email.email_confirmation.id 101 | order = authentik_flow_stage_binding.create_user_binding.order + 1 102 | policy_engine_mode = "all" 103 | } 104 | 105 | resource "authentik_flow_stage_binding" "login_binding" { 106 | target = authentik_flow.email_enrollment.uuid 107 | stage = data.authentik_stage.auto_login.id 108 | order = authentik_flow_stage_binding.verify_email_binding.order + 1 109 | policy_engine_mode = "all" 110 | } 111 | 112 | 113 | resource "authentik_flow" "email_enrollment" { 114 | name = "email-enrollment" 115 | title = "Email Enrollment" 116 | slug = "email-enrollment" 117 | designation = "enrollment" 118 | policy_engine_mode = "all" 119 | layout = "stacked" 120 | } 121 | -------------------------------------------------------------------------------- /terraform/idp/groups.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "authentik_group" "admin" { 3 | name = "admin" 4 | is_superuser = true 5 | } 6 | 7 | resource "authentik_group" "bind" { 8 | name = "bind" 9 | is_superuser = false 10 | } 11 | -------------------------------------------------------------------------------- /terraform/idp/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | authentik = { 4 | source = "goauthentik/authentik" 5 | version = "2023.10.0" 6 | } 7 | } 8 | } 9 | 10 | provider "authentik" { 11 | url = "https://authentik.k8s.tylercash.dev" 12 | token = var.authentik_bootstrap_token 13 | } 14 | 15 | data "authentik_certificate_key_pair" "generated" { 16 | name = "authentik Self-signed Certificate" 17 | } 18 | -------------------------------------------------------------------------------- /terraform/idp/mappings.tf: -------------------------------------------------------------------------------- 1 | data 
"authentik_scope_mapping" "email" { 2 | managed = "goauthentik.io/providers/oauth2/scope-email" 3 | } 4 | 5 | data "authentik_scope_mapping" "profile" { 6 | managed = "goauthentik.io/providers/oauth2/scope-profile" 7 | } 8 | 9 | data "authentik_scope_mapping" "openid" { 10 | managed = "goauthentik.io/providers/oauth2/scope-openid" 11 | } 12 | 13 | data "authentik_property_mapping_saml" "upn" { 14 | managed = "goauthentik.io/providers/saml/upn" 15 | } 16 | 17 | data "authentik_property_mapping_saml" "name" { 18 | managed = "goauthentik.io/providers/saml/name" 19 | } 20 | 21 | data "authentik_property_mapping_saml" "groups" { 22 | managed = "goauthentik.io/providers/saml/groups" 23 | } 24 | 25 | data "authentik_property_mapping_saml" "username" { 26 | managed = "goauthentik.io/providers/saml/username" 27 | } 28 | 29 | data "authentik_property_mapping_saml" "email" { 30 | managed = "goauthentik.io/providers/saml/email" 31 | } 32 | -------------------------------------------------------------------------------- /terraform/idp/policies.tf: -------------------------------------------------------------------------------- 1 | resource "authentik_policy_reputation" "minimum_username_reputation" { 2 | name = "minimum_username_reputation" 3 | execution_logging = true 4 | threshold = -10 5 | check_ip = false 6 | check_username = true 7 | } 8 | 9 | resource "authentik_policy_reputation" "minimum_ip_reputation" { 10 | name = "minimum_ip_reputation" 11 | execution_logging = true 12 | threshold = -10 13 | check_ip = true 14 | check_username = false 15 | } 16 | 17 | resource "authentik_policy_expression" "account_enabled" { 18 | name = "account_enabled" 19 | expression = "return request.user.is_active" 20 | } 21 | -------------------------------------------------------------------------------- /terraform/idp/stage_flow.tf: -------------------------------------------------------------------------------- 1 | data "authentik_flow" "default-authentication-flow" { 2 | slug = "default-authentication-flow" 3 | } 4 | 5 | data "authentik_flow" "default-authorization-flow" { 6 | slug = "default-provider-authorization-implicit-consent" 7 | } 8 | -------------------------------------------------------------------------------- /terraform/idp/users.tf: -------------------------------------------------------------------------------- 1 | 2 | locals { 3 | service_accounts = { 4 | "jellyfin" = {active = true, groups = [authentik_group.jellyfin.id, authentik_group.bind.id]}, 5 | "test_user" = {active = false, groups = []} 6 | } 7 | } 8 | 9 | resource "authentik_user" "service_account" { 10 | for_each = local.service_accounts 11 | 12 | username = each.key 13 | name = each.key 14 | groups = each.value.groups 15 | is_active = each.value.active 16 | path = "service_accounts" 17 | } -------------------------------------------------------------------------------- /terraform/idp/vars.tf: -------------------------------------------------------------------------------- 1 | variable "authentik_bootstrap_token" { 2 | sensitive = true 3 | } 4 | 5 | variable "authentik_from_address" { 6 | default = "authentik@tylercash.dev" 7 | } 8 | -------------------------------------------------------------------------------- /terraform/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | kubernetes = { 4 | source = "hashicorp/kubernetes" 5 | } 6 | } 7 | } 8 | 9 | provider "kubernetes" { 10 | config_path = "../kubeconfig" 11 | config_context = "default" 12 | 
ignore_annotations = [ 13 | "kubed\\.appscode\\.com/origin" 14 | ] 15 | ignore_labels = [ 16 | "kubed.+" 17 | ] 18 | } 19 | 20 | module "secrets_storage" { 21 | source = "./secrets_storage" 22 | homelab_project_id = "${var.homelab_project_prefix}-external-secrets-op" 23 | email_username = var.email_username 24 | email_password = var.email_password 25 | foundry_username = var.foundry_username 26 | foundry_password = var.foundry_password 27 | alertmanager_config = var.alertmanager_config 28 | tailscale_authkey = var.tailscale_authkey 29 | tylerbot_config = var.tylerbot_config 30 | plex_claim = var.plex_claim 31 | } 32 | 33 | module "dns" { 34 | source = "./dns" 35 | homelab_domain = var.homelab_domain 36 | homelab_account_id = var.homelab_account_id 37 | cloudflare_api_token = var.cloudflare_api_token 38 | cloudflare_email = var.cloudflare_email 39 | } 40 | 41 | module "idp" { 42 | source = "./idp" 43 | authentik_bootstrap_token = var.authentik_token 44 | } 45 | 46 | module "backups" { 47 | source = "./backups" 48 | b2_app_key = var.b2_app_key 49 | b2_app_key_id = var.b2_app_key_id 50 | b2_app_key_name = var.b2_app_key_name 51 | homelab_project_id = "${var.homelab_project_prefix}-backups" 52 | 53 | email_username = var.email_username 54 | email_password = var.email_password 55 | } 56 | -------------------------------------------------------------------------------- /terraform/secrets_storage/account.tf: -------------------------------------------------------------------------------- 1 | resource "google_service_account" "external_secrets_operator" { 2 | account_id = "homelab-k8s-account" 3 | display_name = "homelab-k8s-account" 4 | } 5 | resource "google_service_account_key" "external_secrets_operator_key" { 6 | service_account_id = google_service_account.external_secrets_operator.email 7 | } 8 | -------------------------------------------------------------------------------- /terraform/secrets_storage/iam.tf: -------------------------------------------------------------------------------- 1 | data "google_iam_policy" "secrets_k8s_policy" { 2 | binding { 3 | role = "roles/secretmanager.viewer" 4 | members = [ 5 | "serviceAccount:${google_service_account.external_secrets_operator.email}" 6 | ] 7 | } 8 | binding { 9 | role = "roles/secretmanager.secretAccessor" 10 | members = [ 11 | "serviceAccount:${google_service_account.external_secrets_operator.email}" 12 | ] 13 | } 14 | binding { 15 | role = "roles/browser" 16 | members = [ 17 | "serviceAccount:${google_service_account.external_secrets_operator.email}" 18 | ] 19 | } 20 | } 21 | 22 | resource "google_project_iam_policy" "project" { 23 | project = google_project.homelab_secrets_storage.project_id 24 | policy_data = data.google_iam_policy.secrets_k8s_policy.policy_data 25 | } 26 | -------------------------------------------------------------------------------- /terraform/secrets_storage/k8s.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "kubernetes_secret" "gcpsm-secret" { 3 | metadata { 4 | name = "gcpsm-secret" 5 | namespace = "security" 6 | labels = { 7 | type = "gcpsm" 8 | } 9 | annotations = { 10 | "kubed.appscode.com/sync"= "" 11 | } 12 | } 13 | 14 | binary_data = { 15 | "gcp_config" = google_service_account_key.external_secrets_operator_key.private_key 16 | } 17 | } 18 | 19 | resource "kubernetes_manifest" "gcp-clusterstore" { 20 | manifest = { 21 | "apiVersion" = "external-secrets.io/v1beta1" 22 | "kind" = "ClusterSecretStore" 23 | "metadata" = { 24 | "name" = "gcp-clusterstore" 
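# Cluster-scoped store: ExternalSecrets in any namespace (e.g. the
# tylerbot-secrets manifest above) reference it with
# secretStoreRef kind: ClusterSecretStore, name: gcp-clusterstore.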
25 | } 26 | "spec" = { 27 | "provider" = { 28 | "gcpsm" = { 29 | "auth" = { 30 | "secretRef" = { 31 | "secretAccessKeySecretRef" = { 32 | "name" = "gcpsm-secret" 33 | "key" = "gcp_config" 34 | } 35 | } 36 | } 37 | "projectID" = google_project.homelab_secrets_storage.project_id 38 | } 39 | } 40 | } 41 | } 42 | } 43 |
-------------------------------------------------------------------------------- /terraform/secrets_storage/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | } 4 | } 5 | 6 | provider "google" { 7 | project = var.homelab_project_id 8 | region = "australia-southeast1" 9 | } 10 | 11 | provider "google-beta" { 12 | project = var.homelab_project_id 13 | region = "australia-southeast1" 14 | } 15 | 16 | module "project-services" { 17 | source = "terraform-google-modules/project-factory/google//modules/project_services" 18 | version = "17.1" 19 | 20 | project_id = google_project.homelab_secrets_storage.project_id 21 | 22 | activate_apis = [ 23 | "secretmanager.googleapis.com", 24 | "cloudkms.googleapis.com", 25 | "cloudresourcemanager.googleapis.com" 26 | ] 27 | } 28 | 29 |
-------------------------------------------------------------------------------- /terraform/secrets_storage/project.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "google_project" "homelab_secrets_storage" { 3 | lifecycle { 4 | ignore_changes = [org_id] 5 | } 6 | 7 | name = "Homelab External Secrets Op" 8 | project_id = var.homelab_project_id 9 | billing_account = data.google_billing_account.homelab_billing.id 10 | } 11 | 12 | data "google_billing_account" "homelab_billing" { 13 | display_name = "Homelab" 14 | open = true 15 | } 16 |
-------------------------------------------------------------------------------- /terraform/secrets_storage/secret-manager.tf: -------------------------------------------------------------------------------- 1 | module "secret-manager" { 2 | source = "GoogleCloudPlatform/secret-manager/google" 3 | version = "~> 0.8" 4 | project_id = google_project.homelab_secrets_storage.project_id 5 | } 6 | 7 | resource "google_kms_key_ring" "homelab_keyring" { 8 | name = "homelab_keyring" 9 | location = "global" 10 | } 11 | 12 | resource "google_kms_crypto_key" "homelab_crypto_key" { 13 | name = "homelab_crypto_key" 14 | key_ring = google_kms_key_ring.homelab_keyring.id 15 | rotation_period = "31536000s" 16 | lifecycle { 17 | prevent_destroy = true 18 | } 19 | } 20 |
-------------------------------------------------------------------------------- /terraform/secrets_storage/secrets-all.tf: -------------------------------------------------------------------------------- 1 | resource "random_password" "tandoor_secret_key" { 2 | length = 50 3 | special = true 4 | } 5 | 6 | resource "random_password" "authentik_secret_key" { 7 | length = 100 8 | special = true 9 | } 10 | 11 | resource "random_password" "authentik_bootstrap_password" { 12 | length = 100 13 | special = true 14 | } 15 | 16 | resource "random_password" "authentik_bootstrap_token" { 17 | length = 100 18 | special = true 19 | } 20 | 21 | locals { 22 | all_secrets = { 23 | "tandoor" = { 24 | "secret-key" = random_password.tandoor_secret_key.result 25 | } 26 | "plex" = { 27 | "PLEX_CLAIM_TOKEN" = var.plex_claim 28 | } 29 | "alertmanager" = { 30 | "alertmanager.yaml" = var.alertmanager_config 31 | } 32 | "tylerbot" = { 33 | "application-prod.yaml" = var.tylerbot_config 34 | } 35 | "foundry" = { 36 | "FOUNDRY_USERNAME" 
= var.foundry_username 37 | "FOUNDRY_PASSWORD" = var.foundry_password 38 | } 39 | "authentik" = { 40 | "email-username" = var.email_username 41 | "email-password" = var.email_password 42 | "secret-key" = random_password.authentik_secret_key.result 43 | "bootstrap-password" = random_password.authentik_bootstrap_password.result 44 | "bootstrap-token" = random_password.authentik_bootstrap_token.result 45 | } 46 | } 47 | } 48 | 49 | 50 | resource "google_secret_manager_secret" "all" { 51 | secret_id = "all_secrets" 52 | replication { 53 | auto {} 54 | } 55 | } 56 | 57 | resource "google_secret_manager_secret_version" "all_version" { 58 | secret = google_secret_manager_secret.all.id 59 | secret_data = jsonencode(local.all_secrets) 60 | } 61 | -------------------------------------------------------------------------------- /terraform/secrets_storage/vars.tf: -------------------------------------------------------------------------------- 1 | output "authentik_token" { 2 | value = random_password.authentik_bootstrap_token.result 3 | } 4 | 5 | variable "foundry_username" { 6 | type = string 7 | sensitive = true 8 | } 9 | 10 | variable "foundry_password" { 11 | type = string 12 | sensitive = true 13 | } 14 | 15 | output "authentik_admin_password" { 16 | value = random_password.authentik_bootstrap_password.result 17 | } 18 | 19 | variable "secrets_namespace" { 20 | description = "namespace where external secrets operator will be deployed" 21 | type = string 22 | default = "security" 23 | } 24 | 25 | variable "homelab_project_id" {} 26 | 27 | variable "plex_claim" { 28 | sensitive = true 29 | } 30 | 31 | variable "email_username" { 32 | sensitive = true # Sensitive as value is a key, not a username 33 | } 34 | 35 | variable "email_password" { 36 | sensitive = true 37 | } 38 | 39 | variable "alertmanager_config" { 40 | sensitive = true 41 | } 42 | 43 | variable "tylerbot_config" { 44 | sensitive = true 45 | } 46 | 47 | variable "tailscale_authkey" { 48 | sensitive = true 49 | } 50 | -------------------------------------------------------------------------------- /terraform/vars.tf: -------------------------------------------------------------------------------- 1 | output "authentik_password" { 2 | value = module.secrets_storage.authentik_admin_password 3 | sensitive = true 4 | } 5 | 6 | variable "homelab_domain" { 7 | type = string 8 | } 9 | 10 | variable "plex_claim" { 11 | type = string 12 | } 13 | 14 | variable "homelab_account_id" { 15 | type = string 16 | } 17 | 18 | variable "homelab_project_prefix" { 19 | type = string 20 | } 21 | 22 | variable "authentik_token" { 23 | type = string 24 | sensitive = true 25 | } 26 | 27 | variable "foundry_username" { 28 | type = string 29 | sensitive = true 30 | } 31 | 32 | variable "foundry_password" { 33 | type = string 34 | sensitive = true 35 | } 36 | 37 | variable "cloudflare_api_token" { 38 | type = string 39 | sensitive = true 40 | } 41 | 42 | variable "cloudflare_email" { 43 | type = string 44 | } 45 | 46 | variable "b2_app_key_id" { 47 | type = string 48 | sensitive = true 49 | 50 | } 51 | 52 | variable "b2_app_key_name" { 53 | type = string 54 | } 55 | 56 | variable "b2_app_key" { 57 | type = string 58 | sensitive = true 59 | } 60 | 61 | variable "email_username" { 62 | sensitive = true # Sensitive as value is a key, not a username 63 | } 64 | 65 | variable "email_password" { 66 | sensitive = true 67 | } 68 | 69 | variable "alertmanager_config" { 70 | sensitive = true 71 | } 72 | 73 | variable "tailscale_authkey" { 74 | sensitive = true 75 | } 76 | 
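# Hedged sketch of how these variables might be supplied (placeholder values
# only), e.g. in a git-ignored terraform.tfvars:
#   homelab_domain         = "example.dev"
#   homelab_account_id     = "0123456789abcdef"
#   homelab_project_prefix = "homelab"
# with the sensitive values exported as TF_VAR_* environment variables.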
77 | variable "tylerbot_config" { 78 | sensitive = true 79 | } 80 | --------------------------------------------------------------------------------