├── .gitattributes ├── .github ├── CODEOWNERS ├── ISSUE_TEMPLATE │ ├── bug-report.md │ ├── config.yml │ ├── feature-request.md │ └── question.md ├── PULL_REQUEST_TEMPLATE.md ├── labeler.yaml ├── labels.yaml ├── linters │ ├── .ansible-lint │ ├── .eslintrc.json │ ├── .markdownlint.yaml │ ├── .prettierignore │ ├── .prettierrc.yaml │ ├── .tflint.hcl │ └── .yamllint.yaml ├── renovate.json5 ├── renovate │ ├── autoMerge.json5 │ ├── commitMessage.json5 │ ├── groups.json5 │ ├── labels.json5 │ ├── regexManagers.json5 │ ├── semanticCommits.json5 │ └── versioning.json5 ├── scripts │ ├── cloudflare-proxied-networks.sh │ ├── container-parser.sh │ ├── helm-release-differ.sh │ └── lib │ │ └── functions.sh └── workflows │ ├── cloudflare.yaml │ ├── helm-releases.yaml │ ├── labels.yaml │ ├── lint.yaml │ └── renovate.yaml ├── .gitignore ├── .pre-commit-config.yaml ├── .sops.yaml ├── .sourceignore ├── .taskfiles ├── ansible.yml ├── blocky.yml └── cluster.yml ├── .vscode ├── extensions.json └── settings.json ├── LICENSE ├── README.md ├── Taskfile.yml ├── cluster ├── apps │ ├── databases │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ └── postgresql │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── pvc.yaml │ ├── discord │ │ ├── discord-bot-react-frontend │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── discord-stock-ticker │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── payload.sh │ │ │ ├── prometheus-rule.yaml │ │ │ └── secret.sops.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── puppeteer │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── secret.sops.yaml │ │ ├── whalestream-bot │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── ws-staging-bot │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ └── wyoa-bot │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ ├── home │ │ ├── homebridge │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── pvc.yaml │ │ ├── kustomization.yaml │ │ └── namespace.yaml │ ├── kube-system │ │ ├── cert-manager │ │ │ ├── helm-release.yaml │ │ │ ├── issuers │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── letsencrypt-production.yaml │ │ │ │ └── letsencrypt-staging.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── prometheus-rule.yaml │ │ │ └── secret.sops.yaml │ │ ├── cilium │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── coredns │ │ │ ├── kustomization.yaml │ │ │ └── prometheus-rule.yaml │ │ ├── kustomization.yaml │ │ ├── metrics-server │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ └── reloader │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ ├── kustomization.yaml │ ├── kyverno │ │ ├── helm-release.yaml │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── policies │ │ │ ├── delete-cpu-limits.yaml │ │ │ ├── kustomization.yaml │ │ │ └── snapshot-job-controller.yaml │ │ └── rbac.yaml │ ├── media │ │ ├── _pvc │ │ │ ├── kustomization.yaml │ │ │ └── pvc.yaml │ │ ├── kustomization.yaml │ │ ├── lidarr │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── pvc.yaml │ │ ├── namespace.yaml │ │ ├── overseerr │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── pvc.yaml │ │ ├── plex │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── pvc.yaml │ │ ├── prowlarr │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── pvc.yaml │ │ ├── qbittorrent │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── pvc.yaml │ │ ├── radarr │ │ │ ├── helm-release.yaml │ │ │ ├── 
kustomization.yaml │ │ │ └── pvc.yaml │ │ ├── sonarr │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── pvc.yaml │ │ ├── tautulli │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── pvc.yaml │ │ └── theme-park │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ ├── monitoring │ │ ├── blackbox-exporter │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── prometheus-rule.yaml │ │ ├── grafana │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── secret.sops.yaml │ │ ├── kube-prometheus-stack │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ ├── loki │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── namespace.yaml │ │ ├── network-ups-tools │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── prometheus-rule.yaml │ │ ├── smartctl-exporter │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── snmp-exporter │ │ │ ├── kustomization.yaml │ │ │ └── mikrotik │ │ │ │ ├── config-map.yaml │ │ │ │ ├── helm-release.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ └── prometheus-rule.yaml │ │ ├── speedtest-exporter │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── prometheus-rule.yaml │ │ ├── thanos │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── monitoring │ │ │ │ ├── compact │ │ │ │ │ ├── alerts.yaml │ │ │ │ │ └── kustomization.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── query │ │ │ │ │ ├── alerts.yaml │ │ │ │ │ ├── kustomization.yaml │ │ │ │ │ └── rules.yaml │ │ │ │ ├── rule │ │ │ │ │ ├── alerts.yaml │ │ │ │ │ └── kustomization.yaml │ │ │ │ ├── sidecar │ │ │ │ │ ├── alerts.yaml │ │ │ │ │ └── kustomization.yaml │ │ │ │ └── store-gateway │ │ │ │ │ ├── alerts.yaml │ │ │ │ │ ├── kustomization.yaml │ │ │ │ │ └── rules.yaml │ │ │ └── secret.sops.yaml │ │ └── vector │ │ │ ├── agent │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ │ ├── aggregator │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ │ └── kustomization.yaml │ ├── networking │ │ ├── blocky │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── prometheus-rule.yaml │ │ ├── cloudflare-ddns │ │ │ ├── cloudflare-ddns.sh │ │ │ ├── cronjob.yaml │ │ │ ├── kustomization.yaml │ │ │ └── secret.sops.yaml │ │ ├── external-dns │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── secret.sops.yaml │ │ ├── ingress-nginx │ │ │ ├── cloudflare-proxied-networks.txt │ │ │ ├── default-certificate.yaml │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ └── monitoring │ │ │ │ ├── alerts.yaml │ │ │ │ └── kustomization.yaml │ │ ├── k8s-gateway │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ ├── kustomization.yaml │ │ ├── metallb │ │ │ ├── custom-resources │ │ │ │ ├── bgp-advertisement.yaml │ │ │ │ ├── bgp-peer.yaml │ │ │ │ ├── ip-address-pool.yaml │ │ │ │ └── kustomization.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ │ └── namespace.yaml │ ├── openebs │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ └── zfs-localpv │ │ │ ├── config │ │ │ ├── hostpath-volumes.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── storage-class.yaml │ │ │ └── volume-snapshot-class.yaml │ │ │ ├── helm-release.yaml │ │ │ └── kustomization.yaml │ ├── storage │ │ ├── kopia │ │ │ ├── config │ │ │ │ └── repository.config │ │ │ ├── cron-job.yaml │ │ │ ├── helm-release.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── pvc.yaml │ │ │ └── secret.sops.yaml │ │ ├── kustomization.yaml │ │ ├── minio │ │ │ ├── helm-release.yaml │ │ │ ├── 
kustomization.yaml │ │ │ ├── prometheus-rule.yaml │ │ │ └── pvc.yaml │ │ └── namespace.yaml │ └── system-upgrade │ │ ├── kustomization.yaml │ │ ├── namespace.yaml │ │ └── system-upgrade-controller │ │ ├── kustomization.yaml │ │ ├── patches.yaml │ │ └── plans │ │ ├── agent.yaml │ │ ├── kustomization.yaml │ │ └── server.yaml ├── bootstrap.yaml ├── crds │ ├── kube-prometheus-stack │ │ ├── crds.yaml │ │ └── kustomization.yaml │ ├── kustomization.yaml │ └── openebs-zfs-localpv │ │ ├── crds.yaml │ │ └── kustomization.yaml ├── flux │ ├── addons │ │ ├── kustomization.yaml │ │ ├── notifications │ │ │ ├── alertmanager │ │ │ │ ├── kustomization.yaml │ │ │ │ └── notification.yaml │ │ │ ├── kustomization.yaml │ │ │ └── prometheus-rule.yaml │ │ └── webhook │ │ │ ├── github │ │ │ ├── ingress.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── receiver.yaml │ │ │ └── secret.sops.yaml │ │ │ └── kustomization.yaml │ ├── bootstrap │ │ ├── cluster-apps.yaml │ │ ├── cluster-crds.yaml │ │ ├── flux-addons.yaml │ │ ├── flux-config.yaml │ │ ├── flux-repositories.yaml │ │ ├── flux.yaml │ │ ├── kustomization.yaml │ │ └── metallb.yaml │ ├── config │ │ ├── config-map.yaml │ │ ├── kustomization.yaml │ │ └── secret.sops.yaml │ └── repositories │ │ ├── git │ │ ├── flux.yaml │ │ ├── home-ops.yaml │ │ ├── kube-prometheus-stack.yaml │ │ ├── kustomization.yaml │ │ ├── openebs-zfs-localpv.yaml │ │ └── secret.sops.yaml │ │ ├── helm │ │ ├── bitnami-charts.yaml │ │ ├── bjw-s-charts.yaml │ │ ├── cilium-charts.yaml │ │ ├── external-dns-charts.yaml │ │ ├── grafana-charts.yaml │ │ ├── hajimari-charts.yaml │ │ ├── ingress-nginx-charts.yaml │ │ ├── jetstack-charts.yaml │ │ ├── k8s-gateway-charts.yaml │ │ ├── kustomization.yaml │ │ ├── kyverno-charts.yaml │ │ ├── metallb-charts.yaml │ │ ├── metrics-server-charts.yaml │ │ ├── minio-charts.yaml │ │ ├── mongodb-charts.yaml │ │ ├── openebs-zfs-localpv-charts.yaml │ │ ├── prometheus-community-charts.yaml │ │ ├── stakater-charts.yaml │ │ ├── vector-charts.yaml │ │ └── vmware-tanzu-charts.yaml │ │ ├── image │ │ ├── discord-ghcr.yaml │ │ ├── kustomization.yaml │ │ ├── secret.sops.yaml │ │ └── wyoa-ghcr.yaml │ │ └── kustomization.yaml └── kustomization.yaml ├── hack ├── blocky.sh ├── delete-stuck-ns.sh ├── delete-stuck-snapshots.sh └── flux-validate.sh ├── infrastructure ├── ansible │ ├── ansible.cfg │ ├── inventory │ │ ├── group_vars │ │ │ ├── all │ │ │ │ ├── k3s.yml │ │ │ │ ├── secret.sops.yml │ │ │ │ └── ubuntu.yml │ │ │ ├── master-nodes │ │ │ │ └── k3s.yml │ │ │ ├── storage-nodes │ │ │ │ └── .gitkeep │ │ │ └── worker-nodes │ │ │ │ └── k3s.yml │ │ ├── host_vars │ │ │ └── k8s-0-nas │ │ │ │ └── nas.yml │ │ └── hosts.yml │ ├── playbooks │ │ ├── k3s │ │ │ ├── install.yml │ │ │ └── nuke.yml │ │ ├── nas │ │ │ └── install.yml │ │ └── ubuntu │ │ │ ├── prepare.yml │ │ │ └── upgrade.yml │ ├── requirements.yml │ └── roles │ │ ├── k3s │ │ ├── tasks │ │ │ ├── checks.yml │ │ │ ├── cleanup.yml │ │ │ ├── kubeconfig.yml │ │ │ ├── main.yml │ │ │ └── network.yml │ │ └── templates │ │ │ ├── 10-etcd-snapshots.yaml.j2 │ │ │ ├── cilium │ │ │ ├── cilium-bgp-config.yaml.j2 │ │ │ ├── cilium-installation.yaml.j2 │ │ │ └── values.yaml │ │ │ ├── home-dns │ │ │ └── home-dns-rbac.yaml.j2 │ │ │ └── kube-vip │ │ │ ├── kube-vip-daemonset.yaml.j2 │ │ │ ├── kube-vip-pod.yaml.j2 │ │ │ └── kube-vip-rbac.yaml.j2 │ │ ├── nas │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ ├── avahi.yml │ │ │ ├── disks.yml │ │ │ ├── email.yml │ │ │ ├── main.yml │ │ │ ├── nfs.yml │ │ │ ├── packages.yml │ │ │ ├── samba.yml │ │ │ ├── scrub.yml │ │ │ 
└── zed.yml │ │ └── templates │ │ │ ├── aliases.j2 │ │ │ ├── exports.j2 │ │ │ ├── mail.rc.j2 │ │ │ ├── msmtprc.j2 │ │ │ ├── smartd.conf.j2 │ │ │ ├── smb.conf.j2 │ │ │ ├── zed.rc.j2 │ │ │ ├── zfs-scrub@.service.j2 │ │ │ └── zfs-scrub@.timer.j2 │ │ └── ubuntu │ │ ├── defaults │ │ └── main.yml │ │ ├── tasks │ │ ├── disks.yml │ │ ├── filesystem.yml │ │ ├── group.yml │ │ ├── grub.yml │ │ ├── kernel.yml │ │ ├── locale.yml │ │ ├── main.yml │ │ ├── network.yml │ │ ├── packages.yml │ │ ├── power.yml │ │ ├── unattended-upgrades.yml │ │ └── user.yml │ │ └── templates │ │ ├── netplan-config.yaml.j2 │ │ └── smartd.conf.j2 ├── mikrotik │ └── scripts │ │ └── blocky.rsc └── terraform │ └── cloudflare │ ├── .terraform.lock.hcl │ ├── dns_records.tf │ ├── firewall_rules.tf │ ├── main.tf │ ├── page_rules.tf │ ├── secret.sops.yaml │ └── zone_settings.tf └── jsconfig.json /.gitattributes: -------------------------------------------------------------------------------- 1 | *.sops.* diff=sopsdiffer 2 | -------------------------------------------------------------------------------- /.github/CODEOWNERS: -------------------------------------------------------------------------------- 1 | # https://docs.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners 2 | * @jr0dd 3 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug-report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: "" 5 | labels: kind/bug 6 | assignees: "" 7 | --- 8 | 9 | # Details 10 | 11 | **What steps did you take and what happened:** 12 | 13 | 14 | 15 | **What did you expect to happen:** 16 | 17 | 18 | 19 | **Anything else you would like to add:** 20 | 21 | 22 | 23 | **Additional Information:** 24 | 25 | 26 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | blank_issues_enabled: false 3 | contact_links: 4 | - name: Discuss on Discord 5 | url: https://discord.gg/k8s-at-home 6 | about: Join our Discord community 7 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature-request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: "" 5 | labels: kind/enhancement 6 | assignees: "" 7 | --- 8 | 9 | # Details 10 | 11 | **Describe the solution you'd like:** 12 | 13 | 14 | 15 | **Anything else you would like to add:** 16 | 17 | 18 | 19 | **Additional Information:** 20 | 21 | 22 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/question.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Question 3 | about: Ask a question to the maintainer 4 | title: "" 5 | labels: kind/question 6 | assignees: "" 7 | --- 8 | 9 | # Details 10 | 11 | **Ask your question:** 12 | 13 | 18 | 19 | 20 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | **Description of the change** 2 | 3 | 4 | 5 | **Benefits** 6 | 7 | 8 | 9 | **Possible drawbacks** 10 | 11 | 12 | 13 | **Applicable issues** 
14 | 15 | 16 | 17 | - fixes # 18 | 19 | **Additional information** 20 | 21 | 22 | -------------------------------------------------------------------------------- /.github/labeler.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | area/ansible: 3 | - changed-files: 4 | - any-glob-to-any-file: "infrastructure/ansible/**/*" 5 | area/cluster: 6 | - changed-files: 7 | - any-glob-to-any-file: "cluster/**/*" 8 | area/github: 9 | - changed-files: 10 | - any-glob-to-any-file: ".github/**/*" 11 | area/hack: 12 | - changed-files: 13 | - any-glob-to-any-file: "hack/**/*" 14 | area/infrastructure: 15 | - changed-files: 16 | - any-glob-to-any-file: "infrastructure/**/*" 17 | area/terraform: 18 | - changed-files: 19 | - any-glob-to-any-file: "infrastructure/terraform/**/*" 20 | -------------------------------------------------------------------------------- /.github/linters/.ansible-lint: -------------------------------------------------------------------------------- 1 | --- 2 | exclude_paths: 3 | - .github/ 4 | - .taskfiles/ 5 | - .vscode/ 6 | - charts/ 7 | - cluster/ 8 | - hack/ 9 | - terraform/ 10 | - '*.sops.*' 11 | skip_list: 12 | - role-name 13 | warn_list: 14 | - unnamed-task 15 | -------------------------------------------------------------------------------- /.github/linters/.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": ["standard"], 3 | "plugins": ["standard", "@babel/eslint-plugin", "import"], 4 | "parser": "@babel/eslint-parser", 5 | "parserOptions": { 6 | "requireConfigFile": false, 7 | "ecmaVersion": 2021, 8 | "sourceType": "module" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /.github/linters/.markdownlint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | default: true 3 | 4 | # MD013/line-length - Line length 5 | MD013: 6 | # Number of characters 7 | line_length: 240 8 | # Number of characters for headings 9 | heading_line_length: 80 10 | # Number of characters for code blocks 11 | code_block_line_length: 80 12 | # Include code blocks 13 | code_blocks: true 14 | # Include tables 15 | tables: true 16 | # Include headings 17 | headings: true 18 | # Include headings 19 | headers: true 20 | # Strict length checking 21 | strict: false 22 | # Stern length checking 23 | stern: false 24 | -------------------------------------------------------------------------------- /.github/linters/.prettierignore: -------------------------------------------------------------------------------- 1 | charts/ 2 | .github/renovate/ 3 | .terraform/ 4 | .vscode/ 5 | *.sops.* 6 | -------------------------------------------------------------------------------- /.github/linters/.prettierrc.yaml: -------------------------------------------------------------------------------- 1 | trailingComma: "es5" 2 | tabWidth: 2 3 | semi: false 4 | singleQuote: true 5 | bracketSpacing: true 6 | useTabs: false 7 | overrides: 8 | - files: 9 | - "*.html" 10 | - "*.css" 11 | options: 12 | tabWidth: 4 13 | -------------------------------------------------------------------------------- /.github/linters/.tflint.hcl: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jr0dd/home-ops/c11790ea98ba3a483d3d5575acbefb26e5f75e37/.github/linters/.tflint.hcl -------------------------------------------------------------------------------- 
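Aside (illustrative — not a file from the repository): the linter configurations above are consumed by MegaLinter in .github/workflows/lint.yaml and by pre-commit in .pre-commit-config.yaml, both shown later in this listing. A rough sketch of equivalent local runs from the repository root, assuming the tools are installed:

# Hypothetical local invocations; CI points the same tools at these config paths.
yamllint -c .github/linters/.yamllint.yaml .
ansible-lint -c .github/linters/.ansible-lint infrastructure/ansible/
npx prettier --config .github/linters/.prettierrc.yaml --ignore-path .github/linters/.prettierignore --check .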
/.github/linters/.yamllint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ignore: | 3 | charts/ 4 | .terraform/ 5 | .vscode/ 6 | .taskfiles/ 7 | Taskfile.yml 8 | *.sops.* 9 | extends: default 10 | rules: 11 | truthy: 12 | allowed-values: ["true", "false", "on"] 13 | comments: 14 | min-spaces-from-content: 1 15 | line-length: disable 16 | braces: 17 | min-spaces-inside: 0 18 | max-spaces-inside: 1 19 | brackets: 20 | min-spaces-inside: 0 21 | max-spaces-inside: 0 22 | indentation: enable 23 | -------------------------------------------------------------------------------- /.github/renovate.json5: -------------------------------------------------------------------------------- 1 | { 2 | extends: [ 3 | "config:base", 4 | "docker:enableMajor", 5 | ":disableRateLimiting", 6 | ":dependencyDashboard", 7 | ":semanticCommits", 8 | ":enablePreCommit", 9 | ":automergeDigest", 10 | ":automergeBranchPush", 11 | "github>jr0dd/home-ops//.github/renovate/autoMerge.json5", 12 | "github>jr0dd/home-ops//.github/renovate/commitMessage.json5", 13 | "github>jr0dd/home-ops//.github/renovate/groups.json5", 14 | "github>jr0dd/home-ops//.github/renovate/labels.json5", 15 | "github>jr0dd/home-ops//.github/renovate/regexManagers.json5", 16 | "github>jr0dd/home-ops//.github/renovate/semanticCommits.json5", 17 | "github>jr0dd/home-ops//.github/renovate/versioning.json5" 18 | ], 19 | platform: "github", 20 | username: "wyoa-bot[bot]", 21 | repositories: ["jr0dd/home-ops"], 22 | onboarding: false, 23 | requireConfig: false, 24 | gitAuthor: "wyoa-bot <98073241+wyoa-bot[bot]@users.noreply.github.com>", 25 | dependencyDashboardTitle: "Renovate Dashboard 🤖", 26 | suppressNotifications: ["prIgnoreNotification"], 27 | rebaseWhen: "conflicted", 28 | commitBodyTable: true, 29 | ignorePaths: ["**/discord/*bot"], 30 | // set up renovate managers 31 | flux: { 32 | fileMatch: ["cluster/.+\\.ya?ml$"] 33 | }, 34 | 'helm-values': { 35 | fileMatch: ["cluster/.+\\.ya?ml$"] 36 | }, 37 | kubernetes: { 38 | fileMatch: ["ansible/.+\\.ya?ml(\\.j2)?$", "cluster/.+\\.ya?ml$"] 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /.github/renovate/autoMerge.json5: -------------------------------------------------------------------------------- 1 | { 2 | packageRules: [ 3 | { 4 | description: "Auto merge container digests", 5 | matchDatasources: ["docker"], 6 | automerge: true, 7 | automergeType: "branch", 8 | requiredStatusChecks: null, 9 | matchUpdateTypes: ["minor", "patch", "digest"], 10 | matchPackagePatterns: ["ghcr.io/onedr0p", "ghcr.io/jr0dd"] 11 | }, 12 | { 13 | description: "Auto merge containers", 14 | matchDatasources: ["docker"], 15 | automerge: false, 16 | automergeType: "branch", 17 | requiredStatusChecks: null, 18 | matchUpdateTypes: ["minor", "patch"], 19 | matchPackageNames: ["ghcr.io/jr0dd/puppeteer"] 20 | }, 21 | { 22 | description: "Auto merge GitHub Actions", 23 | matchManagers: ["github-actions"], 24 | automerge: true, 25 | automergeType: "branch", 26 | requiredStatusChecks: null, 27 | matchUpdateTypes: ["minor", "patch"] 28 | }, 29 | { 30 | description: "Auto merge Helm charts", 31 | matchDatasources: ["helm"], 32 | automerge: true, 33 | automergeType: "branch", 34 | requiredStatusChecks: null, 35 | matchUpdateTypes: ["minor", "patch"], 36 | matchPackageNames: ["grafana", "kube-prometheus-stack", "thanos", "postgresql"] 37 | }, 38 | { 39 | description: "Auto merge Pre-Commit Hooks", 40 | matchManagers: ["pre-commit"], 41 | 
automerge: true, 42 | automergeType: "branch", 43 | requiredStatusChecks: null, 44 | matchUpdateTypes: ["minor", "patch"] 45 | } 46 | ] 47 | } 48 | -------------------------------------------------------------------------------- /.github/renovate/commitMessage.json5: -------------------------------------------------------------------------------- 1 | { 2 | commitMessageTopic: "{{depName}}", 3 | commitMessageExtra: "to {{newVersion}}", 4 | commitMessageSuffix: "", 5 | packageRules: [ 6 | { 7 | matchDatasources: ["helm"], 8 | commitMessageTopic: "chart {{depName}}" 9 | }, 10 | { 11 | matchDatasources: ["docker"], 12 | commitMessageTopic: "image {{depName}}" 13 | } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /.github/renovate/labels.json5: -------------------------------------------------------------------------------- 1 | { 2 | packageRules: [ 3 | { 4 | matchUpdateTypes: ["major"], 5 | labels: ["type/major"] 6 | }, 7 | { 8 | matchUpdateTypes: ["minor"], 9 | labels: ["type/minor"] 10 | }, 11 | { 12 | matchUpdateTypes: ["patch"], 13 | labels: ["type/patch"] 14 | }, 15 | { 16 | matchDatasources: ["docker"], 17 | addLabels: ["renovate/container"] 18 | }, 19 | { 20 | matchDatasources: ["galaxy", "galaxy-collection"], 21 | addLabels: ["renovate/ansible"] 22 | }, 23 | { 24 | matchManagers: ["github-actions"], 25 | addLabels: ["renovate/github-action"] 26 | }, 27 | { 28 | matchDatasources: ["github-releases", "github-tags"], 29 | addLabels: ["renovate/github-release"] 30 | }, 31 | { 32 | matchDatasources: ["helm"], 33 | addLabels: ["renovate/helm"] 34 | }, 35 | { 36 | matchManagers: ["pre-commit"], 37 | addLabels: ["renovate/pre-commit"] 38 | }, 39 | { 40 | matchDatasources: ["terraform-provider"], 41 | addLabels: ["renovate/terraform"] 42 | }, 43 | ] 44 | } 45 | -------------------------------------------------------------------------------- /.github/renovate/regexManagers.json5: -------------------------------------------------------------------------------- 1 | { 2 | regexManagers: [ 3 | { 4 | description: "Process CRD dependencies", 5 | fileMatch: ["cluster/.+\\.ya?ml$"], 6 | matchStrings: [ 7 | // GitRepository and Flux Kustomization where 'Git release/tag' matches 'Helm' version 8 | "registryUrl=(?\\S+) chart=(?\\S+)\n.*?(?[^-\\s]*)\n", 9 | // Kustomization where 'GitHub release artifact URL' matches 'Docker image' version 10 | "datasource=(?\\S+) image=(?\\S+)\n.*?-\\s(.*?)\/(?[^/]+)\/[^/]+\n" 11 | ], 12 | datasourceTemplate: "{{#if datasource}}{{{datasource}}}{{else}}helm{{/if}}" 13 | }, 14 | { 15 | description: "Process various other dependencies", 16 | fileMatch: ["ansible/.+\\.ya?ml$", "cluster/.+\\.ya?ml$"], 17 | matchStrings: [ 18 | "datasource=(?\\S+) depName=(?\\S+)( versioning=(?\\S+))?\n.+:\\s?[\"]?(?.+)\\b" 19 | ], 20 | datasourceTemplate: "{{#if datasource}}{{{datasource}}}{{else}}github-releases{{/if}}", 21 | versioningTemplate: "{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}" 22 | }, 23 | { 24 | description: "Process raw GitHub URLs", 25 | fileMatch: ["monitoring/.+\\.ya?ml$"], 26 | matchStrings: [ 27 | "https:\\/\\/raw.githubusercontent.com\\/(?[\\w\\d\\-_]+\\/[\\w\\d\\-_]+)\\/(?[\\w\\d\\.\\-_]+)\\/.*" 28 | ], 29 | datasourceTemplate: "github-releases", 30 | versioningTemplate: "{{#if versioning}}{{{versioning}}}{{else}}semver{{/if}}" 31 | } 32 | ] 33 | } 34 | -------------------------------------------------------------------------------- /.github/renovate/versioning.json5: 
-------------------------------------------------------------------------------- 1 | { 2 | packageRules: [ 3 | { 4 | description: "Use custom versioning for MinIO", 5 | matchDatasources: ["docker"], 6 | matchPackageNames: ["quay.io/minio/minio"], 7 | versioning: "regex:^RELEASE\\.(?\\d+)-(?\\d+)-(?\\d+)T.*Z(-(?.*))?$" 8 | }, 9 | { 10 | description: "Use loose versioning for certain dependencies", 11 | matchDatasources: ["docker"], 12 | versioning: "loose", 13 | matchPackageNames: [ 14 | "ghcr.io/onedr0p/plex-beta", 15 | "ghcr.io/onedr0p/qbittorrent" 16 | ] 17 | } 18 | ] 19 | } 20 | -------------------------------------------------------------------------------- /.github/scripts/cloudflare-proxied-networks.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Get all local networks 4 | # ipv4_rfc1918='[ "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16" ]' 5 | 6 | # Get all cloudflare ipv4 ranges in an array 7 | ipv4_cloudflare="$(curl -sL https://www.cloudflare.com/ips-v4 | jq --raw-input --slurp 'split("\n")')" 8 | if [[ -z "${ipv4_cloudflare}" ]]; then 9 | exit 1 10 | fi 11 | 12 | # Get all cloudflare ipv6 ranges in an array 13 | ipv6_cloudflare="$(curl -sL https://www.cloudflare.com/ips-v6 | jq --raw-input --slurp 'split("\n")')" 14 | if [[ -z "${ipv6_cloudflare}" ]]; then 15 | exit 1 16 | fi 17 | 18 | # Merge rfc1918 ipv4, cloudflare ipv4, and cloudflare ipv6 ranges into one array 19 | combined=$(jq \ 20 | --argjson ipv4_cloudflare "${ipv4_cloudflare}" \ 21 | --argjson ipv6_cloudflare "${ipv6_cloudflare}" \ 22 | -n '$ipv4_cloudflare + $ipv6_cloudflare' \ 23 | ) 24 | 25 | # Output array as a string with \, as delimiter 26 | echo "${combined}" | jq --raw-output '. | join("\\,")' 27 | -------------------------------------------------------------------------------- /.github/scripts/lib/functions.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | set -o nounset 5 | set -o pipefail 6 | shopt -s lastpipe 7 | 8 | check() { 9 | command -v "${1}" >/dev/null 2>&1 || { 10 | echo >&2 "ERROR: ${1} is not installed or not found in \$PATH" >&2 11 | exit 1 12 | } 13 | } 14 | 15 | chart_registry_url() { 16 | local helm_release= 17 | local chart_id= 18 | helm_release="${1}" 19 | chart_id=$(yq eval .spec.chart.spec.sourceRef.name "${helm_release}" 2>/dev/null) 20 | # Discover all HelmRepository 21 | find . 
-iname '*-charts.yaml' -type f -print0 | while IFS= read -r -d '' file; do 22 | # Skip non HelmRepository 23 | [[ $(yq eval .kind "${file}" 2>/dev/null) != "HelmRepository" ]] && continue 24 | # Skip unrelated HelmRepository 25 | [[ "${chart_id}" != $(yq eval .metadata.name "${file}" 2>/dev/null) ]] && continue 26 | yq eval .spec.url "${file}" 27 | break 28 | done 29 | } 30 | 31 | chart_name() { 32 | local helm_release= 33 | helm_release="${1}" 34 | yq eval .spec.chart.spec.chart "${helm_release}" 2>/dev/null 35 | } 36 | 37 | chart_version() { 38 | local helm_release= 39 | helm_release="${1}" 40 | yq eval .spec.chart.spec.version "${helm_release}" 2>/dev/null 41 | } 42 | 43 | chart_values() { 44 | local helm_release= 45 | helm_release="${1}" 46 | yq eval .spec.values "${helm_release}" 2>/dev/null 47 | } 48 | -------------------------------------------------------------------------------- /.github/workflows/cloudflare.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Cloudflare Proxied Networks Update 3 | 4 | on: # yamllint disable-line rule:truthy 5 | workflow_dispatch: 6 | schedule: 7 | - cron: "0 0 * * *" 8 | 9 | env: 10 | CLOUDFLARE_PROXIED_NETWORKS_FILE: >- 11 | cluster/apps/networking/ingress-nginx/cloudflare-proxied-networks.txt 12 | jobs: 13 | update: 14 | name: Update 15 | runs-on: ubuntu-22.04 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v4 19 | 20 | - name: Generate Token 21 | uses: tibdex/github-app-token@v2 22 | id: generate-token 23 | with: 24 | app_id: ${{ secrets.BOT_APP_ID }} 25 | private_key: ${{ secrets.BOT_APP_PRIVATE_KEY }} 26 | 27 | - name: Cloudflare Proxied Networks 28 | run: | 29 | bash ./.github/scripts/cloudflare-proxied-networks.sh > ${{ env.CLOUDFLARE_PROXIED_NETWORKS_FILE }} 30 | 31 | - name: Create pull request 32 | uses: peter-evans/create-pull-request@v5 33 | with: 34 | token: ${{ steps.generate-token.outputs.token }} 35 | branch: github-action/update-cloudflare-proxied-networks 36 | delete-branch: true 37 | title: "chore(github-action): update cloudflare proxied networks" 38 | signoff: true 39 | commit-message: "chore(github-action): update cloudflare proxied networks" 40 | body: | 41 | Update cloudflare proxy networks configmap from https://www.cloudflare.com/ips/ 42 | labels: | 43 | renovate/github-action 44 | -------------------------------------------------------------------------------- /.github/workflows/labels.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Labels 3 | 4 | on: # yamllint disable-line rule:truthy 5 | pull_request: 6 | branches: 7 | - main 8 | 9 | jobs: 10 | labels: 11 | name: Labels 12 | runs-on: ubuntu-22.04 13 | steps: 14 | - name: Checkout 15 | uses: actions/checkout@v4 16 | 17 | - name: Generate Token 18 | uses: tibdex/github-app-token@v2 19 | id: generate-token 20 | with: 21 | app_id: ${{ secrets.BOT_APP_ID }} 22 | private_key: ${{ secrets.BOT_APP_PRIVATE_KEY }} 23 | 24 | - name: Generate Size 25 | uses: pascalgn/size-label-action@v0.5.5 26 | env: 27 | GITHUB_TOKEN: ${{ steps.generate-token.outputs.token }} 28 | with: 29 | sizes: > 30 | { 31 | "0": "XS", 32 | "20": "S", 33 | "50": "M", 34 | "200": "L", 35 | "800": "XL", 36 | "2000": "XXL" 37 | } 38 | 39 | - name: Generate Labels 40 | uses: actions/labeler@v5 41 | with: 42 | configuration-path: .github/labeler.yaml 43 | repo-token: ${{ steps.generate-token.outputs.token }} 44 | 45 | - name: Sync Labels 46 | uses: EndBug/label-sync@v2 47 | with: 48 | 
config-file: .github/labels.yaml 49 | token: ${{ steps.generate-token.outputs.token }} 50 | -------------------------------------------------------------------------------- /.github/workflows/lint.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Lint 3 | 4 | on: # yamllint disable-line rule:truthy 5 | workflow_dispatch: 6 | pull_request: 7 | branches: 8 | - main 9 | 10 | concurrency: 11 | group: ${{ github.ref }}-${{ github.workflow }} 12 | cancel-in-progress: true 13 | 14 | jobs: 15 | build: 16 | name: MegaLinter 17 | runs-on: ubuntu-22.04 18 | steps: 19 | - name: Checkout 20 | uses: actions/checkout@v4 21 | with: 22 | fetch-depth: 0 23 | 24 | - name: Generate Token 25 | uses: tibdex/github-app-token@v2 26 | id: generate-token 27 | with: 28 | app_id: ${{ secrets.BOT_APP_ID }} 29 | private_key: ${{ secrets.BOT_APP_PRIVATE_KEY }} 30 | 31 | - name: MegaLinter 32 | uses: oxsecurity/megalinter@v7.13.0 33 | env: 34 | VALIDATE_ALL_CODEBASE: ${{ github.event_name == 'workflow_dispatch' }} 35 | GITHUB_TOKEN: ${{ steps.generate-token.outputs.token }} 36 | ENABLE_LINTERS: ACTION_ACTIONLINT,BASH_SHELLCHECK,GIT_GIT_DIFF,JSON_PRETTIER,KUBERNETES_KUBEVAL,TERRAFORM_TFLINT,YAML_YAMLLINT 37 | ACTION_ACTIONLINT_FILTER_REGEX_EXCLUDE: (helm-release-differ.yaml) 38 | FILTER_REGEX_EXCLUDE: (.*\\.sops\\.ya?ml) 39 | JSON_PRETTIER_CONFIG_FILE: .prettierrc.yaml 40 | JSON_PRETTIER_FILTER_REGEX_EXCLUDE: (.vscode) 41 | KUBERNETES_DIRECTORY: cluster 42 | KUBERNETES_KUBEVAL_ARGUMENTS: --ignore-missing-schemas 43 | TERRAFORM_TFLINT_CONFIG_FILE: .tflint.hcl 44 | YAML_YAMLLINT_CONFIG_FILE: .yamllint.yaml 45 | -------------------------------------------------------------------------------- /.github/workflows/renovate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | name: Renovate 3 | 4 | on: # yamllint disable-line rule:truthy 5 | workflow_dispatch: 6 | inputs: 7 | dryRun: 8 | description: "Dry-Run" 9 | default: "false" 10 | required: false 11 | logLevel: 12 | description: "Log-Level" 13 | default: "debug" 14 | required: false 15 | schedule: 16 | - cron: "0 */4 * * *" 17 | push: 18 | branches: 19 | - main 20 | paths: 21 | - ".github/renovate.json5" 22 | - ".github/renovate/**.json5" 23 | 24 | env: 25 | LOG_LEVEL: debug 26 | RENOVATE_DRY_RUN: false 27 | RENOVATE_CONFIG_FILE: .github/renovate.json5 28 | 29 | jobs: 30 | renovate: 31 | name: Renovate 32 | runs-on: ubuntu-22.04 33 | steps: 34 | - name: Checkout 35 | uses: actions/checkout@v4 36 | 37 | - name: Generate Token 38 | uses: tibdex/github-app-token@v2 39 | id: generate-token 40 | with: 41 | app_id: ${{ secrets.BOT_APP_ID }} 42 | private_key: ${{ secrets.BOT_APP_PRIVATE_KEY }} 43 | 44 | - name: Override default config from dispatch variables 45 | run: | 46 | echo "RENOVATE_DRY_RUN=${{ github.event.inputs.dryRun || env.RENOVATE_DRY_RUN }}" >> $GITHUB_ENV 47 | echo "LOG_LEVEL=${{ github.event.inputs.logLevel || env.LOG_LEVEL }}" >> $GITHUB_ENV 48 | 49 | - name: Renovate 50 | uses: renovatebot/github-action@v39.2.4 51 | with: 52 | configurationFile: ${{ env.RENOVATE_CONFIG_FILE }} 53 | token: ${{ steps.generate-token.outputs.token }} 54 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Trash 2 | .DS_Store 3 | Thumbs.db 4 | # Binaries 5 | bin 6 | /flux 7 | *.iso 8 | # Temp folders 9 | .temp* 10 | .private/ 11 | .logs/ 12 | .task/ 13
| # Ansible 14 | mrlesmithjr.zfs* 15 | xanmanning.k3s* 16 | # Terraform 17 | .terraform 18 | .terraform.tfstate* 19 | terraform.tfstate* 20 | # Sops 21 | .decrypted~* 22 | *.agekey 23 | # Kubernetes 24 | kubeconfig 25 | # Other 26 | report.html 27 | renovate.log 28 | # Node 29 | node_modules 30 | package-lock.json 31 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | fail_fast: false 3 | repos: 4 | - repo: https://github.com/adrienverge/yamllint 5 | rev: v1.37.1 6 | hooks: 7 | - args: 8 | - --config-file 9 | - .github/linters/.yamllint.yaml 10 | id: yamllint 11 | - repo: https://github.com/pre-commit/pre-commit-hooks 12 | rev: v4.6.0 13 | hooks: 14 | - id: trailing-whitespace 15 | - id: end-of-file-fixer 16 | - id: mixed-line-ending 17 | - repo: https://github.com/Lucas-C/pre-commit-hooks 18 | rev: v1.5.5 19 | hooks: 20 | - id: remove-crlf 21 | - id: remove-tabs 22 | - repo: https://github.com/sirosen/fix-smartquotes 23 | rev: 0.2.0 24 | hooks: 25 | - id: fix-smartquotes 26 | - repo: https://github.com/k8s-at-home/sops-pre-commit 27 | rev: v2.1.1 28 | hooks: 29 | - id: forbid-secrets 30 | - repo: https://github.com/zricethezav/gitleaks 31 | rev: v8.27.0 32 | hooks: 33 | - id: gitleaks 34 | -------------------------------------------------------------------------------- /.sops.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | creation_rules: 3 | - path_regex: infrastructure/.*\.sops\.ya?ml 4 | unencrypted_regex: "^(kind)$" 5 | age: >- 6 | age1vfgg4n4snp0ktjm83gsv3nptdw39mw8q7fw7dzghgfpllc040vgsd6yypm 7 | 8 | - path_regex: cluster/.*\.ya?ml 9 | encrypted_regex: "^(data|stringData)$" 10 | age: >- 11 | age1vfgg4n4snp0ktjm83gsv3nptdw39mw8q7fw7dzghgfpllc040vgsd6yypm 12 | -------------------------------------------------------------------------------- /.sourceignore: -------------------------------------------------------------------------------- 1 | .github/ 2 | .taskfiles/ 3 | .vscode/ 4 | hack/ 5 | infrastructure/ 6 | .gitattributes 7 | .gitignore 8 | .pre-commit-config.yaml 9 | .sops.yaml 10 | LICENSE 11 | README.md 12 | Taskfile.yml 13 | -------------------------------------------------------------------------------- /.taskfiles/ansible.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | tasks: 5 | list: 6 | desc: List all the hosts 7 | dir: ansible 8 | cmds: 9 | - "ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml --list-hosts" 10 | silent: true 11 | 12 | ping: 13 | desc: Ping all the hosts 14 | dir: ansible 15 | cmds: 16 | - "ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml --one-line -m 'ping'" 17 | 18 | uptime: 19 | desc: Uptime of all the hosts 20 | dir: ansible 21 | cmds: 22 | - ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml --one-line -a 'uptime' 23 | 24 | reboot: 25 | desc: Reboot all the k8s nodes 26 | dir: ansible 27 | cmds: 28 | - "ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml -a '/usr/bin/systemctl reboot' --become" 29 | 30 | shutdown: 31 | desc: Shutdown all the k8s nodes 32 | dir: ansible 33 | cmds: 34 | - "ansible all -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml -a '/usr/bin/systemctl poweroff' --become" 35 | 36 | k3s-install: 37 | desc: Install Kubernetes on the nodes 38 | dir: ansible 39 | cmds: 40 | - "ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/k3s/install.yml" 
41 | silent: true 42 | 43 | k3s-nuke: 44 | desc: Nuke Kubernetes on the nodes 45 | dir: ansible 46 | cmds: 47 | - "ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/k3s/nuke.yml" 48 | 49 | nas-install: 50 | desc: Prepare storage node for k8s cluster 51 | dir: ansible 52 | cmds: 53 | - "ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/nas/install.yml" 54 | silent: true 55 | 56 | ubuntu-prepare: 57 | desc: Prepare all the k8s nodes for running k3s 58 | dir: ansible 59 | cmds: 60 | - "ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/ubuntu/prepare.yml" 61 | silent: true 62 | 63 | ubuntu-upgrade: 64 | desc: Upgrade all the k8s nodes operating system 65 | dir: ansible 66 | cmds: 67 | - "ansible-playbook -i {{.ANSIBLE_INVENTORY_DIR}}/hosts.yml {{.ANSIBLE_PLAYBOOK_DIR}}/ubuntu/upgrade.yml" 68 | silent: true 69 | -------------------------------------------------------------------------------- /.taskfiles/blocky.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | tasks: 5 | enable: 6 | desc: Enable adblocking in blocky 7 | cmds: 8 | - "{{.PROJECT_DIR}}/hack/blocky.sh enable" 9 | preconditions: 10 | - "test -f {{.PROJECT_DIR}}/hack/blocky.sh" 11 | 12 | disable: 13 | desc: Disable adblocking in blocky 14 | cmds: 15 | - "{{.PROJECT_DIR}}/hack/blocky.sh disable" 16 | preconditions: 17 | - "test -f {{.PROJECT_DIR}}/hack/blocky.sh" 18 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "HashiCorp.terraform", 4 | "britesnow.vscode-toggle-quotes", 5 | "ms-kubernetes-tools.vscode-kubernetes-tools", 6 | "oderwat.indent-rainbow", 7 | "redhat.ansible", 8 | "signageos.signageos-vscode-sops", 9 | "usernamehw.errorlens", 10 | "wholroyd.jinja", 11 | "standard.vscode-standard" 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "ansible.ansibleLint.enabled": true, 3 | "ansible.ansibleLint.arguments": "-c .github/linters/.ansible-lint", 4 | "discord.enabled": true, 5 | "files.associations": { 6 | "*.json5": "json5", 7 | "**/ansible/**/*.yml": "ansible", 8 | "**/ansible/**/*.sops.yml": "yaml", 9 | "**/ansible/**/inventory/**/*.yml": "yaml", 10 | "**/terraform/**/*.tf": "terraform" 11 | }, 12 | "material-icon-theme.folders.associations": { 13 | ".taskfiles": "utils", 14 | "hack": "scripts" 15 | }, 16 | "prettier.configPath": ".github/linters/.prettierrc.yaml", 17 | "prettier.ignorePath": ".github/linters/.prettierignore", 18 | "yaml.schemas": { 19 | "Kubernetes": "cluster/*.yaml" 20 | }, 21 | "editor.bracketPairColorization.enabled": true, 22 | "editor.guides.bracketPairs": "active", 23 | "standard.enable": true, 24 | "standard.engine": "standard", 25 | "standard.usePackageJson": true, 26 | "standard.workingDirectories": ["./gluctl"], 27 | "standard.autoFixOnSave": true 28 | } 29 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2021 k8s@home 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated
documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Taskfile.yml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | vars: 5 | PROJECT_DIR: 6 | sh: "git rev-parse --show-toplevel" 7 | CLUSTER_DIR: "{{.PROJECT_DIR}}/cluster" 8 | ANSIBLE_DIR: "{{.PROJECT_DIR}}/infrastructure/ansible" 9 | ANSIBLE_PLAYBOOK_DIR: "{{.ANSIBLE_DIR}}/playbooks" 10 | ANSIBLE_INVENTORY_DIR: "{{.ANSIBLE_DIR}}/inventory" 11 | K3S_PRIMARY_MASTER_NODE_USERNAME: "ubuntu" 12 | K3S_PRIMARY_MASTER_NODE_ADDR: "10.10.0.10" 13 | K3S_LB_ADDR: "10.10.0.10" 14 | 15 | env: 16 | ANSIBLE_CONFIG: "{{.ANSIBLE_DIR}}/ansible.cfg" 17 | KUBECONFIG: "{{.CLUSTER_DIR}}/kubeconfig" 18 | 19 | includes: 20 | ansible: .taskfiles/ansible.yml 21 | blocky: .taskfiles/blocky.yml 22 | cluster: .taskfiles/cluster.yml 23 | 24 | tasks: 25 | deps: 26 | - task: deps:ansible 27 | - task: deps:pre-commit 28 | 29 | deps:ansible: 30 | desc: install/upgrade ansible deps 31 | dir: ansible 32 | cmds: 33 | - "ansible-galaxy install -r requirements.yml --force" 34 | - "ansible-galaxy collection install -r requirements.yml --collections-path ~/.ansible/collections --force" 35 | 36 | deps:pre-commit: 37 | desc: install/upgrade pre-commit deps 38 | cmds: 39 | - pre-commit install-hooks 40 | 41 | pre-commit: 42 | desc: Run pre-commit 43 | cmds: 44 | - pre-commit run --all-files 45 | -------------------------------------------------------------------------------- /cluster/apps/databases/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - namespace.yaml 6 | - postgresql 7 | -------------------------------------------------------------------------------- /cluster/apps/databases/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: databases 6 | labels: 7 | kustomize.toolkit.fluxcd.io/prune: disabled 8 | -------------------------------------------------------------------------------- /cluster/apps/databases/postgresql/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: postgresql 6 | namespace: databases 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: postgresql 12 | version: 13.2.25 13 | 
sourceRef: 14 | kind: HelmRepository 15 | name: bitnami 16 | namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | diagnosticMode: 29 | enabled: false 30 | global: 31 | postgresql: 32 | auth: 33 | postgresPassword: "${POSTGRES_ADMIN_PASSWORD}" 34 | username: j_r0dd 35 | password: "${POSTGRES_PASSWORD}" 36 | database: wyoa 37 | image: 38 | registry: docker.io 39 | repository: bitnami/postgresql 40 | tag: "15.5.0" 41 | primary: 42 | persistence: 43 | enabled: true 44 | existingClaim: postgresql-data 45 | metrics: 46 | enabled: true 47 | serviceMonitor: 48 | enabled: true 49 | -------------------------------------------------------------------------------- /cluster/apps/databases/postgresql/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - pvc.yaml 6 | - helm-release.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/databases/postgresql/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: postgresql-data 6 | namespace: databases 7 | labels: 8 | snapshot.home.arpa/enabled: "true" 9 | app.kubernetes.io/instance: postgresql 10 | app.kubernetes.io/name: postgresql 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 5Gi 17 | storageClassName: openebs-zfspv-ssd 18 | -------------------------------------------------------------------------------- /cluster/apps/discord/discord-bot-react-frontend/helm-release.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 2 | kind: HelmRelease 3 | metadata: 4 | name: discord-bot-react-frontend 5 | namespace: discord 6 | spec: 7 | interval: 30m 8 | chart: 9 | spec: 10 | chart: app-template 11 | version: 2.4.0 12 | sourceRef: 13 | kind: HelmRepository 14 | name: bjw-s 15 | namespace: flux-system 16 | maxHistory: 3 17 | install: 18 | remediation: 19 | retries: 3 20 | upgrade: 21 | cleanupOnFail: true 22 | remediation: 23 | retries: 3 24 | uninstall: 25 | keepHistory: false 26 | values: 27 | defaultPodOptions: 28 | imagePullSecrets: 29 | - name: ghcr-io-creds 30 | controllers: 31 | main: 32 | strategy: RollingUpdate 33 | annotations: 34 | reloader.stakater.com/auto: "true" 35 | containers: 36 | main: 37 | image: 38 | repository: ghcr.io/jr0dd/discord-bot-react-frontend 39 | tag: v0.1.10 # {"$imagepolicy": "flux-system:discord-ghcr-io:tag"} 40 | pullPolicy: Always 41 | env: 42 | TZ: "${TZ}" 43 | NODE_ENV: "production" 44 | REACT_APP_BASE_URL: "https://ws.${SECRET_DOMAIN}" 45 | resources: 46 | requests: 47 | cpu: 10m 48 | memory: 100Mi 49 | limits: 50 | memory: 750Mi 51 | service: 52 | main: 53 | ports: 54 | http: 55 | port: 3000 56 | ingress: 57 | main: 58 | enabled: true 59 | ingressClassName: nginx 60 | annotations: 61 | cert-manager.io/cluster-issuer: letsencrypt-production 62 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 63 | hosts: 64 | - host: &host "discord.${SECRET_DOMAIN}" 65 | paths: 66 | - path: / 67 | service: 68 | name: main 69 | port: http 70 | tls: 71 | - secretName: discord-tls 72 | hosts: 73 | - *host 74 | 
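Aside (illustrative — not a file from the repository): the trailing marker on the image tag above, # {"$imagepolicy": "flux-system:discord-ghcr-io:tag"}, is the hook Flux image-update automation uses to rewrite the tag in Git; the referenced ImagePolicy is defined under cluster/flux/repositories/image/ (discord-ghcr.yaml, not shown here). A rough sketch of generating a comparable policy with the flux CLI, where the ImageRepository name and the semver range are assumptions:

# Hypothetical: prints an ImagePolicy manifest like the one the marker references.
# "--image-ref" names an ImageRepository object (assumed to also be called discord-ghcr-io);
# the version range is a guess, the real one lives in discord-ghcr.yaml.
flux create image policy discord-ghcr-io \
  --namespace=flux-system \
  --image-ref=discord-ghcr-io \
  --select-semver=">=0.1.0" \
  --export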
-------------------------------------------------------------------------------- /cluster/apps/discord/discord-bot-react-frontend/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/discord/discord-stock-ticker/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: &app discord-stock-ticker 6 | namespace: discord 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: app-template 12 | version: 2.4.0 13 | sourceRef: 14 | kind: HelmRepository 15 | name: bjw-s 16 | namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | controllers: 29 | main: 30 | strategy: RollingUpdate 31 | annotations: 32 | reloader.stakater.com/auto: "true" 33 | containers: 34 | main: 35 | image: 36 | repository: ghcr.io/jr0dd/discord-stock-ticker 37 | tag: v3.10.8@sha256:cfa5cc11666caa4384aaef00b252b8ab8502a403507a2b8d5196d54e224b6b09 38 | env: 39 | TZ: "${TZ}" 40 | envFrom: 41 | - secretRef: 42 | name: discord-tokens 43 | lifecycle: 44 | postStart: 45 | exec: 46 | command: ["bash", "/config/payload.sh"] 47 | resources: 48 | requests: 49 | cpu: 10m 50 | memory: 100Mi 51 | limits: 52 | memory: 200Mi 53 | service: 54 | main: 55 | ports: 56 | http: 57 | port: 8080 58 | serviceMonitor: 59 | main: 60 | enabled: true 61 | serviceName: *app 62 | endpoints: 63 | - interval: 30s 64 | path: /metrics 65 | port: http 66 | scrapeTimeout: 10s 67 | persistence: 68 | config: 69 | enabled: true 70 | type: configMap 71 | name: payload-config 72 | globalMounts: 73 | - path: /config/payload.sh 74 | subPath: payload.sh 75 | defaultMode: 493 76 | -------------------------------------------------------------------------------- /cluster/apps/discord/discord-stock-ticker/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - prometheus-rule.yaml 7 | - secret.sops.yaml 8 | namespace: discord 9 | configMapGenerator: 10 | - name: payload-config 11 | files: 12 | - payload.sh 13 | generatorOptions: 14 | disableNameSuffixHash: true 15 | annotations: 16 | kustomize.toolkit.fluxcd.io/substitute: disabled 17 | -------------------------------------------------------------------------------- /cluster/apps/discord/discord-stock-ticker/prometheus-rule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | name: discord-stock-ticker 6 | namespace: discord 7 | spec: 8 | groups: 9 | - name: discord-stock-ticker 10 | rules: 11 | - alert: DiscordStockTickerAbsent 12 | annotations: 13 | description: Discord Stock Ticker has disappeared from Prometheus service 14 | discovery. 15 | summary: Discord Stock Ticker is down. 
16 | expr: | 17 | absent(up{job="discord-stock-ticker"} == 1) 18 | for: 5m 19 | labels: 20 | severity: critical 21 | - alert: ZeroTickers 22 | annotations: 23 | description: Discord Stock Ticker did not receive the payload to the api. 24 | summary: Bot has 0 tickers. 25 | expr: | 26 | ticker_count == 0 27 | for: 5m 28 | labels: 29 | severity: critical 30 | -------------------------------------------------------------------------------- /cluster/apps/discord/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: discord 5 | resources: 6 | - namespace.yaml 7 | - secret.sops.yaml 8 | - discord-bot-react-frontend 9 | - discord-stock-ticker 10 | - puppeteer 11 | - whalestream-bot 12 | # - ws-staging-bot 13 | - wyoa-bot 14 | -------------------------------------------------------------------------------- /cluster/apps/discord/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: discord 6 | -------------------------------------------------------------------------------- /cluster/apps/discord/puppeteer/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/discord/secret.sops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: ghcr-io-creds 5 | namespace: discord 6 | type: kubernetes.io/dockerconfigjson 7 | data: 8 | .dockerconfigjson: ENC[AES256_GCM,data:pIuasFlMv2ojOJTm9F19+gAL9ao3xfDuGQtap6dxvXQsqo5EyIEEOFB/920NoY5j704tkwmdkC1/9Kkfa3szpGoT33S9lhPbKy7O9YKENyQysNE5X5CX7NvSVh5IG4YMphT4RrdMtYk/61RoDGSA2lB70woYYP3snVJsseIn5NsmD0aCeH0fqck3NOBquIKDGQ+aWR3YABH5KGx0tUMiTJIV604n8Mu69QadToN9NgWLiaJGsGW+zyImRC/4jwRkeFSplNXXjr/O2xIz1zM3sWY1tzMohxZCL13lpdfge4Szp0UK1fsYgE03z3+pqo0SC1mEvlR0Q7BjRZ27zba1mnax7Z4MMXku,iv:Egz1Jw1zImJxWhaoFjwVidr7h+Rbdqs7hLH9OOQ59Mg=,tag:aSd8bDEEKCkjuw1L7zcI3g==,type:str] 9 | sops: 10 | kms: [] 11 | gcp_kms: [] 12 | azure_kv: [] 13 | hc_vault: [] 14 | age: 15 | - recipient: age1vfgg4n4snp0ktjm83gsv3nptdw39mw8q7fw7dzghgfpllc040vgsd6yypm 16 | enc: | 17 | -----BEGIN AGE ENCRYPTED FILE----- 18 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB6OEtzM0QyNlFmSHE2bjdm 19 | NDQyNHFhbERtWTFQR21aOThTZzZ1bElhaXpvCkkzclpQZzFDejNQc1VPV2tOYThq 20 | ZDN0OXVDMFRVUjBQZkpTTW0zdUxJbzAKLS0tIGQvOSt1SnZNb2dQZTY1Q0NySUdP 21 | THdNVnM0ZTRjVWxYWTJaOW83dU1MOVkKCZHsN6md1uc9NybP+R5ntnfJ5efHONkk 22 | E07sCwALSGdpHP+S/zXjL3ku+q30X7dbWb845nDqfLYXyYbQE/kiuw== 23 | -----END AGE ENCRYPTED FILE----- 24 | lastmodified: "2022-06-18T22:03:36Z" 25 | mac: ENC[AES256_GCM,data:oRL/O6tvzLM+atpMrCM96pyIZkpIqYHmNY8uuNfv9Ulp8rAx9VOuBFs/hHowr0+cOst4y7xAIIOVHKLWNB0UmVHii6xDv7eW+P8p8N/n8hAKqLp85R6NqbFMKPkDEmflJBjREMn8s6ktFCX9/mHwWBquS6FYQf7G3cN6xiOnYKU=,iv:5pKKui3JIqQd36PhXJXMgmdJ/7ZNbBpA193TjDWayd4=,tag:4awOVKpM15nyOs9P00KB8w==,type:str] 26 | pgp: [] 27 | encrypted_regex: ^(data|stringData)$ 28 | version: 3.7.3 29 | -------------------------------------------------------------------------------- /cluster/apps/discord/whalestream-bot/kustomization.yaml: -------------------------------------------------------------------------------- 
1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/discord/ws-staging-bot/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/discord/wyoa-bot/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/home/homebridge/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - pvc.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/home/homebridge/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: homebridge-config 6 | namespace: home 7 | labels: 8 | snapshot.home.arpa/enabled: "true" 9 | app.kubernetes.io/instance: homebridge 10 | app.kubernetes.io/name: homebridge 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 250Mi 17 | storageClassName: openebs-zfspv-ssd 18 | -------------------------------------------------------------------------------- /cluster/apps/home/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: home 5 | resources: 6 | - namespace.yaml 7 | - homebridge 8 | -------------------------------------------------------------------------------- /cluster/apps/home/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: home 6 | labels: 7 | kustomize.toolkit.fluxcd.io/prune: disabled 8 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/cert-manager/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: cert-manager 6 | namespace: kube-system 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: cert-manager 12 | version: v1.13.3 13 | sourceRef: 14 | kind: HelmRepository 15 | name: jetstack 16 | namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | installCRDs: false 29 | webhook: 30 | enabled: true 31 | extraArgs: 32 | - "--dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53" 33 | - "--dns01-recursive-nameservers-only" 34 | podDnsPolicy: "None" 35 | podDnsConfig: 36 | nameservers: 37 | - "1.1.1.1" 38 | - "9.9.9.9" 39 | prometheus: 40 | enabled: true 41 | servicemonitor: 42 | enabled: true 43 | 
prometheusInstance: monitoring 44 | resources: 45 | requests: 46 | cpu: 10m 47 | memory: 100Mi 48 | limits: 49 | memory: 250Mi 50 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/cert-manager/issuers/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - letsencrypt-production.yaml 6 | - letsencrypt-staging.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/cert-manager/issuers/letsencrypt-production.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-production 6 | spec: 7 | acme: 8 | server: https://acme-v02.api.letsencrypt.org/directory 9 | email: "${SECRET_EMAIL}" 10 | privateKeySecretRef: 11 | name: letsencrypt-production 12 | solvers: 13 | - dns01: 14 | cloudflare: 15 | email: "${SECRET_EMAIL}" 16 | apiTokenSecretRef: 17 | name: cloudflare-token 18 | key: token 19 | selector: 20 | dnsZones: 21 | - ${SECRET_DOMAIN} 22 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/cert-manager/issuers/letsencrypt-staging.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: ClusterIssuer 4 | metadata: 5 | name: letsencrypt-staging 6 | spec: 7 | acme: 8 | server: https://acme-staging-v02.api.letsencrypt.org/directory 9 | email: "${SECRET_EMAIL}" 10 | privateKeySecretRef: 11 | name: letsencrypt-staging 12 | solvers: 13 | - dns01: 14 | cloudflare: 15 | email: "${SECRET_EMAIL}" 16 | apiTokenSecretRef: 17 | name: cloudflare-token 18 | key: token 19 | selector: 20 | dnsZones: 21 | - ${SECRET_DOMAIN} 22 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/cert-manager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | # renovate: datasource=docker image=quay.io/jetstack/cert-manager-controller 6 | - https://github.com/cert-manager/cert-manager/releases/download/v1.13.3/cert-manager.crds.yaml 7 | - helm-release.yaml 8 | - issuers 9 | - prometheus-rule.yaml 10 | - secret.sops.yaml 11 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/cert-manager/secret.sops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: cloudflare-token 5 | namespace: kube-system 6 | type: Opaque 7 | stringData: 8 | token: ENC[AES256_GCM,data:5R076EBMxkAS1jdEnonE+JnjxJCHkt84UWT6BxuFAyHNlgZ2n/NdsA==,iv:HDjZv7uem3i+j+5nanR0Wqs+FxlW1KcFpTe/9pmzMTg=,tag:Txkvr+RwbaH7Hfz/ghTddQ==,type:str] 9 | sops: 10 | kms: [] 11 | gcp_kms: [] 12 | azure_kv: [] 13 | hc_vault: [] 14 | age: 15 | - recipient: age1vfgg4n4snp0ktjm83gsv3nptdw39mw8q7fw7dzghgfpllc040vgsd6yypm 16 | enc: | 17 | -----BEGIN AGE ENCRYPTED FILE----- 18 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBLa3l4TDgzNDdKUE96MEhR 19 | YWJ0ZUdHUkZJd0hlVEVjb0F6RlRGK2dzRWtRCmFIRHR2YXhXYzhmN3JoT2xabjFz 20 | ekVxOHJVRXFML1VFNkNUZWtlNXhIVEkKLS0tIDV5VXVBSXhhMXBKcEdveWppaWpa 21 | 
aWhxd2RuWi8wQ05UclNFTXljSVFIMDgKiRU7MWS+4p9EC+eLxvfg0SUwQ7RlODlc 22 | QPbra3dQ0L7VLDO2m/d6yuLdNZ0uLfNvtwuzM6PfZy1SedSAc8R4mQ== 23 | -----END AGE ENCRYPTED FILE----- 24 | lastmodified: "2022-07-06T06:20:08Z" 25 | mac: ENC[AES256_GCM,data:TcC+NWCMdKqqMcinj6dzyDri3aPMkD3e8jdM7y2g1dxYZky/pgQfNSsjG8w5t4h4jfi34mzFvFEdR75dqLe3yl3evz/SnrDcYq4fo6WAd7HjWbxSTz/ajKFNWcboZt5NpV/Cyziidc99dSNd0OIPzl+ooMo8RXiZatwVtPgkJUA=,iv:6sVJp27QITQFDNA0Gb9Ahd0c1UwVUWQggEBZz8S0Izo=,tag:anj9kPTJcJR4GOliugBBPw==,type:str] 26 | pgp: [] 27 | encrypted_regex: ^(data|stringData)$ 28 | version: 3.7.3 29 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/cilium/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/coredns/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - prometheus-rule.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: kube-system 5 | resources: 6 | - cert-manager 7 | - cilium 8 | - coredns 9 | - metrics-server 10 | - reloader 11 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/metrics-server/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: metrics-server 6 | namespace: kube-system 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: metrics-server 12 | version: 3.11.0 13 | sourceRef: 14 | kind: HelmRepository 15 | name: metrics-server 16 | namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | args: 29 | - --kubelet-insecure-tls 30 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname 31 | - --kubelet-use-node-status-port 32 | - --metric-resolution=15s 33 | metrics: 34 | enabled: true 35 | serviceMonitor: 36 | enabled: true 37 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/metrics-server/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/reloader/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: reloader 6 | namespace: kube-system 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: reloader 12 | version: 1.0.58 13 | sourceRef: 14 | kind: HelmRepository 15 | name: stakater 16 | 
namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | reloader: 29 | podMonitor: 30 | enabled: true 31 | resources: 32 | requests: 33 | cpu: 15m 34 | memory: 184M 35 | limits: 36 | memory: 184M 37 | -------------------------------------------------------------------------------- /cluster/apps/kube-system/reloader/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - databases 6 | - discord 7 | - home 8 | - kube-system 9 | # - kyverno 10 | - media 11 | - monitoring 12 | - networking 13 | - openebs 14 | - storage 15 | - system-upgrade 16 | -------------------------------------------------------------------------------- /cluster/apps/kyverno/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: kyverno 6 | namespace: kyverno 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: kyverno 12 | version: 3.1.1 13 | sourceRef: 14 | kind: HelmRepository 15 | name: kyverno 16 | namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | installCRDs: true 29 | extraArgs: 30 | - --autogenInternals=false 31 | - --clientRateLimitQPS=30 32 | - --clientRateLimitBurst=60 33 | serviceMonitor: 34 | enabled: true 35 | topologySpreadConstraints: 36 | - maxSkew: 1 37 | topologyKey: kubernetes.io/hostname 38 | whenUnsatisfiable: DoNotSchedule 39 | labelSelector: 40 | matchLabels: 41 | app.kubernetes.io/instance: kyverno 42 | -------------------------------------------------------------------------------- /cluster/apps/kyverno/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - namespace.yaml 6 | - helm-release.yaml 7 | - policies 8 | - rbac.yaml 9 | -------------------------------------------------------------------------------- /cluster/apps/kyverno/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: kyverno 6 | labels: 7 | kustomize.toolkit.fluxcd.io/prune: disabled 8 | -------------------------------------------------------------------------------- /cluster/apps/kyverno/policies/delete-cpu-limits.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kyverno.io/v1 3 | kind: ClusterPolicy 4 | metadata: 5 | name: delete-cpu-limits 6 | annotations: 7 | policies.kyverno.io/title: Delete CPU limits 8 | policies.kyverno.io/subject: Pod 9 | policies.kyverno.io/description: >- 10 | This policy deletes CPU limits from all Pods. 
11 | spec: 12 | mutateExistingOnPolicyUpdate: true 13 | generateExistingOnPolicyUpdate: true 14 | rules: 15 | - name: delete-cpu-limits 16 | match: 17 | any: 18 | - resources: 19 | kinds: [Pod] 20 | exclude: 21 | any: 22 | - resources: 23 | kinds: [Pod] 24 | selector: 25 | matchLabels: 26 | job-name: "*" 27 | mutate: 28 | patchStrategicMerge: 29 | spec: 30 | initContainers: 31 | - (name): "*" 32 | resources: 33 | limits: 34 | cpu: null 35 | containers: 36 | - (name): "*" 37 | resources: 38 | limits: 39 | cpu: null 40 | -------------------------------------------------------------------------------- /cluster/apps/kyverno/policies/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - delete-cpu-limits.yaml 6 | # - snapshot-job-controller.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/kyverno/rbac.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: rbac.authorization.k8s.io/v1 3 | kind: ClusterRoleBinding 4 | metadata: 5 | name: kyverno:admin 6 | roleRef: 7 | apiGroup: rbac.authorization.k8s.io 8 | kind: ClusterRole 9 | name: admin 10 | subjects: 11 | - kind: ServiceAccount 12 | name: kyverno 13 | namespace: kyverno 14 | -------------------------------------------------------------------------------- /cluster/apps/media/_pvc/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - pvc.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/media/_pvc/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: media 6 | namespace: media 7 | spec: 8 | volumeName: media 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Gi 14 | storageClassName: openebs-zfspv-rust 15 | -------------------------------------------------------------------------------- /cluster/apps/media/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: media 5 | resources: 6 | - namespace.yaml 7 | - _pvc 8 | - lidarr 9 | - plex 10 | - prowlarr 11 | - qbittorrent 12 | - overseerr 13 | - radarr 14 | - sonarr 15 | - tautulli 16 | - theme-park 17 | -------------------------------------------------------------------------------- /cluster/apps/media/lidarr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - pvc.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/lidarr/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: lidarr-config 6 | namespace: media 7 | labels: 8 | snapshot.home.arpa/enabled: "true" 9 | app.kubernetes.io/instance: lidarr 10 | app.kubernetes.io/name: lidarr 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | 
requests: 16 | storage: 5Gi 17 | storageClassName: openebs-zfspv-ssd 18 | -------------------------------------------------------------------------------- /cluster/apps/media/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: media 6 | labels: 7 | kustomize.toolkit.fluxcd.io/prune: disabled 8 | -------------------------------------------------------------------------------- /cluster/apps/media/overseerr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - pvc.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/overseerr/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: overseerr-config 6 | namespace: media 7 | labels: 8 | snapshot.home.arpa/enabled: "true" 9 | app.kubernetes.io/instance: overseerr 10 | app.kubernetes.io/name: overseerr 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 2Gi 17 | storageClassName: openebs-zfspv-ssd 18 | -------------------------------------------------------------------------------- /cluster/apps/media/plex/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - pvc.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/plex/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: plex-config 6 | namespace: media 7 | labels: 8 | snapshot.home.arpa/enabled: "true" 9 | app.kubernetes.io/instance: plex 10 | app.kubernetes.io/name: plex 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 150Gi 17 | storageClassName: openebs-zfspv-ssd 18 | -------------------------------------------------------------------------------- /cluster/apps/media/prowlarr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - pvc.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/prowlarr/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: prowlarr-config 6 | namespace: media 7 | labels: 8 | snapshot.home.arpa/enabled: "true" 9 | app.kubernetes.io/instance: prowlarr 10 | app.kubernetes.io/name: prowlarr 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 1Gi 17 | storageClassName: openebs-zfspv-ssd 18 | -------------------------------------------------------------------------------- /cluster/apps/media/qbittorrent/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - 
helm-release.yaml 6 | - pvc.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/qbittorrent/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: qbittorrent-config 6 | namespace: media 7 | labels: 8 | snapshot.home.arpa/enabled: "true" 9 | app.kubernetes.io/instance: qbittorrent 10 | app.kubernetes.io/name: qbittorrent 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 1Gi 17 | storageClassName: openebs-zfspv-ssd 18 | -------------------------------------------------------------------------------- /cluster/apps/media/radarr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - pvc.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/radarr/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: radarr-config 6 | namespace: media 7 | labels: 8 | snapshot.home.arpa/enabled: "true" 9 | app.kubernetes.io/instance: radarr 10 | app.kubernetes.io/name: radarr 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 2Gi 17 | storageClassName: openebs-zfspv-ssd 18 | -------------------------------------------------------------------------------- /cluster/apps/media/sonarr/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - pvc.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/sonarr/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: sonarr-config 6 | namespace: media 7 | labels: 8 | snapshot.home.arpa/enabled: "true" 9 | app.kubernetes.io/instance: sonarr 10 | app.kubernetes.io/name: sonarr 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 2Gi 17 | storageClassName: openebs-zfspv-ssd 18 | -------------------------------------------------------------------------------- /cluster/apps/media/tautulli/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - pvc.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/media/tautulli/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: tautulli-config 6 | namespace: media 7 | labels: 8 | snapshot.home.arpa/enabled: "true" 9 | app.kubernetes.io/instance: tautulli 10 | app.kubernetes.io/name: tautulli 11 | spec: 12 | accessModes: 13 | - ReadWriteOnce 14 | resources: 15 | requests: 16 | storage: 2Gi 17 | storageClassName: openebs-zfspv-ssd 18 | -------------------------------------------------------------------------------- 
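All of the *arr, Plex, and Tautulli config claims above follow the same pattern: a small ReadWriteOnce PVC on the openebs-zfspv-ssd storage class, labelled snapshot.home.arpa/enabled: "true" so the snapshot tooling can find it. A quick way to confirm that every claim bound and carries the label is sketched below; it is only a convenience check and assumes nothing beyond kubectl access to this cluster and the label/storage-class names used in these manifests.

# List every snapshot-enabled PVC with its bind status and storage class
kubectl get pvc --all-namespaces -l snapshot.home.arpa/enabled=true \
  -o custom-columns='NAMESPACE:.metadata.namespace,NAME:.metadata.name,STATUS:.status.phase,STORAGECLASS:.spec.storageClassName'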
/cluster/apps/media/theme-park/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: theme-park 6 | namespace: media 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: app-template 12 | version: 2.4.0 13 | sourceRef: 14 | kind: HelmRepository 15 | name: bjw-s 16 | namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | controllers: 29 | main: 30 | annotations: 31 | reloader.stakater.com/auto: "true" 32 | containers: 33 | main: 34 | image: 35 | repository: ghcr.io/onedr0p/theme-park 36 | tag: 1.18.0@sha256:6d6ad5a1c3b55b83f7441ba0346cbd60939c776a5038d34c7fb4ac1136bfa271 37 | resources: 38 | requests: 39 | cpu: 10m 40 | memory: 50Mi 41 | limits: 42 | memory: 150Mi 43 | pod: 44 | securityContext: 45 | runAsUser: 568 46 | runAsGroup: 568 47 | fsGroup: 568 48 | fsGroupChangePolicy: OnRootMismatch 49 | service: 50 | main: 51 | ports: 52 | http: 53 | port: 8080 54 | ingress: 55 | main: 56 | enabled: true 57 | ingressClassName: nginx 58 | annotations: 59 | cert-manager.io/cluster-issuer: letsencrypt-production 60 | nginx.ingress.kubernetes.io/whitelist-source-range: 10.0.0.0/8,172.16.0.0/12,192.168.0.0/16 61 | hosts: 62 | - host: &host "theme-park.${SECRET_DOMAIN}" 63 | paths: 64 | - path: / 65 | service: 66 | name: main 67 | port: http 68 | tls: 69 | - hosts: 70 | - *host 71 | -------------------------------------------------------------------------------- /cluster/apps/media/theme-park/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/blackbox-exporter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - prometheus-rule.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/grafana/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - secret.sops.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/grafana/secret.sops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | type: Opaque 4 | metadata: 5 | name: grafana 6 | namespace: monitoring 7 | stringData: 8 | adminUser: ENC[AES256_GCM,data:UdNeq8Uz,iv:et+Yu5VC09h8tKktEYgFxI2Kd2blThdNBKgw0YfkhEM=,tag:Emyg80rvBgPMzeH12hdzDw==,type:str] 9 | adminPass: ENC[AES256_GCM,data:qV/SYVGD5MC1XUEdqDZ1GlBQ/AHoXSvjdM9GcOH9LMMnmAJNVS+bQCQ+UnJdEeR6,iv:3W6iI2hmyTaFOh1loMJgyEFMxcXrBSEla+SsIZ+DuI8=,tag:B6oGw+a0Apj1NlyJBXmU4g==,type:str] 10 | sops: 11 | kms: [] 12 | gcp_kms: [] 13 | azure_kv: [] 14 | hc_vault: [] 15 | age: 16 | - recipient: age1vfgg4n4snp0ktjm83gsv3nptdw39mw8q7fw7dzghgfpllc040vgsd6yypm 17 | enc: | 18 | -----BEGIN AGE ENCRYPTED 
FILE----- 19 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPNnVVb2JnaXVkSlk1MkFk 20 | WWFUaTZtaXFUOGk0UkEvMHVoclQ5elVnQ0M4CkwvVmJMd3JjcVNka2d2YkFkNzls 21 | WEdJS3pEd0gwaFd1ZVpZUitoL2lLbU0KLS0tIGpqb0VyOXdlcUNTRlNPOEo3aWpQ 22 | aHA3WlJqQkVOVHh3OHFncTFnQ3VpenMK+epUYbmAI1vEKtNNph4HCd69bze9Uelv 23 | niU+Dyf1pG1pLTL159Z7TG7on0Wkc9LglqxCCyYkvlZ9Ll+xziRSOA== 24 | -----END AGE ENCRYPTED FILE----- 25 | lastmodified: "2021-12-01T05:29:02Z" 26 | mac: ENC[AES256_GCM,data:uNj0l0GHTggh7e8+bNFEU9vofEhvoXa65X2x5ONwfAdf5i/qClEqUoMZInQGAEsFcNV9RPa47INj079tCiKA30tjSb5s1Gg6p5PDI7wVfItKiuQtgLS68qduM6l22kQ2uWkigFzHNLIkcpYzaszB5omAyKiPlzIsHTkHhNc9G50=,iv:jBtLgSTraDPbFGET4G2RibGNWdLEV9GsjkMxG0gbbRk=,tag:1UplsYK/tUb3pm0UfrMm5Q==,type:str] 27 | pgp: [] 28 | encrypted_regex: ^(data|stringData)$ 29 | version: 3.7.1 30 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/kube-prometheus-stack/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: monitoring 5 | resources: 6 | - namespace.yaml 7 | - blackbox-exporter 8 | - grafana 9 | - kube-prometheus-stack 10 | - loki 11 | - network-ups-tools 12 | - smartctl-exporter 13 | - snmp-exporter 14 | - speedtest-exporter 15 | - thanos 16 | - vector 17 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/loki/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: monitoring 6 | labels: 7 | kustomize.toolkit.fluxcd.io/prune: disabled 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/network-ups-tools/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - prometheus-rule.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/network-ups-tools/prometheus-rule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | name: nut-exporter 6 | spec: 7 | groups: 8 | - name: nut-exporter 9 | rules: 10 | - alert: NutExporterAbsent 11 | annotations: 12 | description: NUT Exporter has disappeared from Prometheus target discovery. 13 | summary: NUT Exporter is down. 
14 | expr: | 15 | absent(up{job=~".*network-ups-tools.*"} == 1) 16 | for: 5m 17 | labels: 18 | severity: critical 19 | - alert: UpsOnBattery 20 | annotations: 21 | description: UPS {{ $labels.ups }} has lost power and is running on battery. 22 | summary: UPS is running on battery. 23 | expr: | 24 | network_ups_tools_ups_status{flag="OB"} == 1 25 | for: 10s 26 | labels: 27 | severity: critical 28 | - alert: UpsLowBattery 29 | annotations: 30 | description: UPS {{ $labels.ups }} battery is low and the system is getting 31 | ready to shut down. 32 | summary: UPS battery is low. 33 | expr: | 34 | network_ups_tools_ups_status{flag="LB"} == 1 35 | for: 5s 36 | labels: 37 | severity: critical 38 | - alert: UpsBatteryReplace 39 | annotations: 40 | description: UPS {{ $labels.ups }} battery needs to be replaced. 41 | summary: Replace UPS battery. 42 | expr: | 43 | network_ups_tools_ups_status{flag="RB"} == 1 44 | for: 10s 45 | labels: 46 | severity: critical 47 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/smartctl-exporter/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: &app smartctl-exporter 6 | namespace: monitoring 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: prometheus-smartctl-exporter 12 | version: 0.7.0 13 | sourceRef: 14 | kind: HelmRepository 15 | name: prometheus-community 16 | namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | fullnameOverride: *app 29 | serviceMonitor: 30 | enabled: true 31 | prometheusRules: 32 | enabled: true 33 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/smartctl-exporter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/snmp-exporter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - mikrotik 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/snmp-exporter/mikrotik/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: prometheus-snmp-exporter 6 | namespace: monitoring 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: prometheus-snmp-exporter 12 | version: 1.8.1 13 | sourceRef: 14 | kind: HelmRepository 15 | name: prometheus-community 16 | namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | fullnameOverride: snmp-exporter-mikrotik 29 | image: 30 | repository: quay.io/prometheus/snmp-exporter 31 | extraArgs: 32 | - "--config.file=/config/snmp.yaml" 33 | extraConfigmapMounts: 34 | - name:
snmp-exporter-mikrotik 35 | mountPath: /config/snmp.yaml 36 | subPath: snmp.yaml 37 | configMap: snmp-exporter-mikrotik 38 | readOnly: true 39 | defaultMode: 420 40 | serviceMonitor: 41 | enabled: true 42 | namespace: monitoring 43 | params: 44 | - name: mikrotik-rb3011 45 | target: 10.10.0.1 46 | module: 47 | - mikrotik 48 | - name: mikrotik-crs328 49 | target: 10.10.0.2 50 | module: 51 | - mikrotik 52 | path: /snmp 53 | scrapeTimeout: 10s 54 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/snmp-exporter/mikrotik/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - config-map.yaml 6 | - helm-release.yaml 7 | - prometheus-rule.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/snmp-exporter/mikrotik/prometheus-rule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | name: snmp-exporter 6 | namespace: monitoring 7 | spec: 8 | groups: 9 | - name: snmp-exporter 10 | rules: 11 | - alert: SnmpExporterAbsent 12 | annotations: 13 | description: SNMP Exporter has disappeared from Prometheus target discovery. 14 | summary: SNMP Exporter is down. 15 | expr: | 16 | absent(up{job=~".*snmp.*"} == 1) 17 | for: 5m 18 | labels: 19 | severity: critical 20 | - alert: SnmpExporterScrapeSlow 21 | annotations: 22 | description: 23 | SNMP Exporter is experiencing slow scraping of {{ $labels.target }} 24 | with a duration of {{ $value }} seconds. 25 | summary: SNMP Exporter slow scraping. 
26 | expr: | 27 | sum(snmp_scrape_duration_seconds) by (target) 28 | > 10 29 | for: 5m 30 | labels: 31 | severity: critical 32 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/speedtest-exporter/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: speedtest-exporter 6 | namespace: monitoring 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: app-template 12 | version: 2.4.0 13 | sourceRef: 14 | kind: HelmRepository 15 | name: bjw-s 16 | namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | controllers: 29 | main: 30 | strategy: RollingUpdate 31 | annotations: 32 | reloader.stakater.com/auto: "true" 33 | containers: 34 | main: 35 | image: 36 | repository: ghcr.io/miguelndecarvalho/speedtest-exporter 37 | tag: v3.5.4 38 | env: 39 | TZ: "${TZ}" 40 | resources: 41 | requests: 42 | cpu: 15m 43 | memory: 50M 44 | limits: 45 | memory: 100M 46 | service: 47 | main: 48 | ports: 49 | http: 50 | enabled: false 51 | metrics: 52 | enabled: true 53 | controller: main 54 | protocol: TCP 55 | port: 9798 56 | serviceMonitor: 57 | main: 58 | enabled: true 59 | endpoints: 60 | - interval: 60m 61 | path: /metrics 62 | port: metrics 63 | scrapeTimeout: 1m 64 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/speedtest-exporter/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - prometheus-rule.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - monitoring 7 | - secret.sops.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/monitoring/compact/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - alerts.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/monitoring/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - compact 6 | - query 7 | - rule 8 | - sidecar 9 | - store-gateway 10 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/monitoring/query/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - alerts.yaml 6 | - rules.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/monitoring/query/rules.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | name: thanos-query.rules 6 | namespace: monitoring 7 | spec: 8 | groups: 9 | - name: thanos-query.rules 10 | rules: 11 | - expr: | 12 | ( 13 | sum by (job) (rate(grpc_client_handled_total{grpc_code=~"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded", job=~".*thanos-query.*", grpc_type="unary"}[5m])) 14 | / 15 | sum by (job) (rate(grpc_client_started_total{job=~".*thanos-query.*", grpc_type="unary"}[5m])) 16 | ) 17 | record: :grpc_client_failures_per_unary:sum_rate 18 | - expr: | 19 | ( 20 | sum by (job) (rate(grpc_client_handled_total{grpc_code=~"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded", job=~".*thanos-query.*", grpc_type="server_stream"}[5m])) 21 | / 22 | sum by (job) (rate(grpc_client_started_total{job=~".*thanos-query.*", grpc_type="server_stream"}[5m])) 23 | ) 24 | record: :grpc_client_failures_per_stream:sum_rate 25 | - expr: | 26 | ( 27 | sum by (job) (rate(thanos_query_store_apis_dns_failures_total{job=~".*thanos-query.*"}[5m])) 28 | / 29 | sum by (job) (rate(thanos_query_store_apis_dns_lookups_total{job=~".*thanos-query.*"}[5m])) 30 | ) 31 | record: :thanos_query_store_apis_dns_failures_per_lookup:sum_rate 32 | - expr: | 33 | histogram_quantile(0.99, 34 | sum by (job, le) (rate(http_request_duration_seconds_bucket{job=~".*thanos-query.*", handler="query"}[5m])) 35 | ) 36 | labels: 37 | quantile: "0.99" 38 | record: :query_duration_seconds:histogram_quantile 39 | - expr: | 40 | histogram_quantile(0.99, 41 | sum by (job, le) (rate(http_request_duration_seconds_bucket{job=~".*thanos-query.*", handler="query_range"}[5m])) 42 | ) 43 | labels: 44 | quantile: "0.99" 45 | record: :api_range_query_duration_seconds:histogram_quantile 46 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/monitoring/rule/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - alerts.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/monitoring/sidecar/alerts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | name: thanos-sidecar 6 | namespace: monitoring 7 | spec: 8 | groups: 9 | - name: thanos-sidecar 10 | rules: 11 | - alert: ThanosSidecarIsDown 12 | annotations: 13 | description: ThanosSidecar has disappeared. Prometheus target for the component 14 | cannot be discovered. 15 | runbook_url: https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarisdown 16 | summary: Thanos component has disappeared. 
17 | expr: | 18 | absent(up{job=~".*thanos-discovery.*"} == 1) 19 | for: 5m 20 | labels: 21 | severity: critical 22 | - alert: ThanosSidecarBucketOperationsFailed 23 | annotations: 24 | description: Thanos Sidecar {{$labels.instance}} bucket operations are failing 25 | runbook_url: https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarbucketoperationsfailed 26 | summary: Thanos Sidecar bucket operations are failing 27 | expr: | 28 | sum by (job, instance) (rate(thanos_objstore_bucket_operation_failures_total{job=~".*thanos-discovery.*"}[5m])) > 0 29 | for: 5m 30 | labels: 31 | severity: critical 32 | - alert: ThanosSidecarNoConnectionToStartedPrometheus 33 | annotations: 34 | description: Thanos Sidecar {{$labels.instance}} is unhealthy. 35 | runbook_url: https://github.com/thanos-io/thanos/tree/main/mixin/runbook.md#alert-name-thanossidecarnoconnectiontostartedprometheus 36 | summary: Thanos Sidecar cannot access Prometheus, even though Prometheus seems 37 | healthy and has reloaded WAL. 38 | expr: | 39 | thanos_sidecar_prometheus_up{job=~".*thanos-discovery.*"} == 0 40 | AND on (namespace, pod) 41 | prometheus_tsdb_data_replay_duration_seconds != 0 42 | for: 5m 43 | labels: 44 | severity: critical 45 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/monitoring/sidecar/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - alerts.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/monitoring/store-gateway/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - alerts.yaml 6 | - rules.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/monitoring/store-gateway/rules.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | name: thanos-store.rules 6 | namespace: monitoring 7 | spec: 8 | groups: 9 | - name: thanos-store.rules 10 | rules: 11 | - expr: | 12 | ( 13 | sum by (job) (rate(grpc_server_handled_total{grpc_code=~"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded", job=~".*thanos-store.*", grpc_type="unary"}[5m])) 14 | / 15 | sum by (job) (rate(grpc_server_started_total{job=~".*thanos-store.*", grpc_type="unary"}[5m])) 16 | ) 17 | record: :grpc_server_failures_per_unary:sum_rate 18 | - expr: | 19 | ( 20 | sum by (job) (rate(grpc_server_handled_total{grpc_code=~"Unknown|ResourceExhausted|Internal|Unavailable|DataLoss|DeadlineExceeded", job=~".*thanos-store.*", grpc_type="server_stream"}[5m])) 21 | / 22 | sum by (job) (rate(grpc_server_started_total{job=~".*thanos-store.*", grpc_type="server_stream"}[5m])) 23 | ) 24 | record: :grpc_server_failures_per_stream:sum_rate 25 | - expr: | 26 | ( 27 | sum by (job) (rate(thanos_objstore_bucket_operation_failures_total{job=~".*thanos-store.*"}[5m])) 28 | / 29 | sum by (job) (rate(thanos_objstore_bucket_operations_total{job=~".*thanos-store.*"}[5m])) 30 | ) 31 | record: :thanos_objstore_bucket_failures_per_operation:sum_rate 32 | - expr: | 33 | histogram_quantile(0.99, 34 | sum by 
(job, le) (rate(thanos_objstore_bucket_operation_duration_seconds_bucket{job=~".*thanos-store.*"}[5m])) 35 | ) 36 | labels: 37 | quantile: "0.99" 38 | record: :thanos_objstore_bucket_operation_duration_seconds:histogram_quantile 39 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/thanos/secret.sops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: thanos-objstore 5 | namespace: monitoring 6 | type: Opaque 7 | stringData: 8 | objstore.yml: ENC[AES256_GCM,data:g1RHg+SbmicDTPlR18wBbnKsA2l/EhNwOeLgmTGYFXCreSgkRMT+qF+7SfAB7LFc2Pc6SCVfPETzD/BCbswubzHlQK3yYJyfCHgqG1Ulc0tXrufpnSVwTMX6gnxX7OqEAorn9W4Xsj3CZ8adKmU0NZfCeGUtERDRDJxDmCdptHbxvd7504ouFOLDf1Bld0uIrVVTbeONEKI90os=,iv:sEWrCfdM4Rq1Wlrk3JfKFUEEFG9Nkvfe6fDoMt6yuqs=,tag:2mW2TimhnANqGL5E3SebrA==,type:str] 9 | sops: 10 | kms: [] 11 | gcp_kms: [] 12 | azure_kv: [] 13 | hc_vault: [] 14 | age: 15 | - recipient: age1vfgg4n4snp0ktjm83gsv3nptdw39mw8q7fw7dzghgfpllc040vgsd6yypm 16 | enc: | 17 | -----BEGIN AGE ENCRYPTED FILE----- 18 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBCQUZuVTBkTUNJZTlmMlZW 19 | bWJEeFp5N0JiVnU5T2NLbUk3OC9PclNKSTJJCnBBcVVQNkdtQW1PU1cydkRKR0gw 20 | M3hraHNtS1p1WW9CaExxUHZiWktPWmcKLS0tIEQ4M3FhdU9pZ1EwS1E5ajI3TEFK 21 | RW1hdjF3RXhJcllqQTgxRzhxZUVaTm8KnmGB506/W2Jg6sv2NgZBgrRW+wscsVmI 22 | xCRNk665LuMv4Fw5hAMhYtbxuWBjxt6rz+ihDuGRAoj3Uj/pH9bgVA== 23 | -----END AGE ENCRYPTED FILE----- 24 | lastmodified: "2022-09-05T12:47:29Z" 25 | mac: ENC[AES256_GCM,data:fKVXw89pRkBFdGlOlkhDdaGEc2X32PjmSIDYCi1EqTAUmv2lcPjxPTVlqDY2h3lVJOSRLLBkVKiFPwVp7dJUq8L5pkhyqzzrZtQ9z/PCxJLfBSPtYIRlfLG3kwNM7qGV1vtl9CMaJ5aTcpKUgJg5vPbIrelQ/Ys4WnNCqOCEF84=,iv:nnA0QLl4G2YqK4g9/hROGdnS+pkpXLCDSyYDkGSVIKE=,tag:0bRZV4+oBUwpLryi6jPE6Q==,type:str] 26 | pgp: [] 27 | encrypted_regex: ^(data|stringData)$ 28 | version: 3.7.3 29 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/vector/agent/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: vector-agent 6 | namespace: monitoring 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: vector 12 | version: 0.29.0 13 | sourceRef: 14 | kind: HelmRepository 15 | name: vector 16 | namespace: flux-system 17 | dependsOn: 18 | - name: vector-aggregator 19 | maxHistory: 3 20 | install: 21 | remediation: 22 | retries: 3 23 | upgrade: 24 | cleanupOnFail: true 25 | remediation: 26 | retries: 3 27 | uninstall: 28 | keepHistory: false 29 | values: 30 | image: 31 | repository: timberio/vector 32 | tag: 0.34.1-debian 33 | role: Agent 34 | customConfig: 35 | data_dir: /vector-data-dir 36 | api: 37 | enabled: false 38 | sources: 39 | journal_logs: 40 | type: journald 41 | journal_directory: /var/log/journal 42 | kubernetes_logs: 43 | type: kubernetes_logs 44 | pod_annotation_fields: 45 | container_image: "container_image" 46 | container_name: "container_name" 47 | pod_annotations: "pod_annotations" 48 | pod_labels: "pod_labels" 49 | pod_name: "pod_name" 50 | vector_metrics: 51 | type: internal_metrics 52 | sinks: 53 | loki_journal: 54 | type: vector 55 | inputs: 56 | - journal_logs 57 | address: vector-aggregator:6000 58 | version: "2" 59 | loki_kubernetes: 60 | type: vector 61 | inputs: 62 | - kubernetes_logs 63 | address: vector-aggregator:6010 64 | version: "2" 65 | prom_exporter: 66 
| type: prometheus_exporter 67 | inputs: 68 | - vector_metrics 69 | address: 0.0.0.0:9090 70 | service: 71 | enabled: false 72 | podMonitor: 73 | enabled: true 74 | securityContext: 75 | privileged: true 76 | tolerations: 77 | - effect: NoSchedule 78 | operator: Exists 79 | - effect: NoExecute 80 | operator: Exists 81 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/vector/agent/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/vector/aggregator/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/monitoring/vector/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: monitoring 5 | resources: 6 | - agent 7 | - aggregator 8 | -------------------------------------------------------------------------------- /cluster/apps/networking/blocky/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - prometheus-rule.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/networking/blocky/prometheus-rule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | name: blocky 6 | namespace: networking 7 | spec: 8 | groups: 9 | - name: blocky 10 | rules: 11 | - alert: BlockyAbsent 12 | annotations: 13 | description: Blocky has disappeared from Prometheus service discovery. 14 | summary: Blocky is down. 15 | expr: | 16 | absent(up{job=~".*blocky.*"} == 1) 17 | for: 5m 18 | labels: 19 | severity: critical 20 | - alert: BlockyDisabled 21 | annotations: 22 | description: Blocky's ad blocking has been disabled for 15min. Please re-enable 23 | protection. 24 | summary: Blocky is disabled. 
25 | expr: | 26 | blocky_blocking_enabled{job=~".*blocky.*"} == 0 27 | for: 15m 28 | labels: 29 | severity: critical 30 | -------------------------------------------------------------------------------- /cluster/apps/networking/cloudflare-ddns/cloudflare-ddns.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o nounset 4 | set -o errexit 5 | 6 | current_ipv4="$(curl -s https://ipv4.icanhazip.com/)" 7 | zone_id=$(curl -s -X GET \ 8 | "https://api.cloudflare.com/client/v4/zones?name=${CF_RECORD_NAME#*.}&status=active" \ 9 | -H "Authorization: Bearer $CF_DNS_API_TOKEN" \ 10 | -H "Content-Type: application/json" \ 11 | | jq --raw-output ".result[0] | .id" 12 | ) 13 | record_ipv4=$(curl -s -X GET \ 14 | "https://api.cloudflare.com/client/v4/zones/${zone_id}/dns_records?name=${CF_RECORD_NAME}&type=A" \ 15 | -H "Authorization: Bearer $CF_DNS_API_TOKEN" \ 16 | -H "Content-Type: application/json" \ 17 | ) 18 | old_ip4=$(echo "$record_ipv4" | jq --raw-output '.result[0] | .content') 19 | if [[ "${current_ipv4}" == "${old_ip4}" ]]; then 20 | printf "%s - IP Address '%s' has not changed" "$(date -u)" "${current_ipv4}" 21 | exit 0 22 | fi 23 | record_ipv4_identifier="$(echo "$record_ipv4" | jq --raw-output '.result[0] | .id')" 24 | update_ipv4=$(curl -s -X PUT \ 25 | "https://api.cloudflare.com/client/v4/zones/${zone_id}/dns_records/${record_ipv4_identifier}" \ 26 | -H "Authorization: Bearer $CF_DNS_API_TOKEN" \ 27 | -H "Content-Type: application/json" \ 28 | --data "{\"id\":\"${zone_id}\",\"type\":\"A\",\"proxied\":true,\"name\":\"${CF_RECORD_NAME}\",\"content\":\"${current_ipv4}\"}" \ 29 | ) 30 | if [[ "$(echo "$update_ipv4" | jq --raw-output '.success')" == "true" ]]; then 31 | printf "%s - Success - IP Address '%s' has been updated" "$(date -u)" "${current_ipv4}" 32 | exit 0 33 | else 34 | printf "%s - Yikes - Updating IP Address '%s' has failed" "$(date -u)" "${current_ipv4}" 35 | exit 1 36 | fi 37 | -------------------------------------------------------------------------------- /cluster/apps/networking/cloudflare-ddns/cronjob.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: batch/v1 3 | kind: CronJob 4 | metadata: 5 | name: &app cloudflare-ddns 6 | namespace: networking 7 | spec: 8 | schedule: "0 * * * *" 9 | concurrencyPolicy: "Forbid" 10 | successfulJobsHistoryLimit: 3 11 | failedJobsHistoryLimit: 5 12 | jobTemplate: 13 | spec: 14 | backoffLimit: 3 15 | ttlSecondsAfterFinished: 300 16 | template: 17 | spec: 18 | restartPolicy: Never 19 | containers: 20 | - name: *app 21 | image: ghcr.io/onedr0p/kubernetes-kubectl:1.29.2@sha256:6324f99979877f3ded7647f4efa4df6d73f11bb9d8302dc81ab4a5d38e406867 22 | envFrom: 23 | - secretRef: 24 | name: cloudflare-creds 25 | command: 26 | - "/bin/bash" 27 | - &scriptPath "/app/cloudflare-ddns.sh" 28 | volumeMounts: 29 | - name: *app 30 | mountPath: *scriptPath 31 | subPath: &scriptName cloudflare-ddns.sh 32 | readOnly: true 33 | volumes: 34 | - name: *app 35 | projected: 36 | defaultMode: 0755 37 | sources: 38 | - configMap: 39 | name: *app 40 | items: 41 | - key: *scriptName 42 | path: *scriptName 43 | -------------------------------------------------------------------------------- /cluster/apps/networking/cloudflare-ddns/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - cronjob.yaml 
6 | - secret.sops.yaml 7 | namespace: networking 8 | configMapGenerator: 9 | - name: cloudflare-ddns 10 | files: 11 | - cloudflare-ddns.sh 12 | generatorOptions: 13 | disableNameSuffixHash: true 14 | annotations: 15 | kustomize.toolkit.fluxcd.io/substitute: disabled 16 | -------------------------------------------------------------------------------- /cluster/apps/networking/cloudflare-ddns/secret.sops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: cloudflare-creds 5 | namespace: networking 6 | type: Opaque 7 | stringData: 8 | CF_DNS_API_TOKEN: ENC[AES256_GCM,data:H8O7bUiSjB/mX7huJau661dJeeOgG4d8H4KDiuG2V/TpJ/TaO2mm7A==,iv:Gg3HLx+6cJVtjg6fffkLm0HAt/g0bSVfnkWj4bSrlcc=,tag:H/EjPEveBa7+LvbIV2jZ0Q==,type:str] 9 | CF_RECORD_NAME: ENC[AES256_GCM,data:FWILW9VEvWn9M2KO,iv:aPJP4SnUsYCgGzKTCUkV+Nak0gphzLz0/sV79lOVE4U=,tag:KCWM+bu70nr4aI0O22PN+w==,type:str] 10 | sops: 11 | kms: [] 12 | gcp_kms: [] 13 | azure_kv: [] 14 | hc_vault: [] 15 | age: 16 | - recipient: age1vfgg4n4snp0ktjm83gsv3nptdw39mw8q7fw7dzghgfpllc040vgsd6yypm 17 | enc: | 18 | -----BEGIN AGE ENCRYPTED FILE----- 19 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBzdVdCNjUwQ1NGQUYwTlJ5 20 | Vk5EOTlaMEJOdVRybmdDckVnVkVkUkx5QWdrCjVQM0YwTlpPVTNlNTVnbnVjUW1y 21 | TG56QTRnZ24rRHRUZXNRMTlZb3RDcGcKLS0tIGRaNWNRaEhBN1c1L1dHUWR3U2kx 22 | VDE1UWNZeHhIdXp2Rm1VMU54Qkk5bncKfKwHQJ2KISsojBNnreY5SmAzZRnKxBc3 23 | iXDHQGV26m/09tiQBDj3o58ktd0sy7nfBtsJxCXeK2YGaCcy+fJHwA== 24 | -----END AGE ENCRYPTED FILE----- 25 | lastmodified: "2022-01-28T01:15:18Z" 26 | mac: ENC[AES256_GCM,data:54ejip+aSUK3ji/Ezx8kj6/EBXrnSjJDymmYgl3uNGsss//FvInUi1sFyk6Gf44BRbMJCrtXElyLRltZ6fBqUKEr0QcuCmCUbkkoBAMMaNZoX9Ip3iB0H0aYwe/aizvVsjfbcfU3PavcJQyLWuvU116Dx6rLeLdnC5jnllZugNI=,iv:EivovRoBK6Elz9fbTsGdvyjbFTAJCSKreJPODNPuTSc=,tag:bbUtWfipQv8xbiVh4CeTAg==,type:str] 27 | pgp: [] 28 | encrypted_regex: ^(data|stringData)$ 29 | version: 3.7.1 30 | -------------------------------------------------------------------------------- /cluster/apps/networking/external-dns/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: external-dns 6 | namespace: networking 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: external-dns 12 | version: 1.13.1 13 | sourceRef: 14 | kind: HelmRepository 15 | name: external-dns 16 | namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | nameOverride: external-dns 29 | interval: 2m 30 | logLevel: debug 31 | env: 32 | - name: CF_API_TOKEN 33 | valueFrom: 34 | secretKeyRef: 35 | key: token 36 | name: cloudflare-token 37 | sources: 38 | - ingress 39 | provider: cloudflare 40 | policy: sync 41 | registry: txt 42 | txtPrefix: k8s. 
43 | domainFilters: 44 | - "${SECRET_DOMAIN}" 45 | extraArgs: 46 | - --cloudflare-proxied 47 | - --annotation-filter=external-dns.alpha.kubernetes.io/target 48 | serviceMonitor: 49 | enabled: true 50 | resources: 51 | requests: 52 | memory: 100Mi 53 | cpu: 25m 54 | limits: 55 | memory: 250Mi 56 | -------------------------------------------------------------------------------- /cluster/apps/networking/external-dns/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - secret.sops.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/networking/external-dns/secret.sops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: cloudflare-token 5 | namespace: networking 6 | type: Opaque 7 | stringData: 8 | token: ENC[AES256_GCM,data:r+sUwwQeGcMVGM1ZkwJFUvRKMNGjdQ/Sc3wv4hfqBn7IOGYoe+P9pQ==,iv:Tj8SnYj3sUhLGqDWf9ZDBLs631us1ZFQKHSQQbH4dhA=,tag:8Kg2u5OL0TpMRuYiTiv9xQ==,type:str] 9 | sops: 10 | kms: [] 11 | gcp_kms: [] 12 | azure_kv: [] 13 | hc_vault: [] 14 | age: 15 | - recipient: age1vfgg4n4snp0ktjm83gsv3nptdw39mw8q7fw7dzghgfpllc040vgsd6yypm 16 | enc: | 17 | -----BEGIN AGE ENCRYPTED FILE----- 18 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBPcjM2VGU2RlhMSHBIckdl 19 | Rk85NFFmdGFTK3c4aFRKd3JCMGpaK3dSdEFBCnQ4cWJzenF0R01VbnNFcjRrYWc5 20 | dXBsVHdJUFZHZ2E1UXNFa1J5K2x3WTQKLS0tIFlEcG5kaXpnL2dSTmdna2RTOUQy 21 | blVrZmdYczdQczNoN3dhdG1QbDB6dk0KXKCXhPA8LyFwYaC8FMWeVOCZvHtnSKUv 22 | oaYjt9OZNyHu2bTDB9od+VVwbOWTfVN0kHlZglMc1c44OOvU2oKABQ== 23 | -----END AGE ENCRYPTED FILE----- 24 | lastmodified: "2021-12-01T05:29:02Z" 25 | mac: ENC[AES256_GCM,data:BaseKXWR6C4izSkg8VA7fVCDHEuy4beYmGFD26zFMIRFvikUUx6YdfhmMEYKeNCLeuS5iS7JZXuz/cxlFZAHar2AewX/v418e5ArXIvbffLON2SFQN8BmElsDlQjZqN91IeH183sXZ0PEZS3vX2lTMHKSoO5gES5Q12qkSxpbdw=,iv:EsNeYCq8BD+PFa8fE4YIsvqAEFrMVb0Bix8OAhIjP2Y=,tag:EP4v6/Vbjmn0H9ndoJHqWg==,type:str] 26 | pgp: [] 27 | encrypted_regex: ^(data|stringData)$ 28 | version: 3.7.1 29 | -------------------------------------------------------------------------------- /cluster/apps/networking/ingress-nginx/cloudflare-proxied-networks.txt: -------------------------------------------------------------------------------- 1 | 173.245.48.0/20\,103.21.244.0/22\,103.22.200.0/22\,103.31.4.0/22\,141.101.64.0/18\,108.162.192.0/18\,190.93.240.0/20\,188.114.96.0/20\,197.234.240.0/22\,198.41.128.0/17\,162.158.0.0/15\,104.16.0.0/13\,104.24.0.0/14\,172.64.0.0/13\,131.0.72.0/22\,2400:cb00::/32\,2606:4700::/32\,2803:f800::/32\,2405:b500::/32\,2405:8100::/32\,2a06:98c0::/29\,2c0f:f248::/32 2 | -------------------------------------------------------------------------------- /cluster/apps/networking/ingress-nginx/default-certificate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: cert-manager.io/v1 3 | kind: Certificate 4 | metadata: 5 | name: "${SECRET_DOMAIN/./-}" 6 | namespace: networking 7 | spec: 8 | secretName: "${SECRET_DOMAIN/./-}-tls" 9 | issuerRef: 10 | kind: ClusterIssuer 11 | name: letsencrypt-production 12 | commonName: "${SECRET_DOMAIN}" 13 | dnsNames: 14 | - "${SECRET_DOMAIN}" 15 | - "*.${SECRET_DOMAIN}" 16 | -------------------------------------------------------------------------------- /cluster/apps/networking/ingress-nginx/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: networking 5 | resources: 6 | - default-certificate.yaml 7 | - helm-release.yaml 8 | - monitoring 9 | configMapGenerator: 10 | - name: cloudflare-proxied-networks 11 | files: 12 | - cloudflare-proxied-networks.txt 13 | generatorOptions: 14 | disableNameSuffixHash: true 15 | -------------------------------------------------------------------------------- /cluster/apps/networking/ingress-nginx/monitoring/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - alerts.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/networking/k8s-gateway/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: k8s-gateway 6 | namespace: networking 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: k8s-gateway 12 | version: 2.1.0 13 | sourceRef: 14 | kind: HelmRepository 15 | name: k8s-gateway 16 | namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | nameOverride: k8s-gateway 29 | domain: "${SECRET_DOMAIN}" 30 | ttl: 1 31 | service: 32 | type: LoadBalancer 33 | port: 53 34 | annotations: 35 | metallb.universe.tf/loadBalancerIPs: "${LB_K8S_GATEWAY}" 36 | externalTrafficPolicy: Local 37 | resources: 38 | requests: 39 | memory: 100Mi 40 | cpu: 25m 41 | limits: 42 | memory: 250Mi 43 | -------------------------------------------------------------------------------- /cluster/apps/networking/k8s-gateway/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/networking/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: networking 5 | resources: 6 | - namespace.yaml 7 | # - blocky 8 | - cloudflare-ddns 9 | - external-dns 10 | - ingress-nginx 11 | - k8s-gateway 12 | -------------------------------------------------------------------------------- /cluster/apps/networking/metallb/custom-resources/bgp-advertisement.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: metallb.io/v1beta1 3 | kind: BGPAdvertisement 4 | metadata: 5 | name: bgp-lb 6 | namespace: networking 7 | spec: 8 | aggregationLength: 32 9 | ipAddressPools: 10 | - bgp-pool 11 | peers: 12 | - mikrotik 13 | -------------------------------------------------------------------------------- /cluster/apps/networking/metallb/custom-resources/bgp-peer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: metallb.io/v1beta2 3 | kind: BGPPeer 4 | metadata: 5 | name: mikrotik 6 | namespace: networking 7 | spec: 8 | peerAddress: ${GATEWAY_IP} 9 | peerASN: 64512 10 | 
peerPort: 179 11 | myASN: 64512 12 | holdTime: 1m30s 13 | -------------------------------------------------------------------------------- /cluster/apps/networking/metallb/custom-resources/ip-address-pool.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: metallb.io/v1beta1 3 | kind: IPAddressPool 4 | metadata: 5 | name: bgp-pool 6 | namespace: networking 7 | spec: 8 | addresses: 9 | - ${K8S_LB_CIDR} 10 | autoAssign: true 11 | -------------------------------------------------------------------------------- /cluster/apps/networking/metallb/custom-resources/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - bgp-advertisement.yaml 6 | - bgp-peer.yaml 7 | - ip-address-pool.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/networking/metallb/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: metallb 6 | namespace: networking 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: metallb 12 | version: 0.13.12 13 | sourceRef: 14 | kind: HelmRepository 15 | name: metallb 16 | namespace: flux-system 17 | dependsOn: 18 | - name: cilium 19 | namespace: kube-system 20 | maxHistory: 3 21 | install: 22 | remediation: 23 | retries: 3 24 | upgrade: 25 | cleanupOnFail: true 26 | remediation: 27 | retries: 3 28 | uninstall: 29 | keepHistory: false 30 | values: 31 | prometheus: 32 | serviceAccount: kube-prometheus-stack-prometheus 33 | namespace: monitoring 34 | podMonitor: 35 | enabled: true 36 | prometheusRule: 37 | enabled: true 38 | frr: 39 | enabled: false 40 | image: 41 | repository: docker.io/frrouting/frr 42 | tag: v8.4.1 43 | controller: 44 | logLevel: error 45 | crds: 46 | enabled: true 47 | -------------------------------------------------------------------------------- /cluster/apps/networking/metallb/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | -------------------------------------------------------------------------------- /cluster/apps/networking/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: networking 6 | labels: 7 | kustomize.toolkit.fluxcd.io/prune: disabled 8 | -------------------------------------------------------------------------------- /cluster/apps/openebs/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: openebs 5 | resources: 6 | - namespace.yaml 7 | - zfs-localpv 8 | -------------------------------------------------------------------------------- /cluster/apps/openebs/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: openebs 6 | labels: 7 | kustomize.toolkit.fluxcd.io/prune: disabled 8 | --------------------------------------------------------------------------------
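Note: once Flux reconciles the MetalLB Kustomizations, the ${GATEWAY_IP} and ${K8S_LB_CIDR} placeholders above are filled in by postBuild variable substitution from the cluster-config ConfigMap, so a quick sanity check is to list the rendered custom resources and tail the speaker logs for the BGP session state. This is only a sketch; the label selector assumes the chart's default app.kubernetes.io labels:

$ kubectl -n networking get ipaddresspools.metallb.io,bgppeers.metallb.io,bgpadvertisements.metallb.io
$ kubectl -n networking logs -l app.kubernetes.io/name=metallb,app.kubernetes.io/component=speaker --tail=20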
/cluster/apps/openebs/zfs-localpv/config/hostpath-volumes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: PersistentVolume 4 | metadata: 5 | name: media 6 | spec: 7 | capacity: 8 | storage: 100Gi 9 | volumeMode: Filesystem 10 | accessModes: 11 | - ReadWriteOnce 12 | persistentVolumeReclaimPolicy: Retain 13 | hostPath: 14 | path: /deadpool/media 15 | storageClassName: openebs-zfspv-rust 16 | --- 17 | apiVersion: v1 18 | kind: PersistentVolume 19 | metadata: 20 | name: minio 21 | spec: 22 | capacity: 23 | storage: 100Gi 24 | volumeMode: Filesystem 25 | accessModes: 26 | - ReadWriteOnce 27 | persistentVolumeReclaimPolicy: Retain 28 | hostPath: 29 | path: /deadpool/containous/minio 30 | storageClassName: openebs-zfspv-rust 31 | --- 32 | apiVersion: v1 33 | kind: PersistentVolume 34 | metadata: 35 | name: snapshots 36 | spec: 37 | capacity: 38 | storage: 250Gi 39 | volumeMode: Filesystem 40 | accessModes: 41 | - ReadWriteOnce 42 | persistentVolumeReclaimPolicy: Retain 43 | hostPath: 44 | path: /deadpool/containous/kopia-snapshots 45 | storageClassName: openebs-zfspv-rust 46 | -------------------------------------------------------------------------------- /cluster/apps/openebs/zfs-localpv/config/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - hostpath-volumes.yaml 6 | - storage-class.yaml 7 | # - volume-snapshot-class.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/openebs/zfs-localpv/config/storage-class.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: storage.k8s.io/v1 3 | kind: StorageClass 4 | metadata: 5 | name: openebs-zfspv-rust 6 | parameters: 7 | fstype: "zfs" 8 | poolname: deadpool/containous 9 | shared: "yes" 10 | provisioner: zfs.csi.openebs.io 11 | reclaimPolicy: Delete 12 | allowVolumeExpansion: true 13 | volumeBindingMode: Immediate 14 | --- 15 | apiVersion: storage.k8s.io/v1 16 | kind: StorageClass 17 | metadata: 18 | annotations: 19 | storageclass.kubernetes.io/is-default-class: "true" 20 | name: openebs-zfspv-ssd 21 | parameters: 22 | fstype: "zfs" 23 | poolname: k8s/containous 24 | shared: "yes" 25 | provisioner: zfs.csi.openebs.io 26 | reclaimPolicy: Delete 27 | allowVolumeExpansion: true 28 | volumeBindingMode: Immediate 29 | --- 30 | apiVersion: storage.k8s.io/v1 31 | kind: StorageClass 32 | metadata: 33 | name: openebs-zfspv-zvol 34 | parameters: 35 | fstype: "ext4" 36 | poolname: deadpool/containous 37 | provisioner: zfs.csi.openebs.io 38 | reclaimPolicy: Delete 39 | allowVolumeExpansion: true 40 | volumeBindingMode: Immediate 41 | --- 42 | apiVersion: storage.k8s.io/v1 43 | kind: StorageClass 44 | metadata: 45 | name: openebs-zfspv-block 46 | parameters: 47 | poolname: deadpool/containous 48 | provisioner: zfs.csi.openebs.io 49 | reclaimPolicy: Delete 50 | allowVolumeExpansion: true 51 | volumeBindingMode: Immediate 52 | -------------------------------------------------------------------------------- /cluster/apps/openebs/zfs-localpv/config/volume-snapshot-class.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: snapshot.storage.k8s.io/v1 3 | kind: VolumeSnapshotClass 4 | metadata: 5 | name: zfspv-snapclass 6 | annotations: 7 | 
snapshot.storage.kubernetes.io/is-default-class: "true" 8 | velero.io/csi-volumesnapshot-class: "true" 9 | driver: zfs.csi.openebs.io 10 | deletionPolicy: Retain 11 | -------------------------------------------------------------------------------- /cluster/apps/openebs/zfs-localpv/helm-release.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: helm.toolkit.fluxcd.io/v2beta2 3 | kind: HelmRelease 4 | metadata: 5 | name: zfs-localpv 6 | namespace: openebs 7 | spec: 8 | interval: 30m 9 | chart: 10 | spec: 11 | chart: zfs-localpv 12 | version: 2.4.0 13 | sourceRef: 14 | kind: HelmRepository 15 | name: openebs-zfs-localpv 16 | namespace: flux-system 17 | maxHistory: 3 18 | install: 19 | remediation: 20 | retries: 3 21 | upgrade: 22 | cleanupOnFail: true 23 | remediation: 24 | retries: 3 25 | uninstall: 26 | keepHistory: false 27 | values: 28 | crd: 29 | enableInstall: false 30 | -------------------------------------------------------------------------------- /cluster/apps/openebs/zfs-localpv/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - config 7 | -------------------------------------------------------------------------------- /cluster/apps/storage/kopia/config/repository.config: -------------------------------------------------------------------------------- 1 | { 2 | "storage": { 3 | "type": "filesystem", 4 | "config": { 5 | "path": "/snapshots", 6 | "dirShards": null 7 | } 8 | }, 9 | "caching": { 10 | "cacheDirectory": "cache", 11 | "maxCacheSize": 5242880000, 12 | "maxMetadataCacheSize": 5242880000, 13 | "maxListCacheDuration": 30 14 | }, 15 | "hostname": "cluster", 16 | "username": "root", 17 | "description": "Cluster", 18 | "enableActions": false, 19 | "formatBlobCacheDuration": 900000000000 20 | } 21 | -------------------------------------------------------------------------------- /cluster/apps/storage/kopia/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - pvc.yaml 7 | # - secret.sops.yaml 8 | namespace: storage 9 | configMapGenerator: 10 | - name: kopia 11 | files: 12 | - ./config/repository.config 13 | generatorOptions: 14 | disableNameSuffixHash: true 15 | -------------------------------------------------------------------------------- /cluster/apps/storage/kopia/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: snapshots 6 | namespace: storage 7 | spec: 8 | volumeName: snapshots 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 250Gi 14 | storageClassName: openebs-zfspv-rust 15 | -------------------------------------------------------------------------------- /cluster/apps/storage/kopia/secret.sops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: minio-config 5 | namespace: storage 6 | type: Opaque 7 | stringData: 8 | repository.config: 
ENC[AES256_GCM,data:ac2rwMtmM8FuXlqoAq1Hw/Ri6uw98ny6rqoxjWW1RmoJXbqUxY6LnbHegeCIyPJBwIjzDAd46C+2W0b/v+Ncx9LZwU6uSrZ+X9NhKTu5tAL4o6LtgoP+VoRrsz1XMoEhl9XBTd0P8BCjdLCZOleqw1o1wdMJSfZACVuPfkF5xPtD/Tci+vnRiBZheWS8q0cVvhDXQJ7FxZX6OQaGkzlHSQ4aEkoGNEJ5YE6JkQ3Xkb7N7rlZjdb4IsOZck2gHC3Qil2Gxywo9GTLcpTToOVWvi+P+/ZQhWPIPFs73vkcIpN6QffknTvJBGmNAgVj+JOS241hDAVJhI6P43WLGMW2YTjuMEGEJv6BljM3Wv+0dWSRBr0J0f4TLRJuZoL6be1pxivPwq+za6/UWltbZk+CumU+Qi8Wax/bzR93eZSfsEjd99MLHOobGcCTIOjawSALmOQUqO7UAMPHFt8scl8W2MR8l+XJ9KkJUJDPcSezq7apKNuQd9DmpMY44iSMjsw1D+dUPF013aoyxzK8NnzO3hG0tHCVqz0cFoypFGYb6ocNKjzGMkOvKhvNuLaU8H00arh8whqbSMC7ouu7p0UuU3RY0MM2HYXWL5ejtAHTUnu+DeAwKRyfDZucLOVzdrfk4qxzIjXBMAj0Wop3k3vridtVz3jmhuTfkwymCuLbVDIdmukgYionbXBV20m+VkNLgC4gXhyTT73TFIinpqjiiMpxiRpFzdT8dXMFIWNVMy3KhRjEKihaTzWuGJfi3cfSxqXURjA7bqve1tqW/22wG51Wt3g9CsZK4jol04B7zm9vtLeh5ICGSzXnemD9eIwiTebaD+buo+ERWKpHwLwwb3O1lxGeHf5PaLNZilNFrxFdirzhBJqm4wI=,iv:gtV9CbaxHW+Rb9qodYewrxXR9TsHHK+mFKhpjYvqQwA=,tag:KNXtvUMXOL2FTwSNm5+mXw==,type:str] 9 | sops: 10 | kms: [] 11 | gcp_kms: [] 12 | azure_kv: [] 13 | hc_vault: [] 14 | age: 15 | - recipient: age1vfgg4n4snp0ktjm83gsv3nptdw39mw8q7fw7dzghgfpllc040vgsd6yypm 16 | enc: | 17 | -----BEGIN AGE ENCRYPTED FILE----- 18 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAxUk5LMTBPV2NKa0ZQMThp 19 | NmxFSHlVck1HUkJWYTdDdDVTaUExOHl0dW5BCjRpYVliQlR0ci9wbnFxRDh6YzhO 20 | dFJRVWhnRms0cEFxVlNjQ04yWHlOd28KLS0tIFQ2cXU3N0NWV0FlcTcrV1RWdFp6 21 | dFlPQmhoc0hmYW1VMXZWcEFVNmxDK3MK0lcjqGLJrCdax69SMWSznJxS9Dfp4GS3 22 | /QegwEoDRjd9dY6eDN/iMnjBe6zXsHeTpIiHdHFNx32LbHT7qCk/7Q== 23 | -----END AGE ENCRYPTED FILE----- 24 | lastmodified: "2022-09-06T05:57:02Z" 25 | mac: ENC[AES256_GCM,data:1TA0RE526qa2EkvOfGAHnu82b2F7yFCFHGC1Q3ZaIiuLokE5M2M1iwVW7CA2kEgaJNmFTBkN6XtMXLmU8YLaqcqOYrjwXc6ggGqBa3A0gcgGF9mqNezUiVfAqKCg2BXIBsp2tum9KrbtIgIDlnmu2jl03B1L5Ka51zd+l1uWd9s=,iv:Q0RsdooKXdDVBuMydHn5rtUnxm8iiOEZ2yRSrxoVE44=,tag:VMpvkmUJjCOMCHCISgLqXQ==,type:str] 26 | pgp: [] 27 | encrypted_regex: ^(data|stringData)$ 28 | version: 3.7.3 29 | -------------------------------------------------------------------------------- /cluster/apps/storage/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: storage 5 | resources: 6 | - namespace.yaml 7 | # - kopia 8 | - minio 9 | -------------------------------------------------------------------------------- /cluster/apps/storage/minio/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - helm-release.yaml 6 | - prometheus-rule.yaml 7 | - pvc.yaml 8 | -------------------------------------------------------------------------------- /cluster/apps/storage/minio/pvc.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | kind: PersistentVolumeClaim 3 | apiVersion: v1 4 | metadata: 5 | name: minio 6 | namespace: storage 7 | spec: 8 | volumeName: minio 9 | accessModes: 10 | - ReadWriteOnce 11 | resources: 12 | requests: 13 | storage: 100Gi 14 | storageClassName: openebs-zfspv-rust 15 | -------------------------------------------------------------------------------- /cluster/apps/storage/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: storage 6 | labels: 7 | 
kustomize.toolkit.fluxcd.io/prune: disabled 8 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | namespace: system-upgrade 5 | resources: 6 | # renovate: datasource=docker image=rancher/system-upgrade-controller 7 | - https://github.com/rancher/system-upgrade-controller/releases/download/v0.13.2/crd.yaml 8 | - namespace.yaml 9 | - system-upgrade-controller 10 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/namespace.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: system-upgrade 6 | labels: 7 | kustomize.toolkit.fluxcd.io/prune: disabled 8 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/system-upgrade-controller/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - github.com/rancher/system-upgrade-controller?ref=v0.13.2 6 | - plans 7 | images: 8 | - name: rancher/system-upgrade-controller 9 | newTag: v0.13.2 10 | patchesStrategicMerge: 11 | # Delete namespace resource 12 | - patches.yaml 13 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/system-upgrade-controller/patches.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Namespace should already exist 3 | # Delete the system-upgrade namespace 4 | # from the kustomization 5 | $patch: delete 6 | apiVersion: v1 7 | kind: Namespace 8 | metadata: 9 | name: system-upgrade 10 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/system-upgrade-controller/plans/agent.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: upgrade.cattle.io/v1 3 | kind: Plan 4 | metadata: 5 | name: k3s-agent 6 | namespace: system-upgrade 7 | labels: 8 | k3s-upgrade: agent 9 | spec: 10 | # renovate: datasource=github-releases depName=k3s-io/k3s 11 | version: "v1.29.0+k3s1" 12 | serviceAccountName: system-upgrade 13 | concurrency: 1 14 | nodeSelector: 15 | matchExpressions: 16 | - key: node-role.kubernetes.io/master 17 | operator: NotIn 18 | values: 19 | - "true" 20 | prepare: 21 | image: rancher/k3s-upgrade 22 | args: 23 | - "prepare" 24 | - "k3s-server" 25 | drain: 26 | force: true 27 | skipWaitForDeleteTimeout: 60 28 | upgrade: 29 | image: rancher/k3s-upgrade 30 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/system-upgrade-controller/plans/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - server.yaml 6 | - agent.yaml 7 | -------------------------------------------------------------------------------- /cluster/apps/system-upgrade/system-upgrade-controller/plans/server.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: upgrade.cattle.io/v1 3 | kind: 
Plan 4 | metadata: 5 | name: k3s-server 6 | namespace: system-upgrade 7 | labels: 8 | k3s-upgrade: server 9 | spec: 10 | # renovate: datasource=github-releases depName=k3s-io/k3s 11 | version: "v1.29.0+k3s1" 12 | serviceAccountName: system-upgrade 13 | concurrency: 1 14 | cordon: true 15 | nodeSelector: 16 | matchExpressions: 17 | - key: node-role.kubernetes.io/master 18 | operator: In 19 | values: 20 | - "true" 21 | tolerations: 22 | - key: node-role.kubernetes.io/master 23 | operator: Exists 24 | upgrade: 25 | image: rancher/k3s-upgrade 26 | -------------------------------------------------------------------------------- /cluster/bootstrap.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1 3 | kind: Kustomization 4 | metadata: 5 | name: flux-cluster-bootstrap 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | path: ./cluster/flux/bootstrap 10 | prune: true 11 | wait: false 12 | sourceRef: 13 | kind: GitRepository 14 | name: home-ops 15 | decryption: 16 | provider: sops 17 | secretRef: 18 | name: sops-age 19 | -------------------------------------------------------------------------------- /cluster/crds/kube-prometheus-stack/crds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1 3 | kind: Kustomization 4 | metadata: 5 | name: kube-prometheus-stack-crds 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | retryInterval: 1m 10 | prune: false 11 | wait: true 12 | sourceRef: 13 | kind: GitRepository 14 | name: kube-prometheus-stack 15 | -------------------------------------------------------------------------------- /cluster/crds/kube-prometheus-stack/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - crds.yaml 6 | -------------------------------------------------------------------------------- /cluster/crds/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - kube-prometheus-stack 5 | - openebs-zfs-localpv 6 | -------------------------------------------------------------------------------- /cluster/crds/openebs-zfs-localpv/crds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1 3 | kind: Kustomization 4 | metadata: 5 | name: openebs-zfs-localpv-crds 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | retryInterval: 1m 10 | prune: false 11 | wait: true 12 | sourceRef: 13 | kind: GitRepository 14 | name: openebs-zfs-localpv 15 | -------------------------------------------------------------------------------- /cluster/crds/openebs-zfs-localpv/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - crds.yaml 6 | -------------------------------------------------------------------------------- /cluster/flux/addons/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - notifications 6 | - webhook 7 | 
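Note: a minimal way to confirm that the bootstrap Kustomization above (and everything it fans out to, including these addons) reconciled cleanly is the flux CLI, assuming a version matching the pinned controllers:

$ flux get kustomizations -A
$ flux tree kustomization flux-cluster-bootstrap -n flux-system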
-------------------------------------------------------------------------------- /cluster/flux/addons/notifications/alertmanager/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - notification.yaml 6 | -------------------------------------------------------------------------------- /cluster/flux/addons/notifications/alertmanager/notification.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: notification.toolkit.fluxcd.io/v1beta3 3 | kind: Provider 4 | metadata: 5 | name: alertmanager 6 | namespace: flux-system 7 | spec: 8 | type: alertmanager 9 | address: http://kube-prometheus-stack-alertmanager.monitoring:9093/api/v2/alerts/ 10 | --- 11 | apiVersion: notification.toolkit.fluxcd.io/v1beta3 12 | kind: Alert 13 | metadata: 14 | name: home-ops 15 | namespace: flux-system 16 | spec: 17 | providerRef: 18 | name: alertmanager 19 | eventSeverity: error 20 | eventSources: 21 | - kind: GitRepository 22 | name: "*" 23 | - kind: HelmRelease 24 | name: "*" 25 | - kind: HelmRepository 26 | name: "*" 27 | - kind: ImageRepository 28 | name: "*" 29 | - kind: ImageUpdateAutomation 30 | name: "*" 31 | - kind: Kustomization 32 | name: "*" 33 | exclusionList: 34 | - "error.*lookup github\\.com" 35 | - "waiting.*socket" 36 | - "dial.*tcp.*timeout" 37 | suspend: false 38 | -------------------------------------------------------------------------------- /cluster/flux/addons/notifications/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - https://raw.githubusercontent.com/fluxcd/flux2/v0.36.0/manifests/monitoring/monitoring-config/podmonitor.yaml 6 | - alertmanager 7 | - prometheus-rule.yaml 8 | -------------------------------------------------------------------------------- /cluster/flux/addons/notifications/prometheus-rule.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: monitoring.coreos.com/v1 3 | kind: PrometheusRule 4 | metadata: 5 | name: flux 6 | namespace: flux-system 7 | spec: 8 | groups: 9 | - name: flux 10 | rules: 11 | - alert: FluxComponentAbsent 12 | annotations: 13 | description: Flux component has disappeared from Prometheus target discovery. 14 | summary: Flux component is down. 15 | expr: | 16 | absent(up{job=~".*flux-system.*"} == 1) 17 | for: 5m 18 | labels: 19 | severity: critical 20 | - alert: FluxReconciliationFailure 21 | annotations: 22 | description: 23 | "{{ $labels.kind }} {{ $labels.namespace }}/{{ $labels.name }} reconciliation has been failing 24 | for more than ten minutes." 25 | summary: Flux reconciliation failure. 
26 | expr: | 27 | max(gotk_reconcile_condition{status="False",type="Ready"}) by (namespace, name, kind) 28 | + 29 | on(namespace, name, kind) (max(gotk_reconcile_condition{status="Deleted"}) 30 | by (namespace, name, kind)) * 2 == 1 31 | for: 10m 32 | labels: 33 | severity: critical 34 | -------------------------------------------------------------------------------- /cluster/flux/addons/webhook/github/ingress.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: networking.k8s.io/v1 3 | kind: Ingress 4 | metadata: 5 | name: webhook-receiver 6 | namespace: flux-system 7 | annotations: 8 | cert-manager.io/cluster-issuer: letsencrypt-production 9 | external-dns.alpha.kubernetes.io/target: "ipv4.${SECRET_DOMAIN}" 10 | spec: 11 | ingressClassName: nginx 12 | rules: 13 | - host: &host "flux.${SECRET_DOMAIN}" 14 | http: 15 | paths: 16 | - path: / 17 | pathType: Prefix 18 | backend: 19 | service: 20 | name: webhook-receiver 21 | port: 22 | number: 80 23 | tls: 24 | - secretName: flux-tls 25 | hosts: 26 | - *host 27 | -------------------------------------------------------------------------------- /cluster/flux/addons/webhook/github/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - ingress.yaml 6 | - receiver.yaml 7 | - secret.sops.yaml 8 | -------------------------------------------------------------------------------- /cluster/flux/addons/webhook/github/receiver.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: notification.toolkit.fluxcd.io/v1 3 | kind: Receiver 4 | metadata: 5 | name: home-ops 6 | namespace: flux-system 7 | spec: 8 | type: github 9 | events: 10 | - "ping" 11 | - "push" 12 | secretRef: 13 | name: github-webhook-token 14 | resources: 15 | - kind: GitRepository 16 | name: home-ops 17 | -------------------------------------------------------------------------------- /cluster/flux/addons/webhook/github/secret.sops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: github-webhook-token 5 | namespace: flux-system 6 | stringData: 7 | token: ENC[AES256_GCM,data:y3DuzZpDHdcLvffoO3khYMRUkO0ejHHNvbghL8+yggsIY0xiDmbPcQ==,iv:bjQp4wmtBLaBqR+6d8q9WzdBaEKCL3DiU86PcnMFjUM=,tag:0Z27xXFeuFsYQEpJs0+xsw==,type:str] 8 | sops: 9 | kms: [] 10 | gcp_kms: [] 11 | azure_kv: [] 12 | hc_vault: [] 13 | age: 14 | - recipient: age1vfgg4n4snp0ktjm83gsv3nptdw39mw8q7fw7dzghgfpllc040vgsd6yypm 15 | enc: | 16 | -----BEGIN AGE ENCRYPTED FILE----- 17 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSB1K0s2T0F3TXY4NXdZbWlr 18 | Q2t0K0FyQ1NNdFc5cmRuMzN5Nm1CT1daQXpnCmZSVklZbkFoKzVWZDZnakxTZWhE 19 | YllQOW9Bd01rSWFyeXk4d0FJOW5ZMTAKLS0tIE9BNmo4Rkdqc3ZWOHAwU2U0eFc1 20 | MnRuV1lPMWRVeFdZNnJ0cDRYSUdoWjgKJo4WNB9MshDj+5pcXYNzIgO2adXtQWos 21 | Uv2AmTRftDN6yCVwz5rtZj6n0edTZjf3hMVr6ZgmkgB8EyLAOyf49g== 22 | -----END AGE ENCRYPTED FILE----- 23 | lastmodified: "2022-02-23T15:49:05Z" 24 | mac: ENC[AES256_GCM,data:qP5QycixWMOvz6iJyE9TUfNnb3nLfIq+HzHq+PKoGLSMbxT0GLz589XEr/w9j2b5cMpRsw9wrm5UbNNNiuGKy+RXBf0efrNlzIv+SFw1nMDguHDyRt4TzksIPXR9nPiQRaADZQESi68sE+Xabq3bjY8ve7Cfz/l+jAmGShPhYtM=,iv:BNsBCPSu/gA1A6zAhycy7Jui40X8xI/CtZO3iLcBvrU=,tag:KgusThVvYuiCBRVZneQBTA==,type:str] 25 | pgp: [] 26 | encrypted_regex: ^(data|stringData)$ 27 | version: 3.7.1 28 | 
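Note: this webhook token, like every *.sops.yaml file in the repo, is encrypted with sops against the age recipient shown in its metadata, so editing it requires the matching age private key (sops decrypts for editing and re-encrypts on save using the repository's creation rules). As a rough sketch, and assuming the Receiver v1 API publishes the generated path in its status, the GitHub webhook URL can be recovered like this:

$ sops cluster/flux/addons/webhook/github/secret.sops.yaml
$ kubectl -n flux-system get receiver home-ops -o jsonpath='{.status.webhookPath}'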
-------------------------------------------------------------------------------- /cluster/flux/addons/webhook/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - github 6 | -------------------------------------------------------------------------------- /cluster/flux/bootstrap/cluster-apps.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1 3 | kind: Kustomization 4 | metadata: 5 | name: cluster-apps 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | retryInterval: 1m 10 | timeout: 3m 11 | dependsOn: 12 | - name: cluster-crds 13 | - name: metallb-crs 14 | path: ./cluster/apps 15 | prune: true 16 | wait: true 17 | sourceRef: 18 | kind: GitRepository 19 | name: home-ops 20 | decryption: 21 | provider: sops 22 | secretRef: 23 | name: sops-age 24 | postBuild: 25 | substitute: {} 26 | substituteFrom: 27 | - kind: ConfigMap 28 | name: cluster-config 29 | - kind: Secret 30 | name: cluster-secrets 31 | -------------------------------------------------------------------------------- /cluster/flux/bootstrap/cluster-crds.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1 3 | kind: Kustomization 4 | metadata: 5 | name: cluster-crds 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | retryInterval: 1m 10 | timeout: 3m 11 | dependsOn: 12 | - name: flux-repositories 13 | path: ./cluster/crds 14 | prune: false 15 | wait: true 16 | sourceRef: 17 | kind: GitRepository 18 | name: home-ops 19 | -------------------------------------------------------------------------------- /cluster/flux/bootstrap/flux-addons.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1 3 | kind: Kustomization 4 | metadata: 5 | name: flux-addons 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | retryInterval: 1m 10 | timeout: 3m 11 | dependsOn: 12 | - name: flux 13 | - name: kube-prometheus-stack-crds 14 | path: ./cluster/flux/addons 15 | prune: true 16 | wait: false 17 | sourceRef: 18 | kind: GitRepository 19 | name: home-ops 20 | decryption: 21 | provider: sops 22 | secretRef: 23 | name: sops-age 24 | postBuild: 25 | substitute: {} 26 | substituteFrom: 27 | - kind: ConfigMap 28 | name: cluster-config 29 | - kind: Secret 30 | name: cluster-secrets 31 | -------------------------------------------------------------------------------- /cluster/flux/bootstrap/flux-config.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1 3 | kind: Kustomization 4 | metadata: 5 | name: flux-config 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | retryInterval: 1m 10 | timeout: 3m 11 | path: ./cluster/flux/config 12 | prune: true 13 | wait: true 14 | sourceRef: 15 | kind: GitRepository 16 | name: home-ops 17 | decryption: 18 | provider: sops 19 | secretRef: 20 | name: sops-age 21 | -------------------------------------------------------------------------------- /cluster/flux/bootstrap/flux-repositories.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1 3 | kind: Kustomization 4 | metadata: 5 | name: flux-repositories 6 | namespace: 
flux-system 7 | spec: 8 | interval: 30m 9 | retryInterval: 1m 10 | timeout: 3m 11 | path: ./cluster/flux/repositories 12 | prune: true 13 | wait: true 14 | sourceRef: 15 | kind: GitRepository 16 | name: home-ops 17 | decryption: 18 | provider: sops 19 | secretRef: 20 | name: sops-age 21 | -------------------------------------------------------------------------------- /cluster/flux/bootstrap/flux.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1 3 | kind: Kustomization 4 | metadata: 5 | name: flux 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | retryInterval: 1m 10 | timeout: 3m 11 | dependsOn: 12 | - name: flux-repositories 13 | path: ./manifests/install 14 | prune: true 15 | wait: true 16 | sourceRef: 17 | kind: GitRepository 18 | name: flux 19 | patches: 20 | - target: 21 | kind: Deployment 22 | patch: |- 23 | - op: replace 24 | path: /spec/template/spec/containers/0/resources/requests/memory 25 | value: 512Mi 26 | - op: replace 27 | path: /spec/template/spec/containers/0/resources/limits/memory 28 | value: 2Gi 29 | -------------------------------------------------------------------------------- /cluster/flux/bootstrap/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - cluster-apps.yaml 6 | - cluster-crds.yaml 7 | - flux.yaml 8 | - flux-addons.yaml 9 | - flux-config.yaml 10 | - flux-repositories.yaml 11 | - metallb.yaml 12 | -------------------------------------------------------------------------------- /cluster/flux/bootstrap/metallb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.toolkit.fluxcd.io/v1 3 | kind: Kustomization 4 | metadata: 5 | name: metallb 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | retryInterval: 1m 10 | timeout: 3m 11 | path: ./cluster/apps/networking/metallb 12 | prune: true 13 | wait: true 14 | sourceRef: 15 | kind: GitRepository 16 | name: home-ops 17 | --- 18 | apiVersion: kustomize.toolkit.fluxcd.io/v1 19 | kind: Kustomization 20 | metadata: 21 | name: metallb-crs 22 | namespace: flux-system 23 | spec: 24 | interval: 30m 25 | retryInterval: 1m 26 | timeout: 3m 27 | dependsOn: 28 | - name: metallb 29 | path: ./cluster/apps/networking/metallb/custom-resources 30 | prune: true 31 | wait: true 32 | sourceRef: 33 | kind: GitRepository 34 | name: home-ops 35 | postBuild: 36 | substitute: {} 37 | substituteFrom: 38 | - kind: ConfigMap 39 | name: cluster-config 40 | -------------------------------------------------------------------------------- /cluster/flux/config/config-map.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: cluster-config 6 | namespace: flux-system 7 | data: 8 | GATEWAY_IP: 10.10.0.1 9 | MANAGEMENT_CIDR: 10.10.0.0/24 10 | TZ: America/New_York 11 | # 12 | K8S_POD_CIDR: 172.20.0.0/16 13 | K8S_SERVICE_CIDR: 172.22.0.0/16 14 | K8S_LB_CIDR: 10.10.2.0/24 15 | # 16 | LB_NGINX: 10.10.2.1 17 | LB_K8S_GATEWAY: 10.10.2.2 18 | LB_BLOCKY: 10.10.2.53 19 | LB_NUT: 10.10.2.101 20 | LB_PLEX: 10.10.2.200 21 | LB_QBITTORRENT: 10.10.2.201 22 | LB_VECTOR: 10.10.2.20 23 | -------------------------------------------------------------------------------- /cluster/flux/config/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - config-map.yaml 7 | -------------------------------------------------------------------------------- /cluster/flux/repositories/git/flux.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1 3 | kind: GitRepository 4 | metadata: 5 | name: flux 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | url: https://github.com/fluxcd/flux2.git 10 | ref: 11 | # renovate: datasource=github-releases depName=fluxcd/flux2 12 | tag: v2.2.2 13 | ignore: | 14 | # exclude all 15 | /* 16 | # include manifest dir 17 | !/manifests 18 | -------------------------------------------------------------------------------- /cluster/flux/repositories/git/home-ops.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1 3 | kind: GitRepository 4 | metadata: 5 | name: home-ops 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | url: ssh://git@github.com/jr0dd/home-ops 10 | ref: 11 | branch: main 12 | secretRef: 13 | name: flux-github-key 14 | -------------------------------------------------------------------------------- /cluster/flux/repositories/git/kube-prometheus-stack.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1 3 | kind: GitRepository 4 | metadata: 5 | name: kube-prometheus-stack 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | url: https://github.com/prometheus-community/helm-charts.git 10 | ref: 11 | # renovate: registryUrl=https://prometheus-community.github.io/helm-charts chart=kube-prometheus-stack 12 | tag: kube-prometheus-stack-55.11.0 13 | ignore: | 14 | # exclude all 15 | /* 16 | # include deploy crds dir 17 | !/charts/kube-prometheus-stack/charts/crds/crds 18 | -------------------------------------------------------------------------------- /cluster/flux/repositories/git/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - flux.yaml 7 | - home-ops.yaml 8 | - kube-prometheus-stack.yaml 9 | - openebs-zfs-localpv.yaml 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/git/openebs-zfs-localpv.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1 3 | kind: GitRepository 4 | metadata: 5 | name: openebs-zfs-localpv 6 | namespace: flux-system 7 | spec: 8 | interval: 30m 9 | url: https://github.com/openebs/zfs-localpv.git 10 | ref: 11 | # renovate: registryUrl=https://openebs.github.io/zfs-localpv chart=zfs-localpv 12 | tag: zfs-localpv-2.4.0 13 | ignore: | 14 | # exclude all 15 | /* 16 | # include deploy crds dir 17 | !/deploy/helm/charts/crds 18 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/bitnami-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: bitnami 6 | namespace: flux-system 7 | spec: 8 | type: 
oci 9 | interval: 1h 10 | url: oci://registry-1.docker.io/bitnamicharts 11 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/bjw-s-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: bjw-s 6 | namespace: flux-system 7 | spec: 8 | type: oci 9 | interval: 1h 10 | url: oci://ghcr.io/bjw-s/helm 11 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/cilium-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: cilium 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://helm.cilium.io/ 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/external-dns-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: external-dns 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://kubernetes-sigs.github.io/external-dns/ 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/grafana-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: grafana 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://grafana.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/hajimari-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: hajimari 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://hajimari.io 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/ingress-nginx-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: ingress-nginx 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://kubernetes.github.io/ingress-nginx 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/jetstack-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: jetstack 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://charts.jetstack.io/ 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/k8s-gateway-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: k8s-gateway 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: 
https://ori-edge.github.io/k8s_gateway/ 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - bitnami-charts.yaml 6 | - bjw-s-charts.yaml 7 | - cilium-charts.yaml 8 | - external-dns-charts.yaml 9 | - grafana-charts.yaml 10 | - hajimari-charts.yaml 11 | - ingress-nginx-charts.yaml 12 | - jetstack-charts.yaml 13 | - k8s-gateway-charts.yaml 14 | - kyverno-charts.yaml 15 | - metallb-charts.yaml 16 | - metrics-server-charts.yaml 17 | - minio-charts.yaml 18 | - mongodb-charts.yaml 19 | - openebs-zfs-localpv-charts.yaml 20 | - prometheus-community-charts.yaml 21 | - stakater-charts.yaml 22 | - vector-charts.yaml 23 | - vmware-tanzu-charts.yaml 24 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/kyverno-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: kyverno 6 | namespace: flux-system 7 | spec: 8 | type: oci 9 | interval: 1h 10 | url: oci://ghcr.io/kyverno/charts 11 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/metallb-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: metallb 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://metallb.github.io/metallb 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/metrics-server-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: metrics-server 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://kubernetes-sigs.github.io/metrics-server 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/minio-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: minio 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://charts.min.io/ 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/mongodb-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: mongodb 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://mongodb.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/openebs-zfs-localpv-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: openebs-zfs-localpv 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://openebs.github.io/zfs-localpv 
10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/prometheus-community-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: prometheus-community 6 | namespace: flux-system 7 | spec: 8 | type: oci 9 | interval: 1h 10 | url: oci://ghcr.io/prometheus-community/charts 11 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/stakater-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: stakater 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://stakater.github.io/stakater-charts 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/vector-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: vector 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://helm.vector.dev 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/helm/vmware-tanzu-charts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: source.toolkit.fluxcd.io/v1beta2 3 | kind: HelmRepository 4 | metadata: 5 | name: vmware-tanzu 6 | namespace: flux-system 7 | spec: 8 | interval: 1h 9 | url: https://vmware-tanzu.github.io/helm-charts 10 | -------------------------------------------------------------------------------- /cluster/flux/repositories/image/discord-ghcr.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: image.toolkit.fluxcd.io/v1beta2 3 | kind: ImageRepository 4 | metadata: 5 | name: discord-ghcr-io 6 | namespace: flux-system 7 | spec: 8 | image: ghcr.io/jr0dd/discord-bot-react-frontend 9 | interval: 1h 10 | secretRef: 11 | name: ghcr-io-creds 12 | --- 13 | apiVersion: image.toolkit.fluxcd.io/v1beta1 14 | kind: ImagePolicy 15 | metadata: 16 | name: discord-ghcr-io 17 | namespace: flux-system 18 | spec: 19 | imageRepositoryRef: 20 | name: discord-ghcr-io 21 | policy: 22 | semver: 23 | range: ">=0.1.0" 24 | --- 25 | apiVersion: image.toolkit.fluxcd.io/v1beta1 26 | kind: ImageUpdateAutomation 27 | metadata: 28 | name: discord-ghcr-io 29 | namespace: flux-system 30 | spec: 31 | sourceRef: 32 | kind: GitRepository 33 | name: home-ops 34 | interval: 1h 35 | update: 36 | path: ./cluster/apps/discord 37 | strategy: Setters 38 | git: 39 | checkout: 40 | ref: 41 | branch: main 42 | push: 43 | branch: main 44 | commit: 45 | author: 46 | name: wyoa-bot 47 | email: wyoa-bot[bot]@users.noreply.github.com 48 | messageTemplate: | 49 | fix(container): update {{range .Updated.Images}}{{println .}}{{end}} 50 | -------------------------------------------------------------------------------- /cluster/flux/repositories/image/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - secret.sops.yaml 6 | - discord-ghcr.yaml 7 | - wyoa-ghcr.yaml 8 | 
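Note: because the ImageUpdateAutomation above uses the Setters strategy, the manifests under ./cluster/apps/discord are presumably annotated with setter markers such as # {"$imagepolicy": "flux-system:discord-ghcr-io:tag"} next to the image tag fields Flux should rewrite (the exact placement is not shown in this listing). Two commands for checking that scans and policies are healthy:

$ flux get images all -A
$ flux reconcile image repository discord-ghcr-io -n flux-system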
-------------------------------------------------------------------------------- /cluster/flux/repositories/image/secret.sops.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: ghcr-io-creds 5 | namespace: flux-system 6 | type: kubernetes.io/dockerconfigjson 7 | data: 8 | .dockerconfigjson: ENC[AES256_GCM,data:52ono20ROg/cJhmYLWvtX0AJK4JqLvSjs6HqVM9Yz35C3K59uFauYTy2JlEvu02G7cU5Df7bTouGmN2T4dQW5TO1BG1re4y0jSJ4eVtBDj4kzx45vmC1jPvWNBPRS5JfF8guF/n05LelwDb7PXnQXEcD05ZP1ic0KEY/A+xJ+1Ue71bqmXCv54zbt/qVL3i/B18WN7b55ixYiTMgfk2BtLvEzyPfXRgjDnUne31erpdNKuh7xQpg8Pckpb0UQYr8kTC5rySwQEDhOeZIZzC4qKH0S8qsKWMRMn6sORlvhZJRD/58HB+GaVWeVIGW8vND/zLqp07Y2w3RVXboxoUxh1Cjq9mZ9F9B,iv:DLtXQmF4Ff611IgbWLB4xnH3w2ahZUlTLxWs13WIAaQ=,tag:GZMuVoVe1sS0Ihr2niXncQ==,type:str] 9 | sops: 10 | kms: [] 11 | gcp_kms: [] 12 | azure_kv: [] 13 | hc_vault: [] 14 | age: 15 | - recipient: age1vfgg4n4snp0ktjm83gsv3nptdw39mw8q7fw7dzghgfpllc040vgsd6yypm 16 | enc: | 17 | -----BEGIN AGE ENCRYPTED FILE----- 18 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSBnd3JxN3gxU3FQWGc5bnRp 19 | NkRncjYrT2RXVmt3d3AvMzhlNmF0T3hmaEZNCnVDN0h3TzNILy94U1A5YzZGWnFM 20 | cUZVb0xiNDRJTklaeHl3N0xuaG16TG8KLS0tIEppUlc3allkWWdiNEZwcnhYSG42 21 | MklEY2tGWUJFRXo2VEg1SGFzMzVWdm8KVVVd7cXKFh11PFMfEYvsUfz191vHWSiw 22 | kyxj7fNDYUwWCS4kXdInBZ4mD2LPhMdNT0syRQY10zvaokwcZVxtxg== 23 | -----END AGE ENCRYPTED FILE----- 24 | lastmodified: "2022-06-18T21:14:35Z" 25 | mac: ENC[AES256_GCM,data:WisaU6sLitp4gAiJrT0jRyPM1j3cf+fMJVQMT2uLf1sUZi5gAMtktadLbzzKp7FPYYx1cCl5AYh2hCRG4hyqFaTC9lg9Ittf6Kb+YUnO31k75MfhTFV4f+/Za15o8cqp9pu7CitsYmTIBvA4JiwFmOAoIFy5cDiLqB1sUgUSTBY=,iv:By9Xmfrux9mgHSideGajluCb/l7JsLGeFHb2ahnTTYo=,tag:knPDy+qojG+fodDkmaf5sQ==,type:str] 26 | pgp: [] 27 | encrypted_regex: ^(data|stringData)$ 28 | version: 3.7.3 29 | -------------------------------------------------------------------------------- /cluster/flux/repositories/image/wyoa-ghcr.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: image.toolkit.fluxcd.io/v1beta2 3 | kind: ImageRepository 4 | metadata: 5 | name: wyoa-ghcr-io 6 | namespace: flux-system 7 | spec: 8 | image: ghcr.io/jr0dd/wyoa-bot 9 | interval: 30m 10 | secretRef: 11 | name: ghcr-io-creds 12 | --- 13 | apiVersion: image.toolkit.fluxcd.io/v1beta1 14 | kind: ImagePolicy 15 | metadata: 16 | name: wyoa-ghcr-io 17 | namespace: flux-system 18 | spec: 19 | imageRepositoryRef: 20 | name: wyoa-ghcr-io 21 | policy: 22 | semver: 23 | range: ">=0.6.0" 24 | --- 25 | apiVersion: image.toolkit.fluxcd.io/v1beta1 26 | kind: ImageUpdateAutomation 27 | metadata: 28 | name: wyoa-ghcr-io 29 | namespace: flux-system 30 | spec: 31 | sourceRef: 32 | kind: GitRepository 33 | name: home-ops 34 | interval: 1m 35 | update: 36 | path: ./cluster/apps/discord 37 | strategy: Setters 38 | git: 39 | checkout: 40 | ref: 41 | branch: main 42 | push: 43 | branch: main 44 | commit: 45 | author: 46 | name: wyoa-bot 47 | email: wyoa-bot[bot]@users.noreply.github.com 48 | messageTemplate: | 49 | fix(container): update {{range .Updated.Images}}{{println .}}{{end}} 50 | -------------------------------------------------------------------------------- /cluster/flux/repositories/kustomization.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: kustomize.config.k8s.io/v1beta1 3 | kind: Kustomization 4 | resources: 5 | - git 6 | - helm 7 | - image 8 | 
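Note: a sketch of how a pull secret like ghcr-io-creds above can be regenerated before committing; the username/token placeholders are illustrative only, and sops --encrypt assumes creation rules (age recipient plus the encrypted_regex seen above) are configured for this path:

$ kubectl create secret docker-registry ghcr-io-creds \
    --namespace flux-system --docker-server=ghcr.io \
    --docker-username=<github-user> --docker-password=<ghcr-token> \
    --dry-run=client -o yaml > cluster/flux/repositories/image/secret.sops.yaml
$ sops --encrypt --in-place cluster/flux/repositories/image/secret.sops.yaml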
-------------------------------------------------------------------------------- /cluster/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | resources: 4 | - github.com/fluxcd/flux2//manifests/install?ref=v2.2.2 5 | - flux/repositories 6 | - bootstrap.yaml 7 | -------------------------------------------------------------------------------- /hack/blocky.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | ACTION="${1}" 4 | DURATION="${2}" 5 | 6 | NAMESPACE="networking" 7 | BLOCKY_PODS=$(kubectl get pods -n "${NAMESPACE}" -o=jsonpath="{range .items[*]}{.metadata.name} {end}" -l app.kubernetes.io/name=blocky) 8 | 9 | for pod in $BLOCKY_PODS; do 10 | case "${ACTION}" in 11 | status) 12 | kubectl -n "${NAMESPACE}" exec -it "${pod}" -- /app/blocky blocking status 13 | ;; 14 | enable) 15 | kubectl -n "${NAMESPACE}" exec -it "${pod}" -- /app/blocky blocking enable 16 | ;; 17 | disable) 18 | if [ -z "${DURATION}" ]; then 19 | kubectl -n "${NAMESPACE}" exec -it "${pod}" -- /app/blocky blocking disable 20 | else 21 | kubectl -n "${NAMESPACE}" exec -it "${pod}" -- /app/blocky blocking disable --duration "${DURATION}" 22 | fi 23 | ;; 24 | esac 25 | done 26 | -------------------------------------------------------------------------------- /hack/delete-stuck-ns.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | function delete_namespace () { 4 | echo "Deleting namespace '$1'" 5 | kubectl get namespace "$1" -o json > tmp.json 6 | sed -i 's/"kubernetes"//g' tmp.json 7 | kubectl replace --raw "/api/v1/namespaces/$1/finalize" -f ./tmp.json 8 | rm ./tmp.json 9 | } 10 | 11 | TERMINATING_NS=$(kubectl get ns | awk '$2=="Terminating" {print $1}') 12 | 13 | for ns in $TERMINATING_NS 14 | do 15 | delete_namespace "$ns" 16 | done 17 | -------------------------------------------------------------------------------- /hack/delete-stuck-snapshots.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | volumesnapshotcontents=$(kubectl get --no-headers volumesnapshotcontents | awk '{print $1}') 4 | for volumesnapshotcontent in $volumesnapshotcontents 5 | do 6 | kubectl patch volumesnapshotcontents "${volumesnapshotcontent}" -p '{"metadata":{"finalizers":null}}' --type=merge 7 | done 8 | 9 | volumesnapshots=$(kubectl get --no-headers volumesnapshots -A | awk '{print $1","$2}') 10 | for item in $volumesnapshots 11 | do 12 | namespace="$(echo "${item}" | awk -F',' '{print $1}')" 13 | volumesnapshot="$(echo "${item}" | awk -F',' '{print $2}')" 14 | kubectl patch volumesnapshots "${volumesnapshot}" -n "${namespace}" -p '{"metadata":{"finalizers":null}}' --type=merge 15 | done 16 | -------------------------------------------------------------------------------- /infrastructure/ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | 3 | #--- General settings ---# 4 | nocows = True 5 | forks = 8 6 | module_name = command 7 | deprecation_warnings = True 8 | executable = /bin/bash 9 | stdout_callback = yaml 10 | 11 | #--- Files/Directory settings ---# 12 | log_path = ~/ansible.log 13 | inventory = ./inventory 14 | library = /usr/share/my_modules 15 | remote_tmp = /tmp/.ansible/tmp 16 | local_tmp = /tmp/.ansible/tmp 17 | roles_path = ./roles 18 |
retry_files_enabled = False 19 | 20 | #--- Fact Caching settings ---# 21 | fact_caching = jsonfile 22 | fact_caching_connection = ~/.ansible/facts_cache 23 | fact_caching_timeout = 7200 24 | 25 | #--- SSH settings ---# 26 | remote_port = 22 27 | timeout = 60 28 | host_key_checking = False 29 | ssh_executable = /usr/bin/ssh 30 | private_key_file = ~/.ssh/id_ed25519 31 | 32 | force_valid_group_names = ignore 33 | 34 | #--- Speed ---# 35 | callback_enabled = true 36 | internal_poll_interval = 0.001 37 | 38 | #--- Plugin settings ---# 39 | vars_plugins_enabled = host_group_vars,community.sops.sops 40 | 41 | [inventory] 42 | unparsed_is_failed = true 43 | 44 | [privilege_escalation] 45 | become = True 46 | become_method = sudo 47 | become_user = root 48 | become_ask_pass = False 49 | 50 | [ssh_connection] 51 | scp_if_ssh = smart 52 | transfer_method = smart 53 | retries = 3 54 | timeout = 10 55 | ssh_args = -o ControlMaster=auto -o ControlPersist=30m -o Compression=yes -o ServerAliveInterval=15s 56 | pipelining = True 57 | control_path = %(directory)s/%%h-%%r 58 | -------------------------------------------------------------------------------- /infrastructure/ansible/inventory/group_vars/all/k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # Below vars are for the xanmanning.k3s role 4 | # ...see https://github.com/PyratLabs/ansible-role-k3s#globalcluster-variables 5 | # 6 | 7 | # Use a specific version of k3s 8 | # renovate: datasource=github-releases depName=k3s-io/k3s 9 | k3s_release_version: "v1.29.0+k3s1" 10 | 11 | # Install using hard links rather than symbolic links. 12 | # ...if you are using the system-upgrade-controller you will need to use hard links 13 | # rather than symbolic links as the controller will not be able to follow symbolic links. 14 | k3s_install_hard_links: true 15 | 16 | # Escalate user privileges for all tasks. 17 | k3s_become: true 18 | 19 | # Enable debugging 20 | k3s_debug: false 21 | 22 | # Enabled embedded etcd 23 | k3s_etcd_datastore: true 24 | 25 | # Network CIDR to use for Pods 26 | k3s_cluster_cidr: 10.42.0.0/16 27 | 28 | # Network CIDR to use for service IPs 29 | k3s_service_cidr: 10.43.0.0/16 30 | 31 | # Enable for single or even number of masters 32 | k3s_use_unsupported_config: true 33 | 34 | # Control Plane registration address 35 | k3s_registration_address: "k8s.{{ domain }}" 36 | 37 | # A list of templates used for configuring the server. 
38 | k3s_server_config_yaml_d_files: 39 | - "10-etcd-snapshots.yaml.j2" 40 | 41 | # /var/lib/rancher/k3s/server/manifests 42 | k3s_server_manifests_templates: 43 | - "cilium/cilium-installation.yaml.j2" 44 | -------------------------------------------------------------------------------- /infrastructure/ansible/inventory/group_vars/all/ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Timezone for the servers 3 | timezone: "America/New_York" 4 | 5 | # Add cloudflare ntp server 6 | ntp_servers: 7 | primary: 8 | - "time.cloudflare.com" 9 | fallback: 10 | - "0.us.pool.ntp.org" 11 | - "1.us.pool.ntp.org" 12 | - "2.us.pool.ntp.org" 13 | 14 | # Additional ssh public keys to add to the nodes 15 | ssh_authorized_keys: 16 | # MacBook 17 | - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFJavnCHDj6nsfkpDdfKxQN6hlYnXfQmx6TEe4W8dErN j_r0dd@mb-pro.local" 18 | # iPhone 19 | - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfQEufDi4o/kO8EH7J7QB7ikM4CCsgsUJd9cwdOEXv8 j_r0dd@icloud.com" 20 | -------------------------------------------------------------------------------- /infrastructure/ansible/inventory/group_vars/storage-nodes/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jr0dd/home-ops/c11790ea98ba3a483d3d5575acbefb26e5f75e37/infrastructure/ansible/inventory/group_vars/storage-nodes/.gitkeep -------------------------------------------------------------------------------- /infrastructure/ansible/inventory/group_vars/worker-nodes/k3s.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # https://rancher.com/docs/k3s/latest/en/installation/install-options/agent-config/ 3 | # https://github.com/PyratLabs/ansible-role-k3s#agent-worker-configuration 4 | 5 | # Don't define the host as control plane nodes 6 | k3s_control_node: false 7 | 8 | # k3s settings for all worker nodes 9 | k3s_agent: 10 | node-ip: "{{ ansible_host }}" 11 | kubelet-arg: 12 | - "feature-gates=MixedProtocolLBService=true" 13 | - "node-status-update-frequency=4s" 14 | -------------------------------------------------------------------------------- /infrastructure/ansible/inventory/hosts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | all: 3 | children: 4 | master-nodes: 5 | hosts: 6 | k8s-0-nas: 7 | ansible_host: 10.10.0.10 8 | ansible_user: ubuntu 9 | worker-nodes: 10 | hosts: 11 | storage-nodes: 12 | hosts: 13 | k8s-0-nas: 14 | -------------------------------------------------------------------------------- /infrastructure/ansible/playbooks/k3s/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master-nodes 4 | - worker-nodes 5 | - storage-nodes 6 | become: true 7 | gather_facts: true 8 | any_errors_fatal: true 9 | pre_tasks: 10 | - name: Pausing for 5 seconds... 
11 | ansible.builtin.pause: 12 | seconds: 5 13 | roles: 14 | - k3s 15 | tasks: 16 | - name: k3s | install extra binaries 17 | ansible.builtin.shell: | 18 | # cilium 19 | curl -L --remote-name-all https://github.com/cilium/cilium-cli/releases/latest/download/cilium-linux-amd64.tar.gz{,.sha256sum} 20 | sha256sum --check cilium-linux-amd64.tar.gz.sha256sum 21 | tar xzvfC cilium-linux-amd64.tar.gz /usr/local/bin 22 | rm cilium-linux-amd64.tar.gz{,.sha256sum} 23 | # flux 24 | curl -s "https://fluxcd.io/install.sh" | bash 25 | # kustomize 26 | curl -s "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" | bash 27 | install -o root -g root -m 0755 kustomize /usr/local/bin/kustomize 28 | -------------------------------------------------------------------------------- /infrastructure/ansible/playbooks/k3s/nuke.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master-nodes 4 | - worker-nodes 5 | - storage-nodes 6 | become: true 7 | gather_facts: true 8 | any_errors_fatal: true 9 | pre_tasks: 10 | - name: Pausing for 5 seconds... 11 | ansible.builtin.pause: 12 | seconds: 5 13 | roles: 14 | - xanmanning.k3s 15 | vars: 16 | k3s_state: uninstalled 17 | tasks: 18 | - name: k3s | gather list of cni files to delete 19 | ansible.builtin.find: 20 | paths: /etc/cni/net.d 21 | patterns: "*" 22 | register: directory_contents 23 | 24 | - name: k3s | delete cni files 25 | ansible.builtin.file: 26 | path: "{{ item.path }}" 27 | state: absent 28 | loop: "{{ directory_contents.files }}" 29 | 30 | - name: k3s | disable killall service 31 | ansible.builtin.systemd: 32 | name: k3s-killall 33 | daemon_reload: true 34 | enabled: false 35 | state: stopped 36 | 37 | - name: k3s | delete extra files 38 | ansible.builtin.file: 39 | path: '{{ item }}' 40 | state: absent 41 | with_items: 42 | - /usr/local/bin/cilium 43 | - /usr/local/bin/flux 44 | - /etc/systemd/system/k3s-killall.service 45 | - /usr/local/bin/kubectl 46 | - /usr/local/bin/kustomize 47 | -------------------------------------------------------------------------------- /infrastructure/ansible/playbooks/nas/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - storage-nodes 4 | become: true 5 | gather_facts: true 6 | any_errors_fatal: true 7 | pre_tasks: 8 | - name: Pausing for 5 seconds... 9 | ansible.builtin.pause: 10 | seconds: 5 11 | roles: 12 | - nas 13 | -------------------------------------------------------------------------------- /infrastructure/ansible/playbooks/ubuntu/prepare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master-nodes 4 | - worker-nodes 5 | - storage-nodes 6 | become: true 7 | gather_facts: true 8 | any_errors_fatal: true 9 | pre_tasks: 10 | - name: Pausing for 5 seconds... 
11 | ansible.builtin.pause: 12 | seconds: 5 13 | roles: 14 | - ubuntu 15 | -------------------------------------------------------------------------------- /infrastructure/ansible/playbooks/ubuntu/upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - master-nodes 4 | - worker-nodes 5 | - storage-nodes 6 | become: true 7 | gather_facts: true 8 | any_errors_fatal: true 9 | tasks: 10 | - name: ubuntu | upgrade 11 | ansible.builtin.apt: 12 | upgrade: full 13 | update_cache: true 14 | cache_valid_time: 3600 15 | autoclean: true 16 | autoremove: true 17 | register: apt_upgrade 18 | retries: 5 19 | until: apt_upgrade is success 20 | -------------------------------------------------------------------------------- /infrastructure/ansible/requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: ansible.posix 4 | version: 1.5.4 5 | - name: community.docker 6 | version: 3.5.0 7 | - name: community.general 8 | version: 8.1.0 9 | - name: community.routeros 10 | version: 2.11.0 11 | - name: community.sops 12 | version: 1.6.7 13 | - name: kubernetes.core 14 | version: 3.0.0 15 | roles: 16 | - src: xanmanning.k3s 17 | version: v3.4.3 18 | - src: https://github.com/mrlesmithjr/ansible-zfs.git 19 | scm: git 20 | name: mrlesmithjr.zfs 21 | version: b80e84d6938eb7730faeffc7ff7d8aa06ad5bef7 22 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/k3s/tasks/checks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: checks | check if cluster is installed 3 | ansible.builtin.stat: 4 | path: "/etc/rancher/k3s/config.yaml" 5 | register: k3s_check_installed 6 | check_mode: false 7 | 8 | - name: checks | set manifest facts 9 | ansible.builtin.set_fact: 10 | k3s_server_manifests_templates: [] 11 | k3s_server_manifests_urls: [] 12 | when: k3s_check_installed.stat.exists 13 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/k3s/tasks/cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: cleanup | remove deployed manifest templates 3 | ansible.builtin.file: 4 | path: "{{ k3s_server_manifests_dir }}/{{ item | basename | regex_replace('\\.j2$', '') }}" 5 | state: absent 6 | loop: "{{ k3s_server_manifests_templates }}" 7 | when: 8 | - k3s_server_manifests_templates 9 | - k3s_server_manifests_templates | length > 0 10 | 11 | - name: cleanup | remove deployed manifest urls 12 | ansible.builtin.file: 13 | path: "{{ k3s_server_manifests_dir }}/{{ item.filename }}" 14 | state: absent 15 | loop: "{{ k3s_server_manifests_urls }}" 16 | when: 17 | - k3s_server_manifests_urls 18 | - k3s_server_manifests_urls | length > 0 19 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/k3s/tasks/kubeconfig.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: kubeconfig | copy kubeconfig locally to /tmp 3 | run_once: true 4 | ansible.builtin.fetch: 5 | src: "/etc/rancher/k3s/k3s.yaml" 6 | dest: "/tmp/kubeconfig" 7 | flat: true 8 | when: 9 | - k3s_control_node is defined 10 | - k3s_control_node 11 | 12 | - name: kubeconfig | update kubeconfig with the right address 13 | delegate_to: localhost 14 | become: false 15 | run_once: true 16 | ansible.builtin.replace: 
17 | path: "/tmp/kubeconfig" 18 | regexp: "https://127.0.0.1:6443" 19 | replace: "https://{{ k3s_registration_address }}:6443" 20 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/k3s/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: checks.yml 3 | tags: 4 | - checks 5 | 6 | - name: Install Kubernetes 7 | include_role: 8 | name: xanmanning.k3s 9 | public: true 10 | 11 | - include: network.yml 12 | tags: 13 | - network 14 | 15 | - include: cleanup.yml 16 | tags: 17 | - cleanup 18 | 19 | - include: kubeconfig.yml 20 | tags: 21 | - kubeconfig 22 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/k3s/tasks/network.yml: -------------------------------------------------------------------------------- 1 | - name: network | check for bridge-nf-call-iptables 2 | ansible.builtin.stat: 3 | path: /proc/sys/net/bridge/bridge-nf-call-iptables 4 | register: bridge_nf_call_iptables_result 5 | 6 | - name: network | set kubernetes network configuration 7 | ansible.builtin.blockinfile: 8 | path: /etc/sysctl.d/99-kubernetes-cri.conf 9 | mode: 0644 10 | create: true 11 | block: | 12 | net.ipv4.ip_forward = 1 13 | net.bridge.bridge-nf-call-iptables = 1 14 | net.ipv6.conf.all.forwarding = 1 15 | net.bridge.bridge-nf-call-ip6tables = 1 16 | when: 17 | - bridge_nf_call_iptables_result.stat.exists 18 | register: sysctl_network 19 | 20 | - name: network | check for rp_filter 21 | ansible.builtin.stat: 22 | path: /proc/sys/net/ipv4/conf/default/rp_filter 23 | register: rp_filter_result 24 | 25 | - name: network | disable rp_filter for cilium 26 | ansible.builtin.blockinfile: 27 | path: /etc/sysctl.d/90-override.conf 28 | mode: 0644 29 | create: true 30 | block: | 31 | net.ipv4.conf.default.rp_filter = 0 32 | net.ipv4.conf.lxc*.rp_filter = 0 33 | when: 34 | - rp_filter_result.stat.exists 35 | register: sysctl_network 36 | 37 | - name: network | restart systemd-sysctl 38 | ansible.builtin.systemd: 39 | name: systemd-sysctl 40 | state: restarted 41 | when: 42 | - sysctl_network.changed 43 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/k3s/templates/10-etcd-snapshots.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | etcd-s3: {{ k3s_etcd_s3 | bool | lower }} 3 | {% if k3s_etcd_s3 %} 4 | etcd-snapshot-schedule-cron: "{{ k3s_etcd_snapshot_schedule_cron }}" 5 | etcd-snapshot-retention: {{ k3s_etcd_snapshot_retention }} 6 | etcd-s3-endpoint: "{{ k3s_etcd_s3_endpoint }}" 7 | etcd-s3-bucket: "{{ k3s_etcd_s3_bucket }}" 8 | etcd-s3-access-key: "{{ k3s_etcd_s3_access_key }}" 9 | etcd-s3-secret-key: "{{ k3s_etcd_s3_secret_key }}" 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/k3s/templates/cilium/cilium-bgp-config.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: bgp-config 6 | namespace: kube-system 7 | data: 8 | config.yaml: | 9 | peers: 10 | - peer-address: {{ cilium_peer_ip }} 11 | peer-asn: {{ cilium_peer_asn }} 12 | my-asn: {{ cilium_asn }} 13 | address-pools: 14 | - name: "{{ cilium_pool_name | default('default') }}" 15 | protocol: bgp 16 | avoid-buggy-ips: true 17 | addresses: 18 | - "{{ cilium_cidr }}" 19 | 
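Note on the cilium-bgp-config.yaml.j2 template above: it renders a MetalLB-style bgp-config ConfigMap for Cilium's BGP integration and expects the cilium_* variables to be supplied by the Ansible inventory. A minimal sketch of group_vars that would satisfy the template (all values are illustrative, not taken from this repository):

    cilium_asn: 64513
    cilium_peer_asn: 64512
    cilium_peer_ip: 10.10.0.1
    cilium_cidr: 10.45.0.0/16
    cilium_pool_name: default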
-------------------------------------------------------------------------------- /infrastructure/ansible/roles/k3s/templates/cilium/values.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # basic helm values to generate ansible templates 3 | autoDirectNodeRoutes: true 4 | k8sServiceHost: "k8s.${SECRET_DOMAIN}" 5 | k8sServicePort: 6443 6 | nativeRoutingCIDR: 10.42.0.0/16 7 | rollOutCiliumPods: true 8 | tunnel: disabled 9 | kubeProxyReplacement: "strict" 10 | kubeProxyReplacementHealthzBindAddr: 0.0.0.0:10256 11 | containerRuntime: 12 | integration: containerd 13 | ipam: 14 | mode: "kubernetes" 15 | operator: 16 | rollOutPods: true 17 | replicas: 1 18 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/k3s/templates/home-dns/home-dns-rbac.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: home-dns 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | kind: ClusterRole 9 | metadata: 10 | name: home-dns 11 | rules: 12 | - apiGroups: 13 | - "" 14 | resources: 15 | - services 16 | - namespaces 17 | verbs: 18 | - list 19 | - watch 20 | - apiGroups: 21 | - extensions 22 | - networking.k8s.io 23 | resources: 24 | - ingresses 25 | verbs: 26 | - list 27 | - watch 28 | --- 29 | apiVersion: rbac.authorization.k8s.io/v1 30 | kind: ClusterRoleBinding 31 | metadata: 32 | name: home-dns 33 | roleRef: 34 | apiGroup: rbac.authorization.k8s.io 35 | kind: ClusterRole 36 | name: home-dns 37 | subjects: 38 | - kind: ServiceAccount 39 | name: home-dns 40 | namespace: kube-system 41 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/k3s/templates/kube-vip/kube-vip-pod.yaml.j2: -------------------------------------------------------------------------------- 1 | #jinja2: lstrip_blocks: "True" 2 | --- 3 | apiVersion: v1 4 | kind: Pod 5 | metadata: 6 | name: kube-vip 7 | namespace: kube-system 8 | spec: 9 | containers: 10 | - name: kube-vip 11 | image: ghcr.io/kube-vip/kube-vip:v0.6.4 12 | imagePullPolicy: IfNotPresent 13 | args: 14 | - manager 15 | env: 16 | - name: address 17 | value: "{{ kubevip_address }}" 18 | - name: vip_arp 19 | value: "false" 20 | - name: vip_interface 21 | value: "{{ kubevip_interface }}" 22 | - name: port 23 | value: "6443" 24 | - name: vip_cidr 25 | value: "32" 26 | - name: cp_enable 27 | value: "true" 28 | - name: cp_namespace 29 | value: "kube-system" 30 | - name: vip_loglevel 31 | value: "5" 32 | - name: bgp_enable 33 | value: "true" 34 | - name: bgp_as 35 | value: "{{ kubevip_asn }}" 36 | {% if kubevip_source_ip is defined %} 37 | - name: bgp_source_ip 38 | value: "{{ kubevip_source_ip }}" 39 | {% endif %} 40 | - name: bgp_routerinterface 41 | value: "{{ kubevip_interface }}" 42 | - name: bgp_peeraddress 43 | value: "{{ kubevip_peer_ip }}" 44 | - name: bgp_peeras 45 | value: "{{ kubevip_peer_asn }}" 46 | securityContext: 47 | capabilities: 48 | add: 49 | - NET_ADMIN 50 | - NET_RAW 51 | volumeMounts: 52 | - mountPath: /etc/kubernetes/admin.conf 53 | name: kubeconfig 54 | hostAliases: 55 | - hostnames: 56 | - kubernetes 57 | ip: 127.0.0.1 58 | hostNetwork: true 59 | volumes: 60 | - hostPath: 61 | path: /etc/rancher/k3s/k3s.yaml 62 | name: kubeconfig 63 | -------------------------------------------------------------------------------- 
/infrastructure/ansible/roles/k3s/templates/kube-vip/kube-vip-rbac.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: kube-vip 6 | namespace: kube-system 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: ClusterRole 10 | metadata: 11 | annotations: 12 | rbac.authorization.kubernetes.io/autoupdate: "true" 13 | name: system:kube-vip-role 14 | rules: 15 | - apiGroups: [""] 16 | resources: ["services", "services/status", "nodes"] 17 | verbs: ["list", "get", "watch", "update"] 18 | - apiGroups: ["coordination.k8s.io"] 19 | resources: ["leases"] 20 | verbs: ["list", "get", "watch", "update", "create"] 21 | --- 22 | kind: ClusterRoleBinding 23 | apiVersion: rbac.authorization.k8s.io/v1 24 | metadata: 25 | name: system:kube-vip-binding 26 | roleRef: 27 | apiGroup: rbac.authorization.k8s.io 28 | kind: ClusterRole 29 | name: system:kube-vip-role 30 | subjects: 31 | - kind: ServiceAccount 32 | name: kube-vip 33 | namespace: kube-system 34 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | zfs_sharing: 3 | samba_enabled: false 4 | timemachine_enabled: false 5 | nfs_enabled: false 6 | 7 | # monitoring options not in mrlessmith.zfs role or more fine-tuned 8 | zfs_monitoring: 9 | # enable email alerts 10 | email_alerts: false 11 | # enable smartd monitoring 12 | smartd_alerts: false 13 | # enable zfs-zed monitoring. needs email configured to work. 14 | zed_alerts: false 15 | # Use systemd timers instead of cron for scrubs 16 | systemd_scrub_timer: false 17 | 18 | # systemd timer settings 19 | scrub_pools_timer: 20 | # Run every month on the 1st and 15th at 1am 21 | on_calendar: "*-*-1,15 01:00:00" 22 | accuracy: 1h 23 | 24 | nfs_exports: [] 25 | samba_global: [] 26 | samba_shares: [] 27 | samba_users: [] 28 | 29 | apt_install_packages: 30 | - acl 31 | - dmraid 32 | - git 33 | - gnupg2 34 | - ipmitool 35 | - lm-sensors 36 | - moreutils 37 | - rclone 38 | - tmux 39 | - tree 40 | - uidmap 41 | - zsh 42 | 43 | apt_remove_packages: 44 | - apparmor 45 | - apport 46 | - byobu 47 | - friendly-recovery 48 | - landscape-common 49 | - lxd-agent-loader 50 | - ntfs-3g 51 | - plymouth 52 | - plymouth-theme-ubuntu-text 53 | - popularity-contest 54 | - snapd 55 | - sosreport 56 | - ubuntu-advantage-tools 57 | - ufw 58 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/tasks/avahi.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: avahi | install avahi service 3 | ansible.builtin.apt: 4 | update_cache: yes # yamllint disable-line rule:truthy 5 | pkg: 6 | - avahi-daemon 7 | state: present 8 | 9 | - name: avahi | restart avahi service 10 | ansible.builtin.systemd: 11 | name: avahi-daemon 12 | daemon_reload: true 13 | enabled: true 14 | state: restarted 15 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/tasks/disks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: disks | install smartd 3 | ansible.builtin.apt: 4 | update_cache: yes # yamllint disable-line rule:truthy 5 | pkg: 6 | - smartmontools 7 | state: present 8 | 9 | - name: disks | configure smartd 10 | 
ansible.builtin.template: 11 | src: smartd.conf.j2 12 | dest: /etc/smartd.conf 13 | mode: 0644 14 | 15 | - name: disks | restart smartd service 16 | ansible.builtin.systemd: 17 | name: smartd 18 | daemon_reload: true 19 | enabled: true 20 | state: restarted 21 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/tasks/email.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: email | install mail packages 3 | ansible.builtin.apt: 4 | update_cache: yes # yamllint disable-line rule:truthy 5 | pkg: 6 | - mailutils 7 | - msmtp 8 | - msmtp-mta 9 | - bsd-mailx 10 | state: present 11 | 12 | - name: email | configure aliases 13 | ansible.builtin.template: 14 | src: aliases.j2 15 | dest: /etc/aliases 16 | mode: 0644 17 | 18 | - name: email | configure mail 19 | ansible.builtin.template: 20 | src: mail.rc.j2 21 | dest: /etc/mail.rc 22 | mode: 0644 23 | 24 | - name: email | configure msmtpd 25 | ansible.builtin.template: 26 | src: msmtprc.j2 27 | dest: /etc/msmtprc 28 | mode: 0644 29 | 30 | - name: email | restart msmtpd service 31 | ansible.builtin.systemd: 32 | name: msmtpd 33 | daemon_reload: true 34 | enabled: true 35 | state: restarted 36 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: disks.yml 3 | when: zfs_monitoring.smartd_alerts 4 | tags: 5 | - disks 6 | 7 | - include: packages.yml 8 | tags: 9 | - packages 10 | 11 | - include: email.yml 12 | when: zfs_monitoring.email_alerts 13 | tags: 14 | - email 15 | 16 | - name: nas | install zfs features 17 | include_role: 18 | name: mrlesmithjr.zfs 19 | public: true 20 | 21 | - include: scrub.yml 22 | when: zfs_monitoring.systemd_scrub_timer 23 | tags: 24 | - scrub 25 | 26 | - include: nfs.yml 27 | when: zfs_sharing.nfs_enabled 28 | tags: 29 | - nfs 30 | 31 | - include: samba.yml 32 | when: zfs_sharing.samba_enabled 33 | tags: 34 | - samba 35 | 36 | - include: avahi.yml 37 | when: 38 | - zfs_sharing.samba_enabled 39 | - zfs_sharing.timemachine_enabled 40 | tags: 41 | - avahi 42 | 43 | - include: zed.yml 44 | when: zfs_monitoring.zed_alerts 45 | tags: 46 | - zed 47 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/tasks/nfs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: nfs | install nfs packages 3 | ansible.builtin.apt: 4 | update_cache: yes # yamllint disable-line rule:truthy 5 | pkg: 6 | - nfs-common 7 | - nfs-kernel-server 8 | state: present 9 | 10 | - name: nfs | configure nfs exports 11 | ansible.builtin.template: 12 | src: exports.j2 13 | dest: /etc/exports 14 | mode: 0644 15 | when: 16 | - nfs_exports 17 | - nfs_exports | length > 0 18 | 19 | - name: nfs | reload nfs exports 20 | ansible.builtin.command: 21 | cmd: "exportfs -ar" 22 | when: 23 | - nfs_exports 24 | - nfs_exports | length > 0 25 | 26 | - name: nfs | restart nfs service 27 | ansible.builtin.systemd: 28 | name: nfs-kernel-server 29 | daemon_reload: true 30 | enabled: true 31 | state: restarted 32 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/tasks/packages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: packages | upgrade all system 
packages 3 | ansible.builtin.apt: 4 | upgrade: full 5 | update_cache: true 6 | cache_valid_time: 3600 7 | autoclean: true 8 | autoremove: true 9 | register: apt_upgrade 10 | retries: 5 11 | until: apt_upgrade is success 12 | 13 | - name: packages | gather installed packages 14 | ansible.builtin.package_facts: 15 | manager: auto 16 | 17 | - name: packages | install common packages 18 | ansible.builtin.apt: 19 | name: "{{ apt_install_packages }}" 20 | install_recommends: false 21 | update_cache: true 22 | cache_valid_time: 3600 23 | autoclean: true 24 | autoremove: true 25 | register: apt_install_common 26 | retries: 5 27 | until: apt_install_common is success 28 | when: 29 | - apt_install_packages is defined 30 | - apt_install_packages is iterable 31 | - apt_install_packages | length > 0 32 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/tasks/samba.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: samba | install samba packages 3 | ansible.builtin.apt: 4 | update_cache: yes # yamllint disable-line rule:truthy 5 | pkg: 6 | - samba 7 | - samba-vfs-modules 8 | state: present 9 | 10 | - name: samba | create users if absent 11 | ansible.builtin.shell: > 12 | set -o pipefail \ 13 | (pdbedit --user={{ item.name }} 2>&1 > /dev/null) \ 14 | || (echo {{ item.password }}; echo {{ item.password }}) \ 15 | | smbpasswd -s -a {{ item.name }} 16 | with_items: "{{ samba_users }}" 17 | no_log: true 18 | register: create_user_output 19 | changed_when: "'Added user' in create_user_output.stdout" 20 | when: 21 | - samba_users 22 | - samba_users | length > 0 23 | 24 | - name: samba | configure samba 25 | ansible.builtin.template: 26 | src: smb.conf.j2 27 | dest: /etc/samba/smb.conf 28 | validate: "testparm -s %s" 29 | mode: 0644 30 | 31 | - name: samba | restart samba service 32 | ansible.builtin.systemd: 33 | name: smbd 34 | daemon_reload: true 35 | enabled: true 36 | state: restarted 37 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/tasks/scrub.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: scrub | copy systemd scrub service 3 | ansible.builtin.template: 4 | src: zfs-scrub@.service.j2 5 | dest: /etc/systemd/system/zfs-scrub@.service 6 | mode: 0644 7 | 8 | - name: scrub | copy systemd scrub timer 9 | ansible.builtin.template: 10 | src: zfs-scrub@.timer.j2 11 | dest: /etc/systemd/system/zfs-scrub@.timer 12 | mode: 0644 13 | 14 | - name: scrub | start systemd scrub timer 15 | ansible.builtin.systemd: 16 | name: "zfs-scrub@{{ item.name }}.timer" 17 | enabled: true 18 | state: started 19 | with_items: "{{ zfs_pools }}" 20 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/tasks/zed.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: zed | install zed packages 3 | ansible.builtin.apt: 4 | update_cache: yes # yamllint disable-line rule:truthy 5 | pkg: 6 | - zfs-zed 7 | state: present 8 | 9 | - name: zed | configure zed service 10 | ansible.builtin.template: 11 | src: zed.rc.j2 12 | dest: /etc/zfs/zed.d/zed.rc 13 | mode: 0600 14 | 15 | - name: zed | restart zed service 16 | ansible.builtin.systemd: 17 | name: zfs-zed 18 | daemon_reload: true 19 | enabled: true 20 | state: restarted 21 | 
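Note on the nas role above: the nfs.yml and samba.yml tasks, together with the exports.j2 and smb.conf.j2 templates, are driven by the nfs_exports, samba_shares and samba_users variables, which default to empty lists in defaults/main.yml. A minimal sketch of how they could be populated in group_vars (paths, user and share names are illustrative only, and the password would normally come from a sops-encrypted variable):

    nfs_exports:
      - "/tank/media 10.10.0.0/24(rw,sync,no_subtree_check)"
    samba_users:
      - name: example-user
        password: "{{ example_samba_password }}"
    samba_shares: |
      [media]
        path = /tank/media
        valid users = example-user
        writable = yes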
-------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/templates/aliases.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | root: {{ email }} 3 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/templates/exports.j2: -------------------------------------------------------------------------------- 1 | # /etc/exports: the access control list for filesystems which may be exported 2 | # to NFS clients. See exports(5). 3 | # 4 | # Example for NFSv2 and NFSv3: 5 | # /srv/homes hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check) 6 | # 7 | # Example for NFSv4: 8 | # /srv/nfs4 gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check) 9 | # /srv/nfs4/homes gss/krb5i(rw,sync,no_subtree_check) 10 | # 11 | {% for export in nfs_exports %} 12 | {{ export }} 13 | {% endfor %} 14 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/templates/mail.rc.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | set ask askcc append dot save crt 3 | ignore Received Message-Id Resent-Message-Id Status Mail-From Return-Path Via Delivered-To 4 | 5 | set mta=/usr/bin/msmtp 6 | alias root root<{{ email }}> 7 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/templates/msmtprc.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | defaults 3 | auth on 4 | tls on 5 | tls_trust_file /etc/ssl/certs/ca-certificates.crt 6 | logfile /var/log/msmtp 7 | 8 | account {{ smtp_account_name }} 9 | host {{ smtp_address }} 10 | port {{ smtp_port | default('587') }} 11 | from {{ smtp_from }} 12 | user {{ smtp_user }} 13 | password {{ smtp_password }} 14 | 15 | account default: {{ smtp_account_name }} 16 | maildomain: {{ ansible_fqdn }} 17 | 18 | aliases /etc/aliases 19 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/templates/smartd.conf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | {% if smartd_config is defined and smartd_config | length %} 3 | {{ smartd_config }} 4 | {% else %} 5 | DEVICESCAN -a -o on -S on -n standby,q -s (L/../../6/03|S/../.././02) -W 4,35,40 -m root 6 | {% endif %} 7 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/templates/smb.conf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | #======================== Global Settings ========================# 3 | {% if samba_global is defined and samba_global | length %} 4 | {{ samba_global }} 5 | {% else %} 6 | [global] 7 | server string = Ubuntu SMB Server 8 | server min protocol = SMB3 9 | server multi channel support = yes 10 | server role = standalone server 11 | 12 | ## user settings 13 | unix password sync = yes 14 | passwd program = /usr/bin/passwd %u 15 | passwd chat = *Enter\snew\s*\spassword:* %n\n *Retype\snew\s*\spassword:* %n\n *password\supdated\ssuccessfully* . 
16 | 17 | ## permissions 18 | browseable = yes 19 | map to guest = bad user 20 | access based share enum = yes 21 | create mode = 0664 22 | directory mode = 0775 23 | 24 | ## printers 25 | load printers = no 26 | printcap name = /dev/null 27 | disable spoolss = yes 28 | 29 | ## logging 30 | logging = systemd 31 | log level = 3 passdb:3 auth:3 vfs:3 32 | 33 | ## networking osx/linux specific 34 | bind interfaces only = yes 35 | dns proxy = no 36 | disable netbios = yes 37 | smb ports = 445 38 | name resolve order = host bcast 39 | 40 | ## vfs objects 41 | vfs objects = acl_xattr catia fruit streams_xattr io_uring 42 | 43 | ## osx settings 44 | spotlight = yes 45 | fruit:aapl = yes 46 | fruit:model = RackMac 47 | fruit:copyfile = yes 48 | fruit:nfs_aces = yes 49 | fruit:posix_rename = yes 50 | fruit:veto_appledouble = no 51 | fruit:wipe_intentionally_left_blank_rfork = yes 52 | fruit:delete_empty_adfiles = yes 53 | fruit:locking = netatalk 54 | fruit:encoding = native 55 | fruit:resource = file 56 | fruit:metadata = netatalk 57 | fruit:zero_file_id = yes 58 | durable handles = yes 59 | ea support = yes 60 | kernel oplocks = no 61 | kernel share modes = no 62 | posix locking = no 63 | {% endif %} 64 | #======================= Share Definitions =======================# 65 | {% if samba_shares is defined and samba_shares | length %} 66 | {{ samba_shares }} 67 | {% else %} 68 | # no shares defined 69 | {% endif %} 70 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/templates/zed.rc.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | ZED_DEBUG_LOG="/var/log/zed.debug.log" 3 | ZED_EMAIL_ADDR="{{ email }}" 4 | ZED_EMAIL_OPTS="-s '@SUBJECT@' @ADDRESS@ -r {{ smtp_from }}" 5 | ZED_NOTIFY_VERBOSE=1 6 | ZED_NOTIFY_DATA=1 7 | ZED_USE_ENCLOSURE_LEDS=1 8 | ZED_SCRUB_AFTER_RESILVER=0 9 | ZED_SYSLOG_SUBCLASS_EXCLUDE="history_event" 10 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/templates/zfs-scrub@.service.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | [Unit] 3 | Description=zpool scrub on %i 4 | 5 | [Service] 6 | Nice=19 7 | IOSchedulingClass=idle 8 | KillSignal=SIGINT 9 | ExecStart=/usr/sbin/zpool scrub %i 10 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/nas/templates/zfs-scrub@.timer.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | [Unit] 3 | Description=zpool scrub on %i 4 | 5 | [Timer] 6 | OnCalendar={{ scrub_pools_timer.on_calendar | default('*-*-1,15 01:00:00') }} 7 | AccuracySec={{ scrub_pools_timer.accuracy | default('1h') }} 8 | Persistent=true 9 | 10 | [Install] 11 | WantedBy=multi-user.target 12 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/ubuntu/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | apt_install_packages: 3 | - apt-transport-https 4 | - arptables 5 | - ca-certificates 6 | - curl 7 | - ebtables 8 | - gdisk 9 | - hdparm 10 | - htop 11 | - iputils-ping 12 | - ipvsadm 13 | - lvm2 14 | - net-tools 15 | - nfs-common 16 | - ntpdate 17 | - nvme-cli 18 | - open-iscsi 19 | - psmisc 20 | - smartmontools 21 | - socat 22 | - software-properties-common 23 | - 
unattended-upgrades 24 | - unzip 25 | - vim 26 | 27 | apt_remove_packages: 28 | - apparmor 29 | - apport 30 | - bcache-tools 31 | - btrfs-progs 32 | - byobu 33 | - cloud-init 34 | - cloud-guest-utils 35 | - cloud-initramfs-copymods 36 | - cloud-initramfs-dyn-netconf 37 | - friendly-recovery 38 | - fwupd 39 | - landscape-common 40 | - lxd-agent-loader 41 | - ntfs-3g 42 | - open-vm-tools 43 | - plymouth 44 | - plymouth-theme-ubuntu-text 45 | - popularity-contest 46 | - snapd 47 | - sosreport 48 | - tmux 49 | - ubuntu-advantage-tools 50 | - ufw 51 | - vim.tiny 52 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/ubuntu/tasks/disks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: disks | configure smartd 3 | ansible.builtin.template: 4 | src: smartd.conf.j2 5 | dest: /etc/smartd.conf 6 | mode: 0644 7 | 8 | - name: disks | restart smartd service 9 | ansible.builtin.systemd: 10 | name: smartd 11 | daemon_reload: true 12 | enabled: true 13 | state: restarted 14 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/ubuntu/tasks/filesystem.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update user open file limit 3 | ansible.builtin.blockinfile: 4 | path: /etc/security/limits.conf 5 | mode: 0644 6 | create: true 7 | block: | 8 | * hard nofile 97816 9 | * soft nofile 97816 10 | session required pam_limits.so 11 | 12 | - name: filesystem | increase max_user_watches 13 | ansible.posix.sysctl: 14 | name: fs.inotify.max_user_watches 15 | value: "524288" 16 | state: present 17 | sysctl_file: /etc/sysctl.d/99-max_user_watches.conf 18 | 19 | - name: filesystem | increase max_user_instances 20 | ansible.posix.sysctl: 21 | name: fs.inotify.max_user_instances 22 | value: "512" 23 | state: present 24 | sysctl_file: /etc/sysctl.d/99-max_user_instances.conf 25 | 26 | - name: filesystem | disable swap at runtime 27 | ansible.builtin.command: swapoff -a 28 | when: ansible_swaptotal_mb > 0 29 | 30 | - name: filesystem | disable swap on boot 31 | ansible.posix.mount: 32 | name: "{{ item }}" 33 | fstype: swap 34 | state: absent 35 | loop: 36 | - swap 37 | - none 38 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/ubuntu/tasks/group.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: system | manage groups 3 | ansible.builtin.group: 4 | name: "{{ group.name | default(group) }}" 5 | gid: "{{ group.gid | default(omit) }}" 6 | local: "{{ group.local | default(omit) }}" 7 | non_unique: "{{ group.non_unique | default(omit) }}" 8 | state: "{{ group.state | default('present') }}" 9 | system: "{{ group.system | default(omit) }}" 10 | loop: "{{ managed_groups }}" 11 | loop_control: 12 | label: "{{ group.name | default(group) }}" 13 | loop_var: group 14 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/ubuntu/tasks/grub.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: grub | check for existence of grub 3 | ansible.builtin.stat: 4 | path: /etc/default/grub 5 | register: grub_result 6 | 7 | - name: grub | check if apparmor is disabled 8 | ansible.builtin.shell: grep "GRUB_CMDLINE_LINUX=" /etc/default/grub | grep -c "apparmor=0" 9 | register: apparmor_status 10 | 
ignore_errors: true 11 | 12 | - name: grub | check if mitigations is disabled 13 | ansible.builtin.shell: grep "GRUB_CMDLINE_LINUX=" /etc/default/grub | grep -c "mitigations=off" 14 | register: mitigations_status 15 | ignore_errors: true 16 | 17 | - name: grub | disable apparmor 18 | ansible.builtin.replace: 19 | path: /etc/default/grub 20 | regexp: '^(GRUB_CMDLINE_LINUX=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$' 21 | replace: '\1 {{ option }}={{ value }}\2' 22 | vars: 23 | option: apparmor 24 | value: 0 25 | when: 26 | - grub_result.stat.exists 27 | - apparmor_status.stdout == "0" 28 | 29 | - name: grub | disable mitigations 30 | ansible.builtin.replace: 31 | path: /etc/default/grub 32 | regexp: '^(GRUB_CMDLINE_LINUX=(?:(?![" ]{{ option | regex_escape }}=).)*)(?:[" ]{{ option | regex_escape }}=\S+)?(.*")$' 33 | replace: '\1 {{ option }}={{ value }}\2' 34 | vars: 35 | option: mitigations 36 | value: "off" 37 | when: 38 | - grub_result.stat.exists 39 | - mitigations_status.stdout == "0" 40 | 41 | - name: grub | run grub-mkconfig 42 | ansible.builtin.command: update-grub 43 | when: 44 | - grub_result.stat.exists 45 | - apparmor_status.stdout == "0" 46 | - mitigations_status.stdout == "0" 47 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/ubuntu/tasks/kernel.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: kernel | enable kernel modules runtime 3 | community.general.modprobe: 4 | name: "{{ item }}" 5 | state: present 6 | loop: 7 | - br_netfilter 8 | - overlay 9 | - rbd 10 | 11 | - name: kernel | enable kernel modules on boot 12 | ansible.builtin.copy: 13 | mode: 0644 14 | content: "{{ item }}" 15 | dest: "/etc/modules-load.d/{{ item }}.conf" 16 | loop: 17 | - br_netfilter 18 | - overlay 19 | - rbd 20 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/ubuntu/tasks/locale.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: locale | set timezone 3 | community.general.timezone: 4 | name: "{{ timezone | default('America/New_York') }}" 5 | 6 | - name: locale | copy timesyncd config 7 | ansible.builtin.copy: 8 | mode: 0644 9 | content: | 10 | [Time] 11 | NTP={{ ntp_servers.primary | default("") | join(" ") }} 12 | FallbackNTP={{ ntp_servers.fallback | join(" ") }} 13 | dest: /etc/systemd/timesyncd.conf 14 | when: 15 | - ntp_servers.primary is defined 16 | - ntp_servers.primary is iterable 17 | - ntp_servers.primary | length > 0 18 | - ntp_servers.fallback is defined 19 | - ntp_servers.fallback is iterable 20 | - ntp_servers.fallback | length > 0 21 | 22 | - name: locale | start systemd service 23 | ansible.builtin.systemd: 24 | name: systemd-timesyncd 25 | enabled: true 26 | state: started 27 | 28 | - name: locale | restart systemd service 29 | ansible.builtin.systemd: 30 | name: systemd-timesyncd 31 | daemon_reload: true 32 | enabled: true 33 | state: restarted 34 | 35 | - name: locale | run timedatectl status 36 | ansible.builtin.command: /usr/bin/timedatectl show 37 | changed_when: false 38 | check_mode: false 39 | register: timedatectl_result 40 | 41 | - name: locale | enable ntp 42 | ansible.builtin.command: /usr/bin/timedatectl set-ntp true 43 | when: 44 | - "'NTP=no' in timedatectl_result.stdout" 45 | -------------------------------------------------------------------------------- 
/infrastructure/ansible/roles/ubuntu/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include: locale.yml 3 | tags: 4 | - locale 5 | 6 | - include: packages.yml 7 | tags: 8 | - packages 9 | 10 | - include: kernel.yml 11 | tags: 12 | - kernel 13 | 14 | - include: network.yml 15 | tags: 16 | - network 17 | 18 | - include: filesystem.yml 19 | tags: 20 | - filesystem 21 | 22 | - include: disks.yml 23 | when: inventory_hostname not in groups['storage-nodes'] 24 | tags: 25 | - disks 26 | 27 | - include: power.yml 28 | tags: 29 | - power 30 | 31 | - include: grub.yml 32 | tags: 33 | - grub 34 | 35 | - include: unattended-upgrades.yml 36 | tags: 37 | - unattended-upgrades 38 | 39 | - include: group.yml 40 | tags: 41 | - group 42 | 43 | - include: user.yml 44 | tags: 45 | - user 46 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/ubuntu/tasks/network.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: network | hostname to inventory hostname 3 | ansible.builtin.hostname: 4 | name: "{{ inventory_hostname }}" 5 | when: 6 | - ansible_hostname != inventory_hostname 7 | 8 | - name: network | update /etc/hosts to include hostname 9 | ansible.builtin.blockinfile: 10 | path: /etc/hosts 11 | create: true 12 | mode: 0644 13 | block: | 14 | 127.0.0.1 localhost 15 | 127.0.1.1 {{ inventory_hostname }} 16 | 17 | # The following lines are desirable for IPv6 capable hosts 18 | ::1 ip6-localhost ip6-loopback 19 | fe00::0 ip6-localnet 20 | ff00::0 ip6-mcastprefix 21 | ff02::1 ip6-allnodes 22 | ff02::2 ip6-allrouters 23 | ff02::3 ip6-allhosts 24 | 25 | - name: network | check original netplan 26 | ansible.builtin.stat: 27 | path: /etc/netplan/00-installer-config.yaml 28 | register: original_netplan 29 | 30 | - name: network | remove original netplan 31 | ansible.builtin.file: 32 | path: /etc/netplan/00-installer-config.yaml 33 | state: absent 34 | 35 | - name: network | install new netplan 36 | ansible.builtin.template: 37 | src: netplan-config.yaml.j2 38 | dest: /etc/netplan/config.yaml 39 | mode: 0644 40 | lstrip_blocks: true 41 | validate: "netplan apply %s" 42 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/ubuntu/tasks/power.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # - name: power | disable single power button press shutdown 3 | # ansible.builtin.lineinfile: 4 | # path: /etc/systemd/logind.conf 5 | # regexp: "{{ item.setting }}" 6 | # line: "{{ item.setting }}={{ item.value }}" 7 | # loop: 8 | # - {setting: HandlePowerKey, value: ignore} 9 | 10 | - name: power | restart logind systemd service 11 | ansible.builtin.systemd: 12 | name: systemd-logind 13 | daemon_reload: true 14 | enabled: true 15 | state: restarted 16 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/ubuntu/tasks/unattended-upgrades.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: unattended-upgrades | copy 20auto-upgrades config 3 | ansible.builtin.blockinfile: 4 | path: /etc/apt/apt.conf.d/20auto-upgrades 5 | mode: 0644 6 | create: true 7 | block: | 8 | APT::Periodic::Update-Package-Lists "1"; 9 | APT::Periodic::Download-Upgradeable-Packages "1"; 10 | APT::Periodic::AutocleanInterval "7"; 11 | APT::Periodic::Unattended-Upgrade "1"; 
12 | 13 | - name: unattended-upgrades | copy 50unattended-upgrades config 14 | ansible.builtin.blockinfile: 15 | path: /etc/apt/apt.conf.d/50unattended-upgrades 16 | mode: 0644 17 | create: true 18 | block: | 19 | Unattended-Upgrade::Automatic-Reboot "false"; 20 | Unattended-Upgrade::Remove-Unused-Dependencies "true"; 21 | Unattended-Upgrade::Allowed-Origins { 22 | "${distro_id} stable"; 23 | "${distro_id} ${distro_codename}-security"; 24 | "${distro_id} ${distro_codename}-updates"; 25 | }; 26 | 27 | - name: unattended-upgrades | restart unattended-upgrades service 28 | ansible.builtin.systemd: 29 | name: unattended-upgrades 30 | daemon_reload: true 31 | enabled: true 32 | state: restarted 33 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/ubuntu/templates/netplan-config.yaml.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | --- 3 | network: 4 | version: 2 5 | renderer: networkd 6 | ethernets: 7 | enp0s0f0: 8 | addresses: 9 | - {{ ansible_default_ipv4.address }}/24 10 | routes: 11 | - to: default 12 | via: {{ ansible_default_ipv4.gateway }} 13 | nameservers: 14 | addresses: 15 | - {{ ansible_default_ipv4.gateway }} 16 | search: [] 17 | -------------------------------------------------------------------------------- /infrastructure/ansible/roles/ubuntu/templates/smartd.conf.j2: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | {% if smartd_config is defined %} 3 | {{ smartd_config }} 4 | {% else %} 5 | DEVICESCAN -a -o on -S on -n standby,q -s (L/../../6/03|S/../.././02) -W 4,35,40 -m root 6 | {% endif %} 7 | -------------------------------------------------------------------------------- /infrastructure/mikrotik/scripts/blocky.rsc: -------------------------------------------------------------------------------- 1 | :local blocky 10.10.2.53 2 | :local fallbackDns 1.1.1.1 3 | :local currentDns [/ip dns get servers] 4 | :local status 5 | 6 | :do { 7 | :set status [/tool fetch url="http://$blocky:4000/api/blocking/status" as-value output=user] 8 | :if ($currentDns != $blocky) do={ 9 | /ip dns set servers=$blocky 10 | /ip dns cache flush 11 | :log info "Blocky is up, ad-blocking is enabled" 12 | } 13 | } on-error={ 14 | :if ($currentDns != $fallbackDns) do={ 15 | /ip dns set servers=$fallbackDns 16 | /ip dns cache flush 17 | :log error "Blocky is down, switched to Cloudflare DNS" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /infrastructure/terraform/cloudflare/firewall_rules.tf: -------------------------------------------------------------------------------- 1 | ## GeoIP blocking 2 | 3 | resource "cloudflare_filter" "countries" { 4 | zone_id = lookup(data.cloudflare_zones.domain.zones[0], "id") 5 | description = "Expression to block all countries except US, CA and AU" 6 | expression = "(ip.geoip.country ne \"US\" and ip.geoip.country ne \"CA\" and ip.geoip.country ne \"AU\")" 7 | } 8 | 9 | resource "cloudflare_firewall_rule" "countries" { 10 | zone_id = lookup(data.cloudflare_zones.domain.zones[0], "id") 11 | description = "Firewall rule to block all countries except US, CA and AU" 12 | filter_id = cloudflare_filter.countries.id 13 | action = "block" 14 | } 15 | -------------------------------------------------------------------------------- /infrastructure/terraform/cloudflare/main.tf: 
-------------------------------------------------------------------------------- 1 | terraform { 2 | 3 | required_providers { 4 | cloudflare = { 5 | source = "cloudflare/cloudflare" 6 | version = "4.20.0" 7 | } 8 | http = { 9 | source = "hashicorp/http" 10 | version = "3.4.0" 11 | } 12 | sops = { 13 | source = "carlpett/sops" 14 | version = "1.0.0" 15 | } 16 | } 17 | } 18 | 19 | data "sops_file" "cloudflare_secrets" { 20 | source_file = "secret.sops.yaml" 21 | } 22 | 23 | provider "cloudflare" { 24 | email = data.sops_file.cloudflare_secrets.data["cloudflare_email"] 25 | api_key = data.sops_file.cloudflare_secrets.data["cloudflare_apikey"] 26 | } 27 | 28 | data "cloudflare_zones" "domain" { 29 | filter { 30 | name = data.sops_file.cloudflare_secrets.data["cloudflare_domain"] 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /infrastructure/terraform/cloudflare/page_rules.tf: -------------------------------------------------------------------------------- 1 | resource "cloudflare_page_rule" "plex_bypass_cache" { 2 | zone_id = lookup(data.cloudflare_zones.domain.zones[0], "id") 3 | target = "test.${data.sops_file.cloudflare_secrets.data["cloudflare_domain"]}/*" 4 | status = "active" 5 | 6 | actions { 7 | cache_level = "bypass" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /infrastructure/terraform/cloudflare/secret.sops.yaml: -------------------------------------------------------------------------------- 1 | kind: Secret 2 | cloudflare_email: ENC[AES256_GCM,data:CXECLcLPtc8IrkbOwb13dos=,iv:BCXunvEMkPR7B7PIN+o5KG36PZ3FUCVpBMlEZNTogz4=,tag:deQac4Zvy1Yni82OEkuCPA==,type:str] 3 | cloudflare_apikey: ENC[AES256_GCM,data:+Ot35J7TCdSHsZWKDquLAtn2RBP2/JIJhVpkBM3VuLjgCPAlUw==,iv:rvuCIamcnDlgGUIqVKchsbhyk87cYgCDiVLYtTTMvas=,tag:xA4lnB7rH4N1psMARB7chw==,type:str] 4 | cloudflare_domain: ENC[AES256_GCM,data:gzHKhsbwXw==,iv:H2E4VtZuqCBr4O0S6s9BvlMOI2nFYQiT7R6hMEYQA1k=,tag:+95r4+ZzBk0yVcmHBLMSLA==,type:str] 5 | mailgun_cert: ENC[AES256_GCM,data:0z3foNwXFfRgsySHGdkGJteck3R/lQU5jadjmBrTJPq9DMScBwpwXbZDb2tsSK9qXOYZnzyMbAamZjt6HrXEQWcXPXIRRI0t1on3EPbBDmPowTgrqMOw1vTWiLOPoIapbL6KUXyJS5pwHVdcfjk7R+jdm8YVqvKzsyAoVnUXTYZzum033lA/PFqkW2t1fpYtx9aaRoWgoDeSQsQptN1+Pj5Ginocnz+i4SY7jDju0hFRORn5f+3iZ5orSX7rBRMqoyMexZuPjk1ZpAAY/lpp+FVhVeEqDZsBuZS86AtoZHHJ,iv:3ghWl/NBUy7xGESuYBinwi/WnL8TJcKfkjDvMaOUt0M=,tag:FqRlrf8Wl78N9z1HEtSw+w==,type:str] 6 | sops: 7 | kms: [] 8 | gcp_kms: [] 9 | azure_kv: [] 10 | hc_vault: [] 11 | age: 12 | - recipient: age1vfgg4n4snp0ktjm83gsv3nptdw39mw8q7fw7dzghgfpllc040vgsd6yypm 13 | enc: | 14 | -----BEGIN AGE ENCRYPTED FILE----- 15 | YWdlLWVuY3J5cHRpb24ub3JnL3YxCi0+IFgyNTUxOSAxVEdIUnMrclc4R2toUGoz 16 | Q3Y4NEJuZ29Da28wMFdVYXMxMk4yUU5nQkN3CmZnaURQd2ZBWE1KSmJjdklJYXhL 17 | bi9JMndpTUQ4Q1cwcVVhMWwwMGYxUzQKLS0tIHl1TmVlOEpkNUl2aVp6V1ltd1F1 18 | aE4rZW0zRHlmWHhMc3Zoa3FxNjVOckUKJWd7Ggn8MoWH3/Xeb48TfRlaCozQINOp 19 | 29Bzx9hH1W08B7/l8JGDUULAi0SyDc4+4f6Z/92/G4gOnVHCl0a50w== 20 | -----END AGE ENCRYPTED FILE----- 21 | lastmodified: "2022-01-17T17:50:17Z" 22 | mac: ENC[AES256_GCM,data:OyODxjGVy8T3Ncryoc2S0po6VRUN/BYtst3vUjhKLM42bNsN8YM2Zg0O0+qZ9Zs0J/IXpjofLCRM4WstVABFY9NjXV1QJZKH2gLFNMd+ato89qXROqbZk/cvZQMPwy55CT+DLsVF75T0wezA1qIIoOwz+Lf8sXc3o/8B0m/mVQ8=,iv:DLDD+dch5iaiWsdXxS/jm4H3j72ktoUdSMBpDK14FWc=,tag:GOTUDs46+w/ym/a8zWd+aw==,type:str] 23 | pgp: [] 24 | unencrypted_regex: ^(kind)$ 25 | version: 3.7.1 26 | -------------------------------------------------------------------------------- 
/infrastructure/terraform/cloudflare/zone_settings.tf: -------------------------------------------------------------------------------- 1 | resource "cloudflare_zone_settings_override" "cloudflare_settings" { 2 | zone_id = lookup(data.cloudflare_zones.domain.zones[0], "id") 3 | settings { 4 | # /ssl-tls 5 | ssl = "strict" 6 | # /ssl-tls/edge-certificates 7 | always_use_https = "on" 8 | min_tls_version = "1.0" 9 | opportunistic_encryption = "on" 10 | tls_1_3 = "zrt" 11 | automatic_https_rewrites = "on" 12 | universal_ssl = "on" 13 | # /firewall/settings 14 | browser_check = "on" 15 | challenge_ttl = 1800 16 | privacy_pass = "on" 17 | security_level = "medium" 18 | # /speed/optimization 19 | brotli = "on" 20 | minify { 21 | css = "on" 22 | js = "on" 23 | html = "on" 24 | } 25 | rocket_loader = "on" 26 | # /caching/configuration 27 | always_online = "off" 28 | development_mode = "off" 29 | # /network 30 | http3 = "on" 31 | zero_rtt = "on" 32 | ipv6 = "on" 33 | websockets = "on" 34 | opportunistic_onion = "on" 35 | pseudo_ipv4 = "off" 36 | ip_geolocation = "on" 37 | # /content-protection 38 | email_obfuscation = "on" 39 | server_side_exclude = "on" 40 | hotlink_protection = "off" 41 | # /workers 42 | security_header { 43 | enabled = false 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /jsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ESNext", 4 | "lib": ["ESNext"], 5 | "moduleResolution": "NodeNext", 6 | "module": "NodeNext", 7 | "resolveJsonModule": true, 8 | "strict": true, 9 | "rootDir": "./gluctl", 10 | "baseUrl": ".", 11 | "paths": { 12 | "lib": ["./gluctl/lib"] 13 | } 14 | }, 15 | "exclude": ["node_modules", "**/node_modules/*"] 16 | } 17 | --------------------------------------------------------------------------------