├── .gitignore ├── .taskfiles ├── gitlab.yaml ├── helm.yaml ├── helmfile.yaml ├── kubernetes.yaml ├── terraform.yaml └── terragrunt.yaml ├── LICENSE ├── README.md ├── Taskfile.yaml ├── appliances └── printing-server │ └── docker-compose.yaml ├── docs ├── img │ ├── logo.png │ └── logo.svg └── mesh │ ├── k0s │ └── install-k0s-on-nodes.md │ ├── k3s │ └── emergency-dont-ask.md │ ├── odroid │ └── install-os-from-network.md │ ├── opnsense │ ├── bgp-for-metallb.pdf │ ├── install-wireguard.pdf │ └── ip-fixed-ranges.md │ ├── orange-pi │ ├── install-ubuntu-server-on-ssd-crosstalk-solutions.pdf │ └── install-ubuntu-server-on-ssd.md │ └── talos │ ├── Add-additional-cluster-certs.md │ ├── Add-additonal-disks-to-nodes.md │ └── how-to-upgrade.md ├── infrastructure └── terraform │ ├── .gitkeep │ ├── modules │ ├── talos-cluster │ │ ├── main.tf │ │ ├── terraform.tf │ │ └── variables.tf │ └── talos-vms │ │ ├── instances.tf │ │ ├── storage.tf │ │ ├── templates │ │ └── xsl │ │ │ └── global.xsl │ │ ├── terraform.tf │ │ └── variables.tf │ ├── opnsense │ ├── firewall.tf │ ├── terraform.tf │ ├── terragrunt.hcl │ ├── unbound.tf │ └── variables.tf │ ├── talos │ ├── kubernetes-01.tf │ ├── templates │ │ ├── controlplane.yaml │ │ └── worker.yaml │ ├── terraform.tf │ ├── terragrunt.hcl │ └── variables.tf │ ├── terragrunt.hcl │ └── vms │ ├── compute-10.tf │ ├── compute-20.tf │ ├── main.tf │ ├── templates │ └── xsl │ │ └── attach-usb.xsl │ ├── terraform.tf │ ├── terragrunt.hcl │ └── variables.tf ├── kubernetes ├── applications │ ├── .gitkeep │ ├── 00-special-namespaces │ │ ├── csi-driver-nfs.yaml │ │ ├── csi-driver-s3.yaml │ │ ├── home-assistant.yaml │ │ ├── kustomization.yaml │ │ ├── metallb.yaml │ │ ├── omada-controller.yaml │ │ ├── wireguard.yaml │ │ └── zigbee2mqtt.yaml │ ├── autoheater │ │ ├── base │ │ │ ├── deployment.yaml │ │ │ └── kustomization.yaml │ │ └── kubernetes-01 │ │ │ ├── configmap-env.yaml │ │ │ ├── configmap.yaml │ │ │ └── kustomization.yaml │ ├── blocky │ │ ├── base │ │ │ ├── 
deployment.yaml │ │ │ └── kustomization.yaml │ │ └── kubernetes-01 │ │ │ ├── configmap.yaml │ │ │ ├── kustomization.yaml │ │ │ └── service.yaml │ ├── cert-manager │ │ ├── extra │ │ │ ├── clusterissuer-production.yaml │ │ │ ├── clusterissuer-self-signed.yaml │ │ │ ├── clusterissuer-staging.yaml │ │ │ ├── externalsecret-cloudflare-api-token.yaml │ │ │ └── kustomization.yaml │ │ └── operator │ │ │ ├── Chart.yaml │ │ │ ├── values-global.yaml │ │ │ └── values.yaml │ ├── congatudo │ │ ├── base │ │ │ ├── configmap-config.yaml │ │ │ ├── deployment.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── service-exposed.yaml │ │ │ └── service.yaml │ │ └── kubernetes-01 │ │ │ ├── ingress.yaml │ │ │ └── kustomization.yaml │ ├── csi-driver-nfs │ │ ├── extra │ │ │ ├── kustomization.yaml │ │ │ └── storageClass-standard-nfs.yaml │ │ └── operator │ │ │ ├── Chart.yaml │ │ │ ├── values-global.yaml │ │ │ └── values.yaml │ ├── csi-driver-s3 │ │ ├── extra │ │ │ ├── externalsecret-csi-driver-s3-credentials.yaml │ │ │ └── kustomization.yaml │ │ └── operator │ │ │ ├── Chart.yaml │ │ │ ├── values-global.yaml │ │ │ └── values.yaml │ ├── ddns-updater │ │ ├── base │ │ │ ├── configmap-config.yaml │ │ │ ├── deployment.yaml │ │ │ └── kustomization.yaml │ │ └── kubernetes-01 │ │ │ ├── externalsecret-cloudflare-credentials.yaml │ │ │ └── kustomization.yaml │ ├── external-secrets │ │ ├── extra │ │ │ ├── clustersecretstore-gitlab.yaml │ │ │ └── kustomization.yaml │ │ └── operator │ │ │ ├── Chart.yaml │ │ │ └── values.yaml │ ├── forecastle │ │ ├── extra │ │ │ ├── compute-10.yaml │ │ │ ├── compute-20.yaml │ │ │ ├── congatudo.yaml │ │ │ ├── home-assistant.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── omada-controller.yaml │ │ │ ├── opnsense.yaml │ │ │ ├── storage-01.yaml │ │ │ └── vaultwarden.yaml │ │ └── operator │ │ │ ├── Chart.yaml │ │ │ ├── values-kubernetes-01.yaml │ │ │ └── values.yaml │ ├── home-assistant │ │ ├── base │ │ │ ├── deployment.yml │ │ │ ├── kustomization.yaml │ │ │ └── service.yml │ │ └── 
kubernetes-01 │ │ │ ├── certificate.yaml │ │ │ ├── configmap-env.yaml │ │ │ ├── configmap.yaml │ │ │ ├── ingress.yml │ │ │ ├── kustomization.yaml │ │ │ └── pvc.yml │ ├── ingress-nginx │ │ ├── Chart.yaml │ │ ├── values-global.yaml │ │ ├── values-kubernetes-01.yaml │ │ └── values.yaml │ ├── metallb │ │ ├── extra │ │ │ ├── base │ │ │ │ ├── kustomization.yaml │ │ │ │ └── l2Advertisement.yaml │ │ │ └── kubernetes-01 │ │ │ │ ├── ipAddressPool.yaml │ │ │ │ └── kustomization.yaml │ │ └── operator │ │ │ ├── Chart.yaml │ │ │ ├── values-global.yaml │ │ │ └── values.yaml │ ├── metrics-server │ │ ├── Chart.yaml │ │ ├── values-kubernetes-01.yaml │ │ └── values.yaml │ ├── mongodb │ │ ├── Chart.yaml │ │ ├── templates │ │ │ ├── _helpers.tpl │ │ │ └── external-secrets.yaml │ │ ├── values-kubernetes-01.yaml │ │ └── values.yaml │ ├── nats │ │ ├── Chart.yaml │ │ ├── templates │ │ │ ├── _helpers.tpl │ │ │ └── external-secrets.yaml │ │ ├── values-kubernetes-01.yaml │ │ └── values.yaml │ ├── omada-controller │ │ ├── base │ │ │ ├── configmap-env.yaml │ │ │ ├── cronjob-backup.yaml │ │ │ ├── deployment.yml │ │ │ ├── kustomization.yaml │ │ │ ├── pvc-omada-backup.yaml │ │ │ ├── pvc-omada-data.yml │ │ │ ├── pvc-omada-logs.yml │ │ │ └── service.yml │ │ └── kubernetes-01 │ │ │ ├── certificate.yaml │ │ │ ├── externalsecret-mongodb-user-credentials.yaml │ │ │ ├── externalsecret-omada-user-credentials.yaml │ │ │ ├── ingress.yml │ │ │ ├── kustomization.yaml │ │ │ └── overlays │ │ │ └── service.yml │ ├── vaultwarden │ │ ├── base │ │ │ ├── cronjob-backup.yaml │ │ │ ├── deployment.yaml │ │ │ ├── kustomization.yaml │ │ │ ├── pvc-backup.yaml │ │ │ ├── pvc.yaml │ │ │ └── service.yaml │ │ └── kubernetes-01 │ │ │ ├── certificate.yaml │ │ │ ├── configmap.yaml │ │ │ ├── ingress.yaml │ │ │ └── kustomization.yaml │ └── wireguard │ │ ├── base │ │ ├── configmap.yaml │ │ ├── cronjob-backup.yaml │ │ ├── deployment.yaml │ │ ├── kustomization.yaml │ │ ├── pvc-backup.yaml │ │ ├── pvc.yaml │ │ └── service.yaml │ │ └── 
kubernetes-01 │ │ ├── configmap-patch.yaml │ │ ├── kustomization.yaml │ │ └── service-patch.yaml └── helmfile │ ├── base.yaml │ └── kubernetes-01.yaml └── scripts ├── .gitkeep ├── conga └── update-hosts-congatudo-redirection.sh └── prepare-host-ubuntu.sh /.gitignore: -------------------------------------------------------------------------------- 1 | # Autogenerated stuff 2 | **/temp/* 3 | 4 | # Local .terraform directories 5 | **/.terraform/* 6 | **/.terraform.lock.hcl 7 | 8 | # .tfstate files 9 | *.tfstate 10 | *.tfstate.* 11 | 12 | # Crash log files 13 | crash.log 14 | crash.*.log 15 | 16 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 17 | # password, private keys, and other secrets. These should not be part of version 18 | # control as they are data points which are potentially sensitive and subject 19 | # to change depending on the environment. 20 | *.tfvars 21 | *.tfvars.json 22 | 23 | # Ignore override files as they are usually used to override resources locally and so 24 | # are not checked in 25 | override.tf 26 | override.tf.json 27 | *_override.tf 28 | *_override.tf.json 29 | 30 | # Include override files you do wish to add to version control using negated pattern 31 | # !example_override.tf 32 | 33 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 34 | # example: *tfplan* 35 | 36 | # Ignore CLI configuration files 37 | .terraformrc 38 | terraform.rc 39 | 40 | # Ignore Helm temporary auto-generated files 41 | *.tgz 42 | Chart.lock 43 | 44 | # Ignore autogenerated VMs SSH keys 45 | **/*.pem 46 | -------------------------------------------------------------------------------- /.taskfiles/gitlab.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | vars: 4 | # Fixed variables 5 | GITLABCTL_VERSION: "1.43.0" 6 | GITLAB_PROJECT_ID: "49083217" # homelab-ops 7 | 8 | # Dynamic variables 9 | GITLAB_TEMPORARY_DIR: "{{ .TEMPORARY_DIR 
}}/gitlab" 10 | GITLAB_GLAB_TEMPORARY_DIR: "{{ .GITLAB_TEMPORARY_DIR }}/glab" 11 | GITLAB_ACCESS_TOKENS_TEMPORARY_DIR: "{{ .GITLAB_TEMPORARY_DIR }}/access-tokens" 12 | GITLAB_ACCESS_TOKEN_RO_EXPIRY_DATE: 13 | sh: date +%Y-%m-%d -d "+6 months" 14 | GITLAB_ACCESS_TOKEN_RW_EXPIRY_DATE: 15 | sh: date +%Y-%m-%d -d "+5 days" 16 | 17 | tasks: 18 | bootstrap: 19 | desc: Bootstrap required stuff for other steps (task gitlab:bootstrap) 20 | internal: true 21 | cmds: 22 | - mkdir -p {{ .GITLAB_TEMPORARY_DIR }} 23 | - mkdir -p {{ .GITLAB_GLAB_TEMPORARY_DIR }} 24 | - mkdir -p {{ .GITLAB_ACCESS_TOKENS_TEMPORARY_DIR }} 25 | status: 26 | - test -d {{ .GITLAB_TEMPORARY_DIR }} 27 | - test -d {{ .GITLAB_GLAB_TEMPORARY_DIR }} 28 | - test -d {{ .GITLAB_ACCESS_TOKENS_TEMPORARY_DIR }} 29 | 30 | 31 | install-cli: 32 | desc: Install gitlab CLI (task gitlab:install-cli) 33 | internal: true 34 | deps: 35 | - bootstrap 36 | cmds: 37 | - wget -O {{ .GITLAB_GLAB_TEMPORARY_DIR }}/glab.tar.gz https://gitlab.com/gitlab-org/cli/-/releases/v{{ .GITLABCTL_VERSION }}/downloads/glab_{{ .GITLABCTL_VERSION }}_Linux_x86_64.tar.gz 38 | - tar -C {{ .GITLAB_GLAB_TEMPORARY_DIR }} -xvf {{ .GITLAB_GLAB_TEMPORARY_DIR }}/glab.tar.gz 39 | - sudo mv {{ .GITLAB_GLAB_TEMPORARY_DIR }}/bin/glab /usr/local/bin/glab 40 | - sudo chmod +x /usr/local/bin/glab 41 | status: 42 | - | 43 | if [[ "$(glab --version | grep 'glab version' | cut -d' ' -f3)" = "{{ .GITLABCTL_VERSION }}" ]]; then exit 0; else exit 1; fi 44 | 45 | 46 | login: 47 | desc: Login against Gitlab (task gitlab:login) 48 | cmds: 49 | - glab auth login 50 | status: 51 | - gitlab auth status 52 | 53 | 54 | revoke-token: 55 | desc: Revoke an access token (task gitlab:revoke-token TOKEN_NAME=cluster-01) 56 | requires: 57 | vars: [ TOKEN_NAME ] 58 | vars: 59 | TOKEN_ID: 60 | sh: glab api projects/{{ .GITLAB_PROJECT_ID }}/access_tokens | jq 'map(select(.name == "{{ .TOKEN_NAME }}").id) | join(",")' | tr -d '"' 61 | cmds: 62 | - for: { var: TOKEN_ID, split: ',' } 63 
| cmd: glab api --silent --method DELETE projects/{{ .GITLAB_PROJECT_ID }}/access_tokens/{{ .ITEM }} | jq 64 | status: 65 | - 'if [[ "{{ .TOKEN_ID }}" = "" ]]; then exit 0; else exit 1; fi' 66 | 67 | 68 | generate-token: 69 | desc: Generate an access token with read-only permissions (task gitlab:generate-token TOKEN_NAME=cluster-01) 70 | deps: 71 | - bootstrap 72 | requires: 73 | vars: [ TOKEN_NAME ] 74 | cmds: 75 | - | 76 | glab api \ 77 | --method POST \ 78 | --header content-type:application/json \ 79 | --field name={{ .TOKEN_NAME }} \ 80 | --raw-field "scopes=[read_api]" \ 81 | --field access_level=40 \ 82 | --field expires_at={{ .GITLAB_ACCESS_TOKEN_RO_EXPIRY_DATE }} \ 83 | projects/{{ .GITLAB_PROJECT_ID }}/access_tokens | jq > {{ .GITLAB_ACCESS_TOKENS_TEMPORARY_DIR }}/{{ .TOKEN_NAME }}.json 84 | 85 | 86 | generate-supertoken: 87 | desc: Generate an access token with full permissions (task gitlab:generate-supertoken TOKEN_NAME=terraform-01) 88 | #internal: true 89 | deps: 90 | - bootstrap 91 | requires: 92 | vars: [ TOKEN_NAME ] 93 | cmds: 94 | - mkdir -p /tmp/gitlab/project_access_tokens 95 | - | 96 | glab api \ 97 | --method POST \ 98 | --header content-type:application/json \ 99 | --field name={{ .TOKEN_NAME }} \ 100 | --raw-field "scopes=[api]" \ 101 | --field access_level=40 \ 102 | --field expires_at={{ .GITLAB_ACCESS_TOKEN_RW_EXPIRY_DATE }} \ 103 | projects/{{ .GITLAB_PROJECT_ID }}/access_tokens | jq > {{ .GITLAB_ACCESS_TOKENS_TEMPORARY_DIR }}/{{ .TOKEN_NAME }}.json 104 | 105 | -------------------------------------------------------------------------------- /.taskfiles/helm.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | vars: 5 | HELM_VERSION: "v3.16.3" 6 | 7 | tasks: 8 | 9 | install-cli: 10 | internal: true 11 | desc: Install Helm (task helm:install-cli) 12 | cmds: 13 | - wget -O /tmp/helm.tar.gz https://github.com/helm/helm/releases/download/{{ .HELM_VERSION }}/helm-{{ 
.HELM_VERSION }}-linux-amd64.tar.gz 14 | 15 | - tar -xvf /tmp/helm.tar.gz -C /tmp 16 | - sudo mv /tmp/helm /usr/local/bin/helm 17 | - sudo chmod +x /usr/local/bin/helm 18 | status: 19 | - | 20 | if [[ "$(helm --version | grep 'helm version' | cut -d' ' -f3)" = "{{ .HELM_VERSION }}" ]]; then exit 0; else exit 1; fi 21 | 22 | -------------------------------------------------------------------------------- /.taskfiles/helmfile.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | vars: 5 | HELMFILE_VERSION: "0.169.1" 6 | 7 | tasks: 8 | 9 | install-cli: 10 | internal: true 11 | desc: Install Helmfile (task helmfile:install-cli) 12 | cmds: 13 | - wget -O /tmp/helmfile.tar.gz https://github.com/helmfile/helmfile/releases/download/v{{ .HELMFILE_VERSION }}/helmfile_{{ .HELMFILE_VERSION }}_linux_amd64.tar.gz 14 | - tar -xvf /tmp/helmfile.tar.gz -C /tmp 15 | - sudo mv /tmp/helmfile /usr/local/bin/helmfile 16 | - sudo chmod +x /usr/local/bin/helmfile 17 | status: 18 | - | 19 | if [[ "$(helmfile --version | grep 'helmfile version' | cut -d' ' -f3)" = "{{ .HELMFILE_VERSION }}" ]]; then exit 0; else exit 1; fi 20 | 21 | ###################################### 22 | ## APPLY COMMANDS 23 | ###################################### 24 | apply-all: &applyAllSpec 25 | internal: false 26 | silent: true 27 | desc: Perform an apply of all the releases (task helmfile:apply-all CLUSTER_NAME=cluster-01 EXTRA_ARGS=) 28 | dir: kubernetes/helmfile 29 | requires: 30 | vars: [ CLUSTER_NAME ] 31 | interactive: true 32 | prompt: Following action is interactive and will ask you before performing actions. Continue? 
33 | cmds: 34 | - cmd: kubectl config use-context {{ .CLUSTER_NAME }} 35 | - cmd: | 36 | helmfile -f $(echo "{{ .CLUSTER_NAME }}" | cut -d@ -f2).yaml apply --include-transitive-needs --interactive --diff-args '--dry-run=server' {{ .EXTRA_ARGS }} 37 | 38 | apply-one: &applyOneSpec 39 | internal: false 40 | silent: true 41 | desc: Perform an apply of selected release (task helmfile:apply-one CLUSTER_NAME=cluster-01 RELEASE_NAME=ingress-nginx EXTRA_ARGS=) 42 | interactive: true 43 | dir: kubernetes/helmfile 44 | requires: 45 | vars: [ CLUSTER_NAME, RELEASE_NAME ] 46 | prompt: Following action is interactive and will ask you before performing actions. Continue? 47 | cmds: 48 | - cmd: kubectl config use-context {{ .CLUSTER_NAME }} 49 | - cmd: helmfile -f $(echo "{{ .CLUSTER_NAME }}" | cut -d@ -f2).yaml apply --selector name={{ .RELEASE_NAME }} --include-transitive-needs --interactive --diff-args '--dry-run=server' {{ .EXTRA_ARGS }} 50 | 51 | ###################################### 52 | ## SYNC COMMANDS 53 | ###################################### 54 | sync-all: 55 | <<: *applyAllSpec 56 | desc: Perform an sync of all the releases (task helmfile:sync-all CLUSTER_NAME=cluster-01 EXTRA_ARGS=) 57 | cmds: 58 | - cmd: kubectl config use-context {{ .CLUSTER_NAME }} 59 | - cmd: helmfile -f $(echo "{{ .CLUSTER_NAME }}" | cut -d@ -f2).yaml sync --include-transitive-needs --interactive {{ .EXTRA_ARGS }} 60 | 61 | sync-one: 62 | <<: *applyOneSpec 63 | desc: Perform an sync of selected release (task helmfile:sync-one CLUSTER_NAME=cluster-01 RELEASE_NAME=ingress-nginx EXTRA_ARGS=) 64 | cmds: 65 | - cmd: kubectl config use-context {{ .CLUSTER_NAME }} 66 | - cmd: helmfile -f $(echo "{{ .CLUSTER_NAME }}" | cut -d@ -f2).yaml sync --selector name={{ .RELEASE_NAME }} --include-transitive-needs --interactive {{ .EXTRA_ARGS }} 67 | -------------------------------------------------------------------------------- /.taskfiles/kubernetes.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | vars: 5 | KUBECTL_VERSION: "v1.28.2" 6 | 7 | tasks: 8 | 9 | install-cli: 10 | internal: true 11 | desc: Install kubectl CLI (task kubernetes:install-cli) 12 | cmds: 13 | - curl -LO "https://dl.k8s.io/release/{{ .KUBECTL_VERSION }}/bin/linux/amd64/kubectl" 14 | - sudo mv kubectl /usr/local/bin/kubectl 15 | - sudo chmod +x /usr/local/bin/kubectl 16 | status: 17 | - | 18 | if [[ "$(kubectl version --client=true | grep 'Client Version:' | cut -d' ' -f3)" = "{{ .KUBECTL_VERSION }}" ]]; then exit 0; else exit 1; fi 19 | 20 | delete-failed-pods: 21 | desc: Deletes failed pods 22 | cmds: 23 | - kubectl delete pods --field-selector status.phase=Failed -A --ignore-not-found=true 24 | 25 | delete-completed-pods: 26 | desc: Deletes completed pods 27 | cmds: 28 | - kubectl delete pods --field-selector status.phase=Succeeded -A --ignore-not-found=true 29 | 30 | certificates: 31 | internal: true 32 | desc: List all the certificates in your cluster 33 | cmds: 34 | - kubectl get certificates {{.CLI_ARGS | default "-A"}} 35 | - kubectl get certificaterequests {{.CLI_ARGS | default "-A"}} 36 | 37 | ingresses: 38 | internal: true 39 | desc: List all the ingresses in your cluster 40 | cmds: 41 | - kubectl get ingress {{.CLI_ARGS | default "-A"}} 42 | 43 | nodes: 44 | internal: true 45 | desc: List all the nodes in your cluster 46 | cmds: 47 | - kubectl get nodes {{.CLI_ARGS | default "-o wide"}} 48 | 49 | services: 50 | internal: true 51 | desc: List all the services in your cluster 52 | cmds: 53 | - kubectl get svc {{.CLI_ARGS | default "-A"}} 54 | 55 | pods: 56 | internal: true 57 | desc: List all the pods in your cluster 58 | cmds: 59 | - kubectl get pods {{.CLI_ARGS | default "-A"}} 60 | 61 | resources: 62 | desc: Gather common resources in your cluster, useful when asking for support 63 | cmds: 64 | - task: nodes 65 | - task: pods 66 | - task: services 67 | - task: 
certificates 68 | - task: ingresses 69 | 70 | # This is a special task to create generic secrets on Kubernetes. It receives a variable called 'CONTENT' 71 | # which is a line-separated list of key=value to put into the secret. As Task can not expand variables in that way 72 | # I made the same using Golang template, so if some value matches the name of a defined variable, it's value will 73 | # be replaced with the value from the variable. 74 | # 75 | # vars: 76 | # TEST: test_value 77 | # CONTENT: | 78 | # token=TEST -----> token=test_value 79 | # 80 | create-generic-secret: 81 | internal: true 82 | desc: Create a generic secret on your Kubernetes cluster 83 | summary: | 84 | Create a generic secret on your Kubernetes cluster 85 | 86 | It will delete existing secret before creating the new one. 87 | Please make sure you don't need the old one before starting. 88 | requires: 89 | vars: [ NAME, NAMESPACE, CONTENT ] 90 | cmds: 91 | - | 92 | kubectl create secret generic {{ .NAME }} --namespace {{ .NAMESPACE }} 93 | {{- $this := . 
-}} 94 | {{/* Each line should be a literal */}} 95 | {{- $lines := .CONTENT | splitLines -}} 96 | {{- range $i, $line := $lines -}} 97 | 98 | {{/* Drop lines not matching the pattern */}} 99 | {{- if not (regexMatch `(.*?)=(.*?)` $line) -}} 100 | {{- continue -}} 101 | {{- end -}} 102 | 103 | {{/* Replace vars on matching lines when possible */}} 104 | {{- $literalKeyValue := $line | split "=" -}} 105 | 106 | {{- $key := $literalKeyValue._0 -}} 107 | {{- $value := default $literalKeyValue._1 (get $this $literalKeyValue._1) -}} 108 | 109 | {{- printf " --from-literal %s=%s " $key $value -}} 110 | {{- end -}} 111 | -------------------------------------------------------------------------------- /.taskfiles/terraform.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | vars: 5 | TERRAFORM_VERSION: "1.9.0" 6 | 7 | tasks: 8 | 9 | install-cli: 10 | internal: true 11 | desc: Install Terraform CLI (task terraform:install-cli) 12 | cmds: 13 | - wget -O /tmp/terraform.zip https://releases.hashicorp.com/terraform/{{ .TERRAFORM_VERSION }}/terraform_{{ .TERRAFORM_VERSION }}_linux_amd64.zip 14 | - unzip -o /tmp/terraform.zip -d /tmp 15 | - sudo mv /tmp/terraform /usr/local/bin/terraform 16 | - sudo chmod +x /usr/local/bin/terraform 17 | status: 18 | - | 19 | if [[ "$(terraform --version | grep -x -E "^Terraform v(\.?[0-9]){3}$" | cut -d' ' -f2)" = "v{{ .TERRAFORM_VERSION }}" ]]; then exit 0; else exit 1; fi 20 | -------------------------------------------------------------------------------- /.taskfiles/terragrunt.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | vars: 5 | TERRAGRUNT_VERSION: "v0.51.3" 6 | 7 | # TODO: Find a way to reference the value for this variable from other taskfile. 
If several of them 8 | # have the same name, they overlap and crash the process 9 | #GITLAB_ACCESS_TOKENS_TEMPORARY_DIR: "{{ .TEMPORARY_DIR }}/gitlab/access-tokens" 10 | 11 | tasks: 12 | 13 | install-cli: 14 | internal: true 15 | desc: Install Terragrunt CLI (task terraform:install-terragrunt-cli) 16 | cmds: 17 | - wget -O terragrunt https://github.com/gruntwork-io/terragrunt/releases/download/{{ .TERRAGRUNT_VERSION }}/terragrunt_linux_amd64 18 | - sudo mv terragrunt /usr/local/bin/terragrunt 19 | - sudo chmod +x /usr/local/bin/terragrunt 20 | status: 21 | - | 22 | if [[ "$(terragrunt --version | grep 'terragrunt version' | cut -d' ' -f3)" = "{{ .TERRAGRUNT_VERSION }}" ]]; then exit 0; else exit 1; fi 23 | 24 | plan: &planSpec 25 | internal: true 26 | silent: true 27 | desc: Perform a plan for resources creation (task terragrunt:plan GITLAB_ACCESS_TOKEN_NAME=supertoken INFRASTRUCTURE_SCOPE=opnsense) 28 | dir: infrastructure/terraform/{{ .INFRASTRUCTURE_SCOPE }} 29 | requires: 30 | vars: [ GITLAB_ACCESS_TOKEN_NAME, INFRASTRUCTURE_SCOPE ] 31 | vars: 32 | GITLAB_ACCESS_TOKEN: 33 | sh: cat {{ .GITLAB_ACCESS_TOKENS_TEMPORARY_DIR }}/{{ .GITLAB_ACCESS_TOKEN_NAME }}.json | jq '.token' 34 | preconditions: 35 | - test -f {{ .GITLAB_ACCESS_TOKENS_TEMPORARY_DIR }}/{{ .GITLAB_ACCESS_TOKEN_NAME }}.json 36 | cmds: 37 | - GITLAB_ACCESS_TOKEN={{ .GITLAB_ACCESS_TOKEN}} terragrunt plan 38 | 39 | apply: 40 | <<: *planSpec 41 | desc: Perform a apply for resources creation (task terragrunt:apply GITLAB_ACCESS_TOKEN_NAME=supertoken INFRASTRUCTURE_SCOPE=opnsense) 42 | cmds: 43 | - GITLAB_ACCESS_TOKEN={{ .GITLAB_ACCESS_TOKEN}} terragrunt apply 44 | 45 | destroy: 46 | <<: *planSpec 47 | desc: Perform a destruction of resources (task terragrunt:destroy GITLAB_ACCESS_TOKEN_NAME=supertoken INFRASTRUCTURE_SCOPE=opnsense) 48 | cmds: 49 | - GITLAB_ACCESS_TOKEN={{ .GITLAB_ACCESS_TOKEN}} terragrunt destroy 50 | 51 | # TODO: Decide if plan is necessary here 52 | plan-opnsense: 53 | internal: true 54 
| desc: Perform a plan for OPNsense resources creation (task terragrunt:plan-opnsense GITLAB_ACCESS_TOKEN_NAME=supertoken) 55 | requires: 56 | vars: [ GITLAB_ACCESS_TOKEN_NAME ] 57 | cmds: 58 | - task: plan 59 | vars: {GITLAB_ACCESS_TOKEN_NAME: "{{ .GITLAB_ACCESS_TOKEN_NAME }}", INFRASTRUCTURE_SCOPE: "opnsense" } 60 | 61 | apply-opnsense: 62 | desc: Perform a apply for OPNsense resources creation (task terragrunt:apply-opnsense GITLAB_ACCESS_TOKEN_NAME=supertoken) 63 | requires: 64 | vars: [ GITLAB_ACCESS_TOKEN_NAME ] 65 | cmds: 66 | - task: apply 67 | vars: { GITLAB_ACCESS_TOKEN_NAME: "{{ .GITLAB_ACCESS_TOKEN_NAME }}", INFRASTRUCTURE_SCOPE: "opnsense" } 68 | 69 | destroy-opnsense: 70 | desc: Perform a destroy for OPNsense resources destruction (task terragrunt:destroy-opnsense GITLAB_ACCESS_TOKEN_NAME=supertoken) 71 | requires: 72 | vars: [ GITLAB_ACCESS_TOKEN_NAME ] 73 | cmds: 74 | - task: destroy 75 | vars: { GITLAB_ACCESS_TOKEN_NAME: "{{ .GITLAB_ACCESS_TOKEN_NAME }}", INFRASTRUCTURE_SCOPE: "opnsense" } 76 | 77 | # TODO: Decide if plan is necessary here 78 | plan-vms: 79 | internal: true 80 | desc: Perform a plan for VMs resources creation (task terragrunt:plan-vms GITLAB_ACCESS_TOKEN_NAME=supertoken) 81 | requires: 82 | vars: [ GITLAB_ACCESS_TOKEN_NAME ] 83 | cmds: 84 | - task: plan 85 | vars: { GITLAB_ACCESS_TOKEN_NAME: "{{ .GITLAB_ACCESS_TOKEN_NAME }}", INFRASTRUCTURE_SCOPE: "vms" } 86 | 87 | apply-vms: 88 | desc: Perform a apply for VMs resources creation (task terragrunt:apply-vms GITLAB_ACCESS_TOKEN_NAME=supertoken) 89 | requires: 90 | vars: [ GITLAB_ACCESS_TOKEN_NAME ] 91 | cmds: 92 | - task: apply 93 | vars: { GITLAB_ACCESS_TOKEN_NAME: "{{ .GITLAB_ACCESS_TOKEN_NAME }}", INFRASTRUCTURE_SCOPE: "vms" } 94 | 95 | destroy-vms: 96 | desc: Perform a destroy for VMs resources destruction (task terragrunt:destroy-vms GITLAB_ACCESS_TOKEN_NAME=supertoken) 97 | requires: 98 | vars: [ GITLAB_ACCESS_TOKEN_NAME ] 99 | cmds: 100 | - task: destroy 101 | vars: { 
GITLAB_ACCESS_TOKEN_NAME: "{{ .GITLAB_ACCESS_TOKEN_NAME }}", INFRASTRUCTURE_SCOPE: "vms" } 102 | 103 | # TODO: Decide if plan is necessary here 104 | plan-talos: 105 | internal: true 106 | desc: Perform a plan for Talos resources creation (task terragrunt:plan-talos GITLAB_ACCESS_TOKEN_NAME=supertoken) 107 | requires: 108 | vars: [ GITLAB_ACCESS_TOKEN_NAME ] 109 | cmds: 110 | - task: plan 111 | vars: { GITLAB_ACCESS_TOKEN_NAME: "{{ .GITLAB_ACCESS_TOKEN_NAME }}", INFRASTRUCTURE_SCOPE: "talos" } 112 | 113 | apply-talos: 114 | desc: Perform a apply for Talos resources creation (task terragrunt:apply-talos GITLAB_ACCESS_TOKEN_NAME=supertoken) 115 | requires: 116 | vars: [ GITLAB_ACCESS_TOKEN_NAME ] 117 | cmds: 118 | - task: apply 119 | vars: { GITLAB_ACCESS_TOKEN_NAME: "{{ .GITLAB_ACCESS_TOKEN_NAME }}", INFRASTRUCTURE_SCOPE: "talos" } 120 | 121 | destroy-talos: 122 | desc: Perform a destroy for Talos resources destruction (task terragrunt:destroy-talos GITLAB_ACCESS_TOKEN_NAME=supertoken) 123 | requires: 124 | vars: [ GITLAB_ACCESS_TOKEN_NAME ] 125 | cmds: 126 | - task: destroy 127 | vars: { GITLAB_ACCESS_TOKEN_NAME: "{{ .GITLAB_ACCESS_TOKEN_NAME }}", INFRASTRUCTURE_SCOPE: "talos" } 128 | -------------------------------------------------------------------------------- /Taskfile.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3" 3 | 4 | vars: 5 | PROJECT_DIR: 6 | sh: "git rev-parse --show-toplevel" 7 | 8 | # 9 | TERRAFORM_DIR: "{{.PROJECT_DIR}}/infrastructure/terraform" 10 | KUBERNETES_DIR: "{{.PROJECT_DIR}}/kubernetes" 11 | 12 | # 13 | TEMPORARY_DIR: "{{.PROJECT_DIR}}/temp" 14 | 15 | env: 16 | KUBECONFIG: 17 | sh: find ~/.kube/clusters -type f | sed ':a;N;s/\n/:/;ba' 18 | 19 | includes: 20 | gitlab: .taskfiles/gitlab.yaml 21 | terraform: .taskfiles/terraform.yaml 22 | terragrunt: .taskfiles/terragrunt.yaml 23 | kubernetes: .taskfiles/kubernetes.yaml 24 | helm: .taskfiles/helm.yaml 25 | helmfile: 
.taskfiles/helmfile.yaml 26 | 27 | tasks: 28 | 29 | ####################################################################################### 30 | # Global tasks: 31 | # Globally accessed tasks 32 | ####################################################################################### 33 | 34 | default: 35 | silent: true 36 | cmds: 37 | - task -l 38 | 39 | install-dependencies: 40 | desc: Initialize workstation dependencies 41 | internal: true 42 | cmds: 43 | - task: gitlab:install-cli 44 | - task: terraform:install-cli 45 | - task: terragrunt:install-cli 46 | - task: kubernetes:install-cli 47 | - task: helm:install-cli 48 | - task: helmfile:install-cli 49 | 50 | global:init: 51 | desc: Initialize needed stuff to work with this repository 52 | cmds: 53 | - task: install-dependencies 54 | 55 | global:cleanup: 56 | desc: Clean autogenerated stuff 57 | prompt: This will remove autogenerated files (this includes locally stored tokens)... Do you want to continue? 58 | cmds: 59 | - cmd: rm -rf {{.TEMPORARY_DIR}} 60 | 61 | ####################################################################################### 62 | # Glue tasks: 63 | # These are globally accessed tasks, but usually composed by child ones 64 | ####################################################################################### 65 | 66 | glue:inject-external-secrets-token: 67 | desc: >- 68 | Inject a secret into Kubernetes, with a token for External Secrets to read variables from Gitlab 69 | (task glue:inject-external-secrets-token CLUSTER_NAME=cluster-01) 70 | summary: | 71 | Inject a secret into Kubernetes, with a token for External Secrets to read variables from Gitlab 72 | 73 | It will delete existing secret before creating the new one. 74 | Please make sure you don't need the old one before starting. 75 | prompt: This will remove the previous existing secret if found... Do you want to continue? 
76 | requires: 77 | vars: [ CLUSTER_NAME ] 78 | vars: 79 | KUBERNETES_ES_SECRET_NAME: &secretName "gitlab-secret" 80 | KUBERNETES_ES_SECRET_NAMESPACE: &secretNamespace "external-secrets" 81 | cmds: 82 | - task: gitlab:revoke-token 83 | vars: { TOKEN_NAME: "{{ .CLUSTER_NAME }}" } 84 | - task: gitlab:generate-token 85 | vars: { TOKEN_NAME: "{{ .CLUSTER_NAME }}" } 86 | 87 | - cmd: kubectl config use-context {{ .CLUSTER_NAME }} 88 | - cmd: kubectl delete secret {{ .KUBERNETES_ES_SECRET_NAME }} --namespace {{ .KUBERNETES_ES_SECRET_NAMESPACE }} 89 | ignore_error: true 90 | - task: kubernetes:create-generic-secret 91 | vars: 92 | # Exchange variables (needed here as Task cannot parse them on a more global scope) 93 | GITLAB_ACCESS_TOKEN: 94 | sh: cat {{ .GITLAB_ACCESS_TOKENS_TEMPORARY_DIR }}/{{ .CLUSTER_NAME }}.json | jq '.token' 95 | 96 | # Actual task variables 97 | NAME: *secretName 98 | NAMESPACE: *secretNamespace 99 | CONTENT: | 100 | token=GITLAB_ACCESS_TOKEN 101 | -------------------------------------------------------------------------------- /appliances/printing-server/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | # Sometimes it is super hard to configure a remote printer from Gnome GUI, so the following commands will save you: 2 | # lpadmin -p Printer-01-hplip -E -v ipp://192.168.2.110:631/printers/HP_Deskjet_2540_series_USB_CN43U3F2080604_HPLIP -m everywhere 3 | # lpadmin -p Printer-01-basic -E -v ipp://192.168.2.110:631/printers/HP_Deskjet_2540_series -m everywhere 4 | 5 | version: "3.5" 6 | 7 | volumes: 8 | cups_data: 9 | name: cups_data 10 | 11 | services: 12 | cups: 13 | image: anujdatar/cups:24.05.01 14 | container_name: cups 15 | restart: unless-stopped 16 | privileged: true 17 | ports: 18 | - "631:631" 19 | devices: 20 | - /dev/bus/usb:/dev/bus/usb 21 | environment: 22 | CUPSADMIN: ${CUPS_ADMIN_USERNAME} 23 | CUPSPASSWORD: ${CUPS_ADMIN_PASSWORD} 24 | TZ: Europe/London 25 | volumes: 26 | - 
cups_data:/etc/cups 27 | -------------------------------------------------------------------------------- /docs/img/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/achetronic/homelab-ops/188b980a9ed69508cfe7d695a90dcbd03d9e2db7/docs/img/logo.png -------------------------------------------------------------------------------- /docs/mesh/k0s/install-k0s-on-nodes.md: -------------------------------------------------------------------------------- 1 | # Install K0s on nodes 2 | 3 | ## Installation 4 | 5 | ### Pre-steps 6 | 7 | Install k0sctl in the computer 8 | 9 | ```console 10 | wget https://github.com/k0sproject/k0sctl/releases/download/v0.15.4/k0sctl-linux-x64 && \ 11 | sudo mv k0sctl-linux-x64 /usr/local/bin/k0sctl && \ 12 | sudo chmod +x /usr/local/bin/k0sctl 13 | ``` 14 | ### Actually install the K0s 15 | 16 | Generate the configuration 17 | 18 | ```console 19 | k0sctl init --k0s > k0sctl.yaml 20 | ``` 21 | 22 | > Probably the configuration is already created inside k0s directory 23 | 24 | Authorize 'root' user to login using password on all the servers iterating on them executing the following commands: 25 | 26 | > Take into account that the default username may change depending on the distribution. 
27 | > This username is commonly 'ubuntu' on odroid M1 boards and 'orangepi' on Orange Pi 5 28 | 29 | ```console 30 | ssh server-username@compute-XX.internal.place 31 | 32 | sudo passwd 33 | sudo sed -i 's/#PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config 34 | sudo systemctl restart ssh 35 | ``` 36 | 37 | Authorize your public SSH key on remote servers to configure 38 | 39 | ```console 40 | ssh-copy-id root@compute-01.internal.place && \ 41 | ssh-copy-id root@compute-02.internal.place && \ 42 | ssh-copy-id root@compute-03.internal.place && \ 43 | ssh-copy-id root@compute-04.internal.place && \ 44 | ssh-copy-id root@compute-05.internal.place 45 | ``` 46 | 47 | Apply k0s config to the servers 48 | 49 | ```console 50 | env SSH_KNOWN_HOSTS=/dev/null k0sctl apply --config k0sctl.yaml 51 | ``` 52 | 53 | > Some SBC manufacturers set the same machine ID for all the boards. This is problematic as K0s needs them 54 | > to be different between machines. To change it just delete it with 55 | > `rm -f /etc/machine-id && rm /var/lib/dbus/machine-id` and regenerate it with `dbus-uuidgen --ensure=/etc/machine-id` 56 | > [Reference here](https://unix.stackexchange.com/a/403054) 57 | 58 | Configure the kubeconfig file to access the cluster 59 | 60 | ```console 61 | mkdir -p ~/.kube/clusters 62 | k0sctl kubeconfig --config k0sctl-spc.yaml > ~/.kube/clusters/home-ops 63 | 64 | export KUBECONFIG=$(find ~/.kube/clusters -type f | sed ':a;N;s/\n/:/;ba') 65 | ``` 66 | 67 | ## FAQ: 68 | 69 | ### Applying thrown error like: 'ssh dial: ssh: handshake failed: host key mismatch: knownhosts: key mismatch' 70 | 71 | This is because you have used the same IP before but have installed the operating system again which changes the host's ssh host key. 72 | The same happens if you try to ssh root@192.168.122.144. 
73 | 74 | Solution 1 - Remove the keys from ~/.ssh/known_hosts file: 75 | 76 | ```console 77 | ssh-keygen -R root@compute-01.internal.place && \ 78 | ssh-keygen -R root@compute-02.internal.place && \ 79 | ssh-keygen -R root@compute-03.internal.place && \ 80 | ssh-keygen -R root@compute-04.internal.place && \ 81 | ssh-keygen -R root@compute-05.internal.place 82 | ``` 83 | 84 | Solution 2 - Configure the address range to not use host key checking: 85 | 86 | ```console 87 | # ~/.ssh/config 88 | Host 192.168.122.* 89 | UserKnownHostsFile=/dev/null 90 | ``` 91 | 92 | Solution 3 - Disable host key checking while running k0sctl: 93 | 94 | ```console 95 | env SSH_KNOWN_HOSTS=/dev/null k0sctl apply -c k0sctl.yaml 96 | ``` 97 | 98 | ### K0s can not apply because 'same ID found' 99 | 100 | Some SBC manufacturers set the same machine ID for all the boards. This is problematic as K0s needs them 101 | to be different between machines. To change it just delete it with `rm -f /etc/machine-id && rm /var/lib/dbus/machine-id` 102 | and regenerate it executing `dbus-uuidgen --ensure=/etc/machine-id` 103 | 104 | > [Reference here](https://unix.stackexchange.com/a/403054) 105 | 106 | ### How do I reset a node? 107 | 108 | There is a command combo for this: 109 | 110 | ```console 111 | k0s stop 112 | k0s reset --debug --verbose 113 | ``` 114 | 115 | The previous should work, but the problem is sometimes it get stuck, so the first thing is to look for the PID of the 116 | `k0s` process and kill it. 
After it, you need to remove some directories executing the following commands: 117 | 118 | ```console 119 | sudo rm -rf /etc/k0s 120 | sudo rm -rf /var/lib/k0s 121 | ``` 122 | 123 | After it, you can re-apply the config and the node will automatically be configured -------------------------------------------------------------------------------- /docs/mesh/k3s/emergency-dont-ask.md: -------------------------------------------------------------------------------- 1 | # Control Plane nodes: 2 | 3 | ## Installing: 4 | 5 | ```console 6 | curl -sfL https://get.k3s.io | INSTALL_K3S_EXEC="--tls-san=kubernetes-01.internal.place --cluster-cidr=10.90.0.0/16 --service-cidr=10.96.0.0/16 --disable-helm-controller --disable=traefik --disable=local-storage --disable=servicelb" sh -s - 7 | cat /var/lib/rancher/k3s/server/node-token > K3S_CLUSTER_TOKEN 8 | 9 | kubectl taint node orangepi-01 node-role.kubernetes.io/control-plane=:NoSchedule 10 | kubectl drain orangepi-01 --delete-emptydir-data 11 | ``` 12 | 13 | ## Get Kubeconfig: 14 | ```console 15 | cat /etc/rancher/k3s/k3s.yaml 16 | ``` 17 | 18 | ## Uninstalling: 19 | 20 | ```console 21 | /usr/local/bin/k3s-uninstall.sh 22 | ``` 23 | 24 | # Agent nodes: 25 | 26 | ## Installing: 27 | 28 | ```console 29 | curl -sfL https://get.k3s.io | K3S_URL=https://compute-04.internal.place:6443 K3S_TOKEN= sh - 30 | ``` 31 | 32 | ## Uninstalling: 33 | 34 | ```console 35 | /usr/local/bin/k3s-agent-uninstall.sh 36 | ``` 37 | -------------------------------------------------------------------------------- /docs/mesh/odroid/install-os-from-network.md: -------------------------------------------------------------------------------- 1 | # Install OS into SSD on Odroid M1 2 | 3 | ## Installation 4 | 5 | ### Pre-steps 6 | 7 | * Connect the SBC to the network using an ethernet wire 8 | * Connect the SPC to the power supply 9 | 10 | ### Actually install the OS 11 | 12 | * Enter into menu 'Exit to shell' and execute the following commands: 13 | 14 | 
```console 15 | udhcpc 16 | netboot_default 17 | exit 18 | ``` 19 | 20 | * A new menu will appear in Petitboot. Just select the OS wanted and hit enter on your keyboard 21 | 22 | ## FAQ: 23 | 24 | ### Bootloader does not autostart Linux and throws 'Default boot cancelled': 25 | 26 | This is not documented anywhere. Your Petitboot is recognizing your USB keyboard/mouse as a boot device. 27 | Just disconnect it and reboot the SBC. 28 | 29 | ### Where can I find the images to flash 30 | 31 | These methods are not documented at all, so use them at your own risk. This is more like a links collection 32 | 33 | * Raw images can be found here: http://ppa.linuxfactory.or.kr/images/raw/arm64/jammy/ 34 | * Netinstall images can be found here: http://ppa.linuxfactory.or.kr/installer/ODROID-M1/ 35 | * Netboot (PXE) configuration can be found here: http://ppa.linuxfactory.or.kr/installer/pxeboot/ODROID-M1/netboot-odroidm1.cfg 36 | 37 | ### Netboot installation is not detecting properly my nvme 38 | 39 | Some NVME drives are not fully compatible with the controllers included in some Ubuntu modified versions by 40 | hardkernel. Something I discovered is that Ubuntu 22.04 was not detecting the full size of one of my drives, but 41 | Ubuntu 20.04 was. So install the old one and upgrade it 42 | 43 | ```console 44 | sudo su 45 | 46 | apt update 47 | apt upgrade 48 | do-release-upgrade 49 | ``` 50 | 51 | ### How to update the Kernel to newer versions 52 | 53 | Once you have your system up-to-date, it's interesting to upgrade the kernel to newer versions to have some features 54 | related to sensors, etc.
55 | 56 | ```console 57 | sudo su 58 | 59 | apt search odroid-arm64 60 | apt install 61 | flash-kernel 62 | update-initramfs -u 63 | ``` -------------------------------------------------------------------------------- /docs/mesh/opnsense/bgp-for-metallb.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/achetronic/homelab-ops/188b980a9ed69508cfe7d695a90dcbd03d9e2db7/docs/mesh/opnsense/bgp-for-metallb.pdf -------------------------------------------------------------------------------- /docs/mesh/opnsense/install-wireguard.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/achetronic/homelab-ops/188b980a9ed69508cfe7d695a90dcbd03d9e2db7/docs/mesh/opnsense/install-wireguard.pdf -------------------------------------------------------------------------------- /docs/mesh/opnsense/ip-fixed-ranges.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | To maintain infrastructure, some order is needed. The following sections describe 4 | IP ranges used (and their corresponding CIDRs) and for what. 5 | 6 | 7 | # Compute nodes 8 | 9 | These nodes are described as those nodes used to deploy applications. 10 | They can be bare metal or VMs, and can work as standalone or as part of a cluster. 11 | 12 | > Obviously, more computing nodes are expected than those used for storage, so the range is wider 13 | 14 | ## Range 15 | 192.168.2.10 - 192.168.2.29 16 | 17 | ## CIDR 18 | 192.168.2.10/31 19 | 192.168.2.12/30 20 | 192.168.2.16/29 21 | 192.168.2.24/30 22 | 192.168.2.28/31 23 | 24 | 25 | # Storage nodes 26 | 27 | These nodes are described as those nodes used to store data. These nodes can be VMs or bare metal too, 28 | as they can be running something like SeaweedFS, Ceph or things like TrueNas. As it is for domestic usage, 29 | TrueNAS is preferred to keep data maintenance lower. 
30 | 31 | ## Range 32 | 192.168.2.30 - 192.168.2.40 33 | 34 | ## CIDR 35 | 192.168.2.30/31 36 | 192.168.2.32/29 37 | 192.168.2.40/32 38 | 39 | 40 | # Kubernetes LoadBalancers 41 | 42 | These IPs will be used to automatically provision Kubernetes LoadBalancers from inside the clusters. 43 | They are used mainly to expose ingress controllers 44 | 45 | ## Range 46 | 192.168.2.60 - 192.168.2.80 47 | 48 | ## CIDR 49 | 192.168.2.60/30 50 | 192.168.2.64/28 51 | 192.168.2.80/32 52 | 53 | # Ref: https://www.ipaddressguide.com/cidr -------------------------------------------------------------------------------- /docs/mesh/orange-pi/install-ubuntu-server-on-ssd-crosstalk-solutions.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/achetronic/homelab-ops/188b980a9ed69508cfe7d695a90dcbd03d9e2db7/docs/mesh/orange-pi/install-ubuntu-server-on-ssd-crosstalk-solutions.pdf -------------------------------------------------------------------------------- /docs/mesh/orange-pi/install-ubuntu-server-on-ssd.md: -------------------------------------------------------------------------------- 1 | # Install Ubuntu Server into SSD on OrangePi 5 2 | 3 | ## Installation 4 | 5 | ### Pre-steps 6 | 7 | The first thing before going forward is to have everything up-to-date (even firmware), so open the config CLI 8 | executing the command `orangepi-config` 9 | 10 | Then go to `System > Firmware` and hit `<Enter>`.
This will execute a bunch of commands, and after all the process, 11 | your system will be up-to-date 12 | 13 | > This is similar to executing `apt update && apt upgrade` but for some reason, failing with the second 14 | > commands in the first time you did it just after starting your new OS 15 | 16 | Now it's time to install some tools that will be used later, executing the following command: 17 | 18 | `apt install gdisk fdisk` 19 | 20 | > I know, I know, parted is already included in the distro, but I prefer not to play with bombs and gdisk is easier 21 | 22 | ### Delete all the partitions on SPI flash memory 23 | 24 | SPI flash memory is that place storing the bootloader, so what we want to do is deleting all the partitions there, 25 | and then rebuild them (the last, later). 26 | 27 | Get the path to the dev related to SPI executing `fdisk -l`. The device called something like `mtdblock0` and 28 | with 16MB size is the right one. Probably you will get something like `/dev/mtdblock0` 29 | 30 | After getting the device, delete all the partitions inside, entering into gdisk: 31 | 32 | ```console 33 | sudo gdisk /dev/mtdblock0 34 | ``` 35 | 36 | After entering inside, write `p` and hit key. You will get a list of the partitions present into that device. 37 | To delete them, write `d` + , then write the number of the partition and hit again. You will get the 38 | following output 39 | 40 | ```console 41 | Number Start (sector) End (sector) Size Code Name 42 | 1 64 7167 3.5 MiB 8300 idbloader 43 | 2 7168 7679 256.0 KiB 8300 vnvm 44 | 3 7680 8063 192.0 KiB 8300 reserved_space 45 | 4 8064 8127 32.0 KiB 8300 reserved1 46 | 5 8128 8191 32.0 KiB 8300 uboot_env 47 | 6 8192 16383 4.0 MiB 8300 reserved2 48 | 7 16384 32734 8.0 MiB 8300 uboot 49 | 50 | Command (? for help): d 51 | Partition number (1-7): 1 52 | 53 | Command (? for help): d 54 | Partition number (2-7): 2 55 | 56 | Command (? for help): d 57 | Partition number (3-7): 3 58 | 59 | Command (? 
for help): d 60 | Partition number (4-7): 4 61 | 62 | Command (? for help): d 63 | Partition number (5-7): 5 64 | 65 | Command (? for help): d 66 | Partition number (6-7): 6 67 | 68 | Command (? for help): d 69 | Using 7 70 | ``` 71 | 72 | > Don't forget about writing the changes into disk: ` + ` 73 | 74 | ### Delete all the partitions on the SSD drive 75 | 76 | Repeat the previous steps, but for the device `/dev/nvme0n1` 77 | 78 | ### Reinstall the bootloader into SPI flash memory 79 | 80 | Enter into config CLI with the command `orangepi-config`, 81 | and go to `System > Install (bootloader) > Install/Update the bootloader on SPI flash`. 82 | 83 | Hit `` and wait. After that process finish, your bootloader is ready to rock 84 | 85 | ### Copy the OS ISO into OrangePi 86 | 87 | After flashing the bootloader, it's needed to transfer the ISO image into the OrangePi. No matter which method is used 88 | (USB stick, SCP...) but personally I prefer to use SCP because the fastest one in terms of ease: 89 | 90 | ```console 91 | scp ./Orangepi5_1.1.6_ubuntu_jammy_server_linux5.10.110.img root@192.168.2.31:/root/ 92 | ``` 93 | 94 | ### Flash the OS into the SSD 95 | 96 | ```console 97 | cd /root 98 | 99 | sudo dd bs=1M if=Orangepi5_1.1.6_ubuntu_jammy_server_linux5.10.110.img of=/dev/nvme0n1 status=progress 100 | ``` 101 | 102 | ## FAQ: 103 | 104 | ### Where to find all the instructions: 105 | 106 | Some fixes and steps are documented on my own, but some of them were extracted from the 107 | [following video](https://www.youtube.com/watch?v=cBqV4QWj0lE) 108 | 109 | ### MAC address keep changing every reboot: 110 | 111 | A quick fix is fixing the MAC address on the connection settings 112 | 113 | ```console 114 | sudo mv /etc/network/interfaces /etc/network/interfaces.bak 115 | 116 | nano /etc/network/interfaces 117 | ``` 118 | 119 | ```console 120 | auto eth0 121 | iface eth0 inet dhcp 122 | hwaddress ether AA:BB:CC:DD:EE:FF 123 | ``` 124 | 125 | > Kudos to this random guy 
126 | > [for the fix](https://www.reddit.com/r/OrangePI/comments/14sleyi/comment/jqza65e/?utm_source=share&utm_medium=web2x&context=3) 127 | 128 | ### My router is not assigning the static IP to the MAC 129 | 130 | This is because your DHCP session is not expired. To force your DHCP server to start another session for your 131 | client, just drop the current session and ask for a new one 132 | 133 | ```console 134 | # Drop the current lease 135 | dhclient -r eth0 136 | 137 | # Ask for some new DHCP lease 138 | dhclient eth0 139 | ``` 140 | 141 | ### I lost the ISO image. Where do I download it? 142 | 143 | Just go to [downloads section](http://www.orangepi.org/html/hardWare/computerAndMicrocontrollers/service-and-support/Orange-pi-5.html) 144 | -------------------------------------------------------------------------------- /docs/mesh/talos/Add-additional-cluster-certs.md: -------------------------------------------------------------------------------- 1 | # Add additional certificate SANs to Kubernetes with Talos Linux 2 | 3 | 4 | ## Short introduction 5 | 6 | This guide will explain how to add additional cluster certificates. This is useful if you want to access the Kubernetes API outside of the network where it is installed, for example over a DNS record. 7 | 8 | {{< admonition tip >}} 9 | You can do this before installing Talos, or if you have already installed it, do it with `talosctl edit machineconfig`. 10 | {{< /admonition >}} 11 | 12 | ## Configuration 13 | 14 | This configuration has to be done only on the **control plane** node, as we are not configuring the API server on worker nodes. Here is the configuration (click to expand): 15 | 16 | ```yaml 17 | cluster: 18 | apiServer: 19 | # Extra certificate subject alternative names for the API server's certificate. 20 | certSANs: 21 | - 192.168.0.241 22 | - example.com # This is the additional certificate. 
23 | ``` 24 | 25 | -------------------------------------------------------------------------------- /docs/mesh/talos/Add-additonal-disks-to-nodes.md: -------------------------------------------------------------------------------- 1 | # Add additional disks to Kubernetes nodes with Talos Linux 2 | 3 | 4 | ## Short introduction 5 | 6 | This guide will show you how to add additional disks for storage on Talos Linux. This is useful when you want to expand the storage capabilities of your Kubernetes cluster, no matter which storage provisioner you're using. 7 | 8 | {{< admonition tip >}} 9 | You can do this before installing Talos, or if you have already installed it, do it with `talosctl edit machineconfig`. 10 | {{< /admonition >}} 11 | 12 | ## Configuration 13 | 14 | This configuration has to be done **only on the node where you are mounting the disk**. 15 | 16 | The configuration will focus mostly on setting this up on a Raspberry Pi. If you are not doing this on a Raspberry Pi, you can just mount the disk and proceed with the configuration. 17 | 18 | However, if you are doing it on a Raspberry Pi, you need to prepare the disk first: 19 | 20 | * Connect the disk via USB to your machine. 21 | 22 | * Check the name of the disk with: 23 | 24 | ```bash 25 | lsblk 26 | ``` 27 | 28 | * Now, you need to wipe the disk completely, including the partition table information on it: 29 | {{< admonition warning >}} 30 | Make sure you do a backup first if the disk is not empty. This will wipe it clean. 31 | {{< /admonition >}} 32 | 33 | ```bash 34 | sudo wipefs -a /dev/sdX 35 | ``` 36 | 37 | * Now, you need to create a partition of type `XFS` before proceeding. We will use `gparted` for this: 38 | 39 | 1. Install `gparted` 40 | 41 | ```bash 42 | # Debian based systems 43 | sudo apt install gparted 44 | 45 | # Arch based systems 46 | sudo pacman -S gparted 47 | ``` 48 | 49 | 2. Open `gparted`, select the disk from the dropdown in the top right. 50 | 51 | 3. 
In the menu, select `Device` -> `Create Partition Table` -> `Type GPT`. 52 | 53 | 4. Then, select `Partition`. 54 | 55 | 5. In the menu, leave everything as default, except `File system` -> `XFS`. 56 | 57 | 6. Click apply and wait for the disk to get partitioned, then you can eject it from the machine. 58 | 59 | * Plug the disk on the node you wish to configure, and proceed. 60 | 61 | {{< admonition warning >}} 62 | Take care where you are mounting the disk. Talos Linux expects it to be mounted in a directory under `/var/mnt`. If you mount it somewhere else, the configuration will not be successful. 63 | {{< /admonition >}} 64 | 65 | Here is the configuration (click to expand): 66 | 67 | ```yaml 68 | machine: 69 | kubelet: 70 | extraMounts: 71 | - destination: /var/mnt/storage 72 | type: bind 73 | source: /var/mnt/storage 74 | options: 75 | - bind 76 | - rshared 77 | - rw 78 | 79 | disks: 80 | - device: /dev/sdX # The name of the disk to use. 81 | partitions: 82 | - mountpoint: /var/mnt/storage # Where to mount the partition. 83 | ``` 84 | 85 | # Ref: https://kubito.dev/page/3/ 86 | -------------------------------------------------------------------------------- /docs/mesh/talos/how-to-upgrade.md: -------------------------------------------------------------------------------- 1 | # Upgrading process 2 | 3 | ## Short introduction 4 | 5 | Upgrading process is not well-documented on their website. I think they forgot to explain things for non talos developers. 6 | Basically, I took the adventage of Talos being super hard to break (Yay for that!) to do some experiments and document 7 | this for myself in the future 8 | 9 | ## Process 10 | 11 | 1. Be completely sure about following points: 12 | 13 | * Your `talosconfig.yaml` file includes, as endpoints, the domains or IPs pointing to your master nodes. 14 | Their Terraform provider tends to include only the machine hostnames. 
Get `talosconfig` from terraform outputs, 15 | and change the endpoints to add mentioned domains. 16 | 17 | Spoiler: this is not valid because you are out of the machine, obviously. Remember this point as `talosctl` 18 | endpoints are read from the file. 19 | 20 | * Your Kubernetes cluster certificate must include `localhost` and `127.0.0.1` in its SANS. `talosctl` seems to perform 21 | some calls to the API, and it is THAT api which triggers all the process. Are those Talos machines-related certificates 22 | the same as used by Kubernetes? I don't know cause Talos is hermetic, but when they are not included, the process fails. 23 | 24 | * You MUST have an odd number of etcd members to proceed: at least 3. This seems to be obvious but there is not a reminder 25 | at ANY part of the process. I started upgrading the workers, everything fine until started with the control-plane nodes: 26 | got an error from Talos: process can potentially break etcd quorum (and stopped) 27 | 28 | 2. Upgrade `talosctl` to the version you want. If possible upgrade minor by minor. By experience, you can jump all the 29 | patch versions at once. 30 | 31 | 3. Right AFTER previous points, you can trigger the following command: 32 | 33 | ```console 34 | talosctl --talosconfig talosconfig.yaml -n compute-13.internal.place upgrade --image ghcr.io/siderolabs/installer:v1.6.1 35 | ``` 36 | 37 | > ONE NODE AT A TIME, if not, you can break the cluster 38 | > [Docs here](https://www.talos.dev/v1.8/talos-guides/upgrading-talos/) 39 | 40 | 4. Remember upgrading Talos version is not upgrading your Kubernetes cluster.
To do that, just 41 | [follow this guide](https://www.talos.dev/v1.8/kubernetes-guides/upgrading-kubernetes/) 42 | 43 | It's more or less just `talosctl --talosconfig talosconfig.yaml -n compute-21.internal.place upgrade-k8s --to 1.33` -------------------------------------------------------------------------------- /infrastructure/terraform/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/achetronic/homelab-ops/188b980a9ed69508cfe7d695a90dcbd03d9e2db7/infrastructure/terraform/.gitkeep -------------------------------------------------------------------------------- /infrastructure/terraform/modules/talos-cluster/main.tf: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/siderolabs/contrib/blob/main/examples/terraform/basic/main.tf 2 | 3 | # Generate initial secrets to be used in later configuration 4 | # Ref: https://registry.terraform.io/providers/siderolabs/talos/latest/docs/resources/machine_secrets 5 | resource "talos_machine_secrets" "this" { 6 | talos_version = var.globals.talos.version 7 | } 8 | 9 | # Generate a machine configuration for a node type 10 | # Equivalent to generate config YAMLs using: talosctl gen config cluster-name https://cluster-endpoint:6443 11 | # Ref: https://registry.terraform.io/providers/siderolabs/talos/latest/docs/data-sources/machine_configuration 12 | data "talos_machine_configuration" "controlplane" { 13 | cluster_name = var.globals.config.cluster_name 14 | cluster_endpoint = var.globals.config.controlplane_endpoint 15 | machine_type = "controlplane" 16 | machine_secrets = talos_machine_secrets.this.machine_secrets 17 | 18 | docs = false 19 | examples = false 20 | 21 | talos_version = var.globals.talos.version 22 | #config_patches = [] 23 | } 24 | 25 | # Generate 'talosconfig' configuration for talosctl to perform requests 26 | data "talos_client_configuration" "this" { 27 | cluster_name = 
var.globals.config.cluster_name 28 | client_configuration = talos_machine_secrets.this.client_configuration 29 | endpoints = [for k, v in var.node_data.controlplanes : k] 30 | } 31 | 32 | # Generate a machine configuration for a node type 33 | # Equivalent to generate config YAMLs using: 34 | # talosctl gen config cluster-name https://cluster-endpoint:6443 35 | # Ref: https://registry.terraform.io/providers/siderolabs/talos/latest/docs/data-sources/machine_configuration 36 | data "talos_machine_configuration" "worker" { 37 | cluster_name = var.globals.config.cluster_name 38 | cluster_endpoint = var.globals.config.controlplane_endpoint 39 | machine_type = "worker" 40 | machine_secrets = talos_machine_secrets.this.machine_secrets 41 | 42 | docs = false 43 | examples = false 44 | talos_version = var.globals.talos.version 45 | #config_patches = [] 46 | } 47 | 48 | # Apply configuration YAML to the controlplane machines 49 | # Equivalent to apply config by executing: 50 | # talosctl apply-config -n {node-ip} -e {endpoint-ip} --talosconfig={path} --file {path} 51 | resource "talos_machine_configuration_apply" "controlplane" { 52 | for_each = var.node_data.controlplanes 53 | 54 | client_configuration = talos_machine_secrets.this.client_configuration 55 | machine_configuration_input = data.talos_machine_configuration.controlplane.machine_configuration 56 | node = each.value.node_address 57 | 58 | config_patches = [templatefile(each.value.config_template_path, each.value.config_template_vars)] 59 | } 60 | 61 | # Apply configuration YAML to the worker machines 62 | # Equivalent to apply config by executing: 63 | # talosctl apply-config -n {node-ip} -e {endpoint-ip} --talosconfig={path} --file {path} 64 | resource "talos_machine_configuration_apply" "worker" { 65 | for_each = var.node_data.workers 66 | 67 | client_configuration = talos_machine_secrets.this.client_configuration 68 | machine_configuration_input = data.talos_machine_configuration.worker.machine_configuration 69 
| node = each.value.node_address 70 | 71 | config_patches = [templatefile(each.value.config_template_path, each.value.config_template_vars)] 72 | } 73 | 74 | # Launch bootstrap process on controlplane machines: 75 | # All the nodes start kubelet; one controlplane node start etcd, renders 76 | # static pods manifests for Kubernetes controlplane components and injects them 77 | # into API server 78 | # Ref: https://www.talos.dev/v1.6/learn-more/control-plane/#cluster-bootstrapping 79 | resource "talos_machine_bootstrap" "this" { 80 | depends_on = [talos_machine_configuration_apply.controlplane] 81 | 82 | client_configuration = talos_machine_secrets.this.client_configuration 83 | node = [for k, v in var.node_data.controlplanes : v.node_address][0] 84 | } 85 | 86 | # Get Kubeconfig from one controlplane node 87 | resource "talos_cluster_kubeconfig" "kubeconfig" { 88 | depends_on = [talos_machine_bootstrap.this] 89 | 90 | client_configuration = talos_machine_secrets.this.client_configuration 91 | node = [for k, v in var.node_data.controlplanes : v.node_address][0] 92 | } 93 | 94 | # 95 | output "talosconfig" { 96 | value = data.talos_client_configuration.this.talos_config 97 | sensitive = true 98 | } 99 | 100 | output "kubeconfig" { 101 | value = talos_cluster_kubeconfig.kubeconfig.kubeconfig_raw 102 | sensitive = true 103 | } 104 | -------------------------------------------------------------------------------- /infrastructure/terraform/modules/talos-cluster/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | 3 | required_providers { 4 | talos = { 5 | source = "siderolabs/talos" 6 | version = "0.8.1" 7 | } 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /infrastructure/terraform/modules/talos-cluster/variables.tf: -------------------------------------------------------------------------------- 1 | # The globals block is the place to define global stuff 2 | # 
mostly related to parameters for Talos to generate 3 | # initial configuration YAMLs and secrets 4 | variable "globals" { 5 | description = "Global configuration definition block" 6 | 7 | type = object({ 8 | 9 | # Talos version that generated initial secrets and config 10 | talos = object({ 11 | version = string 12 | }) 13 | 14 | # Configuration parameters used to generate initial config 15 | config = object({ 16 | cluster_name = string 17 | controlplane_endpoint = string 18 | }) 19 | }) 20 | } 21 | 22 | # TODO 23 | variable "node_data" { 24 | description = "A map of node data" 25 | 26 | type = object({ 27 | 28 | controlplanes = map(object({ 29 | # IP or FQDN that is reachable through current network 30 | node_address = string 31 | 32 | # YAML terraform template and vars to substitute to patch 33 | # auto-generated talos cluster+machine config 34 | config_template_path = string 35 | config_template_vars = optional(any) 36 | })) 37 | 38 | workers = map(object({ 39 | # IP or FQDN that is reachable through current network 40 | node_address = string 41 | 42 | # YAML terraform template and vars to substitute to patch 43 | # auto-generated talos cluster+machine config 44 | config_template_path = string 45 | config_template_vars = optional(any) 46 | })) 47 | }) 48 | } 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /infrastructure/terraform/modules/talos-vms/instances.tf: -------------------------------------------------------------------------------- 1 | # Create all instances 2 | resource "libvirt_domain" "instance" { 3 | for_each = var.instances 4 | 5 | cpu { 6 | mode = "host-passthrough" 7 | } 8 | 9 | # Merge between global XSLT and user-defined XSLT 10 | # As Terraform can not perform XML parsing to merge XML, 11 | # some tags are deleted to avoid conflicts: xml, xsl:stylesheet, xsl:transform 12 | xml { 13 | xslt = templatefile("${path.module}/templates/xsl/global.xsl", 14 | { 15 | user_xslt = each.value.xslt != 
null ? each.value.xslt : "" 16 | } 17 | ) 18 | } 19 | 20 | # Set config related directly to the VM 21 | name = each.key 22 | memory = each.value.memory 23 | vcpu = each.value.vcpu 24 | 25 | # Use UEFI capable machine 26 | machine = "q35" 27 | firmware = "/usr/share/OVMF/OVMF_CODE_4M.fd" 28 | 29 | 30 | # Setting CDROM after HDD gives the opportunity to install on first boot, 31 | # and boot from HDD in the following ones 32 | boot_device { 33 | dev = ["hd", "cdrom"] 34 | } 35 | 36 | # Attach MACVTAP networks 37 | dynamic "network_interface" { 38 | for_each = each.value.networks 39 | 40 | iterator = network 41 | content { 42 | macvtap = network.value.interface 43 | hostname = each.key 44 | mac = network.value.mac 45 | addresses = network.value.addresses 46 | wait_for_lease = false 47 | # Guest virtualized network interface is connected directly to a physical device on the Host, 48 | # As a result, requested IP address can only be claimed by the OS: Linux is configured in static mode by cloud-init 49 | } 50 | } 51 | 52 | # Read-Only disk. 
Used to load ISO images on first boot 53 | disk { 54 | file = "${local.volume_pool_base_path}/${each.value.image}.iso" 55 | } 56 | 57 | # Writable disk used as a data storage 58 | disk { 59 | volume_id = libvirt_volume.instance_disk[each.key].id 60 | scsi = true 61 | } 62 | 63 | # IMPORTANT: this is a known bug on cloud images, since they expect a console 64 | # we need to pass it 65 | # https://bugs.launchpad.net/cloud-images/+bug/1573095 66 | console { 67 | type = "pty" 68 | target_port = "0" 69 | target_type = "serial" 70 | } 71 | 72 | console { 73 | type = "pty" 74 | target_port = "1" 75 | target_type = "virtio" 76 | } 77 | 78 | video { 79 | type = "vga" 80 | } 81 | 82 | graphics { 83 | type = "vnc" 84 | listen_type = "address" 85 | autoport = true 86 | } 87 | 88 | qemu_agent = false 89 | autostart = true 90 | 91 | lifecycle { 92 | ignore_changes = [ 93 | nvram, 94 | disk[0], 95 | network_interface[0], 96 | ] 97 | } 98 | 99 | } 100 | -------------------------------------------------------------------------------- /infrastructure/terraform/modules/talos-vms/storage.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | volume_pool_base_path = "/opt/libvirt/vms-volume-pool" 3 | } 4 | 5 | # Create a dir where all the volumes will be created 6 | resource "libvirt_pool" "volume_pool" { 7 | name = "vms-volume-pool" 8 | type = "dir" 9 | target { 10 | path = "${local.volume_pool_base_path}" 11 | } 12 | } 13 | 14 | # Ref: https://factory.talos.dev 15 | resource "libvirt_volume" "os_image" { 16 | for_each = var.globals.iso_image_urls 17 | 18 | source = each.value 19 | 20 | name = join("", [each.key, ".iso"]) 21 | pool = libvirt_pool.volume_pool.name 22 | format = "iso" 23 | } 24 | 25 | # General purpose volumes for all the instances 26 | resource "libvirt_volume" "instance_disk" { 27 | for_each = var.instances 28 | 29 | name = join("", [each.key, ".qcow2"]) 30 | pool = libvirt_pool.volume_pool.name 31 | format = "qcow2" 32 | 
33 | # 10GB (as bytes) as default 34 | size = try(each.value.disk, 10 * 1000 * 1000 * 1000) 35 | 36 | } 37 | -------------------------------------------------------------------------------- /infrastructure/terraform/modules/talos-vms/templates/xsl/global.xsl: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | ${~ 19 | 20 | # 1. Delete comments 21 | # 2. Trim unneeded spaces or new lines 22 | # 3. Delete potentially conflicting XSLT tags 23 | 24 | replace(replace(replace(user_xslt, 25 | "//", ""), 26 | "/\\s+\\n/", ""), 27 | "/<(\\?)?(/)?(?:xml|xsl:stylesheet|xsl:transform)[^>]*>/", "") 28 | ~} 29 | 30 | -------------------------------------------------------------------------------- /infrastructure/terraform/modules/talos-vms/terraform.tf: -------------------------------------------------------------------------------- 1 | # WARNING: Unable the SELinux for qemu 2 | # Ref: https://github.com/dmacvicar/terraform-provider-libvirt/issues/546 3 | terraform { 4 | required_providers { 5 | libvirt = { 6 | source = "dmacvicar/libvirt" 7 | } 8 | 9 | tls = { 10 | source = "hashicorp/tls" 11 | } 12 | } 13 | } 14 | 15 | # Configure the Libvirt provider 16 | provider "libvirt" { 17 | # Use password when mode is set to 'password'. Use SSH key in other cases 18 | uri = var.globals.ssh_connection.mode == "password" ? 
( 19 | "qemu+ssh://${var.globals.ssh_connection.username}:${var.globals.ssh_connection.password}@${var.globals.ssh_connection.host}/system?sshauth=ssh-password&no_verify=1" 20 | ) : ( 21 | "qemu+ssh://${var.globals.ssh_connection.username}@${var.globals.ssh_connection.host}/system?keyfile=${var.globals.ssh_connection.key_path}&sshauth=privkey,agent&no_verify=1" 22 | ) 23 | } 24 | -------------------------------------------------------------------------------- /infrastructure/terraform/modules/talos-vms/variables.tf: -------------------------------------------------------------------------------- 1 | # The globals block is the place to define global stuff 2 | # mostly related to things that are common to all the VMs, 3 | # such as the credentials to connect to the hypervisor 4 | variable "globals" { 5 | type = object({ 6 | 7 | # SSH parameters to connect to the host 8 | ssh_connection = object({ 9 | # IP address to connect to the host 10 | host = string 11 | 12 | # Username to be authenticated in the host 13 | username = string 14 | 15 | # Password to be authenticated in the host 16 | password = optional(string) 17 | 18 | # Path to the ssh key (public or private) to be authenticated in the host 19 | # This key should already exist on the host machine 20 | key_path = optional(string) 21 | 22 | # Which auth method to use on SSH connection: password, key 23 | mode = string 24 | }) 25 | 26 | # Map of names that reference ISO image URIs. 27 | # These images will be downloaded instead of using them from remote 28 | # Expected format: map[string]string 29 | # Example: {"desired_name" = "https://url/to/image.iso"} 30 | iso_image_urls = map(string) 31 | }) 32 | 33 | description = "Global configuration definition block" 34 | 35 | validation { 36 | condition = contains(["password", "key"], var.globals.ssh_connection.mode) 37 | error_message = "Allowed values for ssh_connection.mode are \"password\" or \"key\"." 
38 | } 39 | 40 | validation { 41 | condition = alltrue([for url in values(var.globals.iso_image_urls) : endswith(url, ".iso")]) 42 | error_message = "All ISO image URLs must end with '.iso'." 43 | } 44 | } 45 | 46 | # The instances block is the place to define all the VMs 47 | # (and their resources) that will be created 48 | variable "instances" { 49 | type = map(object({ 50 | 51 | image = string 52 | 53 | # TODO 54 | vcpu = number 55 | memory = number 56 | disk = number 57 | networks = list(object({ 58 | interface = string 59 | addresses = list(string) 60 | mac = string 61 | })) 62 | 63 | # XLST template to modify params of VM that are not covered by the module 64 | # Some XSLT tags will be deleted as Terraform cannot merge: xml, xsl:stylesheet, xsl:transform 65 | xslt = optional(string) 66 | 67 | })) 68 | description = "Instances definition block" 69 | 70 | validation { 71 | condition = alltrue(flatten([ 72 | for instance_name, instance_definition in var.instances : 73 | [for network in instance_definition.networks : 74 | can(regex("^[a-fA-F0-9]{2}(:[a-fA-F0-9]{2}){5}$", network.mac)) ] 75 | ])) 76 | 77 | error_message = "Allowed values for instance.networks.mac are like: AA:BB:CC:DD:EE:FF." 
78 | } 79 | } 80 | -------------------------------------------------------------------------------- /infrastructure/terraform/opnsense/firewall.tf: -------------------------------------------------------------------------------- 1 | ######################################## 2 | ### TODO ALIAS 3 | ### Permissions: Firewall: Alias: Edit 4 | ######################################## 5 | 6 | #resource "opnsense_firewall_alias" "kubernetes_lb_wireguard" { 7 | # name = "kubernetes_lb_wireguard" 8 | # 9 | # type = "host" 10 | # content = [ 11 | # "192.168.2.63" 12 | # ] 13 | # 14 | # stats = true 15 | # description = "Custom name for Kubernetes' LoadBalancer exposing Wireguard" 16 | #} 17 | 18 | #resource "opnsense_firewall_alias" "kubernetes_lb_wireguard_test" { 19 | # name = "test_kubernetes_lb_wireguard" 20 | # 21 | # type = "host" 22 | # content = [ 23 | # "192.168.2.63" 24 | # ] 25 | # 26 | # categories = [] 27 | # 28 | # stats = false 29 | # description = "[TEST] Custom name for Kubernetes' LoadBalancer exposing Wireguard" 30 | #} 31 | 32 | 33 | ######################################## 34 | ### TODO NAT XXXXXX 35 | ### Permissions: Firewall: SourceNat: API 36 | ######################################## 37 | #resource "opnsense_firewall_nat" "wireguard_forwarding_rule" { 38 | # interface = "wan" 39 | # protocol = "TCP" 40 | # 41 | # source = { 42 | # net = "any" 43 | # invert = false 44 | # } 45 | # 46 | # destination = { 47 | # net = "wanip" 48 | # port = 31820 49 | # } 50 | # 51 | # target = { 52 | # ip = opnsense_firewall_alias.kubernetes_lb_wireguard_test.name 53 | # port = 31820 54 | # } 55 | # 56 | # description = "TEST . 
Forward HTTP traffic" 57 | #} 58 | 59 | ## TODO 60 | #resource "opnsense_firewall_nat" "http_forwarding_rule" { 61 | # interface = "wan" 62 | # protocol = "TCP" 63 | # 64 | # source = { 65 | # net = "any" 66 | # invert = false 67 | # } 68 | # 69 | # destination = { 70 | # net = "wanip" 71 | # port = 80 72 | # } 73 | # 74 | # target = { 75 | # ip = "heimdal" 76 | # port = 80 77 | # } 78 | # 79 | # description = "Forward HTTP traffic to Nginx Gateway" 80 | #} 81 | 82 | ## TODO 83 | #resource "opnsense_firewall_nat" "https_forwarding_rule" { 84 | # interface = "wan" 85 | # protocol = "TCP" 86 | # 87 | # source = { 88 | # net = "any" 89 | # invert = false 90 | # } 91 | # 92 | # destination = { 93 | # net = "wanip" 94 | # port = 443 95 | # } 96 | # 97 | # target = { 98 | # ip = "heimdal" 99 | # port = 443 100 | # } 101 | # 102 | # description = "Forward HTTPS traffic to Nginx Gateway" 103 | #} 104 | 105 | # TODO 106 | #resource "opnsense_firewall_nat" "wireguard_forwarding_rule" { 107 | # interface = "wan" 108 | # protocol = "UDP" 109 | # 110 | # source = { 111 | # net = "any" 112 | # invert = false 113 | # } 114 | # 115 | # destination = { 116 | # net = "wanip" 117 | # port = 31830 118 | # } 119 | # 120 | # target = { 121 | # ip = "kubernetes_lb_wireguard" 122 | # port = 31830 123 | # } 124 | # 125 | # #description = "Forward UDP traffic on port '31825' to Wireguard" 126 | # description = "Example" 127 | #} 128 | -------------------------------------------------------------------------------- /infrastructure/terraform/opnsense/terraform.tf: -------------------------------------------------------------------------------- 1 | # Ref: https://registry.terraform.io/providers/browningluke/opnsense/latest/docs 2 | terraform { 3 | # Storing the .tfstate locally. 4 | # This is suitable only for testing purposes. 
Store it on S3 compatible backend for reliability 5 | #backend "local" {} 6 | backend "http" {} 7 | 8 | required_providers { 9 | gitlab = { 10 | source = "gitlabhq/gitlab" 11 | version = "~> 16.3.0" 12 | } 13 | 14 | opnsense = { 15 | version = "~> 0.9.0" 16 | source = "browningluke/opnsense" 17 | } 18 | } 19 | } 20 | 21 | # Ref: https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs 22 | provider "gitlab" { 23 | token = var.GITLAB_ACCESS_TOKEN 24 | } 25 | 26 | data "gitlab_project_variable" "opnsense_user_api_key_automation" { 27 | project = var.GITLAB_VARIABLES_PROJECT_ID 28 | environment_scope = var.GITLAB_VARIABLES_ENVIRONMENT 29 | 30 | key = "OPNSENSE_USER_API_KEY_AUTOMATION" 31 | } 32 | 33 | data "gitlab_project_variable" "opnsense_user_api_secret_automation" { 34 | project = var.GITLAB_VARIABLES_PROJECT_ID 35 | environment_scope = var.GITLAB_VARIABLES_ENVIRONMENT 36 | 37 | key = "OPNSENSE_USER_API_SECRET_AUTOMATION" 38 | } 39 | 40 | # Ref: https://registry.terraform.io/providers/browningluke/opnsense/latest/docs 41 | provider "opnsense" { 42 | uri = "https://192.168.2.1" 43 | allow_insecure = true 44 | 45 | api_key = data.gitlab_project_variable.opnsense_user_api_key_automation.value 46 | api_secret = data.gitlab_project_variable.opnsense_user_api_secret_automation.value 47 | } 48 | -------------------------------------------------------------------------------- /infrastructure/terraform/opnsense/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | include "root" { 2 | path = find_in_parent_folders() 3 | } 4 | 5 | inputs = { 6 | GITLAB_ACCESS_TOKEN = get_env("GITLAB_ACCESS_TOKEN") 7 | } 8 | -------------------------------------------------------------------------------- /infrastructure/terraform/opnsense/unbound.tf: -------------------------------------------------------------------------------- 1 | # REMEMBER: execute 'resolvectl flush-caches' after modifying CNAME registries to flush DNS cache 2 | 
3 | ####################################### 4 | ## Firewall machines DNS registries 5 | ####################################### 6 | resource "opnsense_unbound_host_override" "router_01" { 7 | enabled = true 8 | description = "Router @ OPNsense" 9 | 10 | hostname = "router-01" 11 | domain = "internal.place" 12 | server = "192.168.2.1" 13 | } 14 | 15 | ####################################### 16 | ## Compute machines DNS registries 17 | ####################################### 18 | # Resources related to Compute 10 (this is a hypervisor) 19 | resource "opnsense_unbound_host_override" "metal_compute_10" { 20 | enabled = true 21 | description = "Metal @ Compute 10" 22 | 23 | hostname = "compute-10" 24 | domain = "internal.place" 25 | server = "192.168.2.10" 26 | } 27 | 28 | resource "opnsense_unbound_host_override" "vm_01_compute_10" { 29 | enabled = true 30 | description = "VM 01 @ Compute 10" 31 | 32 | hostname = "compute-11" 33 | domain = "internal.place" 34 | server = "192.168.2.11" 35 | } 36 | 37 | resource "opnsense_unbound_host_override" "vm_02_compute_10" { 38 | enabled = true 39 | description = "VM 02 @ Compute 10" 40 | 41 | hostname = "compute-12" 42 | domain = "internal.place" 43 | server = "192.168.2.12" 44 | } 45 | 46 | resource "opnsense_unbound_host_override" "vm_03_compute_10" { 47 | enabled = true 48 | description = "VM 03 @ Compute 10" 49 | 50 | hostname = "compute-13" 51 | domain = "internal.place" 52 | server = "192.168.2.13" 53 | } 54 | 55 | # Resources related to Compute 20 (this is a hypervisor) 56 | resource "opnsense_unbound_host_override" "metal_compute_20" { 57 | enabled = true 58 | description = "Metal @ Compute 20" 59 | 60 | hostname = "compute-20" 61 | domain = "internal.place" 62 | server = "192.168.2.20" 63 | } 64 | 65 | resource "opnsense_unbound_host_override" "vm_01_compute_20" { 66 | enabled = true 67 | description = "VM 01 @ Compute 20" 68 | 69 | hostname = "compute-21" 70 | domain = "internal.place" 71 | server = "192.168.2.21" 72 
| } 73 | 74 | resource "opnsense_unbound_host_override" "vm_02_compute_20" { 75 | enabled = true 76 | description = "VM 02 @ Compute 20" 77 | 78 | hostname = "compute-22" 79 | domain = "internal.place" 80 | server = "192.168.2.22" 81 | } 82 | 83 | ####################################### 84 | ## Storage machines DNS registries 85 | ####################################### 86 | resource "opnsense_unbound_host_override" "storage_01" { 87 | enabled = true 88 | description = "Storage @ TrueNAS 01" 89 | 90 | hostname = "storage-01" 91 | domain = "internal.place" 92 | server = "192.168.2.31" 93 | } 94 | 95 | ####################################### 96 | ## Applications DNS registries 97 | ####################################### 98 | 99 | # Balance the requests between Kubernetes 01 master servers 100 | resource "opnsense_unbound_host_alias" "kubernetes_01_masters_balance_11" { 101 | override = opnsense_unbound_host_override.vm_01_compute_10.id 102 | 103 | enabled = true 104 | hostname = "kubernetes-01" 105 | domain = "internal.place" 106 | } 107 | 108 | resource "opnsense_unbound_host_alias" "kubernetes_01_masters_balance_12" { 109 | override = opnsense_unbound_host_override.vm_02_compute_10.id 110 | 111 | enabled = true 112 | hostname = "kubernetes-01" 113 | domain = "internal.place" 114 | } 115 | 116 | resource "opnsense_unbound_host_alias" "kubernetes_01_masters_balance_21" { 117 | override = opnsense_unbound_host_override.vm_01_compute_20.id 118 | 119 | enabled = true 120 | hostname = "kubernetes-01" 121 | domain = "internal.place" 122 | } 123 | 124 | # TODO 125 | resource "opnsense_unbound_host_override" "kubernetes_ingress_lb" { 126 | enabled = true 127 | description = "Point all the tools to Kubernetes ingress controller's LB" 128 | 129 | hostname = "*" 130 | domain = "tools.internal.place" 131 | server = "192.168.2.60" 132 | } 133 | -------------------------------------------------------------------------------- /infrastructure/terraform/opnsense/variables.tf: 
-------------------------------------------------------------------------------- 1 | # Token to access Gitlab. This is used for storing TFstate and get credentials to access OPNsense 2 | variable "GITLAB_ACCESS_TOKEN" { 3 | type = string 4 | description = "Token to access Gitlab" 5 | default = "api-token-placeholder" 6 | } 7 | 8 | # TODO 9 | variable "GITLAB_VARIABLES_PROJECT_ID" { 10 | type = string 11 | description = "Project ID on Gitlab to get the variables" 12 | default = "49083217" 13 | } 14 | 15 | # TODO 16 | variable "GITLAB_VARIABLES_ENVIRONMENT" { 17 | type = string 18 | description = "Environment on Gitlab to get the variables" 19 | default = "terraform" 20 | } -------------------------------------------------------------------------------- /infrastructure/terraform/talos/kubernetes-01.tf: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/siderolabs/contrib/blob/main/examples/terraform/basic/main.tf 2 | # Ref: talosctl -n -e disks --insecure 3 | 4 | locals { 5 | 6 | #################################################### 7 | # TODO 8 | #################################################### 9 | kubernetes_01_reusable_vars = { 10 | cluster_name = "kubernetes-01" 11 | controlplane_endpoint = "https://kubernetes-01.internal.place:6443" 12 | cert_sans = [ 13 | # Authorize local hostnames as TalosCTL seems to use them to upgrade from inside 14 | "localhost", 15 | "127.0.0.1", 16 | 17 | # Authorize all the nodes' hostnames to query Kube Apiserver 18 | "compute-11.internal.place", 19 | "compute-12.internal.place", 20 | "compute-13.internal.place", 21 | "compute-21.internal.place", 22 | "compute-22.internal.place", 23 | "kubernetes-01.internal.place", 24 | ] 25 | pod_subnets = ["10.90.0.0/16"] 26 | service_subnets = ["10.96.0.0/16"] 27 | 28 | templates = { 29 | controlplane = "${path.module}/templates/controlplane.yaml" 30 | worker = "${path.module}/templates/worker.yaml" 31 | } 32 | } 33 | 34 | 
#################################################### 35 | # TODO 36 | #################################################### 37 | kubernetes_01_config = { 38 | 39 | # TODO 40 | globals = { 41 | talos = { 42 | version = "v1.10.2" 43 | } 44 | config = { 45 | cluster_name = local.kubernetes_01_reusable_vars.cluster_name 46 | controlplane_endpoint = local.kubernetes_01_reusable_vars.controlplane_endpoint 47 | } 48 | } 49 | 50 | # TODO 51 | nodes_data = { 52 | controlplanes = { 53 | compute-11 = { 54 | node_address = "192.168.2.11" 55 | config_template_path = local.kubernetes_01_reusable_vars.templates.controlplane 56 | config_template_vars = { 57 | hostname = "compute-11" 58 | install_disk = "/dev/sda" 59 | cluster_name = local.kubernetes_01_reusable_vars.cluster_name 60 | cert_sans = local.kubernetes_01_reusable_vars.cert_sans 61 | pod_subnets = local.kubernetes_01_reusable_vars.pod_subnets 62 | service_subnets = local.kubernetes_01_reusable_vars.service_subnets 63 | controlplane_endpoint = local.kubernetes_01_reusable_vars.controlplane_endpoint 64 | } 65 | }, 66 | compute-12 = { 67 | node_address = "192.168.2.12" 68 | config_template_path = local.kubernetes_01_reusable_vars.templates.controlplane 69 | config_template_vars = { 70 | hostname = "compute-12" 71 | install_disk = "/dev/sda" 72 | cluster_name = local.kubernetes_01_reusable_vars.cluster_name 73 | cert_sans = local.kubernetes_01_reusable_vars.cert_sans 74 | pod_subnets = local.kubernetes_01_reusable_vars.pod_subnets 75 | service_subnets = local.kubernetes_01_reusable_vars.service_subnets 76 | controlplane_endpoint = local.kubernetes_01_reusable_vars.controlplane_endpoint 77 | } 78 | }, 79 | compute-21 = { 80 | node_address = "192.168.2.21" 81 | config_template_path = local.kubernetes_01_reusable_vars.templates.controlplane 82 | config_template_vars = { 83 | hostname = "compute-21" 84 | install_disk = "/dev/sda" 85 | cluster_name = local.kubernetes_01_reusable_vars.cluster_name 86 | cert_sans = 
local.kubernetes_01_reusable_vars.cert_sans 87 | pod_subnets = local.kubernetes_01_reusable_vars.pod_subnets 88 | service_subnets = local.kubernetes_01_reusable_vars.service_subnets 89 | controlplane_endpoint = local.kubernetes_01_reusable_vars.controlplane_endpoint 90 | } 91 | } 92 | } 93 | workers = { 94 | compute-13 = { 95 | node_address = "192.168.2.13" 96 | config_template_path = local.kubernetes_01_reusable_vars.templates.worker 97 | config_template_vars = { 98 | hostname = "compute-13" 99 | install_disk = "/dev/sda" 100 | cert_sans = local.kubernetes_01_reusable_vars.cert_sans 101 | pod_subnets = local.kubernetes_01_reusable_vars.pod_subnets 102 | service_subnets = local.kubernetes_01_reusable_vars.service_subnets 103 | controlplane_endpoint = local.kubernetes_01_reusable_vars.controlplane_endpoint 104 | } 105 | }, 106 | compute-22 = { 107 | node_address = "192.168.2.22" 108 | config_template_path = local.kubernetes_01_reusable_vars.templates.worker 109 | config_template_vars = { 110 | hostname = "compute-22" 111 | install_disk = "/dev/sda" 112 | cert_sans = local.kubernetes_01_reusable_vars.cert_sans 113 | pod_subnets = local.kubernetes_01_reusable_vars.pod_subnets 114 | service_subnets = local.kubernetes_01_reusable_vars.service_subnets 115 | controlplane_endpoint = local.kubernetes_01_reusable_vars.controlplane_endpoint 116 | } 117 | } 118 | } 119 | } 120 | } 121 | 122 | } 123 | 124 | # Create the workload resources in the target host through SSH 125 | module "talos_kubernetes_01" { 126 | 127 | source = "../modules/talos-cluster" 128 | 129 | globals = local.kubernetes_01_config.globals 130 | node_data = local.kubernetes_01_config.nodes_data 131 | } 132 | 133 | output "kubeconfig_kubernetes_01" { 134 | sensitive = true 135 | value = module.talos_kubernetes_01.kubeconfig 136 | } 137 | 138 | output "talosconfig_kubernetes_01" { 139 | sensitive = true 140 | value = module.talos_kubernetes_01.talosconfig 141 | } 142 | 
-------------------------------------------------------------------------------- /infrastructure/terraform/talos/templates/controlplane.yaml: -------------------------------------------------------------------------------- 1 | # Terraform templating ref: https://developer.hashicorp.com/terraform/language/functions/templatefile#generating-json-or-yaml-from-a-template 2 | # Talos config ref: https://www.talos.dev/v1.6/reference/configuration/v1alpha1/config/ 3 | machine: 4 | certSANs: [${join(",", formatlist("\"%s\"", cert_sans))}] 5 | install: 6 | disk: ${install_disk} 7 | network: 8 | hostname: ${hostname} 9 | nodeTaints: 10 | node-role.kubernetes.io/control-plane: :NoSchedule 11 | cluster: 12 | clusterName: ${cluster_name} 13 | network: 14 | dnsDomain: cluster.local 15 | podSubnets: [${join(",", formatlist("\"%s\"", pod_subnets))}] 16 | serviceSubnets: [${join(",", formatlist("\"%s\"", service_subnets))}] 17 | controlPlane: 18 | endpoint: ${controlplane_endpoint} 19 | apiServer: 20 | certSANs: [${join(",", formatlist("\"%s\"", cert_sans))}] 21 | adminKubeconfig: 22 | certLifetime: 43800h0m0s # Magic number: 5 years (default is 1 year). 
23 | allowSchedulingOnControlPlanes: true 24 | -------------------------------------------------------------------------------- /infrastructure/terraform/talos/templates/worker.yaml: -------------------------------------------------------------------------------- 1 | # Terraform templating ref: https://developer.hashicorp.com/terraform/language/functions/templatefile#generating-json-or-yaml-from-a-template 2 | # Talos config ref: https://www.talos.dev/v1.6/reference/configuration/v1alpha1/config/ 3 | machine: 4 | certSANs: [${join(",", formatlist("\"%s\"", cert_sans))}] 5 | install: 6 | disk: ${install_disk} 7 | network: 8 | hostname: ${hostname} 9 | cluster: 10 | network: 11 | dnsDomain: cluster.local 12 | podSubnets: [ ${ join(",", formatlist("\"%s\"", pod_subnets)) } ] 13 | serviceSubnets: [ ${ join(",", formatlist("\"%s\"", service_subnets)) } ] 14 | controlPlane: 15 | endpoint: ${controlplane_endpoint} 16 | -------------------------------------------------------------------------------- /infrastructure/terraform/talos/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | # Storing the .tfstate locally. 3 | # This is suitable only for testing purposes. 
Store it on S3 compatible backend for reliability 4 | #backend "local" {} 5 | backend "http" {} 6 | 7 | required_providers { 8 | gitlab = { 9 | source = "gitlabhq/gitlab" 10 | version = "~> 16.3.0" 11 | } 12 | 13 | talos = { 14 | source = "siderolabs/talos" 15 | version = "0.8.1" 16 | } 17 | 18 | } 19 | } 20 | 21 | # Ref: https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs 22 | provider "gitlab" { 23 | token = var.GITLAB_ACCESS_TOKEN 24 | } 25 | -------------------------------------------------------------------------------- /infrastructure/terraform/talos/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | include "root" { 2 | path = find_in_parent_folders() 3 | } 4 | 5 | inputs = { 6 | GITLAB_ACCESS_TOKEN = get_env("GITLAB_ACCESS_TOKEN") 7 | } -------------------------------------------------------------------------------- /infrastructure/terraform/talos/variables.tf: -------------------------------------------------------------------------------- 1 | # Token to access Gitlab. 
2 | # This is used for storing TFstate and get credentials to access hypervisor machines 3 | variable "GITLAB_ACCESS_TOKEN" { 4 | type = string 5 | description = "Token to access Gitlab" 6 | default = "api-token-placeholder" 7 | } 8 | 9 | # TODO 10 | variable "GITLAB_VARIABLES_PROJECT_ID" { 11 | type = string 12 | description = "Project ID on Gitlab to get the variables" 13 | default = "49083217" 14 | } 15 | 16 | # TODO 17 | variable "GITLAB_VARIABLES_ENVIRONMENT" { 18 | type = string 19 | description = "Environment on Gitlab to get the variables" 20 | default = "terraform" 21 | } 22 | -------------------------------------------------------------------------------- /infrastructure/terraform/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | remote_state { 2 | backend = "http" 3 | 4 | config = { 5 | address = "https://gitlab.com/api/v4/projects/49083217/terraform/state/${path_relative_to_include()}" 6 | lock_address = "https://gitlab.com/api/v4/projects/49083217/terraform/state/${path_relative_to_include()}/lock" 7 | unlock_address = "https://gitlab.com/api/v4/projects/49083217/terraform/state/${path_relative_to_include()}/lock" 8 | username = "achetronic" 9 | password = get_env("GITLAB_ACCESS_TOKEN") 10 | lock_method = "POST" 11 | unlock_method = "DELETE" 12 | retry_wait_min = "5" 13 | } 14 | 15 | } 16 | -------------------------------------------------------------------------------- /infrastructure/terraform/vms/compute-10.tf: -------------------------------------------------------------------------------- 1 | # TODO 2 | data "gitlab_project_variable" "instance_access_compute_10_username" { 3 | project = var.GITLAB_VARIABLES_PROJECT_ID 4 | environment_scope = var.GITLAB_VARIABLES_ENVIRONMENT 5 | 6 | key = "INSTANCE_ACCESS_COMPUTE_10_USERNAME" 7 | } 8 | 9 | data "gitlab_project_variable" "instance_access_compute_10_password" { 10 | project = var.GITLAB_VARIABLES_PROJECT_ID 11 | environment_scope = 
var.GITLAB_VARIABLES_ENVIRONMENT 12 | 13 | key = "INSTANCE_ACCESS_COMPUTE_10_PASSWORD" 14 | } 15 | 16 | data "gitlab_project_variable" "instance_access_compute_10_host" { 17 | project = var.GITLAB_VARIABLES_PROJECT_ID 18 | environment_scope = var.GITLAB_VARIABLES_ENVIRONMENT 19 | 20 | key = "INSTANCE_ACCESS_COMPUTE_10_HOST" 21 | } 22 | 23 | # TODO 24 | locals { 25 | 26 | # Globals definition 27 | globals_compute_10 = { 28 | 29 | # Configuration for SSH connection parameters 30 | ssh_connection = { 31 | host = data.gitlab_project_variable.instance_access_compute_10_host.value 32 | username = data.gitlab_project_variable.instance_access_compute_10_username.value 33 | password = data.gitlab_project_variable.instance_access_compute_10_password.value 34 | mode = "password" 35 | } 36 | 37 | # You can define as many urls as needed 38 | # Expected format: map[string]string 39 | # Example: {"desired_name" = "https://url/to/image.iso"} 40 | iso_image_urls = { 41 | "talos_v1.6.1_metal_amd64" = "https://github.com/siderolabs/talos/releases/download/v1.6.1/metal-amd64.iso" 42 | } 43 | } 44 | 45 | 46 | # Instance basic definition. 
47 | # WARNING: Choose IP a address inside the right subnet 48 | instances_compute_10 = { 49 | 50 | # Define the masters 51 | compute-11 = { 52 | image = "talos_v1.6.1_metal_amd64" 53 | 54 | vcpu = 4 55 | memory = 5 * 1024 56 | disk = 20000000000 57 | 58 | networks = [ 59 | { 60 | interface = "enp1s0" 61 | addresses = ["192.168.2.11"] 62 | mac = "CE:65:9B:BC:66:BE" 63 | } 64 | ] 65 | } 66 | 67 | compute-12 = { 68 | image = "talos_v1.6.1_metal_amd64" 69 | 70 | vcpu = 4 71 | memory = 5 * 1024 72 | disk = 20000000000 73 | 74 | networks = [ 75 | { 76 | interface = "enp1s0" 77 | addresses = ["192.168.2.12"] 78 | mac = "46:8F:5E:5B:DF:5F" 79 | } 80 | ] 81 | } 82 | 83 | # Define the workers 84 | compute-13 = { 85 | image = "talos_v1.6.1_metal_amd64" 86 | 87 | vcpu = 6 88 | memory = 20 * 1024 89 | disk = 20000000000 90 | 91 | networks = [ 92 | { 93 | interface = "enp1s0" 94 | addresses = ["192.168.2.13"] 95 | mac = "46:6D:08:95:51:3B" 96 | } 97 | ] 98 | 99 | # TODO: Craft a task to install xsltproc 100 | # This requires to have installed 'xsltproc' in client's machine 101 | # Ref: https://gist.github.com/goja288/9b8122cedd042156a1cea2af2bfa0f09 102 | xslt = file("${path.module}/templates/xsl/attach-usb.xsl") 103 | } 104 | } 105 | } 106 | 107 | -------------------------------------------------------------------------------- /infrastructure/terraform/vms/compute-20.tf: -------------------------------------------------------------------------------- 1 | # TODO 2 | data "gitlab_project_variable" "instance_access_compute_20_username" { 3 | project = var.GITLAB_VARIABLES_PROJECT_ID 4 | environment_scope = var.GITLAB_VARIABLES_ENVIRONMENT 5 | 6 | key = "INSTANCE_ACCESS_COMPUTE_20_USERNAME" 7 | } 8 | 9 | data "gitlab_project_variable" "instance_access_compute_20_password" { 10 | project = var.GITLAB_VARIABLES_PROJECT_ID 11 | environment_scope = var.GITLAB_VARIABLES_ENVIRONMENT 12 | 13 | key = "INSTANCE_ACCESS_COMPUTE_20_PASSWORD" 14 | } 15 | 16 | data "gitlab_project_variable" 
"instance_access_compute_20_host" { 17 | project = var.GITLAB_VARIABLES_PROJECT_ID 18 | environment_scope = var.GITLAB_VARIABLES_ENVIRONMENT 19 | 20 | key = "INSTANCE_ACCESS_COMPUTE_20_HOST" 21 | } 22 | 23 | # TODO 24 | locals { 25 | 26 | # Globals definition 27 | globals_compute_20 = { 28 | 29 | # Configuration for SSH connection parameters 30 | ssh_connection = { 31 | host = data.gitlab_project_variable.instance_access_compute_20_host.value 32 | username = data.gitlab_project_variable.instance_access_compute_20_username.value 33 | password = data.gitlab_project_variable.instance_access_compute_20_password.value 34 | mode = "password" 35 | } 36 | 37 | # You can define as many urls as needed 38 | # Expected format: map[string]string 39 | # Example: {"desired_name" = "https://url/to/image.iso"} 40 | iso_image_urls = { 41 | "talos_v1.6.1_metal_amd64" = "https://github.com/siderolabs/talos/releases/download/v1.6.1/metal-amd64.iso" 42 | } 43 | } 44 | 45 | 46 | # Instance basic definition. 47 | # WARNING: Choose IP a address inside the right subnet 48 | instances_compute_20 = { 49 | 50 | # Define the masters 51 | compute-21 = { 52 | image = "talos_v1.6.1_metal_amd64" 53 | 54 | vcpu = 4 55 | memory = 5 * 1024 56 | disk = 20000000000 57 | 58 | networks = [ 59 | { 60 | interface = "enp1s0" 61 | addresses = ["192.168.2.21"] 62 | mac = "02:7A:4F:3B:1E:62" 63 | } 64 | ] 65 | } 66 | 67 | # Define the workers 68 | compute-22 = { 69 | image = "talos_v1.6.1_metal_amd64" 70 | 71 | vcpu = 10 72 | memory = 25 * 1024 73 | disk = 20000000000 74 | 75 | networks = [ 76 | { 77 | interface = "enp1s0" 78 | addresses = ["192.168.2.22"] 79 | mac = "1A:4F:30:B8:F8:2D" 80 | } 81 | ] 82 | } 83 | 84 | } 85 | } 86 | 87 | -------------------------------------------------------------------------------- /infrastructure/terraform/vms/main.tf: -------------------------------------------------------------------------------- 1 | # Create the workload resources in the target host through SSH 2 | module 
"compute-10-virtual-machines" { 3 | 4 | source = "../modules/talos-vms" 5 | 6 | # Global configuration 7 | globals = local.globals_compute_10 8 | 9 | # Configuration related to VMs directly 10 | instances = local.instances_compute_10 11 | } 12 | 13 | module "compute-20-virtual-machines" { 14 | 15 | source = "../modules/talos-vms" 16 | 17 | # Global configuration 18 | globals = local.globals_compute_20 19 | 20 | # Configuration related to VMs directly 21 | instances = local.instances_compute_20 22 | } 23 | -------------------------------------------------------------------------------- /infrastructure/terraform/vms/templates/xsl/attach-usb.xsl: -------------------------------------------------------------------------------- 1 | 2 | 8 | 9 | 15 | 16 | 26 | 27 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /infrastructure/terraform/vms/terraform.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | # Storing the .tfstate locally. 3 | # This is suitable only for testing purposes. 
Store it on S3 compatible backend for reliability 4 | #backend "local" {} 5 | backend "http" {} 6 | 7 | required_providers { 8 | gitlab = { 9 | source = "gitlabhq/gitlab" 10 | version = "~> 16.3.0" 11 | } 12 | } 13 | } 14 | 15 | # Ref: https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs 16 | provider "gitlab" { 17 | token = var.GITLAB_ACCESS_TOKEN 18 | } 19 | -------------------------------------------------------------------------------- /infrastructure/terraform/vms/terragrunt.hcl: -------------------------------------------------------------------------------- 1 | include "root" { 2 | path = find_in_parent_folders() 3 | } 4 | 5 | inputs = { 6 | GITLAB_ACCESS_TOKEN = get_env("GITLAB_ACCESS_TOKEN") 7 | } -------------------------------------------------------------------------------- /infrastructure/terraform/vms/variables.tf: -------------------------------------------------------------------------------- 1 | # Token to access Gitlab. 2 | # This is used for storing TFstate and get credentials to access hypervisor machines 3 | variable "GITLAB_ACCESS_TOKEN" { 4 | type = string 5 | description = "Token to access Gitlab" 6 | default = "api-token-placeholder" 7 | } 8 | 9 | # TODO 10 | variable "GITLAB_VARIABLES_PROJECT_ID" { 11 | type = string 12 | description = "Project ID on Gitlab to get the variables" 13 | default = "49083217" 14 | } 15 | 16 | # TODO 17 | variable "GITLAB_VARIABLES_ENVIRONMENT" { 18 | type = string 19 | description = "Environment on Gitlab to get the variables" 20 | default = "terraform" 21 | } 22 | -------------------------------------------------------------------------------- /kubernetes/applications/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/achetronic/homelab-ops/188b980a9ed69508cfe7d695a90dcbd03d9e2db7/kubernetes/applications/.gitkeep -------------------------------------------------------------------------------- 
/kubernetes/applications/00-special-namespaces/csi-driver-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: csi-driver-nfs 5 | labels: 6 | pod-security.kubernetes.io/enforce: privileged 7 | -------------------------------------------------------------------------------- /kubernetes/applications/00-special-namespaces/csi-driver-s3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: csi-driver-s3 5 | labels: 6 | pod-security.kubernetes.io/enforce: privileged 7 | -------------------------------------------------------------------------------- /kubernetes/applications/00-special-namespaces/home-assistant.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: home-assistant 5 | labels: 6 | pod-security.kubernetes.io/enforce: privileged 7 | -------------------------------------------------------------------------------- /kubernetes/applications/00-special-namespaces/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | commonLabels: 5 | app.kubernetes.io/name: namespaces 6 | 7 | labels: 8 | - pairs: 9 | app.kubernetes.io/managed-by: Helm 10 | includeSelectors: false 11 | includeTemplates: false 12 | 13 | commonAnnotations: 14 | meta.helm.sh/release-name: namespaces 15 | meta.helm.sh/release-namespace: default 16 | 17 | resources: 18 | - csi-driver-nfs.yaml 19 | - csi-driver-s3.yaml 20 | - home-assistant.yaml 21 | - metallb.yaml 22 | - omada-controller.yaml 23 | - wireguard.yaml 24 | - zigbee2mqtt.yaml 25 | -------------------------------------------------------------------------------- /kubernetes/applications/00-special-namespaces/metallb.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: metallb 5 | labels: 6 | pod-security.kubernetes.io/enforce: privileged 7 | -------------------------------------------------------------------------------- /kubernetes/applications/00-special-namespaces/omada-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: omada-controller 5 | labels: 6 | pod-security.kubernetes.io/enforce: privileged 7 | -------------------------------------------------------------------------------- /kubernetes/applications/00-special-namespaces/wireguard.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: wireguard 5 | labels: 6 | pod-security.kubernetes.io/enforce: privileged 7 | -------------------------------------------------------------------------------- /kubernetes/applications/00-special-namespaces/zigbee2mqtt.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: zigbee2mqtt 5 | labels: 6 | pod-security.kubernetes.io/enforce: privileged 7 | -------------------------------------------------------------------------------- /kubernetes/applications/autoheater/base/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: autoheater 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: {} 9 | strategy: 10 | type: Recreate 11 | template: 12 | spec: 13 | hostname: autoheater-hostname 14 | restartPolicy: Always 15 | containers: 16 | - name: autoheater 17 | image: ghcr.io/achetronic/autoheater:latest 18 | imagePullPolicy: Always 19 | securityContext: 20 | seccompProfile: 21 | type: 
RuntimeDefault 22 | runAsNonRoot: true 23 | allowPrivilegeEscalation: false 24 | capabilities: 25 | drop: ["ALL"] 26 | resources: {} 27 | args: 28 | - run 29 | - --config 30 | - /tmp/autoheater.yaml 31 | - --log-level 32 | - debug 33 | 34 | envFrom: 35 | - configMapRef: 36 | name: autoheater-config-env 37 | 38 | volumeMounts: 39 | - mountPath: /tmp/autoheater.yaml 40 | subPath: autoheater.yaml 41 | name: autoheater-data 42 | 43 | volumes: 44 | - name: autoheater-data 45 | configMap: 46 | name: autoheater-config 47 | -------------------------------------------------------------------------------- /kubernetes/applications/autoheater/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | commonLabels: 5 | app.kubernetes.io/name: autoheater 6 | 7 | resources: 8 | - deployment.yaml 9 | -------------------------------------------------------------------------------- /kubernetes/applications/autoheater/kubernetes-01/configmap-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: autoheater-config-env 5 | data: 6 | TZ: "Atlantic/Canary" 7 | -------------------------------------------------------------------------------- /kubernetes/applications/autoheater/kubernetes-01/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: autoheater-config 5 | data: 6 | # Ref: https://github.com/achetronic/autoheater/blob/master/config/samples/autoheater.yaml 7 | autoheater.yaml: | 8 | apiVersion: v1alpha1 9 | kind: Autoheater 10 | metadata: 11 | name: laundry-room-heater 12 | spec: 13 | global: 14 | # Main scheduler calculates the schedules for the day just at 00:00h. 
But when the application is started 15 | # on a different moment, commonly in the middle of the day, may be, the best N hours were already passed. 16 | # This option is allowing to select the next cheapest N hours in the first startup even if they are 17 | # more expensive than the real cheapest ones 18 | ignorePassedHours: true 19 | 20 | # Take into account the weather as first filter. The idea is not to switch the heater on really hot days 21 | weather: 22 | enabled: true 23 | coordinates: 24 | latitude: 28.0935849 25 | longitude: -16.6357006 26 | 27 | # 28 | temperature: 29 | # Type of temperature to take into account. Possible values: apparent or real 30 | # Attention: apparent is recommended as it is the perceived feels-like temperature combining 31 | # wind chill factor, relative humidity and solar radiation 32 | type: apparent 33 | 34 | # Possible values are: fahrenheit or celsius 35 | unit: celsius 36 | 37 | # Max temperature to switch the heater on. Switching on the heater will be ignored on higher temperatures 38 | threshold: 26 39 | 40 | # Prices for today's day are coming from Apaga Luz, as these data are already filtered and ease-to-access 41 | # Ref: https://raw.githubusercontent.com/jorgeatgu/apaga-luz/main/public/data/today_price.json 42 | # Ref: https://raw.githubusercontent.com/jorgeatgu/apaga-luz/main/public/data/canary_price.json 43 | price: 44 | # Spanish pricing zone due to geographical differences. Possible values: mainland or canaryislands 45 | zone: canaryislands 46 | 47 | # Configuration related to the device 48 | device: 49 | # The type of the device to act on. This is used together with 'weather.temperature.threshold'. 50 | # In case 'heater' is selected, temperatures higher than the threshold won't act 51 | # In case 'cooler' is selected, temperatures lower than the threshold won't act 52 | # Possible values: cooler, heater 53 | type: heater 54 | 55 | # Time to keep the device turned on. 
56 | # At this moment, the cheapest N hours are always the chosen ones 57 | activeHours: 3 58 | 59 | # Several integrations are covered to use this CLI as 'standalone' process, or as a possible adaptor 60 | # between different domotic systems (sending the events to an HTTP endpoint, mqtt, etc.) 61 | # ATTENTION: All configured integrations will act at the same time 62 | integrations: 63 | # Endpoints to send the request on events 64 | # POST : { event: 'start', name: 'pepito', timestamp: ''} 65 | webhook: 66 | url: "https://home-assistant.tools.internal.place/api/webhook/3b1ad8ee-770e-4d2b-8fb6-9226f59872ba" 67 | 68 | -------------------------------------------------------------------------------- /kubernetes/applications/autoheater/kubernetes-01/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: autoheater 5 | 6 | labels: 7 | - pairs: 8 | app.kubernetes.io/managed-by: Helm 9 | includeSelectors: false 10 | includeTemplates: false 11 | 12 | - pairs: 13 | cluster: kubernetes-01 14 | includeSelectors: true 15 | includeTemplates: true 16 | 17 | commonAnnotations: 18 | meta.helm.sh/release-name: autoheater 19 | meta.helm.sh/release-namespace: autoheater 20 | 21 | replicas: 22 | - name: autoheater 23 | count: 1 24 | 25 | images: 26 | - name: ghcr.io/achetronic/autoheater:latest 27 | newTag: v0.5.0 28 | 29 | resources: 30 | - ../base 31 | - configmap.yaml 32 | - configmap-env.yaml 33 | -------------------------------------------------------------------------------- /kubernetes/applications/blocky/base/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: blocky 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: {} 9 | strategy: 10 | type: Recreate 11 | template: 12 | spec: 13 | hostname: blocky-hostname 14 | 
restartPolicy: Always 15 | containers: 16 | - name: blocky 17 | image: spx01/blocky 18 | resources: {} 19 | env: 20 | - name: TZ 21 | value: Europe/Berlin 22 | ports: 23 | - containerPort: 53 24 | - containerPort: 53 25 | protocol: UDP 26 | - containerPort: 4000 27 | 28 | volumeMounts: 29 | - mountPath: /app/config.yml 30 | subPath: config.yml 31 | name: blocky-data 32 | 33 | volumes: 34 | - name: blocky-data 35 | configMap: 36 | name: blocky-config 37 | -------------------------------------------------------------------------------- /kubernetes/applications/blocky/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | commonLabels: 5 | app.kubernetes.io/name: blocky 6 | 7 | resources: 8 | - deployment.yaml 9 | -------------------------------------------------------------------------------- /kubernetes/applications/blocky/kubernetes-01/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: blocky-config 5 | data: 6 | config.yml: | 7 | # Forward DNS requests related to domain 'cluster.local' to kube DNS 8 | conditional: 9 | fallbackUpstream: false 10 | mapping: 11 | cluster.local: 10.96.0.10 12 | 13 | # Forward not-found DNS requests to Opnsense's Unbound 14 | upstream: 15 | default: 16 | - 192.168.2.1 17 | 18 | # Set several ads block lists (loaded on runtime) 19 | # Ref: https://github.com/lightswitch05/hosts 20 | # Ref: https://github.com/StevenBlack/hosts 21 | blocking: 22 | blackLists: 23 | ads: 24 | - https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts 25 | - https://www.github.developerdan.com/hosts/lists/ads-and-tracking-extended.txt 26 | clientGroupsBlock: 27 | default: 28 | - ads 29 | 30 | # TODO 31 | port: 53 32 | httpPort: 4000 33 | 
-------------------------------------------------------------------------------- /kubernetes/applications/blocky/kubernetes-01/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://dev.to/ivanmoreno/how-to-connect-with-kubernetes-internal-network-using-wireguard-48bh 2 | # Ref: https://github.com/wg-easy/wg-easy 3 | 4 | apiVersion: kustomize.config.k8s.io/v1beta1 5 | kind: Kustomization 6 | 7 | namespace: blocky 8 | 9 | labels: 10 | - pairs: 11 | app.kubernetes.io/managed-by: Helm 12 | includeSelectors: false 13 | includeTemplates: false 14 | 15 | - pairs: 16 | cluster: kubernetes-01 17 | includeSelectors: true 18 | includeTemplates: true 19 | 20 | commonAnnotations: 21 | meta.helm.sh/release-name: blocky 22 | meta.helm.sh/release-namespace: blocky 23 | 24 | replicas: 25 | - name: blocky 26 | count: 3 27 | 28 | resources: 29 | - ../base 30 | - configmap.yaml 31 | - service.yaml 32 | -------------------------------------------------------------------------------- /kubernetes/applications/blocky/kubernetes-01/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: blocky 5 | annotations: 6 | metallb.universe.tf/loadBalancerIPs: &loadbalancerIP 192.168.2.61 7 | spec: 8 | type: LoadBalancer 9 | clusterIP: 10.96.100.10 10 | ports: 11 | - name: "53" 12 | port: 53 13 | targetPort: 53 14 | - name: 53-udp 15 | port: 53 16 | protocol: UDP 17 | targetPort: 53 18 | - name: "4000" 19 | port: 4000 20 | targetPort: 4000 21 | selector: {} 22 | -------------------------------------------------------------------------------- /kubernetes/applications/cert-manager/extra/clusterissuer-production.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-production 5 | spec: 6 | acme: 7 | email: 
donfumero@gmail.com 8 | server: https://acme-v02.api.letsencrypt.org/directory 9 | privateKeySecretRef: 10 | # Secret resource that will be used to store the account's private key. 11 | name: production-certificate-account-key 12 | solvers: 13 | - dns01: 14 | cloudflare: 15 | apiTokenSecretRef: 16 | name: cloudflare-api-token 17 | key: CLOUDFLARE_API_TOKEN -------------------------------------------------------------------------------- /kubernetes/applications/cert-manager/extra/clusterissuer-self-signed.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: self-signed 5 | spec: 6 | selfSigned: {} -------------------------------------------------------------------------------- /kubernetes/applications/cert-manager/extra/clusterissuer-staging.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: ClusterIssuer 3 | metadata: 4 | name: letsencrypt-staging 5 | spec: 6 | acme: 7 | email: donfumero@gmail.com 8 | server: https://acme-staging-v02.api.letsencrypt.org/directory 9 | privateKeySecretRef: 10 | # Secret resource that will be used to store the account's private key. 
11 | name: staging-certificate-account-key 12 | solvers: 13 | - dns01: 14 | cloudflare: 15 | apiTokenSecretRef: 16 | name: cloudflare-api-token 17 | key: CLOUDFLARE_API_TOKEN -------------------------------------------------------------------------------- /kubernetes/applications/cert-manager/extra/externalsecret-cloudflare-api-token.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1beta1 2 | kind: ExternalSecret 3 | metadata: 4 | name: cloudflare-api-token 5 | spec: 6 | refreshInterval: 1h 7 | 8 | secretStoreRef: 9 | kind: ClusterSecretStore 10 | name: gitlab-secret-store 11 | 12 | target: 13 | name: cloudflare-api-token 14 | creationPolicy: Owner 15 | 16 | data: 17 | - secretKey: CLOUDFLARE_API_TOKEN 18 | remoteRef: 19 | key: CLOUDFLARE_API_TOKEN -------------------------------------------------------------------------------- /kubernetes/applications/cert-manager/extra/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: cert-manager 5 | 6 | labels: 7 | - pairs: 8 | app.kubernetes.io/managed-by: Helm 9 | includeSelectors: false 10 | includeTemplates: false 11 | 12 | - pairs: 13 | cluster: kubernetes-01 14 | includeSelectors: true 15 | includeTemplates: true 16 | 17 | commonAnnotations: 18 | meta.helm.sh/release-name: cert-manager-extra 19 | meta.helm.sh/release-namespace: cert-manager 20 | 21 | resources: 22 | - clusterissuer-self-signed.yaml 23 | - clusterissuer-staging.yaml 24 | - clusterissuer-production.yaml 25 | - externalsecret-cloudflare-api-token.yaml 26 | -------------------------------------------------------------------------------- /kubernetes/applications/cert-manager/operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: cert-manager 3 | version: 0.1.0 4 | 
dependencies: 5 | - name: cert-manager 6 | version: v1.16.1 7 | repository: https://charts.jetstack.io -------------------------------------------------------------------------------- /kubernetes/applications/cert-manager/operator/values-global.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/cert-manager/cert-manager/blob/master/deploy/charts/cert-manager/values.yaml 2 | 3 | cert-manager: 4 | 5 | # Install the CRDs inside the cluster as part of the release process 6 | installCRDs: true -------------------------------------------------------------------------------- /kubernetes/applications/cert-manager/operator/values.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/cert-manager/cert-manager/blob/master/deploy/charts/cert-manager/values.yaml 2 | 3 | cert-manager: {} -------------------------------------------------------------------------------- /kubernetes/applications/congatudo/base/configmap-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: congatudo-config 5 | data: 6 | config.json: | 7 | { 8 | "embedded": false, 9 | "robot": { 10 | "implementation": "CecotecCongaRobot", 11 | "implementationSpecificConfig": { 12 | "ip": "127.0.0.1" 13 | } 14 | }, 15 | "webserver": { 16 | "port": 8080, 17 | "basicAuth": { 18 | "enabled": false, 19 | "username": "valetudo", 20 | "password": "valetudo" 21 | }, 22 | "blockExternalAccess": true 23 | }, 24 | "mqtt": { 25 | "enabled": false, 26 | "connection": { 27 | "host": "foobar.example", 28 | "port": 1883, 29 | "tls": { 30 | "enabled": false, 31 | "ca": "", 32 | "ignoreCertificateErrors": false 33 | }, 34 | "authentication": { 35 | "credentials": { 36 | "enabled": false, 37 | "username": "", 38 | "password": "" 39 | }, 40 | "clientCertificate": { 41 | "enabled": false, 42 | "certificate": 
"", 43 | "key": "" 44 | } 45 | } 46 | }, 47 | "identity": { 48 | "identifier": "" 49 | }, 50 | "interfaces": { 51 | "homie": { 52 | "enabled": true, 53 | "addICBINVMapProperty": false, 54 | "cleanAttributesOnShutdown": false 55 | }, 56 | "homeassistant": { 57 | "enabled": true, 58 | "cleanAutoconfOnShutdown": false 59 | } 60 | }, 61 | "customizations": { 62 | "topicPrefix": "", 63 | "provideMapData": true 64 | }, 65 | "optionalExposedCapabilities": [] 66 | }, 67 | "ntpClient": { 68 | "enabled": true, 69 | "server": "pool.ntp.org", 70 | "port": 123, 71 | "interval": 28800000, 72 | "timeout": 10000 73 | }, 74 | "timers": {}, 75 | "logLevel": "info", 76 | "debug": { 77 | "debugHassAnchors": false, 78 | "storeRawFDSUploads": false 79 | }, 80 | "networkAdvertisement": { 81 | "enabled": true 82 | }, 83 | "updater": { 84 | "enabled": true, 85 | "updateProvider": { 86 | "type": "github", 87 | "implementationSpecificConfig": {} 88 | } 89 | }, 90 | "oobe": { 91 | "welcomeDialogDismissed": true 92 | }, 93 | "valetudo": { 94 | "customizations": { 95 | "friendlyName": "" 96 | } 97 | } 98 | } -------------------------------------------------------------------------------- /kubernetes/applications/congatudo/base/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: congatudo 5 | spec: 6 | replicas: 1 7 | selector: {} 8 | strategy: 9 | type: Recreate 10 | template: 11 | metadata: 12 | labels: {} 13 | spec: 14 | initContainers: 15 | - name: copy-writable-config 16 | image: busybox:latest 17 | args: 18 | - cp 19 | - -a 20 | - /tmp/valetudo/. 
21 | - /etc/valetudo/ 22 | 23 | volumeMounts: &volumeMounts 24 | - name: config-vol 25 | mountPath: /tmp/valetudo/ 26 | - name: config-writable-vol 27 | mountPath: /etc/valetudo/ 28 | 29 | containers: 30 | - image: ghcr.io/congatudo/congatudo:alpine-latest 31 | name: congatudo 32 | env: 33 | - name: LGUI 34 | value: "1000" 35 | - name: LUID 36 | value: "1000" 37 | - name: TZ 38 | value: Etc/UTC 39 | ports: 40 | - containerPort: 80 41 | - containerPort: 4010 42 | - containerPort: 4030 43 | - containerPort: 4050 44 | volumeMounts: *volumeMounts 45 | restartPolicy: Always 46 | volumes: 47 | - name: config-writable-vol 48 | emptyDir: {} 49 | 50 | - name: config-vol 51 | configMap: 52 | name: congatudo-config 53 | -------------------------------------------------------------------------------- /kubernetes/applications/congatudo/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: default 5 | 6 | commonLabels: 7 | app.kubernetes.io/name: congatudo 8 | 9 | resources: 10 | - configmap-config.yaml 11 | - deployment.yaml 12 | - service.yaml 13 | - service-exposed.yaml 14 | -------------------------------------------------------------------------------- /kubernetes/applications/congatudo/base/service-exposed.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: congatudo-exposed 5 | annotations: 6 | metallb.universe.tf/loadBalancerIPs: &loadbalancerIP 192.168.2.62 7 | spec: 8 | type: LoadBalancer 9 | # Ref: https://github.com/congatudo/congatudo-add-on/blob/main/congatudo-beta/config.yml#L22-L26 10 | ports: 11 | # Conga CMD port 12 | - name: "4010" 13 | port: 4010 14 | targetPort: 4010 15 | 16 | # Conga map port 17 | - name: "4030" 18 | port: 4030 19 | targetPort: 4030 20 | 21 | # Conga RTC port 22 | - name: "4050" 23 | port: 4050 24 | 
targetPort: 4050 25 | selector: {} 26 | -------------------------------------------------------------------------------- /kubernetes/applications/congatudo/base/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: congatudo 5 | spec: 6 | ports: 7 | - name: "80" 8 | port: 80 9 | targetPort: 8080 10 | selector: {} 11 | -------------------------------------------------------------------------------- /kubernetes/applications/congatudo/kubernetes-01/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: congatudo 5 | annotations: 6 | nginx.ingress.kubernetes.io/rewrite-target: / 7 | cert-manager.io/cluster-issuer: "letsencrypt-production" 8 | spec: 9 | ingressClassName: nginx 10 | rules: 11 | - host: &host "congatudo.tools.internal.place" 12 | http: 13 | paths: 14 | - path: / 15 | pathType: Prefix 16 | backend: 17 | service: 18 | name: congatudo 19 | port: 20 | number: 80 21 | tls: 22 | - hosts: 23 | - *host 24 | secretName: congatudo-tls -------------------------------------------------------------------------------- /kubernetes/applications/congatudo/kubernetes-01/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://forum.opnsense.org/index.php?topic=15756.0 2 | # Ref: https://blog.xirion.net/posts/metallb-opnsense/ 3 | 4 | apiVersion: kustomize.config.k8s.io/v1beta1 5 | kind: Kustomization 6 | 7 | namespace: congatudo 8 | 9 | labels: 10 | - pairs: 11 | app.kubernetes.io/managed-by: Helm 12 | includeSelectors: false 13 | includeTemplates: false 14 | 15 | - pairs: 16 | cluster: kubernetes-01 17 | includeSelectors: true 18 | includeTemplates: true 19 | 20 | commonAnnotations: 21 | meta.helm.sh/release-name: congatudo 22 | meta.helm.sh/release-namespace: congatudo 23 | 24 | commonLabels: 
25 | cluster: kubernetes-01 26 | 27 | resources: 28 | - ../base 29 | - ingress.yaml 30 | -------------------------------------------------------------------------------- /kubernetes/applications/csi-driver-nfs/extra/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | labels: 5 | - pairs: 6 | app.kubernetes.io/managed-by: Helm 7 | includeSelectors: false 8 | includeTemplates: false 9 | 10 | - pairs: 11 | cluster: kubernetes-01 12 | includeSelectors: true 13 | includeTemplates: true 14 | 15 | commonAnnotations: 16 | meta.helm.sh/release-name: csi-driver-nfs-extra 17 | meta.helm.sh/release-namespace: csi-driver-nfs 18 | 19 | resources: 20 | - storageClass-standard-nfs.yaml 21 | -------------------------------------------------------------------------------- /kubernetes/applications/csi-driver-nfs/extra/storageClass-standard-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: standard-nfs 5 | provisioner: nfs.csi.k8s.io 6 | reclaimPolicy: Retain 7 | volumeBindingMode: Immediate 8 | parameters: 9 | server: storage-01.internal.place 10 | share: "/mnt/pool0/shared/kubernetes.nfs" 11 | 12 | # ref: https://kubernetes-csi.github.io/docs/secrets-and-credentials-storage-class.html#operations 13 | # csi.storage.k8s.io/provisioner-secret is only needed for providing mountOptions in DeleteVolume 14 | # csi.storage.k8s.io/provisioner-secret-name: "mount-options" 15 | # csi.storage.k8s.io/provisioner-secret-namespace: "default" 16 | mountOptions: 17 | - nfsvers=4.2 18 | - nconnect=8 19 | - hard 20 | - noatime 21 | -------------------------------------------------------------------------------- /kubernetes/applications/csi-driver-nfs/operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: v2 2 | name: csi-driver-nfs 3 | version: 0.1.0 4 | dependencies: 5 | - name: csi-driver-nfs 6 | version: 4.6.0 7 | repository: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts 8 | -------------------------------------------------------------------------------- /kubernetes/applications/csi-driver-nfs/operator/values-global.yaml: -------------------------------------------------------------------------------- 1 | # DISCLAIMER: THESE VALUES ARE FOR PRODUCTION PURPOSES ONLY. 2 | # PLEASE, DON'T DO DIRTY THINGS 3 | 4 | csi-driver-nfs: 5 | controller: 6 | replicas: 2 7 | strategyType: Recreate 8 | workingMountDir: "/mnt/kubernetes.nfs" 9 | -------------------------------------------------------------------------------- /kubernetes/applications/csi-driver-nfs/operator/values.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/kubernetes-csi/csi-driver-nfs/blob/master/charts/v4.6.0/csi-driver-nfs/values.yaml 2 | 3 | csi-driver-nfs: {} 4 | -------------------------------------------------------------------------------- /kubernetes/applications/csi-driver-s3/extra/externalsecret-csi-driver-s3-credentials.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1beta1 2 | kind: ExternalSecret 3 | metadata: 4 | name: csi-driver-s3-credentials 5 | spec: 6 | refreshInterval: 1h 7 | 8 | secretStoreRef: 9 | kind: ClusterSecretStore 10 | name: gitlab-secret-store 11 | 12 | target: 13 | name: csi-driver-s3-credentials 14 | creationPolicy: Owner 15 | 16 | data: 17 | - secretKey: endpoint 18 | remoteRef: 19 | key: CSI_S3_ENDPOINT 20 | 21 | - secretKey: accessKeyID 22 | remoteRef: 23 | key: CSI_S3_ACCESS_KEY_ID 24 | 25 | - secretKey: secretAccessKey 26 | remoteRef: 27 | key: CSI_S3_SECRET_ACCESS_KEY 28 | 29 | -------------------------------------------------------------------------------- 
/kubernetes/applications/csi-driver-s3/extra/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: csi-driver-s3 5 | 6 | labels: 7 | - pairs: 8 | app.kubernetes.io/managed-by: Helm 9 | includeSelectors: false 10 | includeTemplates: false 11 | 12 | - pairs: 13 | cluster: kubernetes-01 14 | includeSelectors: true 15 | includeTemplates: true 16 | 17 | commonAnnotations: 18 | meta.helm.sh/release-name: csi-driver-s3-extra 19 | meta.helm.sh/release-namespace: csi-driver-s3 20 | 21 | resources: 22 | - externalsecret-csi-driver-s3-credentials.yaml 23 | -------------------------------------------------------------------------------- /kubernetes/applications/csi-driver-s3/operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: csi-driver-s3 3 | version: 0.1.0 4 | dependencies: 5 | - name: csi-s3 6 | alias: csi-driver-s3 7 | version: 0.41.1 8 | repository: https://yandex-cloud.github.io/k8s-csi-s3/charts 9 | -------------------------------------------------------------------------------- /kubernetes/applications/csi-driver-s3/operator/values-global.yaml: -------------------------------------------------------------------------------- 1 | # DISCLAIMER: THESE VALUES ARE FOR PRODUCTION PURPOSES ONLY. 
2 | # PLEASE, DON'T DO DIRTY THINGS 3 | 4 | csi-driver-s3: 5 | 6 | storageClass: 7 | create: true 8 | name: standard-s3 9 | singleBucket: backups-kubernetes 10 | 11 | secret: 12 | create: false 13 | name: csi-driver-s3-credentials 14 | -------------------------------------------------------------------------------- /kubernetes/applications/csi-driver-s3/operator/values.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/yandex-cloud/k8s-csi-s3/blob/master/deploy/helm/csi-s3/values.yaml 2 | 3 | csi-driver-s3: {} 4 | 5 | -------------------------------------------------------------------------------- /kubernetes/applications/ddns-updater/base/configmap-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: config-ddns-updater 5 | data: 6 | config.template.json: | 7 | { 8 | "settings":[ 9 | { 10 | "provider": "cloudflare", 11 | "zone_identifier": "${CLOUDFLARE_ZONE_ID}", 12 | "domain": "internal.place", 13 | "host": "gateway", 14 | "ttl": 600, 15 | "token": "${CLOUDFLARE_API_TOKEN}", 16 | "ip_version": "ipv4", 17 | "proxied": false 18 | } 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /kubernetes/applications/ddns-updater/base/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: ddns-updater 5 | spec: 6 | replicas: 1 7 | selector: {} 8 | strategy: 9 | type: Recreate 10 | template: 11 | metadata: 12 | labels: {} 13 | spec: 14 | restartPolicy: Always 15 | 16 | # Substitute variables inside template file with their real values from environment vars 17 | # Done this way as the actual application does not support environment variables 18 | initContainers: 19 | - name: config-variables-expansion 20 | image: alpine 21 | imagePullPolicy: 
IfNotPresent 22 | command: [ "/bin/sh", "-c" ] 23 | args: 24 | - | 25 | apk add --no-cache gettext 26 | envsubst < /tmp/config/config.template.json > /updater/data/config.json 27 | cat /updater/data/config.json 28 | ls -alh /updater/data/ 29 | envFrom: 30 | - secretRef: 31 | name: cloudflare-credentials 32 | volumeMounts: 33 | - mountPath: '/tmp/config/' 34 | name: config 35 | readOnly: true 36 | - mountPath: '/updater/data' 37 | name: data 38 | containers: 39 | - name: cloudflare-ddns 40 | image: qmcgaw/ddns-updater:latest 41 | env: 42 | # Disable backups 43 | - name: BACKUP_PERIOD 44 | value: '0' 45 | - name: PERIOD 46 | value: 5m 47 | - name: UPDATE_COOLDOWN_PERIOD 48 | value: 5m 49 | - name: LOG_LEVEL 50 | value: debug 51 | volumeMounts: 52 | - mountPath: '/updater/data' 53 | name: data 54 | volumes: 55 | - name: config 56 | configMap: 57 | name: config-ddns-updater 58 | 59 | # A place to store config between initContainers and the main container 60 | - name: data 61 | emptyDir: {} 62 | 63 | -------------------------------------------------------------------------------- /kubernetes/applications/ddns-updater/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: default 5 | 6 | commonLabels: 7 | app.kubernetes.io/name: ddns-updater 8 | 9 | resources: 10 | - deployment.yaml 11 | - configmap-config.yaml 12 | -------------------------------------------------------------------------------- /kubernetes/applications/ddns-updater/kubernetes-01/externalsecret-cloudflare-credentials.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1beta1 2 | kind: ExternalSecret 3 | metadata: 4 | name: cloudflare-credentials 5 | spec: 6 | refreshInterval: 1h 7 | 8 | secretStoreRef: 9 | kind: ClusterSecretStore 10 | name: gitlab-secret-store 11 | 12 | target: 13 | name: 
cloudflare-credentials 14 | creationPolicy: Owner 15 | 16 | data: 17 | - secretKey: CLOUDFLARE_API_TOKEN 18 | remoteRef: 19 | key: CLOUDFLARE_API_TOKEN 20 | 21 | - secretKey: CLOUDFLARE_ZONE_ID 22 | remoteRef: 23 | key: CLOUDFLARE_ZONE_ID -------------------------------------------------------------------------------- /kubernetes/applications/ddns-updater/kubernetes-01/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/qdm12/ddns-updater 2 | # Ref: https://kubectl.docs.kubernetes.io/references/kustomize/kustomization/ 3 | 4 | apiVersion: kustomize.config.k8s.io/v1beta1 5 | kind: Kustomization 6 | 7 | namespace: ddns-updater 8 | 9 | labels: 10 | - pairs: 11 | app.kubernetes.io/managed-by: Helm 12 | includeSelectors: false 13 | includeTemplates: false 14 | 15 | - pairs: 16 | cluster: kubernetes-01 17 | includeSelectors: true 18 | includeTemplates: true 19 | 20 | commonAnnotations: 21 | meta.helm.sh/release-name: ddns-updater 22 | meta.helm.sh/release-namespace: ddns-updater 23 | 24 | resources: 25 | - ../base 26 | - externalsecret-cloudflare-credentials.yaml 27 | -------------------------------------------------------------------------------- /kubernetes/applications/external-secrets/extra/clustersecretstore-gitlab.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1beta1 2 | kind: ClusterSecretStore 3 | metadata: 4 | name: gitlab-secret-store 5 | spec: 6 | provider: 7 | # provider type: gitlab 8 | gitlab: 9 | #url: https://gitlab.com 10 | 11 | # Project homelab-ops 12 | projectID: "49083217" 13 | environment: "kubernetes-01" 14 | #groupIDs: "**groupID(s) go here**" 15 | #inheritFromGroups: "**automatically looks for variables in parent groups**" 16 | 17 | # Token auth: reads the GitLab access token from a pre-created Secret (gitlab-secret/token) 18 | auth: 19 | SecretRef: 20 | accessToken: 21 | name: gitlab-secret 22 | key: token 23 | namespace: external-secrets 24 |
-------------------------------------------------------------------------------- /kubernetes/applications/external-secrets/extra/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | labels: 5 | - pairs: 6 | app.kubernetes.io/managed-by: Helm 7 | includeSelectors: false 8 | includeTemplates: false 9 | 10 | - pairs: 11 | cluster: kubernetes-01 12 | includeSelectors: true 13 | includeTemplates: true 14 | 15 | commonAnnotations: 16 | meta.helm.sh/release-name: external-secrets-extra 17 | meta.helm.sh/release-namespace: external-secrets 18 | 19 | resources: 20 | - clustersecretstore-gitlab.yaml 21 | -------------------------------------------------------------------------------- /kubernetes/applications/external-secrets/operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: external-secrets 3 | version: 0.1.0 4 | dependencies: 5 | - name: external-secrets 6 | version: 0.10.4 7 | repository: https://charts.external-secrets.io -------------------------------------------------------------------------------- /kubernetes/applications/external-secrets/operator/values.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/external-secrets/external-secrets/blob/main/deploy/charts/external-secrets/values.yaml 2 | external-secrets: {} -------------------------------------------------------------------------------- /kubernetes/applications/forecastle/extra/compute-10.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: forecastle.stakater.com/v1alpha1 2 | kind: ForecastleApp 3 | metadata: 4 | name: compute-10 5 | spec: 6 | name: Compute 10 7 | url: http://compute-10.internal.place:9090 8 | networkRestricted: true 9 | group: infrastructure 10 | icon: 
https://raw.githubusercontent.com/cockpit-project/cockpit/main/src/branding/default/apple-touch-icon.png 11 | properties: 12 | allowed-connection-type: sftp 13 | allowed-connection-port: "22" 14 | -------------------------------------------------------------------------------- /kubernetes/applications/forecastle/extra/compute-20.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: forecastle.stakater.com/v1alpha1 2 | kind: ForecastleApp 3 | metadata: 4 | name: compute-20 5 | spec: 6 | name: Compute 20 7 | url: http://compute-20.internal.place:9090 8 | networkRestricted: true 9 | group: infrastructure 10 | icon: https://raw.githubusercontent.com/cockpit-project/cockpit/main/src/branding/default/apple-touch-icon.png 11 | properties: 12 | allowed-connection-type: sftp 13 | allowed-connection-port: "22" 14 | -------------------------------------------------------------------------------- /kubernetes/applications/forecastle/extra/congatudo.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: forecastle.stakater.com/v1alpha1 2 | kind: ForecastleApp 3 | metadata: 4 | name: congatudo 5 | spec: 6 | name: Congatudo 7 | url: https://congatudo.tools.internal.place 8 | networkRestricted: true 9 | group: applications 10 | #icon: http://congatudo.tools.internal.place/android-chrome-512x512.png 11 | icon: https://raw.githubusercontent.com/Hypfer/Valetudo/master/assets/logo/valetudo_logo_with_name.svg -------------------------------------------------------------------------------- /kubernetes/applications/forecastle/extra/home-assistant.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: forecastle.stakater.com/v1alpha1 2 | kind: ForecastleApp 3 | metadata: 4 | name: home-assistant 5 | spec: 6 | name: "Home Assistant" 7 | url: https://home-assistant.tools.internal.place 8 | networkRestricted: true 9 | group: applications 10 | icon: 
https://www.home-assistant.io/images/blog/2023-09-ha10/home-assistant-logo-new.png 11 | -------------------------------------------------------------------------------- /kubernetes/applications/forecastle/extra/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: forecastle 5 | 6 | labels: 7 | - pairs: 8 | app.kubernetes.io/managed-by: Helm 9 | includeSelectors: false 10 | includeTemplates: false 11 | 12 | - pairs: 13 | cluster: kubernetes-01 14 | app.kubernetes.io/name: forecastle 15 | includeSelectors: true 16 | includeTemplates: true 17 | 18 | commonAnnotations: 19 | meta.helm.sh/release-name: forecastle-extra 20 | meta.helm.sh/release-namespace: forecastle 21 | 22 | resources: 23 | # Infrastructure 24 | - compute-10.yaml 25 | - compute-20.yaml 26 | - opnsense.yaml 27 | - storage-01.yaml 28 | 29 | # Applications 30 | - congatudo.yaml 31 | - home-assistant.yaml 32 | - omada-controller.yaml 33 | - vaultwarden.yaml 34 | -------------------------------------------------------------------------------- /kubernetes/applications/forecastle/extra/omada-controller.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: forecastle.stakater.com/v1alpha1 2 | kind: ForecastleApp 3 | metadata: 4 | name: omada-controller 5 | spec: 6 | name: "Omada Controller" 7 | url: https://omada-controller.tools.internal.place 8 | networkRestricted: true 9 | group: applications 10 | icon: https://upload.wikimedia.org/wikipedia/commons/a/a7/Tp-Link_logo_2016.png 11 | -------------------------------------------------------------------------------- /kubernetes/applications/forecastle/extra/opnsense.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: forecastle.stakater.com/v1alpha1 2 | kind: ForecastleApp 3 | metadata: 4 | name: opnsense 5 | spec: 6 | name: 
OPNsense 7 | url: http://router-01.internal.place 8 | networkRestricted: true 9 | group: infrastructure 10 | icon: https://upload.wikimedia.org/wikipedia/commons/thumb/c/c3/Opnsense-logo.svg/600px-Opnsense-logo.svg.png 11 | properties: 12 | main-url: http://192.168.2.1 -------------------------------------------------------------------------------- /kubernetes/applications/forecastle/extra/storage-01.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: forecastle.stakater.com/v1alpha1 2 | kind: ForecastleApp 3 | metadata: 4 | name: storage-01 5 | spec: 6 | name: Storage 01 7 | url: http://storage-01.internal.place 8 | networkRestricted: true 9 | group: infrastructure 10 | icon: https://www.ixsystems.com/wp-content/uploads/2021/02/truenas-logo-full-color-rgb.png 11 | properties: 12 | allowed-connection-type: sftp 13 | allowed-connection-port: "22" -------------------------------------------------------------------------------- /kubernetes/applications/forecastle/extra/vaultwarden.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: forecastle.stakater.com/v1alpha1 2 | kind: ForecastleApp 3 | metadata: 4 | name: vaultwarden 5 | spec: 6 | name: Vaultwarden 7 | url: http://vaultwarden.tools.internal.place 8 | networkRestricted: true 9 | group: applications 10 | icon: https://vaultwarden.tools.internal.place/images/icons/android-chrome-512x512.png -------------------------------------------------------------------------------- /kubernetes/applications/forecastle/operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: forecastle 3 | version: 0.1.0 4 | dependencies: 5 | - name: forecastle 6 | alias: forecastle 7 | version: v1.0.145 8 | repository: https://stakater.github.io/stakater-charts -------------------------------------------------------------------------------- 
/kubernetes/applications/forecastle/operator/values-kubernetes-01.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/stakater/Forecastle/blob/master/deployments/kubernetes/chart/forecastle/values.yaml 2 | forecastle: 3 | 4 | forecastle: 5 | 6 | # TODO 7 | namespace: forecastle 8 | 9 | # TODO 10 | config: 11 | title: Homelab Ops 12 | crdEnabled: true 13 | namespaceSelector: 14 | any: true 15 | matchNames: [] 16 | 17 | # TODO 18 | ingress: 19 | enabled: true 20 | className: "nginx" 21 | annotations: 22 | cert-manager.io/cluster-issuer: "letsencrypt-production" 23 | hosts: 24 | - host: &host home.tools.internal.place 25 | paths: 26 | - path: / 27 | pathType: Prefix 28 | tls: 29 | - hosts: 30 | - *host 31 | secretName: forecastle-tls 32 | -------------------------------------------------------------------------------- /kubernetes/applications/forecastle/operator/values.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/stakater/Forecastle/blob/master/deployments/kubernetes/chart/forecastle/values.yaml 2 | forecastle: {} 3 | -------------------------------------------------------------------------------- /kubernetes/applications/home-assistant/base/deployment.yml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: home-assistant 5 | spec: 6 | replicas: 1 7 | strategy: 8 | type: Recreate 9 | selector: 10 | matchLabels: {} 11 | template: 12 | spec: 13 | # securityContext: 14 | # fsGroup: 33 15 | # fsGroupChangePolicy: "OnRootMismatch" 16 | restartPolicy: Always 17 | # initContainers: 18 | # - name: merge-configs 19 | # image: linuxserver/yq 20 | # command: [ "/bin/sh", "-c", "cp /config/cassandra.yaml /config/cassandra/" ] 21 | # volumeMounts: 22 | # - name: tmp-config 23 | # mountPath: /config/cassandra/ 24 | # - name: cassandraconfig 25 | # 
mountPath: /config/ 26 | containers: 27 | - name: home-assistant 28 | image: "ghcr.io/home-assistant/home-assistant:latest" 29 | imagePullPolicy: IfNotPresent 30 | 31 | # Needed to interact with USB devices connected to the node 32 | securityContext: 33 | privileged: true 34 | 35 | envFrom: 36 | - configMapRef: 37 | name: home-assistant-env 38 | ports: 39 | - name: http 40 | containerPort: 8123 41 | protocol: TCP 42 | 43 | livenessProbe: 44 | tcpSocket: 45 | port: 8123 46 | initialDelaySeconds: 0 47 | failureThreshold: 3 48 | timeoutSeconds: 1 49 | periodSeconds: 10 50 | 51 | readinessProbe: 52 | tcpSocket: 53 | port: 8123 54 | initialDelaySeconds: 0 55 | failureThreshold: 3 56 | timeoutSeconds: 1 57 | periodSeconds: 10 58 | 59 | startupProbe: 60 | tcpSocket: 61 | port: 8123 62 | initialDelaySeconds: 0 63 | failureThreshold: 30 64 | timeoutSeconds: 1 65 | periodSeconds: 5 66 | 67 | volumeMounts: 68 | - name: runudev 69 | mountPath: /run/udev 70 | readOnly: true 71 | 72 | - name: ha-config-data 73 | mountPath: /config 74 | 75 | - mountPath: /config/configuration.yaml 76 | subPath: configuration.yaml 77 | name: configmap-file 78 | 79 | # - mountPath: /config/automations.yaml 80 | # subPath: automations.yaml 81 | # name: configmap-file 82 | # 83 | # - mountPath: /config/scripts.yaml 84 | # subPath: scripts.yaml 85 | # name: configmap-file 86 | 87 | # - mountPath: /config/python_scripts.yaml 88 | # subPath: python_scripts.yaml 89 | # name: configmap-file 90 | 91 | volumes: 92 | # Only required for auto-detecting the port and some adapters like ConBee 93 | - name: runudev 94 | hostPath: 95 | path: /run/udev 96 | 97 | - name: ha-config-data 98 | persistentVolumeClaim: 99 | claimName: home-assistant-config-data 100 | 101 | - name: configmap-file 102 | configMap: 103 | name: home-assistant-config 104 | -------------------------------------------------------------------------------- /kubernetes/applications/home-assistant/base/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | commonLabels: 5 | app.kubernetes.io/name: home-assistant 6 | 7 | resources: 8 | - deployment.yml 9 | - service.yml 10 | -------------------------------------------------------------------------------- /kubernetes/applications/home-assistant/base/service.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: home-assistant 5 | spec: 6 | selector: {} 7 | type: ClusterIP 8 | ports: 9 | - port: 8123 10 | targetPort: http 11 | protocol: TCP 12 | name: http 13 | -------------------------------------------------------------------------------- /kubernetes/applications/home-assistant/kubernetes-01/certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: home-assistant 5 | spec: 6 | secretName: home-assistant-tls 7 | dnsNames: 8 | - home-assistant.tools.internal.place 9 | issuerRef: 10 | name: letsencrypt-production 11 | kind: ClusterIssuer 12 | group: cert-manager.io 13 | -------------------------------------------------------------------------------- /kubernetes/applications/home-assistant/kubernetes-01/configmap-env.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: home-assistant-env 5 | data: 6 | TZ: "Atlantic/Canary" 7 | -------------------------------------------------------------------------------- /kubernetes/applications/home-assistant/kubernetes-01/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: home-assistant-config 5 | data: 6 | # Ref: 
https://github.com/arsaboo/homeassistant-config/blob/master/configuration.yaml 7 | 8 | # Some parts of the configuration such as automation or scripts are included 9 | # from different files or dirs. 10 | # Doing this allows creating them dynamically (using GUI, testing purposes) 11 | # or statically (creating YAML, production-ready scripts or automations) 12 | # Ref: https://www.home-assistant.io/docs/configuration/splitting_configuration/ 13 | configuration.yaml: | 14 | # Loads default set of integrations. Do not remove. 15 | # Ref: https://www.home-assistant.io/integrations/default_config/ 16 | default_config: 17 | 18 | # Set log level (global and by integration) 19 | logger: 20 | default: warning 21 | logs: 22 | homeassistant.core: warning 23 | homeassistant.components.mqtt: warning 24 | 25 | # Include scenes, scripts and automations I create in the UI 26 | # TODO: Create the files if missing with an init container 27 | automation ui: !include automations.yaml 28 | scene ui: !include scenes.yaml 29 | script: !include scripts.yaml 30 | 31 | # Include my own handmade scripts and automations 32 | # TODO: Create the dirs if missing with an init container 33 | automation manual: !include_dir_merge_list automations 34 | python_script: 35 | scene manual: !include_dir_merge_list scenes 36 | script extra: !include_dir_merge_list scripts 37 | 38 | # Load frontend themes from the themes folder 39 | frontend: 40 | themes: !include_dir_merge_named themes 41 | 42 | # Text to speech 43 | tts: 44 | - platform: google_translate 45 | 46 | # Allows you to issue voice commands from the frontend in enabled browsers 47 | conversation: 48 | 49 | # TODO 50 | http: 51 | use_x_forwarded_for: true 52 | trusted_proxies: 53 | - 127.0.0.1 54 | - 10.90.0.0/16 55 | 56 | # Add links to side panel 57 | # Ref: https://www.home-assistant.io/integrations/panel_iframe/ 58 | #panel_iframe: 59 | # zigbee2mqtt: 60 | # title: "Zigbee 2 MQTT" 61 | # icon: mdi:zigbee 62 | # url: 
"https://zigbee2mqtt.tools.internal.place" 63 | 64 | # Temporary here. This can be done with automation scripts 65 | # Create a switch that sends IR codes to MQTT 66 | # Automating AC 67 | #switch: 68 | # - platform: template 69 | # switches: 70 | # living_room_air_conditioned_remote: 71 | # turn_on: 72 | # service: mqtt.publish 73 | # data: 74 | # payload: >- 75 | # {"ir_code_to_send": "CPoNUQYyAnUEiSADAzIC+QFAAwH5AUAFBXUE+QH5AUAJAzICdQSAA4ALQAWAAwN1BDICQAMD+QEyAkAHQANACwP5AfkB4AELATIC4AMLQA+AA0ABwAvgBwdAAeADE0ABAzICdQRABQn5AfkBMgJ1BDICQAcD+QH5AUAF4AEPQA0B+QFABUADA/kBMgJAG8ADQA+AAwH5AYAJATICQBtADUAHA3UE+QHAAwMyAjICQAXgAQMB+QFADUABQAfAAwH5AUAJgAGACYAFgAGACwcyAjIC+QH5AYAFAfkBQAeAAxMyAvkB+QEyAjIC+QH5ATICMgL5AYABQAlAA8CP4AsHAzIC+QE="} 76 | # topic: zigbee2mqtt/living_room/air_conditioned_remote/set 77 | # turn_off: 78 | # service: mqtt.publish 79 | # data: 80 | # payload: >- 81 | # {"ir_code_to_send": "CMYNlQWbAogEJCADB5sC6ACoBnwBQAtABxXrAw4DmwKIBA4DuwAOA+sDJALjAZsCQAsOiAQkAugA4wEuAQ4D6ACbIAsNfAHjAZsC6wObAnwBJAJACxAkApUFcwCbArsA6wPrAyQC42AXCeMBJAIkAnwBDgNAAwIuAZsgCwGbAkAVQAsA4yALBHwBJALjIAMDfAEOA0AHCeMB4wEkAiQC4wFAkwCbIAcDJAKIBEAF4AEBCYgEJALjAQ4DLgFAB+AFA0AbwAMHJALjAeMBJAJAAYAHAeMBQBsBJAKAB0ALwAMD4wEkAoABwAnAB+APAYAfgAXgDwFAHcADQAHAC+ARBwGIBIADQCPACwuIBOMBiATjASQC4wE="} 82 | # topic: zigbee2mqtt/living_room/air_conditioned_remote/set 83 | 84 | # rest_command: 85 | # get_json_data: 86 | # url: "https://raw.githubusercontent.com/jorgeatgu/apaga-luz/main/public/data/canary_price.json" 87 | 88 | # automations.yaml: | 89 | # [] 90 | # 91 | # scripts.yaml: | 92 | # [] 93 | -------------------------------------------------------------------------------- /kubernetes/applications/home-assistant/kubernetes-01/ingress.yml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: home-assistant 5 | spec: 6 | ingressClassName: nginx 7 | rules: 8 | - host: &host 
"home-assistant.tools.internal.place" 9 | http: 10 | paths: 11 | - path: / 12 | pathType: Prefix 13 | backend: 14 | service: 15 | name: home-assistant 16 | port: 17 | name: "http" 18 | tls: 19 | - hosts: 20 | - *host 21 | secretName: home-assistant-tls 22 | -------------------------------------------------------------------------------- /kubernetes/applications/home-assistant/kubernetes-01/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://www.manelrodero.com/blog/instalacion-de-hacs-en-home-assistant-docker 2 | # Ref: https://hacs.xyz/ 3 | --- 4 | apiVersion: kustomize.config.k8s.io/v1beta1 5 | kind: Kustomization 6 | 7 | namespace: home-assistant 8 | 9 | labels: 10 | - pairs: 11 | app.kubernetes.io/managed-by: Helm 12 | includeSelectors: false 13 | includeTemplates: false 14 | 15 | - pairs: 16 | cluster: kubernetes-01 17 | includeSelectors: true 18 | includeTemplates: true 19 | 20 | commonAnnotations: 21 | meta.helm.sh/release-name: home-assistant 22 | meta.helm.sh/release-namespace: home-assistant 23 | 24 | images: 25 | - name: ghcr.io/home-assistant/home-assistant 26 | newTag: "2025.2" 27 | 28 | resources: 29 | - ../base 30 | - pvc.yml 31 | - configmap.yaml 32 | - configmap-env.yaml 33 | - certificate.yaml 34 | - ingress.yml 35 | -------------------------------------------------------------------------------- /kubernetes/applications/home-assistant/kubernetes-01/pvc.yml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: home-assistant-config-data 5 | spec: 6 | storageClassName: standard-nfs 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 2Gi 12 | -------------------------------------------------------------------------------- /kubernetes/applications/ingress-nginx/Chart.yaml: -------------------------------------------------------------------------------- 1 
| apiVersion: v2 2 | name: ingress-nginx 3 | version: 0.1.0 4 | dependencies: 5 | - name: ingress-nginx 6 | version: 4.11.2 7 | repository: https://kubernetes.github.io/ingress-nginx -------------------------------------------------------------------------------- /kubernetes/applications/ingress-nginx/values-global.yaml: -------------------------------------------------------------------------------- 1 | # DISCLAIMER: THESE VALUES ARE FOR PRODUCTION PURPOSES ONLY. 2 | # PLEASE, DON'T DO DIRTY THINGS 3 | 4 | ingress-nginx: 5 | controller: 6 | # Mark the created ingressClass as the default in the cluster 7 | ingressClassResource: 8 | default: true 9 | 10 | # Resources for Ingress' pods 11 | # Ideally, no limits needed 12 | resources: 13 | requests: 14 | cpu: 200m 15 | memory: 200Mi 16 | 17 | # Enforce the reliability enabling HA for the controller 18 | autoscaling: 19 | enabled: true 20 | maxReplicas: 20 21 | targetCPUUtilizationPercentage: 80 22 | targetMemoryUtilizationPercentage: 80 23 | behavior: 24 | scaleDown: 25 | stabilizationWindowSeconds: 300 26 | policies: 27 | - type: Pods 28 | value: 1 29 | periodSeconds: 180 30 | scaleUp: 31 | stabilizationWindowSeconds: 300 32 | policies: 33 | - type: Pods 34 | value: 2 35 | periodSeconds: 60 36 | 37 | # Enable SSL connections directly to the services when requested on annotations 38 | extraArgs: 39 | enable-ssl-passthrough: "true" 40 | -------------------------------------------------------------------------------- /kubernetes/applications/ingress-nginx/values-kubernetes-01.yaml: -------------------------------------------------------------------------------- 1 | # DISCLAIMER: THESE VALUES ARE FOR PRODUCTION PURPOSES ONLY. 
2 | # PLEASE, DON'T DO DIRTY THINGS 3 | 4 | ingress-nginx: 5 | controller: 6 | 7 | # Resources for Ingress' pods 8 | # Ideally, no limits needed 9 | resources: 10 | requests: 11 | cpu: 200m 12 | memory: 200Mi 13 | 14 | # Assign a fixed IP for the ingress controller's LB 15 | service: 16 | annotations: 17 | metallb.universe.tf/loadBalancerIPs: &loadbalancerIP 192.168.2.60 18 | #loadBalancerIP: *loadbalancerIP 19 | -------------------------------------------------------------------------------- /kubernetes/applications/ingress-nginx/values.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/kubernetes/ingress-nginx/blob/main/charts/ingress-nginx/values.yaml 2 | 3 | ingress-nginx: {} -------------------------------------------------------------------------------- /kubernetes/applications/metallb/extra/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://forum.opnsense.org/index.php?topic=15756.0 2 | # Ref: https://blog.xirion.net/posts/metallb-opnsense/ 3 | 4 | apiVersion: kustomize.config.k8s.io/v1beta1 5 | kind: Kustomization 6 | 7 | namespace: metallb 8 | 9 | resources: 10 | #- bgpAdvertisement.yaml 11 | - l2Advertisement.yaml 12 | -------------------------------------------------------------------------------- /kubernetes/applications/metallb/extra/base/l2Advertisement.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: L2Advertisement 3 | metadata: 4 | name: external 5 | spec: 6 | ipAddressPools: 7 | - lb-range 8 | -------------------------------------------------------------------------------- /kubernetes/applications/metallb/extra/kubernetes-01/ipAddressPool.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: metallb.io/v1beta1 2 | kind: IPAddressPool 3 | metadata: 4 | name: lb-range 5 | spec: 6 | 
addresses: 7 | - 192.168.2.60-192.168.2.80 -------------------------------------------------------------------------------- /kubernetes/applications/metallb/extra/kubernetes-01/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://forum.opnsense.org/index.php?topic=15756.0 2 | # Ref: https://blog.xirion.net/posts/metallb-opnsense/ 3 | 4 | apiVersion: kustomize.config.k8s.io/v1beta1 5 | kind: Kustomization 6 | 7 | namespace: metallb 8 | 9 | labels: 10 | - pairs: 11 | app.kubernetes.io/managed-by: Helm 12 | includeSelectors: false 13 | includeTemplates: false 14 | 15 | - pairs: 16 | cluster: kubernetes-01 17 | includeSelectors: true 18 | includeTemplates: true 19 | 20 | commonAnnotations: 21 | meta.helm.sh/release-name: metallb-extra 22 | meta.helm.sh/release-namespace: metallb 23 | 24 | resources: 25 | - ../base 26 | - ipAddressPool.yaml 27 | -------------------------------------------------------------------------------- /kubernetes/applications/metallb/operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | description: Deployment for MetalLB 3 | name: metallb 4 | version: 0.1.0 5 | dependencies: 6 | - name: metallb 7 | version: 0.14.8 8 | repository: https://metallb.github.io/metallb 9 | -------------------------------------------------------------------------------- /kubernetes/applications/metallb/operator/values-global.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/metallb/metallb/blob/main/charts/metallb/values.yaml 2 | # Controller: Cluster-wide controller that handles IP address assignments 3 | # Speaker: Component that speaks the protocol(s) of your choice to make the services reachable 4 | metallb: 5 | speaker: 6 | tolerateMaster: false 7 | -------------------------------------------------------------------------------- 
/kubernetes/applications/metallb/operator/values.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/metallb/metallb/blob/main/charts/metallb/values.yaml 2 | metallb: {} -------------------------------------------------------------------------------- /kubernetes/applications/metrics-server/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: metrics-server 3 | version: 0.2.0 4 | dependencies: 5 | - name: metrics-server 6 | version: 3.12.1 7 | repository: https://kubernetes-sigs.github.io/metrics-server/ -------------------------------------------------------------------------------- /kubernetes/applications/metrics-server/values-kubernetes-01.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/kubernetes-sigs/metrics-server/blob/master/charts/metrics-server/values.yaml 2 | # Ref: https://github.com/kubernetes-sigs/metrics-server/blob/master/docs/command-line-flags.txt 3 | metrics-server: 4 | 5 | # Disable TLS SAN validation as I still have to regenerate Kubernetes certificate with nodes hostnames included 6 | args: 7 | - --kubelet-insecure-tls 8 | -------------------------------------------------------------------------------- /kubernetes/applications/metrics-server/values.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/kubernetes-sigs/metrics-server/blob/master/charts/metrics-server/values.yaml 2 | metrics-server: {} -------------------------------------------------------------------------------- /kubernetes/applications/mongodb/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: mongodb 3 | version: 0.1.0 4 | dependencies: 5 | - name: mongodb 6 | version: 14.13.0 7 | repository: https://charts.bitnami.com/bitnami 8 | 
-------------------------------------------------------------------------------- /kubernetes/applications/mongodb/templates/_helpers.tpl: -------------------------------------------------------------------------------- 1 | {{/* 2 | Expand the name of the chart. 3 | */}} 4 | {{- define "meta-mongodb.name" -}} 5 | {{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} 6 | {{- end }} 7 | 8 | {{/* 9 | Create a default fully qualified app name. 10 | We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 11 | If release name contains chart name it will be used as a full name. 12 | */}} 13 | {{- define "meta-mongodb.fullname" -}} 14 | {{- if .Values.fullnameOverride }} 15 | {{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} 16 | {{- else }} 17 | {{- $name := default .Chart.Name .Values.nameOverride }} 18 | {{- if contains $name .Release.Name }} 19 | {{- .Release.Name | trunc 63 | trimSuffix "-" }} 20 | {{- else }} 21 | {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} 22 | {{- end }} 23 | {{- end }} 24 | {{- end }} 25 | 26 | {{/* 27 | Create chart name and version as used by the chart label. 28 | */}} 29 | {{- define "meta-mongodb.chart" -}} 30 | {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} 31 | {{- end }} 32 | 33 | {{/* 34 | Common labels 35 | */}} 36 | {{- define "meta-mongodb.labels" -}} 37 | helm.sh/chart: {{ include "meta-mongodb.chart" . }} 38 | {{ include "meta-mongodb.selectorLabels" . }} 39 | {{- if .Chart.AppVersion }} 40 | app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} 41 | {{- end }} 42 | app.kubernetes.io/managed-by: {{ .Release.Service }} 43 | {{- end }} 44 | 45 | {{/* 46 | Selector labels 47 | */}} 48 | {{- define "meta-mongodb.selectorLabels" -}} 49 | app.kubernetes.io/name: {{ include "meta-mongodb.name" . 
}} 50 | app.kubernetes.io/instance: {{ .Release.Name }} 51 | {{- end }} 52 | -------------------------------------------------------------------------------- /kubernetes/applications/mongodb/templates/external-secrets.yaml: -------------------------------------------------------------------------------- 1 | {{- range $key, $value := .Values.customComponents.externalSecrets }} 2 | apiVersion: external-secrets.io/v1beta1 3 | kind: ExternalSecret 4 | metadata: 5 | name: {{ include "meta-mongodb.fullname" $ }}-{{ $key }} 6 | labels: 7 | {{- include "meta-mongodb.labels" $ | nindent 4 }} 8 | {{- with $value.annotations }} 9 | annotations: 10 | {{- toYaml . | nindent 4 }} 11 | {{- end }} 12 | spec: 13 | {{ toYaml $value.spec | nindent 2 }} 14 | --- 15 | {{- end }} 16 | -------------------------------------------------------------------------------- /kubernetes/applications/mongodb/values-kubernetes-01.yaml: -------------------------------------------------------------------------------- 1 | customComponents: 2 | 3 | externalSecrets: 4 | mongodb-users-credentials: 5 | annotations: {} 6 | spec: 7 | secretStoreRef: 8 | kind: ClusterSecretStore 9 | name: gitlab-secret-store 10 | target: 11 | name: mongodb-users-credentials 12 | data: 13 | - secretKey: mongodb-root-password 14 | remoteRef: 15 | key: MONGODB_USERS_CREDENTIALS_PASSWORD_ROOT 16 | - secretKey: mongodb-passwords 17 | remoteRef: 18 | key: MONGODB_USERS_CREDENTIALS_PASSWORD_OMADA 19 | 20 | # Ref: https://github.com/bitnami/charts/blob/main/bitnami/mongodb/values.yaml 21 | mongodb: 22 | # MongoDB Authentication parameters 23 | auth: 24 | enabled: true 25 | 26 | # MongoDB root user 27 | #rootUser: root 28 | 29 | # MongoDB root password 30 | # Ref: https://github.com/bitnami/containers/tree/main/bitnami/mongodb#setting-the-root-user-and-password-on-first-run 31 | #rootPassword: "" 32 | 33 | # MongoDB custom users and databases 34 | # Ref: 
https://github.com/bitnami/containers/tree/main/bitnami/mongodb#creating-a-user-and-database-on-first-run 35 | # List of custom users to be created during the initialization 36 | # List of passwords for the custom users set at `auth.usernames` 37 | # List of custom databases to be created during the initialization 38 | # Ref: https://github.com/bitnami/charts/issues/16975#issuecomment-1803017023 39 | # Relationship between following lists is [0]->[0]->[0]; [1]->[1]->[1]; ... 40 | databases: ["omada"] 41 | usernames: ["omada"] 42 | #passwords: [] 43 | 44 | # Existing secret with MongoDB credentials (keys: `mongodb-passwords`, `mongodb-root-password`, `mongodb-metrics-password`, `mongodb-replica-set-key`) 45 | # When it's set the passwords defined in previous parameters are ignored. 46 | existingSecret: "mongodb-users-credentials" 47 | 48 | # Dictionary of initdb scripts 49 | # Specify dictionary of scripts to be run at first boot 50 | initdbScripts: 51 | 52 | # Ref: https://github.com/mbentley/docker-omada-controller/blob/master/external_mongodb/omada.js 53 | init-mongo.sh: | 54 | #!/bin/bash 55 | 56 | mongosh --authenticationDatabase admin -u root -p $MONGODB_ROOT_PASSWORD <" }, 41 | subscribe: { deny: ">" } 42 | } 43 | }, 44 | { 45 | user: homeassistant, 46 | password: << $NATS_USERS_CREDENTIALS_PASSWORD_HOMEASSISTANT >>, 47 | permissions: { 48 | allow_responses: true, 49 | publish: [">",], 50 | subscribe: [">"] 51 | } 52 | }, 53 | { 54 | user: zigbee2mqtt, 55 | password: << $NATS_USERS_CREDENTIALS_PASSWORD_ZIGBEE2MQTT >>, 56 | permissions: { 57 | allow_responses: true, 58 | publish: [ 59 | "zigbee2mqtt", "zigbee2mqtt.>", 60 | "homeassistant", "homeassistant.>" 61 | ], 62 | subscribe: [ 63 | ">", 64 | "zigbee2mqtt", "zigbee2mqtt.>", 65 | "homeassistant", "homeassistant.>" 66 | ] 67 | } 68 | }, 69 | { 70 | user: tasmota, 71 | password: << $NATS_USERS_CREDENTIALS_PASSWORD_TASMOTA >>, 72 | permissions: { 73 | allow_responses: true, 74 | publish: [ 75 | "tasmota", 
"tasmota.>", 76 | "cmnd", "cmnd.>", 77 | "stat", "stat.>", 78 | "tele", "tele.>" 79 | ], 80 | subscribe: [ 81 | ">", 82 | "tasmota", "tasmota.>", 83 | "cmnd", "cmnd.>", 84 | "stat", "stat.>", 85 | "tele", "tele.>" 86 | ] 87 | } 88 | } 89 | ] 90 | } 91 | 92 | no_auth_user: anonymous 93 | 94 | # Enable clustering mode 95 | cluster: 96 | enabled: true 97 | replicas: 2 98 | 99 | # Persistence is needed for the sessions and retained messages since 100 | # even retained messages of QoS 0 are persisted. 101 | jetstream: 102 | enabled: true 103 | fileStore: 104 | pvc: 105 | storageClassName: standard-nfs 106 | 107 | # Enable MQTT 3.1.1 compatibility layer 108 | # Ref: https://docs.nats.io/running-a-nats-service/configuration/mqtt 109 | mqtt: 110 | enabled: true 111 | 112 | service: 113 | # Expose clustering port at service level (used on RAFT discovery) 114 | ports: 115 | cluster: 116 | enabled: true 117 | 118 | # Expose NATS in my internal network 119 | merge: 120 | metadata: 121 | annotations: 122 | metallb.universe.tf/loadBalancerIPs: &loadbalancerIP 192.168.2.65 123 | spec: 124 | type: LoadBalancer 125 | 126 | container: 127 | env: 128 | NATS_USERS_CREDENTIALS_PASSWORD_HOMEASSISTANT: 129 | valueFrom: 130 | secretKeyRef: 131 | name: nats-users-credentials 132 | key: homeassistant-password 133 | NATS_USERS_CREDENTIALS_PASSWORD_ZIGBEE2MQTT: 134 | valueFrom: 135 | secretKeyRef: 136 | name: nats-users-credentials 137 | key: zigbee2mqtt-password 138 | NATS_USERS_CREDENTIALS_PASSWORD_TASMOTA: 139 | valueFrom: 140 | secretKeyRef: 141 | name: nats-users-credentials 142 | key: tasmota-password 143 | 144 | # Disable useless tools 145 | # https://docs.nats.io/using-nats/nats-tools 146 | natsBox: 147 | enabled: false 148 | -------------------------------------------------------------------------------- /kubernetes/applications/nats/values.yaml: -------------------------------------------------------------------------------- 1 | nameOverride: "" 2 | fullnameOverride: "" 3 | 4 | 
customComponents: 5 | 6 | externalSecrets: {} 7 | # nats-auth-secret: 8 | # annotations: {} 9 | # spec: {} 10 | 11 | # Ref: https://github.com/nats-io/k8s/blob/main/helm/charts/nats/values.yaml 12 | nats: {} 13 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/base/configmap-env.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/mbentley/docker-omada-controller/blob/master/docker-compose.yml 2 | 3 | apiVersion: v1 4 | kind: ConfigMap 5 | metadata: 6 | name: omada-controller-environment 7 | data: 8 | # 9 | PUID: "508" 10 | PGID: "508" 11 | 12 | # 13 | SHOW_SERVER_LOGS: "true" 14 | SHOW_MONGODB_LOGS: "false" 15 | SSL_CERT_NAME: "tls.crt" 16 | SSL_KEY_NAME: "tls.key" 17 | TZ: "Etc/UTC" 18 | 19 | # Ref: https://github.com/mbentley/docker-omada-controller/tree/master/external_mongodb#common-steps 20 | NO_MONGODB: "true" 21 | MONGO_EXTERNAL: "true" 22 | MONGODB_HOST: "mongodb.mongodb.svc:27017" 23 | EAP_MONGOD_URI: "mongodb://omada:${MONGODB_USERS_CREDENTIALS_PASSWORD_OMADA}@${MONGODB_HOST}/omada" 24 | 25 | # 26 | MANAGE_HTTP_PORT: "8088" 27 | MANAGE_HTTPS_PORT: "8043" 28 | PORTAL_HTTP_PORT: "8088" 29 | PORTAL_HTTPS_PORT: "8843" 30 | 31 | # 32 | PORT_APP_DISCOVERY: "27001" 33 | PORT_DISCOVERY: "29810" 34 | 35 | # V1 36 | PORT_MANAGER_V1: "29811" 37 | PORT_ADOPT_V1: "29812" 38 | PORT_UPGRADE_V1: "29813" 39 | 40 | # V2 41 | PORT_MANAGER_V2: "29814" 42 | PORT_TRANSFER_V2: "29815" 43 | 44 | PORT_RTTY: "29816" 45 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/base/cronjob-backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | name: omada-backup 5 | spec: 6 | schedule: "0 2 * * *" 7 | jobTemplate: 8 | spec: 9 | backoffLimit: 3 10 | template: 11 | spec: 12 | 
restartPolicy: OnFailure 13 | containers: 14 | - name: omada-backup 15 | image: alpine:latest 16 | imagePullPolicy: IfNotPresent 17 | command: 18 | - /bin/sh 19 | - -ec 20 | - | 21 | #!/bin/sh 22 | 23 | # The way to login into Omada Controller was crafted by Matt Bentley (https://github.com/mbentley) 24 | # Ref: https://gist.github.com/mbentley/03c198077c81d52cb029b825e9a6dc18 25 | 26 | # The way to perform backup using the API is not documented anywhere in API docs. 27 | # It was reverse-engineered from GUI calls by Alby Hernández (https://github.com/achetronic) 28 | 29 | # DISCLAIMER: Omada Controller can automatically create periodic backups into the controller's directory. 30 | # I did not use this feature as it implies sharing the data volume between the controller and this cronjob. 31 | # Related PV was created as ReadWriteOnce in Kubernetes and didn't feel like migrating it at this moment 32 | # (may be in the future) 33 | 34 | OMADA_URL="https://omada-controller.tools.internal.place" 35 | USERNAME="${OMADA_USERS_CREDENTIALS_USERNAME_ADMIN}" 36 | PASSWORD="${OMADA_USERS_CREDENTIALS_PASSWORD_ADMIN}" 37 | BACKUP_DIR="/backup" 38 | MAX_BACKUPS=7 39 | 40 | # Don't look at this 41 | apk update && apk add curl jq 42 | 43 | # Ensure the backup directory exists 44 | mkdir -p $BACKUP_DIR 45 | 46 | # Get controller details from the API 47 | CONTROLLER_INFO="$(curl -sk "${OMADA_URL}/api/info")" 48 | CONTROLLER_ID="$(echo "${CONTROLLER_INFO}" | jq -r .result.omadacId)" 49 | CONTROLLER_VERSION="$(echo "${CONTROLLER_INFO}" | jq -r .result.controllerVer)" 50 | 51 | # Login, get token, set & use cookies 52 | TOKEN="$(curl -sk -X POST -c "/tmp/omada-cookies.txt" -b "/tmp/omada-cookies.txt" -H "Content-Type: application/json" \ 53 | "${OMADA_URL}/${CONTROLLER_ID}/api/v2/login" -d '{"username": "'"${USERNAME}"'", "password": "'"${PASSWORD}"'"}' | \ 54 | jq -r .result.token)" 55 | 56 | # Prepare backup 57 | printf "\nStarting backup preparation...\n" 58 | curl -sk -X POST -b 
"/tmp/omada-cookies.txt" -H "Content-Type: application/json" -H "Csrf-Token: ${TOKEN}" \ 59 | "${OMADA_URL}/${CONTROLLER_ID}/api/v2/maintenance/backup/prepare?token=${TOKEN}" 60 | 61 | # Verify backup status during preparation 62 | printf "\nVerifying backup status...\n" 63 | while true; do 64 | # Verification request 65 | NOW_TIMESTAMP=$(date +%s%3N) 66 | NOW_AS_DATE=$(date -d @"${NOW_TIMESTAMP:0:10}" +"%Y-%m-%d_%H-%M") 67 | 68 | OMADA_BACKUP_FILE="${BACKUP_DIR}/omada_backup_${CONTROLLER_VERSION}_SettingsOnly_${NOW_AS_DATE}.cfg" 69 | MD5_FILE="${OMADA_BACKUP_FILE}.md5" 70 | 71 | RESPONSE=$(curl -sk -X GET -b "/tmp/omada-cookies.txt" -H "Content-Type: application/json" -H "Csrf-Token: ${TOKEN}" \ 72 | "${OMADA_URL}/${CONTROLLER_ID}/api/v2/maintenance/backup/result?token=${TOKEN}&_t=${NOW_TIMESTAMP}") 73 | 74 | # Extract important values from JSON response 75 | STATUS=$(echo "$RESPONSE" | jq -r '.result.status') 76 | ERROR_CODE=$(echo "$RESPONSE" | jq -r '.result.errorCode') 77 | 78 | # Eval the state 79 | if [ "$STATUS" -eq 1 ]; then 80 | printf "Backup process is still being performed...\n" 81 | sleep 5 82 | 83 | elif [ "$STATUS" -eq 0 ]; then 84 | if [ "$ERROR_CODE" -eq 0 ]; then 85 | printf "Backup process was completed successfully. Downloading the file...\n" 86 | 87 | curl -sk -X GET -b "/tmp/omada-cookies.txt" -H "Content-Type: application/json" -H "Csrf-Token: ${TOKEN}" \ 88 | "${OMADA_URL}/${CONTROLLER_ID}/api/v2/files/backup?token=${TOKEN}&retention=-1&retainUser=false" \ 89 | --output "${OMADA_BACKUP_FILE}" 90 | printf "File downloaded as 'omada_backup_%s_SettingsOnly_%s.cfg'.\n" "${CONTROLLER_VERSION}" "${NOW_AS_DATE}" 91 | 92 | # Create a checksum for the backup 93 | md5sum ${OMADA_BACKUP_FILE} > ${MD5_FILE} 94 | 95 | else 96 | printf "Backup process was completed with errors (errorCode=$ERROR_CODE). Review details.\n" 97 | fi 98 | break 99 | else 100 | printf "Unexpected status: $STATUS. 
Review the response.\n" 101 | echo "$RESPONSE" 102 | break 103 | fi 104 | done 105 | 106 | # Delete old backups if the count exceeds MAX_BACKUPS 107 | BACKUP_COUNT=$(ls $BACKUP_DIR/omada_backup_*.cfg 2>/dev/null | wc -l || echo "0") 108 | if [ "$BACKUP_COUNT" -ge "$MAX_BACKUPS" ]; then 109 | REMOVE_COUNT=$(expr $BACKUP_COUNT - $MAX_BACKUPS + 1) 110 | ls -1 $BACKUP_DIR/omada_backup_*.cfg | sort | head -n $REMOVE_COUNT | \ 111 | while read -r file; do 112 | echo "Deleting $file and ${file}.md5" 113 | rm -f "$file" "${file}.md5" 114 | done 115 | else 116 | echo "There are no backups to delete" 117 | fi 118 | 119 | envFrom: 120 | - secretRef: 121 | name: omada-user-credentials 122 | optional: false 123 | volumeMounts: 124 | - name: omada-backup 125 | mountPath: /backup 126 | 127 | volumes: 128 | - name: omada-backup 129 | persistentVolumeClaim: 130 | claimName: omada-backup 131 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/base/deployment.yml: -------------------------------------------------------------------------------- 1 | # Ref: https://github.com/mbentley/docker-omada-controller/blob/master/docker-compose.yml 2 | # Fixed: https://github.com/mbentley/docker-omada-controller/commit/2b4ba51143740e727c03f6f2a38c8a0006a80415 3 | 4 | apiVersion: apps/v1 5 | kind: Deployment 6 | metadata: 7 | name: omada-controller 8 | spec: 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | selector: 13 | matchLabels: {} 14 | template: 15 | spec: 16 | # Omada Controller is listening on the host network to look for the APs 17 | # and therefore cannot resolve internal DNS requests properly for MongoDB's Service FQDN. 18 | # This is only needed to allow the controller to discover the devices on the same network. 
19 | # Not needed if EAPs are configured to point directly to controller's IP 20 | hostNetwork: true 21 | dnsPolicy: ClusterFirstWithHostNet 22 | 23 | restartPolicy: Always 24 | containers: 25 | - name: omada-controller 26 | image: "mbentley/omada-controller:latest-amd64" 27 | imagePullPolicy: IfNotPresent 28 | ports: 29 | # Ref: https://www.tp-link.com/us/support/faq/3281/ 30 | 31 | # 1. When you visit the management page of Omada Software Controller via an HTTP connection. 32 | # 2. When the clients visit the Portal page via an HTTP connection. 33 | - name: manage-http 34 | containerPort: 8088 35 | protocol: TCP 36 | 37 | # 1. When you visit the management page of Omada Software Controller via an HTTPS connection. 38 | # 2. When upgrading the firmware for the Omada devices with Omada Software Controller. 39 | - name: manage-https 40 | containerPort: 8043 41 | protocol: TCP 42 | 43 | # When the clients visit the Portal page via an HTTPS connection. 44 | - name: portal-https 45 | containerPort: 8843 46 | protocol: TCP 47 | 48 | ############################################################ 49 | ## Initialization Check 50 | ############################################################ 51 | 52 | # Omada Controller can be discovered by the Omada APP within the same network through this port 53 | - name: app-discovery 54 | containerPort: 27001 55 | protocol: UDP 56 | 57 | ############################################################ 58 | ## EAP Discovery, Adoption, Management, and Upgrade 59 | ############################################################ 60 | 61 | # Omada Controller and Omada Discovery Utility discover Omada devices 62 | - name: discovery 63 | containerPort: 29810 64 | protocol: UDP 65 | 66 | # Omada Controller and Omada Discovery Utility manage the Omada devices running firmware 67 | # fully adapted to Omada Controller v4*. 
68 | - name: manager-v1 69 | containerPort: 29811 70 | protocol: TCP 71 | - name: adopt-v1 72 | containerPort: 29812 73 | protocol: TCP 74 | 75 | # When upgrading the firmware for the Omada devices running firmware fully adapted to 76 | # Omada Controller v4*. 77 | - name: upgrade-v1 78 | containerPort: 29813 79 | protocol: TCP 80 | 81 | # Omada Controller and Omada Discovery Utility manage the Omada devices running firmware 82 | # fully adapted to Omada Controller v5*. 83 | - name: manager-v2 84 | containerPort: 29814 85 | protocol: TCP 86 | 87 | # Starting from v5.9, Omada Controller receives Device Info, Packet Capture Files, 88 | # and DPI Application Statistic Data from the Omada devices 89 | - name: transfer-v2 90 | containerPort: 29815 91 | protocol: TCP 92 | 93 | # Starting from v5.9, Omada Controller establishes the remote control terminal 94 | # session with the Omada devices 95 | - name: rtty 96 | containerPort: 29816 97 | protocol: TCP 98 | 99 | envFrom: 100 | - configMapRef: 101 | name: omada-controller-environment 102 | optional: false 103 | - secretRef: 104 | name: mongodb-user-credentials 105 | optional: false 106 | volumeMounts: 107 | - name: omada-data 108 | mountPath: /opt/tplink/EAPController/data 109 | - name: omada-logs 110 | mountPath: /opt/tplink/EAPController/logs 111 | 112 | volumes: 113 | - name: omada-data 114 | persistentVolumeClaim: 115 | claimName: omada-data 116 | - name: omada-logs 117 | persistentVolumeClaim: 118 | claimName: omada-logs 119 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | commonLabels: 5 | app.kubernetes.io/name: omada-controller 6 | 7 | # Ref: https://github.com/mbentley/docker-omada-controller/blob/master/docker-compose.yml 8 | # Ref: 
https://github.com/mbentley/docker-omada-controller/issues/356 9 | resources: 10 | - pvc-omada-data.yml 11 | - pvc-omada-logs.yml 12 | - configmap-env.yaml 13 | - deployment.yml 14 | - service.yml 15 | 16 | # Backup process executed from time to time uploading results to S3 17 | - pvc-omada-backup.yaml 18 | - cronjob-backup.yaml -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/base/pvc-omada-backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: omada-backup 5 | spec: 6 | storageClassName: standard-s3 7 | accessModes: 8 | - ReadWriteMany 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/base/pvc-omada-data.yml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: omada-data 5 | spec: 6 | storageClassName: standard-nfs 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 10Gi 12 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/base/pvc-omada-logs.yml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: omada-logs 5 | spec: 6 | storageClassName: standard-nfs 7 | accessModes: 8 | - ReadWriteOnce 9 | resources: 10 | requests: 11 | storage: 10Gi 12 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/base/service.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: omada-controller 5 | 
annotations: {} 6 | spec: 7 | selector: {} 8 | type: ClusterIP 9 | ports: 10 | #- name: manage-http 11 | # targetPort: manage-http 12 | # port: 8088 13 | # protocol: TCP 14 | 15 | - name: manage-https 16 | targetPort: manage-https 17 | port: 8043 18 | protocol: TCP 19 | 20 | #- name: portal-https 21 | # targetPort: portal-https 22 | # port: 8843 23 | # protocol: TCP 24 | 25 | - name: app-discovery 26 | targetPort: app-discovery 27 | port: 27001 28 | protocol: UDP 29 | 30 | - name: discovery 31 | targetPort: discovery 32 | port: 29810 33 | protocol: UDP 34 | 35 | - name: manager-v1 36 | targetPort: manager-v1 37 | port: 29811 38 | protocol: TCP 39 | 40 | - name: adopt-v1 41 | targetPort: adopt-v1 42 | port: 29812 43 | protocol: TCP 44 | 45 | - name: upgrade-v1 46 | targetPort: upgrade-v1 47 | port: 29813 48 | protocol: TCP 49 | 50 | - name: manager-v2 51 | targetPort: manager-v2 52 | port: 29814 53 | protocol: TCP 54 | 55 | - name: transfer-v2 56 | targetPort: transfer-v2 57 | port: 29815 58 | protocol: TCP 59 | 60 | - name: rtty 61 | targetPort: rtty 62 | port: 29816 63 | protocol: TCP 64 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/kubernetes-01/certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: omada-controller 5 | spec: 6 | secretName: omada-controller-tls 7 | dnsNames: 8 | - omada-controller.tools.internal.place 9 | issuerRef: 10 | name: letsencrypt-production 11 | kind: ClusterIssuer 12 | group: cert-manager.io 13 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/kubernetes-01/externalsecret-mongodb-user-credentials.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1beta1 2 | kind: ExternalSecret 3 | 
metadata: 4 | name: mongodb-user-credentials 5 | spec: 6 | secretStoreRef: 7 | kind: ClusterSecretStore 8 | name: gitlab-secret-store 9 | target: 10 | name: mongodb-user-credentials 11 | data: 12 | - secretKey: MONGODB_USERS_CREDENTIALS_PASSWORD_OMADA 13 | remoteRef: 14 | key: MONGODB_USERS_CREDENTIALS_PASSWORD_OMADA 15 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/kubernetes-01/externalsecret-omada-user-credentials.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: external-secrets.io/v1beta1 2 | kind: ExternalSecret 3 | metadata: 4 | name: omada-user-credentials 5 | spec: 6 | secretStoreRef: 7 | kind: ClusterSecretStore 8 | name: gitlab-secret-store 9 | target: 10 | name: omada-user-credentials 11 | data: 12 | - secretKey: OMADA_USERS_CREDENTIALS_USERNAME_ADMIN 13 | remoteRef: 14 | key: OMADA_USERS_CREDENTIALS_USERNAME_ADMIN 15 | - secretKey: OMADA_USERS_CREDENTIALS_PASSWORD_ADMIN 16 | remoteRef: 17 | key: OMADA_USERS_CREDENTIALS_PASSWORD_ADMIN 18 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/kubernetes-01/ingress.yml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: omada-controller 5 | annotations: 6 | nginx.ingress.kubernetes.io/backend-protocol: "HTTPS" 7 | spec: 8 | ingressClassName: nginx 9 | rules: 10 | - host: &host "omada-controller.tools.internal.place" 11 | http: 12 | paths: 13 | - path: / 14 | pathType: Prefix 15 | backend: 16 | service: 17 | name: omada-controller 18 | port: 19 | name: "manage-https" 20 | tls: 21 | - hosts: 22 | - *host 23 | secretName: omada-controller-tls 24 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/kubernetes-01/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | # Ref: https://www.manelrodero.com/blog/instalacion-de-hacs-en-home-assistant-docker 2 | # Ref: https://hacs.xyz/ 3 | --- 4 | apiVersion: kustomize.config.k8s.io/v1beta1 5 | kind: Kustomization 6 | 7 | namespace: omada-controller 8 | 9 | labels: 10 | - pairs: 11 | app.kubernetes.io/managed-by: Helm 12 | includeSelectors: false 13 | includeTemplates: false 14 | 15 | - pairs: 16 | cluster: kubernetes-01 17 | includeSelectors: true 18 | includeTemplates: true 19 | 20 | commonAnnotations: 21 | meta.helm.sh/release-name: omada-controller 22 | meta.helm.sh/release-namespace: omada-controller 23 | 24 | images: 25 | - name: mbentley/omada-controller 26 | newTag: "5.13" 27 | 28 | resources: 29 | - ../base 30 | - externalsecret-mongodb-user-credentials.yaml 31 | - certificate.yaml 32 | - ingress.yml 33 | 34 | # Needed by backups' cronjob 35 | - externalsecret-omada-user-credentials.yaml 36 | 37 | patches: 38 | - path: overlays/service.yml 39 | -------------------------------------------------------------------------------- /kubernetes/applications/omada-controller/kubernetes-01/overlays/service.yml: -------------------------------------------------------------------------------- 1 | $patch: merge 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: omada-controller 6 | annotations: 7 | metallb.universe.tf/loadBalancerIPs: &loadbalancerIP 192.168.2.64 8 | spec: 9 | type: LoadBalancer 10 | 11 | -------------------------------------------------------------------------------- /kubernetes/applications/vaultwarden/base/cronjob-backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | name: vaultwarden-backup 5 | spec: 6 | schedule: "0 2 * * *" 7 | jobTemplate: 8 | spec: 9 | backoffLimit: 3 10 | template: 11 | spec: 12 | restartPolicy: OnFailure 13 | containers: 14 | - name: vaultwarden-backup 
15 | image: alpine:latest 16 | imagePullPolicy: IfNotPresent 17 | command: 18 | - /bin/sh 19 | - -ec 20 | - | 21 | #!/bin/sh 22 | 23 | BACKUP_DIR="/backup" 24 | MAX_BACKUPS=7 25 | DATA_DIR="/data" 26 | 27 | mkdir -p $BACKUP_DIR 28 | 29 | # Count existing backups 30 | BACKUP_COUNT=$(ls $BACKUP_DIR/vaultwarden-backup-*.tar.gz 2>/dev/null | wc -l || echo "0") 31 | DATE=$(date '+%Y-%m-%d-%H-%M') 32 | BACKUP_FILE="${BACKUP_DIR}/vaultwarden-backup-${DATE}.tar.gz" 33 | MD5_FILE="${BACKUP_FILE}.md5" 34 | 35 | # Create a new backup 36 | tar -czf ${BACKUP_FILE} -C ${DATA_DIR} . 37 | 38 | # Create a checksum for the backup 39 | md5sum ${BACKUP_FILE} > ${MD5_FILE} 40 | 41 | # Delete old backups if the count exceeds MAX_BACKUPS 42 | if [ "$BACKUP_COUNT" -ge "$MAX_BACKUPS" ]; then 43 | REMOVE_COUNT=$(expr $BACKUP_COUNT - $MAX_BACKUPS + 1) 44 | ls -1 $BACKUP_DIR/vaultwarden-backup-*.tar.gz | sort | head -n $REMOVE_COUNT | \ 45 | while read -r file; do 46 | echo "Deleting $file and ${file}.md5" 47 | rm -f "$file" "${file}.md5" 48 | done 49 | else 50 | echo "There are no backups to delete" 51 | fi 52 | 53 | volumeMounts: 54 | - name: vaultwarden-data 55 | mountPath: /data 56 | 57 | - name: vaultwarden-backup 58 | mountPath: /backup 59 | 60 | volumes: 61 | - name: vaultwarden-data 62 | persistentVolumeClaim: 63 | claimName: vaultwarden-data 64 | 65 | - name: vaultwarden-backup 66 | persistentVolumeClaim: 67 | claimName: vaultwarden-backup 68 | 69 | -------------------------------------------------------------------------------- /kubernetes/applications/vaultwarden/base/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: vaultwarden 5 | spec: 6 | selector: {} 7 | strategy: 8 | type: Recreate 9 | template: 10 | metadata: 11 | labels: {} 12 | spec: 13 | restartPolicy: Always 14 | containers: 15 | - image: vaultwarden/server:latest 16 | name: vaultwarden 17 | ports: 18 | - 
containerPort: 80 19 | resources: {} 20 | envFrom: 21 | - configMapRef: 22 | name: vaultwarden-environment 23 | volumeMounts: 24 | - mountPath: /data/ 25 | name: vaultwarden-data 26 | volumes: 27 | - name: vaultwarden-data 28 | persistentVolumeClaim: 29 | claimName: vaultwarden-data 30 | -------------------------------------------------------------------------------- /kubernetes/applications/vaultwarden/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: vaultwarden 5 | 6 | labels: 7 | - pairs: 8 | app.kubernetes.io/name: vaultwarden 9 | includeSelectors: true 10 | includeTemplates: true 11 | 12 | resources: 13 | - pvc.yaml 14 | - deployment.yaml 15 | - service.yaml 16 | 17 | - pvc-backup.yaml 18 | - cronjob-backup.yaml -------------------------------------------------------------------------------- /kubernetes/applications/vaultwarden/base/pvc-backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: vaultwarden-backup 5 | spec: 6 | storageClassName: standard-s3 7 | accessModes: 8 | - ReadWriteMany 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | -------------------------------------------------------------------------------- /kubernetes/applications/vaultwarden/base/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: vaultwarden-data 5 | spec: 6 | storageClassName: standard-nfs 7 | accessModes: 8 | - ReadWriteMany 9 | resources: 10 | requests: 11 | storage: 20Gi 12 | -------------------------------------------------------------------------------- /kubernetes/applications/vaultwarden/base/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
v1 2 | kind: Service 3 | metadata: 4 | name: vaultwarden 5 | spec: 6 | ports: 7 | - name: "8010" 8 | port: 8010 9 | targetPort: 80 10 | selector: {} 11 | -------------------------------------------------------------------------------- /kubernetes/applications/vaultwarden/kubernetes-01/certificate.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: cert-manager.io/v1 2 | kind: Certificate 3 | metadata: 4 | name: vaultwarden 5 | spec: 6 | secretName: vaultwarden-tls 7 | dnsNames: 8 | - vaultwarden.tools.internal.place 9 | issuerRef: 10 | name: letsencrypt-production 11 | kind: ClusterIssuer 12 | group: cert-manager.io -------------------------------------------------------------------------------- /kubernetes/applications/vaultwarden/kubernetes-01/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: vaultwarden-environment 5 | data: 6 | SENDS_ALLOWED: "false" 7 | SIGNUPS_ALLOWED: "false" 8 | WEBSOCKET_ENABLED: "true" -------------------------------------------------------------------------------- /kubernetes/applications/vaultwarden/kubernetes-01/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: vaultwarden 5 | annotations: 6 | nginx.ingress.kubernetes.io/rewrite-target: / 7 | spec: 8 | ingressClassName: nginx 9 | rules: 10 | - host: &host "vaultwarden.tools.internal.place" 11 | http: 12 | paths: 13 | - path: / 14 | pathType: Prefix 15 | backend: 16 | service: 17 | name: vaultwarden 18 | port: 19 | number: 8010 20 | tls: 21 | - hosts: 22 | - *host 23 | secretName: vaultwarden-tls -------------------------------------------------------------------------------- /kubernetes/applications/vaultwarden/kubernetes-01/kustomization.yaml: 
-------------------------------------------------------------------------------- 1 | # Ref: https://forum.opnsense.org/index.php?topic=15756.0 2 | # Ref: https://blog.xirion.net/posts/metallb-opnsense/ 3 | 4 | apiVersion: kustomize.config.k8s.io/v1beta1 5 | kind: Kustomization 6 | 7 | namespace: vaultwarden 8 | 9 | labels: 10 | - pairs: 11 | app.kubernetes.io/managed-by: Helm 12 | includeSelectors: false 13 | includeTemplates: false 14 | 15 | - pairs: 16 | cluster: kubernetes-01 17 | includeSelectors: false 18 | includeTemplates: false 19 | 20 | commonAnnotations: 21 | meta.helm.sh/release-name: vaultwarden 22 | meta.helm.sh/release-namespace: vaultwarden 23 | 24 | images: 25 | - name: vaultwarden/server 26 | newTag: 1.33.2 27 | 28 | replicas: 29 | - name: vaultwarden 30 | count: 1 31 | 32 | resources: 33 | - ../base 34 | - configmap.yaml 35 | - certificate.yaml 36 | - ingress.yaml 37 | -------------------------------------------------------------------------------- /kubernetes/applications/wireguard/base/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: wireguard-configmap 5 | data: 6 | PUID: "1000" 7 | PGID: "1000" 8 | TZ: "Etc/UTC" 9 | 10 | # External port for docker host. 
Used in server mode 11 | SERVERPORT: "31820" 12 | 13 | PERSISTENTKEEPALIVE_PEERS: "all" 14 | -------------------------------------------------------------------------------- /kubernetes/applications/wireguard/base/cronjob-backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | name: wireguard-backup 5 | spec: 6 | schedule: "0 2 * * *" 7 | jobTemplate: 8 | spec: 9 | backoffLimit: 3 10 | template: 11 | spec: 12 | restartPolicy: OnFailure 13 | containers: 14 | - name: wireguard-backup 15 | image: alpine:latest 16 | imagePullPolicy: IfNotPresent 17 | command: 18 | - /bin/sh 19 | - -ec 20 | - | 21 | #!/bin/sh 22 | 23 | BACKUP_DIR="/backup" 24 | MAX_BACKUPS=7 25 | DATA_DIR="/data" 26 | 27 | mkdir -p $BACKUP_DIR 28 | 29 | # Count existing backups 30 | BACKUP_COUNT=$(ls $BACKUP_DIR/wireguard-backup-*.tar.gz 2>/dev/null | wc -l || echo "0") 31 | DATE=$(date '+%Y-%m-%d-%H-%M') 32 | BACKUP_FILE="${BACKUP_DIR}/wireguard-backup-${DATE}.tar.gz" 33 | MD5_FILE="${BACKUP_FILE}.md5" 34 | 35 | # Create a new backup 36 | tar -czf ${BACKUP_FILE} -C ${DATA_DIR} . 
37 | 38 | # Create a checksum for the backup 39 | md5sum ${BACKUP_FILE} > ${MD5_FILE} 40 | 41 | # Delete old backups if it exceeds MAX_BACKUPS 42 | if [ "$BACKUP_COUNT" -ge "$MAX_BACKUPS" ]; then 43 | REMOVE_COUNT=$(expr $BACKUP_COUNT - $MAX_BACKUPS + 1) 44 | ls -1 $BACKUP_DIR/wireguard-backup-*.tar.gz | sort | head -n $REMOVE_COUNT | \ 45 | while read -r file; do 46 | echo "Deleting $file and ${file}.md5" 47 | rm -f "$file" "${file}.md5" 48 | done 49 | else 50 | echo "There are no backups to delete" 51 | fi 52 | 53 | volumeMounts: 54 | - name: wireguard-data 55 | mountPath: /data 56 | 57 | - name: wireguard-backup 58 | mountPath: /backup 59 | 60 | volumes: 61 | - name: wireguard-data 62 | persistentVolumeClaim: 63 | claimName: wireguard-data 64 | 65 | - name: wireguard-backup 66 | persistentVolumeClaim: 67 | claimName: wireguard-backup 68 | 69 | -------------------------------------------------------------------------------- /kubernetes/applications/wireguard/base/deployment.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://docs.linuxserver.io/images/docker-wireguard/#docker-compose-recommended-click-here-for-more-info 2 | 3 | apiVersion: apps/v1 4 | kind: Deployment 5 | metadata: 6 | name: wireguard 7 | spec: 8 | selector: {} 9 | template: 10 | spec: 11 | # TODO 12 | initContainers: 13 | - name: sysctls 14 | image: busybox 15 | command: 16 | - sh 17 | - -c 18 | - sysctl -w net.ipv4.conf.all.src_valid_mark=1 19 | securityContext: &securityContextSpec 20 | capabilities: 21 | add: 22 | - NET_ADMIN 23 | privileged: true 24 | 25 | # Deploy Wireguard containers 26 | containers: 27 | - name: wireguard 28 | image: ghcr.io/linuxserver/wireguard 29 | envFrom: 30 | - configMapRef: 31 | name: wireguard-configmap 32 | securityContext: *securityContextSpec 33 | volumeMounts: 34 | - name: wg-config 35 | mountPath: /config 36 | ports: 37 | - containerPort: 51820 38 | protocol: UDP 39 | resources: {} 40 | volumes: 41 | - name: 
wg-config 42 | persistentVolumeClaim: 43 | claimName: wireguard-data 44 | -------------------------------------------------------------------------------- /kubernetes/applications/wireguard/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://dev.to/ivanmoreno/how-to-connect-with-kubernetes-internal-network-using-wireguard-48bh 2 | # Ref: https://www.wireguardconfig.com/ 3 | # Ref: https://github.com/wg-easy/wg-easy 4 | 5 | apiVersion: kustomize.config.k8s.io/v1beta1 6 | kind: Kustomization 7 | 8 | commonLabels: 9 | app.kubernetes.io/name: wireguard 10 | 11 | resources: 12 | - pvc.yaml 13 | - configmap.yaml 14 | - deployment.yaml 15 | - service.yaml 16 | 17 | - pvc-backup.yaml 18 | - cronjob-backup.yaml 19 | -------------------------------------------------------------------------------- /kubernetes/applications/wireguard/base/pvc-backup.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: wireguard-backup 5 | spec: 6 | storageClassName: standard-s3 7 | accessModes: 8 | - ReadWriteMany 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | -------------------------------------------------------------------------------- /kubernetes/applications/wireguard/base/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: wireguard-data 5 | spec: 6 | storageClassName: "standard-nfs" 7 | accessModes: 8 | - ReadWriteMany 9 | resources: 10 | requests: 11 | storage: 1G 12 | -------------------------------------------------------------------------------- /kubernetes/applications/wireguard/base/service.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: wireguard 5 | annotations: 6 | 
metallb.universe.tf/loadBalancerIPs: &loadbalancerIP 192.168.2.63 7 | spec: 8 | type: LoadBalancer 9 | ports: 10 | - port: 31820 11 | protocol: UDP 12 | targetPort: 51820 13 | selector: {} 14 | 15 | # Deprecated, not dual-stack, no alternative yet 16 | # Ref: https://stackoverflow.com/questions/73750700/what-is-the-replacement-for-the-deprecated-loadbalancerip-attribute-in-services 17 | #loadBalancerIP: *loadbalancerIP 18 | -------------------------------------------------------------------------------- /kubernetes/applications/wireguard/kubernetes-01/configmap-patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: wireguard-configmap 5 | data: 6 | # External IP or domain name for docker host. Used in server mode. 7 | # If set to auto, the container will try to determine and set the external IP automatically 8 | SERVERURL: "gateway.internal.place" 9 | 10 | # Number of peers to create confs for. 11 | # Required for server mode. Can also be a list of names: myPC,myPhone,myTablet (alphanumeric only) 12 | # Peer 1: A.H. 13 | # Peer 2: J.A. 14 | # Peer 3: M.F. 15 | # Peer 4: I.H. 16 | # Peer 5: A.M. 17 | # To see connection configuration for a peer: k exec -it -n wireguard pod/ -- /app/show-peer 18 | PEERS: "5" 19 | 20 | # DNS server set in peer/client configs (can be set as 8.8.8.8). Used in server mode. 21 | # Defaults to auto, which uses wireguard docker host's DNS via included CoreDNS forward. 22 | PEERDNS: "10.96.100.10" 23 | 24 | # The IPs/Ranges that the peers will be able to reach using the VPN connection. 
25 | # If not specified the default value is: '0.0.0.0/0, ::0/0' This will cause ALL traffic to route through the VPN, 26 | # if you want split tunneling, set this to only the IPs you would like to use the tunnel 27 | # AND the ip of the server's WG ip, such as 10.13.13.1 28 | ALLOWEDIPS: "0.0.0.0/0, ::/0" 29 | 30 | # Internal subnet for the wireguard and server and peers (only change if it clashes) 31 | INTERNAL_SUBNET: "10.30.0.0" 32 | -------------------------------------------------------------------------------- /kubernetes/applications/wireguard/kubernetes-01/kustomization.yaml: -------------------------------------------------------------------------------- 1 | # Ref: https://dev.to/ivanmoreno/how-to-connect-with-kubernetes-internal-network-using-wireguard-48bh 2 | # Ref: https://www.wireguardconfig.com/ 3 | # Ref: https://github.com/wg-easy/wg-easy 4 | 5 | apiVersion: kustomize.config.k8s.io/v1beta1 6 | kind: Kustomization 7 | 8 | namespace: wireguard 9 | 10 | labels: 11 | - pairs: 12 | app.kubernetes.io/managed-by: Helm 13 | includeSelectors: false 14 | includeTemplates: false 15 | 16 | - pairs: 17 | cluster: kubernetes-01 18 | includeSelectors: true 19 | includeTemplates: true 20 | 21 | commonAnnotations: 22 | meta.helm.sh/release-name: wireguard 23 | meta.helm.sh/release-namespace: wireguard 24 | 25 | replicas: 26 | - name: wireguard 27 | count: 3 28 | 29 | resources: 30 | - ../base 31 | 32 | patches: 33 | - path: service-patch.yaml 34 | target: 35 | kind: Service 36 | 37 | - path: configmap-patch.yaml 38 | target: 39 | kind: ConfigMap 40 | 41 | 42 | # To see the peer connection config: k exec -it -n wireguard pod/ -- /app/show-peer -------------------------------------------------------------------------------- /kubernetes/applications/wireguard/kubernetes-01/service-patch.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: wireguard 5 | annotations: 
6 | metallb.universe.tf/loadBalancerIPs: &loadbalancerIP 192.168.2.63 7 | -------------------------------------------------------------------------------- /kubernetes/helmfile/base.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://json.schemastore.org/helmfile 3 | 4 | helmDefaults: 5 | createNamespace: true 6 | wait: true 7 | waitForJobs: true 8 | timeout: 600 9 | 10 | # repositories: 11 | # - name: notifik 12 | # url: https://freepik-company.github.io/notifik/ 13 | 14 | # - name: admitik 15 | # url: https://freepik-company.github.io/admitik/ 16 | 17 | # - name: metrics-server 18 | # url: https://kubernetes-sigs.github.io/metrics-server/ 19 | 20 | # - name: bitnami 21 | # url: https://charts.bitnami.com/bitnami 22 | 23 | # - name: bitnami-full-index 24 | # url: https://raw.githubusercontent.com/bitnami/charts/archive-full-index/bitnami 25 | 26 | # - name: external-secrets 27 | # url: https://charts.external-secrets.io 28 | 29 | # - name: prosimcorp 30 | # url: https://prosimcorp.github.io/helm-charts/ 31 | 32 | # - name: jetstack 33 | # url: https://charts.jetstack.io 34 | 35 | # - name: stakater 36 | # url: https://stakater.github.io/stakater-charts 37 | 38 | # - name: csi-driver-nfs 39 | # url: https://raw.githubusercontent.com/kubernetes-csi/csi-driver-nfs/master/charts 40 | 41 | # - name: hitman 42 | # url: https://achetronic.github.io/hitman/ 43 | 44 | # - name: csi-s3 45 | # url: https://yandex-cloud.github.io/k8s-csi-s3/charts 46 | 47 | # - name: ingress-nginx 48 | # url: https://kubernetes.github.io/ingress-nginx 49 | 50 | # - name: cilium 51 | # url: https://helm.cilium.io/ 52 | 53 | # - name: metallb 54 | # url: https://metallb.github.io/metallb 55 | 56 | # # Metacharts: as chartmander, but generics 57 | # - name: bjw-s 58 | # url: https://bjw-s.github.io/helm-charts/ 59 | -------------------------------------------------------------------------------- 
/kubernetes/helmfile/kubernetes-01.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # yaml-language-server: $schema=https://json.schemastore.org/helmfile 3 | 4 | bases: 5 | - base.yaml 6 | 7 | releases: 8 | # ATTENTION: This is a special release to create namespaces. 9 | # It's not possible to set release namespaces as this would try to change all the Namespace 10 | # resources existing in the release to defined one, causing conflicts. 11 | # I will look for a better solution in the future to this issue 12 | - name: special-namespaces 13 | chart: ../applications/00-special-namespaces 14 | installed: true 15 | 16 | - name: autoheater 17 | namespace: autoheater 18 | createNamespace: true 19 | chart: ../applications/autoheater/kubernetes-01 20 | installed: true 21 | 22 | - name: blocky 23 | namespace: blocky 24 | createNamespace: true 25 | chart: ../applications/blocky/kubernetes-01 26 | installed: true 27 | 28 | - name: cert-manager 29 | namespace: cert-manager 30 | createNamespace: true 31 | chart: ../applications/cert-manager/operator 32 | values: 33 | - ../applications/cert-manager/operator/values-global.yaml 34 | installed: true 35 | 36 | - name: cert-manager-extra 37 | namespace: cert-manager 38 | chart: ../applications/cert-manager/extra 39 | needs: 40 | - cert-manager/cert-manager 41 | installed: true 42 | 43 | - name: congatudo 44 | namespace: congatudo 45 | createNamespace: true 46 | chart: ../applications/congatudo/kubernetes-01 47 | needs: 48 | - cert-manager/cert-manager-extra 49 | - ingress-nginx/ingress-nginx 50 | installed: true 51 | 52 | - name: csi-driver-nfs 53 | namespace: csi-driver-nfs 54 | chart: ../applications/csi-driver-nfs/operator 55 | values: 56 | - ../applications/csi-driver-nfs/operator/values-global.yaml 57 | installed: true 58 | 59 | - name: csi-driver-nfs-extra 60 | namespace: csi-driver-nfs 61 | chart: ../applications/csi-driver-nfs/extra 62 | needs: 63 | - 
csi-driver-nfs/csi-driver-nfs 64 | installed: true 65 | 66 | - name: csi-driver-s3 67 | namespace: csi-driver-s3 68 | chart: ../applications/csi-driver-s3/operator 69 | values: 70 | - ../applications/csi-driver-s3/operator/values-global.yaml 71 | # NOTE: this operator has no cross-release dependencies (a dangling empty 'needs:' key was removed) 72 | 73 | installed: true 74 | 75 | - name: csi-driver-s3-extra 76 | namespace: csi-driver-s3 77 | chart: ../applications/csi-driver-s3/extra 78 | needs: 79 | - csi-driver-s3/csi-driver-s3 80 | installed: true 81 | 82 | - name: ddns-updater 83 | namespace: ddns-updater 84 | createNamespace: true 85 | chart: ../applications/ddns-updater/kubernetes-01 86 | needs: 87 | - external-secrets/external-secrets-extra 88 | installed: true 89 | 90 | - name: external-secrets 91 | namespace: external-secrets 92 | createNamespace: true 93 | chart: ../applications/external-secrets/operator 94 | installed: true 95 | 96 | - name: external-secrets-extra 97 | namespace: external-secrets 98 | chart: ../applications/external-secrets/extra 99 | needs: 100 | - external-secrets/external-secrets 101 | installed: true 102 | 103 | - name: faster-whisper 104 | namespace: faster-whisper 105 | createNamespace: true 106 | chart: ../applications/faster-whisper 107 | values: 108 | - ../applications/faster-whisper/values-kubernetes-01.yaml 109 | installed: true 110 | 111 | - name: forecastle 112 | namespace: forecastle 113 | createNamespace: true 114 | chart: ../applications/forecastle/operator 115 | values: 116 | - ../applications/forecastle/operator/values-kubernetes-01.yaml 117 | needs: 118 | - cert-manager/cert-manager-extra 119 | - ingress-nginx/ingress-nginx 120 | installed: true 121 | 122 | - name: forecastle-extra 123 | namespace: forecastle 124 | chart: ../applications/forecastle/extra 125 | needs: 126 | - forecastle/forecastle 127 | installed: true 128 | 129 | - name: home-assistant 130 | namespace: home-assistant 131 | chart: ../applications/home-assistant/kubernetes-01 132 | needs: 133 | - cert-manager/cert-manager-extra 134 | - 
ingress-nginx/ingress-nginx 135 | installed: true 136 | 137 | - name: ingress-nginx 138 | namespace: ingress-nginx 139 | createNamespace: true 140 | chart: ../applications/ingress-nginx 141 | values: 142 | - ../applications/ingress-nginx/values-global.yaml 143 | - ../applications/ingress-nginx/values-kubernetes-01.yaml 144 | needs: 145 | - metallb/metallb-extra 146 | installed: true 147 | 148 | - name: metallb 149 | namespace: metallb 150 | chart: ../applications/metallb/operator 151 | values: 152 | - ../applications/metallb/operator/values-global.yaml 153 | installed: true 154 | 155 | - name: metallb-extra 156 | namespace: metallb 157 | chart: ../applications/metallb/extra/kubernetes-01 158 | needs: 159 | - metallb/metallb 160 | installed: true 161 | 162 | - name: metrics-server 163 | namespace: metrics-server 164 | createNamespace: true 165 | chart: ../applications/metrics-server 166 | values: 167 | - ../applications/metrics-server/values-kubernetes-01.yaml 168 | installed: true 169 | 170 | - name: mongodb 171 | namespace: mongodb 172 | createNamespace: true 173 | chart: ../applications/mongodb 174 | values: 175 | - ../applications/mongodb/values-kubernetes-01.yaml 176 | needs: 177 | - external-secrets/external-secrets-extra 178 | - csi-driver-nfs/csi-driver-nfs-extra 179 | installed: true 180 | 181 | - name: nats 182 | namespace: nats 183 | createNamespace: true 184 | chart: ../applications/nats 185 | values: 186 | - ../applications/nats/values-kubernetes-01.yaml 187 | needs: 188 | - external-secrets/external-secrets-extra 189 | - csi-driver-nfs/csi-driver-nfs-extra 190 | installed: true 191 | 192 | - name: omada-controller 193 | namespace: omada-controller 194 | chart: ../applications/omada-controller/kubernetes-01 195 | needs: 196 | - cert-manager/cert-manager-extra 197 | - ingress-nginx/ingress-nginx 198 | - external-secrets/external-secrets-extra 199 | - csi-driver-nfs/csi-driver-nfs-extra 200 | installed: true 201 | 202 | - name: vaultwarden 203 | 
namespace: vaultwarden 204 | createNamespace: true 205 | chart: ../applications/vaultwarden/kubernetes-01 206 | needs: 207 | - cert-manager/cert-manager-extra 208 | - ingress-nginx/ingress-nginx 209 | - csi-driver-nfs/csi-driver-nfs-extra 210 | installed: true 211 | 212 | - name: wireguard 213 | namespace: wireguard 214 | chart: ../applications/wireguard/kubernetes-01 215 | needs: 216 | - csi-driver-nfs/csi-driver-nfs-extra 217 | installed: true 218 | 219 | - name: zigbee2mqtt 220 | namespace: zigbee2mqtt 221 | chart: ../applications/zigbee2mqtt/kubernetes-01 222 | needs: 223 | - external-secrets/external-secrets-extra 224 | - csi-driver-nfs/csi-driver-nfs-extra 225 | installed: true 226 | -------------------------------------------------------------------------------- /scripts/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/achetronic/homelab-ops/188b980a9ed69508cfe7d695a90dcbd03d9e2db7/scripts/.gitkeep -------------------------------------------------------------------------------- /scripts/conga/update-hosts-congatudo-redirection.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Hostname to update 4 | SEARCH_DOMAIN="3irobotix.net" 5 | 6 | # Replacement IP address to assign (override with CONGATUDO_IP) 7 | CONGATUDO_NEW_IP=${CONGATUDO_IP:-"192.168.2.2"} 8 | 9 | 10 | # Update hostsfile in place with a new IP for the hostname (requires root). Fixed: '--in-place' was missing (the script only printed to stdout) and the class '[a-zA-Z1-9.:]' excluded '0', so IPs containing a zero never matched 11 | sed --in-place -E "s#^([a-zA-Z0-9.:]+[[:space:]]+)(.*)(${SEARCH_DOMAIN}[[:space:]].*)#${CONGATUDO_NEW_IP} \2\3#" /etc/hosts 12 | 13 | -------------------------------------------------------------------------------- /scripts/prepare-host-ubuntu.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -eo pipefail 3 | 4 | # Steps are taken from official Ubuntu website 5 | # Ref: https://ubuntu.com/server/docs/virtualization-libvirt 6 | 7 | USERNAME=$1 8 | 9 | # Path to the QEMU config file used
by libvirt 10 | _QEMU_CONFIG_PATH="/etc/libvirt/qemu.conf" 11 | 12 | # Install the cpu-checker package (provides the kvm-ok command used below) 13 | function install_cpu_checker () { 14 | EXIT_CODE=0 15 | 16 | echo "[···] Installing CPU checker" 17 | apt-get --quiet --assume-yes install cpu-checker 2>/dev/null || EXIT_CODE=$? 18 | 19 | case $EXIT_CODE in 20 | 0) 21 | # All ok 22 | ;; 23 | *) 24 | # Other errors 25 | echo "[ERROR] Impossible to perform this action" 26 | exit $EXIT_CODE 27 | ;; 28 | esac 29 | } 30 | 31 | # Check virtualization availability 32 | function check_kvm () { 33 | EXIT_CODE=0 34 | 35 | echo "[···] Checking KVM availability" 36 | kvm-ok 2>/dev/null || EXIT_CODE=$? 37 | 38 | case $EXIT_CODE in 39 | 0) 40 | # All ok 41 | ;; 42 | *) 43 | # Other errors 44 | echo "[ERROR] KVM not available" 45 | exit $EXIT_CODE 46 | ;; 47 | esac 48 | } 49 | 50 | # Update the repositories packages list 51 | function update_packages_list () { 52 | EXIT_CODE=0 53 | 54 | echo "[···] Updating packages lists" 55 | apt-get --quiet update || EXIT_CODE=$? 56 | 57 | case $EXIT_CODE in 58 | 0) 59 | # All ok 60 | ;; 61 | *) 62 | # Other errors 63 | echo "[ERROR] Impossible to perform this action" 64 | exit $EXIT_CODE 65 | ;; 66 | esac 67 | } 68 | 69 | # Install virtualization components 70 | function install_virtualization_packages () { 71 | EXIT_CODE=0 72 | 73 | echo "[···] Installing virtualization packages" 74 | apt-get --quiet --assume-yes install \ 75 | qemu-kvm \ 76 | libvirt-daemon-system \ 77 | ovmf \ 78 | qemu-utils \ 79 | bridge-utils \ 80 | libvirt-clients 2>/dev/null || EXIT_CODE=$? 81 | 82 | case $EXIT_CODE in 83 | 0) 84 | # All ok 85 | ;; 86 | *) 87 | # Other errors 88 | echo "[ERROR] Impossible to perform this action" 89 | exit $EXIT_CODE 90 | ;; 91 | esac 92 | } 93 | 94 | # Add user to libvirt group 95 | function add_user_to_libvirt_group () { 96 | EXIT_CODE=0 97 | 98 | echo "[···] Adding user ${USERNAME} to the group: libvirt" 99 | adduser "${USERNAME}" libvirt 2>/dev/null || EXIT_CODE=$? 
100 | 101 | case $EXIT_CODE in 102 | 0) 103 | # All ok 104 | ;; 105 | *) 106 | # Other errors 107 | echo "[ERROR] Impossible to perform this action" 108 | exit $EXIT_CODE 109 | ;; 110 | esac 111 | } 112 | 113 | # Disable security_driver parameter for Qemu 114 | # It's possible to disable AppArmor or SELinux directly in the host, but this it much less risky 115 | # Ref: https://github.com/dmacvicar/terraform-provider-libvirt/issues/546 116 | function disable_qemu_security_driver () { 117 | EXIT_CODE=0 118 | 119 | echo "[···] Disabling security driver for Qemu" 120 | sed --in-place -E s/"^#?security_driver = \".*\"$"/"security_driver = \"none\""/ "${_QEMU_CONFIG_PATH}" || EXIT_CODE=$? 121 | 122 | case $EXIT_CODE in 123 | 0) 124 | # All ok 125 | ;; 126 | *) 127 | # Other errors 128 | echo "[ERROR] Impossible to perform this action" 129 | exit $EXIT_CODE 130 | ;; 131 | esac 132 | } 133 | 134 | # Restart libvirt 135 | function restart_libvirt () { 136 | EXIT_CODE=0 137 | 138 | echo "[···] Restarting libvirt to apply all changes" 139 | systemctl restart libvirtd || EXIT_CODE=$? 140 | 141 | case $EXIT_CODE in 142 | 0) 143 | # All ok 144 | ;; 145 | *) 146 | # Other errors 147 | echo "[ERROR] Impossible to perform this action" 148 | exit $EXIT_CODE 149 | ;; 150 | esac 151 | } 152 | 153 | # Install Cockpit as GUI to review 154 | function install_cockpit () { 155 | EXIT_CODE=0 156 | 157 | echo "[···] Installing Cockpit" 158 | apt-get --quiet --assume-yes install \ 159 | cockpit \ 160 | cockpit-machines 2>/dev/null || EXIT_CODE=$? 
161 | 162 | case $EXIT_CODE in 163 | 0) 164 | # All ok 165 | ;; 166 | *) 167 | # Other errors 168 | echo "[ERROR] Impossible to perform this action" 169 | exit $EXIT_CODE 170 | ;; 171 | esac 172 | } 173 | 174 | echo "[···] Installing dependencies for the user: $USERNAME" 175 | install_cpu_checker 176 | check_kvm 177 | update_packages_list 178 | install_virtualization_packages 179 | add_user_to_libvirt_group 180 | disable_qemu_security_driver 181 | restart_libvirt 182 | install_cockpit 183 | --------------------------------------------------------------------------------