├── .github └── workflows │ └── lint.yml ├── .gitignore ├── .terraform.lock.hcl ├── .tflint.hcl ├── .vscode └── settings.json ├── README.md ├── argocd.tf ├── cert-manager.tf ├── cilium.tf ├── do ├── example-spin.yml ├── example.yml ├── gitea.tf ├── kubernetes-hello.yml ├── libvirt.tf ├── outputs.tf ├── providers.tf ├── reloader.tf ├── renovate.json5 ├── renovate.sh ├── talos.tf ├── trust-manager.tf ├── variables.tf └── zot.tf /.github/workflows/lint.yml: -------------------------------------------------------------------------------- 1 | name: Lint 2 | on: [push] 3 | jobs: 4 | lint: 5 | name: Lint 6 | runs-on: ubuntu-22.04 7 | steps: 8 | - uses: actions/checkout@v4 9 | - name: Cache the plugins directory 10 | uses: actions/cache@v4 11 | with: 12 | path: ~/.tflint.d/plugins 13 | key: tflint-${{ hashFiles('.tflint.hcl') }} 14 | - uses: terraform-linters/setup-tflint@v4 15 | name: Setup 16 | with: 17 | # see https://github.com/terraform-linters/tflint/releases 18 | # renovate: datasource=github-releases depName=terraform-linters/tflint 19 | tflint_version: v0.58.0 20 | - name: Init 21 | run: tflint --init 22 | env: 23 | # https://github.com/terraform-linters/tflint/blob/master/docs/user-guide/plugins.md#avoiding-rate-limiting 24 | GITHUB_TOKEN: ${{ github.token }} 25 | - name: Lint 26 | run: tflint --format compact 27 | fmt: 28 | name: terraform fmt check 29 | runs-on: ubuntu-22.04 30 | steps: 31 | - uses: actions/checkout@v4 32 | - name: terraform fmt check 33 | run: terraform fmt -check -diff 34 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .terraform/ 2 | terraform.tfvars 3 | *terraform.tfstate* 4 | tfplan 5 | *.log 6 | *.raw 7 | *.qcow2 8 | *.gz 9 | support.zip 10 | support/ 11 | talosconfig.yml 12 | kubeconfig.yml 13 | kubernetes-ingress-ca-crt.pem 14 | tmp/ 15 | -------------------------------------------------------------------------------- /.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/dmacvicar/libvirt" { 5 | version = "0.8.3" 6 | constraints = "0.8.3" 7 | hashes = [ 8 | "h1:Tttxr3E9O75MM+dDmq5sYHQEw29PwtIj+XDj/5drdfE=", 9 | "zh:06ff0169beafd1891dc5a30616983abd32004a4f570d1d3dbb5851d84bd1c007", 10 | "zh:2dbdd726d0987cda73b56ecdfbcb98a67485e86a7a44aec976c0081b7239d89d", 11 | "zh:2e195a7bbdfcc13c45460571a5ba848a5c1e746b477c8381058767560f0ac93b", 12 | "zh:3952da13080018c5aec498b73e343c4c22ad884afb8c983138fb7255617aa991", 13 | "zh:478841bcf57df938726ddb90f55c7953fad09db4f6348747519afe7fc84b403b", 14 | "zh:53bce78b03a82c4782acfe1f32c2b46a68fa5fb2fb90d4a5392c90b436b44244", 15 | "zh:5c157f23e9768c67cddf9e847a571adca441607cb5adfb96dbfdd626ceadf92c", 16 | "zh:6bc78d631959fb695664966851308e140c38f3f5cf648dd89756320c2d91765d", 17 | "zh:8605d7d6915190836802654920a8eea3d751ae437273c4f4476dc0ebb9167a1d", 18 | "zh:8b66a22b97331c2a56aed092fd39152d06ad957fd4810aa3f0c4ade0f9b15755", 19 | "zh:92586a47a04082f70bb33f722672127a287caeed109beaaca2668e2e1d6a9caf", 20 | "zh:99a9ee414f5c4268e287660ce8edec2efcba1f79351f83791b64c7e5ab04f569", 21 | "zh:b7cff09fe74b0eb63b5b9aa94de5b33dadbd006d6d5b9578ac476039ea20b062", 22 | "zh:d4188a343ff32c0e03ff28c7e84abce0f43cad2fdbcd9046eaafc247429039ff", 23 | ] 24 | } 25 | 26 | provider "registry.terraform.io/hashicorp/helm" { 27 | version = "2.17.0" 28 | constraints = "2.17.0" 29 | hashes = [ 30 | "h1:K5FEjxvDnxb1JF1kG1xr8J3pNGxoaR3Z0IBG9Csm/Is=", 31 | "zh:06fb4e9932f0afc1904d2279e6e99353c2ddac0d765305ce90519af410706bd4", 32 | "zh:104eccfc781fc868da3c7fec4385ad14ed183eb985c96331a1a937ac79c2d1a7", 33 | "zh:129345c82359837bb3f0070ce4891ec232697052f7d5ccf61d43d818912cf5f3", 34 | "zh:3956187ec239f4045975b35e8c30741f701aa494c386aaa04ebabffe7749f81c", 35 | "zh:66a9686d92a6b3ec43de3ca3fde60ef3d89fb76259ed3313ca4eb9bb8c13b7dd", 36 | "zh:88644260090aa621e7e8083585c468c8dd5e09a3c01a432fb05da5c4623af940", 37 | "zh:a248f650d174a883b32c5b94f9e725f4057e623b00f171936dcdcc840fad0b3e", 38 | "zh:aa498c1f1ab93be5c8fbf6d48af51dc6ef0f10b2ea88d67bcb9f02d1d80d3930", 39 | "zh:bf01e0f2ec2468c53596e027d376532a2d30feb72b0b5b810334d043109ae32f", 40 | "zh:c46fa84cc8388e5ca87eb575a534ebcf68819c5a5724142998b487cb11246654", 41 | "zh:d0c0f15ffc115c0965cbfe5c81f18c2e114113e7a1e6829f6bfd879ce5744fbb", 42 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 43 | ] 44 | } 45 | 46 | provider "registry.terraform.io/hashicorp/random" { 47 | version = "3.7.2" 48 | constraints = "3.7.2" 49 | hashes = [ 50 | "h1:356j/3XnXEKr9nyicLUufzoF4Yr6hRy481KIxRVpK0c=", 51 | "zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f", 52 | "zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc", 53 | "zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab", 54 | "zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3", 55 | "zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212", 56 | "zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f", 57 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 58 | "zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34", 59 | "zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967", 60 | "zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d", 61 | "zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62", 62 | "zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0", 63 | ] 64 | } 65 | 66 | provider "registry.terraform.io/rgl/kustomizer" { 67 | version = 
"0.0.2" 68 | constraints = "0.0.2" 69 | hashes = [ 70 | "h1:ueLyLtw7B773SjX1RLY98l1Ph+IkEBj3UlEqwfBognU=", 71 | "zh:742193ca9a0b3395253647062bddf90b522f55da8194c106ad5397411e8ca60a", 72 | "zh:bcea589b08ddb79f58d2f7083e5037072fa8ea6ed27912a6afa12caef7a5e419", 73 | ] 74 | } 75 | 76 | provider "registry.terraform.io/siderolabs/talos" { 77 | version = "0.8.1" 78 | constraints = "0.8.1" 79 | hashes = [ 80 | "h1:rhNnAJ4MLaKZ5hNbdFzUJYxEFg0Es5+jg+WlzQ1D/g4=", 81 | "zh:02aeea4001ea216d37fd948e0760971f2525d31609d75dd1a7871f483e43260d", 82 | "zh:0bd6d2f9b6daf9cec0e20d1e22cad635983b5c071c106a3bec51be283c9fa254", 83 | "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", 84 | "zh:290ced18cfa372681d53522b5ea6c392206e90181ef0884719768b3ef627d077", 85 | "zh:3270a27a483d2be332915e339b910a4810fb16505e060fc4a988b0d653f06d90", 86 | "zh:34f91c967ae25219abc81d21a477d3fc514c62a73084bfb9d3d2d1490e98070b", 87 | "zh:46a0eb4397e97d9dc354087ce1b16ccdca3876b0e339d7ec1919002a43aa0a6d", 88 | "zh:4d382770c97675c6b4355a91f50b38f9b6bd088707834c9efb308e608bbdae48", 89 | "zh:6ec2828c419615cce850ba1eafffee2797cae62876999f3b0a163c17f579c97a", 90 | "zh:9405b011c631d9fb001d8b96a1657e071181434960543e857fb14ec2230618b0", 91 | "zh:a9a6f6824793e811ec52c0b1c4b8d19855c851120a91f7617ffa7e36aa65710a", 92 | "zh:aa472818c7880c7cf19f5bd584dcf513a3daa2fe636d4af730b0bcf495eadebf", 93 | "zh:cd6037f6267987fb606f98b1a425d71a1826289ac39e62973a45b60f0f37de06", 94 | "zh:ddea6372fef17de6648018c4c64b87acecaba9f5443fcf46ff3d92c048605b30", 95 | "zh:e367b0359c8b413f705ded1d0d7b4a3c09cee1bd0028337faa80a150b08b945a", 96 | ] 97 | } 98 | -------------------------------------------------------------------------------- /.tflint.hcl: -------------------------------------------------------------------------------- 1 | # NB the terraform plugin is built into tflint, so no need to declare it here. 2 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "cSpell.words": [ 3 | "cmdline", 4 | "containerd", 5 | "coredns", 6 | "crds", 7 | "daemonset", 8 | "datasource", 9 | "drbdadm", 10 | "finalizer", 11 | "finalizers", 12 | "healthz", 13 | "KUBECONFIG", 14 | "kubelet", 15 | "libvirtd", 16 | "LINSTOR", 17 | "lvdisplay", 18 | "machineconfigs", 19 | "nodeaddresses", 20 | "nslookup", 21 | "pgrep", 22 | "piraeusdatastore", 23 | "polkit", 24 | "popd", 25 | "pushd", 26 | "pvdisplay", 27 | "resolv", 28 | "resourcedefinitions", 29 | "siderolabs", 30 | "statefulset", 31 | "staticpods", 32 | "staticpodstatus", 33 | "storageclass", 34 | "TALOSCONFIG", 35 | "talosctl", 36 | "usermod", 37 | "usermode", 38 | "vgdisplay", 39 | "virt", 40 | "wasmtime" 41 | ] 42 | } -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # About 2 | 3 | [![Lint](https://github.com/rgl/terraform-libvirt-talos/actions/workflows/lint.yml/badge.svg)](https://github.com/rgl/terraform-libvirt-talos/actions/workflows/lint.yml) 4 | 5 | An example [Talos Linux](https://www.talos.dev) Kubernetes cluster in libvirt QEMU/KVM Virtual Machines using terraform. 6 | 7 | [Cilium](https://cilium.io) is used to augment the Networking (e.g. the [`LoadBalancer`](https://cilium.io/use-cases/load-balancer/) and [`Ingress`](https://docs.cilium.io/en/stable/network/servicemesh/ingress/) controllers), Observability (e.g. 
[Service Map](https://cilium.io/use-cases/service-map/)), and Security (e.g. [Network Policy](https://cilium.io/use-cases/network-policy/)). 8 | 9 | [LVM](https://en.wikipedia.org/wiki/Logical_Volume_Manager_(Linux)), [DRBD](https://linbit.com/drbd/), [LINSTOR](https://github.com/LINBIT/linstor-server), and the [Piraeus Operator](https://github.com/piraeusdatastore/piraeus-operator) are used to provide persistent storage volumes. 10 | 11 | The [spin extension](https://github.com/siderolabs/extensions/tree/main/container-runtime/spin), which installs [containerd-shim-spin](https://github.com/spinkube/containerd-shim-spin), is used to provide the ability to run [Spin Applications](https://developer.fermyon.com/spin/v2/index) ([WebAssembly/Wasm](https://webassembly.org/)). 12 | 13 | [Zot](https://github.com/project-zot/zot) is used as the in-cluster container registry. It stores and manages container images and other OCI artifacts. 14 | 15 | [Gitea](https://github.com/go-gitea/gitea) is used as the in-cluster git repository manager. 16 | 17 | [Argo CD](https://github.com/argoproj/argo-cd) is used as the in-cluster continuous delivery tool (aka gitops). 18 | 19 | # Usage (Ubuntu 22.04 host) 20 | 21 | Install libvirt: 22 | 23 | ```bash 24 | # install libvirt et al. 25 | apt-get install -y virt-manager 26 | # configure the security_driver to prevent errors like the following (when using terraform): 27 | # Could not open '/var/lib/libvirt/images/terraform_talos_example_c0.img': Permission denied' 28 | sed -i -E 's,#?(security_driver)\s*=.*,\1 = "none",g' /etc/libvirt/qemu.conf 29 | systemctl restart libvirtd 30 | # let the current user manage libvirtd. 31 | # see /usr/share/polkit-1/rules.d/60-libvirt.rules 32 | usermod -aG libvirt $USER 33 | # restart the shell. 34 | exit 35 | ``` 36 | 37 | Install Terraform: 38 | 39 | ```bash 40 | # see https://github.com/hashicorp/terraform/releases 41 | # renovate: datasource=github-releases depName=hashicorp/terraform 42 | terraform_version='1.12.1' 43 | wget "https://releases.hashicorp.com/terraform/$terraform_version/terraform_${terraform_version}_linux_amd64.zip" 44 | unzip "terraform_${terraform_version}_linux_amd64.zip" 45 | sudo install terraform /usr/local/bin 46 | rm terraform terraform_*_linux_amd64.zip 47 | ``` 48 | 49 | Install cilium cli: 50 | 51 | ```bash 52 | # see https://github.com/cilium/cilium-cli/releases 53 | # renovate: datasource=github-releases depName=cilium/cilium-cli 54 | cilium_version='0.18.3' 55 | cilium_url="https://github.com/cilium/cilium-cli/releases/download/v$cilium_version/cilium-linux-amd64.tar.gz" 56 | wget -O- "$cilium_url" | tar xzf - cilium 57 | sudo install cilium /usr/local/bin/cilium 58 | rm cilium 59 | ``` 60 | 61 | Install cilium hubble: 62 | 63 | ```bash 64 | # see https://github.com/cilium/hubble/releases 65 | # renovate: datasource=github-releases depName=cilium/hubble 66 | hubble_version='1.17.3' 67 | hubble_url="https://github.com/cilium/hubble/releases/download/v$hubble_version/hubble-linux-amd64.tar.gz" 68 | wget -O- "$hubble_url" | tar xzf - hubble 69 | sudo install hubble /usr/local/bin/hubble 70 | rm hubble 71 | ``` 72 | 73 | Install kubectl-linstor: 74 | 75 | ```bash 76 | # NB kubectl linstor storage-pool list is equivalent to: 77 | # kubectl -n piraeus-datastore exec deploy/linstor-controller -- linstor storage-pool list 78 | # see https://github.com/piraeusdatastore/kubectl-linstor/releases 79 | # renovate: datasource=github-releases depName=piraeusdatastore/kubectl-linstor 80 | 
kubectl_linstor_version='0.3.2' 81 | kubectl_linstor_url="https://github.com/piraeusdatastore/kubectl-linstor/releases/download/v${kubectl_linstor_version}/kubectl-linstor_v${kubectl_linstor_version}_linux_amd64.tar.gz" 82 | wget -O- "$kubectl_linstor_url" | tar xzf - kubectl-linstor 83 | sudo install kubectl-linstor /usr/local/bin/kubectl-linstor 84 | rm kubectl-linstor 85 | ``` 86 | 87 | Install talosctl: 88 | 89 | ```bash 90 | # see https://github.com/siderolabs/talos/releases 91 | # renovate: datasource=github-releases depName=siderolabs/talos 92 | talos_version='1.10.3' 93 | wget https://github.com/siderolabs/talos/releases/download/v$talos_version/talosctl-linux-amd64 94 | sudo install talosctl-linux-amd64 /usr/local/bin/talosctl 95 | rm talosctl-linux-amd64 96 | ``` 97 | 98 | Install the talos image into libvirt, and initialize terraform: 99 | 100 | ```bash 101 | ./do init 102 | ``` 103 | 104 | Create the infrastructure: 105 | 106 | ```bash 107 | time ./do plan-apply 108 | ``` 109 | 110 | Show talos information: 111 | 112 | ```bash 113 | export TALOSCONFIG=$PWD/talosconfig.yml 114 | controllers="$(terraform output -raw controllers)" 115 | workers="$(terraform output -raw workers)" 116 | all="$controllers,$workers" 117 | c0="$(echo $controllers | cut -d , -f 1)" 118 | w0="$(echo $workers | cut -d , -f 1)" 119 | talosctl -n $all version 120 | talosctl -n $all dashboard 121 | ``` 122 | 123 | Show kubernetes information: 124 | 125 | ```bash 126 | export KUBECONFIG=$PWD/kubeconfig.yml 127 | kubectl cluster-info 128 | kubectl get nodes -o wide 129 | ``` 130 | 131 | Show Cilium information: 132 | 133 | ```bash 134 | export KUBECONFIG=$PWD/kubeconfig.yml 135 | cilium status --wait 136 | kubectl -n kube-system exec ds/cilium -- cilium-dbg status --verbose 137 | ``` 138 | 139 | In another shell, open the Hubble UI: 140 | 141 | ```bash 142 | export KUBECONFIG=$PWD/kubeconfig.yml 143 | cilium hubble ui 144 | ``` 145 | 146 | Execute an example workload: 147 | 148 | ```bash 149 | export KUBECONFIG=$PWD/kubeconfig.yml 150 | kubectl apply -f example.yml 151 | kubectl rollout status deployment/example 152 | kubectl get ingresses,services,pods,deployments 153 | example_ip="$(kubectl get ingress/example -o json | jq -r .status.loadBalancer.ingress[0].ip)" 154 | example_fqdn="$(kubectl get ingress/example -o json | jq -r .spec.rules[0].host)" 155 | example_url="http://$example_fqdn" 156 | curl --resolve "$example_fqdn:80:$example_ip" "$example_url" 157 | echo "$example_ip $example_fqdn" | sudo tee -a /etc/hosts 158 | curl "$example_url" 159 | xdg-open "$example_url" 160 | kubectl delete -f example.yml 161 | ``` 162 | 163 | Execute the [example hello-etcd stateful application](https://github.com/rgl/hello-etcd): 164 | 165 | ```bash 166 | # see https://github.com/rgl/hello-etcd/tags 167 | # renovate: datasource=github-tags depName=rgl/hello-etcd 168 | hello_etcd_version='0.0.4' 169 | rm -rf tmp/hello-etcd 170 | install -d tmp/hello-etcd 171 | pushd tmp/hello-etcd 172 | wget -qO- "https://raw.githubusercontent.com/rgl/hello-etcd/v$hello_etcd_version/manifest.yml" \ 173 | | perl -pe 's,(storageClassName:).+,$1 linstor-lvm-r1,g' \ 174 | | perl -pe 's,(storage:).+,$1 1Gi,g' \ 175 | > manifest.yml 176 | kubectl apply -f manifest.yml 177 | kubectl rollout status deployment hello-etcd 178 | kubectl rollout status statefulset hello-etcd-etcd 179 | kubectl get service,statefulset,pod,pvc,pv,sc 180 | kubectl linstor volume list 181 | ``` 182 | 183 | Access the `hello-etcd` service from a [kubectl port-forward 
local port](https://kubernetes.io/docs/tasks/access-application-cluster/port-forward-access-application-cluster/): 184 | 185 | ```bash 186 | kubectl port-forward service/hello-etcd 6789:web & 187 | sleep 3 188 | wget -qO- http://localhost:6789 # Hello World #1! 189 | wget -qO- http://localhost:6789 # Hello World #2! 190 | wget -qO- http://localhost:6789 # Hello World #3! 191 | ``` 192 | 193 | Delete the etcd pod: 194 | 195 | ```bash 196 | # NB the used StorageClass is configured with ReclaimPolicy set to Delete. this 197 | # means that, when we delete the application PersistentVolumeClaim, the 198 | # volume will be deleted from the linstor storage-pool. note that this will 199 | # only happen once the pvc finalizers list is empty. the pvc is created by 200 | # the statefulset (which has persistentVolumeClaimRetentionPolicy set to 201 | # Retain) and carries the kubernetes.io/pvc-protection finalizer, so it will 202 | # only be deleted when you explicitly delete it (and when nothing else is 203 | # using it, as noted by an empty finalizers list); only then is the backing 204 | # volume removed. 205 | # NB although we delete the pod, the StatefulSet will create a fresh pod to 206 | # replace it, using the same persistent volume as the old one. 207 | kubectl delete pod/hello-etcd-etcd-0 208 | kubectl get pod/hello-etcd-etcd-0 # NB its age should be in the seconds range. 209 | kubectl rollout status deployment hello-etcd 210 | kubectl rollout status statefulset hello-etcd-etcd 211 | kubectl get pvc,pv 212 | kubectl linstor volume list 213 | ``` 214 | 215 | Access the application, and notice that the counter continues from the previously returned value, which means that although the etcd instance is different, it picked up the same persistent volume: 216 | 217 | ```bash 218 | wget -qO- http://localhost:6789 # Hello World #4! 219 | wget -qO- http://localhost:6789 # Hello World #5! 220 | wget -qO- http://localhost:6789 # Hello World #6! 221 | ``` 222 | 223 | Delete everything: 224 | 225 | ```bash 226 | kubectl delete -f manifest.yml 227 | kill %1 && sleep 1 # kill the kubectl port-forward background command execution. 228 | # NB the pvc will not be automatically deleted: the statefulset has 229 | # persistentVolumeClaimRetentionPolicy set to Retain, so it keeps the pvc 230 | # around, and the kubernetes.io/pvc-protection finalizer prevents deletion 231 | # while the pvc is still in use. 232 | kubectl get pvc,pv 233 | kubectl linstor volume list 234 | # delete the pvc (which will also trigger the pv (persistent volume) deletion 235 | # because the associated storageclass reclaim policy is set to delete). 236 | kubectl delete pvc/etcd-data-hello-etcd-etcd-0 237 | # NB you should wait until it's actually deleted. 
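# NB a minimal way to wait could be a loop along these lines (an illustrative
# sketch, not part of the original steps):
# while kubectl get pvc/etcd-data-hello-etcd-etcd-0 >/dev/null 2>&1; do sleep 3; done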
238 | kubectl get pvc,pv 239 | kubectl linstor volume list 240 | popd 241 | ``` 242 | 243 | Execute an [example WebAssembly (Wasm) Spin workload](https://github.com/rgl/spin-http-rust-example): 244 | 245 | ```bash 246 | export KUBECONFIG=$PWD/kubeconfig.yml 247 | kubectl apply -f example-spin.yml 248 | kubectl rollout status deployment/example-spin 249 | kubectl get ingresses,services,pods,deployments 250 | example_spin_ip="$(kubectl get ingress/example-spin -o json | jq -r .status.loadBalancer.ingress[0].ip)" 251 | example_spin_fqdn="$(kubectl get ingress/example-spin -o json | jq -r .spec.rules[0].host)" 252 | example_spin_url="http://$example_spin_fqdn" 253 | curl --resolve "$example_spin_fqdn:80:$example_spin_ip" "$example_spin_url" 254 | echo "$example_spin_ip $example_spin_fqdn" | sudo tee -a /etc/hosts 255 | curl "$example_spin_url" 256 | xdg-open "$example_spin_url" 257 | kubectl delete -f example-spin.yml 258 | ``` 259 | 260 | Access Zot: 261 | 262 | ```bash 263 | export KUBECONFIG=$PWD/kubeconfig.yml 264 | export SSL_CERT_FILE="$PWD/kubernetes-ingress-ca-crt.pem" 265 | zot_ip="$(kubectl get -n zot ingress/zot -o json | jq -r .status.loadBalancer.ingress[0].ip)" 266 | zot_fqdn="$(kubectl get -n zot ingress/zot -o json | jq -r .spec.rules[0].host)" 267 | zot_url="https://$zot_fqdn" 268 | echo "zot_url: $zot_url" 269 | echo "zot_username: admin" 270 | echo "zot_password: admin" 271 | curl --resolve "$zot_fqdn:443:$zot_ip" "$zot_url" 272 | echo "$zot_ip $zot_fqdn" | sudo tee -a /etc/hosts 273 | xdg-open "$zot_url" 274 | ``` 275 | 276 | Upload the `kubernetes-hello` example image: 277 | 278 | ```bash 279 | skopeo login \ 280 | --username admin \ 281 | --password-stdin \ 282 | "$zot_fqdn" \ 283 | <<<"admin" 284 | skopeo copy \ 285 | --format oci \ 286 | docker://docker.io/ruilopes/kubernetes-hello:v0.0.202408161942 \ 287 | "docker://$zot_fqdn/ruilopes/kubernetes-hello:v0.0.202408161942" 288 | skopeo logout "$zot_fqdn" 289 | ``` 290 | 291 | Inspect the `kubernetes-hello` example image: 292 | 293 | ```bash 294 | skopeo login \ 295 | --username talos \ 296 | --password-stdin \ 297 | "$zot_fqdn" \ 298 | <<<"talos" 299 | skopeo list-tags "docker://$zot_fqdn/ruilopes/kubernetes-hello" 300 | skopeo inspect "docker://$zot_fqdn/ruilopes/kubernetes-hello:v0.0.202408161942" 301 | skopeo inspect "docker://$zot_fqdn/ruilopes/kubernetes-hello:v0.0.202408161942" \ 302 | --raw | jq 303 | skopeo logout "$zot_fqdn" 304 | ``` 305 | 306 | Launch a Pod using the example image: 307 | 308 | ```bash 309 | kubectl apply -f kubernetes-hello.yml 310 | kubectl rollout status deployment/kubernetes-hello 311 | kubectl get ingresses,services,pods,deployments 312 | kubernetes_hello_ip="$(kubectl get ingress/kubernetes-hello -o json | jq -r .status.loadBalancer.ingress[0].ip)" 313 | kubernetes_hello_fqdn="$(kubectl get ingress/kubernetes-hello -o json | jq -r .spec.rules[0].host)" 314 | kubernetes_hello_url="http://$kubernetes_hello_fqdn" 315 | echo "kubernetes_hello_url: $kubernetes_hello_url" 316 | curl --resolve "$kubernetes_hello_fqdn:80:$kubernetes_hello_ip" "$kubernetes_hello_url" 317 | echo "$kubernetes_hello_ip $kubernetes_hello_fqdn" | sudo tee -a /etc/hosts 318 | xdg-open "$kubernetes_hello_url" 319 | ``` 320 | 321 | Delete the example Pod: 322 | 323 | ```bash 324 | kubectl delete -f kubernetes-hello.yml 325 | ``` 326 | 327 | Access Gitea: 328 | 329 | ```bash 330 | export KUBECONFIG=$PWD/kubeconfig.yml 331 | export SSL_CERT_FILE="$PWD/kubernetes-ingress-ca-crt.pem" 332 | gitea_ip="$(kubectl get -n gitea 
ingress/gitea -o json | jq -r .status.loadBalancer.ingress[0].ip)" 333 | gitea_fqdn="$(kubectl get -n gitea ingress/gitea -o json | jq -r .spec.rules[0].host)" 334 | gitea_url="https://$gitea_fqdn" 335 | echo "gitea_url: $gitea_url" 336 | echo "gitea_username: gitea" 337 | echo "gitea_password: gitea" 338 | curl --resolve "$gitea_fqdn:443:$gitea_ip" "$gitea_url" 339 | echo "$gitea_ip $gitea_fqdn" | sudo tee -a /etc/hosts 340 | xdg-open "$gitea_url" 341 | ``` 342 | 343 | Access Argo CD: 344 | 345 | ```bash 346 | export KUBECONFIG=$PWD/kubeconfig.yml 347 | argocd_server_ip="$(kubectl get -n argocd ingress/argocd-server -o json | jq -r .status.loadBalancer.ingress[0].ip)" 348 | argocd_server_fqdn="$(kubectl get -n argocd ingress/argocd-server -o json | jq -r .spec.rules[0].host)" 349 | argocd_server_url="https://$argocd_server_fqdn" 350 | argocd_server_admin_password="$( 351 | kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" \ 352 | | base64 --decode)" 353 | echo "argocd_server_url: $argocd_server_url" 354 | echo "argocd_server_admin_password: $argocd_server_admin_password" 355 | echo "$argocd_server_ip $argocd_server_fqdn" | sudo tee -a /etc/hosts 356 | xdg-open "$argocd_server_url" 357 | ``` 358 | 359 | If the Argo CD UI is showing these kind of errors: 360 | 361 | > Unable to load data: permission denied 362 | > Unable to load data: error getting cached app managed resources: NOAUTH Authentication required. 363 | > Unable to load data: error getting cached app managed resources: cache: key is missing 364 | > Unable to load data: error getting cached app managed resources: InvalidSpecError: Application referencing project default which does not exist 365 | 366 | Try restarting some of the Argo CD components, and after restarting them, the 367 | Argo CD UI should start working after a few minutes (e.g. at the next sync 368 | interval, which defaults to 3m): 369 | 370 | ```bash 371 | kubectl -n argocd rollout restart statefulset argocd-application-controller 372 | kubectl -n argocd rollout status statefulset argocd-application-controller --watch 373 | kubectl -n argocd rollout restart deployment argocd-server 374 | kubectl -n argocd rollout status deployment argocd-server --watch 375 | ``` 376 | 377 | Create the `argocd-example` repository: 378 | 379 | ```bash 380 | export SSL_CERT_FILE="$PWD/kubernetes-ingress-ca-crt.pem" 381 | export GIT_SSL_CAINFO="$SSL_CERT_FILE" 382 | curl \ 383 | --silent \ 384 | --show-error \ 385 | --fail-with-body \ 386 | -u gitea:gitea \ 387 | -X POST \ 388 | -H 'Accept: application/json' \ 389 | -H 'Content-Type: application/json' \ 390 | -d '{ 391 | "name": "argocd-example", 392 | "private": true 393 | }' \ 394 | https://gitea.example.test/api/v1/user/repos \ 395 | | jq 396 | rm -rf tmp/argocd-example 397 | git init tmp/argocd-example 398 | pushd tmp/argocd-example 399 | git branch -m main 400 | cp ../../example.yml . 401 | git add . 402 | git commit -m init 403 | git remote add origin https://gitea.example.test/gitea/argocd-example.git 404 | git push -u origin main 405 | popd 406 | ``` 407 | 408 | Create the `argocd-example` argocd application: 409 | 410 | ```bash 411 | argocd login \ 412 | "$argocd_server_fqdn" \ 413 | --username admin \ 414 | --password "$argocd_server_admin_password" 415 | argocd cluster list 416 | # NB we have to access gitea thru the internal cluster service because the 417 | # external/ingress domains does not resolve inside the cluster. 
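# NB the repository url used below follows the usual kubernetes in-cluster
# service dns form, <service>.<namespace>.svc:<port>, i.e. the gitea-http
# service in the gitea namespace on port 3000.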
418 | # NB if git repository was hosted outside of the cluster, we might have 419 | # needed to execute the following to trust the certificate. 420 | # argocd cert add-tls gitea.example.test --from "$SSL_CERT_FILE" 421 | # argocd cert list --cert-type https 422 | argocd repo add \ 423 | http://gitea-http.gitea.svc:3000/gitea/argocd-example.git \ 424 | --username gitea \ 425 | --password gitea 426 | argocd app create \ 427 | argocd-example \ 428 | --dest-name in-cluster \ 429 | --dest-namespace default \ 430 | --project default \ 431 | --auto-prune \ 432 | --self-heal \ 433 | --sync-policy automatic \ 434 | --repo http://gitea-http.gitea.svc:3000/gitea/argocd-example.git \ 435 | --path . 436 | argocd app list 437 | argocd app wait argocd-example --health --timeout 300 438 | kubectl get crd | grep argoproj.io 439 | kubectl -n argocd get applications 440 | kubectl -n argocd get application/argocd-example -o yaml 441 | ``` 442 | 443 | Access the example application: 444 | 445 | ```bash 446 | kubectl rollout status deployment/example 447 | kubectl get ingresses,services,pods,deployments 448 | example_ip="$(kubectl get ingress/example -o json | jq -r .status.loadBalancer.ingress[0].ip)" 449 | example_fqdn="$(kubectl get ingress/example -o json | jq -r .spec.rules[0].host)" 450 | example_url="http://$example_fqdn" 451 | curl --resolve "$example_fqdn:80:$example_ip" "$example_url" 452 | echo "$example_ip $example_fqdn" | sudo tee -a /etc/hosts 453 | curl "$example_url" 454 | xdg-open "$example_url" 455 | ``` 456 | 457 | Modify the example application, by bumping the number of replicas: 458 | 459 | ```bash 460 | pushd tmp/argocd-example 461 | sed -i -E 's,(replicas:) .+,\1 3,g' example.yml 462 | git diff 463 | git add . 464 | git commit -m 'bump replicas' 465 | git push -u origin main 466 | popd 467 | ``` 468 | 469 | Then go the Argo CD UI, and wait for it to eventually sync the example argocd 470 | application, or click `Refresh` to sync it immediately. 471 | 472 | Delete the example argocd application and repository: 473 | 474 | ```bash 475 | argocd app delete \ 476 | argocd-example \ 477 | --yes 478 | argocd repo rm \ 479 | http://gitea-http.gitea.svc:3000/gitea/argocd-example.git 480 | curl \ 481 | --silent \ 482 | --show-error \ 483 | --fail-with-body \ 484 | -u gitea:gitea \ 485 | -X DELETE \ 486 | -H 'Accept: application/json' \ 487 | "$gitea_url/api/v1/repos/gitea/argocd-example" \ 488 | | jq 489 | ``` 490 | 491 | Destroy the infrastructure: 492 | 493 | ```bash 494 | time ./do destroy 495 | ``` 496 | 497 | List this repository dependencies (and which have newer versions): 498 | 499 | ```bash 500 | GITHUB_COM_TOKEN='YOUR_GITHUB_PERSONAL_TOKEN' ./renovate.sh 501 | ``` 502 | 503 | Update the talos extensions to match the talos version: 504 | 505 | ```bash 506 | ./do update-talos-extensions 507 | ``` 508 | 509 | # Troubleshoot 510 | 511 | Talos: 512 | 513 | ```bash 514 | # see https://www.talos.dev/v1.10/advanced/troubleshooting-control-plane/ 515 | talosctl -n $all support && rm -rf support && 7z x -osupport support.zip && code support 516 | talosctl -n $c0 service ext-qemu-guest-agent status 517 | talosctl -n $c0 service etcd status 518 | talosctl -n $c0 etcd status 519 | talosctl -n $c0 etcd alarm list 520 | talosctl -n $c0 etcd members 521 | # NB talosctl get members requires the talos discovery service, which we disable 522 | # by default, so this will not return anything. 523 | # see talos.tf. 
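# (if needed, the discovery service can be re-enabled through the cluster
# discovery settings in the talos machine config.)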
524 | talosctl -n $c0 get members 525 | talosctl -n $c0 health --control-plane-nodes $controllers --worker-nodes $workers 526 | talosctl -n $c0 inspect dependencies | dot -Tsvg >c0.svg && xdg-open c0.svg 527 | talosctl -n $c0 dashboard 528 | talosctl -n $c0 logs controller-runtime 529 | talosctl -n $c0 logs kubelet 530 | talosctl -n $c0 mounts | sort 531 | talosctl -n $c0 get blockdevices 532 | talosctl -n $c0 get disks 533 | talosctl -n $c0 get systemdisk 534 | talosctl -n $c0 get resourcedefinitions 535 | talosctl -n $c0 get machineconfigs -o yaml 536 | talosctl -n $c0 get staticpods -o yaml 537 | talosctl -n $c0 get staticpodstatus 538 | talosctl -n $c0 get manifests 539 | talosctl -n $c0 get services 540 | talosctl -n $c0 get extensions 541 | talosctl -n $c0 get addresses 542 | talosctl -n $c0 get nodeaddresses 543 | talosctl -n $c0 netstat --extend --programs --pods --listening 544 | talosctl -n $c0 list -l -r -t f /etc 545 | talosctl -n $c0 list -l -r -t f /system 546 | talosctl -n $c0 list -l -r -t f /var 547 | talosctl -n $c0 list -l -r /dev 548 | talosctl -n $c0 list -l /sys/fs/cgroup 549 | talosctl -n $c0 read /proc/cmdline | tr ' ' '\n' 550 | talosctl -n $c0 read /proc/mounts | sort 551 | talosctl -n $w0 read /proc/modules | sort 552 | talosctl -n $w0 read /sys/module/drbd/parameters/usermode_helper 553 | talosctl -n $c0 read /etc/os-release 554 | talosctl -n $c0 read /etc/resolv.conf 555 | talosctl -n $c0 read /etc/containerd/config.toml 556 | talosctl -n $c0 read /etc/cri/containerd.toml 557 | talosctl -n $c0 read /etc/cri/conf.d/cri.toml 558 | talosctl -n $c0 read /etc/kubernetes/kubelet.yaml 559 | talosctl -n $c0 read /etc/kubernetes/kubeconfig-kubelet 560 | talosctl -n $c0 read /etc/kubernetes/bootstrap-kubeconfig 561 | talosctl -n $c0 ps 562 | talosctl -n $c0 containers -k 563 | ``` 564 | 565 | Cilium: 566 | 567 | ```bash 568 | cilium status --wait 569 | kubectl -n kube-system exec ds/cilium -- cilium-dbg status --verbose 570 | cilium config view 571 | cilium hubble ui 572 | # **NB** cilium connectivity test is not working out-of-the-box in the default 573 | # test namespaces and using it in kube-system namespace will leave garbage 574 | # behind. 
575 | #cilium connectivity test --test-namespace kube-system 576 | kubectl -n kube-system get leases | grep cilium-l2announce- 577 | ``` 578 | 579 | Kubernetes: 580 | 581 | ```bash 582 | kubectl get events --all-namespaces --watch 583 | kubectl --namespace kube-system get events --watch 584 | kubectl --namespace kube-system debug node/w0 --stdin --tty --image=busybox:1.36 -- cat /host/etc/resolv.conf 585 | kubectl --namespace kube-system get configmaps coredns --output yaml 586 | pod_name="$(kubectl --namespace kube-system get pods --selector k8s-app=kube-dns --output json | jq -r '.items[0].metadata.name')" 587 | kubectl --namespace kube-system debug $pod_name --stdin --tty --image=busybox:1.36 --target=coredns -- sh -c 'cat /proc/$(pgrep coredns)/root/etc/resolv.conf' 588 | kubectl --namespace kube-system run busybox -it --rm --restart=Never --image=busybox:1.36 -- nslookup -type=a talos.dev 589 | kubectl get crds 590 | kubectl api-resources 591 | ``` 592 | 593 | Storage (lvm/drbd/linstor/piraeus): 594 | 595 | ```bash 596 | # NB kubectl linstor node list is equivalent to: 597 | # kubectl -n piraeus-datastore exec deploy/linstor-controller -- linstor node list 598 | kubectl linstor node list 599 | kubectl linstor storage-pool list 600 | kubectl linstor volume list 601 | kubectl -n piraeus-datastore exec daemonset/linstor-satellite.w0 -- drbdadm status 602 | kubectl -n piraeus-datastore exec daemonset/linstor-satellite.w0 -- lvdisplay 603 | kubectl -n piraeus-datastore exec daemonset/linstor-satellite.w0 -- vgdisplay 604 | kubectl -n piraeus-datastore exec daemonset/linstor-satellite.w0 -- pvdisplay 605 | w0_csi_node_pod_name="$( 606 | kubectl -n piraeus-datastore get pods \ 607 | --field-selector spec.nodeName=w0 \ 608 | --selector app.kubernetes.io/component=linstor-csi-node \ 609 | --output 'jsonpath={.items[*].metadata.name}')" 610 | kubectl -n piraeus-datastore exec "pod/$w0_csi_node_pod_name" -- lsblk 611 | kubectl -n piraeus-datastore exec "pod/$w0_csi_node_pod_name" -- bash -c 'mount | grep /dev/drbd' 612 | kubectl -n piraeus-datastore exec "pod/$w0_csi_node_pod_name" -- bash -c 'df -h | grep -P "Filesystem|/dev/drbd"' 613 | ``` 614 | -------------------------------------------------------------------------------- /argocd.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | argocd_domain = "argocd.${var.ingress_domain}" 3 | argocd_namespace = "argocd" 4 | argocd_manifests = [ 5 | # create the argocd-server tls secret. 6 | # NB argocd-server will automatically reload this secret. 7 | # NB alternatively we could set the server.certificate.enabled helm value. but 8 | # that does not allow us to fully customize the certificate (e.g. subject). 9 | # see https://github.com/argoproj/argo-helm/blob/argo-cd-8.0.12/charts/argo-cd/templates/argocd-server/certificate.yaml 10 | # see https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/ 11 | # see https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.Certificate 12 | { 13 | apiVersion = "cert-manager.io/v1" 14 | kind = "Certificate" 15 | metadata = { 16 | name = "argocd-server" 17 | namespace = local.argocd_namespace 18 | } 19 | spec = { 20 | subject = { 21 | organizations = [ 22 | var.ingress_domain, 23 | ] 24 | organizationalUnits = [ 25 | "Kubernetes", 26 | ] 27 | } 28 | commonName = "Argo CD Server" 29 | dnsNames = [ 30 | local.argocd_domain, 31 | ] 32 | privateKey = { 33 | algorithm = "ECDSA" # NB Ed25519 is not yet supported by chrome 93 or firefox 91. 
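# NB for ECDSA keys, cert-manager maps a size of 256, 384, or 521 to the
# P-256, P-384, or P-521 curve, respectively.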
34 | size = 256 35 | } 36 | duration = "4320h" # NB 4320h (180 days). default is 2160h (90 days). 37 | secretName = "argocd-server-tls" 38 | issuerRef = { 39 | kind = "ClusterIssuer" 40 | name = "ingress" 41 | } 42 | } 43 | }, 44 | ] 45 | argocd_manifest = join("---\n", [for d in local.argocd_manifests : yamlencode(d)]) 46 | } 47 | 48 | # set the configuration. 49 | # NB the default values are described at: 50 | # https://github.com/argoproj/argo-helm/blob/argo-cd-8.0.12/charts/argo-cd/values.yaml 51 | # NB make sure you are seeing the same version of the chart that you are installing. 52 | # NB this disables the tls between argocd components, that is, the internal 53 | # cluster traffic does not uses tls, and only the ingress uses tls. 54 | # see https://github.com/argoproj/argo-helm/tree/main/charts/argo-cd#ssl-termination-at-ingress-controller 55 | # see https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#inbound-tls-options-for-argocd-server 56 | # see https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#disabling-tls-to-argocd-repo-server 57 | # see https://argo-cd.readthedocs.io/en/stable/operator-manual/tls/#disabling-tls-to-argocd-dex-server 58 | # see https://argo-cd.readthedocs.io/en/stable/operator-manual/installation/#helm 59 | # see https://registry.terraform.io/providers/hashicorp/helm/latest/docs/data-sources/template 60 | data "helm_template" "argocd" { 61 | namespace = local.argocd_namespace 62 | name = "argocd" 63 | repository = "https://argoproj.github.io/argo-helm" 64 | chart = "argo-cd" 65 | # see https://artifacthub.io/packages/helm/argo/argo-cd 66 | # renovate: datasource=helm depName=argo-cd registryUrl=https://argoproj.github.io/argo-helm 67 | version = "8.0.12" # app version 3.0.4. 68 | kube_version = var.kubernetes_version 69 | api_versions = [] 70 | values = [yamlencode({ 71 | global = { 72 | domain = local.argocd_domain 73 | } 74 | configs = { 75 | params = { 76 | # disable tls between the argocd components. 77 | "server.insecure" = "true" 78 | "server.repo.server.plaintext" = "true" 79 | "server.dex.server.plaintext" = "true" 80 | "controller.repo.server.plaintext" = "true" 81 | "applicationsetcontroller.repo.server.plaintext" = "true" 82 | "reposerver.disable.tls" = "true" 83 | "dexserver.disable.tls" = "true" 84 | } 85 | } 86 | server = { 87 | ingress = { 88 | enabled = true 89 | tls = true 90 | } 91 | } 92 | })] 93 | } 94 | -------------------------------------------------------------------------------- /cert-manager.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | cert_manager_ingress_ca_manifests = [ 3 | # see https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer 4 | { 5 | apiVersion = "cert-manager.io/v1" 6 | kind = "ClusterIssuer" 7 | metadata = { 8 | name = "selfsigned" 9 | } 10 | spec = { 11 | selfSigned = {} 12 | } 13 | }, 14 | # see https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.Certificate 15 | { 16 | apiVersion = "cert-manager.io/v1" 17 | kind = "Certificate" 18 | metadata = { 19 | name = "ingress" 20 | namespace = "cert-manager" 21 | } 22 | spec = { 23 | isCA = true 24 | subject = { 25 | organizations = [ 26 | var.ingress_domain, 27 | ] 28 | organizationalUnits = [ 29 | "Kubernetes", 30 | ] 31 | } 32 | commonName = "Kubernetes Ingress" 33 | privateKey = { 34 | algorithm = "ECDSA" # NB Ed25519 is not yet supported by chrome 93 or firefox 91. 35 | size = 256 36 | } 37 | duration = "4320h" # NB 4320h (180 days). 
default is 2160h (90 days). 38 | secretName = "ingress-tls" 39 | issuerRef = { 40 | name = "selfsigned" 41 | kind = "ClusterIssuer" 42 | group = "cert-manager.io" 43 | } 44 | } 45 | }, 46 | # see https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.ClusterIssuer 47 | { 48 | apiVersion = "cert-manager.io/v1" 49 | kind = "ClusterIssuer" 50 | metadata = { 51 | name = "ingress" 52 | } 53 | spec = { 54 | ca = { 55 | secretName = "ingress-tls" 56 | } 57 | } 58 | }, 59 | ] 60 | cert_manager_ingress_ca_manifest = join("---\n", [for d in local.cert_manager_ingress_ca_manifests : yamlencode(d)]) 61 | } 62 | 63 | # NB YOU CANNOT INSTALL MULTIPLE INSTANCES OF CERT-MANAGER IN A CLUSTER. 64 | # see https://artifacthub.io/packages/helm/cert-manager/cert-manager 65 | # see https://github.com/cert-manager/cert-manager/tree/master/deploy/charts/cert-manager 66 | # see https://cert-manager.io/docs/installation/supported-releases/ 67 | # see https://cert-manager.io/docs/configuration/selfsigned/#bootstrapping-ca-issuers 68 | # see https://cert-manager.io/docs/usage/ingress/ 69 | # see https://registry.terraform.io/providers/hashicorp/helm/latest/docs/data-sources/template 70 | data "helm_template" "cert_manager" { 71 | namespace = "cert-manager" 72 | name = "cert-manager" 73 | repository = "https://charts.jetstack.io" 74 | chart = "cert-manager" 75 | # renovate: datasource=helm depName=cert-manager registryUrl=https://charts.jetstack.io 76 | version = "1.17.2" 77 | kube_version = var.kubernetes_version 78 | api_versions = [] 79 | # NB installCRDs is generally not recommended, BUT since this 80 | # is a development cluster we YOLO it. 81 | set { 82 | name = "installCRDs" 83 | value = "true" 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /cilium.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | # see https://docs.cilium.io/en/stable/network/lb-ipam/ 3 | # see https://docs.cilium.io/en/stable/network/l2-announcements/ 4 | # see the CiliumL2AnnouncementPolicy type at https://github.com/cilium/cilium/blob/v1.17.4/pkg/k8s/apis/cilium.io/v2alpha1/l2announcement_types.go#L23-L42 5 | # see the CiliumLoadBalancerIPPool type at https://github.com/cilium/cilium/blob/v1.17.4/pkg/k8s/apis/cilium.io/v2alpha1/lbipam_types.go#L23-L47 6 | cilium_external_lb_manifests = [ 7 | { 8 | apiVersion = "cilium.io/v2alpha1" 9 | kind = "CiliumL2AnnouncementPolicy" 10 | metadata = { 11 | name = "external" 12 | } 13 | spec = { 14 | loadBalancerIPs = true 15 | interfaces = [ 16 | "eth0", 17 | ] 18 | nodeSelector = { 19 | matchExpressions = [ 20 | { 21 | key = "node-role.kubernetes.io/control-plane" 22 | operator = "DoesNotExist" 23 | }, 24 | ] 25 | } 26 | } 27 | }, 28 | { 29 | apiVersion = "cilium.io/v2alpha1" 30 | kind = "CiliumLoadBalancerIPPool" 31 | metadata = { 32 | name = "external" 33 | } 34 | spec = { 35 | blocks = [ 36 | { 37 | start = cidrhost(var.cluster_node_network, var.cluster_node_network_load_balancer_first_hostnum) 38 | stop = cidrhost(var.cluster_node_network, var.cluster_node_network_load_balancer_last_hostnum) 39 | }, 40 | ] 41 | } 42 | }, 43 | ] 44 | cilium_external_lb_manifest = join("---\n", [for d in local.cilium_external_lb_manifests : yamlencode(d)]) 45 | } 46 | 47 | // see https://www.talos.dev/v1.10/kubernetes-guides/network/deploying-cilium/#method-4-helm-manifests-inline-install 48 | // see https://docs.cilium.io/en/stable/network/servicemesh/ingress/ 49 | // see 
https://docs.cilium.io/en/stable/gettingstarted/hubble_setup/ 50 | // see https://docs.cilium.io/en/stable/gettingstarted/hubble/ 51 | // see https://docs.cilium.io/en/stable/helm-reference/#helm-reference 52 | // see https://github.com/cilium/cilium/releases 53 | // see https://github.com/cilium/cilium/tree/v1.17.4/install/kubernetes/cilium 54 | // see https://registry.terraform.io/providers/hashicorp/helm/latest/docs/data-sources/template 55 | data "helm_template" "cilium" { 56 | namespace = "kube-system" 57 | name = "cilium" 58 | repository = "https://helm.cilium.io" 59 | chart = "cilium" 60 | # renovate: datasource=helm depName=cilium registryUrl=https://helm.cilium.io 61 | version = "1.17.4" 62 | kube_version = var.kubernetes_version 63 | api_versions = [] 64 | set { 65 | name = "ipam.mode" 66 | value = "kubernetes" 67 | } 68 | set { 69 | name = "securityContext.capabilities.ciliumAgent" 70 | value = "{CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID}" 71 | } 72 | set { 73 | name = "securityContext.capabilities.cleanCiliumState" 74 | value = "{NET_ADMIN,SYS_ADMIN,SYS_RESOURCE}" 75 | } 76 | set { 77 | name = "cgroup.autoMount.enabled" 78 | value = "false" 79 | } 80 | set { 81 | name = "cgroup.hostRoot" 82 | value = "/sys/fs/cgroup" 83 | } 84 | set { 85 | name = "k8sServiceHost" 86 | value = "localhost" 87 | } 88 | set { 89 | name = "k8sServicePort" 90 | value = local.common_machine_config.machine.features.kubePrism.port 91 | } 92 | set { 93 | name = "kubeProxyReplacement" 94 | value = "true" 95 | } 96 | set { 97 | name = "l2announcements.enabled" 98 | value = "true" 99 | } 100 | set { 101 | name = "devices" 102 | value = "{eth0}" 103 | } 104 | set { 105 | name = "ingressController.enabled" 106 | value = "true" 107 | } 108 | set { 109 | name = "ingressController.default" 110 | value = "true" 111 | } 112 | set { 113 | name = "ingressController.loadbalancerMode" 114 | value = "shared" 115 | } 116 | set { 117 | name = "ingressController.enforceHttps" 118 | value = "false" 119 | } 120 | set { 121 | name = "envoy.enabled" 122 | value = "true" 123 | } 124 | set { 125 | name = "hubble.relay.enabled" 126 | value = "true" 127 | } 128 | set { 129 | name = "hubble.ui.enabled" 130 | value = "true" 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /do: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | # the talos image builder. 5 | # NB this can be one of: 6 | # imager: build locally using the ghcr.io/siderolabs/imager container image. 7 | # image_factory: build remotely using the image factory service at https://factory.talos.dev. 8 | # NB this is automatically set to imager when running on linux 6.1+; otherwise, 9 | # it is set to image_factory. 10 | talos_image_builder="$(perl -e 'print ((`uname -r` =~ /^(\d+\.\d+)/ && $1 >= 6.1) ? 
"imager" : "image_factory")')" 11 | 12 | # see https://github.com/siderolabs/talos/releases 13 | # renovate: datasource=github-releases depName=siderolabs/talos 14 | talos_version="1.10.3" 15 | 16 | # see https://github.com/siderolabs/extensions/pkgs/container/qemu-guest-agent 17 | # see https://github.com/siderolabs/extensions/tree/main/guest-agents/qemu-guest-agent 18 | talos_qemu_guest_agent_extension_tag="9.2.3@sha256:e2415c65f30c85857890d3c29413ba72c344e4465bfacab01d96799a79d00be9" 19 | 20 | # see https://github.com/siderolabs/extensions/pkgs/container/drbd 21 | # see https://github.com/siderolabs/extensions/tree/main/storage/drbd 22 | # see https://github.com/LINBIT/drbd 23 | talos_drbd_extension_tag="9.2.13-v1.10.3@sha256:8e80d5341fed7ec7d7e284ce37db85681d38ca53a11ec56be1e178efbc883cdb" 24 | 25 | # see https://github.com/siderolabs/extensions/pkgs/container/spin 26 | # see https://github.com/siderolabs/extensions/tree/main/container-runtime/spin 27 | talos_spin_extension_tag="v0.19.0@sha256:581cd61637716b01d49428512f604b9b375206a871bff9bca6868c9753288cb2" 28 | 29 | # see https://github.com/piraeusdatastore/piraeus-operator/releases 30 | # renovate: datasource=github-releases depName=piraeusdatastore/piraeus-operator 31 | piraeus_operator_version="2.8.1" 32 | 33 | export CHECKPOINT_DISABLE='1' 34 | export TF_LOG='DEBUG' # TRACE, DEBUG, INFO, WARN or ERROR. 35 | export TF_LOG_PATH='terraform.log' 36 | 37 | export TALOSCONFIG=$PWD/talosconfig.yml 38 | export KUBECONFIG=$PWD/kubeconfig.yml 39 | 40 | function step { 41 | echo "### $* ###" 42 | } 43 | 44 | function update-talos-extension { 45 | # see https://github.com/siderolabs/extensions?tab=readme-ov-file#installing-extensions 46 | local variable_name="$1" 47 | local image_name="$2" 48 | local images="$3" 49 | local image="$(grep -F "$image_name:" <<<"$images")" 50 | local tag="${image#*:}" 51 | echo "updating the talos extension to $image..." 
52 | variable_name="$variable_name" tag="$tag" perl -i -pe ' 53 | BEGIN { 54 | $var = $ENV{variable_name}; 55 | $val = $ENV{tag}; 56 | } 57 | s/^(\Q$var\E=).*/$1"$val"/; 58 | ' do 59 | } 60 | 61 | function update-talos-extensions { 62 | step "updating the talos extensions" 63 | local images="$(crane export "ghcr.io/siderolabs/extensions:v$talos_version" | tar x -O image-digests)" 64 | update-talos-extension talos_qemu_guest_agent_extension_tag ghcr.io/siderolabs/qemu-guest-agent "$images" 65 | update-talos-extension talos_drbd_extension_tag ghcr.io/siderolabs/drbd "$images" 66 | update-talos-extension talos_spin_extension_tag ghcr.io/siderolabs/spin "$images" 67 | } 68 | 69 | function build_talos_image__imager { 70 | # see https://www.talos.dev/v1.10/talos-guides/install/boot-assets/ 71 | # see https://www.talos.dev/v1.10/advanced/metal-network-configuration/ 72 | # see Profile type at https://github.com/siderolabs/talos/blob/v1.10.3/pkg/imager/profile/profile.go#L23-L46 73 | local talos_version_tag="v$talos_version" 74 | rm -rf tmp/talos 75 | mkdir -p tmp/talos 76 | cat >"tmp/talos/talos-$talos_version.yml" <"tmp/talos/talos-$talos_version.yml" <terraform.tfvars <talosconfig.yml 195 | terraform output -raw kubeconfig >kubeconfig.yml 196 | health 197 | piraeus-install 198 | export-kubernetes-ingress-ca-crt 199 | info 200 | } 201 | 202 | function health { 203 | step 'talosctl health' 204 | local controllers="$(terraform output -raw controllers)" 205 | local workers="$(terraform output -raw workers)" 206 | local c0="$(echo $controllers | cut -d , -f 1)" 207 | talosctl -e $c0 -n $c0 \ 208 | health \ 209 | --control-plane-nodes $controllers \ 210 | --worker-nodes $workers 211 | } 212 | 213 | function piraeus-install { 214 | # see https://github.com/piraeusdatastore/piraeus-operator 215 | # see https://github.com/piraeusdatastore/piraeus-operator/blob/v2.8.1/docs/how-to/talos.md 216 | # see https://github.com/piraeusdatastore/piraeus-operator/blob/v2.8.1/docs/tutorial/get-started.md 217 | # see https://github.com/piraeusdatastore/piraeus-operator/blob/v2.8.1/docs/tutorial/replicated-volumes.md 218 | # see https://github.com/piraeusdatastore/piraeus-operator/blob/v2.8.1/docs/explanation/components.md 219 | # see https://github.com/piraeusdatastore/piraeus-operator/blob/v2.8.1/docs/reference/linstorsatelliteconfiguration.md 220 | # see https://github.com/piraeusdatastore/piraeus-operator/blob/v2.8.1/docs/reference/linstorcluster.md 221 | # see https://linbit.com/drbd-user-guide/linstor-guide-1_0-en/ 222 | # see https://linbit.com/drbd-user-guide/linstor-guide-1_0-en/#ch-kubernetes 223 | # see 5.7.1. 
Available Parameters in a Storage Class at https://linbit.com/drbd-user-guide/linstor-guide-1_0-en/#s-kubernetes-sc-parameters 224 | # see https://linbit.com/drbd-user-guide/drbd-guide-9_0-en/ 225 | # see https://www.talos.dev/v1.10/kubernetes-guides/configuration/storage/#piraeus--linstor 226 | step 'piraeus install' 227 | kubectl apply --server-side -k "https://github.com/piraeusdatastore/piraeus-operator//config/default?ref=v$piraeus_operator_version" 228 | step 'piraeus wait' 229 | kubectl wait pod --timeout=15m --for=condition=Ready -n piraeus-datastore -l app.kubernetes.io/component=piraeus-operator 230 | step 'piraeus configure' 231 | kubectl apply -n piraeus-datastore -f - <<'EOF' 232 | apiVersion: piraeus.io/v1 233 | kind: LinstorSatelliteConfiguration 234 | metadata: 235 | name: talos-loader-override 236 | spec: 237 | podTemplate: 238 | spec: 239 | initContainers: 240 | - name: drbd-shutdown-guard 241 | $patch: delete 242 | - name: drbd-module-loader 243 | $patch: delete 244 | volumes: 245 | - name: run-systemd-system 246 | $patch: delete 247 | - name: run-drbd-shutdown-guard 248 | $patch: delete 249 | - name: systemd-bus-socket 250 | $patch: delete 251 | - name: lib-modules 252 | $patch: delete 253 | - name: usr-src 254 | $patch: delete 255 | - name: etc-lvm-backup 256 | hostPath: 257 | path: /var/etc/lvm/backup 258 | type: DirectoryOrCreate 259 | - name: etc-lvm-archive 260 | hostPath: 261 | path: /var/etc/lvm/archive 262 | type: DirectoryOrCreate 263 | EOF 264 | kubectl apply -f - </dev/null 2>&1; do sleep 3; done 295 | step "piraeus create-device-pool $node" 296 | if ! kubectl linstor storage-pool list --node "$node" --storage-pool lvm | grep -q lvm; then 297 | kubectl linstor physical-storage create-device-pool \ 298 | --pool-name lvm \ 299 | --storage-pool lvm \ 300 | lvm \ 301 | "$node" \ 302 | "/dev/disk/by-id/wwn-0x$wwn" 303 | fi 304 | done 305 | } 306 | 307 | function piraeus-info { 308 | step 'piraeus node list' 309 | kubectl linstor node list 310 | step 'piraeus storage-pool list' 311 | kubectl linstor storage-pool list 312 | step 'piraeus volume list' 313 | kubectl linstor volume list 314 | } 315 | 316 | function info { 317 | local controllers="$(terraform output -raw controllers)" 318 | local workers="$(terraform output -raw workers)" 319 | local nodes=($(echo "$controllers,$workers" | tr ',' ' ')) 320 | step 'talos node installer image' 321 | for n in "${nodes[@]}"; do 322 | # NB there can be multiple machineconfigs in a machine. we only want to see 323 | # the ones with an id that looks like a version tag. 
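# (the jq select below keeps only the ids that contain a v<digits> pattern.)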
324 | talosctl -n $n get machineconfigs -o json \ 325 | | jq -r 'select(.metadata.id | test("v\\d+")) | .spec' \ 326 | | yq -r '.machine.install.image' \ 327 | | sed -E "s,(.+),$n: \1,g" 328 | done 329 | step 'talos node os-release' 330 | for n in "${nodes[@]}"; do 331 | talosctl -n $n read /etc/os-release \ 332 | | sed -E "s,(.+),$n: \1,g" 333 | done 334 | step 'kubernetes nodes' 335 | kubectl get nodes -o wide 336 | piraeus-info 337 | } 338 | 339 | function export-kubernetes-ingress-ca-crt { 340 | step 'export kubernetes-ingress-ca-crt.pem' 341 | kubectl get -n cert-manager secret/ingress-tls -o jsonpath='{.data.tls\.crt}' \ 342 | | base64 -d \ 343 | > kubernetes-ingress-ca-crt.pem 344 | } 345 | 346 | function upgrade { 347 | step 'talosctl upgrade' 348 | local controllers=($(terraform output -raw controllers | tr ',' ' ')) 349 | local workers=($(terraform output -raw workers | tr ',' ' ')) 350 | for n in "${controllers[@]}" "${workers[@]}"; do 351 | talosctl -e $n -n $n upgrade --preserve --wait 352 | done 353 | health 354 | } 355 | 356 | function destroy { 357 | terraform destroy -auto-approve 358 | } 359 | 360 | case $1 in 361 | update-talos-extensions) 362 | update-talos-extensions 363 | ;; 364 | init) 365 | init 366 | ;; 367 | plan) 368 | plan 369 | ;; 370 | apply) 371 | apply 372 | ;; 373 | plan-apply) 374 | plan 375 | apply 376 | ;; 377 | health) 378 | health 379 | ;; 380 | info) 381 | info 382 | ;; 383 | destroy) 384 | destroy 385 | ;; 386 | *) 387 | echo $"Usage: $0 {init|plan|apply|plan-apply|health|info}" 388 | exit 1 389 | ;; 390 | esac 391 | -------------------------------------------------------------------------------- /example-spin.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # see https://kubernetes.io/docs/concepts/services-networking/ingress/ 3 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#ingress-v1-networking-k8s-io 4 | apiVersion: networking.k8s.io/v1 5 | kind: Ingress 6 | metadata: 7 | name: example-spin 8 | spec: 9 | rules: 10 | - host: example-spin.example.test 11 | http: 12 | paths: 13 | - path: / 14 | pathType: Prefix 15 | backend: 16 | service: 17 | name: example-spin 18 | port: 19 | name: web 20 | --- 21 | # see https://kubernetes.io/docs/concepts/services-networking/service/#type-clusterip 22 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#service-v1-core 23 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#serviceport-v1-core 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: example-spin 28 | spec: 29 | type: ClusterIP 30 | selector: 31 | app: example-spin 32 | ports: 33 | - name: web 34 | port: 80 35 | protocol: TCP 36 | targetPort: web 37 | --- 38 | # see https://kubernetes.io/docs/concepts/workloads/controllers/deployment/ 39 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#deployment-v1-apps 40 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podtemplatespec-v1-core 41 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#container-v1-core 42 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#probe-v1-core 43 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#httpgetaction-v1-core 44 | apiVersion: apps/v1 45 | kind: Deployment 46 | metadata: 47 | name: example-spin 48 | spec: 49 | replicas: 1 50 | selector: 51 | matchLabels: 52 | app: example-spin 53 | template: 54 | 
metadata: 55 | labels: 56 | app: example-spin 57 | spec: 58 | runtimeClassName: wasmtime-spin-v2 59 | enableServiceLinks: false 60 | containers: 61 | - name: example 62 | # see https://github.com/rgl/spin-http-rust-example 63 | # see https://github.com/rgl/spin-http-rust-example/pkgs/container/spin-http-rust-example 64 | image: ghcr.io/rgl/spin-http-rust-example:0.3.1 65 | ports: 66 | - name: web 67 | containerPort: 8080 68 | env: 69 | - name: SPIN_HTTP_LISTEN_ADDR 70 | value: 0.0.0.0:8080 71 | readinessProbe: 72 | httpGet: 73 | path: /healthz/ready 74 | port: web 75 | resources: 76 | requests: 77 | memory: 32Mi 78 | cpu: '0.1' 79 | limits: 80 | memory: 32Mi 81 | cpu: '0.1' 82 | securityContext: 83 | allowPrivilegeEscalation: false 84 | capabilities: 85 | drop: 86 | - ALL 87 | readOnlyRootFilesystem: false 88 | runAsNonRoot: true 89 | runAsUser: 65534 # 65534 is the uid of the nobody user. 90 | runAsGroup: 65534 # 65534 is the gid of the nogroup group. 91 | seccompProfile: 92 | type: RuntimeDefault 93 | -------------------------------------------------------------------------------- /example.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # see https://kubernetes.io/docs/concepts/services-networking/ingress/ 3 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#ingress-v1-networking-k8s-io 4 | apiVersion: networking.k8s.io/v1 5 | kind: Ingress 6 | metadata: 7 | name: example 8 | spec: 9 | rules: 10 | - host: example.example.test 11 | http: 12 | paths: 13 | - path: / 14 | pathType: Prefix 15 | backend: 16 | service: 17 | name: example 18 | port: 19 | name: web 20 | --- 21 | # see https://kubernetes.io/docs/concepts/services-networking/service/#type-clusterip 22 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#service-v1-core 23 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#serviceport-v1-core 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: example 28 | spec: 29 | type: ClusterIP 30 | selector: 31 | app: example 32 | ports: 33 | - name: web 34 | port: 80 35 | protocol: TCP 36 | targetPort: web 37 | --- 38 | # see https://kubernetes.io/docs/concepts/workloads/controllers/deployment/ 39 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#deployment-v1-apps 40 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podtemplatespec-v1-core 41 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#container-v1-core 42 | apiVersion: apps/v1 43 | kind: Deployment 44 | metadata: 45 | name: example 46 | spec: 47 | replicas: 1 48 | selector: 49 | matchLabels: 50 | app: example 51 | template: 52 | metadata: 53 | labels: 54 | app: example 55 | spec: 56 | enableServiceLinks: false 57 | containers: 58 | # see https://github.com/rgl/example-docker-buildx-go 59 | - name: example 60 | image: ruilopes/example-docker-buildx-go:v1.11.0 61 | args: 62 | - -listen=0.0.0.0:9000 63 | env: 64 | # configure the go runtime to honor the k8s memory and cpu resource 65 | # limits. 66 | # NB resourceFieldRef will cast the limits to bytes and integer 67 | # number of cpus (rounding up to the nearest integer). 
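# (with the limits set below, GOMAXPROCS should resolve to 1 and
# GOMEMLIMIT to 20971520, i.e. 20Mi in bytes.)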
68 | # see https://pkg.go.dev/runtime 69 | # see https://www.riverphillips.dev/blog/go-cfs/ 70 | # see https://github.com/golang/go/issues/33803 71 | # see https://github.com/traefik/traefik-helm-chart/pull/1029 72 | - name: GOMEMLIMIT 73 | valueFrom: 74 | resourceFieldRef: 75 | resource: limits.memory 76 | - name: GOMAXPROCS 77 | valueFrom: 78 | resourceFieldRef: 79 | resource: limits.cpu 80 | # see https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/ 81 | # see https://github.com/kubernetes/kubernetes/blob/v1.33.1/test/e2e/common/node/downwardapi.go 82 | - name: EXAMPLE_NODE_NAME 83 | valueFrom: 84 | fieldRef: 85 | fieldPath: spec.nodeName 86 | - name: EXAMPLE_POD_NAMESPACE 87 | valueFrom: 88 | fieldRef: 89 | fieldPath: metadata.namespace 90 | - name: EXAMPLE_POD_NAME 91 | valueFrom: 92 | fieldRef: 93 | fieldPath: metadata.name 94 | - name: EXAMPLE_POD_UID 95 | valueFrom: 96 | fieldRef: 97 | fieldPath: metadata.uid 98 | - name: EXAMPLE_POD_IP 99 | valueFrom: 100 | fieldRef: 101 | fieldPath: status.podIP 102 | ports: 103 | - name: web 104 | containerPort: 9000 105 | resources: 106 | requests: 107 | memory: 20Mi 108 | cpu: '0.1' 109 | limits: 110 | memory: 20Mi 111 | cpu: '0.1' 112 | securityContext: 113 | allowPrivilegeEscalation: false 114 | capabilities: 115 | drop: 116 | - ALL 117 | readOnlyRootFilesystem: true 118 | runAsNonRoot: true 119 | seccompProfile: 120 | type: RuntimeDefault 121 | -------------------------------------------------------------------------------- /gitea.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | gitea_domain = "gitea.${var.ingress_domain}" 3 | gitea_namespace = "gitea" 4 | gitea_manifests = [ 5 | { 6 | apiVersion = "v1" 7 | kind = "Namespace" 8 | metadata = { 9 | name = local.gitea_namespace 10 | } 11 | }, 12 | { 13 | apiVersion = "cert-manager.io/v1" 14 | kind = "Certificate" 15 | metadata = { 16 | name = "gitea" 17 | namespace = local.gitea_namespace 18 | } 19 | spec = { 20 | subject = { 21 | organizations = [ 22 | var.ingress_domain, 23 | ] 24 | organizationalUnits = [ 25 | "Kubernetes", 26 | ] 27 | } 28 | commonName = "gitea" 29 | dnsNames = [ 30 | local.gitea_domain, 31 | ] 32 | privateKey = { 33 | algorithm = "ECDSA" # NB Ed25519 is not yet supported by chrome 93 or firefox 91. 34 | size = 256 35 | } 36 | duration = "4320h" # NB 4320h (180 days). default is 2160h (90 days). 37 | secretName = "gitea-tls" 38 | issuerRef = { 39 | kind = "ClusterIssuer" 40 | name = "ingress" 41 | } 42 | } 43 | }, 44 | ] 45 | gitea_manifest = join("---\n", [data.kustomizer_manifest.gitea.manifest], [for d in local.gitea_manifests : yamlencode(d)]) 46 | } 47 | 48 | # set the configuration. 49 | # NB the default values are described at: 50 | # https://gitea.com/gitea/helm-chart/src/tag/v12.0.0/values.yaml 51 | # NB make sure you are seeing the same version of the chart that you are installing. 52 | # see https://registry.terraform.io/providers/hashicorp/helm/latest/docs/data-sources/template 53 | data "helm_template" "gitea" { 54 | namespace = local.gitea_namespace 55 | name = "gitea" 56 | repository = "https://dl.gitea.com/charts" 57 | chart = "gitea" 58 | # see https://artifacthub.io/packages/helm/gitea/gitea 59 | # renovate: datasource=helm depName=gitea registryUrl=https://dl.gitea.com/charts 60 | version = "12.0.0" # app version 1.23.8. 
61 | kube_version = var.kubernetes_version 62 | api_versions = [ 63 | "networking.k8s.io/v1/Ingress", 64 | ] 65 | values = [yamlencode({ 66 | valkey-cluster = { 67 | enabled = false 68 | } 69 | valkey = { 70 | enabled = false 71 | } 72 | postgresql = { 73 | enabled = false 74 | } 75 | postgresql-ha = { 76 | enabled = false 77 | } 78 | persistence = { 79 | enabled = true 80 | storageClass = "linstor-lvm-r1" 81 | claimName = "gitea" 82 | } 83 | gitea = { 84 | config = { 85 | database = { 86 | DB_TYPE = "sqlite3" 87 | } 88 | session = { 89 | PROVIDER = "memory" 90 | } 91 | cache = { 92 | ADAPTER = "memory" 93 | } 94 | queue = { 95 | TYPE = "level" 96 | } 97 | } 98 | admin = { 99 | username = "gitea" 100 | password = "gitea" 101 | email = "gitea@${var.ingress_domain}" 102 | } 103 | } 104 | service = { 105 | http = { 106 | type = "ClusterIP" 107 | port = 3000 108 | clusterIP = null 109 | } 110 | ssh = { 111 | type = "ClusterIP" 112 | port = 22 113 | clusterIP = null 114 | } 115 | } 116 | ingress = { 117 | enabled = true 118 | hosts = [ 119 | { 120 | host = local.gitea_domain 121 | paths = [ 122 | { 123 | path = "/" 124 | pathType = "Prefix" 125 | } 126 | ] 127 | } 128 | ] 129 | tls = [ 130 | { 131 | secretName = "gitea-tls" 132 | hosts = [ 133 | local.gitea_domain, 134 | ] 135 | } 136 | ] 137 | } 138 | })] 139 | } 140 | 141 | # NB we mainly use the Kustomization to set the gitea namespace (because the 142 | # helm chart cannot do it). 143 | # see https://gitea.com/gitea/helm-chart/issues/630 144 | # see https://registry.terraform.io/providers/rgl/kustomizer/latest/docs/data-sources/manifest 145 | data "kustomizer_manifest" "gitea" { 146 | files = { 147 | "kustomization.yaml" = <<-EOF 148 | apiVersion: kustomize.config.k8s.io/v1beta1 149 | kind: Kustomization 150 | namespace: ${yamlencode(local.gitea_namespace)} 151 | resources: 152 | - resources/resources.yaml 153 | EOF 154 | "resources/resources.yaml" = data.helm_template.gitea.manifest 155 | } 156 | } -------------------------------------------------------------------------------- /kubernetes-hello.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # see https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ 3 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#serviceaccount-v1-core 4 | apiVersion: v1 5 | kind: ServiceAccount 6 | metadata: 7 | name: kubernetes-hello 8 | --- 9 | # see https://kubernetes.io/docs/reference/access-authn-authz/rbac/ 10 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#role-v1-rbac-authorization-k8s-io 11 | apiVersion: rbac.authorization.k8s.io/v1 12 | kind: Role 13 | metadata: 14 | name: pod-read 15 | rules: 16 | - apiGroups: [""] 17 | resources: ["pods"] 18 | verbs: ["get", "list"] 19 | --- 20 | # see https://kubernetes.io/docs/reference/access-authn-authz/rbac/ 21 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#rolebinding-v1-rbac-authorization-k8s-io 22 | apiVersion: rbac.authorization.k8s.io/v1 23 | kind: RoleBinding 24 | metadata: 25 | name: kubernetes-hello-pod-read 26 | subjects: 27 | - kind: ServiceAccount 28 | name: kubernetes-hello 29 | roleRef: 30 | kind: Role 31 | name: pod-read 32 | apiGroup: rbac.authorization.k8s.io 33 | --- 34 | # see https://kubernetes.io/docs/concepts/services-networking/ingress/ 35 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#ingress-v1-networking-k8s-io 36 | apiVersion: networking.k8s.io/v1 
37 | kind: Ingress 38 | metadata: 39 | name: kubernetes-hello 40 | spec: 41 | rules: 42 | - host: kubernetes-hello.example.test 43 | http: 44 | paths: 45 | - path: / 46 | pathType: Prefix 47 | backend: 48 | service: 49 | name: kubernetes-hello 50 | port: 51 | name: web 52 | --- 53 | # see https://kubernetes.io/docs/concepts/services-networking/service/#type-clusterip 54 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#service-v1-core 55 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#serviceport-v1-core 56 | apiVersion: v1 57 | kind: Service 58 | metadata: 59 | name: kubernetes-hello 60 | spec: 61 | type: ClusterIP 62 | selector: 63 | app: kubernetes-hello 64 | ports: 65 | - name: web 66 | port: 80 67 | protocol: TCP 68 | targetPort: web 69 | --- 70 | # see https://kubernetes.io/docs/concepts/workloads/controllers/deployment/ 71 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#deployment-v1-apps 72 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#podtemplatespec-v1-core 73 | # see https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.33/#container-v1-core 74 | apiVersion: apps/v1 75 | kind: Deployment 76 | metadata: 77 | name: kubernetes-hello 78 | spec: 79 | replicas: 1 80 | selector: 81 | matchLabels: 82 | app: kubernetes-hello 83 | template: 84 | metadata: 85 | labels: 86 | app: kubernetes-hello 87 | spec: 88 | serviceAccountName: kubernetes-hello 89 | enableServiceLinks: false 90 | containers: 91 | # see https://github.com/rgl/kubernetes-hello 92 | - name: kubernetes-hello 93 | image: zot.zot.svc.cluster.local:5000/ruilopes/kubernetes-hello:v0.0.202408161942 94 | env: 95 | # configure the go runtime to honor the k8s memory and cpu resource 96 | # limits. 97 | # NB resourceFieldRef will cast the limits to bytes and integer 98 | # number of cpus (rounding up to the nearest integer). 99 | # see https://pkg.go.dev/runtime 100 | # see https://www.riverphillips.dev/blog/go-cfs/ 101 | # see https://github.com/golang/go/issues/33803 102 | # see https://github.com/traefik/traefik-helm-chart/pull/1029 103 | - name: GOMEMLIMIT 104 | valueFrom: 105 | resourceFieldRef: 106 | resource: limits.memory 107 | - name: GOMAXPROCS 108 | valueFrom: 109 | resourceFieldRef: 110 | resource: limits.cpu 111 | # see https://github.com/kubernetes/kubernetes/blob/master/test/e2e/common/downward_api.go 112 | - name: POD_UID 113 | valueFrom: 114 | fieldRef: 115 | fieldPath: metadata.uid 116 | - name: POD_NAME 117 | valueFrom: 118 | fieldRef: 119 | fieldPath: metadata.name 120 | - name: POD_NAMESPACE 121 | valueFrom: 122 | fieldRef: 123 | fieldPath: metadata.namespace 124 | ports: 125 | - name: web 126 | containerPort: 8000 127 | resources: 128 | requests: 129 | memory: 20Mi 130 | cpu: '0.1' 131 | limits: 132 | memory: 20Mi 133 | cpu: '0.1' 134 | securityContext: 135 | allowPrivilegeEscalation: false 136 | capabilities: 137 | drop: 138 | - ALL 139 | readOnlyRootFilesystem: true 140 | runAsNonRoot: true 141 | seccompProfile: 142 | type: RuntimeDefault 143 | volumeMounts: 144 | - name: tokens 145 | readOnly: true 146 | mountPath: /var/run/secrets/tokens 147 | volumes: 148 | - name: tokens 149 | projected: 150 | sources: 151 | - serviceAccountToken: 152 | path: example.com-jwt.txt 153 | audience: example.com 154 | # NB the kubelet will periodically rotate this token. 155 | # NB the token is rotated when its older than 80% of its time 156 | # to live or if the token is older than 24h. 
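# NB with the projected volume below, the token should be readable inside the
# container at /var/run/secrets/tokens/example.com-jwt.txt (the tokens
# volumeMount above), and the application is expected to re-read it as the
# kubelet rotates it.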
157 | # NB in production, set to a higher value (e.g. 3600 (1h)). 158 | # NB the minimum allowed value is 600 (10m). 159 | # NB this is equivalent to using the TokenRequest API. 160 | # see https://kubernetes.io/docs/reference/kubernetes-api/authentication-resources/token-request-v1/ 161 | # NB this is equivalent to executing: 162 | # kubectl create token kubernetes-hello --audience example.com --duration 600s 163 | # see https://kubernetes.io/docs/reference/kubectl/generated/kubectl_create/kubectl_create_token/ 164 | expirationSeconds: 600 165 | -------------------------------------------------------------------------------- /libvirt.tf: -------------------------------------------------------------------------------- 1 | # see https://github.com/dmacvicar/terraform-provider-libvirt/blob/v0.8.3/website/docs/r/network.markdown 2 | resource "libvirt_network" "talos" { 3 | name = var.prefix 4 | mode = "nat" 5 | domain = var.cluster_node_domain 6 | addresses = [var.cluster_node_network] 7 | dhcp { 8 | enabled = true 9 | } 10 | dns { 11 | enabled = true 12 | local_only = false 13 | } 14 | } 15 | 16 | # see https://github.com/dmacvicar/terraform-provider-libvirt/blob/v0.8.3/website/docs/r/volume.html.markdown 17 | resource "libvirt_volume" "controller" { 18 | count = var.controller_count 19 | name = "${var.prefix}_c${count.index}.img" 20 | base_volume_name = var.talos_libvirt_base_volume_name 21 | format = "qcow2" 22 | size = 40 * 1024 * 1024 * 1024 # 40GiB. 23 | } 24 | 25 | # see https://github.com/dmacvicar/terraform-provider-libvirt/blob/v0.8.3/website/docs/r/volume.html.markdown 26 | resource "libvirt_volume" "worker" { 27 | count = var.worker_count 28 | name = "${var.prefix}_w${count.index}.img" 29 | base_volume_name = var.talos_libvirt_base_volume_name 30 | format = "qcow2" 31 | size = 40 * 1024 * 1024 * 1024 # 40GiB. 32 | } 33 | 34 | # see https://github.com/dmacvicar/terraform-provider-libvirt/blob/v0.8.3/website/docs/r/volume.html.markdown 35 | resource "libvirt_volume" "worker_data0" { 36 | count = var.worker_count 37 | name = "${var.prefix}_w${count.index}d0.img" 38 | format = "qcow2" 39 | size = 32 * 1024 * 1024 * 1024 # 32GiB. 
40 | } 41 | 42 | # see https://github.com/dmacvicar/terraform-provider-libvirt/blob/v0.8.3/website/docs/r/domain.html.markdown 43 | resource "libvirt_domain" "controller" { 44 | count = var.controller_count 45 | name = "${var.prefix}_${local.controller_nodes[count.index].name}" 46 | qemu_agent = false 47 | machine = "q35" 48 | firmware = "/usr/share/OVMF/OVMF_CODE.fd" 49 | cpu { 50 | mode = "host-passthrough" 51 | } 52 | vcpu = 4 53 | memory = 4 * 1024 54 | video { 55 | type = "qxl" 56 | } 57 | disk { 58 | volume_id = libvirt_volume.controller[count.index].id 59 | scsi = true 60 | } 61 | network_interface { 62 | network_id = libvirt_network.talos.id 63 | addresses = [local.controller_nodes[count.index].address] 64 | wait_for_lease = true 65 | } 66 | lifecycle { 67 | ignore_changes = [ 68 | nvram, 69 | disk[0].wwn, 70 | network_interface[0].addresses, 71 | ] 72 | } 73 | } 74 | 75 | # see https://github.com/dmacvicar/terraform-provider-libvirt/blob/v0.8.3/website/docs/r/domain.html.markdown 76 | resource "libvirt_domain" "worker" { 77 | count = var.worker_count 78 | name = "${var.prefix}_${local.worker_nodes[count.index].name}" 79 | qemu_agent = false 80 | machine = "q35" 81 | firmware = "/usr/share/OVMF/OVMF_CODE.fd" 82 | cpu { 83 | mode = "host-passthrough" 84 | } 85 | vcpu = 4 86 | memory = 4 * 1024 87 | video { 88 | type = "qxl" 89 | } 90 | disk { 91 | volume_id = libvirt_volume.worker[count.index].id 92 | scsi = true 93 | } 94 | disk { 95 | volume_id = libvirt_volume.worker_data0[count.index].id 96 | scsi = true 97 | wwn = format("000000000000ab%02x", count.index) 98 | } 99 | network_interface { 100 | network_id = libvirt_network.talos.id 101 | addresses = [local.worker_nodes[count.index].address] 102 | wait_for_lease = true 103 | } 104 | lifecycle { 105 | ignore_changes = [ 106 | nvram, 107 | disk[0].wwn, 108 | network_interface[0].addresses, 109 | ] 110 | } 111 | } 112 | -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | output "talosconfig" { 2 | value = data.talos_client_configuration.talos.talos_config 3 | sensitive = true 4 | } 5 | 6 | output "kubeconfig" { 7 | value = talos_cluster_kubeconfig.talos.kubeconfig_raw 8 | sensitive = true 9 | } 10 | 11 | output "controllers" { 12 | value = join(",", [for node in local.controller_nodes : node.address]) 13 | } 14 | 15 | output "workers" { 16 | value = join(",", [for node in local.worker_nodes : node.address]) 17 | } 18 | -------------------------------------------------------------------------------- /providers.tf: -------------------------------------------------------------------------------- 1 | # see https://github.com/hashicorp/terraform 2 | terraform { 3 | required_version = "1.12.1" 4 | required_providers { 5 | # see https://registry.terraform.io/providers/hashicorp/random 6 | # see https://github.com/hashicorp/terraform-provider-random 7 | random = { 8 | source = "hashicorp/random" 9 | version = "3.7.2" 10 | } 11 | # see https://registry.terraform.io/providers/dmacvicar/libvirt 12 | # see https://github.com/dmacvicar/terraform-provider-libvirt 13 | libvirt = { 14 | source = "dmacvicar/libvirt" 15 | version = "0.8.3" 16 | } 17 | # see https://registry.terraform.io/providers/siderolabs/talos 18 | # see https://github.com/siderolabs/terraform-provider-talos 19 | talos = { 20 | source = "siderolabs/talos" 21 | version = "0.8.1" 22 | } 23 | # see https://registry.terraform.io/providers/hashicorp/helm 24 | # 
see https://github.com/hashicorp/terraform-provider-helm 25 | helm = { 26 | source = "hashicorp/helm" 27 | version = "2.17.0" 28 | } 29 | # see https://registry.terraform.io/providers/rgl/kustomizer 30 | # see https://github.com/rgl/terraform-provider-kustomizer 31 | kustomizer = { 32 | source = "rgl/kustomizer" 33 | version = "0.0.2" 34 | } 35 | } 36 | } 37 | 38 | provider "libvirt" { 39 | uri = "qemu:///system" 40 | } 41 | 42 | provider "talos" { 43 | } 44 | -------------------------------------------------------------------------------- /reloader.tf: -------------------------------------------------------------------------------- 1 | # install reloader. 2 | # NB tls libraries typically load the certificates from ca-certificates.crt 3 | # file once, when they are started, and they never reload the file again. 4 | # reloader will automatically restart them when their configmap/secret 5 | # changes. 6 | # NB the default values are described at: 7 | # https://github.com/stakater/reloader/blob/v2.1.3/deployments/kubernetes/chart/reloader/values.yaml 8 | # NB make sure you are seeing the same version of the chart that you are installing. 9 | # see https://github.com/stakater/reloader 10 | # see https://artifacthub.io/packages/helm/stakater/reloader 11 | # see https://cert-manager.io/docs/tutorials/getting-started-with-trust-manager/ 12 | # see https://registry.terraform.io/providers/hashicorp/helm/latest/docs/data-sources/template 13 | data "helm_template" "reloader" { 14 | namespace = "kube-system" 15 | name = "reloader" 16 | repository = "https://stakater.github.io/stakater-charts" 17 | chart = "reloader" 18 | # renovate: datasource=helm depName=reloader registryUrl=https://stakater.github.io/stakater-charts 19 | version = "2.1.3" 20 | kube_version = var.kubernetes_version 21 | api_versions = [] 22 | set { 23 | name = "reloader.autoReloadAll" 24 | value = "false" 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /renovate.json5: -------------------------------------------------------------------------------- 1 | // see https://docs.renovatebot.com/templates/ 2 | // see https://docs.renovatebot.com/modules/manager/ 3 | // see https://docs.renovatebot.com/modules/manager/regex/ 4 | // see https://docs.renovatebot.com/configuration-options/ 5 | { 6 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 7 | "regexManagers": [ 8 | // default datasources. 9 | { 10 | "fileMatch": [ 11 | "\\.yml$", 12 | "\\.hcl$", 13 | "\\.tf$", 14 | "\\.sh$", 15 | "\\.md$", 16 | "^do$", 17 | ], 18 | "matchStrings": [ 19 | "# renovate: datasource=(?[^:]+?) depName=(?.+?)( versioning=(?.+?))?( extractVersion=(?.+?))?( registryUrl=(?.+?))?\\s.+?[:=]\\s*[\"']?(?.+?)[\"']?\\s" 20 | ], 21 | "versioningTemplate": "{{#if versioning}}{{{versioning}}}{{else}}semver-coerced{{/if}}", 22 | "extractVersionTemplate": "{{#if extractVersion}}{{{extractVersion}}}{{else}}^v?(?.+)${{/if}}" 23 | }, 24 | ] 25 | } -------------------------------------------------------------------------------- /renovate.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euo pipefail 3 | 4 | # this executes renovate against the local repository. 5 | # NB this uses a temporary gitea instance because running renovate against a 6 | # local directory not (yet?) supported. 
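# NB rough flow of this script: start a throwaway gitea container, create a
#    renovate user and an api token in it, push this repository into it, run
#    the renovate container against that repository, and finally extract the
#    results from tmp/renovate-log.json.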
7 | # see https://github.com/renovatebot/renovate/issues/3609 8 | 9 | export RENOVATE_USERNAME='renovate' 10 | export RENOVATE_NAME='Renovate Bot' 11 | export RENOVATE_PASSWORD='password' 12 | gitea_container_name="$(basename "$(dirname "$(realpath "${BASH_SOURCE[0]}")")")-renovate-gitea" 13 | 14 | # see https://hub.docker.com/r/gitea/gitea/tags 15 | # renovate: datasource=docker depName=gitea/gitea 16 | gitea_version='1.23.8' 17 | 18 | # see https://hub.docker.com/r/renovate/renovate/tags 19 | # renovate: datasource=docker depName=renovate/renovate 20 | renovate_version='40.26.3' 21 | 22 | # clean. 23 | echo 'Deleting existing Gitea...' 24 | docker rm --force "$gitea_container_name" >/dev/null 2>&1 25 | echo 'Deleting existing temporary files...' 26 | rm -f tmp/renovate-* 27 | install -d tmp 28 | 29 | # start gitea in background. 30 | # see https://docs.gitea.io/en-us/config-cheat-sheet/ 31 | # see https://github.com/go-gitea/gitea/releases 32 | # see https://github.com/go-gitea/gitea/blob/v1.23.8/docker/root/etc/s6/gitea/setup 33 | echo 'Starting Gitea...' 34 | docker run \ 35 | --detach \ 36 | --name "$gitea_container_name" \ 37 | -v /etc/timezone:/etc/timezone:ro \ 38 | -v /etc/localtime:/etc/localtime:ro \ 39 | -e SECRET_KEY=opensesame \ 40 | -p 3000 \ 41 | "gitea/gitea:$gitea_version" \ 42 | >/dev/null 43 | gitea_addr="$(docker port "$gitea_container_name" 3000 | head -1)" 44 | gitea_url="http://$gitea_addr" 45 | export RENOVATE_ENDPOINT="$gitea_url" 46 | export GIT_PUSH_REPOSITORY="http://$RENOVATE_USERNAME:$RENOVATE_PASSWORD@$gitea_addr/$RENOVATE_USERNAME/test.git" 47 | 48 | # wait for gitea to be ready. 49 | echo "Waiting for Gitea to be ready at $gitea_url..." 50 | GITEA_URL="$gitea_url" bash -euc 'while [ -z "$(wget -qO- "$GITEA_URL/api/v1/version" | jq -r ".version | select(.!=null)")" ]; do sleep 5; done' 51 | 52 | # create user in gitea. 53 | echo "Creating Gitea $RENOVATE_USERNAME user..." 54 | docker exec --user git "$gitea_container_name" gitea admin user create \ 55 | --admin \ 56 | --email "$RENOVATE_USERNAME@example.com" \ 57 | --username "$RENOVATE_USERNAME" \ 58 | --password "$RENOVATE_PASSWORD" 59 | curl \ 60 | --silent \ 61 | --show-error \ 62 | --fail-with-body \ 63 | -u "$RENOVATE_USERNAME:$RENOVATE_PASSWORD" \ 64 | -X 'PATCH' \ 65 | -H 'Accept: application/json' \ 66 | -H 'Content-Type: application/json' \ 67 | -d "{\"full_name\":\"$RENOVATE_NAME\"}" \ 68 | "$gitea_url/api/v1/user/settings" \ 69 | | jq \ 70 | > /dev/null 71 | 72 | # create the user personal access token. 73 | # see https://docs.gitea.io/en-us/api-usage/ 74 | # see https://docs.gitea.io/en-us/oauth2-provider/#scopes 75 | # see https://try.gitea.io/api/swagger#/user/userCreateToken 76 | echo "Creating Gitea $RENOVATE_USERNAME user personal access token..." 77 | curl \ 78 | --silent \ 79 | --show-error \ 80 | --fail-with-body \ 81 | -u "$RENOVATE_USERNAME:$RENOVATE_PASSWORD" \ 82 | -X POST \ 83 | -H "Content-Type: application/json" \ 84 | -d '{"name": "renovate", "scopes": ["read:user", "write:issue", "write:repository"]}' \ 85 | "$gitea_url/api/v1/users/$RENOVATE_USERNAME/tokens" \ 86 | | jq -r .sha1 \ 87 | >tmp/renovate-gitea-token.txt 88 | 89 | # try the token. 90 | echo "Trying the Gitea $RENOVATE_USERNAME user personal access token..." 
91 | RENOVATE_TOKEN="$(cat tmp/renovate-gitea-token.txt)" 92 | export RENOVATE_TOKEN 93 | curl \ 94 | --silent \ 95 | --show-error \ 96 | --fail-with-body \ 97 | -H "Authorization: token $RENOVATE_TOKEN" \ 98 | -H 'Accept: application/json' \ 99 | "$gitea_url/api/v1/version" \ 100 | | jq \ 101 | > /dev/null 102 | 103 | # create remote repository in gitea. 104 | echo "Creating Gitea $RENOVATE_USERNAME test repository..." 105 | curl \ 106 | --silent \ 107 | --show-error \ 108 | --fail-with-body \ 109 | -u "$RENOVATE_USERNAME:$RENOVATE_PASSWORD" \ 110 | -X POST \ 111 | -H 'Accept: application/json' \ 112 | -H 'Content-Type: application/json' \ 113 | -d '{"name": "test"}' \ 114 | "$gitea_url/api/v1/user/repos" \ 115 | | jq \ 116 | > /dev/null 117 | 118 | # push the code to local gitea repository. 119 | # NB running renovate locally is not yet supported. 120 | # see https://github.com/renovatebot/renovate/issues/3609 121 | echo "Pushing local repository to Gitea $RENOVATE_USERNAME test repository..." 122 | git push --force "$GIT_PUSH_REPOSITORY" 123 | 124 | # see https://docs.renovatebot.com/modules/platform/gitea/ 125 | # see https://docs.renovatebot.com/self-hosted-configuration/#dryrun 126 | # see https://github.com/renovatebot/renovate/blob/main/docs/usage/examples/self-hosting.md 127 | # see https://github.com/renovatebot/renovate/tree/main/lib/modules/datasource 128 | # see https://github.com/renovatebot/renovate/tree/main/lib/modules/versioning 129 | RENOVATE_TOKEN="$(cat tmp/renovate-gitea-token.txt)" 130 | export RENOVATE_TOKEN 131 | # NB these can also be passed as raw positional arguments to docker run. 132 | export RENOVATE_REPOSITORIES="$RENOVATE_USERNAME/test" 133 | # see https://docs.github.com/en/rest/rate-limit#get-rate-limit-status-for-the-authenticated-user 134 | # see https://github.com/settings/tokens 135 | # NB this is only used for authentication. the token should not have any scope enabled. 136 | #export GITHUB_COM_TOKEN='TODO-YOUR-TOKEN' 137 | # let renovate create all the required pull requests. 138 | # see https://docs.renovatebot.com/configuration-options/#prhourlylimit 139 | # see https://docs.renovatebot.com/configuration-options/#prconcurrentlimit 140 | export RENOVATE_PR_HOURLY_LIMIT='0' 141 | export RENOVATE_PR_CONCURRENT_LIMIT='0' 142 | echo 'Running renovate...' 143 | # NB to capture the traffic using mitmproxy, start mitmweb in a different 144 | # shell, then enable the following if (i.e. true). 145 | docker_extra_args=() 146 | if false; then 147 | docker_extra_args+=( 148 | --env http_proxy=http://127.0.0.1:8080 149 | --env https_proxy=http://127.0.0.1:8080 150 | --env no_proxy= 151 | --env SSL_CERT_FILE=/usr/local/shared/ca-certificates/mitmproxy-ca.crt 152 | --volume "$HOME/.mitmproxy/mitmproxy-ca-cert.pem:/usr/local/shared/ca-certificates/mitmproxy-ca.crt:ro" 153 | ) 154 | fi 155 | # NB use --dry-run=lookup for not modifying the repository (e.g. for not 156 | # creating pull requests). 157 | docker run \ 158 | --rm \ 159 | --tty \ 160 | --interactive \ 161 | --net host \ 162 | --env GITHUB_COM_TOKEN \ 163 | --env RENOVATE_ENDPOINT \ 164 | --env RENOVATE_TOKEN \ 165 | --env RENOVATE_REPOSITORIES \ 166 | --env RENOVATE_PR_HOURLY_LIMIT \ 167 | --env RENOVATE_PR_CONCURRENT_LIMIT \ 168 | --env LOG_LEVEL=debug \ 169 | --env LOG_FORMAT=json \ 170 | "${docker_extra_args[@]}" \ 171 | "renovate/renovate:$renovate_version" \ 172 | --platform=gitea \ 173 | --git-url=endpoint \ 174 | >tmp/renovate-log.json 175 | 176 | echo 'Getting results...' 
177 | # extract the errors. 178 | jq 'select(.err)' tmp/renovate-log.json >tmp/renovate-errors.json 179 | # extract the result from the renovate log. 180 | jq 'select(.msg == "packageFiles with updates") | .config' tmp/renovate-log.json >tmp/renovate-result.json 181 | # extract all the dependencies. 182 | jq 'to_entries[].value[] | {packageFile,dep:.deps[]}' tmp/renovate-result.json >tmp/renovate-dependencies.json 183 | # extract the dependencies that have updates. 184 | jq 'select((.dep.updates | length) > 0)' tmp/renovate-dependencies.json >tmp/renovate-dependencies-updates.json 185 | 186 | # helpers. 187 | function show-title { 188 | echo 189 | echo '#' 190 | echo "# $1" 191 | echo '#' 192 | echo 193 | } 194 | 195 | # show errors. 196 | if [ "$(jq --slurp length tmp/renovate-errors.json)" -ne '0' ]; then 197 | show-title errors 198 | jq . tmp/renovate-errors.json 199 | fi 200 | 201 | # show dependencies. 202 | function show-dependencies { 203 | show-title "$1" 204 | ( 205 | printf 'packageFile\tdatasource\tdepName\tcurrentValue\tnewVersions\tskipReason\twarnings\n' 206 | jq \ 207 | -r \ 208 | '[ 209 | .packageFile, 210 | .dep.datasource, 211 | .dep.depName, 212 | .dep.currentValue, 213 | (.dep | select(.updates) | .updates | map(.newVersion) | join(" | ")), 214 | .dep.skipReason, 215 | (.dep | select(.warnings) | .warnings | map(.message) | join(" | ")) 216 | ] | @tsv' \ 217 | "$2" \ 218 | | sort 219 | ) | column -t -s "$(printf \\t)" 220 | } 221 | show-dependencies 'Dependencies' tmp/renovate-dependencies.json 222 | show-dependencies 'Dependencies Updates' tmp/renovate-dependencies-updates.json 223 | 224 | # show the gitea project. 225 | show-title "See PRs at $gitea_url/$RENOVATE_USERNAME/test/pulls (you can login as $RENOVATE_USERNAME:$RENOVATE_PASSWORD)" 226 | -------------------------------------------------------------------------------- /talos.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | controller_nodes = [ 3 | for i in range(var.controller_count) : { 4 | name = "c${i}" 5 | address = cidrhost(var.cluster_node_network, var.cluster_node_network_first_controller_hostnum + i) 6 | } 7 | ] 8 | worker_nodes = [ 9 | for i in range(var.worker_count) : { 10 | name = "w${i}" 11 | address = cidrhost(var.cluster_node_network, var.cluster_node_network_first_worker_hostnum + i) 12 | } 13 | ] 14 | common_machine_config = { 15 | machine = { 16 | # NB the install section changes are only applied after a talos upgrade 17 | # (which we do not do). instead, its preferred to create a custom 18 | # talos image, which is created in the installed state. 19 | #install = {} 20 | features = { 21 | # see https://www.talos.dev/v1.10/kubernetes-guides/configuration/kubeprism/ 22 | # see talosctl -n $c0 read /etc/kubernetes/kubeconfig-kubelet | yq .clusters[].cluster.server 23 | # NB if you use a non-default CNI, you must configure it to use the 24 | # https://localhost:7445 kube-apiserver endpoint. 25 | kubePrism = { 26 | enabled = true 27 | port = 7445 28 | } 29 | # see https://www.talos.dev/v1.10/talos-guides/network/host-dns/ 30 | hostDNS = { 31 | enabled = true 32 | forwardKubeDNSToHost = true 33 | } 34 | } 35 | kernel = { 36 | modules = [ 37 | // piraeus dependencies. 
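// NB these modules back the piraeus/LINSTOR storage (e.g. the linstor-lvm-r1
//    storage class used by gitea.tf and zot.tf). as a rough sketch, loading
//    them on a regular linux host would correspond to:
//      modprobe drbd usermode_helper=disabled
//      modprobe drbd_transport_tcp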
38 | { 39 | name = "drbd" 40 | parameters = [ 41 | "usermode_helper=disabled", 42 | ] 43 | }, 44 | { 45 | name = "drbd_transport_tcp" 46 | }, 47 | ] 48 | } 49 | network = { 50 | extraHostEntries = [ 51 | { 52 | ip = local.zot_cluster_ip 53 | aliases = [ 54 | local.zot_cluster_domain, 55 | ] 56 | } 57 | ] 58 | } 59 | registries = { 60 | config = { 61 | (local.zot_cluster_host) = { 62 | auth = { 63 | username = "talos" 64 | password = "talos" 65 | } 66 | } 67 | } 68 | mirrors = { 69 | (local.zot_cluster_host) = { 70 | endpoints = [ 71 | local.zot_cluster_url, 72 | ] 73 | skipFallback = false 74 | } 75 | } 76 | } 77 | } 78 | cluster = { 79 | # disable kubernetes discovery as its no longer compatible with k8s 1.32+. 80 | # NB we actually disable the discovery altogether, at the other discovery 81 | # mechanism, service discovery, requires the public discovery service 82 | # from https://discovery.talos.dev/ (or a custom and paid one running 83 | # locally in your network). 84 | # NB without this, talosctl get members, always returns an empty set. 85 | # see https://www.talos.dev/v1.10/talos-guides/discovery/ 86 | # see https://www.talos.dev/v1.10/reference/configuration/v1alpha1/config/#Config.cluster.discovery 87 | # see https://github.com/siderolabs/talos/issues/9980 88 | # see https://github.com/siderolabs/talos/commit/c12b52491456d1e52204eb290d0686a317358c7c 89 | discovery = { 90 | enabled = false 91 | registries = { 92 | kubernetes = { 93 | disabled = true 94 | } 95 | service = { 96 | disabled = true 97 | } 98 | } 99 | } 100 | network = { 101 | cni = { 102 | name = "none" 103 | } 104 | } 105 | proxy = { 106 | disabled = true 107 | } 108 | } 109 | } 110 | } 111 | 112 | // see https://registry.terraform.io/providers/siderolabs/talos/0.8.1/docs/resources/machine_secrets 113 | resource "talos_machine_secrets" "talos" { 114 | talos_version = "v${var.talos_version}" 115 | } 116 | 117 | // see https://registry.terraform.io/providers/siderolabs/talos/0.8.1/docs/data-sources/machine_configuration 118 | data "talos_machine_configuration" "controller" { 119 | cluster_name = var.cluster_name 120 | cluster_endpoint = var.cluster_endpoint 121 | machine_secrets = talos_machine_secrets.talos.machine_secrets 122 | machine_type = "controlplane" 123 | talos_version = "v${var.talos_version}" 124 | kubernetes_version = var.kubernetes_version 125 | examples = false 126 | docs = false 127 | config_patches = [ 128 | yamlencode(local.common_machine_config), 129 | yamlencode({ 130 | machine = { 131 | network = { 132 | interfaces = [ 133 | # see https://www.talos.dev/v1.10/talos-guides/network/vip/ 134 | { 135 | interface = "eth0" 136 | dhcp = true 137 | vip = { 138 | ip = var.cluster_vip 139 | } 140 | } 141 | ] 142 | } 143 | } 144 | }), 145 | yamlencode({ 146 | cluster = { 147 | inlineManifests = [ 148 | { 149 | name = "spin" 150 | contents = <<-EOF 151 | apiVersion: node.k8s.io/v1 152 | kind: RuntimeClass 153 | metadata: 154 | name: wasmtime-spin-v2 155 | handler: spin 156 | EOF 157 | }, 158 | { 159 | name = "cilium" 160 | contents = join("---\n", [ 161 | data.helm_template.cilium.manifest, 162 | "# Source cilium.tf\n${local.cilium_external_lb_manifest}", 163 | ]) 164 | }, 165 | { 166 | name = "cert-manager" 167 | contents = join("---\n", [ 168 | yamlencode({ 169 | apiVersion = "v1" 170 | kind = "Namespace" 171 | metadata = { 172 | name = "cert-manager" 173 | } 174 | }), 175 | data.helm_template.cert_manager.manifest, 176 | "# Source cert-manager.tf\n${local.cert_manager_ingress_ca_manifest}", 177 | ]) 178 | }, 
179 | { 180 | name = "trust-manager" 181 | contents = data.helm_template.trust_manager.manifest 182 | }, 183 | { 184 | name = "reloader" 185 | contents = data.helm_template.reloader.manifest 186 | }, 187 | { 188 | name = "zot" 189 | contents = local.zot_manifest 190 | }, 191 | { 192 | name = "gitea" 193 | contents = local.gitea_manifest 194 | }, 195 | { 196 | name = "argocd" 197 | contents = join("---\n", [ 198 | yamlencode({ 199 | apiVersion = "v1" 200 | kind = "Namespace" 201 | metadata = { 202 | name = local.argocd_namespace 203 | } 204 | }), 205 | data.helm_template.argocd.manifest, 206 | "# Source argocd.tf\n${local.argocd_manifest}", 207 | ]) 208 | }, 209 | ], 210 | }, 211 | }), 212 | ] 213 | } 214 | 215 | // see https://registry.terraform.io/providers/siderolabs/talos/0.8.1/docs/data-sources/machine_configuration 216 | data "talos_machine_configuration" "worker" { 217 | cluster_name = var.cluster_name 218 | cluster_endpoint = var.cluster_endpoint 219 | machine_secrets = talos_machine_secrets.talos.machine_secrets 220 | machine_type = "worker" 221 | talos_version = "v${var.talos_version}" 222 | kubernetes_version = var.kubernetes_version 223 | examples = false 224 | docs = false 225 | config_patches = [ 226 | yamlencode(local.common_machine_config), 227 | ] 228 | } 229 | 230 | // see https://registry.terraform.io/providers/siderolabs/talos/0.8.1/docs/data-sources/client_configuration 231 | data "talos_client_configuration" "talos" { 232 | cluster_name = var.cluster_name 233 | client_configuration = talos_machine_secrets.talos.client_configuration 234 | endpoints = [for node in local.controller_nodes : node.address] 235 | } 236 | 237 | // see https://registry.terraform.io/providers/siderolabs/talos/0.8.1/docs/resources/cluster_kubeconfig 238 | resource "talos_cluster_kubeconfig" "talos" { 239 | client_configuration = talos_machine_secrets.talos.client_configuration 240 | endpoint = local.controller_nodes[0].address 241 | node = local.controller_nodes[0].address 242 | depends_on = [ 243 | talos_machine_bootstrap.talos, 244 | ] 245 | } 246 | 247 | // see https://registry.terraform.io/providers/siderolabs/talos/0.8.1/docs/resources/machine_configuration_apply 248 | resource "talos_machine_configuration_apply" "controller" { 249 | count = var.controller_count 250 | client_configuration = talos_machine_secrets.talos.client_configuration 251 | machine_configuration_input = data.talos_machine_configuration.controller.machine_configuration 252 | endpoint = local.controller_nodes[count.index].address 253 | node = local.controller_nodes[count.index].address 254 | config_patches = [ 255 | yamlencode({ 256 | machine = { 257 | network = { 258 | hostname = local.controller_nodes[count.index].name 259 | } 260 | } 261 | }), 262 | ] 263 | depends_on = [ 264 | libvirt_domain.controller, 265 | ] 266 | } 267 | 268 | // see https://registry.terraform.io/providers/siderolabs/talos/0.8.1/docs/resources/machine_configuration_apply 269 | resource "talos_machine_configuration_apply" "worker" { 270 | count = var.worker_count 271 | client_configuration = talos_machine_secrets.talos.client_configuration 272 | machine_configuration_input = data.talos_machine_configuration.worker.machine_configuration 273 | endpoint = local.worker_nodes[count.index].address 274 | node = local.worker_nodes[count.index].address 275 | config_patches = [ 276 | yamlencode({ 277 | machine = { 278 | network = { 279 | hostname = local.worker_nodes[count.index].name 280 | } 281 | } 282 | }), 283 | ] 284 | depends_on = [ 285 | 
libvirt_domain.worker, 286 | ] 287 | } 288 | 289 | // see https://registry.terraform.io/providers/siderolabs/talos/0.8.1/docs/resources/machine_bootstrap 290 | resource "talos_machine_bootstrap" "talos" { 291 | client_configuration = talos_machine_secrets.talos.client_configuration 292 | endpoint = local.controller_nodes[0].address 293 | node = local.controller_nodes[0].address 294 | depends_on = [ 295 | talos_machine_configuration_apply.controller, 296 | ] 297 | } 298 | -------------------------------------------------------------------------------- /trust-manager.tf: -------------------------------------------------------------------------------- 1 | # install trust-manager. 2 | # NB the default values are described at: 3 | # https://github.com/cert-manager/trust-manager/blob/v0.17.1/deploy/charts/trust-manager/values.yaml 4 | # NB make sure you are seeing the same version of the chart that you are installing. 5 | # see https://cert-manager.io/docs/tutorials/getting-started-with-trust-manager/ 6 | # see https://github.com/cert-manager/trust-manager 7 | # see https://github.com/golang/go/blob/go1.22.3/src/crypto/x509/root_linux.go 8 | # see https://artifacthub.io/packages/helm/cert-manager/trust-manager 9 | # see https://registry.terraform.io/providers/hashicorp/helm/latest/docs/data-sources/template 10 | data "helm_template" "trust_manager" { 11 | namespace = "cert-manager" 12 | name = "trust-manager" 13 | repository = "https://charts.jetstack.io" 14 | chart = "trust-manager" 15 | # renovate: datasource=helm depName=trust-manager registryUrl=https://charts.jetstack.io 16 | version = "0.17.1" 17 | kube_version = var.kubernetes_version 18 | api_versions = [] 19 | set { 20 | name = "secretTargets.enabled" 21 | value = "true" 22 | } 23 | set { 24 | name = "secretTargets.authorizedSecretsAll" 25 | value = "true" 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | # see https://github.com/siderolabs/talos/releases 2 | # see https://www.talos.dev/v1.10/introduction/support-matrix/ 3 | variable "talos_version" { 4 | type = string 5 | # renovate: datasource=github-releases depName=siderolabs/talos 6 | default = "1.10.3" 7 | validation { 8 | condition = can(regex("^\\d+(\\.\\d+)+", var.talos_version)) 9 | error_message = "Must be a version number." 10 | } 11 | } 12 | 13 | # see https://github.com/siderolabs/kubelet/pkgs/container/kubelet 14 | # see https://www.talos.dev/v1.10/introduction/support-matrix/ 15 | variable "kubernetes_version" { 16 | type = string 17 | # renovate: datasource=github-releases depName=siderolabs/kubelet 18 | default = "1.33.1" 19 | validation { 20 | condition = can(regex("^\\d+(\\.\\d+)+", var.kubernetes_version)) 21 | error_message = "Must be a version number." 22 | } 23 | } 24 | 25 | variable "cluster_name" { 26 | description = "A name to provide for the Talos cluster" 27 | type = string 28 | default = "example" 29 | } 30 | 31 | variable "cluster_vip" { 32 | description = "The virtual IP (VIP) address of the Kubernetes API server. Ensure it is synchronized with the 'cluster_endpoint' variable." 33 | type = string 34 | default = "10.17.3.9" 35 | } 36 | 37 | variable "cluster_endpoint" { 38 | description = "The virtual IP (VIP) endpoint of the Kubernetes API server. Ensure it is synchronized with the 'cluster_vip' variable." 
39 | type = string 40 | default = "https://10.17.3.9:6443" 41 | } 42 | 43 | variable "cluster_node_network" { 44 | description = "The IP network of the cluster nodes" 45 | type = string 46 | default = "10.17.3.0/24" 47 | } 48 | 49 | variable "cluster_node_network_first_controller_hostnum" { 50 | description = "The hostnum of the first controller host" 51 | type = number 52 | default = 80 53 | } 54 | 55 | variable "cluster_node_network_first_worker_hostnum" { 56 | description = "The hostnum of the first worker host" 57 | type = number 58 | default = 90 59 | } 60 | 61 | variable "cluster_node_network_load_balancer_first_hostnum" { 62 | description = "The hostnum of the first load balancer host" 63 | type = number 64 | default = 130 65 | } 66 | 67 | variable "cluster_node_network_load_balancer_last_hostnum" { 68 | description = "The hostnum of the last load balancer host" 69 | type = number 70 | default = 230 71 | } 72 | 73 | variable "cluster_node_domain" { 74 | description = "the DNS domain of the cluster nodes" 75 | type = string 76 | default = "talos.test" 77 | } 78 | 79 | variable "ingress_domain" { 80 | description = "the DNS domain of the ingress resources" 81 | type = string 82 | default = "example.test" 83 | } 84 | 85 | variable "controller_count" { 86 | type = number 87 | default = 1 88 | validation { 89 | condition = var.controller_count >= 1 90 | error_message = "Must be 1 or more." 91 | } 92 | } 93 | 94 | variable "worker_count" { 95 | type = number 96 | default = 1 97 | validation { 98 | condition = var.worker_count >= 1 99 | error_message = "Must be 1 or more." 100 | } 101 | } 102 | 103 | variable "talos_libvirt_base_volume_name" { 104 | type = string 105 | default = "talos-1.10.3.qcow2" 106 | validation { 107 | condition = can(regex(".+\\.qcow2+$", var.talos_libvirt_base_volume_name)) 108 | error_message = "Must be a name with a .qcow2 extension." 109 | } 110 | } 111 | 112 | variable "prefix" { 113 | type = string 114 | default = "terraform_talos_example" 115 | } 116 | -------------------------------------------------------------------------------- /zot.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | zot_domain = "zot.${var.ingress_domain}" 3 | zot_cluster_domain = "zot.${local.zot_namespace}.svc.cluster.local" 4 | zot_cluster_ip = "10.96.0.20" 5 | zot_cluster_host = "${local.zot_cluster_domain}:5000" 6 | zot_cluster_url = "http://${local.zot_cluster_host}" 7 | zot_namespace = "zot" 8 | zot_manifests = [ 9 | { 10 | apiVersion = "v1" 11 | kind = "Namespace" 12 | metadata = { 13 | name = local.zot_namespace 14 | } 15 | }, 16 | # create the zot tls secret. 17 | # see https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.Certificate 18 | { 19 | apiVersion = "cert-manager.io/v1" 20 | kind = "Certificate" 21 | metadata = { 22 | name = "zot" 23 | namespace = local.zot_namespace 24 | } 25 | spec = { 26 | subject = { 27 | organizations = [ 28 | var.ingress_domain, 29 | ] 30 | organizationalUnits = [ 31 | "Kubernetes", 32 | ] 33 | } 34 | commonName = "Zot" 35 | dnsNames = [ 36 | local.zot_domain, 37 | ] 38 | privateKey = { 39 | algorithm = "ECDSA" # NB Ed25519 is not yet supported by chrome 93 or firefox 91. 40 | size = 256 41 | } 42 | duration = "4320h" # NB 4320h (180 days). default is 2160h (90 days). 
43 | secretName = "zot-tls" 44 | issuerRef = { 45 | kind = "ClusterIssuer" 46 | name = "ingress" 47 | } 48 | } 49 | }, 50 | ] 51 | zot_manifest = join("---\n", [data.kustomizer_manifest.zot.manifest], [for d in local.zot_manifests : yamlencode(d)]) 52 | } 53 | 54 | # set the configuration. 55 | # NB the default values are described at: 56 | # https://github.com/project-zot/helm-charts/tree/zot-0.1.71/charts/zot/values.yaml 57 | # NB make sure you are seeing the same version of the chart that you are installing. 58 | # see https://zotregistry.dev/v2.1.0/install-guides/install-guide-k8s/ 59 | # see https://registry.terraform.io/providers/hashicorp/helm/latest/docs/data-sources/template 60 | data "helm_template" "zot" { 61 | namespace = local.zot_namespace 62 | name = "zot" 63 | repository = "https://zotregistry.dev/helm-charts" 64 | chart = "zot" 65 | # see https://artifacthub.io/packages/helm/zot/zot 66 | # renovate: datasource=helm depName=zot registryUrl=https://zotregistry.dev/helm-charts 67 | version = "0.1.71" # app version 2.1.3. 68 | kube_version = var.kubernetes_version 69 | api_versions = [] 70 | values = [yamlencode({ 71 | service = { 72 | type = "ClusterIP" 73 | clusterIP = local.zot_cluster_ip 74 | } 75 | ingress = { 76 | enabled = true 77 | className = null 78 | pathtype = "Prefix" 79 | hosts = [ 80 | { 81 | host = local.zot_domain 82 | paths = [ 83 | { 84 | path = "/" 85 | pathType = "Prefix" 86 | } 87 | ] 88 | } 89 | ] 90 | tls = [ 91 | { 92 | secretName = "zot-tls" 93 | hosts = [ 94 | local.zot_domain, 95 | ] 96 | } 97 | ] 98 | } 99 | persistence = true 100 | pvc = { 101 | create = true 102 | storageClassName = "linstor-lvm-r1" 103 | storage = "8Gi" 104 | } 105 | mountConfig = true 106 | configFiles = { 107 | "config.json" = jsonencode({ 108 | storage = { 109 | rootDirectory = "/var/lib/registry" 110 | } 111 | http = { 112 | address = "0.0.0.0" 113 | port = "5000" 114 | auth = { 115 | htpasswd = { 116 | path = "/secret/htpasswd" 117 | } 118 | } 119 | accessControl = { 120 | repositories = { 121 | "**" = { 122 | policies = [{ 123 | users = ["talos"] 124 | actions = ["read"] 125 | }], 126 | anonymousPolicy = [] 127 | defaultPolicy = [] 128 | } 129 | } 130 | adminPolicy = { 131 | users = ["admin"] 132 | actions = ["read", "create", "update", "delete"] 133 | } 134 | } 135 | } 136 | log = { 137 | level = "debug" 138 | } 139 | extensions = { 140 | ui = { 141 | enable = true 142 | } 143 | search = { 144 | enable = true 145 | cve = { 146 | updateInterval = "2h" 147 | } 148 | } 149 | } 150 | }) 151 | } 152 | mountSecret = true 153 | secretFiles = { 154 | # htpasswd user:pass pairs: 155 | # admin:admin 156 | # talos:talos 157 | # create a pair with: 158 | # echo "talos:$(python3 -c 'import bcrypt;print(bcrypt.hashpw("talos".encode(), bcrypt.gensalt()).decode())')" 159 | # NB the pass value is computed as bcrypt(pass). 160 | htpasswd = <<-EOF 161 | admin:$2y$05$vmiurPmJvHylk78HHFWuruFFVePlit9rZWGA/FbZfTEmNRneGJtha 162 | talos:$2b$12$5nolGXPDH09gv7mGwsEpJOJx5SZj8w8y/Qt3X33wZJDnCdRs6y1Zm 163 | EOF 164 | } 165 | authHeader = base64encode("talos:talos") 166 | })] 167 | } 168 | 169 | # NB we mainly use the Kustomization to set the zot namespace (because the 170 | # helm chart cannot do it). 
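# NB conceptually this is the same as rendering the chart into
#    resources/resources.yaml and running something like "kustomize build ."
#    against a kustomization.yaml that only sets "namespace: zot" (a sketch of
#    the equivalent cli workflow, not something this module runs).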
171 | # see https://github.com/project-zot/helm-charts/issues/46 172 | # see https://registry.terraform.io/providers/rgl/kustomizer/latest/docs/data-sources/manifest 173 | data "kustomizer_manifest" "zot" { 174 | files = { 175 | "kustomization.yaml" = <<-EOF 176 | apiVersion: kustomize.config.k8s.io/v1beta1 177 | kind: Kustomization 178 | namespace: ${yamlencode(local.zot_namespace)} 179 | resources: 180 | - resources/resources.yaml 181 | EOF 182 | "resources/resources.yaml" = data.helm_template.zot.manifest 183 | } 184 | } 185 | --------------------------------------------------------------------------------