├── README.md
├── kubernetes
├── apps
│ ├── cert-manager
│ │ ├── cert-manager
│ │ │ ├── app
│ │ │ │ ├── helmrelease.yaml
│ │ │ │ └── kustomization.yaml
│ │ │ └── ks.yaml
│ │ ├── kustomization.yaml
│ │ └── namespace.yaml
│ ├── clusters
│ │ ├── cluster-0
│ │ │ ├── ks.yaml
│ │ │ └── manifests
│ │ │ │ ├── cluster-0.yaml
│ │ │ │ ├── kustomization.yaml
│ │ │ │ ├── master-dev-sc.yaml
│ │ │ │ ├── vms.yaml
│ │ │ │ └── worker-dev-sc.yaml
│ │ └── kustomization.yaml
│ ├── dhcp
│ │ ├── dhcp.yaml
│ │ └── kustomization.yaml
│ ├── proxmox
│ │ ├── kustomization.yaml
│ │ └── operator
│ │ │ ├── app
│ │ │ ├── kustomization.yaml
│ │ │ ├── proxmox-csi-plugin.yml
│ │ │ └── proxmox-operator.yaml
│ │ │ └── ks.yaml
│ └── sidero
│ │ ├── capi
│ │ ├── app
│ │ │ ├── bootstrap.yaml
│ │ │ ├── cluster-api-components.yaml
│ │ │ ├── controlplane.yaml
│ │ │ ├── infra.yaml
│ │ │ ├── kustomization.yaml
│ │ │ └── metallb.yaml
│ │ └── ks.yaml
│ │ └── kustomization.yaml
└── flux
│ ├── apps.yaml
│ ├── config
│ ├── cluster.yaml
│ ├── crds
│ │ └── .gitkeep
│ ├── flux.yaml
│ └── kustomization.yaml
│ └── repositories
│ ├── git
│ ├── kustomization.yaml
│ └── local-path-provisioner.yaml
│ ├── helm
│ ├── external-dns.yaml
│ ├── jetstack.yaml
│ ├── kustomization.yml
│ └── proxmox-operator.yaml
│ ├── kustomization.yaml
│ └── oci
│ └── .gitkeep
├── manifests
├── optional
│ └── theila.yaml
└── talos
│ ├── cert-approval.yaml
│ ├── cilium.yaml
│ ├── coredns-local.yaml
│ ├── fluxcd-install.yaml
│ ├── fluxcd.yaml
│ ├── metallb-native.yaml
│ └── metrics-server.yaml
├── packer
├── proxmox.pkr.hcl
├── variables.pkr.hcl
└── vars
│ └── local.pkrvars.hcl
└── terraform
├── .gitignore
├── .terraform.lock.hcl
├── auth.tf
├── k8s.tf
├── master-nodes.tf
├── output.tf
├── talos.tf
├── templates
├── controlplane.yaml.tpl
└── worker.yaml.tpl
├── terraform.tfvars
├── variables.tf
├── versions.tf
└── worker-nodes.tf
/README.md:
--------------------------------------------------------------------------------
1 | Kubernetes As a Service (KAAS) in Proxmox
2 | =================
3 |
4 | ### Introduction
5 |
6 | The purpose of this lab is to demonstrate capabilities of ***Talos Linux, Sidero (CAPI), FluxCD & Proxmox Operator***, and how they can be used to provision k8s clusters in a true GitOps way.
7 |
8 | ---
9 | ### Built With:
10 |
11 | * [Talos Linux](https://talos.dev)
12 | * [Sidero & CAPI](https://sidero.dev)
13 | * [Talos Terraform Provider](https://registry.terraform.io/providers/siderolabs/talos/latest)
14 | * [Proxmox Terraform Provider](https://registry.terraform.io/providers/Telmate/proxmox/latest)
15 | * [Packer](https://www.packer.io/)
16 | * [FluxCD](https://fluxcd.io/flux/)
17 |
18 | ### k8s in-cluster tools:
19 |
20 | * [Proxmox operator](https://github.com/CRASH-Tech/proxmox-operator)
21 | * [Proxmox CCM](https://github.com/sergelogvinov/proxmox-cloud-controller-manager)
22 | * [Talos CCM](https://github.com/sergelogvinov/talos-cloud-controller-manager)
23 | * [Proxmox CSI](https://github.com/sergelogvinov/proxmox-csi-plugin)
24 | * [Cilium](https://cilium.io/)
25 |
26 | Repository structure
27 | =================
28 |
29 | ```bash
30 | ├── kubernetes # App manifests synced via FluxCD & FluxCD configurations
31 | ├── manifests # App (system-components) applied via Talos Linux controlplane templates
32 | ├── packer # Builds Talos disk on top of Arch Linux for cloud-init functionalities
33 | ├── terraform # Proxmox & Talos terraform providers to provision Talos Management cluster
34 | ├── templates # Management Cluster configurations
35 | ├── output.tf # Terraform output
36 | ├── master-nodes.tf # Proxmox master nodes - Management cluster
37 | ├── worker-nodes.tf # Proxmox worker nodes - Management cluster
38 | ├── k8s.tf # Fetches Talosconfig & Kubeconfig
39 | ├── variables.tf # Terraform variables
40 | ├── terraform.tfvars # Variables to be set here
41 | ├── talos.tf # Talos provider generates secrets, encodes configuration templates, and applies them to the machines
42 | └── versions.tf # Terraform providers
43 | ```
44 |
45 | Overview
46 | =================
47 |
48 | The lab is divided into four stages:
49 |
50 | * Setting up the Proxmox nodes and preparing the cluster, with terraform variables set. This stage is not automated.
51 | * Building and templating the Talos image using packer. This process can also be done manually, with instructions provided in the documentation.
52 | * Setting the terraform.tfvars and running terraform to create ***the Management k8s cluster***. The cluster includes:
53 | * 3 Masters & 3 workers
54 | * [Cilium in strict, tunnel disabled mode](manifests/talos/cilium.yaml) automatically synced with the k8s API through the `api.cluster.local` domain.
55 | * Talos & Proxmox CCM
56 | * Metrics-Server
57 |     * CoreDNS-local
58 | * [MetalLB in L2 mode](terraform/templates/controlplane.yaml.tpl)
59 | * Prometheus CRD's
60 | * Bootstrapped and installed FluxCD, which syncs the following apps:
61 | * cert-manager
62 | * dhcp server
63 | * proxmox operator and CSI plugin
64 | * Sidero & CAPI
65 | * Additionally, it creates [cluster-0](kubernetes/apps/clusters/cluster-0/) demo cluster
66 | * Sidero cluster bootstrap
67 |
68 | Table of contents
69 | =================
70 |
71 |
72 | - [Kubernetes As a Service (KAAS) in Proxmox](#kubernetes-as-a-service--kaas--in-proxmox)
73 | + [Introduction](#introduction)
74 | + [Built With:](#built-with-)
75 | + [k8s in-cluster tools:](#k8s-in-cluster-tools-)
76 | - [Repository structure](#repository-structure)
77 | - [Overview](#overview)
78 | - [Prerequisites](#prerequisites)
79 | * [CLI](#cli)
80 | * [Proxmox Node & Cluster configuration](#proxmox-node---cluster-configuration)
81 | * [Cilium CNI configuration](#cilium-cni-configuration)
82 | * [Pull Through Image Cache](#pull-through-image-cache)
83 | - [Installation](#installation)
84 | + [DHCP Disabled](#dhcp-disabled)
85 | * [Variables](#variables)
86 | * [Packer](#packer)
87 | + [Manual method](#manual-method)
88 | * [Terraform](#terraform)
89 | + [terraform plan](#terraform-plan)
90 | + [terraform apply](#terraform-apply)
91 | + [terraform output for talosconfig & kubeconfig can be checked if needed](#terraform-output-for-talosconfig---kubeconfig-can-be-checked-if-needed)
92 | + [CSI testing](#csi-testing)
93 | * [Sidero Bootstrap](#sidero-bootstrap)
94 | + [Scaling](#scaling)
95 | + [New clusters](#new-clusters)
96 |   * [Terraform destroy](#terraform-destroy)
97 | - [References](#references)
98 | + [terraform gitignore template](#terraform-gitignore-template)
99 | + [Author's blog](#authors-blog)
100 |
101 |
102 |
103 |
(back to top)
104 |
105 | Prerequisites
106 | ============
107 |
108 | ## CLI
109 | You will need these CLI tools installed on your workstation
110 |
111 | * talosctl CLI:
112 | ```bash
113 | curl -sL https://talos.dev/install | sh
114 | ```
115 | * [kubectl](https://kubernetes.io/docs/tasks/tools/)
116 | * [clusterctl](https://cluster-api.sigs.k8s.io/user/quick-start.html#install-clusterctl)
117 | * [packer](https://developer.hashicorp.com/packer/downloads)
118 | * [terraform](https://developer.hashicorp.com/terraform/downloads?product_intent=terraform)
119 | * [helm](https://helm.sh/docs/intro/install/)
120 |
121 | ## Proxmox Node & Cluster configuration
122 |
123 | This lab consists of a single Proxmox node with [Masquerading (NAT) with iptables configured](https://pve.proxmox.com/wiki/Network_Configuration)
124 |
125 | ```bash
126 | auto lo
127 | iface lo inet loopback
128 |
129 | auto enp42s0
130 | iface enp42s0 inet static
131 | address 192.168.1.100/24
132 | gateway 192.168.1.1
133 |
134 | iface wlo1 inet manual
135 |
136 | auto vmbr0
137 | iface vmbr0 inet static
138 | address 10.1.1.1/24
139 | bridge-ports none
140 | bridge-stp off
141 | bridge-fd 0
142 | post-up echo 1 > /proc/sys/net/ipv4/ip_forward
143 | post-up iptables -t nat -A POSTROUTING -s '10.1.1.0/24' -o enp42s0 -j MASQUERADE
144 | post-down iptables -t nat -D POSTROUTING -s '10.1.1.0/24' -o enp42s0 -j MASQUERADE
145 | # wireguard vpn virtual machine config
146 | post-up iptables -t nat -A PREROUTING -p udp -d 192.168.1.100 --dport 52890 -i enp42s0 -j DNAT --to-destination 10.1.1.2:52890
147 |         post-down iptables -t nat -D PREROUTING -p udp -d 192.168.1.100 --dport 52890 -i enp42s0 -j DNAT --to-destination 10.1.1.2:52890
148 | ```
149 |
150 | The lab infrastructure is provisioned using a flat 10.1.1.0/24 network. Make sure to change the variables according to your networking setup. In addition, ensure that the Proxmox storage names in the [terraform.tfvars](terraform/terraform.tfvars) file are correct.
151 |
152 | Although there is only one Proxmox node, a cluster (cluster-1) was initialized using the UI for testing purposes of [Proxmox CCM](https://github.com/sergelogvinov/proxmox-cloud-controller-manager) & [Talos CCM](https://github.com/sergelogvinov/talos-cloud-controller-manager) & [Proxmox CSI](https://github.com/sergelogvinov/proxmox-csi-plugin).
153 |
154 | To build the Talos image with cloud-init functionality, download the Arch Linux image from [here](https://archlinux.org/download/) by running the following command on the Proxmox machine:
155 |
156 | ```bash
157 | wget -nc -q --show-progress -O "/var/lib/vz/template/iso/archlinux-2023.04.01-x86_64.iso" "http://archlinux.uk.mirror.allworldit.com/archlinux/iso/2023.04.01/archlinux-2023.04.01-x86_64.iso"
158 | ```
159 |
160 | You will need to use that image in "iso_file" [packer here](packer/proxmox.pkr.hcl)
161 |
162 | (back to top)
163 |
164 | ## Cilium CNI configuration
165 |
166 | The following Helm chart template was used to generate a [plain yaml manifest](manifests/talos/cilium.yaml), which is then applied in the [Talos control plane template](terraform/templates/controlplane.yaml.tpl):
167 |
168 | ```bash
169 | helm template cilium \
170 | cilium/cilium \
171 | --version 1.13.2 \
172 | --namespace cilium \
173 | --set ipam.mode=kubernetes \
174 | --set tunnel=disabled \
175 | --set bpf.masquerade=true \
176 | --set endpointRoutes.enabled=true \
177 | --set kubeProxyReplacement=strict \
178 | --set autoDirectNodeRoutes=true \
179 | --set localRedirectPolicy=true \
180 | --set operator.rollOutPods=true \
181 | --set rollOutCiliumPods=true \
182 | --set ipv4NativeRoutingCIDR="10.244.0.0/16" \
183 | --set hubble.relay.enabled=true \
184 | --set hubble.ui.enabled=true \
185 | --set securityContext.capabilities.ciliumAgent="{CHOWN,KILL,NET_ADMIN,NET_RAW,IPC_LOCK,SYS_ADMIN,SYS_RESOURCE,DAC_OVERRIDE,FOWNER,SETGID,SETUID}" \
186 | --set=securityContext.capabilities.cleanCiliumState="{NET_ADMIN,SYS_ADMIN,SYS_RESOURCE}" \
187 | --set=cgroup.autoMount.enabled=false \
188 | --set=cgroup.hostRoot=/sys/fs/cgroup \
189 | --set=k8sServiceHost="api.cluster.local" \
190 | --set=k8sServicePort="6443"
191 | ```
192 | Talos `clusterDNS` & `ExtraHostEntries` were added to the controlplane & worker configurations:
193 |
194 | ```bash
195 | clusterDNS:
196 | - 169.254.2.53
197 | - ${cidrhost(split(",",serviceSubnets)[0], 10)}
198 |
199 | ---
200 |
201 | - interface: dummy0
202 | addresses:
203 | - 169.254.2.53/32
204 | extraHostEntries:
205 | - ip: 127.0.0.1
206 | aliases:
207 | - ${apiDomain}
208 | ```
209 | By using the configuration from the [coredns-local](manifests/talos/coredns-local.yaml) manifest, we can avoid creating separate Cilium manifests for each k8s cluster. This is because each cluster has a different k8s cluster API endpoint. To accomplish this, the `api.cluster.local` domain is added to the CoreDNS configuration, allowing us to apply the same Cilium CNI manifest to multiple clusters.
210 |
211 | (back to top)
212 |
213 | ## Pull Through Image Cache
214 |
215 | In order to speed up the provisioning of the clusters and their components, we use [Talos Pull Through Image Cache](https://www.talos.dev/v1.4/talos-guides/configuration/pull-through-cache/) & Harbor registry. Although this lab does not include the creation and configuration of a Docker registry, automation for Proxmox may be added in the future. If you do not wish to use the Pull image cache, you can delete the following lines in [controlplane](terraform/templates/controlplane.yaml.tpl) & [workers](terraform/templates/worker.yaml.tpl):
216 |
217 | ```bash
218 | registries:
219 | mirrors:
220 | docker.io:
221 | endpoints:
222 | - http://${registry-endpoint}/v2/proxy-docker.io
223 | overridePath: true
224 | ghcr.io:
225 | endpoints:
226 | - http://${registry-endpoint}/v2/proxy-ghcr.io
227 | overridePath: true
228 | gcr.io:
229 | endpoints:
230 | - http://${registry-endpoint}/v2/proxy-gcr.io
231 | overridePath: true
232 | registry.k8s.io:
233 | endpoints:
234 | - http://${registry-endpoint}/v2/proxy-registry.k8s.io
235 | overridePath: true
236 | quay.io:
237 | endpoints:
238 | - http://${registry-endpoint}/v2/proxy-quay.io
239 | overridePath: true
240 | ```
241 |
242 | (back to top)
243 |
244 | Installation
245 | ============
246 | ### DHCP Disabled
247 |
248 | **To facilitate the testing of this lab, we need to disable the DHCP service in the infrastructure network; in my configuration, the VM network is 10.1.1.0/24. This is necessary because a DHCP server will be running inside the Management k8s cluster using FluxCD.**
249 |
250 | DHCP configuration needs to be changed according to your networking setup - [dhcp-config](kubernetes/apps/dhcp/dhcp.yaml)
251 |
252 |
253 | ## Variables
254 | The following variables need to be set in terminal:
255 |
256 | ```bash
257 | export PROXMOX_HOST="https://px.host.com:8006/api2/json"
258 | export PROXMOX_TOKEN_ID='root@pam!fire'
259 | export PROXMOX_TOKEN_SECRET="secret"
260 | export PROXMOX_NODE_NAME="proxmox"
261 | export CLUSTER_NAME="mgmt-cluster"
262 | ```
263 |
264 | ## Packer
265 |
266 | Configure variables in [local.pkrvars.hcl](packer/vars/local.pkrvars.hcl)
267 |
268 | ```bash
269 | cd packer
270 |
271 | packer init -upgrade .
272 |
273 | packer build -only=release.proxmox.talos -var-file="vars/local.pkrvars.hcl" -var proxmox_username="${PROXMOX_TOKEN_ID}" \
274 | -var proxmox_token="${PROXMOX_TOKEN_SECRET}" -var proxmox_nodename="${PROXMOX_NODE_NAME}" -var proxmox_url="${PROXMOX_HOST}" .
275 | ```
276 |
277 |
278 | Output:
279 |
280 | ```bash
281 | release.proxmox.talos: output will be in this color.
282 |
283 | ==> release.proxmox.talos: Creating VM
284 | ==> release.proxmox.talos: No VM ID given, getting next free from Proxmox
285 | ==> release.proxmox.talos: Starting VM
286 | ==> release.proxmox.talos: Waiting 25s for boot
287 | ==> release.proxmox.talos: Typing the boot command
288 | ==> release.proxmox.talos: Using SSH communicator to connect: 10.1.1.30
289 | ==> release.proxmox.talos: Waiting for SSH to become available...
290 | ==> release.proxmox.talos: Connected to SSH!
291 | ==> release.proxmox.talos: Provisioning with shell script: /var/folders/p9/q4lh62654_zbp_psdzkf5gj00000gn/T/packer-shell3807895858
292 | ==> release.proxmox.talos: % Total % Received % Xferd Average Speed Time Time Time Current
293 | ==> release.proxmox.talos: Dload Upload Total Spent Left Speed
294 | ==> release.proxmox.talos: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
295 | ==> release.proxmox.talos: 0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
296 | ==> release.proxmox.talos: 100 75.3M 100 75.3M 0 0 9789k 0 0:00:07 0:00:07 --:--:-- 10.6M
297 | ==> release.proxmox.talos: 2551808+0 records in
298 | ==> release.proxmox.talos: 2551808+0 records out
299 | ==> release.proxmox.talos: 1306525696 bytes (1.3 GB, 1.2 GiB) copied, 14.2868 s, 91.5 MB/s
300 | ==> release.proxmox.talos: Stopping VM
301 | ==> release.proxmox.talos: Converting VM to template
302 | Build 'release.proxmox.talos' finished after 2 minutes 14 seconds.
303 |
304 | ==> Wait completed after 2 minutes 14 seconds
305 |
306 | ==> Builds finished. The artifacts of successful builds are:
307 | --> release.proxmox.talos: A template was created: 101
308 | ```
309 |
310 |
311 |
312 | (back to top)
313 |
314 | ### Manual method
315 |
316 | We create an Arch Linux VM in Proxmox. Find the name of the system disk, for example - `local-lvm:vm-106-disk-0`, `lvm volume vm-106-disk-0`.
317 | We copy Talos system disk using [talos nocloud image](https://github.com/siderolabs/talos/releases/download/v1.4.2/nocloud-amd64.raw.xz) to this volume.
318 |
319 | ```bash
320 | cd /tmp
321 | wget https://github.com/siderolabs/talos/releases/download/v1.4.2/nocloud-amd64.raw.xz
322 | xz -d -c nocloud-amd64.raw.xz | dd of=/dev/mapper/vg0-vm--106--disk--0
323 | ```
324 |
325 | And then stop & convert that VM into the template in Proxmox.
326 |
327 | ## Terraform
328 |
329 | Configure variables in [terraform.tfvars](terraform/terraform.tfvars)
330 |
331 | **!!! Keep apiDomain variable the same = api.cluster.local. Otherwise, Cilium init is going to fail.**
332 |
333 | ### terraform plan
334 |
335 | ```bash
336 | cd terraform
337 |
338 | terraform plan -var-file="terraform.tfvars" -var proxmox_token_id="${PROXMOX_TOKEN_ID}" \
339 | -var proxmox_token_secret="${PROXMOX_TOKEN_SECRET}" -var target_node_name="${PROXMOX_NODE_NAME}" \
340 | -var proxmox_host="${PROXMOX_HOST}" -var cluster_name="${CLUSTER_NAME}"
341 | ```
342 |
343 | ### terraform apply
344 |
345 | ```bash
346 | terraform apply -auto-approve -var-file="terraform.tfvars" -var proxmox_token_id="${PROXMOX_TOKEN_ID}" \
347 | -var proxmox_token_secret="${PROXMOX_TOKEN_SECRET}" -var target_node_name="${PROXMOX_NODE_NAME}" \
348 | -var proxmox_host="${PROXMOX_HOST}" -var cluster_name="${CLUSTER_NAME}"
349 | ```
350 |
351 |
352 | Output:
353 |
354 | ```bash
355 | # output truncated
356 | local_sensitive_file.talosconfig: Creation complete after 0s [id=542ee0511df16825d846eed4e0bf4f6ca5fdbe61]
357 | null_resource.kubeconfig: Creating...
358 | null_resource.kubeconfig: Provisioning with 'local-exec'...
359 | null_resource.kubeconfig (local-exec): Executing: ["/bin/sh" "-c" "talosctl kubeconfig --force -n 10.1.1.5 -e 10.1.1.5 --talosconfig ./talosconfig"]
360 | null_resource.kubeconfig: Creation complete after 0s [id=5310785605648426604]
361 | null_resource.kubeconfigapi: Creating...
362 | null_resource.kubeconfigapi: Provisioning with 'local-exec'...
363 | null_resource.kubeconfigapi (local-exec): Executing: ["/bin/sh" "-c" "kubectl --kubeconfig ~/.kube/config config set clusters.mgmt-cluster.server https://10.1.1.20:6443"]
364 | null_resource.kubeconfigapi (local-exec): Property "clusters.mgmt-cluster.server" set.
365 | null_resource.kubeconfigapi: Creation complete after 0s [id=3224005877932970184]
366 |
367 | Apply complete! Resources: 17 added, 0 changed, 0 destroyed.
368 |
369 | Outputs:
370 |
371 | cp =
372 | talosconfig =
373 | worker =
374 | ```
375 |
376 |
377 |
378 | After `terraform apply` is completed, within 10 minutes (depending on whether you use the pull cache) you should have the following kubectl output:
379 |
380 | ```bash
381 | kubectl get node -o wide
382 | NAME STATUS ROLES AGE VERSION INTERNAL-IP EXTERNAL-IP OS-IMAGE KERNEL-VERSION CONTAINER-RUNTIME
383 | master-0 Ready control-plane 2m24s v1.27.1 10.1.1.5 Talos (v1.4.2) 6.1.27-talos containerd://1.6.21
384 | master-1 Ready control-plane 2m30s v1.27.1 10.1.1.6 Talos (v1.4.2) 6.1.27-talos containerd://1.6.21
385 | master-2 Ready control-plane 2m45s v1.27.1 10.1.1.7 Talos (v1.4.2) 6.1.27-talos containerd://1.6.21
386 | worker-0 Ready 2m49s v1.27.1 10.1.1.8 Talos (v1.4.2) 6.1.27-talos containerd://1.6.21
387 | worker-1 Ready 2m33s v1.27.1 10.1.1.9 Talos (v1.4.2) 6.1.27-talos containerd://1.6.21
388 | worker-2 Ready 2m31s v1.27.1 10.1.1.10 Talos (v1.4.2) 6.1.27-talos containerd://1.6.21
389 |
390 | kubectl get pod -A
391 | NAMESPACE NAME READY STATUS RESTARTS AGE
392 | cabpt-system cabpt-controller-manager-bcbb75fd8-fbczg 1/1 Running 0 70s
393 | cacppt-system cacppt-controller-manager-5b99d8794f-rphl2 1/1 Running 0 70s
394 | capi-system capi-controller-manager-86c6bfd9b5-g6xk7 1/1 Running 0 70s
395 | cert-manager cert-manager-555cc9b8b5-8snqq 1/1 Running 0 106s
396 | cert-manager cert-manager-cainjector-55c69fbf8-l77qj 1/1 Running 0 106s
397 | cert-manager cert-manager-webhook-65ddf78f48-mwq74 1/1 Running 0 106s
398 | cilium cilium-25ltk 1/1 Running 0 2m54s
399 | cilium cilium-8lhqv 1/1 Running 0 2m47s
400 | cilium cilium-dhhk4 1/1 Running 0 2m56s
401 | cilium cilium-kswnj 1/1 Running 0 3m8s
402 | cilium cilium-m9wfj 1/1 Running 0 3m12s
403 | cilium cilium-operator-7496b89b79-cpxkl 1/1 Running 0 3m13s
404 | cilium cilium-operator-7496b89b79-fhfhs 1/1 Running 0 3m13s
405 | cilium cilium-qcwgq 1/1 Running 0 2m53s
406 | cilium hubble-relay-84c586cc86-7kpts 1/1 Running 0 3m13s
407 | cilium hubble-ui-694cf76f4c-pjnv7 2/2 Running 0 3m12s
408 | csi-proxmox proxmox-csi-plugin-controller-666957fd94-rpz97 5/5 Running 0 117s
409 | csi-proxmox proxmox-csi-plugin-node-5gfng 3/3 Running 2 (106s ago) 117s
410 | csi-proxmox proxmox-csi-plugin-node-6vqvs 3/3 Running 0 117s
411 | csi-proxmox proxmox-csi-plugin-node-kq2l6 3/3 Running 2 (105s ago) 117s
412 | flux-system helm-controller-79ff5d8665-6bnxw 1/1 Running 0 119s
413 | flux-system image-automation-controller-679b595d96-62lwq 1/1 Running 0 119s
414 | flux-system image-reflector-controller-9b7d45fc5-bc7gh 1/1 Running 0 119s
415 | flux-system kustomize-controller-5b658b9864-9b5rv 1/1 Running 0 119s
416 | flux-system notification-controller-86d886486b-zb497 1/1 Running 0 119s
417 | flux-system source-controller-6fd5cb556d-kznjv 1/1 Running 0 116s
418 | kube-system coredns-d779cc7ff-mmhj7 1/1 Running 0 3m10s
419 | kube-system coredns-d779cc7ff-s2kg8 1/1 Running 0 3m10s
420 | kube-system coredns-local-87cfn 1/1 Running 0 2m29s
421 | kube-system coredns-local-bs9q4 1/1 Running 0 2m19s
422 | kube-system coredns-local-dgsp6 1/1 Running 0 2m27s
423 | kube-system coredns-local-jmqc5 1/1 Running 0 2m26s
424 | kube-system coredns-local-pmxp9 1/1 Running 0 2m54s
425 | kube-system coredns-local-qsj7z 1/1 Running 0 2m39s
426 | kube-system dhcp-talos-dhcp-server-7855bb8897-998zn 1/1 Running 0 2m
427 | kube-system kube-apiserver-master-0 1/1 Running 0 2m8s
428 | kube-system kube-apiserver-master-1 1/1 Running 0 2m23s
429 | kube-system kube-apiserver-master-2 1/1 Running 0 2m9s
430 | kube-system kube-controller-manager-master-0 1/1 Running 1 (3m32s ago) 2m4s
431 | kube-system kube-controller-manager-master-1 1/1 Running 2 (3m36s ago) 2m8s
432 | kube-system kube-controller-manager-master-2 1/1 Running 1 (3m17s ago) 2m7s
433 | kube-system kube-scheduler-master-0 1/1 Running 1 (3m32s ago) 2m6s
434 | kube-system kube-scheduler-master-1 1/1 Running 2 (3m37s ago) 112s
435 | kube-system kube-scheduler-master-2 1/1 Running 1 (3m17s ago) 2m9s
436 | kube-system metrics-server-7b4c4d4bfd-x6dzg 1/1 Running 0 2m51s
437 | kube-system proxmox-cloud-controller-manager-79c9ff5cf6-xpn6l 1/1 Running 0 2m49s
438 | kube-system proxmox-operator-5c79f67c66-w9g72 1/1 Running 0 117s
439 | kube-system talos-cloud-controller-manager-776fdbd456-wtmjk 1/1 Running 0 2m46s
440 | metallb-system controller-7948676b95-w58sw 1/1 Running 0 2m55s
441 | metallb-system speaker-9h28g 1/1 Running 0 2m10s
442 | metallb-system speaker-dkrvr 1/1 Running 0 2m11s
443 | metallb-system speaker-drxxr 1/1 Running 0 2m11s
444 | metallb-system speaker-fz8ls 1/1 Running 0 2m10s
445 | metallb-system speaker-j2v4g 1/1 Running 0 2m10s
446 | metallb-system speaker-nbpl7 1/1 Running 0 2m10s
447 | sidero-system caps-controller-manager-5b4b95bcdb-74bvb 1/1 Running 0 70s
448 | sidero-system sidero-controller-manager-7d5b96cf6d-tppq6 4/4 Running 0 70s
449 | ```
450 | ### terraform output for talosconfig & kubeconfig can be checked if needed
451 |
452 | ```bash
453 | terraform output -raw talosconfig
454 | terraform output -raw kubeconfig
455 | ```
456 |
457 | ### CSI testing
458 |
459 | Storageclasses should be already configured in the cluster:
460 | ```bash
461 | kubectl get sc
462 | NAME PROVISIONER RECLAIMPOLICY VOLUMEBINDINGMODE ALLOWVOLUMEEXPANSION AGE
463 | proxmox-data csi.proxmox.sinextra.dev Delete WaitForFirstConsumer true 6m33s
464 | proxmox-data-xfs csi.proxmox.sinextra.dev Delete WaitForFirstConsumer true 6m33s
465 | ```
466 |
467 | Let's apply a test Pod with volume attached and check on that:
468 |
469 | `kubectl apply -f https://raw.githubusercontent.com/sergelogvinov/proxmox-csi-plugin/main/docs/deploy/test-pod-ephemeral.yaml`
470 |
471 | ```bash
472 | kubectl apply -f https://raw.githubusercontent.com/sergelogvinov/proxmox-csi-plugin/main/docs/deploy/test-pod-ephemeral.yaml
473 | pod/test created
474 |
475 | kubectl -n default get pods,pvc
476 | NAME READY STATUS RESTARTS AGE
477 | pod/test 1/1 Running 0 70s
478 |
479 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE
480 | persistentvolumeclaim/test-pvc Bound pvc-41b1aea6-fa99-4ad3-b28d-57f1ce4a85aa 1Gi RWO proxmox-data-xfs 70s
481 |
482 | ---
483 | kubectl describe pv pvc-41b1aea6-fa99-4ad3-b28d-57f1ce4a85aa
484 | Name: pvc-41b1aea6-fa99-4ad3-b28d-57f1ce4a85aa
485 | Labels:
486 | Annotations: pv.kubernetes.io/provisioned-by: csi.proxmox.sinextra.dev
487 | volume.kubernetes.io/provisioner-deletion-secret-name:
488 | volume.kubernetes.io/provisioner-deletion-secret-namespace:
489 | Finalizers: [kubernetes.io/pv-protection external-attacher/csi-proxmox-sinextra-dev]
490 | StorageClass: proxmox-data-xfs
491 | Status: Bound
492 | Claim: default/test-pvc
493 | Reclaim Policy: Delete
494 | Access Modes: RWO
495 | VolumeMode: Filesystem
496 | Capacity: 1Gi
497 | Node Affinity:
498 | Required Terms:
499 | Term 0: topology.kubernetes.io/region in [cluster-1]
500 | topology.kubernetes.io/zone in [proxmox]
501 | Message:
502 | Source:
503 | Type: CSI (a Container Storage Interface (CSI) volume source)
504 | Driver: csi.proxmox.sinextra.dev
505 | FSType: xfs
506 | VolumeHandle: cluster-1/proxmox/vms/vm-9999-pvc-41b1aea6-fa99-4ad3-b28d-57f1ce4a85aa
507 | ReadOnly: false
508 | VolumeAttributes: storage=vms
509 | storage.kubernetes.io/csiProvisionerIdentity=1683735975451-8081-csi.proxmox.sinextra.dev
510 | Events:
511 | ```
512 |
513 | Try StatefulSet:
514 |
515 | `kubectl apply -f https://raw.githubusercontent.com/sergelogvinov/proxmox-csi-plugin/main/docs/deploy/test-statefulset.yaml`
516 |
517 | ```bash
518 | kubectl -n default get pods,pvc -owide 4s ⎈ admin@mgmt-cluster
519 | NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
520 | pod/test-0 1/1 Running 0 57s 10.244.3.95 worker-2
521 | pod/test-1 1/1 Running 0 57s 10.244.0.119 worker-0
522 | pod/test-2 1/1 Running 0 57s 10.244.2.234 worker-1
523 |
524 | NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE
525 | persistentvolumeclaim/storage-test-0 Bound pvc-d8be278f-ffee-49c3-b303-10fc2ebd79ae 1Gi RWO proxmox-data 57s Filesystem
526 | persistentvolumeclaim/storage-test-1 Bound pvc-a7b73455-1ae2-4fb5-a7ca-1a671e81491c 1Gi RWO proxmox-data 57s Filesystem
527 | persistentvolumeclaim/storage-test-2 Bound pvc-f7ab10a6-50af-40d4-b704-8fefb0d1bff9 1Gi RWO proxmox-data 57s Filesystem
528 | ```
529 |
530 | (back to top)
531 |
532 | ## Sidero Bootstrap
533 |
534 | If you check your Proxmox UI, you might find 2 new VM's already running in there. What we can see in the Management k8s cluster though:
535 |
536 | ```bash
537 | kubectl get qemu
538 |
539 | NAME STATUS POWER CLUSTER NODE VMID
540 | sidero-master-1 SYNCED ON cluster-1 proxmox 108
541 | sidero-worker-1 SYNCED ON cluster-1 proxmox 109
542 | ---
543 |
544 | kubectl get servers
545 |
546 | NAME HOSTNAME ACCEPTED CORDONED ALLOCATED CLEAN POWER AGE
547 | f0ac3f32-ee63-11ed-a05b-0242ac120003 10.1.1.52 true true on 12m
548 | fe4fadea-ee63-11ed-a05b-0242ac120003 10.1.1.53 true true on 12m
549 | ---
550 |
551 | kubectl get serverclasses
552 |
553 | NAME               AVAILABLE                                                                          IN USE   AGE
554 | any ["f0ac3f32-ee63-11ed-a05b-0242ac120003","fe4fadea-ee63-11ed-a05b-0242ac120003"] [] 14m
555 | master-cluster-0 [] [] 14m
556 | worker-cluster-0 [] [] 14m
557 | ---
558 |
559 | kubectl get cluster
560 |
561 | NAME PHASE AGE VERSION
562 | cluster-0 Provisioned 14m
563 | ---
564 |
565 | kubectl get TalosControlPlane
566 |
567 | NAME READY INITIALIZED REPLICAS READY REPLICAS UNAVAILABLE REPLICAS
568 | cluster-0-cp 1 1
569 | ---
570 |
571 | kubectl get MachineDeployment
572 |
573 | NAME CLUSTER REPLICAS READY UPDATED UNAVAILABLE PHASE AGE VERSION
574 | cluster-0-workers cluster-0 1 1 1 ScalingUp 14m v1.27.1
575 |
576 | ```
577 |
578 | As mentioned before, all of these manifests were applied and synced via FluxCD from [cluster-0](kubernetes/apps/clusters/cluster-0/manifests/).
579 |
580 | If you look at ServerClasses for [masters](kubernetes/apps/clusters/cluster-0/manifests/master-dev-sc.yaml) and [workers](kubernetes/apps/clusters/cluster-0/manifests/worker-dev-sc.yaml), `labelSelectors` are specified in there hence we need to apply labels to the `servers` in order to start bootstrapping a new k8s cluster.
581 |
582 | ```bash
583 | kubectl label servers fe4fadea-ee63-11ed-a05b-0242ac120003 worker-dev=true
584 | kubectl label servers f0ac3f32-ee63-11ed-a05b-0242ac120003 master-dev=true
585 |
586 | server.metal.sidero.dev/fe4fadea-ee63-11ed-a05b-0242ac120003 labeled
587 | server.metal.sidero.dev/f0ac3f32-ee63-11ed-a05b-0242ac120003 labeled
588 | ```
589 |
590 | At this time, we can see that both servers are now In Use, which means that cluster creation is initialized:
591 |
592 | ```bash
593 | kubectl get serverclasses
594 |
595 | NAME AVAILABLE IN USE AGE
596 | any [] ["f0ac3f32-ee63-11ed-a05b-0242ac120003","fe4fadea-ee63-11ed-a05b-0242ac120003"] 22m
597 | master-cluster-0 [] ["f0ac3f32-ee63-11ed-a05b-0242ac120003"] 22m
598 | worker-cluster-0 [] ["fe4fadea-ee63-11ed-a05b-0242ac120003"] 22m
599 | ```
600 |
601 | Now, let's fetch talosconfig and kubeconfig from the cluster:
602 |
603 | ```bash
604 | kubectl get talosconfig -o yaml $(kubectl get talosconfig --no-headers | awk 'NR==1{print $1}') -o jsonpath='{.status.talosConfig}' > cluster-0.yaml
605 |
606 | talosctl --talosconfig cluster-0.yaml kubeconfig --force -n 10.1.1.40 -e 10.1.1.40
607 | ```
608 |
609 | Due to the fact that we use Cilium for all of our clusters with `api.cluster.local`, we want to change the API endpoint to the Talos cluster VIP, which in my terraform settings is set to 10.1.1.40:
610 |
611 | ```bash
612 | kubectl --kubeconfig ~/.kube/config config set clusters.cluster-0.server https://10.1.1.40:6443
613 |
614 | Property "clusters.cluster-0.server" set.
615 | ```
616 |
617 | At that point, we have a fully working cluster bootstrapped and provisioned via Sidero and FluxCD:
618 |
619 | ```bash
620 | kubectl get node
621 |
622 | NAME STATUS ROLES AGE VERSION
623 | talos-5o3-l4b Ready 3m2s v1.27.1
624 | talos-svd-m63 Ready control-plane 2m56s v1.27.1
625 | ---
626 |
627 | kubectl get pod -A
628 |
629 | NAMESPACE NAME READY STATUS RESTARTS AGE
630 | cilium cilium-c878h 1/1 Running 0 3m7s
631 | cilium cilium-operator-7496b89b79-7zx66 1/1 Running 0 3m13s
632 | cilium cilium-operator-7496b89b79-ff4vt 1/1 Running 0 3m13s
633 | cilium cilium-rhqxv 1/1 Running 0 3m1s
634 | cilium hubble-relay-84c586cc86-z82jt 1/1 Running 0 3m13s
635 | cilium hubble-ui-694cf76f4c-bzlq2 2/2 Running 0 3m13s
636 | kube-system coredns-5665966b56-7zz8m 1/1 Running 0 3m13s
637 | kube-system coredns-5665966b56-vbtwq 1/1 Running 0 3m13s
638 | kube-system coredns-local-7pls2 1/1 Running 0 2m36s
639 | kube-system coredns-local-qlkcg 1/1 Running 0 2m34s
640 | kube-system kube-apiserver-talos-svd-m63 1/1 Running 0 2m39s
641 | kube-system kube-controller-manager-talos-svd-m63 1/1 Running 2 (3m44s ago) 2m17s
642 | kube-system kube-scheduler-talos-svd-m63 1/1 Running 2 (3m44s ago) 2m1s
643 | kube-system kubelet-csr-approver-7759f94756-bkhzr 1/1 Running 0 3m13s
644 | kube-system metrics-server-7b4c4d4bfd-h2sc6 1/1 Running 0 3m8s
645 | metallb-system controller-7948676b95-947g5 1/1 Running 0 3m12s
646 | metallb-system speaker-6kzj8 1/1 Running 0 2m34s
647 | metallb-system speaker-m7d7z 1/1 Running 0 2m36s
648 | ```
649 |
650 | ### Scaling
651 | If you want to add more nodes, you can add QEMU objects in the [vms.yaml](kubernetes/apps/clusters/cluster-0/manifests/vms.yaml) manifest. Even though we have all resources in the management cluster, we can't scale the nodes using a [kubectl command like here](https://www.sidero.dev/v0.5/getting-started/scale-workload/); that's because we use FluxCD to sync our manifests in the cluster. Therefore, to add more worker nodes we need to scale replicas in the [cluster-0.yaml](kubernetes/apps/clusters/cluster-0/manifests/cluster-0.yaml) manifest `MachineDeployment` object. Additionally, all of these new nodes need to be labeled with `worker-dev=true` `labelSelectors`.
652 |
653 | Likewise, if you add more nodes via the Proxmox operator, you can either set your own custom UUIDs for the nodes or delete the whole `smbios1` part.
654 |
655 | `smbios1: "uuid=f0ac3f32-ee63-11ed-a05b-0242ac120003,manufacturer=MTIz,product=MTIz,version=MTIz,serial=MTIz,sku=MTIz,family=MTIz,base64=1"`
656 |
657 | ### New clusters
658 |
659 | If you need to create a new cluster, follow this [doc](https://www.sidero.dev/v0.5/getting-started/create-workload/). It will be the same process of running these commands, and once cluster-1 manifest is generated you can add it to FluxCD git repo.
660 |
661 | ```bash
662 | export CONTROL_PLANE_SERVERCLASS=master-cluster-1
663 | export WORKER_SERVERCLASS=worker-cluster-1
664 | export TALOS_VERSION=v1.3.0
665 | export KUBERNETES_VERSION=v1.27.1
666 | export CONTROL_PLANE_PORT=6443
667 | export CONTROL_PLANE_ENDPOINT=api.cluster.local
668 |
669 | clusterctl generate cluster cluster-1 -i sidero:v0.5.8 > cluster-1.yaml
670 | ```
671 |
672 |
673 |
674 | (back to top)
675 |
676 | ## Terraform destroy
677 |
678 | ```bash
679 | terraform destroy -refresh=false -auto-approve -var-file="terraform.tfvars" -var proxmox_token_id="${PROXMOX_TOKEN_ID}" \
680 | -var proxmox_token_secret="${PROXMOX_TOKEN_SECRET}" -var target_node_name="${PROXMOX_NODE_NAME}" -var proxmox_host="${PROXMOX_HOST}" -var cluster_name="${CLUSTER_NAME}"
681 | ```
682 |
683 | Bear in mind that if you run `terraform destroy`, it is not going to delete the VMs that were provisioned by the Proxmox Operator/FluxCD. You would need to suspend syncing in FluxCD and destroy those machines via the `kubectl delete qemu vm-name` command.
684 |
685 |
686 | References
687 | ============
688 | https://www.sidero.dev/v0.5/getting-started/
689 |
690 | https://www.talos.dev/v1.4/talos-guides/install/cloud-platforms/nocloud/
691 |
692 | https://github.com/sergelogvinov/terraform-talos
693 |
694 | ### Author's blog
695 |
696 | https://dev.to/bnovickovs
697 |
698 | #### Hybrid cluster example
699 |
700 | https://github.com/kubebn/aws-talos-terraform-hybrid -- https://dev.to/bnovickovs/hybrid-k8s-cluster-talos-kubespan-kilo-wireguard-1f45
701 |
702 | ### terraform gitignore template
703 |
704 | ```bash
705 | # Local .terraform directories
706 | **/.terraform/*
707 |
708 | # .tfstate files
709 | *.tfstate
710 | *.tfstate.*
711 |
712 | # Crash log files
713 | crash.log
714 | crash.*.log
715 |
716 | # Ignore override files as they are usually used to override resources locally and so
717 | # are not checked in
718 | override.tf
719 | override.tf.json
720 | *_override.tf
721 | *_override.tf.json
722 |
723 | # Include override files you do wish to add to version control using negated pattern
724 | # !example_override.tf
725 |
726 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
727 | # example: *tfplan*
728 |
729 | # Ignore CLI configuration files
730 | .terraformrc
731 | terraform.rc
732 | .terraform.lock.hcl
733 |
734 | talosconfig
735 | kubeconfig
736 | ```
737 |
--------------------------------------------------------------------------------
/kubernetes/apps/cert-manager/cert-manager/app/helmrelease.yaml:
--------------------------------------------------------------------------------
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/helm.toolkit.fluxcd.io/helmrelease_v2beta1.json
# HelmRelease installing cert-manager v1.11.1 from the "jetstack" HelmRepository
# (defined in flux-system).
apiVersion: helm.toolkit.fluxcd.io/v2beta1
kind: HelmRelease
metadata:
  name: cert-manager
  namespace: cert-manager
spec:
  interval: 15m
  chart:
    spec:
      chart: cert-manager
      version: v1.11.1
      sourceRef:
        kind: HelmRepository
        name: jetstack
        namespace: flux-system
  # Keep only the last two Helm release revisions.
  maxHistory: 2
  install:
    createNamespace: true
    remediation:
      retries: 3
  upgrade:
    cleanupOnFail: true
    remediation:
      retries: 3
  uninstall:
    keepHistory: false
  values:
    # Let the chart install/upgrade the cert-manager CRDs.
    installCRDs: true
    extraArgs:
      # Use public resolvers for DNS-01 propagation self-checks only,
      # bypassing the in-cluster DNS.
      - --dns01-recursive-nameservers=1.1.1.1:53,9.9.9.9:53
      - --dns01-recursive-nameservers-only
    # Pin the controller pods' own DNS to the same public resolvers.
    podDnsPolicy: None
    podDnsConfig:
      nameservers:
        - "1.1.1.1"
        - "9.9.9.9"
    prometheus:
      enabled: true
      servicemonitor:
        enabled: true
        prometheusInstance: monitoring

--------------------------------------------------------------------------------
/kubernetes/apps/cert-manager/cert-manager/app/kustomization.yaml:
--------------------------------------------------------------------------------
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
# Kustomize entrypoint for the cert-manager app; forces everything into the
# cert-manager namespace.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
namespace: cert-manager
resources:
  - ./helmrelease.yaml

--------------------------------------------------------------------------------
/kubernetes/apps/cert-manager/cert-manager/ks.yaml:
--------------------------------------------------------------------------------
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/kustomize.toolkit.fluxcd.io/kustomization_v1.json
# Flux Kustomization reconciling the cert-manager app from the px-kaas GitRepository.
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: cert-manager
  namespace: flux-system
spec:
  path: ./kubernetes/apps/cert-manager/cert-manager/app
  # Garbage-collect cluster objects that disappear from Git.
  prune: true
  sourceRef:
    kind: GitRepository
    name: px-kaas
  # Report Ready only once the HelmRelease itself is Ready.
  healthChecks:
    - apiVersion: helm.toolkit.fluxcd.io/v2beta1
      kind: HelmRelease
      name: cert-manager
      namespace: cert-manager
  interval: 15m
  retryInterval: 1m
  timeout: 3m

--------------------------------------------------------------------------------
/kubernetes/apps/cert-manager/kustomization.yaml:
--------------------------------------------------------------------------------
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
# Aggregates the cert-manager namespace (applied first) and the app's
# Flux Kustomization.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # Pre Flux-Kustomizations
  - ./namespace.yaml
  # Flux-Kustomizations
  - ./cert-manager/ks.yaml

--------------------------------------------------------------------------------
/kubernetes/apps/cert-manager/namespace.yaml:
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: Namespace
metadata:
  name: cert-manager
  labels:
    # Prevent Flux from garbage-collecting this namespace on prune.
    kustomize.toolkit.fluxcd.io/prune: disabled

--------------------------------------------------------------------------------
/kubernetes/apps/clusters/cluster-0/ks.yaml:
--------------------------------------------------------------------------------
---
# yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/kustomize.toolkit.fluxcd.io/kustomization_v1.json
# Flux Kustomization deploying the cluster-0 workload-cluster manifests.
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
  name: cluster-0
  namespace: flux-system
spec:
  # Sidero (CAPI providers) and the Proxmox operator must be reconciled first.
  dependsOn:
    - name: sidero
    - name: px-operator
  path: ./kubernetes/apps/clusters/cluster-0/manifests
  prune: true
  sourceRef:
    kind: GitRepository
    name: px-kaas
  interval: 5m
  retryInterval: 1m
  timeout: 3m
  postBuild:
    # Substitute ${...} placeholders in the manifests (e.g. CLUSTER_0_VIP,
    # CACHE_REGISTRY) from the cluster-settings ConfigMap; reconciliation
    # fails if the ConfigMap is missing (optional: false).
    substituteFrom:
      - kind: ConfigMap
        name: cluster-settings
        optional: false
--------------------------------------------------------------------------------
/kubernetes/apps/clusters/cluster-0/manifests/cluster-0.yaml:
--------------------------------------------------------------------------------
# Cluster API manifests for the workload cluster "cluster-0", provisioned by
# the Sidero metal infrastructure provider with Talos control-plane/bootstrap
# providers.
apiVersion: cluster.x-k8s.io/v1beta1
kind: Cluster
metadata:
  name: cluster-0
  namespace: default
spec:
  clusterNetwork:
    pods:
      cidrBlocks:
        - 10.244.0.0/16
    services:
      cidrBlocks:
        - 10.96.0.0/12
  controlPlaneRef:
    apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
    kind: TalosControlPlane
    name: cluster-0-cp
  infrastructureRef:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
    kind: MetalCluster
    name: cluster-0
---
# Sidero infrastructure cluster: the stable API endpoint for cluster-0.
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: MetalCluster
metadata:
  name: cluster-0
  namespace: default
spec:
  controlPlaneEndpoint:
    host: api.cluster.local
    port: 6443
---
# Machine template for control-plane nodes: allocate servers from the
# master-cluster-0 ServerClass (see master-dev-sc.yaml).
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: MetalMachineTemplate
metadata:
  name: cluster-0-cp
  namespace: default
spec:
  template:
    spec:
      serverClassRef:
        apiVersion: metal.sidero.dev/v1alpha1
        kind: ServerClass
        name: master-cluster-0
---
# Talos control plane: 1 replica, Kubernetes v1.27.1 on Talos v1.3.0.
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3
kind: TalosControlPlane
metadata:
  name: cluster-0-cp
  namespace: default
spec:
  controlPlaneConfig:
    controlplane:
      generateType: controlplane
      talosVersion: v1.3.0
  infrastructureTemplate:
    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
    kind: MetalMachineTemplate
    name: cluster-0-cp
  replicas: 1
  version: v1.27.1
---
# Bootstrap config for workers: generate Talos "join" machine configs.
apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
kind: TalosConfigTemplate
metadata:
  name: cluster-0-workers
  namespace: default
spec:
  template:
    spec:
      generateType: join
      talosVersion: v1.3.0
---
# Worker pool; scale workers by bumping "replicas" here (see README "Scaling").
apiVersion: cluster.x-k8s.io/v1beta1
kind: MachineDeployment
metadata:
  name: cluster-0-workers
  namespace: default
spec:
  clusterName: cluster-0
  replicas: 1
  selector:
    # NOTE(review): left null by clusterctl generate — presumably defaulted by
    # the CAPI controller; confirm before tightening.
    matchLabels: null
  template:
    spec:
      bootstrap:
        configRef:
          apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3
          kind: TalosConfigTemplate
          name: cluster-0-workers
      clusterName: cluster-0
      infrastructureRef:
        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
        kind: MetalMachineTemplate
        name: cluster-0-workers
      version: v1.27.1
---
# Machine template for workers: allocate servers from the worker-cluster-0
# ServerClass (see worker-dev-sc.yaml).
apiVersion: infrastructure.cluster.x-k8s.io/v1alpha3
kind: MetalMachineTemplate
metadata:
  name: cluster-0-workers
  namespace: default
spec:
  template:
    spec:
      serverClassRef:
        apiVersion: metal.sidero.dev/v1alpha1
        kind: ServerClass
        name: worker-cluster-0

--------------------------------------------------------------------------------
/kubernetes/apps/clusters/cluster-0/manifests/kustomization.yaml:
--------------------------------------------------------------------------------
---
# yaml-language-server: $schema=https://json.schemastore.org/kustomization
# Workload-cluster resources: the CAPI cluster definition, the Sidero
# ServerClasses, and the Proxmox VMs that back them.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./cluster-0.yaml
  - ./master-dev-sc.yaml
  - ./worker-dev-sc.yaml
  - ./vms.yaml

--------------------------------------------------------------------------------
/kubernetes/apps/clusters/cluster-0/manifests/master-dev-sc.yaml:
--------------------------------------------------------------------------------
# Sidero ServerClass selecting QEMU machines labeled "master-dev=true" for the
# cluster-0 control plane, plus the Talos config patches applied to them.
apiVersion: metal.sidero.dev/v1alpha1
kind: ServerClass
metadata:
  name: master-cluster-0
spec:
  qualifiers:
    cpu:
      - manufacturer: QEMU
        version: pc-i440fx-7.2
    labelSelectors:
      - "master-dev": "true"
  configPatches:
    - op: add
      path: /machine/network/interfaces
      value:
        - interface: eth0
          dhcp: true
          # Shared control-plane VIP; ${CLUSTER_0_VIP} is substituted by Flux
          # from the cluster-settings ConfigMap.
          vip:
            ip: "${CLUSTER_0_VIP}"
        # Link-local address targeted by clusterDNS below (coredns-local from
        # extraManifests binds here).
        - interface: dummy0
          addresses:
            - 169.254.2.53/32
    - op: add
      path: /machine/network/extraHostEntries
      value:
        # Control-plane nodes resolve the API endpoint to themselves.
        - ip: 127.0.0.1
          aliases:
            - api.cluster.local
    - op: add
      path: /machine/network/nameservers
      value:
        - 1.1.1.1
        - 1.0.0.1
    - op: replace
      path: /machine/install
      value:
        disk: /dev/sda
        extraKernelArgs: ['elevator=noop']
    # Replace the default Talos CNI with Cilium from this repo's manifests.
    - op: replace
      path: /cluster/network/cni
      value:
        name: "custom"
        urls:
          - "https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/main/manifests/talos/cilium.yaml"
    # kube-proxy disabled — presumably Cilium provides the replacement;
    # confirm kubeProxyReplacement is enabled in cilium.yaml.
    - op: replace
      path: /cluster/proxy
      value:
        disabled: true
    # Kubelet serving-cert rotation; CSRs are approved by cert-approval.yaml
    # from extraManifests below.
    - op: replace
      path: /machine/kubelet/extraArgs
      value:
        rotate-server-certificates: true
    # Node-local DNS first (dummy0 address), cluster DNS service second.
    - op: replace
      path: /machine/kubelet/clusterDNS
      value:
        - 169.254.2.53
        - 10.96.0.10
    # Pre-create the cilium namespace with privileged pod-security so the CNI
    # workloads are admitted.
    - op: replace
      path: /cluster/inlineManifests
      value:
        - name: cilium
          contents: |-
            apiVersion: v1
            kind: Namespace
            metadata:
              name: cilium
              labels:
                pod-security.kubernetes.io/enforce: "privileged"
    - op: replace
      path: /cluster/extraManifests
      value:
        - https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/main/manifests/talos/cert-approval.yaml
        - https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/main/manifests/talos/coredns-local.yaml
        - https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/main/manifests/talos/metallb-native.yaml
        - https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/main/manifests/talos/metrics-server.yaml
    # Pull all images through the local caching registry proxy
    # (${CACHE_REGISTRY} substituted by Flux).
    - op: replace
      path: /machine/registries/mirrors
      value:
        docker.io:
          endpoints:
            - http://${CACHE_REGISTRY}/v2/proxy-docker.io
          overridePath: true
        ghcr.io:
          endpoints:
            - http://${CACHE_REGISTRY}/v2/proxy-ghcr.io
          overridePath: true
        gcr.io:
          endpoints:
            - http://${CACHE_REGISTRY}/v2/proxy-gcr.io
          overridePath: true
        registry.k8s.io:
          endpoints:
            - http://${CACHE_REGISTRY}/v2/proxy-registry.k8s.io
          overridePath: true
        quay.io:
          endpoints:
            - http://${CACHE_REGISTRY}/v2/proxy-quay.io
          overridePath: true
--------------------------------------------------------------------------------
/kubernetes/apps/clusters/cluster-0/manifests/vms.yaml:
--------------------------------------------------------------------------------
# Proxmox-operator Qemu VMs that PXE-boot against Sidero and become cluster-0
# nodes. boot order puts net0 first so each VM network-boots before disk.
apiVersion: proxmox.xfix.org/v1alpha1
kind: Qemu
metadata:
  name: sidero-master-1
  finalizers:
    - resources-finalizer.proxmox-operator.xfix.org
spec:
  # NOTE(review): Proxmox cluster name as configured in proxmox-operator —
  # confirm "cluster-1" is intended here, since the tags say cluster-0.
  cluster: cluster-1
  #node: crash-lab ### If not set, it is chosen automatically from "pool"
  #vmid: 222 ### If not set, it is assigned automatically
  pool: prod ### Proxmox resource pool to place the VM in
  anti-affinity: "" ### Anti-affinity group: VMs sharing a group are placed on different nodes
  autostart: true
  autostop: true
  cpu:
    type: host
    sockets: 2
    cores: 1
  memory:
    size: 2048
    balloon: 2048
  network:
    net0:
      model: virtio
      #mac: A2:7B:45:48:9C:E6 ### If not set, it is generated automatically
      bridge: vmbr0
      tag: 0 # VLAN tag; set to 0 if no tag is needed
  disk:
    scsi0:
      storage: local-lvm
      size: 9G
  tags:
    - sidero
    - cluster-0
  options:
    ostype: "l26"
    bios: "seabios"
    # Fixed SMBIOS UUID/identity for this VM (see README "Scaling" for
    # customizing or dropping smbios1 on new nodes).
    smbios1: "uuid=f0ac3f32-ee63-11ed-a05b-0242ac120003,manufacturer=MTIz,product=MTIz,version=MTIz,serial=MTIz,sku=MTIz,family=MTIz,base64=1"
    scsihw: "virtio-scsi-pci"
    # Network boot first so Sidero can provision the machine over PXE.
    boot: "order=net0;ide2;scsi0"
    ide2: "none,media=cdrom"
    hotplug: "network,disk,usb"
    tablet: 1
    onboot: 0
    kvm: 1
    agent: "0"
    numa: 1
    protection: 0
---
apiVersion: proxmox.xfix.org/v1alpha1
kind: Qemu
metadata:
  name: sidero-worker-1
  finalizers:
    - resources-finalizer.proxmox-operator.xfix.org
spec:
  # NOTE(review): see the same "cluster" remark on sidero-master-1 above.
  cluster: cluster-1
  #node: crash-lab ### If not set, it is chosen automatically from "pool"
  #vmid: 222 ### If not set, it is assigned automatically
  pool: prod ### Proxmox resource pool to place the VM in
  anti-affinity: "" ### Anti-affinity group: VMs sharing a group are placed on different nodes
  autostart: true
  autostop: true
  cpu:
    type: host
    sockets: 2
    cores: 1
  memory:
    size: 2048
    balloon: 2048
  network:
    net0:
      model: virtio
      #mac: A2:7B:45:48:9C:E6 ### If not set, it is generated automatically
      bridge: vmbr0
      tag: 0 # VLAN tag; set to 0 if no tag is needed
  disk:
    scsi0:
      storage: local-lvm
      size: 9G
  tags:
    - sidero
    - cluster-0
  options:
    ostype: "l26"
    bios: "seabios"
    smbios1: "uuid=fe4fadea-ee63-11ed-a05b-0242ac120003,manufacturer=MTIz,product=MTIz,version=MTIz,serial=MTIz,sku=MTIz,family=MTIz,base64=1"
    scsihw: "virtio-scsi-pci"
    # Network boot first so Sidero can provision the machine over PXE.
    boot: "order=net0;ide2;scsi0"
    ide2: "none,media=cdrom"
    hotplug: "network,disk,usb"
    tablet: 1
    onboot: 0
    kvm: 1
    agent: "0"
    numa: 1
    protection: 0
--------------------------------------------------------------------------------
/kubernetes/apps/clusters/cluster-0/manifests/worker-dev-sc.yaml:
--------------------------------------------------------------------------------
# Sidero ServerClass selecting QEMU machines labeled "worker-dev=true" for the
# cluster-0 worker pool, plus the Talos config patches applied to them.
apiVersion: metal.sidero.dev/v1alpha1
kind: ServerClass
metadata:
  name: worker-cluster-0
spec:
  qualifiers:
    cpu:
      - manufacturer: QEMU
        version: pc-i440fx-7.2
    labelSelectors:
      - "worker-dev": "true"
  configPatches:
    - op: add
      path: /machine/network/interfaces
      value:
        - interface: eth0
          dhcp: true
        # Link-local address targeted by clusterDNS below.
        - interface: dummy0
          addresses:
            - 169.254.2.53/32
    - op: add
      path: /machine/network/extraHostEntries
      value:
        # Workers reach the API endpoint through the control-plane VIP
        # (substituted by Flux from cluster-settings).
        - ip: "${CLUSTER_0_VIP}"
          aliases:
            - api.cluster.local
    - op: add
      path: /machine/network/nameservers
      value:
        - 1.1.1.1
        - 1.0.0.1
    - op: replace
      path: /machine/install
      value:
        disk: /dev/sda
        extraKernelArgs: ['elevator=noop']
    # kube-proxy disabled — same setup as the master ServerClass.
    - op: replace
      path: /cluster/proxy
      value:
        disabled: true
    # Kubelet serving-cert rotation (approved by the cluster's CSR approver).
    - op: replace
      path: /machine/kubelet/extraArgs
      value:
        rotate-server-certificates: true
    # Node-local DNS first (dummy0 address), cluster DNS service second.
    - op: replace
      path: /machine/kubelet/clusterDNS
      value:
        - 169.254.2.53
        - 10.96.0.10
    # Pull all images through the local caching registry proxy.
    - op: replace
      path: /machine/registries/mirrors
      value:
        docker.io:
          endpoints:
            - http://${CACHE_REGISTRY}/v2/proxy-docker.io
          overridePath: true
        ghcr.io:
          endpoints:
            - http://${CACHE_REGISTRY}/v2/proxy-ghcr.io
          overridePath: true
        gcr.io:
          endpoints:
            - http://${CACHE_REGISTRY}/v2/proxy-gcr.io
          overridePath: true
        registry.k8s.io:
          endpoints:
            - http://${CACHE_REGISTRY}/v2/proxy-registry.k8s.io
          overridePath: true
        quay.io:
          endpoints:
            - http://${CACHE_REGISTRY}/v2/proxy-quay.io
          overridePath: true
--------------------------------------------------------------------------------
/kubernetes/apps/clusters/kustomization.yaml:
--------------------------------------------------------------------------------
---
# Aggregates the Flux Kustomizations of all workload clusters.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # Pre Flux-Kustomizations
  # Flux-Kustomizations
  - ./cluster-0/ks.yaml

--------------------------------------------------------------------------------
/kubernetes/apps/dhcp/dhcp.yaml:
--------------------------------------------------------------------------------
---
# Source: talos-dhcp-server/templates/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: dhcp-talos-dhcp-server
  namespace: kube-system
  labels:
    helm.sh/chart: talos-dhcp-server-0.0.9
    app.kubernetes.io/name: talos-dhcp-server
    app.kubernetes.io/instance: dhcp
    app.kubernetes.io/version: "0.0.9"
    app.kubernetes.io/managed-by: Helm
---
# Source: talos-dhcp-server/templates/secret.yaml
# dhcpd.conf for the PXE environment: hands out leases on 10.1.1.0/24 and
# chain-loads iPXE/UEFI clients from the Sidero HTTP endpoint
# (${SIDERO_ENDPOINT} is substituted by Flux).
apiVersion: v1
kind: Secret
metadata:
  name: dhcp-talos-dhcp-server
  namespace: kube-system
type: Opaque
stringData:
  dhcpd.conf: |-
    default-lease-time 600;
    max-lease-time 7200;
    ddns-update-style none;
    authoritative;
    log-facility local7;
    update-conflict-detection true;
    subnet 10.244.0.0 netmask 255.255.0.0 {
    }
    subnet 10.1.1.0 netmask 255.255.255.0 {
      range 10.1.1.50 10.1.1.180;
      option subnet-mask 255.255.255.0;
      option broadcast-address 10.1.1.250;
      option routers 10.1.1.1;
      option domain-name-servers 1.1.1.1, 1.0.0.1;
      option domain-name "weecodelab.nl";
      default-lease-time 600;
      max-lease-time 7200;
    }
    allow bootp;
    allow booting;


    # IP address for PXE-based TFTP methods
    next-server ${SIDERO_ENDPOINT};

    # Configuration for iPXE clients
    class "ipxeclient" {
      match if exists user-class and (option user-class = "iPXE");
      filename "http://${SIDERO_ENDPOINT}:8081/tftp/undionly.kpxe";
    }

    # Configuration for legacy BIOS-based PXE boot
    class "biosclients" {
      match if not exists user-class and substring (option vendor-class-identifier, 15, 5) = "00000";
      filename "undionly.kpxe";
    }

    # Configuration for UEFI-based PXE boot
    class "pxeclients" {
      match if not exists user-class and substring (option vendor-class-identifier, 0, 9) = "PXEClient";
      filename "ipxe.efi";
    }

    # Configuration for UEFI-based HTTP boot
    class "httpclients" {
      match if not exists user-class and substring (option vendor-class-identifier, 0, 10) = "HTTPClient";
      option vendor-class-identifier "HTTPClient";
      filename "http://${SIDERO_ENDPOINT}:8081/tftp/ipxe.efi";
    }
---
# Source: talos-dhcp-server/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dhcp-talos-dhcp-server
  namespace: kube-system
  labels:
    helm.sh/chart: talos-dhcp-server-0.0.9
    app.kubernetes.io/name: talos-dhcp-server
    app.kubernetes.io/instance: dhcp
    app.kubernetes.io/version: "0.0.9"
    app.kubernetes.io/managed-by: Helm
spec:
  replicas: 1
  strategy:
    # Recreate: never run two DHCP server pods concurrently during a rollout.
    type: Recreate
  selector:
    matchLabels:
      app.kubernetes.io/name: talos-dhcp-server
      app.kubernetes.io/instance: dhcp
  template:
    metadata:
      labels:
        app.kubernetes.io/name: talos-dhcp-server
        app.kubernetes.io/instance: dhcp
    spec:
      serviceAccountName: dhcp-talos-dhcp-server
      # Host networking so dhcpd sees DHCP broadcasts on the node's network.
      hostNetwork: true
      containers:
        - name: talos-dhcp-server
          #command: ["sleep", "infinity"]
          # Runs as root with raw-socket capabilities, required for dhcpd.
          securityContext:
            allowPrivilegeEscalation: true
            capabilities:
              add:
                - NET_ADMIN
                - NET_RAW
            runAsGroup: 0
            runAsNonRoot: false
            runAsUser: 0
          image: "crashntech/talos-dhcp-server:0.0.9"
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: /var/lib/dhcp/
              name: dhcpd-leases
            # Mount only dhcpd.conf from the Secret, leaving the rest of
            # /etc/dhcp intact.
            - mountPath: /etc/dhcp/dhcpd.conf
              name: config
              subPath: dhcpd.conf
          ports:
            - name: dhcp
              containerPort: 67
              protocol: UDP
          resources:
            limits:
              cpu: 100m
              memory: 128Mi
            requests:
              cpu: 100m
              memory: 128Mi
      # Tolerate every NoSchedule taint (e.g. control-plane nodes).
      tolerations:
        - effect: NoSchedule
          operator: Exists
      volumes:
        # Lease database is ephemeral; leases are re-learned after restart.
        - name: dhcpd-leases
          emptyDir:
            sizeLimit: 100Mi
        - name: config
          secret:
            secretName: dhcp-talos-dhcp-server

--------------------------------------------------------------------------------
/kubernetes/apps/dhcp/kustomization.yaml:
--------------------------------------------------------------------------------
---
# Deploys the in-cluster DHCP/PXE server used by Sidero provisioning.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  - ./dhcp.yaml

--------------------------------------------------------------------------------
/kubernetes/apps/proxmox/kustomization.yaml:
--------------------------------------------------------------------------------
---
# Aggregates the Proxmox-related Flux Kustomizations.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # Pre Flux-Kustomizations
  # Flux-Kustomizations
  - ./operator/ks.yaml

--------------------------------------------------------------------------------
/kubernetes/apps/proxmox/operator/app/kustomization.yaml:
--------------------------------------------------------------------------------
---
# Proxmox operator app: the operator itself plus the Proxmox CSI plugin.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
  # Pre Flux-Kustomizations
  - ./proxmox-operator.yaml
  - ./proxmox-csi-plugin.yml

--------------------------------------------------------------------------------
/kubernetes/apps/proxmox/operator/app/proxmox-csi-plugin.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Source: proxmox-csi-plugin/templates/namespace.yaml
3 | apiVersion: v1
4 | kind: Namespace
5 | metadata:
6 | name: csi-proxmox
7 | labels:
8 | pod-security.kubernetes.io/enforce: privileged
9 | pod-security.kubernetes.io/audit: baseline
10 | pod-security.kubernetes.io/warn: baseline
11 | ---
12 | # Source: proxmox-csi-plugin/templates/serviceaccount.yaml
13 | apiVersion: v1
14 | kind: ServiceAccount
15 | metadata:
16 | name: proxmox-csi-plugin-controller
17 | namespace: csi-proxmox
18 | labels:
19 | helm.sh/chart: proxmox-csi-plugin-0.1.0
20 | app.kubernetes.io/name: proxmox-csi-plugin
21 | app.kubernetes.io/instance: proxmox-csi-plugin
22 | app.kubernetes.io/version: "v0.1.0"
23 | app.kubernetes.io/managed-by: Helm
24 | ---
25 | # Source: proxmox-csi-plugin/templates/serviceaccount.yaml
26 | apiVersion: v1
27 | kind: ServiceAccount
28 | metadata:
29 | name: proxmox-csi-plugin-node
30 | namespace: csi-proxmox
31 | labels:
32 | helm.sh/chart: proxmox-csi-plugin-0.1.0
33 | app.kubernetes.io/name: proxmox-csi-plugin
34 | app.kubernetes.io/instance: proxmox-csi-plugin
35 | app.kubernetes.io/version: "v0.1.0"
36 | app.kubernetes.io/managed-by: Helm
37 | ---
38 | # Source: proxmox-csi-plugin/templates/storageclass.yaml
39 | apiVersion: storage.k8s.io/v1
40 | kind: StorageClass
41 | metadata:
42 | name: proxmox-data-xfs
43 | provisioner: csi.proxmox.sinextra.dev
44 | allowVolumeExpansion: true
45 | volumeBindingMode: WaitForFirstConsumer
46 | reclaimPolicy: Delete
47 | parameters:
48 | csi.storage.k8s.io/fstype: xfs
49 | storage: "${STORAGE_CLASS_XFS}"
50 | ---
51 | # Source: proxmox-csi-plugin/templates/storageclass.yaml
52 | apiVersion: storage.k8s.io/v1
53 | kind: StorageClass
54 | metadata:
55 | name: proxmox-data
56 | provisioner: csi.proxmox.sinextra.dev
57 | allowVolumeExpansion: true
58 | volumeBindingMode: WaitForFirstConsumer
59 | reclaimPolicy: Delete
60 | parameters:
61 | csi.storage.k8s.io/fstype: ext4
62 | storage: "${STORAGE_CLASS}"
63 | ssd: "true"
64 | ---
65 | # Source: proxmox-csi-plugin/templates/controller-clusterrole.yaml
66 | apiVersion: rbac.authorization.k8s.io/v1
67 | kind: ClusterRole
68 | metadata:
69 | name: proxmox-csi-plugin-controller
70 | namespace: csi-proxmox
71 | labels:
72 | helm.sh/chart: proxmox-csi-plugin-0.1.0
73 | app.kubernetes.io/name: proxmox-csi-plugin
74 | app.kubernetes.io/instance: proxmox-csi-plugin
75 | app.kubernetes.io/version: "v0.1.0"
76 | app.kubernetes.io/managed-by: Helm
77 | rules:
78 | - apiGroups: [""]
79 | resources: ["persistentvolumes"]
80 | verbs: ["get", "list", "watch", "create", "patch", "delete"]
81 | - apiGroups: [""]
82 | resources: ["persistentvolumeclaims"]
83 | verbs: ["get", "list", "watch", "update"]
84 | - apiGroups: [""]
85 | resources: ["persistentvolumeclaims/status"]
86 | verbs: ["patch"]
87 | - apiGroups: [""]
88 | resources: ["events"]
89 | verbs: ["get","list", "watch", "create", "update", "patch"]
90 |
91 | - apiGroups: ["storage.k8s.io"]
92 | resources: ["storageclasses"]
93 | verbs: ["get", "list", "watch"]
94 | - apiGroups: ["storage.k8s.io"]
95 | resources: ["csinodes"]
96 | verbs: ["get", "list", "watch"]
97 | - apiGroups: [""]
98 | resources: ["nodes"]
99 | verbs: ["get", "list", "watch"]
100 |
101 | - apiGroups: ["storage.k8s.io"]
102 | resources: ["volumeattachments"]
103 | verbs: ["get", "list", "watch", "patch"]
104 | - apiGroups: ["storage.k8s.io"]
105 | resources: ["volumeattachments/status"]
106 | verbs: ["patch"]
107 | ---
108 | # Source: proxmox-csi-plugin/templates/node-clusterrole.yaml
109 | apiVersion: rbac.authorization.k8s.io/v1
110 | kind: ClusterRole
111 | metadata:
112 | name: proxmox-csi-plugin-node
113 | namespace: csi-proxmox
114 | labels:
115 | helm.sh/chart: proxmox-csi-plugin-0.1.0
116 | app.kubernetes.io/name: proxmox-csi-plugin
117 | app.kubernetes.io/instance: proxmox-csi-plugin
118 | app.kubernetes.io/version: "v0.1.0"
119 | app.kubernetes.io/managed-by: Helm
120 | rules:
121 | - apiGroups:
122 | - ""
123 | resources:
124 | - nodes
125 | verbs:
126 | - get
127 | ---
128 | # Source: proxmox-csi-plugin/templates/controller-rolebinding.yaml
129 | apiVersion: rbac.authorization.k8s.io/v1
130 | kind: ClusterRoleBinding
131 | metadata:
132 | name: proxmox-csi-plugin-controller
133 | roleRef:
134 | apiGroup: rbac.authorization.k8s.io
135 | kind: ClusterRole
136 | name: proxmox-csi-plugin-controller
137 | subjects:
138 | - kind: ServiceAccount
139 | name: proxmox-csi-plugin-controller
140 | namespace: csi-proxmox
141 | ---
142 | # Source: proxmox-csi-plugin/templates/node-rolebinding.yaml
143 | apiVersion: rbac.authorization.k8s.io/v1
144 | kind: ClusterRoleBinding
145 | metadata:
146 | name: proxmox-csi-plugin-node
147 | roleRef:
148 | apiGroup: rbac.authorization.k8s.io
149 | kind: ClusterRole
150 | name: proxmox-csi-plugin-node
151 | subjects:
152 | - kind: ServiceAccount
153 | name: proxmox-csi-plugin-node
154 | namespace: csi-proxmox
155 | ---
156 | # Source: proxmox-csi-plugin/templates/controller-role.yaml
157 | apiVersion: rbac.authorization.k8s.io/v1
158 | kind: Role
159 | metadata:
160 | name: proxmox-csi-plugin-controller
161 | namespace: csi-proxmox
162 | labels:
163 | helm.sh/chart: proxmox-csi-plugin-0.1.0
164 | app.kubernetes.io/name: proxmox-csi-plugin
165 | app.kubernetes.io/instance: proxmox-csi-plugin
166 | app.kubernetes.io/version: "v0.1.0"
167 | app.kubernetes.io/managed-by: Helm
168 | rules:
169 | - apiGroups: ["coordination.k8s.io"]
170 | resources: ["leases"]
171 | verbs: ["get", "watch", "list", "delete", "update", "create"]
172 |
173 | - apiGroups: ["storage.k8s.io"]
174 | resources: ["csistoragecapacities"]
175 | verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
176 | - apiGroups: [""]
177 | resources: ["pods"]
178 | verbs: ["get"]
179 | - apiGroups: ["apps"]
180 | resources: ["replicasets"]
181 | verbs: ["get"]
182 | ---
183 | # Source: proxmox-csi-plugin/templates/controller-rolebinding.yaml
184 | apiVersion: rbac.authorization.k8s.io/v1
185 | kind: RoleBinding
186 | metadata:
187 | name: proxmox-csi-plugin-controller
188 | namespace: csi-proxmox
189 | roleRef:
190 | apiGroup: rbac.authorization.k8s.io
191 | kind: Role
192 | name: proxmox-csi-plugin-controller
193 | subjects:
194 | - kind: ServiceAccount
195 | name: proxmox-csi-plugin-controller
196 | namespace: csi-proxmox
197 | ---
198 | # Source: proxmox-csi-plugin/templates/node-deployment.yaml
199 | apiVersion: apps/v1
200 | kind: DaemonSet
201 | metadata:
202 | name: proxmox-csi-plugin-node
203 | namespace: csi-proxmox
204 | labels:
205 | helm.sh/chart: proxmox-csi-plugin-0.1.0
206 | app.kubernetes.io/name: proxmox-csi-plugin
207 | app.kubernetes.io/instance: proxmox-csi-plugin
208 | app.kubernetes.io/version: "v0.1.0"
209 | app.kubernetes.io/managed-by: Helm
210 | spec:
211 | updateStrategy:
212 | type: RollingUpdate
213 | selector:
214 | matchLabels:
215 | app.kubernetes.io/name: proxmox-csi-plugin
216 | app.kubernetes.io/instance: proxmox-csi-plugin
217 | app.kubernetes.io/component: node
218 | template:
219 | metadata:
220 | labels:
221 | app.kubernetes.io/name: proxmox-csi-plugin
222 | app.kubernetes.io/instance: proxmox-csi-plugin
223 | app.kubernetes.io/component: node
224 | spec:
225 | priorityClassName: system-node-critical
226 | enableServiceLinks: false
227 | serviceAccountName: proxmox-csi-plugin-node
228 | securityContext: {}
229 | containers:
230 | - name: proxmox-csi-plugin-node
231 | securityContext:
232 | runAsUser: 0
233 | runAsGroup: 0
234 | privileged: true
235 | image: "ghcr.io/sergelogvinov/proxmox-csi-node:edge"
236 | imagePullPolicy: Always
237 | args:
238 | - "-v=5"
239 | - "--csi-address=unix:///csi/csi.sock"
240 | - "--node-id=$(NODE_NAME)"
241 | env:
242 | - name: NODE_NAME
243 | valueFrom:
244 | fieldRef:
245 | fieldPath: spec.nodeName
246 | resources:
247 | {}
248 | volumeMounts:
249 | - name: socket
250 | mountPath: /csi
251 | - name: kubelet
252 | mountPath: /var/lib/kubelet
253 | mountPropagation: Bidirectional
254 | - name: dev
255 | mountPath: /dev
256 | - name: sys
257 | mountPath: /sys
258 | - name: csi-node-driver-registrar
259 | securityContext:
260 | capabilities:
261 | drop:
262 | - ALL
263 | # readOnlyRootFilesystem: true
264 | seccompProfile:
265 | type: RuntimeDefault
266 | image: "registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.7.0"
267 | imagePullPolicy: IfNotPresent
268 | args:
269 | - "-v=5"
270 | - "--csi-address=unix:///csi/csi.sock"
271 | - "--kubelet-registration-path=/var/lib/kubelet/plugins/csi.proxmox.sinextra.dev/csi.sock"
272 | volumeMounts:
273 | - name: socket
274 | mountPath: /csi
275 | - name: registration
276 | mountPath: /registration
277 | resources:
278 | requests:
279 | cpu: 10m
280 | memory: 16Mi
281 | - name: liveness-probe
282 | securityContext:
283 | capabilities:
284 | drop:
285 | - ALL
286 | readOnlyRootFilesystem: true
287 | seccompProfile:
288 | type: RuntimeDefault
289 | image: "registry.k8s.io/sig-storage/livenessprobe:v2.9.0"
290 | imagePullPolicy: IfNotPresent
291 | args:
292 | - "-v=5"
293 | - "--csi-address=unix:///csi/csi.sock"
294 | volumeMounts:
295 | - name: socket
296 | mountPath: /csi
297 | resources:
298 | requests:
299 | cpu: 10m
300 | memory: 16Mi
301 | volumes:
302 | - name: socket
303 | hostPath:
304 | path: /var/lib/kubelet/plugins/csi.proxmox.sinextra.dev/
305 | type: DirectoryOrCreate
306 | - name: registration
307 | hostPath:
308 | path: /var/lib/kubelet/plugins_registry/
309 | type: Directory
310 | - name: kubelet
311 | hostPath:
312 | path: /var/lib/kubelet
313 | type: Directory
314 | - name: dev
315 | hostPath:
316 | path: /dev
317 | type: Directory
318 | - name: sys
319 | hostPath:
320 | path: /sys
321 | type: Directory
322 | nodeSelector:
323 | node.cloudprovider.kubernetes.io/platform: nocloud
324 | tolerations:
325 | - effect: NoSchedule
326 | key: node.kubernetes.io/unschedulable
327 | operator: Exists
328 | - effect: NoSchedule
329 | key: node.kubernetes.io/disk-pressure
330 | operator: Exists
331 | ---
332 | # Source: proxmox-csi-plugin/templates/controller-deployment.yaml
333 | apiVersion: apps/v1
334 | kind: Deployment
335 | metadata:
336 | name: proxmox-csi-plugin-controller
337 | namespace: csi-proxmox
338 | labels:
339 | helm.sh/chart: proxmox-csi-plugin-0.1.0
340 | app.kubernetes.io/name: proxmox-csi-plugin
341 | app.kubernetes.io/instance: proxmox-csi-plugin
342 | app.kubernetes.io/version: "v0.1.0"
343 | app.kubernetes.io/managed-by: Helm
344 | spec:
345 | replicas: 1
346 | strategy:
347 | type: RollingUpdate
348 | rollingUpdate:
349 | maxUnavailable: 1
350 | selector:
351 | matchLabels:
352 | app.kubernetes.io/name: proxmox-csi-plugin
353 | app.kubernetes.io/instance: proxmox-csi-plugin
354 | app.kubernetes.io/component: controller
355 | template:
356 | metadata:
357 | labels:
358 | app.kubernetes.io/name: proxmox-csi-plugin
359 | app.kubernetes.io/instance: proxmox-csi-plugin
360 | app.kubernetes.io/component: controller
361 | spec:
362 | priorityClassName: system-cluster-critical
363 | enableServiceLinks: false
364 | serviceAccountName: proxmox-csi-plugin-controller
365 | securityContext:
366 | fsGroup: 65532
367 | fsGroupChangePolicy: OnRootMismatch
368 | runAsGroup: 65532
369 | runAsNonRoot: true
370 | runAsUser: 65532
371 | containers:
372 | - name: proxmox-csi-plugin-controller
373 | securityContext:
374 | capabilities:
375 | drop:
376 | - ALL
377 | readOnlyRootFilesystem: true
378 | seccompProfile:
379 | type: RuntimeDefault
380 | image: "ghcr.io/sergelogvinov/proxmox-csi-controller:edge"
381 | imagePullPolicy: Always
382 | args:
383 | - "-v=5"
384 | - "--csi-address=unix:///csi/csi.sock"
385 | - "--cloud-config=/etc/proxmox/config.yaml"
386 | resources:
387 | requests:
388 | cpu: 10m
389 | memory: 16Mi
390 | volumeMounts:
391 | - name: socket-dir
392 | mountPath: /csi
393 | - name: cloud-config
394 | mountPath: /etc/proxmox/
395 | - name: csi-attacher
396 | securityContext:
397 | capabilities:
398 | drop:
399 | - ALL
400 | readOnlyRootFilesystem: true
401 | seccompProfile:
402 | type: RuntimeDefault
403 | image: "registry.k8s.io/sig-storage/csi-attacher:v4.2.0"
404 | imagePullPolicy: IfNotPresent
405 | args:
406 | - "-v=5"
407 | - "--csi-address=unix:///csi/csi.sock"
408 | - "--timeout=3m"
409 | - "--leader-election"
410 | - "--default-fstype=ext4"
411 | volumeMounts:
412 | - name: socket-dir
413 | mountPath: /csi
414 | resources:
415 | requests:
416 | cpu: 10m
417 | memory: 16Mi
418 | - name: csi-provisioner
419 | securityContext:
420 | capabilities:
421 | drop:
422 | - ALL
423 | readOnlyRootFilesystem: true
424 | seccompProfile:
425 | type: RuntimeDefault
426 | image: "registry.k8s.io/sig-storage/csi-provisioner:v3.4.0"
427 | imagePullPolicy: IfNotPresent
428 | args:
429 | - "-v=5"
430 | - "--csi-address=unix:///csi/csi.sock"
431 | - "--timeout=3m"
432 | - "--leader-election"
433 | - "--default-fstype=ext4"
434 | - "--feature-gates=Topology=True"
435 | - "--enable-capacity"
436 | - "--capacity-ownerref-level=2"
437 | env:
438 | - name: NAMESPACE
439 | valueFrom:
440 | fieldRef:
441 | fieldPath: metadata.namespace
442 | - name: POD_NAME
443 | valueFrom:
444 | fieldRef:
445 | fieldPath: metadata.name
446 | volumeMounts:
447 | - name: socket-dir
448 | mountPath: /csi
449 | resources:
450 | requests:
451 | cpu: 10m
452 | memory: 16Mi
453 | - name: csi-resizer
454 | securityContext:
455 | capabilities:
456 | drop:
457 | - ALL
458 | readOnlyRootFilesystem: true
459 | seccompProfile:
460 | type: RuntimeDefault
461 | image: "registry.k8s.io/sig-storage/csi-resizer:v1.7.0"
462 | imagePullPolicy: IfNotPresent
463 | args:
464 | - "-v=5"
465 | - "--csi-address=unix:///csi/csi.sock"
466 | - "--timeout=3m"
467 | - "--handle-volume-inuse-error=false"
468 | - "--leader-election"
469 | volumeMounts:
470 | - name: socket-dir
471 | mountPath: /csi
472 | resources:
473 | requests:
474 | cpu: 10m
475 | memory: 16Mi
476 |       - name: liveness-probe # NOTE(review): unlike the sibling containers, this one sets no securityContext (no dropped capabilities / readOnlyRootFilesystem / seccompProfile) — confirm intentional
477 | image: "registry.k8s.io/sig-storage/livenessprobe:v2.9.0"
478 | imagePullPolicy: IfNotPresent
479 | args:
480 | - "-v=5"
481 | - "--csi-address=unix:///csi/csi.sock"
482 | volumeMounts:
483 | - name: socket-dir
484 | mountPath: /csi
485 | resources:
486 | requests:
487 | cpu: 10m
488 | memory: 16Mi
489 | volumes:
490 | - name: socket-dir
491 | emptyDir: {}
492 | - name: cloud-config
493 | secret:
494 | secretName: proxmox-csi-plugin
495 | nodeSelector:
496 | node-role.kubernetes.io/control-plane: ""
497 | tolerations:
498 | - effect: NoSchedule
499 | key: node-role.kubernetes.io/control-plane
500 | ---
501 | # Source: proxmox-csi-plugin/templates/csidriver.yaml
502 | apiVersion: storage.k8s.io/v1
503 | kind: CSIDriver
504 | metadata:
505 | name: csi.proxmox.sinextra.dev
506 | spec:
507 | attachRequired: true
508 | podInfoOnMount: true
509 | storageCapacity: true
510 | volumeLifecycleModes:
511 | - Persistent
512 |
--------------------------------------------------------------------------------
/kubernetes/apps/proxmox/operator/app/proxmox-operator.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # Source: proxmox-operator/templates/serviceaccount.yaml
3 | apiVersion: v1
4 | kind: ServiceAccount
5 | metadata:
6 | name: proxmox-operator
7 | namespace: kube-system
8 | labels:
9 | helm.sh/chart: proxmox-operator-0.1.1
10 | app.kubernetes.io/name: proxmox-operator
11 | app.kubernetes.io/instance: proxmox-operator
12 | app.kubernetes.io/version: "0.1.1"
13 | app.kubernetes.io/managed-by: Helm
14 | ---
15 | # Source: proxmox-operator/templates/crds.yaml
16 | kind: CustomResourceDefinition
17 | apiVersion: apiextensions.k8s.io/v1
18 | metadata:
19 | name: qemu.proxmox.xfix.org
20 | labels:
21 | app: proxmox-operator
22 | spec:
23 | group: proxmox.xfix.org
24 | names:
25 | plural: qemu
26 | singular: qemu
27 | kind: Qemu
28 | listKind: QemuList
29 | scope: Cluster
30 | versions:
31 | - name: v1alpha1
32 | served: true
33 | storage: true
34 | schema:
35 | openAPIV3Schema:
36 |           description: Qemu is the Schema for the qemu API.
37 | type: object
38 | properties:
39 | apiVersion:
40 | type: string
41 | kind:
42 | type: string
43 | metadata:
44 | type: object
45 | spec:
46 |               description: QemuSpec defines the desired state of Qemu.
47 | type: object
48 | required:
49 | - autostart
50 | - autostop
51 | - cpu
52 | - memory
53 | properties:
54 | autostart:
55 | type: boolean
56 | autostop:
57 | type: boolean
58 | cluster:
59 | type: string
60 | node:
61 | type: string
62 | pool:
63 | type: string
64 | anti-affinity:
65 | type: string
66 | vmid:
67 | type: integer
68 | clone:
69 | type: string
70 | cpu:
71 | type: object
72 | properties:
73 | type:
74 | type: string
75 | sockets:
76 | type: integer
77 | cores:
78 | type: integer
79 | memory:
80 | type: object
81 | properties:
82 | size:
83 | type: integer
84 | balloon:
85 | type: integer
86 | network:
87 | type: object
88 | x-kubernetes-preserve-unknown-fields: true
89 | disk:
90 | type: object
91 | x-kubernetes-preserve-unknown-fields: true
92 | tags:
93 | type: array
94 | items:
95 | type: string
96 | options:
97 | x-kubernetes-preserve-unknown-fields: true
98 | type: object
99 | description: Qemu devices
100 | status:
101 | type: object
102 | properties:
103 | status:
104 | type: string
105 | power:
106 | type: string
107 | cluster:
108 | type: string
109 | node:
110 | type: string
111 | vmid:
112 | type: integer
113 | net:
114 | type: array
115 | items:
116 | type: object
117 | properties:
118 | name:
119 | type: string
120 | mac:
121 | type: string
122 | subresources:
123 | status: {}
124 | additionalPrinterColumns:
125 | - name: Status
126 | type: string
127 | jsonPath: .status.status
128 | - name: Power
129 | type: string
130 | jsonPath: .status.power
131 | - name: Cluster
132 | type: string
133 | jsonPath: .status.cluster
134 | - name: Node
135 | type: string
136 | jsonPath: .status.node
137 | - name: VMID
138 | type: integer
139 | jsonPath: .status.vmid
140 | conversion:
141 | strategy: None
142 | ---
143 | # Source: proxmox-operator/templates/serviceaccount.yaml
144 | kind: ClusterRole
145 | apiVersion: rbac.authorization.k8s.io/v1
146 | metadata:
147 | name: proxmox-operator
148 | rules:
149 | - verbs:
150 | - '*'
151 | apiGroups:
152 | - proxmox.xfix.org
153 | resources:
154 | - '*'
155 | ---
156 | # Source: proxmox-operator/templates/serviceaccount.yaml
157 | kind: ClusterRoleBinding
158 | apiVersion: rbac.authorization.k8s.io/v1
159 | metadata:
160 | name: proxmox-operator
161 | subjects:
162 | - kind: ServiceAccount
163 | name: proxmox-operator
164 | namespace: kube-system
165 | roleRef:
166 | apiGroup: rbac.authorization.k8s.io
167 | kind: ClusterRole
168 | name: proxmox-operator
169 | ---
170 | # Source: proxmox-operator/templates/deployment.yaml
171 | apiVersion: apps/v1
172 | kind: Deployment
173 | metadata:
174 | name: proxmox-operator
175 | namespace: kube-system
176 | labels:
177 | helm.sh/chart: proxmox-operator-0.1.1
178 | app.kubernetes.io/name: proxmox-operator
179 | app.kubernetes.io/instance: proxmox-operator
180 | app.kubernetes.io/version: "0.1.1"
181 | app.kubernetes.io/managed-by: Helm
182 | spec:
183 | replicas: 1
184 | selector:
185 | matchLabels:
186 | app.kubernetes.io/name: proxmox-operator
187 | app.kubernetes.io/instance: proxmox-operator
188 | template:
189 | metadata:
190 | labels:
191 | app.kubernetes.io/name: proxmox-operator
192 | app.kubernetes.io/instance: proxmox-operator
193 | spec:
194 | serviceAccountName: proxmox-operator
195 | securityContext:
196 | {}
197 | containers:
198 | - name: proxmox-operator
199 | securityContext:
200 | {}
201 | image: "crashntech/proxmox-operator:0.1.2"
202 | imagePullPolicy: IfNotPresent
203 | resources:
204 | limits:
205 | cpu: 100m
206 | memory: 128Mi
207 | requests:
208 | cpu: 100m
209 | memory: 128Mi
210 | volumeMounts:
211 | - name: config
212 | mountPath: /app/config.yaml
213 | subPath: config.yaml
214 | volumes:
215 | - name: config
216 | secret:
217 | secretName: proxmox-operator-creds
218 |
--------------------------------------------------------------------------------
/kubernetes/apps/proxmox/operator/ks.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/kustomize.toolkit.fluxcd.io/kustomization_v1.json
3 | apiVersion: kustomize.toolkit.fluxcd.io/v1
4 | kind: Kustomization
5 | metadata:
6 | name: px-operator
7 | namespace: flux-system
8 | spec:
9 | path: ./kubernetes/apps/proxmox/operator/app
10 | prune: true
11 | sourceRef:
12 | kind: GitRepository
13 | name: px-kaas
14 | interval: 5m
15 | retryInterval: 1m
16 | timeout: 3m
17 | postBuild:
18 | substituteFrom:
19 | - kind: ConfigMap
20 | name: cluster-settings
21 | optional: false
--------------------------------------------------------------------------------
/kubernetes/apps/sidero/capi/app/bootstrap.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | labels:
5 | cluster.x-k8s.io/provider: bootstrap-talos
6 | clusterctl.cluster.x-k8s.io: ""
7 | name: cabpt-system
8 | ---
9 | apiVersion: apiextensions.k8s.io/v1
10 | kind: CustomResourceDefinition
11 | metadata:
12 | annotations:
13 | cert-manager.io/inject-ca-from: cabpt-system/cabpt-serving-cert
14 | controller-gen.kubebuilder.io/version: v0.11.3
15 | creationTimestamp: null
16 | labels:
17 | cluster.x-k8s.io/provider: bootstrap-talos
18 | cluster.x-k8s.io/v1alpha3: v1alpha3
19 | cluster.x-k8s.io/v1alpha4: v1alpha3
20 | cluster.x-k8s.io/v1beta1: v1alpha3
21 | clusterctl.cluster.x-k8s.io: ""
22 | name: talosconfigs.bootstrap.cluster.x-k8s.io
23 | spec:
24 | conversion:
25 | strategy: Webhook
26 | webhook:
27 | clientConfig:
28 | caBundle: Cg==
29 | service:
30 | name: cabpt-webhook-service
31 | namespace: cabpt-system
32 | path: /convert
33 | conversionReviewVersions:
34 | - v1
35 | - v1beta1
36 | group: bootstrap.cluster.x-k8s.io
37 | names:
38 | categories:
39 | - cluster-api
40 | kind: TalosConfig
41 | listKind: TalosConfigList
42 | plural: talosconfigs
43 | singular: talosconfig
44 | scope: Namespaced
45 | versions:
46 | - name: v1alpha2
47 | schema:
48 | openAPIV3Schema:
49 | description: TalosConfig is the Schema for the talosconfigs API
50 | properties:
51 | apiVersion:
52 | description: 'APIVersion defines the versioned schema of this representation
53 | of an object. Servers should convert recognized schemas to the latest
54 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
55 | type: string
56 | kind:
57 | description: 'Kind is a string value representing the REST resource this
58 | object represents. Servers may infer this from the endpoint the client
59 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
60 | type: string
61 | metadata:
62 | type: object
63 | spec:
64 | description: TalosConfigSpec defines the desired state of TalosConfig
65 | properties:
66 | data:
67 | type: string
68 | generateType:
69 | type: string
70 | required:
71 | - generateType
72 | type: object
73 | status:
74 | description: TalosConfigStatus defines the observed state of TalosConfig
75 | properties:
76 | bootstrapData:
77 | description: BootstrapData will be a slice of bootstrap data
78 | format: byte
79 | type: string
80 | errorMessage:
81 | description: ErrorMessage will be set on non-retryable errors
82 | type: string
83 | errorReason:
84 | description: ErrorReason will be set on non-retryable errors
85 | type: string
86 | ready:
87 | description: Ready indicates the BootstrapData field is ready to be
88 | consumed
89 | type: boolean
90 | talosConfig:
91 | description: Talos config will be a string containing the config for
92 | download
93 | type: string
94 | type: object
95 | type: object
96 | served: true
97 | storage: false
98 | subresources:
99 | status: {}
100 | - name: v1alpha3
101 | schema:
102 | openAPIV3Schema:
103 | description: TalosConfig is the Schema for the talosconfigs API
104 | properties:
105 | apiVersion:
106 | description: 'APIVersion defines the versioned schema of this representation
107 | of an object. Servers should convert recognized schemas to the latest
108 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
109 | type: string
110 | kind:
111 | description: 'Kind is a string value representing the REST resource this
112 | object represents. Servers may infer this from the endpoint the client
113 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
114 | type: string
115 | metadata:
116 | type: object
117 | spec:
118 | description: TalosConfigSpec defines the desired state of TalosConfig
119 | properties:
120 | configPatches:
121 | items:
122 | properties:
123 | op:
124 | type: string
125 | path:
126 | type: string
127 | value:
128 | x-kubernetes-preserve-unknown-fields: true
129 | required:
130 | - op
131 | - path
132 | type: object
133 | type: array
134 | data:
135 | type: string
136 | generateType:
137 | type: string
138 | hostname:
139 | description: Set hostname in the machine configuration to some value.
140 | properties:
141 | source:
142 | description: "Source of the hostname. \n Allowed values: \"MachineName\"
143 | (use linked Machine's Name)."
144 | type: string
145 | type: object
146 | talosVersion:
147 | type: string
148 | required:
149 | - generateType
150 | type: object
151 | status:
152 | description: TalosConfigStatus defines the observed state of TalosConfig
153 | properties:
154 | conditions:
155 | description: Conditions defines current service state of the TalosConfig.
156 | items:
157 | description: Condition defines an observation of a Cluster API resource
158 | operational state.
159 | properties:
160 | lastTransitionTime:
161 | description: Last time the condition transitioned from one status
162 | to another. This should be when the underlying condition changed.
163 | If that is not known, then using the time when the API field
164 | changed is acceptable.
165 | format: date-time
166 | type: string
167 | message:
168 | description: A human readable message indicating details about
169 | the transition. This field may be empty.
170 | type: string
171 | reason:
172 | description: The reason for the condition's last transition
173 | in CamelCase. The specific API may choose whether or not this
174 | field is considered a guaranteed API. This field may not be
175 | empty.
176 | type: string
177 | severity:
178 | description: Severity provides an explicit classification of
179 | Reason code, so the users or machines can immediately understand
180 | the current situation and act accordingly. The Severity field
181 | MUST be set only when Status=False.
182 | type: string
183 | status:
184 | description: Status of the condition, one of True, False, Unknown.
185 | type: string
186 | type:
187 | description: Type of condition in CamelCase or in foo.example.com/CamelCase.
188 | Many .condition.type values are consistent across resources
189 | like Available, but because arbitrary conditions can be useful
190 | (see .node.status.conditions), the ability to deconflict is
191 | important.
192 | type: string
193 | required:
194 | - lastTransitionTime
195 | - status
196 | - type
197 | type: object
198 | type: array
199 | dataSecretName:
200 | description: DataSecretName is the name of the secret that stores
201 | the bootstrap data script.
202 | type: string
203 | failureMessage:
204 | description: FailureMessage will be set on non-retryable errors
205 | type: string
206 | failureReason:
207 | description: FailureReason will be set on non-retryable errors
208 | type: string
209 | observedGeneration:
210 | description: ObservedGeneration is the latest generation observed
211 | by the controller.
212 | format: int64
213 | type: integer
214 | ready:
215 | description: Ready indicates the BootstrapData field is ready to be
216 | consumed
217 | type: boolean
218 | talosConfig:
219 | description: "Talos config will be a string containing the config
220 |                 for download. \n Deprecated: please use `<cluster>-talosconfig`
221 | secret."
222 | type: string
223 | type: object
224 | type: object
225 | served: true
226 | storage: true
227 | subresources:
228 | status: {}
229 | status:
230 | acceptedNames:
231 | kind: ""
232 | plural: ""
233 | conditions: null
234 | storedVersions: null
235 | ---
236 | apiVersion: apiextensions.k8s.io/v1
237 | kind: CustomResourceDefinition
238 | metadata:
239 | annotations:
240 | cert-manager.io/inject-ca-from: cabpt-system/cabpt-serving-cert
241 | controller-gen.kubebuilder.io/version: v0.11.3
242 | creationTimestamp: null
243 | labels:
244 | cluster.x-k8s.io/provider: bootstrap-talos
245 | cluster.x-k8s.io/v1alpha3: v1alpha3
246 | cluster.x-k8s.io/v1alpha4: v1alpha3
247 | cluster.x-k8s.io/v1beta1: v1alpha3
248 | clusterctl.cluster.x-k8s.io: ""
249 | name: talosconfigtemplates.bootstrap.cluster.x-k8s.io
250 | spec:
251 | conversion:
252 | strategy: Webhook
253 | webhook:
254 | clientConfig:
255 | caBundle: Cg==
256 | service:
257 | name: cabpt-webhook-service
258 | namespace: cabpt-system
259 | path: /convert
260 | conversionReviewVersions:
261 | - v1
262 | - v1beta1
263 | group: bootstrap.cluster.x-k8s.io
264 | names:
265 | categories:
266 | - cluster-api
267 | kind: TalosConfigTemplate
268 | listKind: TalosConfigTemplateList
269 | plural: talosconfigtemplates
270 | singular: talosconfigtemplate
271 | scope: Namespaced
272 | versions:
273 | - name: v1alpha2
274 | schema:
275 | openAPIV3Schema:
276 | description: TalosConfigTemplate is the Schema for the talosconfigtemplates
277 | API
278 | properties:
279 | apiVersion:
280 | description: 'APIVersion defines the versioned schema of this representation
281 | of an object. Servers should convert recognized schemas to the latest
282 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
283 | type: string
284 | kind:
285 | description: 'Kind is a string value representing the REST resource this
286 | object represents. Servers may infer this from the endpoint the client
287 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
288 | type: string
289 | metadata:
290 | type: object
291 | spec:
292 | description: TalosConfigTemplateSpec defines the desired state of TalosConfigTemplate
293 | properties:
294 | template:
295 | description: TalosConfigTemplateResource defines the Template structure
296 | properties:
297 | spec:
298 | description: TalosConfigSpec defines the desired state of TalosConfig
299 | properties:
300 | data:
301 | type: string
302 | generateType:
303 | type: string
304 | required:
305 | - generateType
306 | type: object
307 | type: object
308 | required:
309 | - template
310 | type: object
311 | type: object
312 | served: true
313 | storage: false
314 | - name: v1alpha3
315 | schema:
316 | openAPIV3Schema:
317 | description: TalosConfigTemplate is the Schema for the talosconfigtemplates
318 | API
319 | properties:
320 | apiVersion:
321 | description: 'APIVersion defines the versioned schema of this representation
322 | of an object. Servers should convert recognized schemas to the latest
323 | internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
324 | type: string
325 | kind:
326 | description: 'Kind is a string value representing the REST resource this
327 | object represents. Servers may infer this from the endpoint the client
328 | submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
329 | type: string
330 | metadata:
331 | type: object
332 | spec:
333 | description: TalosConfigTemplateSpec defines the desired state of TalosConfigTemplate
334 | properties:
335 | template:
336 | description: TalosConfigTemplateResource defines the Template structure
337 | properties:
338 | spec:
339 | description: TalosConfigSpec defines the desired state of TalosConfig
340 | properties:
341 | configPatches:
342 | items:
343 | properties:
344 | op:
345 | type: string
346 | path:
347 | type: string
348 | value:
349 | x-kubernetes-preserve-unknown-fields: true
350 | required:
351 | - op
352 | - path
353 | type: object
354 | type: array
355 | data:
356 | type: string
357 | generateType:
358 | type: string
359 | hostname:
360 | description: Set hostname in the machine configuration to
361 | some value.
362 | properties:
363 | source:
364 | description: "Source of the hostname. \n Allowed values:
365 | \"MachineName\" (use linked Machine's Name)."
366 | type: string
367 | type: object
368 | talosVersion:
369 | type: string
370 | required:
371 | - generateType
372 | type: object
373 | type: object
374 | required:
375 | - template
376 | type: object
377 | type: object
378 | served: true
379 | storage: true
380 | subresources:
381 | status: {}
382 | status:
383 | acceptedNames:
384 | kind: ""
385 | plural: ""
386 | conditions: null
387 | storedVersions: null
388 | ---
389 | apiVersion: rbac.authorization.k8s.io/v1
390 | kind: Role
391 | metadata:
392 | labels:
393 | cluster.x-k8s.io/provider: bootstrap-talos
394 | clusterctl.cluster.x-k8s.io: ""
395 | name: cabpt-leader-election-role
396 | namespace: cabpt-system
397 | rules:
398 | - apiGroups:
399 | - ""
400 | resources:
401 | - configmaps
402 | verbs:
403 | - get
404 | - list
405 | - watch
406 | - create
407 | - update
408 | - patch
409 | - delete
410 | - apiGroups:
411 | - ""
412 | resources:
413 | - configmaps/status
414 | verbs:
415 | - get
416 | - update
417 | - patch
418 | - apiGroups:
419 | - ""
420 | resources:
421 | - events
422 | verbs:
423 | - create
424 | - apiGroups:
425 | - coordination.k8s.io
426 | resources:
427 | - leases
428 | verbs:
429 | - get
430 | - list
431 | - watch
432 | - create
433 | - update
434 | - patch
435 | - delete
436 | ---
437 | apiVersion: rbac.authorization.k8s.io/v1
438 | kind: ClusterRole
439 | metadata:
440 | creationTimestamp: null
441 | labels:
442 | cluster.x-k8s.io/provider: bootstrap-talos
443 | clusterctl.cluster.x-k8s.io: ""
444 | name: cabpt-manager-role
445 | rules:
446 | - apiGroups:
447 | - ""
448 | resources:
449 | - secrets
450 | verbs:
451 | - create
452 | - delete
453 | - get
454 | - list
455 | - patch
456 | - update
457 | - watch
458 | - apiGroups:
459 | - bootstrap.cluster.x-k8s.io
460 | resources:
461 | - talosconfigs
462 | verbs:
463 | - create
464 | - delete
465 | - get
466 | - list
467 | - patch
468 | - update
469 | - watch
470 | - apiGroups:
471 | - bootstrap.cluster.x-k8s.io
472 | resources:
473 | - talosconfigs/status
474 | verbs:
475 | - get
476 | - patch
477 | - update
478 | - apiGroups:
479 | - cluster.x-k8s.io
480 | resources:
481 | - clusters
482 | - clusters/status
483 | - machines
484 | - machines/status
485 | verbs:
486 | - get
487 | - list
488 | - watch
489 | - apiGroups:
490 | - exp.cluster.x-k8s.io
491 | resources:
492 | - machinepools
493 | - machinepools/status
494 | verbs:
495 | - get
496 | - list
497 | - watch
498 | ---
499 | apiVersion: rbac.authorization.k8s.io/v1
500 | kind: ClusterRole
501 | metadata:
502 | labels:
503 | cluster.x-k8s.io/provider: bootstrap-talos
504 | clusterctl.cluster.x-k8s.io: ""
505 | name: cabpt-proxy-role
506 | rules:
507 | - apiGroups:
508 | - authentication.k8s.io
509 | resources:
510 | - tokenreviews
511 | verbs:
512 | - create
513 | - apiGroups:
514 | - authorization.k8s.io
515 | resources:
516 | - subjectaccessreviews
517 | verbs:
518 | - create
519 | ---
520 | apiVersion: rbac.authorization.k8s.io/v1
521 | kind: ClusterRole
522 | metadata:
523 | labels:
524 | cluster.x-k8s.io/provider: bootstrap-talos
525 | clusterctl.cluster.x-k8s.io: ""
526 | name: cabpt-talosconfig-editor-role
527 | rules:
528 | - apiGroups:
529 | - bootstrap.cluster.x-k8s.io
530 | resources:
531 | - talosconfigs
532 | verbs:
533 | - create
534 | - delete
535 | - get
536 | - list
537 | - patch
538 | - update
539 | - watch
540 | - apiGroups:
541 | - bootstrap.cluster.x-k8s.io
542 | resources:
543 | - talosconfigs/status
544 | verbs:
545 | - get
546 | - patch
547 | - update
548 | ---
549 | apiVersion: rbac.authorization.k8s.io/v1
550 | kind: RoleBinding
551 | metadata:
552 | creationTimestamp: null
553 | labels:
554 | cluster.x-k8s.io/provider: bootstrap-talos
555 | clusterctl.cluster.x-k8s.io: ""
556 | name: cabpt-leader-election-rolebinding
557 | namespace: cabpt-system
558 | roleRef:
559 | apiGroup: rbac.authorization.k8s.io
560 | kind: Role
561 | name: cabpt-leader-election-role
562 | subjects:
563 | - kind: ServiceAccount
564 | name: default
565 | namespace: cabpt-system
566 | ---
567 | apiVersion: rbac.authorization.k8s.io/v1
568 | kind: ClusterRoleBinding
569 | metadata:
570 | creationTimestamp: null
571 | labels:
572 | cluster.x-k8s.io/provider: bootstrap-talos
573 | clusterctl.cluster.x-k8s.io: ""
574 | name: cabpt-manager-rolebinding
575 | roleRef:
576 | apiGroup: rbac.authorization.k8s.io
577 | kind: ClusterRole
578 | name: cabpt-manager-role
579 | subjects:
580 | - kind: ServiceAccount
581 | name: default
582 | namespace: cabpt-system
583 | ---
584 | apiVersion: rbac.authorization.k8s.io/v1
585 | kind: ClusterRoleBinding
586 | metadata:
587 | creationTimestamp: null
588 | labels:
589 | cluster.x-k8s.io/provider: bootstrap-talos
590 | clusterctl.cluster.x-k8s.io: ""
591 | name: cabpt-proxy-rolebinding
592 | roleRef:
593 | apiGroup: rbac.authorization.k8s.io
594 | kind: ClusterRole
595 | name: cabpt-proxy-role
596 | subjects:
597 | - kind: ServiceAccount
598 | name: default
599 | namespace: cabpt-system
600 | ---
601 | apiVersion: rbac.authorization.k8s.io/v1
602 | kind: ClusterRoleBinding
603 | metadata:
604 | creationTimestamp: null
605 | labels:
606 | cluster.x-k8s.io/provider: bootstrap-talos
607 | clusterctl.cluster.x-k8s.io: ""
608 | name: cabpt-talosconfig-editor-rolebinding
609 | roleRef:
610 | apiGroup: rbac.authorization.k8s.io
611 | kind: ClusterRole
612 | name: cabpt-talosconfig-editor-role
613 | subjects:
614 | - kind: ServiceAccount
615 | name: default
616 | namespace: cabpt-system
617 | ---
618 | apiVersion: v1
619 | kind: Service
620 | metadata:
621 | labels:
622 | cluster.x-k8s.io/provider: bootstrap-talos
623 | clusterctl.cluster.x-k8s.io: ""
624 | control-plane: controller-manager
625 | name: cabpt-controller-manager-metrics-service
626 | namespace: cabpt-system
627 | spec:
628 | ports:
629 | - name: https
630 | port: 8443
631 | targetPort: https
632 | selector:
633 | cluster.x-k8s.io/provider: bootstrap-talos
634 | control-plane: controller-manager
635 | ---
636 | apiVersion: v1
637 | kind: Service
638 | metadata:
639 | labels:
640 | cluster.x-k8s.io/provider: bootstrap-talos
641 | clusterctl.cluster.x-k8s.io: ""
642 | name: cabpt-webhook-service
643 | namespace: cabpt-system
644 | spec:
645 | ports:
646 | - port: 443
647 | targetPort: 9443
648 | selector:
649 | cluster.x-k8s.io/provider: bootstrap-talos
650 | control-plane: controller-manager
651 | ---
652 | apiVersion: apps/v1
653 | kind: Deployment
654 | metadata:
655 | creationTimestamp: null
656 | labels:
657 | cluster.x-k8s.io/provider: bootstrap-talos
658 | clusterctl.cluster.x-k8s.io: ""
659 | control-plane: controller-manager
660 | name: cabpt-controller-manager
661 | namespace: cabpt-system
662 | spec:
663 | replicas: 1
664 | selector:
665 | matchLabels:
666 | cluster.x-k8s.io/provider: bootstrap-talos
667 | control-plane: controller-manager
668 | strategy: {}
669 | template:
670 | metadata:
671 | creationTimestamp: null
672 | labels:
673 | cluster.x-k8s.io/provider: bootstrap-talos
674 | control-plane: controller-manager
675 | spec:
676 | containers:
677 | - args:
678 | - --metrics-bind-addr=127.0.0.1:8080
679 | - --enable-leader-election
680 | - --feature-gates=MachinePool=false
681 | command:
682 | - /manager
683 | image: ghcr.io/siderolabs/cluster-api-talos-controller:v0.6.0
684 | imagePullPolicy: Always
685 | livenessProbe:
686 | httpGet:
687 | path: /healthz
688 | port: healthz
689 | name: manager
690 | ports:
691 | - containerPort: 9443
692 | name: webhook-server
693 | protocol: TCP
694 | - containerPort: 9440
695 | name: healthz
696 | protocol: TCP
697 | readinessProbe:
698 | httpGet:
699 | path: /readyz
700 | port: healthz
701 | resources:
702 | limits:
703 | cpu: 500m
704 | memory: 500Mi
705 | requests:
706 | cpu: 100m
707 | memory: 128Mi
708 | volumeMounts:
709 | - mountPath: /tmp/k8s-webhook-server/serving-certs
710 | name: cert
711 | readOnly: true
712 | terminationGracePeriodSeconds: 10
713 | volumes:
714 | - name: cert
715 | secret:
716 | defaultMode: 420
717 | secretName: cabpt-webhook-service-cert
718 | status: {}
719 | ---
720 | apiVersion: cert-manager.io/v1
721 | kind: Certificate
722 | metadata:
723 | labels:
724 | cluster.x-k8s.io/provider: bootstrap-talos
725 | clusterctl.cluster.x-k8s.io: ""
726 | name: cabpt-serving-cert
727 | namespace: cabpt-system
728 | spec:
729 | dnsNames:
730 | - cabpt-webhook-service.cabpt-system.svc
731 | - cabpt-webhook-service.cabpt-system.svc.cluster.local
732 | issuerRef:
733 | kind: Issuer
734 | name: cabpt-selfsigned-issuer
735 | secretName: cabpt-webhook-service-cert
736 | ---
737 | apiVersion: cert-manager.io/v1
738 | kind: Issuer
739 | metadata:
740 | labels:
741 | cluster.x-k8s.io/provider: bootstrap-talos
742 | clusterctl.cluster.x-k8s.io: ""
743 | name: cabpt-selfsigned-issuer
744 | namespace: cabpt-system
745 | spec:
746 | selfSigned: {}
747 | ---
748 | apiVersion: admissionregistration.k8s.io/v1
749 | kind: ValidatingWebhookConfiguration
750 | metadata:
751 | annotations:
752 | cert-manager.io/inject-ca-from: cabpt-system/cabpt-serving-cert
753 | creationTimestamp: null
754 | labels:
755 | cluster.x-k8s.io/provider: bootstrap-talos
756 | clusterctl.cluster.x-k8s.io: ""
757 | name: cabpt-validating-webhook-configuration
758 | webhooks:
759 | - admissionReviewVersions:
760 | - v1
761 | clientConfig:
762 | service:
763 | name: cabpt-webhook-service
764 | namespace: cabpt-system
765 | path: /validate-bootstrap-cluster-x-k8s-io-v1alpha3-talosconfig
766 | failurePolicy: Fail
767 | name: vtalosconfig.cluster.x-k8s.io
768 | rules:
769 | - apiGroups:
770 | - bootstrap.cluster.x-k8s.io
771 | apiVersions:
772 | - v1alpha3
773 | operations:
774 | - CREATE
775 | - UPDATE
776 | resources:
777 | - talosconfigs
778 | sideEffects: None
779 | - admissionReviewVersions:
780 | - v1
781 | clientConfig:
782 | service:
783 | name: cabpt-webhook-service
784 | namespace: cabpt-system
785 | path: /validate-bootstrap-cluster-x-k8s-io-v1alpha3-talosconfigtemplate
786 | failurePolicy: Fail
787 | name: vtalosconfigtemplate.cluster.x-k8s.io
788 | rules:
789 | - apiGroups:
790 | - bootstrap.cluster.x-k8s.io
791 | apiVersions:
792 | - v1alpha3
793 | operations:
794 | - UPDATE
795 | resources:
796 | - talosconfigtemplates
797 | sideEffects: None
798 |
--------------------------------------------------------------------------------
/kubernetes/apps/sidero/capi/app/controlplane.yaml:
--------------------------------------------------------------------------------
# Namespace hosting the Cluster API control-plane provider for Talos (CACPPT).
# The cluster.x-k8s.io/provider and clusterctl labels let clusterctl discover
# and manage these objects; do not remove them.
apiVersion: v1
kind: Namespace
metadata:
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-system
---
# CRD for TalosControlPlane (generated by controller-gen v0.11.3; vendored from
# the CAPI Talos control-plane provider release — avoid hand-editing the schema).
# cert-manager injects the conversion-webhook CA via the inject-ca-from annotation.
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    cert-manager.io/inject-ca-from: cacppt-system/cacppt-serving-cert
    controller-gen.kubebuilder.io/version: v0.11.3
  creationTimestamp: null
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    # Cluster API contract labels: all supported contract versions are served
    # by the single stored version v1alpha3 of this CRD.
    cluster.x-k8s.io/v1alpha3: v1alpha3
    cluster.x-k8s.io/v1alpha4: v1alpha3
    cluster.x-k8s.io/v1beta1: v1alpha3
    clusterctl.cluster.x-k8s.io: ""
  name: taloscontrolplanes.controlplane.cluster.x-k8s.io
spec:
  conversion:
    strategy: Webhook
    webhook:
      clientConfig:
        # Placeholder CA bundle ("\n"); replaced at runtime by cert-manager.
        caBundle: Cg==
        service:
          name: webhook-service
          namespace: cacppt-system
          path: /convert
      conversionReviewVersions:
      - v1
      - v1beta1
  group: controlplane.cluster.x-k8s.io
  names:
    categories:
    - cluster-api
    kind: TalosControlPlane
    listKind: TalosControlPlaneList
    plural: taloscontrolplanes
    shortNames:
    - tcp
    singular: taloscontrolplane
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - description: TalosControlPlane API Server is ready to receive requests
      jsonPath: .status.ready
      name: Ready
      type: boolean
    - description: This denotes whether or not the control plane has the uploaded
        talos-config configmap
      jsonPath: .status.initialized
      name: Initialized
      type: boolean
    - description: Total number of non-terminated machines targeted by this control
        plane
      jsonPath: .status.replicas
      name: Replicas
      type: integer
    - description: Total number of fully running and ready control plane machines
      jsonPath: .status.readyReplicas
      name: Ready Replicas
      type: integer
    - description: Total number of unavailable machines targeted by this control plane
      jsonPath: .status.unavailableReplicas
      name: Unavailable Replicas
      type: integer
    name: v1alpha3
    schema:
      openAPIV3Schema:
        description: TalosControlPlane is the Schema for the taloscontrolplanes API
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation
              of an object. Servers should convert recognized schemas to the latest
              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this
              object represents. Servers may infer this from the endpoint the client
              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: TalosControlPlaneSpec defines the desired state of TalosControlPlane
            properties:
              controlPlaneConfig:
                description: ControlPlaneConfig is a two TalosConfigSpecs to use for
                  initializing and joining machines to the control plane.
                properties:
                  controlplane:
                    description: TalosConfigSpec defines the desired state of TalosConfig
                    properties:
                      configPatches:
                        items:
                          properties:
                            op:
                              type: string
                            path:
                              type: string
                            value:
                              x-kubernetes-preserve-unknown-fields: true
                          required:
                          - op
                          - path
                          type: object
                        type: array
                      data:
                        type: string
                      generateType:
                        type: string
                      hostname:
                        description: Set hostname in the machine configuration to
                          some value.
                        properties:
                          source:
                            description: "Source of the hostname. \n Allowed values:
                              \"MachineName\" (use linked Machine's Name)."
                            type: string
                        type: object
                      talosVersion:
                        type: string
                    required:
                    - generateType
                    type: object
                  init:
                    description: 'Deprecated: starting from cacppt v0.4.0 provider
                      doesn''t use init configs.'
                    properties:
                      configPatches:
                        items:
                          properties:
                            op:
                              type: string
                            path:
                              type: string
                            value:
                              x-kubernetes-preserve-unknown-fields: true
                          required:
                          - op
                          - path
                          type: object
                        type: array
                      data:
                        type: string
                      generateType:
                        type: string
                      hostname:
                        description: Set hostname in the machine configuration to
                          some value.
                        properties:
                          source:
                            description: "Source of the hostname. \n Allowed values:
                              \"MachineName\" (use linked Machine's Name)."
                            type: string
                        type: object
                      talosVersion:
                        type: string
                    required:
                    - generateType
                    type: object
                required:
                - controlplane
                type: object
              infrastructureTemplate:
                description: InfrastructureTemplate is a required reference to a custom
                  resource offered by an infrastructure provider.
                properties:
                  apiVersion:
                    description: API version of the referent.
                    type: string
                  fieldPath:
                    description: 'If referring to a piece of an object instead of
                      an entire object, this string should contain a valid JSON/Go
                      field access statement, such as desiredState.manifest.containers[2].
                      For example, if the object reference is to a container within
                      a pod, this would take on a value like: "spec.containers{name}"
                      (where "name" refers to the name of the container that triggered
                      the event) or if no container name is specified "spec.containers[2]"
                      (container with index 2 in this pod). This syntax is chosen
                      only to have some well-defined way of referencing a part of
                      an object. TODO: this design is not final and this field is
                      subject to change in the future.'
                    type: string
                  kind:
                    description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
                    type: string
                  name:
                    description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
                    type: string
                  namespace:
                    description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
                    type: string
                  resourceVersion:
                    description: 'Specific resourceVersion to which this reference
                      is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
                    type: string
                  uid:
                    description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
                    type: string
                type: object
                x-kubernetes-map-type: atomic
              replicas:
                description: Number of desired machines. Defaults to 1. When stacked
                  etcd is used only odd numbers are permitted, as per [etcd best practice](https://etcd.io/docs/v3.3.12/faq/#why-an-odd-number-of-cluster-members).
                  This is a pointer to distinguish between explicit zero and not specified.
                format: int32
                type: integer
              rolloutStrategy:
                default:
                  rollingUpdate:
                    maxSurge: 1
                  type: RollingUpdate
                description: The RolloutStrategy to use to replace control plane machines
                  with new ones.
                properties:
                  rollingUpdate:
                    description: Rolling update config params. Present only if RolloutStrategyType
                      = RollingUpdate.
                    properties:
                      maxSurge:
                        anyOf:
                        - type: integer
                        - type: string
                        description: 'The maximum number of control planes that can
                          be scheduled above or under the desired number of control
                          planes. Value can be an absolute number 1 or 0. Defaults
                          to 1. Example: when this is set to 1, the control plane
                          can be scaled up immediately when the rolling update starts.'
                        x-kubernetes-int-or-string: true
                    type: object
                  type:
                    description: "Change rollout strategy. \n Supported strategies:
                      * \"RollingUpdate\". * \"OnDelete\" \n Default is RollingUpdate."
                    type: string
                type: object
              version:
                description: Version defines the desired Kubernetes version.
                minLength: 2
                pattern: ^v(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)([-0-9a-zA-Z_\.+]*)?$
                type: string
            required:
            - controlPlaneConfig
            - infrastructureTemplate
            - version
            type: object
          status:
            description: TalosControlPlaneStatus defines the observed state of TalosControlPlane
            properties:
              bootstrapped:
                description: Bootstrapped denotes whether any nodes received bootstrap
                  request which is required to start etcd and Kubernetes components
                  in Talos.
                type: boolean
              conditions:
                description: Conditions defines current service state of the KubeadmControlPlane.
                items:
                  description: Condition defines an observation of a Cluster API resource
                    operational state.
                  properties:
                    lastTransitionTime:
                      description: Last time the condition transitioned from one status
                        to another. This should be when the underlying condition changed.
                        If that is not known, then using the time when the API field
                        changed is acceptable.
                      format: date-time
                      type: string
                    message:
                      description: A human readable message indicating details about
                        the transition. This field may be empty.
                      type: string
                    reason:
                      description: The reason for the condition's last transition
                        in CamelCase. The specific API may choose whether or not this
                        field is considered a guaranteed API. This field may not be
                        empty.
                      type: string
                    severity:
                      description: Severity provides an explicit classification of
                        Reason code, so the users or machines can immediately understand
                        the current situation and act accordingly. The Severity field
                        MUST be set only when Status=False.
                      type: string
                    status:
                      description: Status of the condition, one of True, False, Unknown.
                      type: string
                    type:
                      description: Type of condition in CamelCase or in foo.example.com/CamelCase.
                        Many .condition.type values are consistent across resources
                        like Available, but because arbitrary conditions can be useful
                        (see .node.status.conditions), the ability to deconflict is
                        important.
                      type: string
                  required:
                  - lastTransitionTime
                  - status
                  - type
                  type: object
                type: array
              failureMessage:
                description: ErrorMessage indicates that there is a terminal problem
                  reconciling the state, and will be set to a descriptive error message.
                type: string
              failureReason:
                description: FailureReason indicates that there is a terminal problem
                  reconciling the state, and will be set to a token value suitable
                  for programmatic interpretation.
                type: string
              initialized:
                description: Initialized denotes whether or not the control plane
                  has the uploaded talos-config configmap.
                type: boolean
              observedGeneration:
                description: ObservedGeneration is the latest generation observed
                  by the controller.
                format: int64
                type: integer
              ready:
                description: Ready denotes that the TalosControlPlane API Server is
                  ready to receive requests.
                type: boolean
              readyReplicas:
                description: Total number of fully running and ready control plane
                  machines.
                format: int32
                type: integer
              replicas:
                description: Total number of non-terminated machines targeted by this
                  control plane (their labels match the selector).
                format: int32
                type: integer
              selector:
                description: 'Selector is the label selector in string format to avoid
                  introspection by clients, and is used to provide the CRD-based integration
                  for the scale subresource and additional integrations for things
                  like kubectl describe.. The string will be in the same format as
                  the query-param syntax. More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors'
                type: string
              unavailableReplicas:
                description: Total number of unavailable machines targeted by this
                  control plane. This is the total number of machines that are still
                  required for the deployment to have 100% available capacity. They
                  may either be machines that are running but not yet ready or machines
                  that still have not been created.
                format: int32
                type: integer
            type: object
        type: object
    served: true
    storage: true
    subresources:
      scale:
        labelSelectorPath: .status.selector
        specReplicasPath: .spec.replicas
        statusReplicasPath: .status.replicas
      status: {}
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: null
  storedVersions: null
---
# Namespaced role for controller-manager leader election:
# ConfigMap- and Lease-based locking plus event emission.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-leader-election-role
  namespace: cacppt-system
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
- apiGroups:
  - ""
  resources:
  - configmaps/status
  verbs:
  - get
  - update
  - patch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
- apiGroups:
  - coordination.k8s.io
  resources:
  - leases
  verbs:
  - get
  - list
  - watch
  - create
  - update
  - patch
  - delete
---
# Namespaced permissions for the cacppt controller manager.
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  creationTimestamp: null
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-manager-role
  namespace: cacppt-system
rules:
- apiGroups:
  - ""
  resources:
  - configmaps
  verbs:
  - create
  - get
  - list
  - watch
# FIX: the RBAC API group is "rbac.authorization.k8s.io"; the bare "rbac"
# group previously used here (typical of a kubebuilder `groups=rbac` marker)
# does not exist, so the two rules below granted nothing.
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - rolebindings
  verbs:
  - create
  - get
  - list
  - watch
- apiGroups:
  - rbac.authorization.k8s.io
  resources:
  - roles
  verbs:
  - create
  - get
  - list
  - watch
---
# Cluster-wide permissions for the cacppt controller manager: full control of
# provider CRs, read access to CAPI Clusters, and management of Machines,
# events and secrets.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  creationTimestamp: null
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-manager-role
rules:
- apiGroups:
  - bootstrap.cluster.x-k8s.io
  - controlplane.cluster.x-k8s.io
  - infrastructure.cluster.x-k8s.io
  resources:
  - '*'
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - cluster.x-k8s.io
  resources:
  - clusters
  - clusters/status
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - cluster.x-k8s.io
  resources:
  - machines
  - machines/status
  verbs:
  - create
  - delete
  - get
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - get
  - list
  - patch
  - watch
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - create
  - get
  - list
  - patch
  - update
  - watch
---
# Read-only access to the /metrics endpoint (intended for metrics scrapers).
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-metrics-reader
rules:
- nonResourceURLs:
  - /metrics
  verbs:
  - get
---
# Permissions used by an auth proxy (kube-rbac-proxy style) to authenticate
# and authorize incoming metrics requests via TokenReview/SubjectAccessReview.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-proxy-role
rules:
- apiGroups:
  - authentication.k8s.io
  resources:
  - tokenreviews
  verbs:
  - create
- apiGroups:
  - authorization.k8s.io
  resources:
  - subjectaccessreviews
  verbs:
  - create
---
# Bindings granting the roles above to the provider's ServiceAccount.
# NOTE(review): the controller runs as the "default" ServiceAccount of
# cacppt-system rather than a dedicated one.
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  creationTimestamp: null
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-leader-election-rolebinding
  namespace: cacppt-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: cacppt-leader-election-role
subjects:
- kind: ServiceAccount
  name: default
  namespace: cacppt-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: null
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-manager-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cacppt-manager-role
subjects:
- kind: ServiceAccount
  name: default
  namespace: cacppt-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  creationTimestamp: null
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-proxy-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cacppt-proxy-role
subjects:
- kind: ServiceAccount
  name: default
  namespace: cacppt-system
---
# Metrics service for the controller manager (targets the container's
# "https" named port).
apiVersion: v1
kind: Service
metadata:
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
    control-plane: controller-manager
  name: cacppt-controller-manager-metrics-service
  namespace: cacppt-system
spec:
  ports:
  - name: https
    port: 8443
    targetPort: https
  selector:
    cluster.x-k8s.io/provider: control-plane-talos
    control-plane: controller-manager
---
# Webhook service: exposes the manager's webhook server (container port 9443)
# on 443 for the admission/conversion webhook client configs below.
apiVersion: v1
kind: Service
metadata:
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-webhook-service
  namespace: cacppt-system
spec:
  ports:
  - port: 443
    targetPort: 9443
  selector:
    cluster.x-k8s.io/provider: control-plane-talos
    control-plane: controller-manager
---
# The cacppt controller-manager Deployment (single replica, leader election
# enabled). Serves webhooks on 9443 using the cert-manager-issued TLS secret.
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
    control-plane: controller-manager
  name: cacppt-controller-manager
  namespace: cacppt-system
spec:
  replicas: 1
  selector:
    matchLabels:
      cluster.x-k8s.io/provider: control-plane-talos
      control-plane: controller-manager
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        cluster.x-k8s.io/provider: control-plane-talos
        control-plane: controller-manager
    spec:
      containers:
      - args:
        # Metrics bound to localhost only; reachable via the in-pod proxy/service.
        - --metrics-bind-addr=127.0.0.1:8080
        - --enable-leader-election
        command:
        - /manager
        image: ghcr.io/siderolabs/cluster-api-control-plane-talos-controller:v0.5.0
        imagePullPolicy: Always
        name: manager
        ports:
        - containerPort: 9443
          name: webhook-server
          protocol: TCP
        resources:
          limits:
            cpu: "1"
            memory: 500Mi
          requests:
            cpu: 100m
            memory: 128Mi
        volumeMounts:
        # Serving certs for the webhook server, provided by cert-manager.
        - mountPath: /tmp/k8s-webhook-server/serving-certs
          name: cert
          readOnly: true
      terminationGracePeriodSeconds: 10
      volumes:
      - name: cert
        secret:
          defaultMode: 420
          secretName: cacppt-webhook-service-cert
status: {}
---
# Self-signed serving certificate for the webhook service; the resulting
# secret is mounted by the controller-manager Deployment above.
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-serving-cert
  namespace: cacppt-system
spec:
  dnsNames:
  - cacppt-webhook-service.cacppt-system.svc
  - cacppt-webhook-service.cacppt-system.svc.cluster.local
  issuerRef:
    kind: Issuer
    name: cacppt-selfsigned-issuer
  secretName: cacppt-webhook-service-cert
---
# Namespace-local self-signed issuer backing the certificate above.
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-selfsigned-issuer
  namespace: cacppt-system
spec:
  selfSigned: {}
---
# Admission webhooks for TalosControlPlane (defaulting + validation).
# cert-manager injects the CA bundle via the inject-ca-from annotation.
apiVersion: admissionregistration.k8s.io/v1
kind: MutatingWebhookConfiguration
metadata:
  annotations:
    cert-manager.io/inject-ca-from: cacppt-system/cacppt-serving-cert
  creationTimestamp: null
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-mutating-webhook-configuration
webhooks:
- admissionReviewVersions:
  - v1
  - v1beta1
  clientConfig:
    service:
      name: cacppt-webhook-service
      namespace: cacppt-system
      path: /mutate-controlplane-cluster-x-k8s-io-v1alpha3-taloscontrolplane
  failurePolicy: Fail
  matchPolicy: Equivalent
  name: default.taloscontrolplane.controlplane.cluster.x-k8s.io
  rules:
  - apiGroups:
    - controlplane.cluster.x-k8s.io
    apiVersions:
    - v1alpha3
    operations:
    - CREATE
    - UPDATE
    resources:
    - taloscontrolplanes
  sideEffects: None
---
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
  annotations:
    cert-manager.io/inject-ca-from: cacppt-system/cacppt-serving-cert
  creationTimestamp: null
  labels:
    cluster.x-k8s.io/provider: control-plane-talos
    clusterctl.cluster.x-k8s.io: ""
  name: cacppt-validating-webhook-configuration
webhooks:
- admissionReviewVersions:
  - v1
  clientConfig:
    service:
      name: cacppt-webhook-service
      namespace: cacppt-system
      path: /validate-controlplane-cluster-x-k8s-io-v1alpha3-taloscontrolplane
  failurePolicy: Fail
  name: validate.taloscontrolplane.controlplane.cluster.x-k8s.io
  rules:
  - apiGroups:
    - controlplane.cluster.x-k8s.io
    apiVersions:
    - v1alpha3
    operations:
    - CREATE
    - UPDATE
    - DELETE
    resources:
    - taloscontrolplanes
  sideEffects: None

--------------------------------------------------------------------------------
/kubernetes/apps/sidero/capi/app/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | resources:
5 | # Pre Flux-Kustomizations
6 | - ./cluster-api-components.yaml
7 | - ./bootstrap.yaml
8 | - ./controlplane.yaml
9 | - ./infra.yaml
10 | - ./metallb.yaml
11 |
--------------------------------------------------------------------------------
/kubernetes/apps/sidero/capi/app/metallb.yaml:
--------------------------------------------------------------------------------
# LoadBalancer services exposing Sidero endpoints. Both services pin the same
# address (${SIDERO_ENDPOINT}, substituted by Flux from cluster-settings) and
# share it via MetalLB's allow-shared-ip annotation, splitting UDP and TCP
# ports across two Services.
kind: Service
apiVersion: v1
metadata:
  name: sidero-udp
  namespace: sidero-system
  annotations:
    metallb.universe.tf/allow-shared-ip: "sidero"
spec:
  type: LoadBalancer
  # NOTE(review): Service.spec.loadBalancerIP is deprecated since k8s 1.24;
  # consider MetalLB's metallb.universe.tf/loadBalancerIPs annotation instead.
  loadBalancerIP: ${SIDERO_ENDPOINT}
  selector:
    app: sidero
  ports:
    - name: tftp
      targetPort: tftp
      protocol: UDP
      port: 69
    - name: siderolink
      targetPort: siderolink
      protocol: UDP
      port: 51821
---
apiVersion: v1
kind: Service
metadata:
  name: sidero-tcp
  namespace: sidero-system
  annotations:
    metallb.universe.tf/allow-shared-ip: "sidero"
spec:
  type: LoadBalancer
  loadBalancerIP: ${SIDERO_ENDPOINT}
  selector:
    app: sidero
  ports:
    - port: 8081
      name: http
      protocol: TCP
      targetPort: http
--------------------------------------------------------------------------------
/kubernetes/apps/sidero/capi/ks.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/kustomize.toolkit.fluxcd.io/kustomization_v1.json
3 | apiVersion: kustomize.toolkit.fluxcd.io/v1
4 | kind: Kustomization
5 | metadata:
6 | name: sidero
7 | namespace: flux-system
8 | spec:
9 | dependsOn:
10 | - name: cert-manager
11 | path: ./kubernetes/apps/sidero/capi/app
12 | prune: true
13 | sourceRef:
14 | kind: GitRepository
15 | name: px-kaas
16 | interval: 15m
17 | retryInterval: 1m
18 | timeout: 3m
19 | postBuild:
20 | substituteFrom:
21 | - kind: ConfigMap
22 | name: cluster-settings
23 | optional: false
--------------------------------------------------------------------------------
/kubernetes/apps/sidero/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: kustomize.config.k8s.io/v1beta1
3 | kind: Kustomization
4 | resources:
5 | # Pre Flux-Kustomizations
6 | # Flux-Kustomizations
7 | - ./capi/ks.yaml
8 |
9 |
--------------------------------------------------------------------------------
/kubernetes/flux/apps.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/kustomize.toolkit.fluxcd.io/kustomization_v1.json
3 | apiVersion: kustomize.toolkit.fluxcd.io/v1
4 | kind: Kustomization
5 | metadata:
6 | name: cluster-apps
7 | namespace: flux-system
8 | spec:
9 | interval: 10m
10 | path: ./kubernetes/apps
11 | prune: true
12 | sourceRef:
13 | kind: GitRepository
14 | name: px-kaas
15 | postBuild:
16 | substituteFrom:
17 | - kind: ConfigMap
18 | name: cluster-settings
19 | optional: false
--------------------------------------------------------------------------------
/kubernetes/flux/config/cluster.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/source.toolkit.fluxcd.io/gitrepository_v1.json
3 | apiVersion: source.toolkit.fluxcd.io/v1
4 | kind: GitRepository
5 | metadata:
6 | name: px-kaas
7 | namespace: flux-system
8 | spec:
9 | interval: 30m
10 | url: ssh://git@github.com/kubebn/talos-proxmox-kaas/
11 | ref:
12 | branch: main
13 | secretRef:
14 | name: github-creds
15 | ignore: |
16 | # exclude all
17 | /*
18 | # include kubernetes directory
19 | !/kubernetes
20 | ---
21 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/kustomize.toolkit.fluxcd.io/kustomization_v1.json
22 | apiVersion: kustomize.toolkit.fluxcd.io/v1
23 | kind: Kustomization
24 | metadata:
25 | name: cluster
26 | namespace: flux-system
27 | spec:
28 | interval: 30m
29 | path: ./kubernetes/flux
30 | prune: true
31 | wait: false
32 | sourceRef:
33 | kind: GitRepository
34 | name: px-kaas
35 | postBuild:
36 | substituteFrom:
37 | - kind: ConfigMap
38 | name: cluster-settings
39 | optional: false
--------------------------------------------------------------------------------
/kubernetes/flux/config/crds/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/2eef483d7e79ac12f3744d59e02376869382d99c/kubernetes/flux/config/crds/.gitkeep
--------------------------------------------------------------------------------
/kubernetes/flux/config/flux.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/source.toolkit.fluxcd.io/ocirepository_v1beta2.json
3 | apiVersion: source.toolkit.fluxcd.io/v1beta2
4 | kind: OCIRepository
5 | metadata:
6 | name: flux-manifests
7 | namespace: flux-system
8 | spec:
9 | interval: 10m
10 | url: oci://ghcr.io/fluxcd/flux-manifests
11 | ref:
12 | tag: v2.0.0-rc.1
13 | ---
14 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/kustomize.toolkit.fluxcd.io/kustomization_v1.json
15 | apiVersion: kustomize.toolkit.fluxcd.io/v1
16 | kind: Kustomization
17 | metadata:
18 | name: flux
19 | namespace: flux-system
20 | spec:
21 | interval: 10m
22 | path: ./
23 | prune: true
24 | wait: true
25 | sourceRef:
26 | kind: OCIRepository
27 | name: flux-manifests
28 | patches:
29 |       # Remove the network policies that do not work with k3s
30 | - patch: |
31 | $patch: delete
32 | apiVersion: networking.k8s.io/v1
33 | kind: NetworkPolicy
34 | metadata:
35 | name: not-used
36 | target:
37 | group: networking.k8s.io
38 | kind: NetworkPolicy
39 |       # Increase the number of reconciliations that can be performed in parallel and bump the resource limits
40 | # https://fluxcd.io/flux/cheatsheets/bootstrap/#increase-the-number-of-workers
41 | - patch: |
42 | - op: add
43 | path: /spec/template/spec/containers/0/args/-
44 | value: --concurrent=8
45 | - op: add
46 | path: /spec/template/spec/containers/0/args/-
47 | value: --kube-api-qps=500
48 | - op: add
49 | path: /spec/template/spec/containers/0/args/-
50 | value: --kube-api-burst=1000
51 | - op: add
52 | path: /spec/template/spec/containers/0/args/-
53 | value: --requeue-dependency=5s
54 | target:
55 | kind: Deployment
56 | name: (kustomize-controller|helm-controller|source-controller)
57 | - patch: |
58 | apiVersion: apps/v1
59 | kind: Deployment
60 | metadata:
61 | name: not-used
62 | spec:
63 | template:
64 | spec:
65 | containers:
66 | - name: manager
67 | resources:
68 | limits:
69 | cpu: 2000m
70 | memory: 2Gi
71 | target:
72 | kind: Deployment
73 | name: (kustomize-controller|helm-controller|source-controller)
74 | # Enable drift detection for HelmReleases and set the log level to debug
75 | # https://fluxcd.io/flux/components/helm/helmreleases/#drift-detection
76 | - patch: |
77 | - op: add
78 | path: /spec/template/spec/containers/0/args/-
79 | value: --feature-gates=DetectDrift=true,CorrectDrift=false
80 | - op: add
81 | path: /spec/template/spec/containers/0/args/-
82 | value: --log-level=debug
83 | target:
84 | kind: Deployment
85 | name: helm-controller
86 | # Enable Helm near OOM detection
87 | # https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-helm-near-oom-detection
88 | - patch: |
89 | - op: add
90 | path: /spec/template/spec/containers/0/args/-
91 | value: --feature-gates=OOMWatch=true
92 | - op: add
93 | path: /spec/template/spec/containers/0/args/-
94 | value: --oom-watch-memory-threshold=95
95 | - op: add
96 | path: /spec/template/spec/containers/0/args/-
97 | value: --oom-watch-interval=500ms
98 | target:
99 | kind: Deployment
100 | name: helm-controller
101 | # Enable notifications for 3rd party Flux controllers such as tf-controller
102 | # https://fluxcd.io/flux/cheatsheets/bootstrap/#enable-notifications-for-third-party-controllers
103 | - patch: |
104 | - op: add
105 | path: /spec/versions/1/schema/openAPIV3Schema/properties/spec/properties/eventSources/items/properties/kind/enum/-
106 | value: Terraform
107 | target:
108 | kind: CustomResourceDefinition
109 | name: alerts.notification.toolkit.fluxcd.io
110 | - patch: |
111 | - op: add
112 | path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/resources/items/properties/kind/enum/-
113 | value: Terraform
114 | target:
115 | kind: CustomResourceDefinition
116 | name: receivers.notification.toolkit.fluxcd.io
117 | - patch: |
118 | - op: add
119 | path: /rules/-
120 | value:
121 | apiGroups: ["infra.contrib.fluxcd.io"]
122 | resources: ["*"]
123 | verbs: ["*"]
124 | target:
125 | kind: ClusterRole
126 | name: crd-controller-flux-system
127 |
--------------------------------------------------------------------------------
/kubernetes/flux/config/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://json.schemastore.org/kustomization
3 | apiVersion: kustomize.config.k8s.io/v1beta1
4 | kind: Kustomization
5 | resources:
6 | - ./flux.yaml
7 | - ./cluster.yaml
8 |
--------------------------------------------------------------------------------
/kubernetes/flux/repositories/git/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://json.schemastore.org/kustomization
3 | apiVersion: kustomize.config.k8s.io/v1beta1
4 | kind: Kustomization
5 | resources:
6 | - ./local-path-provisioner.yaml
7 |
--------------------------------------------------------------------------------
/kubernetes/flux/repositories/git/local-path-provisioner.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/source.toolkit.fluxcd.io/gitrepository_v1.json
3 | apiVersion: source.toolkit.fluxcd.io/v1
4 | kind: GitRepository
5 | metadata:
6 | name: local-path-provisioner
7 | namespace: flux-system
8 | spec:
9 | interval: 30m
10 | url: https://github.com/rancher/local-path-provisioner
11 | ref:
12 | tag: v0.0.24
13 | ignore: |
14 | # exclude all
15 | /*
16 |     # include only the local-path-provisioner Helm chart directory
17 | !/deploy/chart/local-path-provisioner
18 |
--------------------------------------------------------------------------------
/kubernetes/flux/repositories/helm/external-dns.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/source.toolkit.fluxcd.io/helmrepository_v1beta2.json
3 | apiVersion: source.toolkit.fluxcd.io/v1beta2
4 | kind: HelmRepository
5 | metadata:
6 | name: external-dns
7 | namespace: flux-system
8 | spec:
9 | interval: 2h
10 | url: https://kubernetes-sigs.github.io/external-dns
--------------------------------------------------------------------------------
/kubernetes/flux/repositories/helm/jetstack.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/source.toolkit.fluxcd.io/helmrepository_v1beta2.json
3 | apiVersion: source.toolkit.fluxcd.io/v1beta2
4 | kind: HelmRepository
5 | metadata:
6 | name: jetstack
7 | namespace: flux-system
8 | spec:
9 | interval: 2h
10 | url: https://charts.jetstack.io/
--------------------------------------------------------------------------------
/kubernetes/flux/repositories/helm/kustomization.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://json.schemastore.org/kustomization
3 | apiVersion: kustomize.config.k8s.io/v1beta1
4 | kind: Kustomization
5 | resources:
6 | - ./external-dns.yaml
7 | - ./jetstack.yaml
8 | - ./proxmox-operator.yaml
9 |
--------------------------------------------------------------------------------
/kubernetes/flux/repositories/helm/proxmox-operator.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://kubernetes-schemas.devbu.io/source.toolkit.fluxcd.io/helmrepository_v1beta2.json
3 | apiVersion: source.toolkit.fluxcd.io/v1beta2
4 | kind: HelmRepository
5 | metadata:
6 | name: proxmox-operator
7 | namespace: flux-system
8 | spec:
9 | interval: 2h
10 | url: https://crash-tech.github.io/charts/
--------------------------------------------------------------------------------
/kubernetes/flux/repositories/kustomization.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | # yaml-language-server: $schema=https://json.schemastore.org/kustomization
3 | apiVersion: kustomize.config.k8s.io/v1beta1
4 | kind: Kustomization
5 | resources:
6 | - ./git
7 | - ./helm
8 |
--------------------------------------------------------------------------------
/kubernetes/flux/repositories/oci/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/2eef483d7e79ac12f3744d59e02376869382d99c/kubernetes/flux/repositories/oci/.gitkeep
--------------------------------------------------------------------------------
/manifests/optional/theila.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: Namespace
4 | metadata:
5 | name: theila
6 | ---
7 | apiVersion: apps/v1
8 | kind: Deployment
9 | metadata:
10 | name: theila
11 | namespace: theila
12 | labels:
13 | app: theila
14 | spec:
15 | replicas: 1
16 | selector:
17 | matchLabels:
18 | app: theila
19 | template:
20 | metadata:
21 | name: theila
22 | labels:
23 | app: theila
24 | spec:
25 | containers:
26 | - name: theila
27 | image: ghcr.io/siderolabs/theila:v0.2.1
28 | args:
29 | - "--port=8080"
30 | - "--address=0.0.0.0"
31 | env:
32 | - name: KUBECONFIG
33 | value: /etc/theila/kubeconfig
34 | - name: TALOSCONFIG
35 | value: /etc/theila/talosconfig
36 | ports:
37 | - containerPort: 8080
38 | volumeMounts:
39 | - mountPath: /etc/theila/
40 | name: config
41 | volumes:
42 | - name: config
43 | configMap:
44 | name: theila-config
45 | ---
46 | apiVersion: v1
47 | kind: ConfigMap
48 | metadata:
49 | name: theila-config
50 | namespace: theila
51 | data:
52 | talosconfig: |
53 | context: mgmt-cluster
54 | contexts:
55 | mgmt-cluster:
56 | kubeconfig: |
57 | apiVersion: v1
58 | kind: Config
59 | clusters:
60 | - name: mgmt-cluster
61 | ---
62 | apiVersion: v1
63 | kind: Service
64 | metadata:
66 |   labels:
67 | app: theila
68 | name: theila
69 | namespace: theila
70 | spec:
71 | ports:
72 | - name: http
73 | port: 80
74 | protocol: TCP
75 | targetPort: 8080
76 | selector:
77 | app: theila
78 | type: LoadBalancer
--------------------------------------------------------------------------------
/manifests/talos/cert-approval.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | kind: ClusterRole
4 | metadata:
5 | name: kubelet-csr-approver
6 | rules:
7 | - apiGroups:
8 | - certificates.k8s.io
9 | resources:
10 | - certificatesigningrequests
11 | verbs:
12 | - get
13 | - list
14 | - watch
15 | - apiGroups:
16 | - certificates.k8s.io
17 | resources:
18 | - certificatesigningrequests/approval
19 | verbs:
20 | - update
21 | - apiGroups:
22 | - certificates.k8s.io
23 | resourceNames:
24 | - kubernetes.io/kubelet-serving
25 | resources:
26 | - signers
27 | verbs:
28 | - approve
29 | ---
30 | apiVersion: rbac.authorization.k8s.io/v1
31 | kind: ClusterRoleBinding
32 | metadata:
33 |   name: kubelet-csr-approver
35 | roleRef:
36 | apiGroup: rbac.authorization.k8s.io
37 | kind: ClusterRole
38 | name: kubelet-csr-approver
39 | subjects:
40 | - kind: ServiceAccount
41 | name: kubelet-csr-approver
42 | namespace: kube-system
43 | ---
44 | apiVersion: v1
45 | kind: ServiceAccount
46 | metadata:
47 | name: kubelet-csr-approver
48 | namespace: kube-system
49 | ---
50 | apiVersion: apps/v1
51 | kind: Deployment
52 | metadata:
53 | name: kubelet-csr-approver
54 | namespace: kube-system
55 | spec:
56 | selector:
57 | matchLabels:
58 | app: kubelet-csr-approver
59 |
60 | template:
61 | metadata:
62 | annotations:
63 | prometheus.io/port: '8080'
64 | prometheus.io/scrape: 'true'
65 | labels:
66 | app: kubelet-csr-approver
67 |
68 | spec:
69 | serviceAccountName: kubelet-csr-approver
70 | containers:
71 | - name: kubelet-csr-approver
72 | image: postfinance/kubelet-csr-approver:v0.2.2
73 | resources:
74 | limits:
75 | memory: "128Mi"
76 | cpu: "500m"
77 |
78 | args:
79 | - -metrics-bind-address
80 | - ":8080"
81 | - -health-probe-bind-address
82 | - ":8081"
83 |
84 | livenessProbe:
85 | httpGet:
86 | path: /healthz
87 | port: 8081
88 |
89 | env:
90 | - name: PROVIDER_REGEX
91 | value: worker-.|master-.|talos-.
92 | - name: PROVIDER_IP_PREFIXES
93 | value: "0.0.0.0/0"
94 | - name: MAX_EXPIRATION_SECONDS
95 | value: "86400"
96 | - name: BYPASS_DNS_RESOLUTION
97 | value: "true"
98 | - name: ALLOWED_DNS_NAMES
99 | value: "1"
100 | - name: BYPASS_HOSTNAME_CHECK
101 | value: "true"
102 | tolerations:
103 | - effect: NoSchedule
104 | key: node-role.kubernetes.io/master
105 | operator: Equal
106 | - effect: NoSchedule
107 | key: node-role.kubernetes.io/control-plane
108 | operator: Equal
--------------------------------------------------------------------------------
/manifests/talos/coredns-local.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: v1
3 | kind: ConfigMap
4 | metadata:
5 | name: coredns-local
6 | namespace: kube-system
7 | data:
8 | empty.db: |
9 | @ 60 IN SOA localnet. root.localnet. (
10 | 1 ; serial
11 | 60 ; refresh
12 | 60 ; retry
13 | 60 ; expiry
14 | 60 ) ; minimum
15 | ;
16 | @ IN NS localnet.
17 |
18 | hosts: |
19 | # static hosts
20 | 169.254.2.53 dns.local
21 |
22 | Corefile.local: |
23 | (empty) {
24 | file /etc/coredns/empty.db
25 | }
26 |
27 | .:53 {
28 | errors
29 | bind 169.254.2.53
30 |
31 | health 127.0.0.1:8091 {
32 | lameduck 5s
33 | }
34 |
35 | hosts /etc/coredns/hosts {
36 | reload 60s
37 | fallthrough
38 | }
39 |
40 | kubernetes cluster.local in-addr.arpa ip6.arpa {
41 | endpoint https://api.cluster.local:6443
42 | kubeconfig /etc/coredns/kubeconfig.conf coredns
43 | pods insecure
44 | ttl 60
45 | }
46 | prometheus :9153
47 |
48 | forward . /etc/resolv.conf {
49 | policy sequential
50 | expire 30s
51 | }
52 |
53 | cache 300
54 | loop
55 | reload
56 | loadbalance
57 | }
58 | kubeconfig.conf: |-
59 | apiVersion: v1
60 | kind: Config
61 | clusters:
62 | - cluster:
63 | certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
64 | server: https://api.cluster.local:6443
65 | name: default
66 | contexts:
67 | - context:
68 | cluster: default
69 | namespace: kube-system
70 | user: coredns
71 | name: coredns
72 | current-context: coredns
73 | users:
74 | - name: coredns
75 | user:
76 | tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
77 | ---
78 | apiVersion: apps/v1
79 | kind: DaemonSet
80 | metadata:
81 | name: coredns-local
82 | namespace: kube-system
83 | labels:
84 | k8s-app: kube-dns-local
85 | kubernetes.io/name: CoreDNS
86 | spec:
87 | updateStrategy:
88 | type: RollingUpdate
89 | minReadySeconds: 15
90 | selector:
91 | matchLabels:
92 | k8s-app: kube-dns-local
93 | kubernetes.io/name: CoreDNS
94 | template:
95 | metadata:
96 | labels:
97 | k8s-app: kube-dns-local
98 | kubernetes.io/name: CoreDNS
99 | annotations:
100 | prometheus.io/scrape: "true"
101 | prometheus.io/port: "9153"
102 | spec:
103 | priorityClassName: system-node-critical
104 | serviceAccount: coredns
105 | serviceAccountName: coredns
106 | enableServiceLinks: false
107 | tolerations:
108 | - effect: NoSchedule
109 | key: node-role.kubernetes.io/control-plane
110 | operator: Exists
111 | - effect: NoSchedule
112 | key: node.cloudprovider.kubernetes.io/uninitialized
113 | operator: Exists
114 | hostNetwork: true
115 | containers:
116 | - name: coredns
117 | image: coredns/coredns:1.10.1
118 | imagePullPolicy: IfNotPresent
119 | resources:
120 | limits:
121 | cpu: 100m
122 | memory: 128Mi
123 | requests:
124 | cpu: 50m
125 | memory: 64Mi
126 | args: [ "-conf", "/etc/coredns/Corefile.local" ]
127 | volumeMounts:
128 | - name: config-volume
129 | mountPath: /etc/coredns
130 | readOnly: true
131 | livenessProbe:
132 | httpGet:
133 | host: 127.0.0.1
134 | path: /health
135 | port: 8091
136 | scheme: HTTP
137 | initialDelaySeconds: 60
138 | periodSeconds: 10
139 | successThreshold: 1
140 | timeoutSeconds: 5
141 | securityContext:
142 | allowPrivilegeEscalation: false
143 | capabilities:
144 | add:
145 | - NET_BIND_SERVICE
146 | drop:
147 | - all
148 | readOnlyRootFilesystem: true
149 | dnsPolicy: Default
150 | volumes:
151 | - name: config-volume
152 | configMap:
153 | name: coredns-local
--------------------------------------------------------------------------------
/manifests/talos/fluxcd-install.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: kustomize.toolkit.fluxcd.io/v1
2 | kind: Kustomization
3 | metadata:
4 | name: cluster
5 | namespace: flux-system
6 | spec:
7 | interval: 30m
8 | path: ./kubernetes/flux
9 | prune: true
10 | sourceRef:
11 | kind: GitRepository
12 | name: px-kaas
13 | wait: false
14 | postBuild:
15 | substituteFrom:
16 | - kind: ConfigMap
17 | name: cluster-settings
18 | optional: false
19 | ---
20 | apiVersion: kustomize.toolkit.fluxcd.io/v1
21 | kind: Kustomization
22 | metadata:
23 | name: flux
24 | namespace: flux-system
25 | spec:
26 | interval: 10m
27 | patches:
28 | - patch: |
29 | $patch: delete
30 | apiVersion: networking.k8s.io/v1
31 | kind: NetworkPolicy
32 | metadata:
33 | name: not-used
34 | target:
35 | group: networking.k8s.io
36 | kind: NetworkPolicy
37 | - patch: |
38 | - op: add
39 | path: /spec/template/spec/containers/0/args/-
40 | value: --concurrent=8
41 | - op: add
42 | path: /spec/template/spec/containers/0/args/-
43 | value: --kube-api-qps=500
44 | - op: add
45 | path: /spec/template/spec/containers/0/args/-
46 | value: --kube-api-burst=1000
47 | - op: add
48 | path: /spec/template/spec/containers/0/args/-
49 | value: --requeue-dependency=5s
50 | target:
51 | kind: Deployment
52 | name: (kustomize-controller|helm-controller|source-controller)
53 | - patch: |
54 | apiVersion: apps/v1
55 | kind: Deployment
56 | metadata:
57 | name: not-used
58 | spec:
59 | template:
60 | spec:
61 | containers:
62 | - name: manager
63 | resources:
64 | limits:
65 | cpu: 2000m
66 | memory: 2Gi
67 | target:
68 | kind: Deployment
69 | name: (kustomize-controller|helm-controller|source-controller)
70 | - patch: |
71 | - op: add
72 | path: /spec/template/spec/containers/0/args/-
73 | value: --feature-gates=DetectDrift=true,CorrectDrift=false
74 | - op: add
75 | path: /spec/template/spec/containers/0/args/-
76 | value: --log-level=debug
77 | target:
78 | kind: Deployment
79 | name: helm-controller
80 | - patch: |
81 | - op: add
82 | path: /spec/template/spec/containers/0/args/-
83 | value: --feature-gates=OOMWatch=true
84 | - op: add
85 | path: /spec/template/spec/containers/0/args/-
86 | value: --oom-watch-memory-threshold=95
87 | - op: add
88 | path: /spec/template/spec/containers/0/args/-
89 | value: --oom-watch-interval=500ms
90 | target:
91 | kind: Deployment
92 | name: helm-controller
93 | - patch: |
94 | - op: add
95 | path: /spec/versions/1/schema/openAPIV3Schema/properties/spec/properties/eventSources/items/properties/kind/enum/-
96 | value: Terraform
97 | target:
98 | kind: CustomResourceDefinition
99 | name: alerts.notification.toolkit.fluxcd.io
100 | - patch: |
101 | - op: add
102 | path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/resources/items/properties/kind/enum/-
103 | value: Terraform
104 | target:
105 | kind: CustomResourceDefinition
106 | name: receivers.notification.toolkit.fluxcd.io
107 | - patch: |
108 | - op: add
109 | path: /rules/-
110 | value:
111 | apiGroups: ["infra.contrib.fluxcd.io"]
112 | resources: ["*"]
113 | verbs: ["*"]
114 | target:
115 | kind: ClusterRole
116 | name: crd-controller-flux-system
117 | path: ./
118 | prune: true
119 | sourceRef:
120 | kind: OCIRepository
121 | name: flux-manifests
122 | wait: true
123 | ---
124 | apiVersion: source.toolkit.fluxcd.io/v1
125 | kind: GitRepository
126 | metadata:
127 | name: px-kaas
128 | namespace: flux-system
129 | spec:
130 | ignore: |
131 | # exclude all
132 | /*
133 | # include kubernetes directory
134 | !/kubernetes
135 | interval: 30m
136 | ref:
137 | branch: main
138 | secretRef:
139 | name: github-creds
140 | url: ssh://git@github.com/kubebn/talos-proxmox-kaas/
141 | ---
142 | apiVersion: source.toolkit.fluxcd.io/v1beta2
143 | kind: OCIRepository
144 | metadata:
145 | name: flux-manifests
146 | namespace: flux-system
147 | spec:
148 | interval: 10m
149 | ref:
150 | tag: v2.0.0-rc.1
151 | url: oci://ghcr.io/fluxcd/flux-manifests
152 |
--------------------------------------------------------------------------------
/manifests/talos/metrics-server.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | labels:
5 | k8s-app: metrics-server
6 | name: metrics-server
7 | namespace: kube-system
8 | ---
9 | apiVersion: rbac.authorization.k8s.io/v1
10 | kind: ClusterRole
11 | metadata:
12 | labels:
13 | k8s-app: metrics-server
14 | rbac.authorization.k8s.io/aggregate-to-admin: "true"
15 | rbac.authorization.k8s.io/aggregate-to-edit: "true"
16 | rbac.authorization.k8s.io/aggregate-to-view: "true"
17 | name: system:aggregated-metrics-reader
18 | rules:
19 | - apiGroups:
20 | - metrics.k8s.io
21 | resources:
22 | - pods
23 | - nodes
24 | verbs:
25 | - get
26 | - list
27 | - watch
28 | ---
29 | apiVersion: rbac.authorization.k8s.io/v1
30 | kind: ClusterRole
31 | metadata:
32 | labels:
33 | k8s-app: metrics-server
34 | name: system:metrics-server
35 | rules:
36 | - apiGroups:
37 | - ""
38 | resources:
39 | - nodes/metrics
40 | verbs:
41 | - get
42 | - apiGroups:
43 | - ""
44 | resources:
45 | - pods
46 | - nodes
47 | verbs:
48 | - get
49 | - list
50 | - watch
51 | ---
52 | apiVersion: rbac.authorization.k8s.io/v1
53 | kind: RoleBinding
54 | metadata:
55 | labels:
56 | k8s-app: metrics-server
57 | name: metrics-server-auth-reader
58 | namespace: kube-system
59 | roleRef:
60 | apiGroup: rbac.authorization.k8s.io
61 | kind: Role
62 | name: extension-apiserver-authentication-reader
63 | subjects:
64 | - kind: ServiceAccount
65 | name: metrics-server
66 | namespace: kube-system
67 | ---
68 | apiVersion: rbac.authorization.k8s.io/v1
69 | kind: ClusterRoleBinding
70 | metadata:
71 | labels:
72 | k8s-app: metrics-server
73 | name: metrics-server:system:auth-delegator
74 | roleRef:
75 | apiGroup: rbac.authorization.k8s.io
76 | kind: ClusterRole
77 | name: system:auth-delegator
78 | subjects:
79 | - kind: ServiceAccount
80 | name: metrics-server
81 | namespace: kube-system
82 | ---
83 | apiVersion: rbac.authorization.k8s.io/v1
84 | kind: ClusterRoleBinding
85 | metadata:
86 | labels:
87 | k8s-app: metrics-server
88 | name: system:metrics-server
89 | roleRef:
90 | apiGroup: rbac.authorization.k8s.io
91 | kind: ClusterRole
92 | name: system:metrics-server
93 | subjects:
94 | - kind: ServiceAccount
95 | name: metrics-server
96 | namespace: kube-system
97 | ---
98 | apiVersion: v1
99 | kind: Service
100 | metadata:
101 | labels:
102 | k8s-app: metrics-server
103 | name: metrics-server
104 | namespace: kube-system
105 | spec:
106 | ports:
107 | - name: https
108 | port: 443
109 | protocol: TCP
110 | targetPort: https
111 | selector:
112 | k8s-app: metrics-server
113 | ---
114 | apiVersion: apps/v1
115 | kind: Deployment
116 | metadata:
117 | labels:
118 | k8s-app: metrics-server
119 | name: metrics-server
120 | namespace: kube-system
121 | spec:
122 | selector:
123 | matchLabels:
124 | k8s-app: metrics-server
125 | strategy:
126 | rollingUpdate:
127 | maxUnavailable: 0
128 | template:
129 | metadata:
130 | labels:
131 | k8s-app: metrics-server
132 | spec:
133 | containers:
134 | - args:
135 | - --cert-dir=/tmp
136 | - --secure-port=4443
137 | - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
138 | - --kubelet-use-node-status-port
139 | - --metric-resolution=15s
140 | image: registry.k8s.io/metrics-server/metrics-server:v0.6.3
141 | imagePullPolicy: IfNotPresent
142 | livenessProbe:
143 | failureThreshold: 3
144 | httpGet:
145 | path: /livez
146 | port: https
147 | scheme: HTTPS
148 | periodSeconds: 10
149 | name: metrics-server
150 | ports:
151 | - containerPort: 4443
152 | name: https
153 | protocol: TCP
154 | readinessProbe:
155 | failureThreshold: 3
156 | httpGet:
157 | path: /readyz
158 | port: https
159 | scheme: HTTPS
160 | initialDelaySeconds: 20
161 | periodSeconds: 10
162 | resources:
163 | requests:
164 | cpu: 100m
165 | memory: 200Mi
166 | securityContext:
167 | allowPrivilegeEscalation: false
168 | readOnlyRootFilesystem: true
169 | runAsNonRoot: true
170 | runAsUser: 1000
171 | volumeMounts:
172 | - mountPath: /tmp
173 | name: tmp-dir
174 | nodeSelector:
175 | kubernetes.io/os: linux
176 | priorityClassName: system-cluster-critical
177 | serviceAccountName: metrics-server
178 | volumes:
179 | - emptyDir: {}
180 | name: tmp-dir
181 | ---
182 | apiVersion: apiregistration.k8s.io/v1
183 | kind: APIService
184 | metadata:
185 | labels:
186 | k8s-app: metrics-server
187 | name: v1beta1.metrics.k8s.io
188 | spec:
189 | group: metrics.k8s.io
190 | groupPriorityMinimum: 100
191 | insecureSkipTLSVerify: true
192 | service:
193 | name: metrics-server
194 | namespace: kube-system
195 | version: v1beta1
196 | versionPriority: 100
197 |
--------------------------------------------------------------------------------
/packer/proxmox.pkr.hcl:
--------------------------------------------------------------------------------
1 | packer {
2 | required_plugins {
3 | proxmox = {
4 | version = ">= 1.0.1"
5 | source = "github.com/hashicorp/proxmox"
6 | }
7 | }
8 | }
9 |
10 | source "proxmox" "talos" {
11 | proxmox_url = var.proxmox_url
12 | username = var.proxmox_username
13 | token = var.proxmox_token
14 | node = var.proxmox_nodename
15 | insecure_skip_tls_verify = true
16 |
17 | iso_file = "local:iso/archlinux-2023.04.01-x86_64.iso"
18 | unmount_iso = true
19 |
20 | scsi_controller = "virtio-scsi-pci"
21 | network_adapters {
22 | bridge = "vmbr0"
23 | model = "virtio"
24 | }
25 | disks {
26 | type = "scsi"
27 | storage_pool = var.proxmox_storage
28 | storage_pool_type = var.proxmox_storage_type
29 | format = "raw"
30 | disk_size = "1500M"
31 | cache_mode = "writethrough"
32 | }
33 |
34 | memory = 2048
35 | ssh_username = "root"
36 | ssh_password = "packer"
37 | ssh_timeout = "15m"
38 | qemu_agent = true
39 |
40 | template_name = "talos"
41 | template_description = "Talos system disk"
42 |
43 | boot_wait = "25s"
44 |   boot_command = [
45 |     "<enter>",
46 |     "passwd<enter>packer<enter>packer<enter>",
47 |     "ip address add ${var.static_ip} broadcast + dev ens18<enter>",
48 |     "ip route add 0.0.0.0/0 via ${var.gateway} dev ens18<enter>"
49 |   ]
50 | }
51 |
52 | build {
53 | name = "release"
54 | sources = ["source.proxmox.talos"]
55 |
56 | provisioner "shell" {
57 | inline = [
58 | "curl -L ${local.image} -o /tmp/talos.raw.xz",
59 | "xz -d -c /tmp/talos.raw.xz | dd of=/dev/sda && sync",
60 | ]
61 | }
62 | }
63 |
64 | build {
65 | name = "develop"
66 | sources = ["source.proxmox.talos"]
67 |
68 | provisioner "file" {
69 | source = "../../../talos/_out/nocloud-amd64.raw.xz"
70 | destination = "/tmp/talos.raw.xz"
71 | }
72 | provisioner "shell" {
73 | inline = [
74 | "xz -d -c /tmp/talos.raw.xz | dd of=/dev/sda && sync",
75 | ]
76 | }
77 | }
--------------------------------------------------------------------------------
/packer/variables.pkr.hcl:
--------------------------------------------------------------------------------
1 | variable "proxmox_username" {
2 | type = string
3 | }
4 |
5 | variable "proxmox_token" {
6 | type = string
7 | }
8 |
9 | variable "proxmox_url" {
10 | type = string
11 | }
12 |
13 | variable "proxmox_nodename" {
14 | type = string
15 | }
16 |
17 | variable "proxmox_storage" {
18 | type = string
19 | }
20 |
21 | variable "proxmox_storage_type" {
22 | type = string
23 | }
24 |
25 | variable "static_ip" {
26 | type = string
27 | }
28 |
29 | variable "gateway" {
30 | type = string
31 | }
32 |
33 |
34 | variable "talos_version" {
35 | type = string
36 | default = "v1.4.2"
37 | }
38 |
39 | locals {
40 | image = "https://github.com/talos-systems/talos/releases/download/${var.talos_version}/nocloud-amd64.raw.xz"
41 | }
--------------------------------------------------------------------------------
/packer/vars/local.pkrvars.hcl:
--------------------------------------------------------------------------------
1 | proxmox_storage = "local-lvm"
2 | proxmox_storage_type = "lvm"
3 | talos_version = "v1.4.2"
4 | static_ip = "10.1.1.30/24" # static ip for vm to ssh
5 | gateway = "10.1.1.1" # gateway for vm to ssh
--------------------------------------------------------------------------------
/terraform/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # Crash log files
9 | crash.log
10 | crash.*.log
11 |
12 | # Ignore override files as they are usually used to override resources locally and so
13 | # are not checked in
14 | override.tf
15 | override.tf.json
16 | *_override.tf
17 | *_override.tf.json
18 |
19 | # Include override files you do wish to add to version control using negated pattern
20 | # !example_override.tf
21 |
22 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
23 | # example: *tfplan*
24 |
25 | # Ignore CLI configuration files
26 | .terraformrc
27 | terraform.rc
28 | .terraform.lock.hcl
29 |
30 | talosconfig
31 | kubeconfig
--------------------------------------------------------------------------------
/terraform/.terraform.lock.hcl:
--------------------------------------------------------------------------------
1 | # This file is maintained automatically by "terraform init".
2 | # Manual edits may be lost in future updates.
3 |
4 | provider "registry.terraform.io/hashicorp/local" {
5 | version = "2.4.0"
6 | hashes = [
7 | "h1:ZUEYUmm2t4vxwzxy1BvN1wL6SDWrDxfH7pxtzX8c6d0=",
8 | "zh:53604cd29cb92538668fe09565c739358dc53ca56f9f11312b9d7de81e48fab9",
9 | "zh:66a46e9c508716a1c98efbf793092f03d50049fa4a83cd6b2251e9a06aca2acf",
10 | "zh:70a6f6a852dd83768d0778ce9817d81d4b3f073fab8fa570bff92dcb0824f732",
11 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
12 | "zh:82a803f2f484c8b766e2e9c32343e9c89b91997b9f8d2697f9f3837f62926b35",
13 | "zh:9708a4e40d6cc4b8afd1352e5186e6e1502f6ae599867c120967aebe9d90ed04",
14 | "zh:973f65ce0d67c585f4ec250c1e634c9b22d9c4288b484ee2a871d7fa1e317406",
15 | "zh:c8fa0f98f9316e4cfef082aa9b785ba16e36ff754d6aba8b456dab9500e671c6",
16 | "zh:cfa5342a5f5188b20db246c73ac823918c189468e1382cb3c48a9c0c08fc5bf7",
17 | "zh:e0e2b477c7e899c63b06b38cd8684a893d834d6d0b5e9b033cedc06dd7ffe9e2",
18 | "zh:f62d7d05ea1ee566f732505200ab38d94315a4add27947a60afa29860822d3fc",
19 | "zh:fa7ce69dde358e172bd719014ad637634bbdabc49363104f4fca759b4b73f2ce",
20 | ]
21 | }
22 |
23 | provider "registry.terraform.io/hashicorp/null" {
24 | version = "3.2.1"
25 | hashes = [
26 | "h1:ydA0/SNRVB1o95btfshvYsmxA+jZFRZcvKzZSB+4S1M=",
27 | "zh:58ed64389620cc7b82f01332e27723856422820cfd302e304b5f6c3436fb9840",
28 | "zh:62a5cc82c3b2ddef7ef3a6f2fedb7b9b3deff4ab7b414938b08e51d6e8be87cb",
29 | "zh:63cff4de03af983175a7e37e52d4bd89d990be256b16b5c7f919aff5ad485aa5",
30 | "zh:74cb22c6700e48486b7cabefa10b33b801dfcab56f1a6ac9b6624531f3d36ea3",
31 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
32 | "zh:79e553aff77f1cfa9012a2218b8238dd672ea5e1b2924775ac9ac24d2a75c238",
33 | "zh:a1e06ddda0b5ac48f7e7c7d59e1ab5a4073bbcf876c73c0299e4610ed53859dc",
34 | "zh:c37a97090f1a82222925d45d84483b2aa702ef7ab66532af6cbcfb567818b970",
35 | "zh:e4453fbebf90c53ca3323a92e7ca0f9961427d2f0ce0d2b65523cc04d5d999c2",
36 | "zh:e80a746921946d8b6761e77305b752ad188da60688cfd2059322875d363be5f5",
37 | "zh:fbdb892d9822ed0e4cb60f2fedbdbb556e4da0d88d3b942ae963ed6ff091e48f",
38 | "zh:fca01a623d90d0cad0843102f9b8b9fe0d3ff8244593bd817f126582b52dd694",
39 | ]
40 | }
41 |
42 | provider "registry.terraform.io/siderolabs/talos" {
43 | version = "0.2.0"
44 | constraints = "0.2.0"
45 | hashes = [
46 | "h1:N+FcjpsDPPs772sotGtqY1Ri2lsC5QaIkARf587F2O4=",
47 | "zh:06dd4c833ad51f103233be8ef31e044fd655def59354ca8f45140e52e8b671bc",
48 | "zh:0730971d0ad355f38c886916d66f19d34e02efb5bff526d9475d6a90e3c786ad",
49 | "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d",
50 | "zh:10200a37d5d80cec8fc3511c404cdc780bf0201ca97ed676767d3d9cb8fa8ee3",
51 | "zh:26008fa81883a22904af0647691c8528bf7fdfad6f3b7c19750b9bae3e5bcda8",
52 | "zh:2bb55b3c85208f657554bbc0125c48697d398a13290092ed2168bc25706ec4fc",
53 | "zh:31e4a25daef71d75bd3b34c1d79ff5db89ee3e5379e6d48bcd3d41d708d97e4c",
54 | "zh:39f22c5ceb87509c534402f88d37f0c20f26baa45bdae859108417bc48ce8af1",
55 | "zh:3ed7e5e8a33544c34dfdcbddd3a84fd942811956ed90fdb2dcefe765443c5b68",
56 | "zh:5c068a620da7b96d56b23e6320fb2229eb3d799d0e8db8ad1a9fe6f057c6b44b",
57 | "zh:716f335a415614e99bf4a57052c28f18c70c0bbc320c824ffede99dec8eb26ce",
58 | "zh:795062cfb66b030d0e485fddcfd3e7d1241dfb24adbe4096141a3eaef3e07d6f",
59 | "zh:c46b6fe91837a00617897b43ef71c2a11187b2ab14786daf0b10946e3c53cc54",
60 | "zh:c8ba76e121b61fd325c698c730b15bc0fdeaf694a4ee8a5e18bc76687d7acbf7",
61 | "zh:e21634db387c676c30160f856a3c6d9b4b1c2627eedc2d4dc87d05e6bfca91ae",
62 | ]
63 | }
64 |
65 | provider "registry.terraform.io/telmate/proxmox" {
66 | version = "2.9.14"
67 | constraints = "~> 2.9.14"
68 | hashes = [
69 | "h1:asZa5VKbWeCpLNv1JAutt5CdD27HaGFjxxcr6mvn8Ps=",
70 | "zh:0d049d33f705e5b814d30028770c084151218439424e99684ce31d7e26a720b5",
71 | "zh:20b1c64ed56d81de95f3f37b82b45b4654c0de26670c0e87a474c5cce13cd015",
72 | "zh:2946058abd1d8e50e475b9ec39781eb02576b40dbd80f4653fade4493a4514c6",
73 | "zh:29e50a25c456f040ce072f23ac57b5b82ebd3b916ca5ae6688332b5ec62adc4a",
74 | "zh:3612932306ce5f08db94868f526cbb8c56d0d3c6ebe1c11a83f92bbf94354296",
75 | "zh:42d1699b0abebaac82ea5a19f4393541d8bb2741bde204a8ac1028cdc29d1b14",
76 | "zh:5ffd5dc567262eb8aafdf2f6eac63f7f21361da9c5d75a3c36b479638a0001b0",
77 | "zh:6692ef323e3b89de99934ad731f6a1850525bf8142916ae28ea4e4048d73a787",
78 | "zh:a5afc98e9a4038516bb58e788cb77dea67a60dce780dfcd206d7373c5a56b776",
79 | "zh:bf902cded709d84fa27fbf91b589c241f2238a6c4924e4e479eebd74320b93a5",
80 | "zh:cab0e1e72c9cebcf669fc6f35ec28cb8ab2dffb0237afc8860aa40d23bf8a49f",
81 | "zh:e523b99a48beec83d9bc04b2d336266044f9f53514cefb652fe6768611847196",
82 | "zh:f593915e8a24829d322d2eaeedcb153328cf9042f0d84f66040dde1be70ede04",
83 | "zh:fba1aff541133e2129dfda0160369635ab48503d5c44b8407ce5922ecc15d0bd",
84 | ]
85 | }
86 |
--------------------------------------------------------------------------------
/terraform/auth.tf:
--------------------------------------------------------------------------------
1 | provider "proxmox" {
2 | pm_api_url = var.proxmox_host
3 | pm_api_token_id = var.proxmox_token_id
4 | pm_api_token_secret = var.proxmox_token_secret
5 | pm_tls_insecure = true
6 | pm_debug = true
7 | }
--------------------------------------------------------------------------------
/terraform/k8s.tf:
--------------------------------------------------------------------------------
1 | resource "local_sensitive_file" "talosconfig" {
2 | content = data.talos_client_configuration.cc.talos_config
3 | filename = "${path.module}/talosconfig"
4 | depends_on = [talos_machine_bootstrap.bootstrap]
5 | }
6 |
7 | resource "null_resource" "kubeconfig" {
8 | provisioner "local-exec" {
9 | command = "talosctl kubeconfig --force -n ${cidrhost(var.vpc_main_cidr, var.first_ip)} -e ${cidrhost(var.vpc_main_cidr, var.first_ip)} --talosconfig ${path.module}/talosconfig"
10 | }
11 | depends_on = [local_sensitive_file.talosconfig]
12 | }
13 |
14 | resource "null_resource" "kubeconfigapi" {
15 | provisioner "local-exec" {
16 | command = "kubectl --kubeconfig ~/.kube/config config set clusters.${var.cluster_name}.server https://${var.kubernetes["ipv4_vip"]}:6443"
17 | }
18 | depends_on = [null_resource.kubeconfig]
19 | }
--------------------------------------------------------------------------------
/terraform/master-nodes.tf:
--------------------------------------------------------------------------------
1 | # Control-plane VMs master-0..master-2, cloned from the Packer-built Talos image.
2 | resource "proxmox_vm_qemu" "controlplanes" {
3 |   count       = 3
4 |   name        = "master-${count.index}"
5 |   target_node = var.target_node_name
6 |   clone       = var.proxmox_image
7 | 
8 |   agent                  = 0 # QEMU guest agent disabled
9 |   define_connection_info = false
10 |   os_type                = "cloud-init"
11 |   qemu_os                = "l26"
12 |   # Sequential static IPs: first_ip, first_ip+1, ... — cidrhost() takes a number,
13 |   # so add count.index directly rather than interpolating it into a string.
14 |   ipconfig0               = "ip=${cidrhost(var.vpc_main_cidr, var.first_ip + count.index)}/24,gw=${var.gateway}"
15 |   cloudinit_cdrom_storage = var.proxmox_storage2
16 | 
17 |   onboot  = false
18 |   cpu     = "host,flags=+aes"
19 |   sockets = 1
20 |   cores   = 2
21 |   memory  = 4048 # NOTE(review): 4048 is an odd size — possibly meant 4096; confirm before changing
22 |   scsihw  = "virtio-scsi-pci"
23 | 
24 |   # Route the display to a serial socket (serial console instead of VGA).
25 |   vga {
26 |     memory = 0
27 |     type   = "serial0"
28 |   }
29 |   serial {
30 |     id   = 0
31 |     type = "socket"
32 |   }
33 | 
34 |   network {
35 |     model    = "virtio"
36 |     bridge   = "vmbr0"
37 |     firewall = false
38 |   }
39 | 
40 |   boot = "order=scsi0"
41 |   disk {
42 |     type    = "scsi"
43 |     storage = var.proxmox_storage2
44 |     size    = "32G"
45 |     cache   = "writethrough"
46 |     ssd     = 1
47 |     backup  = false
48 |   }
49 | 
50 |   # These attributes are mutated by Proxmox/cloud-init after creation;
51 |   # ignore them so subsequent plans stay clean.
52 |   lifecycle {
53 |     ignore_changes = [
54 |       boot,
55 |       network,
56 |       desc,
57 |       numa,
58 |       agent,
59 |       ipconfig0,
60 |       ipconfig1,
61 |       define_connection_info,
62 |     ]
63 |   }
64 | }
--------------------------------------------------------------------------------
/terraform/output.tf:
--------------------------------------------------------------------------------
1 | output "talosconfig" {
2 | value = data.talos_client_configuration.cc.talos_config
3 | sensitive = true
4 | }
5 |
6 | output "cp" {
7 | value = data.talos_machine_configuration.mc_1.machine_configuration
8 | sensitive = true
9 | }
10 |
11 | output "worker" {
12 | value = data.talos_machine_configuration.worker_1.machine_configuration
13 | sensitive = true
14 | }
--------------------------------------------------------------------------------
/terraform/talos.tf:
--------------------------------------------------------------------------------
1 | resource "talos_machine_secrets" "secrets" {}
2 |
3 | data "talos_machine_configuration" "mc_1" {
4 | cluster_name = var.cluster_name
5 | machine_type = "controlplane"
6 | cluster_endpoint = var.cluster_endpoint
7 | machine_secrets = talos_machine_secrets.secrets.machine_secrets
8 | kubernetes_version = var.k8s_version
9 | talos_version = var.talos_version
10 | docs = false
11 | examples = false
12 | config_patches = [
13 | templatefile("${path.module}/templates/controlplane.yaml.tpl",
14 | merge(var.kubernetes, {
15 | hostname = "master-0"
16 | ipv4_local = "${cidrhost(var.vpc_main_cidr, var.first_ip)}"
17 | identity = "${file(var.private_key_file_path)}"
18 | identitypub = "${file(var.public_key_file_path)}"
19 | knownhosts = var.known_hosts
20 | px_region = var.region
21 | px_node = var.target_node_name
22 | storageclass = var.proxmox_storage2
23 | storageclass-xfs = var.proxmox_storage1
24 | clusters = yamlencode({
25 | clusters = [
26 | {
27 | token_id = var.proxmox_token_id
28 | token_secret = var.proxmox_token_secret
29 | url = var.proxmox_host
30 | region = var.region
31 | },
32 | ]
33 | })
34 | pxcreds = yamlencode({
35 | clusters = {
36 | cluster-1 = {
37 | api_token_id = var.proxmox_token_id
38 | api_token_secret = var.proxmox_token_secret
39 | api_url = var.proxmox_host
40 | pool = var.pool
41 | }
42 | }
43 | })
44 | })
45 | )
46 | ]
47 | }
48 |
49 | data "talos_machine_configuration" "mc_2" {
50 | cluster_name = var.cluster_name
51 | machine_type = "controlplane"
52 | cluster_endpoint = var.cluster_endpoint
53 | machine_secrets = talos_machine_secrets.secrets.machine_secrets
54 | kubernetes_version = var.k8s_version
55 | talos_version = var.talos_version
56 | docs = false
57 | examples = false
58 | config_patches = [
59 | templatefile("${path.module}/templates/controlplane.yaml.tpl",
60 | merge(var.kubernetes, {
61 | hostname = "master-1"
62 | ipv4_local = "${cidrhost(var.vpc_main_cidr, var.first_ip + 1)}"
63 | identity = "${file(var.private_key_file_path)}"
64 | identitypub = "${file(var.public_key_file_path)}"
65 | knownhosts = var.known_hosts
66 | px_region = var.region
67 | px_node = var.target_node_name
68 | storageclass = var.proxmox_storage2
69 | storageclass-xfs = var.proxmox_storage1
70 | clusters = yamlencode({
71 | clusters = [
72 | {
73 | token_id = var.proxmox_token_id
74 | token_secret = var.proxmox_token_secret
75 | url = var.proxmox_host
76 | region = var.region
77 | },
78 | ]
79 | })
80 | pxcreds = yamlencode({
81 | clusters = {
82 | cluster-1 = {
83 | api_token_id = var.proxmox_token_id
84 | api_token_secret = var.proxmox_token_secret
85 | api_url = var.proxmox_host
86 | pool = var.pool
87 | }
88 | }
89 | })
90 | })
91 | )
92 | ]
93 | }
94 |
95 | data "talos_machine_configuration" "mc_3" {
96 | cluster_name = var.cluster_name
97 | machine_type = "controlplane"
98 | cluster_endpoint = var.cluster_endpoint
99 | machine_secrets = talos_machine_secrets.secrets.machine_secrets
100 | kubernetes_version = var.k8s_version
101 | talos_version = var.talos_version
102 | docs = false
103 | examples = false
104 | config_patches = [
105 | templatefile("${path.module}/templates/controlplane.yaml.tpl",
106 | merge(var.kubernetes, {
107 | hostname = "master-2"
108 | ipv4_local = "${cidrhost(var.vpc_main_cidr, var.first_ip + 2)}"
109 | identity = "${file(var.private_key_file_path)}"
110 | identitypub = "${file(var.public_key_file_path)}"
111 | knownhosts = var.known_hosts
112 | px_region = var.region
113 | px_node = var.target_node_name
114 | storageclass = var.proxmox_storage2
115 | storageclass-xfs = var.proxmox_storage1
116 | clusters = yamlencode({
117 | clusters = [
118 | {
119 | token_id = var.proxmox_token_id
120 | token_secret = var.proxmox_token_secret
121 | url = var.proxmox_host
122 | region = var.region
123 | },
124 | ]
125 | })
126 | pxcreds = yamlencode({
127 | clusters = {
128 | cluster-1 = {
129 | api_token_id = var.proxmox_token_id
130 | api_token_secret = var.proxmox_token_secret
131 | api_url = var.proxmox_host
132 | pool = var.pool
133 | }
134 | }
135 | })
136 | })
137 | )
138 | ]
139 | }
140 |
141 | data "talos_client_configuration" "cc" {
142 | cluster_name = var.cluster_name
143 | client_configuration = talos_machine_secrets.secrets.client_configuration
144 | nodes = [var.kubernetes["ipv4_vip"], "${cidrhost(var.vpc_main_cidr, var.first_ip)}"]
145 | endpoints = [var.kubernetes["ipv4_vip"], "${cidrhost(var.vpc_main_cidr, var.first_ip)}"]
146 | }
147 |
148 |
149 | resource "talos_machine_configuration_apply" "mc_apply_1" {
150 | depends_on = [
151 | proxmox_vm_qemu.controlplanes
152 | ]
153 | client_configuration = talos_machine_secrets.secrets.client_configuration
154 | machine_configuration_input = data.talos_machine_configuration.mc_1.machine_configuration
155 | node = cidrhost(var.vpc_main_cidr, var.first_ip)
156 | }
157 |
158 | resource "talos_machine_configuration_apply" "mc_apply_2" {
159 | depends_on = [
160 | proxmox_vm_qemu.controlplanes
161 | ]
162 | client_configuration = talos_machine_secrets.secrets.client_configuration
163 | machine_configuration_input = data.talos_machine_configuration.mc_2.machine_configuration
164 | node = cidrhost(var.vpc_main_cidr, var.first_ip + 1)
165 | }
166 |
167 | resource "talos_machine_configuration_apply" "mc_apply_3" {
168 | depends_on = [
169 | proxmox_vm_qemu.controlplanes
170 | ]
171 | client_configuration = talos_machine_secrets.secrets.client_configuration
172 | machine_configuration_input = data.talos_machine_configuration.mc_3.machine_configuration
173 | node = cidrhost(var.vpc_main_cidr, var.first_ip + 2)
174 | }
175 |
176 | resource "talos_machine_bootstrap" "bootstrap" {
177 | depends_on = [
178 | talos_machine_configuration_apply.mc_apply_1
179 | ]
180 | node = cidrhost(var.vpc_main_cidr, var.first_ip)
181 | client_configuration = talos_machine_secrets.secrets.client_configuration
182 | }
183 |
184 | data "talos_machine_configuration" "worker_1" {
185 | cluster_name = var.cluster_name
186 | machine_type = "worker"
187 | cluster_endpoint = var.cluster_endpoint
188 | machine_secrets = talos_machine_secrets.secrets.machine_secrets
189 | kubernetes_version = var.k8s_version
190 | talos_version = var.talos_version
191 | docs = false
192 | examples = false
193 | config_patches = [
194 | templatefile("${path.module}/templates/worker.yaml.tpl",
195 | merge(var.kubernetes, {
196 | hostname = "worker-0"
197 | ipv4_local = "${cidrhost(var.vpc_main_cidr, var.worker_first_ip)}"
198 | px_region = var.region
199 | px_node = var.target_node_name
200 | })
201 | )
202 | ]
203 | }
204 |
205 | data "talos_machine_configuration" "worker_2" {
206 | cluster_name = var.cluster_name
207 | machine_type = "worker"
208 | cluster_endpoint = var.cluster_endpoint
209 | machine_secrets = talos_machine_secrets.secrets.machine_secrets
210 | kubernetes_version = var.k8s_version
211 | talos_version = var.talos_version
212 | docs = false
213 | examples = false
214 | config_patches = [
215 | templatefile("${path.module}/templates/worker.yaml.tpl",
216 | merge(var.kubernetes, {
217 | hostname = "worker-1"
218 | ipv4_local = "${cidrhost(var.vpc_main_cidr, var.worker_first_ip + 1)}"
219 | px_region = var.region
220 | px_node = var.target_node_name
221 | })
222 | )
223 | ]
224 | }
225 |
226 | data "talos_machine_configuration" "worker_3" {
227 | cluster_name = var.cluster_name
228 | machine_type = "worker"
229 | cluster_endpoint = var.cluster_endpoint
230 | machine_secrets = talos_machine_secrets.secrets.machine_secrets
231 | kubernetes_version = var.k8s_version
232 | talos_version = var.talos_version
233 | docs = false
234 | examples = false
235 | config_patches = [
236 | templatefile("${path.module}/templates/worker.yaml.tpl",
237 | merge(var.kubernetes, {
238 | hostname = "worker-2"
239 | ipv4_local = "${cidrhost(var.vpc_main_cidr, var.worker_first_ip + 2)}"
240 | px_region = var.region
241 | px_node = var.target_node_name
242 | })
243 | )
244 | ]
245 | }
246 |
247 | resource "talos_machine_configuration_apply" "worker_apply_1" {
248 | depends_on = [
249 | proxmox_vm_qemu.workers
250 | ]
251 | client_configuration = talos_machine_secrets.secrets.client_configuration
252 | machine_configuration_input = data.talos_machine_configuration.worker_1.machine_configuration
253 | node = cidrhost(var.vpc_main_cidr, var.worker_first_ip)
254 | }
255 |
256 | resource "talos_machine_configuration_apply" "worker_apply_2" {
257 | depends_on = [
258 | proxmox_vm_qemu.workers
259 | ]
260 | client_configuration = talos_machine_secrets.secrets.client_configuration
261 | machine_configuration_input = data.talos_machine_configuration.worker_2.machine_configuration
262 | node = cidrhost(var.vpc_main_cidr, var.worker_first_ip + 1)
263 | }
264 |
265 | resource "talos_machine_configuration_apply" "worker_apply_3" {
266 | depends_on = [
267 | proxmox_vm_qemu.workers
268 | ]
269 | client_configuration = talos_machine_secrets.secrets.client_configuration
270 | machine_configuration_input = data.talos_machine_configuration.worker_3.machine_configuration
271 | node = cidrhost(var.vpc_main_cidr, var.worker_first_ip + 2)
272 | }
--------------------------------------------------------------------------------
/terraform/templates/controlplane.yaml.tpl:
--------------------------------------------------------------------------------
1 | machine:
2 | nodeLabels:
3 | node.cloudprovider.kubernetes.io/platform: proxmox
4 | topology.kubernetes.io/region: ${px_region}
5 | topology.kubernetes.io/zone: ${px_node}
6 | certSANs:
7 | - ${apiDomain}
8 | - ${ipv4_vip}
9 | - ${ipv4_local}
10 | kubelet:
11 | defaultRuntimeSeccompProfileEnabled: true # Enable container runtime default Seccomp profile.
12 | disableManifestsDirectory: true # The `disableManifestsDirectory` field configures the kubelet to get static pod manifests from the /etc/kubernetes/manifests directory.
13 | extraArgs:
14 | rotate-server-certificates: true
15 | clusterDNS:
16 | - 169.254.2.53
17 | - ${cidrhost(split(",",serviceSubnets)[0], 10)}
18 | network:
19 | hostname: "${hostname}"
20 | interfaces:
21 | - interface: eth0
22 | addresses:
23 | - ${ipv4_local}/24
24 | vip:
25 | ip: ${ipv4_vip}
26 | - interface: dummy0
27 | addresses:
28 | - 169.254.2.53/32
29 | extraHostEntries:
30 | - ip: 127.0.0.1
31 | aliases:
32 | - ${apiDomain}
33 | nameservers:
34 | - 1.1.1.1
35 | - 8.8.8.8
36 | kubespan:
37 | enabled: false
38 | install:
39 | disk: /dev/sda
40 | image: ghcr.io/siderolabs/installer:${talos-version}
41 | bootloader: true
42 | wipe: false
43 | sysctls:
44 | net.core.somaxconn: 65535
45 | net.core.netdev_max_backlog: 4096
46 | systemDiskEncryption:
47 | state:
48 | provider: luks2
49 | options:
50 | - no_read_workqueue
51 | - no_write_workqueue
52 | keys:
53 | - nodeID: {}
54 | slot: 0
55 | ephemeral:
56 | provider: luks2
57 | options:
58 | - no_read_workqueue
59 | - no_write_workqueue
60 | keys:
61 | - nodeID: {}
62 | slot: 0
63 | time:
64 | servers:
65 | - time.cloudflare.com
66 | # Features describe individual Talos features that can be switched on or off.
67 | features:
68 | rbac: true # Enable role-based access control (RBAC).
69 | stableHostname: true # Enable stable default hostname.
70 | apidCheckExtKeyUsage: true # Enable checks for extended key usage of client certificates in apid.
71 | kubernetesTalosAPIAccess:
72 | enabled: true
73 | allowedRoles:
74 | - os:reader
75 | allowedKubernetesNamespaces:
76 | - kube-system
77 | kernel:
78 | modules:
79 | - name: br_netfilter
80 | parameters:
81 | - nf_conntrack_max=131072
82 | registries:
83 | mirrors:
84 | docker.io:
85 | endpoints:
86 | - http://${registry-endpoint}/v2/proxy-docker.io
87 | overridePath: true
88 | ghcr.io:
89 | endpoints:
90 | - http://${registry-endpoint}/v2/proxy-ghcr.io
91 | overridePath: true
92 | gcr.io:
93 | endpoints:
94 | - http://${registry-endpoint}/v2/proxy-gcr.io
95 | overridePath: true
96 | registry.k8s.io:
97 | endpoints:
98 | - http://${registry-endpoint}/v2/proxy-registry.k8s.io
99 | overridePath: true
100 | quay.io:
101 | endpoints:
102 | - http://${registry-endpoint}/v2/proxy-quay.io
103 | overridePath: true
104 | cluster:
105 | controlPlane:
106 | endpoint: https://${apiDomain}:6443
107 | network:
108 | dnsDomain: ${domain}
109 | podSubnets: ${format("%#v",split(",",podSubnets))}
110 | serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
111 | cni:
112 | name: custom
113 | urls:
114 | - https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/main/manifests/talos/cilium.yaml
115 | proxy:
116 | disabled: true
117 | etcd:
118 | extraArgs:
119 | listen-metrics-urls: http://0.0.0.0:2381
120 | inlineManifests:
121 | - name: fluxcd
122 | contents: |-
123 | apiVersion: v1
124 | kind: Namespace
125 | metadata:
126 | name: flux-system
127 | labels:
128 | app.kubernetes.io/instance: flux-system
129 | app.kubernetes.io/part-of: flux
130 | pod-security.kubernetes.io/warn: restricted
131 | pod-security.kubernetes.io/warn-version: latest
132 | - name: cilium
133 | contents: |-
134 | apiVersion: v1
135 | kind: Namespace
136 | metadata:
137 | name: cilium
138 | labels:
139 | pod-security.kubernetes.io/enforce: "privileged"
140 | - name: d8-system
141 | contents: |-
142 | apiVersion: v1
143 | kind: Namespace
144 | metadata:
145 | name: d8-system
146 | labels:
147 | pod-security.kubernetes.io/enforce: "privileged"
148 | - name: external-dns
149 | contents: |-
150 | apiVersion: v1
151 | kind: Namespace
152 | metadata:
153 | name: external-dns
154 | - name: kasten
155 | contents: |-
156 | apiVersion: v1
157 | kind: Namespace
158 | metadata:
159 | name: kasten-io
160 | - name: cert-manager
161 | contents: |-
162 | apiVersion: v1
163 | kind: Namespace
164 | metadata:
165 | name: cert-manager
166 | - name: ingress-nginx
167 | contents: |-
168 | apiVersion: v1
169 | kind: Namespace
170 | metadata:
171 | name: ingress-nginx
172 | - name: flux-system-secret
173 | contents: |-
174 | apiVersion: v1
175 | kind: Secret
176 | type: Opaque
177 | metadata:
178 | name: github-creds
179 | namespace: flux-system
180 | data:
181 | identity: ${base64encode(identity)}
182 | identity.pub: ${base64encode(identitypub)}
183 | known_hosts: ${base64encode(knownhosts)}
184 | - name: proxmox-cloud-controller-manager
185 | contents: |-
186 | apiVersion: v1
187 | kind: Secret
188 | type: Opaque
189 | metadata:
190 | name: proxmox-cloud-controller-manager
191 | namespace: kube-system
192 | data:
193 | config.yaml: ${base64encode(clusters)}
194 | - name: proxmox-csi-plugin
195 | contents: |-
196 | apiVersion: v1
197 | kind: Secret
198 | type: Opaque
199 | metadata:
200 | name: proxmox-csi-plugin
201 | namespace: csi-proxmox
202 | data:
203 | config.yaml: ${base64encode(clusters)}
204 | - name: proxmox-operator-creds
205 | contents: |-
206 | apiVersion: v1
207 | kind: Secret
208 | type: Opaque
209 | metadata:
210 | name: proxmox-operator-creds
211 | namespace: kube-system
212 | data:
213 | config.yaml: ${base64encode(pxcreds)}
214 | - name: metallb-addresspool
215 | contents: |-
216 | apiVersion: metallb.io/v1beta1
217 | kind: IPAddressPool
218 | metadata:
219 | name: first-pool
220 | namespace: metallb-system
221 | spec:
222 | addresses:
223 | - ${metallb_l2_addressrange}
224 | - name: metallb-l2
225 | contents: |-
226 | apiVersion: metallb.io/v1beta1
227 | kind: L2Advertisement
228 | metadata:
229 | name: layer2
230 | namespace: metallb-system
231 | spec:
232 | ipAddressPools:
233 | - first-pool
234 | - name: flux-vars
235 | contents: |-
236 | apiVersion: v1
237 | kind: ConfigMap
238 | metadata:
239 | namespace: flux-system
240 | name: cluster-settings
241 | data:
242 | CACHE_REGISTRY: ${registry-endpoint}
243 | SIDERO_ENDPOINT: ${sidero-endpoint}
244 | STORAGE_CLASS: ${storageclass}
245 | STORAGE_CLASS_XFS: ${storageclass-xfs}
246 | CLUSTER_0_VIP: ${cluster-0-vip}
247 | externalCloudProvider:
248 | enabled: true
249 | manifests:
250 | - https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/main/manifests/talos/coredns-local.yaml
251 | - https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/main/manifests/talos/metallb-native.yaml
252 | - https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/main/manifests/talos/metrics-server.yaml
253 | - https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/main/manifests/talos/fluxcd.yaml
254 | - https://raw.githubusercontent.com/kubebn/talos-proxmox-kaas/main/manifests/talos/fluxcd-install.yaml
255 | - https://raw.githubusercontent.com/sergelogvinov/terraform-talos/main/_deployments/vars/talos-cloud-controller-manager-result.yaml
256 | - https://raw.githubusercontent.com/sergelogvinov/proxmox-cloud-controller-manager/main/docs/deploy/cloud-controller-manager-talos.yml
257 | - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.64.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml
258 | - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.64.1/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml
259 | - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.64.1/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml
260 | - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.64.1/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml
261 | - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.64.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
262 | - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.64.1/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml
263 | - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.64.1/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml
264 | - https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.64.1/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml
--------------------------------------------------------------------------------
/terraform/templates/worker.yaml.tpl:
--------------------------------------------------------------------------------
1 | machine:
2 | nodeLabels:
3 | node.cloudprovider.kubernetes.io/platform: proxmox
4 | topology.kubernetes.io/region: ${px_region}
5 | topology.kubernetes.io/zone: ${px_node}
6 | kubelet:
7 | defaultRuntimeSeccompProfileEnabled: true # Enable container runtime default Seccomp profile.
8 | disableManifestsDirectory: true # The `disableManifestsDirectory` field configures the kubelet to get static pod manifests from the /etc/kubernetes/manifests directory.
9 | extraArgs:
10 | cloud-provider: external
11 | rotate-server-certificates: true
12 | node-labels: "project.io/node-pool=worker"
13 | clusterDNS:
14 | - 169.254.2.53
15 | - ${cidrhost(split(",",serviceSubnets)[0], 10)}
16 | network:
17 | hostname: "${hostname}"
18 | interfaces:
19 | - interface: eth0
20 | addresses:
21 | - ${ipv4_local}/24
22 | - interface: dummy0
23 | addresses:
24 | - 169.254.2.53/32
25 | extraHostEntries:
26 | - ip: ${ipv4_vip}
27 | aliases:
28 | - ${apiDomain}
29 | nameservers:
30 | - 1.1.1.1
31 | - 8.8.8.8
32 | kubespan:
33 | enabled: false
34 | install:
35 | disk: /dev/sda
36 | image: ghcr.io/siderolabs/installer:${talos-version}
37 | bootloader: true
38 | wipe: false
39 | sysctls:
40 | net.core.somaxconn: 65535
41 | net.core.netdev_max_backlog: 4096
42 | systemDiskEncryption:
43 | state:
44 | provider: luks2
45 | options:
46 | - no_read_workqueue
47 | - no_write_workqueue
48 | keys:
49 | - nodeID: {}
50 | slot: 0
51 | ephemeral:
52 | provider: luks2
53 | options:
54 | - no_read_workqueue
55 | - no_write_workqueue
56 | keys:
57 | - nodeID: {}
58 | slot: 0
59 | time:
60 | servers:
61 | - time.cloudflare.com
62 | # Features describe individual Talos features that can be switched on or off.
63 | features:
64 | rbac: true # Enable role-based access control (RBAC).
65 | stableHostname: true # Enable stable default hostname.
66 | apidCheckExtKeyUsage: true # Enable checks for extended key usage of client certificates in apid.
67 | kernel:
68 | modules:
69 | - name: br_netfilter
70 | parameters:
71 | - nf_conntrack_max=131072
72 | registries:
73 | mirrors:
74 | docker.io:
75 | endpoints:
76 | - http://${registry-endpoint}/v2/proxy-docker.io
77 | overridePath: true
78 | ghcr.io:
79 | endpoints:
80 | - http://${registry-endpoint}/v2/proxy-ghcr.io
81 | overridePath: true
82 | gcr.io:
83 | endpoints:
84 | - http://${registry-endpoint}/v2/proxy-gcr.io
85 | overridePath: true
86 | registry.k8s.io:
87 | endpoints:
88 | - http://${registry-endpoint}/v2/proxy-registry.k8s.io
89 | overridePath: true
90 | quay.io:
91 | endpoints:
92 | - http://${registry-endpoint}/v2/proxy-quay.io
93 | overridePath: true
94 | cluster:
95 | controlPlane:
96 | endpoint: https://${apiDomain}:6443
97 | network:
98 | dnsDomain: ${domain}
99 | podSubnets: ${format("%#v",split(",",podSubnets))}
100 | serviceSubnets: ${format("%#v",split(",",serviceSubnets))}
101 | proxy:
102 | disabled: true
--------------------------------------------------------------------------------
/terraform/terraform.tfvars:
--------------------------------------------------------------------------------
1 | vpc_main_cidr = "10.1.1.0/24" # nodes subnet
2 | gateway = "10.1.1.1" # subnet gateway
3 | first_ip = "5" # first ip address of the master-0 node - 10.1.1.5
4 | worker_first_ip = "8" # first ip address of the worker-0 node - 10.1.1.8
5 | proxmox_storage1 = "vms" # proxmox storage lvm 1
6 | proxmox_storage2 = "vms2" # proxmox storage lvm 2
7 | k8s_version = "v1.27.1" # k8s version
8 | proxmox_image = "talos" # talos image created by packer
9 | talos_version = "v1.4" # talos version for machineconfig gen
10 | cluster_endpoint = "https://10.1.1.20:6443" # cluster endpoint to fetch via talosctl
11 | region = "cluster-1" # proxmox cluster name
12 | pool = "prod" # proxmox pool for vms
13 | private_key_file_path = "~/.ssh/id_rsa" # fluxcd git creds for ssh
14 | public_key_file_path = "~/.ssh/id_rsa.pub" # fluxcd git creds for ssh
15 | known_hosts = "github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg="
16 |
17 | kubernetes = {
18 | podSubnets = "10.244.0.0/16" # pod subnet
19 | serviceSubnets = "10.96.0.0/12" # svc subnet
20 | domain = "cluster.local" # cluster local kube-dns svc.cluster.local
21 | ipv4_vip = "10.1.1.20" # vip ip address
22 | apiDomain = "api.cluster.local" # cluster endpoint
23 | talos-version = "v1.4.1" # talos installer version
24 | metallb_l2_addressrange = "10.1.1.30-10.1.1.35" # metallb L2 configuration ip range
25 | registry-endpoint = "reg.weecodelab.nl" # set registry url for cache image pull
26 | # FLUX ConfigMap settings
27 | sidero-endpoint = "10.1.1.30"
28 | cluster-0-vip = "10.1.1.40"
29 | }
--------------------------------------------------------------------------------
/terraform/variables.tf:
--------------------------------------------------------------------------------
1 | variable "kubernetes" {
2 | type = map(string)
3 | default = {
4 | hostname = ""
5 | podSubnets = "10.244.0.0/16"
6 | serviceSubnets = "10.96.0.0/12"
7 | domain = "cluster.local"
8 | apiDomain = ""
9 | ipv4_local = ""
10 | ipv4_vip = ""
11 | talos-version = ""
12 | metallb_l2_addressrange = ""
13 | registry-endpoint = ""
14 | identity = ""
15 | identitypub = ""
16 | knownhosts = ""
17 | px_region = ""
18 | px_node = ""
19 | sidero-endpoint = ""
20 | storageclass = ""
21 | storageclass-xfs = ""
22 | cluster-0-vip = ""
23 | }
24 | }
25 |
26 | variable "cluster_name" {
27 | description = "A name to provide for the Talos cluster"
28 | type = string
29 | default = "admin"
30 | }
31 |
32 | variable "region" {
33 |   # Copy-pasted description fixed: this is not the cluster name.
34 |   description = "Proxmox cluster name; used as the CSI/CCM region and the topology.kubernetes.io/region node label"
35 |   type        = string
36 |   default     = "cluster-1"
37 | }
37 |
38 | variable "pool" {
39 |   # Copy-pasted description fixed: this is not the cluster name.
40 |   description = "Proxmox resource pool the VMs are assigned to"
41 |   type        = string
42 |   default     = "prod"
43 | }
43 |
44 | variable "cluster_endpoint" {
45 |   # Copy-pasted description fixed: this is not the cluster name.
46 |   description = "Kubernetes API endpoint of the Talos cluster, e.g. https://host:6443"
47 |   type        = string
48 |   default     = "https://api.domain.local:6443"
49 | }
49 |
50 | variable "talos_version" {
51 |   # Copy-pasted description fixed: this is not the cluster name.
52 |   description = "Talos version used when generating machine configurations"
53 |   type        = string
54 |   default     = "v1.4.0"
55 | }
55 |
56 | variable "k8s_version" {
57 |   # Copy-pasted description fixed: this is not the cluster name.
58 |   description = "Kubernetes version to deploy on the Talos nodes"
59 |   type        = string
60 |   default     = "v1.27.1"
61 | }
61 |
62 | variable "proxmox_host" {
63 | description = "Proxmox host"
64 | type = string
65 | default = "192.168.1.1"
66 | }
67 |
68 | variable "proxmox_image" {
69 | description = "Proxmox source image name"
70 | type = string
71 | default = "talos"
72 | }
73 |
74 | variable "proxmox_storage1" {
75 | description = "Proxmox storage name"
76 | type = string
77 | }
78 |
79 | variable "proxmox_storage2" {
80 | description = "Proxmox storage name"
81 | type = string
82 | }
83 |
84 | variable "proxmox_token_id" {
85 | description = "Proxmox token id"
86 | type = string
87 | }
88 |
89 | variable "proxmox_token_secret" {
90 | description = "Proxmox token secret"
91 | type = string
92 | }
93 |
94 | variable "first_ip" {
95 | type = string
96 | }
97 | variable "worker_first_ip" {
98 | type = string
99 | }
100 |
101 | variable "vpc_main_cidr" {
102 | description = "Local proxmox subnet"
103 | type = string
104 | default = "10.1.1.0/24"
105 | }
106 |
107 | variable "gateway" {
108 | description = "Default gateway for the cluster VMs"
109 | type = string
110 | default = "10.1.1.1"
111 | }
111 |
112 | variable "target_node_name" {
113 | description = "Proxmox node name"
114 | type = string
115 | }
116 |
117 | # NOTE(review): key/known_hosts usage is not visible in this file —
117 | # presumably SSH access to the Proxmox host; verify against the consumers.
117 | variable "private_key_file_path" {
118 | description = "Filesystem path to the private key file"
119 | type = string
120 | }
121 |
122 | variable "public_key_file_path" {
123 | description = "Filesystem path to the public key file"
124 | type = string
125 | }
126 |
127 | variable "known_hosts" {
128 | description = "known_hosts entry (or file contents) for host verification"
129 | type = string
130 | }
--------------------------------------------------------------------------------
/terraform/versions.tf:
--------------------------------------------------------------------------------
1 | # TF setup
2 |
3 | # Provider pins: Telmate/proxmox manages the Proxmox VMs (~> 2.9.14 allows
3 | # 2.9.x patch releases); siderolabs/talos (exact 0.2.0) handles Talos
3 | # machine configuration.
3 | terraform {
4 | required_providers {
5 | proxmox = {
6 | source = "Telmate/proxmox"
7 | version = "~> 2.9.14"
8 | }
9 | talos = {
10 | source = "siderolabs/talos"
11 | version = "0.2.0"
12 | }
13 | }
14 | }
--------------------------------------------------------------------------------
/terraform/worker-nodes.tf:
--------------------------------------------------------------------------------
1 | # Worker node VMs, cloned from the Talos template image.
1 | resource "proxmox_vm_qemu" "workers" {
2 | count = 3
3 | name = "worker-${count.index}"
4 | target_node = var.target_node_name
5 | clone = var.proxmox_image
6 |
7 | # Talos runs no qemu-guest-agent; cloud-init is only used to inject the IP.
7 | agent = 0
8 | define_connection_info = false
9 | os_type = "cloud-init"
10 | qemu_os = "l26"
11 | # Sequential static IPs: worker_first_ip + index within vpc_main_cidr.
11 | # (bare count.index instead of the deprecated "${count.index}" wrapper)
11 | ipconfig0 = "ip=${cidrhost(var.vpc_main_cidr, var.worker_first_ip + count.index)}/24,gw=${var.gateway}"
12 | cloudinit_cdrom_storage = var.proxmox_storage1
13 |
14 | onboot = false
15 | cpu = "host,flags=+aes"
16 | sockets = 1
17 | cores = 4
18 | # NOTE(review): 8048 MiB looks like a typo for 8192 (8 GiB) — confirm; left as-is.
18 | memory = 8048
19 | scsihw = "virtio-scsi-pci"
20 |
21 | # Serial console (Talos logs to serial0); no graphical VGA memory.
21 | vga {
22 | memory = 0
23 | type = "serial0"
24 | }
25 | serial {
26 | id = 0
27 | type = "socket"
28 | }
29 |
30 | network {
31 | model = "virtio"
32 | bridge = "vmbr0"
33 | firewall = false
34 | }
35 |
36 | boot = "order=scsi0"
37 | disk {
38 | type = "scsi"
39 | storage = var.proxmox_storage1
40 | size = "40G"
41 | cache = "writethrough"
42 | ssd = 1
43 | backup = false
44 | }
45 |
46 | # Attributes the provider/host rewrites after creation; ignore so plans stay clean.
46 | lifecycle {
47 | ignore_changes = [
48 | boot,
49 | network,
50 | desc,
51 | numa,
52 | agent,
53 | ipconfig0,
54 | ipconfig1,
55 | define_connection_info,
56 | ]
57 | }
58 | }
--------------------------------------------------------------------------------