├── .github ├── renovate.json └── workflows │ └── pull-request.yaml ├── .gitignore ├── LICENSE ├── Makefile ├── README.md ├── examples ├── omni │ ├── README.md │ ├── apps │ │ ├── argocd │ │ │ └── argocd │ │ │ │ ├── bootstrap-app-set.yaml │ │ │ │ ├── config-cmd-params.yaml │ │ │ │ ├── config.yaml │ │ │ │ ├── kustomization.yaml │ │ │ │ ├── namespace.yaml │ │ │ │ ├── params.yaml │ │ │ │ └── service.yaml │ │ ├── kube-system │ │ │ └── cilium │ │ │ │ ├── Chart.yaml │ │ │ │ └── values.yaml │ │ ├── monitoring │ │ │ ├── kube-prometheus-stack │ │ │ │ ├── Chart.yaml │ │ │ │ └── values.yaml │ │ │ └── namespace │ │ │ │ └── namespace.yaml │ │ └── rook-ceph │ │ │ ├── cluster │ │ │ └── Chart.yaml │ │ │ ├── namespace │ │ │ └── namespace.yaml │ │ │ └── operator │ │ │ └── Chart.yaml │ └── infra │ │ ├── cluster-template.yaml │ │ └── patches │ │ ├── argocd.yaml │ │ ├── cilium.yaml │ │ ├── cni.yaml │ │ └── monitoring.yaml ├── pulumi │ ├── azure │ │ ├── Pulumi.yaml │ │ ├── compute.go │ │ ├── go.mod │ │ ├── go.sum │ │ ├── main.go │ │ ├── network.go │ │ ├── storage.go │ │ └── talos.go │ ├── equinix-metal │ │ ├── Pulumi.yaml │ │ ├── README.md │ │ ├── go.mod │ │ ├── go.sum │ │ ├── main.go │ │ └── patches │ │ │ ├── common.yaml │ │ │ ├── ingress.yaml │ │ │ └── worker.yaml │ └── gcp │ │ ├── Pulumi.yaml │ │ ├── compute.go │ │ ├── go.mod │ │ ├── go.sum │ │ ├── main.go │ │ ├── network.go │ │ ├── storage.go │ │ └── talos.go └── terraform │ ├── advanced │ ├── .terraform.lock.hcl │ ├── README.md │ ├── main.tf │ ├── modules │ │ └── bootstrap_token │ │ │ ├── .terraform.lock.hcl │ │ │ ├── README.md │ │ │ ├── main.tf │ │ │ ├── outputs.tf │ │ │ └── versions.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── aws │ ├── .terraform.lock.hcl │ ├── README.md │ ├── main.tf │ ├── manifests │ │ └── ccm.yaml │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── azure │ ├── .terraform.lock.hcl │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── basic │ ├── 
.terraform.lock.hcl │ ├── README.md │ ├── files │ │ └── cp-scheduling.yaml │ ├── main.tf │ ├── outputs.tf │ ├── templates │ │ └── install-disk-and-hostname.yaml.tmpl │ ├── variables.tf │ └── versions.tf │ ├── digitalocean │ ├── .terraform.lock.hcl │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── equinix-metal │ ├── .terraform.lock.hcl │ ├── README.md │ ├── equinix-arm64.yaml │ ├── main.tf │ ├── outputs.tf │ ├── templates │ │ ├── installer.yaml.tmpl │ │ └── vip.yaml.tmpl │ ├── variables.tf │ └── versions.tf │ ├── gcp │ ├── .terraform.lock.hcl │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf │ ├── hcloud │ ├── README.md │ ├── packer │ │ └── hcloud_talosimage.pkr.hcl │ └── terraform │ │ ├── .terraform.lock.hcl │ │ ├── README.md │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── templates │ │ ├── controlplanepatch.yaml.tmpl │ │ └── workerpatch.yaml.tmpl │ │ ├── three_workers.tfvars │ │ ├── variables.tf │ │ └── versions.tf │ ├── oci │ ├── README.md │ ├── data.tf │ ├── iam.tf │ ├── image.tf │ ├── instances.tf │ ├── locals.tf │ ├── network.tf │ ├── outputs.tf │ ├── storage.tf │ ├── talos.tf │ ├── variables.tf │ └── versions.tf │ └── vultr │ ├── .terraform.lock.hcl │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── variables.tf │ └── versions.tf ├── go.work └── hack ├── backend-aws.tf └── backend.tf /.github/renovate.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://docs.renovatebot.com/renovate-schema.json", 3 | "extends": [ 4 | ":dependencyDashboard", 5 | ":gitSignOff", 6 | ":semanticCommitScopeDisabled", 7 | "schedule:earlyMondays" 8 | ], 9 | "prHeader": "Update Request | Renovate Bot", 10 | "regexManagers": [ 11 | { 12 | "fileMatch": [ 13 | "Makefile" 14 | ], 15 | "matchStrings": [ 16 | "# renovate: datasource=(?.*?)(?:\\s+extractVersion=(?.+?))?\\s+depName=(?.+?)\\s.*_VERSION\\s+\\?=\\s+(?.+)" 17 | ], 18 | "versioningTemplate": 
"{{#if versioning}}{{versioning}}{{else}}semver{{/if}}" 19 | }, 20 | { 21 | "fileMatch": [ 22 | "Makefile" 23 | ], 24 | "matchStrings": [ 25 | "# renovate: depName=(?.+?)\\s.*_VERSION\\s+:=\\s+(?.+)" 26 | ], 27 | "datasourceTemplate": "helm", 28 | "versioningTemplate": "semver", 29 | "registryUrlTemplate": "https://kubernetes.github.io/cloud-provider-aws" 30 | } 31 | ], 32 | "packageRules": [ 33 | { 34 | "matchPackagePatterns": [ 35 | "*" 36 | ], 37 | "matchDatasources": [ 38 | "helm" 39 | ], 40 | "groupName": "helm charts" 41 | }, 42 | { 43 | "matchPackagePatterns": [ 44 | "*" 45 | ], 46 | "matchDatasources": [ 47 | "git-refs", 48 | "git-tags", 49 | "github-tags", 50 | "github-releases" 51 | ], 52 | "groupName": "releases" 53 | } 54 | ] 55 | } 56 | -------------------------------------------------------------------------------- /.github/workflows/pull-request.yaml: -------------------------------------------------------------------------------- 1 | name: check-dirty 2 | on: 3 | pull_request: 4 | jobs: 5 | run-tests: 6 | runs-on: ubuntu-latest 7 | steps: 8 | - name: Checkout 9 | uses: actions/checkout@v3 10 | - name: Set up Helm 11 | uses: azure/setup-helm@v3 12 | - name: Set up Terraform 13 | uses: hashicorp/setup-terraform@v2 14 | with: 15 | terraform_wrapper: false 16 | - name: Setup TF docs 17 | uses: jaxxstorm/action-install-gh-release@v1.10.0 18 | with: 19 | repo: terraform-docs/terraform-docs 20 | - name: dirty-check 21 | run: | 22 | make check-dirty 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Pulumi 2 | **/Pulumi.*.yaml 3 | 4 | ### Packer ### 5 | # Cache objects 6 | packer_cache/ 7 | 8 | # Crash log 9 | crash.log 10 | 11 | # https://www.packer.io/guides/hcl/variables 12 | # Exclude all .pkrvars.hcl files, which are likely to contain sensitive data, 13 | # such as password, private keys, and other secrets. 
These should not be part of 14 | # version control as they are data points which are potentially sensitive and 15 | # subject to change depending on the environment. 16 | # 17 | *.pkrvars.hcl 18 | 19 | # For built boxes 20 | *.box 21 | 22 | ### Packer Patch ### 23 | # ignore temporary output files 24 | output-*/ 25 | 26 | ### Terraform ### 27 | # Local .terraform directories 28 | **/.terraform/* 29 | 30 | # .tfstate files 31 | *.tfstate 32 | *.tfstate.* 33 | 34 | # Crash log files 35 | crash.*.log 36 | 37 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as 38 | # password, private keys, and other secrets. These should not be part of version 39 | # control as they are data points which are potentially sensitive and subject 40 | # to change depending on the environment. 41 | #*.tfvars 42 | #*.tfvars.json 43 | 44 | # Ignore override files as they are usually used to override resources locally and so 45 | # are not checked in 46 | override.tf 47 | override.tf.json 48 | *_override.tf 49 | *_override.tf.json 50 | 51 | # Include override files you do wish to add to version control using negated pattern 52 | # !example_override.tf 53 | 54 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 55 | # example: *tfplan* 56 | 57 | # Ignore CLI configuration files 58 | .terraformrc 59 | terraform.rc 60 | 61 | # ignore sensitive terraform outputs 62 | talosconfig 63 | kubeconfig 64 | vars*.json 65 | patch*.yaml 66 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | TAG ?= $(shell git describe --tag --always --dirty) 2 | 3 | TF_PROJECTS := $(shell find examples/terraform/ -name '.terraform' -prune -o -name 'main.tf' -exec dirname {} \;) 4 | 5 | # renovate: datasource=helm depName=aws-cloud-controller-manager 6 | AWS_CCM_HELM_CHART_VERSION ?= 0.0.8 7 | # renovate: datasource=github-releases 
depName=kubernetes/cloud-provider-aws 8 | AWS_CCM_VERSION ?= v1.32.1 9 | 10 | .PHONY: fmt 11 | fmt: 12 | terraform fmt -recursive 13 | 14 | .PHONY: generate 15 | generate: aws-ccm tfdocs 16 | 17 | tfdocs: 18 | $(foreach project,$(TF_PROJECTS),terraform-docs markdown --output-file README.md --output-mode inject $(project);) 19 | 20 | upgrade-providers: 21 | $(foreach project,$(TF_PROJECTS),terraform -chdir=$(project) init -upgrade;) 22 | 23 | 24 | .PHONY: check-dirty 25 | check-dirty: fmt generate ## Verifies that source tree is not dirty 26 | @if test -n "`git status --porcelain`"; then echo "Source tree is dirty"; git status; exit 1 ; fi 27 | 28 | aws-ccm: 29 | helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws 30 | helm repo update 31 | helm template --version $(AWS_CCM_HELM_CHART_VERSION) aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --set args="{--v=2,--cloud-provider=aws,--configure-cloud-routes=false}" --set image.tag=$(AWS_CCM_VERSION) > examples/terraform/aws/manifests/ccm.yaml 32 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # contrib 2 | 3 | This repo contains example setup of Talos/Sidero on different environments. 4 | 5 | The examples provided are maintained on a best effort basis and please file issues or create PR's to fix. 
6 | 7 | ## Pulumi Examples 8 | 9 | - [azure](./examples/pulumi/azure) 10 | - [equinix metal](./examples/pulumi/equinix-metal) 11 | - [google cloud](./examples/pulumi/gcp) 12 | 13 | ## Terraform Examples 14 | 15 | - [hcloud](./examples/terraform/hcloud) 16 | - [vultr](./examples/terraform/vultr) 17 | - [AWS](./examples/terraform/aws) 18 | - [Azure](./examples/terraform/azure) 19 | 20 | ## Omni Example 21 | 22 | - [Omni](./examples/omni) 23 | -------------------------------------------------------------------------------- /examples/omni/README.md: -------------------------------------------------------------------------------- 1 | # Siderolabs Omni Example 2 | 3 | This example shows how to manage a Talos Kubernetes cluster with Sidero Labs' Omni. 4 | It deploys a Talos Kubernetes cluster using Omni, with the following tooling: 5 | 6 | * Cilium for CNI & Hubble UI for observability 7 | * ArgoCD for application management 8 | * Rook Ceph for persistent volume management 9 | * Prometheus & Grafana for monitoring 10 | 11 | ## Prereqs 12 | 13 | An [Omni account](https://signup.siderolabs.io/), and some machines registered to it. 14 | How the machines are started and joined to the Omni instance is not covered in this README, but [documentation is available](https://omni.siderolabs.com/tutorials/getting_started/). 15 | With the default configuration, a minimum of 6 machines is required, 3 of which need additional block devices for persistent storage. 16 | 17 | This example uses [Machine Classes](https://omni.siderolabs.com/how-to-guides/create-a-machine-class) called `omni-contrib-controlplane` and `omni-contrib-workers`. 18 | How they are defined is entirely dependent on the infrastructure available; they would need to be configured on the Omni instance. 19 | 20 | Lastly, `omnictl`, the Omni CLI tool, would also be needed. 21 | See the [How-to](https://omni.siderolabs.com/how-to-guides/install-and-configure-omnictl) on how to obtain and configure it.
22 | 23 | ## Usage 24 | 25 | Once the required machines are registered to Omni and machine classes have been configured, simply run 26 | 27 | ```bash 28 | omnictl cluster template sync --file cluster-template.yaml 29 | ``` 30 | 31 | Omni will then begin to allocate your machines, install Talos, and configure and bootstrap the cluster. 32 | 33 | This setup makes use of the [Omni Workload Proxy](https://omni.siderolabs.com/how-to-guides/expose-an-http-service-from-a-cluster) feature, 34 | which allows access to the HTTP front end services *without* the need of a separate external Ingress Controller or LoadBalancer. 35 | Additionally, it leverages Omni's built-in authentication to protect the services, even those services that don't support authentication themselves. 36 | 37 | ## Applications 38 | 39 | Applications are managed by ArgoCD, and are defined in the `apps` directory. 40 | The first subdirectory defines the namespace and the second defines the application name. 41 | Applications can be made of Helm charts, Kustomize definitions, or just Kubernetes manifest files in YAML format. 42 | 43 | ## Extending 44 | 45 | 1. Commit the contents from the `omni` directory to a new repository 2. Configure ArgoCD to use that repository [bootstrap-app-set.yaml](apps/argocd/argocd/bootstrap-app-set.yaml) 46 | 3. Regenerate the ArgoCD bootstrap cluster manifest patch [argocd.yaml](infra/patches/argocd.yaml) (instructions can be found at the top of that file). 47 | 4. Commit and push these changes to a hosted git repository the Omni instance has access to. 48 | 5. Create a cluster with Omni as described above.
50 | -------------------------------------------------------------------------------- /examples/omni/apps/argocd/argocd/bootstrap-app-set.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: argoproj.io/v1alpha1 2 | kind: ApplicationSet 3 | metadata: 4 | name: bootstrap 5 | spec: 6 | generators: 7 | - matrix: 8 | generators: 9 | - list: 10 | elements: 11 | - repoURL: https://github.com/siderolabs/contrib.git 12 | revision: HEAD 13 | repoPath: examples/omni/apps/*/* 14 | - git: 15 | repoURL: '{{ repoURL }}' 16 | revision: '{{ revision }}' 17 | directories: 18 | - path: '{{ repoPath }}' 19 | syncPolicy: 20 | preserveResourcesOnDeletion: true 21 | template: 22 | metadata: 23 | name: '{{ path[3] }}-{{ path.basenameNormalized }}' 24 | namespace: argocd 25 | spec: 26 | project: default 27 | source: 28 | repoURL: '{{ repoURL }}' 29 | targetRevision: '{{ revision }}' 30 | path: '{{ path }}' 31 | destination: 32 | server: https://kubernetes.default.svc 33 | namespace: '{{ path[3] }}' 34 | syncPolicy: 35 | automated: 36 | prune: false 37 | syncOptions: 38 | - CreateNamespace=true 39 | - ServerSideApply=true 40 | - ApplyOutOfSyncOnly=true 41 | - RespectIgnoreDifferences=true 42 | - SkipDryRunOnMissingResource=true 43 | retry: 44 | limit: -1 # Infinite retries 45 | backoff: 46 | duration: 30s 47 | factor: 2 48 | maxDuration: 5m 49 | ignoreDifferences: 50 | # Cilium generated certs 51 | - name: cilium-ca 52 | kind: Secret 53 | namespace: kube-system 54 | jsonPointers: 55 | - /data/ca.crt 56 | - /data/ca.key 57 | - name: hubble-server-certs 58 | kind: Secret 59 | namespace: kube-system 60 | jsonPointers: 61 | - /data/ca.crt 62 | - /data/tls.crt 63 | - /data/tls.key 64 | - name: hubble-relay-client-certs 65 | kind: Secret 66 | namespace: kube-system 67 | jsonPointers: 68 | - /data/ca.crt 69 | - /data/tls.crt 70 | - /data/tls.key 71 | -------------------------------------------------------------------------------- 
/examples/omni/apps/argocd/argocd/config-cmd-params.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: argocd-cmd-params-cm 5 | data: 6 | # Use Omni Workload Proxying HTTP-only 7 | server.insecure: "true" 8 | server.disable.auth: "true" 9 | -------------------------------------------------------------------------------- /examples/omni/apps/argocd/argocd/config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: argocd-cm 5 | data: 6 | application.resourceTrackingMethod: annotation 7 | -------------------------------------------------------------------------------- /examples/omni/apps/argocd/argocd/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: argocd 5 | resources: 6 | - namespace.yaml 7 | - github.com/argoproj/argo-cd/manifests/cluster-install?ref=v2.9.3 8 | - bootstrap-app-set.yaml 9 | 10 | patches: 11 | - path: config.yaml 12 | - path: params.yaml 13 | - path: service.yaml 14 | -------------------------------------------------------------------------------- /examples/omni/apps/argocd/argocd/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: argocd 5 | -------------------------------------------------------------------------------- /examples/omni/apps/argocd/argocd/params.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: argocd-cmd-params-cm 5 | data: 6 | # Use Omni Workload Proxying HTTP-only 7 | server.insecure: "true" 8 | server.disable.auth: "true" 9 | server.x.frame.options: "" 10 | 
-------------------------------------------------------------------------------- /examples/omni/apps/argocd/argocd/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: argocd-server 5 | annotations: 6 | # Enable Omni Workload Proxying for this service 7 | omni-kube-service-exposer.sidero.dev/port: "50081" 8 | omni-kube-service-exposer.sidero.dev/label: ArgoCD 9 | -------------------------------------------------------------------------------- /examples/omni/apps/kube-system/cilium/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: cilium 3 | version: v1.14.5 4 | dependencies: 5 | - name: cilium 6 | version: v1.14.5 7 | repository: https://helm.cilium.io 8 | -------------------------------------------------------------------------------- /examples/omni/apps/kube-system/cilium/values.yaml: -------------------------------------------------------------------------------- 1 | cilium: 2 | kubeProxyReplacement: true 3 | ipam: 4 | mode: kubernetes 5 | securityContext: 6 | capabilities: 7 | ciliumAgent: 8 | - CHOWN 9 | - KILL 10 | - NET_ADMIN 11 | - NET_RAW 12 | - IPC_LOCK 13 | - SYS_ADMIN 14 | - SYS_RESOURCE 15 | - DAC_OVERRIDE 16 | - FOWNER 17 | - SETUID 18 | - SETGID 19 | cleanCiliumState: 20 | - NET_ADMIN 21 | - SYS_ADMIN 22 | - SYS_RESOURCE 23 | cgroup: 24 | hostRoot: /sys/fs/cgroup 25 | autoMount: 26 | enabled: false 27 | # Enable Cilium Ingress Controller 28 | ingressController: 29 | enabled: true 30 | # Use KubePrism to access cluster API 31 | k8sServiceHost: localhost 32 | k8sServicePort: 7445 33 | # Enable Hubble 34 | hubble: 35 | relay: 36 | enabled: true 37 | ui: 38 | enabled: true 39 | service: 40 | annotations: 41 | # Enable Omni Workload Proxying for this service 42 | omni-kube-service-exposer.sidero.dev/port: "50080" 43 | omni-kube-service-exposer.sidero.dev/label: Hubble 44 | 
-------------------------------------------------------------------------------- /examples/omni/apps/monitoring/kube-prometheus-stack/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: kube-prometheus-stack 3 | version: 56.0.0 4 | dependencies: 5 | - name: kube-prometheus-stack 6 | version: 56.0.0 7 | repository: https://prometheus-community.github.io/helm-charts 8 | -------------------------------------------------------------------------------- /examples/omni/apps/monitoring/kube-prometheus-stack/values.yaml: -------------------------------------------------------------------------------- 1 | kube-prometheus-stack: 2 | # NOTE: These must be set manually to the control plane IP addresses 3 | #kubeEtcd: 4 | # endpoints: 5 | # - x.y.z.a 6 | # - x.y.z.b 7 | # - x.y.z.c 8 | kubeControllerManager: 9 | service: 10 | selector: 11 | k8s-app: kube-controller-manager 12 | kubeScheduler: 13 | service: 14 | selector: 15 | k8s-app: kube-scheduler 16 | alertmanager: 17 | alertmanagerspec: 18 | storage: 19 | volumeClaimTemplate: 20 | spec: 21 | accessModes: 22 | - ReadWriteOnce 23 | resources: 24 | requests: 25 | storage: 10Gi 26 | prometheus: 27 | prometheusSpec: 28 | storageSpec: 29 | volumeClaimTemplate: 30 | spec: 31 | accessModes: 32 | - ReadWriteOnce 33 | resources: 34 | requests: 35 | storage: 150Gi 36 | grafana: 37 | grafana.ini: 38 | auth: 39 | disable_login_form: true 40 | disable_signout_menu: true 41 | auth.basic: 42 | enabled: false 43 | auth.anonymous: 44 | enabled: true 45 | # Allow Omni Workload Proxying for this service 46 | service: 47 | annotations: 48 | omni-kube-service-exposer.sidero.dev/port: "50082" 49 | omni-kube-service-exposer.sidero.dev/label: Grafana 50 | -------------------------------------------------------------------------------- /examples/omni/apps/monitoring/namespace/namespace.yaml: -------------------------------------------------------------------------------- 1 | 
apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: monitoring 5 | labels: 6 | pod-security.kubernetes.io/warn: privileged 7 | pod-security.kubernetes.io/enforce: privileged 8 | -------------------------------------------------------------------------------- /examples/omni/apps/rook-ceph/cluster/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: rook-ceph-cluster 3 | version: v1.13.2 4 | dependencies: 5 | - name: rook-ceph-cluster 6 | version: v1.13.2 7 | repository: https://charts.rook.io/release 8 | -------------------------------------------------------------------------------- /examples/omni/apps/rook-ceph/namespace/namespace.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: rook-ceph 5 | labels: 6 | pod-security.kubernetes.io/warn: privileged 7 | pod-security.kubernetes.io/enforce: privileged 8 | -------------------------------------------------------------------------------- /examples/omni/apps/rook-ceph/operator/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: rook-ceph 3 | version: v1.13.2 4 | dependencies: 5 | - name: rook-ceph 6 | version: v1.13.2 7 | repository: https://charts.rook.io/release 8 | -------------------------------------------------------------------------------- /examples/omni/infra/cluster-template.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | name: omni-contrib 3 | talos: 4 | version: v1.6.1 5 | kubernetes: 6 | version: 1.28.6 7 | features: 8 | enableWorkloadProxy: true 9 | patches: 10 | - name: cni 11 | file: patches/cni.yaml 12 | --- 13 | kind: ControlPlane 14 | machineClass: 15 | name: omni-contrib-controlplane 16 | size: 3 17 | patches: 18 | - name: cilium 19 | file: patches/cilium.yaml 20 | - name: argocd 21 | file: 
patches/argocd.yaml 22 | - name: monitoring 23 | file: patches/monitoring.yaml 24 | --- 25 | kind: Workers 26 | name: workers 27 | machineClass: 28 | name: omni-contrib-workers 29 | size: unlimited 30 | -------------------------------------------------------------------------------- /examples/omni/infra/patches/cni.yaml: -------------------------------------------------------------------------------- 1 | cluster: 2 | network: 3 | cni: 4 | name: none 5 | proxy: 6 | disabled: true 7 | -------------------------------------------------------------------------------- /examples/omni/infra/patches/monitoring.yaml: -------------------------------------------------------------------------------- 1 | cluster: 2 | apiServer: 3 | extraArgs: 4 | bind-address: 0.0.0.0 5 | controllerManager: 6 | extraArgs: 7 | bind-address: 0.0.0.0 8 | etcd: 9 | extraArgs: 10 | listen-metrics-urls: http://0.0.0.0:2381 11 | scheduler: 12 | extraArgs: 13 | bind-address: 0.0.0.0 14 | -------------------------------------------------------------------------------- /examples/pulumi/azure/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: azure 2 | runtime: go 3 | description: Talos on Azure 4 | -------------------------------------------------------------------------------- /examples/pulumi/azure/compute.go: -------------------------------------------------------------------------------- 1 | // compute.go holds functions specific to Azure compute resources 2 | package main 3 | 4 | import ( 5 | "archive/tar" 6 | "compress/gzip" 7 | "fmt" 8 | "io" 9 | "net/http" 10 | "os" 11 | 12 | "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/compute" 13 | "github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage" 14 | "github.com/pulumi/pulumi/sdk/v3/go/pulumi" 15 | ) 16 | 17 | func (ri *ResourceInfo) createImage(ctx *pulumi.Context) error { 18 | // TODO: be intelligent about whether we actually dl this 19 | err := downloadVHD() 20 | if err != nil { 21 | return err 
22 | } 23 | 24 | blob, err := storage.NewBlob( 25 | ctx, 26 | "talos-"+TalosVersion+".vhd", 27 | &storage.BlobArgs{ 28 | StorageAccountName: ri.StorageAcct.Name, 29 | StorageContainerName: ri.StorageContainer.Name, 30 | Type: pulumi.String("Page"), 31 | Source: pulumi.NewFileAsset(LocalVHDName), 32 | Name: pulumi.String("talos-" + TalosVersion + ".vhd"), 33 | }, 34 | ) 35 | if err != nil { 36 | return err 37 | } 38 | 39 | img, err := compute.NewImage( 40 | ctx, 41 | "talos-"+TalosVersion, 42 | &compute.ImageArgs{ 43 | ResourceGroupName: ri.ResourceGroup.Name, 44 | OsDisk: &compute.ImageOsDiskArgs{ 45 | OsType: pulumi.String("Linux"), 46 | OsState: pulumi.String("Generalized"), 47 | BlobUri: blob.Url, 48 | SizeGb: pulumi.Int(10), 49 | }, 50 | Name: pulumi.String("talos-" + TalosVersion), 51 | }) 52 | if err != nil { 53 | return err 54 | } 55 | 56 | ri.Image = img 57 | 58 | return nil 59 | } 60 | 61 | func (ri *ResourceInfo) createCPVMs(ctx *pulumi.Context) error { 62 | for i := 0; i < ControlPlaneNodesCount; i++ { 63 | _, err := compute.NewVirtualMachine( 64 | ctx, 65 | fmt.Sprintf("%s-cp-%d", ClusterName, i), 66 | &compute.VirtualMachineArgs{ 67 | NetworkInterfaceIds: pulumi.StringArray{ 68 | ri.CPNics[fmt.Sprintf("%s-cp-nic-%d", ClusterName, i)].ID(), 69 | }, 70 | ResourceGroupName: ri.ResourceGroup.Name, 71 | VmSize: pulumi.String("Standard_DS1_v2"), 72 | StorageImageReference: &compute.VirtualMachineStorageImageReferenceArgs{ 73 | Id: ri.Image.ID(), 74 | }, 75 | StorageOsDisk: &compute.VirtualMachineStorageOsDiskArgs{ 76 | Name: pulumi.String(fmt.Sprintf("%s-cp-disk-%d", ClusterName, i)), 77 | Caching: pulumi.String("ReadWrite"), 78 | CreateOption: pulumi.String("FromImage"), 79 | ManagedDiskType: pulumi.String("Standard_LRS"), 80 | }, 81 | OsProfile: &compute.VirtualMachineOsProfileArgs{ 82 | ComputerName: pulumi.String(fmt.Sprintf("%s-cp-%d", ClusterName, i)), 83 | AdminUsername: pulumi.String("testadmin"), 84 | AdminPassword: pulumi.String("Password1234!"), 
85 | CustomData: ri.TalosClusterConfig.ControlplaneConfig, 86 | }, 87 | OsProfileLinuxConfig: &compute.VirtualMachineOsProfileLinuxConfigArgs{ 88 | DisablePasswordAuthentication: pulumi.Bool(false), 89 | }, 90 | }, 91 | ) 92 | if err != nil { 93 | return err 94 | } 95 | } 96 | 97 | return nil 98 | } 99 | 100 | func (ri *ResourceInfo) createWorkerVMs(ctx *pulumi.Context) error { 101 | for i := 0; i < WorkerNodesCount; i++ { 102 | _, err := compute.NewVirtualMachine( 103 | ctx, 104 | fmt.Sprintf("%s-worker-%d", ClusterName, i), 105 | &compute.VirtualMachineArgs{ 106 | NetworkInterfaceIds: pulumi.StringArray{ 107 | ri.WorkerNics[fmt.Sprintf("%s-worker-nic-%d", ClusterName, i)].ID(), 108 | }, 109 | ResourceGroupName: ri.ResourceGroup.Name, 110 | VmSize: pulumi.String("Standard_DS1_v2"), 111 | StorageImageReference: &compute.VirtualMachineStorageImageReferenceArgs{ 112 | Id: ri.Image.ID(), 113 | }, 114 | StorageOsDisk: &compute.VirtualMachineStorageOsDiskArgs{ 115 | Name: pulumi.String(fmt.Sprintf("%s-worker-disk-%d", ClusterName, i)), 116 | Caching: pulumi.String("ReadWrite"), 117 | CreateOption: pulumi.String("FromImage"), 118 | ManagedDiskType: pulumi.String("Standard_LRS"), 119 | }, 120 | OsProfile: &compute.VirtualMachineOsProfileArgs{ 121 | ComputerName: pulumi.String(fmt.Sprintf("%s-worker-%d", ClusterName, i)), 122 | AdminUsername: pulumi.String("testadmin"), 123 | AdminPassword: pulumi.String("Password1234!"), 124 | CustomData: ri.TalosClusterConfig.WorkerConfig, 125 | }, 126 | OsProfileLinuxConfig: &compute.VirtualMachineOsProfileLinuxConfigArgs{ 127 | DisablePasswordAuthentication: pulumi.Bool(false), 128 | }, 129 | }, 130 | ) 131 | if err != nil { 132 | return err 133 | } 134 | } 135 | 136 | return nil 137 | } 138 | 139 | func downloadVHD() error { 140 | // Download VHD tar.gz from our releases 141 | out, err := os.Create("azure-amd64.tar.gz") 142 | if err != nil { 143 | return err 144 | } 145 | 146 | //TODO: strings.Join() this or something better 147 | 
resp, err := http.Get("https://github.com/siderolabs/talos/releases/download/" + TalosVersion + "/azure-amd64.tar.gz") 148 | if err != nil { 149 | return err 150 | } 151 | defer resp.Body.Close() 152 | 153 | // Write the body to file 154 | _, err = io.Copy(out, resp.Body) 155 | if err != nil { 156 | return err 157 | } 158 | 159 | out.Close() 160 | 161 | // Extract file 162 | file, err := os.Open("azure-amd64.tar.gz") 163 | if err != nil { 164 | return err 165 | } 166 | 167 | defer file.Close() 168 | 169 | var fileReader io.ReadCloser = file 170 | 171 | fileReader, err = gzip.NewReader(file) 172 | if err != nil { 173 | return err 174 | } 175 | 176 | defer fileReader.Close() 177 | 178 | tarBallReader := tar.NewReader(fileReader) 179 | 180 | header, err := tarBallReader.Next() 181 | if err != nil { 182 | return err 183 | } 184 | 185 | writer, err := os.Create(LocalVHDName) 186 | if err != nil { 187 | return err 188 | } 189 | 190 | defer writer.Close() 191 | 192 | io.Copy(writer, tarBallReader) 193 | 194 | err = os.Chmod(LocalVHDName, os.FileMode(header.Mode)) 195 | if err != nil { 196 | return err 197 | } 198 | 199 | return nil 200 | } 201 | -------------------------------------------------------------------------------- /examples/pulumi/azure/go.mod: -------------------------------------------------------------------------------- 1 | module azure 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/pulumi/pulumi-azure/sdk/v5 v5.3.0 7 | github.com/pulumi/pulumi/sdk/v3 v3.33.2 8 | github.com/siderolabs/pulumi-provider-talos/sdk v0.0.0-20220606192808-8b9d2507e9f3 9 | ) 10 | 11 | require ( 12 | github.com/Microsoft/go-winio v0.5.2 // indirect 13 | github.com/blang/semver v3.5.1+incompatible // indirect 14 | github.com/cheggaaa/pb v1.0.29 // indirect 15 | github.com/djherbis/times v1.5.0 // indirect 16 | github.com/emirpasic/gods v1.18.1 // indirect 17 | github.com/gofrs/uuid v4.2.0+incompatible // indirect 18 | github.com/gogo/protobuf v1.3.2 // indirect 19 | 
github.com/golang/glog v1.0.0 // indirect 20 | github.com/golang/protobuf v1.5.2 // indirect 21 | github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect 22 | github.com/hashicorp/errwrap v1.1.0 // indirect 23 | github.com/hashicorp/go-multierror v1.1.1 // indirect 24 | github.com/inconshreveable/mousetrap v1.0.0 // indirect 25 | github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect 26 | github.com/kevinburke/ssh_config v1.2.0 // indirect 27 | github.com/mattn/go-runewidth v0.0.13 // indirect 28 | github.com/mitchellh/go-homedir v1.1.0 // indirect 29 | github.com/mitchellh/go-ps v1.0.0 // indirect 30 | github.com/opentracing/basictracer-go v1.1.0 // indirect 31 | github.com/opentracing/opentracing-go v1.2.0 // indirect 32 | github.com/pkg/errors v0.9.1 // indirect 33 | github.com/pkg/term v1.1.0 // indirect 34 | github.com/rivo/uniseg v0.2.0 // indirect 35 | github.com/rogpeppe/go-internal v1.8.1 // indirect 36 | github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect 37 | github.com/sergi/go-diff v1.2.0 // indirect 38 | github.com/spf13/cast v1.3.1 // indirect 39 | github.com/spf13/cobra v1.4.0 // indirect 40 | github.com/spf13/pflag v1.0.5 // indirect 41 | github.com/src-d/gcfg v1.4.0 // indirect 42 | github.com/texttheater/golang-levenshtein v1.0.1 // indirect 43 | github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 // indirect 44 | github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect 45 | github.com/uber/jaeger-lib v2.4.1+incompatible // indirect 46 | github.com/xanzy/ssh-agent v0.3.1 // indirect 47 | go.uber.org/atomic v1.9.0 // indirect 48 | golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect 49 | golang.org/x/net v0.0.0-20220526153639-5463443f8c37 // indirect 50 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect 51 | golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect 52 | golang.org/x/text v0.3.7 // indirect 53 
	google.golang.org/genproto v0.0.0-20220527130721-00d5c0f3be58 // indirect
	google.golang.org/grpc v1.46.2 // indirect
	google.golang.org/protobuf v1.28.0 // indirect
	gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect
	gopkg.in/src-d/go-git.v4 v4.13.1 // indirect
	gopkg.in/warnings.v0 v0.1.2 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	sourcegraph.com/sourcegraph/appdash v0.0.0-20211028080628-e2786a622600 // indirect
)
--------------------------------------------------------------------------------
/examples/pulumi/azure/main.go:
--------------------------------------------------------------------------------
package main

import (
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/compute"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/core"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/lb"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/network"
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi/config"
	"github.com/siderolabs/pulumi-provider-talos/sdk/go/talos"
)

// Cluster-wide settings for this example deployment.
const (
	ClusterName  = "talos"
	TalosVersion = "v1.0.4"

	ControlPlaneNodesCount = 3
	WorkerNodesCount       = 2

	LocalVHDName = "disk.vhd"
)

// ResourceInfo holds pointers to the various resources that
// need to be passed around to each other.
type ResourceInfo struct {
	Location string

	ResourceGroup *core.ResourceGroup

	StorageAcct      *storage.Account
	StorageContainer *storage.Container

	Image *compute.Image

	Subnet          *network.Subnet
	CPSecurityGroup *network.NetworkSecurityGroup
	CPNics          map[string]*network.NetworkInterface
	// CPPubIPs is more of a convenience here than anything.
	// Keeps us from having to go through each nic, find the IP resource,
	// look that up then get the IP address.
	CPPubIPs            pulumi.StringArray
	WorkerSecurityGroup *network.NetworkSecurityGroup
	WorkerNics          map[string]*network.NetworkInterface
	LBPubIP             *network.PublicIp
	LBBackendPool       *lb.BackendAddressPool

	TalosClusterConfig  *talos.ClusterConfig
	TalosClusterSecrets *talos.ClusterSecrets
}

// createRG creates the Azure resource group that every other resource in
// this example lives in, and records it on ri.ResourceGroup.
func (ri *ResourceInfo) createRG(ctx *pulumi.Context) error {
	resourceGroup, err := core.NewResourceGroup(
		ctx,
		ClusterName+"-rg",
		&core.ResourceGroupArgs{
			Location: pulumi.String(ri.Location),
			Name:     pulumi.String(ClusterName + "-rg"),
		},
	)
	if err != nil {
		return err
	}

	ri.ResourceGroup = resourceGroup

	return nil
}

// main drives the whole deployment: each step below builds one layer of
// the stack and stores its results on ri for the later steps to consume,
// so the call order matters.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		ri := ResourceInfo{
			CPNics:     map[string]*network.NetworkInterface{},
			WorkerNics: map[string]*network.NetworkInterface{},
		}

		c := config.New(ctx, "")

		// Deployment region; falls back to centralus when unset.
		location := c.Get("location")
		if location == "" {
			location = "centralus"
		}

		ri.Location = location

		// Create an Azure Resource Group
		err := ri.createRG(ctx)
		if err != nil {
			return err
		}

		// Create a storage account and blob container
		err = ri.createStorage(ctx)
		if err != nil {
			return err
		}

		// Upload blob and create image
		err = ri.createImage(ctx)
		if err != nil {
			return err
		}

		// Setup security groups
		err = ri.createSecurityGroups(ctx)
		if err != nil {
			return err
		}

		// Setup networks
		// NOTE(review): CreateNetworks is exported unlike its sibling
		// helpers (createRG, createStorage, ...); defined in network.go.
		err = ri.CreateNetworks(ctx)
		if err != nil {
			return err
		}

		// Setup LB
		err = ri.createLB(ctx)
		if err != nil {
			return err
		}

		// Create nics for CP to use
		err = ri.createCPNics(ctx)
		if err != nil {
			return err
		}

		// Create nics for Worker to use
		err = ri.createWorkerNics(ctx)
		if err != nil {
			return err
		}

		// Create Talos configs
		err = ri.createConfigs(ctx)
		if err != nil {
			return err
		}

		// Create control plane nodes
		err = ri.createCPVMs(ctx)
		if err != nil {
			return err
		}

		// Create worker nodes
		err = ri.createWorkerVMs(ctx)
		if err != nil {
			return err
		}

		// Bootstrap the Talos cluster
		err = ri.bootstrapTalos(ctx)
		if err != nil {
			return err
		}

		ctx.Export("loadBalancerIP", ri.LBPubIP.IpAddress)
		ctx.Export("controlPlaneIPs", ri.CPPubIPs)
		ctx.Export("talosConfig", ri.TalosClusterSecrets.TalosConfig)

		return nil
	})
}
--------------------------------------------------------------------------------
/examples/pulumi/azure/storage.go:
--------------------------------------------------------------------------------
// storage.go holds functions specific to Azure storage resources
package main

import (
	"github.com/pulumi/pulumi-azure/sdk/v5/go/azure/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// createStorage provisions the storage account and blob container that the
// Talos VHD image is uploaded into, recording both on ri.
func (ri *ResourceInfo) createStorage(ctx *pulumi.Context) error {
	account, err := storage.NewAccount(
		ctx,
		ClusterName+"storage",
		&storage.AccountArgs{
			ResourceGroupName:      ri.ResourceGroup.Name,
			Location:               pulumi.String(ri.Location),
			AccountTier:            pulumi.String("Standard"),
			AccountReplicationType: pulumi.String("LRS"),
		},
	)
	if err != nil {
		return err
	}

	ri.StorageAcct = account

	container, err := storage.NewContainer(
		ctx,
		ClusterName+"-blobcontainer",
		&storage.ContainerArgs{
			StorageAccountName: ri.StorageAcct.Name,
		},
	)
	if err != nil {
		return err
	}

	ri.StorageContainer = container

	return nil
}
--------------------------------------------------------------------------------
/examples/pulumi/azure/talos.go:
--------------------------------------------------------------------------------
package main

import (
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/siderolabs/pulumi-provider-talos/sdk/go/talos"
)

// createConfigs generates the Talos cluster secrets and the machine
// configuration, pointing the cluster endpoint at the load balancer's
// public IP and adding the control plane public IPs as extra SANs.
func (ri *ResourceInfo) createConfigs(ctx *pulumi.Context) error {
	clusterSecrets, err := talos.NewClusterSecrets(
		ctx,
		ClusterName+"-cluster-secrets",
		&talos.ClusterSecretsArgs{})
	if err != nil {
		return err
	}

	cc, err := talos.NewClusterConfig(
		ctx,
		ClusterName+"-cluster-config",
		&talos.ClusterConfigArgs{
			AdditionalSans:  ri.CPPubIPs,
			ClusterEndpoint: pulumi.Sprintf("https://%s:6443", ri.LBPubIP.IpAddress),
			ClusterName:     pulumi.String(ClusterName),
			Secrets:         clusterSecrets.Secrets,
		},
	)
	if err != nil {
		return err
	}

	ri.TalosClusterSecrets = clusterSecrets
	ri.TalosClusterConfig = cc

	return nil
}

// bootstrapTalos bootstraps etcd on the first control plane node.
// Assumes CPPubIPs has already been populated and is non-empty
// (ControlPlaneNodesCount >= 1) — indexing [0] panics otherwise.
func (ri *ResourceInfo) bootstrapTalos(ctx *pulumi.Context) error {
	_, err := talos.NewNodeBootstrap(
		ctx,
		ClusterName+"-bootstrap",
		&talos.NodeBootstrapArgs{
			Endpoint:    ri.CPPubIPs[0],
			Node:        ri.CPPubIPs[0],
			TalosConfig: ri.TalosClusterSecrets.TalosConfig,
		})
	if err != nil {
		return err
	}

	return nil
}
--------------------------------------------------------------------------------
/examples/pulumi/equinix-metal/Pulumi.yaml:
--------------------------------------------------------------------------------
name: talos-em
runtime: go
description: Talos Cluster on Equinix Metal
--------------------------------------------------------------------------------
/examples/pulumi/equinix-metal/README.md:
--------------------------------------------------------------------------------
# talos-em

This
project brings up an example Talos HA cluster on Equinix Metal 4 | 5 | ## Setup 6 | 7 | Set the equinix metal project id and equinix metal api token 8 | 9 | ```bash 10 | pulumi config set projectID 11 | pulumi config set equinix-metal:authToken --secret 12 | ``` 13 | 14 | 15 | ## getting talosconfig 16 | 17 | > NB: This is the admin talosconfig, generate different talosconfig for other users 18 | 19 | `pulumi stack output talosConfig --show-secrets` 20 | 21 | ## getting admin kubeconfig 22 | 23 | `pulumi stack output kubeconfig --show-secrets` 24 | -------------------------------------------------------------------------------- /examples/pulumi/equinix-metal/go.mod: -------------------------------------------------------------------------------- 1 | module equinix-metal 2 | 3 | go 1.18 4 | 5 | require ( 6 | github.com/pulumi/pulumi-equinix-metal/sdk/v3 v3.2.1 7 | github.com/pulumi/pulumi/sdk/v3 v3.33.2 8 | github.com/siderolabs/pulumi-provider-talos/sdk v0.0.0-20220606192808-8b9d2507e9f3 9 | ) 10 | 11 | require ( 12 | github.com/Microsoft/go-winio v0.5.2 // indirect 13 | github.com/blang/semver v3.5.1+incompatible // indirect 14 | github.com/cheggaaa/pb v1.0.29 // indirect 15 | github.com/djherbis/times v1.5.0 // indirect 16 | github.com/emirpasic/gods v1.18.1 // indirect 17 | github.com/gofrs/uuid v4.2.0+incompatible // indirect 18 | github.com/gogo/protobuf v1.3.2 // indirect 19 | github.com/golang/glog v1.0.0 // indirect 20 | github.com/golang/protobuf v1.5.2 // indirect 21 | github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect 22 | github.com/hashicorp/errwrap v1.1.0 // indirect 23 | github.com/hashicorp/go-multierror v1.1.1 // indirect 24 | github.com/inconshreveable/mousetrap v1.0.0 // indirect 25 | github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect 26 | github.com/kevinburke/ssh_config v1.2.0 // indirect 27 | github.com/mattn/go-runewidth v0.0.13 // indirect 28 | github.com/mitchellh/go-homedir 
v1.1.0 // indirect 29 | github.com/mitchellh/go-ps v1.0.0 // indirect 30 | github.com/opentracing/basictracer-go v1.1.0 // indirect 31 | github.com/opentracing/opentracing-go v1.2.0 // indirect 32 | github.com/pkg/errors v0.9.1 // indirect 33 | github.com/pkg/term v1.1.0 // indirect 34 | github.com/rivo/uniseg v0.2.0 // indirect 35 | github.com/rogpeppe/go-internal v1.8.1 // indirect 36 | github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect 37 | github.com/sergi/go-diff v1.2.0 // indirect 38 | github.com/spf13/cast v1.4.1 // indirect 39 | github.com/spf13/cobra v1.4.0 // indirect 40 | github.com/spf13/pflag v1.0.5 // indirect 41 | github.com/src-d/gcfg v1.4.0 // indirect 42 | github.com/texttheater/golang-levenshtein v1.0.1 // indirect 43 | github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 // indirect 44 | github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect 45 | github.com/uber/jaeger-lib v2.4.1+incompatible // indirect 46 | github.com/xanzy/ssh-agent v0.3.1 // indirect 47 | go.uber.org/atomic v1.9.0 // indirect 48 | golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect 49 | golang.org/x/net v0.0.0-20220526153639-5463443f8c37 // indirect 50 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect 51 | golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect 52 | golang.org/x/text v0.3.7 // indirect 53 | google.golang.org/genproto v0.0.0-20220527130721-00d5c0f3be58 // indirect 54 | google.golang.org/grpc v1.46.2 // indirect 55 | google.golang.org/protobuf v1.28.0 // indirect 56 | gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect 57 | gopkg.in/src-d/go-git.v4 v4.13.1 // indirect 58 | gopkg.in/warnings.v0 v0.1.2 // indirect 59 | gopkg.in/yaml.v2 v2.4.0 // indirect 60 | sourcegraph.com/sourcegraph/appdash v0.0.0-20211028080628-e2786a622600 // indirect 61 | ) 62 | -------------------------------------------------------------------------------- 
/examples/pulumi/equinix-metal/patches/common.yaml:
--------------------------------------------------------------------------------
- op: add
  path: /machine/kubelet/extraArgs
  value:
    volume-stats-agg-period: -1s
--------------------------------------------------------------------------------
/examples/pulumi/equinix-metal/patches/ingress.yaml:
--------------------------------------------------------------------------------
- op: add
  path: /machine/kubelet
  value:
    extraArgs:
      volume-stats-agg-period: -1s
      register-with-taints: dedicated=ingress:NoSchedule
      node-labels: node.kubernetes.io/role=ingress
--------------------------------------------------------------------------------
/examples/pulumi/equinix-metal/patches/worker.yaml:
--------------------------------------------------------------------------------
- op: add
  path: /machine/sysctls
  value:
    fs.inotify.max_user_instances: "512"
    fs.inotify.max_user_watches: "524288"
--------------------------------------------------------------------------------
/examples/pulumi/gcp/Pulumi.yaml:
--------------------------------------------------------------------------------
name: gcp
runtime: go
description: Talos on GCP
--------------------------------------------------------------------------------
/examples/pulumi/gcp/compute.go:
--------------------------------------------------------------------------------
package main

import (
	"fmt"
	"strings"

	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/compute"
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// createImage downloads the official Talos GCP release tarball into the
// bucket and registers it as a compute image, stored on ri.Image.
// GCE image names cannot contain dots, hence the "." -> "-" rewrite.
func (ri *ResourceInfo) createImage(ctx *pulumi.Context) error {
	imgName := strings.Replace("talos-"+TalosVersion, ".", "-", -1)

	obj, err := storage.NewBucketObject(
		ctx,
		imgName+".tar.gz",
		&storage.BucketObjectArgs{
			Bucket: ri.Bucket.Name,
			Name:   pulumi.String(imgName + ".tar.gz"),
			Source: pulumi.NewRemoteAsset("https://github.com/siderolabs/talos/releases/download/" + TalosVersion + "/gcp-amd64.tar.gz"),
		},
	)
	if err != nil {
		return err
	}

	img, err := compute.NewImage(
		ctx,
		imgName,
		&compute.ImageArgs{
			Name: pulumi.String(imgName),
			RawDisk: &compute.ImageRawDiskArgs{
				Source: obj.MediaLink.ToStringOutput(),
			},
		},
	)
	if err != nil {
		return err
	}

	ri.Image = img

	return nil
}

// createCPVMs creates ControlPlaneNodesCount control plane instances, each
// booted from ri.Image with the Talos controlplane config as user-data and
// the pre-allocated address from ri.CPAddresses attached as its NAT IP.
// Instances are appended to ri.CPInstances for the LB instance group.
func (ri *ResourceInfo) createCPVMs(ctx *pulumi.Context) error {
	for i := 0; i < ControlPlaneNodesCount; i++ {
		instance, err := compute.NewInstance(
			ctx,
			fmt.Sprintf("%s-cp-%d", ClusterName, i),
			&compute.InstanceArgs{
				MachineType: pulumi.String("e2-medium"),
				BootDisk: &compute.InstanceBootDiskArgs{
					InitializeParams: &compute.InstanceBootDiskInitializeParamsArgs{
						Image: ri.Image.SelfLink.ToStringOutput(),
						Size:  pulumi.IntPtr(10),
					},
				},
				Metadata: pulumi.StringMap{"user-data": ri.TalosClusterConfig.ControlplaneConfig},
				NetworkInterfaces: compute.InstanceNetworkInterfaceArray{
					&compute.InstanceNetworkInterfaceArgs{
						Network: ri.Network.Name,
						AccessConfigs: compute.InstanceNetworkInterfaceAccessConfigArray{
							&compute.InstanceNetworkInterfaceAccessConfigArgs{
								NatIp: ri.CPAddresses[fmt.Sprintf("%s-cp-%d", ClusterName, i)].Address,
							},
						},
					},
				},
				// Tag targeted by the control plane firewall rules.
				Tags: pulumi.StringArray{
					pulumi.String(ClusterName + "-cp"),
				},
				Zone: pulumi.String(ri.Zone),
			},
		)
		if err != nil {
			return err
		}

		ri.CPInstances = append(ri.CPInstances, instance)
	}

	return nil
}

// createWorkerVMs creates WorkerNodesCount worker instances, mirroring
// createCPVMs but with the worker config and "-worker" tag. Unlike the
// control plane path, the created instances are intentionally not
// recorded on ri — nothing downstream needs them.
func (ri *ResourceInfo) createWorkerVMs(ctx *pulumi.Context) error {
	for i := 0; i < WorkerNodesCount; i++ {
		_, err := compute.NewInstance(
			ctx,
			fmt.Sprintf("%s-worker-%d", ClusterName, i),
			&compute.InstanceArgs{
				MachineType: pulumi.String("e2-medium"),
				BootDisk: &compute.InstanceBootDiskArgs{
					InitializeParams: &compute.InstanceBootDiskInitializeParamsArgs{
						Image: ri.Image.SelfLink.ToStringOutput(),
						Size:  pulumi.IntPtr(10),
					},
				},
				Metadata: pulumi.StringMap{"user-data": ri.TalosClusterConfig.WorkerConfig},
				NetworkInterfaces: compute.InstanceNetworkInterfaceArray{
					&compute.InstanceNetworkInterfaceArgs{
						Network: ri.Network.Name,
						AccessConfigs: compute.InstanceNetworkInterfaceAccessConfigArray{
							&compute.InstanceNetworkInterfaceAccessConfigArgs{
								NatIp: ri.WorkerAddresses[fmt.Sprintf("%s-worker-%d", ClusterName, i)].Address,
							},
						},
					},
				},
				Tags: pulumi.StringArray{
					pulumi.String(ClusterName + "-worker"),
				},
				Zone: pulumi.String(ri.Zone),
			},
		)
		if err != nil {
			return err
		}
	}

	return nil
}
--------------------------------------------------------------------------------
/examples/pulumi/gcp/go.mod:
--------------------------------------------------------------------------------
module gcp

go 1.17

require (
	github.com/pulumi/pulumi-gcp/sdk/v6 v6.25.0
	github.com/pulumi/pulumi/sdk/v3 v3.33.2
	github.com/siderolabs/pulumi-provider-talos/sdk v0.0.0-20220606192808-8b9d2507e9f3
)

require (
	github.com/Microsoft/go-winio v0.5.2 // indirect
	github.com/blang/semver v3.5.1+incompatible // indirect
	github.com/cheggaaa/pb v1.0.29 // indirect
	github.com/djherbis/times v1.5.0 // indirect
	github.com/emirpasic/gods v1.18.1 // indirect
	github.com/gofrs/uuid v4.2.0+incompatible // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/glog v1.0.0 // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/grpc-ecosystem/grpc-opentracing
v0.0.0-20180507213350-8e809c8a8645 // indirect 22 | github.com/hashicorp/errwrap v1.1.0 // indirect 23 | github.com/hashicorp/go-multierror v1.1.1 // indirect 24 | github.com/inconshreveable/mousetrap v1.0.0 // indirect 25 | github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect 26 | github.com/kevinburke/ssh_config v1.2.0 // indirect 27 | github.com/mattn/go-runewidth v0.0.13 // indirect 28 | github.com/mitchellh/go-homedir v1.1.0 // indirect 29 | github.com/mitchellh/go-ps v1.0.0 // indirect 30 | github.com/opentracing/basictracer-go v1.1.0 // indirect 31 | github.com/opentracing/opentracing-go v1.2.0 // indirect 32 | github.com/pkg/errors v0.9.1 // indirect 33 | github.com/pkg/term v1.1.0 // indirect 34 | github.com/rivo/uniseg v0.2.0 // indirect 35 | github.com/rogpeppe/go-internal v1.8.1 // indirect 36 | github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // indirect 37 | github.com/sergi/go-diff v1.2.0 // indirect 38 | github.com/spf13/cast v1.3.1 // indirect 39 | github.com/spf13/cobra v1.4.0 // indirect 40 | github.com/spf13/pflag v1.0.5 // indirect 41 | github.com/src-d/gcfg v1.4.0 // indirect 42 | github.com/texttheater/golang-levenshtein v1.0.1 // indirect 43 | github.com/tweekmonster/luser v0.0.0-20161003172636-3fa38070dbd7 // indirect 44 | github.com/uber/jaeger-client-go v2.30.0+incompatible // indirect 45 | github.com/uber/jaeger-lib v2.4.1+incompatible // indirect 46 | github.com/xanzy/ssh-agent v0.3.1 // indirect 47 | go.uber.org/atomic v1.9.0 // indirect 48 | golang.org/x/crypto v0.0.0-20220525230936-793ad666bf5e // indirect 49 | golang.org/x/net v0.0.0-20220526153639-5463443f8c37 // indirect 50 | golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect 51 | golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect 52 | golang.org/x/text v0.3.7 // indirect 53 | google.golang.org/genproto v0.0.0-20220527130721-00d5c0f3be58 // indirect 54 | google.golang.org/grpc v1.46.2 // indirect 55 | 
google.golang.org/protobuf v1.28.0 // indirect 56 | gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect 57 | gopkg.in/src-d/go-git.v4 v4.13.1 // indirect 58 | gopkg.in/warnings.v0 v0.1.2 // indirect 59 | gopkg.in/yaml.v2 v2.4.0 // indirect 60 | sourcegraph.com/sourcegraph/appdash v0.0.0-20211028080628-e2786a622600 // indirect 61 | ) 62 | -------------------------------------------------------------------------------- /examples/pulumi/gcp/main.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/compute" 5 | "github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/storage" 6 | "github.com/pulumi/pulumi/sdk/v3/go/pulumi" 7 | "github.com/pulumi/pulumi/sdk/v3/go/pulumi/config" 8 | "github.com/siderolabs/pulumi-provider-talos/sdk/go/talos" 9 | ) 10 | 11 | const ( 12 | ClusterName = "talos" 13 | TalosVersion = "v1.0.6" 14 | 15 | ControlPlaneNodesCount = 3 16 | WorkerNodesCount = 2 17 | ) 18 | 19 | // ResourceInfo holds pointers to the various resources that 20 | // need to be passed around to each other. 21 | type ResourceInfo struct { 22 | PulumiConfig *config.Config 23 | 24 | BucketLocation string 25 | Region string 26 | Zone string 27 | 28 | Bucket *storage.Bucket 29 | Image *compute.Image 30 | 31 | Network *compute.Network 32 | 33 | CPInstances []*compute.Instance 34 | CPAddresses map[string]*compute.Address 35 | WorkerAddresses map[string]*compute.Address 36 | LBAddress *compute.GlobalAddress 37 | 38 | TalosClusterConfig *talos.ClusterConfig 39 | TalosClusterSecrets *talos.ClusterSecrets 40 | } 41 | 42 | func main() { 43 | pulumi.Run(func(ctx *pulumi.Context) error { 44 | ri := ResourceInfo{ 45 | CPInstances: []*compute.Instance{}, 46 | CPAddresses: map[string]*compute.Address{}, 47 | WorkerAddresses: map[string]*compute.Address{}, 48 | } 49 | 50 | // TODO: understand how to either set these programatically or let 51 | // GCP choose for zone during instance creation. 
52 | ri.PulumiConfig = config.New(ctx, "") 53 | 54 | region := ri.PulumiConfig.Get("region") 55 | if region == "" { 56 | region = "us-central1" 57 | } 58 | 59 | ri.Region = region 60 | 61 | zone := ri.PulumiConfig.Get("zone") 62 | if zone == "" { 63 | zone = "us-central1-a" 64 | } 65 | 66 | ri.Zone = zone 67 | 68 | // Create an Storage Bucket 69 | err := ri.createStorage(ctx) 70 | if err != nil { 71 | return err 72 | } 73 | 74 | // Upload blob and create image 75 | err = ri.createImage(ctx) 76 | if err != nil { 77 | return err 78 | } 79 | 80 | // Create a virtual networkf or us to use 81 | err = ri.createNetworks(ctx) 82 | if err != nil { 83 | return err 84 | } 85 | 86 | // Carve out IP Addresses 87 | err = ri.createCPAddresses(ctx) 88 | if err != nil { 89 | return err 90 | } 91 | 92 | err = ri.createWorkerAddresses(ctx) 93 | if err != nil { 94 | return err 95 | } 96 | 97 | err = ri.createLBAddress(ctx) 98 | if err != nil { 99 | return err 100 | } 101 | 102 | // Setup all firewall rules 103 | err = ri.createFirewalls(ctx) 104 | if err != nil { 105 | return err 106 | } 107 | 108 | // Create Talos configs 109 | err = ri.createConfigs(ctx) 110 | if err != nil { 111 | return err 112 | } 113 | 114 | // Create VMs 115 | err = ri.createCPVMs(ctx) 116 | if err != nil { 117 | return err 118 | } 119 | 120 | err = ri.createWorkerVMs(ctx) 121 | if err != nil { 122 | return err 123 | } 124 | 125 | // Create K8s loadbalancer 126 | err = ri.createLB(ctx) 127 | if err != nil { 128 | return err 129 | } 130 | 131 | // Bootstrap it 132 | err = ri.bootstrapTalos(ctx) 133 | if err != nil { 134 | return err 135 | } 136 | 137 | for _, ip := range ri.CPAddresses { 138 | ctx.Export("controlPlaneIP", ip.Address) 139 | } 140 | 141 | ctx.Export("loadBalancerIP", ri.LBAddress.Address) 142 | ctx.Export("talosConfig", ri.TalosClusterSecrets.TalosConfig) 143 | 144 | return nil 145 | }) 146 | } 147 | -------------------------------------------------------------------------------- 
/examples/pulumi/gcp/network.go:
--------------------------------------------------------------------------------
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/compute"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// createNetworks creates the VPC network for the cluster and records it
// on ri.Network.
func (ri *ResourceInfo) createNetworks(ctx *pulumi.Context) error {
	net, err := compute.NewNetwork(
		ctx,
		ClusterName+"-net",
		&compute.NetworkArgs{
			Name: pulumi.String(ClusterName + "-net"),
		},
	)
	if err != nil {
		return err
	}

	ri.Network = net

	return nil

}

// createCPAddresses reserves one regional static IP per control plane
// node, keyed in ri.CPAddresses by the instance name ("<cluster>-cp-<i>").
func (ri *ResourceInfo) createCPAddresses(ctx *pulumi.Context) error {
	for i := 0; i < ControlPlaneNodesCount; i++ {
		addr, err := compute.NewAddress(
			ctx,
			fmt.Sprintf("%s-cp-%d", ClusterName, i),
			&compute.AddressArgs{
				Name:   pulumi.String(fmt.Sprintf("%s-cp-%d", ClusterName, i)),
				Region: pulumi.String(ri.Region),
			},
		)
		if err != nil {
			return err
		}

		ri.CPAddresses[fmt.Sprintf("%s-cp-%d", ClusterName, i)] = addr
	}

	return nil
}

// createWorkerAddresses reserves one regional static IP per worker node,
// keyed in ri.WorkerAddresses by the instance name ("<cluster>-worker-<i>").
func (ri *ResourceInfo) createWorkerAddresses(ctx *pulumi.Context) error {
	for i := 0; i < WorkerNodesCount; i++ {
		addr, err := compute.NewAddress(
			ctx,
			fmt.Sprintf("%s-worker-%d", ClusterName, i),
			&compute.AddressArgs{
				Name:   pulumi.String(fmt.Sprintf("%s-worker-%d", ClusterName, i)),
				Region: pulumi.String(ri.Region),
			},
		)
		if err != nil {
			return err
		}

		ri.WorkerAddresses[fmt.Sprintf("%s-worker-%d", ClusterName, i)] = addr
	}

	return nil
}

// createLBAddress reserves the global static IP the Kubernetes API load
// balancer forwards from, recorded on ri.LBAddress.
func (ri *ResourceInfo) createLBAddress(ctx *pulumi.Context) error {
	addr, err := compute.NewGlobalAddress(
		ctx,
		ClusterName+"-lb",
		&compute.GlobalAddressArgs{
			Name: pulumi.String(ClusterName + "-lb"),
		},
	)
	if err != nil {
		return err
	}

	ri.LBAddress = addr

	return nil
}

// createFirewalls creates the three firewall rules the cluster needs:
// API-server (6443) access for the LB health checks, Talos API (50000)
// access from anywhere, and unrestricted intra-cluster traffic between
// nodes tagged "-cp"/"-worker".
func (ri *ResourceInfo) createFirewalls(ctx *pulumi.Context) error {
	_, err := compute.NewFirewall(
		ctx,
		ClusterName+"-cp-health",
		&compute.FirewallArgs{
			Network: ri.Network.SelfLink,
			Allows: &compute.FirewallAllowArray{
				&compute.FirewallAllowArgs{
					Protocol: pulumi.String("tcp"),
					Ports: pulumi.StringArray{
						pulumi.String("6443"),
					},
				},
			},
			// Presumably GCP's load balancer health-check source ranges —
			// TODO(review): confirm against current GCP documentation.
			SourceRanges: pulumi.ToStringArray([]string{"35.191.0.0/16", "130.211.0.0/22"}),
			TargetTags: pulumi.StringArray{
				pulumi.String(ClusterName + "-cp"),
			},
		},
	)
	if err != nil {
		return err
	}

	_, err = compute.NewFirewall(
		ctx,
		ClusterName+"-cp-talosapi",
		&compute.FirewallArgs{
			Network: ri.Network.SelfLink,
			Allows: &compute.FirewallAllowArray{
				&compute.FirewallAllowArgs{
					Protocol: pulumi.String("tcp"),
					Ports: pulumi.StringArray{
						pulumi.String("50000"),
					},
				},
			},
			// Talos API open to the world for this example.
			SourceRanges: pulumi.ToStringArray([]string{"0.0.0.0/0"}),
			TargetTags: pulumi.StringArray{
				pulumi.String(ClusterName + "-cp"),
			},
		},
	)
	if err != nil {
		return err
	}

	_, err = compute.NewFirewall(
		ctx,
		ClusterName+"-all-intracluster",
		&compute.FirewallArgs{
			Network: ri.Network.SelfLink,
			Allows: &compute.FirewallAllowArray{
				&compute.FirewallAllowArgs{
					Protocol: pulumi.String("all"),
				},
			},
			SourceTags: pulumi.StringArray{
				pulumi.String(ClusterName + "-cp"),
				pulumi.String(ClusterName + "-worker"),
			},
			TargetTags: pulumi.StringArray{
				pulumi.String(ClusterName + "-cp"),
				pulumi.String(ClusterName + "-worker"),
			},
		},
	)
	if err != nil {
		return err
	}

	return nil
}

// createLB wires up the external TCP proxy load balancer for the
// Kubernetes API: instance group (of ri.CPInstances) -> SSL health check
// on 6443 -> backend service -> target TCP proxy -> global forwarding
// rule on ri.LBAddress:6443. Must run after createCPVMs and
// createLBAddress have populated ri.
func (ri *ResourceInfo) createLB(ctx *pulumi.Context) error {
	// Create instance group
	instanceList := pulumi.StringArray{}

	for _, instance := range ri.CPInstances {
		instanceList = append(instanceList, instance.ID())
	}

	ig, err := compute.NewInstanceGroup(
		ctx,
		ClusterName+"-cp-ig",
		&compute.InstanceGroupArgs{
			Instances: instanceList,
			Name:      pulumi.String(ClusterName + "-cp-ig"),
			NamedPorts: compute.InstanceGroupNamedPortTypeArray{
				&compute.InstanceGroupNamedPortTypeArgs{
					Name: pulumi.String(ClusterName + "-k8s-api"),
					Port: pulumi.Int(6443),
				},
			},
			Zone: pulumi.String(ri.Zone),
		},
	)
	if err != nil {
		return err
	}

	// SSL health check against the API server port.
	hc, err := compute.NewHealthCheck(
		ctx,
		ClusterName+"-k8s-hc",
		&compute.HealthCheckArgs{
			Name: pulumi.String(ClusterName + "-k8s-hc"),
			SslHealthCheck: &compute.HealthCheckSslHealthCheckArgs{
				Port:              pulumi.Int(6443),
				PortSpecification: pulumi.String("USE_FIXED_PORT"),
			},
		},
	)
	if err != nil {
		return err
	}

	// Backend service routes to the instance group via the named port.
	backend, err := compute.NewBackendService(
		ctx,
		ClusterName+"-k8s-lb",
		&compute.BackendServiceArgs{
			Backends: compute.BackendServiceBackendArray{
				&compute.BackendServiceBackendArgs{
					Group: ig.SelfLink,
				},
			},
			HealthChecks:        hc.ID(),
			LoadBalancingScheme: pulumi.String("EXTERNAL"),
			Name:                pulumi.String(ClusterName + "-k8s-lb"),
			Protocol:            pulumi.String("TCP"),
			PortName:            pulumi.String(ClusterName + "-k8s-api"),
		},
	)
	if err != nil {
		return err
	}

	tcpProxy, err := compute.NewTargetTCPProxy(
		ctx,
		ClusterName+"-k8s-tcpproxy",
		&compute.TargetTCPProxyArgs{
			BackendService: backend.ID(),
		},
	)
	if err != nil {
		return err
	}

	// Forwarding rule binds the reserved global IP to the proxy on 6443.
	_, err = compute.NewGlobalForwardingRule(
		ctx,
		ClusterName+"-k8s-forwarding",
		&compute.GlobalForwardingRuleArgs{
			IpAddress:  ri.LBAddress.Address,
			IpProtocol: pulumi.String("TCP"),
			Name:       pulumi.String(ClusterName + "-k8s-forwarding"),
			PortRange:  pulumi.String("6443"),
			Target:     tcpProxy.ID(),
		},
	)
	if err != nil {
		return err
	}

	return nil
}
--------------------------------------------------------------------------------
/examples/pulumi/gcp/storage.go:
--------------------------------------------------------------------------------
// storage.go holds functions specific to GCP storage resources
package main

import (
	"github.com/pulumi/pulumi-gcp/sdk/v6/go/gcp/storage"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// createStorage creates the bucket the Talos image tarball is uploaded
// into. Bucket location comes from the "bucket-location" config value,
// defaulting to the "US" multi-region.
func (ri *ResourceInfo) createStorage(ctx *pulumi.Context) error {
	bucketLoc := ri.PulumiConfig.Get("bucket-location")
	if bucketLoc == "" {
		bucketLoc = "US"
	}

	ri.BucketLocation = bucketLoc

	bucket, err := storage.NewBucket(
		ctx,
		ClusterName+"-bucket",
		&storage.BucketArgs{
			Location: pulumi.String(ri.BucketLocation),
		})
	if err != nil {
		return err
	}

	ri.Bucket = bucket

	return nil
}
--------------------------------------------------------------------------------
/examples/pulumi/gcp/talos.go:
--------------------------------------------------------------------------------
package main

import (
	"fmt"

	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
	"github.com/siderolabs/pulumi-provider-talos/sdk/go/talos"
)

// createConfigs generates the Talos cluster secrets and the machine
// configuration, pointing the cluster endpoint at the load balancer's
// global address. Must run after createLBAddress.
func (ri *ResourceInfo) createConfigs(ctx *pulumi.Context) error {
	clusterSecrets, err := talos.NewClusterSecrets(
		ctx,
		ClusterName+"-cluster-secrets",
		&talos.ClusterSecretsArgs{
			ClusterName: pulumi.String(ClusterName),
		},
	)
	if err != nil {
		return err
	}

	cc, err := talos.NewClusterConfig(
		ctx,
		ClusterName+"-cluster-config",
		&talos.ClusterConfigArgs{
			ClusterEndpoint: pulumi.Sprintf("https://%s:6443", ri.LBAddress.Address),
			ClusterName:     pulumi.String(ClusterName),
			Secrets:         clusterSecrets.Secrets,
		},
	)
	if err != nil {
		return err
	}

	ri.TalosClusterSecrets = clusterSecrets
	ri.TalosClusterConfig = cc

	return nil
}

// bootstrapTalos bootstraps etcd on control plane node 0, addressed by
// its reserved static IP from ri.CPAddresses.
func (ri *ResourceInfo) bootstrapTalos(ctx *pulumi.Context) error {
	_, err := talos.NewNodeBootstrap(
		ctx,
		ClusterName+"-bootstrap",
		&talos.NodeBootstrapArgs{
			Endpoint:    ri.CPAddresses[fmt.Sprintf("%s-cp-%d", ClusterName, 0)].Address,
			Node:        ri.CPAddresses[fmt.Sprintf("%s-cp-%d", ClusterName, 0)].Address,
			TalosConfig: ri.TalosClusterSecrets.TalosConfig,
		},
	)
	if err != nil {
		return err
	}

	return nil
}
--------------------------------------------------------------------------------
/examples/terraform/advanced/.terraform.lock.hcl:
--------------------------------------------------------------------------------
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
3 | 4 | provider "registry.terraform.io/dmacvicar/libvirt" { 5 | version = "0.7.1" 6 | constraints = "0.7.1" 7 | hashes = [ 8 | "h1:1yEJVPVFkRkbRY63+sFRAWau/eJ0xlecHWLCV8spkWU=", 9 | "zh:1c59f2ab68da6326637ee8b03433e84af76b3e3562f251a7f2aa239a7b262a8d", 10 | "zh:236e24ecf036e99d9d1e2081a39dc9cb4b8993850a37141a1449f20750f883d6", 11 | "zh:4519c22b1f00c1d37d60ac6c2cb7ad5ab9dbcd44a80b4f61e68aacb54eae017d", 12 | "zh:54de4e3c979c32af1dc71ec2846912f669a28bdb0990e8a3c1fb8fea4ede7b61", 13 | "zh:6270a757bcf4e1f9efe47726cf0caefba30a25e59d151103cf03d1656325783c", 14 | "zh:68b8586d5b29c0a1cb7c608a309b38db911449c072d60eee9e40e01881f1c23a", 15 | "zh:724ba2290fea704714378e9363541420c36091e790c7f39150cde8987d4e0754", 16 | "zh:7b6860c92376cdad98273aab4bea62546622e08f50733e4b2e58a7a859d3b49d", 17 | "zh:986a0a4f8d9511c64bcac8010337deb43110b4c2f91969b2491fd9edc290b60e", 18 | "zh:aff0f6f24d69cd97a44cd6059edaf355769fbb8a7643a6db4d52c9a94f98e194", 19 | "zh:c46ca3f8384d06c13a7ed3d4b83c65b4f8dccbf9d5f624843b68d176add5c5c2", 20 | "zh:ef310534e7d38153aca4ce31655b52a6e6c4d76f32e49732c96b62e9de1ee843", 21 | "zh:f1566b094f4267ef2674889d874962dd41e0cba55251645e16d003c77ca8a19c", 22 | "zh:f2e019df7b537069828c5537c481e5b7f41d2404eef6fe5c86702c20900b303d", 23 | ] 24 | } 25 | 26 | provider "registry.terraform.io/hashicorp/random" { 27 | version = "3.5.1" 28 | constraints = "3.5.1" 29 | hashes = [ 30 | "h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=", 31 | "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64", 32 | "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d", 33 | "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831", 34 | "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3", 35 | "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f", 36 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 37 | "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b", 38 | 
"zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2", 39 | "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865", 40 | "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03", 41 | "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602", 42 | "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014", 43 | ] 44 | } 45 | 46 | provider "registry.terraform.io/hashicorp/tls" { 47 | version = "4.0.4" 48 | constraints = "4.0.4" 49 | hashes = [ 50 | "h1:pe9vq86dZZKCm+8k1RhzARwENslF3SXb9ErHbQfgjXU=", 51 | "zh:23671ed83e1fcf79745534841e10291bbf34046b27d6e68a5d0aab77206f4a55", 52 | "zh:45292421211ffd9e8e3eb3655677700e3c5047f71d8f7650d2ce30242335f848", 53 | "zh:59fedb519f4433c0fdb1d58b27c210b27415fddd0cd73c5312530b4309c088be", 54 | "zh:5a8eec2409a9ff7cd0758a9d818c74bcba92a240e6c5e54b99df68fff312bbd5", 55 | "zh:5e6a4b39f3171f53292ab88058a59e64825f2b842760a4869e64dc1dc093d1fe", 56 | "zh:810547d0bf9311d21c81cc306126d3547e7bd3f194fc295836acf164b9f8424e", 57 | "zh:824a5f3617624243bed0259d7dd37d76017097dc3193dac669be342b90b2ab48", 58 | "zh:9361ccc7048be5dcbc2fafe2d8216939765b3160bd52734f7a9fd917a39ecbd8", 59 | "zh:aa02ea625aaf672e649296bce7580f62d724268189fe9ad7c1b36bb0fa12fa60", 60 | "zh:c71b4cd40d6ec7815dfeefd57d88bc592c0c42f5e5858dcc88245d371b4b8b1e", 61 | "zh:dabcd52f36b43d250a3d71ad7abfa07b5622c69068d989e60b79b2bb4f220316", 62 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 63 | ] 64 | } 65 | 66 | provider "registry.terraform.io/siderolabs/talos" { 67 | version = "0.9.0-alpha.0" 68 | constraints = "0.9.0-alpha.0" 69 | hashes = [ 70 | "h1:aJIAbggkIN/2/7JEjSz/PfHVFRXOokeHYt3hDII6kI0=", 71 | "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", 72 | "zh:12a822ecfcc14da28bb88887811a796f5165ee50d64967c5afea389da12e3d18", 73 | "zh:5c2519abfbd5e45de4afd94e52d393235e3d09845af27c58aa98dd532311f47f", 74 | "zh:6a99b169eaf46789c7465de27d2dea52ca799f39412390d0a28bb28f5b9e5a4e", 
75 | "zh:975daeb0ff5517e5a7a4357125f6e7d74041d87c853f9a52ecfd285ce972e51b", 76 | "zh:b358aefccccf84dab4bf07f2b039814755fc282cbe30f20496013e311eae3463", 77 | "zh:b4e3a0fddc38a05c25b8f1553098574d56959abeb2b5bf9e880208000a415231", 78 | "zh:ba331d61225fac3f787f7acd4cc298a7e0ca43ee7536ce5ab7f6c9dfae4c8e9e", 79 | "zh:bbd9bc936461d2be6c11a5abaa53f2618ac592bc7a6cc1ad9c4205fd73c95eac", 80 | "zh:bdd77e81bf65074fbc891a7429ec3264a342bc7545978a6c108e87cec5bb2f56", 81 | "zh:c132d34502d47436c5f31670f2c786c072bce6137e28cfb5d948f36721db5f66", 82 | "zh:c39ac5467fff7e326b31ada5e734ba88b8f811c5d758c3ce2c9c886504cc232f", 83 | "zh:f1083b82593be4c888e35f6c9c773a86551c8c7b5dac1f3fa69863820852fc87", 84 | "zh:f40bc8da36b6dc3b95cc13d208b81e254346d78ab81624c07a2fa74148de7a8b", 85 | "zh:f56b4589644078e21dbcdbb53cc278550a04fa9c02bc7eea3f5dc91648da2048", 86 | ] 87 | } 88 | -------------------------------------------------------------------------------- /examples/terraform/advanced/README.md: -------------------------------------------------------------------------------- 1 | # Advanced Terraform Example 2 | 3 | This example will create a local Talos cluster using libvirt. 4 | 5 | This example shows how to manage the whole Talos machine secrets using custom CA. 6 | It's recommended to pre-generate the keys required and pass it as variables to Terraform, since terraform stores the state in plain text. 7 | 8 | ## Prereqs 9 | 10 | This guide assumes that libvirt is installed and running. 11 | From this directory, issue `terraform init` to ensure the proper providers are pulled down. 12 | 13 | ## Usage 14 | 15 | To create a default cluster, this should be as simple as `terraform apply`. 16 | You will need to specify the `cluster_name` and `iso_path` variables during application. 17 | 18 | If different configurations are required, override them through command line with the `-var` flag or by creating a varsfile and overriding with `-var-file`. 
can be done with `terraform output -raw kubeconfig > <kubeconfig-path>` and `terraform output -raw talosconfig > <talosconfig-path>`.
[talos_machine_configuration_apply.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_configuration_apply) | resource | 60 | | [tls_cert_request.client_csr](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/cert_request) | resource | 61 | | [tls_cert_request.k8s_client_csr](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/cert_request) | resource | 62 | | [tls_locally_signed_cert.client_cert](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/locally_signed_cert) | resource | 63 | | [tls_locally_signed_cert.k8s_client_cert](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/locally_signed_cert) | resource | 64 | | [tls_private_key.client_key](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/private_key) | resource | 65 | | [tls_private_key.etcd_key](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/private_key) | resource | 66 | | [tls_private_key.k8s_aggregator_key](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/private_key) | resource | 67 | | [tls_private_key.k8s_client_key](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/private_key) | resource | 68 | | [tls_private_key.k8s_key](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/private_key) | resource | 69 | | [tls_private_key.k8s_serviceaccount_key](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/private_key) | resource | 70 | | [tls_private_key.os_key](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/private_key) | resource | 71 | | [tls_self_signed_cert.etcd_cert](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/self_signed_cert) | resource | 72 | | 
[tls_self_signed_cert.k8s_aggregator_cert](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/self_signed_cert) | resource | 73 | | [tls_self_signed_cert.k8s_cert](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/self_signed_cert) | resource | 74 | | [tls_self_signed_cert.os_cert](https://registry.terraform.io/providers/hashicorp/tls/4.0.4/docs/resources/self_signed_cert) | resource | 75 | | [talos_client_configuration.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/client_configuration) | data source | 76 | | [talos_machine_configuration.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/machine_configuration) | data source | 77 | 78 | ## Inputs 79 | 80 | | Name | Description | Type | Default | Required | 81 | |------|-------------|------|---------|:--------:| 82 | | [cluster\_name](#input\_cluster\_name) | A name to provide for the Talos cluster | `string` | n/a | yes | 83 | | [iso\_path](#input\_iso\_path) | Path to the Talos ISO | `string` | n/a | yes | 84 | 85 | ## Outputs 86 | 87 | | Name | Description | 88 | |------|-------------| 89 | | [kubeconfig](#output\_kubeconfig) | n/a | 90 | | [talosconfig](#output\_talosconfig) | n/a | 91 | 92 | -------------------------------------------------------------------------------- /examples/terraform/advanced/modules/bootstrap_token/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/random" { 5 | version = "3.5.1" 6 | constraints = "3.5.1" 7 | hashes = [ 8 | "h1:VSnd9ZIPyfKHOObuQCaKfnjIHRtR7qTw19Rz8tJxm+k=", 9 | "zh:04e3fbd610cb52c1017d282531364b9c53ef72b6bc533acb2a90671957324a64", 10 | "zh:119197103301ebaf7efb91df8f0b6e0dd31e6ff943d231af35ee1831c599188d", 11 | "zh:4d2b219d09abf3b1bb4df93d399ed156cadd61f44ad3baf5cf2954df2fba0831", 12 | "zh:6130bdde527587bbe2dcaa7150363e96dbc5250ea20154176d82bc69df5d4ce3", 13 | "zh:6cc326cd4000f724d3086ee05587e7710f032f94fc9af35e96a386a1c6f2214f", 14 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 15 | "zh:b6d88e1d28cf2dfa24e9fdcc3efc77adcdc1c3c3b5c7ce503a423efbdd6de57b", 16 | "zh:ba74c592622ecbcef9dc2a4d81ed321c4e44cddf7da799faa324da9bf52a22b2", 17 | "zh:c7c5cde98fe4ef1143bd1b3ec5dc04baf0d4cc3ca2c5c7d40d17c0e9b2076865", 18 | "zh:dac4bad52c940cd0dfc27893507c1e92393846b024c5a9db159a93c534a3da03", 19 | "zh:de8febe2a2acd9ac454b844a4106ed295ae9520ef54dc8ed2faf29f12716b602", 20 | "zh:eab0d0495e7e711cca367f7d4df6e322e6c562fc52151ec931176115b83ed014", 21 | ] 22 | } 23 | -------------------------------------------------------------------------------- /examples/terraform/advanced/modules/bootstrap_token/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Requirements 3 | 4 | | Name | Version | 5 | |------|---------| 6 | | [random](#requirement\_random) | 3.5.1 | 7 | 8 | ## Providers 9 | 10 | | Name | Version | 11 | |------|---------| 12 | | [random](#provider\_random) | 3.5.1 | 13 | 14 | ## Modules 15 | 16 | No modules. 17 | 18 | ## Resources 19 | 20 | | Name | Type | 21 | |------|------| 22 | | [random_string.token_prefix](https://registry.terraform.io/providers/hashicorp/random/3.5.1/docs/resources/string) | resource | 23 | | [random_string.token_suffix](https://registry.terraform.io/providers/hashicorp/random/3.5.1/docs/resources/string) | resource | 24 | 25 | ## Inputs 26 | 27 | No inputs. 
28 | 29 | ## Outputs 30 | 31 | | Name | Description | 32 | |------|-------------| 33 | | [bootstrap\_token](#output\_bootstrap\_token) | n/a | 34 | -------------------------------------------------------------------------------- /examples/terraform/advanced/modules/bootstrap_token/main.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "token_prefix" { 2 | length = 6 3 | special = false 4 | upper = false 5 | } 6 | 7 | resource "random_string" "token_suffix" { 8 | length = 16 9 | special = false 10 | upper = false 11 | } 12 | -------------------------------------------------------------------------------- /examples/terraform/advanced/modules/bootstrap_token/outputs.tf: -------------------------------------------------------------------------------- 1 | output "bootstrap_token" { 2 | value = "${random_string.token_prefix.result}.${random_string.token_suffix.result}" 3 | } 4 | -------------------------------------------------------------------------------- /examples/terraform/advanced/modules/bootstrap_token/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | random = { 4 | source = "hashicorp/random" 5 | version = "3.5.1" 6 | } 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /examples/terraform/advanced/outputs.tf: -------------------------------------------------------------------------------- 1 | output "talosconfig" { 2 | value = data.talos_client_configuration.this.talos_config 3 | sensitive = true 4 | } 5 | 6 | output "kubeconfig" { 7 | value = local.kubeconfig_raw 8 | sensitive = true 9 | } 10 | -------------------------------------------------------------------------------- /examples/terraform/advanced/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cluster_name" { 2 | description = "A name to provide for 
the Talos cluster" 3 | type = string 4 | } 5 | 6 | variable "iso_path" { 7 | description = "Path to the Talos ISO" 8 | type = string 9 | } 10 | -------------------------------------------------------------------------------- /examples/terraform/advanced/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | libvirt = { 4 | source = "dmacvicar/libvirt" 5 | version = "0.7.1" 6 | } 7 | random = { 8 | source = "hashicorp/random" 9 | version = "3.5.1" 10 | } 11 | tls = { 12 | source = "hashicorp/tls" 13 | version = "4.0.4" 14 | } 15 | talos = { 16 | source = "siderolabs/talos" 17 | version = "0.9.0-alpha.0" 18 | } 19 | } 20 | } 21 | 22 | provider "libvirt" { 23 | uri = "qemu:///system" 24 | } 25 | 26 | provider "random" {} 27 | 28 | provider "tls" {} 29 | 30 | provider "talos" {} 31 | -------------------------------------------------------------------------------- /examples/terraform/aws/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/aws" { 5 | version = "4.67.0" 6 | constraints = ">= 3.29.0, >= 3.73.0, >= 4.0.0, ~> 4.0, >= 4.20.0" 7 | hashes = [ 8 | "h1:dCRc4GqsyfqHEMjgtlM1EympBcgTmcTkWaJmtd91+KA=", 9 | "zh:0843017ecc24385f2b45f2c5fce79dc25b258e50d516877b3affee3bef34f060", 10 | "zh:19876066cfa60de91834ec569a6448dab8c2518b8a71b5ca870b2444febddac6", 11 | "zh:24995686b2ad88c1ffaa242e36eee791fc6070e6144f418048c4ce24d0ba5183", 12 | "zh:4a002990b9f4d6d225d82cb2fb8805789ffef791999ee5d9cb1fef579aeff8f1", 13 | "zh:559a2b5ace06b878c6de3ecf19b94fbae3512562f7a51e930674b16c2f606e29", 14 | "zh:6a07da13b86b9753b95d4d8218f6dae874cf34699bca1470d6effbb4dee7f4b7", 15 | "zh:768b3bfd126c3b77dc975c7c0e5db3207e4f9997cf41aa3385c63206242ba043", 16 | "zh:7be5177e698d4b547083cc738b977742d70ed68487ce6f49ecd0c94dbf9d1362", 17 | "zh:8b562a818915fb0d85959257095251a05c76f3467caa3ba95c583ba5fe043f9b", 18 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425", 19 | "zh:9c385d03a958b54e2afd5279cd8c7cbdd2d6ca5c7d6a333e61092331f38af7cf", 20 | "zh:b3ca45f2821a89af417787df8289cb4314b273d29555ad3b2a5ab98bb4816b3b", 21 | "zh:da3c317f1db2469615ab40aa6baba63b5643bae7110ff855277a1fb9d8eb4f2c", 22 | "zh:dc6430622a8dc5cdab359a8704aec81d3825ea1d305bbb3bbd032b1c6adfae0c", 23 | "zh:fac0d2ddeadf9ec53da87922f666e1e73a603a611c57bcbc4b86ac2821619b1d", 24 | ] 25 | } 26 | 27 | provider "registry.terraform.io/siderolabs/talos" { 28 | version = "0.9.0-alpha.0" 29 | constraints = "0.9.0-alpha.0" 30 | hashes = [ 31 | "h1:aJIAbggkIN/2/7JEjSz/PfHVFRXOokeHYt3hDII6kI0=", 32 | "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", 33 | "zh:12a822ecfcc14da28bb88887811a796f5165ee50d64967c5afea389da12e3d18", 34 | "zh:5c2519abfbd5e45de4afd94e52d393235e3d09845af27c58aa98dd532311f47f", 35 | "zh:6a99b169eaf46789c7465de27d2dea52ca799f39412390d0a28bb28f5b9e5a4e", 36 | "zh:975daeb0ff5517e5a7a4357125f6e7d74041d87c853f9a52ecfd285ce972e51b", 37 | 
"zh:b358aefccccf84dab4bf07f2b039814755fc282cbe30f20496013e311eae3463", 38 | "zh:b4e3a0fddc38a05c25b8f1553098574d56959abeb2b5bf9e880208000a415231", 39 | "zh:ba331d61225fac3f787f7acd4cc298a7e0ca43ee7536ce5ab7f6c9dfae4c8e9e", 40 | "zh:bbd9bc936461d2be6c11a5abaa53f2618ac592bc7a6cc1ad9c4205fd73c95eac", 41 | "zh:bdd77e81bf65074fbc891a7429ec3264a342bc7545978a6c108e87cec5bb2f56", 42 | "zh:c132d34502d47436c5f31670f2c786c072bce6137e28cfb5d948f36721db5f66", 43 | "zh:c39ac5467fff7e326b31ada5e734ba88b8f811c5d758c3ce2c9c886504cc232f", 44 | "zh:f1083b82593be4c888e35f6c9c773a86551c8c7b5dac1f3fa69863820852fc87", 45 | "zh:f40bc8da36b6dc3b95cc13d208b81e254346d78ab81624c07a2fa74148de7a8b", 46 | "zh:f56b4589644078e21dbcdbb53cc278550a04fa9c02bc7eea3f5dc91648da2048", 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /examples/terraform/aws/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Requirements 3 | 4 | | Name | Version | 5 | |------|---------| 6 | | [terraform](#requirement\_terraform) | ~> 1.3 | 7 | | [aws](#requirement\_aws) | ~> 4.0 | 8 | | [talos](#requirement\_talos) | 0.9.0-alpha.0 | 9 | 10 | ## Providers 11 | 12 | | Name | Version | 13 | |------|---------| 14 | | [aws](#provider\_aws) | 4.67.0 | 15 | | [talos](#provider\_talos) | 0.9.0-alpha.0 | 16 | 17 | ## Modules 18 | 19 | | Name | Source | Version | 20 | |------|--------|---------| 21 | | [cluster\_sg](#module\_cluster\_sg) | terraform-aws-modules/security-group/aws | ~> 4.0 | 22 | | [elb\_k8s\_elb](#module\_elb\_k8s\_elb) | terraform-aws-modules/elb/aws | ~> 4.0 | 23 | | [kubernetes\_api\_sg](#module\_kubernetes\_api\_sg) | terraform-aws-modules/security-group/aws//modules/https-443 | ~> 4.0 | 24 | | [talos\_control\_plane\_nodes](#module\_talos\_control\_plane\_nodes) | terraform-aws-modules/ec2-instance/aws | ~> 4.0 | 25 | | [talos\_worker\_group](#module\_talos\_worker\_group) | 
terraform-aws-modules/ec2-instance/aws | ~> 4.0 | 26 | | [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 | 27 | 28 | ## Resources 29 | 30 | | Name | Type | 31 | |------|------| 32 | | [aws_iam_policy.control_plane_ccm_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | 33 | | [aws_iam_policy.worker_ccm_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource | 34 | | [talos_cluster_kubeconfig.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/cluster_kubeconfig) | resource | 35 | | [talos_machine_bootstrap.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_bootstrap) | resource | 36 | | [talos_machine_configuration_apply.controlplane](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_configuration_apply) | resource | 37 | | [talos_machine_configuration_apply.worker_group](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_configuration_apply) | resource | 38 | | [talos_machine_secrets.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_secrets) | resource | 39 | | [aws_ami.talos](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source | 40 | | [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source | 41 | | [talos_client_configuration.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/client_configuration) | data source | 42 | | [talos_cluster_health.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/cluster_health) | data source | 43 | | 
[talos_machine_configuration.controlplane](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/machine_configuration) | data source | 44 | | [talos_machine_configuration.worker_group](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/machine_configuration) | data source | 45 | 46 | ## Inputs 47 | 48 | | Name | Description | Type | Default | Required | 49 | |------|-------------|------|---------|:--------:| 50 | | [ccm](#input\_ccm) | Whether to deploy aws cloud controller manager | `bool` | `false` | no | 51 | | [cluster\_name](#input\_cluster\_name) | Name of cluster | `string` | `"talos-aws-example"` | no | 52 | | [config\_patch\_files](#input\_config\_patch\_files) | Path to talos config path files that applies to all nodes | `list(string)` | `[]` | no | 53 | | [control\_plane](#input\_control\_plane) | Info for control plane that will be created |
object({
instance_type = optional(string, "c5.large")
ami_id = optional(string, null)
num_instances = optional(number, 3)
config_patch_files = optional(list(string), [])
tags = optional(map(string), {})
})
| `{}` | no | 54 | | [extra\_tags](#input\_extra\_tags) | Extra tags to add to the cluster cloud resources | `map(string)` | `{}` | no | 55 | | [kubernetes\_api\_allowed\_cidr](#input\_kubernetes\_api\_allowed\_cidr) | The CIDR from which to allow to access the Kubernetes API | `string` | `"0.0.0.0/0"` | no | 56 | | [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use for the cluster, if not set the k8s version shipped with the talos sdk version will be used | `string` | `null` | no | 57 | | [talos\_api\_allowed\_cidr](#input\_talos\_api\_allowed\_cidr) | The CIDR from which to allow to access the Talos API | `string` | `"0.0.0.0/0"` | no | 58 | | [talos\_version\_contract](#input\_talos\_version\_contract) | Talos API version to use for the cluster, if not set the the version shipped with the talos sdk version will be used | `string` | `null` | no | 59 | | [vpc\_cidr](#input\_vpc\_cidr) | The IPv4 CIDR block for the VPC. | `string` | `"172.16.0.0/16"` | no | 60 | | [worker\_groups](#input\_worker\_groups) | List of node worker node groups to create |
list(object({
name = string
instance_type = optional(string, "c5.large")
ami_id = optional(string, null)
num_instances = optional(number, 1)
config_patch_files = optional(list(string), [])
tags = optional(map(string), {})
}))
|
[
{
"name": "default"
}
]
| no | 61 | 62 | ## Outputs 63 | 64 | | Name | Description | 65 | |------|-------------| 66 | | [kubeconfig](#output\_kubeconfig) | The generated kubeconfig. | 67 | | [talosconfig](#output\_talosconfig) | The generated talosconfig. | 68 | -------------------------------------------------------------------------------- /examples/terraform/aws/manifests/ccm.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: aws-cloud-controller-manager/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: cloud-controller-manager 7 | namespace: kube-system 8 | labels: 9 | helm.sh/chart: "aws-cloud-controller-manager-0.0.8" 10 | --- 11 | # Source: aws-cloud-controller-manager/templates/clusterrole.yaml 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRole 14 | metadata: 15 | name: system:cloud-controller-manager 16 | labels: 17 | helm.sh/chart: "aws-cloud-controller-manager-0.0.8" 18 | rules: 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - events 23 | verbs: 24 | - create 25 | - patch 26 | - update 27 | - apiGroups: 28 | - "" 29 | resources: 30 | - nodes 31 | verbs: 32 | - '*' 33 | - apiGroups: 34 | - "" 35 | resources: 36 | - nodes/status 37 | verbs: 38 | - patch 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - services 43 | verbs: 44 | - list 45 | - patch 46 | - update 47 | - watch 48 | - apiGroups: 49 | - "" 50 | resources: 51 | - services/status 52 | verbs: 53 | - list 54 | - patch 55 | - update 56 | - watch 57 | - apiGroups: 58 | - "" 59 | resources: 60 | - serviceaccounts 61 | verbs: 62 | - create 63 | - apiGroups: 64 | - "" 65 | resources: 66 | - persistentvolumes 67 | verbs: 68 | - get 69 | - list 70 | - update 71 | - watch 72 | - apiGroups: 73 | - "" 74 | resources: 75 | - endpoints 76 | verbs: 77 | - create 78 | - get 79 | - list 80 | - watch 81 | - update 82 | - apiGroups: 83 | - coordination.k8s.io 84 | resources: 85 | - leases 86 | verbs: 87 | - create 
88 | - get 89 | - list 90 | - watch 91 | - update 92 | - apiGroups: 93 | - "" 94 | resources: 95 | - serviceaccounts/token 96 | verbs: 97 | - create 98 | --- 99 | # Source: aws-cloud-controller-manager/templates/cluserrolebinding.yaml 100 | apiVersion: rbac.authorization.k8s.io/v1 101 | kind: ClusterRoleBinding 102 | metadata: 103 | name: system:cloud-controller-manager 104 | labels: 105 | helm.sh/chart: "aws-cloud-controller-manager-0.0.8" 106 | roleRef: 107 | kind: ClusterRole 108 | name: system:cloud-controller-manager 109 | apiGroup: rbac.authorization.k8s.io 110 | subjects: 111 | - apiGroup: "" 112 | kind: ServiceAccount 113 | name: cloud-controller-manager 114 | namespace: kube-system 115 | --- 116 | # Source: aws-cloud-controller-manager/templates/rolebinding.yaml 117 | apiVersion: rbac.authorization.k8s.io/v1 118 | kind: RoleBinding 119 | metadata: 120 | name: cloud-controller-manager:apiserver-authentication-reader 121 | namespace: kube-system 122 | labels: 123 | helm.sh/chart: "aws-cloud-controller-manager-0.0.8" 124 | roleRef: 125 | apiGroup: rbac.authorization.k8s.io 126 | kind: Role 127 | name: extension-apiserver-authentication-reader 128 | subjects: 129 | - apiGroup: "" 130 | kind: ServiceAccount 131 | name: cloud-controller-manager 132 | namespace: kube-system 133 | --- 134 | # Source: aws-cloud-controller-manager/templates/daemonset.yaml 135 | apiVersion: apps/v1 136 | kind: DaemonSet 137 | metadata: 138 | name: aws-cloud-controller-manager 139 | labels: 140 | k8s-app: aws-cloud-controller-manager 141 | helm.sh/chart: "aws-cloud-controller-manager-0.0.8" 142 | namespace: kube-system 143 | spec: 144 | selector: 145 | matchLabels: 146 | k8s-app: aws-cloud-controller-manager 147 | updateStrategy: 148 | type: RollingUpdate 149 | template: 150 | metadata: 151 | name: aws-cloud-controller-manager 152 | labels: 153 | k8s-app: aws-cloud-controller-manager 154 | spec: 155 | tolerations: 156 | - effect: NoSchedule 157 | key: 
node.cloudprovider.kubernetes.io/uninitialized 158 | value: "true" 159 | - effect: NoSchedule 160 | key: node-role.kubernetes.io/master 161 | - effect: NoSchedule 162 | key: node-role.kubernetes.io/control-plane 163 | nodeSelector: 164 | node-role.kubernetes.io/control-plane: "" 165 | dnsPolicy: Default 166 | priorityClassName: system-node-critical 167 | serviceAccountName: cloud-controller-manager 168 | securityContext: 169 | {} 170 | containers: 171 | - name: aws-cloud-controller-manager 172 | image: "registry.k8s.io/provider-aws/cloud-controller-manager:v1.32.1" 173 | args: 174 | - --v=2 175 | - --cloud-provider=aws 176 | - --configure-cloud-routes=false 177 | resources: 178 | requests: 179 | cpu: 200m 180 | env: 181 | [] 182 | securityContext: 183 | {} 184 | -------------------------------------------------------------------------------- /examples/terraform/aws/outputs.tf: -------------------------------------------------------------------------------- 1 | output "talosconfig" { 2 | description = "The generated talosconfig." 3 | value = data.talos_client_configuration.this.talos_config 4 | sensitive = true 5 | } 6 | 7 | output "kubeconfig" { 8 | description = "The generated kubeconfig." 
Talos API version to use for the cluster, if not set, the version shipped with the Talos SDK will be used
38 | } 39 | 40 | default = {} 41 | } 42 | 43 | variable "worker_groups" { 44 | description = "List of node worker node groups to create" 45 | type = list(object({ 46 | name = string 47 | instance_type = optional(string, "c5.large") 48 | ami_id = optional(string, null) 49 | num_instances = optional(number, 1) 50 | config_patch_files = optional(list(string), []) 51 | tags = optional(map(string), {}) 52 | })) 53 | 54 | validation { 55 | condition = ( 56 | alltrue([ 57 | for wg in var.worker_groups : ( 58 | wg.ami_id != null ? (length(wg.ami_id) > 4 && substr(wg.ami_id, 0, 4) == "ami-") : true 59 | ) 60 | ]) 61 | ) 62 | error_message = "The ami_id value must be a valid AMI id, starting with \"ami-\"." 63 | } 64 | default = [{ 65 | name = "default" 66 | }] 67 | } 68 | 69 | variable "extra_tags" { 70 | description = "Extra tags to add to the cluster cloud resources" 71 | type = map(string) 72 | default = {} 73 | } 74 | 75 | variable "vpc_cidr" { 76 | description = "The IPv4 CIDR block for the VPC." 
77 | type = string 78 | default = "172.16.0.0/16" 79 | } 80 | 81 | variable "talos_api_allowed_cidr" { 82 | description = "The CIDR from which to allow to access the Talos API" 83 | type = string 84 | default = "0.0.0.0/0" 85 | } 86 | 87 | variable "kubernetes_api_allowed_cidr" { 88 | description = "The CIDR from which to allow to access the Kubernetes API" 89 | type = string 90 | default = "0.0.0.0/0" 91 | } 92 | 93 | variable "config_patch_files" { 94 | description = "Path to talos config path files that applies to all nodes" 95 | type = list(string) 96 | default = [] 97 | } 98 | -------------------------------------------------------------------------------- /examples/terraform/aws/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.3" 3 | required_providers { 4 | aws = { 5 | source = "hashicorp/aws" 6 | version = "~> 4.0" 7 | } 8 | talos = { 9 | source = "siderolabs/talos" 10 | version = "0.9.0-alpha.0" 11 | } 12 | } 13 | } 14 | 15 | provider "aws" { 16 | default_tags { 17 | tags = { 18 | Project = "Talos Kubernetes Cluster" 19 | Provisioner = "Terraform" 20 | Environment = "Testing" 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /examples/terraform/azure/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/hashicorp/azurerm" { 5 | version = "3.117.1" 6 | constraints = ">= 3.0.0, ~> 3.0, >= 3.11.0, < 4.0.0" 7 | hashes = [ 8 | "h1:3c9iOEtBMnHrpJLlhbQ0sCZPWhE/2dvEPcL8KkXAh7w=", 9 | "zh:0c513676836e3c50d004ece7d2624a8aff6faac14b833b96feeac2e4bc2c1c12", 10 | "zh:50ea01ada95bae2f187db9e926e463f45d860767a85ebc59160414e00e76c35d", 11 | "zh:52c2a9edacc06b3f72153f5ef6daca0761c6292158815961fe37f60bc576a3d7", 12 | "zh:618eed2a06b19b1a025b45b05891846d570a6a1cca4d23f4942f5a99e1f747ae", 13 | "zh:61cde5d3165d7e5ec311d5d89486819cd605c1b2d54611b5c97bd4e97dba2762", 14 | "zh:6a873358d5031fc222f5e05f029d1237f3dce8345c767665f393283dfa2627f6", 15 | "zh:afdd80064b2a04da311856feb4ed45f77ff4df6c356e8c2b10afb51fe7e61c70", 16 | "zh:b09113df7e0e8c8959539bd22bae6c39faeb269ba3c4cd948e742f5cf58c35fb", 17 | "zh:d340db7973109761cfc27d52aa02560363337c908b2c99b3628adc5a70a99d5b", 18 | "zh:d5a577226ebc8c65e8f19384878a86acc4b51ede4b4a82d37c3b331b0efcd4a7", 19 | "zh:e2962b147f9e71732df8dbc74940c10d20906f3c003cbfaa1eb9fabbf601a9f0", 20 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 21 | ] 22 | } 23 | 24 | provider "registry.terraform.io/hashicorp/random" { 25 | version = "3.7.2" 26 | constraints = ">= 3.0.0" 27 | hashes = [ 28 | "h1:356j/3XnXEKr9nyicLUufzoF4Yr6hRy481KIxRVpK0c=", 29 | "zh:14829603a32e4bc4d05062f059e545a91e27ff033756b48afbae6b3c835f508f", 30 | "zh:1527fb07d9fea400d70e9e6eb4a2b918d5060d604749b6f1c361518e7da546dc", 31 | "zh:1e86bcd7ebec85ba336b423ba1db046aeaa3c0e5f921039b3f1a6fc2f978feab", 32 | "zh:24536dec8bde66753f4b4030b8f3ef43c196d69cccbea1c382d01b222478c7a3", 33 | "zh:29f1786486759fad9b0ce4fdfbbfece9343ad47cd50119045075e05afe49d212", 34 | "zh:4d701e978c2dd8604ba1ce962b047607701e65c078cb22e97171513e9e57491f", 35 | "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3", 36 | "zh:7b8434212eef0f8c83f5a90c6d76feaf850f6502b61b53c329e85b3b281cba34", 37 | 
"zh:ac8a23c212258b7976e1621275e3af7099e7e4a3d4478cf8d5d2a27f3bc3e967", 38 | "zh:b516ca74431f3df4c6cf90ddcdb4042c626e026317a33c53f0b445a3d93b720d", 39 | "zh:dc76e4326aec2490c1600d6871a95e78f9050f9ce427c71707ea412a2f2f1a62", 40 | "zh:eac7b63e86c749c7d48f527671c7aee5b4e26c10be6ad7232d6860167f99dbb0", 41 | ] 42 | } 43 | 44 | provider "registry.terraform.io/siderolabs/talos" { 45 | version = "0.9.0-alpha.0" 46 | constraints = "0.9.0-alpha.0" 47 | hashes = [ 48 | "h1:aJIAbggkIN/2/7JEjSz/PfHVFRXOokeHYt3hDII6kI0=", 49 | "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", 50 | "zh:12a822ecfcc14da28bb88887811a796f5165ee50d64967c5afea389da12e3d18", 51 | "zh:5c2519abfbd5e45de4afd94e52d393235e3d09845af27c58aa98dd532311f47f", 52 | "zh:6a99b169eaf46789c7465de27d2dea52ca799f39412390d0a28bb28f5b9e5a4e", 53 | "zh:975daeb0ff5517e5a7a4357125f6e7d74041d87c853f9a52ecfd285ce972e51b", 54 | "zh:b358aefccccf84dab4bf07f2b039814755fc282cbe30f20496013e311eae3463", 55 | "zh:b4e3a0fddc38a05c25b8f1553098574d56959abeb2b5bf9e880208000a415231", 56 | "zh:ba331d61225fac3f787f7acd4cc298a7e0ca43ee7536ce5ab7f6c9dfae4c8e9e", 57 | "zh:bbd9bc936461d2be6c11a5abaa53f2618ac592bc7a6cc1ad9c4205fd73c95eac", 58 | "zh:bdd77e81bf65074fbc891a7429ec3264a342bc7545978a6c108e87cec5bb2f56", 59 | "zh:c132d34502d47436c5f31670f2c786c072bce6137e28cfb5d948f36721db5f66", 60 | "zh:c39ac5467fff7e326b31ada5e734ba88b8f811c5d758c3ce2c9c886504cc232f", 61 | "zh:f1083b82593be4c888e35f6c9c773a86551c8c7b5dac1f3fa69863820852fc87", 62 | "zh:f40bc8da36b6dc3b95cc13d208b81e254346d78ab81624c07a2fa74148de7a8b", 63 | "zh:f56b4589644078e21dbcdbb53cc278550a04fa9c02bc7eea3f5dc91648da2048", 64 | ] 65 | } 66 | -------------------------------------------------------------------------------- /examples/terraform/azure/outputs.tf: -------------------------------------------------------------------------------- 1 | output "talosconfig" { 2 | value = data.talos_client_configuration.this.talos_config 3 | sensitive = true 4 | } 5 | 6 | 
output "kubeconfig" { 7 | value = talos_cluster_kubeconfig.this.kubeconfig_raw 8 | sensitive = true 9 | } 10 | -------------------------------------------------------------------------------- /examples/terraform/azure/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cluster_name" { 2 | description = "Name of cluster" 3 | type = string 4 | default = "talos-azure-example" 5 | } 6 | 7 | variable "talos_version_contract" { 8 | description = "Talos API version to use for the cluster, if not set the the version shipped with the talos sdk version will be used" 9 | type = string 10 | default = null 11 | } 12 | 13 | variable "kubernetes_version" { 14 | description = "Kubernetes version to use for the cluster, if not set the k8s version shipped with the talos sdk version will be used" 15 | type = string 16 | default = null 17 | } 18 | 19 | variable "control_plane" { 20 | description = "Info for control plane that will be created" 21 | type = object({ 22 | vm_size = optional(string, "Standard_B2s") 23 | vm_os_id = optional(string, "/subscriptions/7f739b7d-f399-4b97-9a9f-f1962309ee6e/resourceGroups/SideroGallery/providers/Microsoft.Compute/galleries/SideroLabs/images/talos-x64/versions/latest") 24 | num_instances = optional(number, 3) 25 | config_patch_files = optional(list(string), []) 26 | tags = optional(map(string), {}) 27 | }) 28 | 29 | default = {} 30 | } 31 | 32 | variable "worker_groups" { 33 | description = "List of node worker node groups to create" 34 | type = list(object({ 35 | name = string 36 | vm_size = optional(string, "Standard_B2s") 37 | vm_os_id = optional(string, "/subscriptions/7f739b7d-f399-4b97-9a9f-f1962309ee6e/resourceGroups/SideroGallery/providers/Microsoft.Compute/galleries/SideroLabs/images/talos-x64/versions/latest") 38 | num_instances = optional(number, 1) 39 | config_patch_files = optional(list(string), []) 40 | tags = optional(map(string), {}) 41 | })) 42 | 43 | default = [{ 44 | name = 
"default" 45 | }] 46 | } 47 | 48 | variable "extra_tags" { 49 | description = "Extra tags to add to the cluster cloud resources" 50 | type = map(string) 51 | default = {} 52 | } 53 | 54 | variable "config_patch_files" { 55 | description = "Path to talos config path files that applies to all nodes" 56 | type = list(string) 57 | default = [] 58 | } 59 | 60 | variable "azure_location" { 61 | description = "Azure location to use" 62 | type = string 63 | default = "West Europe" 64 | } 65 | 66 | variable "vnet_cidr" { 67 | description = "The IPv4 CIDR block for the Virtual Network." 68 | type = string 69 | default = "172.16.0.0/16" 70 | } 71 | 72 | variable "talos_api_allowed_cidr" { 73 | description = "The CIDR from which to allow to access the Talos API" 74 | type = string 75 | default = "0.0.0.0/0" 76 | } 77 | 78 | variable "kubernetes_api_allowed_cidr" { 79 | description = "The CIDR from which to allow to access the Kubernetes API" 80 | type = string 81 | default = "0.0.0.0/0" 82 | } 83 | -------------------------------------------------------------------------------- /examples/terraform/azure/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.3" 3 | required_providers { 4 | azurerm = { 5 | source = "hashicorp/azurerm" 6 | version = "~> 3.0" 7 | } 8 | talos = { 9 | source = "siderolabs/talos" 10 | version = "0.9.0-alpha.0" 11 | } 12 | } 13 | } 14 | 15 | provider "azurerm" { 16 | features {} 17 | } 18 | -------------------------------------------------------------------------------- /examples/terraform/basic/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 
3 | 4 | provider "registry.terraform.io/siderolabs/talos" { 5 | version = "0.9.0-alpha.0" 6 | constraints = "0.9.0-alpha.0" 7 | hashes = [ 8 | "h1:aJIAbggkIN/2/7JEjSz/PfHVFRXOokeHYt3hDII6kI0=", 9 | "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", 10 | "zh:12a822ecfcc14da28bb88887811a796f5165ee50d64967c5afea389da12e3d18", 11 | "zh:5c2519abfbd5e45de4afd94e52d393235e3d09845af27c58aa98dd532311f47f", 12 | "zh:6a99b169eaf46789c7465de27d2dea52ca799f39412390d0a28bb28f5b9e5a4e", 13 | "zh:975daeb0ff5517e5a7a4357125f6e7d74041d87c853f9a52ecfd285ce972e51b", 14 | "zh:b358aefccccf84dab4bf07f2b039814755fc282cbe30f20496013e311eae3463", 15 | "zh:b4e3a0fddc38a05c25b8f1553098574d56959abeb2b5bf9e880208000a415231", 16 | "zh:ba331d61225fac3f787f7acd4cc298a7e0ca43ee7536ce5ab7f6c9dfae4c8e9e", 17 | "zh:bbd9bc936461d2be6c11a5abaa53f2618ac592bc7a6cc1ad9c4205fd73c95eac", 18 | "zh:bdd77e81bf65074fbc891a7429ec3264a342bc7545978a6c108e87cec5bb2f56", 19 | "zh:c132d34502d47436c5f31670f2c786c072bce6137e28cfb5d948f36721db5f66", 20 | "zh:c39ac5467fff7e326b31ada5e734ba88b8f811c5d758c3ce2c9c886504cc232f", 21 | "zh:f1083b82593be4c888e35f6c9c773a86551c8c7b5dac1f3fa69863820852fc87", 22 | "zh:f40bc8da36b6dc3b95cc13d208b81e254346d78ab81624c07a2fa74148de7a8b", 23 | "zh:f56b4589644078e21dbcdbb53cc278550a04fa9c02bc7eea3f5dc91648da2048", 24 | ] 25 | } 26 | -------------------------------------------------------------------------------- /examples/terraform/basic/README.md: -------------------------------------------------------------------------------- 1 | # Basic Terraform Example 2 | 3 | This example will create a basic Talos cluster using local machines. 4 | 5 | ## Prereqs 6 | 7 | This guide assumes that you have pre-existing machines that have been booted with a Talos image or ISO without machine configuration, such that these machines are sitting in "maintenance mode" waiting to be provisioned. 
8 | From this directory, issue `terraform init` to ensure the proper providers are pulled down. 9 | 10 | ## Usage 11 | 12 | To create a default cluster, this should be as simple as `terraform apply`. 13 | You will need to specify the `cluster_name` and `cluster_endpoint` variables during application. 14 | The `cluster_endpoint` variable should have the form `https://:6443`. 15 | This will create a cluster based on the `node_data` variable, containing the IPs of each Talos node, as well as the install disk and hostname (optional). 16 | 17 | If different configurations are required, override them through command line with the `-var` flag or by creating a varsfile and overriding with `-var-file`. 18 | Destroying the cluster should, again, be a simple `terraform destroy`. 19 | 20 | Getting the kubeconfig and talosconfig for this cluster can be done with `terraform output -raw kubeconfig > ` and `terraform output -raw talosconfig > `. 21 | 22 | 23 | ## Requirements 24 | 25 | | Name | Version | 26 | |------|---------| 27 | | [talos](#requirement\_talos) | 0.9.0-alpha.0 | 28 | 29 | ## Providers 30 | 31 | | Name | Version | 32 | |------|---------| 33 | | [talos](#provider\_talos) | 0.9.0-alpha.0 | 34 | 35 | ## Modules 36 | 37 | No modules. 
38 | 39 | ## Resources 40 | 41 | | Name | Type | 42 | |------|------| 43 | | [talos_cluster_kubeconfig.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/cluster_kubeconfig) | resource | 44 | | [talos_machine_bootstrap.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_bootstrap) | resource | 45 | | [talos_machine_configuration_apply.controlplane](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_configuration_apply) | resource | 46 | | [talos_machine_configuration_apply.worker](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_configuration_apply) | resource | 47 | | [talos_machine_secrets.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_secrets) | resource | 48 | | [talos_client_configuration.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/client_configuration) | data source | 49 | | [talos_machine_configuration.controlplane](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/machine_configuration) | data source | 50 | | [talos_machine_configuration.worker](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/machine_configuration) | data source | 51 | 52 | ## Inputs 53 | 54 | | Name | Description | Type | Default | Required | 55 | |------|-------------|------|---------|:--------:| 56 | | [cluster\_endpoint](#input\_cluster\_endpoint) | The endpoint for the Talos cluster | `string` | n/a | yes | 57 | | [cluster\_name](#input\_cluster\_name) | A name to provide for the Talos cluster | `string` | n/a | yes | 58 | | [node\_data](#input\_node\_data) | A map of node data |
object({
controlplanes = map(object({
install_disk = string
hostname = optional(string)
}))
workers = map(object({
install_disk = string
hostname = optional(string)
}))
})
|
{
"controlplanes": {
"10.5.0.2": {
"install_disk": "/dev/sda"
},
"10.5.0.3": {
"install_disk": "/dev/sda"
},
"10.5.0.4": {
"install_disk": "/dev/sda"
}
},
"workers": {
"10.5.0.5": {
"hostname": "worker-1",
"install_disk": "/dev/nvme0n1"
},
"10.5.0.6": {
"hostname": "worker-2",
"install_disk": "/dev/nvme0n1"
}
}
}
| no | 59 | 60 | ## Outputs 61 | 62 | | Name | Description | 63 | |------|-------------| 64 | | [kubeconfig](#output\_kubeconfig) | n/a | 65 | | [talosconfig](#output\_talosconfig) | n/a | 66 | -------------------------------------------------------------------------------- /examples/terraform/basic/files/cp-scheduling.yaml: -------------------------------------------------------------------------------- 1 | cluster: 2 | allowSchedulingOnControlPlanes: true 3 | -------------------------------------------------------------------------------- /examples/terraform/basic/main.tf: -------------------------------------------------------------------------------- 1 | resource "talos_machine_secrets" "this" {} 2 | 3 | data "talos_machine_configuration" "controlplane" { 4 | cluster_name = var.cluster_name 5 | cluster_endpoint = var.cluster_endpoint 6 | machine_type = "controlplane" 7 | machine_secrets = talos_machine_secrets.this.machine_secrets 8 | } 9 | 10 | data "talos_machine_configuration" "worker" { 11 | cluster_name = var.cluster_name 12 | cluster_endpoint = var.cluster_endpoint 13 | machine_type = "worker" 14 | machine_secrets = talos_machine_secrets.this.machine_secrets 15 | } 16 | 17 | data "talos_client_configuration" "this" { 18 | cluster_name = var.cluster_name 19 | client_configuration = talos_machine_secrets.this.client_configuration 20 | endpoints = [for k, v in var.node_data.controlplanes : k] 21 | } 22 | 23 | resource "talos_machine_configuration_apply" "controlplane" { 24 | client_configuration = talos_machine_secrets.this.client_configuration 25 | machine_configuration_input = data.talos_machine_configuration.controlplane.machine_configuration 26 | for_each = var.node_data.controlplanes 27 | node = each.key 28 | config_patches = [ 29 | templatefile("${path.module}/templates/install-disk-and-hostname.yaml.tmpl", { 30 | hostname = each.value.hostname == null ? 
format("%s-cp-%s", var.cluster_name, index(keys(var.node_data.controlplanes), each.key)) : each.value.hostname 31 | install_disk = each.value.install_disk 32 | }), 33 | file("${path.module}/files/cp-scheduling.yaml"), 34 | ] 35 | } 36 | 37 | resource "talos_machine_configuration_apply" "worker" { 38 | client_configuration = talos_machine_secrets.this.client_configuration 39 | machine_configuration_input = data.talos_machine_configuration.worker.machine_configuration 40 | for_each = var.node_data.workers 41 | node = each.key 42 | config_patches = [ 43 | templatefile("${path.module}/templates/install-disk-and-hostname.yaml.tmpl", { 44 | hostname = each.value.hostname == null ? format("%s-worker-%s", var.cluster_name, index(keys(var.node_data.workers), each.key)) : each.value.hostname 45 | install_disk = each.value.install_disk 46 | }) 47 | ] 48 | } 49 | 50 | resource "talos_machine_bootstrap" "this" { 51 | depends_on = [talos_machine_configuration_apply.controlplane] 52 | 53 | client_configuration = talos_machine_secrets.this.client_configuration 54 | node = [for k, v in var.node_data.controlplanes : k][0] 55 | } 56 | 57 | resource "talos_cluster_kubeconfig" "this" { 58 | depends_on = [talos_machine_bootstrap.this] 59 | client_configuration = talos_machine_secrets.this.client_configuration 60 | node = [for k, v in var.node_data.controlplanes : k][0] 61 | } 62 | -------------------------------------------------------------------------------- /examples/terraform/basic/outputs.tf: -------------------------------------------------------------------------------- 1 | output "talosconfig" { 2 | value = data.talos_client_configuration.this.talos_config 3 | sensitive = true 4 | } 5 | 6 | output "kubeconfig" { 7 | value = talos_cluster_kubeconfig.this.kubeconfig_raw 8 | sensitive = true 9 | } 10 | -------------------------------------------------------------------------------- /examples/terraform/basic/templates/install-disk-and-hostname.yaml.tmpl: 
-------------------------------------------------------------------------------- 1 | machine: 2 | install: 3 | disk: ${install_disk} 4 | network: 5 | hostname: ${hostname} 6 | -------------------------------------------------------------------------------- /examples/terraform/basic/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cluster_name" { 2 | description = "A name to provide for the Talos cluster" 3 | type = string 4 | } 5 | 6 | variable "cluster_endpoint" { 7 | description = "The endpoint for the Talos cluster" 8 | type = string 9 | } 10 | 11 | variable "node_data" { 12 | description = "A map of node data" 13 | type = object({ 14 | controlplanes = map(object({ 15 | install_disk = string 16 | hostname = optional(string) 17 | })) 18 | workers = map(object({ 19 | install_disk = string 20 | hostname = optional(string) 21 | })) 22 | }) 23 | default = { 24 | controlplanes = { 25 | "10.5.0.2" = { 26 | install_disk = "/dev/sda" 27 | }, 28 | "10.5.0.3" = { 29 | install_disk = "/dev/sda" 30 | }, 31 | "10.5.0.4" = { 32 | install_disk = "/dev/sda" 33 | } 34 | } 35 | workers = { 36 | "10.5.0.5" = { 37 | install_disk = "/dev/nvme0n1" 38 | hostname = "worker-1" 39 | }, 40 | "10.5.0.6" = { 41 | install_disk = "/dev/nvme0n1" 42 | hostname = "worker-2" 43 | } 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /examples/terraform/basic/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | talos = { 4 | source = "siderolabs/talos" 5 | version = "0.9.0-alpha.0" 6 | } 7 | } 8 | } 9 | 10 | provider "talos" {} 11 | -------------------------------------------------------------------------------- /examples/terraform/digitalocean/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform 
init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/digitalocean/digitalocean" { 5 | version = "2.28.0" 6 | constraints = "2.28.0" 7 | hashes = [ 8 | "h1:ElTF9nGpHdCTT3MAQ/gdGuneDx+EtiftwVQZCUtuyeY=", 9 | "zh:0b4e5e0bfff78c947be5d8b056cc9bd3a7589b6a3aad90fb2feb10d5143e04b8", 10 | "zh:14b4eda0a4314a1969203ff2c7f688c49a5758a9c0ceebf0bb694157865f3166", 11 | "zh:195e5595853b5feea7a65853b77dce1f8842f3a4cea685d289dfbb7d53afaaa8", 12 | "zh:21570e4bbc07f4edff204cf2557b2e452a9610cd23bd2ad6367b43516c19a23a", 13 | "zh:2b183751bc9f3b088860424d6db45706f4af1740ab20da4e5b110dc9c7bd5fe5", 14 | "zh:3ced232fea6a4fab16e08e876f732e5fe95229c9e3d7cdbdd6b7ac44f27fd220", 15 | "zh:420065f8ab1cea1e2d65b04f03872984a2de6d1a5882cc7f33cb5bc6e1082a81", 16 | "zh:5ddd1db70e2b41258366ce5aae40403d76a6a2239318bc31e604435f57ee602c", 17 | "zh:6510a0c2e75a9d86b52b8607a9543a61f06b86da0c787a9f4e8d67c7fe577d80", 18 | "zh:72b453fa78ccf41bf43978654d37baebebbadedbf00692bc9f64e9cf655e8233", 19 | "zh:8b96a5627e515b8acfff7124f5c1a87ae47ff52de30e277c0ab12926ce9387f0", 20 | "zh:8d8571cf05c8f1e3424e8de259932b6cabb2c94e75290e977157e222e2eb03d3", 21 | "zh:cc8289adc262551376f1e4ac012c0f596e63181537de2e6ddb3bba4e0c5a6964", 22 | "zh:d73b0f76e36e37538519120a8179bb67c52175569536c517e7a4eee516219e82", 23 | "zh:e4b3cb1e3eac54bca8c647f77d8df3f2f5f55dfc3f5efd31710d0ae388809886", 24 | "zh:ee992cca10fa6bedbc6798d72f47bdbdfbb10dec56a004846309f90b209fd8b5", 25 | ] 26 | } 27 | 28 | provider "registry.terraform.io/hashicorp/tls" { 29 | version = "4.1.0" 30 | hashes = [ 31 | "h1:Ka8mEwRFXBabR33iN/WTIEW6RP0z13vFsDlwn11Pf2I=", 32 | "zh:14c35d89307988c835a7f8e26f1b83ce771e5f9b41e407f86a644c0152089ac2", 33 | "zh:2fb9fe7a8b5afdbd3e903acb6776ef1be3f2e587fb236a8c60f11a9fa165faa8", 34 | "zh:35808142ef850c0c60dd93dc06b95c747720ed2c40c89031781165f0c2baa2fc", 35 | "zh:35b5dc95bc75f0b3b9c5ce54d4d7600c1ebc96fbb8dfca174536e8bf103c8cdc", 36 | 
"zh:38aa27c6a6c98f1712aa5cc30011884dc4b128b4073a4a27883374bfa3ec9fac", 37 | "zh:51fb247e3a2e88f0047cb97bb9df7c228254a3b3021c5534e4563b4007e6f882", 38 | "zh:62b981ce491e38d892ba6364d1d0cdaadcee37cc218590e07b310b1dfa34be2d", 39 | "zh:bc8e47efc611924a79f947ce072a9ad698f311d4a60d0b4dfff6758c912b7298", 40 | "zh:c149508bd131765d1bc085c75a870abb314ff5a6d7f5ac1035a8892d686b6297", 41 | "zh:d38d40783503d278b63858978d40e07ac48123a2925e1a6b47e62179c046f87a", 42 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 43 | "zh:fb07f708e3316615f6d218cec198504984c0ce7000b9f1eebff7516e384f4b54", 44 | ] 45 | } 46 | 47 | provider "registry.terraform.io/siderolabs/talos" { 48 | version = "0.9.0-alpha.0" 49 | constraints = "0.9.0-alpha.0" 50 | hashes = [ 51 | "h1:aJIAbggkIN/2/7JEjSz/PfHVFRXOokeHYt3hDII6kI0=", 52 | "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", 53 | "zh:12a822ecfcc14da28bb88887811a796f5165ee50d64967c5afea389da12e3d18", 54 | "zh:5c2519abfbd5e45de4afd94e52d393235e3d09845af27c58aa98dd532311f47f", 55 | "zh:6a99b169eaf46789c7465de27d2dea52ca799f39412390d0a28bb28f5b9e5a4e", 56 | "zh:975daeb0ff5517e5a7a4357125f6e7d74041d87c853f9a52ecfd285ce972e51b", 57 | "zh:b358aefccccf84dab4bf07f2b039814755fc282cbe30f20496013e311eae3463", 58 | "zh:b4e3a0fddc38a05c25b8f1553098574d56959abeb2b5bf9e880208000a415231", 59 | "zh:ba331d61225fac3f787f7acd4cc298a7e0ca43ee7536ce5ab7f6c9dfae4c8e9e", 60 | "zh:bbd9bc936461d2be6c11a5abaa53f2618ac592bc7a6cc1ad9c4205fd73c95eac", 61 | "zh:bdd77e81bf65074fbc891a7429ec3264a342bc7545978a6c108e87cec5bb2f56", 62 | "zh:c132d34502d47436c5f31670f2c786c072bce6137e28cfb5d948f36721db5f66", 63 | "zh:c39ac5467fff7e326b31ada5e734ba88b8f811c5d758c3ce2c9c886504cc232f", 64 | "zh:f1083b82593be4c888e35f6c9c773a86551c8c7b5dac1f3fa69863820852fc87", 65 | "zh:f40bc8da36b6dc3b95cc13d208b81e254346d78ab81624c07a2fa74148de7a8b", 66 | "zh:f56b4589644078e21dbcdbb53cc278550a04fa9c02bc7eea3f5dc91648da2048", 67 | ] 68 | } 69 | 
-------------------------------------------------------------------------------- /examples/terraform/digitalocean/README.md: -------------------------------------------------------------------------------- 1 | # Digitalocean Terraform Example 2 | 3 | This example will create a load-balanced, HA Talos cluster on Digitalocean. 4 | It will upload the specified Talos release as a custom image and should result in a stable, maintainable cluster. 5 | 6 | ## Prereqs 7 | 8 | Export the `DIGITALOCEAN_TOKEN` environment variable with your API key obtained from digitalocean.com. 9 | From this directory, issue `terraform init` to ensure the proper providers are pulled down. 10 | 11 | ## Usage 12 | 13 | To create a default cluster, this should be as simple as `terraform apply`. 14 | This will create a cluster called `talos-do` with 3 control plane nodes and a single worker in the NYC3 region. 15 | Each of these VMs will be 2 CPU / 4GB RAM VMs. 16 | If different specs or regions are required, override them through command line with the `-var` flag or by creating a varsfile and overriding with `-var-file`. 17 | Destroying the cluster should, again, be a simple `terraform destroy`. 18 | 19 | Getting the kubeconfig and talosconfig for this cluster can be done with `terraform output -raw kubeconfig > ` and `terraform output -raw talosconfig > ` 20 | 21 | 22 | ## Requirements 23 | 24 | | Name | Version | 25 | |------|---------| 26 | | [digitalocean](#requirement\_digitalocean) | 2.28.0 | 27 | | [talos](#requirement\_talos) | 0.9.0-alpha.0 | 28 | 29 | ## Providers 30 | 31 | | Name | Version | 32 | |------|---------| 33 | | [digitalocean](#provider\_digitalocean) | 2.28.0 | 34 | | [talos](#provider\_talos) | 0.9.0-alpha.0 | 35 | | [tls](#provider\_tls) | 4.1.0 | 36 | 37 | ## Modules 38 | 39 | No modules. 
40 | 41 | ## Resources 42 | 43 | | Name | Type | 44 | |------|------| 45 | | [digitalocean_custom_image.talos_custom_image](https://registry.terraform.io/providers/digitalocean/digitalocean/2.28.0/docs/resources/custom_image) | resource | 46 | | [digitalocean_droplet.talos_control_plane](https://registry.terraform.io/providers/digitalocean/digitalocean/2.28.0/docs/resources/droplet) | resource | 47 | | [digitalocean_droplet.talos_workers](https://registry.terraform.io/providers/digitalocean/digitalocean/2.28.0/docs/resources/droplet) | resource | 48 | | [digitalocean_loadbalancer.talos_lb](https://registry.terraform.io/providers/digitalocean/digitalocean/2.28.0/docs/resources/loadbalancer) | resource | 49 | | [digitalocean_ssh_key.fake_ssh_key](https://registry.terraform.io/providers/digitalocean/digitalocean/2.28.0/docs/resources/ssh_key) | resource | 50 | | [talos_machine_bootstrap.bootstrap](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_bootstrap) | resource | 51 | | [talos_machine_configuration_apply.cp_config_apply](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_configuration_apply) | resource | 52 | | [talos_machine_configuration_apply.worker_config_apply](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_configuration_apply) | resource | 53 | | [talos_machine_secrets.machine_secrets](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_secrets) | resource | 54 | | [tls_private_key.fake_ssh_key](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource | 55 | | [talos_client_configuration.talosconfig](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/client_configuration) | data source | 56 | | 
[talos_cluster_kubeconfig.kubeconfig](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/cluster_kubeconfig) | data source | 57 | | [talos_machine_configuration.machineconfig_cp](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/machine_configuration) | data source | 58 | | [talos_machine_configuration.machineconfig_worker](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/machine_configuration) | data source | 59 | 60 | ## Inputs 61 | 62 | | Name | Description | Type | Default | Required | 63 | |------|-------------|------|---------|:--------:| 64 | | [cluster\_name](#input\_cluster\_name) | Name of cluster | `string` | `"talos-do"` | no | 65 | | [do\_plan\_control\_plane](#input\_do\_plan\_control\_plane) | DO plan to use for control plane nodes | `string` | `"s-2vcpu-4gb"` | no | 66 | | [do\_plan\_worker](#input\_do\_plan\_worker) | DO plan to use for worker nodes | `string` | `"s-2vcpu-4gb"` | no | 67 | | [do\_region](#input\_do\_region) | DO region to use | `string` | `"nyc3"` | no | 68 | | [num\_control\_plane](#input\_num\_control\_plane) | Number of control plane nodes to create | `number` | `3` | no | 69 | | [num\_workers](#input\_num\_workers) | Number of worker nodes to create | `number` | `1` | no | 70 | | [talos\_version](#input\_talos\_version) | Talos version to deploy | `string` | `"v1.4.0"` | no | 71 | 72 | ## Outputs 73 | 74 | | Name | Description | 75 | |------|-------------| 76 | | [kubeconfig](#output\_kubeconfig) | n/a | 77 | | [talosconfig](#output\_talosconfig) | n/a | 78 | -------------------------------------------------------------------------------- /examples/terraform/digitalocean/main.tf: -------------------------------------------------------------------------------- 1 | ## Upload a custom image to DigitalOcean 2 | resource "digitalocean_custom_image" "talos_custom_image" { 3 | name = "talos-linux-${var.talos_version}" 4 
| url = "https://github.com/siderolabs/talos/releases/download/${var.talos_version}/digital-ocean-amd64.raw.gz" 5 | distribution = "Unknown OS" 6 | regions = ["${var.do_region}"] 7 | } 8 | 9 | ## Cheese the creation of an SSH key 10 | resource "tls_private_key" "fake_ssh_key" { 11 | algorithm = "RSA" 12 | rsa_bits = 4096 13 | } 14 | 15 | resource "digitalocean_ssh_key" "fake_ssh_key" { 16 | name = "${var.cluster_name}-fake-ssh-key" 17 | public_key = tls_private_key.fake_ssh_key.public_key_openssh 18 | } 19 | 20 | ## Create all instances 21 | resource "digitalocean_droplet" "talos_control_plane" { 22 | image = digitalocean_custom_image.talos_custom_image.id 23 | name = "${var.cluster_name}-control-plane-${count.index}" 24 | region = var.do_region 25 | size = var.do_plan_control_plane 26 | count = var.num_control_plane 27 | ssh_keys = [digitalocean_ssh_key.fake_ssh_key.id] 28 | } 29 | 30 | resource "digitalocean_droplet" "talos_workers" { 31 | image = digitalocean_custom_image.talos_custom_image.id 32 | name = "${var.cluster_name}-worker-${count.index}" 33 | region = var.do_region 34 | size = var.do_plan_worker 35 | count = var.num_workers 36 | ssh_keys = [digitalocean_ssh_key.fake_ssh_key.id] 37 | } 38 | 39 | ## Create a LB for control plane 40 | resource "digitalocean_loadbalancer" "talos_lb" { 41 | name = "${var.cluster_name}-k8s" 42 | region = var.do_region 43 | 44 | forwarding_rule { 45 | entry_port = 6443 46 | entry_protocol = "tcp" 47 | 48 | target_port = 6443 49 | target_protocol = "tcp" 50 | } 51 | 52 | healthcheck { 53 | port = 50000 54 | protocol = "tcp" 55 | } 56 | 57 | droplet_ids = digitalocean_droplet.talos_control_plane[*].id 58 | } 59 | 60 | ## Bootstrap talos 61 | 62 | resource "talos_machine_secrets" "machine_secrets" {} 63 | 64 | data "talos_client_configuration" "talosconfig" { 65 | cluster_name = var.cluster_name 66 | client_configuration = talos_machine_secrets.machine_secrets.client_configuration 67 | endpoints = 
digitalocean_droplet.talos_control_plane[*].ipv4_address 68 | } 69 | 70 | data "talos_machine_configuration" "machineconfig_cp" { 71 | cluster_name = var.cluster_name 72 | cluster_endpoint = "https://${digitalocean_loadbalancer.talos_lb.ip}:6443" 73 | machine_type = "controlplane" 74 | machine_secrets = talos_machine_secrets.machine_secrets.machine_secrets 75 | depends_on = [digitalocean_loadbalancer.talos_lb] 76 | } 77 | 78 | resource "talos_machine_configuration_apply" "cp_config_apply" { 79 | client_configuration = talos_machine_secrets.machine_secrets.client_configuration 80 | machine_configuration_input = data.talos_machine_configuration.machineconfig_cp.machine_configuration 81 | count = length(digitalocean_droplet.talos_control_plane) 82 | node = digitalocean_droplet.talos_control_plane[count.index].ipv4_address 83 | } 84 | 85 | data "talos_machine_configuration" "machineconfig_worker" { 86 | cluster_name = var.cluster_name 87 | cluster_endpoint = "https://${digitalocean_loadbalancer.talos_lb.ip}:6443" 88 | machine_type = "worker" 89 | machine_secrets = talos_machine_secrets.machine_secrets.machine_secrets 90 | depends_on = [digitalocean_loadbalancer.talos_lb] 91 | } 92 | 93 | resource "talos_machine_configuration_apply" "worker_config_apply" { 94 | client_configuration = talos_machine_secrets.machine_secrets.client_configuration 95 | machine_configuration_input = data.talos_machine_configuration.machineconfig_worker.machine_configuration 96 | count = length(digitalocean_droplet.talos_workers) 97 | node = digitalocean_droplet.talos_workers[count.index].ipv4_address 98 | } 99 | 100 | resource "talos_machine_bootstrap" "bootstrap" { 101 | client_configuration = talos_machine_secrets.machine_secrets.client_configuration 102 | node = digitalocean_droplet.talos_control_plane[0].ipv4_address 103 | } 104 | 105 | data "talos_cluster_kubeconfig" "kubeconfig" { 106 | client_configuration = talos_machine_secrets.machine_secrets.client_configuration 107 | node = 
digitalocean_droplet.talos_control_plane[0].ipv4_address 108 | } 109 | -------------------------------------------------------------------------------- /examples/terraform/digitalocean/outputs.tf: -------------------------------------------------------------------------------- 1 | 2 | output "talosconfig" { 3 | value = data.talos_client_configuration.talosconfig.talos_config 4 | sensitive = true 5 | } 6 | 7 | output "kubeconfig" { 8 | value = data.talos_cluster_kubeconfig.kubeconfig.kubeconfig_raw 9 | sensitive = true 10 | } 11 | -------------------------------------------------------------------------------- /examples/terraform/digitalocean/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cluster_name" { 2 | description = "Name of cluster" 3 | type = string 4 | default = "talos-do" 5 | } 6 | 7 | variable "num_control_plane" { 8 | description = "Number of control plane nodes to create" 9 | type = number 10 | default = 3 11 | } 12 | 13 | variable "num_workers" { 14 | description = "Number of worker nodes to create" 15 | type = number 16 | default = 1 17 | } 18 | 19 | variable "talos_version" { 20 | description = "Talos version to deploy" 21 | type = string 22 | default = "v1.4.0" 23 | } 24 | 25 | variable "do_region" { 26 | description = "DO region to use" 27 | type = string 28 | default = "nyc3" 29 | } 30 | 31 | variable "do_plan_control_plane" { 32 | description = "DO plan to use for control plane nodes" 33 | type = string 34 | default = "s-2vcpu-4gb" 35 | } 36 | 37 | variable "do_plan_worker" { 38 | description = "DO plan to use for worker nodes" 39 | type = string 40 | default = "s-2vcpu-4gb" 41 | } 42 | -------------------------------------------------------------------------------- /examples/terraform/digitalocean/versions.tf: -------------------------------------------------------------------------------- 1 | # TF setup 2 | 3 | terraform { 4 | required_providers { 5 | digitalocean = { 6 | source = 
"digitalocean/digitalocean" 7 | version = "2.28.0" 8 | } 9 | talos = { 10 | source = "siderolabs/talos" 11 | version = "0.9.0-alpha.0" 12 | } 13 | } 14 | } 15 | 16 | # Configure providers 17 | 18 | provider "digitalocean" {} 19 | 20 | provider "talos" {} 21 | -------------------------------------------------------------------------------- /examples/terraform/equinix-metal/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/equinix/equinix" { 5 | version = "1.33.0" 6 | constraints = "1.33.0" 7 | hashes = [ 8 | "h1:Cp4jL4QO6tDJr0MWRXnxZ/2xoWukxX1VxQ35BEeAvgM=", 9 | "zh:0ee336623b14cf035ec86f6b14885af39e66c28fe78e4043fbebd2218c5a8f10", 10 | "zh:1638a63b9654ad2741827e0594a329aa01262a4f837ab4debdd5c960645ce967", 11 | "zh:1766e6962eac6c358b2220689f93ddf1a2636cb62f925688f9219fca54d90a03", 12 | "zh:30052a958f9f8ae4fcf24fdef2abf1dd16f306c04eadba90943208bdd6e2193e", 13 | "zh:34002b6a54ba942959ebdd32bb24487b22231ea200cd80700bc48f78a3de0165", 14 | "zh:556737ef80e7c7ec4d02874278675555b487cf1945605207c866ae76ab4b19e7", 15 | "zh:5ada9cbb554c7fc37857248a59d2d2f2272aaab9f6359a8f41307f728314abfb", 16 | "zh:623cf490304571157fe23767ae94416617d604935595eaae02b5a32c56d88a1d", 17 | "zh:8eb72c61b1d23c218405a33d82b40712c6dbd82ea8851eea8225ec2cd7126362", 18 | "zh:9853a78d696ee3937e2028603380ce2ff37812742813db1338455b3065d96a67", 19 | "zh:a2762f90f680e84ab8bd8c5c4a494f48142fb31d652b177762dac32627280659", 20 | "zh:a32ebed9e4b4d54225e4fd7aa0097dd287f0ce93363bb0cadd14d7d85d110b3a", 21 | "zh:beb21c3596090093b60306fa53a9e6d7103da7a560c5b8e2e19ab04a4b8a0dad", 22 | "zh:d3f9c303afbb14b43bb70ab736cb85a79e0ee98a360fa4ecd76e7dd11bad8ab6", 23 | ] 24 | } 25 | 26 | provider "registry.terraform.io/siderolabs/talos" { 27 | version = "0.9.0-alpha.0" 28 | constraints = "0.9.0-alpha.0" 29 | hashes = [ 30 | 
"h1:aJIAbggkIN/2/7JEjSz/PfHVFRXOokeHYt3hDII6kI0=", 31 | "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", 32 | "zh:12a822ecfcc14da28bb88887811a796f5165ee50d64967c5afea389da12e3d18", 33 | "zh:5c2519abfbd5e45de4afd94e52d393235e3d09845af27c58aa98dd532311f47f", 34 | "zh:6a99b169eaf46789c7465de27d2dea52ca799f39412390d0a28bb28f5b9e5a4e", 35 | "zh:975daeb0ff5517e5a7a4357125f6e7d74041d87c853f9a52ecfd285ce972e51b", 36 | "zh:b358aefccccf84dab4bf07f2b039814755fc282cbe30f20496013e311eae3463", 37 | "zh:b4e3a0fddc38a05c25b8f1553098574d56959abeb2b5bf9e880208000a415231", 38 | "zh:ba331d61225fac3f787f7acd4cc298a7e0ca43ee7536ce5ab7f6c9dfae4c8e9e", 39 | "zh:bbd9bc936461d2be6c11a5abaa53f2618ac592bc7a6cc1ad9c4205fd73c95eac", 40 | "zh:bdd77e81bf65074fbc891a7429ec3264a342bc7545978a6c108e87cec5bb2f56", 41 | "zh:c132d34502d47436c5f31670f2c786c072bce6137e28cfb5d948f36721db5f66", 42 | "zh:c39ac5467fff7e326b31ada5e734ba88b8f811c5d758c3ce2c9c886504cc232f", 43 | "zh:f1083b82593be4c888e35f6c9c773a86551c8c7b5dac1f3fa69863820852fc87", 44 | "zh:f40bc8da36b6dc3b95cc13d208b81e254346d78ab81624c07a2fa74148de7a8b", 45 | "zh:f56b4589644078e21dbcdbb53cc278550a04fa9c02bc7eea3f5dc91648da2048", 46 | ] 47 | } 48 | -------------------------------------------------------------------------------- /examples/terraform/equinix-metal/equinix-arm64.yaml: -------------------------------------------------------------------------------- 1 | machine: 2 | install: 3 | disk: /dev/nvme0n1 4 | -------------------------------------------------------------------------------- /examples/terraform/equinix-metal/main.tf: -------------------------------------------------------------------------------- 1 | # Create EM resources 2 | resource "equinix_metal_reserved_ip_block" "talos_control_plane_vip" { 3 | project_id = var.em_project_id 4 | type = "public_ipv4" 5 | metro = var.em_region 6 | tags = var.extra_tags 7 | quantity = 1 8 | description = "${var.cluster_name} Control Plane VIP" 9 | } 10 | 11 | resource 
"equinix_metal_device" "talos_control_plane_nodes" { 12 | count = var.control_plane.num_instances 13 | 14 | project_id = var.em_project_id 15 | plan = var.control_plane.plan 16 | metro = var.em_region 17 | tags = concat(var.extra_tags, var.control_plane.tags) 18 | operating_system = "custom_ipxe" 19 | ipxe_script_url = var.control_plane.ipxe_script_url 20 | billing_cycle = "hourly" 21 | hostname = "${var.cluster_name}-control-plane-${count.index}" 22 | } 23 | 24 | resource "equinix_metal_device" "talos_worker_group" { 25 | for_each = merge([ 26 | for info in var.worker_groups : { 27 | for index in range(0, info.num_instances) : "${info.name}.${index}" => info 28 | } 29 | ]...) 30 | 31 | project_id = var.em_project_id 32 | plan = each.value.plan 33 | metro = var.em_region 34 | tags = concat(var.extra_tags, each.value.tags) 35 | operating_system = "custom_ipxe" 36 | ipxe_script_url = each.value.ipxe_script_url 37 | billing_cycle = "hourly" 38 | hostname = "${var.cluster_name}-worker-group-${each.value.name}-${trimprefix(each.key, "${each.value.name}.")}" 39 | } 40 | 41 | # Configure and bootstrap Talos 42 | 43 | resource "talos_machine_secrets" "this" {} 44 | 45 | data "talos_machine_configuration" "controlplane" { 46 | cluster_name = var.cluster_name 47 | cluster_endpoint = "https://${equinix_metal_reserved_ip_block.talos_control_plane_vip.network}:6443" 48 | machine_type = "controlplane" 49 | machine_secrets = talos_machine_secrets.this.machine_secrets 50 | talos_version = var.talos_version_contract 51 | kubernetes_version = var.kubernetes_version 52 | docs = false 53 | examples = false 54 | config_patches = concat( 55 | [templatefile("${path.module}/templates/vip.yaml.tmpl", { 56 | em_vip_ip = equinix_metal_reserved_ip_block.talos_control_plane_vip.network 57 | em_api_token = var.em_api_token 58 | })], 59 | [templatefile("${path.module}/templates/installer.yaml.tmpl", { 60 | install_image = var.control_plane.install_image 61 | })], 62 | [for path in 
var.control_plane.config_patch_files : file(path)] 63 | ) 64 | } 65 | 66 | data "talos_machine_configuration" "worker_group" { 67 | for_each = merge([for info in var.worker_groups : { "${info.name}" = info }]...) 68 | 69 | cluster_name = var.cluster_name 70 | cluster_endpoint = "https://${equinix_metal_reserved_ip_block.talos_control_plane_vip.network}:6443" 71 | machine_type = "worker" 72 | machine_secrets = talos_machine_secrets.this.machine_secrets 73 | talos_version = var.talos_version_contract 74 | kubernetes_version = var.kubernetes_version 75 | docs = false 76 | examples = false 77 | config_patches = concat( 78 | [templatefile("${path.module}/templates/installer.yaml.tmpl", { 79 | install_image = each.value.install_image 80 | })], 81 | [for path in each.value.config_patch_files : file(path)] 82 | ) 83 | } 84 | 85 | resource "talos_machine_configuration_apply" "controlplane" { 86 | count = var.control_plane.num_instances 87 | 88 | client_configuration = talos_machine_secrets.this.client_configuration 89 | machine_configuration_input = data.talos_machine_configuration.controlplane.machine_configuration 90 | endpoint = equinix_metal_device.talos_control_plane_nodes[count.index].access_public_ipv4 91 | node = equinix_metal_device.talos_control_plane_nodes[count.index].access_private_ipv4 92 | } 93 | 94 | resource "talos_machine_configuration_apply" "worker_group" { 95 | for_each = merge([ 96 | for info in var.worker_groups : { 97 | for index in range(0, info.num_instances) : 98 | "${info.name}.${index}" => { 99 | name = info.name, 100 | public_ip = equinix_metal_device.talos_worker_group["${info.name}.${index}"].access_public_ipv4, 101 | private_ip = equinix_metal_device.talos_worker_group["${info.name}.${index}"].access_private_ipv4 102 | } 103 | } 104 | ]...) 
105 | 106 | client_configuration = talos_machine_secrets.this.client_configuration 107 | machine_configuration_input = data.talos_machine_configuration.worker_group[each.value.name].machine_configuration 108 | endpoint = each.value.public_ip 109 | node = each.value.private_ip 110 | } 111 | 112 | resource "talos_machine_bootstrap" "this" { 113 | depends_on = [talos_machine_configuration_apply.controlplane] 114 | 115 | client_configuration = talos_machine_secrets.this.client_configuration 116 | endpoint = equinix_metal_device.talos_control_plane_nodes[0].access_public_ipv4 117 | node = equinix_metal_device.talos_control_plane_nodes[0].access_public_ipv4 118 | } 119 | 120 | data "talos_client_configuration" "this" { 121 | cluster_name = var.cluster_name 122 | client_configuration = talos_machine_secrets.this.client_configuration 123 | endpoints = equinix_metal_device.talos_control_plane_nodes.*.access_public_ipv4 124 | nodes = flatten([equinix_metal_device.talos_control_plane_nodes.*.access_public_ipv4, flatten([for node in equinix_metal_device.talos_worker_group : node.access_public_ipv4])]) 125 | } 126 | 127 | resource "talos_cluster_kubeconfig" "this" { 128 | depends_on = [talos_machine_bootstrap.this] 129 | 130 | client_configuration = talos_machine_secrets.this.client_configuration 131 | endpoint = equinix_metal_device.talos_control_plane_nodes.0.access_public_ipv4 132 | node = equinix_metal_device.talos_control_plane_nodes.0.access_public_ipv4 133 | } 134 | 135 | data "talos_cluster_health" "this" { 136 | depends_on = [ 137 | talos_machine_configuration_apply.controlplane, 138 | talos_machine_configuration_apply.worker_group, 139 | talos_cluster_kubeconfig.this 140 | ] 141 | 142 | client_configuration = talos_machine_secrets.this.client_configuration 143 | endpoints = equinix_metal_device.talos_control_plane_nodes.*.access_public_ipv4 144 | control_plane_nodes = equinix_metal_device.talos_control_plane_nodes.*.access_private_ipv4 145 | worker_nodes = [for node 
in equinix_metal_device.talos_worker_group : node.access_private_ipv4] 146 | } 147 | -------------------------------------------------------------------------------- /examples/terraform/equinix-metal/outputs.tf: -------------------------------------------------------------------------------- 1 | output "talosconfig" { 2 | value = data.talos_client_configuration.this.talos_config 3 | sensitive = true 4 | } 5 | 6 | output "kubeconfig" { 7 | value = talos_cluster_kubeconfig.this.kubeconfig_raw 8 | sensitive = true 9 | } 10 | -------------------------------------------------------------------------------- /examples/terraform/equinix-metal/templates/installer.yaml.tmpl: -------------------------------------------------------------------------------- 1 | machine: 2 | install: 3 | image: ${install_image} 4 | -------------------------------------------------------------------------------- /examples/terraform/equinix-metal/templates/vip.yaml.tmpl: -------------------------------------------------------------------------------- 1 | machine: 2 | network: 3 | interfaces: 4 | - interface: bond0 5 | vip: 6 | ip: ${em_vip_ip} 7 | equinixMetal: 8 | apiToken: ${em_api_token} -------------------------------------------------------------------------------- /examples/terraform/equinix-metal/variables.tf: -------------------------------------------------------------------------------- 1 | variable "em_api_token" { 2 | description = "API token for Equinix Metal" 3 | type = string 4 | sensitive = true 5 | } 6 | 7 | variable "cluster_name" { 8 | description = "Name of cluster" 9 | type = string 10 | default = "talos-em" 11 | } 12 | 13 | variable "talos_version_contract" { 14 | description = "Talos API version to use for the cluster, if not set the the version shipped with the talos sdk version will be used" 15 | type = string 16 | default = null 17 | } 18 | 19 | variable "kubernetes_version" { 20 | description = "Kubernetes version to use for the cluster, if not set the k8s version 
shipped with the talos sdk version will be used" 21 | type = string 22 | default = null 23 | } 24 | 25 | variable "control_plane" { 26 | description = "Info for control plane that will be created" 27 | type = object({ 28 | plan = optional(string, "c3.small.x86") 29 | ipxe_script_url = optional(string, "https://pxe.factory.talos.dev/pxe/376567988ad370138ad8b2698212367b8edcb69b5fd68c80be1f2ec7d603b4ba/v1.7.0/equinixMetal-amd64") 30 | install_image = optional(string, "ghcr.io/talos-systems/installer:v1.7.0") 31 | num_instances = optional(number, 3) 32 | config_patch_files = optional(list(string), []) 33 | tags = optional(list(string), []) 34 | }) 35 | 36 | default = {} 37 | } 38 | 39 | variable "worker_groups" { 40 | description = "List of node worker node groups to create" 41 | type = list(object({ 42 | name = string 43 | plan = optional(string, "c3.small.x86") 44 | ipxe_script_url = optional(string, "https://pxe.factory.talos.dev/pxe/376567988ad370138ad8b2698212367b8edcb69b5fd68c80be1f2ec7d603b4ba/v1.7.0/equinixMetal-amd64") 45 | install_image = optional(string, "ghcr.io/talos-systems/installer:v1.7.0") 46 | num_instances = optional(number, 1) 47 | config_patch_files = optional(list(string), []) 48 | tags = optional(list(string), []) 49 | })) 50 | 51 | default = [{ 52 | name = "default" 53 | }] 54 | } 55 | 56 | variable "extra_tags" { 57 | description = "Extra tags to add to the cluster cloud resources" 58 | type = list(string) 59 | default = [] 60 | } 61 | 62 | variable "config_patch_files" { 63 | description = "Path to talos config path files that applies to all nodes" 64 | type = list(string) 65 | default = [] 66 | } 67 | 68 | variable "em_region" { 69 | description = "Equinix Metal region to use" 70 | type = string 71 | default = "dc" 72 | } 73 | 74 | variable "em_project_id" { 75 | description = "Equinix Metal project ID" 76 | type = string 77 | } 78 | -------------------------------------------------------------------------------- 
/examples/terraform/equinix-metal/versions.tf: -------------------------------------------------------------------------------- 1 | # TF setup 2 | 3 | terraform { 4 | required_providers { 5 | equinix = { 6 | source = "equinix/equinix" 7 | version = "1.33.0" 8 | } 9 | talos = { 10 | source = "siderolabs/talos" 11 | version = "0.9.0-alpha.0" 12 | } 13 | } 14 | } 15 | 16 | # Configure providers 17 | 18 | provider "equinix" { 19 | auth_token = var.em_api_token 20 | } 21 | 22 | provider "talos" {} 23 | -------------------------------------------------------------------------------- /examples/terraform/gcp/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hashicorp/google" { 5 | version = "6.35.0" 6 | constraints = ">= 3.33.0, >= 3.83.0, >= 4.25.0, >= 4.64.0, ~> 6.0, < 7.0.0" 7 | hashes = [ 8 | "h1:CJKihKbsYi8PEBFt8wcIwY0PWcLqYjcfT6Yt2xNS1R4=", 9 | "zh:0f7b8da0050470b074c2c5124359a407a15298dc3374cb2fbf5d243f7a390712", 10 | "zh:27a8774e750a8b6ff296240d74ba322c3ac626ea847e150d745c014440c66927", 11 | "zh:3cf134f088f31811f35c5996282b316308ad7a02b801cc84712696d536ef57e3", 12 | "zh:3e08b3b451b33101f5d65fddc0d4f00e53260fcc8702ddd3d2ebab93b3553df1", 13 | "zh:5fe57182514244b410c33d1b7bde8b1a78442323d97b13d0b08ad0fac884ad55", 14 | "zh:6db9431b1bb9ac198eb5e0e6706d2c12ad5d47961e09d731be91cfd97f17c18b", 15 | "zh:76309d66d6806f75639126a49db996b51496fa64d2313466f46f476284802ab3", 16 | "zh:897c2cb1411dfed658419de9be24a2b82f64e236449f45f76d9a13c244c872de", 17 | "zh:a5b4e8771fe4547979a767c60cf44b590f52f296d75f161bfa032c6fe6364bc2", 18 | "zh:bb3297ad9bdf20b9d460fe0801ae3d555460bad3bfbf0c062094afa27125afc1", 19 | "zh:cb7ed92b9af88f29f8faef4de844d57d3f4aeccce92edf3b4a33e8b4c6049b38", 20 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 21 | ] 22 | } 23 | 24 | 
provider "registry.terraform.io/hashicorp/google-beta" { 25 | version = "6.35.0" 26 | constraints = ">= 4.64.0, < 7.0.0" 27 | hashes = [ 28 | "h1:Q1VXolwlo9TM5IatjsII9d5oiwxEb3tzpGUDJQiX9OE=", 29 | "zh:119b4d11dbaff065dc9e91d11d0bfde3076ee21313a61eeee459928ae20ca366", 30 | "zh:137fda0ed8d2d4588021c8f18e61ee0d5219d4f5d95821ee65a1cda3d3574d2c", 31 | "zh:280a5ea88053511443c1b6b2270c415b46df6a7b2ab4e87889d64668c0840440", 32 | "zh:65bd3715520885330cb55289e62baf5354f75de039137657c3b1cf1a5f9761d4", 33 | "zh:7099ca906f54888131da1ceb0a25444b4a9e5a533341cba283d874ef144c51ac", 34 | "zh:b3b1f093f177cf5166b0e38257e8bc0e039cdf27f28693004304c3de387df165", 35 | "zh:b52eefd0a02a87ce1ff5e87026adfe7c693c7e6fa8bfad0e35f70b556486e103", 36 | "zh:bbec0ab75da4d3a972fe2f0bc28a0be8916a8b167d7d889ff6f4611b4536b4a9", 37 | "zh:c7638b10aecfe1659c9c829de52c6b83b58368c1d55c49ebc8945ef289d8f2cf", 38 | "zh:c92260e7b06f5394f101471d0c19759af620e38d145fa54b104d6ed3a68bf437", 39 | "zh:f4bd6cd39368d7a49ffedbbe03b49750316285cd48987a945d3bdc9a4dfb8cce", 40 | "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", 41 | ] 42 | } 43 | 44 | provider "registry.terraform.io/siderolabs/talos" { 45 | version = "0.9.0-alpha.0" 46 | constraints = "0.9.0-alpha.0" 47 | hashes = [ 48 | "h1:aJIAbggkIN/2/7JEjSz/PfHVFRXOokeHYt3hDII6kI0=", 49 | "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", 50 | "zh:12a822ecfcc14da28bb88887811a796f5165ee50d64967c5afea389da12e3d18", 51 | "zh:5c2519abfbd5e45de4afd94e52d393235e3d09845af27c58aa98dd532311f47f", 52 | "zh:6a99b169eaf46789c7465de27d2dea52ca799f39412390d0a28bb28f5b9e5a4e", 53 | "zh:975daeb0ff5517e5a7a4357125f6e7d74041d87c853f9a52ecfd285ce972e51b", 54 | "zh:b358aefccccf84dab4bf07f2b039814755fc282cbe30f20496013e311eae3463", 55 | "zh:b4e3a0fddc38a05c25b8f1553098574d56959abeb2b5bf9e880208000a415231", 56 | "zh:ba331d61225fac3f787f7acd4cc298a7e0ca43ee7536ce5ab7f6c9dfae4c8e9e", 57 | 
"zh:bbd9bc936461d2be6c11a5abaa53f2618ac592bc7a6cc1ad9c4205fd73c95eac", 58 | "zh:bdd77e81bf65074fbc891a7429ec3264a342bc7545978a6c108e87cec5bb2f56", 59 | "zh:c132d34502d47436c5f31670f2c786c072bce6137e28cfb5d948f36721db5f66", 60 | "zh:c39ac5467fff7e326b31ada5e734ba88b8f811c5d758c3ce2c9c886504cc232f", 61 | "zh:f1083b82593be4c888e35f6c9c773a86551c8c7b5dac1f3fa69863820852fc87", 62 | "zh:f40bc8da36b6dc3b95cc13d208b81e254346d78ab81624c07a2fa74148de7a8b", 63 | "zh:f56b4589644078e21dbcdbb53cc278550a04fa9c02bc7eea3f5dc91648da2048", 64 | ] 65 | } 66 | -------------------------------------------------------------------------------- /examples/terraform/gcp/outputs.tf: -------------------------------------------------------------------------------- 1 | output "talosconfig" { 2 | description = "The generated talosconfig." 3 | value = data.talos_client_configuration.this.talos_config 4 | sensitive = true 5 | } 6 | 7 | output "kubeconfig" { 8 | description = "The generated kubeconfig." 9 | value = talos_cluster_kubeconfig.this.kubeconfig_raw 10 | sensitive = true 11 | } 12 | -------------------------------------------------------------------------------- /examples/terraform/gcp/variables.tf: -------------------------------------------------------------------------------- 1 | variable "project" { 2 | description = "The GCP project to deploy resources to" 3 | type = string 4 | } 5 | 6 | variable "region" { 7 | description = "The GCP region to deploy resources to" 8 | type = string 9 | } 10 | 11 | variable "zone" { 12 | description = "The GCP zone to deploy resources to" 13 | type = string 14 | } 15 | 16 | variable "cluster_name" { 17 | description = "Name of cluster" 18 | type = string 19 | default = "talos-gcp-example" 20 | } 21 | 22 | variable "talos_version_contract" { 23 | description = "Talos API version to use for the cluster, if not set the the version shipped with the talos sdk version will be used" 24 | type = string 25 | default = null 26 | } 27 | variable 
"kubernetes_version" { 28 | description = "Kubernetes version to use for the cluster, if not set the k8s version shipped with the talos sdk version will be used" 29 | type = string 30 | default = null 31 | } 32 | 33 | variable "control_plane" { 34 | description = "Info for control plane that will be created" 35 | type = object({ 36 | instance_type = optional(string, "e2-standard-2") 37 | image = optional(string, null) 38 | num_instances = optional(number, 3) 39 | config_patch_files = optional(list(string), []) 40 | tags = optional(map(string), {}) 41 | }) 42 | 43 | default = {} 44 | } 45 | 46 | variable "worker_groups" { 47 | description = "List of node worker node groups to create" 48 | type = list(object({ 49 | name = string 50 | instance_type = optional(string, "e2-standard-2") 51 | image = optional(string, null) 52 | num_instances = optional(number, 1) 53 | config_patch_files = optional(list(string), []) 54 | tags = optional(map(string), {}) 55 | })) 56 | 57 | 58 | default = [{ 59 | name = "default" 60 | }] 61 | } 62 | 63 | variable "vpc_cidr" { 64 | description = "The IPv4 CIDR block for the VPC." 
65 | type = string 66 | default = "172.16.0.0/16" 67 | } 68 | 69 | variable "talos_api_allowed_cidr" { 70 | description = "The CIDR from which to allow to access the Talos API" 71 | type = string 72 | default = "0.0.0.0/0" 73 | } 74 | 75 | variable "config_patch_files" { 76 | description = "Path to talos config path files that applies to all nodes" 77 | type = list(string) 78 | default = [] 79 | } 80 | -------------------------------------------------------------------------------- /examples/terraform/gcp/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = "~> 1.3" 3 | required_providers { 4 | google = { 5 | source = "hashicorp/google" 6 | version = "~> 6.0" 7 | } 8 | talos = { 9 | source = "siderolabs/talos" 10 | version = "0.9.0-alpha.0" 11 | } 12 | } 13 | } 14 | 15 | provider "google" { 16 | project = var.project 17 | region = var.region 18 | zone = var.zone 19 | } 20 | -------------------------------------------------------------------------------- /examples/terraform/hcloud/README.md: -------------------------------------------------------------------------------- 1 | * Tested Talos version: `1.3.0` 2 | * Currently only one control-plane is set up (no HA). 3 | * Number of workers need to be defined in a `tfvars` file. For an example with three workers see `terraform/three_workers.tfvars` 4 | * One additional volume is attached to each worker with size specified in the `worker_extra_volume_size` variable. 
5 | * Required patches for [OpenEBS Mayastor](https://mayastor.gitbook.io/introduction/) are applied in `templates/controlplanepatch.yaml.tmpl` 6 | 7 | ## Prerequisites 8 | 9 | ```bash 10 | # hcloud cli 11 | brew install hcloud 12 | # talosctl, check for latest version https://github.com/siderolabs/talos 13 | sudo curl -Lo /usr/local/bin/talosctl https://github.com/siderolabs/talos/releases/download/v1.3.0/talosctl-$(uname -s | tr "[:upper:]" "[:lower:]")-amd64 14 | sudo chmod +x /usr/local/bin/talosctl 15 | # hashicorp packer 16 | brew tap hashicorp/tap 17 | brew install hashicorp/tap/packer 18 | # hashicrop terraform 19 | brew install hashicorp/tap/terraform 20 | ``` 21 | 22 | Export your hcloud token: 23 | 24 | ```bash 25 | export HCLOUD_TOKEN= 26 | ``` 27 | 28 | ## Packer 29 | 30 | Create the talos os image via packer. The talos os version is defined in the variable `talos_version` in `hcloud_talosimage.pkr.hcl`. 31 | 32 | ```bash 33 | cd packer 34 | packer init . 35 | packer build . 36 | # after completion, export the image ID 37 | export TF_VAR_image_id= 38 | ``` 39 | 40 | ## HCloud 41 | 42 | ```bash 43 | cd terraform 44 | terraform init 45 | # example with three worker nodes 46 | terraform plan -var-file=three_workers.tfvars 47 | terraform apply -var-file=three_workers.tfvars 48 | ``` 49 | 50 | ## Talosconfig and Kubeconfig 51 | 52 | Once terrafrom finished successfully, retrieve `talosconfig` and `kubeconfig` from the output. 
53 | 54 | Example: 55 | 56 | ```bash 57 | terraform output -raw talosconfig > ~/hcloud-dev-cluster/talosconfig 58 | terraform output -raw kubeconfig > ~/hcloud-dev-cluster/kubeconfig 59 | export TALOSCONFIG=~/hcloud-dev-cluster/talosconfig 60 | export KUBECONFIG=~/hcloud-dev-cluster/kubeconfig 61 | # check if all nodes are available, target the control plane (default 10.0.0.3 set in variables.tf) 62 | talosctl get members -n 10.0.0.3 63 | # check nodes 64 | kubectl get nodes -o wide 65 | ``` -------------------------------------------------------------------------------- /examples/terraform/hcloud/packer/hcloud_talosimage.pkr.hcl: -------------------------------------------------------------------------------- 1 | # hcloud.pkr.hcl 2 | 3 | packer { 4 | required_plugins { 5 | hcloud = { 6 | version = ">= 1.0.0" 7 | source = "github.com/hashicorp/hcloud" 8 | } 9 | } 10 | } 11 | 12 | variable "talos_version" { 13 | type = string 14 | default = "v1.6.0" 15 | } 16 | 17 | locals { 18 | image = "https://github.com/siderolabs/talos/releases/download/${var.talos_version}/hcloud-amd64.raw.xz" 19 | } 20 | 21 | source "hcloud" "talos" { 22 | rescue = "linux64" 23 | image = "debian-12" 24 | location = "fsn1" 25 | server_type = "cx22" 26 | ssh_username = "root" 27 | 28 | snapshot_name = "talos system disk ${var.talos_version}" 29 | snapshot_labels = { 30 | type = "infra", 31 | os = "talos", 32 | version = "${var.talos_version}", 33 | } 34 | } 35 | 36 | build { 37 | sources = ["source.hcloud.talos"] 38 | 39 | provisioner "shell" { 40 | inline = [ 41 | "apt-get install -y wget", 42 | "wget -O /tmp/talos.raw.xz ${local.image}", 43 | "xz -d -c /tmp/talos.raw.xz | dd of=/dev/sda && sync", 44 | ] 45 | } 46 | } -------------------------------------------------------------------------------- /examples/terraform/hcloud/terraform/.terraform.lock.hcl: -------------------------------------------------------------------------------- 1 | # This file is maintained automatically by "terraform 
init". 2 | # Manual edits may be lost in future updates. 3 | 4 | provider "registry.terraform.io/hetznercloud/hcloud" { 5 | version = "1.48.1" 6 | constraints = "1.48.1" 7 | hashes = [ 8 | "h1:AuI3Dw3AYY/fMrZ4EObI8XEaWzqsgiUrIRne3Nss/3Y=", 9 | "h1:fa9fxdSV9DG+HDcXyRbcGfb6Dk94SBP3TamHb1yOYiI=", 10 | "zh:086cce10cb005f25f85183c59e639d6675e91e919934c80f660ca1cc4b9bc09b", 11 | "zh:111d185707168b90c7ed3d245b522b2bd508f0bd4275496a1acdc9c0adaa85f2", 12 | "zh:1acba3f30150282d283c46cd7ce25e9afb8b027fd2f594d41de9131d25a42b27", 13 | "zh:1f8858aa81f93d52550502a11c7ea4e9370316ab098f6b75a09ffe75da6129ee", 14 | "zh:20e01e6e6f99f57b3c1ef2a9de5d617c0139d3f3934eeb5e6c5976ae8b831a48", 15 | "zh:2a8489a586a7bdadc42bbc9e3cb7b9deaefdf8020e3f2caba2678877d5d64d52", 16 | "zh:31d8017529b0429bc9e873ec5d358ab9b75af2ba0ae24f21abcd4d09f36b7ee9", 17 | "zh:407b4d7f1407e7e4a51b6f4dcdb0c7fbf81f2f1e25a7275f34054009419125a2", 18 | "zh:42cf7cf867d199054713d4e6060e4b578eff16f0f537e9aaa5fd990c3eab8bc6", 19 | "zh:460ac856ff952c5d41525949b93cfb7ee642f900594eff965494f11999d7496b", 20 | "zh:d09e527d23f62564c82bc24e286cf2cb8cb0ed6cdc6f4c66adf2145cfa62adac", 21 | "zh:d465356710444ac70dea4883252efc429b73e79fc6dc94f075662b838476680e", 22 | "zh:d476c8eca307e30a20eed54c0735b062a6f3066b4ac63eebecd38ab8f40c16f4", 23 | "zh:e0e9b2f6d5e28dbd01fa1ec3147aa88062d6223c5146532a3dcd1d3bb827e1e9", 24 | ] 25 | } 26 | 27 | provider "registry.terraform.io/siderolabs/talos" { 28 | version = "0.9.0-alpha.0" 29 | constraints = "0.9.0-alpha.0" 30 | hashes = [ 31 | "h1:aJIAbggkIN/2/7JEjSz/PfHVFRXOokeHYt3hDII6kI0=", 32 | "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", 33 | "zh:12a822ecfcc14da28bb88887811a796f5165ee50d64967c5afea389da12e3d18", 34 | "zh:5c2519abfbd5e45de4afd94e52d393235e3d09845af27c58aa98dd532311f47f", 35 | "zh:6a99b169eaf46789c7465de27d2dea52ca799f39412390d0a28bb28f5b9e5a4e", 36 | "zh:975daeb0ff5517e5a7a4357125f6e7d74041d87c853f9a52ecfd285ce972e51b", 37 | 
"zh:b358aefccccf84dab4bf07f2b039814755fc282cbe30f20496013e311eae3463", 38 | "zh:b4e3a0fddc38a05c25b8f1553098574d56959abeb2b5bf9e880208000a415231", 39 | "zh:ba331d61225fac3f787f7acd4cc298a7e0ca43ee7536ce5ab7f6c9dfae4c8e9e", 40 | "zh:bbd9bc936461d2be6c11a5abaa53f2618ac592bc7a6cc1ad9c4205fd73c95eac", 41 | "zh:bdd77e81bf65074fbc891a7429ec3264a342bc7545978a6c108e87cec5bb2f56", 42 | "zh:c132d34502d47436c5f31670f2c786c072bce6137e28cfb5d948f36721db5f66", 43 | "zh:c39ac5467fff7e326b31ada5e734ba88b8f811c5d758c3ce2c9c886504cc232f", 44 | "zh:f1083b82593be4c888e35f6c9c773a86551c8c7b5dac1f3fa69863820852fc87", 45 | "zh:f40bc8da36b6dc3b95cc13d208b81e254346d78ab81624c07a2fa74148de7a8b", 46 | "zh:f56b4589644078e21dbcdbb53cc278550a04fa9c02bc7eea3f5dc91648da2048", 47 | ] 48 | } 49 | -------------------------------------------------------------------------------- /examples/terraform/hcloud/terraform/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Requirements 3 | 4 | | Name | Version | 5 | |------|---------| 6 | | [hcloud](#requirement\_hcloud) | 1.48.1 | 7 | | [talos](#requirement\_talos) | 0.9.0-alpha.0 | 8 | 9 | ## Providers 10 | 11 | | Name | Version | 12 | |------|---------| 13 | | [hcloud](#provider\_hcloud) | 1.48.1 | 14 | | [talos](#provider\_talos) | 0.9.0-alpha.0 | 15 | 16 | ## Modules 17 | 18 | No modules. 
19 | 20 | ## Resources 21 | 22 | | Name | Type | 23 | |------|------| 24 | | [hcloud_load_balancer.controlplane_load_balancer](https://registry.terraform.io/providers/hetznercloud/hcloud/1.48.1/docs/resources/load_balancer) | resource | 25 | | [hcloud_load_balancer_network.srvnetwork](https://registry.terraform.io/providers/hetznercloud/hcloud/1.48.1/docs/resources/load_balancer_network) | resource | 26 | | [hcloud_load_balancer_service.controlplane_load_balancer_service_kubectl](https://registry.terraform.io/providers/hetznercloud/hcloud/1.48.1/docs/resources/load_balancer_service) | resource | 27 | | [hcloud_load_balancer_service.controlplane_load_balancer_service_mayastor](https://registry.terraform.io/providers/hetznercloud/hcloud/1.48.1/docs/resources/load_balancer_service) | resource | 28 | | [hcloud_load_balancer_service.controlplane_load_balancer_service_talosctl](https://registry.terraform.io/providers/hetznercloud/hcloud/1.48.1/docs/resources/load_balancer_service) | resource | 29 | | [hcloud_load_balancer_target.load_balancer_target](https://registry.terraform.io/providers/hetznercloud/hcloud/1.48.1/docs/resources/load_balancer_target) | resource | 30 | | [hcloud_network.network](https://registry.terraform.io/providers/hetznercloud/hcloud/1.48.1/docs/resources/network) | resource | 31 | | [hcloud_network_subnet.subnet](https://registry.terraform.io/providers/hetznercloud/hcloud/1.48.1/docs/resources/network_subnet) | resource | 32 | | [hcloud_server.controlplane_server](https://registry.terraform.io/providers/hetznercloud/hcloud/1.48.1/docs/resources/server) | resource | 33 | | [hcloud_server.worker_server](https://registry.terraform.io/providers/hetznercloud/hcloud/1.48.1/docs/resources/server) | resource | 34 | | [hcloud_volume.volumes](https://registry.terraform.io/providers/hetznercloud/hcloud/1.48.1/docs/resources/volume) | resource | 35 | | 
[talos_cluster_kubeconfig.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/cluster_kubeconfig) | resource | 36 | | [talos_machine_bootstrap.bootstrap](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_bootstrap) | resource | 37 | | [talos_machine_secrets.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_secrets) | resource | 38 | | [talos_client_configuration.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/client_configuration) | data source | 39 | | [talos_machine_configuration.controlplane](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/machine_configuration) | data source | 40 | | [talos_machine_configuration.worker](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/machine_configuration) | data source | 41 | 42 | ## Inputs 43 | 44 | | Name | Description | Type | Default | Required | 45 | |------|-------------|------|---------|:--------:| 46 | | [cluster\_name](#input\_cluster\_name) | A name to provide for the Talos cluster | `string` | `"talos-hloud-cluster"` | no | 47 | | [controlplane\_ip](#input\_controlplane\_ip) | n/a | `string` | `"10.0.0.3"` | no | 48 | | [controlplane\_type](#input\_controlplane\_type) | Control plane | `string` | `"cx32"` | no | 49 | | [image\_id](#input\_image\_id) | Talos specific variables | `string` | n/a | yes | 50 | | [kubernetes\_version](#input\_kubernetes\_version) | Kubernetes version to use for the cluster, if not set the k8s version shipped with the talos sdk version will be used | `string` | `null` | no | 51 | | [load\_balancer\_type](#input\_load\_balancer\_type) | n/a | `string` | `"lb11"` | no | 52 | | [location](#input\_location) | Workers | `string` | `"fsn1"` | no | 53 | | [network\_zone](#input\_network\_zone) | Load balancer | `string` | `"eu-central"` | 
no | 54 | | [private\_network\_ip\_range](#input\_private\_network\_ip\_range) | n/a | `string` | `"10.0.0.0/16"` | no | 55 | | [private\_network\_name](#input\_private\_network\_name) | Networking | `string` | `"talos-network"` | no | 56 | | [private\_network\_subnet\_range](#input\_private\_network\_subnet\_range) | n/a | `string` | `"10.0.0.0/24"` | no | 57 | | [talos\_version\_contract](#input\_talos\_version\_contract) | Talos API version to use for the cluster, if not set the version shipped with the talos sdk version will be used | `string` | `"v1.6"` | no | 58 | | [worker\_extra\_volume\_size](#input\_worker\_extra\_volume\_size) | Size of SSD volume to attach to workers | `number` | `10` | no | 59 | | [workers](#input\_workers) | Worker definition | `any` | n/a | yes | 60 | 61 | ## Outputs 62 | 63 | | Name | Description | 64 | |------|-------------| 65 | | [kubeconfig](#output\_kubeconfig) | n/a | 66 | | [talosconfig](#output\_talosconfig) | n/a | 67 | -------------------------------------------------------------------------------- /examples/terraform/hcloud/terraform/main.tf: -------------------------------------------------------------------------------- 1 | # create the private network 2 | resource "hcloud_network" "network" { 3 | name = var.private_network_name 4 | ip_range = "10.0.0.0/24" 5 | } 6 | 7 | resource "hcloud_network_subnet" "subnet" { 8 | network_id = hcloud_network.network.id 9 | type = "cloud" 10 | network_zone = var.network_zone 11 | ip_range = var.private_network_subnet_range 12 | } 13 | 14 | # create the load balancer 15 | resource "hcloud_load_balancer" "controlplane_load_balancer" { 16 | name = "talos-lb" 17 | load_balancer_type = var.load_balancer_type 18 | network_zone = var.network_zone 19 | } 20 | 21 | # attach the load balancer to the private network 22 | resource "hcloud_load_balancer_network" "srvnetwork" { 23 | load_balancer_id = hcloud_load_balancer.controlplane_load_balancer.id 24 | network_id =
hcloud_network.network.id 25 | } 26 | 27 | # attach the control plane to the load balancer 28 | resource "hcloud_load_balancer_target" "load_balancer_target" { 29 | type = "server" 30 | load_balancer_id = hcloud_load_balancer.controlplane_load_balancer.id 31 | server_id = hcloud_server.controlplane_server.id 32 | use_private_ip = true 33 | depends_on = [ 34 | hcloud_server.controlplane_server 35 | ] 36 | } 37 | 38 | # loadbalance kubectl port 39 | resource "hcloud_load_balancer_service" "controlplane_load_balancer_service_kubectl" { 40 | load_balancer_id = hcloud_load_balancer.controlplane_load_balancer.id 41 | protocol = "tcp" 42 | listen_port = 6443 43 | destination_port = 6443 44 | } 45 | 46 | # loadbalance talosctl 47 | resource "hcloud_load_balancer_service" "controlplane_load_balancer_service_talosctl" { 48 | load_balancer_id = hcloud_load_balancer.controlplane_load_balancer.id 49 | protocol = "tcp" 50 | listen_port = 50000 51 | destination_port = 50000 52 | } 53 | 54 | # loadbalance mayastor 55 | resource "hcloud_load_balancer_service" "controlplane_load_balancer_service_mayastor" { 56 | load_balancer_id = hcloud_load_balancer.controlplane_load_balancer.id 57 | protocol = "tcp" 58 | listen_port = 30011 59 | destination_port = 30011 60 | } 61 | 62 | 63 | # Talos 64 | # create the machine secrets 65 | resource "talos_machine_secrets" "this" { 66 | talos_version = var.talos_version_contract 67 | } 68 | 69 | # create the controlplane config, using the loadbalancer as cluster endpoint 70 | data "talos_machine_configuration" "controlplane" { 71 | cluster_name = var.cluster_name 72 | cluster_endpoint = "https://${hcloud_load_balancer.controlplane_load_balancer.ipv4}:6443" 73 | machine_type = "controlplane" 74 | machine_secrets = talos_machine_secrets.this.machine_secrets 75 | talos_version = var.talos_version_contract 76 | kubernetes_version = var.kubernetes_version 77 | config_patches = [ 78 | templatefile("${path.module}/templates/controlplanepatch.yaml.tmpl", { 79 |
loadbalancerip = hcloud_load_balancer.controlplane_load_balancer.ipv4, subnet = var.private_network_subnet_range 80 | }) 81 | ] 82 | depends_on = [ 83 | hcloud_load_balancer.controlplane_load_balancer 84 | ] 85 | } 86 | 87 | # create the talos client config 88 | data "talos_client_configuration" "this" { 89 | cluster_name = var.cluster_name 90 | client_configuration = talos_machine_secrets.this.client_configuration 91 | endpoints = [ 92 | hcloud_load_balancer.controlplane_load_balancer.ipv4 93 | ] 94 | } 95 | 96 | # create the control plane and apply generated config in user_data 97 | resource "hcloud_server" "controlplane_server" { 98 | name = "talos-controlplane" 99 | image = var.image_id 100 | server_type = var.controlplane_type 101 | location = var.location 102 | labels = { type = "talos-controlplane" } 103 | user_data = data.talos_machine_configuration.controlplane.machine_configuration 104 | network { 105 | network_id = hcloud_network.network.id 106 | ip = var.controlplane_ip 107 | } 108 | depends_on = [ 109 | hcloud_network_subnet.subnet, 110 | hcloud_load_balancer.controlplane_load_balancer, 111 | talos_machine_secrets.this, 112 | ] 113 | } 114 | 115 | # bootstrap the cluster 116 | resource "talos_machine_bootstrap" "bootstrap" { 117 | client_configuration = talos_machine_secrets.this.client_configuration 118 | endpoint = hcloud_server.controlplane_server.ipv4_address 119 | node = hcloud_server.controlplane_server.ipv4_address 120 | } 121 | 122 | # create the worker config and apply the worker patch 123 | data "talos_machine_configuration" "worker" { 124 | cluster_name = var.cluster_name 125 | cluster_endpoint = "https://${hcloud_load_balancer.controlplane_load_balancer.ipv4}:6443" 126 | machine_type = "worker" 127 | machine_secrets = talos_machine_secrets.this.machine_secrets 128 | talos_version = var.talos_version_contract 129 | kubernetes_version = var.kubernetes_version 130 | config_patches = [ 131 | 
templatefile("${path.module}/templates/workerpatch.yaml.tmpl", { 132 | subnet = var.private_network_subnet_range 133 | }) 134 | ] 135 | depends_on = [ 136 | hcloud_load_balancer.controlplane_load_balancer 137 | ] 138 | } 139 | 140 | # create the worker and apply the generated config in user_data 141 | resource "hcloud_server" "worker_server" { 142 | for_each = var.workers 143 | name = each.value.name 144 | image = var.image_id 145 | server_type = each.value.server_type 146 | location = each.value.location 147 | labels = { type = "talos-worker" } 148 | user_data = data.talos_machine_configuration.worker.machine_configuration 149 | network { 150 | network_id = hcloud_network.network.id 151 | } 152 | depends_on = [ 153 | hcloud_network_subnet.subnet, 154 | hcloud_load_balancer.controlplane_load_balancer, 155 | ] 156 | } 157 | 158 | # create the extra ssd volumes and attach them to the worker 159 | resource "hcloud_volume" "volumes" { 160 | for_each = hcloud_server.worker_server 161 | name = "${each.value.name}-volume" 162 | size = var.worker_extra_volume_size 163 | server_id = each.value.id 164 | depends_on = [ 165 | hcloud_server.worker_server 166 | ] 167 | } 168 | 169 | # kubeconfig 170 | resource "talos_cluster_kubeconfig" "this" { 171 | client_configuration = talos_machine_secrets.this.client_configuration 172 | node = hcloud_server.controlplane_server.ipv4_address 173 | } 174 | -------------------------------------------------------------------------------- /examples/terraform/hcloud/terraform/outputs.tf: -------------------------------------------------------------------------------- 1 | output "talosconfig" { 2 | value = data.talos_client_configuration.this.talos_config 3 | sensitive = true 4 | } 5 | 6 | output "kubeconfig" { 7 | value = talos_cluster_kubeconfig.this.kubeconfig_raw 8 | sensitive = true 9 | } 10 | -------------------------------------------------------------------------------- 
/examples/terraform/hcloud/terraform/templates/controlplanepatch.yaml.tmpl: -------------------------------------------------------------------------------- 1 | machine: 2 | kubelet: 3 | nodeIP: 4 | validSubnets: 5 | - ${subnet} 6 | certSANs: 7 | - ${loadbalancerip} 8 | # Setup hetzner ntp server. 9 | time: 10 | # disabled: false # Indicates if the time service is disabled for the machine. 11 | # # Specifies time (NTP) servers to use for setting the system time. 12 | servers: 13 | - ntp1.hetzner.de 14 | - ntp2.hetzner.com 15 | - ntp3.hetzner.net 16 | - 0.de.pool.ntp.org 17 | - 1.de.pool.ntp.org 18 | - time.cloudflare.com 19 | -------------------------------------------------------------------------------- /examples/terraform/hcloud/terraform/templates/workerpatch.yaml.tmpl: -------------------------------------------------------------------------------- 1 | machine: 2 | kubelet: 3 | # OpenEBS Mayastor 4 | extraMounts: 5 | - destination: /var/openebs/local 6 | options: 7 | - bind 8 | - rshared 9 | - rw 10 | source: /var/openebs/local 11 | type: bind 12 | extraArgs: 13 | node-labels: openebs.io/engine=mayastor 14 | nodeIP: 15 | validSubnets: 16 | - ${subnet} 17 | # Mayastor requires vm.nr_hugepages 18 | sysctls: 19 | vm.nr_hugepages: "2048" 20 | # Setup hetzner ntp server. 21 | time: 22 | # disabled: false # Indicates if the time service is disabled for the machine. 23 | # # Specifies time (NTP) servers to use for setting the system time. 
24 | servers: 25 | - ntp1.hetzner.de 26 | - ntp2.hetzner.com 27 | - ntp3.hetzner.net 28 | - 0.de.pool.ntp.org 29 | - 1.de.pool.ntp.org 30 | - time.cloudflare.com 31 | -------------------------------------------------------------------------------- /examples/terraform/hcloud/terraform/three_workers.tfvars: -------------------------------------------------------------------------------- 1 | workers = { 2 | 1 = { 3 | server_type = "cx32", 4 | name = "talos-worker-1", 5 | location = "fsn1", 6 | labels = { "type" : "talos-worker" }, 7 | taints = [], 8 | }, 9 | 2 = { 10 | server_type = "cx32", 11 | name = "talos-worker-2", 12 | location = "fsn1", 13 | labels = { "type" : "talos-worker" }, 14 | taints = [], 15 | }, 16 | 3 = { 17 | server_type = "cx32", 18 | name = "talos-worker-3", 19 | location = "fsn1", 20 | labels = { "type" : "talos-worker" }, 21 | taints = [], 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /examples/terraform/hcloud/terraform/variables.tf: -------------------------------------------------------------------------------- 1 | # Talos specific variables 2 | variable "image_id" { 3 | type = string 4 | } 5 | 6 | variable "cluster_name" { 7 | description = "A name to provide for the Talos cluster" 8 | type = string 9 | default = "talos-hloud-cluster" 10 | } 11 | 12 | variable "talos_version_contract" { 13 | description = "Talos API version to use for the cluster, if not set the the version shipped with the talos sdk version will be used" 14 | type = string 15 | default = "v1.6" 16 | } 17 | 18 | variable "kubernetes_version" { 19 | description = "Kubernetes version to use for the cluster, if not set the k8s version shipped with the talos sdk version will be used" 20 | type = string 21 | default = null 22 | } 23 | 24 | # Control plane 25 | variable "controlplane_type" { 26 | default = "cx32" 27 | } 28 | 29 | variable "controlplane_ip" { 30 | default = "10.0.0.3" 31 | type = string 32 | } 33 | 34 | # 
Networking 35 | variable "private_network_name" { 36 | default = "talos-network" 37 | } 38 | 39 | variable "private_network_ip_range" { 40 | default = "10.0.0.0/16" 41 | } 42 | 43 | variable "private_network_subnet_range" { 44 | default = "10.0.0.0/24" 45 | } 46 | 47 | # Load balancer 48 | variable "network_zone" { 49 | default = "eu-central" 50 | } 51 | 52 | variable "load_balancer_type" { 53 | default = "lb11" 54 | } 55 | 56 | # Workers 57 | variable "location" { 58 | default = "fsn1" 59 | } 60 | 61 | variable "workers" { 62 | description = "Worker definition" 63 | } 64 | 65 | variable "worker_extra_volume_size" { 66 | description = "Size of SSD volume to attach to workers" 67 | type = number 68 | default = 10 69 | } 70 | -------------------------------------------------------------------------------- /examples/terraform/hcloud/terraform/versions.tf: -------------------------------------------------------------------------------- 1 | # TF setup 2 | 3 | terraform { 4 | required_providers { 5 | hcloud = { 6 | source = "hetznercloud/hcloud" 7 | version = "1.48.1" 8 | } 9 | talos = { 10 | source = "siderolabs/talos" 11 | version = "0.9.0-alpha.0" 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/terraform/oci/README.md: -------------------------------------------------------------------------------- 1 | # Oracle Cloud Terraform Example 2 | 3 | Example of a highly available Kubernetes cluster with Talos on Oracle Cloud. 
4 | 5 | ## Prerequisites 6 | 7 | **general** 8 | 9 | - a top-level tenancy 10 | 11 | **install things** 12 | 13 | ``` bash 14 | brew install oci-cli hashicorp/tap/terraform siderolabs/tap/talosctl qemu 15 | ``` 16 | 17 | ## Notes 18 | 19 | - although not officially supported by Oracle Cloud, network LoadBalancers are provided through the Oracle Cloud Controller (only officially supported on OKE) 20 | - this guide will target arm64, though you can replace with amd64 if it doesn't suit your needs 21 | - instances will only launch with firmware set to UEFI_64 and launch mode set to PARAVIRTUALIZED 22 | 23 | ## Uploading an image 24 | 25 | Unfortunately due to upload constraints, this portion of the deployment is unable to be run using Terraform. This may change in the future. 26 | 27 | Prepare and upload a Talos disk image for Oracle Cloud, with 28 | 29 | 1. create a storage bucket: https://cloud.oracle.com/object-storage/buckets 30 | 2. using Talos Linux Image Factory, create a plan and generate an image to use. See this example: https://factory.talos.dev/?arch=arm64&cmdline=console%3DttyAMA0&cmdline-set=true&extensions=-&platform=oracle&target=cloud&version=1.8.0 31 | 3. download the disk image (ending in raw.xz) 32 | 4. define the image metadata, with the steps under the section "**defining metadata**" 33 | 5. repack the image, with steps under the section "**repacking the image**" 34 | 6. upload the image to the storage bucket under objects 35 | 7. under object and view object details, copy the dedicated endpoint url.
Example: https://axe608t7iscj.objectstorage.us-phoenix-1.oci.customer-oci.com/n/axe608t7iscj/b/talos/o/talos-v1.8.0-oracle-arm64.oci 36 | 37 | ### defining metadata 38 | 39 | create a file called `image_metadata.json` with contents such as 40 | 41 | ``` json 42 | { 43 | "version": 2, 44 | "externalLaunchOptions": { 45 | "firmware": "UEFI_64", 46 | "networkType": "PARAVIRTUALIZED", 47 | "bootVolumeType": "PARAVIRTUALIZED", 48 | "remoteDataVolumeType": "PARAVIRTUALIZED", 49 | "localDataVolumeType": "PARAVIRTUALIZED", 50 | "launchOptionsSource": "PARAVIRTUALIZED", 51 | "pvAttachmentVersion": 2, 52 | "pvEncryptionInTransitEnabled": true, 53 | "consistentVolumeNamingEnabled": true 54 | }, 55 | "imageCapabilityData": null, 56 | "imageCapsFormatVersion": null, 57 | "operatingSystem": "Talos", 58 | "operatingSystemVersion": "1.8.0", 59 | "additionalMetadata": { 60 | "shapeCompatibilities": [ 61 | { 62 | "internalShapeName": "VM.Standard.A1.Flex", 63 | "ocpuConstraints": null, 64 | "memoryConstraints": null 65 | } 66 | ] 67 | } 68 | } 69 | ``` 70 | 71 | ### repacking the image 72 | 73 | decompress the downloaded disk image artifact from factory 74 | 75 | ``` bash 76 | xz --decompress DISK_IMAGE.raw.xz 77 | ``` 78 | 79 | use `qemu-img` to convert the image to qcow2 80 | 81 | ``` bash 82 | qemu-img convert -f raw -O qcow2 oracle-arm64.raw oracle-arm64.qcow2 83 | ``` 84 | 85 | repack the image as a tar file with the metadata 86 | 87 | ``` bash 88 | tar zcf oracle-arm64.oci oracle-arm64.qcow2 image_metadata.json 89 | ``` 90 | 91 | ## Create a .tfvars file 92 | 93 | to configure authentication and namespacing, create a `.tfvars` file with values from the links placeholding in the example below 94 | 95 | ``` hcl 96 | tenancy_ocid = "TENANCY OCID : https://cloud.oracle.com/tenancy" 97 | user_ocid = "YOUR USER OCID : https://cloud.oracle.com/identity/domains/my-profile" 98 | private_key_path = "YOUR PRIVATE KEY PATH : https://cloud.oracle.com/identity/domains/my-profile/api-keys" 
99 | fingerprint = "THE FINGERPRINT FOR YOUR PRIVATE KEY : ^^" 100 | region = "YOUR PREFERRED REGION : https://cloud.oracle.com/regions" 101 | compartment_ocid = "YOUR COMPARTMENT OCID : https://cloud.oracle.com/identity/compartments" 102 | talos_image_oci_bucket_url = "YOUR DEDICATED BUCKET OBJECT URL : https://cloud.oracle.com/object-storage/buckets" 103 | ``` 104 | 105 | ## Bringing it up 106 | 107 | prepare the local directory for using Terraform 108 | 109 | ``` bash 110 | terraform init 111 | ``` 112 | 113 | verify the changes to provision 114 | 115 | ``` bash 116 | terraform plan -var-file=.tfvars 117 | ``` 118 | 119 | apply the changes 120 | 121 | ``` bash 122 | terraform apply -var-file=.tfvars 123 | ``` 124 | 125 | get the talosconfig 126 | 127 | ``` bash 128 | terraform output -raw talosconfig > ./talosconfig 129 | ``` 130 | 131 | get the kubeconfig 132 | 133 | ``` bash 134 | terraform output -raw kubeconfig > ./kubeconfig 135 | ``` 136 | 137 | destroy the worker nodes 138 | 139 | ``` bash 140 | terraform destroy -var-file=.tfvars -target=random_pet.worker 141 | ``` 142 | 143 | destroy 144 | 145 | ``` bash 146 | terraform destroy -var-file=.tfvars 147 | ``` 148 | -------------------------------------------------------------------------------- /examples/terraform/oci/data.tf: -------------------------------------------------------------------------------- 1 | data "oci_identity_compartment" "this" { 2 | id = var.compartment_ocid 3 | } 4 | 5 | data "oci_identity_availability_domains" "availability_domains" { 6 | #Required 7 | compartment_id = var.tenancy_ocid 8 | } 9 | 10 | data "oci_core_image_shapes" "image_shapes" { 11 | depends_on = [oci_core_shape_management.image_shape] 12 | #Required 13 | image_id = oci_core_image.talos_image.id 14 | } 15 | 16 | data "talos_image_factory_extensions_versions" "this" { 17 | # get the latest talos version 18 | talos_version = var.talos_version 19 | filters = { 20 | names = var.talos_extensions 21 | } 22 | } 23 | 24 |
data "talos_image_factory_urls" "this" { 25 | talos_version = var.talos_version 26 | schematic_id = talos_image_factory_schematic.this.id 27 | platform = "oracle" 28 | architecture = var.architecture 29 | } 30 | 31 | data "talos_client_configuration" "talosconfig" { 32 | cluster_name = var.cluster_name 33 | client_configuration = talos_machine_secrets.machine_secrets.client_configuration 34 | endpoints = [for k, v in oci_core_instance.controlplane : v.public_ip] 35 | nodes = concat( 36 | [for k, v in oci_core_instance.controlplane : v.public_ip], 37 | [for k, v in oci_core_instance.worker : v.private_ip] 38 | ) 39 | } 40 | 41 | data "talos_machine_configuration" "controlplane" { 42 | cluster_name = var.cluster_name 43 | # cluster_endpoint = "https://${var.kube_apiserver_domain}:6443" 44 | cluster_endpoint = "https://${oci_network_load_balancer_network_load_balancer.controlplane_load_balancer.ip_addresses[0].ip_address}:6443" 45 | 46 | machine_type = "controlplane" 47 | machine_secrets = talos_machine_secrets.machine_secrets.machine_secrets 48 | 49 | talos_version = var.talos_version 50 | kubernetes_version = var.kubernetes_version 51 | 52 | docs = false 53 | examples = false 54 | 55 | config_patches = [ 56 | local.talos_base_configuration, 57 | <<-EOT 58 | machine: 59 | features: 60 | kubernetesTalosAPIAccess: 61 | enabled: true 62 | allowedRoles: 63 | - os:reader 64 | allowedKubernetesNamespaces: 65 | - kube-system 66 | EOT 67 | , 68 | yamlencode({ 69 | machine = { 70 | certSANs = concat([ 71 | var.kube_apiserver_domain, 72 | oci_network_load_balancer_network_load_balancer.controlplane_load_balancer.ip_addresses[0].ip_address, 73 | ], 74 | [for k, v in oci_core_instance.controlplane : v.public_ip] 75 | ) 76 | } 77 | cluster = { 78 | apiServer = { 79 | certSANs = concat([ 80 | var.kube_apiserver_domain, 81 | oci_network_load_balancer_network_load_balancer.controlplane_load_balancer.ip_addresses[0].ip_address, 82 | ], 83 | [for k, v in oci_core_instance.controlplane 
: v.public_ip] 84 | ) 85 | } 86 | } 87 | }), 88 | ] 89 | } 90 | 91 | data "talos_machine_configuration" "worker" { 92 | cluster_name = var.cluster_name 93 | # cluster_endpoint = "https://${var.kube_apiserver_domain}:6443" 94 | cluster_endpoint = "https://${oci_network_load_balancer_network_load_balancer.controlplane_load_balancer.ip_addresses[0].ip_address}:6443" 95 | 96 | machine_type = "worker" 97 | machine_secrets = talos_machine_secrets.machine_secrets.machine_secrets 98 | 99 | talos_version = var.talos_version 100 | kubernetes_version = var.kubernetes_version 101 | 102 | docs = false 103 | examples = false 104 | 105 | config_patches = [ 106 | local.talos_base_configuration, 107 | < val } 14 | # count = 1 15 | #Required 16 | # choose the next availability domain which wasn't last 17 | availability_domain = data.oci_identity_availability_domains.availability_domains.availability_domains[each.key % length(data.oci_identity_availability_domains.availability_domains.availability_domains)].name 18 | compartment_id = var.compartment_ocid 19 | shape = var.instance_shape == null ? 
data.oci_core_image_shapes.image_shapes.image_shape_compatibilities[0].shape : var.instance_shape 20 | shape_config { 21 | ocpus = var.controlplane_instance_ocpus 22 | memory_in_gbs = var.controlplane_instance_memory_in_gbs 23 | } 24 | 25 | create_vnic_details { 26 | assign_public_ip = true 27 | subnet_id = oci_core_subnet.subnet_regional.id 28 | nsg_ids = [oci_core_network_security_group.network_security_group.id] 29 | } 30 | agent_config { 31 | are_all_plugins_disabled = true 32 | is_management_disabled = true 33 | is_monitoring_disabled = true 34 | } 35 | availability_config { 36 | is_live_migration_preferred = true 37 | recovery_action = "RESTORE_INSTANCE" 38 | } 39 | #Optional 40 | display_name = "${var.cluster_name}-control-plane-${each.value.id}" 41 | freeform_tags = local.common_labels 42 | launch_options { 43 | #Optional 44 | network_type = local.instance_mode 45 | remote_data_volume_type = local.instance_mode 46 | boot_volume_type = local.instance_mode 47 | firmware = "UEFI_64" 48 | } 49 | instance_options { 50 | are_legacy_imds_endpoints_disabled = true 51 | } 52 | source_details { 53 | #Required 54 | source_type = "image" 55 | source_id = oci_core_image.talos_image.id 56 | boot_volume_size_in_gbs = "50" 57 | } 58 | preserve_boot_volume = false 59 | 60 | lifecycle { 61 | create_before_destroy = "true" 62 | ignore_changes = [ 63 | defined_tags 64 | ] 65 | } 66 | } 67 | 68 | resource "oci_core_instance" "worker" { 69 | for_each = { for idx, val in random_pet.worker : idx => val } 70 | # count = 1 71 | #Required 72 | # choose the next availability domain which wasn't last 73 | availability_domain = data.oci_identity_availability_domains.availability_domains.availability_domains[each.key % length(data.oci_identity_availability_domains.availability_domains.availability_domains)].name 74 | compartment_id = var.compartment_ocid 75 | shape = var.instance_shape == null ? 
data.oci_core_image_shapes.image_shapes.image_shape_compatibilities[0].shape : var.instance_shape 76 | metadata = { 77 | user_data = base64encode(data.talos_machine_configuration.worker.machine_configuration) 78 | } 79 | shape_config { 80 | ocpus = var.worker_instance_ocpus 81 | memory_in_gbs = var.worker_instance_memory_in_gbs 82 | } 83 | 84 | create_vnic_details { 85 | assign_public_ip = true 86 | subnet_id = oci_core_subnet.subnet_regional.id 87 | nsg_ids = [oci_core_network_security_group.network_security_group.id] 88 | } 89 | agent_config { 90 | are_all_plugins_disabled = true 91 | is_management_disabled = true 92 | is_monitoring_disabled = true 93 | } 94 | availability_config { 95 | is_live_migration_preferred = true 96 | recovery_action = "RESTORE_INSTANCE" 97 | } 98 | #Optional 99 | display_name = "${var.cluster_name}-worker-${each.value.id}" 100 | freeform_tags = local.common_labels 101 | launch_options { 102 | #Optional 103 | network_type = local.instance_mode 104 | remote_data_volume_type = local.instance_mode 105 | boot_volume_type = local.instance_mode 106 | firmware = "UEFI_64" 107 | } 108 | instance_options { 109 | are_legacy_imds_endpoints_disabled = true 110 | } 111 | source_details { 112 | #Required 113 | source_type = "image" 114 | source_id = oci_core_image.talos_image.id 115 | boot_volume_size_in_gbs = "50" 116 | } 117 | preserve_boot_volume = false 118 | 119 | lifecycle { 120 | create_before_destroy = "true" 121 | ignore_changes = [ 122 | metadata.user_data, 123 | defined_tags 124 | ] 125 | } 126 | 127 | depends_on = [oci_core_instance.controlplane] 128 | } 129 | -------------------------------------------------------------------------------- /examples/terraform/oci/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | common_labels = { 3 | "TalosCluster" = var.cluster_name 4 | } 5 | talos_install_image = data.talos_image_factory_urls.this.urls.installer 6 | instance_mode = 
"PARAVIRTUALIZED" 7 | talos_install_disk = "/dev/sda" 8 | instance_kernel_arg_console = "ttyAMA0" 9 | # Example: https://raw.githubusercontent.com/oracle/oci-cloud-controller-manager/v1.26.0/manifests/provider-config-instance-principals-example.yaml 10 | oci_config_ini = < val if var.worker_volume_enabled } 3 | #Required 4 | compartment_id = var.compartment_ocid 5 | 6 | #Optional 7 | availability_domain = data.oci_identity_availability_domains.availability_domains.availability_domains[each.key % length(data.oci_identity_availability_domains.availability_domains.availability_domains)].name 8 | display_name = each.value.display_name 9 | freeform_tags = local.common_labels 10 | size_in_gbs = var.worker_volume_size_in_gbs 11 | 12 | lifecycle { 13 | create_before_destroy = "true" 14 | ignore_changes = [ 15 | defined_tags 16 | ] 17 | } 18 | } 19 | 20 | resource "oci_core_volume_attachment" "worker_volume_attachment" { 21 | for_each = { for idx, val in oci_core_volume.worker : idx => val if var.worker_volume_enabled } 22 | #Required 23 | attachment_type = local.instance_mode 24 | instance_id = [for val in oci_core_instance.worker : val if val.display_name == each.value.display_name][0].id 25 | volume_id = each.value.id 26 | } 27 | -------------------------------------------------------------------------------- /examples/terraform/oci/talos.tf: -------------------------------------------------------------------------------- 1 | resource "talos_machine_secrets" "machine_secrets" { 2 | talos_version = var.talos_version 3 | } 4 | 5 | resource "talos_image_factory_schematic" "this" { 6 | schematic = yamlencode( 7 | { 8 | customization = { 9 | systemExtensions = { 10 | officialExtensions = data.talos_image_factory_extensions_versions.this.extensions_info[*].name 11 | } 12 | } 13 | } 14 | ) 15 | } 16 | 17 | resource "talos_cluster_kubeconfig" "kubeconfig" { 18 | depends_on = [ 19 | talos_machine_bootstrap.bootstrap 20 | ] 21 | client_configuration = 
talos_machine_secrets.machine_secrets.client_configuration 22 | endpoint = oci_network_load_balancer_network_load_balancer.controlplane_load_balancer.ip_addresses[0].ip_address 23 | node = oci_network_load_balancer_network_load_balancer.controlplane_load_balancer.ip_addresses[0].ip_address 24 | } 25 | 26 | resource "talos_machine_configuration_apply" "controlplane" { 27 | for_each = { for idx, val in oci_core_instance.controlplane : idx => val } 28 | client_configuration = talos_machine_secrets.machine_secrets.client_configuration 29 | machine_configuration_input = data.talos_machine_configuration.controlplane.machine_configuration 30 | node = each.value.public_ip 31 | 32 | config_patches = [ 33 | yamlencode({ 34 | machine = { 35 | kubelet = { 36 | extraArgs = { 37 | "provider-id" = each.value.id 38 | } 39 | } 40 | } 41 | }) 42 | ] 43 | } 44 | 45 | resource "talos_machine_configuration_apply" "worker" { 46 | for_each = { for idx, val in oci_core_instance.worker : idx => val } 47 | client_configuration = talos_machine_secrets.machine_secrets.client_configuration 48 | machine_configuration_input = data.talos_machine_configuration.worker.machine_configuration 49 | endpoint = [for k, v in oci_core_instance.controlplane : v.public_ip][0] 50 | node = each.value.private_ip 51 | 52 | depends_on = [oci_core_volume_attachment.worker_volume_attachment, talos_machine_configuration_apply.controlplane] 53 | } 54 | 55 | resource "talos_machine_bootstrap" "bootstrap" { 56 | depends_on = [talos_machine_configuration_apply.controlplane] 57 | 58 | client_configuration = talos_machine_secrets.machine_secrets.client_configuration 59 | endpoint = [for k, v in oci_core_instance.controlplane : v.public_ip][0] 60 | node = [for k, v in oci_core_instance.controlplane : v.public_ip][0] 61 | 62 | lifecycle { 63 | ignore_changes = all 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /examples/terraform/oci/variables.tf: 
--------------------------------------------------------------------------------
# OCI tenancy / authentication inputs (all sensitive).
variable "compartment_ocid" {
  type      = string
  sensitive = true
}
variable "tenancy_ocid" {
  type      = string
  sensitive = true
}
variable "user_ocid" {
  type      = string
  sensitive = true
}
variable "fingerprint" {
  type      = string
  sensitive = true
}
variable "private_key_path" {
  type      = string
  default   = "~/.oci/oci_main_terraform.pem"
  sensitive = true
}
variable "instance_availability_domain" {
  type    = string
  default = null
}
variable "region" {
  description = "the OCI region where resources will be created"
  type        = string
  default     = null
}
variable "cluster_name" {
  type    = string
  default = "talosoci"
}
variable "kube_apiserver_domain" {
  type    = string
  default = null
}
# Networking layout.
variable "cidr_blocks" {
  type    = set(string)
  default = ["10.0.0.0/16"]
}
variable "subnet_block" {
  type    = string
  default = "10.0.0.0/24"
}
variable "subnet_block_regional" {
  type    = string
  default = "10.0.10.0/24"
}
# Software versions.
variable "talos_version" {
  type    = string
  default = "v1.7.6"
}
variable "kubernetes_version" {
  type    = string
  default = "v1.30.3"
}
variable "instance_shape" {
  type    = string
  default = "VM.Standard.A1.Flex"
}
variable "oracle_cloud_ccm_version" {
  type    = string
  default = "v1.29.0"
}
variable "talos_ccm_version" {
  type    = string
  default = "v1.6.0"
}
variable "pod_subnet_block" {
  type    = string
  default = "10.32.0.0/12"
}
variable "service_subnet_block" {
  type    = string
  default = "10.200.0.0/22"
}
variable "architecture" {
  type    = string
  default = "arm64"
}
variable "talos_extensions" {
  type = set(string)
  default = [
    "gvisor",
    "kata-containers",
    "iscsi-tools",
    "mdadm",
  ]
}
variable "controlplane_instance_count" {
  type    = number
  default = 3
}
variable "worker_instance_count" {
  type    = number
  default = 6
}
variable "talos_image_oci_bucket_url" {
  type     = string
  nullable = false
}
variable "controlplane_instance_ocpus" {
  type    = number
  default = 4
}
# Memory / volume sizes are numeric quantities (GBs), so they are declared as
# number; Terraform converts string inputs from tfvars automatically, keeping
# existing callers compatible.
variable "controlplane_instance_memory_in_gbs" {
  type    = number
  default = 8
}
variable "worker_instance_ocpus" {
  type    = number
  default = 4
}
variable "worker_instance_memory_in_gbs" {
  type    = number
  default = 8
}
variable "worker_volume_enabled" {
  type    = bool
  default = true
}
variable "worker_volume_size_in_gbs" {
  type    = number
  default = 500
}
--------------------------------------------------------------------------------
/examples/terraform/oci/versions.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    oci = {
      source  = "oracle/oci"
      version = "6.9.0"
    }
    talos = {
      source  = "siderolabs/talos"
      version = "0.9.0-alpha.0"
    }
    random = {
      source  = "hashicorp/random"
      version = "> 0.0.0"
    }
  }
  required_version = ">= 1.2"
}

provider "oci" {
  tenancy_ocid     = var.tenancy_ocid
  user_ocid        = var.user_ocid
  private_key_path = var.private_key_path
  fingerprint      = var.fingerprint
  region           = var.region
}
--------------------------------------------------------------------------------
/examples/terraform/vultr/.terraform.lock.hcl:
--------------------------------------------------------------------------------
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.
3 | 4 | provider "registry.terraform.io/siderolabs/talos" { 5 | version = "0.9.0-alpha.0" 6 | constraints = "0.9.0-alpha.0" 7 | hashes = [ 8 | "h1:aJIAbggkIN/2/7JEjSz/PfHVFRXOokeHYt3hDII6kI0=", 9 | "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", 10 | "zh:12a822ecfcc14da28bb88887811a796f5165ee50d64967c5afea389da12e3d18", 11 | "zh:5c2519abfbd5e45de4afd94e52d393235e3d09845af27c58aa98dd532311f47f", 12 | "zh:6a99b169eaf46789c7465de27d2dea52ca799f39412390d0a28bb28f5b9e5a4e", 13 | "zh:975daeb0ff5517e5a7a4357125f6e7d74041d87c853f9a52ecfd285ce972e51b", 14 | "zh:b358aefccccf84dab4bf07f2b039814755fc282cbe30f20496013e311eae3463", 15 | "zh:b4e3a0fddc38a05c25b8f1553098574d56959abeb2b5bf9e880208000a415231", 16 | "zh:ba331d61225fac3f787f7acd4cc298a7e0ca43ee7536ce5ab7f6c9dfae4c8e9e", 17 | "zh:bbd9bc936461d2be6c11a5abaa53f2618ac592bc7a6cc1ad9c4205fd73c95eac", 18 | "zh:bdd77e81bf65074fbc891a7429ec3264a342bc7545978a6c108e87cec5bb2f56", 19 | "zh:c132d34502d47436c5f31670f2c786c072bce6137e28cfb5d948f36721db5f66", 20 | "zh:c39ac5467fff7e326b31ada5e734ba88b8f811c5d758c3ce2c9c886504cc232f", 21 | "zh:f1083b82593be4c888e35f6c9c773a86551c8c7b5dac1f3fa69863820852fc87", 22 | "zh:f40bc8da36b6dc3b95cc13d208b81e254346d78ab81624c07a2fa74148de7a8b", 23 | "zh:f56b4589644078e21dbcdbb53cc278550a04fa9c02bc7eea3f5dc91648da2048", 24 | ] 25 | } 26 | 27 | provider "registry.terraform.io/vultr/vultr" { 28 | version = "2.12.0" 29 | constraints = "2.12.0" 30 | hashes = [ 31 | "h1:ASrNv7+mGVv+zm3E9RlPiQ4wwMbU0LQ4t1hyvVbR76A=", 32 | "h1:WUoBzhwm3Tu20FN9nvDrJelL+rx7RfYShAP5y5FzCxE=", 33 | "zh:002d577c52eb34cb2b4d93bd9850ebf203100d23a551505bb039d741c2424b18", 34 | "zh:054d175bbff44850f02b81f1995f04b07a5580ad59d259b1063ddb80ed43b6fb", 35 | "zh:1827f26fcbace803caa471ad2bbf4c7692199ad411343a4dc7bf981f11fa64ee", 36 | "zh:25790be3dcea5724266f57361c3dc56102e1b929fcaae56dd4846de88dc75672", 37 | "zh:38e617757d4316b656e78d031c3a3f6e892fc942624e10b903b085429b6de56b", 38 | 
"zh:475de485724bcad9d5d4ac1e68e587ed23723dda14eb414c09ee385316486dc1", 39 | "zh:4b9417a923137816dd59aeaf479a509c38db38ecca4d14edb352a85a68c1eb65", 40 | "zh:4f7ce4628a102690603a605bffd8b1e2873707bf3f3d97d2508e3f68219ceae6", 41 | "zh:666e35f09f61d1058b09e042ab4fc91b2d0c513f38a27717ffbd425fd1f70bed", 42 | "zh:72f9ac9a88bc82ff94a91815603eabf909e23d3ab0599ee48c3cdc2b3b4bc8d2", 43 | "zh:878b03aa5de5a19928f603e906185757fb04b34bb81a9c49e5b68c716dcf0b1e", 44 | "zh:a4b483cb9220da3d5361b1d311fd901e53ee822d0fefb673a8292975486a28c6", 45 | "zh:c235729bc1fbfe27414e3bbd43a2ccc1f120bf5bab633536baecba9a6ee5b951", 46 | "zh:cc0cde3702585d9895d7dd506b504bb5b130564e9b51fa28404a4a18496e8e8b", 47 | "zh:dbfcb5abdd78c8befaca7b8ada420c58b3744db4b7299053be648507821b08b1", 48 | "zh:ed036f0618df11bde1edbc65e5aa56c4e71eda1827145b31d75e1ee857ad8614", 49 | ] 50 | } 51 | -------------------------------------------------------------------------------- /examples/terraform/vultr/README.md: -------------------------------------------------------------------------------- 1 | # Vultr Terraform Example 2 | 3 | This example will create a load-balanced, HA Talos cluster on vultr.com. 4 | It will use the marketplace image of Talos that is present in Vultr and should result in a stable, maintainable cluster. 5 | 6 | ## Prereqs 7 | 8 | Export the `VULTR_API_KEY` environment variable with your API key obtained from vultr.com. 9 | From this directory, issue `terraform init` to ensure the proper providers are pulled down. 10 | 11 | ## Usage 12 | 13 | To create a default cluster, this should be as simple as `terraform apply`. 14 | This will create a cluster called `talos-vultr` with 3 control plane nodes and a single worker in the Atlanta region. 15 | Each of these VMs will be 2 CPU / 4GB RAM VMs. 16 | If different specs or regions are required, override them through command line with the `-var` flag or by creating a varsfile and overriding with `-var-file`. 
17 | Destroying the cluster should, again, be a simple `terraform destroy`.
18 | 
19 | Getting the kubeconfig and talosconfig for this cluster can be done with `terraform output -raw kubeconfig > <kubeconfig-path>` and `terraform output -raw talosconfig > <talosconfig-path>`.
20 | 
21 | ## Requirements
22 | 
23 | | Name | Version |
24 | |------|---------|
25 | | [talos](#requirement\_talos) | 0.9.0-alpha.0 |
26 | | [vultr](#requirement\_vultr) | 2.12.0 |
27 | 
28 | ## Providers
29 | 
30 | | Name | Version |
31 | |------|---------|
32 | | [talos](#provider\_talos) | 0.9.0-alpha.0 |
33 | | [vultr](#provider\_vultr) | 2.12.0 |
34 | 
35 | ## Modules
36 | 
37 | No modules.
38 | 
39 | ## Resources
40 | 
41 | | Name | Type |
42 | |------|------|
43 | | [talos_cluster_kubeconfig.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/cluster_kubeconfig) | resource |
44 | | [talos_machine_bootstrap.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_bootstrap) | resource |
45 | | [talos_machine_configuration_apply.controlplane](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_configuration_apply) | resource |
46 | | [talos_machine_configuration_apply.worker](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_configuration_apply) | resource |
47 | | [talos_machine_secrets.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/resources/machine_secrets) | resource |
48 | | [vultr_instance.talos_control_plane](https://registry.terraform.io/providers/vultr/vultr/2.12.0/docs/resources/instance) | resource |
49 | | [vultr_instance.talos_workers](https://registry.terraform.io/providers/vultr/vultr/2.12.0/docs/resources/instance) | resource |
50 | | [vultr_load_balancer.talos_lb](https://registry.terraform.io/providers/vultr/vultr/2.12.0/docs/resources/load_balancer) | resource |
51 | 
[talos_client_configuration.this](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/client_configuration) | data source | 52 | | [talos_machine_configuration.controlplane](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/machine_configuration) | data source | 53 | | [talos_machine_configuration.worker](https://registry.terraform.io/providers/siderolabs/talos/0.9.0-alpha.0/docs/data-sources/machine_configuration) | data source | 54 | 55 | ## Inputs 56 | 57 | | Name | Description | Type | Default | Required | 58 | |------|-------------|------|---------|:--------:| 59 | | [cluster\_name](#input\_cluster\_name) | Name of cluster | `string` | `"talos-vultr"` | no | 60 | | [num\_control\_plane](#input\_num\_control\_plane) | Number of control plane nodes to create | `number` | `3` | no | 61 | | [num\_workers](#input\_num\_workers) | Number of worker nodes to create | `number` | `1` | no | 62 | | [vultr\_plan](#input\_vultr\_plan) | Vultr plan to use | `string` | `"vc2-2c-4gb"` | no | 63 | | [vultr\_region](#input\_vultr\_region) | Vultr region to use | `string` | `"atl"` | no | 64 | 65 | ## Outputs 66 | 67 | | Name | Description | 68 | |------|-------------| 69 | | [kubeconfig](#output\_kubeconfig) | n/a | 70 | | [talosconfig](#output\_talosconfig) | n/a | 71 | -------------------------------------------------------------------------------- /examples/terraform/vultr/main.tf: -------------------------------------------------------------------------------- 1 | # Create all instances 2 | resource "vultr_instance" "talos_control_plane" { 3 | plan = var.vultr_plan 4 | region = var.vultr_region 5 | image_id = "talos-linux" 6 | hostname = "${var.cluster_name}-control-plane-${count.index}" 7 | label = "${var.cluster_name}-control-plane-${count.index}" 8 | count = var.num_control_plane 9 | } 10 | 11 | resource "vultr_instance" "talos_workers" { 12 | plan = var.vultr_plan 13 | region = var.vultr_region 
image_id = "talos-linux"
  hostname = "${var.cluster_name}-worker-${count.index}"
  label    = "${var.cluster_name}-worker-${count.index}"
  count    = var.num_workers
}

# Load balancer fronting the control-plane Kubernetes API on port 6443.
resource "vultr_load_balancer" "talos_lb" {
  region              = var.vultr_region
  label               = "${var.cluster_name}-k8s"
  balancing_algorithm = "roundrobin"
  attached_instances  = vultr_instance.talos_control_plane[*].id
  forwarding_rules {
    frontend_protocol = "tcp"
    frontend_port     = 6443
    backend_protocol  = "tcp"
    backend_port      = 6443
  }
  health_check {
    port                = 6443
    protocol            = "tcp"
    response_timeout    = 1
    unhealthy_threshold = 2
    check_interval      = 3
    healthy_threshold   = 4
  }
}

# Shared Talos cluster secrets; every configuration below derives from these.
resource "talos_machine_secrets" "this" {}

# Client talosconfig listing all control-plane public IPs as endpoints.
data "talos_client_configuration" "this" {
  cluster_name         = var.cluster_name
  client_configuration = talos_machine_secrets.this.client_configuration
  endpoints            = vultr_instance.talos_control_plane[*].main_ip
}

# Control-plane machine config, pointed at the load-balancer address.
data "talos_machine_configuration" "controlplane" {
  cluster_name     = var.cluster_name
  cluster_endpoint = "https://${vultr_load_balancer.talos_lb.ipv4}:6443"
  machine_type     = "controlplane"
  machine_secrets  = talos_machine_secrets.this.machine_secrets
}

# Push the control-plane config to each control-plane instance.
resource "talos_machine_configuration_apply" "controlplane" {
  client_configuration        = talos_machine_secrets.this.client_configuration
  machine_configuration_input = data.talos_machine_configuration.controlplane.machine_configuration
  count                       = length(vultr_instance.talos_control_plane)
  node                        = vultr_instance.talos_control_plane[count.index].main_ip
}

# Worker machine config, also pointed at the load-balancer address.
data "talos_machine_configuration" "worker" {
  cluster_name     = var.cluster_name
  cluster_endpoint = "https://${vultr_load_balancer.talos_lb.ipv4}:6443"
  machine_type     = "worker"
  machine_secrets  = talos_machine_secrets.this.machine_secrets
}

resource "talos_machine_configuration_apply" "worker" {
  client_configuration        = talos_machine_secrets.this.client_configuration
  machine_configuration_input = data.talos_machine_configuration.worker.machine_configuration
  count                       = length(vultr_instance.talos_workers)
  node                        = vultr_instance.talos_workers[count.index].main_ip
}

# Bootstrap etcd on the first control-plane node after its config is applied.
resource "talos_machine_bootstrap" "this" {
  depends_on = [
    talos_machine_configuration_apply.controlplane
  ]

  client_configuration = talos_machine_secrets.this.client_configuration
  node                 = vultr_instance.talos_control_plane[0].main_ip
}

resource "talos_cluster_kubeconfig" "this" {
  # Kubeconfig retrieval only succeeds once the cluster is bootstrapped, so
  # order it explicitly after bootstrap (same as the oci example); without
  # this the first apply can race bootstrap and fail.
  depends_on = [
    talos_machine_bootstrap.this
  ]

  client_configuration = talos_machine_secrets.this.client_configuration
  node                 = vultr_instance.talos_control_plane[0].main_ip
}
--------------------------------------------------------------------------------
/examples/terraform/vultr/outputs.tf:
--------------------------------------------------------------------------------
output "talosconfig" {
  value     = data.talos_client_configuration.this.talos_config
  sensitive = true
}

output "kubeconfig" {
  value     = talos_cluster_kubeconfig.this.kubeconfig_raw
  sensitive = true
}
--------------------------------------------------------------------------------
/examples/terraform/vultr/variables.tf:
--------------------------------------------------------------------------------
variable "cluster_name" {
  description = "Name of cluster"
  type        = string
  default     = "talos-vultr"
}

variable "num_control_plane" {
  description = "Number of control plane nodes to create"
  type        = number
  default     = 3
}

variable "num_workers" {
  description = "Number of worker nodes to create"
  type        = number
  default     = 1
}

variable "vultr_region" {
  description = "Vultr region to use"
  type        = string
  default     = "atl"
}

variable
"vultr_plan" { 26 | description = "Vultr plan to use" 27 | type = string 28 | default = "vc2-2c-4gb" 29 | } 30 | -------------------------------------------------------------------------------- /examples/terraform/vultr/versions.tf: -------------------------------------------------------------------------------- 1 | # TF setup 2 | 3 | terraform { 4 | required_providers { 5 | vultr = { 6 | source = "vultr/vultr" 7 | version = "2.12.0" 8 | } 9 | talos = { 10 | source = "siderolabs/talos" 11 | version = "0.9.0-alpha.0" 12 | } 13 | } 14 | } 15 | 16 | # Configure providers 17 | 18 | provider "vultr" {} 19 | 20 | provider "talos" {} 21 | -------------------------------------------------------------------------------- /go.work: -------------------------------------------------------------------------------- 1 | go 1.20 2 | 3 | use ( 4 | ./examples/pulumi/azure 5 | ./examples/pulumi/equinix-metal 6 | ./examples/pulumi/gcp 7 | ) 8 | -------------------------------------------------------------------------------- /hack/backend-aws.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "s3" {} 3 | } 4 | -------------------------------------------------------------------------------- /hack/backend.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "azurerm" {} 3 | } 4 | --------------------------------------------------------------------------------