├── .github
│   └── renovate.json
├── Makefile
├── README.md
├── aws
│   ├── README.md
│   ├── autoscaling-workers
│   │   ├── autoscaling-workers.env
│   │   └── autoscaling-workers.yaml
│   ├── manifests
│   │   └── ccm.yaml
│   ├── multi-az
│   │   ├── multi-az.env
│   │   └── multi-az.yaml
│   └── standard
│       ├── standard.env
│       └── standard.yaml
└── gcp
    ├── README.md
    └── standard
        ├── standard.env
        └── standard.yaml

/.github/renovate.json:
--------------------------------------------------------------------------------
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": [
    ":dependencyDashboard",
    ":gitSignOff",
    ":semanticCommitScopeDisabled",
    "schedule:earlyMondays"
  ],
  "prHeader": "Update Request | Renovate Bot",
  "regexManagers": [
    {
      "fileMatch": [
        "\\.env"
      ],
      "matchStrings": [
        "# renovate: datasource=(?<datasource>.*?)(?:\\s+extractVersion=(?<extractVersion>.+?))?(?:\\s+versioning=(?<versioning>.+?))?\\s+depName=(?<depName>.+?)?\\s.*_VERSION=(?<currentValue>.*)?"
      ],
      "versioningTemplate": "{{#if versioning}}{{versioning}}{{else}}semver{{/if}}"
    },
    {
      "fileMatch": [
        "Makefile"
      ],
      "matchStrings": [
        "# renovate: depName=(?<depName>.+?)\\s.*_VERSION\\s+:=\\s+(?<currentValue>.+)"
      ],
      "datasourceTemplate": "helm",
      "versioningTemplate": "semver",
      "registryUrlTemplate": "https://kubernetes.github.io/cloud-provider-aws"
    }
  ],
  "packageRules": [
    {
      "matchPackageNames": [
        "siderolabs/talos"
      ],
      "versioning": "regex:^v(?<major>\\d+)\\.(?<minor>\\d+)\\.?(?<patch>\\d+)?$"
    },
    {
      "matchPackagePatterns": [
        "*"
      ],
      "matchDatasources": [
        "helm"
      ],
      "groupName": "helm charts"
    },
    {
      "matchPackagePatterns": [
        "*"
      ],
      "matchDatasources": [
        "git-refs",
        "git-tags",
        "github-tags",
        "github-releases"
      ],
      "groupName": "releases"
    }
  ]
}
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
# renovate: depName=aws-cloud-controller-manager
AWS_CCM_VERSION := 0.0.7

# run `make update` whenever the Helm chart version changes
update:
	helm repo add aws-cloud-controller-manager https://kubernetes.github.io/cloud-provider-aws
	helm repo update
	helm template --version $(AWS_CCM_VERSION) aws-cloud-controller-manager aws-cloud-controller-manager/aws-cloud-controller-manager --set args="{--v=2,--cloud-provider=aws,--configure-cloud-routes=false}" > aws/manifests/ccm.yaml
--------------------------------------------------------------------------------
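The `update` target above re-renders the vendored CCM manifest from the upstream Helm chart whenever `AWS_CCM_VERSION` changes (Renovate bumps that version through the Makefile rule in renovate.json). A minimal usage sketch, run from the repository root:

```bash
# Re-render aws/manifests/ccm.yaml after a chart version bump,
# then review the regenerated manifest before committing.
make update
git diff aws/manifests/ccm.yaml
```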
/README.md:
--------------------------------------------------------------------------------
# cluster-api-templates

The goal of this repository is to provide a collection of templates for
[Cluster API (CAPI)](https://cluster-api.sigs.k8s.io) + [Talos](https://www.talos.dev).
These templates are meant to be readily usable with only some basic preparation and environment variable setup.
Each cloud has a README that details how to prepare and deploy the cluster.

If you find an example lacking, please reach out to us on Slack or GitHub and let us know!

Supported Templates:
- [AWS](./aws/)
--------------------------------------------------------------------------------
/aws/README.md:
--------------------------------------------------------------------------------
# AWS

As of this writing, we support three types of AWS deployments with Cluster API (CAPI):

- A "standard" cluster with an HA control plane and a machine deployment for workers.
- A "multi-az" cluster that spreads the worker machine deployments across multiple availability zones, with a user-provided load balancer in front of the control plane.
- An "autoscaling-workers" cluster that uses autoscaling groups for the worker set (called "MachinePools" in Cluster API lingo).

Currently supported versions:

- Talos: v0.10+ (see releases in this repository for previous versions).
- cluster-api-provider-aws (CAPA): [v0.6.4](https://github.com/kubernetes-sigs/cluster-api-provider-aws/releases/tag/v0.6.4).
- cloud-provider-aws: [v1.20.0-alpha.0](https://github.com/kubernetes/cloud-provider-aws/releases/tag/v1.20.0-alpha.0).

## Assumptions and Caveats

This guide assumes that you have an existing VPC and subnet in your AWS environment.
Unless you have Direct Connect or a VPN into that environment, VMs on the subnet should be allowed public IPs so that you can reach the instances with talosctl.

For the multi-AZ setup, it is assumed that you have pre-created both private and public subnets, as well as a load balancer that targets the Talos control plane nodes on port 50000.

Calico is the only supported CNI right now, since the AWS Cluster API provider adds Calico rules to the security groups it creates by default.
Other CNIs can likely be used, but they will require creating the security groups manually and specifying them as additional groups in the cluster manifests.

## Preparation

### Cloud

- In order for cloud-provider-aws to work properly, define two IAM policies in your environment: one for control plane nodes and one for workers.
  See [here](https://kubernetes.github.io/cloud-provider-aws/prerequisites/) for the policies that need to be created.

- Create a security group that allows port 50000 to your VMs, as well as port 50001 between the VMs themselves.
  This is necessary for connecting to the VMs with talosctl and for trustd communication between them; a rough AWS CLI sketch follows below.
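A minimal sketch of that security group using the AWS CLI, assuming placeholder IDs and a `0.0.0.0/0` source for the Talos API port (tighten the CIDR to your own address range). The resulting group ID is what goes into the `*_ADDL_SEC_GROUPS` variables in the environment files:

```bash
# Create the group in the target VPC (placeholder VPC ID and group name).
SG_ID=$(aws ec2 create-security-group \
  --group-name talos-capi-nodes \
  --description "Talos API and trustd access for CAPI-managed nodes" \
  --vpc-id vpc-xxyyzz \
  --query GroupId --output text)

# Port 50000: talosctl access from outside the VPC.
aws ec2 authorize-security-group-ingress \
  --group-id "${SG_ID}" --protocol tcp --port 50000 --cidr 0.0.0.0/0

# Port 50001: trustd traffic between the VMs themselves (self-referencing rule).
aws ec2 authorize-security-group-ingress \
  --group-id "${SG_ID}" --protocol tcp --port 50001 --source-group "${SG_ID}"
```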
### Management Plane

- Install the CAPI components into an existing Kubernetes cluster.
  If you need a quick cluster, see the [Talos docs](https://talos.dev) for an example of creating a Docker-based cluster with `talosctl cluster create`.

- If you plan to use MachinePools/autoscaling groups, export the experimental feature flag so that it gets enabled:
```bash
export EXP_MACHINE_POOL=true
```

- Export your AWS credentials in base64-encoded form by issuing the following, updating the credentials path as necessary:
```bash
export AWS_B64ENCODED_CREDENTIALS=$(cat ~/.aws/credentials | base64 | tr -d '\n')
```

- Using Cluster API's `clusterctl` tool, initialize the management plane:
```bash
clusterctl init -b talos -c talos -i aws
```

- Unfortunately, until the v0.6.5 release of the AWS Cluster API provider, a patched version of the manager must be used.
  Issue the following to patch the deployment:
```bash
kubectl patch deploy -n capa-system capa-controller-manager --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value": "docker.io/rsmitty/cluster-api-controller-amd64:dev"}]'
```

## Create cluster

First, use the [autoscaling](./autoscaling-workers/autoscaling-workers.env), [multi-az](./multi-az/multi-az.env), or [standard](./standard/standard.env) environment file as a base, substituting values as necessary to match your AWS environment.

- Source the environment variables with `source /path/to/envfile`.

- Finally, create your cluster using the appropriate template.

For standard:
```bash
clusterctl config cluster ${CLUSTER_NAME} --from https://github.com/talos-systems/cluster-api-templates/blob/main/aws/standard/standard.yaml | kubectl apply -f -
```

For multi-AZ:
```bash
clusterctl config cluster ${CLUSTER_NAME} --from https://github.com/talos-systems/cluster-api-templates/blob/main/aws/multi-az/multi-az.yaml | kubectl apply -f -
```

For MachinePools/autoscaling groups:
```bash
clusterctl config cluster ${CLUSTER_NAME} --from https://github.com/talos-systems/cluster-api-templates/blob/main/aws/autoscaling-workers/autoscaling-workers.yaml | kubectl apply -f -
```

## Connecting

- Once created, you can fetch a talosconfig for the new cluster with:
```
kubectl get talosconfig \
  -l cluster.x-k8s.io/cluster-name=${CLUSTER_NAME} \
  -o jsonpath='{.items[0].status.talosConfig}' > ${CLUSTER_NAME}.yaml
```

- With the talosconfig in hand, fetch the kubeconfig with:
```
talosctl --talosconfig ${CLUSTER_NAME}.yaml kubeconfig
```
--------------------------------------------------------------------------------
/aws/autoscaling-workers/autoscaling-workers.env:
--------------------------------------------------------------------------------
## Cluster-wide vars
export CLUSTER_NAME=talos-aws-test
export AWS_REGION=us-east-1
export AWS_SSH_KEY_NAME=talos-ssh
export AWS_VPC_ID=vpc-xxyyyzz
export AWS_SUBNET=subnet-xxyyzz
# renovate: datasource=github-releases depName=projectcalico/calico
export CALICO_VERSION=v3.24.1
# renovate: datasource=github-releases extractVersion=^v(?<version>.*)$ depName=kubernetes/kubernetes
export KUBERNETES_VERSION=1.21.0
# renovate: datasource=github-releases extractVersion=^(?<version>v\d+\.\d+)\.\d+$ depName=siderolabs/talos
export TALOS_VERSION=v0.10

## Control plane vars
export CONTROL_PLANE_MACHINE_COUNT=3
export AWS_CONTROL_PLANE_MACHINE_TYPE=t3.large
export AWS_CONTROL_PLANE_VOL_SIZE=50
export AWS_CONTROL_PLANE_AMI_ID=ami-xxyyzz
export AWS_CONTROL_PLANE_ADDL_SEC_GROUPS='[{id: sg-xxyyzz}]'
export AWS_CONTROL_PLANE_IAM_PROFILE=CAPI_AWS_ControlPlane

## Worker vars
export AWS_WORKER_POOL_MIN=1
export AWS_WORKER_POOL_DESIRED=3
export AWS_WORKER_POOL_MAX=10
export AWS_NODE_MACHINE_TYPE=t3.large
export AWS_NODE_VOL_SIZE=50
export AWS_NODE_AMI_ID=ami-xxyyzz
export AWS_NODE_ADDL_SEC_GROUPS='[{id: sg-xxyyzz}]'
export AWS_NODE_IAM_PROFILE=CAPI_AWS_Worker
--------------------------------------------------------------------------------
/aws/autoscaling-workers/autoscaling-workers.yaml:
-------------------------------------------------------------------------------- 1 | ## Cluster configs 2 | 3 | apiVersion: cluster.x-k8s.io/v1beta1 4 | kind: Cluster 5 | metadata: 6 | name: ${CLUSTER_NAME} 7 | spec: 8 | clusterNetwork: 9 | pods: 10 | cidrBlocks: 11 | - 192.168.0.0/16 12 | infrastructureRef: 13 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 14 | kind: AWSCluster 15 | name: ${CLUSTER_NAME} 16 | controlPlaneRef: 17 | kind: TalosControlPlane 18 | apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 19 | name: ${CLUSTER_NAME}-controlplane 20 | --- 21 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 22 | kind: AWSCluster 23 | metadata: 24 | name: ${CLUSTER_NAME} 25 | spec: 26 | region: ${AWS_REGION} 27 | sshKeyName: ${AWS_SSH_KEY_NAME} 28 | network: 29 | vpc: 30 | id: ${AWS_VPC_ID} 31 | subnets: 32 | - id: ${AWS_SUBNET} 33 | --- 34 | ## Control plane configs 35 | 36 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 37 | kind: AWSMachineTemplate 38 | metadata: 39 | name: ${CLUSTER_NAME}-controlplane 40 | spec: 41 | template: 42 | spec: 43 | cloudInit: 44 | insecureSkipSecretsManager: true 45 | instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE} 46 | rootVolume: 47 | size: ${AWS_CONTROL_PLANE_VOL_SIZE} 48 | sshKeyName: ${AWS_SSH_KEY_NAME} 49 | ami: 50 | id: ${AWS_CONTROL_PLANE_AMI_ID} 51 | additionalSecurityGroups: ${AWS_CONTROL_PLANE_ADDL_SEC_GROUPS} 52 | publicIP: true 53 | iamInstanceProfile: ${AWS_CONTROL_PLANE_IAM_PROFILE} 54 | subnet: 55 | id: ${AWS_SUBNET} 56 | --- 57 | apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 58 | kind: TalosControlPlane 59 | metadata: 60 | name: ${CLUSTER_NAME}-controlplane 61 | spec: 62 | version: v${KUBERNETES_VERSION} 63 | replicas: ${CONTROL_PLANE_MACHINE_COUNT} 64 | infrastructureTemplate: 65 | kind: AWSMachineTemplate 66 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 67 | name: ${CLUSTER_NAME}-controlplane 68 | controlPlaneConfig: 69 | controlplane: 70 | generateType: controlplane 71 | talosVersion: ${TALOS_VERSION} 72 | configPatches: 73 | - op: add 74 | path: /cluster/network/cni 75 | value: 76 | name: custom 77 | urls: 78 | - https://raw.githubusercontent.com/projectcalico/calico/${CALICO_VERSION}/manifests/calico.yaml 79 | - op: add 80 | path: /machine/kubelet/registerWithFQDN 81 | value: true 82 | - op: add 83 | path: /cluster/externalCloudProvider 84 | value: 85 | enabled: true 86 | manifests: 87 | - https://raw.githubusercontent.com/siderolabs/cluster-api-templates/main/aws/manifests/ccm.yaml 88 | --- 89 | ## Worker machinepool configs 90 | 91 | apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 92 | kind: TalosConfig 93 | metadata: 94 | name: ${CLUSTER_NAME}-workers 95 | spec: 96 | generateType: join 97 | talosVersion: ${TALOS_VERSION} 98 | configPatches: 99 | - op: add 100 | path: /machine/kubelet/registerWithFQDN 101 | value: true 102 | - op: add 103 | path: /cluster/externalCloudProvider 104 | value: 105 | enabled: true 106 | --- 107 | apiVersion: exp.cluster.x-k8s.io/v1beta1 108 | kind: MachinePool 109 | metadata: 110 | name: ${CLUSTER_NAME}-workers 111 | spec: 112 | clusterName: ${CLUSTER_NAME} 113 | replicas: ${AWS_WORKER_POOL_DESIRED} 114 | template: 115 | spec: 116 | bootstrap: 117 | configRef: 118 | apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 119 | kind: TalosConfig 120 | name: ${CLUSTER_NAME}-workers 121 | clusterName: ${CLUSTER_NAME} 122 | infrastructureRef: 123 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 124 | kind: AWSMachinePool 125 | name: ${CLUSTER_NAME}-workers 126 | version: 
${KUBERNETES_VERSION} 127 | --- 128 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 129 | kind: AWSMachinePool 130 | metadata: 131 | name: ${CLUSTER_NAME}-workers 132 | spec: 133 | minSize: ${AWS_WORKER_POOL_MIN} 134 | maxSize: ${AWS_WORKER_POOL_MAX} 135 | awsLaunchTemplate: 136 | instanceType: ${AWS_NODE_MACHINE_TYPE} 137 | sshKeyName: ${AWS_SSH_KEY_NAME} 138 | additionalSecurityGroups: ${AWS_NODE_ADDL_SEC_GROUPS} 139 | rootVolume: 140 | size: ${AWS_NODE_VOL_SIZE} 141 | ami: 142 | id: ${AWS_NODE_AMI_ID} 143 | iamInstanceProfile: ${AWS_NODE_IAM_PROFILE} 144 | --- 145 | -------------------------------------------------------------------------------- /aws/manifests/ccm.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: aws-cloud-controller-manager/templates/serviceaccount.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: cloud-controller-manager 7 | namespace: kube-system 8 | labels: 9 | helm.sh/chart: "aws-cloud-controller-manager-0.0.7" 10 | --- 11 | # Source: aws-cloud-controller-manager/templates/clusterrole.yaml 12 | apiVersion: rbac.authorization.k8s.io/v1 13 | kind: ClusterRole 14 | metadata: 15 | name: system:cloud-controller-manager 16 | labels: 17 | helm.sh/chart: "aws-cloud-controller-manager-0.0.7" 18 | rules: 19 | - apiGroups: 20 | - "" 21 | resources: 22 | - events 23 | verbs: 24 | - create 25 | - patch 26 | - update 27 | - apiGroups: 28 | - "" 29 | resources: 30 | - nodes 31 | verbs: 32 | - '*' 33 | - apiGroups: 34 | - "" 35 | resources: 36 | - nodes/status 37 | verbs: 38 | - patch 39 | - apiGroups: 40 | - "" 41 | resources: 42 | - services 43 | verbs: 44 | - list 45 | - patch 46 | - update 47 | - watch 48 | - apiGroups: 49 | - "" 50 | resources: 51 | - services/status 52 | verbs: 53 | - list 54 | - patch 55 | - update 56 | - watch 57 | - apiGroups: 58 | - "" 59 | resources: 60 | - serviceaccounts 61 | verbs: 62 | - create 63 | - apiGroups: 64 | - "" 65 | resources: 66 | - persistentvolumes 67 | verbs: 68 | - get 69 | - list 70 | - update 71 | - watch 72 | - apiGroups: 73 | - "" 74 | resources: 75 | - endpoints 76 | verbs: 77 | - create 78 | - get 79 | - list 80 | - watch 81 | - update 82 | - apiGroups: 83 | - coordination.k8s.io 84 | resources: 85 | - leases 86 | verbs: 87 | - create 88 | - get 89 | - list 90 | - watch 91 | - update 92 | - apiGroups: 93 | - "" 94 | resources: 95 | - serviceaccounts/token 96 | verbs: 97 | - create 98 | --- 99 | # Source: aws-cloud-controller-manager/templates/cluserrolebinding.yaml 100 | apiVersion: rbac.authorization.k8s.io/v1 101 | kind: ClusterRoleBinding 102 | metadata: 103 | name: system:cloud-controller-manager 104 | labels: 105 | helm.sh/chart: "aws-cloud-controller-manager-0.0.7" 106 | roleRef: 107 | kind: ClusterRole 108 | name: system:cloud-controller-manager 109 | apiGroup: rbac.authorization.k8s.io 110 | subjects: 111 | - apiGroup: "" 112 | kind: ServiceAccount 113 | name: cloud-controller-manager 114 | namespace: kube-system 115 | --- 116 | # Source: aws-cloud-controller-manager/templates/rolebinding.yaml 117 | apiVersion: rbac.authorization.k8s.io/v1 118 | kind: RoleBinding 119 | metadata: 120 | name: cloud-controller-manager:apiserver-authentication-reader 121 | namespace: kube-system 122 | labels: 123 | helm.sh/chart: "aws-cloud-controller-manager-0.0.7" 124 | roleRef: 125 | apiGroup: rbac.authorization.k8s.io 126 | kind: Role 127 | name: extension-apiserver-authentication-reader 128 | subjects: 129 | - apiGroup: "" 130 | kind: ServiceAccount 
131 | name: cloud-controller-manager 132 | namespace: kube-system 133 | --- 134 | # Source: aws-cloud-controller-manager/templates/daemonset.yaml 135 | apiVersion: apps/v1 136 | kind: DaemonSet 137 | metadata: 138 | name: aws-cloud-controller-manager 139 | labels: 140 | k8s-app: aws-cloud-controller-manager 141 | helm.sh/chart: "aws-cloud-controller-manager-0.0.7" 142 | namespace: kube-system 143 | spec: 144 | selector: 145 | matchLabels: 146 | k8s-app: aws-cloud-controller-manager 147 | updateStrategy: 148 | type: RollingUpdate 149 | template: 150 | metadata: 151 | name: aws-cloud-controller-manager 152 | labels: 153 | k8s-app: aws-cloud-controller-manager 154 | spec: 155 | tolerations: 156 | - effect: NoSchedule 157 | key: node.cloudprovider.kubernetes.io/uninitialized 158 | value: "true" 159 | - effect: NoSchedule 160 | key: node-role.kubernetes.io/master 161 | - effect: NoSchedule 162 | key: node-role.kubernetes.io/control-plane 163 | nodeSelector: 164 | node-role.kubernetes.io/control-plane: "" 165 | dnsPolicy: Default 166 | priorityClassName: system-node-critical 167 | serviceAccountName: cloud-controller-manager 168 | securityContext: 169 | {} 170 | containers: 171 | - name: aws-cloud-controller-manager 172 | image: "registry.k8s.io/provider-aws/cloud-controller-manager:v1.25.1" 173 | args: 174 | - --v=2 175 | - --cloud-provider=aws 176 | - --configure-cloud-routes=false 177 | resources: 178 | requests: 179 | cpu: 200m 180 | env: 181 | [] 182 | securityContext: 183 | {} 184 | -------------------------------------------------------------------------------- /aws/multi-az/multi-az.env: -------------------------------------------------------------------------------- 1 | ## Cluster-wide vars 2 | 3 | ### Subnets 4 | export AWS_PUB_SUB_A='{id: subnet-xxyyzz}' 5 | export AWS_PUB_SUB_B='{id: subnet-xxyyzz}' 6 | export AWS_PUB_SUB_C='{id: subnet-xxyyzz}' 7 | export AWS_PRIV_SUBS='{id: subnet-xxyyzz},{id: subnet-xxyyzz},{id: subnet-xxyyzz}' 8 | export AWS_SUBNETS="[$PUB_SUB_A,$PUB_SUB_B,$PUB_SUB_C,$PRIV_SUBS]" 9 | 10 | ### Everything Else 11 | export CLUSTER_NAME=talos-aws-test 12 | export AWS_REGION=us-east-1 13 | export AWS_SSH_KEY_NAME=talos-ssh 14 | export AWS_VPC_ID=vpc-xxyyyzz 15 | # renovate: datasource=github-releases depName=projectcalico/calico 16 | export CALICO_VERSION=v3.24.1 17 | # renovate: datasource=github-releases extractVersion=^v(?.*)$ depName=kubernetes/kubernetes 18 | export KUBERNETES_VERSION=1.21.0 19 | # renovate: datasource=github-releases extractVersion=^(?v\d+\.\d+)\.\d+$ depName=siderolabs/talos 20 | export TALOS_VERSION=v0.10 21 | 22 | ## Control plane vars 23 | export AWS_CONTROL_PLANE_LB_URL=xxyyzz.elb.us-east-1.amazonaws.com 24 | export CONTROL_PLANE_MACHINE_COUNT=3 25 | export AWS_CONTROL_PLANE_MACHINE_TYPE=t3.large 26 | export AWS_CONTROL_PLANE_VOL_SIZE=50 27 | export AWS_CONTROL_PLANE_AMI_ID=ami-xxyyzz 28 | export AWS_CONTROL_PLANE_ADDL_SEC_GROUPS='[{id: sg-xxyyzz}]' 29 | export AWS_CONTROL_PLANE_IAM_PROFILE=CAPI_AWS_ControlPlane 30 | 31 | ## Worker vars 32 | ### Note that machine count is per-AZ 33 | export WORKER_MACHINE_COUNT=1 34 | export AWS_NODE_MACHINE_TYPE=t3.large 35 | export AWS_NODE_VOL_SIZE=50 36 | export AWS_NODE_AMI_ID=ami-xxyyzz 37 | export AWS_NODE_ADDL_SEC_GROUPS='[{id: sg-xxyyzz}]' 38 | export AWS_NODE_IAM_PROFILE=CAPI_AWS_Worker 39 | -------------------------------------------------------------------------------- /aws/multi-az/multi-az.yaml: -------------------------------------------------------------------------------- 1 | ## Cluster 
configs 2 | 3 | apiVersion: cluster.x-k8s.io/v1beta1 4 | kind: Cluster 5 | metadata: 6 | name: ${CLUSTER_NAME} 7 | spec: 8 | clusterNetwork: 9 | pods: 10 | cidrBlocks: 11 | - 192.168.0.0/16 12 | infrastructureRef: 13 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 14 | kind: AWSCluster 15 | name: ${CLUSTER_NAME} 16 | controlPlaneRef: 17 | kind: TalosControlPlane 18 | apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 19 | name: ${CLUSTER_NAME}-controlplane 20 | --- 21 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 22 | kind: AWSCluster 23 | metadata: 24 | name: ${CLUSTER_NAME} 25 | spec: 26 | region: ${AWS_REGION} 27 | sshKeyName: ${AWS_SSH_KEY_NAME} 28 | network: 29 | vpc: 30 | id: ${AWS_VPC_ID} 31 | subnets: ${AWS_SUBNETS} 32 | --- 33 | ## Control plane configs 34 | 35 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 36 | kind: AWSMachineTemplate 37 | metadata: 38 | name: ${CLUSTER_NAME}-controlplane 39 | spec: 40 | template: 41 | spec: 42 | cloudInit: 43 | insecureSkipSecretsManager: true 44 | instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE} 45 | rootVolume: 46 | size: ${AWS_CONTROL_PLANE_VOL_SIZE} 47 | sshKeyName: ${SSH_KEY} 48 | ami: 49 | id: ${AWS_CONTROL_PLANE_AMI_ID} 50 | additionalSecurityGroups: ${AWS_CONTROL_PLANE_ADDL_SEC_GROUPS} 51 | iamInstanceProfile: ${AWS_CONTROL_PLANE_IAM_PROFILE} 52 | --- 53 | apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 54 | kind: TalosControlPlane 55 | metadata: 56 | name: ${CLUSTER_NAME}-controlplane 57 | spec: 58 | version: v${KUBERNETES_VERSION} 59 | replicas: ${CONTROL_PLANE_MACHINE_COUNT} 60 | infrastructureTemplate: 61 | kind: AWSMachineTemplate 62 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 63 | name: ${CLUSTER_NAME}-controlplane 64 | controlPlaneConfig: 65 | controlplane: 66 | generateType: controlplane 67 | talosVersion: ${TALOS_VERSION} 68 | configPatches: 69 | - op: add 70 | path: /cluster/network/cni 71 | value: 72 | name: custom 73 | urls: 74 | - https://raw.githubusercontent.com/projectcalico/calico/${CALICO_VERSION}/manifests/calico.yaml 75 | - op: add 76 | path: /machine/kubelet/registerWithFQDN 77 | value: true 78 | - op: add 79 | path: /cluster/externalCloudProvider 80 | value: 81 | enabled: true 82 | manifests: 83 | - https://raw.githubusercontent.com/siderolabs/cluster-api-templates/main/aws/manifests/ccm.yaml 84 | - op: add 85 | path: /machine/certSANs 86 | value: 87 | - ${AWS_CONTROL_PLANE_LB_URL} 88 | --- 89 | ## Worker deployment configs 90 | 91 | ### TalosConfigTemplate can be shared across all MachineDeployments 92 | apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 93 | kind: TalosConfigTemplate 94 | metadata: 95 | name: ${CLUSTER_NAME}-workers 96 | spec: 97 | template: 98 | spec: 99 | generateType: join 100 | talosVersion: ${TALOS_VERSION} 101 | configPatches: 102 | - op: add 103 | path: /machine/kubelet/extraArgs 104 | value: 105 | cloud-provider: "external" 106 | - op: add 107 | path: /machine/kubelet/registerWithFQDN 108 | value: true 109 | --- 110 | ### Worker group A 111 | apiVersion: cluster.x-k8s.io/v1beta1 112 | kind: MachineDeployment 113 | metadata: 114 | labels: 115 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 116 | nodepool: nodepool-a 117 | name: ${CLUSTER_NAME}-workers-a 118 | spec: 119 | clusterName: ${CLUSTER_NAME} 120 | replicas: ${WORKER_MACHINE_COUNT} 121 | selector: 122 | matchLabels: 123 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 124 | nodepool: nodepool-a 125 | template: 126 | metadata: 127 | labels: 128 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 129 | 
nodepool: nodepool-a 130 | spec: 131 | clusterName: ${CLUSTER_NAME} 132 | bootstrap: 133 | configRef: 134 | apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 135 | kind: TalosConfigTemplate 136 | name: ${CLUSTER_NAME}-workers 137 | infrastructureRef: 138 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 139 | kind: AWSMachineTemplate 140 | name: ${CLUSTER_NAME}-workers-a 141 | version: ${KUBERNETES_VERSION} 142 | --- 143 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 144 | kind: AWSMachineTemplate 145 | metadata: 146 | name: ${CLUSTER_NAME}-workers-a 147 | spec: 148 | template: 149 | spec: 150 | cloudInit: 151 | insecureSkipSecretsManager: true 152 | instanceType: ${AWS_NODE_MACHINE_TYPE} 153 | rootVolume: 154 | size: ${AWS_NODE_VOL_SIZE} 155 | sshKeyName: ${AWS_SSH_KEY_NAME} 156 | ami: 157 | id: ${AWS_NODE_AMI_ID} 158 | subnet: ${AWS_PUB_SUB_A} 159 | publicIP: true 160 | iamInstanceProfile: ${AWS_NODE_IAM_PROFILE} 161 | additionalSecurityGroups: ${AWS_NODE_ADDL_SEC_GROUPS} 162 | --- 163 | ### Worker group B 164 | apiVersion: cluster.x-k8s.io/v1beta1 165 | kind: MachineDeployment 166 | metadata: 167 | labels: 168 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 169 | nodepool: nodepool-b 170 | name: ${CLUSTER_NAME}-workers-b 171 | spec: 172 | clusterName: ${CLUSTER_NAME} 173 | replicas: ${WORKER_MACHINE_COUNT} 174 | selector: 175 | matchLabels: 176 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 177 | nodepool: nodepool-b 178 | template: 179 | metadata: 180 | labels: 181 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 182 | nodepool: nodepool-b 183 | spec: 184 | clusterName: ${CLUSTER_NAME} 185 | bootstrap: 186 | configRef: 187 | apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 188 | kind: TalosConfigTemplate 189 | name: ${CLUSTER_NAME}-workers 190 | infrastructureRef: 191 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 192 | kind: AWSMachineTemplate 193 | name: ${CLUSTER_NAME}-workers-b 194 | version: ${KUBERNETES_VERSION} 195 | --- 196 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 197 | kind: AWSMachineTemplate 198 | metadata: 199 | name: ${CLUSTER_NAME}-workers-b 200 | spec: 201 | template: 202 | spec: 203 | cloudInit: 204 | insecureSkipSecretsManager: true 205 | instanceType: ${AWS_NODE_MACHINE_TYPE} 206 | rootVolume: 207 | size: ${AWS_NODE_VOL_SIZE} 208 | sshKeyName: ${AWS_SSH_KEY_NAME} 209 | ami: 210 | id: ${AWS_NODE_AMI_ID} 211 | subnet: ${AWS_PUB_SUB_B} 212 | publicIP: true 213 | iamInstanceProfile: ${AWS_NODE_IAM_PROFILE} 214 | additionalSecurityGroups: ${AWS_NODE_ADDL_SEC_GROUPS} 215 | --- 216 | ### Worker group C 217 | apiVersion: cluster.x-k8s.io/v1beta1 218 | kind: MachineDeployment 219 | metadata: 220 | labels: 221 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 222 | nodepool: nodepool-c 223 | name: ${CLUSTER_NAME}-workers-c 224 | spec: 225 | clusterName: ${CLUSTER_NAME} 226 | replicas: ${WORKER_MACHINE_COUNT} 227 | selector: 228 | matchLabels: 229 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 230 | nodepool: nodepool-c 231 | template: 232 | metadata: 233 | labels: 234 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 235 | nodepool: nodepool-c 236 | spec: 237 | clusterName: ${CLUSTER_NAME} 238 | bootstrap: 239 | configRef: 240 | apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 241 | kind: TalosConfigTemplate 242 | name: ${CLUSTER_NAME}-workers 243 | infrastructureRef: 244 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 245 | kind: AWSMachineTemplate 246 | name: ${CLUSTER_NAME}-workers-c 247 | version: ${KUBERNETES_VERSION} 248 | --- 249 | 
apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 250 | kind: AWSMachineTemplate 251 | metadata: 252 | name: ${CLUSTER_NAME}-workers-c 253 | spec: 254 | template: 255 | spec: 256 | cloudInit: 257 | insecureSkipSecretsManager: true 258 | instanceType: ${AWS_NODE_MACHINE_TYPE} 259 | rootVolume: 260 | size: ${AWS_NODE_VOL_SIZE} 261 | sshKeyName: ${AWS_SSH_KEY_NAME} 262 | ami: 263 | id: ${AWS_NODE_AMI_ID} 264 | subnet: ${AWS_PUB_SUB_C} 265 | publicIP: true 266 | iamInstanceProfile: ${AWS_NODE_IAM_PROFILE} 267 | additionalSecurityGroups: ${AWS_NODE_ADDL_SEC_GROUPS} 268 | --- 269 | ## Health check for all workers 270 | apiVersion: cluster.x-k8s.io/v1beta1 271 | kind: MachineHealthCheck 272 | metadata: 273 | name: ${CLUSTER_NAME}-worker-hc 274 | spec: 275 | clusterName: ${CLUSTER_NAME} 276 | maxUnhealthy: 40% 277 | nodeStartupTimeout: 20m 278 | selector: 279 | matchExpressions: 280 | - { 281 | key: nodepool, 282 | operator: In, 283 | values: [nodepool-a, nodepool-b, nodepool-c], 284 | } 285 | unhealthyConditions: 286 | - type: Ready 287 | status: Unknown 288 | timeout: 300s 289 | - type: Ready 290 | status: "False" 291 | timeout: 300s 292 | 293 | --- 294 | -------------------------------------------------------------------------------- /aws/standard/standard.env: -------------------------------------------------------------------------------- 1 | ## Cluster-wide vars 2 | export CLUSTER_NAME=talos-aws-test 3 | export NAMESPACE=default 4 | export AWS_REGION=us-east-1 5 | export AWS_SSH_KEY_NAME=talos-ssh 6 | export AWS_VPC_ID=vpc-xxyyyzz 7 | export AWS_SUBNET=subnet-xxyyzz 8 | export AWS_SUBNET_AZ=${AWS_SSH_KEY_NAME}a 9 | # renovate: datasource=github-releases depName=projectcalico/calico 10 | export CALICO_VERSION=v3.24.1 11 | # renovate: datasource=github-releases extractVersion=^v(?.*)$ depName=kubernetes/kubernetes 12 | export KUBERNETES_VERSION=1.21.0 13 | # renovate: datasource=github-releases extractVersion=^(?v\d+\.\d+)\.\d+$ depName=siderolabs/talos 14 | export TALOS_VERSION=v0.10 15 | 16 | ## Control plane vars 17 | export CONTROL_PLANE_MACHINE_COUNT=3 18 | export AWS_CONTROL_PLANE_MACHINE_TYPE=t3.large 19 | export AWS_CONTROL_PLANE_VOL_SIZE=50 20 | export AWS_CONTROL_PLANE_AMI_ID=ami-xxyyzz 21 | export AWS_CONTROL_PLANE_ADDL_SEC_GROUPS='[{id: sg-xxyyzz}]' 22 | export AWS_CONTROL_PLANE_IAM_PROFILE=CAPI_AWS_ControlPlane 23 | 24 | ## Worker vars 25 | export WORKER_MACHINE_COUNT=3 26 | export AWS_NODE_MACHINE_TYPE=t3.large 27 | export AWS_NODE_VOL_SIZE=50 28 | export AWS_NODE_AMI_ID=ami-xxyyzz 29 | export AWS_NODE_ADDL_SEC_GROUPS='[{id: sg-xxyyzz}]' 30 | export AWS_NODE_IAM_PROFILE=CAPI_AWS_Worker 31 | -------------------------------------------------------------------------------- /aws/standard/standard.yaml: -------------------------------------------------------------------------------- 1 | ## Cluster configs 2 | apiVersion: cluster.x-k8s.io/v1beta1 3 | kind: Cluster 4 | metadata: 5 | name: ${CLUSTER_NAME} 6 | namespace: ${NAMESPACE} 7 | spec: 8 | clusterNetwork: 9 | pods: 10 | cidrBlocks: 11 | - 192.168.0.0/16 12 | infrastructureRef: 13 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 14 | kind: AWSCluster 15 | name: ${CLUSTER_NAME} 16 | namespace: ${NAMESPACE} 17 | controlPlaneRef: 18 | kind: TalosControlPlane 19 | apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 20 | name: ${CLUSTER_NAME}-controlplane 21 | namespace: ${NAMESPACE} 22 | --- 23 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 24 | kind: AWSCluster 25 | metadata: 26 | name: ${CLUSTER_NAME} 27 | namespace: 
${NAMESPACE} 28 | spec: 29 | region: ${AWS_REGION} 30 | sshKeyName: ${AWS_SSH_KEY_NAME} 31 | network: 32 | vpc: 33 | id: ${AWS_VPC_ID} 34 | subnets: 35 | - id: ${AWS_SUBNET} 36 | isPublic: true 37 | availabilityZone: ${AWS_SUBNET_AZ} 38 | controlPlaneLoadBalancer: 39 | subnets: 40 | - ${AWS_SUBNET} 41 | --- 42 | ## Control plane configs 43 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 44 | kind: AWSMachineTemplate 45 | metadata: 46 | name: ${CLUSTER_NAME}-controlplane 47 | namespace: ${NAMESPACE} 48 | spec: 49 | template: 50 | spec: 51 | cloudInit: 52 | insecureSkipSecretsManager: true 53 | instanceType: ${AWS_CONTROL_PLANE_MACHINE_TYPE} 54 | rootVolume: 55 | size: ${AWS_CONTROL_PLANE_VOL_SIZE} 56 | sshKeyName: ${AWS_SSH_KEY_NAME} 57 | ami: 58 | id: ${AWS_CONTROL_PLANE_AMI_ID} 59 | additionalSecurityGroups: ${AWS_CONTROL_PLANE_ADDL_SEC_GROUPS} 60 | publicIP: true 61 | iamInstanceProfile: ${AWS_CONTROL_PLANE_IAM_PROFILE} 62 | --- 63 | apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 64 | kind: TalosControlPlane 65 | metadata: 66 | name: ${CLUSTER_NAME}-controlplane 67 | namespace: ${NAMESPACE} 68 | spec: 69 | version: v${KUBERNETES_VERSION} 70 | replicas: ${CONTROL_PLANE_MACHINE_COUNT} 71 | infrastructureTemplate: 72 | kind: AWSMachineTemplate 73 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 74 | name: ${CLUSTER_NAME}-controlplane 75 | namespace: ${NAMESPACE} 76 | controlPlaneConfig: 77 | controlplane: 78 | generateType: controlplane 79 | talosVersion: ${TALOS_VERSION} 80 | configPatches: 81 | - op: add 82 | path: /cluster/network/cni 83 | value: 84 | name: custom 85 | urls: 86 | - https://raw.githubusercontent.com/projectcalico/calico/${CALICO_VERSION}/manifests/calico.yaml 87 | - op: add 88 | path: /machine/kubelet/registerWithFQDN 89 | value: true 90 | - op: add 91 | path: /cluster/externalCloudProvider 92 | value: 93 | enabled: true 94 | manifests: 95 | - https://raw.githubusercontent.com/siderolabs/cluster-api-templates/main/aws/manifests/ccm.yaml 96 | --- 97 | ## Worker deployment configs 98 | apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 99 | kind: TalosConfigTemplate 100 | metadata: 101 | name: ${CLUSTER_NAME}-workers 102 | namespace: ${NAMESPACE} 103 | spec: 104 | template: 105 | spec: 106 | generateType: join 107 | talosVersion: ${TALOS_VERSION} 108 | configPatches: 109 | - op: add 110 | path: /machine/kubelet/registerWithFQDN 111 | value: true 112 | - op: add 113 | path: /cluster/externalCloudProvider 114 | value: 115 | enabled: true 116 | --- 117 | apiVersion: cluster.x-k8s.io/v1beta1 118 | kind: MachineDeployment 119 | metadata: 120 | labels: 121 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 122 | nodepool: nodepool-a 123 | name: ${CLUSTER_NAME}-workers 124 | namespace: ${NAMESPACE} 125 | spec: 126 | clusterName: ${CLUSTER_NAME} 127 | replicas: ${WORKER_MACHINE_COUNT} 128 | selector: 129 | matchLabels: 130 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 131 | nodepool: nodepool-a 132 | template: 133 | metadata: 134 | labels: 135 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 136 | nodepool: nodepool-a 137 | spec: 138 | clusterName: ${CLUSTER_NAME} 139 | bootstrap: 140 | configRef: 141 | apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 142 | kind: TalosConfigTemplate 143 | name: ${CLUSTER_NAME}-workers 144 | namespace: ${NAMESPACE} 145 | infrastructureRef: 146 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 147 | kind: AWSMachineTemplate 148 | name: ${CLUSTER_NAME}-workers 149 | namespace: ${NAMESPACE} 150 | version: ${KUBERNETES_VERSION} 151 | --- 
152 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 153 | kind: AWSMachineTemplate 154 | metadata: 155 | name: ${CLUSTER_NAME}-workers 156 | namespace: ${NAMESPACE} 157 | spec: 158 | template: 159 | spec: 160 | cloudInit: 161 | insecureSkipSecretsManager: true 162 | instanceType: ${AWS_NODE_MACHINE_TYPE} 163 | rootVolume: 164 | size: ${AWS_NODE_VOL_SIZE} 165 | sshKeyName: ${AWS_SSH_KEY_NAME} 166 | ami: 167 | id: ${AWS_NODE_AMI_ID} 168 | additionalSecurityGroups: ${AWS_NODE_ADDL_SEC_GROUPS} 169 | publicIP: true 170 | iamInstanceProfile: ${AWS_NODE_IAM_PROFILE} 171 | --- 172 | ## Health check for workers 173 | apiVersion: cluster.x-k8s.io/v1beta1 174 | kind: MachineHealthCheck 175 | metadata: 176 | name: ${CLUSTER_NAME}-worker-hc 177 | namespace: ${NAMESPACE} 178 | spec: 179 | clusterName: ${CLUSTER_NAME} 180 | maxUnhealthy: 40% 181 | nodeStartupTimeout: 20m 182 | selector: 183 | matchLabels: 184 | nodepool: nodepool-a 185 | unhealthyConditions: 186 | - type: Ready 187 | status: Unknown 188 | timeout: 300s 189 | - type: Ready 190 | status: "False" 191 | timeout: 300s 192 | -------------------------------------------------------------------------------- /gcp/README.md: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/siderolabs/cluster-api-templates/a9460c652d6c6a9a2b69ddd316d457647671f942/gcp/README.md -------------------------------------------------------------------------------- /gcp/standard/standard.env: -------------------------------------------------------------------------------- 1 | ## Cluster-wide vars 2 | export CLUSTER_NAME=talos-gcp-demo 3 | export GCP_PROJECT=my-testbed 4 | export GCP_REGION=us-central1 5 | export GCP_NETWORK=default 6 | # renovate: datasource=github-releases extractVersion=^v(?.*)$ depName=kubernetes/kubernetes 7 | export KUBERNETES_VERSION=1.20.5 8 | # renovate: datasource=github-releases extractVersion=^(?v\d+\.\d+)\.\d+$ depName=siderolabs/talos 9 | export TALOS_VERSION=v0.13 10 | export GCP_VM_SVC_ACCOUNT=mysvcacct@my-testbed.iam.gserviceaccount.com 11 | 12 | ## Control plane vars 13 | export CONTROL_PLANE_MACHINE_COUNT=3 14 | export GCP_CONTROL_PLANE_MACHINE_TYPE=n1-standard-4 15 | export GCP_CONTROL_PLANE_VOL_SIZE=50 16 | export GCP_CONTROL_PLANE_IMAGE_ID=projects/${GCP_PROJECT}/global/images/talos-xxx-yyy 17 | 18 | ## Worker vars 19 | export WORKER_MACHINE_COUNT=3 20 | export GCP_NODE_MACHINE_TYPE=n1-standard-4 21 | export GCP_NODE_VOL_SIZE=50 22 | export GCP_NODE_IMAGE_ID=projects/${GCP_PROJECT}/global/images/talos-xxx-yyy 23 | -------------------------------------------------------------------------------- /gcp/standard/standard.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | ## Cluster configs 3 | 4 | apiVersion: cluster.x-k8s.io/v1beta1 5 | kind: Cluster 6 | metadata: 7 | name: ${CLUSTER_NAME} 8 | spec: 9 | clusterNetwork: 10 | pods: 11 | cidrBlocks: ["192.168.0.0/16"] 12 | infrastructureRef: 13 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 14 | kind: GCPCluster 15 | name: ${CLUSTER_NAME} 16 | controlPlaneRef: 17 | kind: TalosControlPlane 18 | apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 19 | name: ${CLUSTER_NAME}-controlplane 20 | --- 21 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 22 | kind: GCPCluster 23 | metadata: 24 | name: ${CLUSTER_NAME} 25 | spec: 26 | project: ${GCP_PROJECT} 27 | region: ${GCP_REGION} 28 | network: 29 | name: ${GCP_NETWORK} 30 | --- 31 | ## Control plane configs 32 | 33 | 
apiVersion: controlplane.cluster.x-k8s.io/v1alpha3 34 | kind: TalosControlPlane 35 | metadata: 36 | name: ${CLUSTER_NAME}-controlplane 37 | spec: 38 | version: v${KUBERNETES_VERSION} 39 | replicas: ${CONTROL_PLANE_MACHINE_COUNT} 40 | infrastructureTemplate: 41 | kind: GCPMachineTemplate 42 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 43 | name: ${CLUSTER_NAME}-controlplane 44 | controlPlaneConfig: 45 | controlplane: 46 | generateType: controlplane 47 | talosVersion: ${TALOS_VERSION} 48 | --- 49 | kind: GCPMachineTemplate 50 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 51 | metadata: 52 | name: ${CLUSTER_NAME}-controlplane 53 | spec: 54 | template: 55 | spec: 56 | instanceType: ${GCP_CONTROL_PLANE_MACHINE_TYPE} 57 | image: ${GCP_CONTROL_PLANE_IMAGE_ID} 58 | publicIP: true 59 | rootDeviceSize: ${GCP_CONTROL_PLANE_VOL_SIZE} 60 | serviceAccounts: 61 | email: ${GCP_VM_SVC_ACCOUNT} 62 | scopes: 63 | - "https://www.googleapis.com/auth/compute" 64 | --- 65 | ## Worker configs 66 | 67 | apiVersion: cluster.x-k8s.io/v1beta1 68 | kind: MachineDeployment 69 | metadata: 70 | labels: 71 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 72 | nodepool: nodepool-a 73 | name: ${CLUSTER_NAME}-workers 74 | spec: 75 | clusterName: ${CLUSTER_NAME} 76 | replicas: ${WORKER_MACHINE_COUNT} 77 | selector: 78 | matchLabels: 79 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 80 | nodepool: nodepool-a 81 | template: 82 | metadata: 83 | labels: 84 | cluster.x-k8s.io/cluster-name: ${CLUSTER_NAME} 85 | nodepool: nodepool-a 86 | spec: 87 | clusterName: ${CLUSTER_NAME} 88 | failureDomain: ${GCP_REGION}-a 89 | version: ${KUBERNETES_VERSION} 90 | bootstrap: 91 | configRef: 92 | name: ${CLUSTER_NAME}-workers 93 | apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 94 | kind: TalosConfigTemplate 95 | infrastructureRef: 96 | name: ${CLUSTER_NAME}-workers 97 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 98 | kind: GCPMachineTemplate 99 | --- 100 | apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 101 | kind: GCPMachineTemplate 102 | metadata: 103 | name: ${CLUSTER_NAME}-workers 104 | spec: 105 | template: 106 | spec: 107 | instanceType: ${GCP_NODE_MACHINE_TYPE} 108 | image: ${GCP_NODE_IMAGE_ID} 109 | serviceAccounts: 110 | email: ${GCP_VM_SVC_ACCOUNT} 111 | scopes: 112 | - "https://www.googleapis.com/auth/compute" 113 | publicIP: true 114 | rootDeviceSize: ${GCP_NODE_VOL_SIZE} 115 | --- 116 | apiVersion: bootstrap.cluster.x-k8s.io/v1alpha3 117 | kind: TalosConfigTemplate 118 | metadata: 119 | name: ${CLUSTER_NAME}-workers 120 | spec: 121 | template: 122 | spec: 123 | generateType: join 124 | talosVersion: ${TALOS_VERSION} 125 | --- 126 | ## Health check for workers 127 | apiVersion: cluster.x-k8s.io/v1alpha4 128 | kind: MachineHealthCheck 129 | metadata: 130 | name: ${CLUSTER_NAME}-worker-hc 131 | spec: 132 | clusterName: ${CLUSTER_NAME} 133 | maxUnhealthy: 40% 134 | nodeStartupTimeout: 20m 135 | selector: 136 | matchLabels: 137 | nodepool: nodepool-a 138 | unhealthyConditions: 139 | - type: Ready 140 | status: Unknown 141 | timeout: 300s 142 | - type: Ready 143 | status: "False" 144 | timeout: 300s 145 | --------------------------------------------------------------------------------
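The GCP README above is included by reference only. As a rough sketch, and assuming the GCP flow mirrors the AWS instructions (management plane initialized with `clusterctl init -b talos -c talos -i gcp`, environment file sourced first), creating the standard GCP cluster from a local checkout of this repository might look like:

```bash
# Hypothetical GCP variant of the AWS flow; confirm the exact steps against gcp/README.md.
source ./gcp/standard/standard.env
clusterctl config cluster ${CLUSTER_NAME} --from ./gcp/standard/standard.yaml | kubectl apply -f -
```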