├── .gitignore
├── .pre-commit-config.yaml
├── LICENSE
├── README.md
├── data.tf
├── examples
│   ├── full-ha
│   │   ├── main.tf
│   │   ├── terraform.example.tfvars
│   │   └── variables.tf
│   └── offline
│       ├── bundle.hcl
│       ├── main.tf
│       ├── terraform.example.tfvars
│       └── variables.tf
├── main.tf
├── modules
│   ├── controlplane-lb
│   │   ├── main.tf
│   │   ├── outputs.tf
│   │   └── variables.tf
│   ├── database
│   │   ├── main.tf
│   │   ├── outputs.tf
│   │   └── variables.tf
│   ├── nodepool
│   │   ├── README.md
│   │   ├── data.tf
│   │   ├── files
│   │   │   ├── aws-ccm.yaml
│   │   │   ├── aws-ebs.yaml
│   │   │   ├── bootstrap.sh
│   │   │   ├── cloud-config-base.yaml
│   │   │   ├── download_dependencies.sh
│   │   │   ├── k3s.sh
│   │   │   ├── nodedrain.service
│   │   │   ├── nodedrain.sh
│   │   │   └── secrets.sh
│   │   ├── main.tf
│   │   ├── outputs.tf
│   │   └── variables.tf
│   └── state-store
│       ├── main.tf
│       ├── outputs.tf
│       └── variables.tf
├── outputs.tf
├── variables.tf
└── versions.tf
/.gitignore: -------------------------------------------------------------------------------- 1 | # IDEs 2 | .idea 3 | 4 | # Airgap testing 5 | k3s.bundle 6 | *.zip 7 | id_rsa 8 | id_rsa.pub 9 | 10 | # Local .terraform directories 11 | **/.terraform/* 12 | 13 | # .tfstate files 14 | *.tfstate 15 | *.tfstate.* 16 | 17 | # Crash log files 18 | crash.log 19 | 20 | # Ignore any .tfvars files that are generated automatically for each Terraform run. Most 21 | # .tfvars files are managed as part of configuration and so should be included in 22 | # version control. 23 | # 24 | # example.tfvars 25 | 26 | # Ignore override files as they are usually used to override resources locally and so 27 | # are not checked in 28 | override.tf 29 | override.tf.json 30 | *_override.tf 31 | *_override.tf.json 32 | 33 | # Include override files you do wish to add to version control using negated pattern 34 | # 35 | # !example_override.tf 36 | 37 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan 38 | # example: *tfplan* 39 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/gruntwork-io/pre-commit 3 | rev: v0.1.10 4 | hooks: 5 | - id: terraform-fmt 6 | # - id: terraform-validate 7 | # - id: tflint 8 |
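# NOTE (added): with the pre-commit tool installed locally, the hooks above are
# activated per-clone via `pre-commit install` and can be run on demand against
# the whole tree with `pre-commit run --all-files`.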
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Rancher Federal 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # k3s-tf 2 | 3 | __Warning__: This repo is still a work in progress. While it contains working deployments, many of the components are still subject to change. 4 | 5 | Terraform IaC for HA K3s on Commercial, GovCloud, or (T)C2S AWS regions. 6 | 7 | Since k3s doesn't enforce a single installation approach, there are many ways to bootstrap a cluster. This repository demonstrates _one_ of them: going from zero to infrastructure + HA cluster in just a few minutes. 8 | 9 | This repo is tailored to deploy to any AWS region, and uses only the cloud services that exist in all of those environments. As such, the following services are required: 10 | 11 | * Autoscaling Groups 12 | * RDS MySQL 13 | * Classic Load Balancers (C2S compatibility) 14 | * S3 (C2S compatibility) 15 | 16 | TODO: More docs on architecture, inputs, etc. 17 | 18 | ## Examples 19 | 20 | Examples are provided in the `examples/` directory for common use cases: 21 | 22 | * `full-ha`: Zero-to-hero full environment + cluster. Will create network resources (VPC, subnets, etc.) and an HA cluster in an online environment. This is the default use case and is tested against commercial and GovCloud AWS. 23 | * `offline`: Restricted-privilege airgapped cluster. Will use existing resources to deploy a cluster in an entirely airgapped environment. This is tested against C2S. 24 | 25 | ## Inputs 26 | 27 | | Name | Description | Type | Default | Required | 28 | |------|-------------|------|---------|:--------:| 29 | | name | Name of the cluster, will be prepended to cluster resources | `string` | n/a | yes | 30 | | subnets | List of subnet ids of the shared cluster resources such as load balancers and RDS. Generally set to private subnets | `list(string)` | n/a | yes | 31 | | vpc\_id | VPC ID of the cluster | `string` | n/a | yes | 32 | | rds\_ca\_cert\_identifier | RDS CA Certificate Identifier | `string` | `"rds-ca-2017"` | no | 33 | | state\_bucket | Name of existing S3 bucket to store cluster state/secrets in, will create bucket if left blank | `string` | `null` | no | 34 | | tags | Common tags to attach to all created resources | `map(string)` | `{}` | no | 35 | 36 | ## Outputs 37 | 38 | | Name | Description | 39 | |------|-------------| 40 | | cluster | Name of the cluster to be passed into all node pools | 41 | | cluster\_security\_group | Shared cluster security group required to be passed into all node pools | 42 | | controlplane\_loadbalancer | Name of the controlplane load balancer | 43 | | datastore\_endpoint | Formatted output for k3s --datastore-endpoint. Provided for visibility only; it does not need to be passed into node pools, as it is fetched from the cluster state bucket on node boot | 44 | | shared\_agent\_security\_group | Shared agent security group that may optionally be passed into agent node pools | 45 | | shared\_server\_security\_group | Shared server security group required to be passed into all server node pools | 46 | | state\_bucket | Name of the bucket used to store k3s cluster state, required to be passed in to node pools | 47 | | state\_bucket\_arn | ARN of the bucket used to store k3s cluster state, if it was created. Null is output if the module did not create the bucket. | 48 | | state\_key | Name of the state object used to store k3s cluster state | 49 | | tls\_san | DNS of the control plane load balancer, used for passing --tls-san to server nodepools | 50 | | token | Token used for k3s --token registration; exposed for convenience and does not need to be passed into node pools, it is loaded via the S3 state bucket | 51 | | url | Formatted load balancer url used for --server on agent node pools | 52 | 53 |
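## Usage

A minimal invocation of the root module might look like the following sketch. The source path, IDs, and tags are illustrative placeholders rather than values shipped with this repo; see the `examples/` directory for complete, tested configurations.

```hcl
module "k3s" {
  # Hypothetical source; point this at your checkout of this repository
  source = "path/to/k3s-tf"

  name    = "demo"
  vpc_id  = "vpc-0123456789abcdef0"
  subnets = ["subnet-aaaa1111", "subnet-bbbb2222"]

  tags = {
    terraform = "true"
  }
}
```

Server and agent node pools are then attached by passing the module's `cluster`, `cluster_security_group`, and `state_bucket` outputs into `modules/nodepool`, as both examples demonstrate.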
-------------------------------------------------------------------------------- /data.tf: -------------------------------------------------------------------------------- 1 | data "aws_vpc" "this" { 2 | id = var.vpc_id 3 | } -------------------------------------------------------------------------------- /examples/full-ha/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | version = "~> 3.0" 3 | } 4 | 5 | data "aws_ami" "rhel8" { 6 | owners = ["219670896067"] 7 | most_recent = true 8 | 9 | filter { 10 | name = "name" 11 | values = ["RHEL-8.3*"] 12 | } 13 | 14 | filter { 15 | name = "architecture" 16 | values = ["x86_64"] 17 | } 18 | } 19 | 20 | resource "tls_private_key" "global_key" { 21 | algorithm = "RSA" 22 | rsa_bits = 2048 23 | } 24 | 25 | resource "local_file" "ssh_private_key_pem" { 26 | filename = "${path.module}/id_rsa" 27 | sensitive_content = tls_private_key.global_key.private_key_pem 28 | file_permission = "0600" 29 | } 30 | 31 | resource "local_file" "ssh_public_key_openssh" { 32 | filename = "${path.module}/id_rsa.pub" 33 | content = tls_private_key.global_key.public_key_openssh 34 | } 35 | 36 | module "network" { 37 | source = "terraform-aws-modules/vpc/aws" 38 | version = "2.48.0" 39 | 40 | name = "full-online-ha-k3s" 41 | cidr = "10.188.0.0/16" 42 | 43 | azs = ["us-gov-west-1a", "us-gov-west-1b", "us-gov-west-1c"] 44 | public_subnets = ["10.188.1.0/24", "10.188.2.0/24", "10.188.3.0/24"] 45 | private_subnets = ["10.188.101.0/24", "10.188.102.0/24", "10.188.103.0/24"] 46 | 47 | enable_nat_gateway = true 48 | enable_vpn_gateway = true 49 | single_nat_gateway = true 50 | enable_dns_hostnames = true 51 | enable_dns_support = true 52 | 53 | tags = { 54 | Terraform = "true" 55 | Environment = "dev" 56 | } 57 | } 58 | 59 | module "k3s" { 60 | source = "../../" 61 | 62 | name = var.name 63 | 64 | vpc_id = module.network.vpc_id 65 | subnets = module.network.public_subnets 66 | 67 | tags = var.tags 68 | } 69 | 70 | # Primary server nodepool 71 | module "servers" { 72 | source = "../../modules/nodepool" 73 | 74 | # Node variables 75 | name = "primary-servers" 76 | vpc_id = module.network.vpc_id 77 | subnets = module.network.public_subnets 78 | ami = data.aws_ami.rhel8.id 79 | ssh_authorized_keys = [tls_private_key.global_key.public_key_openssh] 80 | iam_instance_profile = "InstanceOpsRole" 81 | asg = { min : 1, max : 3, desired : 2 } 82 |
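# NOTE (added): asg and block_device_mappings are typed objects (see the
# nodepool module README); asg takes {min, max, desired} instance counts, and
# block_device_mappings sets the root EBS volume size (GiB) and encryption.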
block_device_mappings = { size : 64, encrypted : true } 83 | 84 | # Cluster join variables 85 | cluster = module.k3s.cluster 86 | cluster_security_group = module.k3s.cluster_security_group 87 | extra_security_groups = [module.k3s.shared_server_security_group] 88 | controlplane_loadbalancer = module.k3s.controlplane_loadbalancer 89 | state_bucket = module.k3s.state_bucket 90 | 91 | # K3S Variables 92 | k3s_tls_sans = [module.k3s.tls_san] 93 | k3s_node_labels = ["type=primary-server"] 94 | auto_deployed_manifests = [] 95 | 96 | tags = var.tags 97 | } 98 | 99 | # Generic agent nodepool 100 | module "generic_agents" { 101 | source = "../../modules/nodepool" 102 | 103 | name = "generic-agents" 104 | vpc_id = module.network.vpc_id 105 | subnets = module.network.public_subnets 106 | ami = data.aws_ami.rhel8.id 107 | ssh_authorized_keys = [tls_private_key.global_key.public_key_openssh] 108 | iam_instance_profile = "InstanceOpsRole" 109 | asg = { min : 1, max : 2, desired : 1 } 110 | 111 | # Cluster join variables 112 | cluster = module.k3s.cluster 113 | cluster_security_group = module.k3s.cluster_security_group 114 | state_bucket = module.k3s.state_bucket 115 | 116 | # K3S Variables 117 | k3s_node_labels = ["type=generic-agent"] 118 | k3s_url = module.k3s.url 119 | 120 | tags = var.tags 121 | } 122 | 123 | # Open ssh for demonstration purposes; ssh is not a required part of cluster bootstrapping 124 | resource "aws_security_group_rule" "demo_ssh" { 125 | from_port = 22 126 | to_port = 22 127 | protocol = "tcp" 128 | security_group_id = module.k3s.cluster_security_group 129 | type = "ingress" 130 | 131 | cidr_blocks = ["0.0.0.0/0"] 132 | } -------------------------------------------------------------------------------- /examples/full-ha/terraform.example.tfvars: -------------------------------------------------------------------------------- 1 | name = "full-ha" 2 | -------------------------------------------------------------------------------- /examples/full-ha/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | default = "k3s-ha" 4 | } 5 | 6 | variable "ssh_authorized_keys" { 7 | type = list(string) 8 | default = [] 9 | } 10 | 11 | variable "tags" { 12 | default = { 13 | "terraform" = "true", 14 | "env" = "demo", 15 | } 16 | } -------------------------------------------------------------------------------- /examples/offline/bundle.hcl: -------------------------------------------------------------------------------- 1 | terraform { 2 | # Version of Terraform to include in the bundle. An exact version number 3 | # is required. 4 | version = "0.13.1" 5 | } 6 | 7 | # Define which provider plugins are to be included 8 | providers { 9 | # Include the newest "aws" provider version in the 3.0 series.
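# NOTE (added): this file is input for the terraform-bundle tool that shipped
# with Terraform core through 0.13; something like `terraform-bundle package
# bundle.hcl` produces the archive of Terraform plus providers that gets
# carried into the airgapped environment.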
10 | aws = { 11 | versions = ["~> 3.0"] 12 | } 13 | 14 | template = { 15 | versions = ["~> 2.0"] 16 | } 17 | 18 | random = { 19 | versions = ["~> 2.0"] 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /examples/offline/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | version = "~> 3.0" 3 | } 4 | 5 | locals { 6 | download_dependencies = file(var.download_dependencies) 7 | } 8 | 9 | module "k3s" { 10 | source = "../../" 11 | 12 | name = var.name 13 | 14 | vpc_id = var.vpc_id 15 | subnets = var.subnets 16 | state_bucket = var.state_bucket 17 | 18 | rds_ca_cert_identifier = var.rds_ca_cert_identifier 19 | 20 | tags = var.tags 21 | } 22 | 23 | # Primary server nodepool 24 | module "servers" { 25 | source = "../../modules/nodepool" 26 | 27 | # Node variables 28 | name = "primary-servers" 29 | vpc_id = var.vpc_id 30 | subnets = var.subnets 31 | ami = var.ami 32 | ssh_authorized_keys = var.public_keys 33 | iam_instance_profile = var.iam_instance_profile 34 | asg = { min : 1, max : 3, desired : 2 } 35 | 36 | # Cluster variables 37 | cluster = module.k3s.cluster 38 | cluster_security_group = module.k3s.cluster_security_group 39 | extra_security_groups = [module.k3s.shared_server_security_group] 40 | controlplane_loadbalancer = module.k3s.controlplane_loadbalancer 41 | state_bucket = module.k3s.state_bucket 42 | deploy_cloud_controller_manager = false 43 | 44 | dependencies_script = local.download_dependencies 45 | 46 | # K3S Variables 47 | k3s_registries = var.k3s_registries 48 | k3s_tls_sans = [module.k3s.tls_san] 49 | k3s_node_labels = ["type=primary-server"] 50 | 51 | tags = var.tags 52 | } 53 | 54 | # Generic agent nodepool 55 | module "generic_agents" { 56 | source = "../../modules/nodepool" 57 | 58 | # Node Variables 59 | name = "generic-agents" 60 | vpc_id = var.vpc_id 61 | subnets = var.subnets 62 | ami = var.ami 63 | ssh_authorized_keys = var.public_keys 64 | iam_instance_profile = var.iam_instance_profile 65 | asg = { min : 1, max : 2, desired : 1 } 66 | 67 | # Cluster Variables 68 | cluster = module.k3s.cluster 69 | cluster_security_group = module.k3s.cluster_security_group 70 | state_bucket = module.k3s.state_bucket 71 | deploy_cloud_controller_manager = false 72 | 73 | dependencies_script = local.download_dependencies 74 | 75 | # K3S Variables 76 | k3s_registries = var.k3s_registries 77 | k3s_node_labels = ["type=generic-agent"] 78 | k3s_url = module.k3s.url 79 | 80 | tags = var.tags 81 | } 82 | 83 | # NOTE: Nothing with the bootstrap process requires ssh, but for this example we open ssh on the server nodes for example purposes 84 | resource "aws_security_group_rule" "ssh" { 85 | from_port = 22 86 | to_port = 22 87 | protocol = "tcp" 88 | security_group_id = module.k3s.cluster_security_group 89 | type = "ingress" 90 | 91 | cidr_blocks = ["0.0.0.0/0"] 92 | } -------------------------------------------------------------------------------- /examples/offline/terraform.example.tfvars: -------------------------------------------------------------------------------- 1 | name = "k3s-offline" -------------------------------------------------------------------------------- /examples/offline/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | default = "k3s-ha-offline" 4 | } 5 | 6 | variable "vpc_id" { 7 | type = string 8 | } 9 | 10 | variable "subnets" { 11 | type = list(string) 12 | } 13 | 14 | 
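# NOTE (added): the default below is a commercial-partition AMI ID; AMI IDs
# are region-specific, so for a real airgapped/GovCloud/C2S deployment this
# must be overridden with an image available (and ideally pre-staged with the
# k3s dependencies) in the target environment.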
variable "ami" { 15 | default = "ami-02354e95b39ca8dec" 16 | } 17 | 18 | variable "iam_instance_profile" { 19 | default = "" 20 | } 21 | 22 | variable "k3s_registries" { 23 | default = "" 24 | } 25 | 26 | variable "rds_ca_cert_identifier" { 27 | default = "rds-ca-2017" 28 | } 29 | 30 | variable "download_dependencies" { 31 | type = string 32 | default = "../../modules/nodepool/files/download_dependencies.sh" 33 | } 34 | 35 | variable "state_bucket" { 36 | type = string 37 | } 38 | 39 | variable "public_keys" { 40 | type = list(string) 41 | default = [] 42 | } 43 | 44 | variable "tags" { 45 | default = {} 46 | } -------------------------------------------------------------------------------- /main.tf: -------------------------------------------------------------------------------- 1 | # 2 | # K3S resources 3 | # 4 | resource "random_password" "token" { 5 | length = 32 6 | special = false 7 | } 8 | 9 | resource "random_password" "db" { 10 | length = 32 11 | special = false 12 | } 13 | 14 | # 15 | # Control plane Lb 16 | # 17 | module "controlplane_lb" { 18 | source = "./modules/controlplane-lb" 19 | 20 | name = var.name 21 | vpc_id = var.vpc_id 22 | subnets = var.subnets 23 | 24 | # AWS CCM Tagging 25 | tags = merge({ 26 | "kubernetes.io/cluster/${var.name}" = "owned" 27 | }, var.tags) 28 | } 29 | 30 | # 31 | # Database 32 | # 33 | module "db" { 34 | source = "./modules/database" 35 | 36 | name = var.name 37 | vpc_id = var.vpc_id 38 | subnets = var.subnets 39 | password = random_password.db.result 40 | 41 | ca_cert_identifier = var.rds_ca_cert_identifier 42 | 43 | tags = var.tags 44 | } 45 | 46 | # 47 | # Cluster Shared Security Group 48 | # 49 | resource "aws_security_group" "cluster" { 50 | name = "${var.name}-k3s-cluster-sg" 51 | vpc_id = var.vpc_id 52 | description = "Shared cluster k3s server/agent security group" 53 | 54 | tags = merge({ 55 | "shared" = "true", 56 | }, var.tags) 57 | } 58 | 59 | # TODO: Trim these down 60 | resource "aws_security_group_rule" "all_self_ingress" { 61 | description = "Allow all ingress traffic between cluster nodes" 62 | from_port = 0 63 | to_port = 0 64 | protocol = "-1" 65 | security_group_id = aws_security_group.cluster.id 66 | type = "ingress" 67 | 68 | self = true 69 | } 70 | 71 | resource "aws_security_group_rule" "all_self_egress" { 72 | description = "Allow all egress traffic" 73 | from_port = 0 74 | to_port = 0 75 | protocol = "-1" 76 | security_group_id = aws_security_group.cluster.id 77 | type = "egress" 78 | 79 | cidr_blocks = ["0.0.0.0/0"] 80 | } 81 | 82 | # 83 | # Shared Security Groups 84 | # 85 | resource "aws_security_group" "shared_server" { 86 | name = "${var.name}-k3s-shared-server-sg" 87 | vpc_id = var.vpc_id 88 | description = "Shared k3s server security group" 89 | 90 | tags = merge({ 91 | "shared" = "true", 92 | }, var.tags) 93 | } 94 | 95 | resource "aws_security_group_rule" "controlplane_ingress" { 96 | description = "All traffic between nodes" 97 | from_port = module.controlplane_lb.port 98 | to_port = module.controlplane_lb.port 99 | protocol = "tcp" 100 | security_group_id = aws_security_group.shared_server.id 101 | type = "ingress" 102 | 103 | cidr_blocks = [data.aws_vpc.this.cidr_block] 104 | } 105 | 106 | resource "aws_security_group_rule" "server_db_ingress" { 107 | description = "Allow servers to connect to DB" 108 | from_port = module.db.port 109 | to_port = module.db.port 110 | protocol = "tcp" 111 | security_group_id = module.db.sg 112 | type = "ingress" 113 | 114 | source_security_group_id = 
aws_security_group.shared_server.id 115 | } 116 | 117 | resource "aws_security_group" "shared_agent" { 118 | name = "${var.name}-k3s-shared-agent-sg" 119 | vpc_id = var.vpc_id 120 | description = "Shared k3s agent security group" 121 | 122 | tags = merge({ 123 | "shared" = "true", 124 | }, var.tags) 125 | } 126 | 127 | # 128 | # State Storage 129 | # 130 | module "state" { 131 | source = "./modules/state-store" 132 | 133 | count = var.state_bucket == null ? 1 : 0 134 | 135 | name = var.name 136 | } 137 | 138 | resource "aws_s3_bucket_object" "state" { 139 | bucket = var.state_bucket == null ? module.state[0].bucket : var.state_bucket 140 | key = "state.env" 141 | 142 | content_type = "text/plain" 143 | content = <<-EOT 144 | TOKEN=${random_password.token.result} 145 | DATASTORE_ENDPOINT=${module.db.datastore_endpoint} 146 | EOT 147 | } -------------------------------------------------------------------------------- /modules/controlplane-lb/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_vpc" "this" { 2 | id = var.vpc_id 3 | } 4 | 5 | # 6 | # Classic Load Balancer Resources 7 | # 8 | resource "aws_elb" "this" { 9 | name = "${var.name}-k3scp" 10 | internal = var.internal 11 | subnets = var.subnets 12 | cross_zone_load_balancing = true 13 | security_groups = [aws_security_group.lb.id] 14 | 15 | listener { 16 | instance_port = var.port 17 | instance_protocol = "TCP" 18 | lb_port = var.port 19 | lb_protocol = "TCP" 20 | } 21 | 22 | health_check { 23 | target = "TCP:${var.port}" 24 | healthy_threshold = var.healthy_threshold 25 | unhealthy_threshold = var.unhealthy_threshold 26 | interval = var.interval 27 | timeout = var.timeout 28 | } 29 | 30 | tags = merge({ 31 | "kubernetes.io/cluster/${var.name}" = "owned" 32 | }, var.tags) 33 | } 34 | 35 | resource "aws_security_group" "lb" { 36 | name = "${var.name}-k3s-controlplane-sg" 37 | vpc_id = var.vpc_id 38 | description = "${var.name} controlplane" 39 | } 40 | 41 | resource "aws_security_group_rule" "ingress" { 42 | from_port = var.port 43 | to_port = var.port 44 | protocol = "tcp" 45 | security_group_id = aws_security_group.lb.id 46 | type = "ingress" 47 | 48 | cidr_blocks = [data.aws_vpc.this.cidr_block] 49 | } 50 | 51 | resource "aws_security_group_rule" "egress" { 52 | from_port = -1 53 | to_port = -1 54 | protocol = "-1" 55 | security_group_id = aws_security_group.lb.id 56 | type = "egress" 57 | 58 | cidr_blocks = ["0.0.0.0/0"] 59 | } -------------------------------------------------------------------------------- /modules/controlplane-lb/outputs.tf: -------------------------------------------------------------------------------- 1 | output "name" { 2 | value = aws_elb.this.name 3 | } 4 | 5 | output "dns" { 6 | value = aws_elb.this.dns_name 7 | } 8 | 9 | output "port" { 10 | value = var.port 11 | } 12 |
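# NOTE (added): the ELB above is a plain TCP passthrough, so TLS terminates on
# the k3s servers themselves; the `dns` output therefore needs to end up in the
# API server certificate SANs, which the root module wires up via its tls_san
# output.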
-------------------------------------------------------------------------------- /modules/controlplane-lb/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | } 4 | 5 | variable "vpc_id" { 6 | type = string 7 | } 8 | 9 | variable "subnets" { 10 | type = list(string) 11 | } 12 | 13 | variable "internal" { 14 | type = bool 15 | default = true 16 | } 17 | 18 | variable "port" { 19 | type = number 20 | default = 6443 21 | } 22 | 23 | variable "healthy_threshold" { 24 | type = number 25 | default = 2 26 | description = "Number of consecutive successful health checks required before considering a target healthy, must be identical to unhealthy_threshold" 27 | } 28 | 29 | variable "unhealthy_threshold" { 30 | type = number 31 | default = 2 32 | description = "Number of consecutive failed health checks required before considering a target unhealthy, must be identical to healthy_threshold" 33 | } 34 | 35 | variable "interval" { 36 | type = number 37 | default = 10 38 | } 39 | 40 | variable "timeout" { 41 | type = number 42 | default = 5 43 | } 44 | 45 | variable "tags" { 46 | type = map(string) 47 | default = {} 48 | } -------------------------------------------------------------------------------- /modules/database/main.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "this" { 2 | name = "${var.name}-k3s" 3 | description = "${var.name} k3s db sg" 4 | vpc_id = var.vpc_id 5 | 6 | tags = var.tags 7 | } 8 | 9 | resource "aws_db_subnet_group" "this" { 10 | name = "${var.name}-k3s" 11 | subnet_ids = var.subnets 12 | 13 | tags = merge({ 14 | "kubernetes.io/cluster/${var.name}" = "owned" 15 | }, var.tags) 16 | } 17 | 18 | resource "aws_db_instance" "this" { 19 | identifier = "${var.name}-k3s" 20 | 21 | engine = "mysql" 22 | engine_version = "5.7" 23 | 24 | allocated_storage = var.allocated_storage 25 | max_allocated_storage = var.max_allocated_storage 26 | storage_type = "gp2" 27 | ca_cert_identifier = var.ca_cert_identifier 28 | db_subnet_group_name = aws_db_subnet_group.this.name 29 | vpc_security_group_ids = [aws_security_group.this.id] 30 | 31 | instance_class = var.instance_class 32 | name = "k3s" 33 | username = var.username 34 | password = var.password 35 | parameter_group_name = "default.mysql5.7" 36 | 37 | backup_retention_period = 0 38 | delete_automated_backups = true 39 | skip_final_snapshot = true 40 | performance_insights_enabled = false 41 | apply_immediately = true 42 | 43 | tags = merge({ 44 | "kubernetes.io/cluster/${var.name}" = "owned" 45 | }, var.tags) 46 | } -------------------------------------------------------------------------------- /modules/database/outputs.tf: -------------------------------------------------------------------------------- 1 | output "datastore_endpoint" { 2 | value = "mysql://${aws_db_instance.this.username}:${aws_db_instance.this.password}@tcp(${aws_db_instance.this.endpoint})/${aws_db_instance.this.name}" 3 | } 4 | 5 | output "endpoint" { 6 | value = aws_db_instance.this.endpoint 7 | } 8 | 9 | output "port" { 10 | value = aws_db_instance.this.port 11 | } 12 | 13 | output "db_name" { 14 | value = aws_db_instance.this.name 15 | } 16 | 17 | output "sg" { 18 | value = aws_security_group.this.id 19 | } 20 | 21 | output "username" { 22 | value = aws_db_instance.this.username 23 | } 24 | 25 | output "password" { 26 | value = aws_db_instance.this.password 27 | }
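# NOTE (added): datastore_endpoint is rendered in the MySQL DSN form that k3s
# expects for --datastore-endpoint, i.e. mysql://user:pass@tcp(host:port)/dbname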
-------------------------------------------------------------------------------- /modules/database/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | } 4 | 5 | variable "vpc_id" { 6 | type = string 7 | } 8 | 9 | variable "subnets" { 10 | type = list(string) 11 | } 12 | 13 | variable "instance_class" { 14 | type = string 15 | default = "db.t2.medium" 16 | } 17 | 18 | variable "username" { 19 | type = string 20 | default = "k3s" 21 | } 22 | 23 | variable "password" { 24 | type = string 25 | } 26 | 27 | variable "allocated_storage" { 28 | type = number 29 | default = 5 30 | } 31 | 32 | variable "max_allocated_storage" { 33 | type = number 34 | default = 100 35 | } 36 | 37 | variable "ca_cert_identifier" { 38 | type = string 39 | default = "rds-ca-2017" # govcloud default 40 | } 41 | 42 | variable "tags" { 43 | type = map(string) 44 | default = {} 45 | } -------------------------------------------------------------------------------- /modules/nodepool/README.md: -------------------------------------------------------------------------------- 1 | # nodepool 2 | 3 | Shared module for creating k3s server and agent nodepools. 4 | ## Requirements 5 | 6 | No requirements. 7 | 8 | ## Providers 9 | 10 | | Name | Version | 11 | |------|---------| 12 | | aws | n/a | 13 | | template | n/a | 14 | 15 | ## Inputs 16 | 17 | | Name | Description | Type | Default | Required | 18 | |------|-------------|------|---------|:--------:| 19 | | ami | AMI of all EC2 instances within the nodepool | `string` | n/a | yes | 20 | | cluster | Name of the cluster the nodepool belongs to, sourced from k3s module | `string` | n/a | yes | 21 | | cluster\_security\_group | n/a | `string` | n/a | yes | 22 | | name | Name of the node pool, to be appended to all resources | `string` | n/a | yes | 23 | | subnets | List of subnet ids the nodepool is deployed to | `list(string)` | n/a | yes | 24 | | vpc\_id | VPC ID the nodepool is deployed to | `string` | n/a | yes | 25 | | asg | Autoscaling group scale, requires min, max, and desired |
object({
min = number
max = number
desired = number
})
|
{
"desired": 1,
"max": 2,
"min": 1
}
| no | 26 | | auto\_deployed\_manifests | n/a | `list(string)` | `[]` | no | 27 | | block\_device\_mappings | n/a |
object({
size = number
encrypted = bool
})
|
{
"encrypted": true,
"size": 32
}
| no | 28 | | controlplane\_loadbalancer | n/a | `string` | `null` | no | 29 | | dependencies\_script | Dependencies script responsible for any pre-node setup, overriding this overrides the default setup and requires AT LEAST the k3s binary and aws cli downloaded before proceeding | `string` | `null` | no | 30 | | enable\_cloud\_provider | n/a | `bool` | `true` | no | 31 | | extra\_security\_groups | n/a | `list(string)` | `[]` | no | 32 | | iam\_instance\_profile | n/a | `string` | `null` | no | 33 | | instance\_type | n/a | `string` | `"t3.medium"` | no | 34 | | k3s\_disables | k3s services to disable, defaults to traefik, local-storage, and servicelb since we're in the cloud | `list(string)` |
[
"traefik",
"local-storage",
"servicelb"
]
| no | 35 | | k3s\_kube\_apiservers | --kube-apiserver-arg key=value | `list(string)` | `[]` | no | 36 | | k3s\_kube\_cloud\_controller\_managers | --kube-cloud-controller-manager-arg key=value | `list(string)` | `[]` | no | 37 | | k3s\_kube\_controller\_managers | --kube-controller-manager-arg key=value | `list(string)` | `[]` | no | 38 | | k3s\_kube\_schedulers | --kube-scheduler-arg key=value | `list(string)` | `[]` | no | 39 | | k3s\_kubelet\_args | --kubelet-arg key=value | `list(string)` | `[]` | no | 40 | | k3s\_node\_labels | --node-label key=value | `list(string)` | `[]` | no | 41 | | k3s\_node\_taints | --node-taint key=value | `list(string)` | `[]` | no | 42 | | k3s\_tls\_sans | n/a | `list(string)` | `[]` | no | 43 | | k3s\_url | n/a | `string` | `""` | no | 44 | | k3s\_version | K3S Variables | `string` | `"v1.18.8+k3s1"` | no | 45 | | shared\_agent\_security\_group | n/a | `string` | `null` | no | 46 | | shared\_server\_security\_group | n/a | `string` | `null` | no | 47 | | spot | n/a | `bool` | `false` | no | 48 | | ssh\_authorized\_keys | List of public keys that are added to nodes authorized hosts. This is not required for cluster bootstrap, and should only be allowed for development environments where ssh access is beneficial | `list(string)` | `[]` | no | 49 | | state\_bucket | n/a | `string` | `null` | no | 50 | | state\_key | n/a | `string` | `"state.env"` | no | 51 | | tags | n/a | `map(string)` | `{}` | no | 52 | 53 | ## Outputs 54 | 55 | | Name | Description | 56 | |------|-------------| 57 | | autoscaling\_group\_name | n/a | 58 | | security\_group | n/a | 59 | 60 | -------------------------------------------------------------------------------- /modules/nodepool/data.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | gzip_manifests = [for manifest in var.auto_deployed_manifests : { name : manifest.name, content : base64gzip(manifest.content) }] 3 | } 4 | 5 | data "template_cloudinit_config" "this" { 6 | gzip = true 7 | base64_encode = true 8 | 9 | # Main cloud-init config file 10 | part { 11 | filename = "cloud-config-base.yaml" 12 | content_type = "text/cloud-config" 13 | content = templatefile("${path.module}/files/cloud-config-base.yaml", { 14 | k3s_install = base64gzip(file("${path.module}/files/k3s.sh")) 15 | nodedrain = base64gzip(file("${path.module}/files/nodedrain.sh")) 16 | nodedrain_service = base64gzip(file("${path.module}/files/nodedrain.service")) 17 | 18 | ccm = var.external_cloud_provider && var.deploy_cloud_controller_manager ? base64gzip(file("${path.module}/files/aws-ccm.yaml")) : "" 19 | ebs = var.enable_ebs_csi_driver ? base64gzip(file("${path.module}/files/aws-ebs.yaml")) : "" 20 | 21 | ssh_authorized_keys = var.ssh_authorized_keys 22 | 23 | # Manifests to autodeploy on boot 24 | manifests = local.gzip_manifests 25 | 26 | # Registry mirroring setup: see https://rancher.com/docs/k3s/latest/en/installation/private-registry/ 27 | registries = base64gzip(var.k3s_registries) 28 | }) 29 | } 30 | 31 | # downloader (NOTE: In a production deployment, this is usually replaced by custom AMIs) 32 | part { 33 | filename = "00_download_dependencies.sh" 34 | content_type = "text/x-shellscript" 35 | content = var.dependencies_script != null ? 
var.dependencies_script : templatefile("${path.module}/files/download_dependencies.sh", { 36 | k3s_version = var.k3s_version 37 | }) 38 | } 39 | 40 | # secrets fetcher script 41 | part { 42 | filename = "01_secrets.sh" 43 | content_type = "text/x-shellscript" 44 | content = templatefile("${path.module}/files/secrets.sh", { 45 | state_bucket = var.state_bucket 46 | state_key = var.state_key 47 | }) 48 | } 49 | 50 | # k3s bootstrap script 51 | part { 52 | filename = "02_bootstrap.sh" 53 | content_type = "text/x-shellscript" 54 | content = templatefile("${path.module}/files/bootstrap.sh", { 55 | # Cluster metadata 56 | cluster = var.cluster 57 | name = var.name 58 | state_bucket = var.state_bucket 59 | state_key = var.state_key 60 | 61 | # Server if no k3s_url is specified 62 | type = local.is_server ? "server" : "agent" 63 | 64 | # Server K3S Variables 65 | tls_sans = join(";", var.k3s_tls_sans) 66 | disables = join(";", var.k3s_disables) 67 | kube_apiservers = join(";", var.k3s_kube_apiservers) 68 | kube_schedulers = join(";", var.k3s_kube_schedulers) 69 | kube_controller_managers = join(";", var.k3s_kube_controller_managers) 70 | kube_cloud_controller_managers = join(";", var.k3s_kube_cloud_controller_managers) 71 | 72 | # Agent K3S Variables 73 | server = var.k3s_url 74 | 75 | # Shared K3S Variables 76 | cloud_provider = var.external_cloud_provider 77 | k3s_version = var.k3s_version 78 | kubelet_args = join(";", var.k3s_kubelet_args) 79 | node_labels = join(";", var.k3s_node_labels) 80 | node_taints = join(";", var.k3s_node_taints) 81 | }) 82 | } 83 | } -------------------------------------------------------------------------------- /modules/nodepool/files/aws-ccm.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: cloud-controller-manager 6 | namespace: kube-system 7 | --- 8 | apiVersion: rbac.authorization.k8s.io/v1 9 | kind: RoleBinding 10 | metadata: 11 | name: cloud-controller-manager:apiserver-authentication-reader 12 | namespace: kube-system 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: Role 16 | name: extension-apiserver-authentication-reader 17 | subjects: 18 | - apiGroup: "" 19 | kind: ServiceAccount 20 | name: cloud-controller-manager 21 | namespace: kube-system 22 | --- 23 | apiVersion: rbac.authorization.k8s.io/v1 24 | kind: ClusterRole 25 | metadata: 26 | name: system:cloud-controller-manager 27 | rules: 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - events 32 | verbs: 33 | - create 34 | - patch 35 | - update 36 | - apiGroups: 37 | - "" 38 | resources: 39 | - nodes 40 | verbs: 41 | - '*' 42 | - apiGroups: 43 | - "" 44 | resources: 45 | - nodes/status 46 | verbs: 47 | - patch 48 | - apiGroups: 49 | - "" 50 | resources: 51 | - services 52 | verbs: 53 | - list 54 | - patch 55 | - update 56 | - watch 57 | - apiGroups: 58 | - "" 59 | resources: 60 | - services/status 61 | verbs: 62 | - list 63 | - patch 64 | - update 65 | - watch 66 | - apiGroups: 67 | - "" 68 | resources: 69 | - serviceaccounts 70 | verbs: 71 | - create 72 | - apiGroups: 73 | - "" 74 | resources: 75 | - persistentvolumes 76 | verbs: 77 | - get 78 | - list 79 | - update 80 | - watch 81 | - apiGroups: 82 | - "" 83 | resources: 84 | - endpoints 85 | verbs: 86 | - create 87 | - get 88 | - list 89 | - watch 90 | - update 91 | - apiGroups: 92 | - coordination.k8s.io 93 | resources: 94 | - leases 95 | verbs: 96 | - create 97 | - get 98 | - list 99 | - watch 100 | - update 101 | --- 102 
| kind: ClusterRoleBinding 103 | apiVersion: rbac.authorization.k8s.io/v1 104 | metadata: 105 | name: system:cloud-controller-manager 106 | roleRef: 107 | apiGroup: rbac.authorization.k8s.io 108 | kind: ClusterRole 109 | name: system:cloud-controller-manager 110 | subjects: 111 | - apiGroup: "" 112 | kind: ServiceAccount 113 | name: cloud-controller-manager 114 | namespace: kube-system 115 | --- 116 | apiVersion: apps/v1 117 | kind: DaemonSet 118 | metadata: 119 | name: aws-cloud-controller-manager 120 | namespace: kube-system 121 | labels: 122 | k8s-app: aws-cloud-controller-manager 123 | spec: 124 | selector: 125 | matchLabels: 126 | k8s-app: aws-cloud-controller-manager 127 | updateStrategy: 128 | type: RollingUpdate 129 | template: 130 | metadata: 131 | labels: 132 | k8s-app: aws-cloud-controller-manager 133 | spec: 134 | nodeSelector: 135 | node-role.kubernetes.io/master: "true" 136 | tolerations: 137 | - key: node.cloudprovider.kubernetes.io/uninitialized 138 | value: "true" 139 | effect: NoSchedule 140 | - key: node-role.kubernetes.io/master 141 | effect: NoSchedule 142 | operator: "Exists" 143 | serviceAccountName: cloud-controller-manager 144 | containers: 145 | - name: aws-cloud-controller-manager 146 | image: gcr.io/k8s-staging-provider-aws/cloud-controller-manager:v1.18.0-alpha.1 147 | args: 148 | - --v=2 149 | - --cloud-provider=aws 150 | resources: 151 | requests: 152 | cpu: 200m 153 | hostNetwork: true 154 | -------------------------------------------------------------------------------- /modules/nodepool/files/aws-ebs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: aws-ebs-csi-driver/templates/serviceaccount-csi-controller.yaml 3 | apiVersion: v1 4 | kind: ServiceAccount 5 | metadata: 6 | name: ebs-csi-controller-sa 7 | namespace: kube-system 8 | labels: 9 | app.kubernetes.io/name: aws-ebs-csi-driver 10 | app.kubernetes.io/instance: aws-ebs-csi-driver 11 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 12 | app.kubernetes.io/version: "0.6.0" 13 | app.kubernetes.io/managed-by: Helm 14 | --- 15 | # Source: aws-ebs-csi-driver/templates/serviceaccount-snapshot-controller.yaml 16 | apiVersion: v1 17 | kind: ServiceAccount 18 | metadata: 19 | name: ebs-snapshot-controller 20 | namespace: kube-system 21 | labels: 22 | app.kubernetes.io/name: aws-ebs-csi-driver 23 | app.kubernetes.io/instance: aws-ebs-csi-driver 24 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 25 | app.kubernetes.io/version: "0.6.0" 26 | app.kubernetes.io/managed-by: Helm 27 | --- 28 | # Source: aws-ebs-csi-driver/templates/clusterrole-attacher.yaml 29 | kind: ClusterRole 30 | apiVersion: rbac.authorization.k8s.io/v1 31 | metadata: 32 | name: ebs-external-attacher-role 33 | labels: 34 | app.kubernetes.io/name: aws-ebs-csi-driver 35 | app.kubernetes.io/instance: aws-ebs-csi-driver 36 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 37 | app.kubernetes.io/version: "0.6.0" 38 | app.kubernetes.io/managed-by: Helm 39 | rules: 40 | - apiGroups: [""] 41 | resources: ["persistentvolumes"] 42 | verbs: ["get", "list", "watch", "update"] 43 | - apiGroups: [""] 44 | resources: ["nodes"] 45 | verbs: ["get", "list", "watch"] 46 | - apiGroups: ["csi.storage.k8s.io"] 47 | resources: ["csinodeinfos"] 48 | verbs: ["get", "list", "watch"] 49 | - apiGroups: ["storage.k8s.io"] 50 | resources: ["volumeattachments"] 51 | verbs: ["get", "list", "watch", "update"] 52 | --- 53 | # Source: aws-ebs-csi-driver/templates/clusterrole-provisioner.yaml 54 | kind: ClusterRole 55 | apiVersion: 
rbac.authorization.k8s.io/v1 56 | metadata: 57 | name: ebs-external-provisioner-role 58 | labels: 59 | app.kubernetes.io/name: aws-ebs-csi-driver 60 | app.kubernetes.io/instance: aws-ebs-csi-driver 61 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 62 | app.kubernetes.io/version: "0.6.0" 63 | app.kubernetes.io/managed-by: Helm 64 | rules: 65 | - apiGroups: [""] 66 | resources: ["persistentvolumes"] 67 | verbs: ["get", "list", "watch", "create", "delete"] 68 | - apiGroups: [""] 69 | resources: ["persistentvolumeclaims"] 70 | verbs: ["get", "list", "watch", "update"] 71 | - apiGroups: ["storage.k8s.io"] 72 | resources: ["storageclasses"] 73 | verbs: ["get", "list", "watch"] 74 | - apiGroups: [""] 75 | resources: ["events"] 76 | verbs: ["list", "watch", "create", "update", "patch"] 77 | - apiGroups: ["snapshot.storage.k8s.io"] 78 | resources: ["volumesnapshots"] 79 | verbs: ["get", "list"] 80 | - apiGroups: ["snapshot.storage.k8s.io"] 81 | resources: ["volumesnapshotcontents"] 82 | verbs: ["get", "list"] 83 | - apiGroups: ["storage.k8s.io"] 84 | resources: ["csinodes"] 85 | verbs: ["get", "list", "watch"] 86 | - apiGroups: [""] 87 | resources: ["nodes"] 88 | verbs: ["get", "list", "watch"] 89 | - apiGroups: ["coordination.k8s.io"] 90 | resources: ["leases"] 91 | verbs: ["get", "watch", "list", "delete", "update", "create"] 92 | --- 93 | # Source: aws-ebs-csi-driver/templates/clusterrole-resizer.yaml 94 | kind: ClusterRole 95 | apiVersion: rbac.authorization.k8s.io/v1 96 | metadata: 97 | name: ebs-external-resizer-role 98 | labels: 99 | app.kubernetes.io/name: aws-ebs-csi-driver 100 | app.kubernetes.io/instance: aws-ebs-csi-driver 101 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 102 | app.kubernetes.io/version: "0.6.0" 103 | app.kubernetes.io/managed-by: Helm 104 | rules: 105 | # The following rule should be uncommented for plugins that require secrets 106 | # for provisioning. 
107 | # - apiGroups: [""] 108 | # resources: ["secrets"] 109 | # verbs: ["get", "list", "watch"] 110 | - apiGroups: [""] 111 | resources: ["persistentvolumes"] 112 | verbs: ["get", "list", "watch", "update", "patch"] 113 | - apiGroups: [""] 114 | resources: ["persistentvolumeclaims"] 115 | verbs: ["get", "list", "watch"] 116 | - apiGroups: [""] 117 | resources: ["persistentvolumeclaims/status"] 118 | verbs: ["update", "patch"] 119 | - apiGroups: ["storage.k8s.io"] 120 | resources: ["storageclasses"] 121 | verbs: ["get", "list", "watch"] 122 | - apiGroups: [""] 123 | resources: ["events"] 124 | verbs: ["list", "watch", "create", "update", "patch"] 125 | --- 126 | # Source: aws-ebs-csi-driver/templates/clusterrole-snapshot-controller.yaml 127 | kind: ClusterRole 128 | apiVersion: rbac.authorization.k8s.io/v1 129 | metadata: 130 | name: ebs-snapshot-controller-role 131 | labels: 132 | app.kubernetes.io/name: aws-ebs-csi-driver 133 | app.kubernetes.io/instance: aws-ebs-csi-driver 134 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 135 | app.kubernetes.io/version: "0.6.0" 136 | app.kubernetes.io/managed-by: Helm 137 | rules: 138 | - apiGroups: [""] 139 | resources: ["persistentvolumes"] 140 | verbs: ["get", "list", "watch"] 141 | - apiGroups: [""] 142 | resources: ["persistentvolumeclaims"] 143 | verbs: ["get", "list", "watch", "update"] 144 | - apiGroups: ["storage.k8s.io"] 145 | resources: ["storageclasses"] 146 | verbs: ["get", "list", "watch"] 147 | - apiGroups: [""] 148 | resources: ["events"] 149 | verbs: ["list", "watch", "create", "update", "patch"] 150 | - apiGroups: ["snapshot.storage.k8s.io"] 151 | resources: ["volumesnapshotclasses"] 152 | verbs: ["get", "list", "watch"] 153 | - apiGroups: ["snapshot.storage.k8s.io"] 154 | resources: ["volumesnapshotcontents"] 155 | verbs: ["create", "get", "list", "watch", "update", "delete"] 156 | - apiGroups: ["snapshot.storage.k8s.io"] 157 | resources: ["volumesnapshots"] 158 | verbs: ["get", "list", "watch", "update"] 159 | - apiGroups: ["snapshot.storage.k8s.io"] 160 | resources: ["volumesnapshots/status"] 161 | verbs: ["update"] 162 | --- 163 | # Source: aws-ebs-csi-driver/templates/clusterrole-snapshotter.yaml 164 | kind: ClusterRole 165 | apiVersion: rbac.authorization.k8s.io/v1 166 | metadata: 167 | name: ebs-external-snapshotter-role 168 | labels: 169 | app.kubernetes.io/name: aws-ebs-csi-driver 170 | app.kubernetes.io/instance: aws-ebs-csi-driver 171 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 172 | app.kubernetes.io/version: "0.6.0" 173 | app.kubernetes.io/managed-by: Helm 174 | rules: 175 | - apiGroups: [""] 176 | resources: ["events"] 177 | verbs: ["list", "watch", "create", "update", "patch"] 178 | - apiGroups: [""] 179 | resources: ["secrets"] 180 | verbs: ["get", "list"] 181 | - apiGroups: ["snapshot.storage.k8s.io"] 182 | resources: ["volumesnapshotclasses"] 183 | verbs: ["get", "list", "watch"] 184 | - apiGroups: ["snapshot.storage.k8s.io"] 185 | resources: ["volumesnapshotcontents"] 186 | verbs: ["create", "get", "list", "watch", "update", "delete"] 187 | - apiGroups: ["snapshot.storage.k8s.io"] 188 | resources: ["volumesnapshotcontents/status"] 189 | verbs: ["update"] 190 | --- 191 | # Source: aws-ebs-csi-driver/templates/clusterrolebinding-attacher.yaml 192 | kind: ClusterRoleBinding 193 | apiVersion: rbac.authorization.k8s.io/v1 194 | metadata: 195 | name: ebs-csi-attacher-binding 196 | labels: 197 | app.kubernetes.io/name: aws-ebs-csi-driver 198 | app.kubernetes.io/instance: aws-ebs-csi-driver 199 | helm.sh/chart: 
aws-ebs-csi-driver-0.5.0 200 | app.kubernetes.io/version: "0.6.0" 201 | app.kubernetes.io/managed-by: Helm 202 | subjects: 203 | - kind: ServiceAccount 204 | name: ebs-csi-controller-sa 205 | namespace: kube-system 206 | roleRef: 207 | kind: ClusterRole 208 | name: ebs-external-attacher-role 209 | apiGroup: rbac.authorization.k8s.io 210 | --- 211 | # Source: aws-ebs-csi-driver/templates/clusterrolebinding-provisioner.yaml 212 | kind: ClusterRoleBinding 213 | apiVersion: rbac.authorization.k8s.io/v1 214 | metadata: 215 | name: ebs-csi-provisioner-binding 216 | labels: 217 | app.kubernetes.io/name: aws-ebs-csi-driver 218 | app.kubernetes.io/instance: aws-ebs-csi-driver 219 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 220 | app.kubernetes.io/version: "0.6.0" 221 | app.kubernetes.io/managed-by: Helm 222 | subjects: 223 | - kind: ServiceAccount 224 | name: ebs-csi-controller-sa 225 | namespace: kube-system 226 | roleRef: 227 | kind: ClusterRole 228 | name: ebs-external-provisioner-role 229 | apiGroup: rbac.authorization.k8s.io 230 | --- 231 | # Source: aws-ebs-csi-driver/templates/clusterrolebinding-resizer.yaml 232 | kind: ClusterRoleBinding 233 | apiVersion: rbac.authorization.k8s.io/v1 234 | metadata: 235 | name: ebs-csi-resizer-binding 236 | labels: 237 | app.kubernetes.io/name: aws-ebs-csi-driver 238 | app.kubernetes.io/instance: aws-ebs-csi-driver 239 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 240 | app.kubernetes.io/version: "0.6.0" 241 | app.kubernetes.io/managed-by: Helm 242 | subjects: 243 | - kind: ServiceAccount 244 | name: ebs-csi-controller-sa 245 | namespace: kube-system 246 | roleRef: 247 | kind: ClusterRole 248 | name: ebs-external-resizer-role 249 | apiGroup: rbac.authorization.k8s.io 250 | --- 251 | # Source: aws-ebs-csi-driver/templates/clusterrolebinding-snapshot-controller.yaml 252 | kind: ClusterRoleBinding 253 | apiVersion: rbac.authorization.k8s.io/v1 254 | metadata: 255 | name: ebs-csi-snapshot-controller-binding 256 | labels: 257 | app.kubernetes.io/name: aws-ebs-csi-driver 258 | app.kubernetes.io/instance: aws-ebs-csi-driver 259 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 260 | app.kubernetes.io/version: "0.6.0" 261 | app.kubernetes.io/managed-by: Helm 262 | subjects: 263 | - kind: ServiceAccount 264 | name: ebs-snapshot-controller 265 | namespace: kube-system 266 | roleRef: 267 | kind: ClusterRole 268 | name: ebs-snapshot-controller-role 269 | apiGroup: rbac.authorization.k8s.io 270 | --- 271 | # Source: aws-ebs-csi-driver/templates/clusterrolebinding-snapshotter.yaml 272 | kind: ClusterRoleBinding 273 | apiVersion: rbac.authorization.k8s.io/v1 274 | metadata: 275 | name: ebs-csi-snapshotter-binding 276 | labels: 277 | app.kubernetes.io/name: aws-ebs-csi-driver 278 | app.kubernetes.io/instance: aws-ebs-csi-driver 279 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 280 | app.kubernetes.io/version: "0.6.0" 281 | app.kubernetes.io/managed-by: Helm 282 | subjects: 283 | - kind: ServiceAccount 284 | name: ebs-csi-controller-sa 285 | namespace: kube-system 286 | roleRef: 287 | kind: ClusterRole 288 | name: ebs-external-snapshotter-role 289 | apiGroup: rbac.authorization.k8s.io 290 | --- 291 | # Source: aws-ebs-csi-driver/templates/role-snapshot-controller-leaderelection.yaml 292 | kind: Role 293 | apiVersion: rbac.authorization.k8s.io/v1 294 | metadata: 295 | name: ebs-snapshot-controller-leaderelection 296 | namespace: kube-system 297 | labels: 298 | app.kubernetes.io/name: aws-ebs-csi-driver 299 | app.kubernetes.io/instance: aws-ebs-csi-driver 300 | helm.sh/chart: 
aws-ebs-csi-driver-0.5.0 301 | app.kubernetes.io/version: "0.6.0" 302 | app.kubernetes.io/managed-by: Helm 303 | rules: 304 | - apiGroups: ["coordination.k8s.io"] 305 | resources: ["leases"] 306 | verbs: ["get", "watch", "list", "delete", "update", "create"] 307 | --- 308 | # Source: aws-ebs-csi-driver/templates/rolebinding-snapshot-controller-leaderelection.yaml 309 | kind: RoleBinding 310 | apiVersion: rbac.authorization.k8s.io/v1 311 | metadata: 312 | name: snapshot-controller-leaderelection 313 | namespace: kube-system 314 | labels: 315 | app.kubernetes.io/name: aws-ebs-csi-driver 316 | app.kubernetes.io/instance: aws-ebs-csi-driver 317 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 318 | app.kubernetes.io/version: "0.6.0" 319 | app.kubernetes.io/managed-by: Helm 320 | subjects: 321 | - kind: ServiceAccount 322 | name: ebs-snapshot-controller 323 | namespace: kube-system 324 | roleRef: 325 | kind: Role 326 | name: snapshot-controller-leaderelection 327 | apiGroup: rbac.authorization.k8s.io 328 | --- 329 | # Source: aws-ebs-csi-driver/templates/node.yaml 330 | # Node Service 331 | kind: DaemonSet 332 | apiVersion: apps/v1 333 | metadata: 334 | name: ebs-csi-node 335 | namespace: kube-system 336 | labels: 337 | app.kubernetes.io/name: aws-ebs-csi-driver 338 | app.kubernetes.io/instance: aws-ebs-csi-driver 339 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 340 | app.kubernetes.io/version: "0.6.0" 341 | app.kubernetes.io/managed-by: Helm 342 | spec: 343 | selector: 344 | matchLabels: 345 | app: ebs-csi-node 346 | app.kubernetes.io/name: aws-ebs-csi-driver 347 | app.kubernetes.io/instance: aws-ebs-csi-driver 348 | template: 349 | metadata: 350 | labels: 351 | app: ebs-csi-node 352 | app.kubernetes.io/name: aws-ebs-csi-driver 353 | app.kubernetes.io/instance: aws-ebs-csi-driver 354 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 355 | app.kubernetes.io/version: "0.6.0" 356 | app.kubernetes.io/managed-by: Helm 357 | spec: 358 | affinity: 359 | nodeAffinity: 360 | requiredDuringSchedulingIgnoredDuringExecution: 361 | nodeSelectorTerms: 362 | - matchExpressions: 363 | - key: eks.amazonaws.com/compute-type 364 | operator: NotIn 365 | values: 366 | - fargate 367 | nodeSelector: 368 | kubernetes.io/os: linux 369 | kubernetes.io/arch: amd64 370 | hostNetwork: true 371 | priorityClassName: system-node-critical 372 | tolerations: 373 | - operator: Exists 374 | containers: 375 | - name: ebs-plugin 376 | securityContext: 377 | privileged: true 378 | image: amazon/aws-ebs-csi-driver:v0.6.0 379 | args: 380 | - node 381 | - --endpoint=$(CSI_ENDPOINT) 382 | - --logtostderr 383 | - --v=5 384 | env: 385 | - name: CSI_ENDPOINT 386 | value: unix:/csi/csi.sock 387 | volumeMounts: 388 | - name: kubelet-dir 389 | mountPath: /var/lib/kubelet 390 | mountPropagation: "Bidirectional" 391 | - name: plugin-dir 392 | mountPath: /csi 393 | - name: device-dir 394 | mountPath: /dev 395 | ports: 396 | - name: healthz 397 | containerPort: 9808 398 | protocol: TCP 399 | livenessProbe: 400 | httpGet: 401 | path: /healthz 402 | port: healthz 403 | initialDelaySeconds: 10 404 | timeoutSeconds: 3 405 | periodSeconds: 10 406 | failureThreshold: 5 407 | - name: node-driver-registrar 408 | image: quay.io/k8scsi/csi-node-driver-registrar:v1.1.0 409 | args: 410 | - --csi-address=$(ADDRESS) 411 | - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) 412 | - --v=5 413 | lifecycle: 414 | preStop: 415 | exec: 416 | command: ["/bin/sh", "-c", "rm -rf /registration/ebs.csi.aws.com-reg.sock /csi/csi.sock"] 417 | env: 418 | - name: ADDRESS 419 | value: 
/csi/csi.sock 420 | - name: DRIVER_REG_SOCK_PATH 421 | value: /var/lib/kubelet/plugins/ebs.csi.aws.com/csi.sock 422 | volumeMounts: 423 | - name: plugin-dir 424 | mountPath: /csi 425 | - name: registration-dir 426 | mountPath: /registration 427 | - name: liveness-probe 428 | image: quay.io/k8scsi/livenessprobe:v1.1.0 429 | args: 430 | - --csi-address=/csi/csi.sock 431 | volumeMounts: 432 | - name: plugin-dir 433 | mountPath: /csi 434 | volumes: 435 | - name: kubelet-dir 436 | hostPath: 437 | path: /var/lib/kubelet 438 | type: Directory 439 | - name: plugin-dir 440 | hostPath: 441 | path: /var/lib/kubelet/plugins/ebs.csi.aws.com/ 442 | type: DirectoryOrCreate 443 | - name: registration-dir 444 | hostPath: 445 | path: /var/lib/kubelet/plugins_registry/ 446 | type: Directory 447 | - name: device-dir 448 | hostPath: 449 | path: /dev 450 | type: Directory 451 | --- 452 | # Source: aws-ebs-csi-driver/templates/controller.yaml 453 | # Controller Service 454 | kind: Deployment 455 | apiVersion: apps/v1 456 | metadata: 457 | name: ebs-csi-controller 458 | namespace: kube-system 459 | labels: 460 | app.kubernetes.io/name: aws-ebs-csi-driver 461 | app.kubernetes.io/instance: aws-ebs-csi-driver 462 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 463 | app.kubernetes.io/version: "0.6.0" 464 | app.kubernetes.io/managed-by: Helm 465 | spec: 466 | replicas: 2 467 | selector: 468 | matchLabels: 469 | app: ebs-csi-controller 470 | app.kubernetes.io/name: aws-ebs-csi-driver 471 | app.kubernetes.io/instance: aws-ebs-csi-driver 472 | template: 473 | metadata: 474 | labels: 475 | app: ebs-csi-controller 476 | app.kubernetes.io/name: aws-ebs-csi-driver 477 | app.kubernetes.io/instance: aws-ebs-csi-driver 478 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 479 | app.kubernetes.io/version: "0.6.0" 480 | app.kubernetes.io/managed-by: Helm 481 | spec: 482 | nodeSelector: 483 | kubernetes.io/os: linux 484 | kubernetes.io/arch: amd64 485 | node-role.kubernetes.io/master: "true" 486 | serviceAccountName: ebs-csi-controller-sa 487 | priorityClassName: system-cluster-critical 488 | tolerations: 489 | - operator: Exists 490 | containers: 491 | - name: ebs-plugin 492 | image: amazon/aws-ebs-csi-driver:v0.6.0 493 | imagePullPolicy: IfNotPresent 494 | args: 495 | - controller 496 | 497 | - --endpoint=$(CSI_ENDPOINT) 498 | - --logtostderr 499 | - --v=5 500 | env: 501 | - name: CSI_ENDPOINT 502 | value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock 503 | - name: AWS_ACCESS_KEY_ID 504 | valueFrom: 505 | secretKeyRef: 506 | name: aws-secret 507 | key: key_id 508 | optional: true 509 | - name: AWS_SECRET_ACCESS_KEY 510 | valueFrom: 511 | secretKeyRef: 512 | name: aws-secret 513 | key: access_key 514 | optional: true 515 | volumeMounts: 516 | - name: socket-dir 517 | mountPath: /var/lib/csi/sockets/pluginproxy/ 518 | ports: 519 | - name: healthz 520 | containerPort: 9808 521 | protocol: TCP 522 | livenessProbe: 523 | httpGet: 524 | path: /healthz 525 | port: healthz 526 | initialDelaySeconds: 10 527 | timeoutSeconds: 3 528 | periodSeconds: 10 529 | failureThreshold: 5 530 | - name: csi-provisioner 531 | image: quay.io/k8scsi/csi-provisioner:v1.5.0 532 | args: 533 | - --csi-address=$(ADDRESS) 534 | - --v=5 535 | - --feature-gates=Topology=true 536 | - --enable-leader-election 537 | - --leader-election-type=leases 538 | env: 539 | - name: ADDRESS 540 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 541 | volumeMounts: 542 | - name: socket-dir 543 | mountPath: /var/lib/csi/sockets/pluginproxy/ 544 | - name: csi-attacher 545 | image: 
quay.io/k8scsi/csi-attacher:v1.2.0 546 | args: 547 | - --csi-address=$(ADDRESS) 548 | - --v=5 549 | - --leader-election=true 550 | - --leader-election-type=leases 551 | env: 552 | - name: ADDRESS 553 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 554 | volumeMounts: 555 | - name: socket-dir 556 | mountPath: /var/lib/csi/sockets/pluginproxy/ 557 | - name: csi-snapshotter 558 | image: quay.io/k8scsi/csi-snapshotter:v2.1.1 559 | args: 560 | - --csi-address=$(ADDRESS) 561 | - --leader-election=true 562 | env: 563 | - name: ADDRESS 564 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 565 | volumeMounts: 566 | - name: socket-dir 567 | mountPath: /var/lib/csi/sockets/pluginproxy/ 568 | - name: csi-resizer 569 | image: quay.io/k8scsi/csi-resizer:v0.3.0 570 | imagePullPolicy: Always 571 | args: 572 | - --csi-address=$(ADDRESS) 573 | - --v=5 574 | env: 575 | - name: ADDRESS 576 | value: /var/lib/csi/sockets/pluginproxy/csi.sock 577 | volumeMounts: 578 | - name: socket-dir 579 | mountPath: /var/lib/csi/sockets/pluginproxy/ 580 | - name: liveness-probe 581 | image: quay.io/k8scsi/livenessprobe:v1.1.0 582 | args: 583 | - --csi-address=/csi/csi.sock 584 | volumeMounts: 585 | - name: socket-dir 586 | mountPath: /csi 587 | volumes: 588 | - name: socket-dir 589 | emptyDir: {} 590 | --- 591 | # Source: aws-ebs-csi-driver/templates/statefulset.yaml 592 | #Snapshot controller 593 | kind: StatefulSet 594 | apiVersion: apps/v1 595 | metadata: 596 | name: ebs-snapshot-controller 597 | namespace: kube-system 598 | labels: 599 | app.kubernetes.io/name: aws-ebs-csi-driver 600 | app.kubernetes.io/instance: aws-ebs-csi-driver 601 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 602 | app.kubernetes.io/version: "0.6.0" 603 | app.kubernetes.io/managed-by: Helm 604 | spec: 605 | serviceName: ebs-snapshot-controller 606 | replicas: 1 607 | selector: 608 | matchLabels: 609 | app: ebs-snapshot-controller 610 | app.kubernetes.io/name: aws-ebs-csi-driver 611 | app.kubernetes.io/instance: aws-ebs-csi-driver 612 | template: 613 | metadata: 614 | labels: 615 | app: ebs-snapshot-controller 616 | app.kubernetes.io/name: aws-ebs-csi-driver 617 | app.kubernetes.io/instance: aws-ebs-csi-driver 618 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 619 | app.kubernetes.io/version: "0.6.0" 620 | app.kubernetes.io/managed-by: Helm 621 | spec: 622 | serviceAccountName: ebs-snapshot-controller 623 | containers: 624 | - name: snapshot-controller 625 | image: quay.io/k8scsi/snapshot-controller:v2.1.1 626 | args: 627 | - --v=5 628 | - --leader-election=false 629 | --- 630 | # Source: aws-ebs-csi-driver/templates/csidriver.yaml 631 | apiVersion: storage.k8s.io/v1beta1 632 | kind: CSIDriver 633 | metadata: 634 | name: ebs.csi.aws.com 635 | labels: 636 | app.kubernetes.io/name: aws-ebs-csi-driver 637 | app.kubernetes.io/instance: aws-ebs-csi-driver 638 | helm.sh/chart: aws-ebs-csi-driver-0.5.0 639 | app.kubernetes.io/version: "0.6.0" 640 | app.kubernetes.io/managed-by: Helm 641 | spec: 642 | attachRequired: true 643 | podInfoOnMount: false 644 | -------------------------------------------------------------------------------- /modules/nodepool/files/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | K3S_STATE_PATH="/var/lib/rancher/k3s/state.env" 4 | 5 | # Cluster metadata 6 | CLUSTER="${cluster}" 7 | NAME="${name}" 8 | 9 | # Shared k3s args 10 | NODE_LABELS="${node_labels}" 11 | NODE_TAINTS="${node_taints}" 12 | KUBELET_ARGS="${kubelet_args}" 13 | 14 | # Server k3s args 15 | 
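# NOTE (added): these template values arrive as single ';'-joined strings
# (see modules/nodepool/data.tf) and are expanded into repeated CLI flags by
# format_args below, e.g. format_args "a.example.com;b.example.com" "--tls-san"
# prints "--tls-san a.example.com --tls-san b.example.com".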
TLS_SANS="${tls_sans}"
16 | DISABLES="${disables}"
17 | KUBE_APISERVERS="${kube_apiservers}"
18 | KUBE_SCHEDULERS="${kube_schedulers}"
19 | KUBE_CONTROLLER_MANAGERS="${kube_controller_managers}"
20 | KUBE_CLOUD_CONTROLLER_MANAGERS="${kube_cloud_controller_managers}"
21 | 
22 | # Agent k3s args
23 | SERVER="${server}"
24 | 
25 | format_args () {
26 | IFS=';' read -ra arr <<< "$1"
27 | for i in "$${arr[@]}"; do
28 | printf '%s %s ' "$2" "$i"
29 | done
30 | }
31 | 
32 | # --- helper functions for logs ---
33 | info()
34 | {
35 | echo '[INFO] ' "$@"
36 | }
37 | warn()
38 | {
39 | echo '[WARN] ' "$@" >&2
40 | }
41 | fatal()
42 | {
43 | echo '[ERROR] ' "$@" >&2
44 | exit 1
45 | }
46 | 
47 | node_drain() {
48 | systemctl daemon-reload
49 | systemctl enable nodedrain.service
50 | systemctl start --no-block nodedrain.service
51 | }
52 | 
53 | rds_ca() {
54 | # RDS CA Cert
55 | curl -sL https://s3.us-gov-west-1.amazonaws.com/rds-downloads/rds-ca-us-gov-west-1-2017-root.pem -o /etc/ssl/certs/rds-ca-us-gov-west-1-2017-root.pem
56 | curl -sL https://s3.amazonaws.com/rds-downloads/rds-combined-ca-bundle.pem -o /etc/ssl/certs/rds-combined-ca-bundle.pem
57 | }
58 | 
59 | upload() {
60 | case "$1" in
61 | "server")
62 | CONTROLPLANE_LB_DNS="$(aws elb describe-load-balancers --load-balancer-name $${CLUSTER}-k3scp --query 'LoadBalancerDescriptions[*].DNSName' --output text)"
63 | 
64 | pushd /etc/rancher/k3s
65 | 
66 | sed 's|127.0.0.1|'$CONTROLPLANE_LB_DNS'|g' k3s.yaml > k3s-cp.yaml
67 | /usr/local/bin/aws s3 cp k3s-cp.yaml s3://${state_bucket}/k3s.yaml
68 | rm -rf k3s-cp.yaml
69 | 
70 | popd
71 | ;;
72 | *)
73 | info 'Skipping kubeconfig upload since we are not a server'
74 | ;;
75 | esac
76 | 
77 | }
78 | 
79 | bootstrap() {
80 | export $(grep -v '^#' $K3S_STATE_PATH | xargs)
81 | 
82 | node_labels=$(format_args "$NODE_LABELS" "--node-label")
83 | kubelet_args=$(format_args "$KUBELET_ARGS" "--kubelet-arg")
84 | node_taints=$(format_args "$NODE_TAINTS" "--node-taint")
85 | 
86 | token=$(format_args "$TOKEN" "--token")
87 | 
88 | shared_args="$${node_labels} $${kubelet_args} $${node_taints} $${token}"
89 | 
90 | if ${cloud_provider}; then
91 | provider="aws:///$(curl -s http://169.254.169.254/latest/meta-data/placement/availability-zone)/$(curl -s http://169.254.169.254/latest/meta-data/instance-id)"
92 | shared_args="$${shared_args} --kubelet-arg provider-id=$${provider}"
93 | fi
94 | 
95 | type_args=""
96 | 
97 | case "$1" in
98 | # -- server ---
99 | "server")
100 | tls_sans=$(format_args "$TLS_SANS" "--tls-san")
101 | disables=$(format_args "$DISABLES" "--disable")
102 | 
103 | kube_apiservers=$(format_args "$KUBE_APISERVERS" "--kube-apiserver-arg")
104 | kube_schedulers=$(format_args "$KUBE_SCHEDULERS" "--kube-scheduler-arg")
105 | kube_controller_managers=$(format_args "$KUBE_CONTROLLER_MANAGERS" "--kube-controller-manager-arg")
106 | kube_cloud_controller_managers=$(format_args "$KUBE_CLOUD_CONTROLLER_MANAGERS" "--kube-cloud-controller-manager-arg")
107 | 
108 | datastore_cafile=$(format_args "/etc/ssl/certs/rds-combined-ca-bundle.pem" "--datastore-cafile")
109 | datastore_endpoint=$(format_args "$DATASTORE_ENDPOINT" "--datastore-endpoint")
110 | 
111 | type_args="server $${tls_sans} $${disables} $${datastore_cafile} $${datastore_endpoint} $${kube_apiservers} $${kube_schedulers} $${kube_controller_managers} $${kube_cloud_controller_managers}"
112 | 
113 | if ${cloud_provider}; then
114 | type_args="$${type_args} --disable-cloud-controller --kube-apiserver-arg cloud-provider=external --kube-controller-manager-arg cloud-provider=external --disable 
traefik --disable local-storage --disable servicelb"
115 | fi
116 | ;;
117 | # -- agent --
118 | "agent")
119 | server=$(format_args "$SERVER" "--server")
120 | 
121 | type_args="agent $${server}"
122 | ;;
123 | *)
124 | fatal 'Only server and agent are expected for bootstrap'
125 | ;;
126 | esac
127 | 
128 | # Unset secrets from s3 .env
129 | unset $(grep -v '^#' $K3S_STATE_PATH | sed -E 's/(.*)=.*/\1/' | xargs)
130 | rm -rf /var/lib/rancher/k3s/state.env
131 | 
132 | cat /usr/local/bin/k3s.sh | sh -s - $${type_args} $${shared_args}
133 | 
134 | }
135 | 
136 | {
137 | export K3S_KUBECONFIG_MODE="0644"
138 | export INSTALL_K3S_SKIP_DOWNLOAD=true
139 | 
140 | # NOTE: k3s selinux is not GA yet, and as a result, while _most_ use cases work with selinux enabled, it is not recommended quite yet
141 | # TODO: revert this when k3s selinux goes GA
142 | setenforce 0
143 | 
144 | # Enable and start the node-drain service
145 | node_drain
146 | 
147 | rds_ca
148 | 
149 | # Boot
150 | bootstrap ${type}
151 | 
152 | # Upload kubeconfig to s3
153 | upload ${type}
154 | }
-------------------------------------------------------------------------------- /modules/nodepool/files/cloud-config-base.yaml: --------------------------------------------------------------------------------
1 | #cloud-config
2 | %{ if length(ssh_authorized_keys) > 0 }
3 | ssh_authorized_keys:
4 | %{ for key in ssh_authorized_keys }
5 | - ${key}
6 | %{ endfor }
7 | %{ endif }
8 | write_files:
9 | - path: /var/lib/rancher/k3s/server/manifests/aws-ccm.yaml
10 | encoding: gz+b64
11 | content: "${ccm}"
12 | owner: root:root
13 | permissions: '0644'
14 | - path: /var/lib/rancher/k3s/server/manifests/aws-ebs.yaml
15 | encoding: gz+b64
16 | content: "${ebs}"
17 | owner: root:root
18 | permissions: '0644'
19 | - path: /usr/local/bin/nodedrain.sh
20 | encoding: gz+b64
21 | content: "${nodedrain}"
22 | owner: root:root
23 | permissions: '0755'
24 | - path: /etc/systemd/system/nodedrain.service
25 | encoding: gz+b64
26 | content: "${nodedrain_service}"
27 | owner: root:root
28 | permissions: '0644'
29 | - path: /usr/local/bin/k3s.sh
30 | encoding: gz+b64
31 | content: "${k3s_install}"
32 | owner: root:root
33 | permissions: '0755'
34 | - path: /etc/rancher/k3s/registries.yaml
35 | encoding: gz+b64
36 | content: "${registries}"
37 | owner: root:root
38 | permissions: '0644'
39 | %{ for manifest in manifests }
40 | - path: /var/lib/rancher/k3s/server/manifests/${manifest.name}
41 | encoding: gz+b64
42 | content: ${manifest.content}
43 | owner: root:root
44 | permissions: '0644'
45 | %{ endfor }
46 | 
-------------------------------------------------------------------------------- /modules/nodepool/files/download_dependencies.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -eu
3 | 
4 | K3S_VERSION="${k3s_version}"
5 | 
6 | yum_dependencies() {
7 | yum install -y \
8 | unzip \
9 | container-selinux \
10 | selinux-policy-base \
11 | k3s-selinux
12 | }
13 | 
14 | k3s() {
15 | # k3s dependencies
16 | curl -OLs "https://github.com/rancher/k3s/releases/download/$${K3S_VERSION}/{k3s,k3s-airgap-images-amd64.tar,k3s-images.txt,sha256sum-amd64.txt}"
17 | 
18 | # verify checksums; with set -eu a bare sha256sum -c failure would abort before the error below is printed
19 | if ! sha256sum -c sha256sum-amd64.txt
20 | then
21 | echo "[ERROR] checksums of k3s files do not match"
22 | exit 1
23 | fi
24 | 
25 | chmod 755 k3s*
26 | mkdir -p /var/lib/rancher/k3s/agent/images/ && mv k3s-airgap-images-amd64.tar /var/lib/rancher/k3s/agent/images/
27 | }
28 | 
29 | aws_cli() {
30 | # AWS CLI
31 | curl -sL "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o /tmp/awscliv2.zip && unzip -qq -d /tmp /tmp/awscliv2.zip && /tmp/aws/install
32 | rm -rf /tmp/aws*
33 | 
34 | # Set default region for the cli to the current instance's region
35 | /usr/local/bin/aws configure set default.region $(curl -s http://169.254.169.254/latest/meta-data/placement/region)
36 | }
37 | 
38 | rancher_yum_repo() {
39 | tee /etc/yum.repos.d/rpm.rancher.io.repo >/dev/null << EOF
40 | [rancher]
41 | name=Rancher
42 | baseurl=https://rpm.rancher.io
43 | enabled=1
44 | gpgcheck=1
45 | gpgkey=https://rpm.rancher.io/public.key
46 | EOF
47 | }
48 | 
49 | {
50 | # working directory
51 | cd /usr/local/bin
52 | 
53 | # Add official rancher yum repo
54 | rancher_yum_repo
55 | 
56 | # Install dependencies with yum
57 | yum_dependencies
58 | 
59 | # Install k3s binary and images
60 | k3s
61 | 
62 | # Install aws cli
63 | aws_cli
64 | }
-------------------------------------------------------------------------------- /modules/nodepool/files/k3s.sh: --------------------------------------------------------------------------------
1 | #!/bin/sh
2 | set -e
3 | 
4 | # Usage:
5 | # curl ... | ENV_VAR=... sh -
6 | # or
7 | # ENV_VAR=... ./install.sh
8 | #
9 | # Example:
10 | # Installing a server without traefik:
11 | # curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh -
12 | # Installing an agent to point at a server:
13 | # curl ... | K3S_TOKEN=xxx K3S_URL=https://server-url:6443 sh -
14 | #
15 | # Environment variables:
16 | # - K3S_*
17 | # Environment variables which begin with K3S_ will be preserved for the
18 | # systemd service to use. Setting K3S_URL without explicitly setting
19 | # a systemd exec command will default the command to "agent", and we
20 | # enforce that K3S_TOKEN or K3S_CLUSTER_SECRET is also set.
21 | #
22 | # - INSTALL_K3S_SKIP_DOWNLOAD
23 | # If set to true will not download k3s hash or binary.
24 | #
25 | # - INSTALL_K3S_SYMLINK
26 | # If set to 'skip' will not create symlinks, 'force' will overwrite,
27 | # default will symlink if command does not exist in path.
28 | #
29 | # - INSTALL_K3S_SKIP_ENABLE
30 | # If set to true will not enable or start k3s service.
31 | #
32 | # - INSTALL_K3S_SKIP_START
33 | # If set to true will not start k3s service.
34 | #
35 | # - INSTALL_K3S_VERSION
36 | # Version of k3s to download from github. Will attempt to download from the
37 | # stable channel if not specified.
38 | #
39 | # - INSTALL_K3S_COMMIT
40 | # Commit of k3s to download from temporary cloud storage.
41 | # * (for developer & QA use) 42 | # 43 | # - INSTALL_K3S_BIN_DIR 44 | # Directory to install k3s binary, links, and uninstall script to, or use 45 | # /usr/local/bin as the default 46 | # 47 | # - INSTALL_K3S_BIN_DIR_READ_ONLY 48 | # If set to true will not write files to INSTALL_K3S_BIN_DIR, forces 49 | # setting INSTALL_K3S_SKIP_DOWNLOAD=true 50 | # 51 | # - INSTALL_K3S_SYSTEMD_DIR 52 | # Directory to install systemd service and environment files to, or use 53 | # /etc/systemd/system as the default 54 | # 55 | # - INSTALL_K3S_EXEC or script arguments 56 | # Command with flags to use for launching k3s in the systemd service, if 57 | # the command is not specified will default to "agent" if K3S_URL is set 58 | # or "server" if not. The final systemd command resolves to a combination 59 | # of EXEC and script args ($@). 60 | # 61 | # The following commands result in the same behavior: 62 | # curl ... | INSTALL_K3S_EXEC="--disable=traefik" sh -s - 63 | # curl ... | INSTALL_K3S_EXEC="server --disable=traefik" sh -s - 64 | # curl ... | INSTALL_K3S_EXEC="server" sh -s - --disable=traefik 65 | # curl ... | sh -s - server --disable=traefik 66 | # curl ... | sh -s - --disable=traefik 67 | # 68 | # - INSTALL_K3S_NAME 69 | # Name of systemd service to create, will default from the k3s exec command 70 | # if not specified. If specified the name will be prefixed with 'k3s-'. 71 | # 72 | # - INSTALL_K3S_TYPE 73 | # Type of systemd service to create, will default from the k3s exec command 74 | # if not specified. 75 | # 76 | # - INSTALL_K3S_SELINUX_WARN 77 | # If set to true will continue if k3s-selinux policy is not found. 78 | # 79 | # - INSTALL_K3S_CHANNEL_URL 80 | # Channel URL for fetching k3s download URL. 81 | # Defaults to 'https://update.k3s.io/v1-release/channels'. 82 | # 83 | # - INSTALL_K3S_CHANNEL 84 | # Channel to use for fetching k3s download URL. 85 | # Defaults to 'stable'. 
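#
# --- note on how this repo invokes this script ---
# This module never fetches this script remotely: bootstrap.sh ships it to the
# node via cloud-config as /usr/local/bin/k3s.sh, exports
# INSTALL_K3S_SKIP_DOWNLOAD=true, and pipes the local copy to sh. A hedged,
# illustrative sketch of that invocation -- the literal flag values below are
# placeholders; the real argument list is assembled at boot from the state.env
# file pulled out of S3:
#
#   export K3S_KUBECONFIG_MODE="0644"
#   export INSTALL_K3S_SKIP_DOWNLOAD=true
#   cat /usr/local/bin/k3s.sh | sh -s - server --token <token> --datastore-endpoint <endpoint> --tls-san <lb-dns>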
86 | 87 | GITHUB_URL=https://github.com/rancher/k3s/releases 88 | STORAGE_URL=https://storage.googleapis.com/k3s-ci-builds 89 | DOWNLOADER= 90 | 91 | # --- helper functions for logs --- 92 | info() 93 | { 94 | echo '[INFO] ' "$@" 95 | } 96 | warn() 97 | { 98 | echo '[WARN] ' "$@" >&2 99 | } 100 | fatal() 101 | { 102 | echo '[ERROR] ' "$@" >&2 103 | exit 1 104 | } 105 | 106 | # --- fatal if no systemd or openrc --- 107 | verify_system() { 108 | if [ -x /sbin/openrc-run ]; then 109 | HAS_OPENRC=true 110 | return 111 | fi 112 | if [ -d /run/systemd ]; then 113 | HAS_SYSTEMD=true 114 | return 115 | fi 116 | fatal 'Can not find systemd or openrc to use as a process supervisor for k3s' 117 | } 118 | 119 | # --- add quotes to command arguments --- 120 | quote() { 121 | for arg in "$@"; do 122 | printf '%s\n' "$arg" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/'/" 123 | done 124 | } 125 | 126 | # --- add indentation and trailing slash to quoted args --- 127 | quote_indent() { 128 | printf ' \\\n' 129 | for arg in "$@"; do 130 | printf '\t%s \\\n' "$(quote "$arg")" 131 | done 132 | } 133 | 134 | # --- escape most punctuation characters, except quotes, forward slash, and space --- 135 | escape() { 136 | printf '%s' "$@" | sed -e 's/\([][!#$%&()*;<=>?\_`{|}]\)/\\\1/g;' 137 | } 138 | 139 | # --- escape double quotes --- 140 | escape_dq() { 141 | printf '%s' "$@" | sed -e 's/"/\\"/g' 142 | } 143 | 144 | # --- ensures $K3S_URL is empty or begins with https://, exiting fatally otherwise --- 145 | verify_k3s_url() { 146 | case "${K3S_URL}" in 147 | "") 148 | ;; 149 | https://*) 150 | ;; 151 | *) 152 | fatal "Only https:// URLs are supported for K3S_URL (have ${K3S_URL})" 153 | ;; 154 | esac 155 | } 156 | 157 | # --- define needed environment variables --- 158 | setup_env() { 159 | # --- use command args if passed or create default --- 160 | case "$1" in 161 | # --- if we only have flags discover if command should be server or agent --- 162 | (-*|"") 163 | if [ -z "${K3S_URL}" ]; then 164 | CMD_K3S=server 165 | else 166 | if [ -z "${K3S_TOKEN}" ] && [ -z "${K3S_CLUSTER_SECRET}" ]; then 167 | fatal "Defaulted k3s exec command to 'agent' because K3S_URL is defined, but K3S_TOKEN or K3S_CLUSTER_SECRET is not defined." 
168 | fi
169 | CMD_K3S=agent
170 | fi
171 | ;;
172 | # --- command is provided ---
173 | (*)
174 | CMD_K3S=$1
175 | shift
176 | ;;
177 | esac
178 | 
179 | verify_k3s_url
180 | 
181 | CMD_K3S_EXEC="${CMD_K3S}$(quote_indent "$@")"
182 | 
183 | # --- use systemd name if defined or create default ---
184 | if [ -n "${INSTALL_K3S_NAME}" ]; then
185 | SYSTEM_NAME=k3s-${INSTALL_K3S_NAME}
186 | else
187 | if [ "${CMD_K3S}" = server ]; then
188 | SYSTEM_NAME=k3s
189 | else
190 | SYSTEM_NAME=k3s-${CMD_K3S}
191 | fi
192 | fi
193 | 
194 | # --- check for invalid characters in system name ---
195 | valid_chars=$(printf '%s' "${SYSTEM_NAME}" | sed -e 's/[][!#$%&()*;<=>?\_`{|}/[:space:]]/^/g;' )
196 | if [ "${SYSTEM_NAME}" != "${valid_chars}" ]; then
197 | invalid_chars=$(printf '%s' "${valid_chars}" | sed -e 's/[^^]/ /g')
198 | fatal "Invalid characters for system name:
199 | ${SYSTEM_NAME}
200 | ${invalid_chars}"
201 | fi
202 | 
203 | # --- use sudo if we are not already root ---
204 | SUDO=sudo
205 | if [ $(id -u) -eq 0 ]; then
206 | SUDO=
207 | fi
208 | 
209 | # --- use systemd type if defined or create default ---
210 | if [ -n "${INSTALL_K3S_TYPE}" ]; then
211 | SYSTEMD_TYPE=${INSTALL_K3S_TYPE}
212 | else
213 | if [ "${CMD_K3S}" = server ]; then
214 | SYSTEMD_TYPE=notify
215 | else
216 | SYSTEMD_TYPE=exec
217 | fi
218 | fi
219 | 
220 | # --- use binary install directory if defined or create default ---
221 | if [ -n "${INSTALL_K3S_BIN_DIR}" ]; then
222 | BIN_DIR=${INSTALL_K3S_BIN_DIR}
223 | else
224 | BIN_DIR=/usr/local/bin
225 | fi
226 | 
227 | # --- use systemd directory if defined or create default ---
228 | if [ -n "${INSTALL_K3S_SYSTEMD_DIR}" ]; then
229 | SYSTEMD_DIR="${INSTALL_K3S_SYSTEMD_DIR}"
230 | else
231 | SYSTEMD_DIR=/etc/systemd/system
232 | fi
233 | 
234 | # --- set related files from system name ---
235 | SERVICE_K3S=${SYSTEM_NAME}.service
236 | UNINSTALL_K3S_SH=${UNINSTALL_K3S_SH:-${BIN_DIR}/${SYSTEM_NAME}-uninstall.sh}
237 | KILLALL_K3S_SH=${KILLALL_K3S_SH:-${BIN_DIR}/k3s-killall.sh}
238 | 
239 | # --- use service or environment location depending on systemd/openrc ---
240 | if [ "${HAS_SYSTEMD}" = true ]; then
241 | FILE_K3S_SERVICE=${SYSTEMD_DIR}/${SERVICE_K3S}
242 | FILE_K3S_ENV=${SYSTEMD_DIR}/${SERVICE_K3S}.env
243 | elif [ "${HAS_OPENRC}" = true ]; then
244 | $SUDO mkdir -p /etc/rancher/k3s
245 | FILE_K3S_SERVICE=/etc/init.d/${SYSTEM_NAME}
246 | FILE_K3S_ENV=/etc/rancher/k3s/${SYSTEM_NAME}.env
247 | fi
248 | 
249 | # --- get hash of config & exec for currently installed k3s ---
250 | PRE_INSTALL_HASHES=$(get_installed_hashes)
251 | 
252 | # --- if bin directory is read only skip download ---
253 | if [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ]; then
254 | INSTALL_K3S_SKIP_DOWNLOAD=true
255 | fi
256 | 
257 | # --- setup channel values
258 | INSTALL_K3S_CHANNEL_URL=${INSTALL_K3S_CHANNEL_URL:-'https://update.k3s.io/v1-release/channels'}
259 | INSTALL_K3S_CHANNEL=${INSTALL_K3S_CHANNEL:-'stable'}
260 | }
261 | 
262 | # --- check if skip download environment variable set ---
263 | can_skip_download() {
264 | if [ "${INSTALL_K3S_SKIP_DOWNLOAD}" != true ]; then
265 | return 1
266 | fi
267 | }
268 | 
269 | # --- verify an executable k3s binary is installed ---
270 | verify_k3s_is_executable() {
271 | if [ !
-x ${BIN_DIR}/k3s ]; then
272 | fatal "Executable k3s binary not found at ${BIN_DIR}/k3s"
273 | fi
274 | }
275 | 
276 | # --- set arch and suffix, fatal if architecture not supported ---
277 | setup_verify_arch() {
278 | if [ -z "$ARCH" ]; then
279 | ARCH=$(uname -m)
280 | fi
281 | case $ARCH in
282 | amd64)
283 | ARCH=amd64
284 | SUFFIX=
285 | ;;
286 | x86_64)
287 | ARCH=amd64
288 | SUFFIX=
289 | ;;
290 | arm64)
291 | ARCH=arm64
292 | SUFFIX=-${ARCH}
293 | ;;
294 | aarch64)
295 | ARCH=arm64
296 | SUFFIX=-${ARCH}
297 | ;;
298 | arm*)
299 | ARCH=arm
300 | SUFFIX=-${ARCH}hf
301 | ;;
302 | *)
303 | fatal "Unsupported architecture $ARCH"
304 | esac
305 | }
306 | 
307 | # --- verify existence of network downloader executable ---
308 | verify_downloader() {
309 | # Return failure if it doesn't exist or is not executable
310 | [ -x "$(which $1)" ] || return 1
311 | 
312 | # Set verified executable as our downloader program and return success
313 | DOWNLOADER=$1
314 | return 0
315 | }
316 | 
317 | # --- create temporary directory and cleanup when done ---
318 | setup_tmp() {
319 | TMP_DIR=$(mktemp -d -t k3s-install.XXXXXXXXXX)
320 | TMP_HASH=${TMP_DIR}/k3s.hash
321 | TMP_BIN=${TMP_DIR}/k3s.bin
322 | cleanup() {
323 | code=$?
324 | set +e
325 | trap - EXIT
326 | rm -rf ${TMP_DIR}
327 | exit $code
328 | }
329 | trap cleanup INT EXIT
330 | }
331 | 
332 | # --- use desired k3s version if defined or find version from channel ---
333 | get_release_version() {
334 | if [ -n "${INSTALL_K3S_COMMIT}" ]; then
335 | VERSION_K3S="commit ${INSTALL_K3S_COMMIT}"
336 | elif [ -n "${INSTALL_K3S_VERSION}" ]; then
337 | VERSION_K3S=${INSTALL_K3S_VERSION}
338 | else
339 | info "Finding release for channel ${INSTALL_K3S_CHANNEL}"
340 | version_url="${INSTALL_K3S_CHANNEL_URL}/${INSTALL_K3S_CHANNEL}"
341 | case $DOWNLOADER in
342 | curl)
343 | VERSION_K3S=$(curl -w '%{url_effective}' -L -s -S ${version_url} -o /dev/null | sed -e 's|.*/||')
344 | ;;
345 | wget)
346 | VERSION_K3S=$(wget -SqO /dev/null ${version_url} 2>&1 | grep -i Location | sed -e 's|.*/||')
347 | ;;
348 | *)
349 | fatal "Incorrect downloader executable '$DOWNLOADER'"
350 | ;;
351 | esac
352 | fi
353 | info "Using ${VERSION_K3S} as release"
354 | }
355 | 
356 | # --- download from github url ---
357 | download() {
358 | [ $# -eq 2 ] || fatal 'download needs exactly 2 arguments'
359 | 
360 | case $DOWNLOADER in
361 | curl)
362 | curl -o $1 -sfL $2
363 | ;;
364 | wget)
365 | wget -qO $1 $2
366 | ;;
367 | *)
368 | fatal "Incorrect executable '$DOWNLOADER'"
369 | ;;
370 | esac
371 | 
372 | # Abort if download command failed
373 | [ $?
-eq 0 ] || fatal 'Download failed' 374 | } 375 | 376 | # --- download hash from github url --- 377 | download_hash() { 378 | if [ -n "${INSTALL_K3S_COMMIT}" ]; then 379 | HASH_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT}.sha256sum 380 | else 381 | HASH_URL=${GITHUB_URL}/download/${VERSION_K3S}/sha256sum-${ARCH}.txt 382 | fi 383 | info "Downloading hash ${HASH_URL}" 384 | download ${TMP_HASH} ${HASH_URL} 385 | HASH_EXPECTED=$(grep " k3s${SUFFIX}$" ${TMP_HASH}) 386 | HASH_EXPECTED=${HASH_EXPECTED%%[[:blank:]]*} 387 | } 388 | 389 | # --- check hash against installed version --- 390 | installed_hash_matches() { 391 | if [ -x ${BIN_DIR}/k3s ]; then 392 | HASH_INSTALLED=$(sha256sum ${BIN_DIR}/k3s) 393 | HASH_INSTALLED=${HASH_INSTALLED%%[[:blank:]]*} 394 | if [ "${HASH_EXPECTED}" = "${HASH_INSTALLED}" ]; then 395 | return 396 | fi 397 | fi 398 | return 1 399 | } 400 | 401 | # --- download binary from github url --- 402 | download_binary() { 403 | if [ -n "${INSTALL_K3S_COMMIT}" ]; then 404 | BIN_URL=${STORAGE_URL}/k3s${SUFFIX}-${INSTALL_K3S_COMMIT} 405 | else 406 | BIN_URL=${GITHUB_URL}/download/${VERSION_K3S}/k3s${SUFFIX} 407 | fi 408 | info "Downloading binary ${BIN_URL}" 409 | download ${TMP_BIN} ${BIN_URL} 410 | } 411 | 412 | # --- verify downloaded binary hash --- 413 | verify_binary() { 414 | info "Verifying binary download" 415 | HASH_BIN=$(sha256sum ${TMP_BIN}) 416 | HASH_BIN=${HASH_BIN%%[[:blank:]]*} 417 | if [ "${HASH_EXPECTED}" != "${HASH_BIN}" ]; then 418 | fatal "Download sha256 does not match ${HASH_EXPECTED}, got ${HASH_BIN}" 419 | fi 420 | } 421 | 422 | # --- setup permissions and move binary to system directory --- 423 | setup_binary() { 424 | chmod 755 ${TMP_BIN} 425 | info "Installing k3s to ${BIN_DIR}/k3s" 426 | $SUDO chown root:root ${TMP_BIN} 427 | $SUDO mv -f ${TMP_BIN} ${BIN_DIR}/k3s 428 | } 429 | 430 | # --- setup selinux policy --- 431 | setup_selinux() { 432 | policy_hint="please install: 433 | yum install -y container-selinux selinux-policy-base 434 | rpm -i https://rpm.rancher.io/k3s-selinux-0.1.1-rc1.el7.noarch.rpm 435 | " 436 | policy_error=fatal 437 | if [ "$INSTALL_K3S_SELINUX_WARN" = true ]; then 438 | policy_error=warn 439 | fi 440 | 441 | if ! $SUDO chcon -u system_u -r object_r -t container_runtime_exec_t ${BIN_DIR}/k3s >/dev/null 2>&1; then 442 | if $SUDO grep '^\s*SELINUX=enforcing' /etc/selinux/config >/dev/null 2>&1; then 443 | $policy_error "Failed to apply container_runtime_exec_t to ${BIN_DIR}/k3s, ${policy_hint}" 444 | fi 445 | else 446 | if [ ! 
-f /usr/share/selinux/packages/k3s.pp ]; then
447 | $policy_error "Failed to find the k3s-selinux policy, ${policy_hint}"
448 | fi
449 | fi
450 | }
451 | 
452 | # --- download and verify k3s ---
453 | download_and_verify() {
454 | if can_skip_download; then
455 | info 'Skipping k3s download and verify'
456 | verify_k3s_is_executable
457 | return
458 | fi
459 | 
460 | setup_verify_arch
461 | verify_downloader curl || verify_downloader wget || fatal 'Can not find curl or wget for downloading files'
462 | setup_tmp
463 | get_release_version
464 | download_hash
465 | 
466 | if installed_hash_matches; then
467 | info 'Skipping binary download, installed k3s matches hash'
468 | return
469 | fi
470 | 
471 | download_binary
472 | verify_binary
473 | setup_binary
474 | }
475 | 
476 | # --- add additional utility links ---
477 | create_symlinks() {
478 | [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
479 | [ "${INSTALL_K3S_SYMLINK}" = skip ] && return
480 | 
481 | for cmd in kubectl crictl ctr; do
482 | if [ ! -e ${BIN_DIR}/${cmd} ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
483 | which_cmd=$(which ${cmd} 2>/dev/null || true)
484 | if [ -z "${which_cmd}" ] || [ "${INSTALL_K3S_SYMLINK}" = force ]; then
485 | info "Creating ${BIN_DIR}/${cmd} symlink to k3s"
486 | $SUDO ln -sf k3s ${BIN_DIR}/${cmd}
487 | else
488 | info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, command exists in PATH at ${which_cmd}"
489 | fi
490 | else
491 | info "Skipping ${BIN_DIR}/${cmd} symlink to k3s, already exists"
492 | fi
493 | done
494 | }
495 | 
496 | # --- create killall script ---
497 | create_killall() {
498 | [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = true ] && return
499 | info "Creating killall script ${KILLALL_K3S_SH}"
500 | $SUDO tee ${KILLALL_K3S_SH} >/dev/null << \EOF
501 | #!/bin/sh
502 | [ $(id -u) -eq 0 ] || exec sudo $0 $@
503 | 
504 | for bin in /var/lib/rancher/k3s/data/**/bin/; do
505 | [ -d $bin ] && export PATH=$PATH:$bin:$bin/aux
506 | done
507 | 
508 | set -x
509 | 
510 | for service in /etc/systemd/system/k3s*.service; do
511 | [ -s $service ] && systemctl stop $(basename $service)
512 | done
513 | 
514 | for service in /etc/init.d/k3s*; do
515 | [ -x $service ] && $service stop
516 | done
517 | 
518 | pschildren() {
519 | ps -e -o ppid= -o pid= | \
520 | sed -e 's/^\s*//g; s/\s\s*/\t/g;' | \
521 | grep -w "^$1" | \
522 | cut -f2
523 | }
524 | 
525 | pstree() {
526 | for pid in $@; do
527 | echo $pid
528 | for child in $(pschildren $pid); do
529 | pstree $child
530 | done
531 | done
532 | }
533 | 
534 | killtree() {
535 | kill -9 $(
536 | { set +x; } 2>/dev/null;
537 | pstree $@;
538 | set -x;
539 | ) 2>/dev/null
540 | }
541 | 
542 | getshims() {
543 | ps -e -o pid= -o args= | sed -e 's/^ *//; s/\s\s*/\t/;' | grep -w 'k3s/data/[^/]*/bin/containerd-shim' | cut -f1
544 | }
545 | 
546 | killtree $({ set +x; } 2>/dev/null; getshims; set -x)
547 | 
548 | do_unmount() {
549 | { set +x; } 2>/dev/null
550 | MOUNTS=
551 | while read ignore mount ignore; do
552 | MOUNTS="$mount\n$MOUNTS"
553 | done </proc/self/mounts
554 | MOUNTS=$(printf $MOUNTS | grep "^$1" | sort -r)
555 | if [ -n "${MOUNTS}" ]; then
556 | set -x
557 | umount ${MOUNTS}
558 | else
559 | set -x
560 | fi
561 | }
562 | do_unmount '/run/k3s'
563 | do_unmount '/var/lib/rancher/k3s'
564 | do_unmount '/var/lib/kubelet/pods'
565 | do_unmount '/run/netns/cni-'
566 | # Remove CNI namespaces
567 | ip netns show 2>/dev/null | grep cni- | xargs -r -t -n 1 ip netns delete
568 | # Delete network interface(s) that match 'master cni0'
569 | ip link show 2>/dev/null | grep 'master cni0' | while read ignore iface ignore; do
570 | iface=${iface%%@*}
571 | [ -z "$iface" ] || ip link delete $iface
572 | done
573 | ip link delete cni0
574 | ip link delete flannel.1
575 | rm -rf /var/lib/cni/
576 | iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore
577 | EOF
578 | $SUDO chmod 755 ${KILLALL_K3S_SH}
579 | $SUDO chown root:root ${KILLALL_K3S_SH}
580 | }
581 | 
582 | # --- create uninstall script ---
583 | create_uninstall() {
584 | [ "${INSTALL_K3S_BIN_DIR_READ_ONLY}" = 
true ] && return 585 | info "Creating uninstall script ${UNINSTALL_K3S_SH}" 586 | $SUDO tee ${UNINSTALL_K3S_SH} >/dev/null << EOF 587 | #!/bin/sh 588 | set -x 589 | [ \$(id -u) -eq 0 ] || exec sudo \$0 \$@ 590 | 591 | ${KILLALL_K3S_SH} 592 | 593 | if which systemctl; then 594 | systemctl disable ${SYSTEM_NAME} 595 | systemctl reset-failed ${SYSTEM_NAME} 596 | systemctl daemon-reload 597 | fi 598 | if which rc-update; then 599 | rc-update delete ${SYSTEM_NAME} default 600 | fi 601 | 602 | rm -f ${FILE_K3S_SERVICE} 603 | rm -f ${FILE_K3S_ENV} 604 | 605 | remove_uninstall() { 606 | rm -f ${UNINSTALL_K3S_SH} 607 | } 608 | trap remove_uninstall EXIT 609 | 610 | if (ls ${SYSTEMD_DIR}/k3s*.service || ls /etc/init.d/k3s*) >/dev/null 2>&1; then 611 | set +x; echo 'Additional k3s services installed, skipping uninstall of k3s'; set -x 612 | exit 613 | fi 614 | 615 | for cmd in kubectl crictl ctr; do 616 | if [ -L ${BIN_DIR}/\$cmd ]; then 617 | rm -f ${BIN_DIR}/\$cmd 618 | fi 619 | done 620 | 621 | rm -rf /etc/rancher/k3s 622 | rm -rf /run/k3s 623 | rm -rf /run/flannel 624 | rm -rf /var/lib/rancher/k3s 625 | rm -rf /var/lib/kubelet 626 | rm -f ${BIN_DIR}/k3s 627 | rm -f ${KILLALL_K3S_SH} 628 | EOF 629 | $SUDO chmod 755 ${UNINSTALL_K3S_SH} 630 | $SUDO chown root:root ${UNINSTALL_K3S_SH} 631 | } 632 | 633 | # --- disable current service if loaded -- 634 | systemd_disable() { 635 | $SUDO rm -f /etc/systemd/system/${SERVICE_K3S} || true 636 | $SUDO rm -f /etc/systemd/system/${SERVICE_K3S}.env || true 637 | $SUDO systemctl disable ${SYSTEM_NAME} >/dev/null 2>&1 || true 638 | } 639 | 640 | # --- capture current env and create file containing k3s_ variables --- 641 | create_env_file() { 642 | info "env: Creating environment file ${FILE_K3S_ENV}" 643 | UMASK=$(umask) 644 | umask 0377 645 | env | grep '^K3S_' | $SUDO tee ${FILE_K3S_ENV} >/dev/null 646 | env | egrep -i '^(NO|HTTP|HTTPS)_PROXY' | $SUDO tee -a ${FILE_K3S_ENV} >/dev/null 647 | umask $UMASK 648 | } 649 | 650 | # --- write systemd service file --- 651 | create_systemd_service_file() { 652 | info "systemd: Creating service file ${FILE_K3S_SERVICE}" 653 | $SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF 654 | [Unit] 655 | Description=Lightweight Kubernetes 656 | Documentation=https://k3s.io 657 | Wants=network-online.target 658 | 659 | [Install] 660 | WantedBy=multi-user.target 661 | 662 | [Service] 663 | Type=${SYSTEMD_TYPE} 664 | EnvironmentFile=${FILE_K3S_ENV} 665 | KillMode=process 666 | Delegate=yes 667 | # Having non-zero Limit*s causes performance problems due to accounting overhead 668 | # in the kernel. We recommend using cgroups to do container-local accounting. 
669 | LimitNOFILE=1048576 670 | LimitNPROC=infinity 671 | LimitCORE=infinity 672 | TasksMax=infinity 673 | TimeoutStartSec=0 674 | Restart=always 675 | RestartSec=5s 676 | ExecStartPre=-/sbin/modprobe br_netfilter 677 | ExecStartPre=-/sbin/modprobe overlay 678 | ExecStart=${BIN_DIR}/k3s \\ 679 | ${CMD_K3S_EXEC} 680 | 681 | EOF 682 | } 683 | 684 | # --- write openrc service file --- 685 | create_openrc_service_file() { 686 | LOG_FILE=/var/log/${SYSTEM_NAME}.log 687 | 688 | info "openrc: Creating service file ${FILE_K3S_SERVICE}" 689 | $SUDO tee ${FILE_K3S_SERVICE} >/dev/null << EOF 690 | #!/sbin/openrc-run 691 | 692 | depend() { 693 | after network-online 694 | want cgroups 695 | } 696 | 697 | start_pre() { 698 | rm -f /tmp/k3s.* 699 | } 700 | 701 | supervisor=supervise-daemon 702 | name=${SYSTEM_NAME} 703 | command="${BIN_DIR}/k3s" 704 | command_args="$(escape_dq "${CMD_K3S_EXEC}") 705 | >>${LOG_FILE} 2>&1" 706 | 707 | output_log=${LOG_FILE} 708 | error_log=${LOG_FILE} 709 | 710 | pidfile="/var/run/${SYSTEM_NAME}.pid" 711 | respawn_delay=5 712 | respawn_max=0 713 | 714 | set -o allexport 715 | if [ -f /etc/environment ]; then source /etc/environment; fi 716 | if [ -f ${FILE_K3S_ENV} ]; then source ${FILE_K3S_ENV}; fi 717 | set +o allexport 718 | EOF 719 | $SUDO chmod 0755 ${FILE_K3S_SERVICE} 720 | 721 | $SUDO tee /etc/logrotate.d/${SYSTEM_NAME} >/dev/null << EOF 722 | ${LOG_FILE} { 723 | missingok 724 | notifempty 725 | copytruncate 726 | } 727 | EOF 728 | } 729 | 730 | # --- write systemd or openrc service file --- 731 | create_service_file() { 732 | [ "${HAS_SYSTEMD}" = true ] && create_systemd_service_file 733 | [ "${HAS_OPENRC}" = true ] && create_openrc_service_file 734 | return 0 735 | } 736 | 737 | # --- get hashes of the current k3s bin and service files 738 | get_installed_hashes() { 739 | $SUDO sha256sum ${BIN_DIR}/k3s ${FILE_K3S_SERVICE} ${FILE_K3S_ENV} 2>&1 || true 740 | } 741 | 742 | # --- enable and start systemd service --- 743 | systemd_enable() { 744 | info "systemd: Enabling ${SYSTEM_NAME} unit" 745 | $SUDO systemctl enable ${FILE_K3S_SERVICE} >/dev/null 746 | $SUDO systemctl daemon-reload >/dev/null 747 | } 748 | 749 | systemd_start() { 750 | info "systemd: Starting ${SYSTEM_NAME}" 751 | $SUDO systemctl restart ${SYSTEM_NAME} 752 | } 753 | 754 | # --- enable and start openrc service --- 755 | openrc_enable() { 756 | info "openrc: Enabling ${SYSTEM_NAME} service for default runlevel" 757 | $SUDO rc-update add ${SYSTEM_NAME} default >/dev/null 758 | } 759 | 760 | openrc_start() { 761 | info "openrc: Starting ${SYSTEM_NAME}" 762 | $SUDO ${FILE_K3S_SERVICE} restart 763 | } 764 | 765 | # --- startup systemd or openrc service --- 766 | service_enable_and_start() { 767 | [ "${INSTALL_K3S_SKIP_ENABLE}" = true ] && return 768 | 769 | [ "${HAS_SYSTEMD}" = true ] && systemd_enable 770 | [ "${HAS_OPENRC}" = true ] && openrc_enable 771 | 772 | [ "${INSTALL_K3S_SKIP_START}" = true ] && return 773 | 774 | POST_INSTALL_HASHES=$(get_installed_hashes) 775 | if [ "${PRE_INSTALL_HASHES}" = "${POST_INSTALL_HASHES}" ]; then 776 | info 'No change detected so skipping service start' 777 | return 778 | fi 779 | 780 | [ "${HAS_SYSTEMD}" = true ] && systemd_start 781 | [ "${HAS_OPENRC}" = true ] && openrc_start 782 | return 0 783 | } 784 | 785 | # --- re-evaluate args to include env command --- 786 | eval set -- $(escape "${INSTALL_K3S_EXEC}") $(quote "$@") 787 | 788 | # --- run the install process -- 789 | { 790 | verify_system 791 | setup_env "$@" 792 | download_and_verify 793 | setup_selinux 
794 | create_symlinks 795 | create_killall 796 | create_uninstall 797 | systemd_disable 798 | create_env_file 799 | create_service_file 800 | service_enable_and_start 801 | } -------------------------------------------------------------------------------- /modules/nodepool/files/nodedrain.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=node drain 3 | After=k3s.service systemd-resolved.service 4 | 5 | [Service] 6 | Type=oneshot 7 | RemainAfterExit=true 8 | ExecStart=/bin/echo "Node drain reporting for duty" 9 | TimeoutStopSec=120s 10 | ExecStop=/bin/bash /usr/local/bin/nodedrain.sh 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /modules/nodepool/files/nodedrain.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "Draining node" 3 | 4 | /usr/local/bin/kubectl drain $(hostname -s) --ignore-daemonsets --delete-local-data --timeout=100s --force 5 | 6 | echo "Deleting node" 7 | /usr/local/bin/kubectl delete node $(hostname -s) 8 | 9 | sleep 2 10 | echo "Verifying node is deleted" 11 | /usr/local/bin/kubectl get node $(hostname -s) 12 | status=$? 13 | count=0 14 | while [ "${status}" -eq 0 ] 15 | do 16 | sleep 2 17 | ((count++)) 18 | if [ "${count}" -ge 5 ] 19 | then 20 | echo "Node is still in the cluster, using --force and exiting" 21 | /usr/local/bin/kubectl delete node $(hostname -s) --force --grace-period=0 22 | break 23 | fi 24 | /usr/local/bin/kubectl get node $(hostname -s) 25 | status=$? 26 | done -------------------------------------------------------------------------------- /modules/nodepool/files/secrets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eu 3 | 4 | mkdir -p /var/lib/rancher/k3s 5 | 6 | until /usr/local/bin/aws s3 cp s3://${state_bucket}/${state_key} /var/lib/rancher/k3s/state.env; do 7 | echo "Waiting for ${state_key} to exist within ${state_bucket}" 8 | sleep 10 9 | done 10 | -------------------------------------------------------------------------------- /modules/nodepool/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | is_server = var.k3s_url == "" ? true : false 3 | 4 | tags = merge({ 5 | "Name" = "${var.cluster}-${var.name}-nodepool", 6 | "kubernetes.io/cluster/${var.cluster}" = "owned" 7 | }, var.tags) 8 | } 9 | 10 | resource "aws_security_group" "this" { 11 | name = "${var.name}-k3s-nodepool" 12 | vpc_id = var.vpc_id 13 | description = "${var.name} node pool" 14 | tags = local.tags 15 | } 16 | 17 | # 18 | # Launch template 19 | # 20 | resource "aws_launch_template" "this" { 21 | name = "${var.name}-k3s-nodepool" 22 | image_id = var.ami 23 | instance_type = var.instance_type 24 | user_data = data.template_cloudinit_config.this.rendered 25 | vpc_security_group_ids = concat([aws_security_group.this.id], [var.cluster_security_group], var.extra_security_groups) 26 | 27 | block_device_mappings { 28 | device_name = "/dev/sda1" 29 | ebs { 30 | volume_size = var.block_device_mappings.size 31 | encrypted = var.block_device_mappings.encrypted 32 | delete_on_termination = true 33 | } 34 | } 35 | 36 | dynamic "iam_instance_profile" { 37 | for_each = var.iam_instance_profile != "" ? 
[var.iam_instance_profile] : [] 38 | content { 39 | name = iam_instance_profile.value 40 | } 41 | } 42 | 43 | tags = local.tags 44 | } 45 | 46 | # 47 | # Autoscaling group 48 | # 49 | resource "aws_autoscaling_group" "this" { 50 | name = "${var.name}-k3s-nodepool" 51 | vpc_zone_identifier = var.subnets 52 | 53 | min_size = var.asg.min 54 | max_size = var.asg.max 55 | desired_capacity = var.asg.desired 56 | 57 | # Health check and target groups dependent on whether we're a server or not (identified via k3s_url) 58 | health_check_type = local.is_server ? "ELB" : "EC2" 59 | load_balancers = local.is_server ? [var.controlplane_loadbalancer] : [] 60 | 61 | dynamic "launch_template" { 62 | for_each = var.spot ? [] : ["spot"] 63 | 64 | content { 65 | id = aws_launch_template.this.id 66 | version = "$Latest" 67 | } 68 | } 69 | 70 | dynamic "mixed_instances_policy" { 71 | for_each = var.spot ? ["spot"] : [] 72 | 73 | content { 74 | instances_distribution { 75 | on_demand_base_capacity = 0 76 | on_demand_percentage_above_base_capacity = 0 77 | } 78 | 79 | launch_template { 80 | launch_template_specification { 81 | launch_template_id = aws_launch_template.this.id 82 | launch_template_name = aws_launch_template.this.name 83 | version = "$Latest" 84 | } 85 | } 86 | } 87 | } 88 | 89 | dynamic "tag" { 90 | for_each = local.tags 91 | 92 | content { 93 | key = tag.key 94 | value = tag.value 95 | propagate_at_launch = true 96 | } 97 | } 98 | } 99 | -------------------------------------------------------------------------------- /modules/nodepool/outputs.tf: -------------------------------------------------------------------------------- 1 | output "autoscaling_group_name" { 2 | value = aws_autoscaling_group.this.name 3 | } 4 | 5 | output "security_group" { 6 | value = aws_security_group.this.id 7 | } 8 | -------------------------------------------------------------------------------- /modules/nodepool/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | description = "Name of the node pool, to be appended to all resources" 4 | } 5 | 6 | variable "cluster" { 7 | type = string 8 | description = "Name of the cluster the nodepool belongs to, sourced from k3s module" 9 | } 10 | 11 | variable "vpc_id" { 12 | type = string 13 | description = "VPC ID the nodepool is deployed to" 14 | } 15 | 16 | variable "subnets" { 17 | type = list(string) 18 | description = "List of subnet ids the nodepool is deployed to" 19 | } 20 | 21 | variable "ami" { 22 | type = string 23 | description = "AMI of all EC2 instances within the nodepool" 24 | } 25 | 26 | variable "ssh_authorized_keys" { 27 | type = list(string) 28 | default = [] 29 | description = "List of public keys that are added to nodes authorized hosts. 
This is not required for cluster bootstrap, and should only be allowed for development environments where ssh access is beneficial"
30 | }
31 | 
32 | variable "auto_deployed_manifests" {
33 | type = list(object({
34 | name = string
35 | content = string
36 | }))
37 | default = []
38 | }
39 | 
40 | variable "extra_security_groups" {
41 | type = list(string)
42 | default = []
43 | }
44 | 
45 | variable "instance_type" {
46 | type = string
47 | default = "t3.medium"
48 | }
49 | 
50 | variable "tags" {
51 | type = map(string)
52 | default = {}
53 | }
54 | 
55 | variable "iam_instance_profile" {
56 | type = string
57 | default = null
58 | }
59 | 
60 | variable "spot" {
61 | type = bool
62 | default = false
63 | }
64 | 
65 | variable "block_device_mappings" {
66 | type = object({
67 | size = number
68 | encrypted = bool
69 | })
70 | 
71 | default = {
72 | size = 32
73 | encrypted = true
74 | }
75 | }
76 | 
77 | variable "asg" {
78 | type = object({
79 | min = number
80 | max = number
81 | desired = number
82 | })
83 | 
84 | default = {
85 | min = 1
86 | max = 2
87 | desired = 1
88 | }
89 | 
90 | description = "Autoscaling group scale, requires min, max, and desired"
91 | }
92 | 
93 | variable "controlplane_loadbalancer" {
94 | type = string
95 | default = null
96 | }
97 | 
98 | variable "cluster_security_group" {
99 | type = string
100 | }
101 | 
102 | variable "shared_server_security_group" {
103 | type = string
104 | default = null
105 | }
106 | 
107 | variable "shared_agent_security_group" {
108 | type = string
109 | default = null
110 | }
111 | 
112 | variable "external_cloud_provider" {
113 | type = bool
114 | default = true
115 | description = "Toggle creation of k3s cluster with builtin cloud provider disabled and cloud-provider=external, used in conjunction with deploy_cloud_controller_manager"
116 | }
117 | 
118 | variable "deploy_cloud_controller_manager" {
119 | type = bool
120 | default = true
121 | description = "Toggle deployment of aws cloud controller manager, disable when still requiring --cloud-provider=external but deploying with custom manifests"
122 | }
123 | 
124 | variable "enable_ebs_csi_driver" {
125 | type = bool
126 | default = true
127 | description = "Toggle deployment of ebs csi driver"
128 | }
129 | 
130 | variable "state_bucket" {
131 | type = string
132 | default = null
133 | }
134 | 
135 | variable "state_key" {
136 | type = string
137 | default = "state.env"
138 | }
139 | 
140 | # 
141 | # K3S Variables
142 | # 
143 | variable "k3s_version" {
144 | type = string
145 | default = "v1.18.8+k3s1"
146 | }
147 | 
148 | variable "k3s_url" {
149 | type = string
150 | default = ""
151 | }
152 | 
153 | variable "k3s_disables" {
154 | type = list(string)
155 | default = ["traefik", "local-storage", "servicelb"]
156 | description = "k3s services to disable, defaults to traefik, local-storage, and servicelb since we're in the cloud"
157 | }
158 | 
159 | variable "k3s_tls_sans" {
160 | type = list(string)
161 | default = []
162 | }
163 | 
164 | variable "k3s_kubelet_args" {
165 | type = list(string)
166 | default = []
167 | description = "--kubelet-arg key=value"
168 | }
169 | 
170 | variable "k3s_kube_apiservers" {
171 | type = list(string)
172 | default = []
173 | description = "--kube-apiserver-arg key=value"
174 | }
175 | 
176 | variable "k3s_kube_schedulers" {
177 | type = list(string)
178 | default = []
179 | description = "--kube-scheduler-arg key=value"
180 | }
181 | 
182 | variable "k3s_kube_controller_managers" {
183 | type = list(string)
184 | default = []
185 | description = 
"--kube-controller-manager-arg key=value" 186 | } 187 | 188 | variable "k3s_kube_cloud_controller_managers" { 189 | type = list(string) 190 | default = [] 191 | description = "--kube-cloud-controller-manager-arg key=value" 192 | } 193 | 194 | variable "k3s_node_labels" { 195 | type = list(string) 196 | default = [] 197 | description = "--node-label key=value" 198 | } 199 | 200 | variable "k3s_node_taints" { 201 | type = list(string) 202 | default = [] 203 | description = "--node-taint key=value" 204 | } 205 | 206 | variable "k3s_registries" { 207 | type = string 208 | default = "" 209 | description = "k3s registries.yaml to define registry configuration, see https://rancher.com/docs/k3s/latest/en/installation/private-registry/" 210 | } 211 | 212 | # 213 | # Download urls for dependencies 214 | # Used for external dependencies that need to be pulled on boot (extremely minimal amount of dependencies) 215 | # 216 | variable "dependencies_script" { 217 | type = string 218 | default = null 219 | description = "Dependencies script responsible for any pre-node setup, overriding this overrides the default setup and requires AT LEAST the k3s binary and aws cli downloaded before proceeding" 220 | } -------------------------------------------------------------------------------- /modules/state-store/main.tf: -------------------------------------------------------------------------------- 1 | resource "aws_s3_bucket" "state" { 2 | bucket = "${var.name}-k3s-cluster" 3 | acl = "private" 4 | force_destroy = true 5 | 6 | versioning { 7 | enabled = true 8 | } 9 | 10 | server_side_encryption_configuration { 11 | rule { 12 | apply_server_side_encryption_by_default { 13 | sse_algorithm = "aws:kms" 14 | } 15 | } 16 | } 17 | 18 | tags = var.tags 19 | } 20 | -------------------------------------------------------------------------------- /modules/state-store/outputs.tf: -------------------------------------------------------------------------------- 1 | output "bucket" { 2 | value = aws_s3_bucket.state.bucket 3 | } 4 | 5 | output "arn" { 6 | value = aws_s3_bucket.state.arn 7 | } 8 | -------------------------------------------------------------------------------- /modules/state-store/variables.tf: -------------------------------------------------------------------------------- 1 | variable "name" { 2 | type = string 3 | } 4 | 5 | variable "key" { 6 | type = string 7 | default = "state.json" 8 | description = "Key in S3 bucket to store cluster state data as json" 9 | } 10 | 11 | variable "tags" { 12 | type = map(string) 13 | default = {} 14 | } -------------------------------------------------------------------------------- /outputs.tf: -------------------------------------------------------------------------------- 1 | # 2 | # Database Outputs 3 | # 4 | output "datastore_endpoint" { 5 | value = module.db.datastore_endpoint 6 | description = "Formatted output for k3s --datastore-endpoint. 
This is output for verbosity and does not need to be passed into node pools, it will be fetched from the cluster state bucket on node boot"
7 | }
8 | 
9 | # 
10 | # Controlplane Load Balancer Outputs
11 | # 
12 | output "controlplane_loadbalancer" {
13 | value = module.controlplane_lb.name
14 | description = "Name of the controlplane load balancer"
15 | }
16 | 
17 | output "tls_san" {
18 | value = module.controlplane_lb.dns
19 | description = "DNS of the control plane load balancer, used for passing --tls-san to server nodepools"
20 | }
21 | 
22 | output "url" {
23 | value = "https://${module.controlplane_lb.dns}:${module.controlplane_lb.port}"
24 | description = "Formatted load balancer url used for --server on agent node pools"
25 | }
26 | 
27 | # 
28 | # Shared Resource Outputs
29 | # 
30 | output "cluster_security_group" {
31 | value = aws_security_group.cluster.id
32 | description = "Shared cluster security group required to be passed into all node pools"
33 | }
34 | 
35 | output "shared_server_security_group" {
36 | value = aws_security_group.shared_server.id
37 | description = "Shared server security group required to be passed into all server node pools"
38 | }
39 | 
40 | output "shared_agent_security_group" {
41 | value = aws_security_group.shared_agent.id
42 | description = "Shared agent security group that can optionally be passed into agent node pools"
43 | }
44 | 
45 | # 
46 | # K3S Outputs
47 | # 
48 | output "cluster" {
49 | value = var.name
50 | description = "Name of the cluster to be passed into all node pools"
51 | }
52 | 
53 | output "token" {
54 | value = random_password.token.result
55 | description = "Token used for k3s --token registration, output for verbosity, does not need to be passed to the module, it is loaded via the S3 state bucket"
56 | }
57 | 
58 | # 
59 | # State Bucket Resources
60 | # 
61 | output "state_bucket" {
62 | value = var.state_bucket == null ? module.state[0].bucket : var.state_bucket
63 | description = "Name of the bucket used to store k3s cluster state, required to be passed into node pools"
64 | }
65 | 
66 | output "state_bucket_arn" {
67 | value = var.state_bucket == null ? module.state[0].arn : null
68 | description = "ARN of the bucket used to store k3s cluster state, if it was created. Null will be output if the module did not create the bucket."
69 | }
70 | 
71 | output "state_key" {
72 | value = aws_s3_bucket_object.state.key
73 | description = "Name of the state object used to store k3s cluster state"
74 | }
75 | 
-------------------------------------------------------------------------------- /variables.tf: --------------------------------------------------------------------------------
1 | variable "name" {
2 | type = string
3 | description = "Name of the cluster, will be prepended to cluster resources"
4 | }
5 | 
6 | variable "vpc_id" {
7 | type = string
8 | description = "VPC ID of the cluster"
9 | }
10 | 
11 | variable "subnets" {
12 | type = list(string)
13 | description = "List of subnet ids of the shared cluster resources such as load balancers and RDS. 
Generally set to private subnets" 14 | } 15 | 16 | variable "state_bucket" { 17 | type = string 18 | default = null 19 | description = "Name of existing S3 bucket to store cluster state/secrets in, will create bucket if left blank" 20 | } 21 | 22 | variable "rds_ca_cert_identifier" { 23 | type = string 24 | default = "rds-ca-2017" 25 | description = "RDS CA Certificate Identifier" 26 | } 27 | 28 | variable "tags" { 29 | type = map(string) 30 | default = {} 31 | description = "Common tags to attach to all created resources" 32 | } -------------------------------------------------------------------------------- /versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.13.0" 3 | 4 | required_providers { 5 | aws = "~> 3.0" 6 | local = "~> 1.0" 7 | tls = "~> 2.0" 8 | template = "~> 2.0" 9 | } 10 | } --------------------------------------------------------------------------------
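
For orientation, here is a minimal wiring sketch tying the root module to a server and an agent node pool, based only on the inputs and outputs defined above. The module paths follow the repo layout, but the name, AMI, VPC, and subnet IDs are illustrative assumptions, not values taken from the examples/ directory.

# Root k3s module: shared RDS datastore, controlplane CLB, and S3 state bucket.
module "k3s" {
  source = "./"

  name    = "demo"
  vpc_id  = "vpc-0123456789abcdef0"        # assumption: an existing VPC
  subnets = ["subnet-aaaa", "subnet-bbbb"] # assumption: private subnets
}

# Server node pool: leaving k3s_url at its default ("") marks the pool as servers,
# which switches the ASG health check to ELB and attaches the controlplane CLB.
module "servers" {
  source = "./modules/nodepool"

  name    = "servers"
  cluster = module.k3s.cluster
  vpc_id  = "vpc-0123456789abcdef0"
  subnets = ["subnet-aaaa", "subnet-bbbb"]
  ami     = "ami-0123456789abcdef0"        # assumption: an EL7-family AMI with yum

  cluster_security_group    = module.k3s.cluster_security_group
  controlplane_loadbalancer = module.k3s.controlplane_loadbalancer
  state_bucket              = module.k3s.state_bucket
  k3s_tls_sans              = [module.k3s.tls_san]
}

# Agent node pool: passing the formatted load balancer url as k3s_url makes the
# pool bootstrap as agents pointed at the servers.
module "agents" {
  source = "./modules/nodepool"

  name    = "agents"
  cluster = module.k3s.cluster
  vpc_id  = "vpc-0123456789abcdef0"
  subnets = ["subnet-aaaa", "subnet-bbbb"]
  ami     = "ami-0123456789abcdef0"

  cluster_security_group = module.k3s.cluster_security_group
  state_bucket           = module.k3s.state_bucket
  k3s_url                = module.k3s.url
}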