├── .gitignore
├── outputs.tf
├── variables.tf
├── scripts
│   ├── install_k8s_wrk.sh
│   └── install_k8s_msr.sh
├── README.md
└── main.tf

/.gitignore:
--------------------------------------------------------------------------------
.terraform/
.terraform.lock.hcl
terraform.tfstate.backup
terraform.tfstate
--------------------------------------------------------------------------------
/outputs.tf:
--------------------------------------------------------------------------------
output "instance_msr_public_ip" {
  description = "Public IP address of the master node"
  value       = aws_instance.ec2_instance_msr.public_ip
}

output "instance_wrks_public_ip" {
  description = "Public IP addresses of the worker nodes"
  value       = aws_instance.ec2_instance_wrk.*.public_ip
}

# output "instance_msr_private_ip" {
#   description = "Private IP address of the master node"
#   value       = aws_instance.ec2_instance_msr.private_ip
# }

# output "s3_bucket_name" {
#   description = "The S3 bucket name"
#   value       = "k8s-${random_string.s3name.result}"
# }
--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
variable "access_key" { # TODO: replace the placeholder default with your access key.
  description = "Access key for the AWS console"
  default     = "XXXXXXXXXXXXXXXXXXXXXXX"
}

variable "secret_key" { # TODO: replace the placeholder default with your secret key.
  description = "Secret key for the AWS console"
  default     = "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
}

variable "ami_key_pair_name" { # TODO: set this to your key pair name. Hint: write just the key name, without the '.pem' extension.
  default = "Latest"
}

variable "number_of_worker" {
  description = "Number of worker instances to join the cluster."
  default     = 2
}

variable "region" {
  description = "The AWS region"
  default     = "us-east-1" # If you change the region, make sure the ami_id below is still correct.
}

variable "ami_id" {
  description = "The AMI to use"
  default     = "ami-0a6b2839d44d781b2" # Ubuntu 20.04
}

variable "instance_type" {
  default = "t2.medium" # a good instance type to start a K8S lab with
}
--------------------------------------------------------------------------------
/scripts/install_k8s_wrk.sh:
--------------------------------------------------------------------------------
#!/bin/bash

######### ** FOR WORKER NODE ** #########

hostname k8s-wrk-${worker_number}
echo "k8s-wrk-${worker_number}" > /etc/hostname

export AWS_ACCESS_KEY_ID=${access_key}
export AWS_SECRET_ACCESS_KEY=${private_key}
export AWS_DEFAULT_REGION=${region}

apt update
apt install apt-transport-https ca-certificates curl software-properties-common -y
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable"

# Install Docker and the AWS CLI
apt update
apt-cache policy docker-ce
apt install docker-ce -y
apt install awscli -y

# Add the Kubernetes repositories.
# Note: the next two lines differ from the official Kubernetes guide;
# the steps described there do not work on Ubuntu 20.04.
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb https://packages.cloud.google.com/apt kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list

# Turn off swap (required by the kubelet)
swapoff -a

# Install the Kubernetes tools
apt update
apt install kubelet kubeadm kubectl -y

# Get the EC2 instance's internal IP address (the default ENI is eth0)
export ipaddr=`ip address | grep eth0 | grep inet | awk -F ' ' '{print $2}' | awk -F '/' '{print $1}'`

# kubeadm won't work until the default containerd config is removed and containerd is restarted
rm /etc/containerd/config.toml
systemctl restart containerd

# Wait so that the join command is fetched only after the master node has finished installing
sleep 1m

aws s3 cp s3://${s3buckit_name}/join_command.sh /tmp/.
chmod +x /tmp/join_command.sh
bash /tmp/join_command.sh
--------------------------------------------------------------------------------
/scripts/install_k8s_msr.sh:
--------------------------------------------------------------------------------
#!/bin/bash

######### ** FOR MASTER NODE ** #########

hostname k8s-msr-1
echo "k8s-msr-1" > /etc/hostname

export AWS_ACCESS_KEY_ID=${access_key}
export AWS_SECRET_ACCESS_KEY=${private_key}
export AWS_DEFAULT_REGION=${region}

apt update
apt install apt-transport-https ca-certificates curl software-properties-common -y
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable"

# Install Docker and the AWS CLI
apt update
apt-cache policy docker-ce
apt install docker-ce -y
apt install awscli -y

# Add the Kubernetes repositories.
# Note: the next two lines differ from the official Kubernetes guide;
# the steps described there do not work on Ubuntu 20.04.
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb https://packages.cloud.google.com/apt kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list

# Turn off swap (required by the kubelet)
swapoff -a

# Install the Kubernetes tools
apt update
apt install kubelet kubeadm kubectl -y

# Get the EC2 instance's internal IP address (the default ENI is eth0);
# kubeadm needs it to initialize the cluster. Also get the public IP,
# which is added to the API server certificate for remote access.
export ipaddr=`ip address | grep eth0 | grep inet | awk -F ' ' '{print $2}' | awk -F '/' '{print $1}'`
export pubip=`dig +short myip.opendns.com @resolver1.opendns.com`

# kubeadm init won't work until the default containerd config is removed and containerd is restarted
rm /etc/containerd/config.toml
systemctl restart containerd

# Initialize the Kubernetes cluster.
# You can replace 172.16.0.0/16 with your desired pod network.
kubeadm init --apiserver-advertise-address=$ipaddr --pod-network-cidr=172.16.0.0/16 --apiserver-cert-extra-sans=$pubip > /tmp/result.out
cat /tmp/result.out

# Extract the join command (the last two lines of the init output) and push it to the S3 bucket for the workers
tail -2 /tmp/result.out > /tmp/join_command.sh
aws s3 cp /tmp/join_command.sh s3://${s3buckit_name}

# Add .kube/config for the root account; a copy is left in /tmp for remote download
mkdir -p /root/.kube
cp -i /etc/kubernetes/admin.conf /root/.kube/config
cp -i /etc/kubernetes/admin.conf /tmp/admin.conf
chmod 755 /tmp/admin.conf

# Add the kube config for the ubuntu user
mkdir -p /home/ubuntu/.kube
cp -i /etc/kubernetes/admin.conf /home/ubuntu/.kube/config
chmod 755 /home/ubuntu/.kube/config

# To copy the kube config file to S3, uncomment the next line:
# aws s3 cp /etc/kubernetes/admin.conf s3://${s3buckit_name}

# Install Calico as the cluster pod network
curl -o /root/calico.yaml https://docs.projectcalico.org/v3.16/manifests/calico.yaml
sleep 5
kubectl --kubeconfig /root/.kube/config apply -f /root/calico.yaml
systemctl restart kubelet

# Set up kubectl autocompletion (from the kubectl cheat sheet)
source <(kubectl completion bash) # set up autocomplete in the current shell; the bash-completion package must be installed first
echo "source <(kubectl completion bash)" >> /home/ubuntu/.bashrc # add autocomplete permanently for the ubuntu user
echo "source <(kubectl completion bash)" >> /root/.bashrc # add autocomplete permanently for root
alias k=kubectl
complete -o default -F __start_kubectl k
echo "alias k=kubectl" >> /home/ubuntu/.bashrc
echo "alias k=kubectl" >> /root/.bashrc
echo "complete -o default -F __start_kubectl k" >> /home/ubuntu/.bashrc
echo "complete -o default -F __start_kubectl k" >> /root/.bashrc
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Terraform to Build a Kubernetes Cluster Using EC2 Instances
[![LinkedIn][linkedin-shield]][linkedin-url]
[![GitHub][github-shield]][github-url]

I built this project to create my own lab for a [Kubernetes](https://kubernetes.io/) cluster on the AWS cloud using EC2 instances. I found [Terraform](https://www.terraform.io) to be the best tool to create my K8S lab quickly, with one command 🚀.

![Terraform](https://i.imgur.com/PuS3rmb.png)

## Terraform Resources Used
- EC2
  - One Master Node
  - Two Worker Nodes (can be increased)
- VPC
  - Public Subnet
  - Internet Gateway
  - Route Table
  - Security Group
- S3 Bucket
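After `terraform apply` finishes, each of the resources above appears as an address in the Terraform state. A quick way to verify the lab is `terraform state list`; the expected entries below are taken from the resource names in `main.tf` (output abridged, assuming the default two workers):
```shell
terraform state list
# aws_vpc.some_custom_vpc
# aws_subnet.some_public_subnet
# aws_internet_gateway.some_ig
# aws_route_table.public_rt
# aws_security_group.k8s_sg
# aws_s3_bucket.s3buckit
# aws_instance.ec2_instance_msr
# aws_instance.ec2_instance_wrk[0]
# aws_instance.ec2_instance_wrk[1]
```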
## How Will the Kubernetes Cluster Be Built?
The goal is to build a K8S cluster with one master node and two worker nodes.
* First, the master node boots up and installs kubeadm, kubelet, kubectl, and Docker, then runs `kubeadm init` to initialize the K8S cluster.
The challenge then becomes: how do we take the join command that is printed after the cluster init and deliver it to the worker nodes so they can join the cluster 🤔?
To solve this problem I use an S3 bucket: the master extracts the join command, saves it into a file, and pushes that file to an S3 object. With that, the master node is done and ready.

* Second, the worker nodes boot up and install kubeadm, kubelet, kubectl, and Docker, then fetch the join command from the S3 bucket and execute it to join the cluster, as sketched below.
## Increase the Number of Worker Nodes
* By default there are two workers in the cluster. To increase that, open the `variables.tf` file, find the `number_of_worker` variable, and raise its default value, or override it at apply time as shown below.
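For example, using Terraform's standard `-var` flag to get four workers without editing the file:
```shell
terraform apply -var="number_of_worker=4"
```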
## Requirements Before Running
1- Make sure you have the Terraform tooling installed on your machine.

2- Add your access key, secret key, and key pair name in the `variables.tf` file.

3- Make sure your IAM user has the permissions needed to create EC2, VPC, S3, route table, security group, and internet gateway resources.

## Running the Script
Once the requirements are done, you are ready. Start by cloning the repo to your machine:
``` shell
git clone https://github.com/Ahmad-Faqehi/Terraform-Bulding-K8S.git
cd Terraform-Bulding-K8S/
```
Now execute the Terraform commands:
``` shell
terraform init
terraform plan # to show what is going to be built
terraform apply
```

## Accessing Your Cluster
* You can access your cluster by connecting to the master node over SSH; you can get the master node's public IP from the Terraform outputs. Below is an example of the SSH command:
``` shell
ssh -i <key-pair.pem> ubuntu@<master-public-ip>
```

* Another way to access the cluster is by downloading the `admin.conf` file from the master node to your machine. The command below downloads it so you can access the cluster remotely:
``` shell
scp -i <key-pair.pem> ubuntu@<master-public-ip>:/tmp/admin.conf .
```
This downloads the Kubernetes config file to your machine. Before using it, you have to replace the private IP in its `server:` line with the master node's public IP. Then you can use the following command to start accessing the cluster:
```shell
kubectl --kubeconfig ./admin.conf get nodes
```

## Removing and Destroying the Kubernetes Cluster
To destroy all the resources that were created by applying the script, just run the following command:
```shell
terraform destroy
```

[linkedin-shield]: https://img.shields.io/badge/-LinkedIn-black.svg?style=for-the-badge&logo=linkedin&colorB=555
[linkedin-url]: https://www.linkedin.com/in/saikiran-p-a0243569/
[docker-shield]: https://img.shields.io/badge/-docker-black.svg?style=for-the-badge&logo=docker&colorB=555
[docker-url]: https://hub.docker.com/u/kiran2361993
[github-shield]: https://img.shields.io/badge/-github-black.svg?style=for-the-badge&logo=github&colorB=555
[github-url]: https://github.com/saikiranpi
--------------------------------------------------------------------------------
/main.tf:
--------------------------------------------------------------------------------
provider "aws" {
  region     = var.region
  access_key = var.access_key
  secret_key = var.secret_key
}

#****** VPC Start ******#

resource "aws_vpc" "some_custom_vpc" {
  cidr_block = "10.0.0.0/16"

  tags = {
    Name = "K8S VPC"
  }
}

# Pick one random availability zone in the region for the subnet.
resource "random_shuffle" "az" {
  input        = ["${var.region}a", "${var.region}b", "${var.region}c", "${var.region}d", "${var.region}e"]
  result_count = 1
}

resource "aws_subnet" "some_public_subnet" {
  vpc_id            = aws_vpc.some_custom_vpc.id
  cidr_block        = "10.0.1.0/24"
  availability_zone = random_shuffle.az.result[0]

  tags = {
    Name = "K8S Subnet"
  }
}

resource "aws_internet_gateway" "some_ig" {
  vpc_id = aws_vpc.some_custom_vpc.id

  tags = {
    Name = "K8S Internet Gateway"
  }
}

resource "aws_route_table" "public_rt" {
  vpc_id = aws_vpc.some_custom_vpc.id
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.some_ig.id
  }

  route {
    ipv6_cidr_block = "::/0"
    gateway_id      = aws_internet_gateway.some_ig.id
  }

  tags = {
    Name = "Public Route Table"
  }
}

resource "aws_route_table_association" "public_1_rt_a" {
  subnet_id      = aws_subnet.some_public_subnet.id
  route_table_id = aws_route_table.public_rt.id
}

# Open the ports the cluster needs: HTTP (80), the API server (6443),
# etcd (2379-2380), SSH (22), the kubelet API (10250), and the
# NodePort range (30000-32767).
resource "aws_security_group" "k8s_sg" {
  name   = "K8S Ports"
  vpc_id = aws_vpc.some_custom_vpc.id

  ingress {
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 6443
    to_port     = 6443
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 2379
    to_port     = 2380
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 10250
    to_port     = 10250
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  ingress {
    from_port   = 30000
    to_port     = 32767
    protocol    = "tcp"
    cidr_blocks = ["0.0.0.0/0"]
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}
#****** VPC END ******#

# Random suffix so the S3 bucket name is globally unique.
resource "random_string" "s3name" {
  length  = 9
  special = false
  upper   = false
  lower   = true
}

resource "aws_s3_bucket_acl" "s3_bucket_acl" {
  bucket     = aws_s3_bucket.s3buckit.id
  acl        = "private"
  depends_on = [aws_s3_bucket_ownership_controls.s3_bucket_acl_ownership]
}

resource "aws_s3_bucket_ownership_controls" "s3_bucket_acl_ownership" {
  bucket = aws_s3_bucket.s3buckit.id
  rule {
    object_ownership = "ObjectWriter"
  }
}

resource "aws_s3_bucket" "s3buckit" {
  bucket        = "k8s-${random_string.s3name.result}"
  force_destroy = true
  depends_on = [
    random_string.s3name
  ]
}

resource "aws_instance" "ec2_instance_msr" {
  ami                         = var.ami_id
  subnet_id                   = aws_subnet.some_public_subnet.id
  instance_type               = var.instance_type
  key_name                    = var.ami_key_pair_name
  associate_public_ip_address = true
  vpc_security_group_ids      = [aws_security_group.k8s_sg.id]

  root_block_device {
    volume_type           = "gp2"
    volume_size           = "8"
    delete_on_termination = true
  }

  tags = {
    Name = "k8s_msr_1"
  }

  # Render the master bootstrap script with the credentials, region,
  # and bucket name injected, then pass it as user data.
  user_data_base64 = base64encode(templatefile("scripts/install_k8s_msr.sh", {
    access_key    = var.access_key
    private_key   = var.secret_key
    region        = var.region
    s3buckit_name = "k8s-${random_string.s3name.result}"
  }))

  depends_on = [
    aws_s3_bucket.s3buckit,
    random_string.s3name
  ]
}

resource "aws_instance" "ec2_instance_wrk" {
  ami                         = var.ami_id
  count                       = var.number_of_worker
  subnet_id                   = aws_subnet.some_public_subnet.id
  instance_type               = var.instance_type
  key_name                    = var.ami_key_pair_name
  associate_public_ip_address = true
  vpc_security_group_ids      = [aws_security_group.k8s_sg.id]

  root_block_device {
    volume_type           = "gp2"
    volume_size           = "8"
    delete_on_termination = true
  }

  tags = {
    Name = "k8s_wrk_${count.index + 1}"
  }

  # Render the worker bootstrap script; each worker also gets its own number.
  user_data_base64 = base64encode(templatefile("scripts/install_k8s_wrk.sh", {
    access_key    = var.access_key
    private_key   = var.secret_key
    region        = var.region
    s3buckit_name = "k8s-${random_string.s3name.result}"
    worker_number = count.index + 1
  }))

  depends_on = [
    aws_s3_bucket.s3buckit,
    random_string.s3name,
    aws_instance.ec2_instance_msr
  ]
}
--------------------------------------------------------------------------------
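A note on how the bootstrap scripts receive their values: `main.tf` renders each script with `templatefile`, substituting `${access_key}`, `${private_key}`, `${region}`, `${s3buckit_name}`, and (for workers) `${worker_number}`, then base64-encodes the result as user data. If you want to inspect a rendered script locally, one way is `terraform console` after `terraform init`; this is a sketch, and the credential and bucket values below are dummies:
```shell
# Evaluate the same templatefile call the worker instance uses,
# printing the fully rendered script to stdout.
echo 'templatefile("scripts/install_k8s_wrk.sh", { access_key = "DUMMY", private_key = "DUMMY", region = "us-east-1", s3buckit_name = "k8s-example", worker_number = "1" })' | terraform console
```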