├── 0-aws.tf ├── aws.png ├── Dockerfile ├── 7-kubeadm.tf ├── 2-etcd.tf ├── 4-controllers.tf ├── service-l7.yaml ├── LICENSE ├── 3-workers.tf ├── 5-iam.tf ├── worker.sh ├── 6-elb.tf ├── etcd.sh ├── variables.tf ├── README.md ├── master.sh └── 1-vpc.tf /0-aws.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "${var.region}" 3 | } 4 | -------------------------------------------------------------------------------- /aws.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/graykode/aws-kubeadm-terraform/HEAD/aws.png -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:16.04 2 | LABEL maintainer="nlkey2022@gmail.com" 3 | 4 | RUN apt-get update && apt install git python python-pip unzip wget vim -y && \ 5 | git clone https://github.com/graykode/aws-kubeadm-terraform && \ 6 | cd aws-kubeadm-terraform 7 | 8 | RUN wget https://releases.hashicorp.com/terraform/0.11.13/terraform_0.11.13_linux_amd64.zip && \ 9 | unzip terraform_0.11.13_linux_amd64.zip && \ 10 | rm terraform_0.11.13_linux_amd64.zip && \ 11 | mv terraform /usr/bin && chmod +x /usr/bin/terraform 12 | 13 | RUN cd /aws-kubeadm-terraform 14 | 15 | WORKDIR /aws-kubeadm-terraform -------------------------------------------------------------------------------- /7-kubeadm.tf: -------------------------------------------------------------------------------- 1 | data "template_file" "master-userdata" { 2 | template = "${file("${var.master-userdata}")}" 3 | 4 | vars { 5 | k8stoken = "${var.k8stoken}" 6 | } 7 | } 8 | 9 | data "template_file" "worker-userdata" { 10 | template = "${file("${var.worker-userdata}")}" 11 | 12 | vars { 13 | k8stoken = "${var.k8stoken}" 14 | masterIP = "${aws_instance.controller_etcd.private_ip}" 15 | } 16 | } 17 | 18 | data 
"template_file" "etcd-userdata" { 19 | template = "${file("${var.etcd-userdata}")}" 20 | 21 | vars { 22 | k8stoken = "${var.k8stoken}" 23 | masterIP = "${aws_instance.controller_etcd.private_ip}" 24 | } 25 | } 26 | 27 | ############ 28 | ## Outputs 29 | ############ 30 | 31 | output "kubernetes_master" { 32 | value = "${join(",", aws_instance.controller_etcd.*.public_ip)}" 33 | } -------------------------------------------------------------------------------- /2-etcd.tf: -------------------------------------------------------------------------------- 1 | ######################### 2 | # etcd cluster instances 3 | ######################### 4 | # Delete the below comments to activate etcd. 5 | resource "aws_instance" "etcd" { 6 | count = "${var.number_of_etcd}" 7 | ami = "${lookup(var.amis, var.region)}" 8 | instance_type = "${var.etcd_instance_type}" 9 | 10 | iam_instance_profile = "${aws_iam_instance_profile.kubernetes.id}" 11 | user_data = "${data.template_file.etcd-userdata.rendered}" 12 | 13 | subnet_id = "${aws_subnet.kubernetes.id}" 14 | private_ip = "${cidrhost(var.vpc_cidr, 10 + count.index)}" 15 | associate_public_ip_address = true # Instances have public, dynamic IP 16 | 17 | availability_zone = "${var.zone}" 18 | vpc_security_group_ids = ["${aws_security_group.kubernetes.id}"] 19 | key_name = "${var.default_keypair_name}" 20 | tags = "${merge( 21 | local.common_tags, 22 | map( 23 | "Owner", "${var.owner}", 24 | "Name", "etcd-${count.index}" 25 | ) 26 | )}" 27 | } 28 | -------------------------------------------------------------------------------- /4-controllers.tf: -------------------------------------------------------------------------------- 1 | ############################ 2 | # K8s Control Pane instances 3 | ############################ 4 | 5 | resource "aws_instance" "controller_etcd" { 6 | count = 1 7 | ami = "${lookup(var.amis, var.region)}" 8 | instance_type = "${var.controller_instance_type}" 9 | 10 | iam_instance_profile = 
"${aws_iam_instance_profile.kubernetes.id}" 11 | user_data = "${data.template_file.master-userdata.rendered}" 12 | 13 | subnet_id = "${aws_subnet.kubernetes.id}" 14 | private_ip = "10.43.0.40" 15 | associate_public_ip_address = true # Instances have public, dynamic IP 16 | source_dest_check = false # TODO Required?? 17 | 18 | availability_zone = "${var.zone}" 19 | vpc_security_group_ids = ["${aws_security_group.kubernetes.id}"] 20 | key_name = "${var.default_keypair_name}" 21 | 22 | tags = "${merge( 23 | local.common_tags, 24 | map( 25 | "Owner", "${var.owner}", 26 | "Name", "controller-etcd-${count.index}" 27 | ) 28 | )}" 29 | } -------------------------------------------------------------------------------- /service-l7.yaml: -------------------------------------------------------------------------------- 1 | kind: Service 2 | apiVersion: v1 3 | metadata: 4 | name: ingress-nginx 5 | namespace: ingress-nginx 6 | labels: 7 | app.kubernetes.io/name: ingress-nginx 8 | app.kubernetes.io/part-of: ingress-nginx 9 | annotations: 10 | # Enable PROXY protocol 11 | service.beta.kubernetes.io/aws-load-balancer-proxy-protocol: "*" 12 | # Ensure the ELB idle timeout is less than nginx keep-alive timeout. By default, 13 | # NGINX keep-alive is set to 75s. If using WebSockets, the value will need to be 14 | # increased to '3600' to avoid any potential issues. 
15 | service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "60" 16 | spec: 17 | type: LoadBalancer 18 | selector: 19 | app.kubernetes.io/name: ingress-nginx 20 | app.kubernetes.io/part-of: ingress-nginx 21 | ports: 22 | - name: http 23 | port: 80 24 | nodePort: 32308 25 | targetPort: http 26 | - name: https 27 | port: 6443 28 | nodePort: 32303 29 | targetPort: https 30 | --- 31 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Tae-Hwan Jung 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /3-workers.tf: -------------------------------------------------------------------------------- 1 | 2 | ############################################ 3 | # K8s Worker (aka Nodes, Minions) Instances 4 | ############################################ 5 | 6 | resource "aws_instance" "worker" { 7 | count = "${var.number_of_worker}" 8 | ami = "${lookup(var.amis, var.region)}" 9 | instance_type = "${var.worker_instance_type}" 10 | 11 | iam_instance_profile = "${aws_iam_instance_profile.kubernetes.id}" 12 | user_data = "${data.template_file.worker-userdata.rendered}" 13 | 14 | subnet_id = "${aws_subnet.kubernetes.id}" 15 | private_ip = "${cidrhost(var.vpc_cidr, 30 + count.index)}" 16 | associate_public_ip_address = true # Instances have public, dynamic IP 17 | source_dest_check = false # TODO Required?? 18 | 19 | availability_zone = "${var.zone}" 20 | vpc_security_group_ids = ["${aws_security_group.kubernetes.id}"] 21 | key_name = "${var.default_keypair_name}" 22 | 23 | tags = "${merge( 24 | local.common_tags, 25 | map( 26 | "Owner", "${var.owner}", 27 | "Name", "worker-${count.index}" 28 | ) 29 | )}" 30 | } 31 | 32 | output "kubernetes_workers_public_ip" { 33 | value = "${join(",", aws_instance.worker.*.public_ip)}" 34 | } 35 | -------------------------------------------------------------------------------- /5-iam.tf: -------------------------------------------------------------------------------- 1 | ########################## 2 | # IAM: Policies and Roles 3 | ########################## 4 | 5 | # The following Roles and Policy are mostly for future use 6 | 7 | resource "aws_iam_role" "kubernetes" { 8 | name = "kubernetes" 9 | assume_role_policy = <> \ 8 | /etc/apt/sources.list.d/kubernetes.list" 9 | 10 | # Install kubelet kubeadm kubectl kubernetes-cni docker 11 | apt-get update 12 | apt-get install -y kubelet kubeadm kubectl kubernetes-cni 13 | curl -sSL https://get.docker.com/ | sh 14 | 
systemctl start docker 15 | echo '[Finished] Installing kubelet kubeadm kubectl kubernetes-cni docker' > /home/ubuntu/worker.log 16 | 17 | systemctl stop docker 18 | mkdir /mnt/docker 19 | chmod 711 /mnt/docker 20 | cat <<EOF > /etc/docker/daemon.json 21 | { 22 | "data-root": "/mnt/docker", 23 | "log-driver": "json-file", 24 | "log-opts": { 25 | "max-size": "10m", 26 | "max-file": "5" 27 | } 28 | } 29 | EOF 30 | systemctl start docker 31 | systemctl enable docker 32 | echo '[Finished] docker configure' > /home/ubuntu/worker.log 33 | 34 | # Point kubelet at big ephemeral drive 35 | mkdir /mnt/kubelet 36 | echo 'KUBELET_EXTRA_ARGS="--root-dir=/mnt/kubelet --cloud-provider=aws"' > /etc/default/kubelet 37 | echo '[Finished] kubelet configure' > /home/ubuntu/worker.log 38 | 39 | # ----------------- from here same with etcd.sh 40 | 41 | # Pass bridged IPv4 traffic to iptables chains (required by Flannel) 42 | echo "net.bridge.bridge-nf-call-iptables = 1" > /etc/sysctl.d/60-flannel.conf 43 | service procps start 44 | 45 | echo '[Wait] kubeadm join until kubeadm cluster have been created.'
> /home/ubuntu/worker.log 46 | for i in {1..50}; do sudo kubeadm join --token=${k8stoken} --discovery-token-unsafe-skip-ca-verification --node-name=$(hostname -f) ${masterIP}:6443 && break || sleep 15; done 47 | -------------------------------------------------------------------------------- /6-elb.tf: -------------------------------------------------------------------------------- 1 | //############################### 2 | //## Kubernetes API Load Balancer 3 | //############################### 4 | // 5 | //resource "aws_elb" "kubernetes_api" { 6 | // name = "${var.elb_name}" 7 | // instances = ["${aws_instance.controller_etcd.*.id}"] 8 | // subnets = ["${aws_subnet.kubernetes.id}"] 9 | // cross_zone_load_balancing = false 10 | // 11 | // security_groups = ["${aws_security_group.kubernetes_api.id}"] 12 | // 13 | // listener { 14 | // lb_port = 6443 15 | // instance_port = 6443 16 | // lb_protocol = "TCP" 17 | // instance_protocol = "TCP" 18 | // } 19 | // 20 | // health_check { 21 | // healthy_threshold = 2 22 | // unhealthy_threshold = 2 23 | // timeout = 15 24 | // target = "TCP:32303" 25 | // interval = 30 26 | // } 27 | // 28 | // tags = "${merge( 29 | // local.common_tags, 30 | // map( 31 | // "Name", "kubernetes", 32 | // "Owner", "${var.owner}" 33 | // ) 34 | // )}" 35 | //} 36 | // 37 | //############ 38 | //## Security 39 | //############ 40 | // 41 | //resource "aws_security_group" "kubernetes_api" { 42 | // vpc_id = "${aws_vpc.kubernetes.id}" 43 | // name = "kubernetes-api" 44 | // 45 | // # Allow inbound traffic to the port used by Kubernetes API HTTPS 46 | // ingress { 47 | // from_port = 6443 48 | // to_port = 6443 49 | // protocol = "TCP" 50 | // cidr_blocks = ["${var.control_cidr}"] 51 | // } 52 | // 53 | // # Allow all outbound traffic 54 | // egress { 55 | // from_port = 0 56 | // to_port = 0 57 | // protocol = "-1" 58 | // cidr_blocks = ["0.0.0.0/0"] 59 | // } 60 | // 61 | // tags = "${merge( 62 | // local.common_tags, 63 | // map( 64 | // "Name", 
"kubernetes-api", 65 | // "Owner", "${var.owner}" 66 | // ) 67 | // )}" 68 | //} 69 | // 70 | //############ 71 | //## Outputs 72 | //############ 73 | // 74 | //output "kubernetes_api_dns_name" { 75 | // value = "${aws_elb.kubernetes_api.dns_name}" 76 | //} -------------------------------------------------------------------------------- /etcd.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ve 2 | touch /home/ubuntu/etcd.log 3 | 4 | curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 5 | touch /etc/apt/sources.list.d/kubernetes.list 6 | 7 | su -c "echo 'deb http://apt.kubernetes.io/ kubernetes-xenial main' >> \ 8 | /etc/apt/sources.list.d/kubernetes.list" 9 | 10 | # Install kubelet kubeadm kubectl kubernetes-cni docker 11 | apt-get update 12 | apt-get install -y kubelet kubeadm kubectl kubernetes-cni 13 | curl -sSL https://get.docker.com/ | sh 14 | systemctl start docker 15 | echo '[Finished] Installing kubelet kubeadm kubectl kubernetes-cni docker' > /home/ubuntu/etcd.log 16 | 17 | # Install etcdctl for the version of etcd we're running 18 | ETCD_VERSION=$(kubeadm config images list | grep etcd | cut -d':' -f2) 19 | wget "https://github.com/coreos/etcd/releases/download/v$${ETCD_VERSION}/etcd-v$${ETCD_VERSION}-linux-amd64.tar.gz" 20 | tar xvf "etcd-v$${ETCD_VERSION}-linux-amd64.tar.gz" 21 | mv "etcd-v$${ETCD_VERSION}-linux-amd64/etcdctl" /usr/local/bin/ 22 | rm -rf etcd* 23 | echo '[Finished] Installing etcdctl' > /home/ubuntu/etcd.log 24 | 25 | systemctl stop docker 26 | mkdir /mnt/docker 27 | chmod 711 /mnt/docker 28 | cat < /etc/docker/daemon.json 29 | { 30 | "data-root": "/mnt/docker", 31 | "log-driver": "json-file", 32 | "log-opts": { 33 | "max-size": "10m", 34 | "max-file": "5" 35 | } 36 | } 37 | EOF 38 | systemctl start docker 39 | systemctl enable docker 40 | echo '[Finished] docker configure' > /home/ubuntu/etcd.log 41 | 42 | # Point kubelet at big ephemeral drive 43 | mkdir 
/mnt/kubelet 44 | echo 'KUBELET_EXTRA_ARGS="--root-dir=/mnt/kubelet --cloud-provider=aws"' > /etc/default/kubelet 45 | echo '[Finished] kubelet configure' > /home/ubuntu/etcd.log 46 | 47 | # ----------------- from here same with etcd.sh 48 | 49 | # Pass bridged IPv4 traffic to iptables chains (required by Flannel) 50 | echo "net.bridge.bridge-nf-call-iptables = 1" > /etc/sysctl.d/60-flannel.conf 51 | service procps start 52 | 53 | echo '[Wait] kubeadm join until kubeadm cluster have been created.' > /home/ubuntu/etcd.log 54 | for i in {1..50}; do sudo kubeadm join --token=${k8stoken} --discovery-token-unsafe-skip-ca-verification --node-name=$(hostname -f) ${masterIP}:6443 && break || sleep 15; done 55 | -------------------------------------------------------------------------------- /variables.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | common_tags = "${map( 3 | "kubernetes.io/cluster/${var.cluster_id_tag}", "${var.cluster_id_value}" 4 | )}" 5 | } 6 | 7 | ############################# 8 | # Adjustable variables 9 | ############################# 10 | 11 | variable number_of_etcd{ 12 | description = "The number of etcd, only acts as etcd" 13 | default = 0 14 | } 15 | 16 | variable number_of_worker{ 17 | description = "The number of worker nodes" 18 | default = 1 19 | } 20 | 21 | variable cluster_id_tag{ 22 | description = "Cluster ID tag for kubeAdm" 23 | default = "graykode" 24 | } 25 | 26 | variable cluster_id_value{ 27 | description = "Cluster ID value, it can be shared or owned" 28 | default = "owned" 29 | } 30 | 31 | ########################## 32 | # Default variables (you can change for customizing) 33 | ########################## 34 | 35 | variable control_cidr { 36 | description = "CIDR for maintenance: inbound traffic will be allowed from this IPs" 37 | default = "0.0.0.0/0" 38 | } 39 | 40 | locals { 41 | default_keypair_public_key = "${file("tf-kube.pub")}" 42 | } 43 | 44 | /* 45 | ## It triggers 
interpolation. It is recommended to use another way. 46 | ## TODO : Replace default_keypair_public_key as output? 47 | variable default_keypair_public_key { 48 | description = "Public Key of the default keypair" 49 | default = "${file("../keys/tf-kube.pub")}" 50 | } 51 | */ 52 | 53 | variable default_keypair_name { 54 | description = "Name of the KeyPair used for all nodes" 55 | default = "tf-kube" 56 | } 57 | 58 | variable vpc_name { 59 | description = "Name of the VPC" 60 | default = "kubernetes" 61 | } 62 | 63 | variable elb_name { 64 | description = "Name of the ELB for Kubernetes API" 65 | default = "kubernetes" 66 | } 67 | 68 | variable owner { 69 | default = "graykode" 70 | } 71 | 72 | # Networking setup 73 | variable region { 74 | default = "ap-northeast-2" 75 | } 76 | 77 | variable zone { 78 | default = "ap-northeast-2a" 79 | } 80 | 81 | ### VARIABLES BELOW MUST NOT BE CHANGED ### 82 | variable vpc_cidr { 83 | default = "10.43.0.0/16" 84 | } 85 | 86 | # Instances Setup 87 | variable amis { 88 | description = "Default AMIs to use for nodes depending on the region" 89 | type = "map" 90 | default = { 91 | ap-northeast-2 = "ami-067c32f3d5b9ace91" 92 | ap-northeast-1 = "ami-0567c164" 93 | ap-southeast-1 = "ami-a1288ec2" 94 | cn-north-1 = "ami-d9f226b4" 95 | eu-central-1 = "ami-8504fdea" 96 | eu-west-1 = "ami-0d77397e" 97 | sa-east-1 = "ami-e93da085" 98 | us-east-1 = "ami-40d28157" 99 | us-west-1 = "ami-6e165d0e" 100 | us-west-2 = "ami-a9d276c9" 101 | } 102 | } 103 | 104 | variable etcd_instance_type { 105 | default = "t2.medium" 106 | } 107 | variable controller_instance_type { 108 | default = "t2.medium" 109 | } 110 | variable worker_instance_type { 111 | default = "t2.medium" 112 | } 113 | 114 | # for Install KubeAdm Master / Worker / etcd 115 | variable "master-userdata" { 116 | default = "master.sh" 117 | } 118 | 119 | variable "worker-userdata" { 120 | default = "worker.sh" 121 | } 122 | 123 | variable "etcd-userdata" { 124 | default = "etcd.sh" 125 | } 
126 | 127 | variable "k8stoken" {} -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## aws-terraform-kubeAdm 2 | 3 | In this repository, you can create kubernetes cluster using only `terraform apply` in AWS. 4 | 5 | ![](aws.png) 6 | 7 | 8 | 9 | ### 1. Configure on your docker 10 | 11 | ```bash 12 | $ docker run -it graykode/aws-kubeadm-terraform:0.3 /bin/bash 13 | ``` 14 | 15 | Export your own AWS Access / Secret keys 16 | 17 | ```bash 18 | $ export AWS_ACCESS_KEY_ID= 19 | $ export AWS_SECRET_ACCESS_KEY= 20 | ``` 21 | 22 | Generate your SSH key pair for aws_key_pair 23 | 24 | ```bash 25 | $ ssh-keygen -t rsa -N "" -f tf-kube 26 | ``` 27 | 28 | Create your `k8stoken` 29 | 30 | ```bash 31 | $ python -c 'import random; print "%0x.%0x" % (random.SystemRandom().getrandbits(3*8), random.SystemRandom().getrandbits(8*8))' 32 | 33 | > ebd70e.ad84c12d14ee5dae 34 | ``` 35 | 36 | 37 | 38 | ### 2. Create Kubernetes Cluster 39 | 40 | Initialize terraform 41 | 42 | ```bash 43 | $ terraform init 44 | ``` 45 | 46 | Create all objects in AWS and put k8s token before you created! It's Done. 47 | 48 | ```bash 49 | $ terraform apply 50 | var.k8stoken 51 | Enter a value: ebd70e.ad84c12d14ee5dae 52 | ``` 53 | 54 | Check if k8s Cluster is created. 55 | 56 | ```bash 57 | $ ssh -i tf-kube ubuntu@server-ip 58 | ubuntu@ip-10-43-0-40:~$ kubectl get nodes 59 | NAME STATUS ROLES AGE VERSION 60 | ip-10-43-0-30.ap-northeast-2.compute.internal Ready 3m40s v1.15.0 61 | ip-10-43-0-40.ap-northeast-2.compute.internal Ready master 4m4s v1.15.0 62 | ``` 63 | 64 | If you want to delete all cluster, type this. 65 | 66 | ```bash 67 | $ terraform destroy 68 | ``` 69 | 70 | 71 | 72 | #### If you want to see Kubernetes Clustering Step. 
73 | 74 | ```bash 75 | $ tail -f /home/ubuntu/master.log # in master node 76 | $ tail -f /home/ubuntu/worker.log # in worker node 77 | $ tail -f /home/ubuntu/etcd.log # in etcd node 78 | ``` 79 | 80 | ### 3. Set [variables.tf](https://github.com/graykode/aws-kubeadm-terraform/blob/master/variables.tf) 81 | 82 | 83 | 84 | 1. Set EC2 instance_type 85 | ```javascript 86 | variable etcd_instance_type { 87 | default = "t2.medium" 88 | } 89 | variable controller_instance_type { 90 | default = "t2.medium" 91 | } 92 | variable worker_instance_type { 93 | default = "t2.medium" 94 | } 95 | ``` 96 | 97 | 2. Set Number of EC2 Node 98 | 99 | ```javascript 100 | variable number_of_etcd{ 101 | description = "The number of etcd, only acts as etcd" 102 | default = 0 103 | } 104 | 105 | variable number_of_worker{ 106 | description = "The number of worker nodes" 107 | default = 1 108 | } 109 | ``` 110 | 111 | 112 | 113 | #### PS 114 | 115 | If you encounter the `provider.aws: error validating provider credentials` error, please check that your IAM key is active.
116 | 117 | #### ToDo 118 | - Set up a High Availability etcd cluster with kubeadm 119 | - Add k8s master node ingress, ingress-controller with ELB 120 | 121 | ### Author 122 | 123 | - Reference : [alicek106/aws-terraform-kubernetes](https://github.com/alicek106/aws-terraform-kubernetes), [cablespaghetti/kubeadm-aws](https://github.com/cablespaghetti/kubeadm-aws) 124 | - Tae Hwan Jung(Jeff Jung) @graykode 125 | - Author Email : [nlkey2022@gmail.com](mailto:nlkey2022@gmail.com) -------------------------------------------------------------------------------- /master.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ve 2 | touch /home/ubuntu/master.log 3 | 4 | curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - 5 | touch /etc/apt/sources.list.d/kubernetes.list 6 | 7 | su -c "echo 'deb http://apt.kubernetes.io/ kubernetes-xenial main' >> \ 8 | /etc/apt/sources.list.d/kubernetes.list" 9 | 10 | # Install kubelet kubeadm kubectl kubernetes-cni docker 11 | apt-get update 12 | apt-get install -y kubelet kubeadm kubectl kubernetes-cni 13 | curl -sSL https://get.docker.com/ | sh 14 | systemctl start docker 15 | echo '[Finished] Installing kubelet kubeadm kubectl kubernetes-cni docker' > /home/ubuntu/master.log 16 | 17 | # Install etcdctl for the version of etcd we're running 18 | ETCD_VERSION=$(kubeadm config images list | grep etcd | cut -d':' -f2) 19 | wget "https://github.com/coreos/etcd/releases/download/v$${ETCD_VERSION}/etcd-v$${ETCD_VERSION}-linux-amd64.tar.gz" 20 | tar xvf "etcd-v$${ETCD_VERSION}-linux-amd64.tar.gz" 21 | mv "etcd-v$${ETCD_VERSION}-linux-amd64/etcdctl" /usr/local/bin/ 22 | rm -rf etcd* 23 | echo '[Finished] Installing etcdctl' > /home/ubuntu/master.log 24 | 25 | systemctl stop docker 26 | mkdir /mnt/docker 27 | chmod 711 /mnt/docker 28 | cat <<EOF > /etc/docker/daemon.json 29 | { 30 | "data-root": "/mnt/docker", 31 | "log-driver": "json-file", 32 | "log-opts": { 33 | "max-size":
"10m", 34 | "max-file": "5" 35 | } 36 | } 37 | EOF 38 | systemctl start docker 39 | systemctl enable docker 40 | echo '[Finished] docker configure' > /home/ubuntu/master.log 41 | 42 | # Point kubelet at big ephemeral drive 43 | mkdir /mnt/kubelet 44 | echo 'KUBELET_EXTRA_ARGS="--root-dir=/mnt/kubelet --cloud-provider=aws"' > /etc/default/kubelet 45 | echo '[Finished] kubelet configure' > /home/ubuntu/master.log 46 | 47 | # ----------------- from here same with worker.sh 48 | 49 | cat >init-config.yaml < /home/ubuntu/master.log 74 | 75 | # Pass bridged IPv4 traffic to iptables chains (required by Flannel like the above cidr setting) 76 | echo "net.bridge.bridge-nf-call-iptables = 1" > /etc/sysctl.d/60-flannel.conf 77 | service procps start 78 | 79 | # Set up kubectl for the ubuntu user 80 | mkdir -p /home/ubuntu/.kube && cp -i /etc/kubernetes/admin.conf /home/ubuntu/.kube/config && chown -R ubuntu. /home/ubuntu/.kube 81 | echo 'source <(kubectl completion bash)' >> /home/ubuntu/.bashrc 82 | echo '[Finished] Now you can use kubectl, try : kubectl get nodes' > /home/ubuntu/master.log 83 | 84 | if [ -f /tmp/fresh-cluster ]; then 85 | su -c 'kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/13a990bb716c82a118b8e825b78189dcfbfb2f1e/Documentation/kube-flannel.yml' ubuntu 86 | echo '[Finished] All nodes are ready' > /home/ubuntu/master.log 87 | # su -c 'kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/mandatory.yaml' ubuntu 88 | # su -c 'kubectl apply -f https://raw.githubusercontent.com/graykode/aws-kubeadm-terraform/master/service-l7.yaml' ubuntu 89 | # su -c 'kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/master/deploy/static/provider/aws/patch-configmap-l4.yaml' ubuntu 90 | fi 91 | -------------------------------------------------------------------------------- /1-vpc.tf: -------------------------------------------------------------------------------- 1 | ############ 2 | ## 
VPC 3 | ############ 4 | 5 | resource "aws_vpc" "kubernetes" { 6 | cidr_block = "${var.vpc_cidr}" 7 | enable_dns_hostnames = true 8 | 9 | tags = "${merge( 10 | local.common_tags, 11 | map( 12 | "Name", "${var.vpc_name}", 13 | "Owner", "${var.owner}" 14 | ) 15 | )}" 16 | } 17 | 18 | # DHCP Options are not actually required, being identical to the Default Option Set 19 | resource "aws_vpc_dhcp_options" "dns_resolver" { 20 | domain_name = "${var.region}.compute.internal" 21 | domain_name_servers = ["AmazonProvidedDNS"] 22 | 23 | tags = "${merge( 24 | local.common_tags, 25 | map( 26 | "Name", "${var.vpc_name}", 27 | "Owner", "${var.owner}" 28 | ) 29 | )}" 30 | } 31 | 32 | resource "aws_vpc_dhcp_options_association" "dns_resolver" { 33 | vpc_id ="${aws_vpc.kubernetes.id}" 34 | dhcp_options_id = "${aws_vpc_dhcp_options.dns_resolver.id}" 35 | } 36 | 37 | ########## 38 | # Keypair 39 | ########## 40 | 41 | resource "aws_key_pair" "default_keypair" { 42 | key_name = "${var.default_keypair_name}" 43 | public_key = "${local.default_keypair_public_key}" 44 | } 45 | 46 | 47 | ############ 48 | ## Subnets 49 | ############ 50 | 51 | # Subnet (public) 52 | resource "aws_subnet" "kubernetes" { 53 | vpc_id = "${aws_vpc.kubernetes.id}" 54 | cidr_block = "${var.vpc_cidr}" 55 | availability_zone = "${var.zone}" 56 | 57 | tags = "${merge( 58 | local.common_tags, 59 | map( 60 | "Name", "kubernetes", 61 | "Owner", "${var.owner}" 62 | ) 63 | )}" 64 | } 65 | 66 | resource "aws_internet_gateway" "gw" { 67 | vpc_id = "${aws_vpc.kubernetes.id}" 68 | 69 | tags = "${merge( 70 | local.common_tags, 71 | map( 72 | "Name", "kubernetes", 73 | "Owner", "${var.owner}" 74 | ) 75 | )}" 76 | } 77 | 78 | ############ 79 | ## Routing 80 | ############ 81 | 82 | resource "aws_route_table" "kubernetes" { 83 | vpc_id = "${aws_vpc.kubernetes.id}" 84 | 85 | # Default route through Internet Gateway 86 | route { 87 | cidr_block = "0.0.0.0/0" 88 | gateway_id = "${aws_internet_gateway.gw.id}" 89 | } 90 | 91 | tags 
= "${merge( 92 | local.common_tags, 93 | map( 94 | "Name", "kubernetes", 95 | "Owner", "${var.owner}" 96 | ) 97 | )}" 98 | } 99 | 100 | resource "aws_route_table_association" "kubernetes" { 101 | subnet_id = "${aws_subnet.kubernetes.id}" 102 | route_table_id = "${aws_route_table.kubernetes.id}" 103 | } 104 | 105 | 106 | ############ 107 | ## Security 108 | ############ 109 | 110 | resource "aws_security_group" "kubernetes" { 111 | vpc_id = "${aws_vpc.kubernetes.id}" 112 | name = "kubernetes" 113 | 114 | # Allow all outbound 115 | egress { 116 | from_port = 0 117 | to_port = 0 118 | protocol = "-1" 119 | cidr_blocks = ["0.0.0.0/0"] 120 | } 121 | 122 | # Allow ICMP from control host IP 123 | ingress { 124 | from_port = 8 125 | to_port = 0 126 | protocol = "icmp" 127 | cidr_blocks = ["${var.control_cidr}"] 128 | } 129 | 130 | # Allow all internal 131 | ingress { 132 | from_port = 0 133 | to_port = 0 134 | protocol = "-1" 135 | cidr_blocks = ["${var.vpc_cidr}"] 136 | } 137 | 138 | # Allow all traffic from the API ELB 139 | // ingress { 140 | // from_port = 0 141 | // to_port = 0 142 | // protocol = "-1" 143 | // security_groups = ["${aws_security_group.kubernetes_api.id}"] 144 | // } 145 | 146 | # Allow all traffic from control host IP 147 | ingress { 148 | from_port = 0 149 | to_port = 0 150 | protocol = "-1" 151 | cidr_blocks = ["${var.control_cidr}"] 152 | } 153 | 154 | tags = "${merge( 155 | local.common_tags, 156 | map( 157 | "Name", "kubernetes", 158 | "Owner", "${var.owner}" 159 | ) 160 | )}" 161 | } 162 | --------------------------------------------------------------------------------