├── .gitignore
├── README.md
├── Vagrantfile
├── build_cluster.sh
├── docs
│   ├── images
│   │   └── vagrant_k8s.png
│   └── tmp
│       ├── Vagrantfile
│       ├── build_cluster.sh
│       ├── join.sh
│       ├── kubeconfig.sh
│       └── provision
│           ├── base.sh
│           └── master_init.sh
├── join.sh
├── node_init.sh
└── provision
    ├── base.sh
    └── master_init.sh

/.gitignore:
--------------------------------------------------------------------------------
.vagrant
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# *kubernetes-the-easy-way*

This repo shows an easy way to spin up a fully functional Kubernetes cluster from scratch!

This repo helps you set up your own vanilla Kubernetes cluster on your local workstation from scratch. It is fully pre-configured, so you can use it to quickly stand up a cluster to play around with. In case you want to learn all the concepts in depth, I would highly recommend following the [Kube Docs](https://kubernetes.io/docs/setup/production-environment/tools/kubeadm/install-kubeadm/) or a sample setup guide like [Kube Hard way](https://github.com/kelseyhightower/kubernetes-the-hard-way).

## Pre-requisites

To use this script effectively, the following components need to be installed before running it:

- Docker
- Vagrant
- Oracle VirtualBox
- Git

## Trigger

All the configuration needed to build the cluster is predefined. To start,

Run `./build_cluster.sh`

This will spin up a one-master, two-node k8s cluster with all the needed components at their latest versions (the master at 198.168.33.14 and the nodes at 198.168.33.11-12, each with 2 CPUs and 2 GB of RAM, as defined in the Vagrantfile).

Post successful completion of the script:

- You can log in to the machines by running `vagrant ssh master`, `vagrant ssh node1` and so on.
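If you want a quick sanity check that the cluster actually formed, something along these lines should work. This assumes `provision/master_init.sh` makes `kubectl` usable for the `vagrant` user on the master (the way the scratch `docs/tmp/kubeconfig.sh` does); it is a suggested check, not part of the scripts:

```bash
# Suggested post-build check (not part of build_cluster.sh):
# the master and both worker nodes should show up in the node list.
vagrant ssh master -c 'kubectl get nodes -o wide'
vagrant ssh master -c 'kubectl get pods --all-namespaces'
```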

## Tear Down

__To stop the VMs__

Run `vagrant halt`

This will shut down your VMs gracefully.

__To destroy the VMs__

Run `vagrant destroy`

This will destroy the VMs and free up the resources.

For a *forceful* delete

Run `vagrant destroy -f`

__To just reload (update the config)__

Run `vagrant reload`

This will ensure the latest updates to the config files or scripts are reloaded.

__To bring the cluster up after deletion__

Run `vagrant up`

This will spin up a new cluster.

## Follow-Me

:id: [![Portfolio](https://img.shields.io/badge/GitHub-100000?style=for-the-badge&logo=github&logoColor=white)](https://github.com/premkumar-palanichamy)

premkumarpalanichamy

### ~~Reference~~

Followed @pbacterio's script, so credits to him :)

Made small changes to make the script work smoothly and to run all the Kubernetes components at their latest versions. Also rectified the issue with connecting the nodes to the master automatically during the creation process.
--------------------------------------------------------------------------------
/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure("2") do |config|
  config.vm.box = "debian/buster64"
  config.vm.provision "shell", path: "provision/base.sh"
  config.vm.synced_folder './', '/vagrant', disabled: true

  config.vm.define "master" do |master|
    master.vm.hostname = "master"
    master.vm.network "private_network", ip: "198.168.33.14"
    master.vm.provision "shell", path: "provision/master_init.sh", privileged: false
    for p in [:virtualbox, :libvirt] do
      master.vm.provider p do |provider|
        provider.memory = 2048
        provider.cpus = 2
      end
    end
  end

  (1..2).each do |i|
    config.vm.define "node#{i}" do |node|
      node.vm.hostname = "node#{i}"
      node.vm.network "private_network", ip: "198.168.33.1#{i}"
      for p in [:virtualbox, :libvirt] do
        node.vm.provider p do |provider|
          provider.memory = 2048
          provider.cpus = 2
        end
      end
    end
  end

end
--------------------------------------------------------------------------------
/build_cluster.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -x
#SLEEP_TIME="300"
vagrant up

rm join.sh; touch join.sh

#sleep ${SLEEP_TIME}

cluster_token=$(vagrant ssh master -c 'kubeadm token create --print-join-command')

join_cmd="sudo ${cluster_token}"

cat <<EOF > join.sh
#!/bin/bash
set -x
$join_cmd
EOF

#sleep 60

vagrant plugin install vagrant-scp

vagrant scp ./join.sh node1:join.sh
vagrant ssh node1 -c 'chmod 777 join.sh'
vagrant scp ./node_init.sh node1:node_init.sh
vagrant ssh node1 -c 'chmod 777 node_init.sh'

vagrant scp ./join.sh node2:join.sh
vagrant ssh node2 -c 'chmod 777 join.sh'
vagrant scp ./node_init.sh node2:node_init.sh
vagrant ssh node2 -c 'chmod 777 node_init.sh'

vagrant ssh node1 -c "./node_init.sh"
vagrant ssh node2 -c "./node_init.sh"
--------------------------------------------------------------------------------
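The commented-out `SLEEP_TIME="300"` and `sleep` lines in build_cluster.sh hint at the timing problem this script works around: `kubeadm token create` fails if the API server on the master is not up yet. If you ever hit that, polling the master instead of sleeping a fixed time is more robust. The snippet below is only a sketch of that idea (it assumes `kubectl` works for the `vagrant` user on the master) and is not part of the repo's scripts:

```bash
# Hypothetical helper: wait until the master's API server answers /healthz
# before asking kubeadm for the join command. Not part of build_cluster.sh.
wait_for_apiserver() {
  local retries=60
  until vagrant ssh master -c 'kubectl get --raw=/healthz' 2>/dev/null | grep -q ok; do
    retries=$((retries - 1))
    if [ "$retries" -le 0 ]; then
      echo "API server did not become ready in time" >&2
      return 1
    fi
    sleep 5
  done
}

wait_for_apiserver && \
  cluster_token=$(vagrant ssh master -c 'kubeadm token create --print-join-command')
```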
"private_network", ip: "198.168.33.14" 14 | master.vm.provision "shell", path: "provision/master_init.sh", privileged: false 15 | for p in [:virtualbox, :libvirt] do 16 | master.vm.provider p do |provider| 17 | provider.memory = 2048 18 | provider.cpus = 2 19 | end 20 | end 21 | end 22 | 23 | (1..2).each do |i| 24 | config.vm.define "node#{i}" do |node| 25 | node.vm.hostname = "node#{i}" 26 | node.vm.network "private_network", ip: "192.168.88.2#{i}" 27 | #node.vm.network "private_network", ip: "198.168.33.1#{i}" 28 | for p in [:virtualbox, :libvirt] do 29 | node.vm.provider p do |provider| 30 | provider.memory = 2048 31 | provider.cpus = 2 32 | end 33 | #node.vm.provision "shell", path: "join.sh" 34 | end 35 | end 36 | end 37 | 38 | end -------------------------------------------------------------------------------- /docs/tmp/build_cluster.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | SLEEP_TIME="300" 4 | vagrant up 5 | 6 | ### ±±±±±±± To make kube master work 7 | #vagrant plugin install vagrant-scp 8 | #vagrant scp ./kubeconfig.sh master:kubeconfig.sh 9 | #vagrant ssh master -c 'chmod 777 kubeconfig.sh' 10 | #vagrant ssh master -c './kubeconfig.sh -y' 11 | 12 | rm join.sh; touch join.sh 13 | 14 | #sleep ${SLEEP_TIME} 15 | 16 | #cluster_token=$(vagrant ssh master -c 'kubeadm token create 2>/dev/null') 17 | #cluster_token=(vagrant ssh master -c 'kubeadm token create 2 > /dev/null') 18 | cluster_token=$(vagrant ssh master -c 'kubeadm token create --print-join-command') 19 | #join_cmd="sudo kubeadm join 192.168.88.20:6443 --token ${cluster_token:0:-1} --discovery-token-unsafe-skip-ca-verification" 20 | #join_cmd="sudo kubeadm join 198.168.33.14:6443 --token ${cluster_token:0:-1} --discovery-token-unsafe-skip-ca-verification" 21 | join_cmd="sudo ${cluster_token}" 22 | 23 | #kubeadm token create --print-join-command > /vagrant/configs/join.sh 24 | 25 | cat < join.sh 26 | #!/bin/bash 27 | $join_cmd 28 | EOF 29 | 30 | #sleep 60 31 | #vagrant ssh node1 -c '$join_cmd' 32 | vagrant plugin install vagrant-scp 33 | 34 | vagrant scp ./join.sh node1:join.sh 35 | vagrant ssh node1 -c 'chmod 777 join.sh' 36 | vagrant ssh node1 -c "./join.sh" 37 | #vagrant ssh node1 -c "$join_cmd" 38 | #vagrant ssh node1 -c "$join_cmd --v=5" 39 | #vagrant ssh node2 -c "$join_cmd" 40 | ##vagrant ssh node1 -c 'sudo $cluster_token' 41 | #vagrant ssh node2 -c "sudo $cluster_token" -------------------------------------------------------------------------------- /docs/tmp/join.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | sudo 3 | 4 | 5 | timed out waiting for the condition 6 | To see the stack trace of this error execute with --v=5 or higher 7 | -------------------------------------------------------------------------------- /docs/tmp/kubeconfig.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | mkdir -p $HOME/.kube 4 | sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config 5 | sudo chown $(id -u):$(id -g) $HOME/.kube/config -------------------------------------------------------------------------------- /docs/tmp/provision/base.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | apt-get update 5 | apt-get -y install docker.io curl gnupg 6 | 7 | cat <> $HOME/.bashrc 14 | -------------------------------------------------------------------------------- /join.sh: 
--------------------------------------------------------------------------------
/join.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -x
sudo kubeadm join 198.168.33.14:6443 --token o50xdk.znr130xxnbe1wjht --discovery-token-ca-cert-hash sha256:ca009a7933b5538526f88bc4c1d53b96638186b6ebbd9ce4f57cb27c390e03fc
--------------------------------------------------------------------------------
/node_init.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -x

# Drop the first two lines (shebang and "set -x") of the generated join.sh,
# leaving only the "sudo kubeadm join ..." command.
run=$(cat join.sh | sed '1,2d')

# Execute the join command on this node.
eval $run
--------------------------------------------------------------------------------
/provision/base.sh:
--------------------------------------------------------------------------------
#!/bin/bash
set -x

apt-get update
apt-get -y install docker.io curl gnupg

cat <> $HOME/.bashrc
--------------------------------------------------------------------------------
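The heredoc in `provision/base.sh` (and in its draft copy under `docs/tmp/provision/`) lost its body in this listing: everything between `cat <` and `>> $HOME/.bashrc` is missing, so the exact repository and package setup it performs is not visible here. For orientation, a typical kubeadm base provision for Debian Buster looks roughly like the sketch below; the repository URL, key handling, and the final `.bashrc` line are assumptions, not the script's actual content:

```bash
#!/bin/bash
# Sketch only: a common shape for the elided part of provision/base.sh.
set -x

apt-get update
apt-get -y install docker.io curl gnupg

# Add the upstream Kubernetes apt repository (assumed; not visible in the listing above).
curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
cat <<EOF > /etc/apt/sources.list.d/kubernetes.list
deb https://apt.kubernetes.io/ kubernetes-xenial main
EOF

apt-get update
apt-get -y install kubelet kubeadm kubectl

# The original script appends something to ~/.bashrc; kubectl shell completion
# is a common choice here (assumed, not confirmed by the listing).
echo "source <(kubectl completion bash)" >> $HOME/.bashrc
```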