├── gitignore
├── roles
│   ├── master
│   │   ├── meta
│   │   │   └── main.yml
│   │   └── tasks
│   │       └── main.yml
│   ├── worker
│   │   ├── meta
│   │   │   └── main.yml
│   │   └── tasks
│   │       └── main.yml
│   ├── common
│   │   ├── handlers
│   │   │   └── main.yml
│   │   ├── defaults
│   │   │   └── main.yml
│   │   └── tasks
│   │       └── main.yml
│   └── main.yml
├── README.md
└── Vagrantfile
--------------------------------------------------------------------------------
/gitignore:
--------------------------------------------------------------------------------
.vagrant/
# NOTE(review): the previous pattern "./roles/join-command" is never matched by
# git -- a leading "./" is not valid gitignore syntax. A bare name matches at
# any depth, so this covers the generated file wherever ansible writes it.
join-command
--------------------------------------------------------------------------------
/roles/master/meta/main.yml:
--------------------------------------------------------------------------------
# Run the shared base provisioning (role "common") before master-specific tasks.
dependencies:
  - role: common
--------------------------------------------------------------------------------
/roles/worker/meta/main.yml:
--------------------------------------------------------------------------------
# Run the shared base provisioning (role "common") before worker-specific tasks.
dependencies:
  - role: common
--------------------------------------------------------------------------------
/roles/main.yml:
--------------------------------------------------------------------------------
# Entry-point playbook: provision the control-plane node(s) first so the
# join-command file exists before any worker tries to consume it.
- hosts: masters
  become: yes
  roles:
    - { role: master }

- hosts: workers
  become: yes
  roles:
    - { role: worker }
--------------------------------------------------------------------------------
/roles/worker/tasks/main.yml:
--------------------------------------------------------------------------------
# Join this node to the cluster using the join command generated by the master
# role (roles/master/tasks/main.yml writes ./join-command on the host machine).
- name: Copy the join command to server location
  copy:
    src: join-command
    dest: /tmp/join-command.sh
    mode: '0755'  # was 0777; a world-writable script is a privilege-escalation risk

- name: Join the node to cluster
  command: sh /tmp/join-command.sh
--------------------------------------------------------------------------------
/roles/common/handlers/main.yml:
--------------------------------------------------------------------------------
# Handlers shared by every role that depends on "common".
- name: restart kubelet
  service:
    name: kubelet
    state: restarted
    daemon_reload: yes

- name: docker status
  service:
    name: docker
    state: started
--------------------------------------------------------------------------------
/roles/common/defaults/main.yml:
--------------------------------------------------------------------------------
# APT signing keys for the Docker and Kubernetes package repositories.
gpg_keys:
  - key: https://download.docker.com/linux/ubuntu/gpg
  - key: https://packages.cloud.google.com/apt/doc/apt-key.gpg

repositories:
  - repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ansible_distribution_release}} stable"
  - repo: "deb https://apt.kubernetes.io/ kubernetes-xenial main" #k8s not available yet for Bionic (Ubuntu 18.04)

# Prerequisites for using apt over HTTPS.
https_packages:
  - name: apt-transport-https
  - name: curl

docker_packages:
  - name: docker-ce
  - name: docker-ce-cli
  - name: containerd.io

k8s_packages:
  - name: kubeadm
  - name: kubelet
  - name: kubectl
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# k8s-cluster-vagrant-ansible

## Description

This project allows you to deploy a multi-node Kubernetes cluster with the help of a Vagrantfile and Ansible.

## How to

### Deploy and run the nodes

```sh
vagrant up
```

### Configure the k8s cluster

You can configure your k8s cluster by editing the `CONFIGURATION VARIABLES` available in the `Vagrantfile`

### Configure your kubectl

Here's how to configure the kubectl tool on your local machine to communicate with the Kubernetes API :

```sh
scp -r vagrant@192.168.50.10:/home/vagrant/.kube $HOME/
password = vagrant
```

Test your config like the example below :

```sh
kubectl get nodes
```

Result :

```
NAME       STATUS   ROLES    AGE   VERSION
master     Ready    master   35m   v1.15.1
worker-1   Ready             30m   v1.15.1
```

### Connect to the master
via ssh

If you are at the same level as your Vagrantfile, run the following command :

```sh
vagrant ssh master
```

If you are in another folder :

```sh
ssh vagrant@192.168.50.10
password = vagrant
```

(Note: `ssh` has no `-r` option — that flag belongs to `scp`.)
--------------------------------------------------------------------------------
/roles/master/tasks/main.yml:
--------------------------------------------------------------------------------
# Bootstrap the Kubernetes control plane on the master node.

# Write daemon.json in a single idempotent step. The previous
# touch + blockinfile combination used marker: "" (to keep the JSON valid),
# which makes blockinfile non-idempotent: the block is appended again on
# every provisioning run.
- name: Change Docker cgroup driver - write daemon.json
  copy:
    dest: /etc/docker/daemon.json
    content: |
      {
        "exec-opts": ["native.cgroupdriver=systemd"]
      }

- name: Restart service docker
  systemd:
    state: restarted
    daemon_reload: yes
    name: docker

- name: Initialize the Kubernetes cluster using kubeadm
  command: kubeadm init --apiserver-advertise-address="{{ node_ip }}" --apiserver-cert-extra-sans="{{ node_ip }}" --node-name="{{ node_name }}" --pod-network-cidr={{ pod_network }}
  args:
    creates: /etc/kubernetes/admin.conf  # skip when already initialized (re-provision safe)

- name: Setup kubeconfig for vagrant user
  command: "{{ item }}"
  with_items:
    - mkdir -p /home/vagrant/.kube
    - cp -i /etc/kubernetes/admin.conf /home/vagrant/.kube/config
    - chown vagrant:vagrant /home/vagrant/.kube/config

# Runs as the vagrant user so kubectl picks up /home/vagrant/.kube/config.
- name: Install flannel pod network
  become: false
  command: kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml

- name: Generate join command
  command: kubeadm token create --print-join-command
  register: join_command

# Saved on the host machine; consumed by roles/worker/tasks/main.yml.
- name: Copy join command to local file
  become: false
  local_action: copy content="{{ join_command.stdout_lines[0] }}" dest="./join-command"
-------------------------------------------------------------------------------- /roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install packages that allow apt to be used over HTTPS 2 | apt: 3 | name='{{ item.name }}' 4 | state=present 5 | update_cache=yes 6 | with_items: "{{ https_packages | default([]) }}" 7 | 8 | 9 | - name: Add new repositories keys 10 | apt_key: 11 | url='{{item.key}}' 12 | state=present 13 | with_items: "{{ gpg_keys | default([]) }}" 14 | 15 | 16 | - name: Add new apt repositories 17 | apt_repository: 18 | repo='{{item.repo}}' 19 | state=present 20 | with_items: "{{ repositories | default([]) }}" 21 | 22 | 23 | - name: Install docker 24 | apt: 25 | name="{{ item.name }}" 26 | state=present 27 | update_cache=yes 28 | with_items: "{{ docker_packages | default([]) }}" 29 | notify: 30 | - docker status 31 | 32 | - name: Change Docker cgroup driver - Create daemon.json file 33 | file: 34 | path: "/etc/docker/daemon.json" 35 | state: touch 36 | 37 | - name: Change Docker cgroup driver - Edit daemon.json file 38 | blockinfile: 39 | path: "/etc/docker/daemon.json" 40 | block: | 41 | { 42 | "exec-opts": ["native.cgroupdriver=systemd"] 43 | } 44 | marker: "" 45 | 46 | - name: Restart service docker 47 | systemd: 48 | state: restarted 49 | daemon_reload: yes 50 | name: docker 51 | 52 | 53 | - name: Add vagrant user to docker group 54 | user: 55 | name: vagrant 56 | group: docker 57 | 58 | 59 | - name: Remove swapfile from /etc/fstab 60 | mount: 61 | name: "{{ item }}" 62 | fstype: swap 63 | state: absent 64 | with_items: 65 | - swap 66 | - none 67 | 68 | 69 | - name: Disable swap 70 | command: swapoff -a 71 | when: ansible_swaptotal_mb > 0 72 | 73 | 74 | - name: Install Kubernetes binaries 75 | apt: 76 | name="{{ item.name }}" 77 | state=present 78 | update_cache=yes 79 | with_items: "{{ k8s_packages | default([]) }}" 80 | 81 | - name: Configure node ip 82 | lineinfile: 83 | 
path: '/etc/systemd/system/kubelet.service.d/10-kubeadm.conf' 84 | line: 'Environment="KUBELET_EXTRA_ARGS=--node-ip={{ node_ip }}"' 85 | regexp: 'KUBELET_EXTRA_ARGS=' 86 | insertafter: '\[Service\]' 87 | state: present 88 | notify: 89 | - restart kubelet 90 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # #################################################################### 2 | # ################### CONFIGURATION VARIABLES ######################## 3 | # #################################################################### 4 | IMAGE_NAME = "bento/ubuntu-18.04" # Image to use 5 | MEM = 2048 # Amount of RAM 6 | CPU = 2 # Number of processors (Minimum value of 2 otherwise it will not work) 7 | MASTER_NAME="master" # Master node name 8 | WORKER_NBR = 1 # Number of workers node 9 | NODE_NETWORK_BASE = "192.168.50" # First three octets of the IP address that will be assign to all type of nodes 10 | POD_NETWORK = "192.168.100.0/16" # Private network for inter-pod communication 11 | 12 | 13 | 14 | Vagrant.configure("2") do |config| 15 | config.ssh.insert_key = false 16 | 17 | # RAM and CPU config 18 | config.vm.provider "virtualbox" do |v| 19 | v.memory = MEM 20 | v.cpus = CPU 21 | end 22 | 23 | # Master node config 24 | config.vm.define MASTER_NAME do |master| 25 | 26 | # Hostname and network config 27 | master.vm.box = IMAGE_NAME 28 | master.vm.network "private_network", ip: "#{NODE_NETWORK_BASE}.10" 29 | master.vm.hostname = MASTER_NAME 30 | 31 | # Ansible role setting 32 | master.vm.provision "ansible" do |ansible| 33 | 34 | # Ansbile role that will be launched 35 | ansible.playbook = "roles/main.yml" 36 | 37 | # Groups in Ansible inventory 38 | ansible.groups = { 39 | "masters" => ["#{MASTER_NAME}"], 40 | "workers" => ["worker-[1:#{WORKER_NBR}]"] 41 | } 42 | 43 | # Overload Anqible variables 44 | ansible.extra_vars = { 45 | node_ip: 
"#{NODE_NETWORK_BASE}.10", 46 | node_name: "master", 47 | pod_network: "#{POD_NETWORK}" 48 | } 49 | end 50 | end 51 | 52 | # Worker node config 53 | (1..WORKER_NBR).each do |i| 54 | config.vm.define "worker-#{i}" do |worker| 55 | 56 | # Hostname and network config 57 | worker.vm.box = IMAGE_NAME 58 | worker.vm.network "private_network", ip: "#{NODE_NETWORK_BASE}.#{i + 10}" 59 | worker.vm.hostname = "worker-#{i}" 60 | 61 | # Ansible role setting 62 | worker.vm.provision "ansible" do |ansible| 63 | 64 | # Ansbile role that will be launched 65 | ansible.playbook = "roles/main.yml" 66 | 67 | # Groups in Ansible inventory 68 | ansible.groups = { 69 | "masters" => ["#{MASTER_NAME}"], 70 | "workers" => ["worker-[1:#{WORKER_NBR}]"] 71 | } 72 | 73 | # Overload Anqible variables 74 | ansible.extra_vars = { 75 | node_ip: "#{NODE_NETWORK_BASE}.#{i + 10}" 76 | } 77 | end 78 | end 79 | end 80 | end 81 | --------------------------------------------------------------------------------