├── .gitignore ├── LICENSE ├── README.md ├── Vagrantfile ├── ansible-kubernetes-vagrant-tutorial.cast ├── istio-services.yaml ├── kubernetes-dashboard-service-np.yaml ├── nginx.yaml └── roles ├── common ├── add_packages │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── handlers │ │ └── main.yml │ └── tasks │ │ └── main.yml └── pam_limits │ ├── README.md │ ├── defaults │ └── main.yml │ └── tasks │ └── main.yml ├── k8s.retry ├── k8s.yml └── k8s ├── common ├── defaults │ └── main.yml ├── handlers │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml └── templates │ └── etc │ ├── containerd │ └── config.toml │ └── modules-load.d │ └── containerd.conf ├── master ├── defaults │ └── main.yml ├── meta │ └── main.yml ├── tasks │ └── main.yml └── templates │ ├── calico │ ├── 3.15 │ │ └── calico.yaml │ └── 3.9 │ │ └── calico.yaml │ └── kubeadm-config.yaml └── node ├── defaults └── main.yml ├── meta └── main.yml ├── tasks └── main.yml └── templates └── calico-networking └── 1.7 └── calico.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | .vagrant/ 2 | .private/ 3 | *-join-command 4 | *.log -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019-2020 IT Wonder Lab 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansible VirtualBox Vagrant Kubernetes 1.22 Containerd and Istio Tutorial (1 master N nodes) 2 | ## Building a Kubernetes Cluster with Vagrant and Ansible 3 | 4 | Tutorial with full source code explaining **how to create a Kubernetes cluster with Ansible and Vagrant** for local development. 5 | 6 | See https://www.itwonderlab.com/en/ansible-kubernetes-vagrant-tutorial/ 7 | 8 | * 22 Dec 2019: Add information about using a Private Docker Registry as suggested by Brian Quandt. 9 | * 4 Nov 2019: Install and publish Kubernetes Dashboard under vagrant, with help from Alex Alongi. Add prerequisites section as requested 10 | * 26 Sep 2019: Update Calico networking and network security to release 3.9 11 | * 6 June 2019: Fix issue: kubectl was not able to recover logs. See new task “Configure node-ip … at kubelet”. 
12 | * 10 Jul 2020: 13 | Update prerequisites to latest releases 14 | It now takes above 2 minutes 15 | Change selection of hosts from Ansible groups to host-name pattern (hosts: k8s-m-* and hosts: k8s-n-*) 16 | * 3 Aug 2020: Allow different amount of CPU and MEM for master and nodes 17 | * 10 June 2021: Update host software dependencies (no changes in ansible or vagrant) 18 | * 16 August 2021: Update Ansible playbooks to use containerd instead of Docker (Kubernetes is deprecating Docker as a container runtime after v1.20.). 19 | 20 | 21 | ------------------ 22 | 23 | ## Creación de un Clúster de Kubernetes 1.22 Containerd e Istio usando Vagrant y Ansible (1 maestro N nodos) 24 | 25 | Creación de un **clúster Kubernetes con múltiples nodos usando Vagrant, Ansible y Virtualbox**. Especialmente indicado para entornos de desarrollo local realistas. 26 | 27 | See https://www.itwonderlab.com/es/cluster-kubernetes-vagrant-ansible/ 28 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | IMAGE_NAME = "bento/ubuntu-20.04" 2 | K8S_NAME = "ditwl-k8s-01" 3 | MASTERS_NUM = 1 4 | MASTERS_CPU = 2 5 | MASTERS_MEM = 2048 6 | 7 | NODES_NUM = 2 8 | NODES_CPU = 2 9 | NODES_MEM = 2048 10 | 11 | IP_BASE = "192.168.50." 12 | 13 | VAGRANT_DISABLE_VBOXSYMLINKCREATE=1 14 | 15 | Vagrant.configure("2") do |config| 16 | config.ssh.insert_key = false 17 | 18 | (1..MASTERS_NUM).each do |i| 19 | config.vm.define "k8s-m-#{i}" do |master| 20 | master.vm.box = IMAGE_NAME 21 | master.vm.network "private_network", ip: "#{IP_BASE}#{i + 10}" 22 | master.vm.hostname = "k8s-m-#{i}" 23 | master.vm.provider "virtualbox" do |v| 24 | v.memory = MASTERS_MEM 25 | v.cpus = MASTERS_CPU 26 | end 27 | master.vm.provision "ansible" do |ansible| 28 | ansible.playbook = "roles/k8s.yml" 29 | #Redefine defaults 30 | ansible.extra_vars = { 31 | k8s_cluster_name: K8S_NAME, 32 | k8s_master_admin_user: "vagrant", 33 | k8s_master_admin_group: "vagrant", 34 | k8s_master_apiserver_advertise_address: "#{IP_BASE}#{i + 10}", 35 | k8s_master_node_name: "k8s-m-#{i}", 36 | k8s_node_public_ip: "#{IP_BASE}#{i + 10}" 37 | } 38 | end 39 | end 40 | end 41 | 42 | (1..NODES_NUM).each do |j| 43 | config.vm.define "k8s-n-#{j}" do |node| 44 | node.vm.box = IMAGE_NAME 45 | node.vm.network "private_network", ip: "#{IP_BASE}#{j + 10 + MASTERS_NUM}" 46 | node.vm.hostname = "k8s-n-#{j}" 47 | node.vm.provider "virtualbox" do |v| 48 | v.memory = NODES_MEM 49 | v.cpus = NODES_CPU 50 | #v.customize ["modifyvm", :id, "--cpuexecutioncap", "20"] 51 | end 52 | node.vm.provision "ansible" do |ansible| 53 | ansible.playbook = "roles/k8s.yml" 54 | #Redefine defaults 55 | ansible.extra_vars = { 56 | k8s_cluster_name: K8S_NAME, 57 | k8s_node_admin_user: "vagrant", 58 | k8s_node_admin_group: "vagrant", 59 | k8s_node_public_ip: "#{IP_BASE}#{j + 10 + MASTERS_NUM}" 60 | } 61 | end 62 | end 63 | end 64 | end 65 | -------------------------------------------------------------------------------- /ansible-kubernetes-vagrant-tutorial.cast: -------------------------------------------------------------------------------- 1 | {"version": 2, "width": 175, "height": 36, "timestamp": 1595618136, "idle_time_limit": 2.0, "env": {"SHELL": "/bin/bash", "TERM": "xterm-256color", "USER": "jruiz"}} 2 | [0.037258, "o", "\u001b]0;jruiz@XPS13: 
~/git/github/ansible-vbox-vagrant-kubernetes\u0007\u001b[01;32mjruiz@XPS13\u001b[00m:\u001b[01;34m~/git/github/ansible-vbox-vagrant-kubernetes\u001b[00m$ "] 3 | [1.850754, "i", "vagrant up"] 4 | [1.851319, "o", "vagrant up"] 5 | [2.463199, "i", "\r"] 6 | [2.463847, "o", "\r\n"] 7 | [5.419943, "o", "\u001b[0mBringing machine 'k8s-m-1' up with 'virtualbox' provider...\u001b[0m\r\n"] 8 | [5.421557, "o", "\u001b[0mBringing machine 'k8s-n-1' up with 'virtualbox' provider...\u001b[0m\r\n"] 9 | [5.423079, "o", "\u001b[0mBringing machine 'k8s-n-2' up with 'virtualbox' provider...\u001b[0m\r\n"] 10 | [5.730498, "o", "\u001b[1m==> k8s-m-1: Importing base box 'bento/ubuntu-20.04'...\u001b[0m\r\n"] 11 | [10.229717, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 12 | [10.22985, "o", "\u001b[0mProgress: 20%\u001b[0m"] 13 | [10.834902, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 14 | [10.83504, "o", "\u001b[0mProgress: 30%\u001b[0m"] 15 | [11.037257, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 16 | [11.037396, "o", "\u001b[0mProgress: 40%\u001b[0m"] 17 | [12.047604, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 18 | [12.047758, "o", "\u001b[0mProgress: 50%\u001b[0m"] 19 | [12.249147, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 20 | [12.249292, "o", "\u001b[0mProgress: 80%\u001b[0m"] 21 | [13.465829, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 22 | [13.466336, "o", "\u001b[0mProgress: 90%\u001b[0m"] 23 | [16.715861, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 24 | [16.853981, "o", "\u001b[1m==> k8s-m-1: Matching MAC address for NAT networking...\u001b[0m\r\n"] 25 | [17.046439, "o", "\u001b[1m==> k8s-m-1: Checking if box 'bento/ubuntu-20.04' version '202005.21.0' is up to date...\u001b[0m\r\n"] 26 | [17.970897, "o", "\u001b[1m==> k8s-m-1: Setting the name of the VM: ansible-vbox-vagrant-kubernetes_k8s-m-1_1595618154375_4646\u001b[0m\r\n"] 27 | [19.01636, "o", "\u001b[1m==> k8s-m-1: Clearing any previously set network interfaces...\u001b[0m\r\n"] 28 | [19.256391, "o", "\u001b[1m==> k8s-m-1: Preparing network interfaces based on configuration...\u001b[0m\r\n"] 29 | [19.25685, "o", "\u001b[0m k8s-m-1: Adapter 1: nat\u001b[0m\r\n"] 30 | [19.257304, "o", "\u001b[0m k8s-m-1: Adapter 2: hostonly\u001b[0m\r\n"] 31 | [19.343717, "o", "\u001b[1m==> k8s-m-1: Forwarding ports...\u001b[0m\r\n"] 32 | [19.460042, "o", "\u001b[0m k8s-m-1: 22 (guest) => 2222 (host) (adapter 1)\u001b[0m\r\n"] 33 | [19.933816, "o", "\u001b[1m==> k8s-m-1: Running 'pre-boot' VM customizations...\u001b[0m\r\n"] 34 | [20.246667, "o", "\u001b[1m==> k8s-m-1: Booting VM...\u001b[0m\r\n"] 35 | [20.786693, "o", "\u001b[1m==> k8s-m-1: Waiting for machine to boot. 
This may take a few minutes...\u001b[0m\r\n"] 36 | [21.107462, "o", "\u001b[0m k8s-m-1: SSH address: 127.0.0.1:2222\u001b[0m\r\n"] 37 | [21.107642, "o", "\u001b[0m k8s-m-1: SSH username: vagrant\u001b[0m\r\n"] 38 | [21.107782, "o", "\u001b[0m k8s-m-1: SSH auth method: private key\u001b[0m\r\n"] 39 | [38.795395, "o", "\u001b[1m==> k8s-m-1: Machine booted and ready!\u001b[0m\r\n"] 40 | [38.795991, "o", "\u001b[1m==> k8s-m-1: Checking for guest additions in VM...\u001b[0m\r\n"] 41 | [38.861111, "o", "\u001b[1m==> k8s-m-1: Setting hostname...\u001b[0m\r\n"] 42 | [40.571904, "o", "\u001b[1m==> k8s-m-1: Configuring and enabling network interfaces...\u001b[0m\r\n"] 43 | [42.039674, "o", "\u001b[1m==> k8s-m-1: Mounting shared folders...\u001b[0m\r\n"] 44 | [42.0402, "o", "\u001b[0m k8s-m-1: /vagrant => /home/jruiz/git/github/ansible-vbox-vagrant-kubernetes\u001b[0m\r\n"] 45 | [42.546954, "o", "\u001b[1m==> k8s-m-1: Running provisioner: ansible...\u001b[0m\r\n"] 46 | [43.256566, "o", "\u001b[0m k8s-m-1: Running ansible-playbook...\u001b[0m\r\n"] 47 | [43.726326, "o", "\u001b[0m\r\nPLAY [k8s-m-*] *****************************************************************\r\n\u001b[0m"] 48 | [43.741372, "o", "\u001b[0m\r\nTASK [Gathering Facts] *********************************************************\r\n\u001b[0m"] 49 | [45.437431, "o", "\u001b[0m\u001b[0;32mok: [k8s-m-1]\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 50 | [45.443823, "o", "\u001b[0m\r\nTASK [add_packages : Add new repositories keys] ********************************\r\n\u001b[0m"] 51 | [46.804461, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1] => (item={'key': 'https://download.docker.com/linux/ubuntu/gpg'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 52 | [47.997402, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1] => (item={'key': 'https://packages.cloud.google.com/apt/doc/apt-key.gpg'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 53 | [48.003947, "o", "\u001b[0m\r\nTASK [add_packages : Add new repositories to sources] **************************\r\n\u001b[0m"] 54 | [56.923671, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1] => (item={'repo': 'deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 55 | [64.966525, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1] => (item={'repo': 'deb https://apt.kubernetes.io/ kubernetes-xenial main'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 56 | [64.973895, "o", "\u001b[0m\r\nTASK [add_packages : Force update cache if new keys added] *********************\r\n\u001b[0m"] 57 | [65.02803, "o", "\u001b[0m\u001b[0;32mok: [k8s-m-1]\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 58 | [65.041944, "o", "\u001b[0m\r\nTASK [add_packages : Remove packages] ******************************************\r\n\u001b[0m"] 59 | [65.883177, "o", "\u001b[0m\u001b[0;32mok: [k8s-m-1] => (item={'name': None})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 60 | [65.889784, "o", "\u001b[0m\r\nTASK [add_packages : Install packages] *****************************************\r\n\u001b[0m"] 61 | [73.033502, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1] => (item={'name': 'apt-transport-https'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 62 | [76.08873, "o", "\u001b[0m\u001b[0;32mok: [k8s-m-1] => (item={'name': 'curl'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 63 | [98.923075, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1] => (item={'name': 'docker-ce'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 64 | [101.913034, "o", "\u001b[0m\u001b[0;32mok: [k8s-m-1] => (item={'name': 
'docker-ce-cli'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 65 | [103.840506, "o", "\u001b[0m\u001b[0;32mok: [k8s-m-1] => (item={'name': 'containerd.io'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 66 | [122.972057, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1] => (item={'name': 'kubeadm'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 67 | [124.866436, "o", "\u001b[0m\u001b[0;32mok: [k8s-m-1] => (item={'name': 'kubelet'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 68 | [126.688289, "o", "\u001b[0m\u001b[0;32mok: [k8s-m-1] => (item={'name': 'kubectl'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 69 | [126.697904, "o", "\u001b[0m\r\nTASK [k8s/common : Remove current swaps from fstab] ****************************\r\n\u001b[0m"] 70 | [127.062555, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 71 | [127.07261, "o", "\u001b[0m\r\nTASK [k8s/common : Disable swap] ***********************************************\r\n\u001b[0m"] 72 | [127.578868, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 73 | [127.585241, "o", "\u001b[0m\r\nTASK [k8s/common : Add k8s_common_admin_user user to docker group] *************\r\n\u001b[0m"] 74 | [128.045171, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 75 | [128.051769, "o", "\u001b[0m\r\nTASK [k8s/common : Check that docker service is started] ***********************\r\n\u001b[0m"] 76 | [128.620876, "o", "\u001b[0m\u001b[0;32mok: [k8s-m-1]\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 77 | [128.655994, "o", "\u001b[0m\r\nTASK [k8s/common : Configure node-ip 192.168.50.11 at kubelet] *****************\r\n\u001b[0m"] 78 | [128.924803, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 79 | [128.930726, "o", "\u001b[0m\r\nTASK [k8s/master : Configure kubectl] ******************************************\r\n\u001b[0m"] 80 | [176.680239, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 81 | [176.69161, "o", "\u001b[0m\r\nTASK [k8s/master : Create .kube dir for vagrant user] **************************\r\n\u001b[0m"] 82 | [177.271857, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 83 | [177.284417, "o", "\u001b[0m\r\nTASK [k8s/master : Copy kube config to vagrant home .kube dir] *****************\r\n\u001b[0m"] 84 | [177.679767, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 85 | [177.688061, "o", "\u001b[0m\r\nTASK [k8s/master : Rewrite calico.yaml] ****************************************\r\n\u001b[0m"] 86 | [178.257155, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 87 | [178.266733, "o", "\u001b[0m\r\nTASK [k8s/master : Install Calico (using Kubernetes API datastore)] ************\r\n\u001b[0m"] 88 | [179.022876, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 89 | [179.02944, "o", "\u001b[0m\r\nTASK [k8s/master : Generate join command] **************************************\r\n\u001b[0m"] 90 | [179.395205, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 91 | [179.403364, "o", "\u001b[0m\r\nTASK [k8s/master : Copy join command for ditwl-k8s-01 cluster to local file] ***\r\n\u001b[0m"] 92 | [179.755779, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1 -> localhost]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 93 | [179.756408, "o", 
"\u001b[0m\r\nRUNNING HANDLER [k8s/common : restart kubelet] *********************************\r\n\u001b[0m"] 94 | [183.042817, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-m-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 95 | [183.045221, "o", "\u001b[0m\u001b[1;35m[WARNING]: Could not match supplied host pattern, ignoring: k8s-n-*\u001b[0m\r\n\u001b[1;35m\u001b[0m\u001b[0m"] 96 | [183.046012, "o", "\u001b[0m\r\nPLAY [k8s-n-*] *****************************************************************\r\n\u001b[0m"] 97 | [183.04657, "o", "\u001b[0m\u001b[0;36mskipping: no hosts matched\u001b[0m\r\n\u001b[0;36m\u001b[0m\r\nPLAY RECAP *********************************************************************\r\n\u001b[0;33mk8s-m-1\u001b[0m : \u001b[0;32mok=19 \u001b[0m \u001b[0;33mchanged=15 \u001b[0m unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 \r\n\r\n\u001b[0m"] 98 | [183.40449, "o", "\u001b[1m==> k8s-n-1: Importing base box 'bento/ubuntu-20.04'...\u001b[0m\r\n"] 99 | [187.76204, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 100 | [187.76325, "o", "\u001b[0mProgress: 20%\u001b[0m"] 101 | [188.567352, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 102 | [188.567492, "o", "\u001b[0mProgress: 40%\u001b[0m"] 103 | [189.574566, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 104 | [189.574624, "o", "\u001b[0mProgress: 80%\u001b[0m"] 105 | [190.721102, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 106 | [190.721304, "o", "\u001b[0mProgress: 90%\u001b[0m"] 107 | [193.421777, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 108 | [193.576299, "o", "\u001b[1m==> k8s-n-1: Matching MAC address for NAT networking...\u001b[0m\r\n"] 109 | [193.839722, "o", "\u001b[1m==> k8s-n-1: Checking if box 'bento/ubuntu-20.04' version '202005.21.0' is up to date...\u001b[0m\r\n"] 110 | [194.75713, "o", "\u001b[1m==> k8s-n-1: Setting the name of the VM: ansible-vbox-vagrant-kubernetes_k8s-n-1_1595618331156_20645\u001b[0m\r\n"] 111 | [195.439016, "o", "\u001b[1m==> k8s-n-1: Fixed port collision for 22 => 2222. Now on port 2200.\u001b[0m\r\n"] 112 | [195.987432, "o", "\u001b[1m==> k8s-n-1: Clearing any previously set network interfaces...\u001b[0m\r\n"] 113 | [196.293411, "o", "\u001b[1m==> k8s-n-1: Preparing network interfaces based on configuration...\u001b[0m\r\n"] 114 | [196.29384, "o", "\u001b[0m k8s-n-1: Adapter 1: nat\u001b[0m\r\n"] 115 | [196.294404, "o", "\u001b[0m k8s-n-1: Adapter 2: hostonly\u001b[0m\r\n"] 116 | [196.399613, "o", "\u001b[1m==> k8s-n-1: Forwarding ports...\u001b[0m\r\n"] 117 | [196.621107, "o", "\u001b[0m k8s-n-1: 22 (guest) => 2200 (host) (adapter 1)\u001b[0m\r\n"] 118 | [197.027417, "o", "\u001b[1m==> k8s-n-1: Running 'pre-boot' VM customizations...\u001b[0m\r\n"] 119 | [197.28241, "o", "\u001b[1m==> k8s-n-1: Booting VM...\u001b[0m\r\n"] 120 | [197.694016, "o", "\u001b[1m==> k8s-n-1: Waiting for machine to boot. 
This may take a few minutes...\u001b[0m\r\n"] 121 | [198.025331, "o", "\u001b[0m k8s-n-1: SSH address: 127.0.0.1:2200\u001b[0m\r\n"] 122 | [198.025547, "o", "\u001b[0m k8s-n-1: SSH username: vagrant\u001b[0m\r\n"] 123 | [198.025691, "o", "\u001b[0m k8s-n-1: SSH auth method: private key\u001b[0m\r\n"] 124 | [215.697742, "o", "\u001b[1m==> k8s-n-1: Machine booted and ready!\u001b[0m\r\n"] 125 | [215.69848, "o", "\u001b[1m==> k8s-n-1: Checking for guest additions in VM...\u001b[0m\r\n"] 126 | [215.803192, "o", "\u001b[1m==> k8s-n-1: Setting hostname...\u001b[0m\r\n"] 127 | [218.717324, "o", "\u001b[1m==> k8s-n-1: Configuring and enabling network interfaces...\u001b[0m\r\n"] 128 | [220.417224, "o", "\u001b[1m==> k8s-n-1: Mounting shared folders...\u001b[0m\r\n"] 129 | [220.417617, "o", "\u001b[0m k8s-n-1: /vagrant => /home/jruiz/git/github/ansible-vbox-vagrant-kubernetes\u001b[0m\r\n"] 130 | [221.122593, "o", "\u001b[1m==> k8s-n-1: Running provisioner: ansible...\u001b[0m\r\n"] 131 | [222.405361, "o", "\u001b[0m k8s-n-1: Running ansible-playbook...\u001b[0m\r\n"] 132 | [223.218004, "o", "\u001b[0m\r\nPLAY [k8s-m-*] *****************************************************************\r\n\u001b[0m"] 133 | [223.218618, "o", "\u001b[0m\u001b[0;36mskipping: no hosts matched\u001b[0m\r\n\u001b[0;36m\u001b[0m\u001b[0m"] 134 | [223.220394, "o", "\u001b[0m\r\nPLAY [k8s-n-*] *****************************************************************\r\n\u001b[0m"] 135 | [223.24665, "o", "\u001b[0m\r\nTASK [Gathering Facts] *********************************************************\r\n\u001b[0m"] 136 | [225.666865, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-1]\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 137 | [225.674962, "o", "\u001b[0m\r\nTASK [add_packages : Add new repositories keys] ********************************\r\n\u001b[0m"] 138 | [227.369406, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1] => (item={'key': 'https://download.docker.com/linux/ubuntu/gpg'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 139 | [228.787217, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1] => (item={'key': 'https://packages.cloud.google.com/apt/doc/apt-key.gpg'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 140 | [228.794686, "o", "\u001b[0m\r\nTASK [add_packages : Add new repositories to sources] **************************\r\n\u001b[0m"] 141 | [238.532319, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1] => (item={'repo': 'deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 142 | [247.515206, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1] => (item={'repo': 'deb https://apt.kubernetes.io/ kubernetes-xenial main'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 143 | [247.526093, "o", "\u001b[0m\r\nTASK [add_packages : Force update cache if new keys added] *********************\r\n\u001b[0m"] 144 | [247.591032, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-1]\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 145 | [247.604731, "o", "\u001b[0m\r\nTASK [add_packages : Remove packages] ******************************************\r\n\u001b[0m"] 146 | [248.822852, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-1] => (item={'name': None})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 147 | [248.832598, "o", "\u001b[0m\r\nTASK [add_packages : Install packages] *****************************************\r\n\u001b[0m"] 148 | [256.968678, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1] => (item={'name': 'apt-transport-https'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 149 | [258.90408, "o", 
"\u001b[0m\u001b[0;32mok: [k8s-n-1] => (item={'name': 'curl'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 150 | [282.565626, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1] => (item={'name': 'docker-ce'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 151 | [284.687431, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-1] => (item={'name': 'docker-ce-cli'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 152 | [286.729258, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-1] => (item={'name': 'containerd.io'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 153 | [305.356598, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1] => (item={'name': 'kubeadm'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 154 | [307.464521, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-1] => (item={'name': 'kubelet'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 155 | [309.374811, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-1] => (item={'name': 'kubectl'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 156 | [309.386298, "o", "\u001b[0m\r\nTASK [k8s/common : Remove current swaps from fstab] ****************************\r\n\u001b[0m"] 157 | [309.777553, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 158 | [309.787608, "o", "\u001b[0m\r\nTASK [k8s/common : Disable swap] ***********************************************\r\n\u001b[0m"] 159 | [310.225907, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 160 | [310.236454, "o", "\u001b[0m\r\nTASK [k8s/common : Add k8s_common_admin_user user to docker group] *************\r\n\u001b[0m"] 161 | [310.752153, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 162 | [310.759655, "o", "\u001b[0m\r\nTASK [k8s/common : Check that docker service is started] ***********************\r\n\u001b[0m"] 163 | [311.345791, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-1]\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 164 | [311.383039, "o", "\u001b[0m\r\nTASK [k8s/common : Configure node-ip 192.168.50.12 at kubelet] *****************\r\n\u001b[0m"] 165 | [311.696991, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 166 | [311.70491, "o", "\u001b[0m\r\nTASK [k8s/node : Copy the join command to ditwl-k8s-01 cluster] ****************\r\n\u001b[0m"] 167 | [312.515576, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 168 | [312.524217, "o", "\u001b[0m\r\nTASK [k8s/node : Join the node to cluster ditwl-k8s-01] ************************\r\n\u001b[0m"] 169 | [320.676716, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 170 | [320.677427, "o", "\u001b[0m\r\nRUNNING HANDLER [k8s/common : restart kubelet] *********************************\r\n\u001b[0m"] 171 | [321.437271, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-1]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 172 | [321.439766, "o", "\u001b[0m\r\nPLAY RECAP *********************************************************************\r\n\u001b[0m"] 173 | [321.440446, "o", "\u001b[0m\u001b[0;33mk8s-n-1\u001b[0m : \u001b[0;32mok=14 \u001b[0m \u001b[0;33mchanged=10 \u001b[0m unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 \r\n\r\n\u001b[0m"] 174 | [321.871534, "o", "\u001b[1m==> k8s-n-2: Importing base box 'bento/ubuntu-20.04'...\u001b[0m\r\n"] 175 | [326.21353, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 176 | [326.213688, "o", "\u001b[0mProgress: 10%\u001b[0m"] 177 | [326.415207, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 178 | [326.415328, "o", "\u001b[0mProgress: 20%\u001b[0m"] 
179 | [327.048819, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 180 | [327.04894, "o", "\u001b[0mProgress: 40%\u001b[0m"] 181 | [328.065264, "o", "\u001b[0m\r\u001b[K\u001b[0m\u001b[0mProgress: 60%\u001b[0m"] 182 | [328.269546, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 183 | [328.269718, "o", "\u001b[0mProgress: 80%\u001b[0m"] 184 | [329.481672, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 185 | [329.482086, "o", "\u001b[0mProgress: 90%\u001b[0m"] 186 | [331.270066, "o", "\u001b[0m\r\u001b[K\u001b[0m"] 187 | [331.419102, "o", "\u001b[1m==> k8s-n-2: Matching MAC address for NAT networking...\u001b[0m\r\n"] 188 | [331.697907, "o", "\u001b[1m==> k8s-n-2: Checking if box 'bento/ubuntu-20.04' version '202005.21.0' is up to date...\u001b[0m\r\n"] 189 | [332.544716, "o", "\u001b[1m==> k8s-n-2: Setting the name of the VM: ansible-vbox-vagrant-kubernetes_k8s-n-2_1595618468954_99900\u001b[0m\r\n"] 190 | [333.189904, "o", "\u001b[1m==> k8s-n-2: Fixed port collision for 22 => 2222. Now on port 2201.\u001b[0m\r\n"] 191 | [333.782493, "o", "\u001b[1m==> k8s-n-2: Clearing any previously set network interfaces...\u001b[0m\r\n"] 192 | [334.080209, "o", "\u001b[1m==> k8s-n-2: Preparing network interfaces based on configuration...\u001b[0m\r\n"] 193 | [334.081339, "o", "\u001b[0m k8s-n-2: Adapter 1: nat\u001b[0m\r\n"] 194 | [334.082366, "o", "\u001b[0m k8s-n-2: Adapter 2: hostonly\u001b[0m\r\n"] 195 | [334.247258, "o", "\u001b[1m==> k8s-n-2: Forwarding ports...\u001b[0m\r\n"] 196 | [334.400447, "o", "\u001b[0m k8s-n-2: 22 (guest) => 2201 (host) (adapter 1)\u001b[0m\r\n"] 197 | [334.787554, "o", "\u001b[1m==> k8s-n-2: Running 'pre-boot' VM customizations...\u001b[0m\r\n"] 198 | [335.113394, "o", "\u001b[1m==> k8s-n-2: Booting VM...\u001b[0m\r\n"] 199 | [335.527417, "o", "\u001b[1m==> k8s-n-2: Waiting for machine to boot. 
This may take a few minutes...\u001b[0m\r\n"] 200 | [335.811183, "o", "\u001b[0m k8s-n-2: SSH address: 127.0.0.1:2201\u001b[0m\r\n"] 201 | [335.81146, "o", "\u001b[0m k8s-n-2: SSH username: vagrant\u001b[0m\r\n"] 202 | [335.811748, "o", "\u001b[0m k8s-n-2: SSH auth method: private key\u001b[0m\r\n"] 203 | [353.534012, "o", "\u001b[1m==> k8s-n-2: Machine booted and ready!\u001b[0m\r\n"] 204 | [353.534406, "o", "\u001b[1m==> k8s-n-2: Checking for guest additions in VM...\u001b[0m\r\n"] 205 | [353.640706, "o", "\u001b[1m==> k8s-n-2: Setting hostname...\u001b[0m\r\n"] 206 | [356.778084, "o", "\u001b[1m==> k8s-n-2: Configuring and enabling network interfaces...\u001b[0m\r\n"] 207 | [358.351632, "o", "\u001b[1m==> k8s-n-2: Mounting shared folders...\u001b[0m\r\n"] 208 | [358.351995, "o", "\u001b[0m k8s-n-2: /vagrant => /home/jruiz/git/github/ansible-vbox-vagrant-kubernetes\u001b[0m\r\n"] 209 | [358.878527, "o", "\u001b[1m==> k8s-n-2: Running provisioner: ansible...\u001b[0m\r\n"] 210 | [360.201694, "o", "\u001b[0m k8s-n-2: Running ansible-playbook...\u001b[0m\r\n"] 211 | [360.937312, "o", "\u001b[0m\r\nPLAY [k8s-m-*] *****************************************************************\r\n\u001b[0m"] 212 | [360.938253, "o", "\u001b[0m\u001b[0;36mskipping: no hosts matched\u001b[0m\r\n\u001b[0;36m\u001b[0m\u001b[0m"] 213 | [360.939483, "o", "\u001b[0m"] 214 | [360.939652, "o", "\r\nPLAY [k8s-n-*] *****************************************************************\r\n\u001b[0m"] 215 | [360.958747, "o", "\u001b[0m\r\nTASK [Gathering Facts] *********************************************************\r\n\u001b[0m"] 216 | [363.042774, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-2]\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 217 | [363.0493, "o", "\u001b[0m\r\nTASK [add_packages : Add new repositories keys] ********************************\r\n\u001b[0m"] 218 | [364.516064, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2] => (item={'key': 'https://download.docker.com/linux/ubuntu/gpg'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 219 | [366.148539, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2] => (item={'key': 'https://packages.cloud.google.com/apt/doc/apt-key.gpg'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 220 | [366.15602, "o", "\u001b[0m\r\nTASK [add_packages : Add new repositories to sources] **************************\r\n\u001b[0m"] 221 | [375.791437, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2] => (item={'repo': 'deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 222 | [385.087496, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2] => (item={'repo': 'deb https://apt.kubernetes.io/ kubernetes-xenial main'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 223 | [385.100242, "o", "\u001b[0m\r\nTASK [add_packages : Force update cache if new keys added] *********************\r\n\u001b[0m"] 224 | [385.181084, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-2]\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 225 | [385.201434, "o", "\u001b[0m\r\nTASK [add_packages : Remove packages] ******************************************\r\n\u001b[0m"] 226 | [386.397126, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-2] => (item={'name': None})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 227 | [386.404482, "o", "\u001b[0m\r\nTASK [add_packages : Install packages] *****************************************\r\n\u001b[0m"] 228 | [394.262617, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2] => (item={'name': 'apt-transport-https'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 229 | [397.451651, 
"o", "\u001b[0m\u001b[0;32mok: [k8s-n-2] => (item={'name': 'curl'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 230 | [422.215917, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2] => (item={'name': 'docker-ce'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 231 | [425.627083, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-2] => (item={'name': 'docker-ce-cli'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 232 | [427.661305, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-2] => (item={'name': 'containerd.io'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 233 | [449.491324, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2] => (item={'name': 'kubeadm'})\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 234 | [452.685575, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-2] => (item={'name': 'kubelet'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 235 | [456.158962, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-2] => (item={'name': 'kubectl'})\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 236 | [456.177907, "o", "\u001b[0m\r\nTASK [k8s/common : Remove current swaps from fstab] ****************************\r\n\u001b[0m"] 237 | [456.629684, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 238 | [456.643318, "o", "\u001b[0m\r\nTASK [k8s/common : Disable swap] ***********************************************\r\n\u001b[0m"] 239 | [457.480493, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 240 | [457.487047, "o", "\u001b[0m\r\nTASK [k8s/common : Add k8s_common_admin_user user to docker group] *************\r\n\u001b[0m"] 241 | [458.075539, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 242 | [458.082961, "o", "\u001b[0m\r\nTASK [k8s/common : Check that docker service is started] ***********************\r\n\u001b[0m"] 243 | [458.861856, "o", "\u001b[0m\u001b[0;32mok: [k8s-n-2]\u001b[0m\r\n\u001b[0;32m\u001b[0m\u001b[0m"] 244 | [458.906111, "o", "\u001b[0m\r\nTASK [k8s/common : Configure node-ip 192.168.50.13 at kubelet] *****************\r\n\u001b[0m"] 245 | [459.213084, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 246 | [459.221628, "o", "\u001b[0m\r\nTASK [k8s/node : Copy the join command to ditwl-k8s-01 cluster] ****************\r\n\u001b[0m"] 247 | [460.241703, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 248 | [460.258546, "o", "\u001b[0m\r\nTASK [k8s/node : Join the node to cluster ditwl-k8s-01] ************************\r\n\u001b[0m"] 249 | [468.705303, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 250 | [468.706051, "o", "\u001b[0m\r\nRUNNING HANDLER [k8s/common : restart kubelet] *********************************\r\n\u001b[0m"] 251 | [469.428128, "o", "\u001b[0m\u001b[0;33mchanged: [k8s-n-2]\u001b[0m\r\n\u001b[0;33m\u001b[0m\u001b[0m"] 252 | [469.430158, "o", "\u001b[0m\r\nPLAY RECAP *********************************************************************\r\n\u001b[0m"] 253 | [469.43092, "o", "\u001b[0m\u001b[0;33mk8s-n-2\u001b[0m : \u001b[0;32mok=14 \u001b[0m \u001b[0;33mchanged=10 \u001b[0m unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 \r\n\r\n\u001b[0m"] 254 | [469.654186, "o", "\u001b]0;jruiz@XPS13: ~/git/github/ansible-vbox-vagrant-kubernetes\u0007\u001b[01;32mjruiz@XPS13\u001b[00m:\u001b[01;34m~/git/github/ansible-vbox-vagrant-kubernetes\u001b[00m$ "] 255 | [480.232739, "i", "\u0004"] 256 | [480.233519, "o", "\r\n"] 257 | 
-------------------------------------------------------------------------------- /istio-services.yaml: -------------------------------------------------------------------------------- 1 | 2 | #Grafana 3 | apiVersion: v1 4 | kind: Service 5 | metadata: 6 | labels: 7 | app: grafana 8 | chart: grafana 9 | heritage: Tiller 10 | release: istio 11 | name: grafana-np 12 | namespace: istio-system 13 | spec: 14 | ports: 15 | - name: http 16 | nodePort: 32493 17 | port: 3000 18 | protocol: TCP 19 | targetPort: 3000 20 | selector: 21 | app: grafana 22 | sessionAffinity: None 23 | type: NodePort 24 | --- 25 | #prometheus 26 | apiVersion: v1 27 | kind: Service 28 | metadata: 29 | labels: 30 | app: prometheus 31 | chart: prometheus 32 | heritage: Tiller 33 | release: istio 34 | name: prometheus-np 35 | namespace: istio-system 36 | spec: 37 | ports: 38 | - name: http 39 | nodePort: 32494 40 | port: 9090 41 | protocol: TCP 42 | targetPort: 9090 43 | selector: 44 | app: prometheus 45 | sessionAffinity: None 46 | type: NodePort 47 | --- 48 | #jaeger 49 | apiVersion: v1 50 | kind: Service 51 | metadata: 52 | labels: 53 | app: jaeger 54 | chart: tracing 55 | heritage: Tiller 56 | release: istio 57 | name: tracing-np 58 | namespace: istio-system 59 | spec: 60 | ports: 61 | - name: http-tracing 62 | nodePort: 32495 63 | port: 80 64 | protocol: TCP 65 | targetPort: 16686 66 | selector: 67 | app: jaeger 68 | sessionAffinity: None 69 | type: NodePort 70 | --- 71 | #kiali 72 | apiVersion: v1 73 | kind: Service 74 | metadata: 75 | labels: 76 | app: kiali 77 | chart: kiali 78 | heritage: Tiller 79 | release: istio 80 | name: kiali-np 81 | namespace: istio-system 82 | spec: 83 | ports: 84 | - name: http-kiali 85 | nodePort: 32496 86 | port: 20001 87 | protocol: TCP 88 | targetPort: 20001 89 | selector: 90 | app: kiali 91 | sessionAffinity: None 92 | type: NodePort 93 | 94 | -------------------------------------------------------------------------------- /kubernetes-dashboard-service-np.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ServiceAccount 4 | metadata: 5 | name: admin-user 6 | namespace: kubernetes-dashboard 7 | 8 | --- 9 | apiVersion: rbac.authorization.k8s.io/v1 10 | kind: ClusterRoleBinding 11 | metadata: 12 | name: admin-user 13 | roleRef: 14 | apiGroup: rbac.authorization.k8s.io 15 | kind: ClusterRole 16 | name: cluster-admin 17 | subjects: 18 | - kind: ServiceAccount 19 | name: admin-user 20 | namespace: kubernetes-dashboard 21 | 22 | --- 23 | kind: Service 24 | apiVersion: v1 25 | metadata: 26 | namespace: kubernetes-dashboard 27 | name: kubernetes-dashboard-service-np 28 | labels: 29 | k8s-app: kubernetes-dashboard 30 | spec: 31 | type: NodePort 32 | ports: 33 | - port: 8443 34 | nodePort: 30002 35 | targetPort: 8443 36 | protocol: TCP 37 | selector: 38 | k8s-app: kubernetes-dashboard 39 | -------------------------------------------------------------------------------- /nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: nginx 9 | replicas: 3 10 | template: 11 | metadata: 12 | labels: 13 | app: nginx 14 | spec: 15 | containers: 16 | - name: my-echo 17 | image: gcr.io/google_containers/echoserver:1.8 18 | --- 19 | 20 | apiVersion: v1 21 | kind: Service 22 | metadata: 23 | name: nginx-service-np 24 | labels: 25 | name: nginx-service-np 26 | spec: 27 | 
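  # NodePort exposes this Service on a static port of every node's IP
  # (Kubernetes allocates NodePorts from the 30000-32767 range by default),
  # which is why the VirtualBox host-only addresses listed in the comments
  # below are reachable from the host machine.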
type: NodePort 28 | ports: 29 | - port: 8082 # Cluster IP http://10.109.199.234:8082 30 | targetPort: 8080 # Application port 31 | nodePort: 30000 # (EXTERNAL-IP VirtualBox IPs) http://192.168.50.11:30000/ http://192.168.50.12:30000/ http://192.168.50.13:30000/ 32 | protocol: TCP 33 | name: http 34 | selector: 35 | app: nginx 36 | 37 | #NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE 38 | #... 39 | #service/nginx-service-np NodePort 10.109.199.234 8082:30000/TCP 18m 40 | 41 | -------------------------------------------------------------------------------- /roles/common/add_packages/README.md: -------------------------------------------------------------------------------- 1 | # ADD_PACKAGES 2 | Uses apt-get to install a list of packages in the system 3 | If a package needs customization, should be done outside (in another rol) 4 | -------------------------------------------------------------------------------- /roles/common/add_packages/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | linux_add_packages_cache_valid_time: 60 4 | upgrade_all: false 5 | 6 | #linux_add_packages_repositories: 7 | #- repo: 8 | 9 | #linux_add_packages_keys: 10 | #- key: 11 | 12 | #linux_add_packages_names: 13 | #- name: 14 | 15 | #linux_remove_packages_names: 16 | #- name: 17 | -------------------------------------------------------------------------------- /roles/common/add_packages/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /roles/common/add_packages/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # From https://www.cyberciti.biz/faq/ansible-apt-update-all-packages-on-ubuntu-debian-linux/ 3 | - name: Upgrade all apt packages 4 | apt: upgrade=dist force_apt_get=yes update_cache=yes 5 | when: upgrade_all 6 | 7 | - name: Check if a reboot is needed for Debian and Ubuntu boxes 8 | register: reboot_required_file 9 | stat: path=/var/run/reboot-required get_md5=no 10 | 11 | - name: Reboot the Debian or Ubuntu server 12 | reboot: 13 | msg: "Reboot initiated by Ansible due to kernel updates" 14 | connect_timeout: 5 15 | reboot_timeout: 300 16 | pre_reboot_delay: 0 17 | post_reboot_delay: 30 18 | test_command: uptime 19 | when: reboot_required_file.stat.exists 20 | 21 | - name: Add new repositories keys 22 | apt_key: 23 | url='{{item.key}}' 24 | with_items: "{{ linux_add_packages_keys | default([])}}" 25 | when: linux_add_packages_keys is defined and not (linux_add_packages_keys is none or linux_add_packages_keys | trim == '') 26 | register: aptnewkeys 27 | 28 | - name: Add new repositories to sources 29 | apt_repository: 30 | repo='{{item.repo}}' 31 | with_items: "{{ linux_add_packages_repositories | default([])}}" 32 | when: linux_add_packages_repositories is defined and not (linux_add_packages_repositories is none or linux_add_packages_repositories | trim == '') 33 | 34 | - name: Force update cache if new keys added 35 | set_fact: 36 | linux_add_packages_cache_valid_time: 0 37 | when: aptnewkeys.changed 38 | 39 | - name: Remove packages 40 | apt: 41 | name={{ item.name }} 42 | state=absent 43 | with_items: "{{ linux_remove_packages_names | default([])}}" 44 | when: linux_remove_packages_names is defined and not (linux_remove_packages_names is none or linux_remove_packages_names | trim == '') 45 | 46 | - name: Install packages 47 | apt: 48 | name={{ 
item.name }} 49 | state=present 50 | update_cache=yes 51 | cache_valid_time={{linux_add_packages_cache_valid_time}} 52 | with_items: "{{ linux_add_packages_names | default([])}}" 53 | when: linux_add_packages_names is defined and not (linux_add_packages_names is none or linux_add_packages_names | trim == '') 54 | -------------------------------------------------------------------------------- /roles/common/pam_limits/README.md: -------------------------------------------------------------------------------- 1 | # PAM_LIMITS 2 | Sets the pam limits of the host. 3 | 4 | -------------------------------------------------------------------------------- /roles/common/pam_limits/defaults/main.yml: -------------------------------------------------------------------------------- 1 | # Common PAM LIMITS configuration 2 | --- 3 | pam_domain: "*" 4 | 5 | pam_limits: 6 | - comment: "Open file descriptors: soft" 7 | type: soft 8 | limit: nofile 9 | value: 4096 10 | - comment: "Open file descriptors: hard" 11 | type: hard 12 | limit: nofile 13 | value: 65536 14 | - comment: "Number of processes available to a single user: soft" 15 | type: soft 16 | limit: nproc 17 | value: 2047 18 | - comment: "Number of processes available to a single user: hard" 19 | type: hard 20 | limit: nproc 21 | value: 16384 22 | - comment: "Size of the stack segment of the process: soft" 23 | type: soft 24 | limit: stack 25 | value: 10240 26 | - comment: "Size of the stack segment of the process: hard" 27 | type: hard 28 | limit: stack 29 | value: 32768 30 | - comment: "Maximum locked memory limit mem lock 90% of RAM: soft" 31 | type: soft 32 | limit: memlock 33 | value: 1887437 34 | - comment: "Maximum locked memory limit mem lock 90% of RAM: hard" 35 | type: hard 36 | limit: memlock 37 | value: 1887437 38 | -------------------------------------------------------------------------------- /roles/common/pam_limits/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: roles/linux/pam_limits/tasks/main.yml 3 | 4 | - name: Set user limits 5 | pam_limits: domain={{ pam_domain }} limit_type={{ item.type }} limit_item={{ item.limit }} value={{ item.value }} 6 | with_items: '{{pam_limits}}' 7 | -------------------------------------------------------------------------------- /roles/k8s.retry: -------------------------------------------------------------------------------- 1 | k8s-m-1 2 | -------------------------------------------------------------------------------- /roles/k8s.yml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-m-* 2 | become: yes 3 | roles: 4 | - { role: k8s/master} 5 | 6 | - hosts: k8s-n-* 7 | become: yes 8 | roles: 9 | - { role: k8s/node} 10 | -------------------------------------------------------------------------------- /roles/k8s/common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | k8s_common_add_packages_keys: 2 | - key: https://download.docker.com/linux/ubuntu/gpg 3 | - key: https://packages.cloud.google.com/apt/doc/apt-key.gpg 4 | 5 | k8s_common_add_packages_repositories: 6 | - repo: "deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ansible_distribution_release}} stable" 7 | - repo: "deb https://apt.kubernetes.io/ kubernetes-xenial main" #k8s not available for Bionic (Ubuntu 18.04) 8 | 9 | k8s_common_add_packages_names: 10 | - name: apt-transport-https 11 | - name: curl 12 | - name: containerd.io 13 | - name: kubeadm 14 | - name: 
kubelet 15 | - name: kubectl 16 | 17 | k8s_common_remove_packages_names: 18 | - name: 19 | 20 | k8s_common_modprobe: 21 | - name: overlay 22 | - name: br_netfilter 23 | 24 | k8s_common_sysctl: 25 | - name: net.bridge.bridge-nf-call-iptables 26 | value: 1 27 | - name: net.ipv4.ip_forward 28 | value: 1 29 | - name: net.bridge.bridge-nf-call-ip6tables 30 | value: 1 31 | 32 | k8s_common_admin_user: "ubuntu" 33 | k8s_common_admin_group: "ubuntu" 34 | 35 | 36 | -------------------------------------------------------------------------------- /roles/k8s/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | - name: restart containerd 2 | service: 3 | name: containerd 4 | state: restarted 5 | daemon_reload: yes 6 | 7 | - name: restart kubelet 8 | service: 9 | name: kubelet 10 | state: restarted 11 | daemon_reload: yes 12 | -------------------------------------------------------------------------------- /roles/k8s/common/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - { role: common/pam_limits} 3 | - { role: common/add_packages, 4 | linux_add_packages_repositories: "{{ k8s_common_add_packages_repositories }}", 5 | linux_add_packages_keys: "{{ k8s_common_add_packages_keys }}", 6 | linux_add_packages_names: "{{ k8s_common_add_packages_names }}", 7 | linux_remove_packages_names: "{{ k8s_common_remove_packages_names }}", 8 | upgrade_all: false 9 | } -------------------------------------------------------------------------------- /roles/k8s/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | 2 | - name: Remove current swaps from fstab 3 | lineinfile: 4 | dest: /etc/fstab 5 | regexp: '^/[\S]+\s+none\s+swap ' 6 | state: absent 7 | 8 | - name: Disable swap 9 | command: swapoff -a 10 | when: ansible_swaptotal_mb > 0 11 | 12 | - name: Remove swapfile from /etc/fstab 13 | mount: 14 | name: "{{ item }}" 15 | fstype: swap 16 | state: absent 17 | with_items: 18 | - swap 19 | - none 20 | 21 | - name: Configure containerd.conf modules 22 | template: 23 | src: etc/modules-load.d/containerd.conf 24 | dest: /etc/modules-load.d/containerd.conf 25 | 26 | - name: Load containerd kernel modules 27 | modprobe: 28 | name: "{{ item.name }}" 29 | state: present 30 | loop: "{{ k8s_common_modprobe }}" 31 | 32 | - name: Configure kubernetes-cri sys params 33 | sysctl: 34 | name: "{{ item.name }}" 35 | value: "{{ item.value }}" 36 | state: present 37 | reload: yes 38 | loop: "{{ k8s_common_sysctl }}" 39 | 40 | # https://github.com/containerd/containerd/issues/4581 41 | # File etc/containerd/config.toml needs to be deleted before kubeadm init 42 | - name: Configure containerd.conf 43 | template: 44 | src: etc/containerd/config.toml 45 | dest: /etc/containerd/config.toml 46 | notify: restart containerd 47 | 48 | - name: Configure node-ip {{ k8s_node_public_ip }} at kubelet 49 | lineinfile: 50 | path: '/etc/systemd/system/kubelet.service.d/10-kubeadm.conf' 51 | line: 'Environment="KUBELET_EXTRA_ARGS=--node-ip={{ k8s_node_public_ip }}"' 52 | regexp: 'KUBELET_EXTRA_ARGS=' 53 | insertafter: '\[Service\]' 54 | state: present 55 | notify: restart kubelet 56 | 57 | - name: restart containerd 58 | service: 59 | name: containerd 60 | state: restarted 61 | daemon_reload: yes 62 | 63 | - name: Delete configuration for containerd.conf as kubeadm is unable to detect containerd (see https://github.com/containerd/containerd/issues/4581) 64 | file: 65 | state: absent 
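    # Removing config.toml makes containerd start from its built-in defaults,
    # in which the CRI plugin is enabled; the issue linked in the task name
    # describes kubeadm failing to detect containerd when a config that
    # disables CRI (as shipped by the containerd.io package) is present.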
66 | path: /etc/containerd/config.toml 67 | 68 | - name: Restart services if needed 69 | meta: flush_handlers 70 | -------------------------------------------------------------------------------- /roles/k8s/common/templates/etc/containerd/config.toml: -------------------------------------------------------------------------------- 1 | version = 2 2 | root = "/var/lib/containerd" 3 | state = "/run/containerd" 4 | plugin_dir = "" 5 | disabled_plugins = [] 6 | required_plugins = [] 7 | oom_score = 0 8 | 9 | [grpc] 10 | address = "/run/containerd/containerd.sock" 11 | tcp_address = "" 12 | tcp_tls_cert = "" 13 | tcp_tls_key = "" 14 | uid = 0 15 | gid = 0 16 | max_recv_message_size = 16777216 17 | max_send_message_size = 16777216 18 | 19 | [ttrpc] 20 | address = "" 21 | uid = 0 22 | gid = 0 23 | 24 | [debug] 25 | address = "" 26 | uid = 0 27 | gid = 0 28 | level = "" 29 | 30 | [metrics] 31 | address = "" 32 | grpc_histogram = false 33 | 34 | [cgroup] 35 | path = "" 36 | 37 | [timeouts] 38 | "io.containerd.timeout.shim.cleanup" = "5s" 39 | "io.containerd.timeout.shim.load" = "5s" 40 | "io.containerd.timeout.shim.shutdown" = "3s" 41 | "io.containerd.timeout.task.state" = "2s" 42 | 43 | [plugins] 44 | [plugins."io.containerd.gc.v1.scheduler"] 45 | pause_threshold = 0.02 46 | deletion_threshold = 0 47 | mutation_threshold = 100 48 | schedule_delay = "0s" 49 | startup_delay = "100ms" 50 | [plugins."io.containerd.grpc.v1.cri"] 51 | disable_tcp_service = true 52 | stream_server_address = "127.0.0.1" 53 | stream_server_port = "0" 54 | stream_idle_timeout = "4h0m0s" 55 | enable_selinux = false 56 | selinux_category_range = 1024 57 | sandbox_image = "k8s.gcr.io/pause:3.2" 58 | stats_collect_period = 10 59 | systemd_cgroup = true 60 | enable_tls_streaming = false 61 | max_container_log_line_size = 16384 62 | disable_cgroup = false 63 | disable_apparmor = false 64 | restrict_oom_score_adj = false 65 | max_concurrent_downloads = 3 66 | disable_proc_mount = false 67 | unset_seccomp_profile = "" 68 | tolerate_missing_hugetlb_controller = true 69 | disable_hugetlb_controller = true 70 | ignore_image_defined_volumes = false 71 | [plugins."io.containerd.grpc.v1.cri".containerd] 72 | snapshotter = "overlayfs" 73 | default_runtime_name = "runc" 74 | no_pivot = false 75 | disable_snapshot_annotations = true 76 | discard_unpacked_layers = false 77 | [plugins."io.containerd.grpc.v1.cri".containerd.default_runtime] 78 | runtime_type = "" 79 | runtime_engine = "" 80 | runtime_root = "" 81 | privileged_without_host_devices = false 82 | base_runtime_spec = "" 83 | [plugins."io.containerd.grpc.v1.cri".containerd.untrusted_workload_runtime] 84 | runtime_type = "" 85 | runtime_engine = "" 86 | runtime_root = "" 87 | privileged_without_host_devices = false 88 | base_runtime_spec = "" 89 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] 90 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] 91 | runtime_type = "io.containerd.runc.v2" 92 | runtime_engine = "" 93 | runtime_root = "" 94 | privileged_without_host_devices = false 95 | base_runtime_spec = "" 96 | [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] 97 | SystemdCgroup = true 98 | [plugins."io.containerd.grpc.v1.cri".cni] 99 | bin_dir = "/opt/cni/bin" 100 | conf_dir = "/etc/cni/net.d" 101 | max_conf_num = 1 102 | conf_template = "" 103 | [plugins."io.containerd.grpc.v1.cri".registry] 104 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors] 105 | [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] 106 | 
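      # A private registry mirror (mentioned in the README changelog) could be
      # declared alongside docker.io; a minimal sketch, assuming a hypothetical
      # registry at registry.example.com:5000 that is not part of this repository:
      #   [plugins."io.containerd.grpc.v1.cri".registry.mirrors."registry.example.com:5000"]
      #     endpoint = ["http://registry.example.com:5000"]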
endpoint = ["https://registry-1.docker.io"] 107 | [plugins."io.containerd.grpc.v1.cri".image_decryption] 108 | key_model = "" 109 | [plugins."io.containerd.grpc.v1.cri".x509_key_pair_streaming] 110 | tls_cert_file = "" 111 | tls_key_file = "" 112 | [plugins."io.containerd.internal.v1.opt"] 113 | path = "/opt/containerd" 114 | [plugins."io.containerd.internal.v1.restart"] 115 | interval = "10s" 116 | [plugins."io.containerd.metadata.v1.bolt"] 117 | content_sharing_policy = "shared" 118 | [plugins."io.containerd.monitor.v1.cgroups"] 119 | no_prometheus = false 120 | [plugins."io.containerd.runtime.v1.linux"] 121 | shim = "containerd-shim" 122 | runtime = "runc" 123 | runtime_root = "" 124 | no_shim = false 125 | shim_debug = false 126 | [plugins."io.containerd.runtime.v2.task"] 127 | platforms = ["linux/amd64"] 128 | [plugins."io.containerd.service.v1.diff-service"] 129 | default = ["walking"] 130 | [plugins."io.containerd.snapshotter.v1.devmapper"] 131 | root_path = "" 132 | pool_name = "" 133 | base_image_size = "" 134 | async_remove = false 135 | -------------------------------------------------------------------------------- /roles/k8s/common/templates/etc/modules-load.d/containerd.conf: -------------------------------------------------------------------------------- 1 | overlay 2 | br_netfilter -------------------------------------------------------------------------------- /roles/k8s/master/defaults/main.yml: -------------------------------------------------------------------------------- 1 | 2 | k8s_master_admin_user: "ubuntu" 3 | k8s_master_admin_group: "ubuntu" 4 | 5 | k8s_master_node_name: "k8s-m" 6 | k8s_cluster_name: "k8s-cluster" 7 | 8 | k8s_master_apiserver_advertise_address: "192.168.101.100" 9 | k8s_master_pod_network_cidr: "192.168.112.0/20" 10 | 11 | 12 | -------------------------------------------------------------------------------- /roles/k8s/master/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - { role: k8s/common, 3 | k8s_common_admin_user: "{{k8s_master_admin_user}}", 4 | k8s_common_admin_group: "{{k8s_master_admin_group}}" 5 | } 6 | -------------------------------------------------------------------------------- /roles/k8s/master/tasks/main.yml: -------------------------------------------------------------------------------- 1 | #https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/configure-cgroup-driver/ 2 | #https://kubernetes.io/docs/reference/config-api/kubeadm-config.v1beta3/ 3 | - name: Configuring the kubelet cgroup driver 4 | template: 5 | src: kubeadm-config.yaml 6 | dest: /home/{{ k8s_master_admin_user }}/kubeadm-config.yaml 7 | 8 | #https://docs.projectcalico.org/v3.6/getting-started/kubernetes/ 9 | #kubeadm init --config /home/vagrant/kubeadm-config.yaml 10 | # --cri-socket /run/containerd/containerd.sock 11 | #--apiserver-advertise-address=192.168.50.11 --apiserver-cert-extra-sans=192.168.50.11 --node-name=k8s-m-1 --pod-network-cidr=192.168.112.0/20 12 | - name: Configure kubectl 13 | command: kubeadm init --config /home/{{ k8s_master_admin_user }}/kubeadm-config.yaml 14 | # --apiserver-advertise-address="{{ k8s_master_apiserver_advertise_address }}" --apiserver-cert-extra-sans="{{ k8s_master_apiserver_advertise_address }}" --node-name="{{ k8s_master_node_name }}" --pod-network-cidr="{{ k8s_master_pod_network_cidr }}" 15 | args: 16 | creates: /etc/kubernetes/manifests/kube-apiserver.yaml 17 | 18 | - name: Create .kube dir for {{ k8s_master_admin_user }} user 19 | file: 20 | 
path: "/home/{{ k8s_master_admin_user }}/.kube" 21 | state: directory 22 | 23 | - name: Copy kube config to {{ k8s_master_admin_user }} home .kube dir 24 | copy: 25 | src: /etc/kubernetes/admin.conf 26 | dest: /home/{{ k8s_master_admin_user }}/.kube/config 27 | remote_src: yes 28 | owner: "{{ k8s_master_admin_user }}" 29 | group: "{{ k8s_master_admin_group }}" 30 | mode: 0660 31 | 32 | #Rewrite calico replacing defaults 33 | #https://docs.projectcalico.org/getting-started/kubernetes/self-managed-onprem/onpremises 34 | - name: Rewrite calico.yaml 35 | template: 36 | src: calico/3.15/calico.yaml 37 | dest: /home/{{ k8s_master_admin_user }}/calico.yaml 38 | 39 | - name: Install Calico (using Kubernetes API datastore) 40 | become: false 41 | command: kubectl apply -f /home/{{ k8s_master_admin_user }}/calico.yaml 42 | 43 | # Step 2.6 from https://kubernetes.io/blog/2019/03/15/kubernetes-setup-using-ansible-and-vagrant/ 44 | - name: Generate join command 45 | command: kubeadm token create --print-join-command 46 | register: join_command 47 | 48 | - name: Copy join command for {{ k8s_cluster_name }} cluster to local file 49 | become: false 50 | local_action: copy content="{{ join_command.stdout_lines[0] }}" dest="./{{ k8s_cluster_name }}-join-command" 51 | -------------------------------------------------------------------------------- /roles/k8s/master/templates/calico/3.9/calico.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: calico/templates/calico-config.yaml 3 | # This ConfigMap is used to configure a self-hosted Calico installation. 4 | kind: ConfigMap 5 | apiVersion: v1 6 | metadata: 7 | name: calico-config 8 | namespace: kube-system 9 | data: 10 | # Typha is disabled. 11 | typha_service_name: "none" 12 | # Configure the backend to use. 13 | calico_backend: "bird" 14 | 15 | # Configure the MTU to use 16 | veth_mtu: "1440" 17 | 18 | # The CNI network configuration to install on each node. The special 19 | # values in this config will be automatically populated. 
20 | cni_network_config: |- 21 | { 22 | "name": "k8s-pod-network", 23 | "cniVersion": "0.3.0", 24 | "plugins": [ 25 | { 26 | "type": "calico", 27 | "log_level": "info", 28 | "datastore_type": "kubernetes", 29 | "nodename": "__KUBERNETES_NODE_NAME__", 30 | "mtu": __CNI_MTU__, 31 | "ipam": { 32 | "type": "calico-ipam" 33 | }, 34 | "policy": { 35 | "type": "k8s" 36 | }, 37 | "kubernetes": { 38 | "kubeconfig": "__KUBECONFIG_FILEPATH__" 39 | } 40 | }, 41 | { 42 | "type": "portmap", 43 | "snat": true, 44 | "capabilities": {"portMappings": true} 45 | } 46 | ] 47 | } 48 | 49 | --- 50 | # Source: calico/templates/kdd-crds.yaml 51 | apiVersion: apiextensions.k8s.io/v1beta1 52 | kind: CustomResourceDefinition 53 | metadata: 54 | name: felixconfigurations.crd.projectcalico.org 55 | spec: 56 | scope: Cluster 57 | group: crd.projectcalico.org 58 | version: v1 59 | names: 60 | kind: FelixConfiguration 61 | plural: felixconfigurations 62 | singular: felixconfiguration 63 | --- 64 | 65 | apiVersion: apiextensions.k8s.io/v1beta1 66 | kind: CustomResourceDefinition 67 | metadata: 68 | name: ipamblocks.crd.projectcalico.org 69 | spec: 70 | scope: Cluster 71 | group: crd.projectcalico.org 72 | version: v1 73 | names: 74 | kind: IPAMBlock 75 | plural: ipamblocks 76 | singular: ipamblock 77 | 78 | --- 79 | 80 | apiVersion: apiextensions.k8s.io/v1beta1 81 | kind: CustomResourceDefinition 82 | metadata: 83 | name: blockaffinities.crd.projectcalico.org 84 | spec: 85 | scope: Cluster 86 | group: crd.projectcalico.org 87 | version: v1 88 | names: 89 | kind: BlockAffinity 90 | plural: blockaffinities 91 | singular: blockaffinity 92 | 93 | --- 94 | 95 | apiVersion: apiextensions.k8s.io/v1beta1 96 | kind: CustomResourceDefinition 97 | metadata: 98 | name: ipamhandles.crd.projectcalico.org 99 | spec: 100 | scope: Cluster 101 | group: crd.projectcalico.org 102 | version: v1 103 | names: 104 | kind: IPAMHandle 105 | plural: ipamhandles 106 | singular: ipamhandle 107 | 108 | --- 109 | 110 | apiVersion: apiextensions.k8s.io/v1beta1 111 | kind: CustomResourceDefinition 112 | metadata: 113 | name: ipamconfigs.crd.projectcalico.org 114 | spec: 115 | scope: Cluster 116 | group: crd.projectcalico.org 117 | version: v1 118 | names: 119 | kind: IPAMConfig 120 | plural: ipamconfigs 121 | singular: ipamconfig 122 | 123 | --- 124 | 125 | apiVersion: apiextensions.k8s.io/v1beta1 126 | kind: CustomResourceDefinition 127 | metadata: 128 | name: bgppeers.crd.projectcalico.org 129 | spec: 130 | scope: Cluster 131 | group: crd.projectcalico.org 132 | version: v1 133 | names: 134 | kind: BGPPeer 135 | plural: bgppeers 136 | singular: bgppeer 137 | 138 | --- 139 | 140 | apiVersion: apiextensions.k8s.io/v1beta1 141 | kind: CustomResourceDefinition 142 | metadata: 143 | name: bgpconfigurations.crd.projectcalico.org 144 | spec: 145 | scope: Cluster 146 | group: crd.projectcalico.org 147 | version: v1 148 | names: 149 | kind: BGPConfiguration 150 | plural: bgpconfigurations 151 | singular: bgpconfiguration 152 | 153 | --- 154 | 155 | apiVersion: apiextensions.k8s.io/v1beta1 156 | kind: CustomResourceDefinition 157 | metadata: 158 | name: ippools.crd.projectcalico.org 159 | spec: 160 | scope: Cluster 161 | group: crd.projectcalico.org 162 | version: v1 163 | names: 164 | kind: IPPool 165 | plural: ippools 166 | singular: ippool 167 | 168 | --- 169 | 170 | apiVersion: apiextensions.k8s.io/v1beta1 171 | kind: CustomResourceDefinition 172 | metadata: 173 | name: hostendpoints.crd.projectcalico.org 174 | spec: 175 | scope: Cluster 176 | group: 
crd.projectcalico.org 177 | version: v1 178 | names: 179 | kind: HostEndpoint 180 | plural: hostendpoints 181 | singular: hostendpoint 182 | 183 | --- 184 | 185 | apiVersion: apiextensions.k8s.io/v1beta1 186 | kind: CustomResourceDefinition 187 | metadata: 188 | name: clusterinformations.crd.projectcalico.org 189 | spec: 190 | scope: Cluster 191 | group: crd.projectcalico.org 192 | version: v1 193 | names: 194 | kind: ClusterInformation 195 | plural: clusterinformations 196 | singular: clusterinformation 197 | 198 | --- 199 | 200 | apiVersion: apiextensions.k8s.io/v1beta1 201 | kind: CustomResourceDefinition 202 | metadata: 203 | name: globalnetworkpolicies.crd.projectcalico.org 204 | spec: 205 | scope: Cluster 206 | group: crd.projectcalico.org 207 | version: v1 208 | names: 209 | kind: GlobalNetworkPolicy 210 | plural: globalnetworkpolicies 211 | singular: globalnetworkpolicy 212 | 213 | --- 214 | 215 | apiVersion: apiextensions.k8s.io/v1beta1 216 | kind: CustomResourceDefinition 217 | metadata: 218 | name: globalnetworksets.crd.projectcalico.org 219 | spec: 220 | scope: Cluster 221 | group: crd.projectcalico.org 222 | version: v1 223 | names: 224 | kind: GlobalNetworkSet 225 | plural: globalnetworksets 226 | singular: globalnetworkset 227 | 228 | --- 229 | 230 | apiVersion: apiextensions.k8s.io/v1beta1 231 | kind: CustomResourceDefinition 232 | metadata: 233 | name: networkpolicies.crd.projectcalico.org 234 | spec: 235 | scope: Namespaced 236 | group: crd.projectcalico.org 237 | version: v1 238 | names: 239 | kind: NetworkPolicy 240 | plural: networkpolicies 241 | singular: networkpolicy 242 | 243 | --- 244 | 245 | apiVersion: apiextensions.k8s.io/v1beta1 246 | kind: CustomResourceDefinition 247 | metadata: 248 | name: networksets.crd.projectcalico.org 249 | spec: 250 | scope: Namespaced 251 | group: crd.projectcalico.org 252 | version: v1 253 | names: 254 | kind: NetworkSet 255 | plural: networksets 256 | singular: networkset 257 | --- 258 | # Source: calico/templates/rbac.yaml 259 | 260 | # Include a clusterrole for the kube-controllers component, 261 | # and bind it to the calico-kube-controllers serviceaccount. 262 | kind: ClusterRole 263 | apiVersion: rbac.authorization.k8s.io/v1 264 | metadata: 265 | name: calico-kube-controllers 266 | rules: 267 | # Nodes are watched to monitor for deletions. 268 | - apiGroups: [""] 269 | resources: 270 | - nodes 271 | verbs: 272 | - watch 273 | - list 274 | - get 275 | # Pods are queried to check for existence. 276 | - apiGroups: [""] 277 | resources: 278 | - pods 279 | verbs: 280 | - get 281 | # IPAM resources are manipulated when nodes are deleted. 282 | - apiGroups: ["crd.projectcalico.org"] 283 | resources: 284 | - ippools 285 | verbs: 286 | - list 287 | - apiGroups: ["crd.projectcalico.org"] 288 | resources: 289 | - blockaffinities 290 | - ipamblocks 291 | - ipamhandles 292 | verbs: 293 | - get 294 | - list 295 | - create 296 | - update 297 | - delete 298 | # Needs access to update clusterinformations. 
299 | - apiGroups: ["crd.projectcalico.org"] 300 | resources: 301 | - clusterinformations 302 | verbs: 303 | - get 304 | - create 305 | - update 306 | --- 307 | kind: ClusterRoleBinding 308 | apiVersion: rbac.authorization.k8s.io/v1 309 | metadata: 310 | name: calico-kube-controllers 311 | roleRef: 312 | apiGroup: rbac.authorization.k8s.io 313 | kind: ClusterRole 314 | name: calico-kube-controllers 315 | subjects: 316 | - kind: ServiceAccount 317 | name: calico-kube-controllers 318 | namespace: kube-system 319 | --- 320 | # Include a clusterrole for the calico-node DaemonSet, 321 | # and bind it to the calico-node serviceaccount. 322 | kind: ClusterRole 323 | apiVersion: rbac.authorization.k8s.io/v1 324 | metadata: 325 | name: calico-node 326 | rules: 327 | # The CNI plugin needs to get pods, nodes, and namespaces. 328 | - apiGroups: [""] 329 | resources: 330 | - pods 331 | - nodes 332 | - namespaces 333 | verbs: 334 | - get 335 | - apiGroups: [""] 336 | resources: 337 | - endpoints 338 | - services 339 | verbs: 340 | # Used to discover service IPs for advertisement. 341 | - watch 342 | - list 343 | # Used to discover Typhas. 344 | - get 345 | - apiGroups: [""] 346 | resources: 347 | - nodes/status 348 | verbs: 349 | # Needed for clearing NodeNetworkUnavailable flag. 350 | - patch 351 | # Calico stores some configuration information in node annotations. 352 | - update 353 | # Watch for changes to Kubernetes NetworkPolicies. 354 | - apiGroups: ["networking.k8s.io"] 355 | resources: 356 | - networkpolicies 357 | verbs: 358 | - watch 359 | - list 360 | # Used by Calico for policy information. 361 | - apiGroups: [""] 362 | resources: 363 | - pods 364 | - namespaces 365 | - serviceaccounts 366 | verbs: 367 | - list 368 | - watch 369 | # The CNI plugin patches pods/status. 370 | - apiGroups: [""] 371 | resources: 372 | - pods/status 373 | verbs: 374 | - patch 375 | # Calico monitors various CRDs for config. 376 | - apiGroups: ["crd.projectcalico.org"] 377 | resources: 378 | - globalfelixconfigs 379 | - felixconfigurations 380 | - bgppeers 381 | - globalbgpconfigs 382 | - bgpconfigurations 383 | - ippools 384 | - ipamblocks 385 | - globalnetworkpolicies 386 | - globalnetworksets 387 | - networkpolicies 388 | - networksets 389 | - clusterinformations 390 | - hostendpoints 391 | - blockaffinities 392 | verbs: 393 | - get 394 | - list 395 | - watch 396 | # Calico must create and update some CRDs on startup. 397 | - apiGroups: ["crd.projectcalico.org"] 398 | resources: 399 | - ippools 400 | - felixconfigurations 401 | - clusterinformations 402 | verbs: 403 | - create 404 | - update 405 | # Calico stores some configuration information on the node. 406 | - apiGroups: [""] 407 | resources: 408 | - nodes 409 | verbs: 410 | - get 411 | - list 412 | - watch 413 | # These permissions are only requried for upgrade from v2.6, and can 414 | # be removed after upgrade or on fresh installations. 415 | - apiGroups: ["crd.projectcalico.org"] 416 | resources: 417 | - bgpconfigurations 418 | - bgppeers 419 | verbs: 420 | - create 421 | - update 422 | # These permissions are required for Calico CNI to perform IPAM allocations. 423 | - apiGroups: ["crd.projectcalico.org"] 424 | resources: 425 | - blockaffinities 426 | - ipamblocks 427 | - ipamhandles 428 | verbs: 429 | - get 430 | - list 431 | - create 432 | - update 433 | - delete 434 | - apiGroups: ["crd.projectcalico.org"] 435 | resources: 436 | - ipamconfigs 437 | verbs: 438 | - get 439 | # Block affinities must also be watchable by confd for route aggregation. 
440 | - apiGroups: ["crd.projectcalico.org"] 441 | resources: 442 | - blockaffinities 443 | verbs: 444 | - watch 445 | # The Calico IPAM migration needs to get daemonsets. These permissions can be 446 | # removed if not upgrading from an installation using host-local IPAM. 447 | - apiGroups: ["apps"] 448 | resources: 449 | - daemonsets 450 | verbs: 451 | - get 452 | --- 453 | apiVersion: rbac.authorization.k8s.io/v1 454 | kind: ClusterRoleBinding 455 | metadata: 456 | name: calico-node 457 | roleRef: 458 | apiGroup: rbac.authorization.k8s.io 459 | kind: ClusterRole 460 | name: calico-node 461 | subjects: 462 | - kind: ServiceAccount 463 | name: calico-node 464 | namespace: kube-system 465 | 466 | --- 467 | # Source: calico/templates/calico-node.yaml 468 | # This manifest installs the calico-node container, as well 469 | # as the CNI plugins and network config on 470 | # each master and worker node in a Kubernetes cluster. 471 | kind: DaemonSet 472 | apiVersion: apps/v1 473 | metadata: 474 | name: calico-node 475 | namespace: kube-system 476 | labels: 477 | k8s-app: calico-node 478 | spec: 479 | selector: 480 | matchLabels: 481 | k8s-app: calico-node 482 | updateStrategy: 483 | type: RollingUpdate 484 | rollingUpdate: 485 | maxUnavailable: 1 486 | template: 487 | metadata: 488 | labels: 489 | k8s-app: calico-node 490 | annotations: 491 | # This, along with the CriticalAddonsOnly toleration below, 492 | # marks the pod as a critical add-on, ensuring it gets 493 | # priority scheduling and that its resources are reserved 494 | # if it ever gets evicted. 495 | scheduler.alpha.kubernetes.io/critical-pod: '' 496 | spec: 497 | nodeSelector: 498 | beta.kubernetes.io/os: linux 499 | hostNetwork: true 500 | tolerations: 501 | # Make sure calico-node gets scheduled on all nodes. 502 | - effect: NoSchedule 503 | operator: Exists 504 | # Mark the pod as a critical add-on for rescheduling. 505 | - key: CriticalAddonsOnly 506 | operator: Exists 507 | - effect: NoExecute 508 | operator: Exists 509 | serviceAccountName: calico-node 510 | # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force 511 | # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 512 | terminationGracePeriodSeconds: 0 513 | priorityClassName: system-node-critical 514 | initContainers: 515 | # This container performs upgrade from host-local IPAM to calico-ipam. 516 | # It can be deleted if this is a fresh installation, or if you have already 517 | # upgraded to use calico-ipam. 518 | - name: upgrade-ipam 519 | image: calico/cni:v3.9.0 520 | command: ["/opt/cni/bin/calico-ipam", "-upgrade"] 521 | env: 522 | - name: KUBERNETES_NODE_NAME 523 | valueFrom: 524 | fieldRef: 525 | fieldPath: spec.nodeName 526 | - name: CALICO_NETWORKING_BACKEND 527 | valueFrom: 528 | configMapKeyRef: 529 | name: calico-config 530 | key: calico_backend 531 | volumeMounts: 532 | - mountPath: /var/lib/cni/networks 533 | name: host-local-net-dir 534 | - mountPath: /host/opt/cni/bin 535 | name: cni-bin-dir 536 | # This container installs the CNI binaries 537 | # and CNI network config file on each node. 538 | - name: install-cni 539 | image: calico/cni:v3.9.0 540 | command: ["/install-cni.sh"] 541 | env: 542 | # Name of the CNI config file to create. 543 | - name: CNI_CONF_NAME 544 | value: "10-calico.conflist" 545 | # The CNI network config to install on each node. 
546 | - name: CNI_NETWORK_CONFIG 547 | valueFrom: 548 | configMapKeyRef: 549 | name: calico-config 550 | key: cni_network_config 551 | # Set the hostname based on the k8s node name. 552 | - name: KUBERNETES_NODE_NAME 553 | valueFrom: 554 | fieldRef: 555 | fieldPath: spec.nodeName 556 | # CNI MTU Config variable 557 | - name: CNI_MTU 558 | valueFrom: 559 | configMapKeyRef: 560 | name: calico-config 561 | key: veth_mtu 562 | # Prevents the container from sleeping forever. 563 | - name: SLEEP 564 | value: "false" 565 | volumeMounts: 566 | - mountPath: /host/opt/cni/bin 567 | name: cni-bin-dir 568 | - mountPath: /host/etc/cni/net.d 569 | name: cni-net-dir 570 | # Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes 571 | # to communicate with Felix over the Policy Sync API. 572 | - name: flexvol-driver 573 | image: calico/pod2daemon-flexvol:v3.9.0 574 | volumeMounts: 575 | - name: flexvol-driver-host 576 | mountPath: /host/driver 577 | containers: 578 | # Runs calico-node container on each Kubernetes node. This 579 | # container programs network policy and routes on each 580 | # host. 581 | - name: calico-node 582 | image: calico/node:v3.9.0 583 | env: 584 | # Use Kubernetes API as the backing datastore. 585 | - name: DATASTORE_TYPE 586 | value: "kubernetes" 587 | # Wait for the datastore. 588 | - name: WAIT_FOR_DATASTORE 589 | value: "true" 590 | # Set based on the k8s node name. 591 | - name: NODENAME 592 | valueFrom: 593 | fieldRef: 594 | fieldPath: spec.nodeName 595 | # Choose the backend to use. 596 | - name: CALICO_NETWORKING_BACKEND 597 | valueFrom: 598 | configMapKeyRef: 599 | name: calico-config 600 | key: calico_backend 601 | # Cluster type to identify the deployment type 602 | - name: CLUSTER_TYPE 603 | value: "k8s,bgp" 604 | # Auto-detect the BGP IP address. 605 | - name: IP 606 | value: "autodetect" 607 | # Enable IPIP 608 | - name: CALICO_IPV4POOL_IPIP 609 | value: "Always" 610 | # Set MTU for tunnel device used if ipip is enabled 611 | - name: FELIX_IPINIPMTU 612 | valueFrom: 613 | configMapKeyRef: 614 | name: calico-config 615 | key: veth_mtu 616 | # The default IPv4 pool to create on startup if none exists. Pod IPs will be 617 | # chosen from this range. Changing this value after installation will have 618 | # no effect. This should fall within `--cluster-cidr`. 619 | - name: CALICO_IPV4POOL_CIDR 620 | value: "{{ k8s_master_pod_network_cidr }}" 621 | # Disable file logging so `kubectl logs` works. 622 | - name: CALICO_DISABLE_FILE_LOGGING 623 | value: "true" 624 | # Set Felix endpoint to host default action to ACCEPT. 625 | - name: FELIX_DEFAULTENDPOINTTOHOSTACTION 626 | value: "ACCEPT" 627 | # Disable IPv6 on Kubernetes. 
628 | - name: FELIX_IPV6SUPPORT 629 | value: "false" 630 | # Set Felix logging to "info" 631 | - name: FELIX_LOGSEVERITYSCREEN 632 | value: "info" 633 | - name: FELIX_HEALTHENABLED 634 | value: "true" 635 | securityContext: 636 | privileged: true 637 | resources: 638 | requests: 639 | cpu: 250m 640 | livenessProbe: 641 | exec: 642 | command: 643 | - /bin/calico-node 644 | - -felix-live 645 | periodSeconds: 10 646 | initialDelaySeconds: 10 647 | failureThreshold: 6 648 | readinessProbe: 649 | exec: 650 | command: 651 | - /bin/calico-node 652 | - -felix-ready 653 | - -bird-ready 654 | periodSeconds: 10 655 | volumeMounts: 656 | - mountPath: /lib/modules 657 | name: lib-modules 658 | readOnly: true 659 | - mountPath: /run/xtables.lock 660 | name: xtables-lock 661 | readOnly: false 662 | - mountPath: /var/run/calico 663 | name: var-run-calico 664 | readOnly: false 665 | - mountPath: /var/lib/calico 666 | name: var-lib-calico 667 | readOnly: false 668 | - name: policysync 669 | mountPath: /var/run/nodeagent 670 | volumes: 671 | # Used by calico-node. 672 | - name: lib-modules 673 | hostPath: 674 | path: /lib/modules 675 | - name: var-run-calico 676 | hostPath: 677 | path: /var/run/calico 678 | - name: var-lib-calico 679 | hostPath: 680 | path: /var/lib/calico 681 | - name: xtables-lock 682 | hostPath: 683 | path: /run/xtables.lock 684 | type: FileOrCreate 685 | # Used to install CNI. 686 | - name: cni-bin-dir 687 | hostPath: 688 | path: /opt/cni/bin 689 | - name: cni-net-dir 690 | hostPath: 691 | path: /etc/cni/net.d 692 | # Mount in the directory for host-local IPAM allocations. This is 693 | # used when upgrading from host-local to calico-ipam, and can be removed 694 | # if not using the upgrade-ipam init container. 695 | - name: host-local-net-dir 696 | hostPath: 697 | path: /var/lib/cni/networks 698 | # Used to create per-pod Unix Domain Sockets 699 | - name: policysync 700 | hostPath: 701 | type: DirectoryOrCreate 702 | path: /var/run/nodeagent 703 | # Used to install Flex Volume Driver 704 | - name: flexvol-driver-host 705 | hostPath: 706 | type: DirectoryOrCreate 707 | path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds 708 | --- 709 | 710 | apiVersion: v1 711 | kind: ServiceAccount 712 | metadata: 713 | name: calico-node 714 | namespace: kube-system 715 | 716 | --- 717 | # Source: calico/templates/calico-kube-controllers.yaml 718 | 719 | # See https://github.com/projectcalico/kube-controllers 720 | apiVersion: apps/v1 721 | kind: Deployment 722 | metadata: 723 | name: calico-kube-controllers 724 | namespace: kube-system 725 | labels: 726 | k8s-app: calico-kube-controllers 727 | spec: 728 | # The controllers can only have a single active instance. 729 | replicas: 1 730 | selector: 731 | matchLabels: 732 | k8s-app: calico-kube-controllers 733 | strategy: 734 | type: Recreate 735 | template: 736 | metadata: 737 | name: calico-kube-controllers 738 | namespace: kube-system 739 | labels: 740 | k8s-app: calico-kube-controllers 741 | annotations: 742 | scheduler.alpha.kubernetes.io/critical-pod: '' 743 | spec: 744 | nodeSelector: 745 | beta.kubernetes.io/os: linux 746 | tolerations: 747 | # Mark the pod as a critical add-on for rescheduling. 
748 | - key: CriticalAddonsOnly 749 | operator: Exists 750 | - key: node-role.kubernetes.io/master 751 | effect: NoSchedule 752 | serviceAccountName: calico-kube-controllers 753 | priorityClassName: system-cluster-critical 754 | containers: 755 | - name: calico-kube-controllers 756 | image: calico/kube-controllers:v3.9.0 757 | env: 758 | # Choose which controllers to run. 759 | - name: ENABLED_CONTROLLERS 760 | value: node 761 | - name: DATASTORE_TYPE 762 | value: kubernetes 763 | readinessProbe: 764 | exec: 765 | command: 766 | - /usr/bin/check-status 767 | - -r 768 | 769 | --- 770 | 771 | apiVersion: v1 772 | kind: ServiceAccount 773 | metadata: 774 | name: calico-kube-controllers 775 | namespace: kube-system 776 | --- 777 | # Source: calico/templates/calico-etcd-secrets.yaml 778 | 779 | --- 780 | # Source: calico/templates/calico-typha.yaml 781 | 782 | --- 783 | # Source: calico/templates/configure-canal.yaml 784 | 785 | 786 | -------------------------------------------------------------------------------- /roles/k8s/master/templates/kubeadm-config.yaml: -------------------------------------------------------------------------------- 1 | # kubeadm-config.yaml 2 | kind: KubeletConfiguration 3 | apiVersion: kubelet.config.k8s.io/v1beta1 4 | cgroupDriver: systemd 5 | containerRuntime: remote 6 | --- 7 | apiVersion: kubeadm.k8s.io/v1beta3 8 | kind: InitConfiguration 9 | bootstrapTokens: 10 | - groups: 11 | - system:bootstrappers:kubeadm:default-node-token 12 | token: abcdef.0123456789abcdef 13 | ttl: 24h0m0s 14 | usages: 15 | - signing 16 | - authentication 17 | localAPIEndpoint: 18 | advertiseAddress: "{{ k8s_master_apiserver_advertise_address }}" 19 | bindPort: 6443 20 | --- 21 | apiVersion: kubeadm.k8s.io/v1beta3 22 | kind: ClusterConfiguration 23 | apiServer: 24 | timeoutForControlPlane: 4m0s 25 | certSANs: 26 | - "10.100.1.1" 27 | - "{{ k8s_master_apiserver_advertise_address }}" 28 | clusterName: "{{ k8s_cluster_name }}" 29 | etcd: 30 | local: 31 | dataDir: /var/lib/etcd 32 | networking: 33 | podSubnet: "{{ k8s_master_pod_network_cidr }}" 34 | -------------------------------------------------------------------------------- /roles/k8s/node/defaults/main.yml: -------------------------------------------------------------------------------- 1 | 2 | k8s_node_admin_user: "ubuntu" 3 | k8s_node_admin_group: "ubuntu" 4 | 5 | k8s_node_node_name: "k8s-m" 6 | k8s_cluster_name: "k8s-cluster" 7 | 8 | -------------------------------------------------------------------------------- /roles/k8s/node/meta/main.yml: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - { role: k8s/common, 3 | k8s_common_admin_user: "{{k8s_node_admin_user}}", 4 | k8s_common_admin_group: "{{k8s_node_admin_group}}" 5 | } 6 | -------------------------------------------------------------------------------- /roles/k8s/node/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Copy the join command to {{ k8s_cluster_name }} cluster 2 | copy: 3 | src: "./{{ k8s_cluster_name }}-join-command" 4 | dest: /home/{{ k8s_node_admin_user }}/{{ k8s_cluster_name }}-join-command 5 | owner: "{{ k8s_node_admin_user }}" 6 | group: "{{ k8s_node_admin_group }}" 7 | mode: 0760 8 | 9 | - name: Join the node to cluster {{ k8s_cluster_name }} 10 | command: sh /home/{{ k8s_node_admin_user }}/{{ k8s_cluster_name }}-join-command 11 | -------------------------------------------------------------------------------- 
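Note on the kubeadm-config.yaml template above: it pins the kubelet to the systemd cgroup driver (cgroupDriver: systemd), which must match the cgroup driver configured for containerd. One way to confirm what the kubelet actually picked up is a small diagnostic task appended to roles/k8s/master/tasks/main.yml. The sketch below is hypothetical and not part of this repository; it assumes only standard kubeadm behaviour, namely that kubeadm init writes the rendered KubeletConfiguration to /var/lib/kubelet/config.yaml on the master. The task name and register variable are illustrative.

    - name: Confirm the kubelet cgroup driver applied from kubeadm-config.yaml (diagnostic only)
      command: grep cgroupDriver /var/lib/kubelet/config.yaml
      register: kubelet_cgroup_driver   # hypothetical variable name
      changed_when: false

    - name: Show the detected cgroup driver
      debug:
        msg: "{{ kubelet_cgroup_driver.stdout }}"

On a healthy master this should print a single line, cgroupDriver: systemd, matching the KubeletConfiguration rendered from the template.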
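The node role above completes the cluster join workflow: the master role writes a ./{{ k8s_cluster_name }}-join-command file on the Ansible controller (local_action), and each worker copies that file in and runs it with sh. The file contains a single kubeadm join command; the address, token, and hash below are placeholders for illustration, not values taken from this repository:

    kubeadm join {{ k8s_master_apiserver_advertise_address }}:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>

Because kubeadm join creates /etc/kubernetes/kubelet.conf on the worker, the join task could be guarded with a creates argument, mirroring the creates guard already used by the kubeadm init task on the master. This variant is a sketch of an optional hardening, not the playbook's current behaviour:

    - name: Join the node to cluster {{ k8s_cluster_name }}
      command: sh /home/{{ k8s_node_admin_user }}/{{ k8s_cluster_name }}-join-command
      args:
        creates: /etc/kubernetes/kubelet.conf   # written by kubeadm join; skips the task on re-provision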
/roles/k8s/node/templates/calico-networking/1.7/calico.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Source: calico/templates/calico-config.yaml 3 | # This ConfigMap is used to configure a self-hosted Calico installation. 4 | kind: ConfigMap 5 | apiVersion: v1 6 | metadata: 7 | name: calico-config 8 | namespace: kube-system 9 | data: 10 | # Typha is disabled. 11 | typha_service_name: "none" 12 | # Configure the Calico backend to use. 13 | calico_backend: "bird" 14 | 15 | # Configure the MTU to use 16 | veth_mtu: "1440" 17 | 18 | # The CNI network configuration to install on each node. The special 19 | # values in this config will be automatically populated. 20 | cni_network_config: |- 21 | { 22 | "name": "k8s-pod-network", 23 | "cniVersion": "0.3.0", 24 | "plugins": [ 25 | { 26 | "type": "calico", 27 | "log_level": "info", 28 | "datastore_type": "kubernetes", 29 | "nodename": "__KUBERNETES_NODE_NAME__", 30 | "mtu": __CNI_MTU__, 31 | "ipam": { 32 | "type": "calico-ipam" 33 | }, 34 | "policy": { 35 | "type": "k8s" 36 | }, 37 | "kubernetes": { 38 | "kubeconfig": "__KUBECONFIG_FILEPATH__" 39 | } 40 | }, 41 | { 42 | "type": "portmap", 43 | "snat": true, 44 | "capabilities": {"portMappings": true} 45 | } 46 | ] 47 | } 48 | 49 | --- 50 | # Source: calico/templates/kdd-crds.yaml 51 | # Create all the CustomResourceDefinitions needed for 52 | # Calico policy and networking mode. 53 | 54 | apiVersion: apiextensions.k8s.io/v1beta1 55 | kind: CustomResourceDefinition 56 | metadata: 57 | name: felixconfigurations.crd.projectcalico.org 58 | spec: 59 | scope: Cluster 60 | group: crd.projectcalico.org 61 | version: v1 62 | names: 63 | kind: FelixConfiguration 64 | plural: felixconfigurations 65 | singular: felixconfiguration 66 | --- 67 | 68 | apiVersion: apiextensions.k8s.io/v1beta1 69 | kind: CustomResourceDefinition 70 | metadata: 71 | name: ipamblocks.crd.projectcalico.org 72 | spec: 73 | scope: Cluster 74 | group: crd.projectcalico.org 75 | version: v1 76 | names: 77 | kind: IPAMBlock 78 | plural: ipamblocks 79 | singular: ipamblock 80 | 81 | --- 82 | 83 | apiVersion: apiextensions.k8s.io/v1beta1 84 | kind: CustomResourceDefinition 85 | metadata: 86 | name: blockaffinities.crd.projectcalico.org 87 | spec: 88 | scope: Cluster 89 | group: crd.projectcalico.org 90 | version: v1 91 | names: 92 | kind: BlockAffinity 93 | plural: blockaffinities 94 | singular: blockaffinity 95 | 96 | --- 97 | 98 | apiVersion: apiextensions.k8s.io/v1beta1 99 | kind: CustomResourceDefinition 100 | metadata: 101 | name: ipamhandles.crd.projectcalico.org 102 | spec: 103 | scope: Cluster 104 | group: crd.projectcalico.org 105 | version: v1 106 | names: 107 | kind: IPAMHandle 108 | plural: ipamhandles 109 | singular: ipamhandle 110 | 111 | --- 112 | 113 | apiVersion: apiextensions.k8s.io/v1beta1 114 | kind: CustomResourceDefinition 115 | metadata: 116 | name: ipamconfigs.crd.projectcalico.org 117 | spec: 118 | scope: Cluster 119 | group: crd.projectcalico.org 120 | version: v1 121 | names: 122 | kind: IPAMConfig 123 | plural: ipamconfigs 124 | singular: ipamconfig 125 | 126 | --- 127 | 128 | apiVersion: apiextensions.k8s.io/v1beta1 129 | kind: CustomResourceDefinition 130 | metadata: 131 | name: bgppeers.crd.projectcalico.org 132 | spec: 133 | scope: Cluster 134 | group: crd.projectcalico.org 135 | version: v1 136 | names: 137 | kind: BGPPeer 138 | plural: bgppeers 139 | singular: bgppeer 140 | 141 | --- 142 | 143 | apiVersion: apiextensions.k8s.io/v1beta1 144 | kind: 
CustomResourceDefinition 145 | metadata: 146 | name: bgpconfigurations.crd.projectcalico.org 147 | spec: 148 | scope: Cluster 149 | group: crd.projectcalico.org 150 | version: v1 151 | names: 152 | kind: BGPConfiguration 153 | plural: bgpconfigurations 154 | singular: bgpconfiguration 155 | 156 | --- 157 | 158 | apiVersion: apiextensions.k8s.io/v1beta1 159 | kind: CustomResourceDefinition 160 | metadata: 161 | name: ippools.crd.projectcalico.org 162 | spec: 163 | scope: Cluster 164 | group: crd.projectcalico.org 165 | version: v1 166 | names: 167 | kind: IPPool 168 | plural: ippools 169 | singular: ippool 170 | 171 | --- 172 | 173 | apiVersion: apiextensions.k8s.io/v1beta1 174 | kind: CustomResourceDefinition 175 | metadata: 176 | name: hostendpoints.crd.projectcalico.org 177 | spec: 178 | scope: Cluster 179 | group: crd.projectcalico.org 180 | version: v1 181 | names: 182 | kind: HostEndpoint 183 | plural: hostendpoints 184 | singular: hostendpoint 185 | 186 | --- 187 | 188 | apiVersion: apiextensions.k8s.io/v1beta1 189 | kind: CustomResourceDefinition 190 | metadata: 191 | name: clusterinformations.crd.projectcalico.org 192 | spec: 193 | scope: Cluster 194 | group: crd.projectcalico.org 195 | version: v1 196 | names: 197 | kind: ClusterInformation 198 | plural: clusterinformations 199 | singular: clusterinformation 200 | 201 | --- 202 | 203 | apiVersion: apiextensions.k8s.io/v1beta1 204 | kind: CustomResourceDefinition 205 | metadata: 206 | name: globalnetworkpolicies.crd.projectcalico.org 207 | spec: 208 | scope: Cluster 209 | group: crd.projectcalico.org 210 | version: v1 211 | names: 212 | kind: GlobalNetworkPolicy 213 | plural: globalnetworkpolicies 214 | singular: globalnetworkpolicy 215 | 216 | --- 217 | 218 | apiVersion: apiextensions.k8s.io/v1beta1 219 | kind: CustomResourceDefinition 220 | metadata: 221 | name: globalnetworksets.crd.projectcalico.org 222 | spec: 223 | scope: Cluster 224 | group: crd.projectcalico.org 225 | version: v1 226 | names: 227 | kind: GlobalNetworkSet 228 | plural: globalnetworksets 229 | singular: globalnetworkset 230 | 231 | --- 232 | 233 | apiVersion: apiextensions.k8s.io/v1beta1 234 | kind: CustomResourceDefinition 235 | metadata: 236 | name: networkpolicies.crd.projectcalico.org 237 | spec: 238 | scope: Namespaced 239 | group: crd.projectcalico.org 240 | version: v1 241 | names: 242 | kind: NetworkPolicy 243 | plural: networkpolicies 244 | singular: networkpolicy 245 | --- 246 | # Source: calico/templates/rbac.yaml 247 | 248 | # Include a clusterrole for the kube-controllers component, 249 | # and bind it to the calico-kube-controllers serviceaccount. 250 | kind: ClusterRole 251 | apiVersion: rbac.authorization.k8s.io/v1beta1 252 | metadata: 253 | name: calico-kube-controllers 254 | rules: 255 | # Nodes are watched to monitor for deletions. 256 | - apiGroups: [""] 257 | resources: 258 | - nodes 259 | verbs: 260 | - watch 261 | - list 262 | - get 263 | # Pods are queried to check for existence. 264 | - apiGroups: [""] 265 | resources: 266 | - pods 267 | verbs: 268 | - get 269 | # IPAM resources are manipulated when nodes are deleted. 270 | - apiGroups: ["crd.projectcalico.org"] 271 | resources: 272 | - ippools 273 | verbs: 274 | - list 275 | - apiGroups: ["crd.projectcalico.org"] 276 | resources: 277 | - blockaffinities 278 | - ipamblocks 279 | - ipamhandles 280 | verbs: 281 | - get 282 | - list 283 | - create 284 | - update 285 | - delete 286 | # Needs access to update clusterinformations. 
287 | - apiGroups: ["crd.projectcalico.org"] 288 | resources: 289 | - clusterinformations 290 | verbs: 291 | - get 292 | - create 293 | - update 294 | --- 295 | kind: ClusterRoleBinding 296 | apiVersion: rbac.authorization.k8s.io/v1beta1 297 | metadata: 298 | name: calico-kube-controllers 299 | roleRef: 300 | apiGroup: rbac.authorization.k8s.io 301 | kind: ClusterRole 302 | name: calico-kube-controllers 303 | subjects: 304 | - kind: ServiceAccount 305 | name: calico-kube-controllers 306 | namespace: kube-system 307 | --- 308 | # Include a clusterrole for the calico-node DaemonSet, 309 | # and bind it to the calico-node serviceaccount. 310 | kind: ClusterRole 311 | apiVersion: rbac.authorization.k8s.io/v1beta1 312 | metadata: 313 | name: calico-node 314 | rules: 315 | # The CNI plugin needs to get pods, nodes, and namespaces. 316 | - apiGroups: [""] 317 | resources: 318 | - pods 319 | - nodes 320 | - namespaces 321 | verbs: 322 | - get 323 | - apiGroups: [""] 324 | resources: 325 | - endpoints 326 | - services 327 | verbs: 328 | # Used to discover service IPs for advertisement. 329 | - watch 330 | - list 331 | # Used to discover Typhas. 332 | - get 333 | - apiGroups: [""] 334 | resources: 335 | - nodes/status 336 | verbs: 337 | # Needed for clearing NodeNetworkUnavailable flag. 338 | - patch 339 | # Calico stores some configuration information in node annotations. 340 | - update 341 | # Watch for changes to Kubernetes NetworkPolicies. 342 | - apiGroups: ["networking.k8s.io"] 343 | resources: 344 | - networkpolicies 345 | verbs: 346 | - watch 347 | - list 348 | # Used by Calico for policy information. 349 | - apiGroups: [""] 350 | resources: 351 | - pods 352 | - namespaces 353 | - serviceaccounts 354 | verbs: 355 | - list 356 | - watch 357 | # The CNI plugin patches pods/status. 358 | - apiGroups: [""] 359 | resources: 360 | - pods/status 361 | verbs: 362 | - patch 363 | # Calico monitors various CRDs for config. 364 | - apiGroups: ["crd.projectcalico.org"] 365 | resources: 366 | - globalfelixconfigs 367 | - felixconfigurations 368 | - bgppeers 369 | - globalbgpconfigs 370 | - bgpconfigurations 371 | - ippools 372 | - ipamblocks 373 | - globalnetworkpolicies 374 | - globalnetworksets 375 | - networkpolicies 376 | - clusterinformations 377 | - hostendpoints 378 | verbs: 379 | - get 380 | - list 381 | - watch 382 | # Calico must create and update some CRDs on startup. 383 | - apiGroups: ["crd.projectcalico.org"] 384 | resources: 385 | - ippools 386 | - felixconfigurations 387 | - clusterinformations 388 | verbs: 389 | - create 390 | - update 391 | # Calico stores some configuration information on the node. 392 | - apiGroups: [""] 393 | resources: 394 | - nodes 395 | verbs: 396 | - get 397 | - list 398 | - watch 399 | # These permissions are only requried for upgrade from v2.6, and can 400 | # be removed after upgrade or on fresh installations. 401 | - apiGroups: ["crd.projectcalico.org"] 402 | resources: 403 | - bgpconfigurations 404 | - bgppeers 405 | verbs: 406 | - create 407 | - update 408 | # These permissions are required for Calico CNI to perform IPAM allocations. 409 | - apiGroups: ["crd.projectcalico.org"] 410 | resources: 411 | - blockaffinities 412 | - ipamblocks 413 | - ipamhandles 414 | verbs: 415 | - get 416 | - list 417 | - create 418 | - update 419 | - delete 420 | - apiGroups: ["crd.projectcalico.org"] 421 | resources: 422 | - ipamconfigs 423 | verbs: 424 | - get 425 | # Block affinities must also be watchable by confd for route aggregation. 
426 | - apiGroups: ["crd.projectcalico.org"] 427 | resources: 428 | - blockaffinities 429 | verbs: 430 | - watch 431 | # The Calico IPAM migration needs to get daemonsets. These permissions can be 432 | # removed if not upgrading from an installation using host-local IPAM. 433 | - apiGroups: ["apps"] 434 | resources: 435 | - daemonsets 436 | verbs: 437 | - get 438 | --- 439 | apiVersion: rbac.authorization.k8s.io/v1beta1 440 | kind: ClusterRoleBinding 441 | metadata: 442 | name: calico-node 443 | roleRef: 444 | apiGroup: rbac.authorization.k8s.io 445 | kind: ClusterRole 446 | name: calico-node 447 | subjects: 448 | - kind: ServiceAccount 449 | name: calico-node 450 | namespace: kube-system 451 | --- 452 | 453 | --- 454 | # Source: calico/templates/calico-node.yaml 455 | # This manifest installs the node container, as well 456 | # as the Calico CNI plugins and network config on 457 | # each master and worker node in a Kubernetes cluster. 458 | kind: DaemonSet 459 | apiVersion: extensions/v1beta1 460 | metadata: 461 | name: calico-node 462 | namespace: kube-system 463 | labels: 464 | k8s-app: calico-node 465 | spec: 466 | selector: 467 | matchLabels: 468 | k8s-app: calico-node 469 | updateStrategy: 470 | type: RollingUpdate 471 | rollingUpdate: 472 | maxUnavailable: 1 473 | template: 474 | metadata: 475 | labels: 476 | k8s-app: calico-node 477 | annotations: 478 | # This, along with the CriticalAddonsOnly toleration below, 479 | # marks the pod as a critical add-on, ensuring it gets 480 | # priority scheduling and that its resources are reserved 481 | # if it ever gets evicted. 482 | scheduler.alpha.kubernetes.io/critical-pod: '' 483 | spec: 484 | nodeSelector: 485 | beta.kubernetes.io/os: linux 486 | hostNetwork: true 487 | tolerations: 488 | # Make sure calico-node gets scheduled on all nodes. 489 | - effect: NoSchedule 490 | operator: Exists 491 | # Mark the pod as a critical add-on for rescheduling. 492 | - key: CriticalAddonsOnly 493 | operator: Exists 494 | - effect: NoExecute 495 | operator: Exists 496 | serviceAccountName: calico-node 497 | # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force 498 | # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 499 | terminationGracePeriodSeconds: 0 500 | initContainers: 501 | # This container performs upgrade from host-local IPAM to calico-ipam. 502 | # It can be deleted if this is a fresh installation, or if you have already 503 | # upgraded to use calico-ipam. 504 | - name: upgrade-ipam 505 | image: calico/cni:v3.6.1 506 | command: ["/opt/cni/bin/calico-ipam", "-upgrade"] 507 | env: 508 | - name: KUBERNETES_NODE_NAME 509 | valueFrom: 510 | fieldRef: 511 | fieldPath: spec.nodeName 512 | - name: CALICO_NETWORKING_BACKEND 513 | valueFrom: 514 | configMapKeyRef: 515 | name: calico-config 516 | key: calico_backend 517 | volumeMounts: 518 | - mountPath: /var/lib/cni/networks 519 | name: host-local-net-dir 520 | - mountPath: /host/opt/cni/bin 521 | name: cni-bin-dir 522 | # This container installs the Calico CNI binaries 523 | # and CNI network config file on each node. 524 | - name: install-cni 525 | image: calico/cni:v3.6.1 526 | command: ["/install-cni.sh"] 527 | env: 528 | # Name of the CNI config file to create. 529 | - name: CNI_CONF_NAME 530 | value: "10-calico.conflist" 531 | # The CNI network config to install on each node. 
532 | - name: CNI_NETWORK_CONFIG 533 | valueFrom: 534 | configMapKeyRef: 535 | name: calico-config 536 | key: cni_network_config 537 | # Set the hostname based on the k8s node name. 538 | - name: KUBERNETES_NODE_NAME 539 | valueFrom: 540 | fieldRef: 541 | fieldPath: spec.nodeName 542 | # CNI MTU Config variable 543 | - name: CNI_MTU 544 | valueFrom: 545 | configMapKeyRef: 546 | name: calico-config 547 | key: veth_mtu 548 | # Prevents the container from sleeping forever. 549 | - name: SLEEP 550 | value: "false" 551 | volumeMounts: 552 | - mountPath: /host/opt/cni/bin 553 | name: cni-bin-dir 554 | - mountPath: /host/etc/cni/net.d 555 | name: cni-net-dir 556 | containers: 557 | # Runs node container on each Kubernetes node. This 558 | # container programs network policy and routes on each 559 | # host. 560 | - name: calico-node 561 | image: calico/node:v3.6.1 562 | env: 563 | # Use Kubernetes API as the backing datastore. 564 | - name: DATASTORE_TYPE 565 | value: "kubernetes" 566 | # Wait for the datastore. 567 | - name: WAIT_FOR_DATASTORE 568 | value: "true" 569 | # Set based on the k8s node name. 570 | - name: NODENAME 571 | valueFrom: 572 | fieldRef: 573 | fieldPath: spec.nodeName 574 | # Choose the backend to use. 575 | - name: CALICO_NETWORKING_BACKEND 576 | valueFrom: 577 | configMapKeyRef: 578 | name: calico-config 579 | key: calico_backend 580 | # Cluster type to identify the deployment type 581 | - name: CLUSTER_TYPE 582 | value: "k8s,bgp" 583 | # Auto-detect the BGP IP address. 584 | - name: IP 585 | value: "autodetect" 586 | # Enable IPIP 587 | - name: CALICO_IPV4POOL_IPIP 588 | value: "Always" 589 | # Set MTU for tunnel device used if ipip is enabled 590 | - name: FELIX_IPINIPMTU 591 | valueFrom: 592 | configMapKeyRef: 593 | name: calico-config 594 | key: veth_mtu 595 | # The default IPv4 pool to create on startup if none exists. Pod IPs will be 596 | # chosen from this range. Changing this value after installation will have 597 | # no effect. This should fall within `--cluster-cidr`. 598 | - name: CALICO_IPV4POOL_CIDR 599 | value: "{{ k8s_master_pod_network_cidr }}" 600 | # Disable file logging so `kubectl logs` works. 601 | - name: CALICO_DISABLE_FILE_LOGGING 602 | value: "true" 603 | # Set Felix endpoint to host default action to ACCEPT. 604 | - name: FELIX_DEFAULTENDPOINTTOHOSTACTION 605 | value: "ACCEPT" 606 | # Disable IPv6 on Kubernetes. 607 | - name: FELIX_IPV6SUPPORT 608 | value: "false" 609 | # Set Felix logging to "info" 610 | - name: FELIX_LOGSEVERITYSCREEN 611 | value: "info" 612 | - name: FELIX_HEALTHENABLED 613 | value: "true" 614 | securityContext: 615 | privileged: true 616 | resources: 617 | requests: 618 | cpu: 250m 619 | livenessProbe: 620 | httpGet: 621 | path: /liveness 622 | port: 9099 623 | host: localhost 624 | periodSeconds: 10 625 | initialDelaySeconds: 10 626 | failureThreshold: 6 627 | readinessProbe: 628 | exec: 629 | command: 630 | - /bin/calico-node 631 | - -bird-ready 632 | - -felix-ready 633 | periodSeconds: 10 634 | volumeMounts: 635 | - mountPath: /lib/modules 636 | name: lib-modules 637 | readOnly: true 638 | - mountPath: /run/xtables.lock 639 | name: xtables-lock 640 | readOnly: false 641 | - mountPath: /var/run/calico 642 | name: var-run-calico 643 | readOnly: false 644 | - mountPath: /var/lib/calico 645 | name: var-lib-calico 646 | readOnly: false 647 | volumes: 648 | # Used by node. 
649 | - name: lib-modules 650 | hostPath: 651 | path: /lib/modules 652 | - name: var-run-calico 653 | hostPath: 654 | path: /var/run/calico 655 | - name: var-lib-calico 656 | hostPath: 657 | path: /var/lib/calico 658 | - name: xtables-lock 659 | hostPath: 660 | path: /run/xtables.lock 661 | type: FileOrCreate 662 | # Used to install CNI. 663 | - name: cni-bin-dir 664 | hostPath: 665 | path: /opt/cni/bin 666 | - name: cni-net-dir 667 | hostPath: 668 | path: /etc/cni/net.d 669 | # Mount in the directory for host-local IPAM allocations. This is 670 | # used when upgrading from host-local to calico-ipam, and can be removed 671 | # if not using the upgrade-ipam init container. 672 | - name: host-local-net-dir 673 | hostPath: 674 | path: /var/lib/cni/networks 675 | --- 676 | 677 | apiVersion: v1 678 | kind: ServiceAccount 679 | metadata: 680 | name: calico-node 681 | namespace: kube-system 682 | 683 | --- 684 | # Source: calico/templates/calico-kube-controllers.yaml 685 | # This manifest deploys the Calico node controller. 686 | # See https://github.com/projectcalico/kube-controllers 687 | apiVersion: extensions/v1beta1 688 | kind: Deployment 689 | metadata: 690 | name: calico-kube-controllers 691 | namespace: kube-system 692 | labels: 693 | k8s-app: calico-kube-controllers 694 | annotations: 695 | scheduler.alpha.kubernetes.io/critical-pod: '' 696 | spec: 697 | # The controller can only have a single active instance. 698 | replicas: 1 699 | strategy: 700 | type: Recreate 701 | template: 702 | metadata: 703 | name: calico-kube-controllers 704 | namespace: kube-system 705 | labels: 706 | k8s-app: calico-kube-controllers 707 | spec: 708 | nodeSelector: 709 | beta.kubernetes.io/os: linux 710 | tolerations: 711 | # Mark the pod as a critical add-on for rescheduling. 712 | - key: CriticalAddonsOnly 713 | operator: Exists 714 | - key: node-role.kubernetes.io/master 715 | effect: NoSchedule 716 | serviceAccountName: calico-kube-controllers 717 | containers: 718 | - name: calico-kube-controllers 719 | image: calico/kube-controllers:v3.6.1 720 | env: 721 | # Choose which controllers to run. 722 | - name: ENABLED_CONTROLLERS 723 | value: node 724 | - name: DATASTORE_TYPE 725 | value: kubernetes 726 | readinessProbe: 727 | exec: 728 | command: 729 | - /usr/bin/check-status 730 | - -r 731 | 732 | --- 733 | 734 | apiVersion: v1 735 | kind: ServiceAccount 736 | metadata: 737 | name: calico-kube-controllers 738 | namespace: kube-system 739 | --- 740 | # Source: calico/templates/calico-etcd-secrets.yaml 741 | 742 | --- 743 | # Source: calico/templates/calico-typha.yaml 744 | 745 | --- 746 | # Source: calico/templates/configure-canal.yaml 747 | 748 | --------------------------------------------------------------------------------
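Both Calico templates above label the per-node agent pods with k8s-app: calico-node, which makes it straightforward to check that pod networking came up after provisioning. A small diagnostic sketch, run on the master as the admin user whose ~/.kube/config was populated by the master role, might look like the following; it is hypothetical and not part of the playbooks, and the task names and register variable are illustrative:

    - name: Wait for every node to report Ready (diagnostic only)
      become: false
      command: kubectl wait --for=condition=Ready nodes --all --timeout=300s
      changed_when: false

    - name: List the Calico agent pods
      become: false
      command: kubectl get pods -n kube-system -l k8s-app=calico-node -o wide
      register: calico_pods   # hypothetical variable name
      changed_when: false

When the cluster is healthy, the second command should list one calico-node pod per VM, all in the Running state, alongside a single calico-kube-controllers pod in kube-system.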