├── .gitignore
├── LICENSE
├── README.md
├── ansible.cfg
├── inventory
│   └── mycluster
├── playbooks
│   ├── add-new-workers-all.yaml
│   ├── cleanup-all-vms.yaml
│   ├── config-k8s-for-haproxy.yaml
│   ├── decrypting-secrets.yaml
│   ├── docker.yaml
│   ├── encrypting-secrets.yaml
│   ├── ha-etcd.yaml
│   ├── heapster.yaml
│   ├── k8s-all.yaml
│   ├── keepalived.yaml
│   ├── kubeadm-add-new-masters.yaml
│   ├── kubeadm-upgrade.yaml
│   ├── kubeadm.yaml
│   ├── kubernetes-add-workers.yaml
│   ├── kubernetes-masters.yaml
│   ├── ntpd.yaml
│   ├── repos.yaml
│   ├── setup-firewalld.yaml
│   ├── setup-haproxy.yaml
│   └── smoke-test.yaml
└── roles
    ├── cleanup
    │   └── tasks
    │       └── main.yaml
    ├── config-kube-apiserver
    │   └── tasks
    │       └── main.yaml
    ├── config-kube-proxy
    │   └── tasks
    │       └── main.yaml
    ├── config-kube-scheduler-and-controller
    │   └── tasks
    │       └── main.yaml
    ├── config-kubelet
    │   └── tasks
    │       └── main.yaml
    ├── copy-etcd-certs-to-masters
    │   └── tasks
    │       └── main.yaml
    ├── decrypt-secrets
    │   ├── tasks
    │   │   └── main.yaml
    │   └── templates
    │       └── enc-config.yaml.j2
    ├── docker
    │   └── tasks
    │       └── main.yaml
    ├── encrypt-secrets
    │   ├── tasks
    │   │   └── main.yaml
    │   └── templates
    │       └── enc-config.yaml.j2
    ├── etcd-ha
    │   ├── tasks
    │   │   └── main.yaml
    │   └── templates
    │       ├── etcd.env
    │       └── etcd.service
    ├── firewalld
    │   └── tasks
    │       └── main.yaml
    ├── flannel
    │   ├── tasks
    │   │   └── main.yaml
    │   └── templates
    │       ├── calico.yaml
    │       ├── flannel.yaml.j2
    │       └── rbac-kdd.yaml
    ├── generate-etcd-certs
    │   ├── files
    │   │   └── certs
    │   │       ├── ca-config.json
    │   │       ├── ca-csr.json
    │   │       └── client.json
    │   └── tasks
    │       └── main.yaml
    ├── haproxy
    │   ├── tasks
    │   │   └── main.yaml
    │   └── templates
    │       └── haproxyConfig.j2
    ├── heapster
    │   ├── tasks
    │   │   └── main.yaml
    │   └── templates
    │       └── heapster.yml.j2
    ├── keepalived
    │   ├── tasks
    │   │   └── main.yaml
    │   └── templates
    │       ├── check_apiserver.sh
    │       └── keepalived.conf
    ├── kubeadm-init-first-master
    │   └── tasks
    │       └── main.yaml
    ├── kubeadm-init-other-masters
    │   └── tasks
    │       └── main.yaml
    ├── kubeadm-init-prep
    │   ├── tasks
    │   │   └── main.yaml
    │   └── templates
    │       └── kubeadm-config.yaml
    ├── kubeadm-join-node
    │   └── tasks
    │       └── main.yaml
    ├── kubeadm-upgrade
    │   └── tasks
    │       └── main.yaml
    ├── kubeadm
    │   └── tasks
    │       └── main.yaml
    ├── ntpd
    │   └── tasks
    │       └── main.yaml
    ├── pull-kubernetes-images
    │   └── tasks
    │       └── main.yaml
    ├── repos
    │   └── tasks
    │       └── main.yaml
    ├── setup-cfssl
    │   └── tasks
    │       └── main.yaml
    ├── smoke-test
    │   ├── tasks
    │   │   └── main.yaml
    │   └── templates
    │       └── nginx.yml.j2
    ├── sync-etcd-certs
    │   └── tasks
    │       └── main.yaml
    └── update-kubelet
        └── tasks
            └── main.yaml
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | inventory/*
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 | 
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 | 
7 | 1. Definitions.
8 | 
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 | 
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 | 
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity.
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at
194 | 
195 | http://www.apache.org/licenses/LICENSE-2.0
196 | 
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Ansible playbooks to set up a cross-datacenter Kubernetes HA (multi-master) cluster on Red Hat Enterprise Linux 7
2 | 
3 | This repository provides Ansible playbooks to set up a highly available Kubernetes cluster on Red Hat Enterprise Linux 7. The playbooks are mainly inspired by the kubeadm documentation and other Ansible projects on GitHub. They can be run separately or as one playbook for a fully fledged HA cluster.
4 | 
5 | 
6 | # Prerequisites:
7 | 
8 | RHEL 7.2+.
9 | On your Ansible manager machine, install python-pip:
10 | ```
11 | wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
12 | rpm -ivh epel-release-latest-7.noarch.rpm
13 | yum install python-pip
14 | ```
15 | 
16 | 
17 | Install Ansible on your Ansible manager machine.
18 | 
19 | * You can do:
20 | ```
21 | pip install ansible
22 | ```
23 | 
24 | * Set up SSH access from the manager machine to the target machines.
25 | ```ssh-copy-id -i ~/.ssh/id_rsa.pub ```
26 | 
27 | # Environment preparation:
28 | 
29 | * Clone the repo:
30 | On the machine that you want to use as the Ansible manager (it can be your laptop or any other machine that has SSH access to the target machines):
31 | ```
32 | git clone git@github.com:IBM/ansible-kubernetes-ha-cluster.git
33 | cd ansible-kubernetes-ha-cluster
34 | ```
35 | 
36 | * Create inventory/mycluster
37 | and declare your machines such as:
38 | ```
39 | myhostname.domain.com ansible_usehost=
40 | ```
41 | 
42 | Also make sure to update the `vars` section:
43 | - Choose the desired versions of Kubernetes and Docker
44 | - Set the pod network CIDR (the default setup is for flannel)
45 | - Set the eviction-hard properties for workers and masters
46 | - Choose whether you want a private or a public setup; with a private setup, you need to provide the credentials used to fetch the yum packages and Docker images
47 | - Specify the network zone for the firewalld setup (default is public)
48 | 
49 | Several host groups are defined and used; you can reuse the mycluster file defined in the inventory folder:
50 | ```
51 | [dc1-k8s-masters] # these are all the masters of datacenter 1 (DC1)
52 | [dc1-k8s-workers-vm] # these are all the VM worker nodes of DC1
53 | [dc1-k8s-workers-bm] # these are all the bare-metal worker nodes of DC1
54 | ```
55 | We can have as many data centers as we need. For each data center, define the masters and workers and add them to `[k8s-masters:children]`, `[k8s-workers:children]`, and `[k8s-nodes:children]`.
56 | 
57 | You can check that you can ping all the machines:
58 | 
59 | ```
60 | ansible -m ping all -i inventory/mycluster
61 | ```
62 | # Install a highly available Kubernetes cluster using kubeadm
63 | 
64 | You can now run the k8s-all playbook to get your cluster set up.
65 | You can also run the different playbooks separately for different purposes (setting up docker, etcd, keepalived, kubeadm ...).
66 | 
67 | ```
68 | ansible-playbook -i inventory/mycluster playbooks/k8s-all.yaml
69 | ```
70 | 
71 | # What k8s-all.yaml includes:
72 | 
73 | - Adding the required yum repository (private or public)
74 | - Installing ntpd
75 | - Installing docker
76 | - Installing kubeadm, kubelet and kubectl
77 | - Setting up firewalld
78 | - Generating etcd certificates and installing an HA etcd cluster on all the master nodes
79 | - Installing haproxy
80 | - Installing keepalived and setting up VIP management (this is optional; use it only when you have a VIP)
81 | - Setting up the Kubernetes masters
82 | - Adding the nodes to the cluster
83 | - Reconfiguring the nodes and components to communicate through haproxy
84 | - Encrypting Kubernetes secrets at rest
85 | - Adding Heapster
86 | - Running a smoke test
87 | 
88 | # Restarting the install:
89 | 
90 | If you need to restart the process using kubeadm reset, use the cleanup-all-vms playbook, which deletes the state from all VMs. Some of the commands might fail, but you can ignore that.
91 | 
92 | # Encrypting Secrets at rest (already in the k8s-all playbook):
93 | 
94 | If you want to add an extra layer of security by encrypting your secrets at rest, you can use the encrypting-secrets.yaml playbook. You can add it to k8s-all.yaml or use it separately.
95 | 
96 | Before using it, update the inventory file to change the encryption key variable "encoded_secret".
97 | To generate a new encryption key, you can do the following:
98 | 
99 | ```
100 | head -c 32 /dev/urandom | base64
101 | ```
102 | Copy the output and save it in the inventory variable.
103 | 
104 | After that, run the playbook:
105 | 
106 | ```
107 | ansible-playbook -i inventory/mycluster playbooks/encrypting-secrets.yaml
108 | ```
109 | 
110 | # Work in progress:
111 | We will add the following items to this repository:
112 | 
113 | - Kubeadm upgrade playbook
114 | - Adding the possibility of using a user other than root
115 | - Add-ons (Prometheus support?)
116 | 
117 | # Contribution:
118 | To contribute, feel free to create GitHub issues and PRs.
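119 | 
120 | # Verifying the cluster:
121 | 
122 | A quick sanity check after k8s-all.yaml finishes (a minimal sketch; it assumes kubectl on the first master is pointed at an admin kubeconfig such as the /etc/kubernetes/admin.conf that kubeadm generates, and `<any-node-ip>` is a placeholder for one of your nodes):
123 | 
124 | ```
125 | # all masters and workers should eventually report Ready
126 | kubectl get nodes -o wide
127 | # control-plane components, kube-proxy and flannel should be Running
128 | kubectl get pods -n kube-system
129 | # the smoke test exposes nginx on smoke_test_node_port (31111 by default in inventory/mycluster)
130 | curl http://<any-node-ip>:31111
131 | ```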
-------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = inventory 3 | roles_path = roles 4 | retry_files_enabled = False 5 | host_key_checking = False -------------------------------------------------------------------------------- /inventory/mycluster: -------------------------------------------------------------------------------- 1 | #DC1 masters 2 | [dc1-k8s-masters] 3 | ansible_usehost= 4 | #DC1 virtualmachines 5 | [dc1-k8s-workers-vm] 6 | ansible_usehost= 7 | #DC1 baremetal 8 | [dc1-k8s-workers-bm] 9 | ansible_usehost= 10 | 11 | [dc1-k8s-workers:children] 12 | dc1-k8s-workers-bm 13 | dc1-k8s-workers-vm 14 | 15 | [dc2-k8s-masters] 16 | ansible_usehost= 17 | 18 | [dc2-k8s-workers-vm] 19 | ansible_usehost= 20 | 21 | [dc2-k8s-workers-bm] 22 | ansible_usehost= 23 | 24 | [dc2-k8s-workers:children] 25 | dc2-k8s-workers-bm 26 | dc2-k8s-workers-vm 27 | 28 | [dc3-k8s-masters] 29 | ansible_usehost= 30 | 31 | [dc3-k8s-workers-vm] 32 | ansible_usehost= 33 | 34 | [dc3-k8s-workers-bm] 35 | ansible_usehost= 36 | 37 | [dc3-k8s-workers:children] 38 | dc3-k8s-workers-bm 39 | dc3-k8s-workers-vm 40 | 41 | [k8s-masters:children] 42 | dc1-k8s-masters 43 | dc2-k8s-masters 44 | dc3-k8s-masters 45 | 46 | [k8s-workers:children] 47 | dc1-k8s-workers 48 | dc2-k8s-workers 49 | dc3-k8s-workers 50 | 51 | [dc1-k8s-nodes:children] 52 | dc1-k8s-masters 53 | dc1-k8s-workers 54 | 55 | [dc2-k8s-nodes:children] 56 | dc2-k8s-masters 57 | dc2-k8s-workers 58 | 59 | [dc3-k8s-nodes:children] 60 | dc3-k8s-masters 61 | dc3-k8s-workers 62 | 63 | [k8s-nodes:children] 64 | dc1-k8s-nodes 65 | dc2-k8s-nodes 66 | dc3-k8s-nodes 67 | 68 | [k8s-workers-bm:children] 69 | dc1-k8s-workers-bm 70 | dc2-k8s-workers-bm 71 | dc3-k8s-workers-bm 72 | 73 | [k8s-workers-vm:children] 74 | dc1-k8s-workers-vm 75 | dc2-k8s-workers-vm 76 | dc3-k8s-workers-vm 77 | 78 | [new-k8s-masters] 79 | 80 | [new-k8s-workers] 81 | 82 | 83 | #---- Variables ----- 84 | 85 | [all:vars] 86 | ansible_ssh_common_args='-o StrictHostKeyChecking=no' 87 | docker_version=17.12.1.ce-1.el7.centos 88 | 89 | # Kubernetes (k8s) vars... 
90 | kubernetes_version=1.9.6
91 | kubelet_version=1.9.6-0
92 | kubeadm_version=1.10.1
93 | pod_network_cidr=10.244.0.0/16
94 | # optional vip to be used with keepalived
95 | virtual_ip=10.54.235.61
96 | # change to your interface
97 | network_interface=eth0
98 | smoke_test_node_port=31111
99 | kubelet_workers_eviction_hard=memory.available<10%
100 | kubelet_masters_eviction_hard=memory.available<10%
101 | etcd_version=v3.1.12
102 | haproxy_stats_port=9000
103 | ha_proxy_port=5446
104 | # haproxy port for etcd
105 | etcd_proxy_port=3379
106 | proxy_port=6443
107 | image_pull_type=always
108 | # setup: set to private to use the private repos, or public to use the public ones
109 | setup=private
110 | encoded_secret="zm1J79tER3oeEIeQUWT6F1Vdq8Kzj/z54jqR8xj/RRE="
111 | network_zone=public
112 | 
113 | # Container images: to be changed if using private docker registry
114 | flannel_container_image=/flannel:v0.9.1-amd64
115 | kubeapi_server_container_image=/kube-apiserver-amd64:v1.9.6
116 | kubescheduler_container_image=/kube-scheduler-amd64:v1.9.6
117 | kubecontroller_manager_container_image=/kube-controller-manager-amd64:v1.9.6
118 | kubeproxy_container_image=/kube-proxy-amd64:v1.9.6
119 | haproxy_container_image=/haproxy:1.5
120 | heapster_container_image=/heapster-amd64:v1.5.1
121 | kubedns_container_image=/k8s-dns-kube-dns-amd64:1.14.8
122 | kubednsmasq_container_image=/k8s-dns-dnsmasq-nanny-amd64:1.14.8
123 | kubednssidecar_container_image=/k8s-dns-sidecar-amd64:1.14.8
124 | kubepause_container_image=/pause-amd64:3.0
125 | 
126 | 
127 | # Optional, needed when using private registry
128 | docker_registry=
129 | docker_registry_username=
130 | docker_registry_password=
131 | docker_registry_email=
132 | 
--------------------------------------------------------------------------------
/playbooks/add-new-workers-all.yaml:
--------------------------------------------------------------------------------
1 | - hosts: new-k8s-workers
2 |   become: yes
3 |   roles:
4 |     - docker
5 |     - firewalld
6 |     - kubeadm
7 |     - kubeadm-join-node
8 |     - config-kubelet
--------------------------------------------------------------------------------
/playbooks/cleanup-all-vms.yaml:
--------------------------------------------------------------------------------
1 | - hosts: k8s-nodes
2 |   become: yes
3 |   gather_facts: no
4 |   ignore_errors: yes
5 |   roles:
6 |     - cleanup
--------------------------------------------------------------------------------
/playbooks/config-k8s-for-haproxy.yaml:
--------------------------------------------------------------------------------
1 | - hosts: k8s-masters
2 |   become: yes
3 |   roles:
4 |     - config-kube-scheduler-and-controller
5 |     - {role: config-kube-proxy, when: "groups['k8s-masters'][0] == inventory_hostname"}
6 | 
7 | - hosts: k8s-masters
8 |   become: yes
9 |   serial: 1
10 |   roles:
11 |     - config-kube-apiserver
12 | 
13 | - hosts: k8s-workers
14 |   become: yes
15 |   roles:
16 |     - config-kubelet
--------------------------------------------------------------------------------
/playbooks/decrypting-secrets.yaml:
--------------------------------------------------------------------------------
1 | - hosts: k8s-masters
2 |   become: yes
3 |   roles:
4 |     - decrypt-secrets
--------------------------------------------------------------------------------
/playbooks/docker.yaml:
--------------------------------------------------------------------------------
1 | - hosts: k8s-nodes
2 |   become: yes
3 |   roles:
4 |     - docker
5 | 
6 | 
7 | 
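8 | # Standalone usage (a sketch; assumes the inventory layout described in the README):
9 | #   ansible-playbook -i inventory/mycluster playbooks/docker.yaml
10 | # Like the other playbooks here, this one can be run on its own, e.g. to (re)install
11 | # Docker on every host in the k8s-nodes group without running the rest of k8s-all.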
--------------------------------------------------------------------------------
/playbooks/encrypting-secrets.yaml:
--------------------------------------------------------------------------------
1 | - hosts: k8s-masters
2 |   become: yes
3 |   roles:
4 |     - encrypt-secrets
5 | 
--------------------------------------------------------------------------------
/playbooks/ha-etcd.yaml:
--------------------------------------------------------------------------------
1 | - hosts: k8s-masters
2 |   become: yes
3 |   vars:
4 |     initial_cluster: |
5 |       {% set comma = joiner(",") %}
6 |       {% for master in groups["k8s-masters"] -%}
7 |       {{ comma() }}{{master}}=https://{{hostvars[master]["ansible_usehost"]}}:2380
8 |       {%- endfor %} \
9 |   roles:
10 |     - setup-cfssl
11 |     - {role: generate-etcd-certs, when: "groups['k8s-masters'][0] == inventory_hostname"}
12 |     - {role: copy-etcd-certs-to-masters, when: "groups['k8s-masters'][0] != inventory_hostname"}
13 |     - etcd-ha
14 | 
15 | 
--------------------------------------------------------------------------------
/playbooks/heapster.yaml:
--------------------------------------------------------------------------------
1 | - hosts: k8s-masters[0]
2 |   become: yes
3 |   roles:
4 |     - heapster
--------------------------------------------------------------------------------
/playbooks/k8s-all.yaml:
--------------------------------------------------------------------------------
1 | - import_playbook: repos.yaml
2 | - import_playbook: ntpd.yaml
3 | - import_playbook: docker.yaml
4 | - import_playbook: kubeadm.yaml
5 | - import_playbook: setup-firewalld.yaml
6 | - import_playbook: ha-etcd.yaml
7 | # - import_playbook: setup-haproxy.yaml # optional: needed when using haproxy; can be skipped when only a vip is used
8 | - import_playbook: setup-haproxy.yaml
9 | - import_playbook: kubernetes-masters.yaml
10 | - import_playbook: kubernetes-add-workers.yaml
11 | - import_playbook: config-k8s-for-haproxy.yaml
12 | - import_playbook: encrypting-secrets.yaml
13 | - import_playbook: heapster.yaml
14 | - import_playbook: smoke-test.yaml
--------------------------------------------------------------------------------
/playbooks/keepalived.yaml:
--------------------------------------------------------------------------------
1 | - hosts: k8s-masters
2 |   become: yes
3 |   roles:
4 |     - keepalived
--------------------------------------------------------------------------------
/playbooks/kubeadm-add-new-masters.yaml:
--------------------------------------------------------------------------------
1 | - hosts: new-k8s-masters
2 |   become: yes
3 |   roles:
4 |     - docker
5 |     - firewalld
6 |     - keepalived
7 |     - kubeadm
8 |     - kubeadm-init-prep
9 |     - sync-etcd-certs
10 |     - { role: kubeadm-init-other-masters, when: "groups['k8s-masters'][0] != inventory_hostname" }
11 |     - config-kube-scheduler-and-controller
--------------------------------------------------------------------------------
/playbooks/kubeadm-upgrade.yaml:
--------------------------------------------------------------------------------
1 | - hosts: k8s-masters[0]
2 |   become: yes
3 |   vars:
4 |     upgrade_version: v1.9.6
5 |     kubeadm_version: latest
6 |   ignore_errors: yes
7 |   roles:
8 |     - kubeadm-upgrade
9 | 
10 | - hosts: all
11 |   serial: 1
12 |   become: yes
13 |   vars:
14 |     kubelet_version: latest
15 |   roles:
16 |     - update-kubelet
17 | 
18 | 
--------------------------------------------------------------------------------
/playbooks/kubeadm.yaml:
--------------------------------------------------------------------------------
1 | - hosts: k8s-nodes
2 |   become: yes
3 |   roles:
4 |     -
kubeadm -------------------------------------------------------------------------------- /playbooks/kubernetes-add-workers.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-workers 2 | become: yes 3 | roles: 4 | - kubeadm-join-node 5 | # - config-kubelet 6 | -------------------------------------------------------------------------------- /playbooks/kubernetes-masters.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-masters 2 | become: yes 3 | roles: 4 | - pull-kubernetes-images 5 | - kubeadm-init-prep 6 | - { role: kubeadm-init-first-master, when: "groups['k8s-masters'][0] == inventory_hostname"} 7 | - { role: kubeadm-init-other-masters, when: "groups['k8s-masters'][0] != inventory_hostname" } 8 | - { role: flannel, when: "groups['k8s-masters'][0] == inventory_hostname"} 9 | -------------------------------------------------------------------------------- /playbooks/ntpd.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-nodes 2 | become: yes 3 | roles: 4 | - ntpd 5 | 6 | -------------------------------------------------------------------------------- /playbooks/repos.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-nodes 2 | become: yes 3 | roles: 4 | - repos 5 | 6 | 7 | -------------------------------------------------------------------------------- /playbooks/setup-firewalld.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-nodes 2 | become: yes 3 | roles: 4 | - firewalld -------------------------------------------------------------------------------- /playbooks/setup-haproxy.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-nodes 2 | become: yes 3 | roles: 4 | - haproxy -------------------------------------------------------------------------------- /playbooks/smoke-test.yaml: -------------------------------------------------------------------------------- 1 | - hosts: k8s-masters[0] 2 | become: yes 3 | roles: 4 | - smoke-test -------------------------------------------------------------------------------- /roles/cleanup/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: cleanup kubernetes data 2 | shell: | 3 | kubeadm reset 4 | docker kill haproxy 5 | docker rm haproxy 6 | systemctl stop kubelet 7 | systemctl stop docker 8 | systemctl stop etcd 9 | systemctl stop keepalived 10 | rm /etc/systemd/system/kubelet.service.d/20-pod-infra-image.conf 11 | rm -rf /var/lib/cni/ 12 | rm -rf /var/lib/kubelet/* 13 | rm -rf /etc/keepalived/* 14 | rm -rf /etc/cni/ 15 | rm -r /var/lib/etcd/member 16 | ip link delete cni0 17 | ip link delete flannel.1 18 | mkdir -p /etc/keepalived 19 | rm -rf /var/lib/docker 20 | rm /usr/local/bin/etcd 21 | rm /usr/local/bin/etcdctl 22 | rm /usr/local/bin/cfssljson 23 | rm /usr/local/bin/cfssl 24 | rm -rf /etc/kubernetes 25 | 26 | # - name: Close ports 27 | # firewalld: 28 | # port: "{{item}}" 29 | # permanent: true 30 | # state: disabled 31 | # with_items: 32 | # - 6443/tcp 33 | # - 2379-2380/tcp 34 | # - 10250/tcp 35 | # - 10251/tcp 36 | # - 10252/tcp 37 | # - 10053/tcp 38 | # - 10053/udp 39 | # - 10255/tcp 40 | # - 30000-32767/tcp 41 | # - 8082/tcp 42 | # - 8472/udp 43 | # - 8285/udp 44 | # - 9898/tcp 45 | # - 9100/tcp 46 | # - 4194/tcp 47 | # - 10254/tcp 48 | 49 | - name: remove yum 
packages 50 | become: yes 51 | yum: 52 | name: "{{item}}" 53 | state: absent 54 | with_items: 55 | - kubelet 56 | - kubeadm 57 | - kubectl 58 | - docker-ce 59 | - yum-utils 60 | - device-mapper-persistent-data 61 | - lvm2 62 | - policycoreutils-python 63 | - python-pip 64 | - container-selinux 65 | 66 | - name: Remove Artifactory repo 67 | yum_repository: 68 | name: Artifactory 69 | state: absent 70 | 71 | - name: Remove docker-ce-stable repo 72 | yum_repository: 73 | name: docker-ce 74 | state: absent 75 | 76 | - name: Remove kubernetes repo 77 | yum_repository: 78 | name: kubernetes 79 | state: absent 80 | -------------------------------------------------------------------------------- /roles/config-kube-apiserver/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Backup kube-apiserver.yaml 2 | shell: cp /etc/kubernetes/manifests/kube-apiserver.yaml /tmp/kube-apiserver.yaml.$(date +%Y%m%d%H%M%S) 3 | 4 | - name: Add runtime-config to kube-apiserver 5 | lineinfile: 6 | dest: /etc/kubernetes/manifests/kube-apiserver.yaml 7 | regexp: '--runtime-config' 8 | line: ' - --runtime-config=settings.k8s.io/v1alpha1=true' 9 | insertbefore: '--etcd-servers=' 10 | backup: no 11 | 12 | - replace: 13 | path: /etc/kubernetes/manifests/kube-apiserver.yaml 14 | regexp: '--admission-control=' 15 | replace: '--admission-control=PodPreset,' 16 | backup: no -------------------------------------------------------------------------------- /roles/config-kube-proxy/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Get config map for the kube-proxy 2 | shell: kubectl --kubeconfig=/tmp/kubeadm-ha/config get configmap -n kube-system kube-proxy -o yaml > /tmp/kube-proxy-cm.yaml 3 | 4 | - replace: 5 | path: /tmp/kube-proxy-cm.yaml 6 | regexp: 'server:.*' 7 | replace: "server: https://localhost:{{ha_proxy_port}}" 8 | backup: no 9 | 10 | 11 | - name: Apply the change to the config map 12 | shell: kubectl --kubeconfig=/tmp/kubeadm-ha/config apply -f /tmp/kube-proxy-cm.yaml --force 13 | 14 | - name: Delete all kube-proxy instances to make them restart with the new configuration 15 | shell: kubectl --kubeconfig=/tmp/kubeadm-ha/config delete pod -n kube-system -l k8s-app=kube-proxy -------------------------------------------------------------------------------- /roles/config-kube-scheduler-and-controller/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: change k8s scheduler and controller config 2 | become: yes 3 | replace: 4 | path: "{{item}}" 5 | regexp: '--address=127.0.0.1' 6 | replace: '--address=0.0.0.0' 7 | with_items: 8 | - /etc/kubernetes/controller-manager.conf 9 | - /etc/kubernetes/scheduler.conf 10 | - /etc/kubernetes/manifests/kube-scheduler.yaml 11 | - /etc/kubernetes/manifests/kube-controller-manager.yaml 12 | 13 | - replace: 14 | path: "{{item}}" 15 | regexp: 'server:.*' 16 | replace: "server: https://localhost:{{ha_proxy_port}}" 17 | backup: no 18 | with_items: 19 | - /etc/kubernetes/scheduler.conf 20 | - /etc/kubernetes/controller-manager.conf 21 | - /etc/kubernetes/kubelet.conf 22 | 23 | - name: restart kubelet 24 | systemd: 25 | name: kubelet 26 | state: restarted 27 | daemon_reload: yes 28 | enabled: yes -------------------------------------------------------------------------------- /roles/config-kubelet/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Pausing for 1 
minute 2 | pause: 3 | minutes: 1 4 | 5 | - replace: 6 | path: /etc/kubernetes/kubelet.conf 7 | regexp: 'server:.*' 8 | replace: "server: https://localhost:{{ha_proxy_port}}" 9 | backup: no 10 | 11 | - name: restart kubelet 12 | systemd: 13 | name: kubelet 14 | daemon_reload: yes 15 | state: restarted 16 | enabled: yes -------------------------------------------------------------------------------- /roles/copy-etcd-certs-to-masters/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Creates /etc/kubernetes/pki/etcd directory 2 | file: 3 | path: /etc/kubernetes/pki/etcd 4 | state: directory 5 | 6 | - name: "copy {{item}} to other masters" 7 | copy: 8 | src: "/tmp/etcd/{{item}}" 9 | dest: "/etc/kubernetes/pki/etcd/{{item}}" 10 | with_items: 11 | - ca-config.json 12 | - ca.pem 13 | - ca-key.pem 14 | - client.pem 15 | - client-key.pem 16 | 17 | -------------------------------------------------------------------------------- /roles/decrypt-secrets/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - include_vars: ../../../../whis-platform/inventory/group_vars/local/vault 2 | - name: copy encryption config file 3 | template: 4 | src: enc-config.yaml.j2 5 | dest: /etc/kubernetes/enc-config.yaml 6 | 7 | - name: restart kubelet 8 | systemd: 9 | name: kubelet 10 | enabled: yes 11 | daemon_reload: yes 12 | state: started -------------------------------------------------------------------------------- /roles/decrypt-secrets/templates/enc-config.yaml.j2: -------------------------------------------------------------------------------- 1 | kind: EncryptionConfig 2 | apiVersion: v1 3 | resources: 4 | - resources: 5 | - secrets 6 | providers: 7 | - identity: {} 8 | - aescbc: 9 | keys: 10 | - name: key1 11 | secret: {{encoded_secret}} -------------------------------------------------------------------------------- /roles/docker/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - include_vars: ../../../../whis-platform/inventory/group_vars/local/vault 2 | 3 | - name: get epel 4 | get_url: 5 | url: http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm 6 | dest: /tmp/epel-release-latest-7.noarch.rpm 7 | when: setup=="public" 8 | 9 | - name: install epel 10 | become: yes 11 | yum: 12 | name: /tmp/epel-release-latest-7.noarch.rpm 13 | state: present 14 | when: setup=="public" 15 | 16 | - name: install docker dependencies 17 | become: yes 18 | yum: 19 | name: "{{item}}" 20 | with_items: 21 | - yum-utils 22 | - device-mapper-persistent-data 23 | - lvm2 24 | - policycoreutils-python 25 | - python-pip 26 | 27 | - name: Install container-selinux rpm from a remote repo 28 | yum: 29 | name: http://ns3.centos.org/7.3.1611/extras/x86_64/Packages/container-selinux-2.9-4.el7.noarch.rpm 30 | state: present 31 | when: setup=="public" 32 | 33 | - name: install container-selinux from taas repo 34 | become: yes 35 | yum: 36 | name: container-selinux-2.9-4.el7.noarch 37 | state: present 38 | when: setup=="private" 39 | 40 | # can be needed in some systems 41 | # - name: Enable extra repos 42 | # command: subscription-manager repos --enable=rhel-7-server-extras-rpms 43 | 44 | - name: Installs docker 45 | yum: 46 | name: docker-ce-{{docker_version}} # docker in some distros 47 | state: installed 48 | 49 | - name: Install docker-py 50 | pip: 51 | name: docker-py 52 | extra_args: -i https://{{docker_registry_username| trim}}:{{docker_registry_password| 
trim}}@na.artifactory.swg-devops.com/artifactory/api/pypi/wh-imaging-pypi-local/simple
53 | 
54 | 
55 | - name: restart docker
56 |   systemd:
57 |     name: docker
58 |     state: restarted
59 |     daemon_reload: yes
60 |     enabled: yes
61 | 
62 | - name: Log in to the private docker registry
63 |   docker_login:
64 |     registry: "{{ docker_registry }}"
65 |     username: "{{ docker_registry_username }}"
66 |     password: "{{ docker_registry_password }}"
67 |     reauthorize: yes
--------------------------------------------------------------------------------
/roles/encrypt-secrets/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | - include_vars: ../../../../whis-platform/inventory/group_vars/local/vault
2 | - name: Backup kube-apiserver.yaml
3 |   shell: cp /etc/kubernetes/manifests/kube-apiserver.yaml /tmp/kube-apiserver.yaml.$(date +%Y%m%d%H%M%S)
4 | 
5 | - name: copy encryption config file
6 |   template:
7 |     src: enc-config.yaml.j2
8 |     dest: /etc/kubernetes/pki/enc-config.yaml
9 | 
10 | - name: Add encryption provider config to kube-apiserver
11 |   lineinfile:
12 |     dest: /etc/kubernetes/manifests/kube-apiserver.yaml
13 |     regexp: ' - --experimental-encryption-provider-config=/etc/kubernetes/pki/enc-config.yaml'
14 |     line: ' - --experimental-encryption-provider-config=/etc/kubernetes/pki/enc-config.yaml'
15 |     insertbefore: '- --etcd-servers='
16 |     backup: no
17 | 
18 | - name: ensure kubelet is running
19 |   systemd:
20 |     name: kubelet
21 |     enabled: yes
22 |     daemon_reload: yes
23 |     state: started
24 | 
--------------------------------------------------------------------------------
/roles/encrypt-secrets/templates/enc-config.yaml.j2:
--------------------------------------------------------------------------------
1 | kind: EncryptionConfig
2 | apiVersion: v1
3 | resources:
4 |   - resources:
5 |       - secrets
6 |     providers:
7 |       - aescbc:
8 |           keys:
9 |             - name: key1
10 |               secret: {{encoded_secret}}
11 |       - identity: {}
--------------------------------------------------------------------------------
/roles/etcd-ha/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | - include_vars: ../../../../whis-platform/inventory/group_vars/local/vault
2 | - name: Create the default etcd cert CSR config on each master node
3 |   shell: |
4 |     cd /etc/kubernetes/pki/etcd
5 |     /usr/local/bin/cfssl print-defaults csr > config.json
6 | 
7 | - name: Add localhost to the hosts
8 |   lineinfile:
9 |     dest: /etc/kubernetes/pki/etcd/config.json
10 |     regexp: '"localhost",'
11 |     line: '"localhost",'
12 |     insertbefore: '"example.net",'
13 |     backup: no
14 | 
15 | - replace:
16 |     path: /etc/kubernetes/pki/etcd/config.json
17 |     regexp: '"CN": "example\.net"'
18 |     replace: '"CN": "{{ inventory_hostname }}"'
19 |     backup: no
20 | 
21 | - replace:
22 |     path: /etc/kubernetes/pki/etcd/config.json
23 |     regexp: 'www\.example\.net'
24 |     replace: "{{ hostvars[inventory_hostname]['ansible_usehost'] }}"
25 |     backup: no
26 | 
27 | - replace:
28 |     path: /etc/kubernetes/pki/etcd/config.json
29 |     regexp: 'example\.net'
30 |     replace: "{{ inventory_hostname }}"
31 |     backup: no
32 | 
33 | - name: Generate etcd server certs for each master node
34 |   shell: |
35 |     cd /etc/kubernetes/pki/etcd
36 |     /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=server config.json | /usr/local/bin/cfssljson -bare server
37 | 
38 | 
39 | - name: Generate etcd peer certs for each master node
40 |   shell: |
41 |     cd /etc/kubernetes/pki/etcd
42 |     /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json
-profile=peer config.json | /usr/local/bin/cfssljson -bare peer 43 | 44 | 45 | - name: Download etcd 46 | get_url: 47 | url: https://na.artifactory.swg-devops.com:443/artifactory/wh-imaging-prereqs-generic-local/tars/etcd-{{etcd_version}}-linux-amd64.tar.gz 48 | dest: /tmp/ 49 | mode: 0710 50 | force_basic_auth: yes 51 | url_password: "{{docker_registry_password}}" 52 | url_username: "{{docker_registry_username}}" 53 | 54 | - name: Unarchive etcd 55 | unarchive: 56 | src: /tmp/etcd-{{etcd_version}}-linux-amd64.tar.gz 57 | dest: /tmp/ 58 | remote_src: yes 59 | 60 | - name: Install etcd 61 | copy: 62 | src: /tmp/etcd-{{ etcd_version }}-linux-amd64/etcd 63 | dest: /usr/local/bin/etcd 64 | mode: 0710 65 | remote_src: yes 66 | 67 | - name: Install etcdctl 68 | copy: 69 | src: /tmp/etcd-{{ etcd_version }}-linux-amd64/etcdctl 70 | dest: /usr/local/bin/etcdctl 71 | mode: 0710 72 | remote_src: yes 73 | 74 | - name: Copy etcd config file 75 | template: 76 | src: etcd.service 77 | dest: /etc/systemd/system/etcd.service 78 | 79 | - name: Copy etcd.env config file 80 | template: 81 | src: etcd.env 82 | dest: /etc/etcd.env 83 | 84 | - name: start and enable etcd 85 | systemd: 86 | name: etcd 87 | enabled: yes 88 | daemon_reload: yes 89 | state: restarted 90 | -------------------------------------------------------------------------------- /roles/etcd-ha/templates/etcd.env: -------------------------------------------------------------------------------- 1 | PEER_NAME={{ inventory_hostname }} 2 | PRIVATE_IP={{ hostvars[inventory_hostname]['ansible_usehost'] }} -------------------------------------------------------------------------------- /roles/etcd-ha/templates/etcd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=etcd 3 | Documentation=https://github.com/coreos/etcd 4 | Conflicts=etcd.service 5 | Conflicts=etcd2.service 6 | 7 | [Service] 8 | EnvironmentFile=/etc/etcd.env 9 | Type=notify 10 | Restart=always 11 | RestartSec=5s 12 | LimitNOFILE=40000 13 | TimeoutStartSec=0 14 | 15 | ExecStart=/usr/local/bin/etcd --name ${PEER_NAME} \ 16 | --data-dir /var/lib/etcd \ 17 | --listen-client-urls https://${PRIVATE_IP}:2379 \ 18 | --advertise-client-urls https://${PRIVATE_IP}:2379 \ 19 | --listen-peer-urls https://${PRIVATE_IP}:2380 \ 20 | --initial-advertise-peer-urls https://${PRIVATE_IP}:2380 \ 21 | --cert-file=/etc/kubernetes/pki/etcd/server.pem \ 22 | --key-file=/etc/kubernetes/pki/etcd/server-key.pem \ 23 | --client-cert-auth \ 24 | --trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem \ 25 | --peer-cert-file=/etc/kubernetes/pki/etcd/peer.pem \ 26 | --peer-key-file=/etc/kubernetes/pki/etcd/peer-key.pem \ 27 | --peer-client-cert-auth \ 28 | --peer-trusted-ca-file=/etc/kubernetes/pki/etcd/ca.pem \ 29 | --initial-cluster {{initial_cluster }} --initial-cluster-token my-etcd-token \ 30 | --initial-cluster-state new 31 | 32 | [Install] 33 | WantedBy=multi-user.target -------------------------------------------------------------------------------- /roles/firewalld/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Enable firewalld service 2 | systemd: 3 | state: restarted 4 | daemon_reload: yes 5 | name: firewalld 6 | enabled: yes 7 | 8 | - name: Open ports 9 | firewalld: 10 | port: "{{item}}" 11 | permanent: true 12 | state: enabled 13 | with_items: 14 | - 6443/tcp 15 | - 5443/tcp 16 | - 2379-2380/tcp 17 | - 10250/tcp 18 | - 10251/tcp 19 | - 10252/tcp 20 | - 10053/tcp 21 | - 10053/udp 22 | 
- 10255/tcp 23 | - 30000-32767/tcp 24 | - 8082/tcp 25 | - 8472/udp 26 | - 8285/udp 27 | - 9898/tcp 28 | - 9100/tcp 29 | - 443/tcp 30 | - 53/tcp 31 | - 53/udp 32 | - 4194/tcp 33 | - 10254/tcp 34 | 35 | # Port forwarding requires masquerading - https://www.certdepot.net/rhel7-get-started-firewalld/ 36 | # Network Zones - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/security_guide/sec-using_firewalls#sec-Understanding_Network_Zones 37 | - name: Enable IP masquerading 38 | shell: "firewall-cmd --permanent --zone={{ network_zone }} --add-masquerade" 39 | 40 | - name: Reload firewalld 41 | shell: firewall-cmd --reload 42 | -------------------------------------------------------------------------------- /roles/flannel/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - include_vars: ../../../../whis-platform/inventory/group_vars/local/vault 2 | 3 | - name: copy flannel yaml 4 | template: 5 | src: flannel.yaml.j2 6 | dest: /tmp/kubeadm-ha/flannel.yaml 7 | 8 | - name: Create credentials to access private repository 9 | shell: kubectl -n kube-system --kubeconfig=/tmp/kubeadm-ha/config create secret docker-registry regcred --docker-server={{docker_registry}} --docker-username={{docker_registry_username}} --docker-password={{docker_registry_password}} --docker-email={{docker_registry_email}} 10 | 11 | - name: Deploy flannel pod network 12 | shell: kubectl apply --kubeconfig=/tmp/kubeadm-ha/config -f /tmp/kubeadm-ha/flannel.yaml -------------------------------------------------------------------------------- /roles/flannel/templates/calico.yaml: -------------------------------------------------------------------------------- 1 | # Calico Version v3.1.3 2 | # https://docs.projectcalico.org/v3.1/releases#v3.1.3 3 | # This manifest includes the following component versions: 4 | # calico/node:v3.1.3 5 | # calico/cni:v3.1.3 6 | 7 | # This ConfigMap is used to configure a self-hosted Calico installation. 8 | kind: ConfigMap 9 | apiVersion: v1 10 | metadata: 11 | name: calico-config 12 | namespace: kube-system 13 | data: 14 | # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas 15 | # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is 16 | # essential. 17 | typha_service_name: "none" 18 | 19 | # The CNI network configuration to install on each node. 20 | cni_network_config: |- 21 | { 22 | "name": "k8s-pod-network", 23 | "cniVersion": "0.3.0", 24 | "plugins": [ 25 | { 26 | "type": "calico", 27 | "log_level": "info", 28 | "datastore_type": "kubernetes", 29 | "nodename": "__KUBERNETES_NODE_NAME__", 30 | "mtu": 1500, 31 | "ipam": { 32 | "type": "host-local", 33 | "subnet": "usePodCidr" 34 | }, 35 | "policy": { 36 | "type": "k8s" 37 | }, 38 | "kubernetes": { 39 | "kubeconfig": "__KUBECONFIG_FILEPATH__" 40 | } 41 | }, 42 | { 43 | "type": "portmap", 44 | "snat": true, 45 | "capabilities": {"portMappings": true} 46 | } 47 | ] 48 | } 49 | 50 | --- 51 | 52 | # This manifest creates a Service, which will be backed by Calico's Typha daemon. 53 | # Typha sits in between Felix and the API server, reducing Calico's load on the API server. 
54 | 55 | apiVersion: v1 56 | kind: Service 57 | metadata: 58 | name: calico-typha 59 | namespace: kube-system 60 | labels: 61 | k8s-app: calico-typha 62 | spec: 63 | ports: 64 | - port: 5473 65 | protocol: TCP 66 | targetPort: calico-typha 67 | name: calico-typha 68 | selector: 69 | k8s-app: calico-typha 70 | 71 | --- 72 | 73 | # This manifest creates a Deployment of Typha to back the above service. 74 | 75 | apiVersion: apps/v1beta1 76 | kind: Deployment 77 | metadata: 78 | name: calico-typha 79 | namespace: kube-system 80 | labels: 81 | k8s-app: calico-typha 82 | spec: 83 | # Number of Typha replicas. To enable Typha, set this to a non-zero value *and* set the 84 | # typha_service_name variable in the calico-config ConfigMap above. 85 | # 86 | # We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is essential 87 | # (when using the Kubernetes datastore). Use one replica for every 100-200 nodes. In 88 | # production, we recommend running at least 3 replicas to reduce the impact of rolling upgrade. 89 | replicas: 0 90 | revisionHistoryLimit: 2 91 | template: 92 | metadata: 93 | labels: 94 | k8s-app: calico-typha 95 | annotations: 96 | # This, along with the CriticalAddonsOnly toleration below, marks the pod as a critical 97 | # add-on, ensuring it gets priority scheduling and that its resources are reserved 98 | # if it ever gets evicted. 99 | scheduler.alpha.kubernetes.io/critical-pod: '' 100 | spec: 101 | hostNetwork: true 102 | tolerations: 103 | # Mark the pod as a critical add-on for rescheduling. 104 | - key: CriticalAddonsOnly 105 | operator: Exists 106 | # Since Calico can't network a pod until Typha is up, we need to run Typha itself 107 | # as a host-networked pod. 108 | serviceAccountName: calico-node 109 | containers: 110 | - image: quay.io/calico/typha:v0.7.4 111 | name: calico-typha 112 | ports: 113 | - containerPort: 5473 114 | name: calico-typha 115 | protocol: TCP 116 | env: 117 | # Enable "info" logging by default. Can be set to "debug" to increase verbosity. 118 | - name: TYPHA_LOGSEVERITYSCREEN 119 | value: "info" 120 | # Disable logging to file and syslog since those don't make sense in Kubernetes. 121 | - name: TYPHA_LOGFILEPATH 122 | value: "none" 123 | - name: TYPHA_LOGSEVERITYSYS 124 | value: "none" 125 | # Monitor the Kubernetes API to find the number of running instances and rebalance 126 | # connections. 127 | - name: TYPHA_CONNECTIONREBALANCINGMODE 128 | value: "kubernetes" 129 | - name: TYPHA_DATASTORETYPE 130 | value: "kubernetes" 131 | - name: TYPHA_HEALTHENABLED 132 | value: "true" 133 | # Uncomment these lines to enable prometheus metrics. Since Typha is host-networked, 134 | # this opens a port on the host, which may need to be secured. 135 | #- name: TYPHA_PROMETHEUSMETRICSENABLED 136 | # value: "true" 137 | #- name: TYPHA_PROMETHEUSMETRICSPORT 138 | # value: "9093" 139 | livenessProbe: 140 | httpGet: 141 | path: /liveness 142 | port: 9098 143 | periodSeconds: 30 144 | initialDelaySeconds: 30 145 | readinessProbe: 146 | httpGet: 147 | path: /readiness 148 | port: 9098 149 | periodSeconds: 10 150 | 151 | --- 152 | 153 | # This manifest installs the calico/node container, as well 154 | # as the Calico CNI plugins and network config on 155 | # each master and worker node in a Kubernetes cluster. 
156 | kind: DaemonSet 157 | apiVersion: extensions/v1beta1 158 | metadata: 159 | name: calico-node 160 | namespace: kube-system 161 | labels: 162 | k8s-app: calico-node 163 | spec: 164 | selector: 165 | matchLabels: 166 | k8s-app: calico-node 167 | updateStrategy: 168 | type: RollingUpdate 169 | rollingUpdate: 170 | maxUnavailable: 1 171 | template: 172 | metadata: 173 | labels: 174 | k8s-app: calico-node 175 | annotations: 176 | # This, along with the CriticalAddonsOnly toleration below, 177 | # marks the pod as a critical add-on, ensuring it gets 178 | # priority scheduling and that its resources are reserved 179 | # if it ever gets evicted. 180 | scheduler.alpha.kubernetes.io/critical-pod: '' 181 | spec: 182 | hostNetwork: true 183 | tolerations: 184 | # Make sure calico/node gets scheduled on all nodes. 185 | - effect: NoSchedule 186 | operator: Exists 187 | # Mark the pod as a critical add-on for rescheduling. 188 | - key: CriticalAddonsOnly 189 | operator: Exists 190 | - effect: NoExecute 191 | operator: Exists 192 | serviceAccountName: calico-node 193 | # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force 194 | # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 195 | terminationGracePeriodSeconds: 0 196 | containers: 197 | # Runs calico/node container on each Kubernetes node. This 198 | # container programs network policy and routes on each 199 | # host. 200 | - name: calico-node 201 | image: quay.io/calico/node:v3.1.3 202 | env: 203 | # Use Kubernetes API as the backing datastore. 204 | - name: DATASTORE_TYPE 205 | value: "kubernetes" 206 | # Enable felix info logging. 207 | - name: FELIX_LOGSEVERITYSCREEN 208 | value: "info" 209 | # Cluster type to identify the deployment type 210 | - name: CLUSTER_TYPE 211 | value: "k8s,bgp" 212 | # Disable file logging so `kubectl logs` works. 213 | - name: CALICO_DISABLE_FILE_LOGGING 214 | value: "true" 215 | # Set Felix endpoint to host default action to ACCEPT. 216 | - name: FELIX_DEFAULTENDPOINTTOHOSTACTION 217 | value: "ACCEPT" 218 | # Disable IPV6 on Kubernetes. 219 | - name: FELIX_IPV6SUPPORT 220 | value: "false" 221 | # Set MTU for tunnel device used if ipip is enabled 222 | - name: FELIX_IPINIPMTU 223 | value: "1440" 224 | # Wait for the datastore. 225 | - name: WAIT_FOR_DATASTORE 226 | value: "true" 227 | # The default IPv4 pool to create on startup if none exists. Pod IPs will be 228 | # chosen from this range. Changing this value after installation will have 229 | # no effect. This should fall within `--cluster-cidr`. 230 | - name: CALICO_IPV4POOL_CIDR 231 | value: "10.244.0.0/16" 232 | # Enable IPIP 233 | - name: CALICO_IPV4POOL_IPIP 234 | value: "off" 235 | # Enable IP-in-IP within Felix. 236 | - name: FELIX_IPINIPENABLED 237 | value: "false" 238 | # Typha support: controlled by the ConfigMap. 239 | - name: FELIX_TYPHAK8SSERVICENAME 240 | valueFrom: 241 | configMapKeyRef: 242 | name: calico-config 243 | key: typha_service_name 244 | # Set based on the k8s node name. 245 | - name: NODENAME 246 | valueFrom: 247 | fieldRef: 248 | fieldPath: spec.nodeName 249 | # Auto-detect the BGP IP address. 
250 | - name: IP 251 | value: "autodetect" 252 | - name: FELIX_HEALTHENABLED 253 | value: "true" 254 | securityContext: 255 | privileged: true 256 | resources: 257 | requests: 258 | cpu: 250m 259 | livenessProbe: 260 | httpGet: 261 | path: /liveness 262 | port: 9099 263 | periodSeconds: 10 264 | initialDelaySeconds: 10 265 | failureThreshold: 6 266 | readinessProbe: 267 | httpGet: 268 | path: /readiness 269 | port: 9099 270 | periodSeconds: 10 271 | volumeMounts: 272 | - mountPath: /lib/modules 273 | name: lib-modules 274 | readOnly: true 275 | - mountPath: /var/run/calico 276 | name: var-run-calico 277 | readOnly: false 278 | - mountPath: /var/lib/calico 279 | name: var-lib-calico 280 | readOnly: false 281 | # This container installs the Calico CNI binaries 282 | # and CNI network config file on each node. 283 | - name: install-cni 284 | image: quay.io/calico/cni:v3.1.3 285 | command: ["/install-cni.sh"] 286 | env: 287 | # Name of the CNI config file to create. 288 | - name: CNI_CONF_NAME 289 | value: "10-calico.conflist" 290 | # The CNI network config to install on each node. 291 | - name: CNI_NETWORK_CONFIG 292 | valueFrom: 293 | configMapKeyRef: 294 | name: calico-config 295 | key: cni_network_config 296 | # Set the hostname based on the k8s node name. 297 | - name: KUBERNETES_NODE_NAME 298 | valueFrom: 299 | fieldRef: 300 | fieldPath: spec.nodeName 301 | volumeMounts: 302 | - mountPath: /host/opt/cni/bin 303 | name: cni-bin-dir 304 | - mountPath: /host/etc/cni/net.d 305 | name: cni-net-dir 306 | volumes: 307 | # Used by calico/node. 308 | - name: lib-modules 309 | hostPath: 310 | path: /lib/modules 311 | - name: var-run-calico 312 | hostPath: 313 | path: /var/run/calico 314 | - name: var-lib-calico 315 | hostPath: 316 | path: /var/lib/calico 317 | # Used to install CNI. 318 | - name: cni-bin-dir 319 | hostPath: 320 | path: /opt/cni/bin 321 | - name: cni-net-dir 322 | hostPath: 323 | path: /etc/cni/net.d 324 | 325 | # Create all the CustomResourceDefinitions needed for 326 | # Calico policy and networking mode. 
327 | --- 328 | 329 | apiVersion: apiextensions.k8s.io/v1beta1 330 | kind: CustomResourceDefinition 331 | metadata: 332 | name: felixconfigurations.crd.projectcalico.org 333 | spec: 334 | scope: Cluster 335 | group: crd.projectcalico.org 336 | version: v1 337 | names: 338 | kind: FelixConfiguration 339 | plural: felixconfigurations 340 | singular: felixconfiguration 341 | 342 | --- 343 | 344 | apiVersion: apiextensions.k8s.io/v1beta1 345 | kind: CustomResourceDefinition 346 | metadata: 347 | name: bgppeers.crd.projectcalico.org 348 | spec: 349 | scope: Cluster 350 | group: crd.projectcalico.org 351 | version: v1 352 | names: 353 | kind: BGPPeer 354 | plural: bgppeers 355 | singular: bgppeer 356 | 357 | --- 358 | 359 | apiVersion: apiextensions.k8s.io/v1beta1 360 | kind: CustomResourceDefinition 361 | metadata: 362 | name: bgpconfigurations.crd.projectcalico.org 363 | spec: 364 | scope: Cluster 365 | group: crd.projectcalico.org 366 | version: v1 367 | names: 368 | kind: BGPConfiguration 369 | plural: bgpconfigurations 370 | singular: bgpconfiguration 371 | 372 | --- 373 | 374 | apiVersion: apiextensions.k8s.io/v1beta1 375 | kind: CustomResourceDefinition 376 | metadata: 377 | name: ippools.crd.projectcalico.org 378 | spec: 379 | scope: Cluster 380 | group: crd.projectcalico.org 381 | version: v1 382 | names: 383 | kind: IPPool 384 | plural: ippools 385 | singular: ippool 386 | 387 | --- 388 | 389 | apiVersion: apiextensions.k8s.io/v1beta1 390 | kind: CustomResourceDefinition 391 | metadata: 392 | name: hostendpoints.crd.projectcalico.org 393 | spec: 394 | scope: Cluster 395 | group: crd.projectcalico.org 396 | version: v1 397 | names: 398 | kind: HostEndpoint 399 | plural: hostendpoints 400 | singular: hostendpoint 401 | 402 | --- 403 | 404 | apiVersion: apiextensions.k8s.io/v1beta1 405 | kind: CustomResourceDefinition 406 | metadata: 407 | name: clusterinformations.crd.projectcalico.org 408 | spec: 409 | scope: Cluster 410 | group: crd.projectcalico.org 411 | version: v1 412 | names: 413 | kind: ClusterInformation 414 | plural: clusterinformations 415 | singular: clusterinformation 416 | 417 | --- 418 | 419 | apiVersion: apiextensions.k8s.io/v1beta1 420 | kind: CustomResourceDefinition 421 | metadata: 422 | name: globalnetworkpolicies.crd.projectcalico.org 423 | spec: 424 | scope: Cluster 425 | group: crd.projectcalico.org 426 | version: v1 427 | names: 428 | kind: GlobalNetworkPolicy 429 | plural: globalnetworkpolicies 430 | singular: globalnetworkpolicy 431 | 432 | --- 433 | 434 | apiVersion: apiextensions.k8s.io/v1beta1 435 | kind: CustomResourceDefinition 436 | metadata: 437 | name: globalnetworksets.crd.projectcalico.org 438 | spec: 439 | scope: Cluster 440 | group: crd.projectcalico.org 441 | version: v1 442 | names: 443 | kind: GlobalNetworkSet 444 | plural: globalnetworksets 445 | singular: globalnetworkset 446 | 447 | --- 448 | 449 | apiVersion: apiextensions.k8s.io/v1beta1 450 | kind: CustomResourceDefinition 451 | metadata: 452 | name: networkpolicies.crd.projectcalico.org 453 | spec: 454 | scope: Namespaced 455 | group: crd.projectcalico.org 456 | version: v1 457 | names: 458 | kind: NetworkPolicy 459 | plural: networkpolicies 460 | singular: networkpolicy 461 | 462 | --- 463 | 464 | apiVersion: v1 465 | kind: ServiceAccount 466 | metadata: 467 | name: calico-node 468 | namespace: kube-system 469 | -------------------------------------------------------------------------------- /roles/flannel/templates/flannel.yaml.j2: 
-------------------------------------------------------------------------------- 1 | --- 2 | kind: ClusterRole 3 | apiVersion: rbac.authorization.k8s.io/v1beta1 4 | metadata: 5 | name: flannel 6 | rules: 7 | - apiGroups: 8 | - "" 9 | resources: 10 | - pods 11 | verbs: 12 | - get 13 | - apiGroups: 14 | - "" 15 | resources: 16 | - nodes 17 | verbs: 18 | - list 19 | - watch 20 | - apiGroups: 21 | - "" 22 | resources: 23 | - nodes/status 24 | verbs: 25 | - patch 26 | --- 27 | kind: ClusterRoleBinding 28 | apiVersion: rbac.authorization.k8s.io/v1beta1 29 | metadata: 30 | name: flannel 31 | roleRef: 32 | apiGroup: rbac.authorization.k8s.io 33 | kind: ClusterRole 34 | name: flannel 35 | subjects: 36 | - kind: ServiceAccount 37 | name: flannel 38 | namespace: kube-system 39 | --- 40 | apiVersion: v1 41 | kind: ServiceAccount 42 | metadata: 43 | name: flannel 44 | namespace: kube-system 45 | --- 46 | kind: ConfigMap 47 | apiVersion: v1 48 | metadata: 49 | name: kube-flannel-cfg 50 | namespace: kube-system 51 | labels: 52 | tier: node 53 | app: flannel 54 | data: 55 | cni-conf.json: | 56 | { 57 | "name": "cbr0", 58 | "type": "flannel", 59 | "delegate": { 60 | "isDefaultGateway": true 61 | } 62 | } 63 | net-conf.json: | 64 | { 65 | "Network": "{{pod_network_cidr}}", 66 | "Backend": { 67 | "Type": "vxlan" 68 | } 69 | } 70 | --- 71 | apiVersion: extensions/v1beta1 72 | kind: DaemonSet 73 | metadata: 74 | name: kube-flannel-ds 75 | namespace: kube-system 76 | labels: 77 | tier: node 78 | app: flannel 79 | spec: 80 | template: 81 | metadata: 82 | labels: 83 | tier: node 84 | app: flannel 85 | spec: 86 | hostNetwork: true 87 | nodeSelector: 88 | beta.kubernetes.io/arch: amd64 89 | tolerations: 90 | - key: node-role.kubernetes.io/master 91 | operator: Exists 92 | effect: NoSchedule 93 | serviceAccountName: flannel 94 | initContainers: 95 | - name: install-cni 96 | image: {{flannel_container_image}} 97 | command: 98 | - cp 99 | args: 100 | - -f 101 | - /etc/kube-flannel/cni-conf.json 102 | - /etc/cni/net.d/10-flannel.conf 103 | volumeMounts: 104 | - name: cni 105 | mountPath: /etc/cni/net.d 106 | - name: flannel-cfg 107 | mountPath: /etc/kube-flannel/ 108 | containers: 109 | - name: kube-flannel 110 | image: {{flannel_container_image}} 111 | command: 112 | - sh 113 | - -c 114 | - "/opt/bin/flanneld --ip-masq --kube-subnet-mgr & \ 115 | sleep 10; \ 116 | cat /run/flannel/subnet.env; \ 117 | sed -i -e 's/FLANNEL_MTU=.*/FLANNEL_MTU=1400/g' /run/flannel/subnet.env; \ 118 | cat /run/flannel/subnet.env ; 119 | wait " 120 | securityContext: 121 | privileged: true 122 | env: 123 | - name: POD_NAME 124 | valueFrom: 125 | fieldRef: 126 | fieldPath: metadata.name 127 | - name: POD_NAMESPACE 128 | valueFrom: 129 | fieldRef: 130 | fieldPath: metadata.namespace 131 | volumeMounts: 132 | - name: run 133 | mountPath: /run 134 | - name: flannel-cfg 135 | mountPath: /etc/kube-flannel/ 136 | volumes: 137 | - name: run 138 | hostPath: 139 | path: /run 140 | - name: cni 141 | hostPath: 142 | path: /etc/cni/net.d 143 | - name: flannel-cfg 144 | configMap: 145 | name: kube-flannel-cfg 146 | -------------------------------------------------------------------------------- /roles/flannel/templates/rbac-kdd.yaml: -------------------------------------------------------------------------------- 1 | # Calico Version v3.1.3 2 | # https://docs.projectcalico.org/v3.1/releases#v3.1.3 3 | kind: ClusterRole 4 | apiVersion: rbac.authorization.k8s.io/v1beta1 5 | metadata: 6 | name: calico-node 7 | rules: 8 | - apiGroups: [""] 9 | resources: 10 
| - namespaces 11 | verbs: 12 | - get 13 | - list 14 | - watch 15 | - apiGroups: [""] 16 | resources: 17 | - pods/status 18 | verbs: 19 | - update 20 | - apiGroups: [""] 21 | resources: 22 | - pods 23 | verbs: 24 | - get 25 | - list 26 | - watch 27 | - patch 28 | - apiGroups: [""] 29 | resources: 30 | - services 31 | verbs: 32 | - get 33 | - apiGroups: [""] 34 | resources: 35 | - endpoints 36 | verbs: 37 | - get 38 | - apiGroups: [""] 39 | resources: 40 | - nodes 41 | verbs: 42 | - get 43 | - list 44 | - update 45 | - watch 46 | - apiGroups: ["extensions"] 47 | resources: 48 | - networkpolicies 49 | verbs: 50 | - get 51 | - list 52 | - watch 53 | - apiGroups: ["networking.k8s.io"] 54 | resources: 55 | - networkpolicies 56 | verbs: 57 | - watch 58 | - list 59 | - apiGroups: ["crd.projectcalico.org"] 60 | resources: 61 | - globalfelixconfigs 62 | - felixconfigurations 63 | - bgppeers 64 | - globalbgpconfigs 65 | - bgpconfigurations 66 | - ippools 67 | - globalnetworkpolicies 68 | - globalnetworksets 69 | - networkpolicies 70 | - clusterinformations 71 | - hostendpoints 72 | verbs: 73 | - create 74 | - get 75 | - list 76 | - update 77 | - watch 78 | 79 | --- 80 | 81 | apiVersion: rbac.authorization.k8s.io/v1beta1 82 | kind: ClusterRoleBinding 83 | metadata: 84 | name: calico-node 85 | roleRef: 86 | apiGroup: rbac.authorization.k8s.io 87 | kind: ClusterRole 88 | name: calico-node 89 | subjects: 90 | - kind: ServiceAccount 91 | name: calico-node 92 | namespace: kube-system 93 | -------------------------------------------------------------------------------- /roles/generate-etcd-certs/files/certs/ca-config.json: -------------------------------------------------------------------------------- 1 | { 2 | "signing": { 3 | "default": { 4 | "expiry": "43800h" 5 | }, 6 | "profiles": { 7 | "server": { 8 | "expiry": "43800h", 9 | "usages": [ 10 | "signing", 11 | "key encipherment", 12 | "server auth", 13 | "client auth" 14 | ] 15 | }, 16 | "client": { 17 | "expiry": "43800h", 18 | "usages": [ 19 | "signing", 20 | "key encipherment", 21 | "client auth" 22 | ] 23 | }, 24 | "peer": { 25 | "expiry": "43800h", 26 | "usages": [ 27 | "signing", 28 | "key encipherment", 29 | "server auth", 30 | "client auth" 31 | ] 32 | } 33 | } 34 | } 35 | } -------------------------------------------------------------------------------- /roles/generate-etcd-certs/files/certs/ca-csr.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "etcd", 3 | "key": { 4 | "algo": "rsa", 5 | "size": 2048 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /roles/generate-etcd-certs/files/certs/client.json: -------------------------------------------------------------------------------- 1 | { 2 | "CN": "client", 3 | "key": { 4 | "algo": "ecdsa", 5 | "size": 256 6 | } 7 | } -------------------------------------------------------------------------------- /roles/generate-etcd-certs/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Copy ca-cert config files 2 | copy: 3 | src: certs/ 4 | dest: /etc/kubernetes/pki/etcd/ 5 | # owner: root 6 | # group: root 7 | directory_mode : true 8 | 9 | - name: Generate CA certs 10 | shell: | 11 | cd /etc/kubernetes/pki/etcd 12 | /usr/local/bin/cfssl gencert -initca ca-csr.json | /usr/local/bin/cfssljson -bare ca - 13 | 14 | 15 | - name: Generate etcd client certs 16 | shell: | 17 | cd /etc/kubernetes/pki/etcd 18 | /usr/local/bin/cfssl gencert -ca=ca.pem 
-ca-key=ca-key.pem -config=ca-config.json -profile=client client.json | /usr/local/bin/cfssljson -bare client 19 | 20 | - name: get etcd certs locally from master0 21 | become: yes 22 | fetch: 23 | src: /etc/kubernetes/pki/etcd/{{item}} 24 | dest: /tmp/etcd/{{item}} 25 | flat: yes 26 | with_items: 27 | - ca-config.json 28 | - ca.pem 29 | - ca-key.pem 30 | - client.pem 31 | - client-key.pem -------------------------------------------------------------------------------- /roles/haproxy/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: create /opt/hap_config directory 2 | file: 3 | path: /opt/hap_config 4 | state: directory 5 | mode: 0755 6 | 7 | - name: generate and copy HAProxy config 8 | template: 9 | src: haproxyConfig.j2 10 | dest: /opt/hap_config/haproxy.cfg 11 | register: hapConfig 12 | 13 | - name: "Pulling image {{ haproxy_container_image }}" 14 | shell: docker pull "{{ haproxy_container_image }}" 15 | 16 | - name: run haproxy in a container 17 | docker_container: 18 | name: haproxy 19 | image: "{{haproxy_container_image}}" 20 | restart_policy: always 21 | state: started 22 | privileged: yes 23 | network_mode: host 24 | exposed_ports: 25 | - "{{ha_proxy_port}}" 26 | - "{{etcd_proxy_port}}" 27 | volumes: 28 | - /opt/hap_config/:/usr/local/etc/haproxy/ 29 | command: -f /usr/local/etc/haproxy/haproxy.cfg 30 | 31 | - name: restart haproxy container when its config has changed 32 | shell: docker restart haproxy 33 | when: hapConfig.changed -------------------------------------------------------------------------------- /roles/haproxy/templates/haproxyConfig.j2: -------------------------------------------------------------------------------- 1 | #jinja2:trim_blocks:False 2 | global 3 | log 127.0.0.1 local0 4 | maxconn 4096 5 | ssl-default-bind-ciphers kEECDH+aRSA+AES:kRSA+AES:+AES256:RC4-SHA:!kEDH:!LOW:!EXP:!MD5:!aNULL:!eNULL 6 | 7 | defaults 8 | log global 9 | mode http 10 | option httplog 11 | option dontlognull 12 | timeout connect 5s 13 | timeout client 50s 14 | timeout client-fin 50s 15 | timeout server 50s 16 | timeout tunnel 1h 17 | 18 | listen stats :{{haproxy_stats_port}} 19 | mode http 20 | stats enable 21 | stats hide-version 22 | stats realm Haproxy\ Statistics 23 | stats uri /haproxy_stats 24 | stats auth {{ansible_user_id}}:k8sHA 25 | 26 | ############################# 27 | # api-proxy config 28 | ############################# 29 | 30 | 31 | frontend front-api-proxy 32 | bind *:{{ha_proxy_port}} 33 | mode tcp 34 | option tcplog 35 | default_backend back-api-proxy 36 | 37 | backend back-api-proxy 38 | mode tcp 39 | option tcplog 40 | balance roundrobin 41 | {% for item in groups['k8s-masters'] %} 42 | server {{ item }} {{hostvars[item]["ansible_usehost"]}}:6443 check {% if(((item in groups["dc1-k8s-nodes"]) and (inventory_hostname in groups["dc2-k8s-nodes"])) or ((item in groups["dc2-k8s-nodes"]) and (inventory_hostname in groups["dc1-k8s-nodes"])))%}backup{%endif%} 43 | {% endfor %} 44 | 45 | ############################# 46 | # etcd config 47 | ############################# 48 | 49 | 50 | frontend front-etcd-proxy 51 | bind *:{{etcd_proxy_port}} 52 | mode tcp 53 | option tcplog 54 | default_backend back-etcd-proxy 55 | 56 | backend back-etcd-proxy 57 | mode tcp 58 | option tcplog 59 | balance roundrobin 60 | {% for item in groups['k8s-masters'] %} 61 | server {{ item }} {{hostvars[item]["ansible_usehost"]}}:2379 check {% if(((item in
groups["dc2-k8s-nodes"]) and (inventory_hostname in groups["dc1-k8s-nodes"])))%}backup{%endif%} 62 | {% endfor %} -------------------------------------------------------------------------------- /roles/heapster/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Copy manifest of heapster 2 | template: 3 | src: heapster.yml.j2 4 | dest: /tmp/heapster.yml 5 | 6 | - name: Deploy heapster 7 | shell: kubectl --kubeconfig=/tmp/kubeadm-ha/config apply -f /tmp/heapster.yml 8 | 9 | - name: Pausing for 1 minute 10 | pause: 11 | minutes: 1 12 | 13 | - name: Run kubectl top no 14 | shell: kubectl --kubeconfig=/tmp/kubeadm-ha/config top no 15 | register: result 16 | until: result.rc == 0 17 | retries: 20 18 | delay: 10 -------------------------------------------------------------------------------- /roles/heapster/templates/heapster.yml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | # ------------------- Heapster ------------------- # 3 | kind: ClusterRoleBinding 4 | apiVersion: rbac.authorization.k8s.io/v1beta1 5 | metadata: 6 | name: heapster 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: system:heapster 11 | subjects: 12 | - kind: ServiceAccount 13 | name: heapster 14 | namespace: kube-system 15 | --- 16 | apiVersion: v1 17 | kind: ServiceAccount 18 | metadata: 19 | name: heapster 20 | namespace: kube-system 21 | --- 22 | apiVersion: extensions/v1beta1 23 | kind: Deployment 24 | metadata: 25 | name: heapster 26 | namespace: kube-system 27 | spec: 28 | replicas: 1 29 | template: 30 | metadata: 31 | labels: 32 | task: monitoring 33 | k8s-app: heapster 34 | spec: 35 | serviceAccountName: heapster 36 | containers: 37 | - name: heapster 38 | image: {{heapster_container_image}} 39 | imagePullPolicy: IfNotPresent 40 | command: 41 | - /heapster 42 | - --source=kubernetes.summary_api:'' 43 | --- 44 | apiVersion: v1 45 | kind: Service 46 | metadata: 47 | labels: 48 | task: monitoring 49 | # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons) 50 | # If you are NOT using this as an addon, you should comment out this line. 
51 | kubernetes.io/cluster-service: 'true' 52 | kubernetes.io/name: Heapster 53 | name: heapster 54 | namespace: kube-system 55 | spec: 56 | ports: 57 | - port: 80 58 | targetPort: 8082 59 | selector: 60 | k8s-app: heapster 61 | -------------------------------------------------------------------------------- /roles/keepalived/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: install keepalived 2 | become: yes 3 | yum: 4 | name: keepalived 5 | 6 | - name: deploy keepalived script 7 | become: yes 8 | template: 9 | src: check_apiserver.sh 10 | dest: /etc/keepalived/check_apiserver.sh 11 | mode: "a+x" 12 | 13 | - name: deploy keepalived config 14 | become: yes 15 | template: 16 | src: keepalived.conf 17 | dest: /etc/keepalived/keepalived.conf 18 | 19 | - name: restart keepalived 20 | systemd: 21 | state: restarted 22 | daemon_reload: yes 23 | name: keepalived 24 | enabled: yes -------------------------------------------------------------------------------- /roles/keepalived/templates/check_apiserver.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | errorExit() { 4 | echo "*** $*" 1>&2 5 | exit 1 6 | } 7 | 8 | curl --silent --max-time 2 --insecure https://localhost:6443/ -o /dev/null || errorExit "Error GET https://localhost:6443/" 9 | if ip addr | grep -q {{ virtual_ip }}; then 10 | curl --silent --max-time 2 --insecure https://{{ virtual_ip }}:6443/ -o /dev/null || errorExit "Error GET https://{{ virtual_ip }}:6443/" 11 | fi -------------------------------------------------------------------------------- /roles/keepalived/templates/keepalived.conf: -------------------------------------------------------------------------------- 1 | ! Configuration File for keepalived 2 | global_defs { 3 | router_id LVS_DEVEL 4 | } 5 | vrrp_script check_apiserver { 6 | script "/etc/keepalived/check_apiserver.sh" 7 | interval 2 8 | weight -5 9 | fall 3 10 | rise 2 11 | } 12 | vrrp_instance VI_1 { 13 | state {{ 'MASTER' if ansible_usehost == hostvars[groups['all-masters'][0]]['ansible_usehost'] else 'BACKUP' }} 14 | interface {{ network_interface }} 15 | mcast_src_ip {{ ansible_host }} 16 | virtual_router_id 51 17 | priority {{ '102' if ansible_usehost == hostvars[groups['all-masters'][0]]['ansible_usehost'] else '100' }} 18 | advert_int 2 19 | authentication { 20 | auth_type PASS 21 | auth_pass 4be37dc3b4c90194d1600c483e10ad1d 22 | } 23 | virtual_ipaddress { 24 | {{ virtual_ip }} 25 | } 26 | track_script { 27 | check_apiserver 28 | } 29 | } -------------------------------------------------------------------------------- /roles/kubeadm-init-first-master/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Kubeadm init 2 | shell: kubeadm init --config=/tmp/kubeadm-ha/kubeadm-config.yaml 3 | register: rslt 4 | 5 | - name: Store init output 6 | action: copy content="{{ rslt.stdout }}" dest="/etc/kubernetes/kubeadm-init.stdout" 7 | 8 | - name: Create .kube folder 9 | file: 10 | path: "~{{ ansible_ssh_user }}/.kube" 11 | state: directory 12 | owner: "{{ ansible_ssh_user }}" 13 | group: "{{ ansible_ssh_user }}" 14 | 15 | - name: Copy admin.conf to .kube folder 16 | copy: 17 | src: /etc/kubernetes/admin.conf 18 | dest: "~{{ ansible_ssh_user }}/.kube/config" 19 | owner: "{{ ansible_ssh_user }}" 20 | group: "{{ ansible_ssh_user }}" 21 | remote_src: yes 22 | 23 | - name: Copy admin.conf to /tmp/kubeadm-ha/config 24 | copy: 25 | src: 
/etc/kubernetes/admin.conf 26 | dest: "/tmp/kubeadm-ha/config" 27 | remote_src: yes 28 | 29 | - name: "Fetching {{item}} from master0" 30 | fetch: 31 | src: /etc/kubernetes/pki/{{item}} 32 | dest: /tmp/pki/{{item}} 33 | flat: yes 34 | with_items: 35 | - ca.crt 36 | - ca.key 37 | - sa.key 38 | - sa.pub -------------------------------------------------------------------------------- /roles/kubeadm-init-other-masters/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: "copy {{item}} to other masters" 2 | copy: 3 | src: /tmp/pki/{{item}} 4 | dest: /etc/kubernetes/pki/{{item}} 5 | with_items: 6 | - ca.crt 7 | - ca.key 8 | - sa.key 9 | - sa.pub 10 | 11 | 12 | - name: Kubeadm init 13 | shell: kubeadm init --config=/tmp/kubeadm-ha/kubeadm-config.yaml -------------------------------------------------------------------------------- /roles/kubeadm-init-prep/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Create /tmp/kubeadm-ha directory 2 | file: 3 | path: /tmp/kubeadm-ha 4 | state: directory 5 | 6 | - name: Copy kubeadm init config 7 | template: 8 | src: kubeadm-config.yaml 9 | dest: /tmp/kubeadm-ha/kubeadm-config.yaml -------------------------------------------------------------------------------- /roles/kubeadm-init-prep/templates/kubeadm-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kubeadm.k8s.io/v1alpha1 2 | kind: MasterConfiguration 3 | kubernetesVersion: {{kubernetes_version}} 4 | networking: 5 | podSubnet: {{pod_network_cidr}} 6 | {% if groups["k8s-masters"] | length > 1 %} 7 | apiServerCertSANs: 8 | {% if virtual_ip is defined %} 9 | - {{virtual_ip}} 10 | {% endif %} 11 | {% for master in groups["k8s-masters"] %} 12 | - {{hostvars[master]['ansible_usehost']}} 13 | {% endfor %} 14 | - localhost 15 | {% endif %} 16 | etcd: 17 | endpoints: 18 | - https://localhost:{{etcd_proxy_port}} 19 | {% for master in groups["k8s-masters"] %} 20 | - https://{{hostvars[master]['ansible_usehost']}}:2379 21 | {% endfor %} 22 | caFile: /etc/kubernetes/pki/etcd/ca.pem 23 | certFile: /etc/kubernetes/pki/etcd/client.pem 24 | keyFile: /etc/kubernetes/pki/etcd/client-key.pem 25 | apiServerExtraArgs: 26 | endpoint-reconciler-type: lease 27 | imageRepository: {{docker_registry| trim}}/kubernetes -------------------------------------------------------------------------------- /roles/kubeadm-join-node/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Generate join token 2 | shell: kubeadm token create --print-join-command 3 | register: kubeadm_join_cmd 4 | delegate_to: "{{ groups['k8s-masters'][0] }}" 5 | 6 | - set_fact: 7 | kubeadm_join: "{{ kubeadm_join_cmd.stdout }}" 8 | 9 | - debug: var=kubeadm_join 10 | 11 | - name: Store join command 12 | action: copy content="{{ kubeadm_join }}" dest="/etc/kubernetes/kubeadm-join.command" 13 | 14 | - name: Run kubeadm join 15 | shell: "{{ kubeadm_join }} --ignore-preflight-errors=swap" -------------------------------------------------------------------------------- /roles/kubeadm-upgrade/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: upgrade kubeadm 2 | yum: 3 | name: "kubeadm-{{kubeadm_version}}" 4 | state: present 5 | 6 | - name: pip install pexpect 7 | pip: 8 | name: pexpect 9 | extra_args: -i https://{{docker_registry_username| trim}}:{{docker_registry_password|
trim}}@na.artifactory.swg-devops.com/artifactory/api/pypi/wh-imaging-pypi-local/simple 10 | 11 | - name: Run kubeadm upgrade 12 | expect: 13 | command: kubeadm upgrade apply {{upgrade_version}} 14 | echo: yes 15 | timeout: 250 16 | responses: 17 | upgrade?: "y" -------------------------------------------------------------------------------- /roles/kubeadm/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Remove swapfile from /etc/fstab 2 | mount: 3 | name: swap 4 | fstype: swap 5 | state: absent 6 | 7 | - name: Turn swap off 8 | shell: swapoff -a 9 | 10 | - name: set enforce 11 | command: setenforce 0 12 | 13 | - name: install kubeadm packages 14 | become: yes 15 | yum: 16 | name: "{{item}}" 17 | allow_downgrade: yes 18 | with_items: 19 | - kubelet-{{kubelet_version}} 20 | - kubeadm 21 | - kubectl 22 | 23 | - name: detect docker's cgroup-driver 24 | shell: docker info 2>/dev/null |grep -i cgroup | cut -d":" -f2 | tr -d " " 25 | register: docker_cgroup_driver 26 | 27 | - replace: 28 | path: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 29 | regexp: '--cgroup-driver=(systemd|cgroupfs)' 30 | replace: '--cgroup-driver={{docker_cgroup_driver.stdout}}' 31 | backup: no 32 | 33 | - replace: 34 | path: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 35 | regexp: 'cadvisor-port=0' 36 | replace: 'cadvisor-port=4194' 37 | backup: no 38 | 39 | - name: modprobe 40 | command: modprobe br_netfilter 41 | 42 | - name: Add netbridge config ip6 43 | lineinfile: 44 | path: /etc/sysctl.d/k8s.conf 45 | line: 'net.bridge.bridge-nf-call-ip6tables = 1' 46 | state: present 47 | create: yes 48 | 49 | - name: Add netbridge config ip4 50 | lineinfile: 51 | path: /etc/sysctl.d/k8s.conf 52 | line: 'net.bridge.bridge-nf-call-iptables = 1' 53 | state: present 54 | create: yes 55 | 56 | - name: update sysctl 57 | command: sysctl --system 58 | 59 | - set_fact: 60 | eviction_hard: "{{ kubelet_masters_eviction_hard }} " 61 | when: inventory_hostname in groups["k8s-masters"] 62 | 63 | - set_fact: 64 | eviction_hard: "{{ kubelet_workers_eviction_hard }} " 65 | when: inventory_hostname in groups["k8s-workers"] 66 | 67 | - name: change pause container to private registry and add eviction hard 68 | lineinfile: 69 | path: /etc/systemd/system/kubelet.service.d/20-pod-infra-image.conf 70 | create: yes 71 | state: present 72 | line: | 73 | [Service] 74 | Environment="KUBELET_EXTRA_ARGS=--pod-infra-container-image={{kubepause_container_image}} --eviction-hard={{eviction_hard}}" 75 | 76 | 77 | 78 | 79 | 80 | - name: "Pulling image {{ item }}" 81 | shell: docker pull "{{ item }}" 82 | with_items: 83 | - "{{ kubeproxy_container_image }}" 84 | - "{{ kubepause_container_image }}" 85 | - "{{ flannel_container_image }}" 86 | - "{{ heapster_container_image }}" 87 | - "{{ kubedns_container_image }}" 88 | - "{{ kubednsmasq_container_image }}" 89 | - "{{ kubednssidecar_container_image }}" 90 | 91 | #- name: restart kubelet 92 | # systemd: 93 | # state: restarted 94 | # daemon_reload: yes 95 | # name: kubelet 96 | # enabled: yes -------------------------------------------------------------------------------- /roles/ntpd/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: install ntpd 2 | become: yes 3 | yum: 4 | name: ntp 5 | state: present 6 | 7 | - name: stop ntpd service 8 | become: yes 9 | service: 10 | name: ntpd 11 | state: stopped 12 | 13 | - name: sync time 14 | become: yes 15 | command: ntpd -gq 16 | 17 | - 
name: start ntpd service 18 | become: yes 19 | service: 20 | name: ntpd 21 | state: started 22 | enabled: yes -------------------------------------------------------------------------------- /roles/pull-kubernetes-images/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: "Pulling image {{ item }}" 2 | shell: docker pull "{{ item }}" 3 | with_items: 4 | - "{{ kubeapi_server_container_image }}" 5 | - "{{ kubescheduler_container_image }}" 6 | - "{{ kubecontroller_manager_container_image }}" 7 | 8 | -------------------------------------------------------------------------------- /roles/repos/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - include_vars: ../../../../whis-platform/inventory/group_vars/local/vault 2 | - name: Add Artifactory repo 3 | yum_repository: 4 | name: Artifactory 5 | description: Artifactory taas private repo 6 | baseurl: https://{{docker_registry_username| trim| urlencode()}}:{{docker_registry_password| trim| urlencode()}}@na.artifactory.swg-devops.com/artifactory/wh-imaging-prereqs-rpm-local/ 7 | gpgcheck: no 8 | when: setup=="private" 9 | 10 | - name: Add docker-ce-stable repo 11 | yum_repository: 12 | name: docker-ce 13 | description: docker-ce stable repo 14 | baseurl: https://download.docker.com/linux/centos/7/$basearch/stable 15 | gpgkey: https://download.docker.com/linux/centos/gpg 16 | gpgcheck: yes 17 | when: setup=="public" 18 | 19 | - name: Add kubernetes repo 20 | yum_repository: 21 | name: kubernetes 22 | description: Kubernetes repo 23 | baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch 24 | gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg 25 | gpgcheck: yes 26 | when: setup=="public" -------------------------------------------------------------------------------- /roles/setup-cfssl/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - include_vars: ../../../../whis-platform/inventory/group_vars/local/vault 2 | - name: Download cfssl 3 | get_url: 4 | url: https://na.artifactory.swg-devops.com/artifactory/wh-imaging-prereqs-generic-local/bin/cfssl_linux-amd64 5 | dest: /usr/local/bin/cfssl 6 | mode: 0710 7 | force_basic_auth: yes 8 | url_password: "{{docker_registry_password}}" 9 | url_username: "{{docker_registry_username}}" 10 | 11 | - name: Download cfssljson 12 | get_url: 13 | url: https://na.artifactory.swg-devops.com/artifactory/wh-imaging-prereqs-generic-local/bin/cfssljson_linux-amd64 14 | dest: /usr/local/bin/cfssljson 15 | mode: 0710 16 | force_basic_auth: yes 17 | url_password: "{{docker_registry_password}}" 18 | url_username: "{{docker_registry_username}}" -------------------------------------------------------------------------------- /roles/smoke-test/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Wait for all of the nodes to register 2 | shell: kubectl --kubeconfig=/tmp/kubeadm-ha/config get nodes 3 | register: result 4 | until: result.stdout.find("NotReady") == -1 5 | retries: 20 6 | delay: 10 7 | 8 | - name: Copy smoke test manifest 9 | template: 10 | src: nginx.yml.j2 11 | dest: "/tmp/nginx.yml" 12 | mode: 0644 13 | 14 | - name: Deploy smoke test manifest 15 | shell: kubectl --kubeconfig=/tmp/kubeadm-ha/config apply -f /tmp/nginx.yml 16 | 17 | - name: Wait for smoke test app to be ready 18 | shell: kubectl
--kubeconfig=/tmp/kubeadm-ha/config get po | grep nginx 19 | register: result 20 | until: result.stdout.find("Running") != -1 21 | retries: 20 22 | delay: 10 23 | 24 | - name: Pausing for 1 minute 25 | pause: 26 | minutes: 1 27 | 28 | - name: Access smoke test app 29 | shell: "curl http://{{ item }}:{{ smoke_test_node_port }}" 30 | register: result 31 | until: result.stdout.find("nginx") != -1 32 | with_items: "{{ groups['k8s-workers'] }}" 33 | 34 | - name: Get kubernetes no, po, svc 35 | shell: kubectl --kubeconfig=/tmp/kubeadm-ha/config get no,po,svc --all-namespaces 36 | register: rslt 37 | 38 | - debug: var=rslt.stdout_lines -------------------------------------------------------------------------------- /roles/smoke-test/templates/nginx.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1beta1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | spec: 6 | replicas: 1 7 | template: 8 | metadata: 9 | labels: 10 | app: nginx 11 | spec: 12 | containers: 13 | - name: nginx 14 | image: nginx 15 | ports: 16 | - containerPort: 80 17 | --- 18 | apiVersion: v1 19 | kind: Service 20 | metadata: 21 | name: nginx 22 | labels: 23 | app: nginx 24 | spec: 25 | ports: 26 | - port: 80 27 | name: http 28 | nodePort: {{ smoke_test_node_port }} 29 | selector: 30 | app: nginx 31 | type: NodePort 32 | -------------------------------------------------------------------------------- /roles/sync-etcd-certs/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: "copy {{item}} to other masters" 2 | copy: 3 | src: "/tmp/etcd/{{item}}" 4 | dest: "/etc/kubernetes/pki/etcd/{{item}}" 5 | with_items: 6 | - ca-config.json 7 | - ca.pem 8 | - ca-key.pem 9 | - client.pem 10 | - client-key.pem -------------------------------------------------------------------------------- /roles/update-kubelet/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: drain node 2 | shell: kubectl --kubeconfig=/tmp/kubeadm-ha/config drain {{inventory_hostname}} --ignore-daemonsets 3 | delegate_to: "{{ groups['k8s-masters'][0] }}" 4 | 5 | - name: upgrade kubelet 6 | yum: 7 | name: "kubelet-{{kubelet_version}}" 8 | state: present 9 | 10 | - replace: 11 | path: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 12 | regexp: 'KUBELET_KUBECONFIG_ARGS=' 13 | replace: 'KUBELET_KUBECONFIG_ARGS=--fail-swap-on=false ' 14 | backup: no 15 | 16 | - replace: 17 | path: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 18 | regexp: 'cadvisor-port=0' 19 | replace: 'cadvisor-port=4194' 20 | backup: no 21 | 22 | - name: restart kubelet 23 | systemd: 24 | state: restarted 25 | daemon_reload: yes 26 | name: kubelet 27 | enabled: yes 28 | 29 | - name: Bring the host back online 30 | shell: kubectl --kubeconfig=/tmp/kubeadm-ha/config uncordon {{inventory_hostname}} 31 | delegate_to: "{{ groups['k8s-masters'][0] }}" --------------------------------------------------------------------------------
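The playbooks and templates above assume a populated Ansible inventory. As a closing reference, here is a minimal, illustrative sketch of the group_vars such an inventory might define; every variable name below is referenced by the roles in this repo, but the values are assumptions for a hypothetical cluster, not the project's actual settings:

# Illustrative group_vars sketch -- names taken from the roles above, values assumed.
kubernetes_version: v1.10.4        # consumed by kubeadm-config.yaml
kubelet_version: 1.10.4            # pinned via yum as kubelet-{{kubelet_version}}
kubeadm_version: 1.10.4            # pinned via yum as kubeadm-{{kubeadm_version}}
upgrade_version: v1.10.5           # target for `kubeadm upgrade apply`
pod_network_cidr: 10.244.0.0/16    # must agree with CALICO_IPV4POOL_CIDR above
virtual_ip: 10.0.0.100             # keepalived VIP fronting the apiservers
network_interface: eth0            # interface keepalived binds the VIP to
ha_proxy_port: 8443                # haproxy frontend for the apiservers
etcd_proxy_port: 12379             # haproxy frontend for etcd
haproxy_stats_port: 9000           # haproxy stats page
smoke_test_node_port: 30080        # NodePort used by the nginx smoke test

Host groups named k8s-masters, k8s-workers, all-masters, dc1-k8s-nodes, and dc2-k8s-nodes must also exist, along with an ansible_usehost hostvar per node, since the haproxy, keepalived, and kubeadm-config templates iterate over them.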