├── .gitignore ├── .gitlab-ci.yml ├── .travis.yml ├── LICENSE ├── README.md ├── ansible.cfg ├── destroy.yaml ├── files ├── 10-kubeadm.conf ├── cloud-config.j2 ├── kubeadm-init.yaml.j2 ├── openstack-cloud-controller-manager-pod.yaml.j2 └── webhook.kubeconfig.yaml ├── group_vars └── all.yaml ├── roles ├── common │ └── tasks │ │ └── main.yaml ├── global-handlers │ └── handlers │ │ └── main.yaml ├── healthcheck │ └── tasks │ │ └── main.yaml ├── k8s-addons │ ├── files │ │ ├── manifests │ │ │ └── admin-user.yaml │ │ └── syncconfig.yaml │ ├── tasks │ │ ├── helm.yaml │ │ ├── main.yaml │ │ └── nginx-ingress.yaml │ └── templates │ │ ├── default-storage-class.yaml.j2 │ │ └── k8s-keystone-auth.yaml.j2 ├── k8s-rbac │ ├── files │ │ ├── cloud-controller-manager-role-bindings.yaml │ │ ├── cloud-controller-manager-roles.yaml │ │ └── k8s-auth-policy.yaml │ └── tasks │ │ └── main.yaml ├── kubeadm-master │ └── tasks │ │ └── main.yaml ├── kubeadm-nodes │ ├── meta │ │ └── main.yaml │ └── tasks │ │ └── main.yaml ├── kubeadm │ ├── meta │ │ └── main.yaml │ └── tasks │ │ └── main.yaml ├── kubectl │ └── tasks │ │ └── main.yaml ├── openstack-master │ └── tasks │ │ └── main.yaml ├── openstack-nodes │ └── tasks │ │ └── main.yaml └── openstack-security-groups │ └── tasks │ └── main.yaml ├── site.yaml └── upgrade.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | group_vars/nodes.yaml 2 | *.retry 3 | admin.conf 4 | admin-*.conf 5 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | image: gitlab.infraly.ch:4567/francois/docker-ansible:master 2 | 3 | stages: 4 | - syntax 5 | - test 6 | - staging 7 | 8 | syntax: 9 | stage: syntax 10 | script: 11 | - ansible-playbook --version 12 | - ansible-playbook --syntax-check site.yaml 13 | 14 | lint: 15 | stage: syntax 16 | script: 17 | - ansible-lint site.yaml 18 | 
allow_failure: true 19 | 20 | test: 21 | stage: test 22 | variables: 23 | NAME: k8s-gitlab-ci-$CI_BUILD_ID 24 | KEY: gitlab-ci-$CI_BUILD_ID 25 | script: 26 | - mkdir -p ~/.ssh 27 | - ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa 28 | - cat ~/.ssh/id_rsa 29 | - openstack keypair create --public-key ~/.ssh/id_rsa.pub gitlab-ci-$CI_BUILD_ID 30 | - ansible-playbook site.yaml 31 | after_script: 32 | - STATE=absent ansible-playbook site.yaml || true 33 | - openstack keypair delete gitlab-ci-$CI_BUILD_ID || true 34 | 35 | staging: 36 | stage: staging 37 | variables: 38 | NAME: k8s-staging 39 | KEY: k8s-staging 40 | script: 41 | - mkdir -p ~/.ssh 42 | - echo $STAGING_SSH_PRIVATE_KEY | base64 -d > ~/.ssh/id_rsa 43 | - chmod 600 ~/.ssh/id_rsa 44 | - echo $STAGING_SSH_PUBLIC_KEY | base64 -d > ~/.ssh/id_rsa.pub 45 | - openstack keypair delete $KEY || true 46 | - openstack keypair create --public-key ~/.ssh/id_rsa.pub $KEY 47 | - ansible-playbook site.yaml 48 | only: 49 | - master 50 | artifacts: 51 | paths: 52 | - admin.conf 53 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | 3 | python: 2.7 4 | 5 | virtualenv: 6 | system_site_packages: true 7 | 8 | install: 9 | - pip install python-openstackclient 10 | - pip install shade 11 | - pip install ansible 12 | - pip install ansible-lint 13 | 14 | env: 15 | NAME: k8s-ci-$TRAVIS_BUILD_NUMBER 16 | NETWORK: k8s-ci-$TRAVIS_BUILD_NUMBER 17 | KEY: ci-$TRAVIS_BUILD_NUMBER 18 | MASTER_BOOT_FROM_VOLUME: False 19 | 20 | script: 21 | - ansible-playbook --syntax-check site.yaml 22 | - ansible-lint site.yaml || true 23 | - openstack token issue > /dev/null || (echo "No OpenStack credentials available, not running tests"; exit 1) 24 | - mkdir -p ~/.ssh 25 | - ssh-keygen -t rsa -P "" -f ~/.ssh/id_rsa 26 | - openstack keypair create --public-key ~/.ssh/id_rsa.pub ci-$TRAVIS_BUILD_NUMBER 27 | - 
ansible-playbook site.yaml 28 | 29 | after_script: 30 | - ansible-playbook destroy.yaml 31 | - openstack keypair delete ci-$TRAVIS_BUILD_NUMBER 32 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # k8s-on-openstack 2 | 3 | An opinionated way to deploy a Kubernetes cluster on top of an OpenStack cloud. 4 | 5 | It is based on the following tools: 6 | 7 | * `kubeadm` 8 | * `ansible` 9 | 10 | ## Getting started 11 | 12 | The following mandatory environment variables need to be set before calling `ansible-playbook`: 13 | 14 | * `OS_*`: standard OpenStack environment variables such as `OS_AUTH_URL`, `OS_USERNAME`, ... 
15 | * `KEY`: name of an existing SSH keypair 16 | 17 | The following optional environment variables can also be set: 18 | 19 | * `NAME`: name of the Kubernetes cluster, used to derive instance names, `kubectl` configuration and security group name 20 | * `IMAGE`: name of an existing Ubuntu 16.04 image 21 | * `EXTERNAL_NETWORK`: name of the neutron external network, defaults to 'public' 22 | * `FLOATING_IP_POOL`: name of the floating IP pool 23 | * `FLOATING_IP_NETWORK_UUID`: uuid of the floating IP network (required for LBaaSv2) 24 | * `USE_OCTAVIA`: try to use Octavia instead of Neutron LBaaS, defaults to False 25 | * `USE_LOADBALANCER`: assume a loadbalancer is used and allow traffic to nodes (default: false) 26 | * `SUBNET_CIDR`: the subnet CIDR for OpenStack's network (default: `10.8.10.0/24`) 27 | * `POD_SUBNET_CIDR`: CIDR of the POD network (default: `10.96.0.0/16`) 28 | * `CLUSTER_DNS_IP`: IP address of the cluster DNS service passed to kubelet (default: `10.96.0.10`) 29 | * `BLOCK_STORAGE_VERSION`: version of the block storage (Cinder) service, defaults to 'v2' 30 | * `IGNORE_VOLUME_AZ`: whether to ignore the AZ field of volumes, needed on some clouds where AZs confuse the driver, defaults to False. 31 | * `NODE_MEMORY`: how many MB of memory should nodes have, defaults to 4GB 32 | * `NODE_FLAVOR`: allows you to configure the exact OpenStack flavor name or ID to use for the nodes. When set, the `NODE_MEMORY` setting is ignored. 33 | * `NODE_COUNT`: how many nodes should we provision, defaults to 3 34 | * `NODE_AUTO_IP`: assign a floating IP to nodes, defaults to False 35 | * `NODE_DELETE_FIP`: delete floating IP when node is destroyed, defaults to True 36 | * `NODE_BOOT_FROM_VOLUME`: boot node instances using boot from volume. Useful on clouds with only boot from volume 37 | * `NODE_TERMINATE_VOLUME`: delete the root volume when each node instance is destroyed, defaults to True 38 | * `NODE_VOLUME_SIZE`: size of each node volume.
defaults to 64GB 39 | * `NODE_EXTRA_VOLUME`: create an extra unmounted data volume for each node, defaults to False 40 | * `NODE_EXTRA_VOLUME_SIZE`: size of extra data volume for each node, defaults to 80GB 41 | * `NODE_DELETE_EXTRA_VOLUME`: delete the extra data volume for each node when the node is destroyed, defaults to True 42 | * `MASTER_BOOT_FROM_VOLUME`: boot the master instance on a volume for data persistence, defaults to True 43 | * `MASTER_TERMINATE_VOLUME`: delete the volume when the master instance is destroyed, defaults to True 44 | * `MASTER_VOLUME_SIZE`: size of the master volume, defaults to 64GB 45 | * `MASTER_MEMORY`: how many MB of memory should master have, defaults to 4 GB 46 | * `MASTER_FLAVOR`: allows you to configure the exact OpenStack flavor name or ID to use for the master. When set, the `MASTER_MEMORY` setting is ignored. 47 | * `AVAILABILITY_ZONE`: the availability zone to use for nodes and the default `StorageClass` (defaults to `nova`). This affects `PersistentVolumeClaims` without an explicit storage class. 48 | * `HELM_REPOS`: a list of additional helm repos to add, separated by semicolons. Example: `charts* https://github.com/helm/charts;mycharts https://github.com/dev/mycharts` 49 | * `HELM_INSTALL`: a list of helm charts and their parameters to install, separated by semicolons. Example: `mycharts/mychart;charts/somechart --name somechart --namespace somenamespace` 50 | 51 | Spin up a new cluster: 52 | 53 | ```console 54 | $ ansible-playbook site.yaml 55 | ``` 56 | 57 | Destroy the cluster: 58 | 59 | ```console 60 | $ ansible-playbook destroy.yaml 61 | ``` 62 | 63 | Upgrade the cluster: 64 | 65 | The `upgrade.yaml` playbook implements the upgrade steps described in https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-upgrade-1-11/ 66 | After editing the `kubernetes_version` and `kubernetes_ubuntu_version` variables in `group_vars/all.yaml`, you can run the following commands.
67 | 68 | ```console 69 | $ ansible-playbook upgrade.yaml 70 | $ ansible-playbook site.yaml 71 | ``` 72 | 73 | ## Open Issues 74 | 75 | ### Find a better way to configure worker nodes' network plugin 76 | 77 | Somehow, the network plugin (kubenet) is not correctly set on the worker node. On the master node `/var/lib/kubelet/kubeadm-flags.env` (created by `kubeadm init`) contains: 78 | 79 | ```bash 80 | KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --cloud-provider=external --network-plugin=kubenet --pod-infra-container-image=k8s.gcr.io/pause:3.1 --resolv-conf=/run/systemd/resolve/resolv.conf" 81 | ``` 82 | 83 | It contains the correct `--network-plugin=kubenet` as configured [here](https://github.com/pfisterer/k8s-on-openstack-wip-k8s-1.15/blob/master/files/kubeadm-init.yaml.j2#L9). After joining the k8s cluster, the worker node's copy of `/var/lib/kubelet/kubeadm-flags.env` (created by `kubeadm join`) looks like this: 84 | 85 | ```bash 86 | KUBELET_KUBEADM_ARGS="--cgroup-driver=systemd --network-plugin=cni --pod-infra-container-image=k8s.gcr.io/pause:3.1 --resolv-conf=/run/systemd/resolve/resolv.conf" 87 | ``` 88 | 89 | It contains `--network-plugin=cni` despite setting `network-plugin: kubenet` [here](https://github.com/pfisterer/k8s-on-openstack-wip-k8s-1.15/blob/master/files/kubeadm-init.yaml.j2#L21). But the JoinConfiguration is ignored by `kubeadm join` when using a join token. 90 | 91 | Once I edit `/var/lib/kubelet/kubeadm-flags.env` to contain --network-plugin=kubenet, the worker node goes online. I've added a hack in [roles/kubeadm-nodes/tasks/main.yaml](https://github.com/pfisterer/k8s-on-openstack-wip-k8s-1.15/blob/master/roles/kubeadm-nodes/tasks/main.yaml#L12) to set the correct value. 
92 | 93 | 94 | ## Prerequisites 95 | 96 | * Ansible (tested with version 2.9.1) 97 | * Shade library required by Ansible OpenStack modules (`python-shade` for Debian) 98 | 99 | ## CI/CD 100 | 101 | The following environment variables needs to be defined: 102 | 103 | * `OS_AUTH_URL` 104 | * `OS_PASSWORD` 105 | * `OS_USERNAME` 106 | * `OS_DOMAIN_NAME` 107 | 108 | # Authors 109 | 110 | * François Deppierraz 111 | * Oli Schacher 112 | * Saverio Proto 113 | * @HaseHarald 114 | * Dennis Pfisterer 115 | 116 | # References 117 | 118 | * https://kubernetes.io/docs/getting-started-guides/kubeadm/ 119 | * https://www.weave.works/docs/net/latest/kube-addon/ 120 | * https://github.com/kubernetes/dashboard#kubernetes-dashboard 121 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = /dev/null 3 | pipelining = True 4 | callback_whitelist = profile_tasks 5 | gathering = smart 6 | gather_subset = !all 7 | -------------------------------------------------------------------------------- /destroy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Gather server facts from OpenStack 3 | hosts: localhost 4 | pre_tasks: 5 | - name: Set variables 6 | set_fact: 7 | state: absent 8 | tasks: 9 | - name: Gather data about k8s master 10 | os_server_info: 11 | server: "{{ master_name }}" 12 | register: os_server_info_master_result 13 | 14 | - name: Update inventory with data about k8s master 15 | add_host: 16 | name: "{{ item.name }}" 17 | ansible_ssh_host: "{{ item.public_v4 }}" 18 | ansible_ssh_user: ubuntu 19 | groupname: master 20 | loop: "{{ os_server_info_master_result.openstack_servers }}" 21 | 22 | - name: Gather data about k8s nodes 23 | os_server_info: 24 | server: "{{ nodes_name }}*" 25 | register: os_server_info_nodes_result 26 | 27 | - name: Update inventory with data about 
k8s nodes 28 | add_host: 29 | name: "{{ item.name }}" 30 | ansible_ssh_host: "{{ item.private_v4 }}" 31 | ansible_ssh_user: ubuntu 32 | ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q ubuntu@{{ hostvars[groups.master[0]][''ansible_ssh_host''] }}"' 33 | groupname: nodes 34 | when: item.name != master_name 35 | loop: "{{ os_server_info_nodes_result.openstack_servers }}" 36 | 37 | - name: Drain all worker nodes (using kubectl drain) 38 | hosts: master 39 | gather_facts: no 40 | ignore_unreachable: yes 41 | tasks: 42 | - name: Delete all services (e.g., to release loadbalancers in OpenStack) 43 | shell: "kubectl delete svc --all-namespaces --force --all" 44 | 45 | - name: Wait until public IPs are released 46 | shell: "kubectl get svc --all-namespaces -o jsonpath='{.items[*].status.loadBalancer.ingress[*].ip}'" 47 | register: load_balancer_public_ips 48 | retries: 10 49 | delay: 6 50 | until: load_balancer_public_ips.stdout | trim | length == 0 51 | 52 | - name: Get the nodes to drain 53 | # Get a list of worker nodes (separated by blanks) 54 | shell: "kubectl get nodes --selector='!node-role.kubernetes.io/master' -o jsonpath='{.items[*].metadata.name}'" 55 | register: worker_nodes_to_drain 56 | 57 | - name: Drain each node worker node 58 | shell: "kubectl drain --force --delete-local-data=true --grace-period=60 --ignore-daemonsets=true --timeout=120s '{{ item }}'" 59 | loop: "{{ worker_nodes_to_drain.stdout.split(' ') }}" 60 | ignore_errors: True 61 | 62 | - name: Destroy k8s cluster on nodes (using kubeadm reset) 63 | hosts: nodes 64 | become: true 65 | gather_facts: no 66 | ignore_unreachable: yes 67 | pre_tasks: 68 | - name: Set variables 69 | set_fact: 70 | state: absent 71 | roles: 72 | - kubeadm-nodes 73 | 74 | - name: Destroy k8s cluster on nodes (using kubeadm reset) 75 | hosts: master 76 | become: true 77 | gather_facts: no 78 | ignore_unreachable: yes 79 | ignore_errors: yes 80 | pre_tasks: 81 | - name: Set variables 82 | set_fact: 83 | state: 
absent 84 | roles: 85 | - kubeadm-master 86 | 87 | - name: Destroy OpenStack cluster resources 88 | hosts: localhost 89 | pre_tasks: 90 | - name: Set variables 91 | set_fact: 92 | state: absent 93 | 94 | roles: 95 | - openstack-master 96 | - openstack-nodes 97 | - openstack-security-groups 98 | 99 | tasks: 100 | - name: Clear static routes 101 | shell: openstack router set --no-route "{{ router_name }}" 102 | 103 | - name: Delete router 104 | os_router: 105 | state: absent 106 | name: "{{ router_name }}" 107 | 108 | - name: Delete network 109 | os_network: 110 | state: absent 111 | name: "{{ network_name }}" 112 | 113 | - name: Delete subnet 114 | os_subnet: 115 | state: absent 116 | name: "{{ network_name }}" 117 | -------------------------------------------------------------------------------- /files/10-kubeadm.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="KUBELET_KUBECONFIG_ARGS=--cloud-provider=external --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --kubeconfig=/etc/kubernetes/kubelet.conf" 3 | Environment="KUBELET_CONFIG_ARGS=--config=/var/lib/kubelet/config.yaml" 4 | # This is a file that "kubeadm init" and "kubeadm join" generate at runtime, populating the KUBELET_KUBEADM_ARGS variable dynamically 5 | EnvironmentFile=-/var/lib/kubelet/kubeadm-flags.env 6 | # This is a file that the user can use for overrides of the kubelet args as a last resort. Preferably, 7 | #the user should use the .NodeRegistration.KubeletExtraArgs object in the configuration files instead. 8 | # KUBELET_EXTRA_ARGS should be sourced from this file. 
9 | EnvironmentFile=-/etc/default/kubelet 10 | ExecStart= 11 | ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_CONFIG_ARGS $KUBELET_KUBEADM_ARGS $KUBELET_EXTRA_ARGS 12 | -------------------------------------------------------------------------------- /files/cloud-config.j2: -------------------------------------------------------------------------------- 1 | [Global] 2 | auth-url = {{ lookup('env', 'OS_AUTH_URL') }} 3 | username = {{ lookup('env', 'OS_USERNAME') }} 4 | password = "{{ lookup('env', 'OS_PASSWORD') }}" 5 | tenant-name = {{ lookup('env', 'OS_PROJECT_NAME') }} 6 | {% if lookup('env', 'OS_PROJECT_ID') != '' %} 7 | tenant-id = {{ lookup('env', 'OS_PROJECT_ID') }} 8 | {% endif %} 9 | {% if lookup('env', 'OS_REGION_NAME') != '' %} 10 | region = {{ lookup('env', 'OS_REGION_NAME') }} 11 | {% endif %} 12 | {% if lookup('env', 'OS_DOMAIN_NAME') != '' %} 13 | domain-name = {{ lookup('env', 'OS_DOMAIN_NAME') }} 14 | {% elif lookup('env', 'OS_USER_DOMAIN_NAME') != '' %} 15 | domain-name = {{ lookup('env', 'OS_USER_DOMAIN_NAME') }} 16 | {% endif %} 17 | {% if lookup('env', 'OS_USER_DOMAIN_ID') != '' %} 18 | domain-id = {{ lookup('env', 'OS_USER_DOMAIN_ID') }} 19 | {% endif %} 20 | 21 | [BlockStorage] 22 | trust-device-path = false 23 | bs-version = {{ hostvars[groups.master[0]]['block_storage_version'] }} 24 | {% if hostvars[groups.master[0]]['ignore_volume_az'] %} 25 | ignore-volume-az = true 26 | {% endif %} 27 | 28 | {% if lookup('env', 'FLOATING_IP_NETWORK_UUID') != '' %} 29 | [LoadBalancer] 30 | lb-version = v2 31 | floating-network-id = {{ lookup('env', 'FLOATING_IP_NETWORK_UUID') }} 32 | subnet-id = {{ hostvars[groups.master[0]]['subnetuuid'] }} 33 | create-monitor = yes 34 | {% if hostvars[groups.master[0]]['use_octavia'] %} 35 | use-octavia = yes 36 | {% endif %} 37 | monitor-delay = 1m 38 | monitor-timeout = 30s 39 | monitor-max-retries = 3 40 | {% endif %} 41 | 42 | [Route] 43 | router-id = {{ hostvars[groups.master[0]]['routeruuid'] }} 44
| -------------------------------------------------------------------------------- /files/kubeadm-init.yaml.j2: -------------------------------------------------------------------------------- 1 | # Docs @ https://godoc.org/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta2 2 | 3 | apiVersion: kubeadm.k8s.io/v1beta2 4 | kind: InitConfiguration 5 | nodeRegistration: 6 | kubeletExtraArgs: 7 | cgroup-driver: systemd 8 | cloud-provider: external 9 | network-plugin: kubenet 10 | cluster-dns: "{{ cluster_dns_ip }}" 11 | # non-masquerade-cidr: 0.0.0.0/0 # from https://medium.com/elotl-blog/kubernetes-networking-on-aws-part-i-99012e938a40 12 | localAPIEndpoint: 13 | advertiseAddress: "{{ hostvars[groups.master[0]].ansible_ssh_host }}" 14 | 15 | # This is ignored by kubeadm (because we are joining using a token, this excludes the use of JoinConfiguration) 16 | # see kubeadm-nodes/main.yaml for a hack to heal this issue 17 | #--- 18 | #apiVersion: kubeadm.k8s.io/v1beta2 19 | #kind: JoinConfiguration 20 | #nodeRegistration: 21 | # kubeletExtraArgs: 22 | # cloud-provider: external 23 | # network-plugin: kubenet 24 | 25 | --- 26 | apiVersion: kubelet.config.k8s.io/v1beta1 27 | kind: KubeletConfiguration 28 | cgroupDriver: systemd 29 | 30 | --- 31 | apiVersion: kubeadm.k8s.io/v1beta2 32 | kind: ClusterConfiguration 33 | kubernetesVersion: "{{ kubernetes_version }}" 34 | clusterName: "{{ name }}" 35 | apiServer: 36 | extraArgs: 37 | advertise-address: "{{ hostvars[groups.master[0]].ansible_ssh_host }}" 38 | bind-address: 0.0.0.0 39 | authorization-mode: "Node,Webhook,RBAC" 40 | authentication-token-webhook-config-file: "/etc/kubernetes/pki/webhook.kubeconfig.yaml" 41 | authorization-webhook-config-file: "/etc/kubernetes/pki/webhook.kubeconfig.yaml" 42 | cloud-provider: "external" 43 | extraVolumes: 44 | - name: "cloud-config" 45 | hostPath: "/etc/kubernetes/cloud-config" 46 | mountPath: "/etc/kubernetes/cloud-config" 47 | readOnly: true 48 | pathType: FileOrCreate 49 | 50 | 
controllerManager: 51 | extraArgs: 52 | cloud-provider: external 53 | external-cloud-volume-plugin: openstack 54 | bind-address: 0.0.0.0 55 | cloud-config: /etc/kubernetes/cloud-config 56 | configure-cloud-routes: "true" 57 | allocate-node-cidrs: "true" 58 | extraVolumes: 59 | - name: "cloud-config" 60 | hostPath: "/etc/kubernetes/cloud-config" 61 | mountPath: "/etc/kubernetes/cloud-config" 62 | readOnly: true 63 | pathType: FileOrCreate 64 | 65 | networking: 66 | # ServiceSubnet is the subnet used by k8s services. Defaults to "10.96.0.0/12". 67 | serviceSubnet: "{{ subnet_cidr }}" 68 | # PodSubnet is the subnet used by pods. 69 | podSubnet: "{{ pod_subnet_cidr }}" 70 | 71 | etcd: 72 | local: 73 | extraArgs: 74 | listen-peer-urls: "https://0.0.0.0:2380" 75 | listen-client-urls: "https://0.0.0.0:2379" 76 | -------------------------------------------------------------------------------- /files/openstack-cloud-controller-manager-pod.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | annotations: 5 | scheduler.alpha.kubernetes.io/critical-pod: "" 6 | labels: 7 | component: kube-controller-manager 8 | tier: control-plane 9 | name: openstack-cloud-controller-manager 10 | namespace: kube-system 11 | spec: 12 | containers: 13 | - name: openstack-cloud-controller-manager 14 | image: docker.io/k8scloudprovider/openstack-cloud-controller-manager:v0.2.0 15 | args: 16 | - /bin/openstack-cloud-controller-manager 17 | - --v=2 18 | - --allocate-node-cidrs=true 19 | - --cluster-cidr={{ pod_subnet_cidr }} 20 | - --cloud-config=/etc/kubernetes/cloud-config 21 | - --cloud-provider=openstack 22 | - --use-service-account-credentials=true 23 | - --address=127.0.0.1 24 | - --kubeconfig=/etc/kubernetes/controller-manager.conf 25 | volumeMounts: 26 | - mountPath: /etc/kubernetes/pki 27 | name: k8s-certs 28 | readOnly: true 29 | - mountPath: /etc/ssl/certs 30 | name: ca-certs 31 | readOnly: true 32 | - 
mountPath: /etc/kubernetes/controller-manager.conf 33 | name: kubeconfig 34 | readOnly: true 35 | - mountPath: /usr/libexec/kubernetes/kubelet-plugins/volume/exec 36 | name: flexvolume-dir 37 | - mountPath: /etc/kubernetes/cloud-config 38 | name: cloud-config 39 | readOnly: true 40 | resources: 41 | requests: 42 | cpu: 200m 43 | hostNetwork: true 44 | volumes: 45 | - hostPath: 46 | path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec 47 | type: DirectoryOrCreate 48 | name: flexvolume-dir 49 | - hostPath: 50 | path: /etc/kubernetes/pki 51 | type: DirectoryOrCreate 52 | name: k8s-certs 53 | - hostPath: 54 | path: /etc/ssl/certs 55 | type: DirectoryOrCreate 56 | name: ca-certs 57 | - hostPath: 58 | path: /etc/kubernetes/controller-manager.conf 59 | type: FileOrCreate 60 | name: kubeconfig 61 | - hostPath: 62 | path: /etc/kubernetes/cloud-config 63 | type: FileOrCreate 64 | name: cloud-config 65 | -------------------------------------------------------------------------------- /files/webhook.kubeconfig.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | clusters: 4 | - cluster: 5 | insecure-skip-tls-verify: true 6 | server: https://localhost:8443/webhook 7 | name: webhook 8 | contexts: 9 | - context: 10 | cluster: webhook 11 | user: webhook 12 | name: webhook 13 | current-context: webhook 14 | kind: Config 15 | preferences: {} 16 | users: 17 | - name: webhook 18 | -------------------------------------------------------------------------------- /group_vars/all.yaml: -------------------------------------------------------------------------------- 1 | key_name: "{{ lookup('env','KEY') }}" 2 | 3 | name: "{{ lookup('env','NAME') | default('k8s', true) }}" 4 | 5 | network_name: "{{ lookup('env','NETWORK') | default(name, true) }}" 6 | subnet_name: "{{ lookup('env','NETWORK') | default(name, true) }}" 7 | subnet_cidr: "{{ lookup('env','SUBNET_CIDR') | default('10.8.10.0/24', true) }}" 8 | pod_subnet_cidr: 
"{{ lookup('env','POD_SUBNET_CIDR') | default('10.96.0.0/16', true) }}"
cluster_dns_ip: "{{ lookup('env','CLUSTER_DNS_IP') | default('10.96.0.10', true) }}"

router_name: "{{ lookup('env','NAME') | default(name, true) }}"
floating_ip_pools: "{{ lookup('env', 'FLOATING_IP_POOL') | default(omit, true) }}"
external_network_name: "{{ lookup('env', 'EXTERNAL_NETWORK') | default('public', true) }}"
use_octavia: "{{ lookup('env', 'USE_OCTAVIA') | default('False', true) | bool }}"
use_loadbalancer: "{{ lookup('env', 'USE_LOADBALANCER') | default('False', true) | bool }}"
block_storage_version: "{{ lookup('env', 'BLOCK_STORAGE_VERSION') | default('v2', true) }}"
ignore_volume_az: "{{ lookup('env', 'IGNORE_VOLUME_AZ') | default('false', true) | bool }}"

master_name: "{{ name }}-master"
master_image: "{{ lookup('env','IMAGE') | default('bionic-server-cloudimg-amd64', true) }}"
master_flavor_ram: "{{ lookup('env','MASTER_MEMORY') | default('4096', true) }}"
master_flavor_name: "{{ lookup('env','MASTER_FLAVOR') | default(false) }}"
master_boot_from_volume: "{{ lookup('env', 'MASTER_BOOT_FROM_VOLUME') | default('True', true) }}"
master_terminate_volume: "{{ lookup('env', 'MASTER_TERMINATE_VOLUME') | default('True', true) }}"
master_volume_size: "{{ lookup('env', 'MASTER_VOLUME_SIZE') | default('64', true) }}"

nodes_count: "{{ lookup('env', 'NODE_COUNT') | default(3, true) }}"
nodes_name: "{{ name }}-" # node id will automatically be appended
nodes_image: "{{ lookup('env','IMAGE') | default('bionic-server-cloudimg-amd64', true) }}"
nodes_flavor_ram: "{{ lookup('env','NODE_MEMORY') | default('4096', true) }}"
nodes_flavor_name: "{{ lookup('env','NODE_FLAVOR') | default(false) }}"
# Cast to bool: env lookups yield strings, and a non-empty string such as
# 'False' is truthy, so conditions like `when: not nodes_auto_ip` (used in
# roles/openstack-nodes) would otherwise never fire as intended.
nodes_auto_ip: "{{ lookup('env', 'NODE_AUTO_IP') | default('False', true) | bool }}"
nodes_delete_fip: "{{ lookup('env', 'NODE_DELETE_FIP') | default('True', true) | bool }}"
# Some clouds
only support boot from volume - use it even for ephemeral nodes 35 | nodes_boot_from_volume: "{{ lookup('env', 'NODE_BOOT_FROM_VOLUME') | default('False', true) }}" 36 | nodes_terminate_volume: "{{ lookup('env', 'NODE_TERMINATE_VOLUME') | default('True', true) }}" 37 | nodes_volume_size: "{{ lookup('env', 'NODE_VOLUME_SIZE') | default('64', true) }}" 38 | # Whether to attach a data volume to each node. Useful when running Ceph via rook 39 | nodes_extra_volume: "{{ lookup('env', 'NODE_EXTRA_VOLUME') | default('True', true) }}" 40 | nodes_extra_volume_name: "{{ name }}-data-" # node id will automatically be appended 41 | nodes_extra_volume_size: "{{ lookup('env', 'NODE_EXTRA_VOLUME_SIZE') | default('80', true) }}" 42 | nodes_delete_extra_volume: "{{ lookup('env', 'NODE_DELETE_EXTRA_VOLUME') | default('True', true) }}" 43 | 44 | helm_repos: "{{ lookup('env', 'HELM_REPOS').split(';') | default([], true) }}" 45 | helm_install: "{{ lookup('env', 'HELM_INSTALL').split(';') | default([], true) }}" 46 | 47 | availability_zone: "{{ lookup('env','AVAILABILITY_ZONE') | default('nova', true) }}" 48 | 49 | #Change at your own risk 50 | kubernetes_version: v1.15.2 51 | kubernetes_ubuntu_version: 1.15.2-00 52 | kubernetes_cni_ubuntu_version: 0.7.5-00 53 | docker_version: 18.09.7-0ubuntu1~18.04.4 54 | -------------------------------------------------------------------------------- /roles/common/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Wait for cloud-init to finish 2 | raw: while ! 
test -f /var/lib/cloud/instance/boot-finished; do sleep 1; done 3 | retries: 5 4 | delay: 1 5 | tags: 6 | - skip_ansible_lint 7 | 8 | - name: Setup 9 | action: setup 10 | -------------------------------------------------------------------------------- /roles/global-handlers/handlers/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Restart kubelet 2 | systemd: 3 | state: restarted 4 | daemon_reload: yes 5 | name: kubelet 6 | 7 | - name: Restart docker 8 | systemd: 9 | state: restarted 10 | daemon_reload: yes 11 | name: docker 12 | 13 | - name: Restart rsyslog 14 | service: 15 | name: rsyslog 16 | state: restarted 17 | -------------------------------------------------------------------------------- /roles/healthcheck/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Wait for nodes registration 2 | shell: "/usr/bin/test $(kubectl get nodes | grep -ow Ready | wc -l) == {{ groups['nodes'] | length + groups['master'] | length }}" 3 | register: nodes_status 4 | retries: 30 5 | delay: 10 6 | until: nodes_status is success 7 | tags: 8 | - skip_ansible_lint 9 | 10 | - name: Get event log 11 | command: kubectl get events --all-namespaces 12 | register: events 13 | tags: 14 | - skip_ansible_lint 15 | 16 | - name: Display events 17 | debug: 18 | var: events.stdout_lines 19 | 20 | - name: Get nodes 21 | command: kubectl get nodes 22 | register: nodes 23 | tags: 24 | - skip_ansible_lint 25 | 26 | - name: Display nodes 27 | debug: 28 | var: nodes.stdout_lines 29 | 30 | - name: Get cluster info 31 | command: kubectl cluster-info 32 | register: clusterinfo 33 | tags: 34 | - skip_ansible_lint 35 | 36 | - name: Display cluster info 37 | debug: 38 | var: clusterinfo.stdout_lines 39 | -------------------------------------------------------------------------------- /roles/k8s-addons/files/manifests/admin-user.yaml: 
--------------------------------------------------------------------------------
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: admin-user
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: admin-user
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- kind: ServiceAccount
  name: admin-user
  namespace: kube-system

--------------------------------------------------------------------------------
/roles/k8s-addons/files/syncconfig.yaml:
--------------------------------------------------------------------------------
# In format, the %d, %n and %i wildcards represent keystone domain id, project name and id respectively
# WARNING: If it exceeds the maximum possible length of 63 characters, just the Keystone project uuid will be used as the namespace name.

# WARNING: a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g.
'my-name', or '123-abc', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?') 5 | 6 | #namespace_format: "prefix-%d-%n-%i-suffix" 7 | namespace_format: "keystone-%i" 8 | 9 | # List of Keystone project ids to omit from syncing 10 | projects_black_list: ["id1", "id2"] 11 | 12 | # List of data types to synchronize 13 | "data_types_to_sync": ["projects","role_assignments"] 14 | -------------------------------------------------------------------------------- /roles/k8s-addons/tasks/helm.yaml: -------------------------------------------------------------------------------- 1 | - name: install helm 2 | shell: "curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash" 3 | args: 4 | warn: no 5 | 6 | - name: add repos to helm 7 | command: helm repo add {{ item }} 8 | when: item != '' 9 | loop: "{{ helm_repos }}" 10 | 11 | # Not added by default with Helm v3 (required for nginx-ingress) 12 | - name: add repos to helm 13 | command: "helm repo add stable https://charts.helm.sh/stable" 14 | 15 | - name: update the helm repos 16 | command: helm repo update 17 | 18 | - name: install helm charts 19 | command: helm install {{ item }} 20 | register: result 21 | retries: 30 22 | delay: 2 23 | until: result.rc == 0 24 | when: item != '' 25 | loop: "{{ helm_install }}" 26 | -------------------------------------------------------------------------------- /roles/k8s-addons/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: kubectl apply dashboard 2 | command: kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0-beta4/aio/deploy/recommended.yaml 3 | 4 | - name: copy manifests 5 | copy: 6 | src: "{{ role_path }}/files/manifests/" 7 | dest: "/home/ubuntu/manifests/" 8 | 9 | - name: Retrieve service catalog 10 | delegate_to: localhost 11 | os_auth: 12 | 13 | - name: Upload webook kubeconfig file 14 | template: 15 | dest: /home/ubuntu/manifests/k8s-keystone-auth.yaml 16 | src: 
"{{ role_path }}/templates/k8s-keystone-auth.yaml.j2" 17 | 18 | - name: kubectl apply admin-user for dashboard 19 | command: kubectl apply -f /home/ubuntu/manifests/admin-user.yaml 20 | 21 | - name: Install helm 22 | include: helm.yaml 23 | 24 | - name: Install NGINX ingress 25 | include: nginx-ingress.yaml 26 | 27 | - name: copy syncconfig.yaml 28 | become: true 29 | copy: 30 | src: "{{ role_path }}/files/syncconfig.yaml" 31 | dest: "/etc/kubernetes/syncconfig.yaml" 32 | 33 | - name: kubectl apply k8s-keystone-auth 34 | command: kubectl apply -f /home/ubuntu/manifests/k8s-keystone-auth.yaml 35 | 36 | # Default storage class 37 | - name: Label nodes with availability zone {{ availability_zone }} 38 | # cf. https://kubernetes.io/docs/reference/kubernetes-api/labels-annotations-taints/#failure-domainbetakubernetesiozone 39 | command: "kubectl label --overwrite=true nodes {{ nodes_name }}{{ item }} failure-domain.beta.kubernetes.io/zone={{ availability_zone }}" 40 | with_sequence: count={{ nodes_count }} 41 | 42 | - name: Copy default storage class configuration 43 | template: 44 | src: "{{ role_path }}/templates/default-storage-class.yaml.j2" 45 | dest: /home/ubuntu/manifests/default-storage-class.yaml 46 | 47 | - name: Apply default storage class 48 | command: kubectl apply -f /home/ubuntu/manifests/default-storage-class.yaml 49 | # End: Default storage class 50 | -------------------------------------------------------------------------------- /roles/k8s-addons/tasks/nginx-ingress.yaml: -------------------------------------------------------------------------------- 1 | - name: Set variables 2 | set_fact: 3 | k8s_ingress_release_name: "nginx-ingress" 4 | k8s_ingress_namespace: "kube-system" 5 | 6 | - name: Check if NGINX ingress is already installed 7 | shell: helm status -n 'kube-system' '{{ k8s_ingress_release_name }}' 8 | ignore_errors: True 9 | register: k8s_ingress_status 10 | 11 | - name: Add stable repo to helm 12 | shell: "helm repo add stable 
https://charts.helm.sh/stable" 13 | when: k8s_ingress_status.rc != 0 14 | 15 | - name: Update helm repos 16 | shell: "helm repo update" 17 | when: k8s_ingress_status.rc != 0 18 | 19 | - name: k8s - Install nginx-ingress via helm 20 | shell: "helm install --namespace {{ k8s_ingress_namespace }} --set 'controller.extraArgs.default-ssl-certificate=cert-manager/ingress-certificate-secret' --set 'controller.publishService.enabled=true' --set 'controller.image.tag=0.25.1' '{{ k8s_ingress_release_name }}' 'stable/nginx-ingress'" 21 | when: k8s_ingress_status.rc != 0 22 | -------------------------------------------------------------------------------- /roles/k8s-addons/templates/default-storage-class.yaml.j2: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: k8s-on-openstack-default-storage-class 5 | annotations: 6 | storageclass.beta.kubernetes.io/is-default-class: "true" 7 | provisioner: kubernetes.io/cinder 8 | parameters: 9 | availability: {{ availability_zone }} 10 | -------------------------------------------------------------------------------- /roles/k8s-addons/templates/k8s-keystone-auth.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | k8s-app: k8s-keystone-auth 7 | name: k8s-keystone-auth 8 | namespace: kube-system 9 | spec: 10 | replicas: 1 11 | selector: 12 | matchLabels: 13 | app: k8s-keystone-auth 14 | template: 15 | metadata: 16 | labels: 17 | app: k8s-keystone-auth 18 | spec: 19 | hostNetwork: True 20 | serviceAccount: admin-user 21 | serviceAccountName: admin-user 22 | containers: 23 | - image: k8scloudprovider/k8s-keystone-auth:v0.2.0 24 | imagePullPolicy: Always 25 | name: k8s-keystone-auth 26 | args: 27 | - ./bin/k8s-keystone-auth 28 | - --tls-cert-file 29 | - /etc/kubernetes/pki/apiserver.crt 30 | - 
--tls-private-key-file 31 | - /etc/kubernetes/pki/apiserver.key 32 | - --keystone-url 33 | - {{ ((service_catalog | selectattr('type', 'equalto', 'identity') | list)[0]['endpoints'] | selectattr('interface', 'equalto', 'public') | list)[0]['url'] }} 34 | - --policy-configmap-name 35 | - k8s-auth-policy 36 | - --sync-config-file 37 | - /etc/kubernetes/syncconfig.yaml 38 | volumeMounts: 39 | - mountPath: /etc/kubernetes 40 | name: k8s-certs 41 | readOnly: true 42 | - mountPath: /etc/ssl/certs 43 | name: ca-certs 44 | readOnly: true 45 | volumes: 46 | - hostPath: 47 | path: /etc/kubernetes 48 | type: DirectoryOrCreate 49 | name: k8s-certs 50 | - hostPath: 51 | path: /etc/ssl/certs 52 | type: DirectoryOrCreate 53 | name: ca-certs 54 | nodeSelector: 55 | node-role.kubernetes.io/master: "" 56 | tolerations: 57 | - key: node-role.kubernetes.io/master 58 | effect: NoSchedule 59 | -------------------------------------------------------------------------------- /roles/k8s-rbac/files/cloud-controller-manager-role-bindings.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRoleBinding 5 | metadata: 6 | name: system:cloud-node-controller 7 | roleRef: 8 | apiGroup: rbac.authorization.k8s.io 9 | kind: ClusterRole 10 | name: system:cloud-node-controller 11 | subjects: 12 | - kind: ServiceAccount 13 | name: cloud-node-controller 14 | namespace: kube-system 15 | - apiVersion: rbac.authorization.k8s.io/v1 16 | kind: ClusterRoleBinding 17 | metadata: 18 | name: system:pvl-controller 19 | roleRef: 20 | apiGroup: rbac.authorization.k8s.io 21 | kind: ClusterRole 22 | name: system:pvl-controller 23 | subjects: 24 | - kind: ServiceAccount 25 | name: pvl-controller 26 | namespace: kube-system 27 | - apiVersion: rbac.authorization.k8s.io/v1 28 | kind: ClusterRoleBinding 29 | metadata: 30 | name: system:cloud-controller-manager 31 | roleRef: 32 | apiGroup: 
rbac.authorization.k8s.io 33 | kind: ClusterRole 34 | name: system:cloud-controller-manager 35 | subjects: 36 | - kind: ServiceAccount 37 | name: cloud-controller-manager 38 | namespace: kube-system 39 | kind: List 40 | metadata: {} -------------------------------------------------------------------------------- /roles/k8s-rbac/files/cloud-controller-manager-roles.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | items: 3 | - apiVersion: rbac.authorization.k8s.io/v1 4 | kind: ClusterRole 5 | metadata: 6 | name: cloud-controller-manager 7 | rules: 8 | - apiGroups: 9 | - "" 10 | resources: 11 | - events 12 | verbs: 13 | - create 14 | - patch 15 | - update 16 | - apiGroups: 17 | - "" 18 | resources: 19 | - nodes 20 | verbs: 21 | - '*' 22 | - apiGroups: 23 | - "" 24 | resources: 25 | - nodes/status 26 | verbs: 27 | - patch 28 | - apiGroups: 29 | - "" 30 | resources: 31 | - services 32 | verbs: 33 | - list 34 | - patch 35 | - update 36 | - watch 37 | - apiGroups: 38 | - "" 39 | resources: 40 | - serviceaccounts 41 | verbs: 42 | - create 43 | - apiGroups: 44 | - "" 45 | resources: 46 | - persistentvolumes 47 | verbs: 48 | - get 49 | - list 50 | - update 51 | - watch 52 | - apiGroups: 53 | - "" 54 | resources: 55 | - endpoints 56 | verbs: 57 | - create 58 | - get 59 | - list 60 | - watch 61 | - update 62 | - apiVersion: rbac.authorization.k8s.io/v1 63 | kind: ClusterRole 64 | metadata: 65 | name: system:cloud-node-controller 66 | rules: 67 | - apiGroups: 68 | - "" 69 | resources: 70 | - nodes 71 | verbs: 72 | - delete 73 | - get 74 | - patch 75 | - update 76 | - list 77 | - apiGroups: 78 | - "" 79 | resources: 80 | - nodes/status 81 | verbs: 82 | - patch 83 | - apiGroups: 84 | - "" 85 | resources: 86 | - events 87 | verbs: 88 | - create 89 | - patch 90 | - update 91 | - apiVersion: rbac.authorization.k8s.io/v1 92 | kind: ClusterRole 93 | metadata: 94 | name: system:pvl-controller 95 | rules: 96 | - apiGroups: 97 
| - "" 98 | resources: 99 | - persistentvolumes 100 | verbs: 101 | - get 102 | - list 103 | - watch 104 | - apiGroups: 105 | - "" 106 | resources: 107 | - events 108 | verbs: 109 | - create 110 | - patch 111 | - update 112 | kind: List 113 | metadata: {} -------------------------------------------------------------------------------- /roles/k8s-rbac/files/k8s-auth-policy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: ConfigMap 4 | metadata: 5 | name: k8s-auth-policy 6 | namespace: kube-system 7 | data: 8 | policies: | 9 | [ 10 | ] 11 | -------------------------------------------------------------------------------- /roles/k8s-rbac/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: copy manifests 3 | copy: 4 | src: "{{ role_path }}/files/" 5 | dest: "/home/ubuntu/manifests/" 6 | 7 | - name: Wait for API server to be up 8 | shell: "/usr/bin/kubectl get nodes" 9 | register: api_server_up 10 | retries: 30 11 | delay: 10 12 | until: api_server_up is success 13 | tags: 14 | - skip_ansible_lint 15 | 16 | - name: kubectl apply RBAC roles 17 | command: kubectl apply -f /home/ubuntu/manifests/cloud-controller-manager-roles.yaml 18 | 19 | - name: kubectl apply RBAC role bindings 20 | command: kubectl apply -f /home/ubuntu/manifests/cloud-controller-manager-role-bindings.yaml 21 | 22 | - name: apply k8s-auth-policy.yaml 23 | command: kubectl apply -f /home/ubuntu/manifests/k8s-auth-policy.yaml 24 | -------------------------------------------------------------------------------- /roles/kubeadm-master/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Check for existing kubelet configuration 2 | stat: 3 | path: /etc/kubernetes/kubelet.conf 4 | register: kubelet_conf 5 | 6 | - name: Create kubeadm config file 7 | template: 8 | src: files/kubeadm-init.yaml.j2 9 | dest: 
/etc/kubeadm-init.conf.yaml
    mode: 0600
  when: state == "present"

- name: Upload the openstack-cloud-controller-manager pod descriptor
  template:
    src: files/openstack-cloud-controller-manager-pod.yaml.j2
    dest: /etc/kubernetes/manifests/openstack-cloud-controller-manager-pod.yaml
    mode: 0600
  when: state == "present"

- name: Ensure /etc/kubernetes/pki/ directory is present
  become: True
  file:
    path: /etc/kubernetes/pki/
    state: directory
    owner: root
    group: root
    mode: 0700
  when: state == "present"

- name: Upload webhook kubeconfig file
  copy:
    dest: /etc/kubernetes/pki/webhook.kubeconfig.yaml
    src: files/webhook.kubeconfig.yaml
  when: state == "present"

# `creates:` plus the stat guard keep this idempotent across re-runs.
- name: Run kubeadm init
  command: "kubeadm init --config /etc/kubeadm-init.conf.yaml"
  args:
    creates: /etc/kubernetes/kubelet.conf
  when:
    - state == "present"
    - not kubelet_conf.stat.exists

- name: Run kubeadm reset (on destroy)
  shell: "kubeadm reset --force"
  when:
    - state == "absent"
    - kubelet_conf.stat.exists

- name: Generate a join token
  command: kubeadm token create --print-join-command
  register: joincommand
  when: state == "present"

- name: Set fact joincommand
  set_fact:
    joincommand: "{{ joincommand.stdout }}"
  when: state == "present"

# The dots in the version regexps below are escaped so they match only a
# literal version string like "v1.15.2"; an unescaped "." matches any
# character and could rewrite unrelated text in the manifests.
- name: In case of upgrade make sure container versions are right for kube-apiserver
  replace:
    path: /etc/kubernetes/manifests/kube-apiserver.yaml
    regexp: v1\.[0-9]{1,2}\.[0-9]{1,2}
    replace: "{{ kubernetes_version }}"
  when: state == "present"

- name: In case of upgrade make sure container versions are right for kube-controller-manager
  replace:
    path: /etc/kubernetes/manifests/kube-controller-manager.yaml
    regexp: v1\.[0-9]{1,2}\.[0-9]{1,2}
    replace: "{{ kubernetes_version }}"
  when: state == "present"

- name: In case of upgrade make sure container versions are right for kube-scheduler
  replace:
    path: /etc/kubernetes/manifests/kube-scheduler.yaml
    regexp: v1\.[0-9]{1,2}\.[0-9]{1,2}
    replace: "{{ kubernetes_version }}"
  when: state == "present"

- name: Ensure kubectl configuration directory is present
  become: True
  file:
    path: /home/ubuntu/.kube
    state: directory
    owner: ubuntu
    group: ubuntu
    mode: 0700
  when: state == "present"

- name: Copy kubectl configuration for the default user
  become: True
  copy:
    remote_src: True
    src: /etc/kubernetes/admin.conf
    dest: /home/ubuntu/.kube/config
    owner: ubuntu
    group: ubuntu
    mode: 0600
  when: state == "present"

- name: Check for existing kubectl bash completion
  stat:
    path: /etc/bash_completion.d/kubectl
  register: kubectl_bash_completion

- name: Ensure kubectl bash_completion is present
  become: True
  shell: kubectl completion bash > /etc/bash_completion.d/kubectl
  when:
    - not kubectl_bash_completion.stat.exists
    - state == "present"

- name: Check for existing kubectx
  stat:
    path: /opt/kubectx/kubectx
  register: kubectx

- name: Clone kubectx repo
  git:
    repo: https://github.com/ahmetb/kubectx
    dest: /opt/kubectx
  when:
    - not kubectx.stat.exists
    - state == "present"

- name: Link kubectx
  file:
    state: link
    src: /opt/kubectx/kubectx
    dest: /usr/local/bin/kubectx
  when:
    - not kubectx.stat.exists
    - state == "present"

- name: Link kubens
  file:
    state: link
    src: /opt/kubectx/kubens
    dest: /usr/local/bin/kubens
  when:
    - not kubectx.stat.exists
    - state == "present"
-------------------------------------------------------------------------------- /roles/kubeadm-nodes/meta/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: global-handlers 4 | -------------------------------------------------------------------------------- /roles/kubeadm-nodes/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Check for existing kubelet configuration 2 | stat: 3 | path: /etc/kubernetes/kubelet.conf 4 | register: kubelet_conf 5 | 6 | - name: kubeadm join 7 | command: "{{ hostvars[groups.master[0]]['joincommand'] }}" 8 | args: 9 | creates: /etc/kubernetes/kubelet.conf 10 | when: 11 | - state == "present" 12 | - kubelet_conf.stat.exists == False 13 | 14 | - name: Run kubeadm reset (on destroy) 15 | command: "kubeadm reset --force" 16 | when: state == "absent" 17 | ignore_errors: True 18 | 19 | # For some reason "kubeadm join" does not write the correct value here 20 | - name: HACK - replace --network-plugin=cni with --network-plugin=kubenet 21 | replace: 22 | path: /var/lib/kubelet/kubeadm-flags.env 23 | regexp: network-plugin=cni 24 | replace: "network-plugin=kubenet" 25 | notify: 26 | - Restart kubelet 27 | when: state == "present" 28 | -------------------------------------------------------------------------------- /roles/kubeadm/meta/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - role: global-handlers 4 | -------------------------------------------------------------------------------- /roles/kubeadm/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Install k8s APT repo GPG key 2 | apt_key: 3 | url: "https://packages.cloud.google.com/apt/doc/apt-key.gpg" 4 | state: present 5 | 6 | - name: Setup k8s APT repo 7 | apt_repository: 8 | repo: deb http://apt.kubernetes.io/ 
kubernetes-xenial main 9 | state: present 10 | 11 | # TODO Make this idempotent (cf. https://github.com/ansible/ansible/issues/18889) 12 | - name: Unhold docker and kubernetes packages 13 | command: "apt-mark unhold {{ item }}" 14 | with_items: 15 | - docker.io 16 | - kubelet 17 | - kubeadm 18 | - kubectl 19 | - kubernetes-cni 20 | 21 | - name: Install docker and kubernetes packages 22 | apt: 23 | name: 24 | [ 25 | "docker.io={{ docker_version }}", 26 | "kubelet={{ kubernetes_ubuntu_version }}", 27 | "kubeadm={{ kubernetes_ubuntu_version }}", 28 | "kubectl={{ kubernetes_ubuntu_version }}", 29 | "kubernetes-cni={{ kubernetes_cni_ubuntu_version }}", 30 | ] 31 | state: present 32 | update_cache: yes 33 | 34 | - name: Enable docker service 35 | systemd: 36 | name: docker 37 | enabled: yes 38 | daemon_reload: yes 39 | 40 | - name: Hold docker and kubernetes packages 41 | command: "apt-mark hold {{ item }}" 42 | with_items: 43 | - docker.io 44 | - kubelet 45 | - kubeadm 46 | - kubectl 47 | - kubernetes-cni 48 | 49 | - name: configure docker to use journald 50 | copy: 51 | content: | 52 | { 53 | "exec-opts": [ 54 | "native.cgroupdriver=systemd" 55 | ], 56 | "log-driver": "journald", 57 | "storage-driver": "overlay2" 58 | } 59 | dest: /etc/docker/daemon.json 60 | owner: root 61 | group: root 62 | mode: 0644 63 | notify: 64 | - Restart docker 65 | 66 | - name: dont write docker logs to /var/log/syslog 67 | copy: 68 | content: | 69 | if $programname == 'dockerd' or $syslogtag == 'dockerd' then /dev/null 70 | & stop 71 | dest: "/etc/rsyslog.d/30-docker.conf" 72 | mode: 0644 73 | owner: root 74 | group: root 75 | notify: 76 | - Restart rsyslog 77 | 78 | - name: add hosts 79 | lineinfile: 80 | dest: "/etc/hosts" 81 | regexp: ".*{{ hostvars[item].ansible_hostname }}$" 82 | line: "{{ hostvars[item].ansible_default_ipv4.address }} {{ hostvars[item].ansible_hostname }}" 83 | state: present 84 | when: hostvars[item].ansible_hostname is defined 85 | with_items: "{{groups['all'] | 
default([])}}" 86 | 87 | - name: Create OpenStack cloud configuration 88 | template: 89 | src: files/cloud-config.j2 90 | dest: /etc/kubernetes/cloud-config 91 | mode: 0600 92 | 93 | - name: Override default kubeadm configuration to use the OpenStack cloud configuration 94 | copy: 95 | src: files/10-kubeadm.conf 96 | dest: /etc/systemd/system/kubelet.service.d/10-kubeadm.conf 97 | mode: 0600 98 | notify: 99 | - Restart kubelet 100 | -------------------------------------------------------------------------------- /roles/kubectl/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Retrieve kubectl configuration 2 | become: yes 3 | fetch: 4 | src: /etc/kubernetes/admin.conf 5 | dest: "admin-{{ name }}.conf" 6 | flat: yes 7 | fail_on_missing: yes 8 | when: state == 'present' 9 | -------------------------------------------------------------------------------- /roles/openstack-master/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Get cloud configuration 2 | tags: bootstrap 3 | os_client_config: 4 | 5 | - name: Create OpenStack instance 6 | os_server: 7 | security_groups: "sg-{{ name }}-master" 8 | name: "{{ master_name }}" 9 | image: "{{ master_image }}" 10 | boot_from_volume: "{{ master_boot_from_volume }}" 11 | terminate_volume: "{{ master_terminate_volume }}" 12 | volume_size: "{{ master_volume_size }}" 13 | key_name: "{{ key_name }}" 14 | flavor_ram: "{{ master_flavor_ram if not master_flavor_name else omit }}" 15 | flavor: "{{ master_flavor_name if master_flavor_name else omit }}" 16 | nics: 17 | - net-name: "{{ network_name }}" 18 | floating_ip_pools: "{{ floating_ip_pools }}" 19 | userdata: | 20 | #cloud-config 21 | package_upgrade: true 22 | hostname: "{{ master_name }}" 23 | manage_etc_hosts: false 24 | packages: 25 | - python 26 | - python-simplejson 27 | when: state == "present" 28 | register: "instance" 29 | 30 | - name: Delete OpenStack 
instances
  os_server:
    name: "{{ master_name }}"
    state: absent
  when: state == "absent"

# Expose the freshly created master to the rest of the play via the in-memory
# inventory; router/subnet UUIDs are carried along as hostvars for later roles.
- name: Update inventory
  add_host:
    name: "{{ instance.server.name }}"
    ansible_ssh_host: "{{ instance.openstack.public_v4 }}"
    ansible_ssh_user: ubuntu
    groupname: master
    routeruuid: "{{ routeruuid | default('None',true) }}"
    subnetuuid: "{{ subnetuuid | default('None',true) }}"
  when: state == "present"

# Block until sshd answers on the floating IP so later plays can connect.
- name: Wait during instances boot
  tags: bootstrap
  wait_for:
    host: "{{ instance.openstack.public_v4 }}"
    port: 22
    search_regex: OpenSSH
  when: state == "present"

# Pre-seed known_hosts so the first SSH connection is non-interactive.
- name: Allow SSH keys if we don't have them already
  lineinfile:
    dest: ~/.ssh/known_hosts
    create: yes
    state: present
    line: "{{ lookup('pipe', 'ssh-keyscan ' + instance.openstack.public_v4 ) }}"
  when: state == "present"

# ===== roles/openstack-nodes/tasks/main.yaml =====

# Load clouds.yaml / OS_* environment credentials for the os_* modules.
- name: Get cloud configuration
  tags: bootstrap
  os_client_config:

# Boot one worker VM per sequence index. Either a flavor name or a RAM size
# selects the flavor; the unused selector is omitted. cloud-init installs
# python so Ansible can manage the host afterwards.
- name: Create OpenStack instance
  os_server:
    security_groups: "sg-{{ name }}-nodes"
    name: "{{ nodes_name }}{{ item }}"
    image: "{{ nodes_image }}"
    key_name: "{{ key_name }}"
    flavor_ram: "{{ nodes_flavor_ram if not nodes_flavor_name else omit }}"
    flavor: "{{ nodes_flavor_name if nodes_flavor_name else omit }}"
    boot_from_volume: "{{ nodes_boot_from_volume }}"
    terminate_volume: "{{ nodes_terminate_volume }}"
    volume_size: "{{ nodes_volume_size }}"
    nics:
      - net-name: "{{ network_name }}"
    auto_ip: "{{ nodes_auto_ip }}"
    delete_fip: "{{ nodes_delete_fip }}"
    userdata: |
      #cloud-config
      package_upgrade: true
      hostname: "{{ nodes_name }}{{ item }}"
      manage_etc_hosts: false
      packages:
        - python
        - python-simplejson
  when: state == "present"
  register: "instances"
  with_sequence: count={{ nodes_count }}

# Optional per-node data volume (enabled by nodes_extra_volume).
- name: Create data volumes for minions
  os_volume:
    name: "{{ nodes_extra_volume_name }}{{ item }}"
    size: "{{ nodes_extra_volume_size }}"
    state: present
  when:
    - state == "present"
    - nodes_extra_volume
  with_sequence: count={{ nodes_count }}

- name: Attach data volumes to minions
  os_server_volume:
    state: present
    server: "{{ nodes_name }}{{ item }}"
    volume: "{{ nodes_extra_volume_name }}{{ item }}"
  when:
    - state == "present"
    - nodes_extra_volume
  with_sequence: count={{ nodes_count }}

# --- teardown path (state == "absent") ---

- name: Delete OpenStack instances
  os_server:
    name: "{{ nodes_name }}{{ item }}"
    state: absent
  when: state == "absent"
  with_sequence: count={{ nodes_count }}

- name: Delete data volumes
  os_volume:
    name: "{{ nodes_extra_volume_name }}{{ item }}"
    state: absent
  when:
    - state == "absent"
    - nodes_extra_volume
  with_sequence: count={{ nodes_count }}

# NOTE(review): this deletes "sg-{{ name }}", but the groups created in this
# repository are "sg-{{ name }}-master" and "sg-{{ name }}-nodes" (and those
# are already deleted by the openstack-security-groups role) — confirm this
# bare name is intentional and not a leftover.
- name: Delete security group
  tags: bootstrap
  os_security_group:
    name: "sg-{{ name }}"
    state: absent
  when: state == "absent"

# Without a floating IP the nodes are only reachable by jumping through the
# master, hence the ProxyCommand in ansible_ssh_common_args.
- name: Update inventory for private IP
  add_host:
    name: "{{ item.server.name }}"
    ansible_ssh_host: "{{ item.server.private_v4 }}"
    ansible_ssh_user: ubuntu
    ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q ubuntu@{{ hostvars[groups.master[0]][''ansible_ssh_host''] }}"'
    groupname: nodes
  with_items: "{{ instances.results }}"
  when:
    - state == "present"
    - not nodes_auto_ip

- name: Update inventory with public IP
  add_host:
    name: "{{ item.server.name }}"
    ansible_ssh_host: "{{ item.server.public_v4 }}"
    ansible_ssh_user: ubuntu
    groupname: nodes
  with_items: "{{ instances.results }}"
  when:
    -
state == "present" 96 | - nodes_auto_ip 97 | 98 | - name: Wait during nodes boot 99 | wait_for: 100 | host: "{{ hostvars[item]['ansible_ssh_host'] }}" 101 | port: 22 102 | search_regex: OpenSSH 103 | with_items: "{{ groups.nodes }}" 104 | delegate_to: "{{ groups.master[0] }}" 105 | when: state == "present" 106 | 107 | - name: Scan SSH host keys 108 | command: "ssh-keyscan {{ hostvars[item]['ansible_ssh_host'] }}" 109 | with_items: "{{ groups.nodes }}" 110 | register: ssh_host_keys 111 | delegate_to: "{{ groups.master[0] }}" 112 | when: state == "present" 113 | 114 | - name: Update SSH known_hosts 115 | lineinfile: 116 | name: ~/.ssh/known_hosts 117 | line: "{{ item.stdout }}" 118 | with_items: "{{ ssh_host_keys.results }}" 119 | when: state == "present" 120 | -------------------------------------------------------------------------------- /roles/openstack-security-groups/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | - name: Create master security group 2 | tags: bootstrap 3 | os_security_group: 4 | name: "sg-{{ name }}-master" 5 | state: present 6 | when: state == "present" 7 | 8 | - name: Create nodes security group 9 | tags: bootstrap 10 | os_security_group: 11 | name: "sg-{{ name }}-nodes" 12 | state: present 13 | when: state == "present" 14 | 15 | - name: Allow SSH to master from anywhere IPv4 16 | tags: bootstrap 17 | os_security_group_rule: 18 | security_group: "sg-{{ name }}-master" 19 | protocol: tcp 20 | port_range_min: 22 21 | port_range_max: 22 22 | remote_ip_prefix: 0.0.0.0/0 23 | when: state == "present" 24 | 25 | - name: Allow SSH to master from anywhere IPv6 26 | tags: bootstrap 27 | os_security_group_rule: 28 | security_group: "sg-{{ name }}-master" 29 | ethertype: IPv6 30 | protocol: tcp 31 | port_range_min: 22 32 | port_range_max: 22 33 | remote_ip_prefix: ::/0 34 | when: state == "present" 35 | 36 | - name: Allow K8S API to master from anywhere IPv4 37 | tags: bootstrap 38 | 
os_security_group_rule: 39 | security_group: "sg-{{ name }}-master" 40 | protocol: tcp 41 | port_range_min: 6443 42 | port_range_max: 6443 43 | remote_ip_prefix: 0.0.0.0/0 44 | when: state == "present" 45 | 46 | - name: Allow K8S API to master from anywhere IPv6 47 | tags: bootstrap 48 | os_security_group_rule: 49 | security_group: "sg-{{ name }}-master" 50 | ethertype: IPv6 51 | protocol: tcp 52 | port_range_min: 6443 53 | port_range_max: 6443 54 | remote_ip_prefix: ::/0 55 | when: state == "present" 56 | 57 | - name: Allow HTTP to master from anywhere IPv4 58 | tags: bootstrap 59 | os_security_group_rule: 60 | security_group: "sg-{{ name }}-master" 61 | protocol: tcp 62 | port_range_min: 80 63 | port_range_max: 80 64 | remote_ip_prefix: 0.0.0.0/0 65 | when: state == "present" 66 | 67 | - name: Allow HTTP to master from anywhere IPv6 68 | tags: bootstrap 69 | os_security_group_rule: 70 | security_group: "sg-{{ name }}-master" 71 | ethertype: IPv6 72 | protocol: tcp 73 | port_range_min: 80 74 | port_range_max: 80 75 | remote_ip_prefix: ::/0 76 | when: state == "present" 77 | 78 | - name: Allow HTTPS to master from anywhere IPv4 79 | tags: bootstrap 80 | os_security_group_rule: 81 | security_group: "sg-{{ name }}-master" 82 | protocol: tcp 83 | port_range_min: 443 84 | port_range_max: 443 85 | remote_ip_prefix: 0.0.0.0/0 86 | when: state == "present" 87 | 88 | - name: Allow HTTPS to master from anywhere IPv6 89 | tags: bootstrap 90 | os_security_group_rule: 91 | security_group: "sg-{{ name }}-master" 92 | ethertype: IPv6 93 | protocol: tcp 94 | port_range_min: 443 95 | port_range_max: 443 96 | remote_ip_prefix: ::/0 97 | when: state == "present" 98 | 99 | - name: Allow traffic to master from nodes 100 | tags: bootstrap 101 | os_security_group_rule: 102 | security_group: "sg-{{ name }}-master" 103 | remote_group: "sg-{{ name }}-nodes" 104 | when: state == "present" 105 | 106 | - name: Allow SSH to nodes from anywhere IPv4 107 | tags: bootstrap 108 | 
os_security_group_rule: 109 | security_group: "sg-{{ name }}-nodes" 110 | protocol: tcp 111 | port_range_min: 22 112 | port_range_max: 22 113 | remote_ip_prefix: 0.0.0.0/0 114 | when: state == "present" 115 | 116 | - name: Allow SSH to nodes from anywhere IPv6 117 | tags: bootstrap 118 | os_security_group_rule: 119 | security_group: "sg-{{ name }}-nodes" 120 | ethertype: IPv6 121 | protocol: tcp 122 | port_range_min: 22 123 | port_range_max: 22 124 | remote_ip_prefix: ::/0 125 | when: state == "present" 126 | 127 | - name: Allow traffic to nodes from nodes 128 | tags: bootstrap 129 | os_security_group_rule: 130 | security_group: "sg-{{ name }}-nodes" 131 | remote_group: "sg-{{ name }}-nodes" 132 | when: state == "present" 133 | 134 | - name: Allow traffic to nodes from master 135 | tags: bootstrap 136 | os_security_group_rule: 137 | security_group: "sg-{{ name }}-nodes" 138 | remote_group: "sg-{{ name }}-master" 139 | when: state == "present" 140 | 141 | - name: Allow load balancer traffic to nodes 142 | tags: bootstrap 143 | os_security_group_rule: 144 | security_group: "sg-{{ name }}-nodes" 145 | remote_ip_prefix: 10.8.10.0/24 146 | protocol: tcp 147 | port_range_min: 30000 148 | port_range_max: 32767 149 | when: 150 | - state == "present" 151 | - (use_octavia|bool or use_loadbalancer|bool) 152 | 153 | - name: Delete master security group 154 | tags: bootstrap 155 | os_security_group: 156 | name: "sg-{{ name }}-master" 157 | state: absent 158 | when: state == "absent" 159 | 160 | - name: Delete nodes security group 161 | tags: bootstrap 162 | os_security_group: 163 | name: "sg-{{ name }}-nodes" 164 | state: absent 165 | when: state == "absent" 166 | -------------------------------------------------------------------------------- /site.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Launch k8s master 3 | hosts: localhost 4 | pre_tasks: 5 | - name: create network 6 | os_network: 7 | name: "{{ name }}" 8 | 9 
| - name: create subnet 10 | os_subnet: 11 | network_name: "{{ network_name }}" 12 | name: "{{ subnet_name }}" 13 | cidr: "{{ subnet_cidr }}" 14 | dns_nameservers: 15 | - 8.8.8.8 16 | - 8.8.4.4 17 | register: subnet 18 | 19 | - name: create router 20 | os_router: 21 | name: "{{ router_name }}" 22 | network: "{{ external_network_name }}" 23 | interfaces: 24 | - "{{ network_name }}" 25 | register: router 26 | 27 | - name: Set variables 28 | set_fact: 29 | state: present 30 | routeruuid: "{{ router.id }}" 31 | subnetuuid: "{{ subnet.id }}" 32 | roles: 33 | - openstack-security-groups 34 | - openstack-master 35 | 36 | - name: Launch k8s nodes 37 | hosts: localhost 38 | roles: 39 | - openstack-nodes 40 | 41 | - name: Master preparation 42 | hosts: master 43 | pre_tasks: 44 | - name: Set variables 45 | set_fact: 46 | state: present 47 | tags: 48 | - bootstrap 49 | gather_facts: false 50 | roles: 51 | - common 52 | 53 | - name: Nodes preparation 54 | hosts: nodes 55 | tags: 56 | - bootstrap 57 | gather_facts: false 58 | roles: 59 | - common 60 | 61 | - name: Install repo and packages 62 | hosts: all 63 | tags: 64 | - bootstrap 65 | become: true 66 | roles: 67 | - kubeadm 68 | 69 | # ------------------------------- START WORKAROUNDS ---------------------------------------- 70 | 71 | #- name: WORKAROUND for iptables >= 1.8, cf. https://github.com/kubernetes/kubernetes/issues/71305 72 | # hosts: all 73 | # tasks: 74 | # - name: bla 75 | # shell: | 76 | # update-alternatives --set iptables /usr/sbin/iptables-legacy 77 | # update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy 78 | # update-alternatives --set arptables /usr/sbin/arptables-legacy 79 | # update-alternatives --set ebtables /usr/sbin/ebtables-legacy 80 | 81 | - name: WORKAROUND for networking to work with kubenet 82 | hosts: all 83 | become: true 84 | tasks: 85 | - name: Set policy ACCEPT on chain FORWARD (cf. 
https://github.com/projectcalico/calico/issues/1840)
      # NOTE(review): raw shell call is not idempotent (always reports
      # "changed"); consider the iptables module with policy: ACCEPT.
      shell: iptables -P FORWARD ACCEPT

# ------------------------------- END WORKAROUNDS ----------------------------------------

- name: k8s master setup
  hosts: master
  tags:
    - bootstrap
  become: true
  roles:
    - kubeadm-master

- name: k8s nodes setup
  hosts: nodes
  become: true
  pre_tasks:
    - name: Set variables
      set_fact:
        state: present
  tags:
    - bootstrap
  roles:
    - kubeadm-nodes

- name: k8s rbac
  hosts: master
  tags:
    - bootstrap
  roles:
    - k8s-rbac

- name: k8s addons
  hosts: master
  tags:
    - bootstrap
  roles:
    - k8s-addons

- name: Health check
  hosts: master
  roles:
    - healthcheck

- name: Local kubectl configuration
  hosts: master
  roles:
    - kubectl

# ===== upgrade.yaml =====
---
- name: Create inventory dinamically
  hosts: localhost
  pre_tasks:
    - name: Set variables
      set_fact:
        state: present

  roles:
    - openstack-master

- name: Upgrade K8S
  hosts: master
  become: true
  tasks:
    - name: upgrade kubeadm binary manually
      get_url:
        url: https://dl.k8s.io/release/{{ kubernetes_version }}/bin/linux/amd64/kubeadm
        dest: /usr/bin/kubeadm
        # BUG FIX: was `mode: 755` (unquoted, interpreted as decimal 755,
        # i.e. the wrong octal permissions). Quoted octal is required.
        mode: '0755'
        force: yes

    # TODO: implement a check that we really upgraded kubeadm
    # - name: Check kubeadm version
    #   shell: kubeadm version
    #   register: kubeadminversion
    #
    # - name: Fail
    #   fail:
    #     msg: wrong version
    #   when: kubeadminversion.stdout | search( "{{ kubernetes_version }}" )

    # TODO: is running this command really needed or is just informative ?
    # - name:
    #   shell: kubeadm upgrade plan

    # FIX: dropped the redundant `sudo` — this play already escalates via
    # `become: true`.
    - name: Run kubeadm upgrade
      shell: kubeadm upgrade apply {{ kubernetes_version }} -y