├── .github
│   ├── ISSUE_TEMPLATE
│   │   ├── bug_report.md
│   │   └── feature_request.md
│   └── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── CODE-OF-CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── README.md
├── Vagrantfile
├── ansible
│   ├── ansible.cfg
│   ├── playbook.yml
│   ├── pre.yml
│   └── roles
│       ├── common
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── meta
│       │   │   └── main.yml
│       │   ├── tasks
│       │   │   ├── debian.yml
│       │   │   ├── main.yml
│       │   │   └── redhat.yml
│       │   └── vars
│       │       └── main.yml
│       ├── docker
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── handlers
│       │   │   └── main.yml
│       │   ├── meta
│       │   │   └── main.yml
│       │   ├── tasks
│       │   │   ├── debian.yml
│       │   │   ├── main.yml
│       │   │   └── redhat.yml
│       │   ├── templates
│       │   │   └── etc
│       │   │       └── docker
│       │   │           └── daemon.json
│       │   └── vars
│       │       └── main.yml
│       ├── etcd
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── handlers
│       │   │   └── main.yml
│       │   ├── tasks
│       │   │   └── main.yml
│       │   ├── templates
│       │   │   └── etc
│       │   │       └── systemd
│       │   │           └── system
│       │   │               └── etcd.service
│       │   └── vars
│       │       └── main.yml
│       ├── kubernetes-cni
│       │   ├── defaults
│       │   │   └── main.yml
│       │   └── tasks
│       │       ├── calico.yml
│       │       ├── canal.yml
│       │       ├── flannel.yml
│       │       └── main.yml
│       ├── kubernetes-common
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── handlers
│       │   │   └── main.yml
│       │   ├── tasks
│       │   │   └── main.yml
│       │   └── templates
│       │       └── etc
│       │           └── default
│       │               └── kubelet
│       ├── kubernetes-master
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── filter_plugins
│       │   │   └── kube_master.py
│       │   ├── handlers
│       │   │   └── main.yml
│       │   ├── meta
│       │   │   └── main.yml
│       │   ├── tasks
│       │   │   ├── install.yml
│       │   │   ├── main.yml
│       │   │   └── upgrade.yml
│       │   └── templates
│       │       └── etc
│       │           └── kubernetes
│       │               └── kubeadm.conf
│       ├── kubernetes-node
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── meta
│       │   │   └── main.yml
│       │   ├── tasks
│       │   │   └── main.yml
│       │   └── templates
│       │       └── etc
│       │           └── kubernetes
│       │               └── kubeadm.conf
│       ├── kubernetes-user
│       │   ├── defaults
│       │   │   └── main.yml
│       │   └── tasks
│       │       └── main.yml
│       ├── kubernetes
│       │   ├── defaults
│       │   │   └── main.yml
│       │   ├── filter_plugins
│       │   │   └── kube.py
│       │   ├── tasks
│       │   │   ├── debian.yml
│       │   │   ├── main.yml
│       │   │   └── redhat.yml
│       │   ├── templates
│       │   │   └── etc
│       │   │       ├── kubernetes_community_ami_version
│       │   │       └── systemd
│       │   │           └── system
│       │   │               ├── kubelet.service
│       │   │               └── kubelet.service.d
│       │   │                   ├── 10-kubeadm.conf
│       │   │                   └── 20-cloud-provider.conf
│       │   └── vars
│       │       └── main.yml
│       ├── packer-cleanup
│       │   └── tasks
│       │       └── main.yml
│       ├── providers
│       │   ├── defaults
│       │   │   └── main.yml
│       │   └── tasks
│       │       ├── aws.yml
│       │       └── main.yml
│       └── test_loadbalancer
│           ├── defaults
│           │   └── main.yml
│           ├── tasks
│           │   └── main.yml
│           └── templates
│               └── etc
│                   └── nginx
│                       └── nginx.conf
├── docs
│   ├── README.md
│   ├── architecture.md
│   ├── bootstrapping-a-kubernetes-cluster.md
│   └── building-images.md
├── packer
│   ├── .gitignore
│   ├── README.md
│   ├── aws-us-east-1.json
│   ├── aws-us-east-2.json
│   ├── aws-us-west-1.json
│   ├── aws-us-west-2.json
│   ├── gcp-source-images.json
│   ├── oci-us-phoenix-1.json
│   └── packer.json
├── setup.py
├── swizzle
│   ├── .gitignore
│   ├── README.md
│   ├── Vagrantfile
│   ├── add-nodes.yml
│   ├── ansible.cfg
│   ├── examples
│   │   ├── calico.yml
│   │   ├── canal.yml
│   │   ├── sample-extra-vars-centos.yml
│   │   ├── sample-extra-vars-ubuntu.yml
│   │   └── sample-inventory.ini
│   ├── install.yml
│   ├── library
│   │   └── modify_manifest.py
│   ├── main.yml
│   ├── provision.py
│   └── upgrade.yml
└── wardroom
    ├── __init__.py
    ├── aws.py
    └── cli.py
/.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Tell us about a problem you are experiencing 4 | 5 | --- 6 | 7 | /kind bug 8 | 9 | **What steps did you take and what happened:** 10 | [A clear and concise description of what the bug is.]
11 | 12 | 13 | **What did you expect to happen:** 14 | 15 | 16 | **Anything else you would like to add:** 17 | [Miscellaneous information that will assist in solving the issue.] 18 | 19 | 20 | **Environment:** 21 | 22 | - Wardroom version: `branch` 23 | - OS (e.g. from `/etc/os-release`): 24 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature enhancement request 3 | about: Suggest an idea for this project 4 | 5 | --- 6 | 7 | /kind feature 8 | 9 | **Describe the solution you'd like** 10 | [A clear and concise description of what you want to happen.] 11 | 12 | 13 | **Anything else you would like to add:** 14 | [Miscellaneous information that will assist in solving the issue.] 15 | 16 | 17 | **Environment:** 18 | 19 | - Wardroom version: `branch` 20 | - OS (e.g. from `/etc/os-release`): 21 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 5 | 6 | **What this PR does / why we need it**: 7 | 8 | **Which issue(s) this PR fixes**: 9 | 13 | Fixes # 14 | 15 | **Applies to Kubernetes versions**: 16 | 17 | - `1.` 18 | 19 | **Special notes for your reviewer**: 20 | 21 | _Please confirm that if this PR changes any image versions, then that's the sole change this PR makes._ 22 | 23 | **Release note**: 24 | 30 | ```release-note 31 | 32 | ``` 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.retry 2 | .vagrant 3 | *.pyc 4 | build/ 5 | dist/ 6 | *.egg-info 7 | venv/ 8 | -------------------------------------------------------------------------------- /CODE-OF-CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Wardroom Community Code of Conduct 2 | 3 | ## Contributor Code of Conduct 4 | 5 | As contributors and maintainers of this project, and in the interest of 6 | fostering an open and welcoming community, we pledge to respect all people who 7 | contribute through reporting issues, posting feature requests, updating 8 | documentation, submitting pull requests or patches, and other activities. 9 | 10 | We are committed to making participation in this project a harassment-free 11 | experience for everyone, regardless of level of experience, gender, gender 12 | identity and expression, sexual orientation, disability, personal appearance, 13 | body size, race, ethnicity, age, religion, or nationality. 14 | 15 | Examples of unacceptable behavior by participants include: 16 | 17 | * The use of sexualized language or imagery. 18 | * Personal attacks. 19 | * Trolling or insulting/derogatory comments. 20 | * Public or private harassment. 21 | * Publishing others' private information, such as physical or electronic 22 | addresses, without explicit permission. 23 | * Other unethical or unprofessional conduct. 24 | 25 | Project maintainers have the right and responsibility to remove, edit, or reject 26 | comments, commits, code, wiki edits, issues, and other contributions that are 27 | not aligned to this Code of Conduct. By adopting this Code of Conduct, project 28 | maintainers commit themselves to fairly and consistently applying these 29 | principles to every aspect of managing this project.
Project maintainers who do 30 | not follow or enforce the Code of Conduct may be permanently removed from the 31 | project team. 32 | 33 | This code of conduct applies both within project spaces and in public spaces 34 | when an individual is representing the project or its community. 35 | 36 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 37 | reported by contacting the project maintainer(s). 38 | 39 | This Code of Conduct is adapted from the [CNCF Code of 40 | Conduct](https://github.com/cncf/foundation/blob/master/code-of-conduct.md) and 41 | [Contributor Covenant](http://contributor-covenant.org/version/1/2/0/), version 42 | 1.2.0. 43 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | We welcome contributions from the community. Please read the following 2 | guidelines carefully to maximize the chances of your PR being merged. 3 | 4 | ## Submitting a PR 5 | 6 | * Fork the repository to your own account 7 | * Create your PR 8 | * Your PR description should have details on what the PR does. If it fixes an 9 | existing issue it should end with "Fixes #XXX". 10 | * If your PR does not apply to the current or all versions of Kubernetes, 11 | or the master branch of Wardroom, list which versions of Kubernetes the 12 | PR applies to, and make sure your PR targets the branch representing the 13 | latest version of Kubernetes your PR applies to. 14 | * Once you submit a PR, *please do not rebase it*. It's much easier to review if 15 | subsequent commits are new commits and/or merges. We squash rebase the final 16 | merged commit so the number of commits you have in the PR doesn't matter. 17 | * We expect that once a PR is opened, it will be actively worked on until it is 18 | merged or closed. We reserve the right to close PRs that are not making 19 | progress. This is generally defined as no changes for 7 days. Obviously PRs 20 | that are closed due to lack of activity can be reopened later. Closing stale 21 | PRs helps us to keep on top of all of the work currently in flight. 22 | 23 | ## PR review policy for maintainers 24 | 25 | * Typically we try to turn around reviews within one business day. 26 | * It is generally expected that a maintainer should review every PR. 27 | * It is also generally expected that a "domain expert" for the code the PR 28 | touches should review the PR. This person does not necessarily need to have 29 | commit access. 30 | * Anyone is welcome to review any PR that they want, whether they are a 31 | maintainer or not. 32 | * Please **clean up the title and body** before merging. By default, GitHub 33 | fills the squash merge title with the original title, and the commit body with 34 | every individual commit from the PR. The maintainer doing the merge should 35 | make sure the title follows the guidelines above and should overwrite the body 36 | with the original extended description from the PR (cleaning it up if 37 | necessary) while preserving the PR author's final DCO sign-off. 38 | 39 | ## DCO Sign off 40 | 41 | All authors to the project retain copyright to their work. However, to ensure 42 | that they are only submitting work that they have rights to, we are requiring 43 | everyone to acknowledge this by signing their work. 44 | 45 | Any copyright notices in this repo should specify the authors as "The 46 | project authors".
47 | 48 | To sign your work, just add a line like this at the end of your commit message: 49 | 50 | ``` 51 | Signed-off-by: Joe Beda 52 | ``` 53 | 54 | This can easily be done with the `--signoff` option to `git commit`. 55 | 56 | By doing this you state that you can certify the following (from 57 | https://developercertificate.org/): 58 | 59 | ``` 60 | Developer Certificate of Origin 61 | Version 1.1 62 | 63 | Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 64 | 1 Letterman Drive 65 | Suite D4700 66 | San Francisco, CA, 94129 67 | 68 | Everyone is permitted to copy and distribute verbatim copies of this 69 | license document, but changing it is not allowed. 70 | 71 | 72 | Developer's Certificate of Origin 1.1 73 | 74 | By making a contribution to this project, I certify that: 75 | 76 | (a) The contribution was created in whole or in part by me and I 77 | have the right to submit it under the open source license 78 | indicated in the file; or 79 | 80 | (b) The contribution is based upon previous work that, to the best 81 | of my knowledge, is covered under an appropriate open source 82 | license and I have the right under that license to submit that 83 | work with modifications, whether created in whole or in part 84 | by me, under the same open source license (unless I am 85 | permitted to submit under a different license), as indicated 86 | in the file; or 87 | 88 | (c) The contribution was provided directly to me by some other 89 | person who certified (a), (b) or (c) and I have not modified 90 | it. 91 | 92 | (d) I understand and agree that this project and the contribution 93 | are public and that a record of the contribution (including all 94 | personal information I submit with it, including my sign-off) is 95 | maintained indefinitely and may be redistributed consistent with 96 | this project or the open source license(s) involved. 97 | ``` 98 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 
29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. 
Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # wardroom 2 | 3 | Wardroom provides tooling that helps simplify the deployment of a Kubernetes cluster. More 4 | specifically, Wardroom provides the following functionality: 5 | 6 | * **Image Building**: Building of Kubernetes-ready base operating system images using Packer and Ansible. 7 | * **Deployment Orchestration**: Ansible-based orchestration to deploy highly-available Kubernetes 8 | clusters using kubeadm. 9 | 10 | Both use cases share a common set of [Ansible](https://github.com/ansible/ansible) roles that can 11 | be found in the [ansible](./ansible) directory. 12 | 13 | ## Image Building 14 | 15 | Wardroom leverages [Packer](https://github.com/hashicorp/packer) to build golden images of 16 | Kubernetes deployments across a wide variety of operating systems as well as image formats. During 17 | the build phase, Wardroom leverages [Ansible](https://github.com/ansible/ansible) to configure the 18 | base operating system and produce the Kubernetes-ready golden image. 19 | 20 | This functionality is used to create base images for the Heptio 21 | [aws-quickstart](https://github.com/heptio/aws-quickstart). 22 | 23 | ### Supported Image Formats 24 | 25 | * AMI 26 | 27 | ### Supported Operating Systems 28 | 29 | * Ubuntu 16.04 (Xenial) 30 | * Ubuntu 18.04 (Bionic) 31 | * CentOS 7 32 | 33 | ## Deployment Orchestration 34 | 35 | The [swizzle](./swizzle) directory contains an Ansible playbook that can be used to orchestrate the 36 | deployment of a Kubernetes cluster using kubeadm. 37 | 38 | ## Documentation 39 | 40 | Documentation and usage information can be found in the [docs](./docs) directory. 41 | 42 | ## Contributing 43 | 44 | See our [contributing](CONTRIBUTING.md) guidelines and our [code of conduct](CODE-OF-CONDUCT.md). 45 | Contributions are welcome from all. 46 | 47 | ## Development 48 | 49 | [Vagrant](https://www.vagrantup.com/) may be used to test local Ansible playbook development. In this scenario, Vagrant makes use of the Ansible provisioner to configure the resulting operating system image. To test all operating systems simultaneously: 50 | 51 | ``` bash 52 | vagrant up 53 | ``` 54 | 55 | You may also selectively test a single operating system as such: 56 | 57 | ``` bash 58 | vagrant up [xenial|bionic|centos7] 59 | ``` 60 | 61 | To enable verbose Ansible logging, set the `WARDROOM_VERBOSE` environment variable to `'vvvv'`. 62 | 63 | The default Vagrant provider is VirtualBox, but other providers are possible by way of the vagrant-mutate plugin. 64 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing.
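# The "ansible" provisioner block below reads WARDROOM_VERBOSE from the
# environment, so a verbose local test run looks like this (illustrative
# invocation, not a shipped script):
#
#   WARDROOM_VERBOSE=vvvv vagrant up xenial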
8 | 9 | Vagrant.configure("2") do |config| 10 | 11 | config.vm.define "xenial" do |conf| 12 | conf.vm.box = "generic/ubuntu1604" 13 | conf.vm.provider "virtualbox" do |v| 14 | v.customize ["modifyvm", :id, "--uartmode1", "disconnected"] 15 | end 16 | end 17 | 18 | config.vm.define "bionic" do |conf| 19 | conf.vm.box = "generic/ubuntu1804" 20 | conf.vm.provider "virtualbox" do |v| 21 | v.customize ["modifyvm", :id, "--uartmode1", "disconnected"] 22 | end 23 | end 24 | 25 | config.vm.define "centos7" do |conf| 26 | conf.vm.box = "centos/7" 27 | end 28 | 29 | config.vm.provision "ansible" do |ansible| 30 | ansible.playbook = "ansible/playbook.yml" 31 | ansible.verbose = ENV['WARDROOM_VERBOSE'] || false 32 | end 33 | 34 | end 35 | -------------------------------------------------------------------------------- /ansible/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | remote_tmp = /tmp/.ansible/ 3 | filter_plugins = ./filter_plugins 4 | retry_files_enabled = False 5 | -------------------------------------------------------------------------------- /ansible/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook is used by the packer build to create Kubernetes-ready OS images. 3 | - import_playbook: pre.yml 4 | 5 | - name: build image 6 | hosts: all 7 | become: yes 8 | roles: 9 | - role: common 10 | - role: docker 11 | - role: kubernetes 12 | - role: providers 13 | - role: packer-cleanup 14 | when: packer_build_name is defined 15 | -------------------------------------------------------------------------------- /ansible/pre.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: false 4 | become: yes 5 | pre_tasks: 6 | - name: install python 7 | raw: bash -c "export DEBIAN_FRONTEND=noninteractive; if grep -qi debian /etc/os-release && [ !
-e /usr/bin/python ]; then apt -qqy update; apt install -qqy python python-pip; fi;" 8 | register: output 9 | changed_when: output.stdout != "" 10 | retries: 3 11 | delay: 3 12 | until: output.rc == 0 13 | -------------------------------------------------------------------------------- /ansible/roles/common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | common_enable: True 3 | common_upgrade_base: False 4 | common_rpms: 5 | - yum-utils 6 | - python2-pip 7 | - python2-requests 8 | - ebtables 9 | - socat 10 | - ntp 11 | - jq 12 | - nfs-utils 13 | - cloud-utils 14 | - bind-utils 15 | - firewalld 16 | common_extra_rpms: [] 17 | common_debs: 18 | - openssh-client 19 | - openssh-server 20 | - apt-transport-https 21 | - python-pip 22 | - python-requests 23 | - ebtables 24 | - socat 25 | - ntp 26 | - jq 27 | - nfs-client 28 | - cloud-utils 29 | - dnsutils 30 | common_extra_debs: [] 31 | common_redhat_epel_rpm: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm" 32 | -------------------------------------------------------------------------------- /ansible/roles/common/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: The Wardroom Authors 4 | -------------------------------------------------------------------------------- /ansible/roles/common/tasks/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: update apt cache 3 | apt: 4 | update_cache: True 5 | cache_valid_time: 3600 6 | 7 | - name: perform a dist-upgrade 8 | apt: 9 | upgrade: dist 10 | when: common_upgrade_base | bool 11 | 12 | - name: install baseline dependencies 13 | apt: 14 | name: "{{ common_debs }}" 15 | state: latest 16 | 17 | - name: install extra debs 18 | apt: 19 | name: "{{ common_extra_debs }}" 20 | state: latest 21 | 22 | - name: install specific version of urllib3 23 | pip: 24 | name: urllib3==1.23 25 | when: ansible_distribution_version == "16.04" 26 | -------------------------------------------------------------------------------- /ansible/roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: debian.yml 3 | when: ansible_os_family == "Debian" 4 | 5 | - import_tasks: redhat.yml 6 | when: ansible_os_family == "RedHat" 7 | -------------------------------------------------------------------------------- /ansible/roles/common/tasks/redhat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add epel repo 3 | yum: 4 | name: "{{ common_redhat_epel_rpm }}" 5 | when: ansible_distribution != "Fedora" 6 | 7 | - name: perform a yum update 8 | yum: 9 | name: '*' 10 | state: latest 11 | when: common_upgrade_base | bool 12 | 13 | - name: install baseline dependencies 14 | yum: 15 | name: "{{ common_rpms }}" 16 | 17 | - name: install extra rpms 18 | yum: 19 | name: "{{ common_extra_rpms }}" 20 | 21 | - name: load br_netfilter kernel module 22 | modprobe: 23 | name: br_netfilter 24 | state: present 25 | 26 | - name: ensure firewalld is running 27 | systemd: 28 | name: firewalld 29 | daemon_reload: True 30 | enabled: yes 31 | state: started 32 | -------------------------------------------------------------------------------- /ansible/roles/common/vars/main.yml: -------------------------------------------------------------------------------- 1 | 
--- 2 | common_upgrade_base_defined: "{{ common is defined and 'upgrade_base' in common }}" -------------------------------------------------------------------------------- /ansible/roles/docker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | docker_enable: True 3 | docker_debian_version: '5:18.09.5~3-0~ubuntu-{{ ansible_distribution_release | lower }}' 4 | docker_redhat_version: '18.09.6-3*' 5 | docker_logging_max_size: 100m 6 | docker_channel: stable 7 | docker_distribution_release_version: "{{ ansible_distribution_major_version }}" 8 | -------------------------------------------------------------------------------- /ansible/roles/docker/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart docker 3 | systemd: 4 | name: docker 5 | daemon_reload: true 6 | state: restarted 7 | -------------------------------------------------------------------------------- /ansible/roles/docker/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | author: The Wardroom Authors 4 | -------------------------------------------------------------------------------- /ansible/roles/docker/tasks/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add docker apt key 3 | apt_key: 4 | url: "https://download.docker.com/linux/{{ ansible_distribution|lower}}/gpg" 5 | id: 0EBFCD88 6 | register: apt_key_result 7 | until: apt_key_result is success 8 | retries: 5 9 | delay: 5 10 | 11 | - name: add docker apt repository 12 | apt_repository: 13 | repo: "deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution|lower}} {{ ansible_distribution_release|lower }} stable" 14 | update_cache: true 15 | register: apt_repository_result 16 | until: apt_repository_result is success 17 | retries: 5 18 | delay: 5 19 | 20 | - name: install docker 21 | apt: 22 | name: "docker-ce={{ docker_debian_version }}" 23 | update_cache: True 24 | cache_valid_time: 60 25 | state: present 26 | register: apt_result 27 | until: apt_result is success 28 | retries: 5 29 | delay: 5 30 | -------------------------------------------------------------------------------- /ansible/roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_tasks: debian.yml 3 | when: ansible_os_family == "Debian" 4 | 5 | - import_tasks: redhat.yml 6 | when: ansible_os_family == "RedHat" 7 | 8 | - name: create docker directory 9 | file: 10 | dest: /etc/docker 11 | state: directory 12 | 13 | - name: configure the docker daemon 14 | template: 15 | dest: /etc/docker/daemon.json 16 | src: etc/docker/daemon.json 17 | notify: 18 | - restart docker 19 | 20 | - meta: flush_handlers 21 | 22 | - name: create a service.d directory for config 23 | file: 24 | dest: /etc/systemd/system/docker.service.d 25 | state: directory 26 | -------------------------------------------------------------------------------- /ansible/roles/docker/tasks/redhat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set docker_os_distribution fact if RHEL family 3 | set_fact: docker_os_distribution="centos" 4 | when: ansible_distribution != "Fedora" 5 | 6 | - name: Set docker_os_distribution fact if Fedora 7 | set_fact: docker_os_distribution="fedora" 8 | when: ansible_distribution == "Fedora" 9 | 10 | - name: add 
docker repo 11 | yum_repository: 12 | name: docker 13 | description: Docker YUM repo 14 | baseurl: "https://download.docker.com/linux/{{docker_os_distribution}}/{{docker_distribution_release_version}}/$basearch/{{docker_channel}}/" 15 | gpgkey: "https://download.docker.com/linux/{{docker_os_distribution}}/gpg" 16 | gpgcheck: true 17 | state: present 18 | 19 | - name: install Docker binaries 20 | package: 21 | name: "docker-ce-{{docker_redhat_version}}" 22 | 23 | - name: start docker service 24 | service: 25 | name: docker 26 | enabled: true 27 | state: started 28 | -------------------------------------------------------------------------------- /ansible/roles/docker/templates/etc/docker/daemon.json: -------------------------------------------------------------------------------- 1 | { 2 | "exec-opts": ["native.cgroupdriver={{ 'systemd' if ansible_os_family|lower == 'redhat' else 'cgroupfs'|safe }}"], 3 | "log-driver": "json-file", 4 | "log-opts": { 5 | "max-size": "{{ docker_logging_max_size }}" 6 | }, 7 | "storage-driver": "overlay2"{% if ansible_os_family|lower == "redhat" %}, 8 | "storage-opts": [ 9 | "overlay2.override_kernel_check=true" 10 | ] 11 | {% endif %} 12 | } 13 | -------------------------------------------------------------------------------- /ansible/roles/docker/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | docker_debian_version_defined: "{{ docker is defined and 'debian' in docker and 'version' in docker.debian }}" 3 | docker_redhat_version_defined: "{{ docker is defined and 'redhat' in docker and 'version' in docker.redhat }}" -------------------------------------------------------------------------------- /ansible/roles/etcd/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | etcd_enable: True 3 | etcd_cluster_token: 444fddcc-beae-45bc-9da6-d941d446b595 4 | etcd_interface: eth0 5 | etcd_version: v3.2.10 6 | -------------------------------------------------------------------------------- /ansible/roles/etcd/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | -------------------------------------------------------------------------------- /ansible/roles/etcd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: download and extract etcd binaries 3 | unarchive: 4 | remote_src: True 5 | src: "{{ etcd_release_url }}" 6 | dest: /tmp 7 | creates: /usr/local/bin/etcd 8 | register: etcd_downloaded 9 | 10 | - name: move binaries into path 11 | copy: 12 | remote_src: True 13 | src: "/tmp/etcd-{{ etcd_version }}-linux-amd64/{{ item }}" 14 | dest: "/usr/local/bin/{{ item }}" 15 | with_items: 16 | - etcd 17 | - etcdctl 18 | when: etcd_downloaded is changed 19 | 20 | - name: set permissions on etcd binaries 21 | file: 22 | dest: "/usr/local/bin/{{ item }}" 23 | mode: 0755 24 | state: file 25 | with_items: 26 | - etcd 27 | - etcdctl 28 | 29 | - name: create data directory 30 | file: 31 | dest: /var/lib/etcd 32 | state: directory 33 | 34 | - name: open etcd ports 35 | firewalld: 36 | port: "{{ item }}" 37 | permanent: yes 38 | state: enabled 39 | immediate: True 40 | with_items: 41 | - "2379/tcp" 42 | - "2380/tcp" 43 | when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' 44 | 45 | - name: etcd systemd template 46 | template: 47 | src: etc/systemd/system/etcd.service 48 | dest: 
/etc/systemd/system/etcd.service 49 | tags: 50 | - etcd2 51 | 52 | - name: enable and start the service 53 | systemd: 54 | name: etcd 55 | daemon_reload: True 56 | state: started 57 | enabled: True 58 | -------------------------------------------------------------------------------- /ansible/roles/etcd/templates/etc/systemd/system/etcd.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=etcd 3 | Documentation=https://github.com/coreos/etcd 4 | Conflicts=etcd.service 5 | Conflicts=etcd2.service 6 | 7 | [Service] 8 | Type=notify 9 | Restart=always 10 | RestartSec=5s 11 | LimitNOFILE=40000 12 | TimeoutStartSec=0 13 | 14 | ExecStart=/usr/local/bin/etcd \ 15 | --name={{ inventory_hostname }} \ 16 | --data-dir=/var/lib/etcd \ 17 | --listen-client-urls=http://{{hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address']}}:2379 \ 18 | --advertise-client-urls=http://{{hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address']}}:2379 \ 19 | --listen-peer-urls=http://{{hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address']}}:2380 \ 20 | --initial-advertise-peer-urls=http://{{hostvars[inventory_hostname]['ansible_' + etcd_interface]['ipv4']['address']}}:2380 \ 21 | --initial-cluster={{ etcd_cluster_endpoints }} \ 22 | --initial-cluster-token={{ etcd_cluster_token }} \ 23 | --initial-cluster-state=new 24 | 25 | [Install] 26 | WantedBy=multi-user.target 27 | -------------------------------------------------------------------------------- /ansible/roles/etcd/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | etcd_release_url: "https://github.com/coreos/etcd/releases/download/{{ etcd_version }}/etcd-{{ etcd_version }}-linux-amd64.tar.gz" 3 | etcd_client_endpoints: "{{ groups['etcd']|map('extract', hostvars, ['ansible_' + etcd_interface, 'ipv4', 'address'])|map('regex_replace', '^(.*)$', 'http://\\1:2379')|list|sort }}" 4 | etcd_cluster_endpoints: "{% for host in groups['etcd']|sort %}{{hostvars[host]['inventory_hostname']}}=http://{{hostvars[host]['ansible_' + etcd_interface]['ipv4']['address']}}:2380{% if not loop.last %},{% endif %}{% endfor %}" 5 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-cni/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubernetes_cni_plugin: calico 3 | 4 | kubernetes_cni_calico_rbac_url: "https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml" 5 | kubernetes_cni_calico_manifest_url: "https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml" 6 | kubernetes_cni_calico_install_calicoctl: False 7 | kubernetes_cni_calico_calicoctl_url: "https://github.com/projectcalico/calicoctl/releases/download/v3.4.0/calicoctl" 8 | kubernetes_cni_calico_manifest_mods: [] 9 | 10 | kubernetes_cni_flannel_manifest_url: "https://raw.githubusercontent.com/coreos/flannel/v0.10.0/Documentation/kube-flannel.yml" 11 | 12 | kubernetes_cni_canal_rbac_manifest_url: "https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/canal/rbac.yaml" 13 | kubernetes_cni_canal_manifest_url: "https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/canal/canal.yaml" 14 | kubernetes_cni_canal_manifest_mods: [] 15 | 
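# tasks/main.yml dispatches on kubernetes_cni_plugin, which accepts calico,
# flannel, or canal; the *_manifest_mods lists are handed as rules to the
# bundled modify_manifest module (the role installs pyyaml and jsonpath-ng
# for it). A minimal extra-vars sketch for switching plugins (illustrative
# value, not a shipped example):
#
#   kubernetes_cni_plugin: flannel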
-------------------------------------------------------------------------------- /ansible/roles/kubernetes-cni/tasks/calico.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: pull manifest and apply any modifications 3 | run_once: True 4 | delegate_to: "{{ groups['masters'][0] }}" 5 | modify_manifest: 6 | manifest_url: "{{ kubernetes_cni_calico_manifest_url }}" 7 | output_path: /tmp/calico.yml 8 | rules: "{{ kubernetes_cni_calico_manifest_mods }}" 9 | 10 | - name: install CNI networking 11 | command: "/usr/bin/kubectl apply -f {{ item }}" 12 | run_once: True 13 | delegate_to: "{{ groups['masters'][0] }}" 14 | with_items: 15 | - "{{ kubernetes_cni_calico_rbac_url }}" 16 | - /tmp/calico.yml 17 | environment: 18 | KUBECONFIG: "/etc/kubernetes/admin.conf" 19 | 20 | - name: install calicoctl 21 | get_url: 22 | url: "{{ kubernetes_cni_calico_calicoctl_url }}" 23 | dest: /usr/bin/calicoctl 24 | mode: 0755 25 | retries: 10 26 | delay: 5 27 | when: kubernetes_cni_calico_install_calicoctl is defined and kubernetes_cni_calico_install_calicoctl|bool == True 28 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-cni/tasks/canal.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: pull manifest and apply any modifications 3 | run_once: True 4 | delegate_to: "{{ groups['masters'][0] }}" 5 | modify_manifest: 6 | manifest_url: "{{ kubernetes_cni_canal_manifest_url }}" 7 | output_path: /tmp/canal.yml 8 | rules: "{{ kubernetes_cni_canal_manifest_mods }}" 9 | 10 | - name: install CNI rbac 11 | command: "/usr/bin/kubectl apply -f {{ item }}" 12 | run_once: True 13 | delegate_to: "{{ groups['masters'][0] }}" 14 | retries: 10 15 | delay: 5 16 | when: (item is defined) and (item != "") and (item is not none) 17 | with_items: 18 | - "{{ kubernetes_cni_canal_rbac_manifest_url }}" 19 | - /tmp/canal.yml 20 | environment: 21 | KUBECONFIG: "/etc/kubernetes/admin.conf" 22 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-cni/tasks/flannel.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install CNI networking 3 | command: "/usr/bin/kubectl apply -f {{ kubernetes_cni_flannel_manifest_url }}" 4 | run_once: True 5 | delegate_to: "{{ groups['primary_master']|first }}" 6 | retries: 10 7 | delay: 5 8 | environment: 9 | KUBECONFIG: "/etc/kubernetes/admin.conf" 10 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-cni/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - pip: 3 | name: 4 | - pyyaml 5 | - jsonpath-ng 6 | 7 | - include_tasks: calico.yml 8 | when: kubernetes_cni_plugin == "calico" 9 | 10 | - include_tasks: flannel.yml 11 | when: kubernetes_cni_plugin == "flannel" 12 | 13 | - include_tasks: canal.yml 14 | when: kubernetes_cni_plugin == "canal" 15 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-common/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubernetes_common_disable_swap: True 3 | kubernetes_common_manage_etc_hosts: False 4 | kubernetes_common_api_fqdn: k8s.example.com 5 | kubernetes_common_api_ip: 10.10.10.3 6 | kubernetes_common_primary_interface: eth0 7 | 8 | # kubelet_extra_args is a dict of
arg:value (ie. 'node-ip: 1.1.1.1' for '--node-ip=1.1.1.1') 9 | kubernetes_common_kubelet_extra_args: {} 10 | kubernetes_common_kubelet_env_vars: {} 11 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart kubelet 3 | systemd: 4 | name: kubelet 5 | daemon_reload: True 6 | state: restarted 7 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: comment out any swap in fstab 3 | lineinfile: 4 | path: '/etc/fstab' 5 | regexp: '^(.*\s+none\s+swap.*)' 6 | line: '# removed by wardroom \1' 7 | backrefs: yes 8 | when: kubernetes_common_disable_swap|bool == True 9 | 10 | - name: disable swap 11 | command: swapoff -a 12 | when: kubernetes_common_disable_swap|bool == True 13 | 14 | - name: update /etc/hosts to include cluster fqdn 15 | lineinfile: 16 | dest: /etc/hosts 17 | line: "{{ kubernetes_common_api_ip }} {{ kubernetes_common_api_fqdn }}" 18 | state: present 19 | when: kubernetes_common_manage_etc_hosts and kubernetes_common_api_ip is defined and kubernetes_common_api_fqdn is defined 20 | 21 | - name: set kubernetes_node_ip fact 22 | set_fact: 23 | kubernetes_node_ip: "{{ hostvars[inventory_hostname]['ansible_'~item]['ipv4']['address'] }}" 24 | with_items: 25 | - "{{ kubernetes_common_primary_interface }}" 26 | when: kubernetes_common_primary_interface is defined 27 | 28 | - name: create kubelet config directory 29 | file: 30 | dest: /etc/systemd/system/kubelet.service.d 31 | state: directory 32 | 33 | - name: drop extra args kubelet config 34 | template: 35 | backup: True 36 | dest: "/etc/{{ 'default' if ansible_os_family == 'Debian' else 'sysconfig' }}/kubelet" 37 | src: etc/default/kubelet 38 | notify: 39 | - restart kubelet 40 | when: kubernetes_common_primary_interface is defined or kubernetes_common_kubelet_extra_args is defined or kubernetes_common_kubelet_env_vars is defined 41 | 42 | - name: delete old kubelet extra args unit file 43 | file: 44 | dest: /etc/systemd/system/kubelet.service.d/09-extra-args.conf 45 | state: absent 46 | notify: 47 | - restart kubelet 48 | 49 | - name: ensure firewalld is running 50 | systemd: 51 | name: firewalld 52 | state: started 53 | when: ansible_os_family == "RedHat" 54 | 55 | - name: open kubelet ports 56 | firewalld: 57 | port: "{{ item }}" 58 | permanent: yes 59 | state: enabled 60 | immediate: True 61 | with_items: 62 | - "30000-32767/tcp" 63 | - "10250-10255/tcp" 64 | when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' 65 | 66 | - name: flush handlers 67 | meta: flush_handlers 68 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-common/templates/etc/default/kubelet: -------------------------------------------------------------------------------- 1 | KUBELET_EXTRA_ARGS={% if kubernetes_common_primary_interface is defined %} --node-ip={{kubernetes_node_ip}}{% endif %}{% for k, v in kubernetes_common_kubelet_extra_args.items() %} --{{k}}='{{v}}'{%- endfor %} 2 | 3 | {% for k, v in kubernetes_common_kubelet_env_vars.items() %} 4 | {{k}}='{{v}}' 5 | {% endfor %} -------------------------------------------------------------------------------- /ansible/roles/kubernetes-master/defaults/main.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | kubernetes_common_kubeadm_config_clusterconfiguration: 3 | apiVersion: kubeadm.k8s.io/v1beta1 4 | kind: ClusterConfiguration 5 | kubernetesVersion: 1.15.3 6 | controlPlaneEndpoint: "{{ kubernetes_common_api_fqdn }}" 7 | apiServer: 8 | # certSANs: "{{ kubernetes_common_api_ip | kube_lookup_hostname(kubernetes_common_api_fqdn, True) }}" # Otherwise can't override with merge 9 | extraArgs: 10 | "endpoint-reconciler-type": "lease" 11 | etcd: 12 | external: 13 | endpoints: "{{ etcd_client_endpoints }}" 14 | 15 | kubernetes_common_kubeadm_config_kubeletconfiguration: 16 | apiVersion: kubelet.config.k8s.io/v1beta1 17 | kind: KubeletConfiguration 18 | 19 | kubernetes_common_kubeadm_config_initconfiguration: 20 | apiVersion: kubeadm.k8s.io/v1beta1 21 | kind: InitConfiguration 22 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-master/filter_plugins/kube_master.py: -------------------------------------------------------------------------------- 1 | import re 2 | import socket 3 | 4 | 5 | class FilterModule(object): 6 | 7 | def filters(self): 8 | return { 9 | 'kube_lookup_hostname': self.kube_lookup_hostname, 10 | } 11 | 12 | def kube_lookup_hostname(self, ip, hostname, many=False): 13 | ips = set() 14 | 15 | ip = ip.split(':')[0] 16 | if ip and ip != "": 17 | if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip): 18 | ips.add(ip) 19 | try: 20 | (_, _, iplist) = socket.gethostbyname_ex(hostname) 21 | ips |= set(iplist) 22 | except socket.error as e: 23 | pass 24 | 25 | if many: 26 | ips.add(hostname) 27 | return sorted(list(ips)) 28 | else: 29 | return sorted(list(ips))[0] 30 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-master/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart kubelet 3 | systemd: 4 | name: kubelet 5 | daemon_reload: True 6 | state: restarted 7 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-master/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: kubernetes-common } 4 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-master/tasks/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: run kubeadm init on primary master 3 | shell: "/usr/bin/kubeadm init --config=/etc/kubernetes/kubeadm.conf --ignore-preflight-errors=all" 4 | delegate_to: "{{ groups['primary_master']|first }}" 5 | run_once: true 6 | when: kubeadm_apiserver_manifest.stat.exists == False 7 | 8 | - name: slurp the ca certificate and key 9 | slurp: 10 | src: "/etc/kubernetes/{{ item }}" 11 | with_items: 12 | - pki/apiserver.crt 13 | - pki/apiserver.key 14 | - pki/apiserver-kubelet-client.crt 15 | - pki/apiserver-kubelet-client.key 16 | - pki/ca.crt 17 | - pki/ca.key 18 | - pki/front-proxy-ca.crt 19 | - pki/front-proxy-ca.key 20 | - pki/front-proxy-client.crt 21 | - pki/front-proxy-client.key 22 | - pki/sa.key 23 | - pki/sa.pub 24 | register: kube_pki 25 | delegate_to: "{{ groups['primary_master']|first }}" 26 | run_once: true 27 | 28 | - name: create kubernetes pki directory 29 | file: 30 | dest: /etc/kubernetes/pki/ 31 | state: directory 32 | owner: root 33 | group: root 
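# slurp returns each file's content base64-encoded, which is why the copy
# task below runs it through b64decode; together these tasks replicate the
# primary master's PKI onto the secondary masters before kubeadm init runs there.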
34 | 35 | - name: add kube pki assets 36 | no_log: True 37 | copy: 38 | dest: "{{ item.source }}" 39 | content: "{{ item.content | b64decode }}" 40 | owner: root 41 | group: root 42 | mode: 0700 43 | with_items: "{{ kube_pki.results }}" 44 | when: inventory_hostname != groups['primary_master']|first 45 | 46 | - name: initialize secondary masters 47 | command: "/usr/bin/kubeadm init --config=/etc/kubernetes/kubeadm.conf --ignore-preflight-errors=all" 48 | when: kubeadm_apiserver_manifest.stat.exists == False and inventory_hostname != groups['primary_master']|first 49 | 50 | - name: Update kubelet config if already running 51 | command: "kubeadm init phase kubelet-start --config /etc/kubernetes/kubeadm.conf" 52 | when: kubeadm_apiserver_manifest.stat.exists == True 53 | 54 | - name: Update control plane manifests if already running 55 | command: "kubeadm init phase control-plane all --config /etc/kubernetes/kubeadm.conf" 56 | when: kubeadm_apiserver_manifest.stat.exists == True 57 | 58 | - name: Upload cluster configmaps 59 | command: "kubeadm init phase upload-config all --config /etc/kubernetes/kubeadm.conf" 60 | retries: 5 61 | delay: 3 62 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-master/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: kubernetes_master_kubeadm_config parameter obsoleted 3 | fail: 4 | msg: | 5 | kubernetes_master_kubeadm_config has been moved. It is now located at kubernetes_common_kubeadm_config. 6 | Please update your inventories. 7 | when: kubernetes_master_kubeadm_config is defined 8 | 9 | - name: drop kubeadm template 10 | template: 11 | src: etc/kubernetes/kubeadm.conf 12 | dest: /etc/kubernetes/kubeadm.conf 13 | 14 | - name: determine whether kubeadm needs to be run 15 | stat: 16 | path: /etc/kubernetes/manifests/kube-apiserver.yaml 17 | register: kubeadm_apiserver_manifest 18 | 19 | - name: open api server port 20 | firewalld: 21 | port: 6443/tcp 22 | permanent: yes 23 | state: enabled 24 | immediate: True 25 | when: ansible_distribution == 'CentOS' or ansible_distribution == 'Red Hat Enterprise Linux' 26 | 27 | - include_tasks: install.yml 28 | when: wardroom_action == 'install' 29 | 30 | - include_tasks: upgrade.yml 31 | when: wardroom_action == 'upgrade' 32 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-master/tasks/upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: update kubelet config 3 | command: "kubeadm init phase kubelet-start --config /etc/kubernetes/kubeadm.conf" 4 | retries: 5 5 | delay: 3 6 | 7 | - name: update new control plane manifests 8 | command: "kubeadm init phase control-plane all --config /etc/kubernetes/kubeadm.conf" 9 | retries: 5 10 | delay: 3 11 | 12 | - name: wait for local apiserver to be ready 13 | uri: 14 | url: "https://127.0.0.1:6443/healthz" 15 | status_code: 200 16 | return_content: True 17 | validate_certs: no 18 | register: result 19 | until: result.status == 200 and result.content == "ok" 20 | retries: 60 21 | delay: 1 22 | run_once: True 23 | delegate_to: "{{ groups['primary_master']|first }}" 24 | 25 | - name: upload cluster configmaps 26 | command: "kubeadm init phase upload-config all --config /etc/kubernetes/kubeadm.conf" 27 | retries: 5 28 | delay: 3 29 | 30 | - name: add all of the kubernetes add-ons 31 | command: "kubeadm init phase addon all --config
/etc/kubernetes/kubeadm.conf" 32 | delegate_to: "{{ groups['primary_master']|first }}" 33 | run_once: True 34 | retries: 5 35 | delay: 3 36 | 37 | - name: check to see whether kube-dns is present 38 | command: "kubectl get deploy -n kube-system kube-dns" 39 | run_once: True 40 | delegate_to: "{{ groups['primary_master']|first }}" 41 | register: get_kube_dns 42 | ignore_errors: True 43 | 44 | - name: remove the kube-dns deployment 45 | command: "kubectl delete deploy -n kube-system kube-dns" 46 | run_once: True 47 | delegate_to: "{{ groups['primary_master']|first }}" 48 | when: get_kube_dns.rc == 0 49 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-master/templates/etc/kubernetes/kubeadm.conf: -------------------------------------------------------------------------------- 1 | --- 2 | {{ kubernetes_common_kubeadm_config_clusterconfiguration|to_nice_yaml(indent=2) }} 3 | --- 4 | {{ kubernetes_common_kubeadm_config_kubeletconfiguration|to_nice_yaml(indent=2) }} 5 | --- 6 | {{ kubernetes_common_kubeadm_config_initconfiguration|to_nice_yaml(indent=2) }} 7 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-node/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubernetes_common_kubeadm_config_initconfiguration: 3 | apiVersion: kubeadm.k8s.io/v1beta1 4 | kind: InitConfiguration 5 | 6 | kubernetes_common_kubeadm_config_joinconfiguration: 7 | apiVersion: kubeadm.k8s.io/v1beta1 8 | kind: JoinConfiguration 9 | discovery: 10 | bootstrapToken: 11 | token: "{{ hostvars[groups['primary_master'][0]]['generated_token']['stdout'] }}" 12 | apiServerEndpoint: "{{ kubernetes_common_api_fqdn }}:6443" 13 | unsafeSkipCAVerification: true 14 | nodeRegistration: 15 | kubeletExtraArgs: 16 | feature-gates: TTLAfterFinished=true 17 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-node/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | dependencies: 3 | - { role: kubernetes-common } 4 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-node/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | #- name: check whether we have joined this node 3 | # shell: "/usr/bin/docker ps | /bin/grep kube-proxy" 4 | # register: kube_proxy_running 5 | # ignore_errors: True 6 | 7 | - name: update kubeadm template 8 | template: 9 | src: etc/kubernetes/kubeadm.conf 10 | dest: /etc/kubernetes/kubeadm.conf 11 | 12 | # Note - always run this; it updates the kubeadm and kubelet config with any changes 13 | # Note - it would be nice to run this only when the kubeadm template has changed, but the template always changes because it contains the join token 14 | - name: join nodes to masters by fqdn 15 | command: "/usr/bin/kubeadm join phase kubelet-start --config /etc/kubernetes/kubeadm.conf" 16 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-node/templates/etc/kubernetes/kubeadm.conf: -------------------------------------------------------------------------------- 1 | --- 2 | {{ kubernetes_common_kubeadm_config_initconfiguration|to_nice_yaml(indent=2) }} 3 | --- 4 | {{ kubernetes_common_kubeadm_config_joinconfiguration|to_nice_yaml(indent=2) }} 5 |
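{# Rendered result (illustrative): two YAML documents, an InitConfiguration
   followed by a JoinConfiguration carrying the generated bootstrap token and
   the API server endpoint "<kubernetes_common_api_fqdn>:6443". #}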
-------------------------------------------------------------------------------- /ansible/roles/kubernetes-user/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubernetes_users: 3 | - { user: vagrant, group: vagrant, home: /home/vagrant } 4 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes-user/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: remove existing .kube directories in user's home 3 | file: 4 | path: "{{ item.home }}/.kube" 5 | state: absent 6 | with_items: "{{ kubernetes_users }}" 7 | 8 | - name: create .kube directories in user's home 9 | file: 10 | path: "{{ item.home }}/.kube" 11 | state: directory 12 | owner: "{{ item.user }}" 13 | group: "{{ item.group }}" 14 | mode: 0755 15 | with_items: "{{ kubernetes_users }}" 16 | 17 | - name: setup the kubeconfig 18 | copy: 19 | remote_src: True 20 | src: /etc/kubernetes/admin.conf 21 | dest: "{{ item.home }}/.kube/config" 22 | owner: "{{ item.user }}" 23 | group: "{{ item.group }}" 24 | mode: 0600 25 | with_items: "{{ kubernetes_users }}" 26 | 27 | - name: configure bash completion 28 | lineinfile: 29 | path: "{{ item.home }}/.bashrc" 30 | line: "source <(kubectl completion bash)" 31 | with_items: "{{ kubernetes_users }}" 32 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubernetes_version: '1.15.3-00' 3 | kubernetes_cni_version: '0.7.5-00' 4 | kubernetes_enable_cached_images: False 5 | kubernetes_cached_images: [] 6 | kubernetes_apt_key_url: 'https://packages.cloud.google.com/apt/doc/apt-key.gpg' 7 | kubernetes_apt_repo_string: "deb http://apt.kubernetes.io/ {{ ansible_distribution_release | kube_debian_distro_version }} main" 8 | kubernetes_yum_baseurl: 'https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64' 9 | kubernetes_yum_gpgkey: 'https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg' 10 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes/filter_plugins/kube.py: -------------------------------------------------------------------------------- 1 | import re 2 | 3 | 4 | class FilterModule(object): 5 | 6 | def filters(self): 7 | return { 8 | 'kube_platform_version': self.kube_platform_version, 9 | 'kube_debian_distro_version': self.kube_debian_distro_version, 10 | } 11 | 12 | def kube_platform_version(self, version, platform): 13 | match = re.match(r'(\d+\.\d+\.\d+)-(\d+)', version) 14 | if not match: 15 | raise Exception("Version '%s' does not appear to be a " 16 | "kubernetes version."
% version) 17 | sub = match.group(2) 18 | if len(sub) == 1: 19 | if platform.lower() == "debian": 20 | return "%s-%s" % (match.group(1), '{:02d}'.format(int(sub))) 21 | else: 22 | return version 23 | if len(sub) == 2: 24 | if platform.lower() == "redhat": 25 | return "%s-%s" % (match.group(1), int(sub)) 26 | else: 27 | return version 28 | 29 | raise Exception("Could not parse kubernetes version") 30 | 31 | def kube_debian_distro_version(self, distro): 32 | if distro.lower() in ("xenial", "bionic",): 33 | return "kubernetes-xenial" 34 | return "kubernetes-%s" % distro.lower() 35 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes/tasks/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add the kubernetes apt repo key 3 | apt_key: 4 | url: "{{ kubernetes_apt_key_url }}" 5 | state: present 6 | register: apt_key_result 7 | until: apt_key_result is success 8 | retries: 5 9 | delay: 5 10 | 11 | - name: add the kubernetes apt repo 12 | apt_repository: 13 | repo: "{{ kubernetes_apt_repo_string }}" 14 | update_cache: True 15 | state: present 16 | register: apt_repository_result 17 | until: apt_repository_result is success 18 | retries: 5 19 | delay: 5 20 | 21 | - name: install kubernetes packages 22 | apt: 23 | update_cache: True 24 | cache_valid_time: 60 25 | name: 26 | - "kubelet={{ kubernetes_version | kube_platform_version('debian') }}" 27 | - "kubeadm={{ kubernetes_version | kube_platform_version('debian') }}" 28 | - "kubectl={{ kubernetes_version | kube_platform_version('debian') }}" 29 | - "kubernetes-cni={{ kubernetes_cni_version | kube_platform_version('debian') }}" 30 | register: apt_result 31 | until: apt_result is success 32 | retries: 5 33 | delay: 5 34 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set ipv4 routing 3 | sysctl: 4 | name: net.bridge.bridge-nf-call-iptables 5 | value: 1 6 | sysctl_set: yes 7 | state: present 8 | reload: yes 9 | 10 | - import_tasks: debian.yml 11 | when: ansible_os_family == "Debian" 12 | 13 | - import_tasks: redhat.yml 14 | when: ansible_os_family == "RedHat" 15 | 16 | - name: ensure that the kubelet is running 17 | service: 18 | name: kubelet 19 | state: started 20 | enabled: true 21 | 22 | # Required by the docker_image ansible module 23 | - name: install docker-py 24 | pip: 25 | name: docker-py 26 | when: kubernetes_enable_cached_images|bool and kubernetes_cached_images|length > 0 27 | 28 | - name: cache docker images for kubeadm 29 | docker_image: 30 | name: "{{ item }}" 31 | with_items: "{{ kubernetes_cached_images }}" 32 | when: kubernetes_enable_cached_images|bool and kubernetes_cached_images|length > 0 33 | 34 | - name: set kubernetes version file 35 | template: 36 | dest: /etc/kubernetes_community_ami_version 37 | src: etc/kubernetes_community_ami_version 38 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes/tasks/redhat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add the kubernetes yum repo key 3 | yum_repository: 4 | name: kubernetes 5 | description: the kubernetes yum repo 6 | baseurl: "{{ kubernetes_yum_baseurl }}" 7 | gpgcheck: True 8 | gpgkey: "{{ kubernetes_yum_gpgkey }}" 9 | 10 | - name: install the kubernetes yum packages 11 |
yum: 12 | allow_downgrade: True 13 | name: 14 | - "kubelet-{{ kubernetes_version | kube_platform_version('redhat') }}" 15 | - "kubeadm-{{ kubernetes_version | kube_platform_version('redhat') }}" 16 | - "kubectl-{{ kubernetes_version | kube_platform_version('redhat') }}" 17 | - "kubernetes-cni-{{ kubernetes_cni_version | kube_platform_version('redhat') }}" 18 | register: yum_result 19 | until: yum_result is success 20 | retries: 5 21 | delay: 5 22 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes/templates/etc/kubernetes_community_ami_version: -------------------------------------------------------------------------------- 1 | v{{ kubernetes_version.split('-')[0] }} 2 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes/templates/etc/systemd/system/kubelet.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=kubelet: The Kubernetes Node Agent 3 | Documentation=http://kubernetes.io/docs/ 4 | 5 | [Service] 6 | ExecStart=/usr/bin/kubelet 7 | Restart=always 8 | StartLimitInterval=0 9 | RestartSec=10 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes/templates/etc/systemd/system/kubelet.service.d/10-kubeadm.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="KUBELET_KUBECONFIG_ARGS=--kubeconfig=/etc/kubernetes/kubelet.conf --require-kubeconfig=true" 3 | Environment="KUBELET_SYSTEM_PODS_ARGS=--pod-manifest-path=/etc/kubernetes/manifests --allow-privileged=true" 4 | Environment="KUBELET_NETWORK_ARGS=--network-plugin=cni --cni-conf-dir=/etc/cni/net.d --cni-bin-dir=/opt/cni/bin" 5 | Environment="KUBELET_DNS_ARGS=--cluster-dns=10.96.0.10 --cluster-domain=cluster.local" 6 | ExecStart= 7 | ExecStart=/usr/bin/kubelet $KUBELET_KUBECONFIG_ARGS $KUBELET_SYSTEM_PODS_ARGS $KUBELET_NETWORK_ARGS $KUBELET_DNS_ARGS $KUBELET_EXTRA_ARGS 8 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes/templates/etc/systemd/system/kubelet.service.d/20-cloud-provider.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | Environment="KUBELET_EXTRA_ARGS=--cloud-provider=aws" 3 | -------------------------------------------------------------------------------- /ansible/roles/kubernetes/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubernetes_version_defined: "{{ kubernetes is defined and 'version' in kubernetes }}" 3 | kubernetes_cni_version_defined: "{{ kubernetes is defined and 'cni_version' in kubernetes }}" 4 | kubernetes_enable_cached_images_defined: "{{ kubernetes is defined and 'enable_cached_images' in kubernetes }}" 5 | kubernetes_cached_images_defined: "{{ kubernetes is defined and 'cached_images' in kubernetes }}" 6 | kubernetes_apt_key_url_defined: "{{ kubernetes is defined and 'apt_key_url' in kubernetes }}" 7 | kubernetes_apt_repo_string_defined: "{{ kubernetes is defined and 'apt_repo_string' in kubernetes }}" 8 | kubernetes_yum_baseurl_defined: "{{ kubernetes is defined and 'yum_baseurl' in kubernetes }}" 9 | kubernetes_yum_gpgkey_defined: "{{ kubernetes is defined and 'yum_gpgkey' in kubernetes }}" -------------------------------------------------------------------------------- /ansible/roles/packer-cleanup/tasks/main.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | - name: cleanup packer artifacts 3 | file: 4 | state: absent 5 | path: "{{ item }}" 6 | with_items: 7 | - /root/.ssh/authorized_keys 8 | - "/home/{{ ansible_env.SUDO_USER }}/.ssh/authorized_keys" 9 | - /etc/machine-id 10 | - /var/lib/cloud 11 | - /var/log/cloud-init.log 12 | - /var/log/cloud-init-output.log 13 | 14 | - name: replace machine-id 15 | file: 16 | dest: /etc/machine-id 17 | state: touch 18 | 19 | - name: Clean up extraneous /etc/hosts entries 20 | lineinfile: 21 | path: /etc/hosts 22 | state: absent 23 | regexp: '^127\.0\.1\.1.*instance.*' 24 | 25 | - name: Check for saved iptables configuration 26 | stat: 27 | path: /etc/iptables/rules.v4 28 | register: iptablescfg 29 | 30 | - name: Remove overly restrictive iptables rules if saved configuration exists 31 | lineinfile: 32 | path: /etc/iptables/rules.v4 33 | state: absent 34 | regexp: "{{ item }}" 35 | with_items: 36 | - "^-A INPUT -j REJECT --reject-with icmp-host-prohibited$" 37 | - "^-A FORWARD -j REJECT --reject-with icmp-host-prohibited$" 38 | when: iptablescfg.stat.exists == True 39 | -------------------------------------------------------------------------------- /ansible/roles/providers/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | packer_builder_type: '' 3 | provider_name: "{{ 'aws' if packer_builder_type.startswith('amazon') else '' }}" -------------------------------------------------------------------------------- /ansible/roles/providers/tasks/aws.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install aws clients 3 | pip: 4 | name: "{{ item }}" 5 | with_items: 6 | - https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz 7 | - awscli 8 | -------------------------------------------------------------------------------- /ansible/roles/providers/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: aws.yml 3 | when: provider_name.lower() == 'aws' 4 | 5 | -------------------------------------------------------------------------------- /ansible/roles/test_loadbalancer/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | test_loadbalancer_enable: False 3 | test_loadbalancer_interface: eth0 4 | -------------------------------------------------------------------------------- /ansible/roles/test_loadbalancer/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - debug: 3 | msg: | 4 | #################### Test Load Balancer Enabled #################### 5 | You have enabled the loadbalancer role that is only intended for 6 | testing purposes and should not be used in a production setting.
7 | #################################################################### 8 | 9 | - name: create /etc/nginx directory 10 | file: 11 | dest: /etc/nginx 12 | state: directory 13 | 14 | - name: drop the nginx.conf template 15 | template: 16 | src: etc/nginx/nginx.conf 17 | dest: /etc/nginx/nginx.conf 18 | 19 | - name: install docker-py 20 | pip: 21 | name: docker-py 22 | 23 | - name: open api server port 24 | firewalld: 25 | port: 6443/tcp 26 | permanent: yes 27 | state: enabled 28 | immediate: True 29 | when: ansible_distribution == 'CentOS' or ansible_distribution == 'RedHat' 30 | 31 | - name: run nginx docker container 32 | become: yes 33 | docker_container: 34 | name: kubernetes-nginx-proxy 35 | image: nginx 36 | restart_policy: always 37 | volumes: 38 | - "/etc/nginx:/etc/nginx" 39 | network_mode: host 40 | -------------------------------------------------------------------------------- /ansible/roles/test_loadbalancer/templates/etc/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | events {} 2 | 3 | stream { 4 | upstream kubernetes { 5 | {% for host in groups['masters'] %} 6 | server {{ hostvars[host]['ansible_' + test_loadbalancer_interface]['ipv4']['address'] }}:6443; 7 | {% endfor %} 8 | } 9 | 10 | server { 11 | listen 6443; 12 | proxy_pass kubernetes; 13 | } 14 | } 15 | 16 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # Documentation for Wardroom 2 | 3 | This directory contains documentation for the Wardroom project. 4 | 5 | * _[Architecture](architecture.md):_ Describes the current architecture of the Wardroom project. 6 | * _[Building Kubernetes-ready Base OS Images](building-images.md)_: Describes how to use Wardroom to build Kubernetes-ready base OS images. 7 | * _[Bootstrapping a Kubernetes Cluster](bootstrapping-a-kubernetes-cluster.md)_: Describes how to use Wardroom to bootstrap a Kubernetes cluster. -------------------------------------------------------------------------------- /docs/architecture.md: -------------------------------------------------------------------------------- 1 | # Wardroom Architecture 2 | 3 | ## Summary 4 | 5 | Customers deploying Kubernetes need solutions to help simplify establishing a Kubernetes cluster. There are a number of different ways to go about this; two methods, in particular, involve creating Kubernetes-ready base images and orchestrating the process of bootstrapping a cluster. Wardroom provides functionality that addresses both of these use cases: 6 | 7 | 1. Wardroom provides a mechanism for users to create Kubernetes-ready base images that contain all the necessary prerequisites for running Kubernetes. 8 | 9 | 2. Wardroom provides orchestration around the process of bootstrapping a cluster. 10 | 11 | ## Motivation 12 | 13 | As noted in the Summary above, customers and VMware personnel need a mechanism for streamlining the process of establishing a Kubernetes cluster from available infrastructure. Wardroom strives to provide such a mechanism. 14 | 15 | ### Goals 16 | 17 | The following items **are** in scope for Wardroom: 18 | 19 | * Wardroom must provide an "image generation" mechanism by which customers can create Kubernetes-ready base images. A _Kubernetes-ready image_ is defined as an OS image that has been prepared for Kubernetes and is ready to run `kubeadm` (or a tool automating `kubeadm`) to bootstrap a Kubernetes cluster.
20 | 21 | * Wardroom must provide "cluster bootstrapping" functionality that orchestrates the process of bootstrapping a Kubernetes cluster, including bootstrapping a cluster from images that have not been previously prepared. 22 | 23 | * Wardroom must support two Ubuntu LTS releases (the current and the previous LTS release) and the last major release of CentOS. Currently, this translates into supporting Ubuntu 16.04 (until the release of 20.04), Ubuntu 18.04, and CentOS 7. 24 | 25 | * Wardroom must support the same versions of Kubernetes as the upstream community; that is, _N-2_, where _N_ represents the most recent release of Kubernetes. Currently, this means support for Kubernetes 1.14, 1.13, and 1.12. 26 | 27 | ### Non-Goals 28 | 29 | The following items **are not** in scope for Wardroom: 30 | 31 | * Wardroom does not attempt to automate the provisioning of infrastructure. 32 | 33 | ## Architecture 34 | 35 | ### Implementation Details 36 | 37 | Currently, Wardroom is implemented as a set of [Ansible][1] roles and a set of [Packer][2] build files. Users use the Packer build files to invoke the Ansible roles in order to prepare images on a target platform. Other Ansible roles provide the orchestration functionality necessary to automate the bootstrapping of a cluster from scratch. Users must have the `ansible` and `packer` tools installed locally in order to use Wardroom's components. 38 | 39 | ### Extending Functionality 40 | 41 | #### Extending Image Generation Functionality 42 | 43 | *Adding a New Target Platform:* Extending Wardroom to include support for new target platforms requires the addition of a new `builder` section to Wardroom's primary Packer build file (named `packer.json`). 44 | 45 | For target platforms that have region- or geography-specific images, additional variable files are used to provide region- or geography-specific information. AWS and Oracle Cloud (OCI) are examples of such target platforms. See the "Inputs" section below. 46 | 47 | *Adding a New Host OS:* Adding support for additional host OSes requires adding a new `builder` section to the primary Packer build file. 48 | 49 | *Adding a New Kubernetes Version:* So far, the generic Ansible roles called by Packer have been able to support new Kubernetes versions with little or no modifications (only requiring the user to provide a new Kubernetes version on the `packer` command line in many cases). Future releases of Kubernetes may require changes to the Ansible roles in order to add support for that release to Wardroom. 50 | 51 | #### Extending Cluster Bootstrapping Functionality 52 | 53 | All changes to Wardroom's cluster bootstrapping functionality require modifications to the existing Ansible roles, or the addition of new Ansible roles. 54 | 55 | ### Inputs 56 | 57 | #### Image Generation 58 | 59 | For image generation, Packer is the primary tool leveraged by Wardroom, so most of the inputs required by Wardroom are inputs to Packer. Some of this information is provided via variables files, others by the main build file, and users are also able to override values on the `packer` command line itself. 60 | 61 | * _Platform credentials:_ Packer will require the appropriate credentials for the target platform (AWS, vSphere, GCP, etc.). As these vary from platform to platform, users are encouraged to refer to the Packer documentation for specific details. For security purposes, no credentials are or should be stored within Wardroom's artifacts. 
62 | * _Base image:_ Packer typically builds on top of a base image and therefore needs to know what that base image is. This information is provided via a provider- and region-specific variables file. The user can also supply this information via the `packer` command line. 63 | * _Kubernetes version:_ A default version is supplied in the main Packer build file, but users can override that version via the `packer` command line. 64 | * _Kubernetes CNI version:_ This is handled in the same way as the Kubernetes version. 65 | * _Build version:_ No default build version value is defined; it must be supplied by the user on the `packer` command line. 66 | 67 | #### Cluster Bootstrapping 68 | 69 | For cluster bootstrapping, Ansible is the primary tool leveraged by Wardroom, so most of the inputs required by Wardroom are inputs to Ansible. 70 | 71 | * _Instance/OS credentials:_ Ansible will need appropriate credentials to connect to the target instances/OSes and perform its configuration tasks. 72 | * _Inventory:_ Ansible will need access to an inventory (either prepared in advance by the user or created dynamically from a source) to know which instances/OSes to orchestrate. 73 | 74 | ### Outputs 75 | 76 | #### Image Generation 77 | 78 | The primary output of Wardroom when used for image generation will be the creation of a platform-specific base image (such as an AMI for AWS, or an image for GCP). This image will appear within the account of the credentials supplied to Packer (as described in the Inputs section above). 79 | 80 | #### Cluster Bootstrapping 81 | 82 | The primary output of Wardroom when used for cluster bootstrapping will be the creation of a Kubernetes cluster. This Kubernetes cluster will be created from the inventory supplied to Ansible. 83 | 84 | ### Testing/Validation 85 | 86 | Wardroom does not currently have a test framework in place to validate images. Two forms of testing are currently under exploration: 87 | 88 | * Using [Molecule][3] to test the Ansible roles 89 | * Using [`goss`][4] to perform tests against the image artifacts 90 | 91 | ## Upstream Dependencies 92 | 93 | Wardroom is dependent on the following other projects: 94 | 95 | * Ansible 96 | * Packer 97 | 98 | When testing is added to the project, Wardroom will take a dependency on the test framework(s) used. 99 | 100 | ## Downstream Dependencies 101 | 102 | The following other projects are dependent on Wardroom or components of Wardroom: 103 | 104 | * The [VMware AWS Quickstart][5] is dependent on Wardroom for generating the AWS AMIs that it uses. 105 | 106 | [1]: https://www.ansible.com 107 | [2]: https://www.packer.io 108 | [3]: https://github.com/ansible/molecule 109 | [4]: https://github.com/aelsabbahy/goss 110 | [5]: https://github.com/heptio/aws-quickstart/ 111 | -------------------------------------------------------------------------------- /docs/bootstrapping-a-kubernetes-cluster.md: -------------------------------------------------------------------------------- 1 | # Bootstrapping a Kubernetes Cluster 2 | 3 | Wardroom leverages Ansible to provide orchestration around the process of bootstrapping a Kubernetes cluster with kubeadm. The Ansible playbooks and files related to this functionality can be found in the [swizzle](../swizzle) directory. 4 | 5 | It is important to note that Wardroom does not provide infrastructure provisioning automation. Thus, all the infrastructure must be readily available before attempting to use Wardroom to bootstrap a Kubernetes cluster.
6 | 7 | ## Prerequisites 8 | 9 | - [Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html) version >= 2.4.0.0 10 | - Nodes up and running (they are not required to be running a Wardroom-built base OS image) 11 | - SSH Credentials 12 | 13 | ## Bootstrapping a Cluster 14 | 15 | The following steps assume your current working directory is the `swizzle/` directory. 16 | 17 | ### Create Inventory 18 | 19 | Create an Ansible 20 | [inventory](https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html) file that lists all the nodes of the cluster. The following host groups are supported: 21 | 22 | - `etcd`: List of hosts that will run etcd. 23 | - `masters`: List of Kubernetes control plane nodes. 24 | - `primary_master`: The first control plane node. There is nothing special about this node, other than being the node where `kubeadm init` is run. This node must also be part of the `masters` group. 25 | - `nodes`: List of Kubernetes worker nodes. 26 | 27 | _Note: Using Ansible's dynamic inventory feature should be possible, but it is outside of the scope of this document._ 28 | 29 | A sample inventory file, `sample-inventory.ini`, can be found in the `swizzle/examples` directory. **This is a required step.** Without an inventory file, the Ansible playbooks used by Wardroom will not work. 30 | 31 | #### Stacked Masters or External Etcd Cluster 32 | 33 | Wardroom supports two etcd deployment patterns: 34 | 35 | - _Stacked Masters_: Etcd runs on the same nodes as the Kubernetes Control Plane. To deploy stacked masters, the `etcd` host group must equal the `masters` host group. 36 | - _External Etcd Cluster_: Etcd runs on a dedicated set of nodes. To deploy an external etcd cluster, the `etcd` host group must have no overlap with the `masters` host group. 37 | 38 | The sample inventory file in the `swizzle/examples` directory shows an inventory for a stacked master deployment. 39 | 40 | ### Configure the Installation 41 | 42 | The Ansible roles expose a set of variables that can be used to customize the installation to your specific environment. Most variables have defaults defined in the `defaults/` directory of each role. 43 | 44 | To configure the installation, create an `extra-vars.yml` file to capture the variables for your environment. **This is a required step.** 45 | 46 | Examples of an `extra-vars.yml` file are found in the `swizzle/examples` directory (both a CentOS and Ubuntu version are present). 47 | 48 | #### Minimum Variables to Define 49 | 50 | **At a minimum** the `extra-vars.yml` file must contain the following elements: 51 | 52 | ```yaml 53 | --- 54 | kubernetes_common_kubeadm_config: 55 | networking: 56 | podSubnet: "192.168.0.0/16" 57 | kubernetes_users: 58 | - { user: ubuntu, group: ubuntu, home: /home/ubuntu } 59 | ``` 60 | 61 | The user specified in the `kubernetes_users` list should correspond to the OS being configured (use `vagrant` for local Vagrant testing). The snippet above is for Ubuntu nodes. 62 | 63 | #### Variables for Highly-Available Clusters 64 | 65 | When setting up a highly-available (HA) Kubernetes cluster, the control plane must be accessible through a single address. This is typically achieved by deploying a load balancer in front of the control plane nodes. 66 | 67 | Wardroom exposes two variables to provide the load-balanced API address: 68 | 69 | ```yaml 70 | # IP address of the load balancer fronting the Kubernetes API servers. 71 | # This is required if an FQDN is not provided.
Otherwise, this can be left empty. 72 | kubernetes_common_api_ip: "" 73 | 74 | # FQDN of the load balancer fronting the Kubernetes API servers. 75 | # This variable takes precedence over the kubernetes_common_api_ip variable. 76 | kubernetes_common_api_fqdn: "" 77 | ``` 78 | 79 | When building HA clusters with Wardroom, one of these two variables **must** be included in the `extra-vars.yml` file created to configure the installation. 80 | 81 | #### Variables for Modifying Kubeadm Configuration 82 | 83 | The kubeadm configuration is provided via the `kubernetes_common_kubeadm_config` variable. Wardroom provides a [default](../ansible/roles/kubernetes-common/defaults/main.yml) for this variable that 84 | can be modified through the `extra-vars.yml` file. 85 | 86 | Given that the variable is a hash (or map), the user-provided variable is merged with the default variable. This provides the ability to make point modifications without having to redefine the entire variable. 87 | 88 | The following example shows how to set the pod CIDR in the `extra-vars.yml` file: 89 | 90 | ```yaml 91 | kubernetes_common_kubeadm_config: 92 | networking: 93 | podSubnet: "192.168.0.0/16" 94 | ``` 95 | 96 | The configuration stanza shown above **must** be included in the `extra-vars.yml` file, as the default `kubeadm` configuration does not specify this value (and this value is required for most CNI plugins). 97 | 98 | #### Modifying CNI Manifests 99 | 100 | Wardroom provides a mechanism to modify the YAML deployment manifests of the CNI plugin before installing it in the cluster. This mechanism is implemented as an Ansible library, which allows users to express conditions as well as modifications using JSONPath syntax. 101 | 102 | The following example shows how to set the `CALICO_IPV4POOL_CIDR` environment variable in the Calico deployment manifests (this example would be _required_ if a user is not using the default CIDR of 192.168.0.0/16). In addition to specifying the `podSubnet` value as outlined above, users would also need to add this in the `extra-vars.yml` file: 103 | 104 | ```yaml 105 | kubernetes_cni_calico_manifest_mods: 106 | - conditions: 107 | - expression: kind 108 | value: DaemonSet 109 | modifications: 110 | - expression: "spec.template.spec.containers[?(@.name == 'calico-node')].env[?(@.name == 'CALICO_IPV4POOL_CIDR')].value" 111 | value: "172.16.0.0/16" 112 | ``` 113 | 114 | #### Other Variables 115 | 116 | Some other commonly-used variables that may need to be specified in the `extra-vars.yml` file include: 117 | 118 | * If the primary network interface is _not_ named `eth0`, then you should set `kubernetes_common_primary_interface` and `etcd_interface` to the correct interface name in the `extra-vars.yml` file. 119 | * The `kubernetes_cni_plugin` value is used to specify which CNI plugin to install. The default is Calico. 120 | 121 | _Note: Wardroom sets Ansible's [hash merging behavior](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#default-hash-behaviour) to `merge`. This means that any variable that is a hash (a.k.a. 
map or dictionary) with a default value will be merged with the user-provided variable._ 122 | 123 | ### Run Ansible 124 | 125 | Once you have the inventory file and the extra variables file, run the playbook: 126 | 127 | ```shell 128 | ansible-playbook main.yml --inventory inventory.ini --extra-vars @extra-vars.yml -e wardroom_action=install 129 | ``` 130 | 131 | The `wardroom_action` variable accepts one of three values: 132 | 133 | * If `install` is specified, Wardroom will bootstrap an entire cluster from scratch. 134 | * If `add-nodes` is specified, Wardroom will add worker nodes to an existing cluster. This would allow users to bootstrap a cluster initially, then add nodes after the fact by simply updating the `nodes` group in the inventory and running Wardroom with `-e wardroom_action=add-nodes`. 135 | * If `upgrade` is specified, Wardroom will coordinate an upgrade of the cluster to a new Kubernetes version. 136 | 137 | ## Vagrant and provision.py 138 | 139 | Within the `swizzle` directory, Wardroom provides a Vagrantfile and the `provision.py` script _for development and testing purposes only._ 140 | 141 | The `provision.py` script generates Ansible inventories from templates in the `examples/` directory. 142 | 143 | To create a cluster using `provision.py`: 144 | 145 | ```shell 146 | python provision.py -a install -o xenial examples/calico.yml 147 | ``` 148 | 149 | To destroy the cluster: 150 | 151 | ```shell 152 | vagrant destroy -f 153 | ``` 154 | -------------------------------------------------------------------------------- /docs/building-images.md: -------------------------------------------------------------------------------- 1 | # Building Kubernetes-ready Base Operating System Images 2 | 3 | This directory contains tooling for building base images for use as nodes in Kubernetes clusters. [Packer](https://www.packer.io) is used for building these images.
4 | 5 | - [Building Wardroom Images](#building-wardroom-images) 6 | - [Prerequisites](#prerequisites) 7 | - [Prerequisites for all images](#prerequisites-for-all-images) 8 | - [Prerequisites for Amazon Web Services](#prerequisites-for-amazon-web-services) 9 | - [Prerequisites for Google Cloud](#prerequisites-for-google-cloud) 10 | - [Building Images](#building-images) 11 | - [Build Variables](#build-variables) 12 | - [Building Specific Images](#building-specific-images) 13 | - [Building the AWS AMIs](#building-the-aws-amis) 14 | - [Building Google Cloud Images](#building-google-cloud-images) 15 | - [Testing Images](#testing-images) 16 | - [Deploying Images](#deploying-images) 17 | - [AWS](#aws) 18 | - [Google Cloud](#google-cloud) 19 | - [Updating the Heptio AWS Quick Start Images](#updating-the-heptio-aws-quick-start-images) 20 | - [Appendix](#appendix) 21 | - [GCP Service Account Credentials](#gcp-service-account-credentials) 22 | 23 | ## Prerequisites 24 | 25 | ### Prerequisites for all images 26 | 27 | - [Packer](https://www.packer.io/docs/installation.html) 28 | - [Ansible](http://docs.ansible.com/ansible/latest/intro_installation.html) version >= 2.4.0.0 29 | 30 | ### Prerequisites for Amazon Web Services 31 | 32 | - An AWS account 33 | - The AWS CLI installed and configured 34 | 35 | ### Prerequisites for Google Cloud 36 | 37 | - A Google Cloud account 38 | - The gcloud CLI installed and configured 39 | - A precreated service account json file 40 | 41 | ## Building Images 42 | 43 | ### Build Variables 44 | 45 | The following variables can be overridden using the `-var` option when calling `packer build`: 46 | 47 | | Variable | Default | Description | 48 | |----------|---------|-------------| 49 | | build_version | unset | A unique build version for the image | 50 | | kubernetes_version | 1.13.2-00 | Kubernetes version to install | 51 | | kubernetes_cni_version | 0.6.0-00 | CNI version to install | 52 | 53 | For example, to build all images for use with Kubernetes 1.8.9 for build version 1: 54 | 55 | ```sh 56 | packer build -var kubernetes_version=1.8.9-00 -var build_version=1 packer.json 57 | ``` 58 | 59 | There are additional variables that may be set that affect the behavior of specific builds or packer post-processors. `packer inspect packer.json` will list all available variables and their default values. 60 | 61 | ### Building Specific Images 62 | 63 | If `packer build` is run without specifying which images to build, then it will attempt to build all configured images. `packer inspect packer.json` will list the configured builders. The `--only` option can be specified when running `packer build` to limit the images built. 64 | 65 | For example, to build only the AWS Ubuntu image: 66 | 67 | ```sh 68 | packer build -var build_version=`git rev-parse HEAD` --only=ami-ubuntu packer.json 69 | ``` 70 | 71 | ### Building the AWS AMIs 72 | 73 | Building AWS images requires setting additional variables not set by default. The `packer/aws-us-east-1.json` file is provided as an example. 74 | 75 | To build both the Ubuntu and CentOS AWS AMIs: 76 | 77 | ```sh 78 | packer build -var-file aws-us-east-1.json -var build_version=`git rev-parse HEAD` --only=ami-centos,ami-ubuntu packer.json 79 | ``` 80 | 81 | #### Required Permissions to Build the AWS AMIs 82 | 83 | The [Packer documentation for the Amazon AMI builder](https://www.packer.io/docs/builders/amazon.html) supplies a suggested set of minimum permissions.
However, Wardroom has been successfully tested with the following IAM permissions: 84 | 85 | ```json 86 | { 87 | "Version": "2012-10-17", 88 | "Statement": [ 89 | { 90 | "Effect": "Allow", 91 | "Action": [ 92 | "ec2:Describe*", 93 | "ec2:TerminateInstances", 94 | "ec2:StartInstances", 95 | "ec2:CreateSnapshot", 96 | "ec2:CreateImage", 97 | "ec2:RunInstances", 98 | "ec2:StopInstances", 99 | "ec2:CreateKeyPair", 100 | "ec2:DeleteKeyPair", 101 | "ec2:CreateSecurityGroup", 102 | "ec2:DeleteSecurityGroup", 103 | "ec2:AuthorizeSecurityGroupIngress", 104 | "ec2:CreateTags", 105 | "ec2:DeleteVolume" 106 | ], 107 | "Resource": "*" 108 | } 109 | ] 110 | } 111 | ``` 112 | 113 | ### Building Google Cloud Images 114 | 115 | Building Google Cloud images requires setting the `GOOGLE_APPLICATION_CREDENTIALS` environment variable and providing the IDs of the source images. For the latter, the `packer/gcp-source-images.json` file is provided as an example. 116 | 117 | To build only the Ubuntu Google Cloud Image: 118 | 119 | ```sh 120 | export GOOGLE_APPLICATION_CREDENTIALS= 121 | packer build -var-file=gcp-source-images.json -var build_version=`git rev-parse HEAD` -var project_id= -only gcp-ubuntu packer.json 122 | ``` 123 | 124 | #### Permissions Required to Build Google Cloud Images 125 | 126 | The account used by Wardroom (as specified by the `GOOGLE_APPLICATION_CREDENTIALS` environment variable) must have the following permissions in order for Wardroom to function as expected: 127 | 128 | ``` 129 | compute.disks.create 130 | compute.disks.delete 131 | compute.disks.useReadOnly 132 | compute.images.create 133 | compute.images.delete 134 | compute.images.get 135 | compute.instances.create 136 | compute.instances.delete 137 | compute.instances.get 138 | compute.instances.setMetadata 139 | compute.instances.setServiceAccount 140 | compute.instances.start 141 | compute.instances.stop 142 | compute.machineTypes.get 143 | compute.subnetworks.use 144 | compute.subnetworks.useExternalIp 145 | compute.zones.get 146 | ``` 147 | 148 | ### Building Oracle Cloud Infrastructure (OCI) Images 149 | 150 | Building Oracle Cloud Infrastructure (OCI) images requires a correct configuration for the Oracle CLI as outlined in the "CLI Configuration Information" section of [this page](https://docs.cloud.oracle.com/iaas/Content/API/Concepts/sdkconfig.htm), although the Oracle CLI does not need to be installed (Packer will use the values in the configuration file). 151 | 152 | You will also need the following pieces of information: 153 | 154 | * The Oracle Cloud ID (OCID) of the compartment where the build VM will be instantiated (you can use the root compartment, whose OCID is equal to the tenancy OCID) 155 | * The name of the availability domain where the build VM will be instantiated 156 | * The OCID for the subnet that corresponds to the availability domain where the build VM will be instantiated 157 | 158 | To build an OCI image: 159 | 160 | ```sh 161 | packer build -var-file oci-us-phoenix-1.json \ 162 | -var build_version=`git rev-parse HEAD` \ 163 | -var oci_availability_domain="" \ 164 | -var oci_compartment_ocid="" \ 165 | -var oci_subnet_ocid="" \ 166 | -only=oci-ubuntu packer.json 167 | ``` 168 | 169 | ## Testing Images 170 | 171 | After your images are created, you'll want to prove that the images can be used to build conformant clusters. The conformance testing suite tests _clusters_, not single nodes -- so we have to spin up a single node cluster 172 | and run the tests inside the new cluster.
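One way to get such an instance on AWS is to launch it from the freshly built AMI; the following is a minimal sketch, and every ID in it is an illustrative placeholder you must substitute with your own values:

```sh
# Launch a single test instance from the AMI produced by the build.
# The AMI ID, key pair, and security group below are placeholders.
aws ec2 run-instances \
  --image-id <your-new-ami-id> \
  --instance-type t2.medium \
  --key-name <your-key-pair> \
  --security-group-ids <sg-allowing-ssh> \
  --count 1
```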
173 | 174 | Connect remotely to an instance created from the image and run the Node Conformance tests using the following commands: 175 | 176 | ```sh 177 | 178 | # Cluster Creation (skip this if you create a single node cluster in some other way) 179 | sudo kubeadm init --pod-network-cidr=192.168.0.0/16 180 | sudo chown $(id -u):$(id -g) /etc/kubernetes/admin.conf 181 | export KUBECONFIG=/etc/kubernetes/admin.conf 182 | 183 | 184 | kubectl create -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml 185 | kubectl create -f https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml 186 | 187 | 188 | # Setup for Conformance tests 189 | # Remove all the taints from the node -- simply adding tolerations to the conformance deployment didn't work 190 | kubectl patch nodes $(hostname) -p '{"spec":{"taints":[]}}' 191 | 192 | # Get the yaml to run the conformance tests, and replace the source image repo to use the 193 | # globally accessible image repo, instead of the Kubernetes internal one. 194 | # This yaml was created along with the 1.14 release, but can be used with 1.13, and 195 | # it will be updated with future releases. (Again, this only works >=1.13) 196 | wget https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/images/conformance/conformance-e2e.yaml 197 | sed -i 's/k8s.gcr.io/gcr.io\/google-containers/' conformance-e2e.yaml 198 | 199 | # Will also need to go in and update the version number of the image to add the trailing "patch version" part 200 | # The pulled yaml will only have major.minor. 201 | # So, for example, with v1.14, you'll need to update the image to 1.14.0 202 | # The valid conformance test images can be found here: 203 | # gcr.io/google-containers/conformance-amd64 204 | 205 | # Add to "value" for E2E_SKIP env var 206 | \\[Flaky\\]|\\[Serial\\]|\\[sig-network\\]|Container Lifecycle Hook 207 | 208 | # Finally, run the tests -- and leave it alone for about an hour. 209 | kubectl create -f conformance-e2e.yaml 210 | ``` 211 | 212 | ## Deploying Images 213 | 214 | ### AWS 215 | 216 | There is a helper script to aid in seeding built AMIs to all other AWS regions. This script can be installed from the root of this repository by running `python3 setup.py install`. 217 | 218 | ```sh 219 | wardroom aws copy-ami -r 220 | ``` 221 | 222 | ### Google Cloud 223 | 224 | Unlike AWS, Google Cloud images are not limited to specific regions, so no further steps are needed to use the created images. 225 | 226 | ## Updating the Heptio AWS Quick Start Images 227 | 228 | - Build the base image 229 | 230 | ```sh 231 | packer build -var-file aws-us-east-1.json -var build_version=`git rev-parse HEAD` --only=ami-ubuntu packer.json 232 | ``` 233 | - Run Node Conformance against the built image 234 | - Deploy the image using copy-ami 235 | - Update the [Quick Start](https://github.com/heptio/aws-quickstart) to use the new images 236 | 237 | ## Appendix 238 | 239 | ### GCP Service Account Credentials 240 | 241 | [Create a GCP service account](https://www.packer.io/docs/builders/googlecompute.html#running-without-a-compute-engine-service-account) 242 | 243 | You'll need to download the credential file after creating your account. Make sure you don't commit it; it contains secrets.
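If you prefer to create the service account from the command line, it can be done roughly as follows; this is a sketch, and the account name, key path, and project ID are illustrative placeholders:

```sh
# All names below are placeholders for illustration.
gcloud iam service-accounts create wardroom-packer \
  --display-name "wardroom packer builder" --project <project-id>

# Download a key for the new account; keep this file out of version control.
gcloud iam service-accounts keys create ~/wardroom-packer-key.json \
  --iam-account wardroom-packer@<project-id>.iam.gserviceaccount.com

export GOOGLE_APPLICATION_CREDENTIALS=~/wardroom-packer-key.json
```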
244 | 245 | If you want to use a service account for use with Wardroom, you'll also need to grant the service account the ServiceAccountUser role in order for Wardroom to function properly. 246 | 247 | You'll also need to make note of the "project ID" you wish to run the container in. It's a string, and you can find it at the top of the Google Cloud Console, or with `gcloud projects list`. 248 | -------------------------------------------------------------------------------- /packer/.gitignore: -------------------------------------------------------------------------------- 1 | venv 2 | *.egg-info 3 | -------------------------------------------------------------------------------- /packer/README.md: -------------------------------------------------------------------------------- 1 | packer 2 | ======= 3 | The packer directory includes the packer configuration to build Kubernetes-ready operating system images. 4 | 5 | For usage information, see the user documentation [here](../docs/building-images.md). -------------------------------------------------------------------------------- /packer/aws-us-east-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "ubuntu_16_04_ami": "ami-0565af6e282977273", 3 | "ubuntu_18_04_ami": "ami-034920e38d95c9311", 4 | "centos_7_4_ami": "ami-b81dbfc5", 5 | "aws_region": "us-east-1" 6 | } 7 | -------------------------------------------------------------------------------- /packer/aws-us-east-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "ubuntu_16_04_ami": "ami-0e7589a8422e3270f", 3 | "ubuntu_18_04_ami": "ami-0c55b159cbfafe1f0", 4 | "centos_7_4_ami": "ami-08b08d6d", 5 | "aws_region": "us-east-2" 6 | } 7 | -------------------------------------------------------------------------------- /packer/aws-us-west-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "ubuntu_16_04_ami": "ami-9cb2bdfc", 3 | "ubuntu_18_04_ami": "ami-0ea0e2d21f93aa6c9", 4 | "centos_7_4_ami": "ami-070a1367", 5 | "aws_region": "us-west-1" 6 | } 7 | -------------------------------------------------------------------------------- /packer/aws-us-west-2.json: -------------------------------------------------------------------------------- 1 | { 2 | "ubuntu_16_04_ami": "ami-08692d171e3cf02d6", 3 | "ubuntu_18_04_ami": "ami-023119b698e71c162", 4 | "centos_7_4_ami": "ami-19007261", 5 | "aws_region": "us-west-2" 6 | } 7 | -------------------------------------------------------------------------------- /packer/gcp-source-images.json: -------------------------------------------------------------------------------- 1 | { 2 | "gcp_ubuntu_source_image": "ubuntu-1604-xenial-v20180306", 3 | "gcp_centos_source_image": "centos-7-v20180401" 4 | } 5 | -------------------------------------------------------------------------------- /packer/oci-us-phoenix-1.json: -------------------------------------------------------------------------------- 1 | { 2 | "oci_base_image_ocid": "ocid1.image.oc1.phx.aaaaaaaab6bbszpshh7fy6aw2ay74rhoiuu4g2nmzr6ykcwt53ekadzjp2ta" 3 | } 4 | -------------------------------------------------------------------------------- /packer/packer.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "", 4 | "aws_secret_key": "", 5 | "aws_region": "us-east-1", 6 | "build_version": null, 7 | "kubernetes_version": "1.15.3-00", 8 | "kubernetes_cni_version": "0.6.0-00", 9 | 
"existing_ansible_ssh_args": "{{env `ANSIBLE_SSH_ARGS`}}" 10 | }, 11 | "builders": [ 12 | { 13 | "name": "ami-ubuntu", 14 | "type": "amazon-ebs", 15 | "instance_type": "t2.small", 16 | "source_ami": "{{user `ubuntu_18_04_ami`}}", 17 | "ami_name": "ami-ubuntu-18.04-{{user `kubernetes_version`}}-{{timestamp}}", 18 | "access_key": "{{user `aws_access_key`}}", 19 | "secret_key": "{{user `aws_secret_key`}}", 20 | "region": "{{user `aws_region`}}", 21 | "ssh_username": "ubuntu", 22 | "tags": { 23 | "build_version": "{{user `build_version`}}", 24 | "source_ami": "{{user `ubuntu_18_04_ami`}}", 25 | "build_date": "{{isotime}}", 26 | "distribution": "Ubuntu", 27 | "distribution_release": "bionic", 28 | "distribution_version": "18.04", 29 | "kubernetes_version": "{{user `kubernetes_version`}}", 30 | "kubernetes_cni_version": "{{user `kubernetes_cni_version`}}" 31 | } 32 | }, 33 | { 34 | "name": "ami-ubuntu-1604", 35 | "type": "amazon-ebs", 36 | "instance_type": "t2.small", 37 | "source_ami": "{{user `ubuntu_16_04_ami`}}", 38 | "ami_name": "ami-ubuntu-16.04-{{user `kubernetes_version`}}-{{timestamp}}", 39 | "access_key": "{{user `aws_access_key`}}", 40 | "secret_key": "{{user `aws_secret_key`}}", 41 | "region": "{{user `aws_region`}}", 42 | "ssh_username": "ubuntu", 43 | "tags": { 44 | "build_version": "{{user `build_version`}}", 45 | "source_ami": "{{user `ubuntu_16_04_ami`}}", 46 | "build_date": "{{isotime}}", 47 | "distribution": "Ubuntu", 48 | "distribution_release": "xenial", 49 | "distribution_version": "16.04", 50 | "kubernetes_version": "{{user `kubernetes_version`}}", 51 | "kubernetes_cni_version": "{{user `kubernetes_cni_version`}}" 52 | } 53 | }, 54 | { 55 | "name": "ami-centos", 56 | "type": "amazon-ebs", 57 | "instance_type": "t2.small", 58 | "source_ami": "{{user `centos_7_4_ami`}}", 59 | "ami_name": "ami-centos-7.4-{{user `kubernetes_version`}}-{{timestamp}}", 60 | "access_key": "{{user `aws_access_key`}}", 61 | "secret_key": "{{user `aws_secret_key`}}", 62 | "region": "{{user `aws_region`}}", 63 | "ssh_username": "centos", 64 | "tags": { 65 | "build_version": "{{user `build_version`}}", 66 | "source_ami": "{{user `centos_7_4_ami`}}", 67 | "build_date": "{{isotime}}", 68 | "distribution": "CentOS", 69 | "distribution_release": "Core", 70 | "distribution_version": "7.4", 71 | "kubernetes_version": "{{user `kubernetes_version`}}", 72 | "kubernetes_cni_version": "{{user `kubernetes_cni_version`}}" 73 | } 74 | }, 75 | { 76 | "name": "gcp-ubuntu", 77 | "type": "googlecompute", 78 | "project_id": "{{user `project_id`}}", 79 | "source_image": "{{ user `gcp_ubuntu_source_image`}}", 80 | "ssh_username": "packer", 81 | "zone": "us-east1-b", 82 | "metadata": { 83 | "build_version": "{{user `build_version`}}", 84 | "source_image": "{{ user `gcp_ubuntu_source_image`}}", 85 | "build_date": "{{isotime}}", 86 | "distribution": "Ubuntu", 87 | "distribution_release": "xenial", 88 | "distribution_version": "16.04", 89 | "kubernetes_version": "{{user `kubernetes_version`}}", 90 | "kubernetes_cni_version": "{{user `kubernetes_cni_version`}}" 91 | } 92 | }, 93 | { 94 | "name": "gcp-centos", 95 | "type": "googlecompute", 96 | "project_id": "{{user `project_id`}}", 97 | "source_image": "{{ user `gcp_centos_source_image`}}", 98 | "ssh_username": "packer", 99 | "zone": "us-east1-b", 100 | "metadata": { 101 | "build_version": "{{user `build_version`}}", 102 | "source_image": "{{ user `gcp_centos_source_image`}}", 103 | "build_date": "{{isotime}}", 104 | "distribution": "CentOS", 105 | "distribution_release": 
"Core", 106 | "distribution_version": "7.4", 107 | "kubernetes_version": "{{user `kubernetes_version`}}", 108 | "kubernetes_cni_version": "{{user `kubernetes_cni_version`}}" 109 | } 110 | }, 111 | { 112 | "name": "oci-ubuntu", 113 | "type": "oracle-oci", 114 | "availability_domain": "{{ user `oci_availability_domain` }}", 115 | "base_image_ocid": "{{ user `oci_base_image_ocid` }}", 116 | "compartment_ocid": "{{ user `oci_compartment_ocid` }}", 117 | "image_name": "Canonical-Ubuntu-16.04-K8s-{{ user `kubernetes_version` }}-{{ timestamp }}", 118 | "shape": "VM.Standard1.4", 119 | "ssh_username": "ubuntu", 120 | "subnet_ocid": "{{ user `oci_subnet_ocid` }}", 121 | "tags": { 122 | "build_version": "{{ user `build_version` }}", 123 | "source_ami": "{{ user `oci_base_image_ocid` }}", 124 | "build_date": "{{ isotime }}", 125 | "distribution": "Ubuntu", 126 | "distribution_release": "xenial", 127 | "distribution_version": "16.04", 128 | "kubernetes_version": "{{ user `kubernetes_version` }}", 129 | "kubernetes_cni_version": "{{ user `kubernetes_cni_version` }}" 130 | } 131 | } 132 | ], 133 | "provisioners": [ 134 | { 135 | "type": "ansible", 136 | "playbook_file": "../ansible/playbook.yml", 137 | "ansible_env_vars": [ 138 | "ANSIBLE_SSH_ARGS='{{user `existing_ansible_ssh_args`}} -o IdentitiesOnly=yes'", 139 | "ANSIBLE_REMOTE_TEMP='/tmp/.ansible/'" 140 | ], 141 | "extra_arguments": [ 142 | "--extra-vars", 143 | "common_upgrade_base=true kubernetes_version={{user `kubernetes_version`}} kubernetes_cni_version={{user `kubernetes_cni_version`}}" 144 | ] 145 | } 146 | ] 147 | } 148 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #/usr/bin/env python3 2 | 3 | from setuptools import setup 4 | 5 | setup( 6 | name="Wardroom", 7 | version="0.1", 8 | description="Python utility for building and managing wardroom artifacts", 9 | author="Heptio", 10 | url="https://github.com/heptiolabs/wardroom", 11 | packages=['wardroom'], 12 | entry_points={ 13 | 'console_scripts': [ 14 | 'wardroom=wardroom.cli:cli' 15 | ] 16 | }, 17 | install_requires=[ 18 | 'boto3>=1.5', 19 | 'click>=6.0' 20 | ] 21 | ) 22 | -------------------------------------------------------------------------------- /swizzle/.gitignore: -------------------------------------------------------------------------------- 1 | *.diff 2 | *.swp 3 | *.swo 4 | *.pyc 5 | *.retry 6 | .vagrant 7 | -------------------------------------------------------------------------------- /swizzle/README.md: -------------------------------------------------------------------------------- 1 | swizzle 2 | ======= 3 | The swizzle directory includes an Ansible playbook used to orchestrate the installation of Kubernetes by way of kubeadm. 4 | 5 | For usage information, see the user documentation [here](../docs/bootstrapping-a-kubernetes-cluster.md) -------------------------------------------------------------------------------- /swizzle/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 
8 | WARDROOM_MASTER_COUNT = ENV["WARDROOM_MASTER_COUNT"] || 3 9 | WARDROOM_NODE_COUNT = ENV["WARDROOM_NODE_COUNT"] || 1 10 | WARDROOM_BOX = ENV["WARDROOM_BOX"] || "generic/ubuntu1804" 11 | WARDROOM_NETWORK_NAME = ENV["WARDROOM_NETWORK_NAME"] || "wardroom-private" 12 | 13 | $update_dns = <<-SCRIPT 14 | if grep -qi debian /etc/os-release; then 15 | sudo sed -i "s/\\[4.2.2.*/\\[8.8.8.8, 8.8.4.4\\]/g" /etc/netplan/01-netcfg.yaml; 16 | sudo netplan apply; 17 | fi; 18 | SCRIPT 19 | 20 | Vagrant.configure("2") do |config| 21 | 22 | config.vm.box = WARDROOM_BOX 23 | 24 | config.vm.provider :libvirt do |libvirt| 25 | libvirt.disk_bus = "virtio" 26 | end 27 | 28 | config.vm.provision "shell", inline: $update_dns 29 | 30 | config.vm.define "loadbalancer" do |subconfig| 31 | subconfig.vm.hostname = "loadbalancer" 32 | 33 | subconfig.vm.provider "virtualbox" do |v, override| 34 | override.vm.network "private_network", :ip => "10.10.10.3", virtualbox__intnet: WARDROOM_NETWORK_NAME 35 | end 36 | end 37 | 38 | (1..WARDROOM_MASTER_COUNT.to_i).each do |i| 39 | 40 | config.vm.define "master#{i}" do |subconfig| 41 | subconfig.vm.hostname = "master#{i}.local" 42 | 43 | subconfig.vm.provider "virtualbox" do |v, override| 44 | override.vm.network "private_network", :ip => "10.10.10.1#{i}", virtualbox__intnet: WARDROOM_NETWORK_NAME 45 | v.memory = 1024 46 | v.customize ["modifyvm", :id, "--uartmode1", "disconnected"] 47 | end 48 | end 49 | end 50 | 51 | (1..WARDROOM_NODE_COUNT.to_i).each do |i| 52 | config.vm.define "node#{i}" do |subconfig| 53 | subconfig.vm.hostname = "node#{i}.local" 54 | 55 | subconfig.vm.provider "virtualbox" do |v, override| 56 | override.vm.network "private_network", :ip => "10.10.10.2#{i}", virtualbox__intnet: WARDROOM_NETWORK_NAME 57 | v.memory = 4096 58 | v.customize ["modifyvm", :id, "--uartmode1", "disconnected"] 59 | end 60 | end 61 | end 62 | end 63 | -------------------------------------------------------------------------------- /swizzle/add-nodes.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install ansible prerequisites 3 | import_playbook: ../ansible/pre.yml 4 | tags: 5 | - prereqs 6 | - ansible-prereqs 7 | 8 | - name: create a kubeadm token 9 | hosts: primary_master 10 | become: yes 11 | tasks: 12 | - name: generate a kubeadm token 13 | command: "/usr/bin/kubeadm token create --config /etc/kubernetes/kubeadm.conf --kubeconfig /etc/kubernetes/admin.conf" 14 | register: generated_token 15 | run_once: True 16 | delegate_to: "{{ groups['primary_master']|first }}" 17 | 18 | - name: install kubernetes prerequisites 19 | hosts: nodes 20 | become: yes 21 | roles: 22 | - role: common 23 | when: common_enable|bool 24 | - role: docker 25 | when: docker_enable|bool 26 | - role: kubernetes 27 | - role: etcd 28 | when: False 29 | - role: kubernetes-node 30 | tags: 31 | - kubernetes-node 32 | -------------------------------------------------------------------------------- /swizzle/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | display_skipped_hosts = false 3 | roles_path = ../ansible/roles 4 | hash_behaviour = merge 5 | 6 | [ssh_connection] 7 | ssh_args = -o ControlMaster=auto -o ControlPersist=600s -o ControlPath="~/.ansible/cp/ansible-ssh-%h-%p-%r" 8 | pipelining = True 9 | -------------------------------------------------------------------------------- /swizzle/examples/calico.yml: -------------------------------------------------------------------------------- 1
| --- 2 | # This is an example of an inventory file that can be used with provision.py 3 | # It contains jinja variables that are substituted by provision.py 4 | all: 5 | vars: 6 | docker_debian_version: "18.03.1~ce~3-0~ubuntu" 7 | {% if vagrant_provider == 'libvirt' %} 8 | kubernetes_common_primary_interface: eth0 9 | etcd_interface: eth0 10 | {% elif vagrant_provider == 'virtualbox' %} 11 | kubernetes_common_primary_interface: eth1 12 | etcd_interface: eth1 13 | test_loadbalancer_interface: eth1 14 | {% endif %} 15 | kubernetes_common_api_ip: "{{ loadbalancer_ip }}" 16 | kubernetes_common_api_fqdn: "k8s.example.com" 17 | kubernetes_common_manage_etc_hosts: True 18 | kubernetes_cni_plugin: calico 19 | kubernetes_cni_calico_manifest_mods: 20 | - conditions: 21 | - expression: kind 22 | value: DaemonSet 23 | modifications: 24 | - expression: "spec.template.spec.containers[?(@.name == 'calico-node')].env[?(@.name == 'CALICO_IPV4POOL_CIDR')].value" 25 | value: "172.16.0.0/16" 26 | kubernetes_common_kubeadm_config: 27 | apiServerCertSANs: 28 | - "{{ loadbalancer_ip }}" 29 | networking: 30 | podSubnet: "172.16.0.0/16" 31 | test_loadbalancer_enable: True 32 | -------------------------------------------------------------------------------- /swizzle/examples/canal.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This is an example of an inventory file that can be used with provision.py 3 | # It contains jinja variables that are substituted by provision.py 4 | all: 5 | vars: 6 | docker_debian_version: "18.03.1~ce~3-0~ubuntu" 7 | {% if vagrant_provider == 'libvirt' %} 8 | kubernetes_common_primary_interface: eth0 9 | etcd_interface: eth0 10 | test_loadbalancer_interface: eth0 11 | {% elif vagrant_provider == 'virtualbox' %} 12 | kubernetes_common_primary_interface: eth1 13 | etcd_interface: eth1 14 | test_loadbalancer_interface: eth1 15 | {% endif %} 16 | kubernetes_common_api_ip: "{{ loadbalancer_ip }}" 17 | kubernetes_common_api_fqdn: "k8s.example.com" 18 | kubernetes_common_manage_etc_hosts: True 19 | kubernetes_cni_plugin: canal 20 | kubernetes_cni_canal_manifest_mods: 21 | - conditions: 22 | - expression: metadata.name 23 | value: canal 24 | modifications: 25 | - expression: "spec.template.spec.containers[?(@.name == 'calico-node')].env[?(@.name == 'CALICO_IPV4POOL_CIDR')].value" 26 | value: "172.16.0.0/16" 27 | kubernetes_common_kubeadm_config: 28 | apiServerCertSANs: 29 | - "{{ loadbalancer_ip }}" 30 | networking: 31 | podSubnet: "172.16.0.0/16" # must agree with the CALICO_IPV4POOL_CIDR modification above 32 | test_loadbalancer_enable: True 33 | -------------------------------------------------------------------------------- /swizzle/examples/sample-extra-vars-centos.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubernetes_common_api_fqdn: "loadbalancer.dns.name.here" 3 | kubernetes_cni_plugin: calico 4 | kubernetes_common_kubeadm_config: 5 | networking: 6 | podSubnet: "192.168.0.0/16" 7 | kubernetes_users: 8 | - { user: centos, group: centos, home: /home/centos } 9 | -------------------------------------------------------------------------------- /swizzle/examples/sample-extra-vars-ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | kubernetes_common_api_fqdn: "loadbalancer.dns.name.here" 3 | kubernetes_cni_plugin: calico 4 | kubernetes_common_kubeadm_config: 5 | networking: 6 | podSubnet: "192.168.0.0/16" 7 | kubernetes_users: 8 | - { user: ubuntu, group: ubuntu, home: /home/ubuntu } 9 |
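10 | # A minimal usage sketch (an assumption for illustration, run from the swizzle/ directory; 11 | # the inventory path refers to the sample file shown next and should be adapted to your hosts): 12 | #   ansible-playbook -i examples/sample-inventory.ini install.yml \ 13 | #     --extra-vars "@examples/sample-extra-vars-ubuntu.yml"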
-------------------------------------------------------------------------------- /swizzle/examples/sample-inventory.ini: -------------------------------------------------------------------------------- 1 | [etcd] 2 | 10.1.12.102 3 | 10.1.26.40 4 | 10.1.34.233 5 | 6 | [masters] 7 | 10.1.12.102 8 | 10.1.26.40 9 | 10.1.34.233 10 | 11 | [primary_master] 12 | 10.1.12.102 13 | 14 | [nodes] 15 | 10.1.7.195 16 | 10.1.21.145 17 | 10.1.44.124 18 | -------------------------------------------------------------------------------- /swizzle/install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install ansible prerequisites 3 | import_playbook: ../ansible/pre.yml 4 | tags: 5 | - prereqs 6 | - ansible-prereqs 7 | 8 | - hosts: all 9 | tasks: 10 | - set_fact: 11 | wardroom_action: 'install' 12 | 13 | - name: install test loadbalancer 14 | hosts: loadbalancer 15 | become: yes 16 | pre_tasks: 17 | - setup: 18 | delegate_to: "{{ item }}" 19 | delegate_facts: True 20 | with_items: "{{ groups['masters'] }}" 21 | roles: 22 | - role: common 23 | when: common_enable|bool 24 | - role: docker 25 | when: docker_enable|bool 26 | - role: test_loadbalancer 27 | when: test_loadbalancer_enable|bool == True 28 | 29 | - name: install kubernetes prerequisites 30 | hosts: masters 31 | become: yes 32 | roles: 33 | - role: common 34 | when: common_enable|bool 35 | - role: docker 36 | when: docker_enable|bool 37 | - kubernetes 38 | tags: 39 | - prereqs 40 | - kubernetes-prereqs 41 | 42 | - name: install etcd 43 | hosts: etcd 44 | become: yes 45 | roles: 46 | - role: common 47 | when: common_enable|bool 48 | - role: etcd 49 | 50 | tags: 51 | - etcd 52 | 53 | - name: create kubernetes primary master 54 | hosts: masters 55 | become: yes 56 | roles: 57 | - role: etcd 58 | when: False # we set this to false merely so that we can gather default vars 59 | - role: kubernetes-master 60 | tags: 61 | - kubernetes-master 62 | 63 | - import_playbook: add-nodes.yml 64 | 65 | - name: install CNI 66 | hosts: masters 67 | become: yes 68 | roles: 69 | - kubernetes-cni 70 | tags: 71 | - kubernetes-cni 72 | 73 | - name: setup kubernetes users 74 | hosts: masters 75 | become: yes 76 | roles: 77 | - kubernetes-user 78 | tags: 79 | - kubernetes-user 80 | -------------------------------------------------------------------------------- /swizzle/library/modify_manifest.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018, Craig Tracey 2 | # All Rights Reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the 13 | # License for the specific language governing permissions and limitations under the License. 14 | 15 | import re 16 | import yaml 17 | 18 | from jsonpath_ng.ext import parse 19 | from requests import get 20 | 21 | from ansible.module_utils.basic import AnsibleModule 22 | 23 | 24 | def _check_condition(manifest, condition): 25 | expr = parse(condition['expression']) 26 | matches = expr.find(manifest) 27 | 28 | found = True 29 | for match in matches: # every match must satisfy the condition's regex 30 | if not re.match(condition['value'], match.value): 31 | found = False 32 | break 33 | 34 | return found 35 | 36 | 37 | def _execute_modification(manifest, modification): 38 | expr = parse(modification['expression']) 39 | matches = expr.find(manifest) 40 | 41 | updated = False 42 | for match in matches: # jsonpath-ng updates in place, so compare values before writing 43 | if match.value != modification['value']: 44 | match.full_path.update(manifest, modification['value']) 45 | updated = True 46 | return updated 47 | 48 | 49 | def main(): 50 | 51 | fields_spec = dict( 52 | expression=dict(required=True, type='str'), 53 | value=dict(required=True, type='str'), 54 | ) 55 | 56 | rules_spec = dict( 57 | conditions=dict(default=[], type='list', elements='dict', 58 | options=fields_spec), 59 | modifications=dict(default=[], type='list', elements='dict', 60 | options=fields_spec), 61 | ) 62 | 63 | module = AnsibleModule( 64 | argument_spec=dict( 65 | manifest_url=dict(required=True, type='str'), 66 | rules=dict(type='list', default=[], elements='dict', 67 | options=rules_spec), 68 | output_path=dict(required=True, type='str'), 69 | ) 70 | ) 71 | 72 | changed = False 73 | try: 74 | resp = get(module.params['manifest_url']) 75 | resp.raise_for_status() 76 | 77 | # we may get multi-doc yaml 78 | manifests = list(yaml.safe_load_all(resp.text)) 79 | for manifest in manifests: 80 | for rule in module.params['rules']: 81 | 82 | conditions_met = True 83 | for condition in rule['conditions']: 84 | if not _check_condition(manifest, condition): 85 | conditions_met = False 86 | break 87 | 88 | if not conditions_met: 89 | continue # this rule does not apply to this manifest; try the next rule 90 | 91 | for modification in rule['modifications']: 92 | status = _execute_modification(manifest, modification) 93 | if status: 94 | changed = status 95 | 96 | with open(module.params['output_path'], 'w') as fh: 97 | fh.write(yaml.dump_all(manifests, explicit_start=True)) 98 | 99 | except Exception as e: 100 | module.fail_json(msg=str(e)) 101 | 102 | module.exit_json(changed=changed) 103 | 104 | 105 | if __name__ == '__main__': 106 | main() 107 | -------------------------------------------------------------------------------- /swizzle/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: add node(s) to the cluster 3 | import_playbook: add-nodes.yml 4 | when: wardroom_action == 'add-nodes' 5 | 6 | - name: run cluster install 7 | import_playbook: install.yml 8 | when: wardroom_action == 'install' 9 | 10 | - name: run cluster upgrade 11 | import_playbook: upgrade.yml 12 | when: wardroom_action == 'upgrade' 13 | -------------------------------------------------------------------------------- /swizzle/provision.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2017 Craig Tracey 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License.
5 | # You may obtain a copy of the License at 6 | 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | # See the License for the specific language governing permissions and 13 | # limitations under the License. 14 | 15 | # A simple tool for provisioning Vagrant hosts in parallel. 16 | ### WARNING: there is no error checking and this is not well tested! ### 17 | 18 | import argparse 19 | import jinja2 20 | import os 21 | import pprint 22 | import re 23 | import subprocess 24 | import tempfile 25 | import yaml 26 | 27 | WARDROOM_BOXES = { 28 | 'xenial': 'generic/ubuntu1804', # NB: despite the key, this box is Ubuntu 18.04 29 | 'centos7': 'generic/centos7', 30 | } 31 | 32 | def vagrant_status(): 33 | """ Run `vagrant status` and parse the current vm state """ 34 | node_state = {} 35 | 36 | output = subprocess.check_output(['vagrant', 'status']) 37 | for i, line in enumerate(output.splitlines()): 38 | if i < 2: # skip the header lines of `vagrant status` output 39 | continue 40 | parts = re.split(r'\s+', line) 41 | if len(parts) == 3: 42 | node_state[parts[0]] = parts[1] 43 | elif len(parts) == 4: 44 | node_state[parts[0]] = " ".join(parts[1:3]) 45 | return node_state 46 | 47 | 48 | def vagrant_up(): 49 | """ Bring up the VMs with `vagrant up` """ 50 | subprocess.call(['vagrant', 'up', '--parallel']) 51 | 52 | 53 | def vagrant_ssh_config(ssh_config_path): 54 | """ Write the current ssh config from `vagrant ssh-config` to a file """ 55 | output = subprocess.check_output(['vagrant', 'ssh-config']) 56 | with open(ssh_config_path, 'w') as fh: 57 | fh.write(output) 58 | 59 | 60 | def run_ansible(action, inventory_file, extra_args=[]): 61 | """ Run ansible-playbook via subprocess. 62 | We shell out rather than importing ansible, as ansible is GPL """ 63 | 64 | ssh_tempfile = tempfile.mkstemp() 65 | vagrant_ssh_config(ssh_tempfile[1]) 66 | 67 | run_env = os.environ.copy() 68 | ansible_env = {} 69 | ansible_env['ANSIBLE_CONFIG'] = "ansible.cfg" 70 | ansible_env['ANSIBLE_SSH_ARGS'] = os.getenv('ANSIBLE_SSH_ARGS', '') 71 | ansible_env['ANSIBLE_SSH_ARGS'] += " -F %s" % (ssh_tempfile[1]) 72 | run_env.update(ansible_env) 73 | 74 | playbook = "main.yml" 75 | cmd = [ 76 | "ansible-playbook", 77 | "-i", 78 | inventory_file, 79 | "-e", 80 | "wardroom_action=%s" % action, 81 | playbook, 82 | ] 83 | cmd += extra_args 84 | print "Wardroom ansible environment:\n %s\n" % pprint.pformat(ansible_env) 85 | print "Wardroom ansible command:\n %s\n" % " ".join(cmd) 86 | subprocess.call(cmd, env=run_env) 87 | 88 | 89 | def get_vagrant_provider(): 90 | return os.environ.get('VAGRANT_DEFAULT_PROVIDER', 'virtualbox') 91 | 92 | 93 | def get_loadbalancer_ip(): 94 | 95 | provider = get_vagrant_provider() 96 | if provider == 'virtualbox': 97 | return "10.10.10.3" 98 | 99 | output = subprocess.check_output(['vagrant', 'ssh-config', 'loadbalancer']) 100 | for line in output.split('\n'): 101 | match = re.match(r'\s*HostName\s+(.*)', line) 102 | if match: 103 | return match.groups()[0] 104 | raise Exception("Could not determine loadbalancer IP") 105 | 106 | 107 | def merge_dict(source, destination): 108 | for key, value in source.items(): 109 | if isinstance(value, dict): 110 | node = destination.setdefault(key, {}) 111 | merge_dict(value, node) 112 | else: 113 | destination[key] = value 114 | 115 | return destination 116 | 117 | 118 | def generate_inventory(config, node_state={}): 119 | """ from node_state generate a dynamic ansible inventory.
120 | return temporary inventory file path """ 121 | inventory = { 122 | "loadbalancer": {"hosts": {}}, 123 | "etcd": {"hosts": {}}, 124 | "primary_master": {"hosts": {}}, 125 | "masters": {"hosts": {}}, 126 | "nodes": {"hosts": {}}, 127 | } 128 | for node, state in node_state.items(): 129 | if state == "running": 130 | if node.startswith('master'): 131 | inventory["masters"]["hosts"][node] = {} 132 | inventory["etcd"]["hosts"][node] = {} 133 | elif node.startswith("node"): 134 | inventory["nodes"]["hosts"][node] = {} 135 | elif node.startswith("loadbalancer"): 136 | inventory["loadbalancer"]["hosts"][node] = {} 137 | 138 | inventory['primary_master']["hosts"]["master1"] = {} 139 | 140 | data = None 141 | with open(config, 'rb') as fh: 142 | render_args = { 143 | 'loadbalancer_ip': get_loadbalancer_ip(), 144 | 'vagrant_provider': get_vagrant_provider(), 145 | } 146 | config = jinja2.Template(fh.read()).render(**render_args) 147 | data = yaml.safe_load(config) 148 | 149 | inventory = merge_dict(data, inventory) 150 | 151 | temp_file = tempfile.mkstemp()[1] 152 | with open(temp_file, 'w') as fh: 153 | yaml.dump(inventory, fh) 154 | 155 | print "Running with inventory:\n" 156 | print yaml.dump(inventory) 157 | print 158 | 159 | return temp_file 160 | 161 | 162 | def state_purpose(): 163 | 164 | print "############################################################" 165 | print " provision.py is a tool to help test wardroom playbooks " 166 | print " against Vagrant provisioned infrastructure. It is simply " 167 | print " a wrapper around Vagrant and ansible. All of the Ansible " 168 | print " playbooks may be run against any ssh-enabled hosts. " 169 | print " provision.py is intended for reference purposes. " 170 | print "############################################################" 171 | print 172 | print 173 | 174 | 175 | def main(): 176 | parser = argparse.ArgumentParser() 177 | parser.add_argument('-a', '--action', default='install', 178 | choices=['install', "upgrade", 'add-nodes']) 179 | parser.add_argument('-o', '--os', default='xenial', 180 | choices=WARDROOM_BOXES.keys()) 181 | parser.add_argument('config') 182 | args, extra_args = parser.parse_known_args() 183 | 184 | os.environ["WARDROOM_BOX"] = WARDROOM_BOXES[args.os] 185 | state_purpose() 186 | 187 | node_state = vagrant_status() 188 | 189 | start_vms = False 190 | for node, state in node_state.items(): 191 | if state != 'running': 192 | start_vms = True 193 | break 194 | 195 | if start_vms: 196 | vagrant_up() 197 | 198 | node_state = vagrant_status() 199 | inventory_file = generate_inventory(args.config, node_state) 200 | 201 | run_ansible(args.action, inventory_file, extra_args) 202 | 203 | 204 | if __name__ == '__main__': 205 | main() 206 | -------------------------------------------------------------------------------- /swizzle/upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | tasks: 4 | - set_fact: 5 | wardroom_action: 'upgrade' 6 | 7 | - name: update control plane 8 | hosts: masters 9 | become: yes 10 | roles: 11 | - role: kubernetes 12 | - role: etcd 13 | when: False # we set this to false merely so that we can gather default vars 14 | - role: kubernetes-master 15 | tags: ['upgrade-control-plane'] 16 | 17 | - name: create a kubeadm token 18 | hosts: primary_master 19 | become: yes 20 | tasks: 21 | - name: generate a kubeadm token 22 | command: "/usr/bin/kubeadm token create --config /etc/kubernetes/kubeadm.conf --kubeconfig /etc/kubernetes/admin.conf" 23 | register: generated_token 24 | run_once: True 25 | delegate_to: "{{
groups['primary_master']|first }}" 26 | 27 | - name: update the workers 28 | hosts: nodes 29 | become: yes 30 | roles: 31 | - role: kubernetes 32 | - role: etcd 33 | when: False # we set this to false merely so that we can gather default vars 34 | - role: kubernetes-node 35 | tags: ['upgrade-workers'] 36 | -------------------------------------------------------------------------------- /wardroom/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/vmware-archive/wardroom/617cf124394e9d05df9ba641a1e47fdeaf8342ec/wardroom/__init__.py -------------------------------------------------------------------------------- /wardroom/aws.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import sys 3 | import time 4 | 5 | import boto3 6 | import click 7 | 8 | logger = logging.getLogger(name=__name__) 9 | logger.setLevel(logging.INFO) 10 | handler = logging.StreamHandler(sys.stderr) 11 | logger.addHandler(handler) 12 | 13 | yaml_template = ''' 14 | {}: 15 | '64': {} 16 | '''.strip('\r\n') 17 | 18 | 19 | def copy_to_region(image, src_region, dest_region): 20 | session = boto3.session.Session(region_name=dest_region) 21 | local_client = session.client('ec2') 22 | logger.info("creating image in region {}".format(dest_region)) 23 | resp = local_client.copy_image( 24 | Name=image.name, 25 | SourceImageId=image.image_id, 26 | SourceRegion=src_region, 27 | ) 28 | local_ec2 = session.resource('ec2') 29 | new_image = local_ec2.Image(resp['ImageId']) 30 | 31 | return (new_image, dest_region) 32 | 33 | 34 | def make_public_and_tag(image, region, desc): 35 | while True: 36 | image.load() 37 | if image.state == 'available': 38 | image.modify_attribute( 39 | LaunchPermission={ 40 | 'Add': [{'Group': 'all'}] 41 | } 42 | ) 43 | # Can only modify one attribute at a time 44 | image.modify_attribute(Description={'Value': desc}) 45 | logger.info("region {} ami {} is available".format(region, image.id)) 46 | break 47 | time.sleep(5) 48 | 49 | 50 | def encode_desc(dict_): 51 | return " ".join("{0}={1}".format(*item) for item in dict_.items()) 52 | 53 | 54 | @click.group() 55 | def aws(): 56 | pass 57 | 58 | 59 | @aws.command(name='copy-ami') 60 | @click.option('-r', '--src-region', default='us-east-1', help='AWS Region') 61 | @click.option('-q', '--quiet', is_flag=True) 62 | @click.argument('src_ami') 63 | def copy_ami(src_region, src_ami, quiet): 64 | if quiet: 65 | logger.setLevel(logging.WARN) 66 | 67 | session = boto3.session.Session(region_name=src_region) 68 | client = session.client('ec2') 69 | 70 | dest_regions = [region['RegionName'] for region in client.describe_regions()['Regions'] 71 | if region['RegionName'] != src_region 72 | ] 73 | dest_regions.sort() 74 | 75 | logger.info("detected {} regions".format(len(dest_regions))) 76 | 77 | image = session.resource('ec2').Image(src_ami) 78 | description = encode_desc({i['Key']: i['Value'] for i in image.tags or []}) 79 | 80 | # copy to all regions 81 | images = [copy_to_region(image, src_region, region) for region in dest_regions] 82 | # Add the original 83 | images.append((image, src_region)) 84 | 85 | # print out the YAML 86 | for (image, region) in images: 87 | print(yaml_template.format(region, image.id)) 88 | 89 | logger.info("waiting for all images to be available.
In the mean time," 90 | "that YAML can be pasted into the quickstart template.") 91 | # wait for all images to be available 92 | for (image, region) in images: 93 | make_public_and_tag(image, region, description) 94 | -------------------------------------------------------------------------------- /wardroom/cli.py: -------------------------------------------------------------------------------- 1 | import click 2 | 3 | from wardroom.aws import aws 4 | 5 | 6 | @click.group() 7 | def cli(): 8 | pass 9 | 10 | 11 | cli.add_command(aws) 12 | --------------------------------------------------------------------------------