├── .gitmodules ├── CHANGELOG.md ├── LICENSE ├── README.md ├── Vagrant ├── .gitignore ├── README.md ├── Vagrantfile ├── ansible.cfg ├── deployments │ ├── metallb │ │ ├── deploy.yaml │ │ └── nginx-deployment.yaml │ └── nfs-pvs │ │ ├── busybox-pv-nfs.yaml │ │ ├── class.yaml │ │ ├── deployment.yaml │ │ ├── pvc.yaml │ │ └── rbac.yaml ├── environment.yml ├── hosts ├── playbooks │ ├── bootstrap.yml │ ├── playbook.yml │ ├── prep_host_vars.yml │ └── reset_cluster.yml ├── requirements.yml ├── roles │ └── ansible-k8s └── scripts │ ├── bootstrap.sh │ ├── cleanup.bat │ ├── cleanup.sh │ └── prep.sh ├── defaults └── main.yml ├── handlers └── main.yml ├── meta └── main.yml ├── requirements.yml ├── tasks ├── cluster_services.yml ├── cluster_summary.yml ├── dashboard.yml ├── debian.yml ├── helm.yml ├── init_cluster.yml ├── join_cluster.yml ├── main.yml ├── network.yml ├── pods.yml ├── redhat.yml ├── reset_cluster.yml ├── services.yml ├── set_facts.yml ├── swap.yml └── users.yml ├── templates ├── etc │ └── apt │ │ └── preferences.d │ │ └── k8s.pref.j2 ├── k8s_cluster_ip.j2 ├── k8s_dashboard.j2 ├── k8s_pods.j2 ├── k8s_services.j2 ├── k8s_token.j2 └── tiller-rbac-config.yaml.j2 ├── tests ├── inventory └── test.yml └── vars └── main.yml /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "Vagrant/roles/ansible-change-hostname"] 2 | path = Vagrant/roles/ansible-change-hostname 3 | url = https://www.github.com/mrlesmithjr/ansible-change-hostname.git 4 | [submodule "Vagrant/roles/ansible-docker"] 5 | path = Vagrant/roles/ansible-docker 6 | url = https://www.github.com/mrlesmithjr/ansible-docker.git 7 | [submodule "Vagrant/roles/ansible-etc-hosts"] 8 | path = Vagrant/roles/ansible-etc-hosts 9 | url = https://www.github.com/mrlesmithjr/ansible-etc-hosts.git 10 | [submodule "Vagrant/roles/ansible-nfs-client"] 11 | path = Vagrant/roles/ansible-nfs-client 12 | url = https://github.com/mrlesmithjr/ansible-nfs-client.git 13 | [submodule "Vagrant/roles/ansible-nfs-server"] 14 | path = Vagrant/roles/ansible-nfs-server 15 | url = https://github.com/mrlesmithjr/ansible-nfs-server.git 16 | [submodule "Vagrant/roles/ansible-ntp"] 17 | path = Vagrant/roles/ansible-ntp 18 | url = https://www.github.com/mrlesmithjr/ansible-ntp.git 19 | [submodule "Vagrant/roles/ansible-timezone"] 20 | path = Vagrant/roles/ansible-timezone 21 | url = https://www.github.com/mrlesmithjr/ansible-timezone.git 22 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | commit a1fd26aed801dff0d95f0f71d68ff4e0f736dae8 2 | Author: Larry Smith Jr 3 | Date: Wed May 6 13:57:06 2020 -0400 4 | 5 | Added ability to skip networking CNI install 6 | 7 | This may have some value in whether or not the need to add manually 8 | (possibly) the network constructs. May need to revisit. 
9 | 10 | commit da7741711507f7b8a39314e9082dc4437a4ecf82 11 | Author: Larry Smith Jr 12 | Date: Mon Apr 27 17:03:35 2020 -0400 13 | 14 | Updated repo usage and info 15 | 16 | commit df096446b327ac8ed4317b50db1ab9fefe6422ef 17 | Author: Larry Smith Jr 18 | Date: Mon Apr 27 17:03:22 2020 -0400 19 | 20 | Fixed helm version check 21 | 22 | commit e54a011fe17dec210355cf4a36bf52f6de6fb2d1 23 | Author: Larry Smith Jr 24 | Date: Mon Apr 27 17:02:17 2020 -0400 25 | 26 | Fixed tags for various tasks 27 | 28 | commit 0d60bd406d886fdd918545b4eacc39f01de3e22b 29 | Author: Larry Smith Jr 30 | Date: Mon Apr 27 17:00:05 2020 -0400 31 | 32 | Updated link to dashboard 33 | 34 | commit 75f5eabc33210e7513a9a858fe08ec897c977c07 35 | Author: Larry Smith Jr 36 | Date: Mon Apr 27 15:28:54 2020 -0400 37 | 38 | Cleaned up repo info a bit 39 | 40 | Still needs more 41 | 42 | commit 79e47c45d4fe0b3203a61cc453b3a06d9a261d81 43 | Author: Larry Smith Jr 44 | Date: Mon Apr 27 15:28:28 2020 -0400 45 | 46 | Added NFS server/client roles to requirements 47 | 48 | commit 65390c0b641d52211d9da23c229735f55654e37e 49 | Author: Larry Smith Jr 50 | Date: Mon Apr 27 15:28:11 2020 -0400 51 | 52 | Tags should not have bool checks 53 | 54 | commit 112e801a9e88374105db0a761dfc5edfd8c2c97d 55 | Author: Larry Smith Jr 56 | Date: Mon Apr 27 15:27:53 2020 -0400 57 | 58 | Added Ubuntu 18.04 support 59 | 60 | commit 4c97462d68abae66560c7d44865557ee93d89e21 61 | Author: Larry Smith Jr 62 | Date: Mon Apr 27 15:27:34 2020 -0400 63 | 64 | Changed K8s version to 1.17.5 65 | 66 | - Had issues going to 1.18.0 for now 67 | 68 | commit 6f7bcbc3a157e12a4172f28ab1e58420c2dc57e6 69 | Author: Larry Smith Jr 70 | Date: Mon Apr 27 15:26:54 2020 -0400 71 | 72 | Changed Helm install from v2 to v3 73 | 74 | commit 293e091849815be2e29c8bc948d13e4fa4cefc8d 75 | Author: Larry Smith Jr 76 | Date: Mon Apr 27 15:26:10 2020 -0400 77 | 78 | Cleaned up lingering commented out tasks 79 | 80 | commit f971b4fda68e9a77b20d1904a69d932ba4680b5c 81 | Author: Larry Smith Jr 82 | Date: Mon Apr 27 15:24:59 2020 -0400 83 | 84 | Changed with_items to loop 85 | 86 | commit 83a59aef00ae689338270141e887a55cf5b913f0 87 | Author: Larry Smith Jr 88 | Date: Mon Apr 27 15:20:49 2020 -0400 89 | 90 | Updated Vagrant testing for role 91 | 92 | This is now updated and tested for Vagrant 93 | - Spins up a 3-node cluster 94 | - Spins up a nfs server for persistent volume testing 95 | - MetalLB deployment is semi functional 96 | 97 | commit 030779ae02e7adab5131b60c9d92f369780b96c9 98 | Author: Larry Smith Jr 99 | Date: Sun Apr 26 20:42:23 2020 -0400 100 | 101 | Updated additional roles for Vagrant testing 102 | 103 | commit 6f78cb8a75914ad60e2fb9aad539836cd3544381 104 | Author: Larry Smith Jr 105 | Date: Sun Apr 26 20:41:23 2020 -0400 106 | 107 | Moved remaining playbooks to the playbooks dir 108 | 109 | commit 4ac58837d082a5948ace0fa7ff9215ffb41db73f 110 | Author: Larry Smith Jr 111 | Date: Fri Apr 24 10:00:36 2020 -0400 112 | 113 | Reorganized Vagrant testing structure 114 | 115 | commit 36624a54c1449c0236ce4fe50e7d76831241f2cf 116 | Author: Larry Smith Jr 117 | Date: Thu Apr 23 12:38:53 2020 -0400 118 | 119 | Updated roles to latest 120 | 121 | commit 30df0c22189275cc8846a96ac5edb017038ee04e 122 | Author: Larry Smith Jr 123 | Date: Mon Sep 30 17:49:29 2019 -0400 124 | 125 | Cleaned up code 126 | 127 | - Cleaned up formatting 128 | - Cleaned up conditionals 129 | 130 | commit b2fe27b6728e17eac5aabdd9d37daaeaa9e215b9 131 | Author: Larry Smith Jr 132 | Date: Thu Sep 5 15:57:03 2019 -0400 133 | 
134 | Updated Ansible roles 135 | 136 | commit 60e9e4df93ae4de5137f85c5c5e6df88858ab09d 137 | Author: Larry Smith Jr 138 | Date: Thu Sep 5 15:45:28 2019 -0400 139 | 140 | Updated version to 1.13.5 141 | 142 | commit 11e140daa06aa04c1ff52b5a397da01a8de97340 143 | Author: Larry Smith Jr 144 | Date: Thu Sep 5 15:44:59 2019 -0400 145 | 146 | Increased number of vCPU to 2 as pre-flight 147 | 148 | commit 85ff1e878dfbe0690233e00286b8c2cff7e9b5c0 149 | Author: Larry Smith Jr 150 | Date: Thu Sep 5 15:44:40 2019 -0400 151 | 152 | Updated Ansible roles 153 | 154 | commit 0883d7439f6f8cf44d4b9eb1d864fc41d9716abe 155 | Author: Larry Smith Jr 156 | Date: Thu Sep 5 14:58:27 2019 -0400 157 | 158 | Updated version to 1.12.7 159 | 160 | commit 30faa2a2c3ce661a9c00312921050c06339492b4 161 | Author: Larry Smith Jr 162 | Date: Thu Sep 5 14:02:20 2019 -0400 163 | 164 | Changed Supported Version 165 | 166 | - K8s (1.11.10) 167 | - K8s-CNI (0.7.5) 168 | 169 | commit 7192a35a75bef6313d3b3212d2d92997f30745a1 170 | Author: Larry Smith Jr 171 | Date: Thu Sep 5 00:12:52 2019 -0400 172 | 173 | Cleaned up apt tasks - Loops 174 | 175 | commit ca67aec4bf8f84e996206c733a9f4517946a69a0 176 | Author: Larry Smith Jr 177 | Date: Thu Sep 5 00:12:13 2019 -0400 178 | 179 | Cleaned up and added host_vars 180 | 181 | commit cb2a5bd1a8f6158e7f551d53fee53e6ae8d7761e 182 | Author: Larry Smith Jr 183 | Date: Thu Sep 5 00:12:00 2019 -0400 184 | 185 | Added pinned version of CNI 186 | 187 | commit 9a6eb80936100cdcf7219c4b22966147efebba22 188 | Author: Larry Smith Jr 189 | Date: Thu Sep 13 22:05:09 2018 -0400 190 | 191 | Added Helm install flag for true to install 192 | 193 | commit 7f71c682673ce72bb56bff90c5236e926c8d59c7 194 | Author: Larry Smith Jr 195 | Date: Thu Sep 13 22:04:45 2018 -0400 196 | 197 | Updated Helm version and K8s version 198 | 199 | commit 19ff163692a726e04e17451ebce961e8168f6c49 200 | Author: Larry Smith Jr 201 | Date: Thu May 10 15:02:02 2018 -0400 202 | 203 | Added playbook to reset k8s cluster 204 | 205 | commit 9322eecc24da386405ed7c6b5644ae5af8fae4e4 206 | Author: Larry Smith Jr 207 | Date: Thu May 10 15:01:45 2018 -0400 208 | 209 | Changed Helm version 210 | 211 | commit a72ae0331294d5fda4a246eab747769df30880b1 212 | Author: Larry Smith Jr 213 | Date: Thu May 10 14:08:53 2018 -0400 214 | 215 | Resolved Helm install when choosing to install as part of provisioning 216 | 217 | commit 098363b687361f315ab387eab73c4fc4493fa2c1 218 | Author: Larry Smith Jr 219 | Date: Thu May 3 08:11:43 2018 -0400 220 | 221 | Implemented ability to pin a specific version of K8s to install. 222 | 223 | Resolves #11 224 | 225 | commit c05d0d97dd9a72f72f23943d25f1212b714f83f4 226 | Author: Larry Smith Jr 227 | Date: Thu Feb 15 21:20:09 2018 -0500 228 | 229 | Added check for CoreDNS in addition to kube-dns 230 | 231 | commit fcaf1a5022f3ddc5b504509e26cfe3f40bfcd9c1 232 | Author: Larry Smith Jr 233 | Date: Mon Feb 12 01:06:48 2018 -0500 234 | 235 | Added functionality to install helm for architecture other than amd64 236 | 237 | commit aef065200a13fd67bdce74e27982a3075031b321 238 | Author: Larry Smith Jr 239 | Date: Fri Feb 9 21:40:25 2018 -0500 240 | 241 | Updated and fixed Weave network install 242 | 243 | commit 298d24b842e6d9366022ce3ab463af6e7e786178 244 | Author: Larry Smith Jr 245 | Date: Wed Feb 7 14:16:23 2018 -0500 246 | 247 | Fixed issue with capturing token post initial cluster provisioning. 
248 | 249 | commit 9b1ea144de9f33f5d583429a773e4d079ebc8d1e 250 | Author: Larry Smith Jr 251 | Date: Tue Feb 6 12:03:48 2018 -0500 252 | 253 | Added become: true due to issue when shell does not find route command 254 | 255 | commit 1cad3861149fa70d2e7e9219a93937593c616f7f 256 | Author: Larry Smith Jr 257 | Date: Sat Jan 6 01:47:36 2018 -0500 258 | 259 | Fixed subsequent execution issue with properly displaying dashboard info 260 | 261 | commit 3ff333c89fd0b20ada5864d3e7a6f74f470f3b78 262 | Author: Larry Smith Jr 263 | Date: Wed Jan 3 01:10:28 2018 -0500 264 | 265 | Updated repo/usage info/examples 266 | 267 | commit f499e03e18a52540e3578949ad6a8e29b8113dd4 268 | Author: Larry Smith Jr 269 | Date: Wed Jan 3 01:10:16 2018 -0500 270 | 271 | Fixed missing tag for dashboard 272 | 273 | commit ee56c0952fdb334855083d8cb33fbf7fb3bd7f1e 274 | Author: Larry Smith Jr 275 | Date: Wed Jan 3 01:09:30 2018 -0500 276 | 277 | Resolves #10 278 | 279 | commit ae43f40549e0a63f1e48a5ef4b1127b6ff829697 280 | Author: Larry Smith Jr 281 | Date: Sun Dec 31 09:31:17 2017 -0500 282 | 283 | Added back no logging on joining cluster 284 | 285 | I disabled this for debugging of issue #10 but forgot to re-enable it on 286 | previous commit. 287 | 288 | commit 20c84cca73f41726adbbc63a38a3642f143ddc6e 289 | Author: Larry Smith Jr 290 | Date: Sun Dec 31 09:25:21 2017 -0500 291 | 292 | Cleaned up formatting of vars/tasks 293 | 294 | commit 7f236a378ddb75b723d76547779d2e4c43ebbf59 295 | Author: Larry Smith Jr 296 | Date: Sun Dec 31 09:23:48 2017 -0500 297 | 298 | Updated roles 299 | 300 | Signed-off-by: Larry Smith Jr 301 | 302 | commit b40a6bfb208d9efeabd7643b3cd4d2382cd082cb 303 | Author: Larry Smith Jr 304 | Date: Sun Dec 31 09:23:31 2017 -0500 305 | 306 | Fixed Docker install version 307 | 308 | commit 403814f9799e5833314d121f4ca31582b425e85a 309 | Author: Larry Smith Jr 310 | Date: Sun Dec 31 09:23:09 2017 -0500 311 | 312 | Updated/cleaned up repo info 313 | 314 | commit dbd85d12f9ce2f12f4e5eea891173047eb41614e 315 | Author: Larry Smith Jr 316 | Date: Thu Dec 21 07:29:33 2017 -0500 317 | 318 | Updated licensing 319 | 320 | commit 27825e13cdb1e32684779df70d887adaf7c46588 321 | Author: Larry Smith Jr 322 | Date: Thu Dec 21 07:26:01 2017 -0500 323 | 324 | Create LICENSE 325 | 326 | commit 1bb1ae761facf4e23ca17c08f90f3cae450edf90 327 | Author: Larry Smith Jr 328 | Date: Wed Nov 22 10:57:21 2017 -0500 329 | 330 | Updated roles for Vagrant 331 | 332 | Signed-off-by: Larry Smith Jr 333 | 334 | commit 0a83c8cc4fa6edc355636e6ef857c6e04929a198 335 | Author: Larry Smith Jr 336 | Date: Wed Nov 22 10:55:34 2017 -0500 337 | 338 | Cleaned up k8s role for Vagrant 339 | 340 | Signed-off-by: Larry Smith Jr 341 | 342 | commit 41a40db2593844dca3bdc117ae7f9b7dd4bd6a49 343 | Author: Larry Smith Jr 344 | Date: Wed Nov 22 10:47:16 2017 -0500 345 | 346 | Fixed dashboard install and issue with creating cluster 347 | 348 | Cluster was failing to initialize because of swap being enabled on 349 | hosts. 
350 | 351 | commit f156a182bbe947b42ab7140d7ede5b986c65603c 352 | Author: Larry Smith Jr 353 | Date: Thu Apr 27 15:53:04 2017 -0400 354 | 355 | Fixed dashboard retrieval on provision 356 | 357 | Signed-off-by: Larry Smith Jr 358 | 359 | commit 43b17ea8c922304170b0212481b0375f19a8348d 360 | Author: Larry Smith Jr 361 | Date: Wed Apr 26 08:10:22 2017 -0400 362 | 363 | Fixed not being able to report dashboard link 364 | 365 | Signed-off-by: Larry Smith Jr 366 | 367 | commit 06a9a2aa8c1c48610a1e099ce9c7372b5ed62acb 368 | Author: Larry Smith Jr 369 | Date: Tue Apr 25 22:40:47 2017 -0400 370 | 371 | Updated roles and playbook 372 | 373 | Signed-off-by: Larry Smith Jr 374 | 375 | commit 032de8fca4722d04766b00e5babf93454fa65576 376 | Author: Larry Smith Jr 377 | Date: Tue Apr 25 22:38:44 2017 -0400 378 | 379 | Updated and added more functionality 380 | 381 | Signed-off-by: Larry Smith Jr 382 | 383 | commit 840cdef1cf370fdf5798f91ba394d2bcea022235 384 | Author: Larry Smith Jr 385 | Date: Tue Apr 25 17:19:23 2017 -0400 386 | 387 | Added ability to capture running pods and info on them presented in JSON format 388 | 389 | Signed-off-by: Larry Smith Jr 390 | 391 | commit f60a090da9dc191c6032888a6c86ac4ad87f8909 392 | Author: Larry Smith Jr 393 | Date: Tue Apr 25 13:07:38 2017 -0400 394 | 395 | Added Helm install ability 396 | 397 | Signed-off-by: Larry Smith Jr 398 | 399 | commit c36cfffca143dd2b49d2f860fa7a87d0bd4f1a52 400 | Author: Larry Smith Jr 401 | Date: Tue Apr 25 10:48:05 2017 -0400 402 | 403 | Fixed Weave-Net link 404 | 405 | Signed-off-by: Larry Smith Jr 406 | 407 | commit 4cc8c19e0a00ff930f333811781bb48add592d3f 408 | Author: Larry Smith Jr 409 | Date: Tue Apr 25 10:42:49 2017 -0400 410 | 411 | Added info on resetting cluster 412 | 413 | Signed-off-by: Larry Smith Jr 414 | 415 | commit 4fd7af3ea5111055471966d11b63417687a2f7b4 416 | Author: Larry Smith Jr 417 | Date: Tue Apr 25 10:33:40 2017 -0400 418 | 419 | Updated with Vagrant environment 420 | 421 | Signed-off-by: Larry Smith Jr 422 | 423 | commit 2f5e24030133dcf58fc4b0423c0f2918066f38d4 424 | Author: Larry Smith Jr 425 | Date: Tue Apr 25 10:11:34 2017 -0400 426 | 427 | first commit 428 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2017 Larry Smith Jr. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ansible-k8s
2 |
3 | An [Ansible](https://www.ansible.com) role to deploy a
4 | [Kubernetes](https://kubernetes.io) (K8s) cluster.
5 |
6 | - [Weave Net](https://www.weave.works/docs/net/latest/kube-addon/) is currently
7 | used as the network overlay
8 |
9 | ## Requirements
10 |
11 | Install the additional required [Ansible](https://www.ansible.com) roles:
12 |
13 | ```bash
14 | sudo ansible-galaxy install -r requirements.yml
15 | ```
16 |
17 | ## Role Variables
18 |
19 | [defaults/main.yml](defaults/main.yml)
20 |
21 | ## Dependencies
22 |
23 | None
24 |
25 | ## Example Playbook
26 |
27 | [Vagrant/playbook.yml](Vagrant/playbook.yml)
28 |
29 | ## Vagrant
30 |
31 | To spin up a test cluster using Vagrant, check out the [README](Vagrant/README.md).
32 |
33 | ## License
34 |
35 | MIT
36 |
37 | ## Author Information
38 |
39 | Larry Smith Jr.
40 |
41 | - [EverythingShouldBeVirtual](http://everythingshouldbevirtual.com)
42 | - [@mrlesmithjr](https://www.twitter.com/mrlesmithjr)
43 | - <mrlesmithjr@gmail.com>
44 |
--------------------------------------------------------------------------------
/Vagrant/.gitignore:
--------------------------------------------------------------------------------
1 | *.retry
2 | .vagrant/
3 | host_vars/
--------------------------------------------------------------------------------
/Vagrant/README.md:
--------------------------------------------------------------------------------
1 | # Vagrant
2 |
3 | - Requirements
4 |   - [Ansible](https://www.ansible.com)
5 |   - [Vagrant](https://www.vagrantup.com/)
6 |   - [VirtualBox](https://www.virtualbox.org/)
7 |
8 | Included in the `Vagrant` folder is a testing environment with `4` nodes:
9 |
10 | - `k8s-master` - K8s Cluster Master (`192.168.250.10`)
11 | - `k8s-worker-01` - K8s Cluster Member (`192.168.250.11`)
12 | - `k8s-worker-02` - K8s Cluster Member (`192.168.250.12`)
13 | - `k8s-nfs-server` - K8s NFS Server (`192.168.250.13`)
14 |
15 | You can easily spin this up for learning purposes:
16 |
17 | ```bash
18 | cd Vagrant/
19 | vagrant up
20 | ```
21 |
22 | Once the environment spins up, you will see the following:
23 |
24 | ```bash
25 | TASK [ansible-k8s : cluster_summary | Displaying Cluster Nodes] ****************
26 | ok: [k8s-master] => {
27 |     "_k8s_cluster_nodes['stdout_lines']": [
28 |         "NAME            STATUS     ROLES    AGE    VERSION",
29 |         "k8s-master      Ready      master   103s   v1.17.5",
30 |         "k8s-worker-01   NotReady   <none>   33s    v1.17.5",
31 |         "k8s-worker-02   NotReady   <none>   33s    v1.17.5"
32 |     ]
33 | }
34 | skipping: [k8s-worker-01]
35 | skipping: [k8s-worker-02]
36 | ```
37 |
38 | Do not worry about the above, as the additional nodes simply did not finish joining
39 | the cluster before the provisioning completed. You can quickly validate that
40 | the additional nodes are up and `Ready` by running:
41 |
42 | ```bash
43 | ansible-playbook -i hosts playbook.yml --tags k8s_cluster_nodes
44 | ```
45 |
46 | The above `NotReady` status should no longer be an issue, as we now wait for all nodes
47 | in the cluster to become `Ready`. However, there may be an instance where this
48 | does not work as expected; if so, re-run the validation above until the summary reports all nodes `Ready`:
49 |
50 | ```bash
51 | TASK [ansible-k8s : cluster_summary | Displaying Cluster Nodes] **********************************************************************************************
52 | ok: [k8s-master] => {
53 |     "_k8s_cluster_nodes['stdout_lines']": [
54 |         "NAME            STATUS   ROLES    AGE   VERSION",
55 |         "k8s-master      Ready    master   14m   v1.17.5",
56 |         "k8s-worker-01   Ready    <none>   13m   v1.17.5",
57 |         "k8s-worker-02   Ready    <none>   13m   v1.17.5"
58 |     ]
59 | }
60 | skipping: [k8s-worker-01]
61 | skipping: [k8s-worker-02]
62 | ```
63 |
64 | Once the cluster is up, `ssh` to `k8s-master` and begin playing:
65 |
66 | ```bash
67 | vagrant ssh k8s-master
68 | ```
69 |
70 | When you are all done using the environment, you can easily tear it down:
71 |
72 | ```bash
73 | ./cleanup.sh
74 |
75 | ==> node2: Forcing shutdown of VM...
76 | ==> node2: Destroying VM and associated drives...
77 | ==> node1: Forcing shutdown of VM...
78 | ==> node1: Destroying VM and associated drives...
79 | ==> node0: Forcing shutdown of VM...
80 | ==> node0: Destroying VM and associated drives...
81 | ```
82 |
83 | ## Additional Info
84 |
85 | ### Reset `K8s` cluster
86 |
87 | ```bash
88 | ansible-playbook -i hosts playbook.yml --tags k8s_reset -e "k8s_reset_cluster=true"
89 | ```
90 |
91 | ### Get a list of pods and information on them
92 |
93 | ```bash
94 | ansible-playbook -i hosts playbook.yml --tags k8s_pods
95 | ```
96 |
97 | ```bash
98 | TASK [ansible-k8s : pods | Displaying Pods In All Namespaces] ************************************************************************************************
99 | ok: [k8s-master] => {
100 |     "msg": {
101 |         "containers": [
102 |             {
103 |                 "hostIP": "192.168.250.10",
104 |                 "image": "k8s.gcr.io/coredns:1.6.5",
105 |                 "name": "coredns",
106 |                 "nodeName": "k8s-master",
107 |                 "phase": "Running",
108 |                 "podIP": "10.32.0.2",
109 |                 "resources": {
110 |                     "limits": {
111 |                         "memory": "170Mi"
112 |                     },
113 |                     "requests": {
114 |                         "cpu": "100m",
115 |                         "memory": "70Mi"
116 |                     }
117 |                 }
118 |             },
119 |             {
120 |                 "hostIP": "192.168.250.10",
121 |                 "image": "k8s.gcr.io/coredns:1.6.5",
122 |                 "name": "coredns",
123 |                 "nodeName": "k8s-master",
124 |                 "phase": "Running",
125 |                 "podIP": "10.32.0.3",
126 |                 "resources": {
127 |                     "limits": {
128 |                         "memory": "170Mi"
129 |                     },
130 |                     "requests": {
131 |                         "cpu": "100m",
132 |                         "memory": "70Mi"
133 |                     }
134 |                 }
135 |             },
136 |             {
137 |                 "hostIP": "192.168.250.10",
138 |                 "image": "k8s.gcr.io/etcd:3.4.3-0",
139 |                 "name": "etcd",
140 |                 "nodeName": "k8s-master",
141 |                 "phase": "Running",
142 |                 "podIP": "192.168.250.10",
143 |                 "resources": {}
144 |             },
145 |             {
146 |                 "hostIP": "192.168.250.10",
147 |                 "image": "k8s.gcr.io/kube-apiserver:v1.17.5",
148 |                 "name": "kube-apiserver",
149 |                 "nodeName": "k8s-master",
150 |                 "phase": "Running",
151 |                 "podIP": "192.168.250.10",
152 |                 "resources": {
153 |                     "requests": {
154 |                         "cpu": "250m"
155 |                     }
156 |                 }
157 |             },
158 |             {
159 |                 "hostIP": "192.168.250.10",
160 |                 "image": "k8s.gcr.io/kube-controller-manager:v1.17.5",
161 |                 "name": "kube-controller-manager",
162 |                 "nodeName": "k8s-master",
163 |                 "phase": "Running",
164 |                 "podIP": "192.168.250.10",
165 |                 "resources": {
166 |                     "requests": {
167 |                         "cpu": "200m"
168 |                     }
169 |                 }
170 |             },
171 |             {
172 |                 "hostIP": "192.168.250.11",
173 |                 "image": "k8s.gcr.io/kube-proxy:v1.17.5",
174 |                 "name": "kube-proxy",
175 |                 "nodeName": "k8s-worker-01",
176 |                 "phase": "Running",
177 |                 "podIP": "192.168.250.11",
178 |                 "resources": {}
179 |             },
180 |             {
181 |                 "hostIP": "192.168.250.12",
182 |                 "image": "k8s.gcr.io/kube-proxy:v1.17.5",
183 |                 "name": "kube-proxy",
"nodeName": "k8s-worker-02", 185 | "phase": "Running", 186 | "podIP": "192.168.250.12", 187 | "resources": {} 188 | }, 189 | { 190 | "hostIP": "192.168.250.10", 191 | "image": "k8s.gcr.io/kube-proxy:v1.17.5", 192 | "name": "kube-proxy", 193 | "nodeName": "k8s-master", 194 | "phase": "Running", 195 | "podIP": "192.168.250.10", 196 | "resources": {} 197 | }, 198 | { 199 | "hostIP": "192.168.250.10", 200 | "image": "k8s.gcr.io/kube-scheduler:v1.17.5", 201 | "name": "kube-scheduler", 202 | "nodeName": "k8s-master", 203 | "phase": "Running", 204 | "podIP": "192.168.250.10", 205 | "resources": { 206 | "requests": { 207 | "cpu": "100m" 208 | } 209 | } 210 | }, 211 | { 212 | "hostIP": "192.168.250.11", 213 | "image": "k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1", 214 | "name": "kubernetes-dashboard", 215 | "nodeName": "k8s-worker-01", 216 | "phase": "Running", 217 | "podIP": "10.44.0.1", 218 | "resources": {} 219 | }, 220 | { 221 | "hostIP": "192.168.250.10", 222 | "image": "docker.io/weaveworks/weave-kube:2.6.2", 223 | "name": "weave", 224 | "nodeName": "k8s-master", 225 | "phase": "Running", 226 | "podIP": "192.168.250.10", 227 | "resources": { 228 | "requests": { 229 | "cpu": "10m" 230 | } 231 | } 232 | }, 233 | { 234 | "hostIP": "192.168.250.11", 235 | "image": "docker.io/weaveworks/weave-kube:2.6.2", 236 | "name": "weave", 237 | "nodeName": "k8s-worker-01", 238 | "phase": "Running", 239 | "podIP": "192.168.250.11", 240 | "resources": { 241 | "requests": { 242 | "cpu": "10m" 243 | } 244 | } 245 | }, 246 | { 247 | "hostIP": "192.168.250.12", 248 | "image": "docker.io/weaveworks/weave-kube:2.6.2", 249 | "name": "weave", 250 | "nodeName": "k8s-worker-02", 251 | "phase": "Running", 252 | "podIP": "192.168.250.12", 253 | "resources": { 254 | "requests": { 255 | "cpu": "10m" 256 | } 257 | } 258 | } 259 | ] 260 | } 261 | } 262 | ``` 263 | 264 | ## Kubernetes Dashboard 265 | 266 | As part of the provisioning we have added the Kubernetes Dashboard. Therefore, 267 | in order to get access to it you must perform a few extra things. 268 | 269 | ### Create Dashboard Service Account 270 | 271 | First, create the dashboard service account: 272 | 273 | ```bash 274 | kubectl create serviceaccount dashboard-admin-sa 275 | ``` 276 | 277 | Next bind the dashboard-admin-service-account service account to the 278 | cluster-admin role: 279 | 280 | ```bash 281 | kubectl create clusterrolebinding dashboard-admin-sa \ 282 | --clusterrole=cluster-admin --serviceaccount=default:dashboard-admin-sa 283 | ``` 284 | 285 | Next we need to obtain the secrets: 286 | 287 | ```bash 288 | kubectl get secrets 289 | ... 290 | NAME TYPE DATA AGE 291 | dashboard-admin-sa-token-lrssx kubernetes.io/service-account-token 3 23s 292 | default-token-j76qj kubernetes.io/service-account-token 3 55m 293 | ``` 294 | 295 | Now we need to describe to get the access token: 296 | 297 | ```bash 298 | kubectl describe secret dashboard-admin-sa-token-lrssx 299 | ... 
300 | Name:         dashboard-admin-sa-token-lrssx
301 | Namespace:    default
302 | Labels:       <none>
303 | Annotations:  kubernetes.io/service-account.name: dashboard-admin-sa
304 |               kubernetes.io/service-account.uid: e946b610-1097-457e-b06d-af54de62ed06
305 |
306 | Type:  kubernetes.io/service-account-token
307 |
308 | Data
309 | ====
310 | ca.crt:     1025 bytes
311 | namespace:  7 bytes
312 | token:      eyJhbGciOiJSUzI1NiIsImtpZCI6ImRwYTJjMkR6dXFhX3BwLTZweVBUSFFkUThWUHl1TTNQRWdvTEU2MXVHd3MifQ.eyJpc3MiOiJrdWJlcm5ldGVzL3NlcnZpY2VhY2NvdW50Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9uYW1lc3BhY2UiOiJkZWZhdWx0Iiwia3ViZXJuZXRlcy5pby9zZXJ2aWNlYWNjb3VudC9zZWNyZXQubmFtZSI6ImRhc2hib2FyZC1hZG1pbi1zYS10b2tlbi1scnNzeCIsImt1YmVybmV0ZXMuaW8vc2VydmljZWFjY291bnQvc2VydmljZS1hY2NvdW50Lm5hbWUiOiJkYXNoYm9hcmQtYWRtaW4tc2EiLCJrdWJlcm5ldGVzLmlvL3NlcnZpY2VhY2NvdW50L3NlcnZpY2UtYWNjb3VudC51aWQiOiJlOTQ2YjYxMC0xMDk3LTQ1N2UtYjA2ZC1hZjU0ZGU2MmVkMDYiLCJzdWIiOiJzeXN0ZW06c2VydmljZWFjY291bnQ6ZGVmYXVsdDpkYXNoYm9hcmQtYWRtaW4tc2EifQ.gOykiKGJ0hMwuNi72eHi-4u6Tm66foFHjzcxsN8ObeR6a5WMgD1UM2Qo6vqFA0uCNo1BPGJn5wB56Fm7ABfnSkauMxSeHnh7ZFHca9SV4jKF-M1fjHUEJ80NdZs4IgYM0cFjFXdGk3RFDfUyjbfMjBCQpY3S2kuc9ANd9Pq2PK9HEQT2-5a9ME_ChbiGoFnpdCYVD18Bzmtko-BJBGnKLDQ3-MlF7myEzo_XRiVh1J_mHKUob80VumaQ0nKnVf9HV_saLJqMstf-F-_Ooogf-cyNz8BUCSdIdXfRqhp8-8MdYzwTYgG3IHNRjVVAc3MPsic0XPfsQIZJ49_KLVekiw
313 | ```
314 |
315 | Copy the token and enter it into the token field on the Kubernetes dashboard login page.
316 |
317 | Start the proxy to get access to the dashboard:
318 |
319 | ```bash
320 | kubectl proxy
321 | ```
322 |
323 | Open up the [dashboard](http://localhost:8001/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/) using your browser of choice.
324 |
--------------------------------------------------------------------------------
/Vagrant/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure
5 | # configures the configuration version (we support older styles for
6 | # backwards compatibility). Please don't change it unless you know what
7 | # you're doing.
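# A hedged sketch (not the shipped file) of the per-node entries this
# Vagrantfile expects in environment.yml; the keys below are the ones the
# code actually reads, while the box name and group label are illustrative
# assumptions only:
#
#   nodes:
#     - name: k8s-master
#       box: ubuntu/bionic64   # hypothetical box name
#       mem: 2048
#       vcpu: 2
#       ansible_groups:
#         - k8s_master         # hypothetical group label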
8 |
9 | # Ensure yaml module is loaded
10 | require 'yaml'
11 |
12 | # Read yaml node definitions to create
13 | # **Update environment.yml to reflect any changes
14 | environment = YAML.load_file(File.join(File.dirname(__FILE__), 'environment.yml'))
15 | nodes = environment['nodes']
16 |
17 | # Define global variables
18 | #
19 |
20 | Vagrant.configure(2) do |config|
21 |   # Iterate over nodes; num_nodes is seeded here and ends up holding the last
22 |   # node defined in environment.yml (used later to run provisioning once, on the final node)
23 |   groups = [] # Define array to hold ansible groups
24 |   num_nodes = 0
25 |   populated_ansible_groups = {} # Create hash to contain iterated groups
26 |
27 |   # Create array of Ansible Groups from iterated nodes
28 |   nodes.each do |node|
29 |     num_nodes = node
30 |     node['ansible_groups'].each do |group|
31 |       groups.push(group)
32 |     end
33 |   end
34 |
35 |   # Remove duplicate Ansible Groups
36 |   groups = groups.uniq
37 |
38 |   # Iterate through array of Ansible Groups
39 |   groups.each do |group|
40 |     group_nodes = []
41 |     # Iterate list of nodes
42 |     nodes.each do |node|
43 |       node['ansible_groups'].each do |nodegroup|
44 |         # Check if node is a member of iterated group
45 |         group_nodes.push(node['name']) if nodegroup == group
46 |       end
47 |       populated_ansible_groups[group] = group_nodes
48 |     end
49 |   end
50 |
51 |   # Dynamic Ansible Groups iterated from environment.yml
52 |   ansible_groups = populated_ansible_groups
53 |
54 |   # Define Ansible groups statically for more control
55 |   # ansible_groups = {
56 |   #   "spines" => ["node0", "node7"],
57 |   #   "leafs" => ["node[1:2]", "node[8:9]"],
58 |   #   "quagga-routers:children" => ["spines", "leafs", "compute-nodes"],
59 |   #   "compute-nodes" => ["node[3:6]"],
60 |   #   "docker-swarm:children" => ["docker-swarm-managers", "docker-swarm-workers"],
61 |   #   "docker-swarm-managers" => ["node[3:4]"],
62 |   #   "docker-swarm-workers" => ["node[5:6]"]
63 |   # }
64 |
65 |   # Iterate over nodes
66 |   nodes.each do |node_id|
67 |     config.vm.define node_id['name'] do |node|
68 |       if node_id['disable_synced_folders'].nil?
69 |         if node_id['synced_folder'].nil?
70 |           config.vm.synced_folder 'playbooks', '/playbooks'
71 |           config.vm.synced_folder 'scripts', '/scripts'
72 |         else
73 |           unless node_id['synced_folder']['type'].nil?
74 |             if node_id['synced_folder']['type'] == 'rsync'
75 |               config.vm.synced_folder '.',
76 |                                       '/vagrant',
77 |                                       type: 'rsync',
78 |                                       rsync__args: ['--verbose', '--archive',
79 |                                                     '--delete', '-z']
80 |               config.vm.synced_folder 'playbooks',
81 |                                       '/playbooks',
82 |                                       type: 'rsync',
83 |                                       rsync__args: ['--verbose', '--archive',
84 |                                                     '--delete', '-z']
85 |               config.vm.synced_folder 'scripts',
86 |                                       '/scripts',
87 |                                       type: 'rsync',
88 |                                       rsync__args: ['--verbose', '--archive',
89 |                                                     '--delete', '-z']
90 |             else
91 |               config.vm.synced_folder '.',
92 |                                       '/vagrant',
93 |                                       type: node_id['synced_folder']['type']
94 |               config.vm.synced_folder 'playbooks',
95 |                                       '/playbooks',
96 |                                       type: node_id['synced_folder']['type']
97 |               config.vm.synced_folder 'scripts',
98 |                                       '/scripts',
99 |                                       type: node_id['synced_folder']['type']
100 |             end
101 |           end
102 |         end
103 |         unless environment['synced_folders'].nil?
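          # Hedged example of the optional top-level synced_folders list this
          # loop consumes; src/mountpoint/type are the keys actually read,
          # while the values shown are made-up assumptions:
          #
          #   synced_folders:
          #     - src: ./shared
          #       mountpoint: /shared
          #       type: nfs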
104 |           environment['synced_folders'].each do |folder|
105 |             config.vm.synced_folder folder['src'], folder['mountpoint'],
106 |                                     type: folder['type']
107 |           end
108 |         end
109 |       else
110 |         if node_id['disable_synced_folders']
111 |           config.vm.synced_folder '.', '/vagrant', disabled: true
112 |         else
113 |           config.vm.synced_folder '.', '/vagrant'
114 |           config.vm.synced_folder 'playbooks', '/playbooks'
115 |           config.vm.synced_folder 'scripts', '/scripts'
116 |         end
117 |       end
118 |
119 |       node.vm.box = node_id['box']
120 |       if node_id['manage_hostname'].nil?
121 |         node.vm.hostname = node_id['name']
122 |       else
123 |         node.vm.hostname = node_id['name'] if node_id['manage_hostname']
124 |       end
125 |
126 |       # Setup Windows communication
127 |       unless node_id['windows'].nil?
128 |         if node_id['windows']
129 |           node.vm.guest = :windows
130 |           node.vm.communicator = :winrm
131 |           # config.winrm.transport = :ssl
132 |           # config.winrm.ssl_peer_verification = false
133 |         end
134 |       end
135 |
136 |       node.vm.provider 'virtualbox' do |vbox|
137 |         # Use linked clones - default: true unless defined in environment.yml
138 |         # Define linked_clone: true|false in environment.yml per node
139 |         vbox.linked_clone = node_id['linked_clone'] ||= true
140 |
141 |         vbox.memory = node_id['mem']
142 |         vbox.cpus = node_id['vcpu']
143 |
144 |         # Setup desktop environment
145 |         unless node_id['desktop'].nil?
146 |           if node_id['desktop']
147 |             vbox.gui = true
148 |             vbox.customize ['modifyvm', :id, '--accelerate3d', 'on']
149 |             # The vboxvga controller below remains commented out until a
150 |             # resolution for its proper usage is found; for now, vmsvga is used.
151 |             vbox.customize ['modifyvm', :id, '--graphicscontroller', 'vmsvga']
152 |             # vbox.customize ['modifyvm', :id, '--graphicscontroller', 'vboxvga']
153 |             vbox.customize ['modifyvm', :id, '--hwvirtex', 'on']
154 |             vbox.customize ['modifyvm', :id, '--ioapic', 'on']
155 |             vbox.customize ['modifyvm', :id, '--vram', '128']
156 |             vbox.customize ['modifyvm', :id, '--audio', 'none']
157 |           end
158 |         end
159 |
160 |         # Setup Windows Server
161 |         unless node_id['windows'].nil?
162 |           if node_id['windows']
163 |             vbox.default_nic_type = '82540EM'
164 |             # We set this to false because we can use vagrant rdp
165 |             # vbox.gui = false
166 |             vbox.customize ['modifyvm', :id, '--accelerate2dvideo', 'on']
167 |             vbox.customize ['modifyvm', :id, '--accelerate3d', 'on']
168 |             vbox.customize ['modifyvm', :id, '--graphicscontroller', 'vboxsvga']
169 |             vbox.customize ['modifyvm', :id, '--clipboard', 'bidirectional']
170 |             vbox.customize ['modifyvm', :id, '--vram', '128']
171 |           end
172 |         end
173 |
174 |         # Add additional disk(s)
175 |         unless node_id['disks'].nil?
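            # Hedged example of a per-node disks entry consumed by this block;
            # 'size' is in GB and 'controller' must match a storage controller
            # name in the box (both values below are assumptions):
            #
            #   disks:
            #     - size: 10
            #       controller: "SATA Controller"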
176 |           # Start at 1 to account for 2 disks in box image
177 |           # May need to figure out another way to do this, but for now it works
178 |           dnum = 1
179 |           node_id['disks'].each do |disk_num|
180 |             dnum = (dnum.to_i + 1)
181 |             ddev = "#{node_id['name']}_Disk#{dnum}.vdi"
182 |             dsize = disk_num['size'].to_i * 1024
183 |             unless File.file?(ddev.to_s)
184 |               vbox.customize ['createhd', '--filename', ddev.to_s, \
185 |                               '--variant', 'Fixed', '--size', dsize]
186 |             end
187 |             vbox.customize ['storageattach', :id, '--storagectl', \
188 |                             (disk_num['controller']).to_s, '--port', dnum, \
189 |                             '--device', 0, '--type', 'hdd', \
190 |                             '--medium', ddev.to_s]
191 |           end
192 |         end
193 |       end
194 |
195 |       %w[vmware_desktop vmware_fusion].each do |vmware|
196 |         node.vm.provider vmware do |vmw|
197 |           # Use linked clones - default: true unless defined in environment.yml
198 |           # Define linked_clone: true|false in environment.yml per node
199 |           vmw.linked_clone = node_id['linked_clone'] ||= true
200 |
201 |           vmw.vmx['memsize'] = node_id['mem']
202 |           vmw.vmx['numvcpus'] = node_id['vcpu']
203 |
204 |           # Enable nested virtualization
205 |           unless node_id['nested_virtualization'].nil?
206 |             vmw.vmx['vhv.enable'] = true if node_id['nested_virtualization']
207 |           end
208 |
209 |           # Allow public IP SSH connection
210 |           unless node_id['ssh_use_public_ip'].nil?
211 |             vmw.ssh_info_public = if node_id['ssh_use_public_ip']
212 |                                     true
213 |                                   else
214 |                                     false
215 |                                   end
216 |           end
217 |
218 |           # Functional HGFS in guest
219 |           unless node_id['functional_hgfs'].nil?
220 |             vmw.functional_hgfs = if node_id['functional_hgfs']
221 |                                     true
222 |                                   else
223 |                                     false
224 |                                   end
225 |           end
226 |
227 |           # Setup desktop environment
228 |           unless node_id['desktop'].nil?
229 |             if node_id['desktop']
230 |               vmw.gui = true
231 |               vmw.vmx['mks.enable3d'] = true
232 |             end
233 |           end
234 |
235 |           # Setup Windows Server
236 |           unless node_id['windows'].nil?
237 |             if node_id['windows']
238 |               # vmw.vmx['ethernet0.virtualdev'] = 'e1000'
239 |               # We set this to false because we can use vagrant rdp
240 |               # vmw.gui = false
241 |               vmw.vmx['mks.enable3d'] = true
242 |               # else
243 |               # vmw.vmx['ethernet0.pcislotnumber'] = '33'
244 |             end
245 |           end
246 |
247 |           # Add additional disk(s)
248 |           unless node_id['disks'].nil?
249 |             vdiskmanager = 'vmware-vdiskmanager'
250 |             dnum = 1
251 |             vmdk_path = File.dirname(__FILE__)
252 |             node_id['disks'].each do |disk_num|
253 |               dnum = (dnum.to_i + 1)
254 |               ddev = File.join(vmdk_path, "#{node_id['name']}_Disk#{dnum}.vmdk")
255 |               dsize = "#{disk_num['size']}GB"
256 |               unless File.file?(ddev)
257 |                 `#{vdiskmanager} -c -s #{dsize} -a lsilogic -t 0 #{ddev}`
258 |               end
259 |               vmw.vmx["scsi0:#{dnum}.filename"] = ddev.to_s
260 |               vmw.vmx["scsi0:#{dnum}.present"] = true
261 |               vmw.vmx["scsi0:#{dnum}.redo"] = ''
262 |             end
263 |           end
264 |         end
265 |       end
266 |
267 |       # Provision network interfaces
268 |       unless node_id['interfaces'].nil?
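        # Hypothetical interfaces entry matching the keys iterated below;
        # 'method' is 'static' or 'dhcp', and a network_name of 'None' means
        # no VirtualBox internal network is used (the IP shown follows the
        # addressing documented in the Vagrant README):
        #
        #   interfaces:
        #     - method: static
        #       ip: 192.168.250.10
        #       auto_config: true
        #       network_name: None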
269 |         node_id['interfaces'].each do |int|
270 |           if int['method'] == 'dhcp'
271 |             if int['network_name'] == 'None'
272 |               node.vm.network 'private_network',
273 |                               type: 'dhcp'
274 |             end
275 |             if int['network_name'] != 'None'
276 |               node.vm.network 'private_network',
277 |                               virtualbox__intnet: int['network_name'],
278 |                               type: 'dhcp'
279 |             end
280 |           end
281 |           next unless int['method'] == 'static'
282 |
283 |           if int['network_name'] == 'None'
284 |             node.vm.network 'private_network',
285 |                             ip: int['ip'],
286 |                             auto_config: int['auto_config']
287 |           end
288 |           next unless int['network_name'] != 'None'
289 |
290 |           node.vm.network 'private_network',
291 |                           virtualbox__intnet: int['network_name'],
292 |                           ip: int['ip'],
293 |                           auto_config: int['auto_config']
294 |         end
295 |       end
296 |
297 |       # Port Forwards
298 |       unless node_id['port_forwards'].nil?
299 |         node_id['port_forwards'].each do |pf|
300 |           node.vm.network 'forwarded_port', guest: pf['guest'],
301 |                                             host: pf['host']
302 |         end
303 |       end
304 |
305 |       # Windows RDP
306 |       unless node_id['windows'].nil?
307 |         if node_id['windows']
308 |           node.vm.network 'forwarded_port', guest: 3389, host: 3389,
309 |                                             host_ip: '127.0.0.1'
310 |         end
311 |       end
312 |
313 |       # Provisioners
314 |       unless node_id['provision'].nil?
315 |         if node_id['provision']
316 |           unless node_id['windows'].nil?
317 |             # runs initial script
318 |             if node_id['windows']
319 |               node.vm.provision 'shell', path: 'scripts/bootstrap.ps1'
320 |             else
321 |               node.vm.provision 'shell', path: 'scripts/bootstrap.sh'
322 |             end
323 |           end
324 |           unless node_id['provisioners'].nil?
325 |             node_id['provisioners'].each do |provisioner|
326 |               if provisioner['type'] == 'shell'
327 |                 unless provisioner['inline'].nil?
328 |                   $script = <<-SCRIPT
329 |                   #{provisioner['inline']}
330 |                   SCRIPT
331 |                   node.vm.provision 'shell',
332 |                                     inline: $script,
333 |                                     privileged: provisioner['privileged']
334 |                 end
335 |                 unless provisioner['path'].nil?
336 |                   provisioner['path'].each do |script|
337 |                     node.vm.provision 'shell',
338 |                                       path: script,
339 |                                       privileged: provisioner['privileged']
340 |                   end
341 |                 end
342 |               elsif provisioner['type'] == 'ansible_local'
343 |                 provisioner['playbooks'].each do |playbook|
344 |                   node.vm.provision 'ansible_local' do |ansible|
345 |                     ansible.install_mode = 'pip'
346 |                     ansible.playbook = playbook
347 |                   end
348 |                 end
349 |               end
350 |             end
351 |           end
352 |           unless environment['provisioners'].nil?
353 |             environment['provisioners'].each do |provisioner|
354 |               if provisioner['type'] == 'shell'
355 |                 unless provisioner['inline'].nil?
356 |                   $script = <<-SCRIPT
357 |                   #{provisioner['inline']}
358 |                   SCRIPT
359 |                   node.vm.provision 'shell',
360 |                                     inline: $script,
361 |                                     privileged: provisioner['privileged']
362 |                 end
363 |                 unless provisioner['path'].nil?
364 |                   provisioner['path'].each do |script|
365 |                     node.vm.provision 'shell',
366 |                                       path: script,
367 |                                       privileged: provisioner['privileged']
368 |                   end
369 |                 end
370 |               elsif provisioner['type'] == 'ansible_local'
371 |                 provisioner['playbooks'].each do |playbook|
372 |                   node.vm.provision 'ansible_local' do |ansible|
373 |                     ansible.install_mode = 'pip'
374 |                     ansible.playbook = playbook
375 |                   end
376 |                 end
377 |               end
378 |             end
379 |           end
380 |           if node_id == num_nodes
381 |             # We only run Ansible playbooks when our host is not Windows
382 |             # This does not affect ansible_local provisioners
383 |             unless Vagrant::Util::Platform.windows?
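              # Hedged sketch of an environment-level provisioner entry handled
              # by the loop further below (the 'ansible' type runs from the
              # host, so it is skipped on Windows hosts); the playbook path
              # mirrors one actually shipped in this repo:
              #
              #   provisioners:
              #     - type: ansible
              #       playbooks:
              #         - playbooks/playbook.yml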
384 | node.vm.provision 'ansible' do |ansible| 385 | ansible.limit = 'all' 386 | # Sets up host_vars 387 | ansible.playbook = 'playbooks/prep_host_vars.yml' 388 | ansible.groups = ansible_groups 389 | end 390 | node.vm.provision 'ansible' do |ansible| 391 | ansible.limit = 'all' 392 | # runs bootstrap Ansible playbook 393 | ansible.playbook = 'playbooks/bootstrap.yml' 394 | ansible.groups = ansible_groups 395 | end 396 | unless environment['provisioners'].nil? 397 | environment['provisioners'].each do |provisioner| 398 | next unless provisioner['type'] == 'ansible' 399 | 400 | provisioner['playbooks'].each do |playbook| 401 | node.vm.provision 'ansible' do |ansible| 402 | ansible.limit = 'all' 403 | ansible.playbook = playbook 404 | ansible.groups = ansible_groups 405 | end 406 | end 407 | end 408 | end 409 | # We run this last to ensure all previous provisioning completes 410 | node.vm.provision 'ansible' do |ansible| 411 | ansible.limit = 'all' 412 | # runs Ansible playbook for installing roles/executing tasks 413 | ansible.playbook = 'playbooks/playbook.yml' 414 | ansible.groups = ansible_groups 415 | end 416 | end 417 | end 418 | end 419 | end 420 | end 421 | end 422 | end 423 | -------------------------------------------------------------------------------- /Vagrant/ansible.cfg: -------------------------------------------------------------------------------- 1 | # config file for ansible -- https://ansible.com/ 2 | # =============================================== 3 | 4 | # nearly all parameters can be overridden in ansible-playbook 5 | # or with command line flags. ansible will read ANSIBLE_CONFIG, 6 | # ansible.cfg in the current working directory, .ansible.cfg in 7 | # the home directory or /etc/ansible/ansible.cfg, whichever it 8 | # finds first 9 | 10 | [defaults] 11 | 12 | # some basic default values... 13 | 14 | #inventory = /etc/ansible/hosts 15 | #library = /usr/share/my_modules/ 16 | #module_utils = /usr/share/my_module_utils/ 17 | #remote_tmp = ~/.ansible/tmp 18 | #local_tmp = ~/.ansible/tmp 19 | #plugin_filters_cfg = /etc/ansible/plugin_filters.yml 20 | #forks = 5 21 | #poll_interval = 15 22 | #sudo_user = root 23 | #ask_sudo_pass = True 24 | #ask_pass = True 25 | #transport = smart 26 | #remote_port = 22 27 | #module_lang = C 28 | #module_set_locale = False 29 | 30 | # plays will gather facts by default, which contain information about 31 | # the remote system. 32 | # 33 | # smart - gather by default, but don't regather if already gathered 34 | # implicit - gather by default, turn off with gather_facts: False 35 | # explicit - do not gather by default, must say gather_facts: True 36 | #gathering = implicit 37 | 38 | # This only affects the gathering done by a play's gather_facts directive, 39 | # by default gathering retrieves all facts subsets 40 | # all - gather all subsets 41 | # network - gather min and network facts 42 | # hardware - gather hardware facts (longest facts to retrieve) 43 | # virtual - gather min and virtual facts 44 | # facter - import facts from facter 45 | # ohai - import facts from ohai 46 | # You can combine them using comma (ex: network,virtual) 47 | # You can negate them using ! (ex: !hardware,!facter,!ohai) 48 | # A minimal set of facts is always gathered. 49 | #gather_subset = all 50 | 51 | # some hardware related facts are collected 52 | # with a maximum timeout of 10 seconds. This 53 | # option lets you increase or decrease that 54 | # timeout to something more suitable for the 55 | # environment. 
56 | # gather_timeout = 10
57 |
58 | # additional paths to search for roles in, colon separated
59 | #roles_path = /etc/ansible/roles
60 | roles_path = ../roles:roles
61 |
62 | # uncomment this to disable SSH key host checking
63 | host_key_checking = False
64 |
65 | # change the default callback, you can only have one 'stdout' type enabled at a time.
66 | #stdout_callback = skippy
67 |
68 | ## Ansible ships with some plugins that require whitelisting,
69 | ## this is done to avoid running all of a type by default.
70 | ## These settings list the plugins that you want enabled for your system.
71 | ## Custom plugins should not need this unless the plugin author specifies it.
72 |
73 | # enable callback plugins, they can output to stdout but cannot be 'stdout' type.
74 | #callback_whitelist = timer, mail
75 |
76 | # Determine whether includes in tasks and handlers are "static" by
77 | # default. As of 2.0, includes are dynamic by default. Setting these
78 | # values to True will make includes behave more like they did in the
79 | # 1.x versions.
80 | #task_includes_static = False
81 | #handler_includes_static = False
82 |
83 | # Controls if a missing handler for a notification event is an error or a warning
84 | #error_on_missing_handler = True
85 |
86 | # change this for alternative sudo implementations
87 | #sudo_exe = sudo
88 |
89 | # What flags to pass to sudo
90 | # WARNING: leaving out the defaults might create unexpected behaviours
91 | #sudo_flags = -H -S -n
92 |
93 | # SSH timeout
94 | timeout = 60
95 |
96 | # default user to use for playbooks if user is not specified
97 | # (/usr/bin/ansible will use current user as default)
98 | #remote_user = root
99 |
100 | # logging is off by default unless this path is defined
101 | # if so defined, consider logrotate
102 | #log_path = /var/log/ansible.log
103 |
104 | # default module name for /usr/bin/ansible
105 | #module_name = command
106 |
107 | # use this shell for commands executed under sudo
108 | # you may need to change this to bin/bash in rare instances
109 | # if sudo is constrained
110 | #executable = /bin/sh
111 |
112 | # if inventory variables overlap, does the higher precedence one win
113 | # or are hash values merged together? The default is 'replace' but
114 | # this can also be set to 'merge'.
115 | #hash_behaviour = replace
116 |
117 | # by default, variables from roles will be visible in the global variable
118 | # scope. To prevent this, the following option can be enabled, and only
119 | # tasks and handlers within the role will see the variables there
120 | #private_role_vars = yes
121 |
122 | # list any Jinja2 extensions to enable here:
123 | #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
124 |
125 | # if set, always use this private key file for authentication, same as
126 | # if passing --private-key to ansible or ansible-playbook
127 | #private_key_file = /path/to/file
128 |
129 | # If set, configures the path to the Vault password file as an alternative to
130 | # specifying --vault-password-file on the command line.
131 | #vault_password_file = /path/to/vault_password_file
132 |
133 | # format of the string {{ ansible_managed }} available within Jinja2
134 | # templates; it indicates to users editing template files that their edits will be
135 | # replaced, substituting {file}, {host}, {uid} and strftime codes with proper values.
136 | #ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host} 137 | # {file}, {host}, {uid}, and the timestamp can all interfere with idempotence 138 | # in some situations so the default is a static string: 139 | ansible_managed = Ansible managed 140 | 141 | # by default, ansible-playbook will display "Skipping [host]" if it determines a task 142 | # should not be run on a host. Set this to "False" if you don't want to see these "Skipping" 143 | # messages. NOTE: the task header will still be shown regardless of whether or not the 144 | # task is skipped. 145 | #display_skipped_hosts = True 146 | 147 | # by default, if a task in a playbook does not include a name: field then 148 | # ansible-playbook will construct a header that includes the task's action but 149 | # not the task's args. This is a security feature because ansible cannot know 150 | # if the *module* considers an argument to be no_log at the time that the 151 | # header is printed. If your environment doesn't have a problem securing 152 | # stdout from ansible-playbook (or you have manually specified no_log in your 153 | # playbook on all of the tasks where you have secret information) then you can 154 | # safely set this to True to get more informative messages. 155 | #display_args_to_stdout = False 156 | 157 | # by default (as of 1.3), Ansible will raise errors when attempting to dereference 158 | # Jinja2 variables that are not set in templates or action lines. Uncomment this line 159 | # to revert the behavior to pre-1.3. 160 | #error_on_undefined_vars = False 161 | 162 | # by default (as of 1.6), Ansible may display warnings based on the configuration of the 163 | # system running ansible itself. This may include warnings about 3rd party packages or 164 | # other conditions that should be resolved if possible. 165 | # to disable these warnings, set the following value to False: 166 | #system_warnings = True 167 | 168 | # by default (as of 1.4), Ansible may display deprecation warnings for language 169 | # features that should no longer be used and will be removed in future versions. 170 | # to disable these warnings, set the following value to False: 171 | #deprecation_warnings = True 172 | 173 | # (as of 1.8), Ansible can optionally warn when usage of the shell and 174 | # command module appear to be simplified by using a default Ansible module 175 | # instead. These warnings can be silenced by adjusting the following 176 | # setting or adding warn=yes or warn=no to the end of the command line 177 | # parameter string. This will for example suggest using the git module 178 | # instead of shelling out to the git command. 
179 | # command_warnings = False 180 | 181 | # set plugin path directories here, separate with colons 182 | #action_plugins = /usr/share/ansible/plugins/action 183 | #cache_plugins = /usr/share/ansible/plugins/cache 184 | #callback_plugins = /usr/share/ansible/plugins/callback 185 | #connection_plugins = /usr/share/ansible/plugins/connection 186 | #lookup_plugins = /usr/share/ansible/plugins/lookup 187 | #inventory_plugins = /usr/share/ansible/plugins/inventory 188 | #vars_plugins = /usr/share/ansible/plugins/vars 189 | #filter_plugins = /usr/share/ansible/plugins/filter 190 | #test_plugins = /usr/share/ansible/plugins/test 191 | #terminal_plugins = /usr/share/ansible/plugins/terminal 192 | #strategy_plugins = /usr/share/ansible/plugins/strategy 193 | 194 | # by default, ansible will use the 'linear' strategy but you may want to try 195 | # another one 196 | #strategy = free 197 | 198 | # by default callbacks are not loaded for /bin/ansible, enable this if you 199 | # want, for example, a notification or logging callback to also apply to 200 | # /bin/ansible runs 201 | #bin_ansible_callbacks = False 202 | 203 | # don't like cows? that's unfortunate. 204 | # set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1 205 | #nocows = 1 206 | 207 | # set which cowsay stencil you'd like to use by default. When set to 'random', 208 | # a random stencil will be selected for each task. The selection will be filtered 209 | # against the `cow_whitelist` option below. 210 | #cow_selection = default 211 | #cow_selection = random 212 | 213 | # when using the 'random' option for cowsay, stencils will be restricted to this list. 214 | # it should be formatted as a comma-separated list with no spaces between names. 215 | # NOTE: line continuations here are for formatting purposes only, as the INI parser 216 | # in python does not support them. 217 | #cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\ 218 | # hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\ 219 | # stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www 220 | 221 | # don't like colors either? 222 | # set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1 223 | #nocolor = 1 224 | 225 | # if set to a persistent type (not 'memory', for example 'redis') fact values 226 | # from previous runs in Ansible will be stored. This may be useful when 227 | # wanting to use, for example, IP information from one group of servers 228 | # without having to talk to them in the same playbook run to get their 229 | # current IP information. 230 | #fact_caching = memory 231 | 232 | # retry files 233 | # When a playbook fails by default a .retry file will be created in ~/ 234 | # You can disable this feature by setting retry_files_enabled to False 235 | # and you can change the location of the files by setting retry_files_save_path 236 | 237 | #retry_files_enabled = False 238 | #retry_files_save_path = ~/.ansible-retry 239 | 240 | # squash actions 241 | # Ansible can optimise actions that call modules with list parameters 242 | # when looping. Instead of calling the module once per with_ item, the 243 | # module is called once with all items at once. Currently this only works 244 | # under limited circumstances, and only with parameters named 'name'. 
245 | #squash_actions = apk,apt,dnf,homebrew,pacman,pkgng,yum,zypper 246 | 247 | # prevents logging of task data, off by default 248 | #no_log = False 249 | 250 | # prevents logging of tasks, but only on the targets, data is still logged on the master/controller 251 | #no_target_syslog = False 252 | 253 | # controls whether Ansible will raise an error or warning if a task has no 254 | # choice but to create world readable temporary files to execute a module on 255 | # the remote machine. This option is False by default for security. Users may 256 | # turn this on to have behaviour more like Ansible prior to 2.1.x. See 257 | # https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user 258 | # for more secure ways to fix this than enabling this option. 259 | #allow_world_readable_tmpfiles = False 260 | 261 | # controls the compression level of variables sent to 262 | # worker processes. At the default of 0, no compression 263 | # is used. This value must be an integer from 0 to 9. 264 | #var_compression_level = 9 265 | 266 | # controls what compression method is used for new-style ansible modules when 267 | # they are sent to the remote system. The compression types depend on having 268 | # support compiled into both the controller's python and the client's python. 269 | # The names should match with the python Zipfile compression types: 270 | # * ZIP_STORED (no compression. available everywhere) 271 | # * ZIP_DEFLATED (uses zlib, the default) 272 | # These values may be set per host via the ansible_module_compression inventory 273 | # variable 274 | #module_compression = 'ZIP_DEFLATED' 275 | 276 | # This controls the cutoff point (in bytes) on --diff for files 277 | # set to 0 for unlimited (RAM may suffer!). 278 | #max_diff_size = 1048576 279 | 280 | # This controls how ansible handles multiple --tags and --skip-tags arguments 281 | # on the CLI. If this is True then multiple arguments are merged together. If 282 | # it is False, then the last specified argument is used and the others are ignored. 283 | # This option will be removed in 2.8. 284 | #merge_multiple_cli_flags = True 285 | 286 | # Controls showing custom stats at the end, off by default 287 | #show_custom_stats = True 288 | 289 | # Controls which files to ignore when using a directory as inventory with 290 | # possibly multiple sources (both static and dynamic) 291 | #inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo 292 | 293 | # This family of modules use an alternative execution path optimized for network appliances 294 | # only update this setting if you know how this works, otherwise it can break module execution 295 | #network_group_modules=eos, nxos, ios, iosxr, junos, vyos 296 | 297 | # When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as 298 | # a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain 299 | # jinja2 templating language which will be run through the templating engine. 
300 | # ENABLING THIS COULD BE A SECURITY RISK
301 | #allow_unsafe_lookups = False
302 |
303 | # set default errors for all plays
304 | #any_errors_fatal = False
305 |
306 | [inventory]
307 | # enable inventory plugins, default: 'host_list', 'script', 'yaml', 'ini'
308 | #enable_plugins = host_list, virtualbox, yaml, constructed
309 |
310 | # ignore these extensions when parsing a directory as inventory source
311 | #ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry
312 |
313 | # ignore files matching these patterns when parsing a directory as inventory source
314 | #ignore_patterns=
315 |
316 | # If 'true' unparsed inventory sources become fatal errors, they are warnings otherwise.
317 | #unparsed_is_failed=False
318 |
319 | [privilege_escalation]
320 | #become=True
321 | #become_method=sudo
322 | #become_user=root
323 | #become_ask_pass=False
324 |
325 | [paramiko_connection]
326 |
327 | # uncomment this line to cause the paramiko connection plugin to not record new host
328 | # keys encountered. Increases performance on new host additions. Setting works independently of the
329 | # host key checking setting above.
330 | #record_host_keys=False
331 |
332 | # by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
333 | # line to disable this behaviour.
334 | #pty=False
335 |
336 | # paramiko will default to looking for SSH keys initially when trying to
337 | # authenticate to remote devices. This is a problem for some network devices
338 | # that close the connection after a key failure. Uncomment this line to
339 | # disable the Paramiko look-for-keys function
340 | #look_for_keys = False
341 |
342 | # When using persistent connections with Paramiko, the connection runs in a
343 | # background process. If the host doesn't already have a valid SSH key, by
344 | # default Ansible will prompt to add the host key. This will cause connections
345 | # running in background processes to fail. Uncomment this line to have
346 | # Paramiko automatically add host keys.
347 | #host_key_auto_add = True
348 |
349 | [ssh_connection]
350 |
351 | # ssh arguments to use
352 | # Leaving off ControlPersist will result in poor performance, so use
353 | # paramiko on older platforms rather than removing it; -C controls compression use
354 | #ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s
355 |
356 | # The base directory for the ControlPath sockets.
357 | # This is the "%(directory)s" in the control_path option
358 | #
359 | # Example:
360 | # control_path_dir = /tmp/.ansible/cp
361 | #control_path_dir = ~/.ansible/cp
362 |
363 | # The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname,
364 | # port and username (empty string in the config). The hash mitigates a common problem users
365 | # found with long hostnames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
366 | # In those cases, a "too long for Unix domain socket" ssh error would occur.
367 | #
368 | # Example:
369 | # control_path = %(directory)s/%%h-%%r
370 | #control_path =
371 |
372 | # Enabling pipelining reduces the number of SSH operations required to
373 | # execute a module on the remote server. This can result in a significant
This can result in a significant 374 | # performance improvement when enabled; however, when using "sudo:" you must 375 | # first disable 'requiretty' in /etc/sudoers 376 | # 377 | # By default, this option is disabled to preserve compatibility with 378 | # sudoers configurations that have requiretty (the default on many distros). 379 | # 380 | pipelining = True 381 | 382 | # Control the mechanism for transferring files (old) 383 | # * smart = try sftp and then try scp [default] 384 | # * True = use scp only 385 | # * False = use sftp only 386 | #scp_if_ssh = smart 387 | 388 | # Control the mechanism for transferring files (new) 389 | # If set, this will override the scp_if_ssh option 390 | # * sftp = use sftp to transfer files 391 | # * scp = use scp to transfer files 392 | # * piped = use 'dd' over SSH to transfer files 393 | # * smart = try sftp, scp, and piped, in that order [default] 394 | #transfer_method = smart 395 | 396 | # if False, sftp will not use batch mode to transfer files. This may make some 397 | # types of file transfer failures impossible to catch, however, and should 398 | # only be disabled if your sftp version has problems with batch mode 399 | #sftp_batch_mode = False 400 | 401 | # The -tt argument is passed to ssh when pipelining is not enabled because sudo 402 | # requires a tty by default. 403 | #use_tty = True 404 | 405 | [persistent_connection] 406 | 407 | # Configures the persistent connection timeout value in seconds. This value is 408 | # how long the persistent connection will remain idle before it is destroyed. 409 | # If the connection doesn't receive a request before the timeout value 410 | # expires, the connection is shut down. The default value is 30 seconds. 411 | #connect_timeout = 30 412 | 413 | # Configures the persistent connection retry timeout. This value configures the 414 | # retry timeout that ansible-connection will wait to connect 415 | # to the local domain socket. This value must be larger than the 416 | # ssh timeout (timeout) and less than the persistent connection idle timeout (connect_timeout). 417 | # The default value is 15 seconds. 418 | #connect_retry_timeout = 15 419 | 420 | # The command timeout value defines the amount of time to wait for a command 421 | # or RPC call before timing out. The value for the command timeout must 422 | # be less than the value of the persistent connection idle timeout (connect_timeout). 423 | # The default value is 10 seconds. 424 | #command_timeout = 10 425 | 426 | [accelerate] 427 | #accelerate_port = 5099 428 | #accelerate_timeout = 30 429 | #accelerate_connect_timeout = 5.0 430 | 431 | # The daemon timeout is measured in minutes. This time is measured 432 | # from the last activity seen by the accelerate daemon. 433 | #accelerate_daemon_timeout = 30 434 | 435 | # If set to yes, accelerate_multi_key will allow multiple 436 | # private keys to be uploaded to it, though each user must 437 | # have access to the system via SSH to add a new key. The default 438 | # is "no". 439 | #accelerate_multi_key = yes 440 | 441 | [selinux] 442 | # file systems that require special treatment when dealing with security context 443 | # the default behaviour that copies the existing context or uses the user default 444 | # needs to be changed to use the file system dependent context. 445 | #special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p 446 | 447 | # Set this to yes to allow libvirt_lxc connections to work without SELinux.
448 | #libvirt_lxc_noseclabel = yes 449 | 450 | [colors] 451 | #highlight = white 452 | #verbose = blue 453 | #warn = bright purple 454 | #error = red 455 | #debug = dark gray 456 | #deprecate = purple 457 | #skip = cyan 458 | #unreachable = red 459 | #ok = green 460 | #changed = yellow 461 | #diff_add = green 462 | #diff_remove = red 463 | #diff_lines = cyan 464 | 465 | [diff] 466 | # Always print diff when running ( same as always running with -D/--diff ) 467 | # always = no 468 | 469 | # Set how many context lines to show in diff 470 | # context = 3 471 | -------------------------------------------------------------------------------- /Vagrant/deployments/metallb/deploy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: metallb-system 6 | labels: 7 | app: metallb 8 | --- 9 | apiVersion: policy/v1beta1 10 | kind: PodSecurityPolicy 11 | metadata: 12 | labels: 13 | app: metallb 14 | name: controller 15 | namespace: metallb-system 16 | spec: 17 | allowPrivilegeEscalation: false 18 | allowedCapabilities: [] 19 | allowedHostPaths: [] 20 | defaultAddCapabilities: [] 21 | defaultAllowPrivilegeEscalation: false 22 | fsGroup: 23 | ranges: 24 | - max: 65535 25 | min: 1 26 | rule: MustRunAs 27 | hostIPC: false 28 | hostNetwork: false 29 | hostPID: false 30 | privileged: false 31 | readOnlyRootFilesystem: true 32 | requiredDropCapabilities: 33 | - ALL 34 | runAsUser: 35 | ranges: 36 | - max: 65535 37 | min: 1 38 | rule: MustRunAs 39 | seLinux: 40 | rule: RunAsAny 41 | supplementalGroups: 42 | ranges: 43 | - max: 65535 44 | min: 1 45 | rule: MustRunAs 46 | volumes: 47 | - configMap 48 | - secret 49 | - emptyDir 50 | --- 51 | apiVersion: policy/v1beta1 52 | kind: PodSecurityPolicy 53 | metadata: 54 | labels: 55 | app: metallb 56 | name: speaker 57 | namespace: metallb-system 58 | spec: 59 | allowPrivilegeEscalation: false 60 | allowedCapabilities: 61 | - NET_ADMIN 62 | - NET_RAW 63 | - SYS_ADMIN 64 | allowedHostPaths: [] 65 | defaultAddCapabilities: [] 66 | defaultAllowPrivilegeEscalation: false 67 | fsGroup: 68 | rule: RunAsAny 69 | hostIPC: false 70 | hostNetwork: true 71 | hostPID: false 72 | hostPorts: 73 | - max: 7472 74 | min: 7472 75 | privileged: true 76 | readOnlyRootFilesystem: true 77 | requiredDropCapabilities: 78 | - ALL 79 | runAsUser: 80 | rule: RunAsAny 81 | seLinux: 82 | rule: RunAsAny 83 | supplementalGroups: 84 | rule: RunAsAny 85 | volumes: 86 | - configMap 87 | - secret 88 | - emptyDir 89 | --- 90 | apiVersion: v1 91 | kind: ServiceAccount 92 | metadata: 93 | labels: 94 | app: metallb 95 | name: controller 96 | namespace: metallb-system 97 | --- 98 | apiVersion: v1 99 | kind: ServiceAccount 100 | metadata: 101 | labels: 102 | app: metallb 103 | name: speaker 104 | namespace: metallb-system 105 | --- 106 | apiVersion: rbac.authorization.k8s.io/v1 107 | kind: ClusterRole 108 | metadata: 109 | labels: 110 | app: metallb 111 | name: metallb-system:controller 112 | rules: 113 | - apiGroups: 114 | - "" 115 | resources: 116 | - services 117 | verbs: 118 | - get 119 | - list 120 | - watch 121 | - update 122 | - apiGroups: 123 | - "" 124 | resources: 125 | - services/status 126 | verbs: 127 | - update 128 | - apiGroups: 129 | - "" 130 | resources: 131 | - events 132 | verbs: 133 | - create 134 | - patch 135 | - apiGroups: 136 | - policy 137 | resourceNames: 138 | - controller 139 | resources: 140 | - podsecuritypolicies 141 | verbs: 142 | - use 143 | --- 144 | apiVersion: 
rbac.authorization.k8s.io/v1 145 | kind: ClusterRole 146 | metadata: 147 | labels: 148 | app: metallb 149 | name: metallb-system:speaker 150 | rules: 151 | - apiGroups: 152 | - "" 153 | resources: 154 | - services 155 | - endpoints 156 | - nodes 157 | verbs: 158 | - get 159 | - list 160 | - watch 161 | - apiGroups: 162 | - "" 163 | resources: 164 | - events 165 | verbs: 166 | - create 167 | - patch 168 | - apiGroups: 169 | - policy 170 | resourceNames: 171 | - speaker 172 | resources: 173 | - podsecuritypolicies 174 | verbs: 175 | - use 176 | --- 177 | apiVersion: rbac.authorization.k8s.io/v1 178 | kind: Role 179 | metadata: 180 | labels: 181 | app: metallb 182 | name: config-watcher 183 | namespace: metallb-system 184 | rules: 185 | - apiGroups: 186 | - "" 187 | resources: 188 | - configmaps 189 | verbs: 190 | - get 191 | - list 192 | - watch 193 | --- 194 | apiVersion: rbac.authorization.k8s.io/v1 195 | kind: Role 196 | metadata: 197 | labels: 198 | app: metallb 199 | name: pod-lister 200 | namespace: metallb-system 201 | rules: 202 | - apiGroups: 203 | - "" 204 | resources: 205 | - pods 206 | verbs: 207 | - list 208 | --- 209 | apiVersion: rbac.authorization.k8s.io/v1 210 | kind: ClusterRoleBinding 211 | metadata: 212 | labels: 213 | app: metallb 214 | name: metallb-system:controller 215 | roleRef: 216 | apiGroup: rbac.authorization.k8s.io 217 | kind: ClusterRole 218 | name: metallb-system:controller 219 | subjects: 220 | - kind: ServiceAccount 221 | name: controller 222 | namespace: metallb-system 223 | --- 224 | apiVersion: rbac.authorization.k8s.io/v1 225 | kind: ClusterRoleBinding 226 | metadata: 227 | labels: 228 | app: metallb 229 | name: metallb-system:speaker 230 | roleRef: 231 | apiGroup: rbac.authorization.k8s.io 232 | kind: ClusterRole 233 | name: metallb-system:speaker 234 | subjects: 235 | - kind: ServiceAccount 236 | name: speaker 237 | namespace: metallb-system 238 | --- 239 | apiVersion: rbac.authorization.k8s.io/v1 240 | kind: RoleBinding 241 | metadata: 242 | labels: 243 | app: metallb 244 | name: config-watcher 245 | namespace: metallb-system 246 | roleRef: 247 | apiGroup: rbac.authorization.k8s.io 248 | kind: Role 249 | name: config-watcher 250 | subjects: 251 | - kind: ServiceAccount 252 | name: controller 253 | - kind: ServiceAccount 254 | name: speaker 255 | --- 256 | apiVersion: rbac.authorization.k8s.io/v1 257 | kind: RoleBinding 258 | metadata: 259 | labels: 260 | app: metallb 261 | name: pod-lister 262 | namespace: metallb-system 263 | roleRef: 264 | apiGroup: rbac.authorization.k8s.io 265 | kind: Role 266 | name: pod-lister 267 | subjects: 268 | - kind: ServiceAccount 269 | name: speaker 270 | --- 271 | apiVersion: apps/v1 272 | kind: DaemonSet 273 | metadata: 274 | labels: 275 | app: metallb 276 | component: speaker 277 | name: speaker 278 | namespace: metallb-system 279 | spec: 280 | selector: 281 | matchLabels: 282 | app: metallb 283 | component: speaker 284 | template: 285 | metadata: 286 | annotations: 287 | prometheus.io/port: "7472" 288 | prometheus.io/scrape: "true" 289 | labels: 290 | app: metallb 291 | component: speaker 292 | spec: 293 | containers: 294 | - args: 295 | - --port=7472 296 | - --config=config 297 | env: 298 | - name: METALLB_NODE_NAME 299 | valueFrom: 300 | fieldRef: 301 | fieldPath: spec.nodeName 302 | - name: METALLB_HOST 303 | valueFrom: 304 | fieldRef: 305 | fieldPath: status.hostIP 306 | - name: METALLB_ML_BIND_ADDR 307 | valueFrom: 308 | fieldRef: 309 | fieldPath: status.podIP 310 | - name: METALLB_ML_LABELS 311 | value: 
"app=metallb,component=speaker" 312 | - name: METALLB_ML_NAMESPACE 313 | valueFrom: 314 | fieldRef: 315 | fieldPath: metadata.namespace 316 | - name: METALLB_ML_SECRET_KEY 317 | valueFrom: 318 | secretKeyRef: 319 | name: memberlist 320 | key: secretkey 321 | image: metallb/speaker:v0.9.3 322 | imagePullPolicy: Always 323 | name: speaker 324 | ports: 325 | - containerPort: 7472 326 | name: monitoring 327 | resources: 328 | limits: 329 | cpu: 100m 330 | memory: 100Mi 331 | securityContext: 332 | allowPrivilegeEscalation: false 333 | capabilities: 334 | add: 335 | - NET_ADMIN 336 | - NET_RAW 337 | - SYS_ADMIN 338 | drop: 339 | - ALL 340 | readOnlyRootFilesystem: true 341 | hostNetwork: true 342 | nodeSelector: 343 | beta.kubernetes.io/os: linux 344 | serviceAccountName: speaker 345 | terminationGracePeriodSeconds: 2 346 | tolerations: 347 | - effect: NoSchedule 348 | key: node-role.kubernetes.io/master 349 | --- 350 | apiVersion: apps/v1 351 | kind: Deployment 352 | metadata: 353 | labels: 354 | app: metallb 355 | component: controller 356 | name: controller 357 | namespace: metallb-system 358 | spec: 359 | revisionHistoryLimit: 3 360 | selector: 361 | matchLabels: 362 | app: metallb 363 | component: controller 364 | template: 365 | metadata: 366 | annotations: 367 | prometheus.io/port: "7472" 368 | prometheus.io/scrape: "true" 369 | labels: 370 | app: metallb 371 | component: controller 372 | spec: 373 | containers: 374 | - args: 375 | - --port=7472 376 | - --config=config 377 | image: metallb/controller:v0.9.3 378 | imagePullPolicy: Always 379 | name: controller 380 | ports: 381 | - containerPort: 7472 382 | name: monitoring 383 | resources: 384 | limits: 385 | cpu: 100m 386 | memory: 100Mi 387 | securityContext: 388 | allowPrivilegeEscalation: false 389 | capabilities: 390 | drop: 391 | - all 392 | readOnlyRootFilesystem: true 393 | nodeSelector: 394 | beta.kubernetes.io/os: linux 395 | securityContext: 396 | runAsNonRoot: true 397 | runAsUser: 65534 398 | serviceAccountName: controller 399 | terminationGracePeriodSeconds: 0 400 | --- 401 | apiVersion: v1 402 | kind: ConfigMap 403 | metadata: 404 | namespace: metallb-system 405 | name: config 406 | data: 407 | config: | 408 | address-pools: 409 | - name: default 410 | protocol: layer2 411 | addresses: 412 | - 192.168.250.240-192.168.250.250 413 | -------------------------------------------------------------------------------- /Vagrant/deployments/metallb/nginx-deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Namespace 4 | metadata: 5 | name: demo 6 | --- 7 | apiVersion: apps/v1 8 | kind: Deployment 9 | metadata: 10 | name: nginx-metallb-demo 11 | namespace: demo 12 | spec: 13 | replicas: 2 14 | selector: 15 | matchLabels: 16 | app: nginx-metallb-demo 17 | template: 18 | metadata: 19 | labels: 20 | app: nginx-metallb-demo 21 | spec: 22 | containers: 23 | - name: nginx 24 | image: nginx 25 | ports: 26 | - name: http 27 | containerPort: 80 28 | --- 29 | apiVersion: v1 30 | kind: Service 31 | metadata: 32 | name: nginx-metallb-demo 33 | namespace: demo 34 | spec: 35 | ports: 36 | - name: http 37 | port: 80 38 | protocol: TCP 39 | targetPort: 80 40 | selector: 41 | app: nginx-metallb-demo 42 | type: LoadBalancer 43 | -------------------------------------------------------------------------------- /Vagrant/deployments/nfs-pvs/busybox-pv-nfs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 
| metadata: 4 | name: busybox 5 | spec: 6 | volumes: 7 | - name: host-volume 8 | persistentVolumeClaim: 9 | claimName: pvc1 10 | containers: 11 | - image: busybox 12 | name: busybox 13 | command: ["/bin/sh"] 14 | args: ["-c", "sleep 600"] 15 | volumeMounts: 16 | - name: host-volume 17 | mountPath: /mydata 18 | -------------------------------------------------------------------------------- /Vagrant/deployments/nfs-pvs/class.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: storage.k8s.io/v1 2 | kind: StorageClass 3 | metadata: 4 | name: managed-nfs-storage 5 | provisioner: example.com/nfs 6 | parameters: 7 | archiveOnDelete: "false" 8 | -------------------------------------------------------------------------------- /Vagrant/deployments/nfs-pvs/deployment.yaml: -------------------------------------------------------------------------------- 1 | kind: Deployment 2 | apiVersion: apps/v1 3 | metadata: 4 | name: nfs-client-provisioner 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: nfs-client-provisioner 9 | replicas: 1 10 | strategy: 11 | type: Recreate 12 | template: 13 | metadata: 14 | labels: 15 | app: nfs-client-provisioner 16 | spec: 17 | serviceAccountName: nfs-client-provisioner 18 | containers: 19 | - name: nfs-client-provisioner 20 | image: quay.io/external_storage/nfs-client-provisioner:latest 21 | volumeMounts: 22 | - name: nfs-client-root 23 | mountPath: /persistentvolumes 24 | env: 25 | - name: PROVISIONER_NAME 26 | value: example.com/nfs 27 | - name: NFS_SERVER 28 | value: 192.168.250.13 29 | - name: NFS_PATH 30 | value: /opt/nfs/k8s 31 | volumes: 32 | - name: nfs-client-root 33 | nfs: 34 | server: 192.168.250.13 35 | path: /opt/nfs/k8s 36 | -------------------------------------------------------------------------------- /Vagrant/deployments/nfs-pvs/pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: pvc1 5 | spec: 6 | storageClassName: managed-nfs-storage 7 | accessModes: 8 | - ReadWriteMany 9 | resources: 10 | requests: 11 | storage: 10Gi 12 | -------------------------------------------------------------------------------- /Vagrant/deployments/nfs-pvs/rbac.yaml: -------------------------------------------------------------------------------- 1 | kind: ServiceAccount 2 | apiVersion: v1 3 | metadata: 4 | name: nfs-client-provisioner 5 | --- 6 | kind: ClusterRole 7 | apiVersion: rbac.authorization.k8s.io/v1 8 | metadata: 9 | name: nfs-client-provisioner-runner 10 | rules: 11 | - apiGroups: [""] 12 | resources: ["persistentvolumes"] 13 | verbs: ["get", "list", "watch", "create", "delete"] 14 | - apiGroups: [""] 15 | resources: ["persistentvolumeclaims"] 16 | verbs: ["get", "list", "watch", "update"] 17 | - apiGroups: ["storage.k8s.io"] 18 | resources: ["storageclasses"] 19 | verbs: ["get", "list", "watch"] 20 | - apiGroups: [""] 21 | resources: ["events"] 22 | verbs: ["create", "update", "patch"] 23 | --- 24 | kind: ClusterRoleBinding 25 | apiVersion: rbac.authorization.k8s.io/v1 26 | metadata: 27 | name: run-nfs-client-provisioner 28 | subjects: 29 | - kind: ServiceAccount 30 | name: nfs-client-provisioner 31 | namespace: default 32 | roleRef: 33 | kind: ClusterRole 34 | name: nfs-client-provisioner-runner 35 | apiGroup: rbac.authorization.k8s.io 36 | --- 37 | kind: Role 38 | apiVersion: rbac.authorization.k8s.io/v1 39 | metadata: 40 | name: leader-locking-nfs-client-provisioner 41 | rules: 42 | - apiGroups: [""] 
43 | resources: ["endpoints"] 44 | verbs: ["get", "list", "watch", "create", "update", "patch"] 45 | --- 46 | kind: RoleBinding 47 | apiVersion: rbac.authorization.k8s.io/v1 48 | metadata: 49 | name: leader-locking-nfs-client-provisioner 50 | subjects: 51 | - kind: ServiceAccount 52 | name: nfs-client-provisioner 53 | # replace with namespace where provisioner is deployed 54 | namespace: default 55 | roleRef: 56 | kind: Role 57 | name: leader-locking-nfs-client-provisioner 58 | apiGroup: rbac.authorization.k8s.io 59 | -------------------------------------------------------------------------------- /Vagrant/environment.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Global provisioners will run against every node in the order specified. 3 | # These provisioners run after all node-specific provisioners, if defined. 4 | # 5 | # For node-specific provisioners, add them at the node level 6 | provisioners: [] 7 | # - type: shell 8 | #   inline: | 9 | #     if [ -f /etc/os-release ]; then 10 | #       os_name="$(awk -F= '/^NAME/{ print $2 }' /etc/os-release | sed 's/"//g')" 11 | #       os_version_id="$(awk -F= '/^VERSION_ID/{ print $2}' /etc/os-release | sed 's/"//g')" 12 | #       echo $os_name 13 | #       echo $os_version_id 14 | #     fi 15 | #   privileged: true 16 | # - type: shell 17 | #   path: 18 | #     - scripts/test.sh 19 | #   privileged: false 20 | # - type: ansible_local 21 | #   playbooks: 22 | #     - /vagrant/playbooks/test.yml 23 | # - type: ansible 24 | #   playbooks: 25 | #     - playbooks/test.yml 26 | 27 | # Synced folders 28 | # Additional synced folders. 29 | # By default scripts and playbooks are synced, and available in each node as: 30 | # /playbooks, /scripts, /vagrant/playbooks, and /vagrant/scripts. They are 31 | # duplicated because of the symlinks within /vagrant. This is by design. 32 | synced_folders: 33 | [] 34 | # - type: nfs 35 | #   src: example1/ 36 | #   mountpoint: /example1 37 | # - type: rsync 38 | #   src: example2/ 39 | #   mountpoint: /example2 40 | 41 | # Define the nodes required for the environment you intend to create.
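# The commented node0 entry directly under nodes: below is a schema reference
# only; the live definitions for this environment (k8s-master,
# k8s-worker-01/02, and k8s-nfs-server) follow it.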
42 | nodes: 43 | # - name: node0 44 | # ansible_groups: 45 | # - test_nodes 46 | # box: mrlesmithjr/bionic64 47 | # # Defines if nodes guest OS is a desktop flavor 48 | # desktop: false 49 | # # Defines if synced folders should be disabled 50 | # disable_synced_folders: false 51 | # disks: 52 | # [] 53 | # # - size: 10 54 | # # controller: "SATA Controller" 55 | # # - size: 10 56 | # # controller: "SATA Controller" 57 | # # https://www.vagrantup.com/docs/vmware/configuration.html#functional_hgfs 58 | # functional_hgfs: true 59 | # interfaces: 60 | # [] 61 | # # - ip: 192.168.250.10 62 | # # auto_config: true 63 | # # method: static 64 | # # - ip: 192.168.1.10 65 | # # auto_config: false 66 | # # method: static 67 | # # network_name: network-1 68 | # linked_clone: true 69 | # mem: 512 70 | # # Defines whether or not nested virtualization is enabled 71 | # # Currently only works on VMware boxes 72 | # nested_virtualization: false 73 | # provision: false 74 | # # Node specific provisioners 75 | # # For Ansible, only ansible_local type will work 76 | # provisioners: 77 | # [] 78 | # # - type: shell 79 | # # inline: | 80 | # # if [ -f /etc/os-release ]; then 81 | # # os_name="$(awk -F= '/^NAME/{ print $2 }' /etc/os-release | sed 's/"//g')" 82 | # # os_version_id="$(awk -F= '/^VERSION_ID/{ print $2}' /etc/os-release | sed 's/"//g')" 83 | # # echo $os_name 84 | # # echo $os_version_id 85 | # # fi 86 | # # privileged: true 87 | # # - type: shell 88 | # # path: 89 | # # - scripts/test.sh 90 | # # privileged: false 91 | # # - type: ansible_local 92 | # # playbooks: 93 | # # - /vagrant/playbooks/test.yml 94 | # # https://www.vagrantup.com/docs/vmware/configuration.html#ssh_info_public 95 | # ssh_use_public_ip: false 96 | # vcpu: 1 97 | # port_forwards: 98 | # [] 99 | # # - guest: 80 100 | # # host: 8080 101 | # # - guest: 443 102 | # # host: 4433 103 | # # Defines if nodes guest OS is Windows 104 | # windows: false 105 | - name: k8s-master 106 | ansible_groups: 107 | - k8s 108 | box: mrlesmithjr/bionic64 109 | desktop: false 110 | disks: [] 111 | interfaces: 112 | - ip: 192.168.250.10 113 | auto_config: true 114 | method: static 115 | linked_clone: true 116 | mem: 1024 117 | provision: true 118 | # Node specific provisioners 119 | provisioners: [] 120 | vcpu: 2 121 | port_forwards: [] 122 | windows: false 123 | - name: k8s-worker-01 124 | ansible_groups: 125 | - k8s 126 | box: mrlesmithjr/bionic64 127 | desktop: false 128 | disks: [] 129 | interfaces: 130 | - ip: 192.168.250.11 131 | auto_config: true 132 | method: static 133 | linked_clone: true 134 | mem: 1024 135 | provision: true 136 | # Node specific provisioners 137 | provisioners: [] 138 | vcpu: 2 139 | port_forwards: [] 140 | windows: false 141 | - name: k8s-worker-02 142 | ansible_groups: 143 | - k8s 144 | box: mrlesmithjr/bionic64 145 | desktop: false 146 | disks: [] 147 | interfaces: 148 | - ip: 192.168.250.12 149 | auto_config: true 150 | method: static 151 | linked_clone: true 152 | mem: 1024 153 | provision: true 154 | # Node specific provisioners 155 | provisioners: [] 156 | vcpu: 2 157 | port_forwards: [] 158 | windows: false 159 | - name: k8s-nfs-server 160 | ansible_groups: 161 | - k8s_nfs_servers 162 | box: mrlesmithjr/bionic64 163 | desktop: false 164 | disks: [] 165 | interfaces: 166 | - ip: 192.168.250.13 167 | auto_config: true 168 | method: static 169 | linked_clone: true 170 | mem: 512 171 | provision: true 172 | # Node specific provisioners 173 | provisioners: [] 174 | vcpu: 1 175 | port_forwards: [] 176 | windows: false 177 
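# A typical bring-up with the definition above (a sketch of the standard
# Vagrant workflow; see Vagrant/README.md for the authoritative steps):
#   vagrant up                     # boots and provisions every node defined above
#   vagrant provision k8s-master   # re-runs provisioning on a single node
# Note that the static node addresses 192.168.250.10-13 sit outside the
# MetalLB pool (192.168.250.240-192.168.250.250) configured in
# deployments/metallb/deploy.yaml, so LoadBalancer IPs cannot collide with
# node IPs.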
| -------------------------------------------------------------------------------- /Vagrant/hosts: -------------------------------------------------------------------------------- 1 | .vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory -------------------------------------------------------------------------------- /Vagrant/playbooks/bootstrap.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars: 4 | alpine_packages: 5 | - alpine-sdk 6 | - libffi-dev 7 | - openssl-dev 8 | - py-setuptools 9 | ansible_ver: 2.6.2 10 | debian_packages: 11 | - build-essential 12 | - libffi-dev 13 | - libssl-dev 14 | - python-dev 15 | - python-pip 16 | - python-setuptools 17 | install_ansible: false 18 | pri_domain_name: vagrant.local 19 | redhat_packages: 20 | - gmp-devel 21 | - libffi-devel 22 | - openssl-devel 23 | - python-crypto 24 | - python-devel 25 | - python-pip 26 | - python-setuptools 27 | - redhat-rpm-config 28 | suse_packages: 29 | - gmp-devel 30 | - libffi-devel 31 | - openssl-devel 32 | - python-crypto 33 | - python-devel 34 | - python-pip 35 | - python-setuptools 36 | roles: [] 37 | tasks: 38 | # Update apt-cache to ensure up to date 39 | - name: Updating Apt Cache (Debian) 40 | apt: 41 | update_cache: true 42 | cache_valid_time: 3600 43 | become: true 44 | when: ansible_os_family == "Debian" 45 | 46 | # Install pre-reqs for Ansible install 47 | - name: Installing Ansible Pre-Reqs (Alpine) 48 | apk: 49 | name: "{{ alpine_packages }}" 50 | state: present 51 | become: true 52 | when: ansible_os_family == "Alpine" 53 | 54 | - name: Installing Python Packages (Alpine) 55 | apk: 56 | name: ["py-pip", "python-dev"] 57 | state: present 58 | become: true 59 | when: > 60 | ansible_os_family == "Alpine" and 61 | ansible_distribution_version < "3.5" 62 | 63 | - name: Installing Python Packages (Alpine) 64 | apk: 65 | name: ["py2-pip", "python2-dev"] 66 | state: present 67 | become: true 68 | when: > 69 | ansible_os_family == "Alpine" and 70 | ansible_distribution_version >= "3.5" 71 | 72 | # Install pre-reqs for Ansible install 73 | - name: Installing Ansible Pre-Reqs (Debian) 74 | apt: 75 | name: "{{ debian_packages }}" 76 | state: present 77 | become: true 78 | when: > 79 | install_ansible and 80 | ansible_os_family == "Debian" 81 | 82 | - name: Installing EPEL Repo (RedHat) 83 | yum: 84 | name: epel-release 85 | state: present 86 | become: true 87 | when: > 88 | install_ansible and 89 | ansible_os_family == "RedHat" and 90 | ansible_distribution != "Fedora" 91 | 92 | # Install pre-reqs for Ansible install 93 | - name: Installing Ansible Pre-Reqs (RedHat) 94 | yum: 95 | name: "{{ redhat_packages }}" 96 | state: present 97 | become: true 98 | when: > 99 | install_ansible and 100 | ansible_os_family == "RedHat" and 101 | ansible_distribution != "Fedora" 102 | 103 | # Install pre-reqs for Ansible install 104 | - name: Installing Ansible Pre-Reqs (Fedora) 105 | dnf: 106 | name: python-dnf 107 | state: present 108 | become: true 109 | when: > 110 | install_ansible and 111 | ansible_os_family == "RedHat" and 112 | ansible_distribution == "Fedora" 113 | 114 | # Install pre-reqs for Ansible install 115 | - name: Installing Ansible Pre-Reqs (Fedora) 116 | dnf: 117 | name: "{{ redhat_packages }}" 118 | state: present 119 | become: true 120 | when: > 121 | install_ansible and 122 | ansible_os_family == "RedHat" and 123 | ansible_distribution == "Fedora" 124 | 125 | # Install pre-reqs for Ansible install 126 | - name: Installing Ansible 
Pre-Reqs (openSUSE) 127 | zypper: 128 | name: "{{ suse_packages }}" 129 | state: present 130 | become: true 131 | when: > 132 | install_ansible and 133 | ansible_os_family == "Suse" 134 | 135 | # Upgrading these packages to ensure a successful Ansible install 136 | - name: Updating Python Modules 137 | block: 138 | - name: Update Python Modules 139 | pip: 140 | name: ["pip", "cffi", "pyOpenSSL"] 141 | state: latest 142 | become: true 143 | when: ansible_os_family != "Windows" 144 | 145 | rescue: 146 | - name: Downloading Pip Installer 147 | get_url: 148 | url: https://bootstrap.pypa.io/get-pip.py 149 | dest: /tmp/get-pip.py 150 | 151 | - name: Installing Python Pip 152 | command: python /tmp/get-pip.py 153 | become: true 154 | 155 | when: install_ansible 156 | 157 | # Install Ansible to run Ansible-related tasks within the guest 158 | - name: Installing Ansible 159 | pip: 160 | name: ansible 161 | state: present 162 | version: "{{ ansible_ver }}" 163 | become: true 164 | when: > 165 | install_ansible and 166 | ansible_os_family != "Windows" 167 | 168 | - name: Enabling Windows AutoLogon 169 | win_regedit: 170 | path: HKLM:\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon 171 | name: "{{ item['name'] }}" 172 | data: "{{ item['data'] }}" 173 | type: "{{ item['type'] }}" 174 | loop: 175 | - name: AutoAdminLogon 176 | data: 1 177 | type: String 178 | - name: AutoAdminLogonCount 179 | data: 0 180 | type: dword 181 | - name: DefaultUsername 182 | data: vagrant 183 | type: String 184 | - name: DefaultPassword 185 | data: vagrant 186 | type: String 187 | register: windows_autologon 188 | when: ansible_os_family == "Windows" 189 | 190 | - name: Reboot Windows after AutoLogon 191 | win_reboot: 192 | when: windows_autologon['changed'] 193 | 194 | - name: Ensure Chocolatey is installed 195 | win_chocolatey: 196 | name: chocolatey 197 | state: present 198 | when: ansible_os_family == "Windows" 199 | -------------------------------------------------------------------------------- /Vagrant/playbooks/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars: 4 | # Defines if all nodes in play should be added to each host's /etc/hosts 5 | etc_hosts_add_all_hosts: true 6 | etc_hosts_pri_dns_name: "{{ pri_domain_name }}" 7 | # Defines if the node has a static IP.
8 | etc_hosts_static_ip: true 9 | # Defines if ansible_default_ipv4.address is used for defining hosts 10 | etc_hosts_use_default_ip_address: false 11 | # Defines if ansible_ssh_host is used for defining hosts 12 | etc_hosts_use_ansible_ssh_host: true 13 | pri_domain_name: test.vagrant.local 14 | tasks: 15 | - import_role: 16 | name: ansible-change-hostname 17 | - import_role: 18 | name: ansible-etc-hosts 19 | - import_role: 20 | name: ansible-timezone 21 | - import_role: 22 | name: ansible-ntp 23 | 24 | - hosts: k8s_nfs_servers 25 | vars: 26 | nfs_server_exports: 27 | - export: 28 | access: 29 | - hostname: "*" 30 | options: 31 | - rw 32 | - sync 33 | - no_subtree_check 34 | - no_root_squash 35 | mode: u=rwx,g=rwx,o=rwx 36 | path: /opt/nfs/k8s 37 | pri_domain_name: test.vagrant.local 38 | tasks: 39 | - import_role: 40 | name: ansible-nfs-server 41 | 42 | - hosts: k8s 43 | # become: true 44 | vars: 45 | # Define Docker version to install 46 | docker_version: 18.06.3 47 | k8s_apply_networking: true 48 | k8s_cluster_init_skip_ca_verification: true 49 | k8s_helm_install: true 50 | k8s_pod_network_cni: weave 51 | k8s_version: 1.17.5 52 | nfs_client_mounts: 53 | - mount: 54 | fstype: nfs 55 | opts: 56 | - rsize=8192 57 | - wsize=8192 58 | - intr 59 | path: /opt/nfs/k8s 60 | src: 192.168.250.13:/opt/nfs/k8s 61 | state: mounted 62 | pri_domain_name: test.vagrant.local 63 | tasks: 64 | - import_role: 65 | name: ansible-nfs-client 66 | - import_role: 67 | name: ansible-docker 68 | - import_role: 69 | name: ansible-k8s 70 | 71 | - hosts: k8s 72 | vars: 73 | k8s_admin_config: /etc/kubernetes/admin.conf 74 | k8s_master: "{{ groups[k8s_cluster_group][0] }}" 75 | tasks: 76 | - name: Ensuring {{ lookup('env','HOME') }}/.kube Exists 77 | file: 78 | dest: "{{ lookup('env','HOME') }}/.kube" 79 | state: directory 80 | become: false 81 | delegate_to: localhost 82 | run_once: true 83 | 84 | - name: Fetching {{ k8s_admin_config }} From {{ k8s_master }} 85 | fetch: 86 | src: "{{ k8s_admin_config }}" 87 | dest: "{{ lookup('env','HOME') }}/.kube/vagrant-k8s.yml" 88 | flat: true 89 | become: true 90 | when: inventory_hostname == k8s_master 91 | -------------------------------------------------------------------------------- /Vagrant/playbooks/prep_host_vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: false 4 | vars: 5 | group_vars_dir: "{{ working_dir + '/' + vagrant_dir + '/group_vars' }}" 6 | host_vars_dir: "{{ working_dir + '/' + vagrant_dir + '/host_vars' }}" 7 | nodes: "{{ (lookup('file', working_dir + '/environment.yml')|from_yaml)['nodes'] }}" 8 | vagrant_dir: .vagrant/provisioners/ansible/inventory 9 | working_dir: "{{ lookup('env', 'PWD') }}" 10 | tasks: 11 | - name: Ensuring group_vars Directory Exists 12 | file: 13 | path: "{{ group_vars_dir }}" 14 | state: directory 15 | delegate_to: localhost 16 | run_once: true 17 | 18 | - name: Ensuring host_vars Directory Exists 19 | file: 20 | path: "{{ host_vars_dir }}" 21 | state: directory 22 | delegate_to: localhost 23 | run_once: true 24 | 25 | - name: Ensuring Host File Exists In host_vars 26 | stat: 27 | path: "{{ host_vars_dir }}/{{ item['name'] }}.yml" 28 | delegate_to: localhost 29 | run_once: true 30 | register: host_var 31 | loop: "{{ nodes }}" 32 | 33 | - name: Creating Missing host_vars 34 | file: 35 | path: "{{ host_vars_dir }}/{{ item['item']['name'] }}.yml" 36 | state: touch 37 | delegate_to: localhost 38 | run_once: true 39 | loop: "{{ host_var['results'] }}" 40 | 
when: not item['stat']['exists'] 41 | 42 | - name: Updating ansible_host 43 | lineinfile: 44 | dest: "{{ host_vars_dir }}/{{ item['name'] }}.yml" 45 | regexp: "^ansible_host{{ ':' }}" 46 | state: absent 47 | delegate_to: localhost 48 | run_once: true 49 | loop: "{{ nodes }}" 50 | when: > 51 | (item['interfaces'] is defined and 52 | item['interfaces'] == []) or 53 | item['interfaces'] is not defined 54 | 55 | - name: Updating ansible_host 56 | lineinfile: 57 | dest: "{{ host_vars_dir }}/{{ item['name'] }}.yml" 58 | regexp: "^ansible_host{{ ':' }}" 59 | line: "ansible_host{{ ':' }} {{ item['interfaces'][0]['ip'] }}" 60 | state: present 61 | delegate_to: localhost 62 | run_once: true 63 | loop: "{{ nodes }}" 64 | when: > 65 | item['interfaces'] is defined and 66 | item['interfaces'] != [] and 67 | item['interfaces'][0]['auto_config'] 68 | 69 | - name: Updating ansible_port 70 | lineinfile: 71 | dest: "{{ host_vars_dir }}/{{ item['name'] }}.yml" 72 | regexp: "^ansible_port{{ ':' }}" 73 | state: absent 74 | delegate_to: localhost 75 | run_once: true 76 | loop: "{{ nodes }}" 77 | when: > 78 | (item['interfaces'] is defined and 79 | item['interfaces'] == []) or 80 | item['interfaces'] is not defined 81 | 82 | - name: Updating ansible_port 83 | lineinfile: 84 | dest: "{{ host_vars_dir }}/{{ item['name'] }}.yml" 85 | regexp: "^ansible_port{{ ':' }}" 86 | line: "ansible_port{{ ':' }} 22" 87 | state: present 88 | delegate_to: localhost 89 | run_once: true 90 | loop: "{{ nodes }}" 91 | when: > 92 | (item['interfaces'] is defined and 93 | item['interfaces'] != [] and 94 | item['interfaces'][0]['auto_config']) and 95 | ((item['windows'] is defined and 96 | not item['windows']) or 97 | item['windows'] is not defined) 98 | 99 | - name: Updating ansible_port 100 | lineinfile: 101 | dest: "{{ host_vars_dir }}/{{ item['name'] }}.yml" 102 | regexp: "^ansible_port{{ ':' }}" 103 | line: "ansible_port{{ ':' }} 5986" 104 | state: present 105 | delegate_to: localhost 106 | run_once: true 107 | loop: "{{ nodes }}" 108 | when: > 109 | (item['interfaces'] is defined and 110 | item['interfaces'] != [] and 111 | item['interfaces'][0]['auto_config']) and 112 | (item['windows'] is defined and 113 | item['windows']) 114 | 115 | - name: Updating ansible_connection 116 | lineinfile: 117 | dest: "{{ host_vars_dir }}/{{ item['name'] }}.yml" 118 | regexp: "^ansible_connection{{ ':' }}" 119 | state: absent 120 | delegate_to: localhost 121 | run_once: true 122 | loop: "{{ nodes }}" 123 | when: > 124 | (item['windows'] is defined and 125 | not item['windows']) or 126 | item['windows'] is not defined 127 | 128 | - name: Updating ansible_connection 129 | lineinfile: 130 | dest: "{{ host_vars_dir }}/{{ item['name'] }}.yml" 131 | regexp: "^ansible_connection{{ ':' }}" 132 | line: "ansible_connection{{ ':' }} winrm" 133 | state: present 134 | delegate_to: localhost 135 | run_once: true 136 | loop: "{{ nodes }}" 137 | when: > 138 | (item['windows'] is defined and 139 | item['windows']) 140 | 141 | - name: Updating ansible_winrm_server_cert_validation 142 | lineinfile: 143 | dest: "{{ host_vars_dir }}/{{ item['name'] }}.yml" 144 | regexp: "^ansible_winrm_server_cert_validation{{ ':' }}" 145 | state: absent 146 | delegate_to: localhost 147 | run_once: true 148 | loop: "{{ nodes }}" 149 | when: > 150 | (item['windows'] is defined and 151 | not item['windows']) or 152 | item['windows'] is not defined 153 | 154 | - name: Updating ansible_winrm_server_cert_validation 155 | lineinfile: 156 | dest: "{{ host_vars_dir }}/{{ item['name'] }}.yml" 
157 | regexp: "^ansible_winrm_server_cert_validation{{ ':' }}" 158 | line: "ansible_winrm_server_cert_validation{{ ':' }} ignore" 159 | state: present 160 | delegate_to: localhost 161 | run_once: true 162 | loop: "{{ nodes }}" 163 | when: > 164 | (item['windows'] is defined and 165 | item['windows']) 166 | 167 | - name: Updating ansible_user 168 | lineinfile: 169 | dest: "{{ host_vars_dir }}/{{ item['name'] }}.yml" 170 | regexp: "^ansible_user{{ ':' }}" 171 | line: "ansible_user{{ ':' }} vagrant" 172 | state: present 173 | delegate_to: localhost 174 | run_once: true 175 | loop: "{{ nodes }}" 176 | 177 | - name: Updating ansible_password 178 | lineinfile: 179 | dest: "{{ host_vars_dir }}/{{ item['name'] }}.yml" 180 | regexp: "^ansible_password{{ ':' }}" 181 | line: "ansible_password{{ ':' }} vagrant" 182 | state: present 183 | delegate_to: localhost 184 | run_once: true 185 | loop: "{{ nodes }}" 186 | when: > 187 | (item['windows'] is defined and 188 | item['windows']) 189 | -------------------------------------------------------------------------------- /Vagrant/playbooks/reset_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: k8s 3 | vars: 4 | k8s_cluster_group: k8s 5 | tasks: 6 | - name: set_facts | Setting K8s Master 7 | set_fact: 8 | k8s_master: "{{ groups[k8s_cluster_group][0] }}" 9 | 10 | - name: set_facts | Showing K8s Master 11 | debug: var=k8s_master 12 | 13 | - hosts: k8s 14 | tasks: 15 | - name: Resetting K8s Slaves 16 | command: kubeadm reset -f 17 | become: true 18 | when: inventory_hostname != k8s_master 19 | 20 | - name: Ensuring All Default IPTABLES Chains Are Set To ACCEPT 21 | iptables: 22 | chain: "{{ item }}" 23 | policy: ACCEPT 24 | become: true 25 | loop: 26 | - INPUT 27 | - FORWARD 28 | - OUTPUT 29 | when: inventory_hostname != k8s_master 30 | 31 | - name: Flushing IPTABLES 32 | command: iptables -F 33 | become: true 34 | when: inventory_hostname != k8s_master 35 | 36 | - name: Removing Custom IPTABLES Rules 37 | command: iptables -X 38 | become: true 39 | when: inventory_hostname != k8s_master 40 | 41 | - name: Resetting K8s master 42 | command: kubeadm reset -f 43 | become: true 44 | when: inventory_hostname == k8s_master 45 | 46 | - name: Ensuring All Default IPTABLES Chains Are Set To ACCEPT 47 | iptables: 48 | chain: "{{ item }}" 49 | policy: ACCEPT 50 | become: true 51 | loop: 52 | - INPUT 53 | - FORWARD 54 | - OUTPUT 55 | when: inventory_hostname == k8s_master 56 | 57 | - name: Flushing IPTABLES 58 | command: iptables -F 59 | become: true 60 | when: inventory_hostname == k8s_master 61 | 62 | - name: Removing Custom IPTABLES Rules 63 | command: iptables -X 64 | become: true 65 | when: inventory_hostname == k8s_master 66 | -------------------------------------------------------------------------------- /Vagrant/requirements.yml: -------------------------------------------------------------------------------- 1 | ../requirements.yml -------------------------------------------------------------------------------- /Vagrant/roles/ansible-k8s: -------------------------------------------------------------------------------- 1 | ../../ -------------------------------------------------------------------------------- /Vagrant/scripts/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Alpine 3 | if [ -f /etc/alpine-release ]; then 4 | sudo apk update && 5 | sudo apk add python 6 | fi 7 | 8 | # Arch 9 | if [ -f /etc/arch-release ]; then 10 | 
sudo pacman -Sy --noconfirm ca-certificates glibc libffi python \ 11 | python-boto python-pyopenssl python-pip python-setuptools 12 | fi 13 | 14 | # Debian/Ubuntu 15 | if [ -f /etc/debian_version ]; then 16 | test -e /usr/bin/python || (sudo apt-get update && sudo apt-get -y install python-minimal) 17 | fi 18 | 19 | # RHEL 20 | if [ -f /etc/redhat-release ]; then 21 | if [ -f /etc/os-release ]; then 22 | os_name="$(awk -F= '/^NAME/{ print $2 }' /etc/os-release | sed 's/"//g')" 23 | os_version_id="$(awk -F= '/^VERSION_ID/{ print $2}' /etc/os-release | sed 's/"//g')" 24 | if [[ $os_name = "Fedora" ]]; then 25 | if [[ $os_version_id -le 21 ]]; then 26 | sudo yum -y update 27 | sudo yum -y install dnf 28 | fi 29 | sudo dnf -y install python-devel python-dnf 30 | sudo dnf -y groupinstall "Development Tools" 31 | else 32 | if [[ $os_version_id -lt 8 ]]; then 33 | sudo yum -y install python-devel 34 | sudo yum -y groupinstall "Development Tools" 35 | else 36 | sudo yum -y install platform-python-devel 37 | sudo yum -y groupinstall "Development Tools" 38 | test -e /usr/bin/python || (sudo yum -y install python3 && sudo alternatives --set python /usr/bin/python3) 39 | fi 40 | fi 41 | fi 42 | fi 43 | -------------------------------------------------------------------------------- /Vagrant/scripts/cleanup.bat: -------------------------------------------------------------------------------- 1 | vagrant destroy -f 2 | if exist "host_vars" rmdir /S /Q host_vars 3 | if exist ".vagrant" rmdir /S /Q .vagrant 4 | -------------------------------------------------------------------------------- /Vagrant/scripts/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | vagrant destroy -f 3 | # find . -type d -name "host_vars" 4 | # find . -type d -name "host_vars" -exec rm -rf {} + 5 | # find . -type d -name ".vagrant" 6 | # find . -type d -name ".vagrant" -exec rm -rf {} + 7 | find . -type f -name "*.retry" 8 | find . -type f -name "*.retry" -exec rm {} + 9 | find . -type f -name "*.vmdk" 10 | find . -type f -name "*.vmdk" -exec rm {} + 11 | -------------------------------------------------------------------------------- /Vagrant/scripts/prep.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | LINKS=(".gitignore" "ansible.cfg" "bootstrap.bat" "bootstrap.sh" \ 4 | "bootstrap.yml" "cleanup.bat" "cleanup.sh" "playbook.yml" \ 5 | "requirements.yml" "Vagrantfile") 6 | TOP_FOLDER_PATH="../../.." 7 | for i in "${LINKS[@]}" 8 | do 9 | if [ -f "./$i" ]; then 10 | rm "./$i" 11 | fi 12 | if [ ! -L "./$i" ]; then 13 | ln -s $TOP_FOLDER_PATH/$i . 
14 | fi 15 | done 16 | -------------------------------------------------------------------------------- /defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ansible-k8s 3 | # 4 | k8s_admin_config: /etc/kubernetes/admin.conf 5 | 6 | # Define interface to use for the K8s API advertise address 7 | k8s_advertise_address_int: enp0s8 8 | 9 | k8s_advertise_bind_port: 6443 10 | 11 | k8s_apply_networking: true 12 | # Define Ansible group which defines the K8s Cluster 13 | k8s_cluster_group: k8s 14 | 15 | k8s_cluster_init_skip_ca_verification: false 16 | 17 | k8s_dashboard: https://raw.githubusercontent.com/kubernetes/dashboard/v2.0.0/aio/deploy/recommended.yaml 18 | 19 | k8s_debian_repo_info: 20 | key: "{{ k8s_package_url }}/apt/doc/apt-key.gpg" 21 | repo: "deb http://apt.kubernetes.io/ kubernetes-xenial main" 22 | 23 | # https://github.com/kubernetes/helm 24 | 25 | # Define architecture to install (amd64|arm) 26 | k8s_helm_architecture: amd64 27 | 28 | k8s_helm_install: false 29 | 30 | k8s_helm_install_dir: /usr/local/bin 31 | 32 | k8s_helm_package: "helm-v{{ k8s_helm_version }}-linux-{{ k8s_helm_architecture }}.tar.gz" 33 | 34 | k8s_helm_url: https://get.helm.sh 35 | 36 | k8s_helm_version: 3.2.0 37 | 38 | k8s_package_url: https://packages.cloud.google.com 39 | 40 | # Define Pod Network CNI 41 | # calico|weave 42 | k8s_pod_network_cni: weave 43 | 44 | k8s_reports: 45 | all_pod_namespaces: true 46 | all_service_namespaces: true 47 | display_dashboard_link: true 48 | 49 | k8s_reset_cluster: false 50 | 51 | # Defines services which should be enabled on boot 52 | k8s_services: 53 | - kubelet 54 | 55 | k8s_token_file: /etc/kubernetes/.k8s_token 56 | 57 | k8s_users: 58 | - user: vagrant 59 | 60 | k8s_version: 1.17.5 61 | k8s_cni_version: 0.7.5 62 | -------------------------------------------------------------------------------- /handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for ansible-k8s -------------------------------------------------------------------------------- /meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: Larry Smith Jr.
3 | description: Ansible role to deploy K8s cluster 4 | 5 | license: MIT 6 | 7 | min_ansible_version: 2.8 8 | 9 | platforms: 10 | - name: EL 11 | versions: 12 | - 7 13 | - name: Ubuntu 14 | versions: 15 | - bionic 16 | - xenial 17 | 18 | galaxy_tags: 19 | - clustering 20 | - kubernetes 21 | - containers 22 | - docker 23 | 24 | dependencies: [] 25 | -------------------------------------------------------------------------------- /requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - src: https://www.github.com/mrlesmithjr/ansible-change-hostname.git 3 | - src: https://www.github.com/mrlesmithjr/ansible-docker.git 4 | - src: https://www.github.com/mrlesmithjr/ansible-etc-hosts.git 5 | - src: https://github.com/mrlesmithjr/ansible-nfs-client.git 6 | - src: https://github.com/mrlesmithjr/ansible-nfs-server.git 7 | - src: https://www.github.com/mrlesmithjr/ansible-ntp.git 8 | - src: https://www.github.com/mrlesmithjr/ansible-timezone.git 9 | -------------------------------------------------------------------------------- /tasks/cluster_services.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: cluster_services | Capturing K8s Services 3 | command: > 4 | kubectl --kubeconfig {{ k8s_admin_config }} \ 5 | get services --all-namespaces -o json 6 | changed_when: false 7 | become: true 8 | tags: 9 | - k8s_get_dashboard 10 | register: _k8s_get_services_all 11 | when: k8s_reports['all_service_namespaces']|bool 12 | 13 | - name: cluster_services | Displaying K8s Services 14 | debug: 15 | var: (_k8s_get_services_all['stdout']|from_json)['items'] 16 | when: k8s_reports['all_service_namespaces']|bool 17 | -------------------------------------------------------------------------------- /tasks/cluster_summary.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: cluster_summary | Capturing Cluster Nodes 3 | command: kubectl --kubeconfig {{ k8s_admin_config }} get nodes 4 | changed_when: false 5 | become: true 6 | # We wait for the number of nodes to match the number of hosts defined in 7 | # the ansible group. 
We subtract 1 to account for the header line 8 | until: > 9 | ((_k8s_cluster_nodes['stdout_lines']|length - 1) == (groups[k8s_cluster_group]|length) and 10 | 'NotReady' not in _k8s_cluster_nodes['stdout']) 11 | retries: 30 12 | delay: 10 13 | register: _k8s_cluster_nodes 14 | tags: 15 | - k8s_cluster_nodes 16 | when: inventory_hostname == k8s_master 17 | 18 | # We capture as JSON in order to use for later iteration 19 | - name: cluster_summary | Capturing Cluster Nodes (JSON) 20 | command: kubectl --kubeconfig {{ k8s_admin_config }} get nodes -o json 21 | changed_when: false 22 | become: true 23 | register: _k8s_cluster_nodes_json 24 | tags: 25 | - k8s_cluster_nodes 26 | when: inventory_hostname == k8s_master 27 | 28 | - name: cluster_summary | Displaying Cluster Nodes 29 | debug: 30 | var: _k8s_cluster_nodes['stdout_lines'] 31 | tags: 32 | - k8s_cluster_nodes 33 | when: inventory_hostname == k8s_master 34 | -------------------------------------------------------------------------------- /tasks/dashboard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: dashboard | Checking For Existing Dashboard 3 | shell: > 4 | kubectl --kubeconfig {{ k8s_admin_config }} \ 5 | get pods --namespace=kubernetes-dashboard | grep dashboard 6 | become: true 7 | register: _k8s_dashboard 8 | failed_when: (_k8s_dashboard['rc'] > 1) 9 | changed_when: false 10 | tags: 11 | - k8s_dashboard 12 | - k8s_get_dashboard 13 | when: inventory_hostname == k8s_master 14 | 15 | - name: dashboard | Loading K8s Dashboard 16 | command: > 17 | kubectl --kubeconfig {{ k8s_admin_config }} \ 18 | apply -f {{ k8s_dashboard }} 19 | become: true 20 | tags: 21 | - k8s_dashboard 22 | - k8s_get_dashboard 23 | when: 24 | - inventory_hostname == k8s_master 25 | - _k8s_dashboard['rc'] == 1 26 | 27 | - name: dashboard | Dashboard 28 | debug: 29 | msg: "{{ lookup('template', 'k8s_dashboard.j2') }}" 30 | tags: 31 | - k8s_dashboard 32 | - k8s_get_dashboard 33 | when: 34 | - inventory_hostname == k8s_master 35 | - _k8s_get_services_all is defined 36 | - k8s_reports['display_dashboard_link']|bool 37 | - _k8s_dashboard['rc'] == 0 38 | -------------------------------------------------------------------------------- /tasks/debian.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: debian | Installing Pre-Reqs 3 | apt: 4 | name: ["apt-transport-https", "jq", "net-tools"] 5 | state: present 6 | become: true 7 | register: result 8 | until: result is successful 9 | 10 | - name: debian | Adding K8s Repo Key 11 | apt_key: 12 | url: "{{ k8s_debian_repo_info['key'] }}" 13 | state: present 14 | become: true 15 | 16 | - name: debian | Adding K8s Repo 17 | apt_repository: 18 | repo: "{{ k8s_debian_repo_info['repo'] }}" 19 | state: present 20 | become: true 21 | 22 | - name: debian | Pinning K8s Version 23 | template: 24 | src: etc/apt/preferences.d/k8s.pref.j2 25 | dest: /etc/apt/preferences.d/k8s.pref 26 | become: true 27 | register: _k8s_pinned_version 28 | 29 | - name: debian | Updating Apt Cache 30 | apt: 31 | update_cache: true 32 | become: true 33 | when: _k8s_pinned_version['changed'] 34 | 35 | - name: debian | Installing K8s Packages 36 | apt: 37 | name: "{{ k8s_packages }}" 38 | state: present 39 | become: true 40 | register: result 41 | until: result is successful 42 | -------------------------------------------------------------------------------- /tasks/helm.yml: -------------------------------------------------------------------------------- 
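# The version check below extracts the semantic version from `helm version`
# with a grep/sed/awk pipeline. With Helm 3 (the version this role installs),
# an equivalent and simpler alternative, sketched here only as a comment,
# would be:
#   helm version --template '{{ .Version }}'   # prints e.g. v3.2.0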
1 | --- 2 | - name: helm | Checking If Helm Is Already Installed 3 | stat: 4 | path: "{{ k8s_helm_install_dir }}/helm" 5 | register: _k8s_helm_installed_check 6 | when: inventory_hostname == k8s_master 7 | 8 | - name: helm | Checking Installed Version Of Helm (If Installed) 9 | shell: > 10 | helm version | grep -o Version:.* | sed 's|:| |g' \ 11 | | sed 's|,||g' | awk '{print $2}' | sed 's|\"||g' \ 12 | | sed 's|v||g' 13 | register: _k8s_helm_installed_version 14 | changed_when: false 15 | when: 16 | - inventory_hostname == k8s_master 17 | - _k8s_helm_installed_check['stat']['exists'] 18 | 19 | - name: helm | Setting Installed Version Of Helm 20 | set_fact: 21 | _k8s_helm_version_installed: "{{ _k8s_helm_installed_version['stdout_lines'][0] }}" 22 | when: 23 | - inventory_hostname == k8s_master 24 | - _k8s_helm_installed_check['stat']['exists'] 25 | 26 | - name: helm | Debug Helm Version Installed 27 | debug: 28 | var: _k8s_helm_version_installed 29 | when: 30 | - inventory_hostname == k8s_master 31 | - _k8s_helm_installed_check['stat']['exists'] 32 | 33 | - name: helm | Installing Helm 34 | unarchive: 35 | src: "{{ k8s_helm_url }}/{{ k8s_helm_package }}" 36 | dest: "{{ k8s_helm_install_dir }}" 37 | remote_src: true 38 | register: _k8s_helm_installed 39 | become: true 40 | when: 41 | - inventory_hostname == k8s_master 42 | # Helm is absent, or present at a version other than k8s_helm_version 43 | - _k8s_helm_version_installed is not defined or (_k8s_helm_version_installed != k8s_helm_version) 44 | 45 | - name: helm | Copying Helm 46 | copy: 47 | src: "{{ k8s_helm_install_dir }}/linux-{{ k8s_helm_architecture }}/helm" 48 | dest: "{{ k8s_helm_install_dir }}/" 49 | remote_src: true 50 | become: true 51 | when: 52 | - inventory_hostname == k8s_master 53 | - _k8s_helm_installed['changed'] 54 | 55 | - name: helm | Ensuring Helm Is Executable 56 | file: 57 | dest: "{{ k8s_helm_install_dir }}/helm" 58 | mode: u=rwx,g=rx,o=rx 59 | become: true 60 | when: inventory_hostname == k8s_master 61 | 62 | - name: helm | Cleaning Up Helm Install 63 | file: 64 | dest: "{{ k8s_helm_install_dir }}/linux-{{ k8s_helm_architecture }}" 65 | state: absent 66 | become: true 67 | when: inventory_hostname == k8s_master 68 | -------------------------------------------------------------------------------- /tasks/init_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # We check for this file only as a way to determine whether the cluster has 3 | # already been initialized. This should probably change.
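# (A sturdier alternative, sketched here only as a comment, would be to ask
# the API server directly and treat a non-zero exit code as "not initialized":
#   kubectl --kubeconfig /etc/kubernetes/admin.conf cluster-info
# rather than inferring cluster state from the admin config file existing on
# disk.)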
4 | - name: init_cluster | Checking If Cluster Is Already Initialized 5 | stat: 6 | path: "{{ k8s_admin_config }}" 7 | tags: 8 | - k8s_get_dashboard 9 | - k8s_pods 10 | register: _k8s_kubectl_cluster_info 11 | when: inventory_hostname == k8s_master 12 | 13 | - name: init_cluster | Setting State Of Cluster On Master 14 | set_fact: 15 | k8s_cluster_init: false 16 | tags: 17 | - k8s_get_dashboard 18 | - k8s_pods 19 | when: 20 | - inventory_hostname == k8s_master 21 | - not _k8s_kubectl_cluster_info['stat']['exists'] 22 | 23 | - name: init_cluster | Setting State Of Cluster On Master 24 | set_fact: 25 | k8s_cluster_init: true 26 | tags: 27 | - k8s_get_dashboard 28 | - k8s_pods 29 | when: 30 | - inventory_hostname == k8s_master 31 | - _k8s_kubectl_cluster_info['stat']['exists'] 32 | 33 | - name: init_cluster | Setting State Of Cluster On Non-Master(s) 34 | set_fact: 35 | k8s_cluster_init: "{{ hostvars[k8s_master]['k8s_cluster_init'] }}" 36 | when: inventory_hostname != k8s_master 37 | 38 | - name: init_cluster | Showing Status Of Cluster Fact 39 | debug: 40 | msg: "Cluster already initialized == {{ k8s_cluster_init }}" 41 | 42 | - name: init_cluster | Generating Token 43 | command: kubeadm token generate 44 | register: _k8s_token 45 | changed_when: false 46 | no_log: true 47 | when: 48 | - inventory_hostname == k8s_master 49 | - not k8s_cluster_init|bool 50 | 51 | - name: init_cluster | Setting Token On Master 52 | set_fact: 53 | k8s_token: "{{ _k8s_token['stdout_lines'][0] }}" 54 | no_log: true 55 | when: 56 | - inventory_hostname == k8s_master 57 | - not k8s_cluster_init|bool 58 | 59 | - name: init_cluster | Setting Token On Non-Master(s) 60 | set_fact: 61 | k8s_token: "{{ hostvars[k8s_master]['k8s_token'] }}" 62 | no_log: true 63 | when: 64 | - inventory_hostname != k8s_master 65 | - not k8s_cluster_init|bool 66 | 67 | - name: init_cluster | Initializing Cluster Master 68 | command: > 69 | kubeadm init \ 70 | {% if k8s_pod_network_cidr is defined %} 71 | --pod-network-cidr={{ k8s_pod_network_cidr }} \ 72 | {% endif %} 73 | --apiserver-advertise-address \ 74 | {{ hostvars[inventory_hostname]['k8s_advertise_address'] }} \ 75 | --apiserver-bind-port \ 76 | {{ k8s_advertise_bind_port }} \ 77 | --token {{ k8s_token }} 78 | register: _k8s_cluster_master_init 79 | become: true 80 | no_log: true 81 | when: 82 | - inventory_hostname == k8s_master 83 | - not k8s_cluster_init|bool 84 | -------------------------------------------------------------------------------- /tasks/join_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: join_cluster | Joining Additional Nodes To K8s Cluster 3 | command: > 4 | kubeadm join \ 5 | --token {{ k8s_token }} \ 6 | --discovery-token-unsafe-skip-ca-verification \ 7 | {{ hostvars[k8s_master]['k8s_advertise_address'] }}:{{ k8s_advertise_bind_port }} 8 | args: 9 | creates: /etc/kubernetes/kubelet.conf 10 | become: true 11 | no_log: true 12 | when: 13 | - not k8s_cluster_init|bool 14 | - k8s_cluster_init_skip_ca_verification|bool 15 | 16 | - name: join_cluster | Joining Additional Nodes To K8s Cluster 17 | command: > 18 | kubeadm join \ 19 | --token {{ k8s_token }} \ 20 | {{ hostvars[k8s_master]['k8s_advertise_address'] }}:{{ k8s_advertise_bind_port }} 21 | args: 22 | creates: /etc/kubernetes/kubelet.conf 23 | become: true 24 | no_log: true 25 | when: 26 | - not k8s_cluster_init|bool 27 | - not k8s_cluster_init_skip_ca_verification|bool 28 | 
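# For reference: token-based discovery without the unsafe skip flag normally
# also requires pinning the cluster CA, along the lines of the following
# sketch (the address, token, and hash are illustrative; this role does not
# compute the hash):
#   kubeadm join <master-ip>:6443 --token <token> \
#     --discovery-token-ca-cert-hash sha256:<hash>
# where <hash> can be derived on the master with:
#   openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin \
#     -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //'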
-------------------------------------------------------------------------------- /tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for ansible-k8s 3 | 4 | - include_tasks: set_facts.yml 5 | tags: 6 | - k8s_cluster_ip 7 | - k8s_cluster_nodes 8 | - k8s_cluster_services 9 | - k8s_dashboard 10 | - k8s_get_dashboard 11 | - k8s_helm 12 | - k8s_pods 13 | 14 | - include_tasks: debian.yml 15 | when: ansible_os_family == "Debian" 16 | 17 | - include_tasks: redhat.yml 18 | when: ansible_os_family == "RedHat" 19 | 20 | - include_tasks: services.yml 21 | 22 | - include_tasks: swap.yml 23 | 24 | - include_tasks: init_cluster.yml 25 | tags: 26 | - k8s_cluster_services 27 | - k8s_get_dashboard 28 | - k8s_pods 29 | 30 | - include_tasks: network.yml 31 | when: k8s_apply_networking|bool 32 | 33 | - include_tasks: join_cluster.yml 34 | when: inventory_hostname != k8s_master 35 | 36 | - include_tasks: cluster_summary.yml 37 | tags: 38 | - k8s_cluster_nodes 39 | when: k8s_apply_networking|bool 40 | 41 | - include_tasks: users.yml 42 | 43 | - include_tasks: pods.yml 44 | tags: 45 | - k8s_pods 46 | when: 47 | - inventory_hostname == k8s_master 48 | # - k8s_cluster_init|bool 49 | - k8s_apply_networking|bool 50 | 51 | - include_tasks: cluster_services.yml 52 | tags: 53 | - k8s_cluster_services 54 | - k8s_dashboard 55 | - k8s_get_dashboard 56 | when: 57 | - inventory_hostname == k8s_master 58 | - k8s_cluster_init|bool 59 | - k8s_apply_networking|bool 60 | 61 | - include_tasks: dashboard.yml 62 | tags: 63 | - k8s_dashboard 64 | - k8s_get_dashboard 65 | when: 66 | - k8s_apply_networking|bool 67 | 68 | - include_tasks: helm.yml 69 | tags: 70 | - k8s_helm 71 | when: 72 | - k8s_helm_install|bool 73 | - k8s_apply_networking|bool 74 | 75 | - include_tasks: reset_cluster.yml 76 | when: k8s_reset_cluster|bool 77 | -------------------------------------------------------------------------------- /tasks/network.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: network | Applying Pod Network 3 | shell: > 4 | kubectl --kubeconfig {{ k8s_admin_config }} \ 5 | apply -f {{ k8s_pod_network_config }} 6 | become: true 7 | register: _k8s_pod_network_applied 8 | when: 9 | - inventory_hostname == k8s_master 10 | - not k8s_cluster_init|bool 11 | 12 | - name: network | Waiting For Kube-DNS or CoreDNS To Be Running 13 | shell: > 14 | kubectl --kubeconfig {{ k8s_admin_config }} \ 15 | get pods --all-namespaces | grep -e kube-dns -e coredns 16 | become: true 17 | register: _k8s_kube_dns_running 18 | until: ("Running" in _k8s_kube_dns_running['stdout']) 19 | retries: 30 20 | delay: 10 21 | changed_when: false 22 | when: inventory_hostname == k8s_master 23 | 24 | - name: network | Debugging Kube-DNS or CoreDNS 25 | debug: 26 | msg: "Kube-DNS or CoreDNS Is Up and Running" 27 | when: 28 | - inventory_hostname == k8s_master 29 | - ("Running" in _k8s_kube_dns_running['stdout']) 30 | 31 | - name: network | Capturing Cluster-IP On Master 32 | command: "kubectl --kubeconfig {{ k8s_admin_config }} get svc -o json" 33 | register: _k8s_cluster_ip 34 | become: true 35 | changed_when: false 36 | tags: 37 | - k8s_cluster_ip 38 | when: inventory_hostname == k8s_master 39 | 40 | - name: network | Setting Cluster-IP On Master 41 | set_fact: 42 | k8s_cluster_ip: "{{ (_k8s_cluster_ip['stdout']|from_json)['items'][0]['spec']['clusterIP'] }}" 43 | tags: 44 | - k8s_cluster_ip 45 | when: inventory_hostname == k8s_master 46 | 47 | - name: 
network | Setting Cluster-IP On Non-Master(s) 48 | set_fact: 49 | k8s_cluster_ip: "{{ hostvars[k8s_master]['k8s_cluster_ip'] }}" 50 | tags: 51 | - k8s_cluster_ip 52 | when: inventory_hostname != k8s_master 53 | 54 | - name: network | Showing Cluster-IP 55 | debug: 56 | var: k8s_cluster_ip 57 | tags: 58 | - k8s_cluster_ip 59 | 60 | - name: network | Checking For Cluster-IP Route On Hosts 61 | shell: "route -n | awk '{print $1}' | grep {{ k8s_cluster_ip }}" 62 | become: true 63 | register: _k8s_cluster_ip_route 64 | failed_when: "(_k8s_cluster_ip_route['rc'] > 1)" 65 | changed_when: false 66 | tags: 67 | - k8s_cluster_ip 68 | when: inventory_hostname != k8s_master 69 | 70 | - name: network | Setting IP Route To Reach Cluster-IP Via Master 71 | command: > 72 | route add {{ k8s_cluster_ip }} gw 73 | {{ hostvars[k8s_master]['k8s_advertise_address'] }} 74 | become: true 75 | when: 76 | - inventory_hostname != k8s_master 77 | - _k8s_cluster_ip_route['rc'] == 1 78 | tags: 79 | - k8s_cluster_ip 80 | -------------------------------------------------------------------------------- /tasks/pods.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: pods | Capturing Pods In All Namespaces 3 | command: > 4 | kubectl --kubeconfig {{ k8s_admin_config }} 5 | get pods --all-namespaces -o json 6 | register: _k8s_pods_all_namespaces 7 | become: true 8 | changed_when: false 9 | tags: 10 | - k8s_pods 11 | when: k8s_reports['all_pod_namespaces'] 12 | 13 | - name: pods | Displaying Pods In All Namespaces 14 | debug: 15 | msg: "{{ lookup('template', 'k8s_pods.j2') }}" 16 | tags: 17 | - k8s_pods 18 | when: k8s_reports['all_pod_namespaces'] 19 | -------------------------------------------------------------------------------- /tasks/redhat.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: redhat | Disabling SELinux 3 | selinux: 4 | state: disabled 5 | become: true 6 | 7 | - name: redhat | Managing Sysctl Settings 8 | sysctl: 9 | name: "{{ item.name }}" 10 | value: "{{ item.value }}" 11 | sysctl_set: yes 12 | state: present 13 | reload: yes 14 | become: true 15 | loop: 16 | - name: net.bridge.bridge-nf-call-iptables 17 | value: 1 18 | 19 | - name: redhat | Installing Pre-Reqs 20 | package: 21 | name: 22 | - net-tools 23 | state: present 24 | become: true 25 | register: result 26 | until: result is successful 27 | 28 | - name: redhat | Adding K8s Repo 29 | yum_repository: 30 | name: kubernetes 31 | description: kubernetes 32 | baseurl: https://packages.cloud.google.com/yum/repos/kubernetes-el7-$basearch 33 | enabled: true 34 | gpgcheck: true 35 | repo_gpgcheck: true 36 | gpgkey: https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg 37 | exclude: kubelet kubeadm kubectl 38 | become: true 39 | 40 | - name: redhat | Installing K8s Packages 41 | package: 42 | name: "{{ k8s_packages }}" 43 | state: present 44 | disable_excludes: kubernetes 45 | become: true 46 | register: result 47 | until: result is successful 48 | -------------------------------------------------------------------------------- /tasks/reset_cluster.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: reset_cluster | Resetting Cluster 3 | command: kubeadm reset --force 4 | become: true 5 | tags: 6 | - k8s_reset 7 | when: 8 | - k8s_reset_cluster|bool 9 | - inventory_hostname in groups[k8s_cluster_group] 10 |
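As a usage sketch only: the reset task above is normally driven by re-running the role with the reset flag enabled. A minimal wrapper playbook might look like the following, where the k8s-cluster group name is a hypothetical example and k8s_reset_cluster / k8s_cluster_group are the role variables referenced in the task's conditions:

---
# Hypothetical wrapper playbook: runs the role with the reset flag enabled.
- hosts: k8s-cluster
  vars:
    k8s_cluster_group: k8s-cluster
    k8s_reset_cluster: true
  roles:
    - ansible-k8s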
-------------------------------------------------------------------------------- /tasks/services.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: services | Ensuring All Services Are Started And Enabled On Boot 3 | service: 4 | name: "{{ item }}" 5 | enabled: true 6 | state: started 7 | become: true 8 | loop: "{{ k8s_services }}" 9 | -------------------------------------------------------------------------------- /tasks/set_facts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: set_facts | Setting K8s Packages 3 | set_fact: 4 | k8s_packages: 5 | - kubelet={{ k8s_version }}-00 6 | - kubeadm={{ k8s_version }}-00 7 | - kubectl={{ k8s_version }}-00 8 | - kubernetes-cni={{ k8s_cni_version }}-00 9 | when: ansible_os_family == "Debian" 10 | 11 | - name: set_facts | Setting K8s Packages 12 | set_fact: 13 | k8s_packages: 14 | - kubelet-{{ k8s_version }} 15 | - kubeadm-{{ k8s_version }} 16 | - kubectl-{{ k8s_version }} 17 | - kubernetes-cni-{{ k8s_cni_version }} 18 | when: ansible_os_family == "RedHat" 19 | 20 | - name: set_facts | Setting Network CNI Facts 21 | set_fact: 22 | k8s_pod_network_config: "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\\n')" 23 | when: k8s_pod_network_cni|lower == "weave" 24 | 25 | - name: set_facts | Setting Network CNI Facts 26 | set_fact: 27 | k8s_pod_network_cidr: 192.168.0.0/16 28 | k8s_pod_network_config: https://docs.projectcalico.org/manifests/calico.yaml 29 | when: k8s_pod_network_cni|lower == "calico" 30 | 31 | - name: set_facts | Setting K8s Master 32 | set_fact: 33 | k8s_master: "{{ groups[k8s_cluster_group][0] }}" 34 | tags: 35 | - k8s_cluster_nodes 36 | - k8s_get_dashboard 37 | - k8s_pods 38 | 39 | - name: set_facts | Showing K8s Master 40 | debug: 41 | var: k8s_master 42 | 43 | - name: set_facts | Setting K8s Advertise Address 44 | set_fact: 45 | k8s_advertise_address: "{{ hostvars[inventory_hostname]['ansible_' + k8s_advertise_address_int]['ipv4']['address'] }}" 46 | tags: 47 | - k8s_get_dashboard 48 | 49 | - name: set_facts | Showing K8s Advertise Address 50 | debug: 51 | var: k8s_advertise_address 52 | -------------------------------------------------------------------------------- /tasks/swap.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: swap | Disabling Swap In fstab Since Kubernetes Can't Work With Swap Enabled 3 | replace: 4 | path: /etc/fstab 5 | regexp: '^([^#].*?\sswap\s+.*)$' 6 | replace: '# \1' 7 | become: true 8 | 9 | - name: swap | Disabling Swap 10 | command: swapoff -a 11 | become: true 12 | -------------------------------------------------------------------------------- /tasks/users.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Ensure the directory exists before copying {{ k8s_admin_config }} into each 3 | # user's (defined in k8s_users) home/.kube 4 | - name: users | Ensuring Users home/user/.kube Exists 5 | file: 6 | path: "/home/{{ item['user'] }}/.kube" 7 | owner: "{{ item['user'] }}" 8 | group: "{{ item['user'] }}" 9 | state: directory 10 | become: true 11 | loop: "{{ k8s_users }}" 12 | when: inventory_hostname == k8s_master 13 | 14 | # Copy {{ k8s_admin_config }} to each user's (defined in k8s_users) 15 | # home/.kube/config 16 | # This allows those users to execute kubectl commands 17 | - name: users | Copying {{ k8s_admin_config }} To Users home/user/.kube/config 18 |
copy: 19 | src: "{{ k8s_admin_config }}" 20 | dest: "/home/{{ item['user'] }}/.kube/config" 21 | remote_src: true 22 | owner: "{{ item['user'] }}" 23 | group: "{{ item['user'] }}" 24 | become: true 25 | loop: "{{ k8s_users }}" 26 | when: inventory_hostname == k8s_master 27 | -------------------------------------------------------------------------------- /templates/etc/apt/preferences.d/k8s.pref.j2: -------------------------------------------------------------------------------- 1 | Package: kubeadm 2 | Pin: version {{ k8s_version }}* 3 | Pin-Priority: 900 4 | 5 | Package: kubectl 6 | Pin: version {{ k8s_version }}* 7 | Pin-Priority: 900 8 | 9 | Package: kubelet 10 | Pin: version {{ k8s_version }}* 11 | Pin-Priority: 900 12 | 13 | Package: kubernetes-cni 14 | Pin: version {{ k8s_cni_version }}* 15 | Pin-Priority: 900 -------------------------------------------------------------------------------- /templates/k8s_cluster_ip.j2: -------------------------------------------------------------------------------- 1 | {{ (_k8s_cluster_ip['stdout']|from_json)['items'][0]['spec']['clusterIP'] }} 2 | -------------------------------------------------------------------------------- /templates/k8s_dashboard.j2: -------------------------------------------------------------------------------- 1 | {% if _k8s_get_services_all is defined and k8s_reports['display_dashboard_link'] %} 2 | {% for item in (_k8s_get_services_all['stdout']|from_json)['items'] %} 3 | {% if item['metadata']['name'] == 'kubernetes-dashboard' %} 4 | Kubernetes Dashboard can be reached at: http://{{ hostvars[k8s_master]['k8s_advertise_address'] }}:{{ item['spec']['ports'][0]['targetPort'] }} 5 | {% endif %} 6 | {% endfor %} 7 | {% endif %} 8 | -------------------------------------------------------------------------------- /templates/k8s_pods.j2: -------------------------------------------------------------------------------- 1 | {% if _k8s_pods_all_namespaces is defined and k8s_reports['all_pod_namespaces'] %} 2 | {% set containers = {} %} 3 | {% set _containers = [] %} 4 | {% for item in (_k8s_pods_all_namespaces['stdout']|from_json)['items'] %} 5 | {% set _container = {} %} 6 | {% set _node = item['spec']['nodeName'] %} 7 | {% set _hostip = item['status']['hostIP'] %} 8 | {% set _podip = item['status']['podIP'] %} 9 | {% set _phase = item['status']['phase'] %} 10 | {% set _container_name = item['spec']['containers'][0]['name'] %} 11 | {% set _container_image = item['spec']['containers'][0]['image'] %} 12 | {% set _container_resources = item['spec']['containers'][0]['resources'] %} 13 | {% set _cont = _container.update({"phase": _phase, "podIP": _podip, "hostIP": _hostip, "name": _container_name, "image": _container_image, "resources": _container_resources, "nodeName": _node}) %} 14 | {% set _cont = _containers.append(_container) %} 15 | {% endfor %} 16 | {% set _cont = containers.update({"containers": _containers}) %} 17 | {{ containers|to_nice_json }} 18 | {% endif %} 19 | -------------------------------------------------------------------------------- /templates/k8s_services.j2: -------------------------------------------------------------------------------- 1 | {% if _k8s_get_services_all is defined and k8s_reports['all_service_namespaces'] %} 2 | {% set services = {} %} 3 | {% set _services = [] %} 4 | {% for item in (_k8s_get_services_all['stdout']|from_json)['items'] %} 5 | {% set _service = {} %} 6 | {% set _service_clusterip = item['spec']['clusterIP'] %} 7 | {% set _service_labels = item['metadata']['labels'] %} 8 | {% set
_service_name = item['metadata']['name'] %} 9 | {% set _service_namespace = item['metadata']['namespace'] %} 10 | {% set _service_ports = item['spec']['ports'] %} 11 | {% set _svc = _service.update({"clusterIP": _service_clusterip, "labels": _service_labels, "name": _service_name, "namespace": _service_namespace, "ports": _service_ports}) %} 12 | {% set _svc = _services.append(_service) %} 13 | {% endfor %} 14 | {% set _svc = services.update({"services": _services}) %} 15 | {{ services|to_nice_json }} 16 | {% endif %} 17 | -------------------------------------------------------------------------------- /templates/k8s_token.j2: -------------------------------------------------------------------------------- 1 | {{ k8s_token }} 2 | -------------------------------------------------------------------------------- /templates/tiller-rbac-config.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | name: tiller 5 | namespace: kube-system 6 | --- 7 | apiVersion: rbac.authorization.k8s.io/v1beta1 8 | kind: ClusterRoleBinding 9 | metadata: 10 | name: tiller 11 | roleRef: 12 | apiGroup: rbac.authorization.k8s.io 13 | kind: ClusterRole 14 | name: cluster-admin 15 | subjects: 16 | - kind: ServiceAccount 17 | name: tiller 18 | namespace: kube-system 19 | -------------------------------------------------------------------------------- /tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - ansible-k8s -------------------------------------------------------------------------------- /vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for ansible-k8s --------------------------------------------------------------------------------
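Closing reference: a minimal group_vars sketch showing how the variables used throughout these tasks are typically supplied. All values below are hypothetical examples; the authoritative defaults live in defaults/main.yml:

---
# Hypothetical group_vars for the cluster group
k8s_cluster_group: k8s-cluster      # first host in this group becomes k8s_master
k8s_advertise_address_int: enp0s8   # interface whose IPv4 address is advertised
k8s_pod_network_cni: calico         # or weave (see set_facts.yml)
k8s_users:                          # users who get the admin kubeconfig in ~/.kube/config
  - user: vagrant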