├── .gitignore ├── LICENSE ├── README.md ├── cli ├── README.md ├── images │ └── dashboard.png └── manifests │ └── mypod.yaml ├── configuration ├── README.md └── manifests │ ├── cm-cmd-example.yaml │ ├── cm-env-example.yaml │ ├── cm-manifest.yaml │ ├── cm-vol-example.yaml │ ├── cm │ ├── city │ └── state │ ├── secret-cmd-example.yaml │ ├── secret-env-example.yaml │ ├── secret-manifest.yaml │ ├── secret-vol-example.yaml │ └── secret │ ├── password │ └── username ├── core ├── README.md └── manifests │ ├── metalLB.yaml │ ├── pod-example.yaml │ ├── pod-multi-container-example.yaml │ ├── service-clusterip.yaml │ ├── service-loadbalancer.yaml │ └── service-nodeport.yaml ├── examples ├── README.md ├── jupyterhub │ ├── README.md │ └── manifests │ │ ├── cm-hub-config.yaml │ │ ├── cm-ingress.yaml │ │ ├── cm-nginx.yaml │ │ ├── deploy-hub.yaml │ │ ├── deploy-proxy.yaml │ │ ├── pvc-hub.yaml │ │ ├── rbac.yaml │ │ ├── secret-hub.yaml │ │ ├── svc-hub.yaml │ │ ├── svc-proxy-api.yaml │ │ ├── svc-proxy-http.yaml │ │ └── svc-proxy-public.yaml └── wordpress │ ├── README.md │ └── manifests │ ├── dep-wordpress.yaml │ ├── pvc-wordpress.yaml │ ├── secret-mysql.yaml │ ├── sts-mysql.yaml │ ├── svc-mysql.yaml │ └── svc-wordpress.yaml ├── storage ├── README.md └── manifests │ ├── html-vol.yaml │ ├── pv-sc-example.yaml │ ├── pv-selector-example.yaml │ ├── pvc-sc-example.yaml │ ├── pvc-selector-example.yaml │ ├── pvc-standard.yaml │ ├── reader.yaml │ ├── volume-example.yaml │ └── writer.yaml └── workloads ├── README.md └── manifests ├── cronjob-example.yaml ├── deploy-example.yaml ├── ds-example.yaml ├── job-example.yaml ├── pod-rs-example.yaml ├── rs-example.yaml ├── service-sts-example.yaml └── sts-example.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | # OSX 2 | ._* 3 | .DS_Store 4 | 5 | # Vim-related files 6 | [._]*.s[a-w][a-z] 7 | [._]s[a-w][a-z] 8 | *.un~ 9 | Session.vim 10 | .netrwhist 11 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Attribution 4.0 International 2 | 3 | ======================================================================= 4 | 5 | Creative Commons Corporation ("Creative Commons") is not a law firm and 6 | does not provide legal services or legal advice. Distribution of 7 | Creative Commons public licenses does not create a lawyer-client or 8 | other relationship. Creative Commons makes its licenses and related 9 | information available on an "as-is" basis. Creative Commons gives no 10 | warranties regarding its licenses, any material licensed under their 11 | terms and conditions, or any related information. Creative Commons 12 | disclaims all liability for damages resulting from their use to the 13 | fullest extent possible. 14 | 15 | Using Creative Commons Public Licenses 16 | 17 | Creative Commons public licenses provide a standard set of terms and 18 | conditions that creators and other rights holders may use to share 19 | original works of authorship and other material subject to copyright 20 | and certain other rights specified in the public license below. The 21 | following considerations are for informational purposes only, are not 22 | exhaustive, and do not form part of our licenses. 23 | 24 | Considerations for licensors: Our public licenses are 25 | intended for use by those authorized to give the public 26 | permission to use material in ways otherwise restricted by 27 | copyright and certain other rights. Our licenses are 28 | irrevocable. Licensors should read and understand the terms 29 | and conditions of the license they choose before applying it. 30 | Licensors should also secure all rights necessary before 31 | applying our licenses so that the public can reuse the 32 | material as expected. Licensors should clearly mark any 33 | material not subject to the license. 
This includes other CC- 34 | licensed material, or material used under an exception or 35 | limitation to copyright. More considerations for licensors: 36 | wiki.creativecommons.org/Considerations_for_licensors 37 | 38 | Considerations for the public: By using one of our public 39 | licenses, a licensor grants the public permission to use the 40 | licensed material under specified terms and conditions. If 41 | the licensor's permission is not necessary for any reason--for 42 | example, because of any applicable exception or limitation to 43 | copyright--then that use is not regulated by the license. Our 44 | licenses grant only permissions under copyright and certain 45 | other rights that a licensor has authority to grant. Use of 46 | the licensed material may still be restricted for other 47 | reasons, including because others have copyright or other 48 | rights in the material. A licensor may make special requests, 49 | such as asking that all changes be marked or described. 50 | Although not required by our licenses, you are encouraged to 51 | respect those requests where reasonable. More considerations 52 | for the public: 53 | wiki.creativecommons.org/Considerations_for_licensees 54 | 55 | ======================================================================= 56 | 57 | Creative Commons Attribution 4.0 International Public License 58 | 59 | By exercising the Licensed Rights (defined below), You accept and agree 60 | to be bound by the terms and conditions of this Creative Commons 61 | Attribution 4.0 International Public License ("Public License"). To the 62 | extent this Public License may be interpreted as a contract, You are 63 | granted the Licensed Rights in consideration of Your acceptance of 64 | these terms and conditions, and the Licensor grants You such rights in 65 | consideration of benefits the Licensor receives from making the 66 | Licensed Material available under these terms and conditions. 67 | 68 | 69 | Section 1 -- Definitions. 70 | 71 | a. 
Adapted Material means material subject to Copyright and Similar 72 | Rights that is derived from or based upon the Licensed Material 73 | and in which the Licensed Material is translated, altered, 74 | arranged, transformed, or otherwise modified in a manner requiring 75 | permission under the Copyright and Similar Rights held by the 76 | Licensor. For purposes of this Public License, where the Licensed 77 | Material is a musical work, performance, or sound recording, 78 | Adapted Material is always produced where the Licensed Material is 79 | synched in timed relation with a moving image. 80 | 81 | b. Adapter's License means the license You apply to Your Copyright 82 | and Similar Rights in Your contributions to Adapted Material in 83 | accordance with the terms and conditions of this Public License. 84 | 85 | c. Copyright and Similar Rights means copyright and/or similar rights 86 | closely related to copyright including, without limitation, 87 | performance, broadcast, sound recording, and Sui Generis Database 88 | Rights, without regard to how the rights are labeled or 89 | categorized. For purposes of this Public License, the rights 90 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 91 | Rights. 92 | 93 | d. Effective Technological Measures means those measures that, in the 94 | absence of proper authority, may not be circumvented under laws 95 | fulfilling obligations under Article 11 of the WIPO Copyright 96 | Treaty adopted on December 20, 1996, and/or similar international 97 | agreements. 98 | 99 | e. Exceptions and Limitations means fair use, fair dealing, and/or 100 | any other exception or limitation to Copyright and Similar Rights 101 | that applies to Your use of the Licensed Material. 102 | 103 | f. Licensed Material means the artistic or literary work, database, 104 | or other material to which the Licensor applied this Public 105 | License. 106 | 107 | g. 
Licensed Rights means the rights granted to You subject to the 108 | terms and conditions of this Public License, which are limited to 109 | all Copyright and Similar Rights that apply to Your use of the 110 | Licensed Material and that the Licensor has authority to license. 111 | 112 | h. Licensor means the individual(s) or entity(ies) granting rights 113 | under this Public License. 114 | 115 | i. Share means to provide material to the public by any means or 116 | process that requires permission under the Licensed Rights, such 117 | as reproduction, public display, public performance, distribution, 118 | dissemination, communication, or importation, and to make material 119 | available to the public including in ways that members of the 120 | public may access the material from a place and at a time 121 | individually chosen by them. 122 | 123 | j. Sui Generis Database Rights means rights other than copyright 124 | resulting from Directive 96/9/EC of the European Parliament and of 125 | the Council of 11 March 1996 on the legal protection of databases, 126 | as amended and/or succeeded, as well as other essentially 127 | equivalent rights anywhere in the world. 128 | 129 | k. You means the individual or entity exercising the Licensed Rights 130 | under this Public License. Your has a corresponding meaning. 131 | 132 | 133 | Section 2 -- Scope. 134 | 135 | a. License grant. 136 | 137 | 1. Subject to the terms and conditions of this Public License, 138 | the Licensor hereby grants You a worldwide, royalty-free, 139 | non-sublicensable, non-exclusive, irrevocable license to 140 | exercise the Licensed Rights in the Licensed Material to: 141 | 142 | a. reproduce and Share the Licensed Material, in whole or 143 | in part; and 144 | 145 | b. produce, reproduce, and Share Adapted Material. 146 | 147 | 2. Exceptions and Limitations. 
For the avoidance of doubt, where 148 | Exceptions and Limitations apply to Your use, this Public 149 | License does not apply, and You do not need to comply with 150 | its terms and conditions. 151 | 152 | 3. Term. The term of this Public License is specified in Section 153 | 6(a). 154 | 155 | 4. Media and formats; technical modifications allowed. The 156 | Licensor authorizes You to exercise the Licensed Rights in 157 | all media and formats whether now known or hereafter created, 158 | and to make technical modifications necessary to do so. The 159 | Licensor waives and/or agrees not to assert any right or 160 | authority to forbid You from making technical modifications 161 | necessary to exercise the Licensed Rights, including 162 | technical modifications necessary to circumvent Effective 163 | Technological Measures. For purposes of this Public License, 164 | simply making modifications authorized by this Section 2(a) 165 | (4) never produces Adapted Material. 166 | 167 | 5. Downstream recipients. 168 | 169 | a. Offer from the Licensor -- Licensed Material. Every 170 | recipient of the Licensed Material automatically 171 | receives an offer from the Licensor to exercise the 172 | Licensed Rights under the terms and conditions of this 173 | Public License. 174 | 175 | b. No downstream restrictions. You may not offer or impose 176 | any additional or different terms or conditions on, or 177 | apply any Effective Technological Measures to, the 178 | Licensed Material if doing so restricts exercise of the 179 | Licensed Rights by any recipient of the Licensed 180 | Material. 181 | 182 | 6. No endorsement. Nothing in this Public License constitutes or 183 | may be construed as permission to assert or imply that You 184 | are, or that Your use of the Licensed Material is, connected 185 | with, or sponsored, endorsed, or granted official status by, 186 | the Licensor or others designated to receive attribution as 187 | provided in Section 3(a)(1)(A)(i). 
188 | 189 | b. Other rights. 190 | 191 | 1. Moral rights, such as the right of integrity, are not 192 | licensed under this Public License, nor are publicity, 193 | privacy, and/or other similar personality rights; however, to 194 | the extent possible, the Licensor waives and/or agrees not to 195 | assert any such rights held by the Licensor to the limited 196 | extent necessary to allow You to exercise the Licensed 197 | Rights, but not otherwise. 198 | 199 | 2. Patent and trademark rights are not licensed under this 200 | Public License. 201 | 202 | 3. To the extent possible, the Licensor waives any right to 203 | collect royalties from You for the exercise of the Licensed 204 | Rights, whether directly or through a collecting society 205 | under any voluntary or waivable statutory or compulsory 206 | licensing scheme. In all other cases the Licensor expressly 207 | reserves any right to collect such royalties. 208 | 209 | 210 | Section 3 -- License Conditions. 211 | 212 | Your exercise of the Licensed Rights is expressly made subject to the 213 | following conditions. 214 | 215 | a. Attribution. 216 | 217 | 1. If You Share the Licensed Material (including in modified 218 | form), You must: 219 | 220 | a. retain the following if it is supplied by the Licensor 221 | with the Licensed Material: 222 | 223 | i. identification of the creator(s) of the Licensed 224 | Material and any others designated to receive 225 | attribution, in any reasonable manner requested by 226 | the Licensor (including by pseudonym if 227 | designated); 228 | 229 | ii. a copyright notice; 230 | 231 | iii. a notice that refers to this Public License; 232 | 233 | iv. a notice that refers to the disclaimer of 234 | warranties; 235 | 236 | v. a URI or hyperlink to the Licensed Material to the 237 | extent reasonably practicable; 238 | 239 | b. indicate if You modified the Licensed Material and 240 | retain an indication of any previous modifications; and 241 | 242 | c. 
indicate the Licensed Material is licensed under this 243 | Public License, and include the text of, or the URI or 244 | hyperlink to, this Public License. 245 | 246 | 2. You may satisfy the conditions in Section 3(a)(1) in any 247 | reasonable manner based on the medium, means, and context in 248 | which You Share the Licensed Material. For example, it may be 249 | reasonable to satisfy the conditions by providing a URI or 250 | hyperlink to a resource that includes the required 251 | information. 252 | 253 | 3. If requested by the Licensor, You must remove any of the 254 | information required by Section 3(a)(1)(A) to the extent 255 | reasonably practicable. 256 | 257 | 4. If You Share Adapted Material You produce, the Adapter's 258 | License You apply must not prevent recipients of the Adapted 259 | Material from complying with this Public License. 260 | 261 | 262 | Section 4 -- Sui Generis Database Rights. 263 | 264 | Where the Licensed Rights include Sui Generis Database Rights that 265 | apply to Your use of the Licensed Material: 266 | 267 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 268 | to extract, reuse, reproduce, and Share all or a substantial 269 | portion of the contents of the database; 270 | 271 | b. if You include all or a substantial portion of the database 272 | contents in a database in which You have Sui Generis Database 273 | Rights, then the database in which You have Sui Generis Database 274 | Rights (but not its individual contents) is Adapted Material; and 275 | 276 | c. You must comply with the conditions in Section 3(a) if You Share 277 | all or a substantial portion of the contents of the database. 278 | 279 | For the avoidance of doubt, this Section 4 supplements and does not 280 | replace Your obligations under this Public License where the Licensed 281 | Rights include other Copyright and Similar Rights. 282 | 283 | 284 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 285 | 286 | a. 
UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 287 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 288 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 289 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 290 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 291 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 292 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 293 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 294 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 295 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 296 | 297 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 298 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 299 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 300 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 301 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 302 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 303 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 304 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 305 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 306 | 307 | c. The disclaimer of warranties and limitation of liability provided 308 | above shall be interpreted in a manner that, to the extent 309 | possible, most closely approximates an absolute disclaimer and 310 | waiver of all liability. 311 | 312 | 313 | Section 6 -- Term and Termination. 314 | 315 | a. This Public License applies for the term of the Copyright and 316 | Similar Rights licensed here. However, if You fail to comply with 317 | this Public License, then Your rights under this Public License 318 | terminate automatically. 319 | 320 | b. Where Your right to use the Licensed Material has terminated under 321 | Section 6(a), it reinstates: 322 | 323 | 1. 
automatically as of the date the violation is cured, provided 324 | it is cured within 30 days of Your discovery of the 325 | violation; or 326 | 327 | 2. upon express reinstatement by the Licensor. 328 | 329 | For the avoidance of doubt, this Section 6(b) does not affect any 330 | right the Licensor may have to seek remedies for Your violations 331 | of this Public License. 332 | 333 | c. For the avoidance of doubt, the Licensor may also offer the 334 | Licensed Material under separate terms or conditions or stop 335 | distributing the Licensed Material at any time; however, doing so 336 | will not terminate this Public License. 337 | 338 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 339 | License. 340 | 341 | 342 | Section 7 -- Other Terms and Conditions. 343 | 344 | a. The Licensor shall not be bound by any additional or different 345 | terms or conditions communicated by You unless expressly agreed. 346 | 347 | b. Any arrangements, understandings, or agreements regarding the 348 | Licensed Material not stated herein are separate from and 349 | independent of the terms and conditions of this Public License. 350 | 351 | 352 | Section 8 -- Interpretation. 353 | 354 | a. For the avoidance of doubt, this Public License does not, and 355 | shall not be interpreted to, reduce, limit, restrict, or impose 356 | conditions on any use of the Licensed Material that could lawfully 357 | be made without permission under this Public License. 358 | 359 | b. To the extent possible, if any provision of this Public License is 360 | deemed unenforceable, it shall be automatically reformed to the 361 | minimum extent necessary to make it enforceable. If the provision 362 | cannot be reformed, it shall be severed from this Public License 363 | without affecting the enforceability of the remaining terms and 364 | conditions. 365 | 366 | c. 
No term or condition of this Public License will be waived and no 367 | failure to comply consented to unless expressly agreed to by the 368 | Licensor. 369 | 370 | d. Nothing in this Public License constitutes or may be interpreted 371 | as a limitation upon, or waiver of, any privileges and immunities 372 | that apply to the Licensor or You, including from the legal 373 | processes of any jurisdiction or authority. 374 | 375 | 376 | ======================================================================= 377 | 378 | Creative Commons is not a party to its public 379 | licenses. Notwithstanding, Creative Commons may elect to apply one of 380 | its public licenses to material it publishes and in those instances 381 | will be considered the “Licensor.” The text of the Creative Commons 382 | public licenses is dedicated to the public domain under the CC0 Public 383 | Domain Dedication. Except for the limited purpose of indicating that 384 | material is shared under a Creative Commons public license or as 385 | otherwise permitted by the Creative Commons policies published at 386 | creativecommons.org/policies, Creative Commons does not authorize the 387 | use of the trademark "Creative Commons" or any other trademark or logo 388 | of Creative Commons without its prior written consent including, 389 | without limitation, in connection with any unauthorized modifications 390 | to any of its public licenses or any other arrangements, 391 | understandings, or agreements concerning use of licensed material. For 392 | the avoidance of doubt, this paragraph does not form part of the 393 | public licenses. 394 | 395 | Creative Commons may be contacted at creativecommons.org. 
396 | 397 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Tutorials 2 | 3 | ## Before you begin 4 | 5 | These tutorials accompany the presentation [Introduction to Kubernetes][intro-slides] and make use of 6 | [kind][kind], a tool that allows users to quickly spin up and run a single instance of Kubernetes locally using 7 | Docker. To install it and the other tutorial dependencies, see the 8 | [Installation Guides](#installation-guides) section. 9 | 10 | Each section assumes an instance of kind is up and running. To start kind for the first time, use the command: 11 | ``` 12 | kind create cluster 13 | ``` 14 | 15 | Tutorials have been validated against kind v0.14.0 running Kubernetes v1.24.x and kubectl 1.24.0 16 | 17 | --- 18 | 19 | ## Tutorial Index 20 | * [cli](/cli/README.md) - Covers the basics of using `kubectl` to interact with a Kubernetes cluster. 21 | * [core](/core/README.md) - Tutorial of the core concepts, or building blocks of Kubernetes. 22 | * [workloads](/workloads/README.md) - Walkthrough of the different types of application workloads. 23 | * [storage](/storage/README.md) - Explores the relationship between Persistent Volumes, Persistent Volume Claims, 24 | and Volumes themselves. 25 | * [configuration](/configuration/README.md) - Tutorials going over how to use the two Configuration objects 26 | ConfigMaps and Secrets. 27 | * [Examples](/examples/README.md) - Examples of full blown applications to explore after the tutorials have been 28 | completed. 29 | 30 | --- 31 | 32 | ## Installation Guides 33 | 34 | The Installation guides are centered around using Docker. Please ensure you have Docker installed by following 35 | their [installation guide](https://docs.docker.com/engine/install/#desktop) for your platform. 
36 | 37 | * [OSX](#osx-installation-guide) 38 | * [Windows](#windows-installation-guide) 39 | * [Linux](#linux) 40 | * [Verifying Install](#verifying-install) 41 | * [Troubleshooting Install Problems](#troubleshooting-install-problems) 42 | 43 | --- 44 | 45 | ## OSX Installation Guide 46 | 47 | Installation on OSX is done with [Homebrew][brew], an OSX package manager. If you have not installed it previously, 48 | please see their [installation guide][brew] before continuing. 49 | 50 | Install `git`, `kubectl`, and `kind` 51 | ``` 52 | brew install git kubernetes-cli kind 53 | ``` 54 | 55 | Once done, [verify your Install](#verifying-install). 56 | 57 | **Optional:** You can improve the general user experience of working with `kubectl` by installing 58 | [command-completion][osx-completion] 59 | 60 | --- 61 | 62 | ## Windows Installation Guide 63 | 64 | Installation on Windows is done with [chocolatey][choco], a Windows Package Manager. If you have not 65 | installed it previously, please see their [installation guide][choco-install] before continuing. 66 | 67 | ``` 68 | dism.exe /Online /Disable-Feature:Microsoft-Hyper-V 69 | ``` 70 | 71 | Install `git`, `kubectl`, and `kind`. 72 | ``` 73 | choco install git kubernetes-cli kind 74 | ``` 75 | 76 | Once done, [verify your Install](#verifying-install). 77 | 78 | --- 79 | 80 | ## Linux 81 | 82 | Linux installation is different for each distro. General install information is linked below: 83 | 84 | * [git][linux-git] 85 | * [kubectl][linux-kubectl] 86 | * [kind][linux-kind] 87 | 88 | Once done, [verify your Install](#verifying-install). 
89 | 90 | **Optional:** You can improve the general user experience of working with `kubectl` by installing 91 | [command-completion][linux-completion] 92 | 93 | --- 94 | 95 | ## Verifying Install 96 | 97 | With the software installed you can verify it is working correctly by executing: 98 | ``` 99 | kind create cluster 100 | ``` 101 | 102 | This will take a little bit of time the first time it is run as it will download its needed dependencies and start the 103 | container. When it completes, you can verify it is working correctly by executing: 104 | ``` 105 | kubectl version 106 | ``` 107 | 108 | You should get output similar to the following: 109 | ``` 110 | Client Version: version.Info{Major:"1", Minor:"19", GitVersion:"v1.19.3", GitCommit:"1e11e4a2108024935ecfcb2912226cedeafd99df", GitTreeState:"clean", BuildDate:"2020-10-14T12:50:19Z", GoVersion:"go1.15.2", Compiler:"gc", Platform:"linux/amd64"} 111 | Server Version: version.Info{Major:"1", Minor:"24", GitVersion:"v1.24.0", GitCommit:"4ce5a8954017644c5420bae81d72b09b735c21f0", GitTreeState:"clean", BuildDate:"2022-05-19T15:39:43Z", GoVersion:"go1.18.1", Compiler:"gc", Platform:"linux/amd64"} 112 | ``` 113 | 114 | After that you may stop the container with: 115 | ``` 116 | kind delete cluster 117 | ``` 118 | 119 | --- 120 | 121 | ## Troubleshooting Install Problems 122 | 123 | (This tutorial has been recently updated. After running tutorials it will be updated with what we've learned!) 
124 | 125 | 126 | 127 | [intro-slides]: https://docs.google.com/presentation/d/1zrfVlE5r61ZNQrmXKx5gJmBcXnoa_WerHEnTxu5SMco/edit?usp=sharing 128 | [kind]: https://kind.sigs.k8s.io 129 | [brew]: https://brew.sh/ 130 | [osx-completion]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#on-macos-using-bash 131 | [choco]: https://chocolatey.org/ 132 | [choco-install]: https://chocolatey.org/install 133 | [linux-git]: https://git-scm.com/book/en/v2/Getting-Started-Installing-Git 134 | [linux-kubectl]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl 135 | [linux-kind]: https://github.com/kubernetes-sigs/kind#installation-and-usage 136 | [linux-completion]: https://kubernetes.io/docs/tasks/tools/install-kubectl/#enabling-shell-autocompletion -------------------------------------------------------------------------------- /cli/README.md: -------------------------------------------------------------------------------- 1 | 2 | # Using the CLI 3 | The Kubernetes client, `kubectl` is the primary method of interacting with a Kubernetes cluster. Getting to know it 4 | is essential to using Kubernetes itself. 
5 | 6 | 7 | ## Index 8 | * [Syntax Structure](#syntax-structure) 9 | * [Context and kubeconfig](#context-and-kubeconfig) 10 | * [kubectl config](#kubectl-config) 11 | * [Exercise: Using Contexts](#exercise-using-contexts) 12 | * [kubectl Basics](#kubectl-basics) 13 | * [kubectl get](#kubectl-get) 14 | * [kubectl create](#kubectl-create) 15 | * [kubectl apply](#kubectl-apply) 16 | * [kubectl edit](#kubectl-edit) 17 | * [kubectl delete](#kubectl-delete) 18 | * [kubectl describe](#kubectl-describe) 19 | * [kubectl logs](#kubectl-logs) 20 | * [Exercise: The Basics](#exercise-the-basics) 21 | * [Accessing the Cluster](#accessing-the-cluster) 22 | * [kubectl exec](#kubectl-exec) 23 | * [Exercise: Executing Commands within a Remote Pod](#exercise-executing-commands-within-a-remote-pod) 24 | * [kubectl proxy](#kubectl-proxy) 25 | * [Exercise: Using the Proxy](#exercise-using-the-proxy) 26 | * [Cleaning up](#cleaning-up) 27 | * [Helpful Resources](#helpful-resources) 28 | 29 | --- 30 | 31 | # Syntax Structure 32 | 33 | `kubectl` uses a common syntax for all operations in the form of: 34 | 35 | ``` 36 | kubectl <command> <type> <name> <flags> 37 | ``` 38 | 39 | * **command** - The command or operation to perform. e.g. `apply`, `create`, `delete`, and `get`. 40 | * **type** - The resource type or object. 41 | * **name** - The name of the resource or object. 42 | * **flags** - Optional flags to pass to the command. 43 | 44 | **Examples** 45 | ``` 46 | $ kubectl create -f mypod.yaml 47 | $ kubectl get pods 48 | $ kubectl get pod mypod 49 | $ kubectl delete pod mypod 50 | ``` 51 | 52 | --- 53 | 54 | [Back to Index](#index) 55 | 56 | --- 57 | --- 58 | 59 | 60 | # Context and kubeconfig 61 | `kubectl` allows a user to interact with and manage multiple Kubernetes clusters. To do this, it requires what is known 62 | as a context. A context consists of a combination of `cluster`, `namespace` and `user`. 63 | * **cluster** - A friendly name, server address, and certificate for the Kubernetes cluster. 
64 | * **namespace (optional)** - The logical cluster or environment to use. If none is provided, it will use the default 65 | `default` namespace. 66 | * **user** - The credentials used to connect to the cluster. This can be a combination of client certificate and key, 67 | username/password, or token. 68 | 69 | These contexts are stored in a local yaml based config file referred to as the `kubeconfig`. For \*nix based 70 | systems, the `kubeconfig` is stored in `$HOME/.kube/config`; for Windows, it can be found in 71 | `%USERPROFILE%/.kube/config` 72 | 73 | This config is viewable without having to view the file directly. 74 | 75 | **Command** 76 | ``` 77 | $ kubectl config view 78 | ``` 79 | 80 | **Example** 81 | ```yaml 82 | ❯ kubectl config view 83 | apiVersion: v1 84 | clusters: 85 | - cluster: 86 | certificate-authority-data: DATA+OMITTED 87 | server: https://127.0.0.1:46347 88 | name: kind-kind 89 | contexts: 90 | - context: 91 | cluster: kind-kind 92 | user: kind-kind 93 | name: kind-kind 94 | current-context: kind-kind 95 | kind: Config 96 | preferences: {} 97 | users: 98 | - name: kind-kind 99 | user: 100 | client-certificate-data: REDACTED 101 | client-key-data: REDACTED 102 | 103 | ``` 104 | 105 | --- 106 | 107 | ### `kubectl config` 108 | 109 | Managing all aspects of contexts is done via the `kubectl config` command. Some examples include: 110 | * See the active context with `kubectl config current-context`. 111 | * Get a list of available contexts with `kubectl config get-contexts`. 112 | * Switch to using another context with the `kubectl config use-context <context-name>` command. 113 | * Add a new context with `kubectl config set-context <context-name> --cluster=<cluster-name> --user=<user> --namespace=<namespace>`. 114 | 115 | There can be quite a few specifics involved when adding a context, for the available options, please see the 116 | [Configuring Multiple Clusters](https://kubernetes.io/docs/tasks/access-application-cluster/configure-access-multiple-clusters/) 117 | Kubernetes documentation. 
118 | 119 | --- 120 | 121 | ### Exercise: Using Contexts 122 | **Objective:** Create a new context called `kind-dev` and switch to it. 123 | 124 | --- 125 | 126 | 1. View the current contexts. 127 | ``` 128 | $ kubectl config get-contexts 129 | ``` 130 | 131 | 2. Create a new context called `kind-dev` within the `kind-kind` cluster with the `dev` namespace, as the 132 | `kind-kind` user. 133 | ``` 134 | $ kubectl config set-context kind-dev --cluster=kind-kind --user=kind-kind --namespace=dev 135 | ``` 136 | 137 | 3. View the newly added context. 138 | ``` 139 | $ kubectl config get-contexts 140 | ``` 141 | 142 | 4. Switch to the `kind-dev` context using `use-context`. 143 | ``` 144 | $ kubectl config use-context kind-dev 145 | ``` 146 | 147 | 5. View the current active context. 148 | ``` 149 | $ kubectl config current-context 150 | ``` 151 | 152 | --- 153 | 154 | **Summary:** Understanding and being able to switch between contexts is a base fundamental skill required by every 155 | Kubernetes user. As more clusters and namespaces are added, this can become unwieldy. Installing a helper 156 | application such as [kubectx](https://github.com/ahmetb/kubectx) can be quite helpful. Kubectx allows a user to quickly 157 | switch between contexts and namespaces without having to use the full `kubectl config use-context` command. 158 | 159 | --- 160 | 161 | [Back to Index](#index) 162 | 163 | --- 164 | --- 165 | 166 | ## Kubectl Basics 167 | There are several `kubectl` commands that are frequently used for any sort of day-to-day operations. `get`, `create`, 168 | `apply`, `delete`, `describe`, and `logs`. Other commands can be listed simply with `kubectl --help`, or 169 | `kubectl <command> --help`. 170 | 171 | --- 172 | 173 | ### `kubectl get` 174 | `kubectl get` fetches and lists objects of a certain type or a specific object itself. 
It also supports outputting the 175 | information in several different useful formats including: json, yaml, wide (additional columns), or name 176 | (names only) via the `-o` or `--output` flag. 177 | 178 | **Command** 179 | ``` 180 | kubectl get 181 | kubectl get 182 | kubectl get -o 183 | ``` 184 | 185 | **Examples** 186 | ``` 187 | $ kubectl get namespaces 188 | NAME STATUS AGE 189 | default Active 4h 190 | kube-public Active 4h 191 | kube-system Active 4h 192 | $ 193 | $kubectl get pod mypod -o wide 194 | NAME READY STATUS RESTARTS AGE IP NODE 195 | mypod 1/1 Running 0 5m 172.17.0.6 kind-control-plane 196 | ``` 197 | 198 | --- 199 | 200 | ### `kubectl create` 201 | `kubectl create` creates an object from the commandline (`stdin`) or a supplied json/yaml manifest. The manifests can be 202 | specified with the `-f` or `--filename` flag that can point to either a file, or a directory containing multiple 203 | manifests. 204 | 205 | **Command** 206 | ``` 207 | kubectl create 208 | kubectl create -f 209 | ``` 210 | 211 | **Examples** 212 | ``` 213 | $ kubectl create namespace dev 214 | namespace "dev" created 215 | $ 216 | $ kubectl create -f manifests/mypod.yaml 217 | pod "mypod" created 218 | ``` 219 | 220 | --- 221 | 222 | ### `kubectl apply` 223 | `kubectl apply` is similar to `kubectl create`. It will essentially update the resource if it is already created, or 224 | simply create it if does not yet exist. When it updates the config, it will save the previous version of it in an 225 | `annotation` on the created object itself. **WARNING:** If the object was not created initially with 226 | `kubectl apply` it's updating behavior will act as a two-way diff. For more information on this, please see the 227 | [kubectl apply](https://kubernetes.io/docs/concepts/cluster-administration/manage-deployment/#kubectl-apply) 228 | documentation. 229 | 230 | Just like `kubectl create` it takes a json or yaml manifest with the `-f` flag or accepts input from `stdin`. 
231 | 232 | **Command** 233 | ``` 234 | kubectl apply -f 235 | ``` 236 | 237 | **Examples** 238 | ``` 239 | $ kubectl apply -f manifests/mypod.yaml 240 | Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply 241 | pod "mypod" configured 242 | ``` 243 | 244 | --- 245 | 246 | ### `kubectl edit` 247 | `kubectl edit` modifies a resource in place without having to apply an updated manifest. It fetches a copy of the 248 | desired object and opens it locally with the configured text editor, set by the `KUBE_EDITOR` or `EDITOR` Environment 249 | Variables. This command is useful for troubleshooting, but should be avoided in production scenarios as the changes 250 | will essentially be untracked. 251 | 252 | **Command** 253 | ``` 254 | $ kubectl edit 255 | ``` 256 | 257 | **Examples** 258 | ``` 259 | kubectl edit pod mypod 260 | kubectl edit service myservice 261 | ``` 262 | 263 | --- 264 | 265 | ### `kubectl delete` 266 | `kubectl delete` deletes the object from Kubernetes. 267 | 268 | **Command** 269 | ``` 270 | kubectl delete 271 | ``` 272 | 273 | **Examples** 274 | ``` 275 | $ kubectl delete pod mypod 276 | pod "mypod" deleted 277 | ``` 278 | 279 | --- 280 | 281 | ### `kubectl describe` 282 | `kubectl describe` lists detailed information about the specific Kubernetes object. It is a very helpful 283 | troubleshooting tool. 284 | 285 | **Command** 286 | ``` 287 | kubectl describe 288 | kubectl describe 289 | ``` 290 | 291 | **Examples** 292 | ``` 293 | $ kubectl describe pod mypod 294 | Name: mypod 295 | Namespace: dev 296 | Node: kind-control-plane/192.168.99.100 297 | Start Time: Sat, 10 Mar 2018 13:12:53 -0500 298 | Labels: 299 | Annotations: kubectl.kubernetes.io/last-applied-configuration={"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{},"name":"mypod","namespace":"dev"},"spec":{"containers":[{"image":... 
300 | Status: Running 301 | IP: 172.17.0.6 302 | Containers: 303 | nginx: 304 | Container ID: docker://5a0c100de6599300b1565e73e64e8917f9a4f4b06325dc4890aad980d582cf04 305 | Image: nginx:stable-alpine 306 | Image ID: docker-pullable://nginx@sha256:db5acc22920799fe387a903437eb89387607e5b3f63cf0f4472ac182d7bad644 307 | Port: 80/TCP 308 | State: Running 309 | Started: Sat, 10 Mar 2018 13:12:53 -0500 310 | Ready: True 311 | Restart Count: 0 312 | Environment: 313 | Mounts: 314 | /var/run/secrets/kubernetes.io/serviceaccount from default-token-s2xd7 (ro) 315 | Conditions: 316 | Type Status 317 | Initialized True 318 | Ready True 319 | PodScheduled True 320 | Volumes: 321 | default-token-s2xd7: 322 | Type: Secret (a volume populated by a Secret) 323 | SecretName: default-token-s2xd7 324 | Optional: false 325 | QoS Class: BestEffort 326 | Node-Selectors: 327 | Tolerations: 328 | Events: 329 | Type Reason Age From Message 330 | ---- ------ ---- ---- ------- 331 | Normal Scheduled 5s default-scheduler Successfully assigned mypod to kind-control-plane 332 | Normal SuccessfulMountVolume 5s kubelet, kind-control-plane MountVolume.SetUp succeeded for volume "default-token-s2xd7" 333 | Normal Pulled 5s kubelet, kind-control-plane Container image "nginx:stable-alpine" already present on machine 334 | Normal Created 5s kubelet, kind-control-plane Created container 335 | Normal Started 5s kubelet, kind-control-plane Started container 336 | ``` 337 | 338 | --- 339 | 340 | ### `kubectl logs` 341 | `kubectl logs` outputs the combined `stdout` and `stderr` logs from a pod. If more than one container exist in a 342 | `pod` the `-c` flag is used and the container name must be specified. 
343 | 344 | **Command** 345 | ``` 346 | kubectl logs 347 | kubectl logs -c 348 | ``` 349 | 350 | **Examples** 351 | ``` 352 | $ kubectl logs mypod 353 | 172.17.0.1 - - [10/Mar/2018:18:14:15 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.57.0" "-" 354 | 172.17.0.1 - - [10/Mar/2018:18:14:17 +0000] "GET / HTTP/1.1" 200 612 "-" "curl/7.57.0" "-" 355 | ``` 356 | 357 | --- 358 | 359 | ### Exercise: The Basics 360 | **Objective:** Explore the basics. Create a namespace, a pod, then use the `kubectl` commands to describe and delete 361 | what was created. 362 | 363 | **NOTE:** You should still be using the `kind-dev` context created earlier. 364 | 365 | --- 366 | 367 | 1) Create the `dev` namespace. 368 | ``` 369 | kubectl create namespace dev 370 | ``` 371 | 372 | 2) Apply the manifest `manifests/mypod.yaml`. 373 | ``` 374 | kubectl apply -f manifests/mypod.yaml 375 | ``` 376 | 377 | 3) Get the yaml output of the created pod `mypod`. 378 | ``` 379 | kubectl get pod mypod -o yaml 380 | ``` 381 | 382 | 4) Describe the pod `mypod`. 383 | ``` 384 | kubectl describe pod mypod 385 | ``` 386 | 387 | 5) Clean up the pod by deleting it. 388 | ``` 389 | kubectl delete pod mypod 390 | ``` 391 | 392 | --- 393 | 394 | **Summary:** The `kubectl` _"CRUD"_ commands are used frequently when interacting with a Kubernetes cluster. These 395 | simple tasks become 2nd nature as more experience is gained. 396 | 397 | --- 398 | 399 | [Back to Index](#index) 400 | 401 | --- 402 | --- 403 | 404 | # Accessing the Cluster 405 | 406 | `kubectl` provides several mechanisms for accessing resources within the cluster remotely. For this tutorial, the focus 407 | will be on using `kubectl exec` to get a remote shell within a container, and `kubectl proxy` to gain access to the 408 | services exposed through the API proxy. 409 | 410 | --- 411 | 412 | ### `kubectl exec` 413 | `kubectl exec` executes a command within a Pod and can optionally spawn an interactive terminal within a remote 414 | container. 
When more than one container is present within a Pod, the `-c` or `--container` flag is required, followed 415 | by the container name. 416 | 417 | If an interactive session is desired, the `-i` (`--stdin`) and `-t` (`--tty`) flags must be supplied. 418 | 419 | **Command** 420 | ``` 421 | kubectl exec -- 422 | kubectl exec -c -- 423 | kubectl exec -i -t -c -- 424 | kubectl exec -it -c -- 425 | ``` 426 | 427 | 428 | **Example** 429 | ``` 430 | $ kubectl exec mypod -c nginx -- printenv 431 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 432 | HOSTNAME=mypod 433 | KUBERNETES_SERVICE_PORT_HTTPS=443 434 | KUBERNETES_PORT=tcp://10.96.0.1:443 435 | KUBERNETES_PORT_443_TCP=tcp://10.96.0.1:443 436 | KUBERNETES_PORT_443_TCP_PROTO=tcp 437 | KUBERNETES_PORT_443_TCP_PORT=443 438 | KUBERNETES_PORT_443_TCP_ADDR=10.96.0.1 439 | KUBERNETES_SERVICE_HOST=10.96.0.1 440 | KUBERNETES_SERVICE_PORT=443 441 | NGINX_VERSION=1.12.2 442 | HOME=/root 443 | $ 444 | $ kubectl exec -i -t mypod -c nginx -- /bin/sh 445 | / # 446 | / # cat /etc/alpine-release 447 | 3.5.2 448 | ``` 449 | 450 | --- 451 | 452 | ### Exercise: Executing Commands within a Remote Pod 453 | **Objective:** Use `kubectl exec` to both initiate commands and spawn an interactive shell within a Pod. 454 | 455 | --- 456 | 457 | 1) If not already created, create the Pod `mypod` from the manifest `manifests/mypod.yaml`. 458 | ``` 459 | $ kubectl create -f manifests/mypod.yaml 460 | ``` 461 | 462 | 2) Wait for the Pod to become ready (`running`). 463 | ``` 464 | $ kubectl get pods --watch 465 | ``` 466 | 467 | 3) Use `kubectl exec` to `cat` the file `/etc/os-release`. 468 | ``` 469 | $ kubectl exec mypod -- cat /etc/os-release 470 | ``` 471 | It should output the contents of the `os-release` file. 472 | 473 | 4) Now use `kubectl exec` and supply the `-i -t` flags to spawn a shell session within the container. 
474 | ``` 475 | $ kubectl exec -i -t mypod -- /bin/sh 476 | ``` 477 | If executed correctly, it should drop you into a new shell session within the nginx container. 478 | 479 | 5) use `ps aux` to view the current processes within the container. 480 | ``` 481 | / # ps aux 482 | ``` 483 | There should be two nginx processes along with a `/bin/sh` process representing your interactive shell. 484 | 485 | 6) Exit out of the container simply by typing `exit`. 486 | With that the shell process will be terminated and the only running processes within the container should 487 | once again be nginx and its worker process. 488 | 489 | --- 490 | 491 | **Summary:** `kubectl exec` is not often used, but is an important skill to be familiar with when it comes to Pod 492 | debugging. 493 | 494 | --- 495 | 496 | ### `kubectl proxy` 497 | `kubectl proxy` enables access to both the Kubernetes API-Server and to resources running within the cluster 498 | securely using `kubectl`. By default it creates a connection to the API-Server that can be accessed at 499 | `127.0.0.1:8001` or an alternative port by supplying the `-p` or `--port` flag. 500 | 501 | 502 | **Command** 503 | ``` 504 | kubectl proxy 505 | kubectl proxy --port= 506 | ``` 507 | 508 | **Examples** 509 | ``` 510 | $ kubectl proxy 511 | Starting to serve on 127.0.0.1:8001 512 | 513 | 514 | $ curl 127.0.0.1:8001/version 515 | { 516 | "major": "", 517 | "minor": "", 518 | "gitVersion": "v1.9.0", 519 | "gitCommit": "925c127ec6b946659ad0fd596fa959be43f0cc05", 520 | "gitTreeState": "clean", 521 | "buildDate": "2018-01-26T19:04:38Z", 522 | "goVersion": "go1.9.1", 523 | "compiler": "gc", 524 | "platform": "linux/amd64" 525 | } 526 | ``` 527 | 528 | The Kubernetes API-Server has the built in capability to proxy to running services or pods within the cluster. This 529 | ability in conjunction with the `kubectl proxy` command allows a user to access those services or pods without having 530 | to expose them outside of the cluster. 
531 | 532 | ``` 533 | http:///api/v1/namespaces///[:port_name]/proxy 534 | ``` 535 | * **proxy_address** - The local proxy address - `127.0.0.1:8001` 536 | * **namespace** - The namespace owning the resources to proxy to. 537 | * **service|pod** - The type of resource you are trying to access, either `service` or `pod`. 538 | * **service_name|pod_name** - The name of the `service` or `pod` to be accessed. 539 | * **[:port]** - An optional port to proxy to. Will default to the first one exposed. 540 | 541 | **Example** 542 | ``` 543 | http://127.0.0.1:8001/api/v1/namespaces/default/pods/mypod/proxy/ 544 | ``` 545 | 546 | --- 547 | 548 | ### Exercise: Using the Proxy 549 | **Objective:** Examine the capabilities of the proxy by accessing a pod's exposed ports. 550 | 551 | --- 552 | 553 | 1) Create the Pod `mypod` from the manifest `manifests/mypod.yaml`. (if not created previously) 554 | ``` 555 | $ kubectl create -f manifests/mypod.yaml 556 | ``` 557 | 558 | 2) Start the `kubectl proxy` with the defaults. 559 | ``` 560 | $ kubectl proxy 561 | ``` 562 | 563 | 3) Access the Pod through the proxy. 564 | ``` 565 | http://127.0.0.1:8001/api/v1/namespaces/dev/pods/mypod/proxy/ 566 | ``` 567 | You should see the "Welcome to nginx!" page. 568 | 569 | --- 570 | 571 | **Summary:** Being able to access the exposed Pods and Services within a cluster without having to consume an 572 | external IP, or create firewall rules is an incredibly useful tool for troubleshooting cluster services. 573 | 574 | --- 575 | 576 | [Back to Index](#index) 577 | 578 | --- 579 | --- 580 | 581 | ## Cleaning up 582 | **NOTE:** If you are proceeding with the next tutorials, simply delete the pod with: 583 | ``` 584 | $ kubectl delete pod mypod 585 | ``` 586 | The namespace and context will be reused. 
587 | 588 | To remove everything that was created in this tutorial, execute the following commands: 589 | ``` 590 | kubectl delete namespace dev 591 | kubectl config delete-context kind-dev 592 | ``` 593 | 594 | --- 595 | 596 | [Back to Index](#index) 597 | 598 | --- 599 | --- 600 | 601 | ### Helpful Resources 602 | * [kubectl Overview](https://kubernetes.io/docs/reference/kubectl/overview/) 603 | * [kubectl Cheat Sheet](https://kubernetes.io/docs/reference/kubectl/cheatsheet/) 604 | * [kubectl Reference](https://kubernetes.io/docs/reference/generated/kubectl/kubectl-commands) 605 | * [Accessing Clusters](https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/) 606 | 607 | 608 | [Back to Index](#index) 609 | -------------------------------------------------------------------------------- /cli/images/dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/abstractinfrastructure/k8s-intro-tutorials/1fe3ddc88229b7287705e6309334777eba7310d2/cli/images/dashboard.png -------------------------------------------------------------------------------- /cli/manifests/mypod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: mypod 5 | spec: 6 | containers: 7 | - name: nginx 8 | image: nginx:stable-alpine 9 | ports: 10 | - containerPort: 80 11 | -------------------------------------------------------------------------------- /configuration/README.md: -------------------------------------------------------------------------------- 1 | # Configuration 2 | 3 | Kubernetes has an integrated pattern for decoupling configuration from application or container. 4 | 5 | This pattern makes use of two Kubernetes components: ConfigMaps and Secrets. 6 | 7 | Both types of objects hold key-value pairs that can be injected into Pods through a variety of means. 
8 | 9 | # Index 10 | 11 | * [ConfigMaps](#configmaps) 12 | * [Exercise: Creating ConfigMaps](#exercise-creating-configmaps) 13 | * [Exercise: Using ConfigMaps with Environment Variables](#exercise-using-configmaps-with-environment-variables) 14 | * [Exercise: Using ConfigMaps with Volumes](#exercise-using-configmaps-with-volumes) 15 | * [Secrets](#secrets) 16 | * [Exercise: Creating Secrets](#exercise-creating-secrets) 17 | * [Exercise: Using Secrets with Environment Variables](#exercise-using-secrets-with-environment-variables) 18 | * [Exercise: Using Secrets with Volumes](#exercise-using-secrets-with-volumes) 19 | * [Cleaning Up](#cleaning-up) 20 | * [Helpful Resources](#helpful-resources) 21 | 22 | 23 | ---- 24 | 25 | # ConfigMaps 26 | 27 | A ConfigMap is externalized data stored within Kubernetes that can be referenced through several different means: 28 | * Environment variable 29 | * A command line argument (via env var) 30 | * Injected as a file into a volume mount 31 | 32 | ConfigMaps can be created from a manifest, literals, a directory, or from files directly. 33 | 34 | --- 35 | 36 | ### Exercise: Creating ConfigMaps 37 | **Objective:** Go over the four methods of creating ConfigMaps. 38 | 39 | --- 40 | 41 | #### From Manifest 42 | Create ConfigMap `manifest-example` from the manifest `manifests/cm-manifest.yaml` or use the yaml below. 43 | 44 | **manifests/cm-manifest.yaml** 45 | ```yaml 46 | apiVersion: v1 47 | kind: ConfigMap 48 | metadata: 49 | name: manifest-example 50 | data: 51 | city: Ann Arbor 52 | state: Michigan 53 | ``` 54 | 55 | **Command** 56 | ``` 57 | $ kubectl create -f manifests/cm-manifest.yaml 58 | ``` 59 | 60 | View the created ConfigMap. 61 | ``` 62 | $ kubectl get configmap manifest-example -o yaml 63 | ``` 64 | 65 | #### From Literal 66 | 67 | Create ConfigMap `literal-example` using the `--from-literal` flag and `city=Ann Arbor` along with `state=Michigan` 68 | for the values.
69 | ``` 70 | $ kubectl create cm literal-example --from-literal="city=Ann Arbor" --from-literal=state=Michigan 71 | ``` 72 | 73 | View the created ConfigMap. 74 | ``` 75 | $ kubectl get cm literal-example -o yaml 76 | ``` 77 | 78 | #### From Directory 79 | 80 | Create ConfigMap `dir-example` by using the `manifests/cm` directory as the source. 81 | ``` 82 | $ kubectl create cm dir-example --from-file=manifests/cm/ 83 | ``` 84 | 85 | View the created ConfigMap. 86 | ``` 87 | $ kubectl get cm dir-example -o yaml 88 | ``` 89 | 90 | #### From File 91 | 92 | Create ConfigMap `file-example` by using the `city` and `state` files in the `manifests/cm` directory. 93 | ``` 94 | $ kubectl create cm file-example --from-file=manifests/cm/city --from-file=manifests/cm/state 95 | ``` 96 | 97 | View the created ConfigMap. 98 | ``` 99 | $ kubectl get cm file-example -o yaml 100 | ``` 101 | 102 | **Note:** When creating a ConfigMap from a file or directory the content will assume to be multiline as signified by 103 | the pipe symbol (`|`) in the yaml. 104 | 105 | --- 106 | 107 | **Summary:** There are four primary methods of creating ConfigMaps with `kubectl`. From a manifest, passing literals 108 | on the command-line, supplying a path to a directory, or to the individual files themselves. These ConfigMaps are 109 | stored within etcd, and may be used in a multitude of ways. 110 | 111 | --- 112 | 113 | ### Exercise: Using ConfigMaps with Environment Variables 114 | **Objective:** Dive into how ConfigMap items may be referenced as Environment Variables and how this method may be 115 | extended to use them as command-line arguments. 116 | 117 | **Note:** This exercise builds off the previous exercise: [Creating ConfigMaps](#exercise-creating-configmaps). If you 118 | have not, complete it first before continuing. 119 | 120 | --- 121 | 122 | 1) Create Job `cm-env-example` using the manifest `manifests/cm-env-example.yaml` or the yaml below. 
123 | 124 | **manifests/cm-env-example.yaml** 125 | ```yaml 126 | apiVersion: batch/v1 127 | kind: Job 128 | metadata: 129 | name: cm-env-example 130 | spec: 131 | template: 132 | spec: 133 | containers: 134 | - name: env 135 | image: alpine:latest 136 | command: ["/bin/sh", "-c"] 137 | args: ["printenv CITY"] 138 | env: 139 | - name: CITY 140 | valueFrom: 141 | configMapKeyRef: 142 | name: manifest-example 143 | key: city 144 | restartPolicy: Never 145 | ``` 146 | 147 | **Command** 148 | ``` 149 | $ kubectl create -f manifests/cm-env-example.yaml 150 | ``` 151 | 152 | Note how the Environment Variable is injected using `valueFrom` and `configMapKeyRef`. This queries a specific key 153 | from the ConfigMap and injects it as an Environment Variable. 154 | 155 | 2) List the Pods. 156 | ``` 157 | $ kubectl get pods 158 | ``` 159 | 160 | 3) Copy the pod name and view the output of the Job. 161 | ``` 162 | $ kubectl logs cm-env-example- 163 | ``` 164 | It should echo the value from the `manifest-example` ConfigMap `city` key-value pair. 165 | 166 | This same technique can be used to inject the value for use in a Command. 167 | 168 | 4) Create another Job `cm-cmd-example` from the manifest `manifests/cm-cmd-example.yaml` or use the yaml below. 169 | 170 | **manifests/cm-cmd-example.yaml** 171 | ```yaml 172 | apiVersion: batch/v1 173 | kind: Job 174 | metadata: 175 | name: cm-cmd-example 176 | spec: 177 | template: 178 | spec: 179 | containers: 180 | - name: env 181 | image: alpine:latest 182 | command: ["/bin/sh", "-c"] 183 | args: ["echo Hello from ${CITY}!"] 184 | env: 185 | - name: CITY 186 | valueFrom: 187 | configMapKeyRef: 188 | name: manifest-example 189 | key: city 190 | restartPolicy: Never 191 | ``` 192 | 193 | **Command** 194 | ``` 195 | $ kubectl create -f manifests/cm-cmd-example.yaml 196 | ``` 197 | 198 | 5) List the Pods. 
199 | ``` 200 | $ kubectl get pods 201 | ``` 202 | 203 | 6) Copy the pod name of the `cm-cmd-example` job and view the output of the Pod. 204 | ``` 205 | $ kubectl logs cm-cmd-example-<pod id> 206 | ``` 207 | It should echo the string "Hello from <city>!" referencing the value from the `manifest-example` ConfigMap. 208 | 209 | --- 210 | 211 | **Summary:** Items within a ConfigMap can be injected into a Pod's Environment Variables at container creation. These 212 | items may be picked up by the application being run in the container directly, or referenced as a command-line argument. 213 | Both methods are commonly used and enable a wide-variety of use-cases. 214 | 215 | --- 216 | 217 | **Clean Up Command:** 218 | ``` 219 | kubectl delete job cm-env-example cm-cmd-example 220 | ``` 221 | 222 | --- 223 | 224 | ### Exercise: Using ConfigMaps with Volumes 225 | **Objective:** Learn how to mount a ConfigMap or specific items stored within a ConfigMap as a volume. 226 | 227 | **Note:** This exercise builds off the previous exercise: [Creating ConfigMaps](#exercise-creating-configmaps). If you 228 | have not, complete it first before continuing. 229 | 230 | --- 231 | 232 | 1) Create the Pod `cm-vol-example` using the manifest `manifests/cm-vol-example.yaml` or use the yaml below.
233 | 234 | **manifests/cm-vol-example.yaml** 235 | ```yaml 236 | apiVersion: v1 237 | kind: Pod 238 | metadata: 239 | name: cm-vol-example 240 | spec: 241 | containers: 242 | - name: mypod 243 | image: alpine:latest 244 | command: ["/bin/sh", "-c"] 245 | args: 246 | - while true; do 247 | sleep 10; 248 | done 249 | volumeMounts: 250 | - name: config-volume 251 | mountPath: /myconfig 252 | - name: city 253 | mountPath: /mycity 254 | readOnly: true 255 | volumes: 256 | - name: config-volume 257 | configMap: 258 | name: manifest-example 259 | - name: city 260 | configMap: 261 | name: manifest-example 262 | items: 263 | - key: city 264 | path: thisismycity 265 | ``` 266 | 267 | **Command** 268 | ``` 269 | $ kubectl create -f manifests/cm-vol-example.yaml 270 | ``` 271 | 272 | Note the volumes and how they are being referenced. The volume `city`, has an array of `items` that contains a sub-set 273 | of the key-value pairs stored in the `manifest-example` ConfigMap. When working with the individual items, it is 274 | possible to override the name or path to the generated file by supplying an argument for the `path` parameter. 275 | 276 | 2) View the contents of the `/myconfig` volume mount. 277 | ``` 278 | $ kubectl exec cm-vol-example -- ls /myconfig 279 | ``` 280 | It will contain two files, matching the names of the keys stored in configMap `manifest-example`. 281 | 282 | 3) `cat` the contents of the files. 283 | ``` 284 | $ kubectl exec cm-vol-example -- /bin/sh -c "cat /myconfig/*" 285 | ``` 286 | It will match the values stored in the configMap `manifest-example` concatenated together. 287 | 288 | 4) View the contents of the other Volume Mount `mycity`. 289 | ``` 290 | $ kubectl exec cm-vol-example -- ls /mycity 291 | ``` 292 | A file will be present that represents the single item being referenced in the `city` volume. This file bears the 293 | name `thisismycity` as specified by the `path` variable. 294 | 295 | 5) `cat` contents of the `thisismycity` file. 
296 | ``` 297 | $ kubectl exec cm-vol-example -- cat /mycity/thisismycity 298 | ``` 299 | The contents should match the value of data[city]. 300 | 301 | --- 302 | 303 | **Summary:** In addition to being injected as Environment Variables it's possible to mount the contents of a ConfigMap 304 | as a volume. This same method may be augmented to mount specific items from a ConfigMap instead of the entire thing. 305 | These items can be renamed or be made read-only to meet a variety of application needs providing an easy to use 306 | avenue to further decouple application from configuration. 307 | 308 | --- 309 | 310 | **Clean Up Command:** 311 | ``` 312 | kubectl delete pod cm-vol-example 313 | kubectl delete cm dir-example file-example literal-example manifest-example 314 | ``` 315 | 316 | --- 317 | 318 | [Back to Index](#index) 319 | 320 | --- 321 | --- 322 | 323 | # Secrets 324 | 325 | A Secret is externalized "private" base64 encoded data stored within Kubernetes that can be referenced through 326 | several different means: 327 | * Environment variable 328 | * A command line argument (via env var) 329 | * Injected as a file into a volume mount 330 | 331 | Like ConfigMaps, Secrets can be created from a manifest, literals, or from files directly. 332 | 333 | **Note:** For all intents and purposes, Secrets are created and used just like ConfigMaps. If you have completed 334 | the ConfigMap Exercises, the Secrets section may be skimmed over, glossing over a few of the minor syntax differences. 335 | 336 | --- 337 | 338 | ### Exercise: Creating Secrets 339 | **Objective:** Learn to use the four different methods of creating Secrets, and how they differ slightly from their 340 | ConfigMap counterparts. 341 | 342 | --- 343 | 344 | #### From Manifest 345 | Create Secret `manifest-example` from the manifest `manifests/secret-manifest.yaml` or use the yaml below.
346 | 347 | **manifests/secret-manifest.yaml** 348 | ```yaml 349 | apiVersion: v1 350 | kind: Secret 351 | metadata: 352 | name: manifest-example 353 | type: Opaque 354 | data: 355 | username: ZXhhbXBsZQ== 356 | password: bXlwYXNzd29yZA== 357 | ``` 358 | 359 | **Command** 360 | ``` 361 | $ kubectl create -f manifests/secret-manifest.yaml 362 | ``` 363 | 364 | Note the Secret has the additional attribute `type` when compared to a ConfigMap. The `Opaque` value simply means the 365 | data is unstructured. Additionally, the content referenced in `data` itself is base64 encoded. Decoded, they are 366 | `username=example` and `password=mypassword`. 367 | 368 | View the created Secret. 369 | ``` 370 | $ kubectl get secret manifest-example -o yaml 371 | ``` 372 | 373 | #### From Literal 374 | 375 | Create Secret `literal-example` using the `--from-literal` flag and `username=example` along with `password=mypassword` 376 | for the values. 377 | ``` 378 | $ kubectl create secret generic literal-example --from-literal=username=example --from-literal=password=mypassword 379 | ``` 380 | **Note:** Unlike ConfigMaps you **must** also specify the type of Secret you are creating. There are 3 types: 381 | * docker-registry - Credentials used to interact with a container registry. 382 | * generic - Equivalent to `Opaque`. Used for unstructured data. 383 | * tls - A TLS key pair (PEM Format) that accepts a cert (`--cert`) and key (`--key`). 384 | 385 | 386 | View the created Secret. 387 | ``` 388 | $ kubectl get secret literal-example -o yaml 389 | ``` 390 | 391 | #### From Directory 392 | 393 | Create Secret `dir-example` by using the `manifests/secret` directory as the source. 394 | ``` 395 | $ kubectl create secret generic dir-example --from-file=manifests/secret/ 396 | ``` 397 | 398 | View the created Secret.
399 | ``` 400 | $ kubectl get secret dir-example -o yaml 401 | ``` 402 | 403 | #### From File 404 | 405 | Create Secret `file-example` by using the `username` and `password` files in the `manifests/secret` directory. 406 | ``` 407 | $ kubectl create secret generic file-example --from-file=manifests/secret/username --from-file=manifests/secret/password 408 | ``` 409 | 410 | View the created Secret. 411 | ``` 412 | $ kubectl get secret file-example -o yaml 413 | ``` 414 | 415 | --- 416 | 417 | **Summary:** Just like ConfigMaps, there are four primary methods of creating Secrets with `kubectl`. From manifests 418 | and literals to directories and files; no one method is better than another. The fundamental difference when working 419 | with Secrets over ConfigMaps, is that they require a type such as `generic` or `opaque` and the contents itself is 420 | stored in a base64 encoded form. 421 | 422 | --- 423 | 424 | ### Exercise: Using Secrets with Environment Variables 425 | **Objective:** Examine how Secrets may be referenced as Environment Variables and how this may be extended to use 426 | them in command-line arguments. 427 | 428 | **Note:** This exercise builds off the previous exercise: [Creating Secrets](#exercise-creating-secrets). If you 429 | have not, complete it first before continuing. 430 | 431 | --- 432 | 433 | 1) Create Job `secret-env-example` using the manifest `manifests/secret-env-example.yaml` or the yaml below.
434 | 435 | **manifests/secret-env-example.yaml** 436 | ```yaml 437 | apiVersion: batch/v1 438 | kind: Job 439 | metadata: 440 | name: secret-env-example 441 | spec: 442 | template: 443 | spec: 444 | containers: 445 | - name: env 446 | image: alpine:latest 447 | command: ["/bin/sh", "-c"] 448 | args: ["printenv USERNAME"] 449 | env: 450 | - name: USERNAME 451 | valueFrom: 452 | secretKeyRef: 453 | name: manifest-example 454 | key: username 455 | restartPolicy: Never 456 | ``` 457 | 458 | **Command** 459 | ``` 460 | $ kubectl create -f manifests/secret-env-example.yaml 461 | ``` 462 | 463 | Note how the Environment Variable is injected using `valueFrom` and `secretKeyRef`. This queries a specific key 464 | from the Secret and injects it as an Environment Variable. 465 | 466 | 2) List the Pods. 467 | ``` 468 | $ kubectl get pods 469 | ``` 470 | 471 | 3) Copy the pod name and view the output of the Job. 472 | ``` 473 | $ kubectl logs secret-env-example-<pod id> 474 | ``` 475 | It should echo the value from the `manifest-example` Secret `username` key-value pair. 476 | 477 | This same technique can be used to inject the value for use in a Command. 478 | 479 | 4) Create another Job `secret-cmd-example` from the manifest `manifests/secret-cmd-example.yaml` or use the yaml below. 480 | 481 | **manifests/secret-cmd-example.yaml** 482 | ```yaml 483 | apiVersion: batch/v1 484 | kind: Job 485 | metadata: 486 | name: secret-cmd-example 487 | spec: 488 | template: 489 | spec: 490 | containers: 491 | - name: env 492 | image: alpine:latest 493 | command: ["/bin/sh", "-c"] 494 | args: ["echo Hello there ${USERNAME}!"] 495 | env: 496 | - name: USERNAME 497 | valueFrom: 498 | secretKeyRef: 499 | name: manifest-example 500 | key: username 501 | restartPolicy: Never 502 | ``` 503 | 504 | **Command** 505 | ``` 506 | $ kubectl create -f manifests/secret-cmd-example.yaml 507 | ``` 508 | 509 | 5) List the Pods.
510 | ``` 511 | $ kubectl get pods 512 | ``` 513 | 514 | 6) Copy the pod name of the `secret-cmd-example` job and view the output of the Pod. 515 | ``` 516 | $ kubectl logs secret-cmd-example-<pod id> 517 | ``` 518 | It should echo the string "Hello there <username>!" referencing the value from the `manifest-example` Secret. 519 | 520 | --- 521 | 522 | **Summary:** Secrets may be injected into a Pod's Environment Variables at container creation. These variables 523 | may be picked up by the application being run in the container directly, or referenced as a command-line argument. 524 | Both methods are useful in a wide-variety of scenarios enabling further decoupling of application and configuration. 525 | 526 | --- 527 | 528 | **Clean Up Command:** 529 | ``` 530 | kubectl delete job secret-env-example secret-cmd-example 531 | ``` 532 | 533 | --- 534 | 535 | ### Exercise: Using Secrets with Volumes 536 | **Objective:** Learn how to mount a Secret or specific items stored within a Secret as a volume. 537 | 538 | **Note:** This exercise builds off the previous exercise: [Creating Secrets](#exercise-creating-secrets). If you 539 | have not, complete it first before continuing. 540 | 541 | --- 542 | 543 | 1) Create the Pod `secret-vol-example` using the manifest `manifests/secret-vol-example.yaml` or use the yaml below.
544 | 545 | **manifests/secret-vol-example.yaml** 546 | ```yaml 547 | apiVersion: v1 548 | kind: Pod 549 | metadata: 550 | name: secret-vol-example 551 | spec: 552 | containers: 553 | - name: mypod 554 | image: alpine:latest 555 | command: ["/bin/sh", "-c"] 556 | args: 557 | - while true; do 558 | sleep 10; 559 | done 560 | volumeMounts: 561 | - name: secret-volume 562 | mountPath: /mysecret 563 | - name: password 564 | mountPath: /mypass 565 | readOnly: true 566 | volumes: 567 | - name: secret-volume 568 | secret: 569 | secretName: manifest-example 570 | - name: password 571 | secret: 572 | secretName: manifest-example 573 | items: 574 | - key: password 575 | path: supersecretpass 576 | ``` 577 | 578 | **Command** 579 | ``` 580 | $ kubectl create -f manifests/secret-vol-example.yaml 581 | ``` 582 | 583 | Note the volumes and how they are being referenced. The volume `password`, has an array of `items` that contains a 584 | sub-set of the key-value pairs stored in the `manifest-example` Secret. When working with the individual items, it is 585 | possible to override the name or path to the generated file by supplying an argument for the `path` parameter. 586 | 587 | 2) View the contents of the `/mysecret` volume mount. 588 | ``` 589 | $ kubectl exec secret-vol-example -- ls /mysecret 590 | ``` 591 | It will contain two files, matching the names of the keys stored in Secret `manifest-example`. 592 | 593 | 3) `cat` the contents of the files. 594 | ``` 595 | $ kubectl exec secret-vol-example -- /bin/sh -c "cat /mysecret/*" 596 | ``` 597 | It will match the values stored in the Secret `manifest-example` concatenated together. 598 | 599 | 4) View the contents of the other Volume Mount `mypass`. 600 | ``` 601 | $ kubectl exec secret-vol-example -- ls /mypass 602 | ``` 603 | A file will be present that represents the single item being referenced in the `password` volume. This file bears the 604 | name `supersecretpass` as specified by the `path` variable. 
605 | 606 | 5) `cat` contents of the `supersecretpass` file. 607 | ``` 608 | $ kubectl exec secret-vol-example -- cat /mypass/supersecretpass 609 | ``` 610 | The contents should match the value of data[password]. 611 | 612 | --- 613 | 614 | **Summary:** Secrets can be consumed in multiple ways. One of the more flexible (and secure) methods being mounting 615 | as a volume. It's possible to mount the entire contents of a Secret as a volume, or alternatively mount specific 616 | items stored in a Secret. These items can be renamed or be made read-only to meet a variety of application needs. 617 | 618 | --- 619 | 620 | **Clean Up Command:** 621 | ``` 622 | kubectl delete pod secret-vol-example 623 | kubectl delete secret dir-example file-example literal-example manifest-example 624 | ``` 625 | 626 | --- 627 | 628 | [Back to Index](#index) 629 | 630 | --- 631 | --- 632 | 633 | # Cleaning Up 634 | 635 | ``` 636 | kubectl delete cm manifest-example literal-example dir-example file-example 637 | kubectl delete secret manifest-example literal-example dir-example file-example 638 | ``` 639 | 640 | --- 641 | 642 | [Back to Index](#index) 643 | 644 | --- 645 | --- 646 | 647 | # Helpful Resources 648 | 649 | * [Configure a Pod to Use a ConfigMap](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/) 650 | * [Secrets Overview](https://kubernetes.io/docs/concepts/configuration/secret/) 651 | * [Storing and Using Docker Registry Credentials as a Secret](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/) 652 | 653 | 654 | --- 655 | 656 | [Back to Index](#index) 657 | -------------------------------------------------------------------------------- /configuration/manifests/cm-cmd-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: cm-cmd-example 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: env 10 | 
image: alpine:latest 11 | command: ["/bin/sh", "-c"] 12 | args: ["echo Hello from ${CITY}!"] 13 | env: 14 | - name: CITY 15 | valueFrom: 16 | configMapKeyRef: 17 | name: manifest-example 18 | key: city 19 | restartPolicy: Never 20 | -------------------------------------------------------------------------------- /configuration/manifests/cm-env-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: cm-env-example 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: env 10 | image: alpine:latest 11 | command: ["/bin/sh", "-c"] 12 | args: ["printenv CITY"] 13 | env: 14 | - name: CITY 15 | valueFrom: 16 | configMapKeyRef: 17 | name: manifest-example 18 | key: city 19 | restartPolicy: Never 20 | -------------------------------------------------------------------------------- /configuration/manifests/cm-manifest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: manifest-example 5 | data: 6 | city: Ann Arbor 7 | state: Michigan 8 | -------------------------------------------------------------------------------- /configuration/manifests/cm-vol-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: cm-vol-example 5 | spec: 6 | containers: 7 | - name: mypod 8 | image: alpine:latest 9 | command: ["/bin/sh", "-c"] 10 | args: 11 | - while true; do 12 | sleep 10; 13 | done 14 | volumeMounts: 15 | - name: config-volume 16 | mountPath: /myconfig 17 | - name: city 18 | mountPath: /mycity 19 | readOnly: true 20 | volumes: 21 | - name: config-volume 22 | configMap: 23 | name: manifest-example 24 | - name: city 25 | configMap: 26 | name: manifest-example 27 | items: 28 | - key: city 29 | path: thisismycity 30 | 
-------------------------------------------------------------------------------- /configuration/manifests/cm/city: -------------------------------------------------------------------------------- 1 | Ann Arbor 2 | -------------------------------------------------------------------------------- /configuration/manifests/cm/state: -------------------------------------------------------------------------------- 1 | Michigan 2 | -------------------------------------------------------------------------------- /configuration/manifests/secret-cmd-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: secret-cmd-example 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: env 10 | image: alpine:latest 11 | command: ["/bin/sh", "-c"] 12 | args: ["echo Hello there ${USERNAME}!"] 13 | env: 14 | - name: USERNAME 15 | valueFrom: 16 | secretKeyRef: 17 | name: manifest-example 18 | key: username 19 | restartPolicy: Never 20 | -------------------------------------------------------------------------------- /configuration/manifests/secret-env-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: secret-env-example 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: mypod 10 | image: alpine:latest 11 | command: ["/bin/sh", "-c"] 12 | args: ["printenv USERNAME"] 13 | env: 14 | - name: USERNAME 15 | valueFrom: 16 | secretKeyRef: 17 | name: manifest-example 18 | key: username 19 | restartPolicy: Never 20 | -------------------------------------------------------------------------------- /configuration/manifests/secret-manifest.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: manifest-example 5 | type: Opaque 6 | data: 7 | username: ZXhhbXBsZQ== 8 | password: 
bXlwYXNzd29yZA== 9 | -------------------------------------------------------------------------------- /configuration/manifests/secret-vol-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: secret-vol-example 5 | spec: 6 | containers: 7 | - name: mypod 8 | image: alpine:latest 9 | command: ["/bin/sh", "-c"] 10 | args: 11 | - while true; do 12 | sleep 10; 13 | done 14 | volumeMounts: 15 | - name: secret-volume 16 | mountPath: /mysecret 17 | - name: password 18 | mountPath: /mypass 19 | readOnly: true 20 | volumes: 21 | - name: secret-volume 22 | secret: 23 | secretName: manifest-example 24 | - name: password 25 | secret: 26 | secretName: manifest-example 27 | items: 28 | - key: password 29 | path: supersecretpass 30 | -------------------------------------------------------------------------------- /configuration/manifests/secret/password: -------------------------------------------------------------------------------- 1 | mypassword 2 | -------------------------------------------------------------------------------- /configuration/manifests/secret/username: -------------------------------------------------------------------------------- 1 | example 2 | -------------------------------------------------------------------------------- /core/README.md: -------------------------------------------------------------------------------- 1 | # Exploring the Core 2 | 3 | This tutorial covers the fundamental building blocks that make up Kubernetes. Understanding what these components are 4 | and how they are used is crucial to learning how to use the higher level objects and resources. 
5 | 6 | # Index 7 | * [Namespaces](#namespaces) 8 | * [Exercise: Using Namespaces](#exercise-using-namespaces) 9 | * [Pods](#pods) 10 | * [Exercise: Creating Pods](#exercise-creating-pods) 11 | * [Labels and Selectors](#labels-and-selectors) 12 | * [Exercise: Using Labels and Selectors](#exercise-using-labels-and-selectors) 13 | * [Services](#services) 14 | * [Exercise: The ClusterIP Service](#exercise-the-clusterip-service) 15 | * [Exercise: Using the NodePort Service](#exercise-using-the-nodeport-service) 16 | * [Exercise: The LoadBalancer Service](#exercise-the-loadbalancer-service) 17 | * [Exercise: Using the ExternalName Service](#exercise-using-the-externalname-service) 18 | * [Cleaning up](#cleaning-up) 19 | * [Helpful Resources](#helpful-resources) 20 | 21 | --- 22 | 23 | # Namespaces 24 | Namespaces are a logical cluster or environment. They are the primary method of partitioning a cluster or scoping 25 | access. 26 | 27 | --- 28 | 29 | ### Exercise: Using Namespaces 30 | **Objectives:** Learn how to create and switch between Kubernetes Namespaces using `kubectl`. 31 | 32 | **NOTE:** If you are coming from the [cli tutorial](../cli/README.md), you may have completed this already. 33 | 34 | --- 35 | 36 | 1) List the current namespaces 37 | ``` 38 | $ kubectl get namespaces 39 | ``` 40 | 41 | 2) Create the `dev` namespace 42 | ``` 43 | $ kubectl create namespace dev 44 | ``` 45 | 46 | 3) Create a new context called `kind-dev` within the `kind-kind` cluster as the `kind-kind` user, with the namespace 47 | set to `dev`. 48 | ``` 49 | $ kubectl config set-context kind-dev --cluster=kind-kind --user=kind-kind --namespace=dev 50 | ``` 51 | 52 | 4) Switch to the newly created context. 53 | ``` 54 | $ kubectl config use-context kind-dev 55 | ``` 56 | 57 | --- 58 | 59 | **Summary:** Namespaces function as the primary method of providing scoped names, access, and act as an umbrella for 60 | group based resource restriction. 
Creating and switching between them is quick and easy, but learning to use them is 61 | essential in the general usage of Kubernetes. 62 | 63 | --- 64 | 65 | [Back to Index](#index) 66 | 67 | --- 68 | --- 69 | 70 | # Pods 71 | A pod is the atomic unit of Kubernetes. It is the smallest _“unit of work”_ or _“management resource”_ within the 72 | system and is the foundational building block of all Kubernetes Workloads. 73 | 74 | **Note:** These exercises build off the previous Core tutorials. If you have not done so, complete those before continuing. 75 | 76 | --- 77 | 78 | ### Exercise: Creating Pods 79 | **Objective:** Examine both single and multi-container Pods; including: viewing their attributes through the cli and 80 | their exposed Services through the API Server proxy. 81 | 82 | --- 83 | 84 | 1) Create a simple Pod called `pod-example` using the `nginx:stable-alpine` image and expose port `80`. Use the 85 | manifest `manifests/pod-example.yaml` or the yaml below. 86 | 87 | **manifests/pod-example.yaml** 88 | ```yaml 89 | apiVersion: v1 90 | kind: Pod 91 | metadata: 92 | name: pod-example 93 | spec: 94 | containers: 95 | - name: nginx 96 | image: nginx:stable-alpine 97 | ports: 98 | - containerPort: 80 99 | ``` 100 | 101 | **Command** 102 | ``` 103 | $ kubectl create -f manifests/pod-example.yaml 104 | ``` 105 | 106 | 2) Use `kubectl` to describe the Pod and note the available information. 107 | ``` 108 | $ kubectl describe pod pod-example 109 | ``` 110 | 111 | 3) Use `kubectl proxy` to verify the web server running in the deployed Pod. 112 | 113 | **Command** 114 | ``` 115 | $ kubectl proxy 116 | ``` 117 | **URL** 118 | ``` 119 | http://127.0.0.1:8001/api/v1/namespaces/dev/pods/pod-example/proxy/ 120 | ``` 121 | 122 | The default **"Welcome to nginx!"** page should be visible. 
123 | 124 | 4) Using the same steps as above, create a new Pod called `multi-container-example` using the manifest 125 | `manifests/pod-multi-container-example.yaml` or create a new one yourself with the below yaml. 126 | 127 | **manifests/pod-multi-container-example.yaml** 128 | ```yaml 129 | apiVersion: v1 130 | kind: Pod 131 | metadata: 132 | name: multi-container-example 133 | spec: 134 | containers: 135 | - name: nginx 136 | image: nginx:stable-alpine 137 | ports: 138 | - containerPort: 80 139 | volumeMounts: 140 | - name: html 141 | mountPath: /usr/share/nginx/html 142 | - name: content 143 | image: alpine:latest 144 | volumeMounts: 145 | - name: html 146 | mountPath: /html 147 | command: ["/bin/sh", "-c"] 148 | args: 149 | - while true; do 150 | echo $(date)"
" >> /html/index.html; 151 | sleep 5; 152 | done 153 | volumes: 154 | - name: html 155 | emptyDir: {} 156 | ``` 157 | 158 | **Command** 159 | ``` 160 | $ kubectl create -f manifests/pod-multi-container-example.yaml 161 | ``` 162 | **Note:** `spec.containers` is an array allowing you to use multiple containers within a Pod. 163 | 164 | 5) Use the proxy to verify the web server running in the deployed Pod. 165 | 166 | **Command** 167 | ``` 168 | $ kubectl proxy 169 | ``` 170 | **URL** 171 | ``` 172 | http://127.0.0.1:8001/api/v1/namespaces/dev/pods/multi-container-example/proxy/ 173 | ``` 174 | 175 | There should be a repeating date-time-stamp. 176 | 177 | --- 178 | 179 | **Summary:** Becoming familiar with creating and viewing the general aspects of a Pod is an important skill. While it 180 | is rare that one would manage Pods directly within Kubernetes, the knowledge of how to view, access and describe them 181 | is important and a common first-step in troubleshooting a possible Pod failure. 182 | 183 | --- 184 | 185 | [Back to Index](#index) 186 | 187 | --- 188 | --- 189 | 190 | # Labels and Selectors 191 | Labels are key-value pairs that are used to identify, describe and group together related sets of objects or 192 | resources. 193 | 194 | Selectors use labels to filter or select objects, and are used throughout Kubernetes. 195 | 196 | --- 197 | 198 | ### Exercise: Using Labels and Selectors 199 | **Objective:** Explore the methods of labeling objects in addition to filtering them with both equality and 200 | set-based selectors. 201 | 202 | --- 203 | 204 | 1) Label the Pod `pod-example` with `app=nginx` and `environment=dev` via `kubectl`. 
205 | 206 | ``` 207 | $ kubectl label pod pod-example app=nginx environment=dev 208 | ``` 209 | 210 | 2) View the labels with `kubectl` by passing the `--show-labels` flag 211 | ``` 212 | $ kubectl get pods --show-labels 213 | ``` 214 | 215 | 3) Update the multi-container example manifest created previously with the labels `app=nginx` and `environment=prod` 216 | then apply it via `kubectl`. 217 | 218 | **manifests/pod-multi-container-example.yaml** 219 | ```yaml 220 | apiVersion: v1 221 | kind: Pod 222 | metadata: 223 | name: multi-container-example 224 | labels: 225 | app: nginx 226 | environment: prod 227 | spec: 228 | containers: 229 | - name: nginx 230 | image: nginx:stable-alpine 231 | ports: 232 | - containerPort: 80 233 | volumeMounts: 234 | - name: html 235 | mountPath: /usr/share/nginx/html 236 | - name: content 237 | image: alpine:latest 238 | volumeMounts: 239 | - name: html 240 | mountPath: /html 241 | command: ["/bin/sh", "-c"] 242 | args: 243 | - while true; do 244 | date >> /html/index.html; 245 | sleep 5; 246 | done 247 | volumes: 248 | - name: html 249 | emptyDir: {} 250 | ``` 251 | 252 | **Command** 253 | ``` 254 | $ kubectl apply -f manifests/pod-multi-container-example.yaml 255 | ``` 256 | 257 | 4) View the added labels with `kubectl` by passing the `--show-labels` flag once again. 258 | ``` 259 | $ kubectl get pods --show-labels 260 | ``` 261 | 262 | 5) With the objects now labeled, use an [equality based selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#equality-based-requirement) 263 | targeting the `prod` environment. 264 | 265 | ``` 266 | $ kubectl get pods --selector environment=prod 267 | ``` 268 | 269 | 6) Do the same targeting the `nginx` app with the short version of the selector flag (`-l`). 
270 | ``` 271 | $ kubectl get pods -l app=nginx 272 | ``` 273 | 274 | 7) Use a [set-based selector](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#set-based-requirement) 275 | to view all pods where the `app` label is `nginx` and filter out any that are in the `prod` environment. 276 | 277 | ``` 278 | $ kubectl get pods -l 'app in (nginx), environment notin (prod)' 279 | ``` 280 | 281 | --- 282 | 283 | **Summary:** Kubernetes makes heavy use of labels and selectors in near every aspect of it. The usage of selectors 284 | may seem limited from the cli, but the concept can be extended to when it is used with higher level resources and 285 | objects. 286 | 287 | --- 288 | 289 | [Back to Index](#index) 290 | 291 | --- 292 | --- 293 | 294 | # Services 295 | Services within Kubernetes are the unified method of accessing the exposed workloads of Pods. They are a durable 296 | resource (unlike Pods) that is given a static cluster-unique IP and provide simple load-balancing through kube-proxy. 297 | 298 | **Note:** These exercises build off the previous Core tutorials. If you have not done so, complete those before continuing. 299 | 300 | --- 301 | 302 | ### Exercise: The clusterIP Service 303 | **Objective:** Create a `ClusterIP` service and view the different ways it is accessible within the cluster. 304 | 305 | --- 306 | 307 | 1) Create `ClusterIP` service `clusterip` that targets Pods labeled with `app=nginx` forwarding port `80` using 308 | either the yaml below, or the manifest `manifests/service-clusterip.yaml`. 309 | 310 | **manifests/service-clusterip.yaml** 311 | ```yaml 312 | apiVersion: v1 313 | kind: Service 314 | metadata: 315 | name: clusterip 316 | spec: 317 | selector: 318 | app: nginx 319 | ports: 320 | - protocol: TCP 321 | port: 80 322 | targetPort: 80 323 | ``` 324 | 325 | **Command** 326 | ``` 327 | $ kubectl create -f manifests/service-clusterip.yaml 328 | ``` 329 | 330 | 2) Describe the newly created service. 
Note the `IP` and the `Endpoints` fields. 331 | ``` 332 | $ kubectl describe service clusterip 333 | ``` 334 | 335 | 3) View the service through `kube proxy` and refresh several times. It should serve up pages from both pods. 336 | 337 | **Command** 338 | ``` 339 | $ kubectl proxy 340 | ``` 341 | **URL** 342 | ``` 343 | http://127.0.0.1:8001/api/v1/namespaces/dev/services/clusterip/proxy/ 344 | ``` 345 | 346 | 4) Lastly, verify that the generated DNS record has been created for the Service by using nslookup within the 347 | `pod-example` Pod that was provisioned in the [Creating Pods](#exercise-creating-pods) exercise. 348 | ``` 349 | $ kubectl exec pod-example -- nslookup clusterip.dev.svc.cluster.local 350 | ``` 351 | It should return a valid response with the IP matching what was noted earlier when describing the Service. 352 | 353 | --- 354 | 355 | **Summary:** The `ClusterIP` Service is the most commonly used Service within Kubernetes. Every `ClusterIP` Service 356 | is given a cluster unique IP and DNS name that maps to one or more Pod `Endpoints`. It functions as the main method in 357 | which exposed Pod Services are consumed **within** a Kubernetes Cluster. 358 | 359 | --- 360 | 361 | ### Exercise: Using the NodePort Service 362 | 363 | **Objective:** Create a `NodePort` based Service and explore how it is available both inside and outside the cluster. 364 | 365 | --- 366 | 367 | 1) Create a `NodePort` Service called `nodeport` that targets Pods with the labels `app=nginx` and `environment=prod` 368 | forwarding port `80` in cluster, and port `32410` on the node itself. Use either the yaml below, or the manifest 369 | `manifests/service-nodeport.yaml`.
370 | 371 | **manifests/service-nodeport.yaml** 372 | ```yaml 373 | apiVersion: v1 374 | kind: Service 375 | metadata: 376 | name: nodeport 377 | spec: 378 | type: NodePort 379 | selector: 380 | app: nginx 381 | environment: prod 382 | ports: 383 | - nodePort: 32410 384 | protocol: TCP 385 | port: 80 386 | targetPort: 80 387 | ``` 388 | 389 | **Command** 390 | ``` 391 | $ kubectl create -f manifests/service-nodeport.yaml 392 | ``` 393 | 394 | 2) Describe the newly created Service Endpoint. Note the Service still has an internal cluster `IP`, and now 395 | additionally has a `NodePort`. 396 | ``` 397 | $ kubectl describe service nodeport 398 | ``` 399 | 400 | 3) Run the below command to get the Kind Cluster's IP address and visit it in a browser. 401 | ``` 402 | $ echo $(docker inspect -f '{{.NetworkSettings.Networks.kind.IPAddress}}' kind-control-plane):32410 403 | ``` 404 | 405 | 4) Lastly, verify that the generated DNS record has been created for the Service by using nslookup within 406 | the `pod-example` Pod. 407 | ``` 408 | $ kubectl exec pod-example -- nslookup nodeport.dev.svc.cluster.local 409 | ``` 410 | It should return a valid response with the IP matching what was noted earlier when describing the Service. 411 | 412 | --- 413 | 414 | **Summary:** The `NodePort` Services extend the `ClusterIP` Service and additionally expose a port that is either 415 | statically defined, as above (port 32410) or dynamically taken from a range between 30000-32767. This port is then 416 | exposed on every node within the cluster and proxies to the created Service. 417 | 418 | --- 419 | 420 | ### Exercise: The LoadBalancer Service 421 | **Objective:** Create a `LoadBalancer` based Service, and learn how it extends both `ClusterIP` and `NodePort` to 422 | make a Service available outside the Cluster. 423 | 424 | **Before you Begin** 425 | To use Service Type `LoadBalancer` it requires integration with an external IP provider.
In most cases, this is a 426 | cloud provider which will likely already be integrated with your cluster. 427 | 428 | For bare-metal and on prem deployments, this must be handled yourself. There are several available tools and products 429 | that can do this, but for this example the Google [metalLB](https://github.com/google/metallb) provider will be used. 430 | 431 | **NOTE:** We need to provide metallb a range of IP addresses it controls. We want this range to be on the docker kind 432 | network. 433 | 434 | ``` 435 | $ docker network inspect -f '{{.IPAM.Config}}' kind 436 | ``` 437 | 438 | The output will contain a cidr such as 172.18.0.0/16. We want our loadbalancer IP range to come from this subclass. 439 | We can configure metallb, for instance, to use 172.18.255.200 to 172.18.255.250 by creating the configmap. 440 | 441 | Edit the manifest `manifests/metalLB.yaml` and change the cidr range on line 19 (`172.18.255.200-172.18.255.250`) to 442 | fit your requirements. Otherwise go ahead and deploy it. 443 | 444 | ``` 445 | $ kubectl create -f manifests/metalLB.yaml 446 | ``` 447 | 448 | 1) Create a `LoadBalancer` Service called `loadbalancer` that targets pods with the labels `app=nginx` and 449 | `environment=prod` forwarding as port `80`. Use either the yaml below, or the manifest 450 | `manifests/service-loadbalancer.yaml`. 451 | 452 | **manifests/service-loadbalancer.yaml** 453 | ```yaml 454 | apiVersion: v1 455 | kind: Service 456 | metadata: 457 | name: loadbalancer 458 | spec: 459 | type: LoadBalancer 460 | selector: 461 | app: nginx 462 | environment: prod 463 | ports: 464 | - protocol: TCP 465 | port: 80 466 | targetPort: 80 467 | ``` 468 | 469 | **Command** 470 | ``` 471 | $ kubectl create -f manifests/service-loadbalancer.yaml 472 | ``` 473 | 474 | 2) Describe the Service `loadbalancer`, and note the Service retains the aspects of both the `ClusterIP` and 475 | `NodePort` Service types in addition to having a new attribute `LoadBalancer Ingress`. 
476 | ``` 477 | $ kubectl describe service loadbalancer 478 | ``` 479 | 480 | 3) Open a browser and visit the IP noted in the `Loadbalancer Ingress` field. It should directly map to the exposed 481 | Service. 482 | 483 | 4) Finally, verify that the generated DNS record has been created for the Service by using nslookup within the 484 | `pod-example` Pod. 485 | ``` 486 | $ kubectl exec pod-example -- nslookup loadbalancer.dev.svc.cluster.local 487 | ``` 488 | It should return a valid response with the IP matching what was noted earlier when describing the Service. 489 | 490 | --- 491 | 492 | **Summary:** `LoadBalancer` Services are the second most frequently used Service within Kubernetes as they are the 493 | main method of directing external traffic into the Kubernetes cluster. They work with an external provider to map 494 | ingress traffic destined to the `LoadBalancer Ingress` IP to the cluster nodes on the exposed `NodePort`. These in 495 | turn direct traffic to the desired Pods. 496 | 497 | --- 498 | 499 | ### Exercise: Using the ExternalName Service 500 | **Objective:** Gain an understanding of the `ExternalName` Service and how it is used within a Kubernetes Cluster. 501 | 502 | --- 503 | 504 | 1) Create an `ExternalName` service called `externalname` that points to `google.com` 505 | ``` 506 | $ kubectl create service externalname externalname --external-name=google.com 507 | ``` 508 | 509 | 2) Describe the `externalname` Service. Note that it does **NOT** have an internal IP or other _normal_ service 510 | attributes. 511 | ``` 512 | $ kubectl describe service externalname 513 | ``` 514 | 515 | 3) Lastly, verify that the generated DNS record has been created for the Service by using nslookup within the 516 | `pod-example` Pod. It should return the IP of `google.com`.
517 | ``` 518 | $ kubectl exec pod-example -- nslookup externalname.dev.svc.cluster.local 519 | ``` 520 | 521 | --- 522 | 523 | **Summary:** `ExternalName` Services create a `CNAME` entry in the Cluster DNS. This provides an avenue to use 524 | internal Service discovery methods to reference external entities. 525 | 526 | --- 527 | 528 | [Back to Index](#index) 529 | 530 | --- 531 | --- 532 | 533 | # Cleaning Up 534 | 535 | To remove everything that was created in this tutorial, execute the following commands: 536 | ``` 537 | kubectl delete namespace dev 538 | kubectl delete -f manifests/metalLB.yaml 539 | kubectl config delete-context kind-dev 540 | kubectl config use-context kind-kind 541 | ``` 542 | 543 | --- 544 | 545 | [Back to Index](#index) 546 | 547 | --- 548 | --- 549 | 550 | # Helpful Resources 551 | 552 | * [Pod Object Spec](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.10/#podspec-v1-core) 553 | * [Labels and Selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/) 554 | * [Concepts: Service Networking](https://kubernetes.io/docs/concepts/services-networking/service/) 555 | 556 | --- 557 | 558 | [Back to Index](#index) 559 | 560 | --- 561 | -------------------------------------------------------------------------------- /core/manifests/metalLB.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: metallb-system 5 | labels: 6 | app: metallb 7 | --- 8 | apiVersion: v1 9 | kind: ConfigMap 10 | metadata: 11 | namespace: metallb-system 12 | name: config 13 | data: 14 | config: | 15 | address-pools: 16 | - name: default 17 | protocol: layer2 18 | addresses: 19 | - 172.18.255.200-172.18.255.250 20 | --- 21 | apiVersion: policy/v1beta1 22 | kind: PodSecurityPolicy 23 | metadata: 24 | labels: 25 | app: metallb 26 | name: controller 27 | spec: 28 | allowPrivilegeEscalation: false 29 | allowedCapabilities: [] 30 | 
allowedHostPaths: [] 31 | defaultAddCapabilities: [] 32 | defaultAllowPrivilegeEscalation: false 33 | fsGroup: 34 | ranges: 35 | - max: 65535 36 | min: 1 37 | rule: MustRunAs 38 | hostIPC: false 39 | hostNetwork: false 40 | hostPID: false 41 | privileged: false 42 | readOnlyRootFilesystem: true 43 | requiredDropCapabilities: 44 | - ALL 45 | runAsUser: 46 | ranges: 47 | - max: 65535 48 | min: 1 49 | rule: MustRunAs 50 | seLinux: 51 | rule: RunAsAny 52 | supplementalGroups: 53 | ranges: 54 | - max: 65535 55 | min: 1 56 | rule: MustRunAs 57 | volumes: 58 | - configMap 59 | - secret 60 | - emptyDir 61 | --- 62 | apiVersion: policy/v1beta1 63 | kind: PodSecurityPolicy 64 | metadata: 65 | labels: 66 | app: metallb 67 | name: speaker 68 | spec: 69 | allowPrivilegeEscalation: false 70 | allowedCapabilities: 71 | - NET_RAW 72 | allowedHostPaths: [] 73 | defaultAddCapabilities: [] 74 | defaultAllowPrivilegeEscalation: false 75 | fsGroup: 76 | rule: RunAsAny 77 | hostIPC: false 78 | hostNetwork: true 79 | hostPID: false 80 | hostPorts: 81 | - max: 7472 82 | min: 7472 83 | - max: 7946 84 | min: 7946 85 | privileged: true 86 | readOnlyRootFilesystem: true 87 | requiredDropCapabilities: 88 | - ALL 89 | runAsUser: 90 | rule: RunAsAny 91 | seLinux: 92 | rule: RunAsAny 93 | supplementalGroups: 94 | rule: RunAsAny 95 | volumes: 96 | - configMap 97 | - secret 98 | - emptyDir 99 | --- 100 | apiVersion: v1 101 | kind: ServiceAccount 102 | metadata: 103 | labels: 104 | app: metallb 105 | name: controller 106 | namespace: metallb-system 107 | --- 108 | apiVersion: v1 109 | kind: ServiceAccount 110 | metadata: 111 | labels: 112 | app: metallb 113 | name: speaker 114 | namespace: metallb-system 115 | --- 116 | apiVersion: rbac.authorization.k8s.io/v1 117 | kind: ClusterRole 118 | metadata: 119 | labels: 120 | app: metallb 121 | name: metallb-system:controller 122 | rules: 123 | - apiGroups: 124 | - '' 125 | resources: 126 | - services 127 | verbs: 128 | - get 129 | - list 130 | - watch 131 
| - apiGroups: 132 | - '' 133 | resources: 134 | - services/status 135 | verbs: 136 | - update 137 | - apiGroups: 138 | - '' 139 | resources: 140 | - events 141 | verbs: 142 | - create 143 | - patch 144 | - apiGroups: 145 | - policy 146 | resourceNames: 147 | - controller 148 | resources: 149 | - podsecuritypolicies 150 | verbs: 151 | - use 152 | --- 153 | apiVersion: rbac.authorization.k8s.io/v1 154 | kind: ClusterRole 155 | metadata: 156 | labels: 157 | app: metallb 158 | name: metallb-system:speaker 159 | rules: 160 | - apiGroups: 161 | - '' 162 | resources: 163 | - services 164 | - endpoints 165 | - nodes 166 | verbs: 167 | - get 168 | - list 169 | - watch 170 | - apiGroups: ["discovery.k8s.io"] 171 | resources: 172 | - endpointslices 173 | verbs: 174 | - get 175 | - list 176 | - watch 177 | - apiGroups: 178 | - '' 179 | resources: 180 | - events 181 | verbs: 182 | - create 183 | - patch 184 | - apiGroups: 185 | - policy 186 | resourceNames: 187 | - speaker 188 | resources: 189 | - podsecuritypolicies 190 | verbs: 191 | - use 192 | --- 193 | apiVersion: rbac.authorization.k8s.io/v1 194 | kind: Role 195 | metadata: 196 | labels: 197 | app: metallb 198 | name: config-watcher 199 | namespace: metallb-system 200 | rules: 201 | - apiGroups: 202 | - '' 203 | resources: 204 | - configmaps 205 | verbs: 206 | - get 207 | - list 208 | - watch 209 | --- 210 | apiVersion: rbac.authorization.k8s.io/v1 211 | kind: Role 212 | metadata: 213 | labels: 214 | app: metallb 215 | name: pod-lister 216 | namespace: metallb-system 217 | rules: 218 | - apiGroups: 219 | - '' 220 | resources: 221 | - pods 222 | verbs: 223 | - list 224 | --- 225 | apiVersion: rbac.authorization.k8s.io/v1 226 | kind: Role 227 | metadata: 228 | labels: 229 | app: metallb 230 | name: controller 231 | namespace: metallb-system 232 | rules: 233 | - apiGroups: 234 | - '' 235 | resources: 236 | - secrets 237 | verbs: 238 | - create 239 | - apiGroups: 240 | - '' 241 | resources: 242 | - secrets 243 | 
resourceNames: 244 | - memberlist 245 | verbs: 246 | - list 247 | - apiGroups: 248 | - apps 249 | resources: 250 | - deployments 251 | resourceNames: 252 | - controller 253 | verbs: 254 | - get 255 | --- 256 | apiVersion: rbac.authorization.k8s.io/v1 257 | kind: ClusterRoleBinding 258 | metadata: 259 | labels: 260 | app: metallb 261 | name: metallb-system:controller 262 | roleRef: 263 | apiGroup: rbac.authorization.k8s.io 264 | kind: ClusterRole 265 | name: metallb-system:controller 266 | subjects: 267 | - kind: ServiceAccount 268 | name: controller 269 | namespace: metallb-system 270 | --- 271 | apiVersion: rbac.authorization.k8s.io/v1 272 | kind: ClusterRoleBinding 273 | metadata: 274 | labels: 275 | app: metallb 276 | name: metallb-system:speaker 277 | roleRef: 278 | apiGroup: rbac.authorization.k8s.io 279 | kind: ClusterRole 280 | name: metallb-system:speaker 281 | subjects: 282 | - kind: ServiceAccount 283 | name: speaker 284 | namespace: metallb-system 285 | --- 286 | apiVersion: rbac.authorization.k8s.io/v1 287 | kind: RoleBinding 288 | metadata: 289 | labels: 290 | app: metallb 291 | name: config-watcher 292 | namespace: metallb-system 293 | roleRef: 294 | apiGroup: rbac.authorization.k8s.io 295 | kind: Role 296 | name: config-watcher 297 | subjects: 298 | - kind: ServiceAccount 299 | name: controller 300 | - kind: ServiceAccount 301 | name: speaker 302 | --- 303 | apiVersion: rbac.authorization.k8s.io/v1 304 | kind: RoleBinding 305 | metadata: 306 | labels: 307 | app: metallb 308 | name: pod-lister 309 | namespace: metallb-system 310 | roleRef: 311 | apiGroup: rbac.authorization.k8s.io 312 | kind: Role 313 | name: pod-lister 314 | subjects: 315 | - kind: ServiceAccount 316 | name: speaker 317 | --- 318 | apiVersion: rbac.authorization.k8s.io/v1 319 | kind: RoleBinding 320 | metadata: 321 | labels: 322 | app: metallb 323 | name: controller 324 | namespace: metallb-system 325 | roleRef: 326 | apiGroup: rbac.authorization.k8s.io 327 | kind: Role 328 | name: 
controller 329 | subjects: 330 | - kind: ServiceAccount 331 | name: controller 332 | --- 333 | apiVersion: apps/v1 334 | kind: DaemonSet 335 | metadata: 336 | labels: 337 | app: metallb 338 | component: speaker 339 | name: speaker 340 | namespace: metallb-system 341 | spec: 342 | selector: 343 | matchLabels: 344 | app: metallb 345 | component: speaker 346 | template: 347 | metadata: 348 | annotations: 349 | prometheus.io/port: '7472' 350 | prometheus.io/scrape: 'true' 351 | labels: 352 | app: metallb 353 | component: speaker 354 | spec: 355 | containers: 356 | - args: 357 | - --port=7472 358 | - --config=config 359 | - --log-level=info 360 | env: 361 | - name: METALLB_NODE_NAME 362 | valueFrom: 363 | fieldRef: 364 | fieldPath: spec.nodeName 365 | - name: METALLB_HOST 366 | valueFrom: 367 | fieldRef: 368 | fieldPath: status.hostIP 369 | - name: METALLB_ML_BIND_ADDR 370 | valueFrom: 371 | fieldRef: 372 | fieldPath: status.podIP 373 | # needed when another software is also using memberlist / port 7946 374 | # when changing this default you also need to update the container ports definition 375 | # and the PodSecurityPolicy hostPorts definition 376 | #- name: METALLB_ML_BIND_PORT 377 | # value: "7946" 378 | - name: METALLB_ML_LABELS 379 | value: "app=metallb,component=speaker" 380 | - name: METALLB_ML_SECRET_KEY 381 | valueFrom: 382 | secretKeyRef: 383 | name: memberlist 384 | key: secretkey 385 | image: quay.io/metallb/speaker:v0.12.1 386 | name: speaker 387 | ports: 388 | - containerPort: 7472 389 | name: monitoring 390 | - containerPort: 7946 391 | name: memberlist-tcp 392 | - containerPort: 7946 393 | name: memberlist-udp 394 | protocol: UDP 395 | livenessProbe: 396 | httpGet: 397 | path: /metrics 398 | port: monitoring 399 | initialDelaySeconds: 10 400 | periodSeconds: 10 401 | timeoutSeconds: 1 402 | successThreshold: 1 403 | failureThreshold: 3 404 | readinessProbe: 405 | httpGet: 406 | path: /metrics 407 | port: monitoring 408 | initialDelaySeconds: 10 409 | 
periodSeconds: 10 410 | timeoutSeconds: 1 411 | successThreshold: 1 412 | failureThreshold: 3 413 | securityContext: 414 | allowPrivilegeEscalation: false 415 | capabilities: 416 | add: 417 | - NET_RAW 418 | drop: 419 | - ALL 420 | readOnlyRootFilesystem: true 421 | hostNetwork: true 422 | nodeSelector: 423 | kubernetes.io/os: linux 424 | serviceAccountName: speaker 425 | terminationGracePeriodSeconds: 2 426 | tolerations: 427 | - effect: NoSchedule 428 | key: node-role.kubernetes.io/master 429 | operator: Exists 430 | --- 431 | apiVersion: apps/v1 432 | kind: Deployment 433 | metadata: 434 | labels: 435 | app: metallb 436 | component: controller 437 | name: controller 438 | namespace: metallb-system 439 | spec: 440 | revisionHistoryLimit: 3 441 | selector: 442 | matchLabels: 443 | app: metallb 444 | component: controller 445 | template: 446 | metadata: 447 | annotations: 448 | prometheus.io/port: '7472' 449 | prometheus.io/scrape: 'true' 450 | labels: 451 | app: metallb 452 | component: controller 453 | spec: 454 | containers: 455 | - args: 456 | - --port=7472 457 | - --config=config 458 | - --log-level=info 459 | env: 460 | - name: METALLB_ML_SECRET_NAME 461 | value: memberlist 462 | - name: METALLB_DEPLOYMENT 463 | value: controller 464 | image: quay.io/metallb/controller:v0.12.1 465 | name: controller 466 | ports: 467 | - containerPort: 7472 468 | name: monitoring 469 | livenessProbe: 470 | httpGet: 471 | path: /metrics 472 | port: monitoring 473 | initialDelaySeconds: 10 474 | periodSeconds: 10 475 | timeoutSeconds: 1 476 | successThreshold: 1 477 | failureThreshold: 3 478 | readinessProbe: 479 | httpGet: 480 | path: /metrics 481 | port: monitoring 482 | initialDelaySeconds: 10 483 | periodSeconds: 10 484 | timeoutSeconds: 1 485 | successThreshold: 1 486 | failureThreshold: 3 487 | securityContext: 488 | allowPrivilegeEscalation: false 489 | capabilities: 490 | drop: 491 | - all 492 | readOnlyRootFilesystem: true 493 | nodeSelector: 494 | kubernetes.io/os: 
linux 495 | securityContext: 496 | runAsNonRoot: true 497 | runAsUser: 65534 498 | fsGroup: 65534 499 | serviceAccountName: controller 500 | terminationGracePeriodSeconds: 0 -------------------------------------------------------------------------------- /core/manifests/pod-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod-example 5 | labels: 6 | app: nginx 7 | environment: prod 8 | spec: 9 | containers: 10 | - name: nginx 11 | image: nginx:stable-alpine 12 | ports: 13 | - containerPort: 80 14 | -------------------------------------------------------------------------------- /core/manifests/pod-multi-container-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: multi-container-example 5 | spec: 6 | containers: 7 | - name: nginx 8 | image: nginx:stable-alpine 9 | ports: 10 | - containerPort: 80 11 | volumeMounts: 12 | - name: html 13 | mountPath: /usr/share/nginx/html 14 | - name: content 15 | image: alpine:latest 16 | command: ["/bin/sh", "-c"] 17 | args: 18 | - while true; do 19 | echo $(date)"
" >> /html/index.html; 20 | sleep 5; 21 | done 22 | volumeMounts: 23 | - name: html 24 | mountPath: /html 25 | volumes: 26 | - name: html 27 | emptyDir: {} 28 | -------------------------------------------------------------------------------- /core/manifests/service-clusterip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: clusterip 5 | spec: 6 | selector: 7 | app: nginx 8 | ports: 9 | - protocol: TCP 10 | port: 80 11 | targetPort: 80 12 | -------------------------------------------------------------------------------- /core/manifests/service-loadbalancer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: loadbalancer 5 | spec: 6 | type: LoadBalancer 7 | selector: 8 | app: nginx 9 | environment: prod 10 | ports: 11 | - protocol: TCP 12 | port: 80 13 | targetPort: 80 14 | -------------------------------------------------------------------------------- /core/manifests/service-nodeport.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nodeport 5 | spec: 6 | type: NodePort 7 | selector: 8 | app: nginx 9 | environment: prod 10 | ports: 11 | - nodePort: 32410 12 | protocol: TCP 13 | port: 80 14 | targetPort: 80 15 | -------------------------------------------------------------------------------- /examples/README.md: -------------------------------------------------------------------------------- 1 | # Examples 2 | 3 | The Examples here are fully functional applications with descriptions of their components and instructions on how they should be deployed. It is encouraged to fully explore all the manifests to gain a greater understanding of how these components work together. 
4 | 5 | ## Applications 6 | 7 | * [JupyterHub](jupyterhub/README.md) - A multi-user science notebook server 8 | * [WordPress](wordpress/README.md) - Blog / CMS engine 9 | -------------------------------------------------------------------------------- /examples/jupyterhub/README.md: -------------------------------------------------------------------------------- 1 | # JupyterHub 2 | 3 | [JupyterHub][hub] is a multi-user hub that spawns, manages, and proxies to single-user instances of the 4 | [Jupyter notebook server][jupyter]. It is commonly used to serve notebooks to students, enterprise data scientists, 5 | or other scientific research groups. 6 | 7 | The JupyterHub Team has a project known as [Zero to JupyterHub][ztjh] that makes it easy to install and manage 8 | JupyterHub within a Kubernetes cluster. This project makes use of [Helm][helm], a package manager for Kubernetes. 9 | 10 | The example application deployment found in this repo is a stripped down version of the standard helm deployment; 11 | running in single-user mode. It is for demo and example purposes only, and should not be used in any production form. 12 | For that, see the documentation at the [Zero to JupyterHub][ztjh] site. 13 | 14 | ## How does it Work? 15 | The JupyterHub Kubernetes stack makes use of two major components: The hub which acts as a user notebook spawner and a 16 | dynamic proxy to redirect a user to their specific notebook instance. 17 | 18 | When a user logs in, a new Pod is provisioned to serve as their personal notebook server and a proxy rule is added 19 | automatically. Together they make for a fairly seamless Jupyter experience. 20 | 21 | --- 22 | 23 | ## Prereqs 24 | 25 | Create the service accounts and rbac policies with the below command. 
26 | ``` 27 | $ kubectl create -f manifests/rbac.yaml 28 | ``` 29 | 30 | **NOTE:** RBAC is out of scope for the introductory tutorials, however they're required for both the Hub and Proxy to 31 | be able to communicate with the Kubernetes API. If you are interested in exploring RBAC, see the docs here: 32 | [Using RBAC Authorization][rbac] 33 | 34 | --- 35 | 36 | ## Installation 37 | 38 | 1. Create the 3 ConfigMaps: 39 | ``` 40 | $ kubectl create \ 41 | -f manifests/cm-hub-config.yaml \ 42 | -f manifests/cm-ingress.yaml \ 43 | -f manifests/cm-nginx.yaml 44 | ``` 45 | * **[cm-hub-config.yaml](manifests/cm-hub-config.yaml)** - Functions as the Config for JupyterHub and is mounted as a 46 | volume within the Hub Pod. 47 | * **[cm-ingress.yaml](manifests/cm-ingress.yaml)** - A placeholder empty config used by the nginx ingress controller 48 | container within the Proxy Pod. 49 | * **[cm-nginx.yaml](manifests/cm-nginx.yaml)** - Nginx specific configuration options. 50 | 51 | 2. Create the [secret](manifests/secret-hub.yaml) used by the Proxy to authenticate to the Hub. 52 | ``` 53 | $ kubectl create -f manifests/secret-hub.yaml 54 | ``` 55 | 56 | 3. Create the [PVC](manifests/pvc-hub.yaml) used by the Hub to store its internal database. 57 | ``` 58 | $ kubectl create -f manifests/pvc-hub.yaml 59 | ``` 60 | 61 | 4. Now create the 4 services used by both the Hub and Proxy: 62 | ``` 63 | $ kubectl create \ 64 | -f manifests/svc-hub.yaml \ 65 | -f manifests/svc-proxy-api.yaml \ 66 | -f manifests/svc-proxy-http.yaml \ 67 | -f manifests/svc-proxy-public.yaml 68 | ``` 69 | 70 | * **[svc-hub.yaml](manifests/svc-hub.yaml)** - The internal ClusterIP service that targets the Hub server. 71 | * **[svc-proxy-api.yaml](manifests/svc-proxy-api.yaml)** - Internal ClusterIP service that points to the JupyterHub 72 | [Configurable HTTP Proxy (CHP)][chp-proxy] API within the Proxy Pod. 
73 | 74 | * **[svc-proxy-http.yaml](manifests/svc-proxy-http.yaml)** - Internal ClusterIP service that points to CHP within the 75 | Proxy Pod, which in turn points to the Hub server. 76 | * **[svc-proxy-public.yaml](manifests/svc-proxy-public.yaml)** - External User facing NodePort Service that maps to 77 | nginx within the Proxy pod. This service will direct the User to the Hub server and the spawned User Notebooks. 78 | 79 | 5. With everything else provisioned, the two deployments for the Hub Server and Proxy may now be created. 80 | ``` 81 | $ kubectl create \ 82 | -f manifests/deploy-hub.yaml \ 83 | -f manifests/deploy-proxy.yaml 84 | ``` 85 | 86 | * **[deploy-hub.yaml](manifests/deploy-hub.yaml)** - Hub server deployment. 87 | * **[deploy-proxy.yaml](manifests/deploy-proxy.yaml)** - Proxy deployment. 88 | 89 | 6. Wait for the Pods to be up and running: 90 | ``` 91 | $ kubectl get pods --watch 92 | ``` 93 | **NOTE:** It is common for the Hub Server to restart at least once. 94 | 95 | 7. When ready, visit IP:Port output by the command below to access the proxy-public service in a browser and login to 96 | JupyterHub with the credentials: `admin/admin`. 97 | ``` 98 | $ echo $(docker inspect -f '{{.NetworkSettings.Networks.kind.IPAddress}}' kind-control-plane):32020 99 | ``` 100 | **NOTE:** It may take some time for the service to actually become available. Refresh it once or twice within 30 seconds. 101 | 102 | 8. Watch the Pods once again. 103 | ``` 104 | $ kubectl get pods --watch 105 | ``` 106 | There will be Pod spinning up with the name `jupyter-admin`. This is the dynamically provisioned notebook server being 107 | spun up. 108 | 109 | With that you should have a fully functional instance of the JupyterHub provisioned and available to explore. 
110 | 111 | --- 112 | 113 | ## Clean Up 114 | 115 | ``` 116 | $ kubectl delete -f manifests/ 117 | $ kubectl delete pod jupyter-admin 118 | $ kubectl delete pvc claim-admin 119 | ``` 120 | 121 | [hub]: https://jupyterhub.readthedocs.io/en/latest/ 122 | [jupyter]: https://jupyter-notebook.readthedocs.io/en/latest/ 123 | [ztjh]: https://zero-to-jupyterhub.readthedocs.io/en/latest/ 124 | [helm]: https://www.helm.sh/ 125 | [rbac]: https://kubernetes.io/docs/admin/authorization/rbac/ 126 | [chp-proxy]: https://github.com/jupyterhub/configurable-http-proxy -------------------------------------------------------------------------------- /examples/jupyterhub/manifests/cm-hub-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: hub-config 5 | data: 6 | auth.admin.access: "true" 7 | auth.state.enabled: "false" 8 | auth.type: dummy 9 | cull.enabled: "true" 10 | cull.every: "600" 11 | cull.timeout: "3600" 12 | cull.users: "false" 13 | hub.base_url: / 14 | hub.concurrent-spawn-limit: "64" 15 | hub.db_url: sqlite:///jupyterhub.sqlite 16 | singleuser.cloud-metadata: | 17 | enabled: false 18 | ip: 169.254.169.254 19 | singleuser.cmd: jupyterhub-singleuser 20 | singleuser.fs-gid: "1000" 21 | singleuser.memory.guarantee: 100M 22 | singleuser.memory.limit: 150M 23 | singleuser.network-tools.image.name: jupyterhub/k8s-network-tools 24 | singleuser.network-tools.image.tag: v0.6 25 | singleuser.node-selector: '{}' 26 | singleuser.start-timeout: "300" 27 | singleuser.storage.capacity: 10Gi 28 | singleuser.storage.extra-volume-mounts: '[]' 29 | singleuser.storage.extra-volumes: '[]' 30 | singleuser.storage.home_mount_path: /home/jovyan 31 | singleuser.storage.type: dynamic 32 | singleuser.uid: "1000" 33 | token.cookie_secret: "" -------------------------------------------------------------------------------- /examples/jupyterhub/manifests/cm-ingress.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: ingress-controller-leader-jupyterhub-proxy -------------------------------------------------------------------------------- /examples/jupyterhub/manifests/cm-nginx.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: nginx-proxy-config 5 | data: 6 | proxy-body-size: 64m -------------------------------------------------------------------------------- /examples/jupyterhub/manifests/deploy-hub.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: hub 5 | labels: 6 | app: jupyterhub 7 | component: hub 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: jupyterhub 12 | component: hub 13 | template: 14 | metadata: 15 | labels: 16 | app: jupyterhub 17 | component: hub 18 | spec: 19 | containers: 20 | - name: hub-container 21 | image: jupyterhub/k8s-hub:v0.6 22 | command: [ "jupyterhub", "--config", "/srv/jupyterhub_config.py", "--upgrade-db" ] 23 | env: 24 | - name: SINGLEUSER_IMAGE 25 | value: jupyterhub/k8s-singleuser-sample:v0.6 26 | - name: POD_NAMESPACE 27 | valueFrom: 28 | fieldRef: 29 | apiVersion: v1 30 | fieldPath: metadata.namespace 31 | - name: CONFIGPROXY_AUTH_TOKEN 32 | valueFrom: 33 | secretKeyRef: 34 | key: proxy.token 35 | name: hub-secret 36 | ports: 37 | - name: hub 38 | containerPort: 8081 39 | protocol: TCP 40 | resources: 41 | requests: 42 | cpu: 200m 43 | memory: 100M 44 | volumeMounts: 45 | - name: config 46 | mountPath: /etc/jupyterhub/config/ 47 | - name: secret 48 | mountPath: /etc/jupyterhub/secret/ 49 | - name: hub-db-dir 50 | mountPath: /srv/jupyterhub 51 | securityContext: 52 | fsGroup: 1000 53 | runAsUser: 1000 54 | serviceAccount: hub 55 | serviceAccountName: hub 56 | volumes: 57 | - name: config 58 | 
configMap: 59 | name: hub-config 60 | defaultMode: 420 61 | - name: secret 62 | secret: 63 | secretName: hub-secret 64 | defaultMode: 420 65 | - name: hub-db-dir 66 | persistentVolumeClaim: 67 | claimName: hub-db-dir -------------------------------------------------------------------------------- /examples/jupyterhub/manifests/deploy-proxy.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: proxy 5 | labels: 6 | app: jupyterhub 7 | component: proxy 8 | spec: 9 | selector: 10 | matchLabels: 11 | app: jupyterhub 12 | component: proxy 13 | template: 14 | metadata: 15 | labels: 16 | app: jupyterhub 17 | component: proxy 18 | spec: 19 | containers: 20 | - name: nginx 21 | image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.9.0 22 | args: 23 | - /nginx-ingress-controller 24 | - --default-backend-service=default/proxy-http 25 | - --configmap=default/nginx-proxy-config 26 | - --ingress-class=jupyterhub-proxy 27 | - --watch-namespace=default 28 | env: 29 | - name: POD_NAME 30 | valueFrom: 31 | fieldRef: 32 | apiVersion: v1 33 | fieldPath: metadata.name 34 | - name: POD_NAMESPACE 35 | valueFrom: 36 | fieldRef: 37 | apiVersion: v1 38 | fieldPath: metadata.namespace 39 | ports: 40 | - name: http 41 | containerPort: 80 42 | protocol: TCP 43 | - name: chp 44 | image: jupyterhub/configurable-http-proxy:3.0.0 45 | command: 46 | - configurable-http-proxy 47 | - --ip=0.0.0.0 48 | - --port=8000 49 | - --api-ip=0.0.0.0 50 | - --api-port=8001 51 | - --default-target=http://$(HUB_SERVICE_HOST):$(HUB_SERVICE_PORT) 52 | - --error-target=http://$(HUB_SERVICE_HOST):$(HUB_SERVICE_PORT) 53 | - --log-level=debug 54 | env: 55 | - name: CONFIGPROXY_AUTH_TOKEN 56 | valueFrom: 57 | secretKeyRef: 58 | key: proxy.token 59 | name: hub-secret 60 | ports: 61 | - name: proxy-public 62 | containerPort: 8000 63 | protocol: TCP 64 | - name: api 65 | containerPort: 8001 66 | protocol: 
TCP 67 | resources: 68 | requests: 69 | cpu: 200m 70 | memory: 100M 71 | serviceAccount: proxy 72 | serviceAccountName: proxy -------------------------------------------------------------------------------- /examples/jupyterhub/manifests/pvc-hub.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: hub-db-dir 5 | spec: 6 | accessModes: 7 | - ReadWriteOnce 8 | resources: 9 | requests: 10 | storage: 1Gi 11 | storageClassName: standard 12 | -------------------------------------------------------------------------------- /examples/jupyterhub/manifests/rbac.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: nginx 5 | rules: 6 | - apiGroups: 7 | - "" 8 | resources: 9 | - configmaps 10 | - endpoints 11 | - nodes 12 | - pods 13 | - secrets 14 | verbs: 15 | - list 16 | - watch 17 | - apiGroups: 18 | - "" 19 | resourceNames: 20 | - default 21 | resources: 22 | - namespaces 23 | verbs: 24 | - get 25 | - apiGroups: 26 | - "" 27 | resources: 28 | - nodes 29 | verbs: 30 | - get 31 | - apiGroups: 32 | - "" 33 | resources: 34 | - services 35 | verbs: 36 | - get 37 | - list 38 | - update 39 | - watch 40 | - apiGroups: 41 | - extensions 42 | resources: 43 | - ingresses 44 | verbs: 45 | - get 46 | - list 47 | - watch 48 | - apiGroups: 49 | - "" 50 | resources: 51 | - events 52 | verbs: 53 | - create 54 | - patch 55 | - apiGroups: 56 | - extensions 57 | resources: 58 | - ingresses/status 59 | verbs: 60 | - update 61 | --- 62 | apiVersion: rbac.authorization.k8s.io/v1 63 | kind: Role 64 | metadata: 65 | name: hub 66 | rules: 67 | - apiGroups: 68 | - "" 69 | resources: 70 | - pods 71 | - persistentvolumeclaims 72 | verbs: 73 | - get 74 | - watch 75 | - list 76 | - create 77 | - delete 78 | --- 79 | apiVersion: rbac.authorization.k8s.io/v1 80 | kind: Role 
81 | metadata: 82 | name: nginx 83 | rules: 84 | - apiGroups: 85 | - "" 86 | resources: 87 | - configmaps 88 | - namespaces 89 | - pods 90 | - secrets 91 | verbs: 92 | - get 93 | - apiGroups: 94 | - "" 95 | resourceNames: 96 | - ingress-controller-leader-jupyterhub-proxy-tls 97 | resources: 98 | - configmaps 99 | verbs: 100 | - get 101 | - update 102 | - apiGroups: 103 | - "" 104 | resources: 105 | - configmaps 106 | verbs: 107 | - create 108 | - apiGroups: 109 | - "" 110 | resources: 111 | - endpoints 112 | verbs: 113 | - create 114 | - get 115 | - update 116 | --- 117 | apiVersion: rbac.authorization.k8s.io/v1 118 | kind: ClusterRoleBinding 119 | metadata: 120 | name: nginx 121 | roleRef: 122 | apiGroup: rbac.authorization.k8s.io 123 | kind: ClusterRole 124 | name: nginx 125 | subjects: 126 | - kind: ServiceAccount 127 | name: proxy 128 | namespace: default 129 | --- 130 | apiVersion: rbac.authorization.k8s.io/v1 131 | kind: RoleBinding 132 | metadata: 133 | name: hub 134 | roleRef: 135 | apiGroup: rbac.authorization.k8s.io 136 | kind: Role 137 | name: hub 138 | subjects: 139 | - kind: ServiceAccount 140 | name: hub 141 | --- 142 | apiVersion: rbac.authorization.k8s.io/v1 143 | kind: RoleBinding 144 | metadata: 145 | name: nginx 146 | roleRef: 147 | apiGroup: rbac.authorization.k8s.io 148 | kind: Role 149 | name: nginx 150 | subjects: 151 | - kind: ServiceAccount 152 | name: proxy 153 | --- 154 | apiVersion: v1 155 | kind: ServiceAccount 156 | metadata: 157 | name: hub 158 | --- 159 | apiVersion: v1 160 | kind: ServiceAccount 161 | metadata: 162 | name: proxy 163 | -------------------------------------------------------------------------------- /examples/jupyterhub/manifests/secret-hub.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: hub-secret 5 | type: Opaque 6 | data: 7 | proxy.token: 
YjEwYTA4YmE3ZTFkMTg5NjE4NTEyMjJkN2Q3ZTE3YmUyYTVkNzA0ZmJkYTc1MzY0N2RhNmQ2YTE2MjJhMzU3ZQ== 8 | 9 | -------------------------------------------------------------------------------- /examples/jupyterhub/manifests/svc-hub.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: hub 5 | spec: 6 | type: ClusterIP 7 | selector: 8 | app: jupyterhub 9 | component: hub 10 | ports: 11 | - port: 8081 12 | protocol: TCP 13 | targetPort: 8081 14 | 15 | -------------------------------------------------------------------------------- /examples/jupyterhub/manifests/svc-proxy-api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: proxy-api 5 | spec: 6 | type: ClusterIP 7 | selector: 8 | app: jupyterhub 9 | component: proxy 10 | ports: 11 | - port: 8001 12 | protocol: TCP 13 | targetPort: 8001 -------------------------------------------------------------------------------- /examples/jupyterhub/manifests/svc-proxy-http.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: proxy-http 5 | spec: 6 | type: ClusterIP 7 | selector: 8 | app: jupyterhub 9 | component: proxy 10 | ports: 11 | - port: 8000 12 | protocol: TCP 13 | targetPort: 8000 -------------------------------------------------------------------------------- /examples/jupyterhub/manifests/svc-proxy-public.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: proxy-public 5 | spec: 6 | type: NodePort 7 | selector: 8 | app: jupyterhub 9 | component: proxy 10 | ports: 11 | - name: http 12 | nodePort: 32020 13 | port: 80 14 | protocol: TCP 15 | targetPort: 80 -------------------------------------------------------------------------------- 
/examples/wordpress/README.md: -------------------------------------------------------------------------------- 1 | # WordPress 2 | 3 | [WordPress][wordpress] is a commonly used Blog and CMS engine that serves as an excellent introduction to a multi-tier 4 | application. 5 | 6 | The WordPress example has two major components: A MySQL database to serve as the backing datastore and the WordPress 7 | container itself that combines the Apache webserver along with PHP and the needed application dependencies to run an 8 | instance of the blog engine. 9 | 10 | The manifests used in this example function as the bare minimum to provision an instance and should not be used in a 11 | production deployment. For a more production ready deployment, see the [WordPress Helm Chart][wordpress-chart]. 12 | 13 | --- 14 | 15 | ## Installation 16 | 17 | 1. Create the Secret used for the MySQL root account: 18 | ``` 19 | $ kubectl create -f manifests/secret-mysql.yaml 20 | ``` 21 | 22 | * **[manifests/secret-mysql.yaml](manifests/secret-mysql.yaml)** - Contains a base64 encoded string to serve as the 23 | MySQL Database password. 24 | 25 | 26 | 2. Create the MySQL [StatefulSet](manifests/sts-mysql.yaml) and its associated [service](manifests/svc-mysql.yaml). 27 | ``` 28 | $ kubectl create \ 29 | -f manifests/sts-mysql.yaml \ 30 | -f manifests/svc-mysql.yaml 31 | ``` 32 | 33 | * **[manifests/sts-mysql.yaml](manifests/sts-mysql.yaml)** - MySQL StatefulSet. 34 | * **[manifests/svc-mysql.yaml](manifests/svc-mysql.yaml)** - Associated MySQL Service. 35 | 36 | **NOTE:** The MySQL StatefulSet does not require a PVC to be created ahead of time for its storage. Instead, it uses 37 | the `volumeClaimTemplates` StatefulSet feature in combination with the default StorageClass provided by Kind to 38 | dynamically provision a volume. 39 | 40 | 3. Wait for the Pod to be up and running: 41 | ``` 42 | $ kubectl get pods --watch 43 | ``` 44 | 45 | 3. 
With MySQL up and running, WordPress can now be provisioned. Start by Creating the 46 | [PVC](manifests/pvc-wordpress.yaml) used to store WordPress's internal data. 47 | ``` 48 | $ kubectl create -f manifests/pvc-wordpress.yaml 49 | ``` 50 | * **[manifests/pvc-wordpress.yaml](manifests/pvc-wordpress.yaml)** - The Persistent Volume Claim used for the WordPress 51 | pod's own internal storage. 52 | 53 | 4. Now create the WordPress deployment and its associated Service. 54 | ``` 55 | $ kubectl create \ 56 | -f manifests/dep-wordpress.yaml \ 57 | -f manifests/svc-wordpress.yaml 58 | ``` 59 | 60 | * **[manifests/dep-wordpress.yaml](manifests/dep-wordpress.yaml)** - WordPress deployment. The MySQL password is read 61 | from the secret and passed to MySQL as an environment variable. 62 | * **[manifests/svc-wordpress.yaml](manifests/svc-wordpress.yaml)** - WordPress NodePort service 63 | 64 | 5. Wait for the Pods to be up and running: 65 | ``` 66 | $ kubectl get pods --watch 67 | ``` 68 | 69 | 6. With both MySQL and WordPress up and running, visit the IP:Port combo from the command below in a browser to access 70 | the WordPress deployment. 71 | ``` 72 | $ echo $(docker inspect -f '{{.NetworkSettings.Networks.kind.IPAddress}}' kind-control-plane):80 73 | ``` 74 | 75 | At this point, you should see the WordPress default installation and configuration page. You can configure it and 76 | give it a go! 
77 | 78 | --- 79 | 80 | ## Clean Up 81 | 82 | ``` 83 | $ kubectl delete -f manifests/ 84 | $ kubectl delete pvc mysql-data-mysql-0 85 | ``` 86 | 87 | [wordpress]: https://wordpress.org/ 88 | [wordpress-chart]: https://github.com/helm/charts/tree/master/stable/wordpress -------------------------------------------------------------------------------- /examples/wordpress/manifests/dep-wordpress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: wordpress 5 | labels: 6 | app: wordpress 7 | component: wordpress 8 | spec: 9 | replicas: 2 10 | selector: 11 | matchLabels: 12 | app: wordpress 13 | component: wordpress 14 | strategy: 15 | type: Recreate 16 | template: 17 | metadata: 18 | labels: 19 | app: wordpress 20 | component: wordpress 21 | spec: 22 | containers: 23 | - image: wordpress:4.9-apache 24 | name: wordpress 25 | env: 26 | - name: WORDPRESS_DB_HOST 27 | value: mysql 28 | - name: WORDPRESS_DB_PASSWORD 29 | valueFrom: 30 | secretKeyRef: 31 | name: mysql 32 | key: password 33 | ports: 34 | - containerPort: 80 35 | name: wordpress 36 | volumeMounts: 37 | - name: wordpress 38 | mountPath: /var/www/html 39 | volumes: 40 | - name: wordpress 41 | persistentVolumeClaim: 42 | claimName: wordpress 43 | -------------------------------------------------------------------------------- /examples/wordpress/manifests/pvc-wordpress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: wordpress 5 | labels: 6 | app: wordpress 7 | component: wordpress 8 | spec: 9 | accessModes: 10 | - ReadWriteMany 11 | resources: 12 | requests: 13 | storage: 1Gi -------------------------------------------------------------------------------- /examples/wordpress/manifests/secret-mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
v1 2 | kind: Secret 3 | metadata: 4 | name: mysql 5 | labels: 6 | app: wordpress 7 | component: mysql 8 | type: Opaque 9 | data: 10 | password: c3VwZXJzZWNyZXRwYXNzd29yZA== 11 | # supersecretpassword -------------------------------------------------------------------------------- /examples/wordpress/manifests/sts-mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: mysql 5 | labels: 6 | app: wordpress 7 | component: mysql 8 | spec: 9 | replicas: 1 10 | selector: 11 | matchLabels: 12 | app: wordpress 13 | component: mysql 14 | serviceName: mysql 15 | template: 16 | metadata: 17 | labels: 18 | app: wordpress 19 | component: mysql 20 | spec: 21 | containers: 22 | - image: mysql:5.6 23 | name: mysql 24 | env: 25 | - name: MYSQL_ROOT_PASSWORD 26 | valueFrom: 27 | secretKeyRef: 28 | name: mysql 29 | key: password 30 | ports: 31 | - containerPort: 3306 32 | name: mysql 33 | volumeMounts: 34 | - name: mysql-data 35 | mountPath: /var/lib/mysql 36 | volumeClaimTemplates: 37 | - metadata: 38 | name: mysql-data 39 | spec: 40 | accessModes: [ "ReadWriteOnce" ] 41 | storageClassName: standard 42 | resources: 43 | requests: 44 | storage: 1Gi -------------------------------------------------------------------------------- /examples/wordpress/manifests/svc-mysql.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mysql 5 | labels: 6 | app: wordpress 7 | component: mysql 8 | spec: 9 | ports: 10 | - port: 3306 11 | selector: 12 | app: wordpress 13 | component: mysql 14 | clusterIP: None -------------------------------------------------------------------------------- /examples/wordpress/manifests/svc-wordpress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: wordpress 5 | labels: 6 | 
app: wordpress 7 | component: wordpress 8 | spec: 9 | ports: 10 | - port: 80 11 | selector: 12 | app: wordpress 13 | component: wordpress 14 | type: NodePort -------------------------------------------------------------------------------- /storage/README.md: -------------------------------------------------------------------------------- 1 | # Storage 2 | 3 | Pods by themselves are useful, but many workloads require exchanging data between containers, or persisting some 4 | form of data. 5 | 6 | For this task we have Volumes, Persistent Volumes, Persistent Volume Claims, and Storage Classes. 7 | 8 | # Index 9 | 10 | * [Before you Begin](#before-you-begin) 11 | * [Volumes](#volumes) 12 | * [Exercise: Using Volumes with Pods](#exercise-using-volumes-with-pods) 13 | * [Persistent Volumes and Claims](#persistent-volumes-and-claims) 14 | * [Exercise: Understanding Persistent Volumes and Claims](#exercise-understanding-persistent-volumes-and-claims) 15 | * [Exercise: Using PersistentVolumeClaims](#exercise-using-persistentvolumeclaims) 16 | - [Storage](#storage) 17 | - [Index](#index) 18 | - [Before you Begin](#before-you-begin) 19 | - [Volumes](#volumes) 20 | - [Exercise: Using Volumes with Pods](#exercise--using-volumes-with-pods) 21 | - [Persistent Volumes and Claims](#persistent-volumes-and-claims) 22 | - [Exercise: Understanding Persistent Volumes and Claims](#exercise--understanding-persistent-volumes-and-claims) 23 | - [Exercise: Using PersistentVolumeClaims](#exercise--using-persistentvolumeclaims) 24 | - [Storage Classes](#storage-classes) 25 | - [Exercise: Exploring StorageClasses](#exercise--exploring-storageclasses) 26 | - [Helpful Resources](#helpful-resources) 27 | 28 | --- 29 | 30 | # Before you Begin 31 | 32 | Kind comes with a default storage class provisioner that can get in the way when trying to explore how storage 33 | is used within a Kubernetes cluster. For these exercises, it should be disabled. 
34 | 35 | ``` 36 | $ kubectl annotate --overwrite sc standard storageclass.kubernetes.io/is-default-class="false" 37 | ``` 38 | 39 | 40 | When done, re-enabling the default-storageclass will automatically turn it back on. 41 | ``` 42 | $ kubectl annotate --overwrite sc standard storageclass.kubernetes.io/is-default-class="true" 43 | ``` 44 | 45 | --- 46 | 47 | # Volumes 48 | Volumes within Kubernetes are storage that is tied to the Pod’s lifecycle. 49 | 50 | A pod can have one or more type of volumes attached to it. 51 | These volumes are consumable by any of the containers within the pod. 52 | 53 | They can survive Pod restarts; however their durability beyond that is dependent on the Volume Type. 54 | 55 | --- 56 | 57 | ### Exercise: Using Volumes with Pods 58 | **Objective:** Understand how to add and reference volumes to a Pod and their containers. 59 | 60 | --- 61 | 62 | 1) Create a Pod with from the manifest `manifests/volume-example.yaml` or the yaml below. 63 | 64 | **manifests/volume-example.yaml** 65 | ```yaml 66 | apiVersion: v1 67 | kind: Pod 68 | metadata: 69 | name: volume-example 70 | spec: 71 | containers: 72 | - name: nginx 73 | image: nginx:stable-alpine 74 | ports: 75 | - containerPort: 80 76 | volumeMounts: 77 | - name: html 78 | mountPath: /usr/share/nginx/html 79 | readOnly: true 80 | - name: content 81 | image: alpine:latest 82 | volumeMounts: 83 | - name: html 84 | mountPath: /html 85 | command: ["/bin/sh", "-c"] 86 | args: 87 | - while true; do 88 | echo $(date)"
" >> /html/index.html; 89 | sleep 5; 90 | done 91 | volumes: 92 | - name: html 93 | emptyDir: {} 94 | ``` 95 | 96 | **Command** 97 | ``` 98 | $ kubectl create -f manifests/volume-example.yaml 99 | ``` 100 | 101 | Note the relationship between `volumes` in the Pod spec, and the `volumeMounts` directive in each container. 102 | 103 | 2) Exec into `content` container within the `volume-example` Pod, and `cat` the `html/index.html` file. 104 | ``` 105 | $ kubectl exec volume-example -c content -- /bin/sh -c "cat /html/index.html" 106 | ``` 107 | You should see a list of date time-stamps. This is generated by the script being used as the entrypoint (`args`) of the 108 | content container. 109 | 110 | 3) Now do the same within the `nginx` container, using `cat` to see the content of `/usr/share/nginx/html/index.html` 111 | example. 112 | ``` 113 | $ kubectl exec volume-example -c nginx -- /bin/sh -c "cat /usr/share/nginx/html/index.html" 114 | ``` 115 | You should see the same file. 116 | 117 | 4) Now try to append "nginx" to `index.html` from the `nginx` container. 118 | ``` 119 | $ kubectl exec volume-example -c nginx -- /bin/sh -c "echo nginx >> /usr/share/nginx/html/index.html" 120 | ``` 121 | It should error out and complain about the file being read only. The `nginx` container has no reason to write to the 122 | file, and mounts the same Volume as read-only. Writing to the file is handled by the `content` container. 123 | 124 | --- 125 | 126 | **Summary:** Pods may have multiple volumes using different Volume types. Those volumes in turn can be mounted to one 127 | or more containers within the Pod by adding them to the `volumeMounts` list. This is done by referencing their name and 128 | supplying their `mountPath`. Additionally, volumes may be mounted both read-write or read-only depending on the 129 | application, enabling a variety of use-cases. 
130 | 131 | --- 132 | 133 | **Clean Up Command** 134 | ``` 135 | kubectl delete pod volume-example 136 | ``` 137 | 138 | --- 139 | 140 | [Back to Index](#index) 141 | 142 | --- 143 | --- 144 | 145 | # Persistent Volumes and Claims 146 | 147 | Persistent Volumes and Claims work in conjunction to serve as the direct method in which a Pod Consumes Persistent 148 | storage. 149 | 150 | A `PersistentVolume` (PV) is a representation of a cluster-wide storage resource that is linked to a backing storage 151 | provider - `NFS`, `GCEPersistentDisk`, `RBD` etc. 152 | 153 | A `PersistentVolumeClaim` acts as a namespaced _request_ for storage that satisfies a set of a requirements instead 154 | of mapping to the storage resource directly. 155 | 156 | This separation of PV and PVC ensures that an application’s _‘claim’_ for storage is portable across numerous backends 157 | or providers. 158 | 159 | --- 160 | 161 | ### Exercise: Understanding Persistent Volumes and Claims 162 | **Objective:** Gain an understanding of the relationship between Persistent Volumes, Persistent Volume Claims, and 163 | the multiple ways they may be selected. 164 | 165 | --- 166 | 167 | 1) Create PV `pv-sc-example` from the manifest `manifests/pv-sc-example.yaml` or use the yaml below. Ensure to note 168 | that its labeled with `type=hostpath`, its Storage Class Name is set to `mypvsc`, and uses `Delete` for the Reclaim 169 | Policy. 
170 | 171 | **manifests/pv-sc-example.yaml** 172 | ```yaml 173 | kind: PersistentVolume 174 | apiVersion: v1 175 | metadata: 176 | name: pv-sc-example 177 | labels: 178 | type: hostpath 179 | spec: 180 | capacity: 181 | storage: 2Gi 182 | accessModes: 183 | - ReadWriteMany 184 | persistentVolumeReclaimPolicy: Delete 185 | storageClassName: mypvsc 186 | hostPath: 187 | type: DirectoryOrCreate 188 | path: "/data/mypvsc" 189 | ``` 190 | 191 | **Command** 192 | ``` 193 | $ kubectl create -f manifests/pv-sc-example.yaml 194 | ``` 195 | 196 | 2) Once created, list the available Persistent Volumes. 197 | ``` 198 | $ kubectl get pv 199 | ``` 200 | You should see the single PV `pv-sc-example` flagged with the status `Available`. Meaning no claim has been issued 201 | that targets it. 202 | 203 | 3) Create PVC `pvc-selector-example` from the manifest `manifests/pvc-selector-example.yaml` or the yaml below. 204 | 205 | **manifests/pvc-selector-example.yaml** 206 | ```yaml 207 | kind: PersistentVolumeClaim 208 | apiVersion: v1 209 | metadata: 210 | name: pvc-selector-example 211 | spec: 212 | accessModes: 213 | - ReadWriteMany 214 | resources: 215 | requests: 216 | storage: 1Gi 217 | selector: 218 | matchLabels: 219 | type: hostpath 220 | ``` 221 | 222 | **Command** 223 | ``` 224 | $ kubectl create -f manifests/pvc-selector-example.yaml 225 | ``` 226 | 227 | Note that the selector targets `type=hostpath`. 228 | 229 | 4) Then describe the newly created PVC 230 | ``` 231 | $ kubectl describe pvc pvc-selector-example 232 | ``` 233 | The pvc `pvc-selector-example` should be in a `Pending` state with the Error Event `FailedBinding` and 234 | `no Persistent Volumes available for this claim and no storage class is set`. If a PV is given a `storageClassName`, 235 | **ONLY** PVCs that request that Storage Class may use it, even if the selector has a valid target. 
236 | 237 | 5) Now create the PV `pv-selector-example` from the manifest `manifests/pv-selector-example.yaml` or the yaml below. 238 | 239 | **manifests/pv-selector-example.yaml** 240 | ```yaml 241 | kind: PersistentVolume 242 | apiVersion: v1 243 | metadata: 244 | name: pv-selector-example 245 | labels: 246 | type: hostpath 247 | spec: 248 | capacity: 249 | storage: 2Gi 250 | accessModes: 251 | - ReadWriteMany 252 | hostPath: 253 | type: DirectoryOrCreate 254 | path: "/data/mypvselector" 255 | ``` 256 | 257 | **Command** 258 | ``` 259 | $ kubectl create -f manifests/pv-selector-example.yaml 260 | ``` 261 | 262 | 6) Give it a few moments and then look at the Persistent Volumes once again. 263 | ``` 264 | $ kubectl get pv 265 | ``` 266 | The PV `pv-selector-example` should now be in a `Bound` state, meaning that a PVC has been mapped or _"bound"_ to it. 267 | Once bound, **NO** other PVCs may make a claim against the PV. 268 | 269 | 7) Create the pvc `pvc-sc-example` from the manifest `manifests/pvc-sc-example.yaml` or use the yaml below. 270 | 271 | **manifests/pvc-sc-example.yaml** 272 | ```yaml 273 | kind: PersistentVolumeClaim 274 | apiVersion: v1 275 | metadata: 276 | name: pvc-sc-example 277 | spec: 278 | accessModes: 279 | - ReadWriteMany 280 | storageClassName: mypvsc 281 | resources: 282 | requests: 283 | storage: 1Gi 284 | ``` 285 | 286 | **Command** 287 | ``` 288 | $ kubectl create -f manifests/pvc-sc-example.yaml 289 | ``` 290 | 291 | Note that this PVC has a `storageClassName` reference and no selector. 292 | 293 | 8) Give it a few seconds and then view the current PVCs. 294 | ``` 295 | $ kubectl get pvc 296 | ``` 297 | The `pvc-sc-example` should be bound to the `pv-sc-example` Volume. It consumed the PV with the corresponding 298 | `storageClassName`. 299 | 300 | 9) Delete both PVCs. 301 | ``` 302 | $ kubectl delete pvc pvc-sc-example pvc-selector-example 303 | ``` 304 | 305 | 10) Then list the PVs once again. 
306 | ``` 307 | $ kubectl get pv 308 | ```` 309 | The `pv-sc-example` will not be listed. This is because it was created with a `persistentVolumeReclaimPolicy` 310 | of `Delete` meaning that as soon as the PVC was deleted, the PV itself was deleted. 311 | 312 | PV `pv-selector-example`, was created without specifying a `persistentVolumeReclaimPolicy` and was in turn created 313 | with the default for PVs: `Retain`. It's state of `Released` means that it's associated PVC has been deleted. 314 | In this state no other PVC's may claim it, even if `pvc-selector-example` was created again. The PV must **manually** 315 | be reclaimed or deleted. This ensures the preservation of the state of the Volume in the event that its PVC was 316 | accidentally deleted giving an administrator time to do something with the data before reclaiming it. 317 | 318 | 11) Delete the PV `pv-selector-example`. 319 | ``` 320 | $ kubectl delete pv pv-selector-example 321 | ``` 322 | 323 | --- 324 | 325 | **Summary:** Persistent Volumes and Persistent Volume Claims when bound together provide the primary method of 326 | attaching durable storage to Pods. Claims may reference PVs by specifying a `storageClassName`, targeting them 327 | with a selector, or a combination of both. Once a PV is bound to a PVC, it becomes a tightly coupled relationship and 328 | no further PVCs may issue a claim against the PV, even if the binding PVC is deleted. How PVs are reclaimed is 329 | configured via the PV attribute `persistentVolumeReclaimPolicy` where they can either be deleted automatically when 330 | set to `Delete` or require manual intervention when set to `Retain` as a data-preservation safe-guard. 331 | 332 | --- 333 | 334 | ### Exercise: Using PersistentVolumeClaims 335 | **Objective:** Learn how to consume a Persistent Volume Claim within a Pod, and explore some of the ways they may 336 | be used. 
337 | 338 | --- 339 | 340 | 1) Create PV and associated PVC `html` using the manifest `manifests/html-vol.yaml` 341 | 342 | **manifest/html-vol.yaml** 343 | ```yaml 344 | kind: PersistentVolume 345 | apiVersion: v1 346 | metadata: 347 | name: html 348 | labels: 349 | type: hostpath 350 | spec: 351 | capacity: 352 | storage: 1Gi 353 | accessModes: 354 | - ReadWriteMany 355 | storageClassName: html 356 | persistentVolumeReclaimPolicy: Delete 357 | hostPath: 358 | type: DirectoryOrCreate 359 | path: "/tmp/html" 360 | 361 | --- 362 | 363 | kind: PersistentVolumeClaim 364 | apiVersion: v1 365 | metadata: 366 | name: html 367 | spec: 368 | accessModes: 369 | - ReadWriteMany 370 | storageClassName: html 371 | resources: 372 | requests: 373 | storage: 1Gi 374 | ``` 375 | 376 | **Command** 377 | ``` 378 | $ kubectl create -f manifests/html-vol.yaml 379 | ``` 380 | 381 | 2) Create Deployment `writer` from the manifest `manifests/writer.yaml` or use the yaml below. It is similar to the 382 | [`volume-example` Pod from the first exercise](#exercise-using-volumes-with-pods), but now uses a 383 | `persistentVolumeClaim` Volume instead of an `emptyDir`. 
384 | 385 | **manifests/writer.yaml** 386 | ```yaml 387 | apiVersion: apps/v1 388 | kind: Deployment 389 | metadata: 390 | name: writer 391 | spec: 392 | replicas: 1 393 | selector: 394 | matchLabels: 395 | app: writer 396 | template: 397 | metadata: 398 | labels: 399 | app: writer 400 | spec: 401 | containers: 402 | - name: content 403 | image: alpine:latest 404 | volumeMounts: 405 | - name: html 406 | mountPath: /html 407 | command: ["/bin/sh", "-c"] 408 | args: 409 | - while true; do 410 | date >> /html/index.html; 411 | sleep 5; 412 | done 413 | volumes: 414 | - name: html 415 | persistentVolumeClaim: 416 | claimName: html 417 | ``` 418 | 419 | **Command** 420 | ``` 421 | $ kubectl create -f manifests/writer.yaml 422 | ``` 423 | 424 | Note that the `claimName` references the previously created PVC defined in the `html-vol` manifest. 425 | 426 | 2) Create a Deployment and Service `reader` from the manifest `manifests/reader.yaml` or use the yaml below. 427 | 428 | **manifests/reader.yaml** 429 | ```yaml 430 | apiVersion: apps/v1 431 | kind: Deployment 432 | metadata: 433 | name: reader 434 | spec: 435 | replicas: 3 436 | selector: 437 | matchLabels: 438 | app: reader 439 | template: 440 | metadata: 441 | labels: 442 | app: reader 443 | spec: 444 | containers: 445 | - name: nginx 446 | image: nginx:stable-alpine 447 | ports: 448 | - containerPort: 80 449 | volumeMounts: 450 | - name: html 451 | mountPath: /usr/share/nginx/html 452 | readOnly: true 453 | volumes: 454 | - name: html 455 | persistentVolumeClaim: 456 | claimName: html 457 | 458 | --- 459 | 460 | apiVersion: v1 461 | kind: Service 462 | metadata: 463 | name: reader 464 | spec: 465 | selector: 466 | app: reader 467 | ports: 468 | - protocol: TCP 469 | port: 80 470 | targetPort: 80 471 | ``` 472 | 473 | **Command** 474 | ``` 475 | $ kubectl create -f manifests/reader.yaml 476 | ``` 477 | 478 | 479 | 3) With the `reader` Deployment and Service created, use `kubectl proxy` to view the `reader` Service. 
480 | ``` 481 | $ kubectl proxy 482 | ``` 483 | **URL** 484 | ``` 485 | http://127.0.0.1:8001/api/v1/namespaces/default/services/reader/proxy/ 486 | ``` 487 | The `reader` Pods can reference the same Claim as the `writer` Pod. This is possible because the PV and PVC were 488 | created with the access mode `ReadWriteMany`. 489 | 490 | 4) Now try to append "nginx" to `index.html` from one of the `reader` Pods. 491 | ``` 492 | $ kubectl exec reader-- -- /bin/sh -c "echo nginx >> /usr/share/nginx/html/index.html" 493 | ``` 494 | The `reader` Pods have mounted the Volume as read only. Just as it did with exercise 1, The command should error out 495 | with a message complaining about not being able to modify a read-only filesystem. 496 | 497 | --- 498 | 499 | **Summary:** Using Persistent Volume Claims with Pods is quite easy. The attribute `persistentVolumeClaim.claimName` 500 | simply must reference the name of the desired PVC in the Pod's Volume definition. Multiple Pods may reference the same 501 | PVC as long as their access mode supports it. 502 | 503 | --- 504 | 505 | **Clean Up Command** 506 | ``` 507 | kubectl delete -f manifests/reader.yaml -f manifests/writer.yaml -f manifests/html-vol.yaml 508 | ``` 509 | 510 | --- 511 | 512 | [Back to Index](#index) 513 | 514 | --- 515 | --- 516 | 517 | # Storage Classes 518 | 519 | Storage classes are an abstraction on top of an external storage resource (PV). They work directly with the external 520 | storage system to enable dynamic provisioning and remove the need for the cluster admin to pre-provision Persistent 521 | Volumes. 522 | 523 | --- 524 | 525 | ### Exercise: Exploring StorageClasses 526 | **Objective:** Understand how it's possible for a Persistent Volume Claim to consume dynamically provisioned storage 527 | via a Storage Class. 
528 | 529 | --- 530 | 531 | 1) Re-enable the kind default-storageclass, and wait for it to become available 532 | ``` 533 | $ kubectl annotate --overwrite sc standard storageclass.kubernetes.io/is-default-class="true" 534 | ``` 535 | 536 | 2) Describe the new Storage Class 537 | ``` 538 | $ kubectl describe sc standard 539 | ``` 540 | Note the fields `IsDefaultClass`, `Provisioner`, and `ReclaimPolicy`. The `Provisioner` attribute references the 541 | _"driver"_ for the Storage Class. Kind comes with it's own driver `rancher.io/local-path` that simply mounts 542 | a hostpath from within the VM as a Volume. 543 | 544 | 3) Create PVC `pvc-standard` from the manifest `manifests/pvc-standard.yaml` or use the yaml below. 545 | 546 | **manifests/pvc-standard.yaml** 547 | ```yaml 548 | kind: PersistentVolumeClaim 549 | apiVersion: v1 550 | metadata: 551 | name: pvc-standard 552 | spec: 553 | accessModes: 554 | - ReadWriteMany 555 | storageClassName: standard 556 | resources: 557 | requests: 558 | storage: 1Gi 559 | ``` 560 | 561 | **Command** 562 | ``` 563 | $ kubectl create -f manifests/pvc-standard.yaml 564 | ``` 565 | 566 | 4) Describe the PVC `pvc-standard` 567 | ``` 568 | $ kubectl describe pvc pvc-standard 569 | ``` 570 | The `Events` lists the actions that occurred when the PVC was created. The external provisioner `standard` provisions 571 | a Volume for the claim `default/pvc-standard` and is assigned the name `pvc-`. 572 | 573 | 5) List the PVs. 574 | ``` 575 | $ kubectl get pv 576 | ``` 577 | The PV `pvc-` will be the **exact** size of the associated PVC. 578 | 579 | 6) Now create the PVC `pvc-selector-example` from the manifest `manifests/pvc-selector-example.yaml` or use the yaml 580 | below. 
581 | 582 | **manifests/pvc-selector-example.yaml** 583 | ```yaml 584 | apiVersion: v1 585 | kind: PersistentVolumeClaim 586 | metadata: 587 | name: pvc-selector-example 588 | spec: 589 | accessModes: 590 | - ReadWriteMany 591 | resources: 592 | requests: 593 | storage: 1Gi 594 | selector: 595 | matchLabels: 596 | type: hostpath 597 | ``` 598 | 599 | **Command** 600 | ``` 601 | $ kubectl create -f manifests/pvc-selector-example.yaml 602 | ``` 603 | 604 | 7) List the PVCs. 605 | ``` 606 | $ kubectl get pvc 607 | ``` 608 | The PVC `pvc-selector-example` was bound to a PV automatically, even without a valid selector target. The `standard` 609 | Storage Class was configured as the default, meaning that **any** PVCs that do not have a valid target will default to 610 | using the `standard` Storage Class. 611 | 612 | 8) Delete both PVCs. 613 | ``` 614 | $ kubectl delete pvc pvc-standard pvc-selector-example 615 | ``` 616 | 617 | 9) List the PVs once again. 618 | ``` 619 | $ kubectl get pv 620 | ``` 621 | The PVs were automatically reclaimed following the `ReclaimPolicy` that was set by the Storage Class. 622 | 623 | --- 624 | 625 | **Summary:** Storage Classes provide a method of dynamically provisioning Persistent Volumes from an external Storage 626 | System. They have the same attributes as normal PVs, and have their own methods of being garbage collected. They may 627 | be targeted by name using the `storageClassName` within a Persistent Volume Claim request, or a Storage Class may be 628 | configured as default ensuring that Claims may be fulfilled even when there is no valid selector target. 
629 | 630 | --- 631 | 632 | [Back to Index](#index) 633 | 634 | --- 635 | --- 636 | 637 | # Helpful Resources 638 | 639 | * [Persistent Volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) 640 | * [Storage Classes](https://kubernetes.io/docs/concepts/storage/storage-classes/) 641 | 642 | --- 643 | 644 | [Back to Index](#index) 645 | -------------------------------------------------------------------------------- /storage/manifests/html-vol.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolume 2 | apiVersion: v1 3 | metadata: 4 | name: html 5 | labels: 6 | type: hostpath 7 | spec: 8 | capacity: 9 | storage: 1Gi 10 | accessModes: 11 | - ReadWriteMany 12 | storageClassName: html 13 | persistentVolumeReclaimPolicy: Delete 14 | hostPath: 15 | type: DirectoryOrCreate 16 | path: "/tmp/html" 17 | 18 | --- 19 | 20 | kind: PersistentVolumeClaim 21 | apiVersion: v1 22 | metadata: 23 | name: html 24 | spec: 25 | accessModes: 26 | - ReadWriteMany 27 | storageClassName: html 28 | resources: 29 | requests: 30 | storage: 1Gi 31 | -------------------------------------------------------------------------------- /storage/manifests/pv-sc-example.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolume 2 | apiVersion: v1 3 | metadata: 4 | name: pv-sc-example 5 | labels: 6 | type: hostpath 7 | spec: 8 | capacity: 9 | storage: 2Gi 10 | accessModes: 11 | - ReadWriteMany 12 | storageClassName: mypvsc 13 | persistentVolumeReclaimPolicy: Delete 14 | hostPath: 15 | type: DirectoryOrCreate 16 | path: "/tmp/mypvsc" 17 | -------------------------------------------------------------------------------- /storage/manifests/pv-selector-example.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolume 2 | apiVersion: v1 3 | metadata: 4 | name: pv-selector-example 5 | labels: 6 | type: hostpath 7 | spec: 8 | 
capacity: 9 | storage: 2Gi 10 | accessModes: 11 | - ReadWriteMany 12 | persistentVolumeReclaimPolicy: Retain 13 | hostPath: 14 | type: DirectoryOrCreate 15 | path: "/tmp/mypvselector" 16 | -------------------------------------------------------------------------------- /storage/manifests/pvc-sc-example.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: pvc-sc-example 5 | spec: 6 | accessModes: 7 | - ReadWriteMany 8 | storageClassName: mypvsc 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | -------------------------------------------------------------------------------- /storage/manifests/pvc-selector-example.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: pvc-selector-example 5 | spec: 6 | accessModes: 7 | - ReadWriteMany 8 | resources: 9 | requests: 10 | storage: 1Gi 11 | selector: 12 | matchLabels: 13 | type: hostpath 14 | -------------------------------------------------------------------------------- /storage/manifests/pvc-standard.yaml: -------------------------------------------------------------------------------- 1 | kind: PersistentVolumeClaim 2 | apiVersion: v1 3 | metadata: 4 | name: pvc-standard 5 | spec: 6 | accessModes: 7 | - ReadWriteMany 8 | storageClassName: standard 9 | resources: 10 | requests: 11 | storage: 1Gi 12 | -------------------------------------------------------------------------------- /storage/manifests/reader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: reader 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: reader 10 | template: 11 | metadata: 12 | labels: 13 | app: reader 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx:stable-alpine 18 | ports: 19 | - 
containerPort: 80 20 | volumeMounts: 21 | - name: html 22 | mountPath: /usr/share/nginx/html 23 | readOnly: true 24 | volumes: 25 | - name: html 26 | persistentVolumeClaim: 27 | claimName: html 28 | 29 | --- 30 | 31 | apiVersion: v1 32 | kind: Service 33 | metadata: 34 | name: reader 35 | spec: 36 | selector: 37 | app: reader 38 | ports: 39 | - protocol: TCP 40 | port: 80 41 | targetPort: 80 42 | -------------------------------------------------------------------------------- /storage/manifests/volume-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: volume-example 5 | spec: 6 | containers: 7 | - name: nginx 8 | image: nginx:stable-alpine 9 | ports: 10 | - containerPort: 80 11 | volumeMounts: 12 | - name: html 13 | mountPath: /usr/share/nginx/html 14 | readOnly: true 15 | - name: content 16 | image: alpine:latest 17 | volumeMounts: 18 | - name: html 19 | mountPath: /html 20 | command: ["/bin/sh", "-c"] 21 | args: 22 | - while true; do 23 | echo $(date)"
" >> /html/index.html; 24 | sleep 5; 25 | done 26 | volumes: 27 | - name: html 28 | emptyDir: {} 29 | 30 | -------------------------------------------------------------------------------- /storage/manifests/writer.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: writer 5 | spec: 6 | replicas: 1 7 | selector: 8 | matchLabels: 9 | app: writer 10 | template: 11 | metadata: 12 | labels: 13 | app: writer 14 | spec: 15 | containers: 16 | - name: content 17 | image: alpine:latest 18 | volumeMounts: 19 | - name: html 20 | mountPath: /html 21 | command: ["/bin/sh", "-c"] 22 | args: 23 | - while true; do 24 | date >> /html/index.html; 25 | sleep 5; 26 | done 27 | volumes: 28 | - name: html 29 | persistentVolumeClaim: 30 | claimName: html 31 | -------------------------------------------------------------------------------- /workloads/README.md: -------------------------------------------------------------------------------- 1 | # Workloads 2 | 3 | Workloads within Kubernetes are higher level objects that manage Pods or other higher level objects. 4 | 5 | In **ALL CASES** a Pod Template is included, and acts as the base tier of management. 6 | 7 | **Note:** 8 | 1) If you are coming directly from the previous tutorials (core), you may still be configured to use the 9 | `kind-dev` context. Switch to the `kind-kind` context before proceeding with the rest of the tutorials. 10 | 11 | 2) Unlike some of the other tutorials, the workload exercises should be cleaned up before moving onto the next 12 | workload type. The clean-up commands will included after **Summary** section of the exercise. 
13 | 14 | # Index 15 | * [ReplicaSets](#replicasets) 16 | * [Exercise: Understanding ReplicaSets](#exercise-understanding-replicasets) 17 | * [Deployments](#deployments) 18 | * [Exercise: Using Deployments](#exercise-using-deployments) 19 | * [Exercise: Rolling Back a Deployment](#exercise-rolling-back-a-deployment) 20 | * [DaemonSets](#daemonsets) 21 | * [Exercise: Managing DaemonSets](#exercise-managing-daemonsets) 22 | * [Optional: Working with DaemonSet Revisions](#optional-working-with-daemonset-revisions) 23 | * [StatefulSets](#statefulsets) 24 | * [Exercise: Managing StatefulSets](#exercise-managing-statefulsets) 25 | * [Exercise: Understanding StatefulSet Network Identity](#exercise-understanding-statefulset-network-identity) 26 | * [Jobs and Cronjobs](#jobs-and-cronjobs) 27 | * [Exercise: Creating a Job](#exercise-creating-a-job) 28 | * [Exercise: Scheduling a CronJob](#exercise-scheduling-a-cronjob) 29 | * [Helpful Resources](#helpful-resources) 30 | 31 | 32 | --- 33 | 34 | # ReplicaSets 35 | ReplicaSets are the primary method of managing Pod replicas and their lifecycle. This includes their scheduling, 36 | scaling, and deletion. 37 | 38 | Their job is simple, **always** ensure the desired number of `replicas` that match the selector are running. 39 | 40 | --- 41 | 42 | ### Exercise: Understanding ReplicaSets 43 | **Objective:** Create and scale a ReplicaSet. Explore and gain an understanding of how the Pods are generated from 44 | the Pod template, and how they are targeted with selectors. 45 | 46 | --- 47 | 48 | 1) Begin by creating a ReplicaSet called `rs-example` with `3` `replicas`, using the `nginx:stable-alpine` image and 49 | configure the labels and selectors to target `app=nginx` and `env=prod`. The yaml block below or the manifest 50 | `manifests/rs-example.yaml` may be used. 
51 | 52 | **manifests/rs-example.yaml** 53 | ```yaml 54 | apiVersion: apps/v1 55 | kind: ReplicaSet 56 | metadata: 57 | name: example-rs 58 | spec: 59 | replicas: 3 60 | selector: 61 | matchLabels: 62 | app: nginx 63 | env: prod 64 | template: 65 | metadata: 66 | labels: 67 | app: nginx 68 | env: prod 69 | spec: 70 | containers: 71 | - name: nginx 72 | image: nginx:stable-alpine 73 | ports: 74 | - containerPort: 80 75 | ``` 76 | 77 | **Command** 78 | ``` 79 | $ kubectl create -f manifests/rs-example.yaml 80 | ``` 81 | 82 | 2) Watch as the newly created ReplicaSet provisions the Pods based off the Pod Template. 83 | ``` 84 | $ kubectl get pods --watch --show-labels 85 | ``` 86 | Note that the newly provisioned Pods are given a name based off the ReplicaSet name appended with a 5 character random 87 | string. These Pods are labeled with the labels as specified in the manifest. 88 | 89 | 3) Scale ReplicaSet `rs-example` up to `5` replicas with the below command. 90 | ``` 91 | $ kubectl scale replicaset rs-example --replicas=5 92 | ``` 93 | **Tip:** `replicaset` can be substituted with `rs` when using `kubectl`. 94 | 95 | 4) Describe `rs-example` and take note of the `Replicas` and `Pod Status` field in addition to the `Events`. 96 | ``` 97 | $ kubectl describe rs rs-example 98 | ``` 99 | 100 | 5) Now, using the `scale` command bring the replicas back down to `3`. 101 | ``` 102 | $ kubectl scale rs rs-example --replicas=3 103 | ``` 104 | 105 | 6) Watch as the ReplicaSet Controller terminates 2 of the Pods to bring the cluster back into it's desired state of 106 | 3 replicas. 107 | ``` 108 | $ kubectl get pods --show-labels --watch 109 | ``` 110 | 111 | 7) Once `rs-example` is back down to 3 Pods. Create an independent Pod manually with the same labels as the one 112 | targeted by `rs-example` from the manifest `manifests/pod-rs-example.yaml`. 
113 | 114 | **manifests/pod-rs-example.yaml** 115 | ```yaml 116 | apiVersion: v1 117 | kind: Pod 118 | metadata: 119 | name: pod-example 120 | labels: 121 | app: nginx 122 | env: prod 123 | spec: 124 | containers: 125 | - name: nginx 126 | image: nginx:stable-alpine 127 | ports: 128 | - containerPort: 80 129 | ``` 130 | 131 | **Command** 132 | ``` 133 | $ kubectl create -f manifests/pod-rs-example.yaml 134 | ``` 135 | 136 | 8) Immediately watch the Pods. 137 | ``` 138 | $ kubectl get pods --show-labels --watch 139 | ``` 140 | Note that the Pod is created and immediately terminated. 141 | 142 | 9) Describe `rs-example` and look at the `events`. 143 | ``` 144 | $ kubectl describe rs rs-example 145 | ``` 146 | There will be an entry with `Deleted pod: pod-example`. This is because a ReplicaSet targets **ALL** Pods matching 147 | the labels supplied in the selector. 148 | 149 | --- 150 | 151 | **Summary:** ReplicaSets ensure a desired number of replicas matching the selector are present. They manage the 152 | lifecycle of **ALL** matching Pods. If the desired number of replicas matching the selector currently exist when the 153 | ReplicaSet is created, no new Pods will be created. If they are missing, then the ReplicaSet Controller will create 154 | new Pods based off the Pod Template till the desired number of Replicas are present. 155 | 156 | --- 157 | 158 | **Clean Up Command** 159 | ``` 160 | kubectl delete rs rs-example 161 | ``` 162 | 163 | --- 164 | 165 | [Back to Index](#index) 166 | 167 | --- 168 | --- 169 | 170 | # Deployments 171 | Deployments are a declarative method of managing Pods via ReplicaSets. They provide rollback functionality in addition 172 | to more granular update control mechanisms. 173 | 174 | --- 175 | 176 | ### Exercise: Using Deployments 177 | **Objective:** Create, update and scale a Deployment as well as explore the relationship of Deployment, ReplicaSet 178 | and Pod. 179 | 180 | --- 181 | 182 | 1) Create a Deployment `deploy-example`. 
Configure it using the example yaml block below or use the manifest 183 | `manifests/deploy-example.yaml`. Additionally pass the `--record` flag to `kubectl` when you create the Deployment. 184 | The `--record` flag saves the command as an annotation, and it can be thought of similar to a git commit message. 185 | 186 | **manifests/deploy-example.yaml** 187 | ```yaml 188 | apiVersion: apps/v1 189 | kind: Deployment 190 | metadata: 191 | name: deploy-example 192 | spec: 193 | replicas: 3 194 | revisionHistoryLimit: 3 195 | selector: 196 | matchLabels: 197 | app: nginx 198 | strategy: 199 | type: RollingUpdate 200 | rollingUpdate: 201 | maxSurge: 1 202 | maxUnavailable: 0 203 | template: 204 | metadata: 205 | labels: 206 | app: nginx 207 | spec: 208 | containers: 209 | - name: nginx 210 | image: nginx:stable-alpine 211 | ports: 212 | - containerPort: 80 213 | ``` 214 | 215 | **Command** 216 | ``` 217 | $ kubectl create -f manifests/deploy-example.yaml --record 218 | ``` 219 | 220 | 2) Check the status of the Deployment. 221 | ``` 222 | $ kubectl get deployments 223 | ``` 224 | 225 | 3) Once the Deployment is ready, view the current ReplicaSets and be sure to show the labels. 226 | ``` 227 | $ kubectl get rs --show-labels 228 | ``` 229 | Note the name and `pod-template-hash` label of the newly created ReplicaSet. The created ReplicaSet's name will 230 | include the `pod-template-hash`. 231 | 232 | 4) Describe the generated ReplicaSet. 233 | ``` 234 | $ kubectl describe rs deploy-example- 235 | ``` 236 | Look at both the `Labels` and the `Selectors` fields. The `pod-template-hash` value has automatically been added to 237 | both the Labels and Selector of the ReplicaSet. Then take note of the `Controlled By` field. This will reference the 238 | direct parent object, and in this case the original `deploy-example` Deployment. 239 | 240 | 5) Now, get the Pods and pass the `--show-labels` flag.
241 | ``` 242 | $ kubectl get pods --show-labels 243 | ``` 244 | Just as with the ReplicaSet, the Pods' names and labels include the `pod-template-hash`. 245 | 246 | 6) Describe one of the Pods. 247 | ``` 248 | $ kubectl describe pod deploy-example- 249 | ``` 250 | Look at the `Controlled By` field. It will contain a reference to the parent ReplicaSet, but not the parent Deployment. 251 | 252 | Now that the relationship from Deployment to ReplicaSet to Pod is understood, it is time to update the 253 | `deploy-example` and see an update in action. 254 | 255 | 7) Update the `deploy-example` manifest and add a few additional labels to the Pod template. Once done, apply the 256 | change with the `--record` flag. 257 | ``` 258 | $ kubectl apply -f manifests/deploy-example.yaml --record 259 | < or > 260 | $ kubectl edit deploy deploy-example --record 261 | ``` 262 | **Tip:** `deploy` can be substituted for `deployment` when using `kubectl`. 263 | 264 | 8) Immediately watch the Pods. 265 | ``` 266 | $ kubectl get pods --show-labels --watch 267 | ``` 268 | The old version of the Pods will be phased out one at a time and instances of the new version will take its place. 269 | The way in which this is controlled is through the `strategy` stanza. For specific documentation on this feature, see 270 | the [Deployment Strategy Documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy). 271 | 272 | 9) Now view the ReplicaSets. 273 | ``` 274 | $ kubectl get rs --show-labels 275 | ``` 276 | There will now be two ReplicaSets, with the previous version of the Deployment being scaled down to 0. 277 | 278 | 10) Now, scale the Deployment up as you would a ReplicaSet, and set the `replicas=5`. 279 | ``` 280 | $ kubectl scale deploy deploy-example --replicas=5 281 | ``` 282 | 283 | 11) List the ReplicaSets. 284 | ``` 285 | $ kubectl get rs --show-labels 286 | ``` 287 | Note that there is **NO** new ReplicaSet generated.
Scaling actions do **NOT** trigger a change in the Pod Template. 288 | 289 | 12) Just as before, describe the Deployment, ReplicaSet and one of the Pods. Note the `Events` and `Controlled By` 290 | fields. It should present a clear picture of relationship between objects during an update of a Deployment. 291 | ``` 292 | $ kubectl describe deploy deploy-example 293 | $ kubectl describe rs deploy-example- 294 | $ kubectl describe pod deploy-example- 295 | ``` 296 | 297 | --- 298 | 299 | **Summary:** Deployments are the main method of managing applications deployed within Kubernetes. They create and 300 | supervise targeted ReplicaSets by generating a unique hash called the `pod-template-hash` and attaching it to child 301 | objects as a Label along with automatically including it in their Selector. This method of managing rollouts along with 302 | being able to define the methods and tolerances in the update strategy permits for a safe and seamless way of updating 303 | an application in place. 304 | 305 | --- 306 | 307 | ### Exercise: Rolling Back a Deployment 308 | **Objective:** Learn how to view the history of a Deployment and rollback to older revisions. 309 | 310 | **Note:** This exercise builds off the previous exercise: [Using Deployments](#exercise-using-deployments). If you 311 | have not, complete it first before continuing. 312 | 313 | --- 314 | 315 | 1) Use the `rollout` command to view the `history` of the Deployment `deploy-example`. 316 | ``` 317 | $ kubectl rollout history deployment deploy-example 318 | ``` 319 | There should be two revisions. One for when the Deployment was first created, and another when the additional Labels 320 | were added. The number of revisions saved is based off of the `revisionHistoryLimit` attribute in the Deployment spec. 321 | 322 | 2) Look at the details of a specific revision by passing the `--revision=` flag. 
323 | ``` 324 | $ kubectl rollout history deployment deploy-example --revision=1 325 | $ kubectl rollout history deployment deploy-example --revision=2 326 | ``` 327 | Viewing the specific revision will display a summary of the Pod Template. 328 | 329 | 3) Choose to go back to revision `1` by using the `rollout undo` command. 330 | ``` 331 | $ kubectl rollout undo deployment deploy-example --to-revision=1 332 | ``` 333 | **Tip:** The `--to-revision` flag can be omitted if you wish to just go back to the previous configuration. 334 | 335 | 4) Immediately watch the Pods. 336 | ``` 337 | $ kubectl get pods --show-labels --watch 338 | ``` 339 | They will cycle through rolling back to the previous revision. 340 | 341 | 5) Describe the Deployment `deploy-example`. 342 | ``` 343 | $ kubectl describe deployment deploy-example 344 | ``` 345 | The events will describe the scaling back of the previous and switching over to the desired revision. 346 | 347 | --- 348 | 349 | **Summary:** Understanding how to use `rollout` command to both get a diff of the different revisions as well as 350 | be able to roll-back to a previously known good configuration is an important aspect of Deployments that cannot 351 | be left out. 352 | 353 | --- 354 | 355 | **Clean Up Command** 356 | ``` 357 | kubectl delete deploy deploy-example 358 | ``` 359 | 360 | --- 361 | 362 | [Back to Index](#index) 363 | 364 | --- 365 | --- 366 | 367 | # DaemonSets 368 | 369 | DaemonSets ensure that all nodes matching certain criteria will run an instance of the supplied Pod. 370 | 371 | They bypass default scheduling mechanisms and restrictions, and are ideal for cluster wide services such as 372 | log forwarding, or health monitoring. 373 | 374 | --- 375 | 376 | ### Exercise: Managing DaemonSets 377 | **Objective:** Experience creating, updating, and rolling back a DaemonSet. Additionally delve into the process of 378 | how they are scheduled and how an update occurs. 
379 | 380 | --- 381 | 382 | 1) Create DaemonSet `ds-example` and pass the `--record` flag. Use the example yaml block below as a base, or use 383 | the manifest `manifests/ds-example.yaml` directly. 384 | 385 | **manifests/ds-example.yaml** 386 | ```yaml 387 | apiVersion: apps/v1 388 | kind: DaemonSet 389 | metadata: 390 | name: ds-example 391 | spec: 392 | revisionHistoryLimit: 3 393 | selector: 394 | matchLabels: 395 | app: nginx 396 | template: 397 | metadata: 398 | labels: 399 | app: nginx 400 | spec: 401 | nodeSelector: 402 | nodeType: edge 403 | containers: 404 | - name: nginx 405 | image: nginx:stable-alpine 406 | ports: 407 | - containerPort: 80 408 | ``` 409 | 410 | **Command** 411 | ``` 412 | $ kubectl create -f manifests/ds-example.yaml --record 413 | ``` 414 | 415 | 2) View the current DaemonSets. 416 | ``` 417 | $ kubectl get daemonset 418 | ``` 419 | As there are no matching nodes, no Pods should be scheduled. 420 | 421 | 3) Label the `kind-control-plane` node with `nodeType=edge`. 422 | ``` 423 | $ kubectl label node kind-control-plane nodeType=edge 424 | ``` 425 | 426 | 4) View the current DaemonSets once again. 427 | ``` 428 | $ kubectl get daemonsets 429 | ``` 430 | There should now be a single instance of the DaemonSet `ds-example` deployed. 431 | 432 | 5) View the current Pods and display their labels with `--show-labels`. 433 | ``` 434 | $ kubectl get pods --show-labels 435 | ``` 436 | Note that the deployed Pod has a `controller-revision-hash` label. This is used like the `pod-template-hash` in a 437 | Deployment to track and allow for rollback functionality. 438 | 439 | 6) Describing the DaemonSet will provide you with status information regarding its deployment cluster wide. 440 | ``` 441 | $ kubectl describe ds ds-example 442 | ``` 443 | **Tip:** `ds` can be substituted for `daemonset` when using `kubectl`. 444 | 445 | 7) Update the DaemonSet by adding a few additional labels to the Pod Template and use the `--record` flag.
446 | ``` 447 | $ kubectl apply -f manifests/ds-example.yaml --record 448 | < or > 449 | $ kubectl edit ds ds-example --record 450 | ``` 451 | 452 | 8) Watch the Pods and be sure to show the labels. 453 | ``` 454 | $ kubectl get pods --show-labels --watch 455 | ``` 456 | The old version of the DaemonSet will be phased out one at a time and instances of the new version will take its 457 | place. Similar to Deployments, DaemonSets have their own equivalent to a Deployment's `strategy` in the form of 458 | `updateStrategy`. The defaults are generally suitable, but other tuning options may be set. For reference, see the 459 | [Updating DaemonSet Documentation](https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/#performing-a-rolling-update). 460 | 461 | --- 462 | 463 | **Summary:** DaemonSets are usually used for important cluster-wide support services such as Pod Networking, Logging, 464 | or Monitoring. They differ from other workloads in that their scheduling bypasses normal mechanisms, and is centered 465 | around node placement. Like Deployments, they have their own equivalent of the `pod-template-hash` in the form of 466 | `controller-revision-hash` used for keeping track of Pod Template revisions and enabling rollback functionality. 467 | 468 | --- 469 | 470 | ### Optional: Working with DaemonSet Revisions 471 | 472 | **Objective:** Explore using the `rollout` command to rollback to a specific version of a DaemonSet. 473 | 474 | **Note:** This exercise is functionally identical to the exercise [Rolling Back a Deployment](#exercise-rolling-back-a-deployment). 475 | If you have completed that exercise, then this may be considered optional. Additionally, this exercise builds off 476 | the previous exercise [Managing DaemonSets](#exercise-managing-daemonsets) and it must be completed before continuing.
477 | 478 | --- 479 | 480 | 1) Use the `rollout` command to view the `history` of the DaemonSet `ds-example`. 481 | ``` 482 | $ kubectl rollout history ds ds-example 483 | ``` 484 | There should be two revisions. One for when the DaemonSet was first created, and another when the additional Labels 485 | were added. The number of revisions saved is based off of the `revisionHistoryLimit` attribute in the DaemonSet spec. 486 | 487 | 2) Look at the details of a specific revision by passing the `--revision=` flag. 488 | ``` 489 | $ kubectl rollout history ds ds-example --revision=1 490 | $ kubectl rollout history ds ds-example --revision=2 491 | ``` 492 | Viewing the specific revision will display the Pod Template. 493 | 494 | 3) Choose to go back to revision `1` by using the `rollout undo` command. 495 | ``` 496 | $ kubectl rollout undo ds ds-example --to-revision=1 497 | ``` 498 | **Tip:** The `--to-revision` flag can be omitted if you wish to just go back to the previous configuration. 499 | 500 | 4) Immediately watch the Pods. 501 | ``` 502 | $ kubectl get pods --show-labels --watch 503 | ``` 504 | They will cycle through rolling back to the previous revision. 505 | 506 | 5) Describe the DaemonSet `ds-example`. 507 | ``` 508 | $ kubectl describe ds ds-example 509 | ``` 510 | The events will be sparse with a single host, however in an actual multi-node deployment they will describe the status of 511 | updating the DaemonSet cluster wide, cycling through hosts one-by-one. 512 | 513 | --- 514 | 515 | **Summary:** Being able to use the `rollout` command with DaemonSets is important in scenarios where one may have 516 | to quickly go back to a previously known-good version. This becomes even more important for 'infrastructure' like 517 | services such as Pod Networking.
518 | 519 | --- 520 | 521 | **Clean Up Command** 522 | ``` 523 | kubectl delete ds ds-example 524 | ``` 525 | 526 | --- 527 | 528 | [Back to Index](#index) 529 | 530 | --- 531 | --- 532 | 533 | # StatefulSets 534 | The StatefulSet controller is tailored to managing Pods that must persist or maintain state. Pod identity including 535 | hostname, network, and storage can be considered **persistent**. 536 | 537 | They ensure persistence by making use of three things: 538 | * The StatefulSet controller enforcing predictable naming, and ordered provisioning/updating/deletion. 539 | * A headless service to provide a unique network identity. 540 | * A volume template to ensure stable per-instance storage. 541 | --- 542 | 543 | ### Exercise: Managing StatefulSets 544 | **Objective:** Create, update, and delete a `StatefulSet` to gain an understanding of how the StatefulSet lifecycle 545 | differs from other workloads with regards to updating, deleting and the provisioning of storage. 546 | 547 | --- 548 | 549 | 1) Create StatefulSet `sts-example` using the yaml block below or the manifest `manifests/sts-example.yaml`.
550 | 551 | **manifests/sts-example.yaml** 552 | ```yaml 553 | apiVersion: apps/v1 554 | kind: StatefulSet 555 | metadata: 556 | name: sts-example 557 | spec: 558 | replicas: 3 559 | revisionHistoryLimit: 3 560 | selector: 561 | matchLabels: 562 | app: stateful 563 | serviceName: app 564 | updateStrategy: 565 | type: OnDelete 566 | template: 567 | metadata: 568 | labels: 569 | app: stateful 570 | spec: 571 | containers: 572 | - name: nginx 573 | image: nginx:stable-alpine 574 | ports: 575 | - containerPort: 80 576 | volumeMounts: 577 | - name: www 578 | mountPath: /usr/share/nginx/html 579 | volumeClaimTemplates: 580 | - metadata: 581 | name: www 582 | spec: 583 | accessModes: [ "ReadWriteOnce" ] 584 | storageClassName: standard 585 | resources: 586 | requests: 587 | storage: 1Gi 588 | ``` 589 | 590 | **Command** 591 | ``` 592 | $ kubectl create -f manifests/sts-example.yaml 593 | ``` 594 | 595 | 2) Immediately watch the Pods being created. 596 | ``` 597 | $ kubectl get pods --show-labels --watch 598 | ``` 599 | Unlike Deployments or DaemonSets, the Pods of a StatefulSet are created one-by-one, going by their ordinal index. 600 | Meaning, `sts-example-0` will fully be provisioned before `sts-example-1` starts up. Additionally, take notice of 601 | the `controller-revision-hash` label. This serves the same purpose as the `controller-revision-hash` label in a 602 | DaemonSet or the `pod-template-hash` in a Deployment. It provides a means of tracking the revision of the Pod 603 | Template and enables rollback functionality. 604 | 605 | 3) More information on the StatefulSet can be gleaned about the state of the StatefulSet by describing it. 606 | ``` 607 | $ kubectl describe statefulset sts-example 608 | ``` 609 | Within the events, notice that it is creating claims for volumes before each Pod is created. 610 | 611 | 4) View the current Persistent Volume Claims. 
612 | ``` 613 | $ kubectl get pvc 614 | ``` 615 | The StatefulSet controller creates a volume for each instance based off the `volumeClaimTemplate`. It prepends 616 | the volume name to the Pod name. e.g. `www-sts-example-0`. 617 | 618 | 5) Update the StatefulSet's Pod Template and add a few additional labels. 619 | ``` 620 | $ kubectl apply -f manifests/sts-example.yaml --record 621 | < or > 622 | $ kubectl edit statefulset sts-example --record 623 | ``` 624 | 625 | 6) Return to watching the Pods. 626 | ``` 627 | $ kubectl get pods --show-labels 628 | ``` 629 | None of the Pods are being updated to the new version of the Pod. 630 | 631 | 7) Delete the `sts-example-2` Pod. 632 | ``` 633 | $ kubectl delete pod sts-example-2 634 | ``` 635 | 636 | 8) Immediately get the Pods. 637 | ``` 638 | $ kubectl get pods --show-labels --watch 639 | ``` 640 | The new `sts-example-2` Pod should be created with the new additional labels. The `OnDelete` Update Strategy will 641 | not spawn a new iteration of the Pod until the previous one was **deleted**. This allows for manual gating the 642 | update process for the StatefulSet. 643 | 644 | 9) Update the StatefulSet and change the Update Strategy Type to `RollingUpdate`. 645 | ``` 646 | $ kubectl apply -f manifests/sts-example.yaml --record 647 | < or > 648 | $ kubectl edit statefulset sts-example --record 649 | ``` 650 | 651 | 10) Immediately watch the Pods once again. 652 | ``` 653 | $ kubectl get pods --show-labels --watch 654 | ``` 655 | Note that the Pods are sequentially updated in descending order, or largest to smallest based on the 656 | Pod's ordinal index. This means that if `sts-example-2` was not updated already, it would be updated first, then 657 | `sts-example-1` and finally `sts-example-0`. 658 | 659 | 11) Delete the StatefulSet `sts-example` 660 | ``` 661 | $ kubectl delete statefulset sts-example 662 | ``` 663 | 664 | 12) View the Persistent Volume Claims. 
665 | ``` 666 | $ kubectl get pvc 667 | ``` 668 | Created PVCs are **NOT** garbage collected automatically when a StatefulSet is deleted. They must be reclaimed 669 | independently of the StatefulSet itself. 670 | 671 | 13) Recreate the StatefulSet using the same manifest. 672 | ``` 673 | $ kubectl create -f manifests/sts-example.yaml --record 674 | ``` 675 | 676 | 14) View the Persistent Volume Claims again. 677 | ``` 678 | $ kubectl get pvc 679 | ``` 680 | Note that new PVCs were **NOT** provisioned. The StatefulSet controller assumes if the matching name is present, 681 | that PVC is intended to be used for the associated Pod. 682 | 683 | --- 684 | 685 | **Summary:** Like many applications where state must be taken into account, the planning and usage of StatefulSets 686 | requires forethought. The consistency brought by standard naming, ordered updates/deletes and templated storage 687 | does however make this task easier. 688 | 689 | --- 690 | 691 | ### Exercise: Understanding StatefulSet Network Identity 692 | 693 | **Objective:** Create a _"headless service"_ or a service without a `ClusterIP` (`ClusterIP=None`) for use with the 694 | StatefulSet `sts-example`, then explore how this enables consistent service discovery. 695 | 696 | --- 697 | 698 | 1) Create the headless service `app` using the `app=stateful` selector from the yaml below or the manifest 699 | `manifests/service-sts-example.yaml`. 
700 | 701 | **manifests/service-sts-example.yaml** 702 | ```yaml 703 | apiVersion: v1 704 | kind: Service 705 | metadata: 706 | name: app 707 | spec: 708 | clusterIP: None 709 | selector: 710 | app: stateful 711 | ports: 712 | - protocol: TCP 713 | port: 80 714 | targetPort: 80 715 | ``` 716 | 717 | **Command** 718 | ``` 719 | $ kubectl create -f manifests/service-sts-example.yaml 720 | ``` 721 | 722 | 2) Describe the newly created service. 723 | ``` 724 | $ kubectl describe svc app 725 | ``` 726 | Notice that it does not have a `clusterIP`, but does have the Pod Endpoints listed. Headless services are unique 727 | in this behavior. 728 | 729 | 3) Query the DNS entry for the `app` service. 730 | ``` 731 | $ kubectl exec sts-example-0 -- nslookup app.default.svc.cluster.local 732 | ``` 733 | An A record will have been returned for each instance of the StatefulSet. Querying the service directly will do 734 | simple DNS round-robin load-balancing. 735 | 736 | 4) Finally, query one of the instances directly. 737 | ``` 738 | $ kubectl exec sts-example-0 -- nslookup sts-example-1.app.default.svc.cluster.local 739 | ``` 740 | This is a unique feature to StatefulSets. This allows for services to directly interact with a specific instance 741 | of a Pod. If the Pod is updated and obtains a new IP, the DNS record will immediately point to it enabling consistent 742 | service discovery. 743 | 744 | --- 745 | 746 | **Summary:** StatefulSet service discovery is unique within Kubernetes in that it augments a headless service 747 | (A service without a unique `ClusterIP`) to provide a consistent mapping to the individual Pods. These mappings 748 | take the form of an A record in the format of: `<StatefulSet name>-<ordinal>.<service name>.<namespace>.svc.cluster.local` 749 | and can be used consistently throughout other Workloads.
750 | 751 | --- 752 | 753 | **Clean Up Command** 754 | ``` 755 | kubectl delete svc app 756 | kubectl delete statefulset sts-example 757 | kubectl delete pvc www-sts-example-0 www-sts-example-1 www-sts-example-2 758 | ``` 759 | 760 | --- 761 | 762 | [Back to Index](#index) 763 | 764 | --- 765 | --- 766 | 767 | # Jobs and CronJobs 768 | The Job Controller ensures one or more Pods are executed and successfully terminate. Essentially a task executor 769 | that can be run in parallel. 770 | 771 | CronJobs are an extension of the Job Controller, and enable Jobs to be run on a schedule. 772 | 773 | --- 774 | 775 | ### Exercise: Creating a Job 776 | **Objective:** Create a Kubernetes `Job` and work to understand how the Pods are managed with `completions` and 777 | `parallelism` directives. 778 | 779 | --- 780 | 781 | 1) Create job `job-example` using the yaml below, or the manifest located at `manifests/job-example.yaml` 782 | 783 | **manifests/job-example.yaml** 784 | ```yaml 785 | apiVersion: batch/v1 786 | kind: Job 787 | metadata: 788 | name: job-example 789 | spec: 790 | backoffLimit: 4 791 | completions: 4 792 | parallelism: 2 793 | template: 794 | spec: 795 | containers: 796 | - name: hello 797 | image: alpine:latest 798 | command: ["/bin/sh", "-c"] 799 | args: ["echo hello from $HOSTNAME!"] 800 | restartPolicy: Never 801 | ``` 802 | 803 | **Command** 804 | ``` 805 | $ kubectl create -f manifests/job-example.yaml 806 | ``` 807 | 808 | 2) Watch the Pods as they are being created. 809 | ``` 810 | $ kubectl get pods --show-labels --watch 811 | ``` 812 | Only two Pods are being provisioned at a time; adhering to the `parallelism` attribute. This is done until the total 813 | number of `completions` is satisfied. Additionally, the Pods are labeled with `controller-uid`, this acts as a 814 | unique ID for that specific Job. 815 | 816 | When done, the Pods persist in a `Completed` state. They are not deleted after the Job is completed or failed. 
817 | This is intentional to better support troubleshooting. 818 | 819 | 3) A summary of these events can be seen by describing the Job itself. 820 | ``` 821 | $ kubectl describe job job-example 822 | ``` 823 | 824 | 4) Delete the job. 825 | ``` 826 | $ kubectl delete job job-example 827 | ``` 828 | 829 | 5) View the Pods once more. 830 | ``` 831 | $ kubectl get pods 832 | ``` 833 | The Pods will now be deleted. They are cleaned up when the Job itself is removed. 834 | 835 | --- 836 | 837 | **Summary:** Jobs are fire and forget one off tasks, batch processing or as an executor for a workflow engine. 838 | They _"run to completion"_ or terminate gracefully adhering to the `completions` and `parallelism` directives. 839 | 840 | --- 841 | 842 | ### Exercise: Scheduling a CronJob 843 | **Objective:** Create a CronJob based off a Job Template. Understand how the Jobs are generated and how to suspend 844 | a job in the event of a problem. 845 | 846 | --- 847 | 848 | 1) Create CronJob `cronjob-example` based off the yaml below, or use the manifest `manifests/cronjob-example.yaml` 849 | It is configured to run the Job from the earlier example every minute, using the cron schedule `"*/1 * * * *"`. 850 | This schedule is **UTC ONLY**. 851 | 852 | **manifests/cronjob-example.yaml** 853 | ```yaml 854 | apiVersion: batch/v1 855 | kind: CronJob 856 | metadata: 857 | name: cronjob-example 858 | spec: 859 | schedule: "*/1 * * * *" 860 | successfulJobsHistoryLimit: 2 861 | failedJobsHistoryLimit: 1 862 | jobTemplate: 863 | spec: 864 | completions: 4 865 | parallelism: 2 866 | template: 867 | spec: 868 | containers: 869 | - name: hello 870 | image: alpine:latest 871 | command: ["/bin/sh", "-c"] 872 | args: ["echo hello from $HOSTNAME!"] 873 | restartPolicy: Never 874 | ``` 875 | 876 | **Command** 877 | ``` 878 | $ kubectl create -f manifests/cronjob-example.yaml 879 | ``` 880 | 881 | 2) Give it some time to run, and then list the Jobs. 
882 | ``` 883 | $ kubectl get jobs 884 | ``` 885 | There should be at least one Job named in the format `<cronjob name>-<timestamp>`. Note the timestamp of 886 | the oldest Job. 887 | 888 | 3) Give it a few minutes and list the Jobs once again. 889 | ``` 890 | $ kubectl get jobs 891 | ``` 892 | The oldest Job should have been removed. The CronJob controller will purge Jobs according to the 893 | `successfulJobsHistoryLimit` and `failedJobsHistoryLimit` attributes. In this case, it is retaining strictly the 894 | last 2 successful Jobs. 895 | 896 | 4) Describe the CronJob `cronjob-example` 897 | ``` 898 | $ kubectl describe CronJob cronjob-example 899 | ``` 900 | The events will show the records of the creation and deletion of the Jobs. 901 | 902 | 5) Edit the CronJob `cronjob-example` and locate the `Suspend` field. Then set it to true. 903 | ``` 904 | $ kubectl edit CronJob cronjob-example 905 | ``` 906 | This will prevent the cronjob from firing off any future events, and is useful to do to initially troubleshoot 907 | an issue without having to delete the CronJob directly. 908 | 909 | 910 | 6) Delete the CronJob 911 | ``` 912 | $ kubectl delete cronjob cronjob-example 913 | ``` 914 | Deleting the CronJob **WILL** delete all child Jobs. Use `Suspend` to _'stop'_ the Job temporarily if attempting 915 | to troubleshoot. 916 | 917 | --- 918 | 919 | **Summary:** CronJobs are a useful extension of Jobs. They are great for backup or other day-to-day tasks, with the 920 | only caveat being they adhere to a **UTC ONLY** schedule.
921 | 922 | --- 923 | 924 | **Clean Up Commands** 925 | ``` 926 | kubectl delete CronJob cronjob-example 927 | ``` 928 | 929 | --- 930 | 931 | [Back to Index](#index) 932 | 933 | --- 934 | --- 935 | 936 | # Helpful Resources 937 | 938 | * [Deployment Overview](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) 939 | * [DaemonSet Overview](https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/) 940 | * [StatefulSet Basics](https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/) 941 | * [StatefulSet Overview](https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/) 942 | * [Job Overview](https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/) 943 | 944 | --- 945 | 946 | [Back to Index](#index) 947 | -------------------------------------------------------------------------------- /workloads/manifests/cronjob-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: CronJob 3 | metadata: 4 | name: cronjob-example 5 | spec: 6 | schedule: "*/1 * * * *" 7 | successfulJobsHistoryLimit: 2 8 | failedJobsHistoryLimit: 1 9 | jobTemplate: 10 | spec: 11 | completions: 4 12 | parallelism: 2 13 | template: 14 | spec: 15 | containers: 16 | - name: hello 17 | image: alpine:latest 18 | command: ["/bin/sh", "-c"] 19 | args: ["echo hello from $HOSTNAME!"] 20 | restartPolicy: Never 21 | -------------------------------------------------------------------------------- /workloads/manifests/deploy-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: deploy-example 5 | spec: 6 | replicas: 3 7 | revisionHistoryLimit: 3 8 | selector: 9 | matchLabels: 10 | app: nginx 11 | strategy: 12 | type: RollingUpdate 13 | rollingUpdate: 14 | maxSurge: 1 15 | maxUnavailable: 0 16 | template: 17 | metadata: 18 | labels: 19 | app: 
nginx 20 | spec: 21 | containers: 22 | - name: nginx 23 | image: nginx:stable-alpine 24 | ports: 25 | - containerPort: 80 26 | 27 | 28 | -------------------------------------------------------------------------------- /workloads/manifests/ds-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: DaemonSet 3 | metadata: 4 | name: ds-example 5 | spec: 6 | revisionHistoryLimit: 3 7 | selector: 8 | matchLabels: 9 | app: nginx 10 | template: 11 | metadata: 12 | labels: 13 | app: nginx 14 | spec: 15 | nodeSelector: 16 | nodeType: edge 17 | containers: 18 | - name: nginx 19 | image: nginx:stable-alpine 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /workloads/manifests/job-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: batch/v1 2 | kind: Job 3 | metadata: 4 | name: job-example 5 | spec: 6 | backoffLimit: 4 7 | completions: 4 8 | parallelism: 2 9 | template: 10 | spec: 11 | containers: 12 | - name: hello 13 | image: alpine:latest 14 | command: ["/bin/sh", "-c"] 15 | args: ["echo hello from $HOSTNAME!"] 16 | restartPolicy: Never 17 | -------------------------------------------------------------------------------- /workloads/manifests/pod-rs-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: pod-example 5 | labels: 6 | app: nginx 7 | env: prod 8 | spec: 9 | containers: 10 | - name: nginx 11 | image: nginx:stable-alpine 12 | ports: 13 | - containerPort: 80 14 | -------------------------------------------------------------------------------- /workloads/manifests/rs-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: rs-example 5 | spec: 6 | replicas: 3 7 | 
selector: 8 | matchLabels: 9 | app: nginx 10 | env: prod 11 | template: 12 | metadata: 13 | labels: 14 | app: nginx 15 | env: prod 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:stable-alpine 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /workloads/manifests/service-sts-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: app 5 | spec: 6 | clusterIP: None 7 | selector: 8 | app: stateful 9 | ports: 10 | - protocol: TCP 11 | port: 80 12 | targetPort: 80 13 | -------------------------------------------------------------------------------- /workloads/manifests/sts-example.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: sts-example 5 | spec: 6 | replicas: 3 7 | revisionHistoryLimit: 3 8 | selector: 9 | matchLabels: 10 | app: stateful 11 | serviceName: app 12 | updateStrategy: 13 | type: OnDelete 14 | template: 15 | metadata: 16 | labels: 17 | app: stateful 18 | spec: 19 | containers: 20 | - name: nginx 21 | image: nginx:stable-alpine 22 | ports: 23 | - containerPort: 80 24 | volumeMounts: 25 | - name: www 26 | mountPath: /usr/share/nginx/html 27 | volumeClaimTemplates: 28 | - metadata: 29 | name: www 30 | spec: 31 | accessModes: [ "ReadWriteOnce" ] 32 | storageClassName: standard 33 | resources: 34 | requests: 35 | storage: 1Gi 36 | --------------------------------------------------------------------------------