├── .eslintrc.json ├── .github └── workflows │ └── gatsby.yml ├── .gitignore ├── .prettierrc ├── LICENSE.md ├── README.md ├── csv.js ├── gatsby-config.js ├── gatsby-node.js ├── lessons ├── alpine.md ├── bind-mounts.md ├── build-a-nodejs-app.md ├── buildah.md ├── cgroups.md ├── chroot.md ├── conclusion.md ├── dev-containers.md ├── docker-cli.md ├── docker-compose.md ├── docker-images-with-docker.md ├── docker-images-without-docker.md ├── docker.md ├── dockerfile.md ├── expose.md ├── images │ ├── FrontendMastersLogo.png │ ├── brian.jpg │ └── logo.svg ├── intro.md ├── kompose.md ├── kubernetes.md ├── layers.md ├── making-our-own-alpine-nodejs-container.md ├── more-complicated-nodejs-app.md ├── multi-stage-builds.md ├── namespaces.md ├── networking.md ├── nodejs-on-docker.md ├── podman.md ├── static-assets-project.md ├── tags.md ├── visual-studio-code.md ├── volumes.md └── what-are-containers.md ├── package-lock.json ├── package.json ├── src ├── components │ ├── TOCCard.css │ └── TOCCard.js ├── layouts │ ├── index.css │ └── index.js ├── pages │ ├── 404.js │ ├── index.css │ └── index.js └── templates │ └── lessonTemplate.js └── static └── posterframe.jpg /.eslintrc.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": [ 3 | "eslint:recommended", 4 | "plugin:import/errors", 5 | "plugin:react/recommended", 6 | "plugin:jsx-a11y/recommended", 7 | "prettier", 8 | "prettier/react" 9 | ], 10 | "rules": { 11 | "react/prop-types": 0, 12 | "jsx-a11y/label-has-for": 0, 13 | "no-console": 1 14 | }, 15 | "plugins": ["react", "import", "jsx-a11y"], 16 | "parser": "babel-eslint", 17 | "parserOptions": { 18 | "ecmaVersion": 2018, 19 | "sourceType": "module", 20 | "ecmaFeatures": { 21 | "jsx": true 22 | } 23 | }, 24 | "env": { 25 | "es6": true, 26 | "browser": true, 27 | "node": true 28 | }, 29 | "settings": { 30 | "react": { 31 | "version": "16.5.2" 32 | } 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /.github/workflows/gatsby.yml: -------------------------------------------------------------------------------- 1 | name: Deploy Gatsby Site to GitHub Pages 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | deploy: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@master 13 | - name: npm install and build 14 | run: | 15 | npm install 16 | npm run build 17 | - name: Deploy 🚀 18 | uses: JamesIves/github-pages-deploy-action@4.1.5 19 | with: 20 | branch: gh-pages 21 | folder: public 22 | token: ${{ secrets.GITHUB_TOKEN }} 23 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Project dependencies 2 | # https://www.npmjs.org/doc/misc/npm-faq.html#should-i-check-my-node_modules-folder-into-git 3 | node_modules 4 | .cache/ 5 | # Build directory 6 | public/ 7 | .DS_Store 8 | yarn-error.log 9 | .vscode 10 | notes.md 11 | -------------------------------------------------------------------------------- /.prettierrc: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | ## creative commons 2 | 3 | # Attribution-NonCommercial 4.0 International 4 | 5 | Creative Commons Corporation (“Creative Commons”) is not a law firm and does not provide 
legal services or legal advice. Distribution of Creative Commons public licenses does not create a lawyer-client or other relationship. Creative Commons makes its licenses and related information available on an “as-is” basis. Creative Commons gives no warranties regarding its licenses, any material licensed under their terms and conditions, or any related information. Creative Commons disclaims all liability for damages resulting from their use to the fullest extent possible. 6 | 7 | ### Using Creative Commons Public Licenses 8 | 9 | Creative Commons public licenses provide a standard set of terms and conditions that creators and other rights holders may use to share original works of authorship and other material subject to copyright and certain other rights specified in the public license below. The following considerations are for informational purposes only, are not exhaustive, and do not form part of our licenses. 10 | 11 | * __Considerations for licensors:__ Our public licenses are intended for use by those authorized to give the public permission to use material in ways otherwise restricted by copyright and certain other rights. Our licenses are irrevocable. Licensors should read and understand the terms and conditions of the license they choose before applying it. Licensors should also secure all rights necessary before applying our licenses so that the public can reuse the material as expected. Licensors should clearly mark any material not subject to the license. This includes other CC-licensed material, or material used under an exception or limitation to copyright. [More considerations for licensors](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensors). 12 | 13 | * __Considerations for the public:__ By using one of our public licenses, a licensor grants the public permission to use the licensed material under specified terms and conditions. If the licensor’s permission is not necessary for any reason–for example, because of any applicable exception or limitation to copyright–then that use is not regulated by the license. Our licenses grant only permissions under copyright and certain other rights that a licensor has authority to grant. Use of the licensed material may still be restricted for other reasons, including because others have copyright or other rights in the material. A licensor may make special requests, such as asking that all changes be marked or described. Although not required by our licenses, you are encouraged to respect those requests where reasonable. [More considerations for the public](http://wiki.creativecommons.org/Considerations_for_licensors_and_licensees#Considerations_for_licensees). 14 | 15 | ## Creative Commons Attribution-NonCommercial 4.0 International Public License 16 | 17 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-NonCommercial 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions. 18 | 19 | ### Section 1 – Definitions. 20 | 21 | a. 
__Adapted Material__ means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image. 22 | 23 | b. __Adapter's License__ means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License. 24 | 25 | c. __Copyright and Similar Rights__ means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights. 26 | 27 | d. __Effective Technological Measures__ means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements. 28 | 29 | e. __Exceptions and Limitations__ means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material. 30 | 31 | f. __Licensed Material__ means the artistic or literary work, database, or other material to which the Licensor applied this Public License. 32 | 33 | g. __Licensed Rights__ means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license. 34 | 35 | h. __Licensor__ means the individual(s) or entity(ies) granting rights under this Public License. 36 | 37 | i. __NonCommercial__ means not primarily intended for or directed towards commercial advantage or monetary compensation. For purposes of this Public License, the exchange of the Licensed Material for other material subject to Copyright and Similar Rights by digital file-sharing or similar means is NonCommercial provided there is no payment of monetary compensation in connection with the exchange. 38 | 39 | j. __Share__ means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them. 40 | 41 | k. __Sui Generis Database Rights__ means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world. 42 | 43 | l. __You__ means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning. 44 | 45 | ### Section 2 – Scope. 46 | 47 | a. 
___License grant.___ 48 | 49 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to: 50 | 51 | A. reproduce and Share the Licensed Material, in whole or in part, for NonCommercial purposes only; and 52 | 53 | B. produce, reproduce, and Share Adapted Material for NonCommercial purposes only. 54 | 55 | 2. __Exceptions and Limitations.__ For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions. 56 | 57 | 3. __Term.__ The term of this Public License is specified in Section 6(a). 58 | 59 | 4. __Media and formats; technical modifications allowed.__ The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material. 60 | 61 | 5. __Downstream recipients.__ 62 | 63 | A. __Offer from the Licensor – Licensed Material.__ Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License. 64 | 65 | B. __No downstream restrictions.__ You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material. 66 | 67 | 6. __No endorsement.__ Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i). 68 | 69 | b. ___Other rights.___ 70 | 71 | 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise. 72 | 73 | 2. Patent and trademark rights are not licensed under this Public License. 74 | 75 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties, including when the Licensed Material is used other than for NonCommercial purposes. 76 | 77 | ### Section 3 – License Conditions. 78 | 79 | Your exercise of the Licensed Rights is expressly made subject to the following conditions. 80 | 81 | a. ___Attribution.___ 82 | 83 | 1. 
If You Share the Licensed Material (including in modified form), You must: 84 | 85 | A. retain the following if it is supplied by the Licensor with the Licensed Material: 86 | 87 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated); 88 | 89 | ii. a copyright notice; 90 | 91 | iii. a notice that refers to this Public License; 92 | 93 | iv. a notice that refers to the disclaimer of warranties; 94 | 95 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable; 96 | 97 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and 98 | 99 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License. 100 | 101 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information. 102 | 103 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable. 104 | 105 | 4. If You Share Adapted Material You produce, the Adapter's License You apply must not prevent recipients of the Adapted Material from complying with this Public License. 106 | 107 | ### Section 4 – Sui Generis Database Rights. 108 | 109 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material: 110 | 111 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database for NonCommercial purposes only; 112 | 113 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material; and 114 | 115 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database. 116 | 117 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights. 118 | 119 | ### Section 5 – Disclaimer of Warranties and Limitation of Liability. 120 | 121 | a. __Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.__ 122 | 123 | b. 
__To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.__ 124 | 125 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability. 126 | 127 | ### Section 6 – Term and Termination. 128 | 129 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically. 130 | 131 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates: 132 | 133 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or 134 | 135 | 2. upon express reinstatement by the Licensor. 136 | 137 | For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License. 138 | 139 | c. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License. 140 | 141 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public License. 142 | 143 | ### Section 7 – Other Terms and Conditions. 144 | 145 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed. 146 | 147 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License. 148 | 149 | ### Section 8 – Interpretation. 150 | 151 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License. 152 | 153 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions. 154 | 155 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor. 156 | 157 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority. 158 | 159 | > Creative Commons is not a party to its public licenses. 
Notwithstanding, Creative Commons may elect to apply one of its public licenses to material it publishes and in those instances will be considered the “Licensor.” Except for the limited purpose of indicating that material is shared under a Creative Commons public license or as otherwise permitted by the Creative Commons policies published at [creativecommons.org/policies](http://creativecommons.org/policies), Creative Commons does not authorize the use of the trademark “Creative Commons” or any other trademark or logo of Creative Commons without its prior written consent including, without limitation, in connection with any unauthorized modifications to any of its public licenses or any other arrangements, understandings, or agreements concerning use of licensed material. For the avoidance of doubt, this paragraph does not form part of the public licenses. 160 | > 161 | > Creative Commons may be contacted at creativecommons.org 162 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [Complete Intro to Containers (feat. Docker)][course] 2 | 3 | [![Frontend Masters](/lessons/images/FrontendMastersLogo.png)][fem] 4 | 5 | - [Please click here][website] to head to the course website. 6 | - See the [course on Frontend Masters][course] 7 | 8 | # Issues and Pull Requests 9 | 10 | Please file issues and open pull requests here! Thank you! 11 | 12 | # Getting Set Up 13 | 14 | ## For Everyone 15 | 16 | [Install Visual Studio Code](https://code.visualstudio.com). For one section of the course I'll go over some of how VSCode and containers work well together. The rest of the course you can use whatever editor you want. 17 | 18 | ## For macOS and Linux 19 | 20 | Please make sure you have the following things installed and ready to go! 21 | 22 | - For Mac: [Docker Desktop Community](https://www.docker.com/products/docker-desktop) 23 | - For Linux: [Docker Engine Community](https://docs.docker.com/install/linux/docker-ce/ubuntu/) 24 | 25 | ## For Windows 26 | 27 | If **you have Windows 10 Professional** (it doesn't work in Home), try [WSL 2](https://docs.microsoft.com/en-us/windows/wsl/wsl2-install) and [Docker for WSL 2](https://docs.docker.com/docker-for-windows/wsl-tech-preview/). WSL stands for Window Subsystem for Linux. It allows you to run Linux within Windows. That's what I'll be using. WSL 2 works faster than WSL1 but it's harder to set up since it's still in preview. 28 | 29 | If you do not have Windows 10 Professional and you do not want to buy it, [follow this blog post](https://medium.com/@mbyfieldcameron/docker-on-windows-10-home-edition-c186c538dff3) to install VirtualBox and a Linux VM so you can follow inside of Linux. I'd suggest using Ubuntu. 30 | 31 | Or, if you know PowerShell really well and know how to translate bash commands to PowerShell commands, feel free to install [Docker Desktop Community](https://www.docker.com/products/docker-desktop) and do everything from PowerShell (honestly it shouldn't be too bad.) 32 | 33 | ## Verify Docker installation: 34 | 35 | Make sure when you go to a bash prompt and type `docker info` that it outputs system info and doesn't error out. This will let you know that everything is working. 36 | 37 | Once you have Docker up and running, please run the following. This will pull most of the containers you will need up front. 
38 | 39 | ```bash 40 | docker pull ubuntu:bionic 41 | docker pull node:12-stretch 42 | docker pull node:12-alpine 43 | docker pull nginx:1.17 44 | docker pull mongo:3 45 | docker pull jguyomard/hugo-builder:0.55 46 | ``` 47 | 48 | # License 49 | 50 | The content of this workshop is licensed under CC-BY-NC-4.0. Feel free to share freely but do not resell my content. 51 | 52 | The code, including the code of the site itself and the code in the exercises, are licensed under Apache 2.0. 53 | 54 | [website]: https://btholt.github.io/complete-intro-to-containers/ 55 | [fem]: https://www.frontendmasters.com 56 | [course]: https://frontendmasters.com/courses/complete-intro-containers/ -------------------------------------------------------------------------------- /csv.js: -------------------------------------------------------------------------------- 1 | const fs = require("fs").promises; 2 | const path = require("path"); 3 | const fm = require("front-matter"); 4 | const mdDir = process.env.MARKDOWN_DIR || path.join(__dirname, "lessons/"); 5 | const outputPath = 6 | process.env.OUTPUT_FILE || path.join(__dirname, "public/lessons.csv"); 7 | 8 | async function createCsv() { 9 | console.log(`making the markdown files into a CSV from ${mdDir}`); 10 | 11 | // get paths 12 | const allFiles = await fs.readdir(mdDir); 13 | const files = allFiles.filter(filePath => filePath.endsWith(".md")); 14 | 15 | // read paths, get buffers 16 | const buffers = await Promise.all( 17 | files.map(filePath => fs.readFile(path.join(mdDir, filePath))) 18 | ); 19 | 20 | // make buffers strings 21 | const contents = buffers.map(content => content.toString()); 22 | 23 | // make strings objects 24 | let frontmatters = contents.map(fm); 25 | 26 | // find all attribute keys 27 | const seenAttributes = new Set(); 28 | frontmatters.forEach(item => { 29 | Object.keys(item.attributes).forEach(attr => seenAttributes.add(attr)); 30 | }); 31 | const attributes = Array.from(seenAttributes.values()); 32 | 33 | if (attributes.includes("order")) { 34 | frontmatters = frontmatters.sort( 35 | (a, b) => a.attributes.order - b.attributes.order 36 | ); 37 | } 38 | 39 | // get all data into an array 40 | let rows = frontmatters.map(item => { 41 | const row = attributes.map(attr => 42 | item.attributes[attr] ? 
JSON.stringify(item.attributes[attr]) : "" 43 | ); 44 | return row; 45 | }); 46 | 47 | // header row must be first row 48 | rows.unshift(attributes); 49 | 50 | // join into CSV string 51 | const csv = rows.map(row => row.join(",")).join("\n"); 52 | 53 | // write file out 54 | await fs.writeFile(outputPath, csv); 55 | 56 | console.log(`Wrote ${rows.length} rows to ${outputPath}`); 57 | } 58 | 59 | createCsv(); 60 | -------------------------------------------------------------------------------- /gatsby-config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | siteMetadata: { 3 | title: "Complete Intro to Containers", 4 | subtitle: "🐳", 5 | description: "A complete intro to Linux containers for developers", 6 | keywords: [ 7 | "linux", 8 | "containers", 9 | "javascript", 10 | "node", 11 | "brian holt", 12 | "frontend masters", 13 | "docker", 14 | "nodejs" 15 | ] 16 | }, 17 | pathPrefix: "/complete-intro-to-containers", 18 | plugins: [ 19 | `gatsby-plugin-sharp`, 20 | `gatsby-plugin-layout`, 21 | { 22 | resolve: `gatsby-source-filesystem`, 23 | options: { 24 | path: `${__dirname}/lessons`, 25 | name: "markdown-pages" 26 | } 27 | }, 28 | `gatsby-plugin-react-helmet`, 29 | { 30 | resolve: `gatsby-transformer-remark`, 31 | options: { 32 | plugins: [ 33 | `gatsby-remark-autolink-headers`, 34 | `gatsby-remark-copy-linked-files`, 35 | `gatsby-remark-prismjs`, 36 | { 37 | resolve: `gatsby-remark-images`, 38 | options: { 39 | maxWidth: 800, 40 | linkImagesToOriginal: true, 41 | sizeByPixelDensity: false 42 | } 43 | } 44 | ] 45 | } 46 | } 47 | ] 48 | }; 49 | -------------------------------------------------------------------------------- /gatsby-node.js: -------------------------------------------------------------------------------- 1 | const path = require("path"); 2 | 3 | exports.createPages = ({ actions, graphql }) => { 4 | const { createPage } = actions; 5 | 6 | const lessonTemplate = path.resolve(`src/templates/lessonTemplate.js`); 7 | 8 | return graphql(` 9 | { 10 | allMarkdownRemark( 11 | sort: { order: DESC, fields: [frontmatter___order] } 12 | limit: 1000 13 | ) { 14 | edges { 15 | node { 16 | excerpt(pruneLength: 250) 17 | html 18 | id 19 | frontmatter { 20 | order 21 | path 22 | title 23 | } 24 | } 25 | } 26 | } 27 | } 28 | `).then(result => { 29 | if (result.errors) { 30 | return Promise.reject(result.errors); 31 | } 32 | 33 | result.data.allMarkdownRemark.edges.forEach(({ node }) => { 34 | createPage({ 35 | path: node.frontmatter.path, 36 | component: lessonTemplate 37 | }); 38 | }); 39 | }); 40 | }; 41 | -------------------------------------------------------------------------------- /lessons/alpine.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Alpine Linux" 3 | path: "/alpine-linux" 4 | order: 5.0 5 | section: "Making Tiny Containers" 6 | description: "Tiny containers make a lot of things easier and more secure. In this section Brian shows us how to go from a nearly gigabyte-sized Ubuntu container to a 80MB Alpine container with no functionality loss and more secure to boot." 7 | --- 8 | 9 | We've now built a nice little container for our Node.js app and we absolutely could ship it as-is to production. However there's a few things we can do to make things even faster, cheaper, and more secure. 10 | 11 | ## Making your containers smaller 12 | 13 | Making your containers smaller is a good thing for a few reasons. For one, everything tends to get a bit cheaper. 
Moving containers across the Internet takes time and bits to do. If you can make those containers smaller, things will go faster and you'll require less space on your servers. Often private container registries (like a personal Docker Hub or Azure Container Registry) charge you by how much storage you're using.

Beyond that, having fewer _things_ in your container means you're less susceptible to bugs. Let's say there's a Python exploit going around that allows hackers to get root access to your container. If you don't have Python in your container, you're not vulnerable! And obviously if you do have Python installed (even if you're not using it) you're vulnerable. So let's see how to make your container a bit smaller.

In your previous Dockerfile, change the first line (`FROM`) to use the Alpine variant of the Node.js image:

```dockerfile
FROM node:12-alpine

USER node

RUN mkdir /home/node/code

WORKDIR /home/node/code

COPY --chown=node:node package-lock.json package.json ./

RUN npm ci

COPY --chown=node:node . .

CMD ["node", "index.js"]
```

Our image size (compare the `"Size"` field in `docker inspect my-app`) drops from 913MB to 86MB just like that. We shed quite a bit of cruft that we didn't need in Ubuntu and we didn't even need to change anything in our Dockerfile. Honestly, that's unusual: when you strip _everything_ out, typically you'll have to go back and add some of it back in. But in this case we're golden!

Alpine, if you remember, is a bare-bones alternative to Ubuntu. It's built on BusyBox Linux, which is a 2MB distro of Linux (Alpine is 5MB). `node:12-alpine` itself is about 80MB and `node:latest` is about 908MB.

When should you select Alpine? My general feeling (this is a Brian Holt opinion, not a community one, so take it with a grain of salt) is that the "end destination" container is where Alpine is the best. It cuts all the cruft out, which is super helpful for end-deployment sorts of scenarios due to security and size, but it can also be annoying for development scenarios because it lacks just about everything necessary for those, making you hand-install everything you need. In these "middle scenarios" where it's not really the destination and the container is just another tool in your development system (whether that's a multi-stage build or a development container) I'll reach for Ubuntu or Debian.

--------------------------------------------------------------------------------
/lessons/bind-mounts.md:
--------------------------------------------------------------------------------

---
title: "Bind Mounts"
path: "/bind-mounts"
order: 6.0
section: "Features in Docker"
description: "Let's start here because this is easier to see the use case for. Bind mounts allow you to mount files from your host computer into your container. This allows you to use containers in a much more flexible way than previously possible."
---

So far we've been dealing with self-contained containers. Normally this is all you ever want: containers that can spin up and spin down as frequently as they need to. They're ephemeral, temporary, and disposable. None of these containers are "snowflakes". When I say snowflakes, picture you're running a server that's serving a WordPress site. Imagine setting up this server, SSH'ing into it, and setting everything up to be just right and tuned to the exact way you need it.
This would be a snowflake server: if someone goes and deletes it, you're screwed. You have to go and spend a bunch of time re-setting it up. This is exactly the sort of thing we're trying to avoid with containers. We want to make our servers easy to reproduce whenever we want so we can spin up and spin down servers at will.

However not everything can fit neatly into a container all the time. Sometimes our containers need to be stateful in some capacity. Sometimes our containers need to read and write to the host. This is fundamentally at odds with the idea of a stateless, able-to-create-and-destroy-anytime container that we've been adhering to thus far. So what are we to do?

Enter volumes and bind mounts. Both of these are methods of reading and writing to the host, but with slight-but-important differences of when to use which. We'll go over both.

## Bind Mounts

Let's start here because this is easier to see the use case for. Bind mounts allow you to mount files from your host computer into your container. This allows you to use containers in a much more flexible way than previously possible: you don't have to know what files the container will have _when you build it_; you can determine those files _when you run it_.

Let's go over an example of how this could be useful.

In the previous project, we used the NGINX container to build a container with our static assets baked into it. In general this is what I recommend you do, since we can then ship that container anywhere and it'll just work. It's totally self-contained. But what if we just want to run an NGINX container locally to test stuff out? Sure, we could make a new Dockerfile and write it, but wouldn't it be cool if we could just use the NGINX container directly? We can! Let's try it. Go back to your static site project from the previous lesson. Let's use the `nginx` container to serve directly from it.

```bash
# from the root directory of your CRA app
docker run --mount type=bind,source="$(pwd)"/build,target=/usr/share/nginx/html -p 8080:80 nginx
```

This is how you do bind mounts. It's a bit verbose but necessary. Let's dissect it.

- We use the `--mount` flag to identify we're going to be mounting something in from the host.
- As far as I know the only two types are `bind` and `volume`. Here we're using bind because we want to mount in some piece of already-existing data from the host.
- In the source, we identify what part of the host we want to make readable-and-writable to the container. It has to be an absolute path (e.g. we can't say `"./build"`) which is why we use `"$(pwd)"` to get the **p**resent **w**orking **d**irectory to make it an absolute path.
- The target is where we want those files to be mounted in the container. Here we're putting it in the spot that NGINX is expecting.
- As a side note, you can mount as many mounts as you care to, and you can mix bind and volume mounts. NGINX has a default config that we're using, but if we used another bind mount to mount an NGINX config to `/etc/nginx/nginx.conf` it would use that instead (there's a sketch of that below).

Again, it's preferable to bake your own container so you don't have to ship the container and the code separately; you'd rather just ship one thing that you can run without much ritual nor ceremony. But this is a useful trick to have in your pocket.
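For reference, overriding that default config could look something like this. This is a sketch, not part of the course project: it assumes you've written a custom `nginx.conf` in your current directory (the file name and its contents are hypothetical).

```bash
# mount the static assets and a custom NGINX config (the config read-only)
docker run \
  --mount type=bind,source="$(pwd)"/build,target=/usr/share/nginx/html \
  --mount type=bind,source="$(pwd)"/nginx.conf,target=/etc/nginx/nginx.conf,readonly \
  -p 8080:80 nginx
```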
[storage]: https://docs.docker.com/storage/

--------------------------------------------------------------------------------
/lessons/build-a-nodejs-app.md:
--------------------------------------------------------------------------------

---
title: "Build a Node.js App"
path: "/build-a-nodejs-app"
order: 4.1
section: "The Dockerfile"
description: "In order to understand Dockerfiles better, Brian shows how to build a Node.js application inside of a container and how to write a proper Dockerfile for a Node.js app."
---

So now let's dig into some more advanced things you can do with a Dockerfile. Let's first make our project a real Node.js application. Make a file called `index.js` and put this in there.

```javascript
const http = require("http");

http
  .createServer(function(request, response) {
    console.log("request received");
    response.end("omg hi", "utf-8");
  })
  .listen(3000);
console.log("server started");
```

This is more-or-less the most barebones Node.js app you can write. It just responds to HTTP traffic on port 3000. Go ahead and try running it on your local computer (outside of Docker) by running `node index.js`. Hit [localhost:3000][localhost] to give it a shot.

Okay, so let's get this running _inside_ Docker now. First thing is we have to copy this file from your local file system into the container. We'll use a new instruction, `COPY`. Modify your Dockerfile to say:

```dockerfile
FROM node:12-stretch

COPY index.js index.js

CMD ["node", "index.js"]
```

This will copy your index.js file from your file system into the Docker file system (the first index.js is the source and the second index.js is the destination of that file inside the container.)

We then modified the `CMD` to start the server when we finally do run the container. Now run

```bash
docker build -t my-node-app .
docker run my-node-app
```

Now your Node.js app is running inside of a container managed by Docker! Hooray! But one problem: how do we access it? If you open [localhost:3000][localhost] now, it doesn't work! We have to tell Docker to expose the port. So let's do that now: stop your container and run it again with the port published.

Speaking of stopping your container: your normal CTRL+C won't work. Node.js itself doesn't handle SIGINT (which is what CTRL+C sends) in and of itself. Instead you either have to handle it yourself inside of your Node.js code (preferable for real apps; there's a sketch of that below) or you can tell Docker to handle it with the `--init` flag. This uses a package called [tini][tini] to handle shutdown signals for you.

```bash
docker run --init --publish 3000:3000 my-node-app # or you can use -p instead of --publish
```

The `--publish` part allows you to forward a port out of a container to the host computer. In this case we're forwarding port `3000` (which is what the Node.js server is listening on) to port `3000` on the host machine. The first `3000` is the port on the host machine and the second `3000` is the port being used in the container. If you did `docker run --publish 8000:3000 my-node-app`, you'd open `localhost:8000` to see the server (still running on port `3000` inside the container).
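As promised, here's a minimal sketch of what handling the shutdown signals yourself might look like. It's hypothetical, not part of the course code, but it uses only standard Node.js APIs:

```javascript
const http = require("http");

const server = http
  .createServer(function(request, response) {
    response.end("omg hi", "utf-8");
  })
  .listen(3000);

// once we register our own handlers, CTRL+C (SIGINT) and
// `docker stop` (SIGTERM) shut the server down gracefully
function shutdown() {
  console.log("shutting down");
  server.close(function() {
    process.exit(0);
  });
}
process.on("SIGINT", shutdown);
process.on("SIGTERM", shutdown);
```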
Next, let's organize ourselves a bit better. Right now we're putting our app into the root directory of our container and running it as the root user. This is both messy and unsafe. If an exploit for Node.js gets released, it means that whoever uses that exploit on our Node.js server will be doing so as root, which means they can do whatever they want. Ungood. So let's fix that. We'll put the code in a directory inside a home directory and run it as a different, non-root user.

```dockerfile
FROM node:12-stretch

USER node

COPY index.js /home/node/code/index.js

CMD ["node", "/home/node/code/index.js"]
```

The `USER` instruction lets us switch from being the root user to a different user, one called "node" which the `node:12-stretch` image has already made for us. We could make our own user too using bash commands but let's just use the one the node image gave us. (More or less you'd run `RUN useradd -ms /bin/bash lolcat` to add a lolcat user.)

Notice we're now copying inside of the user's home directory. This is because they'll have proper permissions to interact with those files, whereas they may not if we were outside of their home directory. You'll save yourself a lot of permission wrangling if you put it in a home directory. But we'll have to add a flag to the `COPY` command to make sure the user owns those files. We'll do that with `--chown=node:node` where the first `node` is the user and the second `node` is the user group.

It's no big deal that the "code" directory doesn't exist; `COPY` will create it.

### A Quick Note on COPY vs ADD

The two commands `COPY` and `ADD` do very similar things with a few key differences. `ADD` can also accept, in addition to local files, URLs to download things off the Internet, and it will also automatically unzip any tar files it downloads or adds. `COPY` will just copy local files. Use `COPY` unless you need to unzip something or are downloading something.

---

Great. Let's make everything a bit more succinct by setting a working directory.

```dockerfile
FROM node:12-stretch

USER node

WORKDIR /home/node/code

COPY --chown=node:node index.js .

CMD ["node", "index.js"]
```

`WORKDIR` works as if you had `cd`'d into that directory, so now all paths are relative to that. And again, if it doesn't exist, it will create it for you.

Now we just tell `COPY` to copy the file into the same directory. Since we're giving it a directory instead of a file name, it'll just assume we want the same name. You could rename it here if you wanted.

[localhost]: http://localhost:3000
[tini]: https://github.com/krallin/tini

--------------------------------------------------------------------------------
/lessons/buildah.md:
--------------------------------------------------------------------------------

---
path: "/buildah"
order: 8.0
title: "Buildah"
section: "OCI (Non-Docker) Containers"
description: "There are alternatives to Docker. Buildah is a tool that allows you to build new containers. It actually allows many different ways of building containers, from writing bash scripts that define the containers to building containers interactively. Brian shows how to build an OCI container with a Dockerfile via Buildah."
---

**NOTE**: Both of these tools _only work in Linux_. If you're using Windows and WSL, this works seamlessly. If you're on macOS, this will not work. You could try to run this within [the Buildah container][buildah-container] off of Docker Hub.
This container has both Buildah and Podman. This is what I'm going to do.

## Intro

While a lot of the gravity of the container world centers on Docker and the Docker ecosystem, there are other great projects that exist and are worthy of your consideration. I want to take the briefest detour to showcase the totally-open-source alternatives to Docker: [Buildah][buildah] and [Podman][podman]. Can we agree that container projects have the cutest logos ever?

A lot of the non-Docker support is centered behind what's called the [Open Container Initiative][oci], which is a project underneath the Linux Foundation. Keep in mind that Docker supports the OCI; they're members of it. This is meant to encourage a healthy diversity of players in the field and to have certain common guidelines so that the various sorts of tools can work together. Everybody wins here.

We're going to talk about these two tools, Buildah and Podman, that allow you to create and run OCI container images. Whereas we've been using Docker to both build and run containers, we're going to use Buildah to build containers and Podman to run them. In reality, with Docker we've been using `docker` to build containers and Docker Desktop has been running `dockerd` in the background to run the containers for us, so the same separation has existed even if we didn't have to know that directly.

There's a lot to these tools and they overlap quite a bit too. We're just going to introduce you to them and then move on.

## Buildah

### Installation

Follow [the instructions here][buildah-install] for your distro of Linux (that's if you're not using the container; I will be, except for one part at the end).

This next part applies only if you're not using the Buildah container. Make sure you have a valid `/etc/containers/registries.conf`. If you don't have that file, put this there:

```
# This is a system-wide configuration file used to
# keep track of registries for various container backends.
# It adheres to TOML format and does not support recursive
# lists of registries.

# The default location for this configuration file is /etc/containers/registries.conf.

# The only valid categories are: 'registries.search', 'registries.insecure',
# and 'registries.block'.

[registries.search]
registries = ['docker.io', 'registry.fedoraproject.org', 'registry.access.redhat.com']

# If you need to access insecure registries, add the registry's fully-qualified name.
# An insecure registry is one that does not have a valid SSL certificate or only does HTTP.
[registries.insecure]
registries = []


# If you need to block pull access from a registry, uncomment the section below
# and add the registries fully-qualified name.
#
# Docker only
[registries.block]
registries = []
```

Lastly, make sure you have `runc` installed too. It should be available on every major package manager.

If you want to do it inside of Docker, run

```bash
docker run -it --rm -p 3000:3000 --privileged --mount type=bind,source="$(pwd)",target=/src --mount type="volume",src=podman-data,target=/var/lib/containers tomkukral/buildah bash
```

This will run the Buildah / Podman container with your current directory mounted in at `/src`. Do note that this is Alpine Linux and you'll be dropped into ash, not bash, which does have some differences.
Sorta fun though, right? Building containers inside of your containers. Also do note that we're running it as `--privileged`, which means the container has elevated privileges. Use this only when you need to.

We need a volume mount for where the built containers are going to be stored (both ones we build and ones we download from registries) and we need a bind mount for our own code being mounted in. We're also exposing port 3000 because when we run the app inside the container (containers within containers) we'll tunnel that port through.

### Using Buildah

Buildah is the tool that allows you to build new containers. It actually allows many different ways of building containers, from writing bash scripts that define the containers to building containers interactively. We're going to do something a bit more familiar to you: Dockerfiles! Yes, Buildah can read and use Dockerfiles. So let's give it a shot.

```bash
buildah bud -f ./Dockerfile -t my-app-buildah . # instead of bud, you can use build-using-dockerfile
```

This accomplishes the same thing as `docker build`. It'll take a bit longer and for me it consumed a lot of memory. But once it's done you should see the image when you run `buildah images`. You can inspect it with `buildah inspect my-app-buildah`. Now, you can use Buildah a different way and start using this container interactively, but I leave that to your exploration.

So to see our built container in Buildah (Buildah allows you to do some running of containers but most of that resides in Podman) run `buildah from my-app-buildah`. This will start a container running in the background (and not run the `CMD` in our Dockerfile.) From there, run `buildah run --net host my-app-buildah-working-container -- bash`. This will get us inside the container! You can try to run our project but it'll fail since we haven't connected MongoDB (and we're not going to right now.) Congrats! You built and ran a container without Docker! (Again, since we already did it with lxc and lxd.)

## Podman

I'm going to do this inside of the [Buildah container][buildah-container] because this was quite difficult to set up outside of it. And in reality most of you will be doing this with Docker anyway, so this is more of an academic exercise.

After having built your container above with Buildah, run this:

```bash
podman run --cgroup-manager cgroupfs -p 3000:3000 localhost/my-app-buildah
```

This will start Podman managing an instance of your Buildah-built container! By default this will run your container in the foreground; you can run it in the background by adding `-d`.

### Run your Buildah container with Docker

In order to do this part, you have to run this outside of a container.

We need to first transfer our container out of Buildah and into Docker. We also need to be aware that there are two ways to package a container: Docker and OCI. If we tell Buildah to push to Docker, it'll fix that automatically, but be aware you can also use OCI (Open Container Initiative) images as well.

Just like you can push a container to Docker Hub, you can use the same mechanism within Buildah to push to a local Docker daemon (a background process). So go ahead and run `buildah push localhost/my-app-buildah docker-daemon:my-app-buildah:latest`. This will move our app out of Buildah and into Docker. Now if you run `docker run -it my-app-buildah bash` it should drop you into a running container. As a fun exercise, try to start the Node.js app and connect it to a running `mongo` container using the techniques we learned before. Then you'll have a container built with Buildah talking to a container run by Docker. Pretty cool!
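If you want to attempt that exercise, the shape of it might look something like this. This is a sketch under assumptions: the network and container names here are made up, and it presumes the app is written to reach MongoDB at a host named `db`, like we set up in the networking lesson.

```bash
# put a mongo container and our Buildah-built app on the same network
docker network create app-net
docker run -d --network app-net --name db mongo:3

# the app can now resolve the hostname "db" to reach MongoDB
docker run -it --init --network app-net -p 3000:3000 my-app-buildah
```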
[buildah]: https://buildah.io/
[podman]: https://podman.io/
[install-buildah]: https://github.com/containers/buildah/blob/master/install.md
[oci]: https://www.opencontainers.org/
[buildah-container]: https://hub.docker.com/r/tomkukral/buildah
[buildah-install]: https://github.com/containers/buildah/blob/master/install.md

--------------------------------------------------------------------------------
/lessons/cgroups.md:
--------------------------------------------------------------------------------

---
path: "/cgroups"
title: "cgroups"
order: 2.3
section: "Crafting Containers By Hand"
description: "cgroups, or control groups, are the third and last feature of the Linux kernel that allow for containers. They allow us to restrict how many resources a process can take. Brian shows how to manually use cgroups to restrict processes."
---

Okay, so now we've hidden the processes from Eve so Bob and Alice can engage in commerce in privacy and peace. So we're all good, right? They can no longer mess with each other, right? Not quite. We're almost there.

So now say it's Black Friday, Boxing Day or Singles' Day (three of the biggest shopping days in the year, pick the one that makes the most sense to you 😄) and Bob and Alice are gearing up for their biggest sales day of the year. Everything is ready to go and at 9:00AM their sites suddenly go down without warning. What happened!? They log on to their chroot'd, unshare'd shells on your server and see that the CPU is pegged at 100% and there's no more memory available to allocate! Oh no! What happened?

The first explanation could be that Eve has her site running on another server, simply logged on to this one, and ran a program that ate up all the available resources so that Bob's and Alice's sites would go down and hers would be the only one still up, increasing her sales.

However another, possibly more likely explanation is that both Bob's and Alice's sites got busy at the same time and that in-and-of-itself took all the resources without any malice involved, taking down their sites and everyone else's on the server. Or perhaps Bob's site had a memory leak and that was enough to take all the resources available.

Suffice to say, we still have a problem. Every isolated environment has access to all _physical_ resources of the server. There's no isolation of physical components from these environments.

Enter the hero of this story: cgroups, or control groups. Google saw this same problem when building their own infrastructure and wanted to protect runaway processes from taking down entire servers, so they made this idea of cgroups: you can say "this isolated environment only gets so much CPU, so much memory, etc. and once it's out of those it's out-of-luck, it won't get any more."
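Before we dive in, a bit of context that makes the commands below less magical: cgroups are exposed as a virtual filesystem mounted at `/sys/fs/cgroup`, and the `cg*` tools are thin wrappers around reading and writing those files. A rough sketch of the equivalence (this assumes cgroups v1, which is what Ubuntu 18.04 ships with):

```bash
# a cgroup is just a directory in the hierarchy (roughly what cgcreate does)
mkdir /sys/fs/cgroup/memory/sandbox

# limits are just files you write to (roughly what cgset does)
echo 80M > /sys/fs/cgroup/memory/sandbox/memory.limit_in_bytes

# membership is just a PID written into the tasks file (roughly what cgclassify does)
echo $PID > /sys/fs/cgroup/memory/sandbox/tasks
```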
This is a bit more difficult to accomplish but let's go ahead and give it a shot.

```bash
# outside of unshare'd environment get the tools we'll need here
apt-get install -y cgroup-tools htop

# create new cgroups
cgcreate -g cpu,memory,blkio,devices,freezer:/sandbox

# add our unshare'd env to our cgroup
ps aux # grab the bash PID that's right after the unshare one
cgclassify -g cpu,memory,blkio,devices,freezer:sandbox <PID>

# list tasks associated to the sandbox cpu group, we should see the above PID
cat /sys/fs/cgroup/cpu/sandbox/tasks

# show the cpu share of the sandbox cpu group, this is the number that determines priority between competing resources, higher is higher priority
cat /sys/fs/cgroup/cpu/sandbox/cpu.shares

# kill all of sandbox's processes if you need it
# kill -9 $(cat /sys/fs/cgroup/cpu/sandbox/tasks)

# Limit usage at 5% for a multi core system
cgset -r cpu.cfs_period_us=100000 -r cpu.cfs_quota_us=$[ 5000 * $(getconf _NPROCESSORS_ONLN) ] sandbox

# Set a limit of 80M
cgset -r memory.limit_in_bytes=80M sandbox
# Get memory stats used by the cgroup
cgget -r memory.stat sandbox

# in terminal session #2, outside of the unshare'd env
htop # will allow us to see resources being used with a nice visualizer

# in terminal session #1, inside unshare'd env
yes > /dev/null # this will instantly consume one core's worth of CPU power
# notice it's only taking 5% of the CPU, like we set
# if you want, run the docker exec from above to get a third session to see the above command take 100% of the available resources
# CTRL+C stops the above any time

# in terminal session #1, inside unshare'd env
yes | tr \\n x | head -c 1048576000 | grep n # this will ramp up to consume ~1GB of RAM
# notice in htop it'll keep the memory closer to 80MB due to our cgroup
# as above, connect with a third terminal to see it work outside of a cgroup
```

And now we can call this a container. Using these features together, we allow Bob, Alice, and Eve to run whatever code they want and the only people they can mess with are themselves.

So while this is a container at its most basic sense, we haven't broached more advanced topics like networking, deploying, bundling, or anything else that something like Docker takes care of for us. But now you know at its most base level what a container is, what it does, and how you _could_ do this yourself, though you'll be grateful that Docker does it for you. On to the next lesson!

--------------------------------------------------------------------------------
/lessons/chroot.md:
--------------------------------------------------------------------------------

---
path: "/chroot"
title: "chroot"
order: 2.1
section: "Crafting Containers By Hand"
description: "chroot is the first of the important Linux kernel features that allow us to create contained processes without a whole virtualization layer. Brian shows how to use chroot to restrict a process to a certain file tree."
---

## chroot

I've heard people call this "cha-root" and "change root". I'm going to stick to "change root" because I feel less ridiculous saying that. It's a Linux command that allows you to set the root directory of a new process. In our container use case, we just set the root directory to be wherever the new container's new root directory should be.
And now the new container group of processes can't see anything outside of it, eliminating our security problem because the new process has no visibility outside of its new root. 12 | 13 | Let's try it. Start up an Ubuntu VM however you feel most comfortable. I'll be using Docker (and doing containers within containers 🤯). If you're like me, run `docker run -it --name docker-host --rm --privileged ubuntu:bionic`. This will download the [official Ubuntu container][ubuntu] from Docker Hub and grab the version marked with the _bionic_ tag, which is the current LTS release (18.04). As of writing, the _latest_ tag points at the same release. You could put `ubuntu:devel` to get the latest development version of Ubuntu (as of writing that'd be 19.10). `docker run` means we're going to run some commands in the container, and the `-it` means we want to make the shell interactive (so we can use it like a normal terminal.) 14 | 15 | If you're in Windows and using WSL, just open a new WSL terminal in Ubuntu. ✌️ 16 | 17 | To see what version of Ubuntu you're using, run `cat /etc/issue`. `cat` reads a file and dumps it into the output which means we can read it, and `/etc/issue` is a file that will tell us what distro we're using. Mine says `Ubuntu 18.04.3 LTS \n \l`. 18 | 19 | Okay, so let's attempt to use `chroot` right now. 20 | 21 | 1. Make a new folder at the root of your file system via `mkdir /my-new-root`. 22 | 1. Inside that new folder, run `echo "my super secret thing" >> /my-new-root/secret.txt`. 23 | 1. Now try to run `chroot /my-new-root bash` and see the error it gives you. 24 | 25 | You should see something about failing to run a shell or not being able to find bash. That's because bash is a program and your new root wouldn't have bash to run (because it can't reach outside of its new root.) So let's fix that! Run: 26 | 27 | 1. `mkdir /my-new-root/bin` 28 | 1. `cp /bin/bash /bin/ls /my-new-root/bin/` 29 | 1. `chroot /my-new-root bash` 30 | 31 | Still not working! The problem is that these commands rely on libraries to power them and we didn't bring those with us. So let's do that too. Run `ldd /bin/bash`. This will print out something like this: 32 | 33 | ```bash 34 | $ ldd /bin/bash 35 | linux-vdso.so.1 (0x00007fffa89d8000) 36 | libtinfo.so.5 => /lib/x86_64-linux-gnu/libtinfo.so.5 (0x00007f6fb8a07000) 37 | libdl.so.2 => /lib/x86_64-linux-gnu/libdl.so.2 (0x00007f6fb8803000) 38 | libc.so.6 => /lib/x86_64-linux-gnu/libc.so.6 (0x00007f6fb8412000) 39 | /lib64/ld-linux-x86-64.so.2 (0x00007f6fb8f4b000) 40 | ``` 41 | 42 | These are the libraries we need for bash. Let's go ahead and copy those into our new environment. 43 | 44 | 1. `mkdir /my-new-root/lib /my-new-root/lib64` or you can do `mkdir /my-new-root/lib{,64}` if you want to be fancy 45 | 1. Then we need to copy all those paths (ignore the lines that don't have paths) into our directory. Make sure you get the right files in the right directory. In my case above (yours likely will be different) it'd be two commands: 46 | 1. `cp /lib/x86_64-linux-gnu/libtinfo.so.5 /lib/x86_64-linux-gnu/libdl.so.2 /lib/x86_64-linux-gnu/libc.so.6 /my-new-root/lib` 47 | 1. `cp /lib64/ld-linux-x86-64.so.2 /my-new-root/lib64` 48 | 1. Do it again for `ls`. Run `ldd /bin/ls` 49 | 1. Follow the same process to copy the libraries for `ls` into our `my-new-root`: 50 | 1. `cp /lib/x86_64-linux-gnu/libselinux.so.1 /lib/x86_64-linux-gnu/libpcre.so.3 /lib/x86_64-linux-gnu/libpthread.so.0 /my-new-root/lib` 51 | 52 | Now, finally, run `chroot /my-new-root bash` and run `ls`. You should successfully see everything in the directory. Now try `pwd` to see your working directory. You should see `/`. You can't get out of here! This, before being called containers, was called a jail for this reason. At any time, hit CTRL+D or run `exit` to get out of your chrooted environment.
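By the way, copying libraries one-by-one gets tedious fast. Here's a small sketch that automates it by parsing `ldd`'s output and copying whatever shared libraries it finds (assuming your library paths start with `/lib` like the ones above). It'll also come in handy for the exercise below:

```bash
for bin in /bin/bash /bin/ls; do
  for lib in $(ldd $bin | grep -o '/lib[^ ]*'); do
    mkdir -p /my-new-root$(dirname $lib) # recreate the library's directory under the new root
    cp $lib /my-new-root$(dirname $lib)  # then copy the library itself in
  done
done
```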
 53 | 54 | ## cat exercise 55 | 56 | Now try running `cat secret.txt`. Oh no! Your new chroot-ed environment doesn't know how to cat! As an exercise, go make `cat` work the same way we did above! 57 | 58 | Congrats, you just cha-rooted the \*\*\*\* out of your first environment! 59 | 60 | [ubuntu]: https://hub.docker.com/_/ubuntu 61 | -------------------------------------------------------------------------------- /lessons/conclusion.md: -------------------------------------------------------------------------------- 1 | --- 2 | path: "/conclusion" 3 | order: 9 4 | title: "Conclusion" 5 | section: "Wrapping Up" 6 | description: "Brian wraps up the whole course and gives additional topics to follow up on for those who are curious. Congrats! You've learned how to build and use containers!" 7 | --- 8 | 9 | ## What you learned 10 | 11 | You're done! Congratulations! You've successfully learned the fundamentals of containers and Docker. As a recap, here's what we went over: 12 | 13 | - What a container is 14 | - How to use chroot to isolate a file system 15 | - How to use namespaces to isolate a tree of processes from other processes, networks, etc. 16 | - How to use cgroups to limit how much CPU, memory, etc. a tree of processes can use 17 | - What Docker is and where it can be useful 18 | - How to write Dockerfiles for development and production 19 | - How to set up networks in Docker 20 | - How to set up bind mounts and volumes in Docker 21 | - Using Docker Compose to set up a multicontainer setup 22 | - Kubernetes and a very small introduction to large container orchestrations 23 | - An intro to the great container ecosystem with OCI, Podman, and Buildah 24 | 25 | ## What immediately helps you 26 | 27 | You're probably not going to jump directly into massive cloud-scale deployments of Kubernetes after this tutorial (or I'll be very impressed if you do!) But you did take away some things that should be immediately useful to you in your day-to-day: 28 | 29 | - How to make a shareable developer container 30 | - How to use that shareable container with Visual Studio Code 31 | - How to get up and going with both development and production containers 32 | - How to start a smaller-scale multi-container project 33 | 34 | ## The future 35 | 36 | Containers are just becoming more and more important to the greater tech ecosystem. Big cloud providers like Microsoft Azure, Amazon Web Services, and Google Cloud Platform are all making large bets that that's how your team will be shipping code in the future. They're making big bets like the various Kubernetes platforms, Azure Container Instances, AWS Fargate, Google Cloud Run, Visual Studio Online, and a myriad of others. 37 | 38 | ### Container runtime alternatives to Docker 39 | 40 | - [lxc & lxd][lxc] – Docker used to be based on these technologies (and has since moved on.) These days lxd is more used for long-running containers that are provisioned at runtime as opposed to at build time like Docker. You'll typically provision these containers with something like Chef, Salt, Ansible, Terraform or the like. 41 | - [rkt][rkt] – rkt fits much more of Docker's use case. Core to rkt is the idea of a pod.
Pods are the same sorts of pods that you find in Kubernetes, making them great for development with Kubernetes in mind. rkt is developed by CoreOS. 42 | 43 | Both rkt and lxd can execute Docker and OCI containers. 44 | 45 | ### Swarm, Mesos, and other orchestration systems 46 | 47 | - [Docker Swarm][swarm] – Docker Swarm is built right into Docker and thus well suited to be used with Docker. Docker Swarm still has services but doesn't have the concept of pods. The idea of a Swarm is to make multiple Docker hosts present together as one host. On the whole, Swarm is a simpler product to get started with than Kubernetes but may be too basic for large, enterprise apps. 48 | - [Apache Mesos][mesos] – Mesos is an open source project born out of Berkeley that was adopted and furthered by Twitter. While it is a powerful and flexible tool, it comes at the price of being very complicated and is one I haven't been able to really wrap my mind around. 49 | - [Hashicorp Nomad][nomad] – Nomad is a newcomer on the scene from the purveyors of Vagrant, Terraform, and other well-known dev tools. Nomad aims to be _only_ a container orchestration tool and none of the other parts like load balancer, service discovery, secrets manager, etc. whereas Kubernetes handles all of those things. While it is new, they do have some big customers with a slowly-growing fanbase. 50 | 51 | ### Container OSs 52 | 53 | Something we didn't discuss here is which container host operating system you use. We're talking about the operating system of the machine that's going to be running the containers, not the actual containers' operating system. While something like Alpine is great for running in containers, it would be a terrible fit for actually running the containers. Let's look at a few popular host operating systems for container deployments. 54 | 55 | - [CoreOS Container Linux][coreos] – A minimal Linux host OS that ships with a bunch of helpful tools for running containers (typically via their rkt engine) and Kubernetes. They're well established and liked in the container world. They were recently acquired by Red Hat who itself was recently acquired by IBM. 56 | - [RancherOS][rancher] – A fascinating Linux distro where all processes are containers and PID 1 is Docker. As you can imagine, if everything is a container then the whole OS is very oriented towards optimizing container usage. 57 | - [Ubuntu][ubuntu] – I mean, Ubuntu is great and it works just fine for running containers, as does Debian, Fedora, and other normal Linux distros. 58 | - [DC/OS][dcos] – People strongly associate DC/OS with Mesos because it runs based on Mesos but in reality it can be used as an OS for anything running containers. From their own docs: "_DC/OS (the Distributed Cloud Operating System) is an open-source, distributed operating system based on the Apache Mesos distributed systems kernel. DC/OS manages multiple machines in the cloud or on-premises from a single interface; deploys containers, distributed services, and legacy applications into those machines; and provides networking, service discovery and resource management to keep the services running and communicating with each other._" 59 | - [VMWare Photon][photon] – a relatively new project from VMWare that integrates with their other products like vSphere 60 | 61 | ## Wrap Up 62 | 63 | Again, congratulations. This is a deep course with a lot of breadth thrown at you. This will probably take several exposures to really sink in and that's common for deeply technical things like this.
But in the end I think you'll be a better dev for it whether you're writing CSS, Haskell, or Node.js. I hope you enjoyed it and let me know if you liked it! 64 | 65 | -- Brian 66 | 67 | [lxc]: https://linuxcontainers.org/lxc/introduction/ 68 | [rkt]: https://coreos.com/rkt/ 69 | [nomad]: https://www.nomadproject.io/ 70 | [swarm]: https://docs.docker.com/engine/swarm/ 71 | [mesos]: http://mesos.apache.org/ 72 | [coreos]: https://coreos.com/ 73 | [rancher]: https://rancher.com/rancher-os/ 74 | [ubuntu]: https://ubuntu.com/ 75 | [dcos]: https://dcos.io/ 76 | [photon]: https://vmware.github.io/photon/ 77 | -------------------------------------------------------------------------------- /lessons/dev-containers.md: -------------------------------------------------------------------------------- 1 | --- 2 | order: 6.2 3 | path: "/dev-containers" 4 | title: "Using Containers for your Dev Environment" 5 | section: "Features in Docker" 6 | description: "Containers can be useful as development environments. This makes them shareable, recreatable, portable, and makes them a great launching pad for building the production environment too. Brian shows how to use containers to build a Hugo (a Go-based static site generator) project." 7 | --- 8 | 9 | So far we've been talking about taking an app and using containers to prepare it to run. This is an obvious use case for them and one you're going to use a lot. But let's talk about a different use case for them: building development environments for your apps. 10 | 11 | Let's paint a picture. Let's say you got a new job with a company and they're a Ruby shop (if you know Ruby, pretend you don't for a sec.) When you arrive, you're going to be given a very long, likely-out-of-date, complicated README that you're going to have to go look for and then struggle through to set up the proper version of Ruby, get the correct dependencies installed, and make sure Mercury isn't in retrograde (just kidding.) Suffice to say, it's a not-fun struggle to get new apps working locally, particularly if it's in a stack that you're not familiar with. Shouldn't there be a better way? There is! (I feel like I'm selling knives on an infomercial.) 12 | 13 | Containers! What we can do is define a Dockerfile that sets up all our dependencies so that it's 100% re-createable with zero knowledge of how it works to everyone that approaches it. With bind mounts, we can mount our local code into the container so that we can edit locally and have it propagate into the development container. Let's give it a shot! 14 | 15 | ## Hugo 16 | 17 | I'm not a Go developer. Go is a wonderful language with a rich ecosystem, it's just not what I've previously used. As such, Go is not set up on my computer. But what happens if I move onto a new project that uses [Hugo][hugo]? Hugo is a great static site generation tool written in Go but one I'm not too familiar with. I could spend a decent amount of time getting everything set up … or I could use a container! After a quick Internet search, I stumbled across the container [hugo-builder][hugo-builder] which has Hugo all ready to go; I just have to bind in my source files. So let's give it a shot! 18 | 19 | [Your new project is here][hugo-project].
 20 | 21 | Let's go from zero to running our new project in dev mode in three commands: 22 | 23 | ```bash 24 | git clone https://github.com/btholt/hugo-example.git 25 | cd hugo-example/ 26 | # you could rewrite the --mount here as -v $PWD:/src 27 | docker run --rm -it --mount type=bind,source="$(pwd)",target=/src -p 1313:1313 -u hugo jguyomard/hugo-builder:0.55 hugo server -w --bind=0.0.0.0 28 | ``` 29 | 30 | How cool is this? We're zero to developing in Go in three commands! This is a super useful tool for getting developer environments up and running. 31 | 32 | Notice we didn't copy our files into the container. Why? Well, we need our files to live on our host because we want to edit them locally and then run them in the container, right? If they lived in the container, they'd go away once we shut down the container. 33 | 34 | ## Aside on Node.js and Native Dependencies 35 | 36 | This also works great for Node.js but there would be one problem here: our dependencies (unless you're running Linux as both the host and the container OS.) Whenever you run `npm install` it'll build your dependencies specifically for whatever OS you're on. This is only a problem if you have dependencies that have native code in them (like `node-sass` for example) but it's good to know how to handle this. 37 | 38 | The fastest way is to just ignore everything and run the container as-is. Once you have the container running, just `docker attach` to it and run `npm install` yourself inside of the container. A bit manual (and sort of defeating the purpose of containers) but effective. 39 | 40 | The second option, which is a bit gross in its own way, is to add `npm install &&` to the beginning of the `CMD` of your dev container. This will make it so that the node_modules are installed before starting your server. It's a bit of extra overhead on every restart of the app which can be annoying. 41 | 42 | [hugo-project]: https://github.com/btholt/hugo-example 43 | [hugo]: https://gohugo.io/ 44 | [hugo-builder]: https://hub.docker.com/r/jguyomard/hugo-builder/ 45 | -------------------------------------------------------------------------------- /lessons/docker-cli.md: -------------------------------------------------------------------------------- 1 | --- 2 | path: "/docker-cli" 3 | title: "Docker CLI" 4 | order: 3.5 5 | section: "Docker" 6 | description: "There are a lot of features of the Docker CLI and while that won't necessarily be the focus of this workshop, Brian takes a moment to explain to you some of the additional available functionality." 7 | --- 8 | 9 | Let's take a look at some more cool features of the Docker CLI. 10 | 11 | ### pull / push 12 | 13 | `pull` allows you to pre-fetch containers to run. 14 | 15 | ```bash 16 | docker pull jturpin/hollywood 17 | docker run -it jturpin/hollywood hollywood # notice it's already loaded and cached here; it doesn't redownload it 18 | ``` 19 | 20 | That will pull the hollywood container from the user jturpin's user account. The second line will execute this fun container which is just meant to look like a hacker's screen in a movie (it doesn't really do anything other than look cool.) 21 | 22 | `push` allows you to push containers to whatever registry you're connected to (probably normally Docker Hub or something like Azure Container Registry). 23 | 24 | ### inspect 25 | 26 | ```bash 27 | docker inspect node 28 | ``` 29 | 30 | This will dump out a lot of info about the container, which is helpful when figuring out what's going on with it.
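The output is JSON, so it pipes well into other tools, and the `--format` flag lets you pluck out a single field with a Go template. A couple of sketches (assuming you've pulled the `node` image):

```bash
docker inspect --format '{{.Os}}/{{.Architecture}}' node # the platform the image was built for
docker inspect --format '{{.Config.Cmd}}' node           # the default command the image runs
```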
 31 | 32 | ### pause / unpause 33 | 34 | As it looks, these pause or unpause all the processes in a container. Feel free to try: 35 | 36 | ```bash 37 | docker run -dit jturpin/hollywood hollywood 38 | docker ps # see container running 39 | docker pause <ID or name> 40 | docker ps # see container paused 41 | docker unpause <ID or name> 42 | docker ps # see container running again 43 | docker kill <ID or name> # see container is gone 44 | ``` 45 | 46 | ### exec 47 | 48 | This allows you to execute a command against a running container. This is different from `docker run` because `docker run` will start a new container whereas `docker exec` runs the command in an already-running container. 49 | 50 | ```bash 51 | docker run -dit jturpin/hollywood hollywood 52 | docker ps # grab the name or ID 53 | docker exec <ID or name> ps aux # see it output all the running processes of the container 54 | ``` 55 | 56 | If you haven't seen `ps aux` before, it's a really useful way to see what's running on your computer. Try running `ps aux` on your macOS or Linux computer to see everything running. 57 | 58 | ### import / export 59 | 60 | Allows you to dump out your container to a tar ball (which we did above.) You can also import a tar ball as well. 61 | 62 | ### history 63 | 64 | We'll get into layers in a bit but this allows you to see how this Docker image's layer composition has changed over time and how recently. 65 | 66 | ```bash 67 | docker history node 68 | ``` 69 | 70 | ### info 71 | 72 | Dumps a bunch of info about the host system. Useful if you're on a VM somewhere and not sure what the environment is. 73 | 74 | ```bash 75 | docker info 76 | ``` 77 | 78 | ### top 79 | 80 | Allows you to see processes running on a container (similar to what we did above) 81 | 82 | ```bash 83 | docker run -d mongo 84 | docker top <ID or name> # you should see MongoDB running 85 | ``` 86 | 87 | ### rm / rmi 88 | 89 | If you run `docker ps --all` it'll show all containers you've stopped running in addition to the ones you're running. If you want to remove something from this list, you can do `docker rm <id or name>`. 90 | 91 | If you want to remove an image from your computer (to save space or whatever) you can run `docker rmi mongo` and it'll delete the image from your computer. This isn't a big deal since you can always pull it down again. 92 | 93 | ### logs 94 | 95 | Very useful to see the output of one of your running containers. 96 | 97 | ```bash 98 | docker run -d mongo 99 | docker logs <id or name> # see all the logs 100 | ``` 101 | 102 | ### restart 103 | 104 | Pretty self-explanatory: it will restart a running container. 105 | 106 | ### search 107 | 108 | If you want to see if a container exists on Docker Hub (or whatever registry you're connected to), this will allow you to take a look. 109 | 110 | ```bash 111 | docker search python # see all the various flavors of Python containers you can run 112 | docker search node # see all the various flavors of Node.js containers you can run 113 | ``` 114 | -------------------------------------------------------------------------------- /lessons/docker-compose.md: -------------------------------------------------------------------------------- 1 | --- 2 | path: "/docker-compose" 3 | order: 7.0 4 | title: "Docker Compose" 5 | section: "Multi Container Projects" 6 | description: "Docker Compose allows for setting up multi-container setups with a single configuration file as opposed to having to hand-create your development environment.
Brian shows how to take the Node.js + MongoDB app and create the environment with just `docker-compose up`." 7 | --- 8 | 9 | This may be one of the most useful features you learn about Docker. We've been mixing various different facets of deploying your app to production and creating development environments. This feature in particular is geared much more toward development environments. Many times when you're developing containers you're not in just a single container environment (though that does happen too.) When this happens, you need to coordinate multiple containers when you're doing local dev and you've seen in the previous chapter, networking, that it's possible, if a bit annoying. 10 | 11 | With Docker Compose we simplify this a lot. Docker Compose gives us the ability to coordinate multiple containers and do so with one YAML file. This is great if you're developing a Node.js app and it requires a database, caching, or even if you have two+ separate apps in two+ separate containers that depend on each other, or all the above! Docker Compose makes it really simple to define the relationship between these containers and get them all running with one `docker-compose up`. 12 | 13 | Do note that Docker does say that Docker Compose is suitable for production environments if you have a single instance running multiple containers. This is atypical for the most part: if you have multiple containers, typically you want the ability to have many instances. 14 | 15 | In addition to working very well for dev, Docker Compose is very useful in CI/CD scenarios when you want GitHub Actions or some CI/CD provider to spin up multiple environments to quickly run some tests. 16 | 17 | Okay so let's get our previous app working: the one with a MongoDB database being connected to by a Node.js app. Create a new file in the root directory of your project called `docker-compose.yml` and put this in there: 18 | 19 | ```yml 20 | version: "3" 21 | services: 22 |   web: 23 |     build: . 24 |     ports: 25 |       - "3000:3000" 26 |     volumes: 27 |       - .:/home/node/code 28 |       - /home/node/code/node_modules 29 |     links: 30 |       - db 31 |     environment: 32 |       MONGO_CONNECTION_STRING: mongodb://db:27017 33 |   db: 34 |     image: mongo:3 35 | ``` 36 | 37 | This should feel familiar even if it's new to you. This is basically all of the CLI configurations we were giving to the two containers but captured in a YAML file. The version is which version of the Docker Compose YAML you're using. They do this so they can stay backwards compatible and still add new features. As of writing, v3 is the latest. 38 | 39 | In `services` we define the containers we need for this particular app. We have two: the `web` container (which is our app) and the `db` container which is MongoDB. We then identify where the Dockerfile is with `build`, which ports to expose in `ports`, which volumes to make in `volumes` (here we're mounting in our code so that we can keep editing code without having to rebuild the image), and the `environment` variables using that field. 40 | 41 | Notice the second `volumes` entry. We do this so that the `node_modules` from the original build container get mounted in, not the ones on your local host. We do this because it's important to have node_modules that were built for the system they're running on. If you run `npm install` on a Windows machine and then copy those to a Ubuntu machine, chances are that they'll break due to how `node_modules` can build native dependencies. If you `npm install` something new in your app, you may need to stop the app and run `docker-compose build` again.
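While we're here, a few Compose subcommands you'll reach for constantly (run them from the directory that holds your docker-compose.yml):

```bash
docker-compose up        # build (if needed) and start every service in the foreground
docker-compose up -d     # the same, but detached so you keep your terminal
docker-compose logs web  # see the output of just the web service
docker-compose down      # stop and remove the containers and network it created
```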
 42 | 43 | One more interesting field in the YAML above is `links`. In this one we're saying that the `web` container needs to be connected to the `db` container. This means Docker will start that container first and then network it to the `web` container. This works the same way as what we were doing in the previous lesson. 44 | 45 | The `db` container is pretty simple: it's just the `mongo` container from Docker Hub. This is actually smart enough to expose 27017 as the port and to make a volume to keep the data around between restarts so we don't actually have to do anything for that. If you needed any other containers, you'd just put them here in services. 46 | 47 | There's a lot more to compose files than what I've shown you here but I'll let you explore that on your own time. [Click here][compose] to see the docs to see what else is possible. 48 | 49 | This will start and work now; just run `docker-compose up` and it'll get going. I just want to do one thing: let's make our app even more productive to develop on. Go to your Dockerfile for the app and make it read as such: 50 | 51 | ```dockerfile 52 | FROM node:latest 53 | 54 | RUN npm i -g nodemon 55 | 56 | USER node 57 | 58 | RUN mkdir /home/node/code 59 | 60 | WORKDIR /home/node/code 61 | 62 | COPY --chown=node:node package-lock.json package.json ./ 63 | 64 | RUN npm ci 65 | 66 | COPY --chown=node:node . . 67 | 68 | CMD ["nodemon", "index.js"] 69 | ``` 70 | 71 | Now we can write our code and every time we save it'll restart the server from within the container. This will make this super productive to work with! 72 | 73 | While we're about to get to Kubernetes which will handle bigger deployment scenarios than Docker Compose can, you can use `docker-compose up --scale web=10` to scale up your web container to 10 concurrently running containers. This won't work at the moment because they're all trying to listen on the host on port 3000 but we could use something like NGINX or HAProxy to load balance amongst the containers. It's a bit more advanced of a use case and less useful for Compose since at that point you should probably just use Kubernetes or something similar. We'll approach it in the Kubernetes chapter. 74 | 75 | [compose]: https://docs.docker.com/compose/compose-file/#compose-file-structure-and-examples 76 | -------------------------------------------------------------------------------- /lessons/docker-images-with-docker.md: -------------------------------------------------------------------------------- 1 | --- 2 | path: "/docker-images-with-docker" 3 | title: "Docker Images with Docker" 4 | order: 3.2 5 | section: "Docker" 6 | description: "Brian will now show you how to use the Docker CLI directly to use images and how the Docker command line client works with regard to container images." 7 | --- 8 | 9 | ### Docker Images with Docker 10 | 11 | So it's much easier to do what we did with Docker. Run this command: 12 | 13 | ```bash 14 | docker run --interactive --tty alpine:3.10 # or, to be shorter: docker run -it alpine:3.10 15 | ``` 16 | 17 | A bit easier to remember, right? This will drop you into an Alpine ash shell inside of a container as the root user of that container. When you're done, just run `exit` or hit CTRL+D. Notice that this will grab the [alpine][alpine] image from Docker for you and run it. The `run` part of the command is telling Docker you're going to be executing a container (as opposed to building it.)
The `-it` part says you want to be dropped into the container interactively so you can run commands and inspect the container. By default containers run and then exit as soon as they're done. Go ahead and try `docker run alpine:3.10`. It'll look like it did nothing but it actually starts the container and then, because it has nothing defined for it to do, it just exits. 18 | 19 | So what if you wanted it to execute something? Try this: 20 | 21 | ```bash 22 | docker run alpine:3.10 ls 23 | ``` 24 | 25 | Or let's switch to Ubuntu now, since it's more familiar to most. We'll talk about Alpine later on in-depth. 26 | 27 | ```bash 28 | docker run ubuntu:bionic ls 29 | ``` 30 | 31 | The `ls` part at the end is what you pass into the container to be run. As you can see here, it executes the command, outputs the results, and shuts down the container. This works for one-off commands, and it's also how you'd run something like a Node.js server: since the server doesn't exit on its own, the container will keep running until the server crashes or exits itself. 32 | 33 | So now what if we want to detach the container running from the foreground? Let's try that. 34 | 35 | ```bash 36 | docker run --detach -it ubuntu:bionic # or, to be shorter: docker run -dit ubuntu:bionic 37 | ``` 38 | 39 | So it prints a long hash out and then nothing. Oh no! What happened to it!? Well, it's running in the background. So how do we get ahold of it? 40 | 41 | ```bash 42 | docker ps 43 | ``` 44 | 45 | This will print out all the running containers that Docker is managing for you. You should see your container there. So copy the ID or the name and say: 46 | 47 | ```bash 48 | docker attach <ID or name> # e.g. `docker attach 20919c49d6e5` would attach to that container 49 | ``` 50 | 51 | This allows you to attach a shell to a running container and mess around with it. Useful if you need to inspect something or see running logs. Feel free to type `exit` to get out of here. Run `docker run -dit ubuntu:bionic` one more time. Let's kill this container without attaching to it. Run `docker ps`, get the IDs or names of the containers you want to kill and say: 52 | 53 | ```bash 54 | docker kill <IDs or names> # e.g. `docker kill fae0f0974d3d 803e1721dad3 20919c49d6e5` would kill those three containers 55 | ``` 56 | 57 | A fun way to kill all running containers would be 58 | 59 | ```bash 60 | docker kill $(docker ps -q) 61 | ``` 62 | 63 | The `$()` portion of that will evaluate whatever is inside of that first and plug its output into the second command. In this case, `docker ps -q` returns all the IDs and nothing else. These are then passed to `docker kill` which will kill all those IDs. Neat! 64 | 65 | ## --name and --rm 66 | 67 | Let's make it a bit easier to keep track of these. Try this 68 | 69 | ```bash 70 | docker run -dit --name my-ubuntu ubuntu:bionic 71 | docker kill my-ubuntu 72 | ``` 73 | 74 | Now you can refer to these by a name you set. But now if you tried it again, it'd say that `my-ubuntu` exists. If you run `docker ps --all` you'll see that the container exists even if it's been stopped. That's because Docker keeps this metadata around until you tell it to stop doing that. You can run `docker rm my-ubuntu` which will free up that name or you can run `docker container prune` to free up all existing stopped containers (and free up some disk space.) 75 | 76 | In the future you can just do 77 | 78 | ```bash 79 | docker run --rm -dit --name my-ubuntu ubuntu:bionic 80 | docker kill my-ubuntu 81 | ``` 82 | 83 | This will automatically clean up the container when it's done.
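To convince yourself the cleanup really happens, check for the container's metadata after killing it:

```bash
docker run --rm -dit --name my-ubuntu ubuntu:bionic
docker kill my-ubuntu
docker ps --all # my-ubuntu is gone; there's no stopped container left to prune
```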
 84 | -------------------------------------------------------------------------------- /lessons/docker-images-without-docker.md: -------------------------------------------------------------------------------- 1 | --- 2 | path: "/docker-images-without-docker" 3 | title: "Docker Images without Docker" 4 | order: 3.1 5 | section: "Docker" 6 | description: "Docker gives us the fabulous ability to use containers other people have made by pulling them from Docker Hub, which is like a package manager for containers. Brian shows us how to download a container, unpack it, and use it without Docker so you can know how to do it manually." 7 | --- 8 | 9 | ## Images 10 | 11 | These pre-made containers are called _images_. They basically dump out the state of the container, package that up, and store it so you can use it later. So let's go nab one of these images and run it! We're going to do it first without Docker to show you that you actually already know what's going on. 12 | 13 | First thing, let's go grab a container off of Docker Hub. We'll grab the Alpine Linux container (which we'll talk about more later.) 14 | 15 | ### Docker Images without Docker 16 | 17 | ```bash 18 | # start docker container with docker running in it connected to host docker daemon 19 | docker run -ti -v /var/run/docker.sock:/var/run/docker.sock --privileged --rm --name docker-host docker:18.06.1-ce 20 | 21 | # run stock alpine container 22 | docker run --rm -dit --name my-alpine alpine:3.10 sh 23 | 24 | # export running container's file system 25 | docker export -o dockercontainer.tar my-alpine 26 | 27 | # make container-root directory, export contents of container into it 28 | mkdir container-root 29 | tar xf dockercontainer.tar -C container-root/ 30 | 31 | # make a contained user, mount in namespaces 32 | unshare --mount --uts --ipc --net --pid --fork --user --map-root-user chroot $PWD/container-root ash # this also does chroot for us 33 | mount -t proc none /proc 34 | mount -t sysfs none /sys 35 | mount -t tmpfs none /tmp 36 | 37 | # here's where you'd do all the cgroup rule-making with the settings you wanted 38 | ``` 39 | 40 | So, this isn't totally it. Docker does a lot more for you than just this, like networking, volumes, and other things, but suffice to say this is the core of what Docker is doing for you: creating a new environment that's isolated by namespace and limited by cgroups, and chroot'ing you into it. So why did we go through all this ceremony? Well, it's because I want you to understand what Docker is doing for you, and know that you _could_ do it by hand, but since there's a tool that does it for you, you don't want to. I hold a strong personal belief that people need to understand their tools and what they do for them. Every tool you add to your environment adds complexity but should also add ease. If you don't understand the complexity the tool is solving, you resent it and don't get to fully appreciate nor take advantage of what the tool can fully offer. 41 | 42 | So how often will you do what we just did? Never. 99% of container-utilizers have no idea this is what's happening under the hood. But now that you do know, it should make you embrace the complexity that Docker adds because you can see why you have it.
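When you're done exploring, you can tear the experiment down with something like this (a sketch; the first command runs inside the unshare'd shell, the rest back in the docker-host container):

```bash
exit # leave the unshare'd ash shell, which tears down its namespaces with it
docker kill my-alpine # stop the stock alpine container we exported from
rm -rf container-root dockercontainer.tar # remove the extracted file system and the tarball
```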
 43 | -------------------------------------------------------------------------------- /lessons/docker.md: -------------------------------------------------------------------------------- 1 | --- 2 | path: "/getting-set-up-with-docker" 3 | title: "Getting Set Up with Docker" 4 | order: 3.0 5 | section: "Docker" 6 | description: "This is probably why you're here: Docker. Docker is a commandline tool that made creating, updating, packaging, distributing, and running containers significantly easier, which in turn allowed them to become very popular with not just system administrators but the programming populace at large." 7 | --- 8 | 9 | This is probably why you're here: Docker. Docker is a commandline tool that made creating, updating, packaging, distributing, and running containers significantly easier, which in turn allowed them to become very popular with not just system administrators but the programming populace at large. At its heart, it's a command line tool to achieve what we were doing with cgroups, namespaces, and chroot but in a much more convenient way. Let's dive into the core concepts of Docker. 10 | 11 | ## Docker Desktop 12 | 13 | Go ahead and install [Docker Desktop][desktop] right now. It will work for both Mac and Windows. Docker Desktop runs the Docker [daemon][daemon] (daemon just means a program that runs in the background all the time) so that we can download, run, and build containers. If you're on Mac, you'll see a cute little whale icon in your status bar. Feel free to poke around and see what it has. It will also take the liberty of installing the `docker` commandline tool so we can start doing all the fun things with Docker. 14 | 15 | ## Docker Hub 16 | 17 | [Click here][hub] to head over to Docker Hub. Docker Hub is a public registry of pre-made containers. Think of it like an npm for containers. Instead of having to handcraft everything yourself, you can start out with a base container from Docker Hub and start from there. For example, instead of having to start with Ubuntu and install Node.js on it yourself, you can just start with a container that has Node.js already on it! There's a pre-made container for just about anything you can think of, and for those you can't it's pretty easy to find a good starting point so you can make your own bespoke, artisanal containers. If you feel so inclined, you can publish your own containers on the registry so others can take advantage of your discoveries. 18 | 19 | Feel free to make an account on Docker Hub at this point. We won't be publishing anything to it during this workshop but it's a good idea to have one for when you want to! 20 | 21 | [ubuntu]: https://hub.docker.com/_/ubuntu 22 | [alpine]: https://hub.docker.com/_/alpine 23 | [node]: https://hub.docker.com/_/node/ 24 | [desktop]: https://www.docker.com/products/docker-desktop 25 | [hub]: https://hub.docker.com/search?q=&type=image 26 | [alpine]: https://www.alpinelinux.org/ 27 | [daemon]: https://en.wikipedia.org/wiki/Daemon_(computing) 28 | [cgmanager]: https://linuxcontainers.org/cgmanager/ 29 | -------------------------------------------------------------------------------- /lessons/dockerfile.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Intro to Dockerfiles" 3 | path: "/dockerfile" 4 | order: 4.0 5 | section: "The Dockerfile" 6 | description: "Docker has a special file called a `Dockerfile` which allows you to outline how a container will be built.
Each line in a Dockerfile is a new directive for how to change your Docker container. Brian goes into the basics of writing a Dockerfile here." 7 | --- 8 | 9 | So far we've been focusing a lot on running containers and haven't much dug into building them. This is on purpose because most of the benefit of containers for developers comes from the running of containers. If you learn one thing, it should be how to run them. 10 | 11 | That said, let's learn to build our own containers. We'll again be using Docker for this though there are other ways to do this. Docker has a special file called a `Dockerfile` which allows you to outline how a container will be built. Each line in a Dockerfile is a new directive for how to change your Docker container. 12 | 13 | A _big key_ with Docker containers is that they're supposed to be disposable. You should be able to create them and throw them away as many times as necessary. In other words: adopt a mindset of making everything short-lived. There are other, better tools for long-running, custom containers. 14 | 15 | Let's make the most basic Dockerfile ever. Let's make a new folder, maybe on your desktop. Put a file in there called `Dockerfile` (no extension.) In your file, put this. 16 | 17 | ## The most basic Dockerfile-based Container 18 | 19 | ```dockerfile 20 | FROM node:12-stretch 21 | 22 | CMD ["node", "-e", "console.log(\"hi lol\")"] 23 | ``` 24 | 25 | The first thing on each line (`FROM` and `CMD` in this case) is called an _instruction_. They don't technically have to be all caps but it's convention to do so so that the file is easier to read. Each one of these instructions incrementally changes the container from the state it was in previously, adding what we call a _layer_. 26 | 27 | Let's go ahead and build our container. Run this (from inside of the directory where your Dockerfile is): 28 | 29 | ```bash 30 | docker build . 31 | ``` 32 | 33 | You should see it output a bunch of stuff and it'll leave you with the hash of an image. After each instruction, you'll see a hash similar to the ones we've been using for the IDs for the containers. You know why that is? It's because each one of those layers is in-and-of itself a valid container image! This ends up being important later and we'll discuss it in a bit. 34 | 35 | Our container has two instructions in its Dockerfile, but actually it has many, many more. How? The first instruction, `FROM node:12-stretch`, actually means _start_ with the `node` container. That container itself [comes from another Dockerfile][docker-node] which builds its own container, which itself [comes from another Dockerfile][buildpack], which comes ultimately from the [Debian][debian] image. 36 | 37 | This is something very powerful about Docker: you can use images to build other images and build on the work of others. Instead of having to worry about how to install Debian and all the necessary items to build Node.js from its source, we can just start with a well-put-together image from the community. 38 | 39 | Okay, so we start with `node:12-stretch` and then we add the `CMD` instruction. There will only ever be one of these in effect in a Dockerfile. If you have multiple it'll just take the last one. This is what you want Docker to do when someone runs the container. In our case, we're running `node -e "console.log('hi lol')"` from within the container. `node -e`, if you don't know, will run whatever is inside of the quotes with Node.js. In this case, we're logging out `hi lol` to the console.
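If `node -e` is new to you, you can try the exact same command anywhere Node.js is available, including the stock `node` image itself, using the run-a-command-in-a-container trick from earlier:

```bash
docker run node:12-stretch node -e "console.log(\"hi lol\")" # prints: hi lol
```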
You _can_ put `CMD node -e "console.log('hi lol')"` as that last line and it'll work, but it's not the preferred way of doing it. The array form doesn't actually go through bash, which is simpler and usually safer. I do it this way because the docs strongly encourage you to do it this way. 42 | 43 | So, in essence, our container nabs a `node:12-stretch` container and then has it execute a `node` command when you run it. Let's try it. Grab the hash from your build and run 44 | 45 | ```bash 46 | docker run <ID> 47 | ``` 48 | 49 | It's a little inconvenient to always have to refer to it by ID; it'd be easier if it had a name. So let's do that! Try 50 | 51 | ```bash 52 | docker build . --tag my-node-app ## or -t instead of --tag 53 | docker run my-node-app 54 | ``` 55 | 56 | Much easier to remember the name rather than a hash. If you want to version it yourself, you can totally do this: 57 | 58 | ```bash 59 | docker build -t my-node-app:1 . 60 | docker run my-node-app:1 61 | ``` 62 | 63 | Now change your `Dockerfile` so that it logs out `wat` instead of `hi lol`. After you do that: 64 | 65 | ```bash 66 | docker build -t my-node-app:2 . 67 | docker run my-node-app:2 68 | docker run my-node-app:1 69 | ``` 70 | 71 | You can version your containers and hold on to older ones, just in case! 72 | 73 | [buildpack]: https://github.com/docker-library/buildpack-deps 74 | [debian]: https://hub.docker.com/_/debian/ 75 | [docker-node]: https://github.com/nodejs/docker-node/blob/master/Dockerfile-debian.template 76 | -------------------------------------------------------------------------------- /lessons/expose.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: "A Note on EXPOSE" 3 | path: "/expose" 4 | order: 4.4 5 | section: "The Dockerfile" 6 | description: "It can be confusing what the EXPOSE instruction does so Brian takes a moment to explain why you won't necessarily need to use it." 7 | --- 8 | 9 | ## A Note on EXPOSE 10 | 11 | This was a point of confusion for me so I'm going to try to clear it up for you. There is an instruction called `EXPOSE <port number>` whose intended use is to expose ports from within the container to the host machine. However if we don't do the `-p 3000:3000` it still isn't exposed, so in reality this instruction doesn't do much. You don't need `EXPOSE`. 12 | 13 | There are two caveats to that. The first is that it could be useful documentation to say "I know this Node.js service listens on port 3000 and now anyone who reads this Dockerfile will know that too." I would counter that I don't think the Dockerfile is the best place for that documentation. 14 | 15 | The second caveat is that instead of `-p 3000:3000` you can do `-P`. This will take all of the ports you exposed using `EXPOSE` and will map them to random ports on the host. You can see what ports it chose by using `docker ps`. It'll say something like `0.0.0.0:32769->3000/tcp` so you can see in this case it chose `32769`. Again, I'd prefer to be deliberate about which ports are being mapped.
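To see `-P` in action, here's a sketch assuming an image whose Dockerfile does contain an `EXPOSE 3000` instruction (tagged `my-node-app` here):

```bash
docker run -dit -P --name random-port my-node-app
docker ps # look in the PORTS column for something like 0.0.0.0:32769->3000/tcp
docker kill random-port
```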
16 | -------------------------------------------------------------------------------- /lessons/images/FrontendMastersLogo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/btholt/complete-intro-to-containers/ea3d08d5da2528c2c25fdcc3a5c16ea266743093/lessons/images/FrontendMastersLogo.png -------------------------------------------------------------------------------- /lessons/images/brian.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/btholt/complete-intro-to-containers/ea3d08d5da2528c2c25fdcc3a5c16ea266743093/lessons/images/brian.jpg -------------------------------------------------------------------------------- /lessons/images/logo.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /lessons/intro.md: -------------------------------------------------------------------------------- 1 | --- 2 | path: "/intro" 3 | title: "Introduction" 4 | order: 1.0 5 | section: "Welcome" 6 | description: "Brian Holt lays out the course objectives, his background, and where to file issues with the course as well as why he created this course: he believes that containers are going to be important to every developer going forward." 7 | --- 8 | 9 | ## Course Objective 10 | 11 | Hello! And welcome to the Complete Intro to Containers! The objective of this course is demystify what containers are, describe how they can be useful to you, and walk you through the steps of working with containers so that at the end of this course containers will be another tool available to you in your toolbox. Containers are just getting more important in the industry and now it's not just a tool for ops, it's a tool for developers. Everyone from the designers to the low level system engineers will need to interact with containers on a regular basis. This will help you get ahead of the curve. 12 | 13 | ## Who Are You? 14 | 15 | This course is aimed at a developer demographic. While all the examples will be dealing with JavaScript applications, you don't necessarily need to be a JavaScript developer to grasp this case; the code will be incidental to the concepts being taught. 16 | 17 | This course assumes a very basic grasp of Linux and using the command line. You don't need to be a bash expert but this shouldn't be your first exposure to Linux or the command line. The class will be taught for both macOS and Windows users and will be using Ubuntu and Alpine Linux for the containers. This will also work well for Linux developers but the class won't have any additional instructions for Linux devs but following the macOS steps should be 95% the same. If you are a Windows developer, please be using Windows 10. You'll need to either use [WSL 2][wsl2] or VirtualBox. See the set up instructions below. 18 | 19 | For set up instructions, [refer here][readme]. 20 | 21 | To see all of the completed project files in a repo, [refer here][project-files]. 22 | 23 | Do note that containers can take a lot of CPU and memory. If you have a modern-ish processor and 8GB, you will be fine. This could probably be done with some slow down on 4GB but anything lower would be pretty tough. 24 | 25 | This can also take a lot of bandwidth because we'll be downloading a lot of things. Be aware of that. 26 | 27 | ## Where to File Issues 28 | 29 | I write these courses and take care to not make mistakes. 
However when teaching hours of material, mistakes are inevitable, both here in the grammar and in the course with the material. However I (and the wonderful team at Frontend Masters) are constantly correcting the mistakes so that those of you that come later get the best product possible. If you find a mistake we'd love to fix it. The best way to do this is to open a pull request or [file an issue][issue] on the GitHub repo. While I'm always happy to chat and give advice on social media, I can't be tech support for everyone. And if you file it on GitHub, those who come later can Google the same answer you got. 30 | 31 | ## Who Am I? 32 | 33 | ![Brian drinking a beer](images/brian.jpg) 34 | 35 | My name is Brian Holt. I'm presently (as of writing) a senior program manager over Visual Studio Code and JavaScript on Azure at Microsoft. That means I'm trying to make Azure a place you want to deploy your code and VSCode the best tool to write code with. I've taught a lot of lessons on [Frontend Masters][frontend-masters] and used to be on the frontend development podcast [Front End Happy Hour][fehh]. Previous to that, I was a cloud advocate for Microsoft and a staff JavaScript / Node.js engineer at LinkedIn, Netflix, Reddit, Needle, KSL.com, and NuSkin. I'm also stoke to be a board member of the amazing organization [Vets Who Code][vwc]. 36 | 37 | My biggest passions in life are people and experiences. I hope by going through this course that it can improve your life in some meaningful way and that you in turn can improve someone else's life. My beautiful wife and I live in Seattle, Washington in the United States of America with our cute little Havanese dog Luna. I'd almost always rather be traveling and have been fortunate to see over forty countries in the past six years. 38 | 39 | Please catch up with me on social media, would love to chat: 40 | 41 | - [Twitter][twitter] 42 | - [GitHub][github] 43 | - [LinkedIn][linkedin] 44 | 45 | ## Why was this course created? 46 | 47 | ![Frontend Masters Logo](images/FrontendMastersLogo.png) 48 | 49 | I love to teach. It's a challenging task that forces you to peel back all the knowledge you've gained so you can approach someone who lacks the same experience and terminology you have. It forces you to take amorphous concepts floating in your brain and crystalize them into solid concepts that you can describe. It forces you to acknowledge your gaps in knowledge because you'll begin to question things you know others will question. For me to ever master a concept, I have to teach it to someone else. 50 | 51 | Unfortunately life gets in the way. These courses take dozens of hours to prepare and to get right. While I'd love to just create content all day, I have a (awesome) day job at Microsoft that demands and deserves my full attention. However I'm grateful to the team at [Frontend Masters][fem] for giving me deadlines and incentive to create these courses and then allowing and encouraging me to open source the materials. Not everyone has the money to pay for these courses which is why these materials are and will be forever open source for you to reference and share. I think the video content is pretty good too and so I'd encourage you to [take a look at the videos on Frontend Masters][course] too if that's in the cards for you. 52 | 53 | And hey, if you could take a second and [star the repo on GitHub][gh] I'd be super appreciative. It helps me reach more people. 
 54 | 55 | [gh]: https://github.com/btholt/complete-intro-to-containers 56 | [frontend-masters]: https://frontendmasters.com/teachers/brian-holt/ 57 | [fehh]: http://frontendhappyhour.com/ 58 | [fem]: https://frontendmasters.com/ 59 | [twitter]: https://twitter.com/holtbt 60 | [github]: https://github.com/btholt 61 | [linkedin]: https://www.linkedin.com/in/btholt/ 62 | [course]: https://frontendmasters.com/courses/complete-intro-containers/ 63 | [vwc]: https://vetswhocode.io/ 64 | [issue]: https://github.com/btholt/complete-intro-to-containers/issues 65 | [wsl2]: https://docs.microsoft.com/en-us/windows/wsl/wsl2-install 66 | [wsl]: https://docs.microsoft.com/en-us/windows/wsl/install-win10 67 | [readme]: https://github.com/btholt/complete-intro-to-containers/blob/master/README.md 68 | [project-files]: https://github.com/btholt/projects-for-complete-intro-to-containers 69 | -------------------------------------------------------------------------------- /lessons/kompose.md: -------------------------------------------------------------------------------- 1 | --- 2 | path: "/kompose" 3 | order: 7.2 4 | title: "Kompose" 5 | section: "Multi Container Projects" 6 | description: "Kompose converts a docker-compose.yml configuration to a Kubernetes configuration. Brian shows how to take the previous Docker Compose YAML file and run that same configuration through Kubernetes" 7 | --- 8 | 9 | The next tool we're going to use is one called [Kompose][kompose]. I'm showing you this tool because it's normally how I start out with Kubernetes. Kompose converts a docker-compose.yml configuration to a Kubernetes configuration. I find this to be much more approachable than starting with the myriad configurations you need to get Kubernetes going. 10 | 11 | [Click here][install-kompose] to see how to install Kompose on your platform. 12 | 13 | So first let's modify our docker-compose.yml a bit to make it work for Kompose. 14 | 15 | ```yml 16 | version: "3" 17 | services: 18 |   web: 19 |     image: btholt/node-app # change build to image 20 |     ports: 21 |       - "3000:3000" 22 |     links: 23 |       - db 24 |     labels: 25 |       kompose.service.type: LoadBalancer # need this label for Kubernetes 26 |     environment: 27 |       MONGO_CONNECTION_STRING: mongodb://db:27017 28 |   db: 29 |     image: mongo:3 30 |     ports: 31 |       - "27017" # explicitly expose the port 32 | ``` 33 | 34 | Kompose (as of writing) doesn't have a way to easily use local images without pushing them to Docker Hub. If you want to use your own image, add back your `build: .` line, call the image something like `image: <your Docker Hub username>/node-app`, and make sure you're logged into the Docker CLI via `docker login`. I've built the image that we've been building together and pushed it to `btholt/node-app` so feel free to just use mine. It's the same code. 35 | 36 | We add the `LoadBalancer` label so that Kubernetes will know to expose this particular service to the outside world. What this actually does for you is it spins up a load balancer that will distribute the load amongst all of your running pods. Do note that this is one of three ways to expose a service to the outside world (by default everything is only exposed internally). The other two are NodePort and using an ingress controller. [This is a great explainer][ingress] if you're curious. For now LoadBalancer is perfect. 37 | 38 | Lastly, we need to be explicit about the port MongoDB exposes. Locally Docker was able to take care of it but Kubernetes needs us to be super explicit about what's exposed and what's not. 39 | 40 | Okay, so now, a hack.
Kompose expects kubectl to be listening on port 8080, which it doesn't do by default, so we need to set that up ourselves. Run `kubectl proxy --port=8080` and leave that running. You may need to open another terminal while that runs or run that last command in the background. 41 | 42 | Now, you should be able to run `kompose up` and access your app on [`localhost:3000`][localhost]. Congrats! You're running Kubernetes! 43 | 44 | To get a bird's-eye view of everything running, run `kubectl get all` to see everything happening. 45 | 46 | Let's do some Kubernetes magic now. Run `kubectl scale --replicas=5 deployment/web` and run `kubectl get all`. Just like that, you have five instances of our Node.js app running and Kubernetes smartly routing traffic to each. If one of them becomes unhealthy, Kubernetes will automatically tear it down and spin up a new one. By setting up Kubernetes, you get a lot of cool stuff for free. If your computer is starting to warm up, feel free to run `kubectl scale --replicas=1 deployment/web` to scale down. 47 | 48 | Once you're done toying, run `kubectl delete all --all`. This will tear down everything. 49 | 50 | ## Convert 51 | 52 | We did all of this from a docker-compose.yml file but that's just to get you started. What you want are the actual Kubernetes configuration files. To get those, run `kompose convert` which will spit out all of the various configurations you'll need for the services and deployments. I'm not going to get into those configurations today. Kubernetes is very powerful and has many knobs and levers. For now you can see what Kompose generates for you. 53 | 54 | ## To the cloud! 55 | 56 | What's super fun is that kubectl is the same tool you'd use to control your production deployment. So everything you just learned would work against Azure, AWS, GCP, etc. All you have to do is change the context from minikube or docker-desktop to Azure, AWS, or GCP. I'm not going to do that but I'll drop the tutorials here so you can play around yourself. Do note these are often not free and if you're not careful, Kubernetes can get expensive! 57 | 58 | - [Azure AKS][aks] 59 | - [Amazon EKS][aws] 60 | - [Google GKE][gcp] 61 | 62 | [ingress]: https://medium.com/google-cloud/kubernetes-nodeport-vs-loadbalancer-vs-ingress-when-should-i-use-what-922f010849e0 63 | [localhost]: http://localhost:3000 64 | [aks]: https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough 65 | [aws]: https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html 66 | [gcp]: https://cloud.google.com/kubernetes-engine/docs/quickstart 67 | [kompose]: https://kompose.io/ 68 | [install-kompose]: https://kompose.io/installation/ 69 | -------------------------------------------------------------------------------- /lessons/kubernetes.md: -------------------------------------------------------------------------------- 1 | --- 2 | path: "/kubernetes" 3 | order: 7.1 4 | title: "Kubernetes" 5 | section: "Multi Container Projects" 6 | description: "Kubernetes is a container orchestration tool. It allows you to manage large, complicated clusters of containers on multiple different hosts. Brian goes over what Kubernetes is and the definitions of the various Kubernetes terms." 7 | --- 8 | 9 | I like to tell people that containers are the "simple" (simple is a relative term here) part and Kubernetes is the "hard" (hard isn't relative; Kubernetes is really hard) part. So if this feels hard, it's because it is. 10 | 11 |
54 | ## To the cloud!
55 | 
56 | What's super fun is that kubectl is the same tool you'd use to control your production deployment. So everything you just learned would work against Azure, AWS, GCP, etc. All you have to do is change the context from minikube or docker-desktop to Azure, AWS, or GCP. I'm not going to do that, but I'll drop the tutorials here so you can play around yourself. Do note these are often not free, and if you're not careful, Kubernetes can get expensive!
57 | 
58 | - [Azure AKS][aks]
59 | - [Amazon EKS][aws]
60 | - [Google GKE][gcp]
61 | 
62 | [ingress]: https://medium.com/google-cloud/kubernetes-nodeport-vs-loadbalancer-vs-ingress-when-should-i-use-what-922f010849e0
63 | [localhost]: http://localhost:3000
64 | [aks]: https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough
65 | [aws]: https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html
66 | [gcp]: https://cloud.google.com/kubernetes-engine/docs/quickstart
67 | [kompose]: https://kompose.io/
68 | [install-kompose]: https://kompose.io/installation/
69 | 
-------------------------------------------------------------------------------- /lessons/kubernetes.md: --------------------------------------------------------------------------------
1 | ---
2 | path: "/kubernetes"
3 | order: 7.1
4 | title: "Kubernetes"
5 | section: "Multi Container Projects"
6 | description: "Kubernetes is a container orchestration tool. It allows you to manage large, complicated clusters of containers across multiple hosts. Brian goes over what Kubernetes is and the definitions of the various Kubernetes terms."
7 | ---
8 | 
9 | I like to tell people that containers are the "simple" (simple is a relative term here) part and Kubernetes is the "hard" (hard isn't relative; Kubernetes is really hard) part. So if this feels hard, it's because it is.
10 | 
11 | NOTE: Because Kubernetes is a long word, it's often abbreviated as k8s (k, then eight letters, then s.)
12 | 
13 | So let's talk about use cases. Containers by themselves are useful for many, many use cases like production apps, machine learning, setting up environments, developer environments, and one-off experimentations. Kubernetes builds on containers (read: you need to know containers to use Kubernetes.) Kubernetes is a container orchestration tool. It allows you to manage large, complicated clusters of containers across multiple hosts. It's a complicated tool that solves complicated problems. As such, we are going to do a hello world so you can understand what it is, what it can do, and then leave you to explore more on your own.
14 | 
15 | So let's go over a few fundamental concepts here.
16 | 
17 | - The **master** is a server that coordinates everything else. This is the brain of your cluster. Some cloud providers actually won't charge you to run the master.
18 | - **Nodes** (not to be confused with Node.js) are the worker servers that are actually going to be running your containers. One node can run one or multiple containers. If you're running machine learning and you need big, beefy servers to churn through the learning, your node may only run one container. If you're running a Node.js server like we are, you'll have many containers on one node.
19 |   - Technically, a node is just a deploy target. It could itself be a VM or a container, or as we said it could be a metal-and-silicon server. It's not really important. Just think of it as a destination for containers.
20 | - A **pod** is basically an atom to a cluster: it's a thing that can't be divided and thus needs to be deployed together. Imagine if you had several types of containers that all worked together as one unit and wouldn't work without each other. In this case, you'd put those into a pod. In many cases, and what we're going to do today, it's one-container-one-pod. Our app stands alone and thus can be deployed independently. We'll keep the MongoDB pod and app pod separate because they can scale individually.
21 | - A **service** is a group of pods that make up one backend (services can be other things, but bear with me for a second), so to speak; think of a service as the single, stable address for one microservice. Pods are scaling up and down all the time, and thus it's unreliable to rely on a single pod's IP. So if I tell the User service to rely on a specific IP for the Admin service, that IP might disappear as that pod is scaled up and down. Enter services. A service is a reliable entry point so that these services can talk to each other independent of the relative scale of each other. Like you can have one-container-one-pod, you can have one-pod-one-service as well, which means you can have one-container-one-pod-one-service. Services can be more than a backend; they can be machine learning nodes, databases, caches, etc.
22 | - A **deployment** is where you describe what you want the state of your pods to be and then Kubernetes works to get your cluster into that state. We'll see what one looks like concretely right after this list.
23 | 
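To make "deployment" a little less abstract: once you have `kubectl` installed (we're getting there in a second), you can ask it to print the manifest it _would_ create without touching any cluster. A sketch, assuming a recent kubectl (older versions spelled the flag as plain `--dry-run`) and reusing the `btholt/node-app` image from the Kompose lesson:

```bash
# print the YAML for a deployment of our app without actually creating anything
kubectl create deployment web --image=btholt/node-app --dry-run=client -o yaml
```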
24 | Here's the sad part: doing this in the Windows Subsystem for Linux is tough. If you're following along in Windows, I'd say just grab a coffee and watch how this works. It's not important that you actually do this. If you're comfortable in PowerShell, it works well from there, or if you can connect to a true Linux VM, it'll work well from there too. Otherwise, just relax while I do this from macOS.
25 | 
26 | So you're going to need at least one new CLI: `kubectl`. `kubectl` ([see here for how to install][kubectl]) is the tool that allows you to control _any_ Kubernetes cluster, be it local or in the cloud. It's the single unified CLI for managing Kubernetes.
27 | 
28 | After that, you need to make a choice between `minikube` and using Docker Desktop's built-in Kubernetes support. If it's all the same to you, I'd suggest using Docker Desktop's because it's easier to use.
29 | 
30 | - Docker Desktop ships with very simple Kubernetes support. It's nice to learn on but has some limitations. If you need to do more complicated things, get minikube. To enable Kubernetes on Docker Desktop, open the preferences of Docker Desktop, navigate to the Kubernetes tab, enable it, accept when it asks if it can restart itself, and then wait a few minutes.
31 | - `minikube` ([see here for how to install][minikube]) is a development tool to get your Kubernetes cluster running on your local computer. You will only ever use this locally.
32 | 
33 | You can have both installed, by the way. These will be called **contexts**. To switch between the two, you can `kubectl config use-context minikube` or `kubectl config use-context docker-desktop`. You can also shorten `use-context` to `use`.
34 | 
35 | If you're using minikube, make sure you run `minikube start`. If you're using Docker Desktop, it should be started already. Do a `kubectl cluster-info` to make sure. To see your nodes, run `kubectl get nodes`. You should see a master node running.
36 | 
37 | [kubectl]: https://kubernetes.io/docs/tasks/tools/install-kubectl/
38 | [minikube]: https://kubernetes.io/docs/tasks/tools/install-minikube/
39 | 
-------------------------------------------------------------------------------- /lessons/layers.md: --------------------------------------------------------------------------------
1 | ---
2 | title: "Layers"
3 | path: "/layers"
4 | order: 4.5
5 | section: "The Dockerfile"
6 | description: "Brian goes into depth about what layers are with regard to Docker images and how you can leverage Docker's caching of layers to more quickly rebuild containers"
7 | ---
8 | 
9 | Go make any change to your Node.js app. Now re-run your build process. Docker is smart enough to see that your `FROM`, `RUN`, and `WORKDIR` instructions haven't changed and wouldn't change if you ran them again, so it uses the same layers it cached from the previous build. But it can see that your `COPY` is different since files changed between last time and this time, so it begins the build process there and re-runs all instructions after that. Pretty smart, right?
10 | 
11 | So which part of container-building takes the longest? `RUN npm ci`. Anything that has to hit the network is going to take the longest, without a doubt. The shame is that our `package.json` hasn't changed since the previous iteration; we just changed something in our `index.js`. So how do we make it so we only re-run our `npm ci` when `package.json` changes? Break it into two `COPY` instructions!
12 | 
13 | ```Dockerfile
14 | FROM node:12-stretch
15 | 
16 | USER node
17 | 
18 | RUN mkdir /home/node/code
19 | 
20 | WORKDIR /home/node/code
21 | 
22 | COPY --chown=node:node package-lock.json package.json ./
23 | 
24 | RUN npm ci
25 | 
26 | COPY --chown=node:node . .
27 | 
28 | CMD ["node", "index.js"]
29 | ```
30 | 
31 | The first `COPY` pulls just the `package.json` and the `package-lock.json`, which is just enough to do the `npm ci`. After that we nab the rest of the files.
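If you want to watch the cache do its thing, build twice and compare the output. A quick sketch; the tag name here is just an assumption, so use whatever you've been tagging your app with:

```bash
docker build -t my-node-app .   # first build: every step runs

# now change only index.js, then build again:
docker build -t my-node-app .   # the FROM/RUN/COPY package*.json steps report "Using cache";
                                # only the final COPY and everything after it re-run
```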
Now if you make a code change, you avoid doing a full npm install. This trick is useful and recommended for any dependency installation: apt-get, pip, cargo, gems, etc., as well as any long-running command like building something from source.
32 | 
-------------------------------------------------------------------------------- /lessons/making-our-own-alpine-nodejs-container.md: --------------------------------------------------------------------------------
1 | ---
2 | title: "Making Our Own Alpine Node.js Container"
3 | path: "/making-our-own-alpine-nodejs-container"
4 | order: 5.1
5 | section: "Making Tiny Containers"
6 | description: "As an exercise to understand what goes into making a container from scratch, Brian shows how to create a Node.js container from the base Alpine container."
7 | ---
8 | 
9 | ## Making our own Node.js Alpine container
10 | 
11 | Let's take this exercise a bit further. Let's actually make our own Node.js Alpine container. NOTE: I'd suggest always using the official one. They'll keep it up to date with security fixes and they're _real_ good at making containers. Better than I am, anyway. But this is a good exercise for us to go through to learn how to install system dependencies.
12 | 
13 | Start with this in a new Dockerfile. You can call it `my-node.Dockerfile`. Some people will insist it should be `Dockerfile.my-node`, but the former doesn't break syntax highlighting, and it doesn't matter since Docker doesn't actually care.
14 | 
15 | ```dockerfile
16 | FROM alpine:3.10
17 | 
18 | RUN apk add --update nodejs npm
19 | ```
20 | 
21 | `alpine:latest` would nab you the latest Alpine (3.10 as of writing). If you run into issues with versions, continue with `alpine:3.10` instead of `alpine:latest`; otherwise feel free to truck on with `alpine:latest`.
22 | 
23 | `RUN apk add --update nodejs npm` will use the Alpine package manager to grab Node.js and npm (they're bundled separately for Alpine.)
24 | 
25 | If you encounter an error like this
26 | 
27 | ```bash
28 | /home/node/code/node_modules/@hapi/hapi/lib/core.js:51
29 |     actives = new WeakMap(); // Active requests being processed
30 |     ^
31 | 
32 | SyntaxError: Unexpected token =
33 | ```
34 | 
35 | try using `nodejs-current` instead of `nodejs`
36 | 
37 | ```dockerfile
38 | RUN apk add --update nodejs-current npm
39 | ```
40 | 
41 | Okay, so now if you do `docker build -t my-node -f my-node.Dockerfile .` it'll build your new image. `-t` is `--tag` and `-f` is `--file`, which tells Docker the name of the Dockerfile you're using (otherwise it assumes you're using a file called exactly `Dockerfile`.) Now try `docker run -it my-node`. In here you should have a pretty bare bones Linux container, but both `node -v` and `npm -v` should work.
42 | 
43 | Keep in mind that Alpine does not use bash for its shell; it uses a different shell called `ash`, or often just `sh`. It's similar enough to bash, but there are some differences. It's not really the point of this class so we'll keep the focus on learning just what's necessary.
44 | 
45 | Let's next make our `node` user.
46 | 
47 | ```dockerfile
48 | FROM alpine:3.10
49 | 
50 | RUN apk add --update nodejs npm
51 | 
52 | RUN addgroup -S node && adduser -S node -G node
53 | 
54 | USER node
55 | ```
56 | 
57 | I'm mimicking what the Node.js official container does, which is make a user group of `node` with one user in it, `node`. Feel free to name them different things if you feel so inclined.
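If you want to double-check that the user actually took effect, rebuild and ask the container who it thinks it is. A quick sketch, assuming the same tag and file name as above:

```bash
docker build -t my-node -f my-node.Dockerfile .
docker run my-node whoami   # should print "node"
docker run my-node id       # the uid/gid should belong to node, not root
```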
Notice we could conceivably combine the two `RUN` instructions, but it's generally best practice to keep "ideas" separate: the first `RUN` installs dependencies, the second one creates the `node` user. Up to you how you do it; neither is wrong per se.
58 | 
59 | Now we can just copy the rest from the previous Dockerfile! Let's do that.
60 | 
61 | ```dockerfile
62 | FROM alpine:3.10
63 | 
64 | RUN apk add --update nodejs npm
65 | 
66 | RUN addgroup -S node && adduser -S node -G node
67 | 
68 | USER node
69 | 
70 | RUN mkdir /home/node/code
71 | 
72 | WORKDIR /home/node/code
73 | 
74 | COPY --chown=node:node package-lock.json package.json ./
75 | 
76 | RUN npm ci
77 | 
78 | COPY --chown=node:node . .
79 | 
80 | CMD ["node", "index.js"]
81 | ```
82 | 
83 | It works! We're down to 56MB (compared to 86MB with the official `node:12-alpine` container). Honestly, I'm not entirely sure what we cut out from the official `node:12-alpine` container, but it's probably important. Again, I'd stick to the official containers where they exist. But hey, we learned how to add a user and install system dependencies! Let's make it even smaller, because why the hell not.
-------------------------------------------------------------------------------- /lessons/more-complicated-nodejs-app.md: --------------------------------------------------------------------------------
1 | ---
2 | title: "A More Complicated Node.js App"
3 | path: "/more-complicated-nodejs-app"
4 | order: 4.2
5 | section: "The Dockerfile"
6 | description: "Brian shows how to write a Dockerfile for a more complicated app and how to avoid problems with Node.js native modules"
7 | ---
8 | 
9 | Okay, all looking good so far. Let's make this app go one step further. Let's give it an npm install step! In the directory where your app is, put this:
10 | 
11 | ```javascript
12 | // more-or-less the example code from the hapi-pino repo
13 | const hapi = require("@hapi/hapi");
14 | 
15 | async function start() {
16 |   const server = hapi.server({
17 |     host: "0.0.0.0",
18 |     port: process.env.PORT || 3000
19 |   });
20 | 
21 |   server.route({
22 |     method: "GET",
23 |     path: "/",
24 |     handler() {
25 |       return { success: true };
26 |     }
27 |   });
28 | 
29 |   await server.register({
30 |     plugin: require("hapi-pino"),
31 |     options: {
32 |       prettyPrint: true
33 |     }
34 |   });
35 | 
36 |   await server.start();
37 | 
38 |   return server;
39 | }
40 | 
41 | start().catch(err => {
42 |   console.log(err);
43 |   process.exit(1);
44 | });
45 | ```
46 | 
47 | This is a [hapi.js][hapi] server. Hapi is a server-side framework (like Express) for Node.js and my personal favorite. This is going to require that we `npm install` the dependencies, so in your project do the following:
48 | 
49 | ```bash
50 | npm init -y # this will create a package.json for you without asking any questions
51 | npm install @hapi/hapi hapi-pino
52 | ```
53 | 
54 | Now try running `node index.js` to run the Node.js server. You should see it running and logging out info whenever you hit an endpoint. Cool, so now that we have a full-featured Node.js app, let's containerize it.
55 | 
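If you want to poke at it before containerizing, you can hit the endpoint from a second terminal. A quick sketch:

```bash
node index.js &       # start the server in the background
curl localhost:3000   # should answer with {"success":true}
kill %1               # stop the background server when you're done
```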
56 | If we tried to build it and run it right now it'd fail because we didn't `npm install` the dependencies. So now, right after the `COPY`, we'll add a `RUN`.
57 | 
58 | ```dockerfile
59 | FROM node:12-stretch
60 | 
61 | USER node
62 | 
63 | WORKDIR /home/node/code
64 | 
65 | COPY --chown=node:node . .
66 | 
67 | RUN npm ci
68 | 
69 | CMD ["node", "index.js"]
70 | ```
71 | 
72 | We changed the `COPY` to copy everything in the directory. Right now you probably have a `node_modules` directory locally, but if you're building a container directly from a repo it won't have one, so we have to operate under the assumption that it won't be there. Feel free even to delete it if you want.
73 | 
74 | Let's go ahead and add a `.dockerignore` file to the root of the project that prevents Docker from copying the `node_modules`. This has the same format as a `.gitignore`.
75 | 
76 | ```
77 | node_modules/
78 | .git/
79 | ```
80 | 
81 | We then added a `RUN` instruction to run a command inside of the container. If you're not familiar with `npm ci`, it's very similar to `npm install` with a few key differences: it'll follow the `package-lock.json` exactly (where `npm install` will ignore it and update it if newer patch versions of your dependencies are available) and it'll automatically delete `node_modules` if it exists. `npm ci` is made for situations like this.
82 | 
83 | Now if you try to build again, it'll fail with permissions issues. Why? Well, when you have `WORKDIR` create a directory, it does so as root, which means that the node user won't have enough permissions to modify that directory. We could either use `RUN` to change the owner of that directory or we could use `RUN` to make the directory in the first place as node. Let's do the latter.
84 | 
85 | ```dockerfile
86 | FROM node:12-stretch
87 | 
88 | USER node
89 | 
90 | RUN mkdir /home/node/code
91 | 
92 | WORKDIR /home/node/code
93 | 
94 | COPY --chown=node:node . .
95 | 
96 | RUN npm ci
97 | 
98 | CMD ["node", "index.js"]
99 | ```
100 | 
101 | Now try building and running your container. It should work now! Yay!
102 | 
103 | **NOTE:** make sure you don't bind your app to host `localhost` (like if you put `localhost` instead of `0.0.0.0` as the host in our hapi app.) This will make it so the app is only available _inside_ the container. If you see `connection reset` when you're expecting a response, this is a good candidate for what's happening (because this definitely didn't _just_ happen to me 😂.)
104 | 
105 | [hapi]: https://hapi.dev/
-------------------------------------------------------------------------------- /lessons/multi-stage-builds.md: --------------------------------------------------------------------------------
1 | ---
2 | title: "Multi Stage Builds"
3 | path: "/multi-stage-builds"
4 | order: 5.2
5 | section: "Making Tiny Containers"
6 | description: "Multi stage builds can help you make your containers even smaller and more secure by leaving out tools only needed to build the container. Brian shows how to make the Alpine-Node.js container built in previous sections even smaller using this technique."
7 | ---
8 | 
9 | Hey, we're already half-way to ridiculous; let's make our image EVEN SMALLER. Technically we only need `npm` to build our app, right? We don't actually need it to run our app. Docker allows you to have what are called multi-stage builds, where it uses one container to build your app and another to run it. This can be useful if you have big dependencies to build your app but you don't need those dependencies to actually run the app. A C++ or Rust app might be a good example of that: they need big tool chains to compile the apps, but the resulting binaries are smaller and don't need those tools to actually run them. Or one perhaps more applicable to you is that you don't need the TypeScript or Sass compiler in production, just the compiled files. We'll actually do that here in a sec, but let's start here with eliminating `npm`.
10 | 
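Before we start cutting, it's worth knowing your starting point. `docker image ls` accepts a repository name, so you can check the sizes of what you've built and pulled so far (image names assumed from the earlier lessons; your exact sizes will vary):

```bash
docker image ls my-node   # the Alpine-based image we hand-rolled
docker image ls node      # the official images you've pulled, e.g. 12-stretch and 12-alpine
```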
11 | Make a new Dockerfile, call it `multi-node.Dockerfile`.
12 | 
13 | ```dockerfile
14 | # build stage
15 | FROM node:12-stretch
16 | WORKDIR /build
17 | COPY package-lock.json package.json ./
18 | RUN npm ci
19 | COPY . .
20 | 
21 | # runtime stage
22 | FROM alpine:3.10
23 | RUN apk add --update nodejs
24 | RUN addgroup -S node && adduser -S node -G node
25 | USER node
26 | RUN mkdir /home/node/code
27 | WORKDIR /home/node/code
28 | COPY --from=0 --chown=node:node /build .
29 | CMD ["node", "index.js"]
30 | ```
31 | 
32 | Notice we have two `FROM` instructions. This is how you can tell it's a multi-stage build. The last container made will be the final one that gets labeled and shipped. Notice we're starting with the big `node:12-stretch` image in the build stage: since we're not going to ship that container, we can use the kitchen sink to build the app before copying the output to a smaller container.
33 | 
34 | After building everything in the build stage (you can have more than two stages, by the way) we move on to the runtime container. In this one we're using Alpine due to its size and security benefits. Everything else looks similar to what we were doing before, just now we're going to be copying from the build container instead of the host machine.
35 | 
36 | The two real key differences are that we don't `apk add npm` and we're doing `COPY --from=0`, which means we're copying from the first stage. As you may imagine, this means you can copy from any previous stage, or if you leave `--from` off it'll come from the host machine.
37 | 
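As an aside, you can also name your stages instead of counting them, which reads a little better and won't break if you ever add a stage in the middle. The `AS` / `--from=<name>` syntax is standard Docker; here's the same Dockerfile sketched with a named build stage:

```dockerfile
# build stage, now with a name
FROM node:12-stretch AS build
WORKDIR /build
COPY package-lock.json package.json ./
RUN npm ci
COPY . .

# runtime stage
FROM alpine:3.10
RUN apk add --update nodejs
RUN addgroup -S node && adduser -S node -G node
USER node
RUN mkdir /home/node/code
WORKDIR /home/node/code
# copy from the stage named "build" instead of stage 0
COPY --from=build --chown=node:node /build .
CMD ["node", "index.js"]
```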
38 | So try it now!
39 | 
40 | ```bash
41 | docker build -t multi-node -f multi-node.Dockerfile .
42 | docker run -p 3000:3000 multi-node
43 | ```
44 | 
45 | Still works! And our container size is down to a cool 39MB, as compared to 56MB when we included npm, 86MB when we used `node:12-alpine`, and 913MB when we used `node:latest`.
46 | 
47 | Pretty amazing, right? Honestly, how worthwhile is it to do micro-optimizations like this? Not very. We had to do a decent amount to shave 40MB off the final size and now we're stuck maintaining it. I'd rather just start with `FROM node:12-alpine` and call it a day. We get all their wisdom for free and we're not stuck with a longer Dockerfile than we need. But it is definitely worth going from 913MB to 86MB!
48 | 
-------------------------------------------------------------------------------- /lessons/namespaces.md: --------------------------------------------------------------------------------
1 | ---
2 | path: "/namespaces"
3 | title: "Namespaces"
4 | order: 2.2
5 | section: "Crafting Containers By Hand"
6 | description: "Namespaces are the second feature of the Linux kernel that allows for containers. Namespaces let you hide processes, networks, and other core functionality from sets of processes. Brian shows us how to use namespaces manually."
7 | ---
8 | 
9 | ## namespace
10 | 
11 | While chroot is pretty straightforward, namespaces and cgroups are a bit more nebulous to understand, but no less important. Both of these next two features are for security and resource management.
12 | 
13 | Let's say you're running a big server that's in your home and you're selling space to other people (that you don't know) to run their code on your server. What sort of concerns would you have? Let's say you have Alice and Bob who are running e-commerce services dealing with lots of money. They themselves are good citizens of the server and are minding their own business. But then you have Eve join the server with other intentions: she wants to steal money, source code, and whatever else she can get her hands on from your other tenants on the server. If you just gave all three of them root access to the server, what's to stop Eve from taking everything? Or what if she just wants to disrupt their businesses, even if she's not stealing anything?
14 | 
15 | Your first line of defense is that you could log them into chroot'd environments and limit them to only those. Great! Now they can't see each others' files. Problem solved? Well, no, not quite yet. Despite the fact that she can't see the files, she can still see all the processes going on on the computer. She can kill processes, unmount filesystems, and potentially even hijack processes.
16 | 
17 | Enter namespaces. Namespaces allow you to hide processes from other processes. If we give each chroot'd environment a different set of namespaces, now Alice, Bob, and Eve can't see each others' processes (they even get different PIDs, or process IDs, so they can't guess what the others have) and you can't steal or hijack what you can't see!
18 | 
19 | There's a lot more depth to namespaces beyond what I've outlined here. The process hiding described above is _just_ the PID (process ID) namespace. There are more namespaces as well (UTS, network, mount, user, and so on) and these help containers stay isolated from each other.
20 | 
21 | ## The problem with chroot alone
22 | 
23 | Now, this isn't secure. The only thing we've protected is the file system, mostly.
24 | 
25 | 1. chroot in a terminal into our environment
26 | 1. In another terminal, run `docker exec -it docker-host bash`. This will get another terminal session #2 for us (I'll refer to the chroot'd environment as #1)
27 | 1. Run `tail -f /my-new-root/secret.txt &` in #2. This will start an infinitely running process in the background.
28 | 1. Run `ps` to see the process list in #2 and see the `tail` process running. Copy the PID (process ID) for the tail process.
29 | 1. In #1, the chroot'd shell, run `kill <PID>`. This will kill the tail process from inside the `chroot'd` environment. This is a problem because it means chroot isn't enough to isolate someone. We need more barriers. This is just one problem (processes) but it's illustrative that we need more isolation beyond just the file system.
30 | 
31 | ## Safety with namespaces
32 | 
33 | So let's create a chroot'd environment now that's isolated using namespaces, using a new command: `unshare`. `unshare` creates a new isolated namespace from its parent (so you, the server provider, can't spy on Bob nor Alice either) and all other future tenants. Run this:
34 | 
35 | **NOTE**: This next command downloads about 150MB and takes at least a few minutes to run.
36 | 
37 | ```bash
38 | exit # from our chroot'd environment if you're still running it, if not skip this
39 | 
40 | # install debootstrap
41 | apt-get update -y
42 | apt-get install debootstrap -y
43 | debootstrap --variant=minbase bionic /better-root
44 | 
45 | # head into the new namespace'd, chroot'd environment
46 | unshare --mount --uts --ipc --net --pid --fork --user --map-root-user chroot /better-root bash # this also chroot's for us
47 | mount -t proc none /proc # process namespace
48 | mount -t sysfs none /sys # filesystem
49 | mount -t tmpfs none /tmp # filesystem
50 | ```
51 | 
52 | This will create a new environment that's isolated on the system with its own PIDs, mounts (like storage and volumes), and network stack. Now we can't see any of the processes!
53 | 
54 | Now try our previous exercise again.
55 | 
56 | 1. Run `tail -f /my-new-root/secret.txt &` from #2 (not the unshare env)
57 | 1. Run `ps` from #1, grab the PID for `tail`
58 | 1. Run `kill <PID>` and see that it doesn't work
59 | 
60 | We used namespaces to protect our processes! We could explore the other namespaces, but know it's a similar exercise: using namespaces to restrict containers' ability to interfere with other containers (both for nefarious purposes and to protect ourselves from ourselves.)
-------------------------------------------------------------------------------- /lessons/networking.md: --------------------------------------------------------------------------------
1 | ---
2 | title: "Networking with Docker"
3 | order: 6.4
4 | path: "/networking"
5 | section: "Features in Docker"
6 | description: "Networking is key when you start making projects with multiple containers. Docker allows you to create bridge networks to connect containers together. Brian shows how to connect two MongoDB containers so you can have one container running a database and one running the client that connects to the other."
7 | ---
8 | 
9 | This is not going to be a deep dive into how networking works. Networking is a deep, deep pool of knowledge and merits entire courses to understand. Just worrying about networking is some people's entire job due to the immense surface area of the subject. Instead, I want to just peek under the covers of how to do manual networking with Docker so you can understand what Docker Compose and Kubernetes do for you.
10 | 
11 | So why do we care about networking? Many reasons! Let's make our Node.js app a bit more complicated. What if it had a database? Let's connect it to a running MongoDB database. We _could_ start this MongoDB database inside of the same container, and this might be fine for development on the smallest of apps, but it'd be better and easier if we could just use the [mongo][mongo] container directly. But if I have two containers running at the same time (the app container and the MongoDB container), how do they talk to each other? Networking!
12 | 
13 | There are several ways of doing networking within Docker and all of them work differently depending on which operating system you're on. Again, this is a deep subject and we're just going to skim the surface. We're going to deal with the simplest: bridge networks. There is a default bridge network running all the time. If you want to check this out, run `docker network ls`. You'll see something like this:
14 | 
15 | ```bash
16 | $ docker network ls
17 | NETWORK ID          NAME                DRIVER              SCOPE
18 | xxxxxxxxxxxx        bridge              bridge              local
19 | xxxxxxxxxxxx        host                host                local
20 | xxxxxxxxxxxx        none                null                local
21 | ```
22 | 
23 | The bridge network is the one that exists all the time and we could attach to it if we wanted to, but Docker recommends against it, so we'll create our own. There's also the host network, which is the host computer's own network. The last network, with the `null` driver, is one you'd use if you wanted to use some other provider or if you wanted to do it manually yourself.
24 | 
25 | Go ahead and run `docker network create --driver=bridge app-net`
26 | 
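If you're curious what you just made, you can inspect it. A quick sketch; the container list will be empty until we attach something to the network:

```bash
docker network inspect app-net   # shows the driver, the subnet, and any attached containers
```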
27 | Once you've done that, let's start a MongoDB server. Run `docker run -d --network=app-net -p 27017:27017 --name=db --rm mongo:3`. I'm having you run a specific version of MongoDB, v3, because I know the package to interact with it is already available on Ubuntu. Feel free to use v4+ if you know it's available. We also added a few flags. The `--name` flag allows us to refer specifically to that one running container, and even better, it allows us to use that name as the container's address on the network. We'll see that in a sec. The other new flag, since we're using `--name`, is `--rm`. If we didn't use that, we'd have to run `docker rm db` before restarting our `db` container, since when Docker stops a container it doesn't delete it, or its logs and metadata, until you tell it to. The `--rm` means toss all that stuff as soon as the container finishes and free up that name again.
28 | 
29 | Now, for fun, we can use _another_ MongoDB container (because it has the `mongo` client on it in addition to the MongoDB server). Run this: `docker run -it --network=app-net --rm mongo:3 mongo --host db`. This will be one instance of a MongoDB container connecting to a different container over our Docker network. Cool, right? So let's make our Node.js app read and write to MongoDB!
30 | 
31 | ## Connecting our Node.js App to MongoDB
32 | 
33 | This isn't a course in MongoDB or anything; it's more to show you how to connect one app container to a database container, as well as to set you up for the next lesson, Docker Compose. And this sort of method works just as well for any DB: MySQL, Postgres, Redis, etc.
34 | 
35 | So first thing, let's add some logic to our app that reads and writes to MongoDB:
36 | 
37 | ```javascript
38 | // more-or-less the example code from the hapi-pino repo
39 | const hapi = require("@hapi/hapi");
40 | const { MongoClient } = require("mongodb");
41 | const url = process.env.MONGO_CONNECTION_STRING || "mongodb://localhost:27017";
42 | const dbName = "dockerApp";
43 | const collectionName = "count";
44 | 
45 | async function start() {
46 |   const client = await MongoClient.connect(url);
47 |   const db = client.db(dbName);
48 |   const collection = db.collection(collectionName);
49 | 
50 |   const server = hapi.server({
51 |     host: "0.0.0.0",
52 |     port: process.env.PORT || 3000
53 |   });
54 | 
55 |   server.route({
56 |     method: "GET",
57 |     path: "/",
58 |     async handler() {
59 |       const count = await collection.count();
60 |       return { success: true, count };
61 |     }
62 |   });
63 | 
64 |   server.route({
65 |     method: "GET",
66 |     path: "/add",
67 |     async handler() {
68 |       const res = await collection.insertOne({});
69 |       return { inserted: res.insertedCount };
70 |     }
71 |   });
72 | 
73 |   await server.register({
74 |     plugin: require("hapi-pino"),
75 |     options: {
76 |       prettyPrint: true
77 |     }
78 |   });
79 | 
80 |   await server.start();
81 | 
82 |   return server;
83 | }
84 | 
85 | start().catch(err => {
86 |   console.log(err);
87 |   process.exit(1);
88 | });
89 | ```
90 | 
91 | You could absolutely run this locally if you have MongoDB running on your host machine, since the default connection string will connect to a local MongoDB. But we also left it open so we can feed the app an environment variable to point it at a different container.
92 | 
93 | So build the container and run it using the following commands:
94 | 
95 | ```bash
96 | npm install mongodb@3.3 # you need to add mongodb to your project
97 | docker build --tag=my-app-with-mongo .
98 | docker run -p 3000:3000 --network=app-net --env MONGO_CONNECTION_STRING=mongodb://db:27017 my-app-with-mongo
99 | ```
100 | 
101 | Okay, so we added a new endpoint and modified one. The first one is `/add`, which will add an empty object (MongoDB will add an `_id` to it, so it's not totally empty). It will then return how many items it successfully added to MongoDB (hopefully 1!). And then we modified the `/` route to return the count of items in the database. Great! This is how the basics of networking work in Docker.
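To see it working end to end, hit the two endpoints a few times. A quick sketch, assuming the container from above is still running on port 3000:

```bash
curl localhost:3000/add   # {"inserted":1}
curl localhost:3000/add   # {"inserted":1} again; each call adds one document
curl localhost:3000       # {"success":true,"count":2}
```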
102 | 
103 | One key thing here that we need to discuss: if you shut down that MongoDB container, where is your data going to go? Well, it'll disappear. How do you mitigate this? Usually with some sort of volume that lives beyond the container, and usually by having more than one MongoDB container running. It's beyond the scope of this course, but you already have the tools you need to be able to do that.
104 | 
105 | Congrats! You've done basic networking in Docker. Now let's go use other tools to make this easier for us.
106 | 
107 | [mongo]: https://hub.docker.com/_/mongo
108 | 
-------------------------------------------------------------------------------- /lessons/nodejs-on-docker.md: --------------------------------------------------------------------------------
1 | ---
2 | path: "/nodejs-on-docker"
3 | title: "Node.js on Docker"
4 | order: 3.3
5 | section: "Docker"
6 | description: "To demonstrate the wide variety of containers that exist, Brian shows you how to pull down a Node.js container that uses Debian instead of Ubuntu."
7 | ---
8 | 
9 | ## Node.js on Containers
10 | 
11 | So now what if we wanted to run a container that has Node.js in it? The default Ubuntu container doesn't have Node.js installed. Let's use a different container!
12 | 
13 | ```
14 | docker run -it node:12-stretch
15 | ```
16 | 
17 | The version we're using here is Node.js 12, and Stretch refers to the version of Debian (which is what the Node.js container uses by default.)
18 | 
19 | Notice this drops us into the Node.js REPL, which may or may not be what you want. What if we wanted to be dropped into bash inside that container? Easy! You already know how!
20 | 
21 | ```bash
22 | docker run -it node:12-stretch bash
23 | ```
24 | 
25 | Remember, after we identify the container ([node][node]), anything we put after gets evaluated instead of the default command identified by the container (in the `node` container's case, it runs the command `node` by default). This allows us to run whatever command we want! In this case, we're executing `bash`, which puts us directly into a bash shell.
26 | 
27 | So what flavor of Linux is the `node` image running? Honestly, I didn't even remember when I was writing this. But it's easy to find out! There's a file on every\* Linux OS that has in it what sort of Linux it's running. If we run `cat /etc/issue` it'll show us what sort of Linux it is. `cat` is a way to output a file's contents to the terminal. Try running the two commands:
28 | 
29 | ```bash
30 | docker run ubuntu:bionic cat /etc/issue # hopefully this shouldn't surprise you
31 | docker run node:12-stretch cat /etc/issue # ????
32 | ```
33 | 
34 | 
35 | We'll get into how to select which Linux distros you should use later, but for now this is just a fun exercise.
36 | 
37 | [node]: https://hub.docker.com/_/node/
-------------------------------------------------------------------------------- /lessons/podman.md: --------------------------------------------------------------------------------
1 | ---
2 | path: "/podman"
3 | order: 8.1
4 | title: "Podman"
5 | section: "OCI (Non-Docker) Containers"
6 | description: "Podman allows you to run OCI or Docker containers. Brian takes the Buildah container that was built in the previous section and runs it with Podman."
7 | ---
8 | 
9 | I'm going to do this inside of the [Buildah container][buildah-container] because this was quite difficult to set up outside of it. And in reality most of you will be doing this with Docker anyway, so this is more of an academic exercise.
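Before running anything, you can confirm Podman sees the image at all: Buildah and Podman share the same local image store, so anything Buildah built should show up. A quick sketch:

```bash
podman images   # you should see localhost/my-app-buildah in the list
```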
10 | 
11 | After having built your container above with Buildah, run this:
12 | 
13 | ```bash
14 | podman run --cgroup-manager cgroupfs -p 3000:3000 localhost/my-app-buildah
15 | ```
16 | 
17 | This will start Podman managing an instance of your Buildah-built container! By default this runs your container in the foreground; you can run it in the background by adding `-d`.
18 | 
19 | ### Run your Buildah container with Docker
20 | 
21 | In order to do this part, you have to run this outside of a container.
22 | 
23 | We need to first transfer our container out of Buildah and into Docker. We also need to be aware that there are two ways to package a container: Docker and OCI. If we tell Buildah to push to Docker, it'll fix that automatically, but be aware you can also use OCI (Open Container Initiative) images as well.
24 | 
25 | Just like you can push a container to Docker Hub, you can use the same mechanism within Buildah to push to a local Docker daemon (a background process.) So go ahead and run `buildah push localhost/my-app-buildah docker-daemon:my-app-buildah:latest`. This will move our app out of Buildah and into Docker. Now if you run `docker run -it my-app-buildah bash` it should drop you into a running container. As a fun exercise, try to start the Node.js app and connect it to a running `mongo` container using the techniques we learned before. Now you have one container built using Docker connecting to a container built using Buildah. Pretty cool!
26 | 
27 | [buildah-container]: /buildah
-------------------------------------------------------------------------------- /lessons/static-assets-project.md: --------------------------------------------------------------------------------
1 | ---
2 | title: "Static Assets Project"
3 | path: "/static-assets-project"
4 | order: 5.3
5 | section: "Making Tiny Containers"
6 | description: "Brian gives an exercise to make a create-react-app app be served by NGINX so you can see how to pull in a container someone else has built and build on top of it."
7 | ---
8 | 
9 | We're going to do a project now! Feel free to attempt the project first and then follow along with me as I code the answer.
10 | 
11 | We're going to construct a very basic front end website with React, TypeScript, and Sass. Why these? Because I want it to have a lot of dependencies and a big build step. This class isn't about any of these things (and I don't personally endorse the use of Sass anymore, though I respect those who do.) If you want to take a class on React, my [intro][intro] and [intermediate][intermediate] classes are available on Frontend Masters.
12 | 
13 | We're going to create a new project using [create-react-app][cra]. Go to the directory where you want to create the new project (CRA will create the folder for you) and run `npx --ignore-existing create-react-app static-assets-project --template typescript --use-npm`. Do note: create-react-app has been having issues with global installs of create-react-app. If you have it installed globally right now, please uninstall it and use the above command.
14 | 
15 | This will scaffold out a whole new TypeScript React project for you.
16 | 
17 | **OPTIONAL:** Go into the project and run `npm install node-sass`. This will install the Sass compiler for you. Go change both of the `.css` files in this project to have the `.scss` extension (everything that's valid in CSS is valid in SCSS.) Update the two `.css` imports in `App.tsx` and `index.tsx` to be `.scss` imports instead. If you're struggling with the Sass stuff, feel free to leave it out and just go with an out-of-the-box CRA app; the Sass stuff is just there to drive home the point that you can have as many dependencies as you want.
18 | 
19 | To make sure this works right now, run `npm run start` in your console and make sure the app starts okay. You should see a splash screen. Once you're ready to build it, run `npm run build` to have it build for production.
20 | 
21 | The project is to make a multi-stage Dockerfile that builds the project in one container and then serves it from a different container using NGINX. If you're not familiar with NGINX, fear not! It is a static file server, which is to say it takes HTML, CSS, JS, images, fonts, etc. and serves them to your users. It handles all the serving and file headers for you. Using it can be accomplished in a few steps. You'll use the `nginx:latest` (or `nginx:alpine`! up to you) container and copy **just the newly built files, not everything** (they're in the `build` directory inside of the CRA app) to `/usr/share/nginx/html`, and NGINX will take care of the rest. The `nginx` container defines a `CMD` in it, and if you don't override it, it starts NGINX for you. Give it a shot! Once you've tried, come back here and we'll do the solution together. NGINX runs on port 80 by default, so you probably want to route that to something like 8080 on your host machine (otherwise you have to run it as root, which no one wants to do.)
22 | 
23 | Scroll down to see my answer.
24 | 
25 | 
26 | 
27 | Done? If you gave it a shot, your Dockerfile probably shouldn't be very long. Let's see what I came up with:
28 | 
29 | ```Dockerfile
30 | FROM node:latest
31 | WORKDIR /app
32 | COPY . .
33 | RUN npm ci && npm run build
34 | 
35 | # you could totally use nginx:alpine here too
36 | FROM nginx:latest
37 | COPY --from=0 /app/build /usr/share/nginx/html
38 | ```
39 | 
40 | Now if you run this, it should work:
41 | 
42 | ```bash
43 | docker build -t static-app .
44 | docker run -p 8080:80 static-app
45 | ```
46 | 
47 | It should be working now! Hooray! Hopefully you're starting to see the power of what Docker can unlock for you.
48 | 
49 | [intro]: https://frontendmasters.com/courses/complete-react-v5/
50 | [intermediate]: https://frontendmasters.com/courses/intermediate-react-v2/
51 | [cra]: https://create-react-app.dev
52 | 
-------------------------------------------------------------------------------- /lessons/tags.md: --------------------------------------------------------------------------------
1 | ---
2 | path: "/tags"
3 | title: "Tags"
4 | order: 3.4
5 | section: "Docker"
6 | description: "Tags are an essential part of the Docker CLI that allow you to label your containers with referenceable names and tie your app to specific versions of containers, whether that's a certain version of Linux or a particular version of Node.js (or both!)"
7 | ---
8 | 
9 | ## Tags
10 | 
11 | So far we've just been running containers with random tags that I chose. If you run `docker run -it node`, the tag is implicitly `latest`. When you say `docker run -it node`, it's the same as saying `docker run -it node:latest`. The `:latest` is the tag. This allows you to run different versions of the same container, just like you can install React version 15 or React version 16: sometimes you don't want the latest. Let's say you have a legacy application at your job and it depends on running on Node.js 8 (update your app, Node.js 8 is already past end-of-life). Then you can say
12 | 
13 | ```bash
14 | docker run -it node:8 bash
15 | ```
16 | 
17 | Once in the shell, run `node --version` and you'll see the Node.js version is 8.x.x! Neat! This is helpful because now we can fix our Node.js version to the one our app expects. Hop back over to [the Docker Hub page for the node container][node]. Take a look at all the versions of the node container you can download. Let's try another one.
18 | 
19 | ```bash
20 | docker run node:12-alpine cat /etc/issue
21 | ```
22 | 
23 | You'll see this is running an entirely different OS altogether: Alpine! [Alpine Linux][alpine] is a very, very tiny distro of Linux made for containers and specifically because it is tiny. Alpine containers are bare bones: if you want _anything_ in them, you're going to have to do it yourself. This is in opposition to the Ubuntu and Debian containers: they ship the kitchen sink with them, which is both convenient and much bigger in size. Alpine images are about five megabytes whereas Ubuntu's are close to two hundred megabytes. As you can imagine, this can make a difference in how fast you can deploy and can cost significantly less in terms of storage and network traffic. It's also in general better to have fewer unnecessary things in your containers: less is more in terms of security. If an attacker tries to execute a Python exploit on your container but your container doesn't have Python, then their attack won't work.
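If you want to see the size difference for yourself, pull both and compare. A quick sketch; the exact numbers will drift a bit from version to version:

```bash
docker pull alpine:3.10
docker pull ubuntu:bionic
docker image ls alpine   # around five megabytes
docker image ls ubuntu   # closer to two hundred megabytes
```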
24 | 
25 | We'll get more into how to ship containers to production, but I'll leave you with this pro-tip: have a development container which has all the bells, whistles, debugging tools, etc. that you need. Then have a production container that's as minimalist as it can possibly be. You'll get the best of both worlds.
26 | 
27 | [node]: https://hub.docker.com/_/node/
28 | [alpine]: https://hub.docker.com/_/alpine
29 | 
-------------------------------------------------------------------------------- /lessons/visual-studio-code.md: --------------------------------------------------------------------------------
1 | ---
2 | order: 6.3
3 | path: "/visual-studio-code"
4 | title: "Dev Containers with Visual Studio Code"
5 | section: "Features in Docker"
6 | description: "Containers can be used in conjunction with Visual Studio Code to automatically set up development environments for yourself and other developers working on the project. Brian shows how to set up dev containers specifically for Visual Studio Code."
7 | ---
8 | 
9 | ## Visual Studio Code
10 | 
11 | As you may imagine, I'm a big fan of Visual Studio Code. And I'm a big fan of investing time to learn your tools very well. If you haven't taken [Burke Holland's course on Frontend Masters][burke] on Visual Studio Code, check it out. He touches on some of the things we're about to talk about here as well.
12 | 
13 | Visual Studio Code has a relatively recent feature where it can connect to remote environments (remote in the sense that you're editing files that don't live on your host.) You can use VSCode to connect to remote virtual machines (or really anything that's SSH-able), to WSL (Linux running on Windows), and finally to containers.
14 | 
15 | Go ahead and install [the Remote - Containers][remote] extension.
16 | 
17 | This takes everything one step further: you can actually set up someone's editor for them when they open your project. You can change settings, add extensions, define debugging, and control the container environment with the remote extension and dev containers. Let's go ahead and give it a shot!
18 | 
19 | Make a folder within your static-app project called `.devcontainer`. In there we'll put two files. The first one is the Dockerfile, where we'll just set up our dev environment.
20 | 
21 | ```Dockerfile
22 | FROM node:12-stretch
23 | RUN npm install -g eslint prettier
24 | ```
25 | 
26 | We just need the tools and environment; we don't actually need to build anything or put the code in there. Visual Studio Code will handle that automatically. Next make a file inside `.devcontainer` called `devcontainer.json`. The folder name has a leading `.`; the JSON file does not.
27 | 
28 | ```json
29 | {
30 |   "name": "Frontend Masters Sample",
31 |   "dockerFile": "Dockerfile",
32 |   "appPort": [3000],
33 |   "runArgs": ["-u", "node"],
34 |   "settings": {
35 |     "workbench.colorTheme": "Night Owl",
36 |     // "workbench.colorTheme": "Hot Dog Stand",
37 |     "terminal.integrated.shell.linux": "/bin/bash"
38 |   },
39 |   "postCreateCommand": "npm install",
40 |   "extensions": [
41 |     // "somekittens.hot-dog-stand",
42 |     "sdras.night-owl",
43 |     "dbaeumer.vscode-eslint",
44 |     "esbenp.prettier-vscode"
45 |   ]
46 | }
47 | ```
48 | 
49 | From here, close Visual Studio Code and then reopen the project. You should see a little prompt asking you if you want to re-open the project in a container. Click yes! If you miss that prompt, click the (normally green) button in the bottom left of VSCode that looks sort of like `><` shoved together. It should have an option to open this project in a dev container.
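Once the window reloads, a quick way to prove you're really working inside the container is to poke around the integrated terminal. A small sketch:

```bash
node --version   # should report the v12 from the dev Dockerfile above
which eslint     # installed globally by the RUN npm install -g line
cat /etc/issue   # Debian, regardless of what your host OS is
```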
50 | 
51 | **NOTE**: if you're on Windows and you're following along with WSL, you'll have to get the project _out_ of WSL before it'll let you re-open it in a container. This will hopefully be a smoother experience in the future. To get into Windows from WSL, click the same `><` logo in the bottom left and choose to open the project in Windows. From there the above instructions should work.
52 | 
53 | Couple of key things here:
54 | 
55 | - We can have two different Dockerfiles for dev and production, or we can have one. I generally have two unless they overlap so much they're basically the same.
56 | - We're setting up our colleagues for success by making sure everyone has the correct extensions installed for dev. In this case, I added Prettier and ESLint to my team's environment so they can have instant feedback when they're working.
57 | - We can add settings to their environment (like formatting on save) so that everything just works the same for everyone. No worries: your team can override any of this if it's not for them.
58 | 
59 | This whole workflow works for VSCode users only, so not everyone will get to take part in the magic. However, at the end of it all, it's just a container for development environments and they have the Dockerfile. They can still build and run it via the techniques above!
60 | 
61 | From here you can get as complicated as you need, setting up memory dumps, tracing, and the like. I leave that as an exercise for you (as I'm not the most knowledgeable on how to do it) but whatever you can do with a bash script can be done by Docker for you!
62 | 
63 | [remote]: https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers
64 | [burke]: https://frontendmasters.com/workshops/visual-studio-code/
65 | 
-------------------------------------------------------------------------------- /lessons/volumes.md: --------------------------------------------------------------------------------
1 | ---
2 | title: "Volumes"
3 | path: "/volumes"
4 | order: 6.1
5 | section: "Features in Docker"
6 | description: "Volumes are so that your containers can maintain state between runs. If you have a container that runs and the next time it runs it needs the results from the previous time it ran, you need volumes."
7 | ---
8 | 
9 | Bind mounts are great for when you need to share data between your host and your container, as we just learned. Volumes, on the other hand, are so that your containers can maintain state between runs. So if you have a container that runs, and the next time it runs it needs the results from the previous time it ran, volumes are going to be helpful. Volumes can not only be shared by the same container-type between runs but also between different containers. Maybe you have two containers and you want to consolidate their logs in one place; volumes could help with that.
10 | 
11 | The key here is this: bind mounts are file systems managed by the host. They're just normal files in your host being mounted into a container. Volumes are different because they're a new file system that Docker manages and mounts into your container. These Docker-managed file systems are not visible to the host system (they can be found, but they're not designed to be.)
12 | 
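Docker has a whole sub-command for poking at these managed file systems. A quick sketch of the basics (`incrementor-data` is the named volume we'll create later in this lesson):

```bash
docker volume ls                         # list every volume Docker is managing
docker volume inspect incrementor-data   # metadata, including where it lives on disk
docker volume rm incrementor-data        # delete a volume you're done with
docker volume prune                      # delete all volumes no container is using
```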
13 | Let's make a quick Node.js app that reads from a file that has a number in it, prints it, writes an incremented number back to a volume, and finishes. Create a new Node.js project.
14 | 
15 | ```bash
16 | mkdir docker-volume
17 | cd docker-volume
18 | touch index.js Dockerfile
19 | ```
20 | 
21 | Inside that Node.js file, put this:
22 | 
23 | ```javascript
24 | const fs = require("fs").promises;
25 | const path = require("path");
26 | 
27 | const dataPath = path.join(process.env.DATA_PATH || "./data.txt");
28 | 
29 | fs.readFile(dataPath)
30 |   .then(buffer => {
31 |     const data = buffer.toString();
32 |     console.log(data);
33 |     writeTo(+data + 1);
34 |   })
35 |   .catch(e => {
36 |     console.log("file not found, writing '0' to a new file");
37 |     writeTo(0);
38 |   });
39 | 
40 | const writeTo = data => {
41 |   fs.writeFile(dataPath, data.toString()).catch(console.error);
42 | };
43 | ```
44 | 
45 | Don't worry too much about the Node.js. It looks for a file at `$DATA_PATH` if that's set, or at `./data.txt` if it isn't. If the file exists, it reads it, logs it, and writes back to the data file after incrementing the number. If you just run it right now, it'll create a `data.txt` file with 0 in it. If you run it again, it'll have `1` in there, and so on. So let's make this work with volumes.
46 | 
47 | ```dockerfile
48 | FROM node:12-alpine
49 | COPY --chown=node:node . /src
50 | WORKDIR /src
51 | CMD ["node", "index.js"]
52 | ```
53 | 
54 | Now run
55 | 
56 | ```bash
57 | docker build --tag=incrementor .
58 | docker run incrementor
59 | ```
60 | 
61 | Every time you run this, it'll print the same thing. That's because nothing is persisted once the container finishes. We need something that can live between runs. We could use bind mounts and it would work, but this data is only designed to be used and written to within Docker, which makes volumes preferable (and recommended by Docker). If you use volumes, Docker can handle backups, cleanups, and more security for you. If you use bind mounts, you're on your own.
62 | 
63 | So, without having to rebuild your container, try this
64 | 
65 | ```bash
66 | docker run --env DATA_PATH=/data/num.txt --mount type=volume,src=incrementor-data,target=/data incrementor
67 | ```
68 | 
69 | Now you should be able to run it multiple times and everything should work! We use the `--env` flag to set `DATA_PATH` to where we want Node.js to write the file, and we use `--mount` to mount a named volume called `incrementor-data`. You can leave the name out and it'll be an anonymous volume that will persist beyond the container, but Docker won't automatically choose the right one on future runs. Awesome!
70 | 
71 | ## named pipes, tmpfs, and wrap up
72 | 
73 | Prefer to use volumes when you can; use bind mounts where it makes sense. If you're still unclear, the [official Docker docs][storage] are pretty good on the subject.
74 | 
75 | There are two more mount types that we didn't talk about, `tmpfs` and `npipe`. The former is Linux-only and the latter is Windows-only (we're not going over Windows containers at all in this workshop.) `tmpfs` imitates a file system but actually keeps everything in memory. This is useful for mounting in secrets like database keys. The latter is useful for mounting third-party tools for Windows containers. If you need more info than that, refer to the docs. I've never directly used either.
76 | 
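If you're curious, a `tmpfs` mount uses the same `--mount` flag with a different type. A tiny sketch (Linux only; nothing written here survives the container):

```bash
docker run --rm -it --mount type=tmpfs,target=/secrets ubuntu:bionic bash
# anything written to /secrets lives in memory only and vanishes with the container
```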
77 | [storage]: https://docs.docker.com/storage/
-------------------------------------------------------------------------------- /lessons/what-are-containers.md: --------------------------------------------------------------------------------
1 | ---
2 | path: "/what-are-containers"
3 | title: "What Are Containers?"
4 | order: 2.0
5 | section: "Crafting Containers By Hand"
6 | description: "Containers are simpler than you think they are. Brian lays out an abridged history of virtualization, why the creation of containers was necessary, and what containers actually achieve for you."
7 | ---
8 | 
9 | Containers are probably simpler than you think they are. Before I took a deep dive into what they are, I was very intimidated by the concept of containers. I thought they were for people super-versed in Linux and sysadmin-type activities. In reality, the core of what containers are is just a few features of the Linux kernel duct-taped together. Honestly, there's no single concept of a "container": it's just using a few features of Linux together to achieve isolation. That's it.
10 | 
11 | So how comfortable are you with the command line? This course doesn't assume wizardry with bash or zsh, but this probably shouldn't be your first adventure with it. If it is, [check out Jem's course on it][jem]. It will give you more than you'll need to keep up with this course.
12 | 
13 | ## Why Containers
14 | 
15 | Let's start with the why first: why do we need containers?
16 | 
17 | ### Bare Metal
18 | 
19 | Historically, if you wanted to run a web server, you either set up your own or you rented a literal server somewhere. We often call this "bare metal" because, well, your code is literally executing on the processor with no abstraction. This is great if you're extremely performance-sensitive and you have ample and competent staffing to take care of these servers.
20 | 
21 | The problem with running your servers on bare metal is that you become extremely inflexible. Need to spin up another server? Call up Dell or IBM and ask them to ship you another one, then get your tech to install the physical server, set it up, and bring it into the server farm. That only takes a month or two, right? Pretty much instant. 😄
22 | 
23 | Okay, so now at least you have a pool of servers responding to web traffic. Now you just have to worry about keeping the operating system up to date. Oh, and all the drivers connecting to the hardware. And all the software running on the server. And replacing the components of your server as new ones come out. Or maybe the whole server. And fixing failed components. And network issues. And running cables. And your power bill. And who has physical access to your server room. And the actual temperature of the data center. And paying a ridiculous Internet bill. You get the point. Managing your own servers is _hard_ and requires a whole team to do it.
24 | 
25 | ### Virtual Machines
26 | 
27 | Virtual machines are the next step. This adds a layer of abstraction between you and the metal. Now instead of having one instance of Linux running on your computer, you'll have multiple guest instances of Linux running inside of a host instance of Linux (it doesn't have to be Linux, but I'm using it to be illustrative.) Why is this helpful? For one, I can have one beefy server and have it spin up and down servers at will. So now if I'm adding a new service, I can just spin up a new VM on one of my servers (provided I have space to do so.) This allows a lot more flexibility.
28 | 
29 | Another thing is I can separate two VMs on the same machine _totally_ from each other. This affords a few nice things.
30 | 
31 | 1. Imagine both Coca-Cola and Pepsi lease a server from Microsoft Azure to power their soda-making machines and hence have the recipe on the server.
If Microsoft puts both of these servers on the same physical server with no separation, one soda-maker could just SSH into the server, browse the competitor's files, and find the secret recipe. This is a massive security problem.
32 | 1. Imagine one of the soda-makers discovers that they're on the same server as their competitor. They could drop a [fork bomb][fork-bomb] and devour all the resources their competitor's website was using.
33 | 1. Much less nefariously, any person on a shared-tenant server could unintentionally crash the server and thus ruin everyone's day.
34 | 
35 | So enter VMs. These are individual operating systems that, as far as they know, are running on bare metal themselves. The host operating system offers the VM a certain amount of resources, and if that VM runs out, it runs out without affecting the other guest operating systems running on the server. If your neighbors crash their server, they crash their own guest OS and yours hums along unaffected. And since each one is inside a guest OS, they can't peek into your files: their VM has no concept of any sibling VMs on the machine, so it's much more secure.
36 | 
37 | All of these features come at the cost of a bit of performance. Running an operating system within an operating system isn't free. But in general we have enough computing power and memory that this isn't the primary concern. And of course, with abstraction comes ease at the cost of additional complexity. In this case, the advantages very much outweigh the cost most of the time.
38 | 
39 | If you want to play with VMs, Jem does a great job showing you how in his [Full Stack course][jem].
40 | 
41 | ### Public Cloud
42 | 
43 | So, as alluded to above, you can nab a VM from a public cloud provider like Microsoft Azure or Amazon Web Services. It will come with a pre-allocated amount of memory and computing power (often called virtual cores or vCores because they're cores dedicated to your virtual machine.) Now you no longer have to manage the expensive and difficult business of maintaining a data center, but you still have to manage all the software yourself: Microsoft won't update Ubuntu for you, but they will make sure the hardware is up to date.
44 | 
45 | But now you have the great ability to spin up and spin down virtual machines in the cloud, giving you access to resources with the only upper bound being how much you're willing to pay. And we've been doing this for a while. But the hard part is that they're still just giving you machines: you have to manage all the software, networking, provisioning, updating, etc. for all these servers. And lots of companies still do! Tools like Terraform, Chef, Puppet, Salt, etc. help a lot here because they handle the software needed to get a new VM going, which makes spinning one up easy.
46 | 
47 | We're still paying the cost of running a whole operating system in the cloud inside of a host operating system. It'd be nice if we could just run the code inside the host OS without the additional expenditure of guest OSes.
48 | 
49 | ### Containers
50 | 
51 | And here we are: containers. As you may have divined, containers give us many of the security and resource-management features of VMs without the cost of having to run a whole other operating system. Instead they use chroot, namespaces, and cgroups to separate a group of processes from each other. If this sounds a little flimsy to you and you're still worried about security and resource management, you're not alone. 
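Before the next lessons dig into each of those three primitives, here's a rough taste of what they look like on a Linux box. This is just a sketch: the new-root path is a placeholder, and the cgroup lines assume cgroup v2 is mounted at `/sys/fs/cgroup`.

```bash
# chroot: the process sees new-root/ as its entire filesystem
sudo chroot /path/to/new-root /bin/sh

# namespaces: give this shell its own process tree (inside, ps shows it as PID 1)
sudo unshare --pid --fork --mount-proc bash

# cgroups: cap everything we put in the "sandbox" group at half a CPU
sudo mkdir /sys/fs/cgroup/sandbox
echo "50000 100000" | sudo tee /sys/fs/cgroup/sandbox/cpu.max
echo $$ | sudo tee /sys/fs/cgroup/sandbox/cgroup.procs
```
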
But I assure you a lot of very smart people have worked out the kinks and containers are the future of deploying code. 52 | 53 | So now that we've been through why we need containers, let's go through the three things that make containers a reality. 54 | 55 | [fork-bomb]: https://en.wikipedia.org/wiki/Fork_bomb 56 | [jem]: https://frontendmasters.com/courses/fullstack-v2/ 57 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "gatsby-course-starter", 3 | "description": "a gatsby seed project to get your education site started", 4 | "version": "1.0.0", 5 | "author": "Brian Holt ", 6 | "dependencies": { 7 | "bootstrap": "^5.1.3", 8 | "code-mirror-themes": "^1.0.0", 9 | "front-matter": "^4.0.2", 10 | "gatsby": "^4.6.2", 11 | "gatsby-cli": "^4.6.1", 12 | "gatsby-link": "^4.6.0", 13 | "gatsby-plugin-layout": "^3.6.0", 14 | "gatsby-plugin-react-helmet": "^5.6.0", 15 | "gatsby-plugin-sharp": "4.6.0", 16 | "gatsby-remark-autolink-headers": "^5.6.0", 17 | "gatsby-remark-copy-linked-files": "^5.6.0", 18 | "gatsby-remark-images": "^6.6.0", 19 | "gatsby-remark-prismjs": "^6.6.0", 20 | "gatsby-source-filesystem": "^4.6.0", 21 | "gatsby-transformer-remark": "^5.6.0", 22 | "is-url-superb": "^5.0.0", 23 | "parse-markdown-links": "^1.0.4", 24 | "prismjs": "^1.26.0", 25 | "react": "^17.0.2", 26 | "react-dom": "^17.0.2", 27 | "react-helmet": "^6.1.0" 28 | }, 29 | "keywords": [ 30 | "gatsby", 31 | "gatsby-starter", 32 | "course", 33 | "education" 34 | ], 35 | "license": "(CC-BY-NC-4.0 OR Apache-2.0)", 36 | "main": "n/a", 37 | "scripts": { 38 | "build": "gatsby build --prefix-paths", 39 | "csv": "node csv.js", 40 | "dev": "gatsby develop", 41 | "format": "prettier --write \"src/**/*.{js,jsx,md,css}\"", 42 | "lint": "eslint \"src/**/*.{js,jsx}\"", 43 | "deploy": "gatsby build --prefix-paths && gh-pages -d public -b gh-pages" 44 | }, 45 | "devDependencies": { 46 | "@babel/polyfill": "^7.12.1", 47 | "babel-eslint": "^10.1.0", 48 | "core-js": "^3.21.0", 49 | "eslint": "^8.8.0", 50 | "eslint-config-prettier": "^8.3.0", 51 | "eslint-plugin-import": "^2.25.4", 52 | "eslint-plugin-jsx-a11y": "^6.5.1", 53 | "eslint-plugin-react": "^7.28.0", 54 | "prettier": "^2.5.1", 55 | "gh-pages": "^3.2.3" 56 | } 57 | } -------------------------------------------------------------------------------- /src/components/TOCCard.css: -------------------------------------------------------------------------------- 1 | .main-card { 2 | border: 1px solid #ccc; 3 | border-radius: 8px; 4 | width: 100%; 5 | margin: 0; 6 | overflow: hidden; 7 | background-color: white; 8 | } 9 | 10 | .lesson-title { 11 | font-size: 20px; 12 | padding: 15px 30px; 13 | } 14 | 15 | .lesson-content { 16 | padding: 0 15px 15px 15px; 17 | line-height: 1.5; 18 | } 19 | 20 | .sections-name { 21 | list-style: none; 22 | } 23 | 24 | .lesson-section-title { 25 | margin-top: 25px; 26 | } 27 | -------------------------------------------------------------------------------- /src/components/TOCCard.js: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | import Link from "gatsby-link"; 3 | 4 | import "./TOCCard.css"; 5 | 6 | const LessonCard = ({ content, title }) => { 7 | const sections = content 8 | .map(lesson => [lesson.node.frontmatter.section, lesson.node.frontmatter]) 9 | .reduce((acc, [name, lesson]) => { 10 | if (!acc.length) { 11 | acc.push([lesson]); 12 | return 
acc;
13 |       }
14 | 
15 |       const lastName = acc[acc.length - 1][0].section;
16 |       if (lastName === name) {
17 |         acc[acc.length - 1].push(lesson);
18 |       } else {
19 |         acc.push([lesson]);
20 |       }
21 | 
22 |       return acc;
23 |     }, []);
24 | 
  // JSX below reconstructed from the stripped dump; classNames come from TOCCard.css, key props are assumptions
25 |   return (
26 |     <div className="main-card">
27 |       <h1 className="lesson-title">{title}</h1>
28 |       <div className="lesson-content">
29 |         <ol className="sections-name">
30 |           {sections.map(section => (
31 |             <li key={section[0].section}>
32 |               <h3 className="lesson-section-title">{section[0].section}</h3>
33 |               <ol>
34 |                 {section.map(lesson => (
35 |                   <li key={lesson.path}>
36 |                     <Link to={lesson.path}>{lesson.title}</Link>
37 |                   </li>
38 |                 ))}
39 |               </ol>
40 |             </li>
41 |           ))}
42 |         </ol>
43 |       </div>
    </div>
45 | ); 46 | }; 47 | 48 | export default LessonCard; 49 | -------------------------------------------------------------------------------- /src/layouts/index.css: -------------------------------------------------------------------------------- 1 | .gradient { 2 | background: rgb(96, 108, 136); 3 | background: linear-gradient( 4 | to bottom, 5 | rgb(96, 108, 136) 0%, 6 | rgb(63, 76, 107) 100% 7 | ); 8 | } 9 | 10 | .navbar { 11 | border-bottom: 1px solid #ccc; 12 | position: fixed; 13 | width: 100%; 14 | top: 0; 15 | z-index: 10; 16 | display: flex; 17 | padding: 10px; 18 | justify-content: space-between; 19 | align-items: center; 20 | padding: 0.5rem 1rem; 21 | } 22 | 23 | .navbar h1 { 24 | font-size: 20px; 25 | margin: inherit; 26 | padding: inherit; 27 | font-weight: bold; 28 | } 29 | 30 | .navbar h2, 31 | .navbar h3 { 32 | font-size: 14px; 33 | margin: inherit; 34 | padding: inherit; 35 | text-transform: uppercase; 36 | color: white; 37 | } 38 | 39 | 40 | .button { 41 | border-radius: 10px; 42 | padding: 0; 43 | background: #0066cc; 44 | color: white; 45 | display: flex; 46 | justify-content: center; 47 | align-items: center; 48 | text-decoration: none; 49 | } 50 | 51 | .button .icon { 52 | padding-left: 5px; 53 | display: inline-block; 54 | } 55 | 56 | .button a { 57 | padding: 3px 8px; 58 | color: white; 59 | text-decoration: none; 60 | } 61 | 62 | @media (max-width: 1450px) { 63 | .mobile-hidden { 64 | display: none; 65 | } 66 | .lesson-container { 67 | padding-top: 65px; 68 | } 69 | } 70 | 71 | .button:hover { 72 | background: #0d6efd; 73 | } 74 | 75 | .jumbotron { 76 | padding: 2rem 1rem; 77 | margin-bottom: 2rem; 78 | background-color: #e9ecef; 79 | border-radius: .3rem; 80 | } 81 | 82 | @media (min-width: 576px) { 83 | .jumbotron { 84 | padding: 4rem 2rem; 85 | } 86 | } 87 | 88 | .jumbotron.gradient { 89 | color: white; 90 | text-transform: uppercase; 91 | font-weight: bold; 92 | } 93 | 94 | .navbar-brand.navbar-brand { 95 | text-transform: uppercase; 96 | color: white; 97 | font-weight: bold; 98 | } 99 | 100 | .navbar-brand.navbar-brand:hover { 101 | color: #777; 102 | } 103 | 104 | .navbar-brand.navbar-brand:focus { 105 | color: white; 106 | } 107 | 108 | .lesson { 109 | margin: 15px; 110 | padding: 15px; 111 | background-color: #fff; 112 | border-radius: 8px; 113 | overflow: scroll; 114 | } 115 | 116 | .lesson p { 117 | clear: both; 118 | } 119 | 120 | .lesson-links { 121 | font-size: 18px; 122 | padding: 15px 0; 123 | } 124 | 125 | .next { 126 | float: right; 127 | } 128 | 129 | .prev { 130 | float: left; 131 | } 132 | 133 | .lesson-title { 134 | color: white; 135 | text-transform: uppercase; 136 | font-weight: bold; 137 | } 138 | 139 | .klipse-result { 140 | border: 1px solid #90b4fe; 141 | padding-top: 8px; 142 | position: relative; 143 | width: 100%; 144 | } 145 | 146 | .klipse-result .CodeMirror-wrap { 147 | width: 100%; 148 | border-color: transparent; 149 | } 150 | 151 | .klipse-result::before { 152 | content: "result"; 153 | background-color: white; 154 | position: absolute; 155 | top: -13px; 156 | height: 13px; 157 | } 158 | 159 | .language-htm, 160 | .language-css, 161 | .language-js, 162 | .language-json { 163 | width: 100%; 164 | } 165 | 166 | .gatsby-highlight { 167 | /* border: 1px solid black; */ 168 | padding: 4px; 169 | border-radius: 4px; 170 | display: flex; 171 | justify-content: space-between; 172 | flex-direction: column; 173 | align-items: stretch; 174 | } 175 | 176 | .CodeMirror-wrap { 177 | width: 100%; 178 | font-size: 12px; 179 | height: inherit; 180 
| margin-bottom: 12px;
181 | }
182 | 
183 | .CodeMirror-gutters {
184 |   height: inherit !important;
185 | }
186 | 
187 | .klipse-snippet > .CodeMirror {
188 |   border: none;
189 |   width: 100%;
190 | }
191 | 
192 | .gatsby-highlight > .klipse-snippet {
193 |   border: 1px solid #90b4fe;
194 |   width: 100%;
195 |   border-right: none;
196 |   position: relative;
197 |   margin-bottom: 15px;
198 | }
199 | 
200 | .doggos {
201 |   width: 100%;
202 |   border: 1px solid #666;
203 |   border-radius: 5px;
204 | }
205 | 
-------------------------------------------------------------------------------- /src/layouts/index.js: --------------------------------------------------------------------------------
1 | import React from "react";
2 | import Link from "gatsby-link";
3 | import Helmet from "react-helmet";
4 | import { graphql, StaticQuery } from "gatsby";
5 | 
6 | import "bootstrap/dist/css/bootstrap.css";
7 | import "prismjs/themes/prism-solarizedlight.css";
8 | import "code-mirror-themes/themes/monokai.css";
9 | import "./index.css";
10 | 
11 | import jpg from "../../static/posterframe.jpg";
12 | 
13 | const TemplateWrapper = props => {
14 |   return (
15 |     <StaticQuery
16 |       render={data => {
17 |         const frontmatter =
18 |           props.data && props.data.markdownRemark
19 |             ? props.data.markdownRemark.frontmatter
20 |             : null;
21 | 
22 |         return (
23 |           <div>{/* the wrapper's className was stripped out of the dump */}
24 |             {/* lines 24-69 (likely the <Helmet> head block) were stripped out of the dump and aren't recoverable */}
            {/* navbar markup below reconstructed; element types and classNames inferred from index.css */}
70 |             <div className="navbar gradient">
71 |               <h1>
72 |                 <Link to="/" className="navbar-brand">{data.site.siteMetadata.title}</Link>
73 |               </h1>
74 |               {!frontmatter ? null : (
75 |                 <h2 className="mobile-hidden">{`${frontmatter.section} – ${frontmatter.title}`}</h2>
76 |               )}
77 |               <h3 className="button">
                {/* the <a>'s href was stripped; the Frontend Masters course URL is an assumption */}
78 |                 <a href="https://frontendmasters.com/courses/complete-intro-containers/">
79 |                   Complete Intro to Containers Videos
80 |                   <span className="icon">▶️</span>
81 |                 </a>
82 |               </h3>
83 |             </div>
84 |             <div className="main">{props.children}</div>
85 |           </div>
86 | ); 87 | }} 88 | query={graphql` 89 | query HomePage($path: String!) { 90 | markdownRemark(frontmatter: { path: { eq: $path } }) { 91 | html 92 | frontmatter { 93 | path 94 | title 95 | order 96 | section 97 | description 98 | } 99 | } 100 | site { 101 | pathPrefix 102 | siteMetadata { 103 | title 104 | subtitle 105 | description 106 | keywords 107 | } 108 | } 109 | } 110 | `} 111 | /> 112 | ); 113 | }; 114 | 115 | export default TemplateWrapper; 116 | -------------------------------------------------------------------------------- /src/pages/404.js: -------------------------------------------------------------------------------- 1 | import React from "react"; 2 | 3 | const NotFoundPage = () => ( 4 |
  <div>
5 |     <h1>NOT FOUND</h1>
6 |     <p>You just hit a route that doesn't exist... the sadness.</p>
7 |   </div>
8 | );
9 | 
10 | export default NotFoundPage;
11 | 
-------------------------------------------------------------------------------- /src/pages/index.css: --------------------------------------------------------------------------------
1 | body {
2 |   background-color: #eee;
3 | }
4 | 
5 | .index {
6 |   width: 97%;
7 |   max-width: 750px;
8 |   margin: 0 auto;
9 |   margin-top: 40px;
10 | }
11 | 
12 | .main {
13 |   margin-top: 80px;
14 | }
15 | 
16 | .lesson-container {
17 |   max-width: 850px;
18 |   margin: 0 auto;
19 | }
20 | 
21 | .lesson {
22 |   margin: 20px;
23 | }
24 | 
25 | .lesson-content {
26 |   padding: 20px;
27 | }
28 | 
29 | .lesson h1 {
30 |   margin: 0;
31 |   padding: 20px;
32 | }
33 | 
34 | .example-table {
35 |   border-collapse: separate;
36 | }
37 | 
38 | .example-table td {
39 |   border: 1px solid black;
40 |   width: 20px;
41 |   height: 20px;
42 | }
43 | 
44 | .example-table .current {
45 |   background-color: #fcc;
46 | }
47 | 
48 | .example-table .n {
49 |   border-top-color: transparent;
50 | }
51 | 
52 | .example-table .s {
53 |   border-bottom-color: transparent;
54 | }
55 | 
56 | .example-table .e {
57 |   border-right-color: transparent;
58 | }
59 | 
60 | .example-table .w {
61 |   border-left-color: transparent;
62 | }
63 | 
64 | .lesson-content table {
65 | }
66 | 
67 | .lesson-content td {
68 |   border: 1px solid black;
69 |   padding: 8px;
70 | }
71 | 
72 | .lesson-content td input {
73 |   min-width: 300px;
74 | }
75 | 
76 | .lesson-flex {
77 |   display: flex;
78 |   flex-direction: column;
79 |   justify-content: center;
80 |   align-items: center;
81 | }
82 | 
83 | .random-tweet {
84 |   width: 100%;
85 |   margin-top: 100px;
86 | }
87 | 
88 | .fem-link {
89 |   text-align: center;
90 | }
91 | 
-------------------------------------------------------------------------------- /src/pages/index.js: --------------------------------------------------------------------------------
1 | import React from "react";
2 | import { StaticQuery, graphql } from "gatsby";
3 | import Card from "../components/TOCCard";
4 | 
5 | import "./index.css";
6 | 
  // NOTE: the StaticQuery's graphql query (original lines 9-35) was stripped out of the dump
7 | const IndexPage = () => (
8 |   <StaticQuery
36 |     render={props => (
37 |       <div className="index">
        {/* markup below reconstructed from the stripped dump; element types, classNames, and the Card's title text are assumptions */}
38 |         <h1 className="jumbotron gradient">{props.site.siteMetadata.title} (feat. Docker) {props.site.siteMetadata.subtitle}</h1>
39 | 
40 |         {/* line 40's tag-only content couldn't be recovered */}
41 |         <Card title="Table of Contents" content={props.allMarkdownRemark.edges} />
45 |       </div>
46 |     )}
47 |   />
48 | );
49 | 
50 | export default IndexPage;
51 | 
-------------------------------------------------------------------------------- /src/templates/lessonTemplate.js: --------------------------------------------------------------------------------
1 | import React from "react";
2 | import Link from "gatsby-link";
3 | import { graphql } from "gatsby";
4 | 
5 | export default function Template(props) {
6 |   let { markdownRemark, allMarkdownRemark } = props.data; // data.markdownRemark holds our post data
7 | 
8 |   const { frontmatter, html } = markdownRemark;
9 | 
10 |   const index = allMarkdownRemark.edges.reduce(
11 |     (acc, el, i) => (el.node.frontmatter.path === frontmatter.path ? i : acc),
12 |     -1
13 |   );
14 | 
  // the Link tags and the JSX below were reconstructed from the stripped dump; classNames inferred from the stylesheets
15 |   const prevLink =
16 |     index > 0 ? (
17 |       <Link
18 |         className="prev"
19 |         to={allMarkdownRemark.edges[index - 1].node.frontmatter.path}
20 |       >
21 |         {"← " + allMarkdownRemark.edges[index - 1].node.frontmatter.title}
22 |       </Link>
23 |     ) : null;
24 |   const nextLink =
25 |     index < allMarkdownRemark.edges.length - 1 ? (
26 |       <Link
27 |         className="next"
28 |         to={allMarkdownRemark.edges[index + 1].node.frontmatter.path}
29 |       >
30 |         {allMarkdownRemark.edges[index + 1].node.frontmatter.title + " →"}
31 |       </Link>
32 |     ) : null;
33 |   return (
34 |     <div className="lesson-container">
35 |       <div className="lesson">
36 |         <h1>{frontmatter.title}</h1>
37 |         <h2>{frontmatter.date}</h2>
38 |         <div
39 |           className="lesson-content"
40 |           dangerouslySetInnerHTML={{ __html: html }}
41 |         />
42 |         <div className="lesson-links">
43 |           {prevLink}
44 |           {nextLink}
45 |         </div>
46 |       </div>
47 |     </div>
48 | ); 49 | } 50 | 51 | export const pageQuery = graphql` 52 | query LessonByPath($path: String!) { 53 | markdownRemark(frontmatter: { path: { eq: $path } }) { 54 | html 55 | frontmatter { 56 | path 57 | title 58 | order 59 | section 60 | description 61 | } 62 | } 63 | allMarkdownRemark( 64 | sort: { order: ASC, fields: [frontmatter___order] } 65 | limit: 1000 66 | ) { 67 | edges { 68 | node { 69 | frontmatter { 70 | order 71 | path 72 | title 73 | } 74 | } 75 | } 76 | } 77 | } 78 | `; 79 | -------------------------------------------------------------------------------- /static/posterframe.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/btholt/complete-intro-to-containers/ea3d08d5da2528c2c25fdcc3a5c16ea266743093/static/posterframe.jpg --------------------------------------------------------------------------------