├── .github
│   └── workflows
│       └── next.yaml
├── .gitignore
├── .prettierrc
├── LICENSE
├── README.md
├── components
│   ├── corner.js
│   ├── footer.js
│   ├── github.js
│   ├── header.js
│   ├── layout.js
│   ├── linkedin.js
│   └── twitter.js
├── context
│   ├── courseInfoContext.js
│   └── headerContext.js
├── course.json
├── csv
│   └── index.js
├── data
│   ├── course.js
│   └── lesson.js
├── lessons
│   ├── 01-welcome
│   │   ├── A-introduction.md
│   │   └── B-set-up.md
│   ├── 02-crafting-containers-by-hand
│   │   ├── A-what-are-containers.md
│   │   ├── B-chroot.md
│   │   ├── C-namespaces.md
│   │   ├── D-cgroups.md
│   │   └── meta.json
│   ├── 03-docker
│   │   ├── A-docker-images.md
│   │   ├── B-docker-images-with-docker.md
│   │   ├── C-javascript-on-docker.md
│   │   ├── D-tags.md
│   │   ├── E-docker-cli.md
│   │   └── meta.json
│   ├── 04-dockerfiles
│   │   ├── A-intro-to-dockerfiles.md
│   │   ├── B-build-a-nodejs-app.md
│   │   ├── C-build-a-more-complicated-nodejs-app.md
│   │   ├── D-a-note-on-expose.md
│   │   ├── E-layers.md
│   │   └── meta.json
│   ├── 05-making-tiny-containers
│   │   ├── A-alpine-linux.md
│   │   ├── B-making-our-own-alpine-nodejs-container.md
│   │   ├── C-multi-stage-builds.md
│   │   ├── D-distroless.md
│   │   ├── E-static-asset-project.md
│   │   └── meta.json
│   ├── 06-docker-features
│   │   ├── A-bind-mounts.md
│   │   ├── B-volumes.md
│   │   ├── C-dev-containers.md
│   │   ├── D-networking-with-docker.md
│   │   └── meta.json
│   ├── 07-multi-container-projects
│   │   ├── A-docker-compose.md
│   │   ├── B-kubernetes.md
│   │   ├── C-kompose.md
│   │   └── meta.json
│   └── 08-wrap-up
│       ├── A-docker-alternatives.md
│       ├── B-conclusion.md
│       └── meta.json
├── next.config.js
├── package-lock.json
├── package.json
├── pages
│   ├── _app.js
│   ├── index.js
│   └── lessons
│       └── [section]
│           └── [slug].js
├── public
│   ├── .nojekyll
│   └── images
│       ├── BRAND-WHearts.png
│       ├── apple-touch-icon.png
│       ├── author.jpg
│       ├── course-icon.png
│       ├── dev-containers.jpg
│       ├── favicon-16x16.png
│       ├── favicon-32x32.png
│       ├── favicon.ico
│       ├── kubernetes1.png
│       ├── kubernetes2.png
│       ├── social-share-cover.jpg
│       └── vscode-ui.png
├── styles
│   ├── courses.css
│   ├── footer.css
│   └── variables.css
└── summary
    ├── getPrompt.js
    └── index.js
/.github/workflows/next.yaml:
--------------------------------------------------------------------------------
1 | name: Deploy NextJS Course Site to GitHub Pages
2 |
3 | on:
4 |   push:
5 |     branches:
6 |       - main
7 |
8 | jobs:
9 |   deploy:
10 |     runs-on: ubuntu-latest
11 |     steps:
12 |       - uses: actions/checkout@master
13 |       - name: npm install, build
14 |         run: |
15 |           npm install
16 |           npm run build
17 |       - name: Deploy site to gh-pages branch
18 |         uses: crazy-max/ghaction-github-pages@v2
19 |         with:
20 |           target_branch: gh-pages
21 |           build_dir: out
22 |           fqdn: containers-v2.holt.courses
23 |         env:
24 |           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
25 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
2 |
3 | # dependencies
4 | /node_modules
5 | /.pnp
6 | .pnp.js
7 |
8 | # testing
9 | /coverage
10 |
11 | # next.js
12 | /.next/
13 | /out/
14 |
15 | # production
16 | /build
17 |
18 | # misc
19 | .DS_Store
20 |
21 | # debug
22 | npm-debug.log*
23 | yarn-debug.log*
24 | yarn-error.log*
25 |
26 | # local env files
27 | .env.local
28 | .env.development.local
29 | .env.test.local
30 | .env.production.local
31 |
32 | *.csv
33 |
34 | .env
35 |
--------------------------------------------------------------------------------
/.prettierrc:
--------------------------------------------------------------------------------
1 | {}
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | [][fem]
4 |
5 | This repository is for the [Complete Intro to Containers v2][fem] course from [Frontend Masters][fem]. To view the course website, visit [https://containers-v2.holt.courses/][course].
6 |
7 | # Issues and Pull Requests
8 |
9 | Please file issues and open pull requests here! Thank you! This repo itself is the course website.
10 |
11 | # License
12 |
13 | The content of this workshop is licensed under CC-BY-NC-4.0. Feel free to share freely but do not resell my content.
14 |
15 | The code, including the code of the site itself and the code in the exercises, is licensed under Apache 2.0.
16 |
17 | [fem]: https://frontendmasters.com/courses/complete-intro-containers-v2/
18 | [course]: https://containers-v2.holt.courses
19 |
20 | [Container icons created by smashingstocks - Flaticon](https://www.flaticon.com/free-icons/container)
21 |
--------------------------------------------------------------------------------
/components/corner.js:
--------------------------------------------------------------------------------
1 | export default function Corner() {
2 | return (
3 |
26 |
27 | );
28 | }
29 |
--------------------------------------------------------------------------------
/components/layout.js:
--------------------------------------------------------------------------------
1 | import { useState } from "react";
2 |
3 | import Footer from "./footer";
4 | import Header from "./header";
5 | import getCourseConfig from "../data/course";
6 | import { Provider as HeaderProvider } from "../context/headerContext";
7 | import { Provider as CourseInfoProvider } from "../context/courseInfoContext";
8 |
9 | function Layout({ children }) {
10 | const courseInfo = getCourseConfig();
11 | const headerHook = useState({});
12 | return (
13 |
14 |
15 |
16 |
17 |
18 |
18 | {children}
19 |
20 |
25 |
26 |
27 |
34 |
35 |
36 | );
37 | }
38 |
39 | export default function App({ children }) {
40 | return {children};
41 | }
42 |
--------------------------------------------------------------------------------
/components/linkedin.js:
--------------------------------------------------------------------------------
1 | export default function LinkedIn() {
2 | return (
3 |
30 | );
31 | }
32 |
--------------------------------------------------------------------------------
/components/twitter.js:
--------------------------------------------------------------------------------
1 | export default function Twitter() {
2 | return (
3 |
30 | );
31 | }
32 |
--------------------------------------------------------------------------------
/context/courseInfoContext.js:
--------------------------------------------------------------------------------
1 | import { createContext } from "react";
2 |
3 | const courseInfoContext = createContext([{}, () => {}]);
4 |
5 | export const Provider = courseInfoContext.Provider;
6 | export const Consumer = courseInfoContext.Consumer;
7 | export const Context = courseInfoContext;
8 |
--------------------------------------------------------------------------------
/context/headerContext.js:
--------------------------------------------------------------------------------
1 | import { createContext } from "react";
2 |
3 | const headerContext = createContext([{}, () => {}]);
4 |
5 | export const Provider = headerContext.Provider;
6 | export const Consumer = headerContext.Consumer;
7 | export const Context = headerContext;
8 |
--------------------------------------------------------------------------------
/course.json:
--------------------------------------------------------------------------------
1 | {
2 | "author": {
3 | "name": "Brian Holt",
4 | "company": "SQLite Cloud"
5 | },
6 | "title": "Complete Intro to Containers",
7 | "subtitle": "v2",
8 | "frontendMastersLink": "https://holt.fyi/containers",
9 | "social": {
10 | "linkedin": "btholt",
11 | "github": "btholt",
12 | "twitter": "holtbt"
13 | },
14 | "description": "Complete Intro to Containers course by Brian Holt aims to demystify containers, making them accessible to developers regardless of expertise level. In this course you will learn how to craft containers by hand, how to use Docker and its features, and how to use Docker in your development flows today.",
15 | "keywords": ["linux", "containers", "javascript", "node", "brian holt", "frontend masters", "docker", "nodejs", "kubernetes", "k8s", "cloud", "aws", "gcp", "azure", "sql", "sqlite"],
16 | "csvPath": "./out/lessons.csv"
17 | }
18 |
--------------------------------------------------------------------------------
/csv/index.js:
--------------------------------------------------------------------------------
1 | import fs from "fs/promises";
2 | import path from "path";
3 | import { convertArrayToCSV } from "convert-array-to-csv";
4 | import { getLessons } from "../data/lesson.js";
5 |
6 | async function start() {
7 | const configBuffer = await fs.readFile(
8 | path.join(process.cwd(), "course.json")
9 | );
10 | const config = JSON.parse(configBuffer);
11 |
12 | if (!config.csvPath) {
13 | console.log("no csvPath in course.json, skipping CSV generation");
14 | return;
15 | }
16 |
17 | process.env.BASE_URL = config?.productionBaseUrl || "";
18 | const sections = await getLessons();
19 |
20 | const lessons = [];
21 |
22 | for (let i = 0; i < sections.length; i++) {
23 | const section = sections[i];
24 |
25 | for (let j = 0; j < section.lessons.length; j++) {
26 | const lesson = section.lessons[j];
27 |
28 | lessons.push({
29 | order: lesson.order,
30 | sectionTitle: section.title,
31 | lessonTitle: lesson.title,
32 | slug: section.slug + "/" + lesson.slug,
33 | sectionIcon: section.icon,
34 | filePath: lesson.fullSlug,
35 | description: lesson.description,
36 | });
37 | }
38 | }
39 |
40 | const csv = convertArrayToCSV(lessons);
41 |
42 | await fs.writeFile(config.csvPath, csv);
43 | console.log(`wrote ${lessons.length} rows to ${config.csvPath}`);
44 | }
45 |
46 | start();
47 |
--------------------------------------------------------------------------------
/data/course.js:
--------------------------------------------------------------------------------
1 | import config from "../course.json";
2 |
3 | const DEFAULT_CONFIG = {
4 | author: {
5 | name: "An Author",
6 | company: "An Author's Company",
7 | },
8 | title: "A Superb Course",
9 | subtitle: "That Teaches Nice Things",
10 | frontendMastersLink: "",
11 | description: "A nice course for nice people.",
12 | keywords: ["a nice course", "for people", "to learn", "nice things"],
13 | social: {
14 | linkedin: "btholt",
15 | github: "btholt",
16 | twitter: "holtbt",
17 | },
18 | productionBaseUrl: "/",
19 | };
20 |
21 | export default function getCourseConfig() {
22 | return Object.assign({}, DEFAULT_CONFIG, config);
23 | }
24 |
--------------------------------------------------------------------------------
/data/lesson.js:
--------------------------------------------------------------------------------
1 | import path from "path";
2 | import fs from "fs/promises";
3 | import matter from "gray-matter";
4 | import { titleCase } from "title-case";
5 | import { Marked } from "marked";
6 | import { markedHighlight } from "marked-highlight";
7 | import hljs from "highlight.js";
8 |
9 | const DEFAULT_ICON = "info-circle";
10 | const lessonsPath = path.join(process.cwd(), "lessons");
11 |
12 | const marked = new Marked(
13 | markedHighlight({
14 | baseUrl: process.env.BASE_URL ? process.env.BASE_URL + "/" : "/",
15 | highlight: function (code, lang) {
16 | const language = hljs.getLanguage(lang) ? lang : "plaintext";
17 | return hljs.highlight(code, { language }).value;
18 | },
19 | langPrefix: "hljs language-",
20 | })
21 | );
22 |
23 | function getTitle(slug, override) {
24 | let title = override;
25 | if (!title) {
26 | title = titleCase(slug.split("-").join(" "));
27 | }
28 |
29 | return title;
30 | }
31 |
32 | async function getMeta(section) {
33 | let meta = {};
34 | try {
35 | const file = await fs.readFile(
36 | path.join(lessonsPath, section, "meta.json")
37 | );
38 | meta = JSON.parse(file.toString());
39 | } catch (e) {
40 | // no meta.json, nothing to do
41 | }
42 |
43 | return meta;
44 | }
45 |
46 | function slugify(inputPath) {
47 | const pathParts = inputPath.split("-");
48 | const pathOrder = pathParts.shift();
49 | const pathSlug = pathParts.join("-");
50 | return {
51 | slug: pathSlug,
52 | order: pathOrder,
53 | title: titleCase(pathParts.join(" ")),
54 | };
55 | }
56 |
57 | export async function getLessons() {
58 | const dir = await fs.readdir(lessonsPath);
59 | const sections = [];
60 |
61 | for (let dirFilename of dir) {
62 | const dirStats = await fs.lstat(path.join(lessonsPath, dirFilename));
63 |
64 | if (dirStats.isFile()) {
65 | continue;
66 | }
67 |
68 | const lessonsDir = await fs.readdir(path.join(lessonsPath, dirFilename));
69 |
70 | let {
71 | title: sectionTitle,
72 | order: sectionOrder,
73 | slug: sectionSlug,
74 | } = slugify(dirFilename);
75 |
76 | let icon = DEFAULT_ICON;
77 |
78 | const meta = await getMeta(dirFilename);
79 | if (meta.title) {
80 | sectionTitle = meta.title;
81 | }
82 | if (meta.icon) {
83 | icon = meta.icon;
84 | }
85 |
86 | const lessons = [];
87 | for (let lessonFilename of lessonsDir) {
88 | if (lessonFilename.slice(-3) !== ".md") {
89 | continue;
90 | }
91 |
92 | const filePath = path.join(lessonsPath, dirFilename, lessonFilename);
93 |
94 | const file = await fs.readFile(filePath);
95 | const { data } = matter(file.toString());
96 | let slug = lessonFilename.replace(/\.md$/, "");
97 |
98 | const slugParts = slug.split("-");
99 | const lessonOrder = slugParts.shift();
100 |
101 | slug = slugParts.join("-");
102 |
103 | const title = getTitle(slug, data.title);
104 |
105 | lessons.push({
106 | slug,
107 | fullSlug: `/lessons/${sectionSlug}/${slug}`,
108 | title,
109 | order: `${sectionOrder}${lessonOrder.toUpperCase()}`,
110 | path: filePath,
111 | description: data.description ? data.description : "",
112 | });
113 | }
114 |
115 | sections.push({
116 | icon,
117 | title: sectionTitle,
118 | slug: sectionSlug,
119 | lessons,
120 | order: sectionOrder,
121 | });
122 | }
123 |
124 | return sections;
125 | }
126 |
127 | export async function getLesson(targetDir, targetFile) {
128 | const dir = await fs.readdir(lessonsPath);
129 |
130 | for (let i = 0; i < dir.length; i++) {
131 | const dirPath = dir[i];
132 | if (dirPath.endsWith(targetDir)) {
133 | const lessonDir = (
134 | await fs.readdir(path.join(lessonsPath, dirPath))
135 | ).filter((str) => str.endsWith(".md"));
136 |
137 | for (let j = 0; j < lessonDir.length; j++) {
138 | const slugPath = lessonDir[j];
139 | if (slugPath.endsWith(targetFile + ".md")) {
140 | const filePath = path.join(lessonsPath, dirPath, slugPath);
141 | const file = await fs.readFile(filePath);
142 | const { data, content } = matter(file.toString());
143 | const html = marked.parse(content);
144 | const title = getTitle(targetFile, data.title);
145 | const meta = await getMeta(dirPath);
146 |
147 | const section = getTitle(targetDir, meta.title);
148 | const icon = meta.icon ? meta.icon : DEFAULT_ICON;
149 |
150 | let nextSlug;
151 | let prevSlug;
152 |
153 | // get next
154 | if (lessonDir[j + 1]) {
155 | // has next in section
156 | const { slug: next } = slugify(lessonDir[j + 1]);
157 | nextSlug = `${targetDir}/${next.replace(/\.md$/, "")}`;
158 | } else if (dir[i + 1]) {
159 | // has next in next section
160 | const nextDir = (
161 | await fs.readdir(path.join(lessonsPath, dir[i + 1]))
162 | ).filter((str) => str.endsWith(".md"));
163 | const nextDirSlug = slugify(dir[i + 1]).slug;
164 | const nextLessonSlug = slugify(nextDir[0]).slug.replace(
165 | /\.md$/,
166 | ""
167 | );
168 | nextSlug = `${nextDirSlug}/${nextLessonSlug}`;
169 | } else {
170 | // last section
171 | nextSlug = null;
172 | }
173 |
174 | // get prev
175 | if (lessonDir[j - 1]) {
176 | // has prev in section
177 | const { slug: prev } = slugify(lessonDir[j - 1]);
178 | prevSlug = `${targetDir}/${prev.replace(/\.md$/, "")}`;
179 | } else if (dir[i - 1]) {
180 | // has prev in prev section
181 | const prevDir = (
182 | await fs.readdir(path.join(lessonsPath, dir[i - 1]))
183 | ).filter((str) => str.endsWith(".md"));
184 | const prevDirSlug = slugify(dir[i - 1]).slug;
185 | const prevLessonSlug = slugify(
186 | prevDir[prevDir.length - 1]
187 | ).slug.replace(/\.md$/, "");
188 | prevSlug = `${prevDirSlug}/${prevLessonSlug}`;
189 | } else {
190 | // first section
191 | prevSlug = null;
192 | }
193 |
194 | const base = process.env.BASE_URL ? process.env.BASE_URL : "/";
195 |
196 | return {
197 | attributes: data,
198 | html,
199 | markdown: content,
200 | slug: targetFile,
201 | title,
202 | section,
203 | icon,
204 | filePath,
205 | nextSlug: nextSlug ? path.join(base, "lessons", nextSlug) : null,
206 | prevSlug: prevSlug ? path.join(base, "lessons", prevSlug) : null,
207 | };
208 | }
209 | }
210 | }
211 | }
212 |
213 | return false;
214 | }
215 |
--------------------------------------------------------------------------------
/lessons/01-welcome/A-introduction.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Complete Intro to Containers course by Brian Holt aims to demystify
4 | containers, making them accessible to developers regardless of expertise level
5 | in JavaScript. The course covers Linux basics, usage on macOS and Windows,
6 | emphasizing the importance of containers in modern tech environments.
7 | keywords:
8 | - containers
9 | - intro
10 | - developers
11 | - Linux basics
12 | - macOS
13 | - Windows
14 | - Brian Holt
15 | ---
16 |
17 | ## Course Objective
18 |
19 | Hello! And welcome to the Complete Intro to Containers! The objective of this course is to demystify what containers are, describe how they can be useful to you, and walk you through the steps of working with containers so that by the end of this course containers will be another tool available to you in your toolbox. Containers are only getting more important in the industry, and they're no longer just a tool for ops: they're a tool for developers too. Everyone from designers to low-level systems engineers will need to interact with containers on a regular basis. This will help you get ahead of the curve.
20 |
21 | ## Who Are You?
22 |
23 | This course is aimed at a developer demographic. While all the examples deal with JavaScript applications, you don't necessarily need to be a JavaScript developer to grasp this course; the code is incidental to the concepts being taught.
24 |
25 | This course assumes a very basic grasp of Linux and the command line. You don't need to be a bash expert, but this shouldn't be your first exposure to Linux or the command line. The class will be taught for both macOS and Windows users and will be using Ubuntu and Alpine Linux for the containers. It will also work well for Linux developers; the class won't have additional instructions for Linux devs, but following the macOS steps should be 95% the same. If you are a Windows developer, please be using Windows 10. You'll need to use either [WSL 2][wsl2] or VirtualBox. See the setup instructions below.
26 |
27 | If you need to brush up on your Linux basics, [I taught a course here that would be super helpful][linux] and I strongly suggest you take that first.
28 |
29 | To see all of the completed project files in a repo, [refer here][project-files].
30 |
31 | Do note that containers can take a lot of CPU and memory. If you have a modern-ish processor and 8GB of RAM, you will be fine. This could probably be done with some slowdown on 4GB, but anything lower would be pretty tough.
32 |
33 | This can also take a lot of bandwidth because we'll be downloading a lot of things. Be aware of that.
34 |
35 | ## Where to File Issues
36 |
37 | I write these courses and take care to avoid making mistakes. However, when teaching hours of material, mistakes are inevitable, both in the grammar here and in the course material itself. I (and the wonderful team at Frontend Masters) are constantly correcting mistakes so that those of you who come later get the best product possible. If you find an error, we'd love to fix it. The best way to do this is to [open a pull request or file an issue on the GitHub repo][issue]. While I'm always happy to chat and give advice on social media, I can't be tech support for everyone. And if you file it on GitHub, those who come later can Google the same answer you got.
38 |
39 | ## Who Am I?
40 |
41 | 
42 |
43 | My name is Brian Holt and I am the vice president of product at [SQLite Cloud][sqlitecloud]. I love teaching and creating courses. I was previously on a path to become a university professor before pivoting into tech. Luckily, Frontend Masters has afforded me an amazing opportunity to take the practical knowledge I have acquired over years of working in various roles in tech and share it with everyone I can.
44 |
45 | My current role is trying to make the simple, lightweight, blazing-fast database SQLite scale to enormous proportions to power every web and mobile app. Prior to this, I worked as a PM at Snowflake, Stripe, and Microsoft and as a staff engineer at LinkedIn, Netflix, Reddit, and a few other startups. I have had the privilege of a varied career that let me see tech from a lot of angles, and I hope to share that perspective with you.
46 |
47 | I got really interested in containers when I was put in charge of the developer experience for JavaScript developers working with Microsoft Azure. At first I was really intimidated by them: they seemed like a big, scary operations tool that was unapproachable to me, a JavaScript developer. Once I started to dig into them, I realized that they were neither complicated nor scary. Containers are surprisingly simple pieces of technology and I promise that once we work through them they will be far less scary.
48 |
49 | Please catch up with me on social media; I'd love to chat. I will warn you: I am awful at responding to direct messages!
50 |
51 | - [Twitter][twitter]
52 | - [GitHub][github]
53 | - [LinkedIn][linkedin]
54 |
55 | And hey, if you could take a second and [star the repo on GitHub][gh] I'd be super appreciative. It helps me reach more people and strokes my fragile ego.
56 |
57 | [gh]: https://github.com/btholt/complete-intro-to-containers-v2
58 | [frontend-masters]: https://frontendmasters.com/teachers/brian-holt/
59 | [fehh]: http://frontendhappyhour.com/
60 | [fem]: https://frontendmasters.com/
61 | [twitter]: https://twitter.com/holtbt
62 | [github]: https://github.com/btholt
63 | [linkedin]: https://www.linkedin.com/in/btholt/
64 | [course]: https://frontendmasters.com/courses/complete-intro-containers-v2/
65 | [issue]: https://github.com/btholt/complete-intro-to-containers-v2/issues
66 | [project-files]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2
67 | [linux]: https://frontendmasters.com/courses/linux-command-line/
68 | [sqlitecloud]: https://sqlitecloud.io/
69 | [wsl2]: https://learn.microsoft.com/en-us/windows/wsl/install
70 |
--------------------------------------------------------------------------------
/lessons/01-welcome/B-set-up.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn about Docker Desktop, a convenient desktop app GUI to control Docker on
4 | your computer. Follow installation instructions for Windows, macOS, and Linux
5 | provided in the document. Understand system requirements, internet usage, and
6 | tools used in the course.
7 | keywords:
8 | - Docker Desktop
9 | - installation instructions
10 | - system requirements
11 | - internet usage
12 | - tools FAQ
13 | ---
14 |
15 | ## Docker Desktop
16 |
17 | Docker Desktop is a desktop GUI app that allows you to control Docker on your computer. You can definitely use Docker and containers without it, but it's a convenience to be able to turn things on and off with an app instead of trying to communicate with the daemon via its client. Suffice to say, at least for this course, please use Docker Desktop. It's free for individuals and small companies.
18 |
19 | You will need to set up Docker Desktop if you haven't already.
20 |
21 | - [Installation instructions for Microsoft Windows][windows] (if you're unsure, I suggest doing the WSL2 installation)
22 | - [Installation instructions for Apple macOS][macos] (make sure to choose if you have an Intel or an Apple chip in your computer)
23 | - [Installation instructions for Linux][linux]
24 |
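Once Docker Desktop is installed and running, a quick sanity check from your terminal will confirm everything is wired up (a suggested check, not part of the official install steps):

```bash
docker --version        # the client responds with its version
docker run hello-world  # pulls a tiny test image and prints a welcome message if all is well
```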
25 | Docker states on its website that it requires 4GB of RAM to run okay. I haven't tried it with that little, but it seems like it should be enough. 8GB would really put you in a comfortable spot.
26 |
27 | For Windows developers, you'll need to be on either Windows 10 or 11. It doesn't matter what edition (Home, Pro, Education, etc.); it used to matter, but now any edition of Windows 10+ can use Docker. Please be sure to follow all the instructions carefully, as you may have to do some things that aren't on by default, like enabling virtualization and turning on WSL. This course does not work on Windows 7 or 8 (or 9, lol.) You will see on the Windows page a bunch of information about what version of Windows you need for Windows containers; ignore that. We're not doing any Windows containers today, just Linux.
28 |
29 | For Linux devs, they have instructions for Ubuntu, Debian, RHEL, and Fedora. They also list experimental support for Arch. If you're using something different from those, you're on your own. Generally, if you're on Linux, I'm going to assume you can translate my macOS instructions into Linux.
30 |
31 | This course also assumes you are using an x64 processor or an Apple Silicon processor. This class is untested on 32-bit processors and on other ARM, RISC, etc. processors.
32 |
33 | ## Internet and Storage
34 |
35 | This class will use a fair amount of bandwidth as containers can be quite large. Docker does a decent job of caching so once you've downloaded a container once it will cache its layers so you don't have to install it again. If you're on metered or slower Internet, be aware of that.
36 |
37 | Also be aware that Docker can eat up your disk space pretty quickly. I have barely used Docker on my new computer and already it's using 2GB of storage for various images. Once Docker is running, run `docker image ls` to see what you have locally and `docker image rm <image name>` to remove any that you don't want sticking around if you need to free up space. You can also do this from the Docker Desktop GUI.
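For example (the image names and sizes below are purely illustrative; yours will differ):

```bash
docker image ls                # list locally cached images and how much space they take
docker image rm ubuntu:jammy   # remove an image by name:tag once you're done with it
docker system df               # summary of the total disk space Docker is using
```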
38 |
39 | ## Tools FAQ
40 |
41 | ### What tools are you using?
42 |
43 | - Visual Studio Code – I used to work at Microsoft on VS Code so it's no surprise that I'll be using it in this course. We'll also be using a few extensions that I'll call out as we get there.
44 | - Firefox – I want more than Chromium to exist so I support Firefox where I can. Feel free to use any browser; it won't matter in this course.
45 | - Terminal.app – I used to use iTerm2 and Hyper but in the end I appreciate how fast the default terminal is.
46 |
47 | ### What are you using?
48 |
49 | - Visual Studio Code
50 | - Dark+ Theme – It comes installed by default but it's not the default theme anymore. I'm so used to it that I can't switch.
51 | - [MonoLisa][monolisa] font – I like fonts and I look at it all day so I was okay paying for it. I have [ligatures][ligatures] enabled which is why you might see strange glyphs. If you want ligatures but don't want to pay, the linked ligature article has a few. I like Cascadia Code from Microsoft.
52 | - [vscode-icons][vscode-icons] – Lots of neat icons for VS Code and it's free.
53 | - Terminal
54 | - zsh – It comes with macOS now and I'm _way_ too lazy to switch back to bash.
55 | - [Dracula theme][dracula] – I like the pastels. I would use it in VS Code too if Dark+ wasn't ingrained in my blood.
56 |   - [Starship Prompt][starship] – Very cool prompt that's just pretty. Also shows you what sort of project you're in, which is occasionally useful.
57 | - [CaskaydiaCove Nerd Font][nerd] – This works with Starship prompt to give you the JS logos and all those extra glyphs. It's based on Cascadia Code.
58 |
59 | ### Can I use a different container engine than Docker for this course?
60 |
61 | The short answer is no.
62 |
63 | The slightly longer answer is noooo.
64 |
65 | The longer answer is that it's likely _most_ of the course would work on something like podman or nerdctl but I'm not testing any of it so I'm sure you'll run into inconsistencies and I won't be able to help you with it. They're very valid and useful pieces of technology and you should try them but for this course let's stick to Docker.
66 |
67 | [windows]: https://docs.docker.com/desktop/install/windows-install/
68 | [macos]: https://docs.docker.com/desktop/install/mac-install/
69 | [linux]: https://docs.docker.com/desktop/install/linux-install/
70 | [ligatures]: https://worldofzero.com/posts/enable-font-ligatures-vscode/
71 | [monolisa]: https://www.monolisa.dev/
72 | [vscode-icons]: https://marketplace.visualstudio.com/items?itemName=vscode-icons-team.vscode-icons
73 | [dracula]: https://draculatheme.com/terminal
74 | [starship]: https://starship.rs/
75 | [nerd]: https://www.nerdfonts.com/font-downloads
76 |
--------------------------------------------------------------------------------
/lessons/02-crafting-containers-by-hand/A-what-are-containers.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Understand the simplicity of containers by exploring how they leverage a few
4 | Linux kernel features for isolation, contrasting with managing bare metal
5 | servers or virtual machines. Discover the advantages and trade-offs associated
6 | with each approach, leading to the emergence of containers as a cost-effective
7 | and efficient solution for deploying code.
8 | keywords:
9 | - containers
10 | - Linux kernel features
11 | - bare metal servers
12 | - virtual machines
13 | - resource management
14 | - security
15 | - deploying code
16 | ---
17 |
18 | Containers are probably simpler than you think they are. Before I took a deep dive into what they are, I was very intimidated by the concept. I thought they were for people super-versed in Linux and sysadmin-type activities. In reality, the core of what containers are is just a few features of the Linux kernel duct-taped together. Honestly, there's no single concept of a "container": it's just using a few features of Linux together to achieve isolation. That's it.
19 |
20 | So how comfortable are you with the command line? This course doesn't assume wizardry with bash or zsh, but this probably shouldn't be your first adventure with it. If it is, [check out my course on the command line and Linux][linux]. That course will give you more than you'll need to keep up with this one.
21 |
22 | ## Why Containers
23 |
24 | Let's start with why first, why we need containers.
25 |
26 | ### Bare Metal
27 |
28 | Historically, if you wanted to run a web server, you either set up your own or rented a literal server somewhere. We often call this "bare metal" because, well, your code is literally executing on the processor with no abstraction. This is great if you're extremely performance-sensitive and you have ample and competent staffing to take care of these servers.
29 |
30 | The problem with running your servers on bare metal is that you become extremely inflexible. Need to spin up another server? Call up Dell or IBM and ask them to ship you another one, then get your tech to go install the physical server, set it up, and bring it into the server farm. That only takes a month or two, right? Pretty much instant. 😅
31 |
32 | Okay, so now at least you have a pool of servers responding to web traffic. Now you just have to worry about keeping the operating system up to date. Oh, and all the drivers connecting to the hardware. And all the software running on the server. And replacing the components of your server as new ones come out. Or maybe the whole server. And fixing failed components. And network issues. And running cables. And your power bill. And who has physical access to your server room. And the actual temperature of the data center. And paying a ridiculous Internet bill. You get the point. Managing your own servers is its own set of challenges and requires a whole team to do it.
33 |
34 | ### Virtual Machines
35 |
36 | Virtual machines are the next step. This is adding a layer of abstraction between you and the metal. Now instead of having one instance of Linux running on your computer, you'll have multiple guest instances of Linux running inside of a host instance of Linux (it doesn't have to be Linux but I'm using it to be illustrative.) Why is this helpful? For one, I can have one beefy server and have it spin up and down virtual servers at will. So now if I'm adding a new service, I can just spin up a new VM on one of my servers (providing I have space to do so.) This allows a lot more flexibility.
37 |
38 | Another thing is I can _totally_ separate two VMs running on the same machine from each other. This affords a few nice things.
39 |
40 | 1. Imagine both Coca-Cola and Pepsi lease a server from Microsoft to power their soda making machines and hence have the recipe on the server. Microsoft, wanting to be efficient, buys large physical servers and then allocates virtual servers to each of them. If Microsoft puts both of these virtual servers on the same physical server with no separation, one soda-maker could just connect into the server, browse the competitor's files, and find the secret recipe. So this is a massive security problem.
41 | 1. Imagine one of the soda-makers discovers that they're on the same server as their competitor. They could drop a [fork bomb][fork-bomb] to devour all the resources their competitor's website was using and intentionally crash the server.
42 | 1. Much less nefariously, imagine an engineer at Coca-Cola shipped a bug that crashed the whole server. If there's no separation between the two virtual servers, shipping that bug would also crash Pepsi's website, something they wouldn't be super happy about.
43 |
44 | So enter VMs. These are individual instances of operating systems that, as far as the OSes know, are running on bare metal themselves. The host operating system offers the VM a certain amount of resources, and if that VM runs out, it runs out without affecting the other guest operating systems running on the server. If someone else crashes their server, they crash their guest OS and yours hums along unaffected. And since they're in a guest OS, they can't peek into your files because their VM has no concept of any sibling VMs on the machine, so it's much more secure.
45 |
46 | All these above features come at the cost of a bit of performance. Running an operating system within an operating system isn't free. But in general we have enough computing power and memory that this isn't the primary concern. And of course, with abstraction comes ease at the cost of additional complexity. In this case, the advantages very much outweigh the cost most of the time.
47 |
48 | ### Public Cloud
49 |
50 | So, as alluded to above, you can nab a VM from a public cloud provider like Microsoft Azure or Amazon Web Services. It will come with a pre-allocated amount of memory and computing power (often called virtual cores or vCores because they're dedicated cores to your virtual machine.) Now you no longer have to manage the expensive and difficult business of maintaining a data center, but you do still have to manage all the software yourself: Microsoft won't update Ubuntu for you (generally speaking, they might prompt you but you still have to worry about it) but they will make sure the hardware is up to date.
51 |
52 | But now you have the great ability to spin up and spin down virtual machines in the cloud, giving you access to resources with the only upper bound being how much you're willing to pay. And we've been doing this for a while. But the hard part is they're still just giving you machines: you have to manage all the software, networking, provisioning, updating, etc. for all these servers. And lots of companies still do! Tools like Terraform, Chef, Puppet, Salt, etc. help a lot here because they make spinning up new VMs easy and handle the software needed to get them going.
53 |
54 | We're still paying the cost of running a whole operating system in the cloud inside of a host operating system. It'd be nice if we could just run the code inside the host OS without the additional expenditure of guest OSs.
55 |
56 | ### Containers
57 |
58 | And here we are, containers. As you may have divined, containers give us many of the security and resource-management features of VMs but without the cost of having to run a whole other operating system. Instead they use chroot, namespaces, and cgroups to separate a group of processes from each other. If this sounds a little flimsy to you and you're still worried about security and resource management, you're not alone. But I assure you a lot of very smart people have worked out the kinks and containers are the future of deploying code.
59 |
60 | So now that we've been through why we need containers, let's go through the three things that make containers a reality.
61 |
62 | [fork-bomb]: https://en.wikipedia.org/wiki/Fork_bomb
63 | [linux]: https://frontendmasters.com/courses/linux-command-line/
64 |
--------------------------------------------------------------------------------
/lessons/02-crafting-containers-by-hand/B-chroot.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: chroot
3 | description: >-
4 | Learn how to use the Linux `chroot` command within containers to set a new
5 | root directory, isolating processes for enhanced security. Follow a
6 | step-by-step guide to create a new environment, copy necessary libraries, and
7 | successfully run commands within the isolated space.
8 | keywords:
9 | - Linux chroot command
10 | - container security
11 | - isolating processes
12 | - copying libraries in chroot
13 | - Ubuntu Docker container
14 | - setting new root directory
15 | ---
16 |
17 | I've heard people call this "cha-root" and "change root". I'm going to stick to "change root" because I feel less ridiculous saying that. It's a Linux command that allows you to set the root directory of a new process. In our container use case, we just set the root directory to be wherever the new container's root directory should be. And now the new container group of processes can't see anything outside of it, eliminating our security problem because the new process has no visibility outside of its new root.
18 |
19 | Let's try it. Start up an Ubuntu VM however you feel most comfortable. I'll be using Docker (and doing containers within containers 🤯). If you're like me, run `docker run -it --name docker-host --rm --privileged ubuntu:jammy`. This will download the [official Ubuntu container][ubuntu] from Docker Hub and grab the version marked with the _jammy_ tag, which is Ubuntu 22.04, the LTS release at the time of writing. You could put `ubuntu:devel` to get the latest development version of Ubuntu (as of writing that'd be 24.04). `docker run` means we're going to run some commands in the container, and the `-it` means we want to make the shell interactive (so we can use it like a normal terminal.)
20 |
21 | If you're in Windows and using WSL, just open a new WSL terminal in Ubuntu. ✌️
22 |
23 | To see what version of Ubuntu you're using, run `cat /etc/issue`. `cat` reads a file and dumps it into the output which means we can read it, and `/etc/issue` is a file that will tell us what distro we're using. Mine says `Ubuntu 22.04.4 LTS \n \l`.
24 |
25 | Okay, so let's attempt to use `chroot` right now.
26 |
27 | 1. Make a new folder in your root directory via `mkdir /my-new-root`.
28 | 1. Inside that new folder, run `echo "my super secret thing" >> /my-new-root/secret.txt`.
29 | 1. Now try to run `chroot /my-new-root bash` and see the error it gives you.
30 |
31 | You should see something about failing to run a shell or not being able to find bash. That's because bash is a program and your new root wouldn't have bash to run (because it can't reach outside of its new root.) So let's fix that! Run:
32 |
33 | 1. `mkdir /my-new-root/bin`
34 | 1. `cp /bin/bash /bin/ls /my-new-root/bin/`
35 | 1. `chroot /my-new-root bash`
36 |
37 | Still not working! The problem is that these commands rely on libraries to power them and we didn't bring those with us. So let's do that too. Run `ldd /bin/bash`. This will print out something like this:
38 |
39 | ```bash
40 | $ ldd /bin/bash
41 | linux-vdso.so.1 (0x0000ffffbe221000)
42 | libtinfo.so.6 => /lib/aarch64-linux-gnu/libtinfo.so.6 (0x0000ffffbe020000)
43 | libc.so.6 => /lib/aarch64-linux-gnu/libc.so.6 (0x0000ffffbde70000)
44 | /lib/ld-linux-aarch64.so.1 (0x0000ffffbe1e8000)
45 | ```
46 |
47 | These are the libraries we need for bash. Let's go ahead and copy those into our new environment.
48 |
49 | 1. `mkdir /my-new-root/lib`
50 | 1. Then we need to copy all those paths (ignore the lines that don't have paths) into our directory. Make sure you get the right files in the right directory. In my case above (yours likely will be different) it's:
51 | 1. `cp /lib/aarch64-linux-gnu/libtinfo.so.6 /lib/aarch64-linux-gnu/libc.so.6 /lib/ld-linux-aarch64.so.1 /my-new-root/lib`
52 | 1. Do it again for `ls`. Run `ldd /bin/ls`
53 | 1. Follow the same process to copy the libraries for `ls` into our `my-new-root`.
54 | 1. `cp /lib/aarch64-linux-gnu/libselinux.so.1 /lib/aarch64-linux-gnu/libc.so.6 /lib/ld-linux-aarch64.so.1 /lib/aarch64-linux-gnu/libpcre2-8.so.0 /my-new-root/lib`
55 |
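If copying each library path by hand feels tedious, a small loop over `ldd`'s output can do it for you. This is just a convenience sketch (it assumes the standard tools in this Ubuntu image, and your architecture-specific paths will differ from mine):

```bash
# copy every library a binary needs into the new root, preserving the directory layout
for bin in /bin/bash /bin/ls; do
  for lib in $(ldd "$bin" | grep -o '/[^ ]*'); do
    mkdir -p "/my-new-root$(dirname "$lib")"
    cp "$lib" "/my-new-root$(dirname "$lib")/"
  done
done
```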
56 | Now, finally, run `chroot /my-new-root bash` and run `ls`. You should successfully see everything in the directory. Now try `pwd` to see your working directory. You should see `/`. You can't get out of here! This, before being called containers, was called a jail for this reason. At any time, hit CTRL+D or run `exit` to get out of your chrooted environment.
57 |
58 | ## cat exercise
59 |
60 | Now try running `cat secret.txt`. Oh no! Your new chroot-ed environment doesn't know how to cat! As an exercise, go make `cat` work the same way we did above!
61 |
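If you get stuck, the fix looks just like what we did for `bash` and `ls` (a sketch; run `ldd /bin/cat` yourself since the exact library paths vary by architecture):

```bash
cp /bin/cat /my-new-root/bin/
ldd /bin/cat        # on my machine cat only needs libc and the dynamic linker, which we already copied
chroot /my-new-root bash
cat secret.txt      # prints "my super secret thing"
```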
62 | Congrats, you just cha-rooted the \*\*\*\* out of your first environment!
63 |
64 | [ubuntu]: https://hub.docker.com/_/ubuntu
65 |
--------------------------------------------------------------------------------
/lessons/02-crafting-containers-by-hand/C-namespaces.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Understand the importance of namespaces and cgroups for security and resource
4 | management in server environments. Learn how namespaces can isolate processes
5 | to enhance security and prevent unauthorized access in shared server
6 | environments, beyond what chroot alone provides.
7 | keywords:
8 | - namespaces
9 | - cgroups
10 | - security
11 | - resource management
12 | - chroot
13 | - process isolation
14 | - server environment
15 | ---
16 |
17 | While chroot is pretty straightforward, namespaces and cgroups are a bit more nebulous to understand but no less important. Both of these next two features are for security and resource management.
18 |
19 | Let's say you're running a big server that's in your home and you're selling space to customers (that you don't know) to run their code on your server. What sort of concerns would you have about running their "untrusted" code? Let's say you have Alice and Bob who are running e-commerce services dealing with lots of money. They themselves are good citizens of the server and are minding their own business. But then Eve joins the server with other intentions: she wants to steal money, source code, and whatever else she can get her hands on from your other tenants on the server. If you just gave all three of them unfettered root access to the server, what's to stop Eve from taking everything? Or what if she just wants to disrupt their businesses, even if she's not stealing anything?
20 |
21 | Your first line of defense is that you could log them into chroot'd environments and limit them to only those. Great! Now they can't see each other's files. Problem solved? Well, no, not quite yet. Despite the fact that she can't see the files, Eve can still see all the processes going on on the computer. She can kill processes, unmount filesystems, and even hijack processes.
22 |
23 | Enter namespaces. Namespaces allow you to hide processes from other processes. If we give each chroot'd environment a different set of namespaces, now Alice, Bob, and Eve can't see each other's processes (they even get different PIDs, or process IDs, so they can't guess what the others have) and you can't steal or hijack what you can't see!
24 |
25 | There's a lot more depth to namespaces beyond what I've outlined here. The above describes _just_ the PID namespace. There are more namespaces as well, and they help these containers stay isolated from each other.
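If you want a feel for what's there, you can peek at the namespaces your current shell belongs to (this should work in the Ubuntu container we're using, since both commands ship with the base image):

```bash
ls -l /proc/self/ns   # each symlink here is a namespace this process is a member of
lsns                  # list the namespaces visible to you and the processes in them
```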
26 |
27 | ## The problem with chroot alone
28 |
29 | Now, chroot alone isn't secure. The only thing we've protected is the file system, and even that only mostly.
30 |
31 | 1. In a terminal, chroot into our environment
32 | 1. In another terminal, run `docker exec -it docker-host bash`. This will get another terminal session #2 for us (I'll refer to the chroot'd environment as #1)
33 | 1. Run `tail -f /my-new-root/secret.txt &` in #2. This will start an infinitely running process in the background.
34 | 1. Run `ps` to see the process list in #2 and see the `tail` process running. Copy the PID (process ID) for the tail process.
35 | 1. In #1, the chroot'd shell, run `kill <PID>` with the PID you copied. This will kill the tail process from inside the `chroot'd` environment. This is a problem because it means chroot isn't enough to isolate someone. We need more barriers. This is just one problem, processes, but it's illustrative that we need more isolation beyond just the file system.
36 |
37 | ## Safety with namespaces
38 |
39 | So let's now create a chroot'd environment that's isolated using namespaces, with a new command: `unshare`. `unshare` creates a new set of namespaces isolated from its parent (so you, the server provider, can't spy on Bob or Alice either) and from all other future tenants. Run this:
40 |
41 | **NOTE**: This next command downloads about 150MB and takes at least a few minutes to run. Unlike Docker images, this will redownload it _every_ time you run it and does no caching.
42 |
43 | ```bash
44 | # from our chroot'd environment if you're still running it, if not skip this
45 | exit
46 |
47 | ## Install debootstrap
48 | apt-get update -y
49 | apt-get install debootstrap -y
50 | debootstrap --variant=minbase jammy /better-root
51 |
52 | # head into the new namespace'd, chroot'd environment
53 | unshare --mount --uts --ipc --net --pid --fork --user --map-root-user chroot /better-root bash # this also chroot's for us
54 | mount -t proc none /proc # process namespace
55 | mount -t sysfs none /sys # system info filesystem
56 | mount -t tmpfs none /tmp # temporary in-memory filesystem
57 | ```
58 |
59 | This will create a new environment that's isolated on the system with its own PIDs, mounts (like storage and volumes), and network stack. Now we can't see any of the processes!
60 |
61 | Now try our previous exercise again.
62 |
63 | 1. Run `tail -f /my-new-root/secret.txt &` from #2 (not the unshare env)
64 | 1. Run `ps` from #1, grab pid for `tail`
65 | 1. Run `kill <PID>`, see that it doesn't work
66 |
67 | We used namespaces to protect our processes! We could explore the other namespaces but know it's a similar exercise: using namespaces to restrict containers from interfering with other containers (both for nefarious purposes and to protect ourselves from ourselves.)
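
If you're curious, here's a quick optional sketch of one of those other namespaces, the UTS namespace (which `--uts` gave us); it isolates the hostname. This assumes you still have the unshare'd shell (#1) and the plain docker-host shell (#2) open, and that the `hostname` utility is available in both:

```bash
# in #1 (the unshare'd environment): changes the hostname only inside this namespace
hostname fake-container
hostname # prints fake-container

# in #2: the docker-host's hostname is unchanged
hostname
```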
68 |
--------------------------------------------------------------------------------
/lessons/02-crafting-containers-by-hand/D-cgroups.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: cgroups
3 | description: >-
4 | In the Frontend Masters course "Complete Intro to Containers" taught by Brian Holt, participants learn how to manage server resources on high-traffic shopping days like Black Friday. The course introduces cgroups, a technology developed by Google, which safeguards sites from crashes by limiting resource usage per process, ensuring stability and preventing malicious overloads. This essential tool enhances server efficiency and security in shared environments, enabling robust e-commerce operations.
5 | keywords:
6 | - Frontend Masters Containers
7 | - Brian Holt Containers Course
8 | - cgroups technology
9 | - server resource management
10 | - prevent server crashes
11 | - e-commerce server stability
12 | - Google cgroups implementation
13 | ---
14 |
15 | Okay, so now we've hidden the processes from Eve so Bob and Alice can engage in commerce in privacy and peace. So we're all good, right? They can no longer mess with each other, right? Not quite. We're almost there.
16 |
17 | So now, say it's Black Friday, Boxing Day or Singles' Day (three of the biggest shopping days in the year, pick the one that makes the most sense to you 😄) and Bob and Alice are gearing up for their biggest sales day of the year. Everything is ready to go and at 9:00AM their site suddenly goes down without warning. What happened!? They log on to their chroot'd, unshare'd shell on your server and see that the CPU is pegged at 100% and there's no more memory available to allocate! Oh no! What happened?
18 |
19 | The first explanation could be that Eve has her site running on another virtual server and simply logged on and ran a malicious script that ate up all the available resources for Bob and Alice so that their sites would go down and Eve would be the only site that was up, increasing her sales.
20 |
21 | However, another (possibly more likely) explanation is that both Bob's and Alice's sites got busy at the same time and that in and of itself took all the resources without any malice involved, taking down their sites and everyone else's on the server. Or perhaps Bob's site had a memory leak and that was enough to take all the resources available.
22 |
23 | Suffice it to say, we still have a problem. Every isolated environment has access to all _physical_ resources of the server. There's no isolation of physical components from these environments.
24 |
25 | Enter the hero of this story: cgroups, or control groups. Google saw this same problem when building their own infrastructure and wanted to protect runaway processes from taking down entire servers and made this idea of cgroups, so you can say "this isolated environment only gets so much CPU, so much memory, etc. and once it's out of those it's out-of-luck, it won't get any more."
26 |
27 | This is a bit more difficult to accomplish but let's go ahead and give it a shot.
28 |
29 | > cgroups v2 is now the standard. Run `grep -c cgroup /proc/mounts` in your terminal. If that number is **greater than one**, the system you're using is cgroups v1. [Click here][move-to-v2] if you want to try to get your system from cgroups v1 to v2. As this is fairly involved, I would just suggest using a more recent version of Ubuntu, as it will have cgroups v2 on it.
30 | >
31 | > If you want to learn cgroups v1 (which I would not suggest, as they're getting phased out), [the first version of this course][v1] teaches them.
32 |
33 | cgroups, as we have said, allow you to move processes and their children into groups which then allow you to limit various aspects of them. Imagine you're running a single physical server for Google with both Maps and GMail having virtual servers on it. If Maps ships an infinite loop bug and it pins the CPU usage of the server to 100%, you only want Maps to go down and _not_ GMail just because it happens to be colocated with Maps. Let's see how to do that.
34 |
35 | You interact with cgroups via a pseudo-file system. Honestly, the whole interface feels weird to me but that is what it is! Inside your #2 terminal (the non-unshared one) run `cd /sys/fs/cgroup` and then run `ls`. You'll see a bunch of "files" that look like `cpu.max`, `cgroup.procs`, and `memory.high`. Each one of these represents a setting you can tweak for that cgroup. In this case, we are looking at the root cgroup: all cgroups will be children of this root cgroup. The way you make your own cgroup is by creating a folder inside of the cgroup.
36 |
37 | ```bash
38 | # creates the cgroup
39 | mkdir /sys/fs/cgroup/sandbox
40 |
41 | # look at all the files created automatically
42 | ls /sys/fs/cgroup/sandbox
43 | ```
44 |
45 | We now have a sandbox cgroup, which is a child of the root cgroup, and we can put limits on it! If we wanted to create a child of sandbox, as you may have guessed, we'd just create another folder inside of sandbox.
46 |
47 | Let's move our unshared environment into the cgroup. Every process belongs to exactly one cgroup. If you move a process to a cgroup, it will automatically be removed from the cgroup it was in. If we move our unshared bash process from the root cgroup to the sandbox cgroup, it will be removed from the root cgroup without you doing anything.
48 |
49 | ```bash
50 | # Find your isolated bash PID, it's the bash one immediately after the unshare
51 | ps aux
52 |
53 | # should see the process in the root cgroup
54 | cat /sys/fs/cgroup/cgroup.procs
55 |
56 | # puts the unshared env into the cgroup called sandbox
57 | echo <PID> > /sys/fs/cgroup/sandbox/cgroup.procs
58 |
59 | # should see the process in the sandbox cgroup
60 | cat /sys/fs/cgroup/sandbox/cgroup.procs
61 |
62 | # should see the process no longer in the root cgroup - processes belong to exactly 1 cgroup
63 | cat /sys/fs/cgroup/cgroup.procs
64 | ```
65 |
66 | We now have moved our unshared bash process into a cgroup. We haven't placed any limits on it yet but it's there, ready to be managed. We have a minor problem at the moment though that we need to solve.
67 |
68 | ```bash
69 | # should see all the available controllers
70 | cat /sys/fs/cgroup/cgroup.controllers
71 |
72 | # there are no controllers in the sandbox cgroup yet
73 | cat /sys/fs/cgroup/sandbox/cgroup.controllers
74 |
75 | # and no controllers are enabled for the root cgroup's children
76 | cat /sys/fs/cgroup/cgroup.subtree_control
77 | ```
78 |
79 | You have to enable controllers for the children and none of them are enabled at the moment. You can see the root cgroup has them all enabled, but hasn't enabled them in its subtree_control, so none are available in sandbox's controllers. Easy, right? We just add them to subtree_control, right? Yes, but one problem: you can't add new subtree_control configs while the cgroup itself has processes in it. So we're going to create another cgroup, add the rest of the processes to that one, and then enable the subtree_control configs for the root cgroup.
80 |
81 | ```bash
82 | # make new cgroup for the rest of the processes, you can't modify cgroups that have processes and by default Docker doesn't include any subtree_controllers
83 | mkdir /sys/fs/cgroup/other-procs
84 |
85 | # see all the processes you need to move, rerun each time after you add as it may move multiple processes at once due to some being parent / child
86 | cat /sys/fs/cgroup/cgroup.procs
87 |
88 | # you have to do this one at a time for each process
89 | echo <PID> > /sys/fs/cgroup/other-procs/cgroup.procs
90 |
91 | # verify all the processes have been moved
92 | cat /sys/fs/cgroup/cgroup.procs
93 |
94 | # add the controllers
95 | echo "+cpuset +cpu +io +memory +hugetlb +pids +rdma" > /sys/fs/cgroup/cgroup.subtree_control
96 |
97 | # notice how few files there are
98 | ls /sys/fs/cgroup/sandbox
99 |
100 | # all the controllers now available
101 | cat /sys/fs/cgroup/sandbox/cgroup.controllers
102 |
103 | # notice how many more files there are now
104 | ls /sys/fs/cgroup/sandbox
105 | ```
106 |
107 | We did it! We went ahead and added all the possible controllers, but normally you should add just the ones you need. If you want to learn more about what each of them does, [the kernel docs are quite readable][kernel].
108 |
109 | Let's get a third terminal going. From your host OS (Windows or macOS or your own Linux distro, not within Docker) run another `docker exec -it docker-host bash`. That way, we can have #1 inside the unshared environment, #2 running our commands, and #3 giving us a visual display of what's going on with `htop`, a visual tool for seeing what processes, CPU cores, and memory are doing.
110 |
111 | So, let's do three little exercises to demonstrate what we can do with a cgroup. First, let's make it so the unshared environment only has access to 80MB of memory instead of all of it.
112 |
113 | ```bash
114 | # a cool visual representation of CPU and RAM being used
115 | apt-get install htop
116 |
117 | # from #3 so we can watch what's happening
118 | htop
119 |
120 | # run this from #1 terminal and watch it in htop to see it consume about a gig of RAM and 100% of CPU core
121 | yes | tr \\n x | head -c 1048576000 | grep n
122 |
123 | # from #2, (you can get the PID from htop) to stop the CPU from being pegged and memory from being consumed
124 | kill -9 <PID>
125 |
126 | # should see max, so the memory is unlimited
127 | cat /sys/fs/cgroup/sandbox/memory.max
128 |
129 | # set the limit to 80MB of RAM (the number is 80MB in bytes)
130 | echo 83886080 > /sys/fs/cgroup/sandbox/memory.max
131 |
132 | # from inside #1, see it limit the RAM taken up; because the RAM is limited, the CPU usage is limited
133 | yes | tr \\n x | head -c 1048576000 | grep n
134 | ```
135 |
136 | I think this is very cool. We just made it so our unshared environment only has access to 80MB of RAM, so despite a script being run to literally just consume RAM, it was limited to only consuming 80MB of it.
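
If you want to verify what the cgroup is doing, a couple of files are worth peeking at from #2 (a quick sketch):

```bash
# how much memory everything in the sandbox cgroup is using right now, in bytes
cat /sys/fs/cgroup/sandbox/memory.current

# event counters like "high", "max", and "oom" tick up whenever the limit gets hit
cat /sys/fs/cgroup/sandbox/memory.events
```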
137 |
138 | However, as you saw, the user inside of the container could still peg the CPU if they wanted to. Let's fix that. Let's only give them 5% of a core.
139 |
140 | ```bash
141 | # inside #1 / the cgroup/unshare – this will peg one core of a CPU at 100% of the resources available, see it peg 1 CPU
142 | yes > /dev/null
143 |
144 | # from #2, (you can get the PID from htop) to stop the CPU from being pegged
145 | kill -9 <PID>
146 |
147 | # from #2 this allows the cgroup to only use 5% of a CPU
148 | echo '5000 100000' > /sys/fs/cgroup/sandbox/cpu.max
149 |
150 | # inside #1 / the cgroup/unshare – this will peg one core of a CPU at 5% since we limited it
151 | yes > /dev/null
152 |
153 | # from #2, to stop the CPU from being pegged, get the PID from htop
154 | kill -9 <PID>
155 | ```
156 |
157 | Pretty cool, right? Now, no matter how bad the code is we run inside of our chroot'd, unshare'd, cgroup'd environment, we cannot take more than 5% of a CPU core.
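
You can also watch the throttling happen. While `yes > /dev/null` is running in #1, check `cpu.stat` from #2 (a quick sketch):

```bash
# nr_throttled and throttled_usec climb as the 5% cap keeps kicking in
cat /sys/fs/cgroup/sandbox/cpu.stat
```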
158 |
159 | One more demo, the dreaded [fork bomb][fork-bomb]. A fork bomb is a script that forks itself into multiple processes, which then fork themselves, which then fork themselves, etc., until all resources are consumed and it crashes the computer. It can be written plainly as
160 |
161 | ```bash
162 | fork() {
163 | fork | fork &
164 | }
165 | fork
166 | ```
167 |
168 | but you'll see it written as `:(){ :|:& };:` where `:` is the name of the function instead of `fork`.
169 |
170 | So someone could run a fork bomb on our system right now and our cgroup would limit the blast radius in terms of CPU and RAM, but creating and destroying so many processes still carries a toll on the system. What we can do to more fully prevent a fork bomb is limit how many PIDs can be active at once. Let's try that.
171 |
172 | ```bash
173 | # See how many processes the cgroup has at the moment
174 | cat /sys/fs/cgroup/sandbox/pids.current
175 |
176 | # See how many processes the cgroup can create before being limited (max)
177 | cat /sys/fs/cgroup/sandbox/pids.max
178 |
179 | # set a limit that the cgroup can only run 3 processes at a time
180 | echo 3 > /sys/fs/cgroup/sandbox/pids.max
181 |
183 | # this runs five 15-second processes that run and then stop. run this from within #2 and watch it work. now run it in #1 and watch it not be able to; it will have to retry several times
183 | for a in $(seq 1 5); do sleep 15 & done
184 |
185 | # DO NOT RUN THIS ON YOUR COMPUTER. This is a fork bomb. If not accounted for, this would bring down your computer. However we can safely run inside our #1 because we've limited the amount of PIDs available. It will end up spawning about 100 processes total but eventually will run out of forks to fork.
186 | :(){ :|:& };:
187 | ```
188 |
189 | Attack prevented! Three processes is way too few for anyone to do anything meaningful, but by limiting the max PIDs available you limit what damage could be done. I'll be honest, this is the first time I've run a fork bomb on a computer and it's pretty exhilarating. I felt like I was in the movie _Hackers_. [Hack the planet!][hackers].
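
If you want to tidy up after experimenting, here's a rough sketch. A cgroup directory can only be removed once nothing is running in it, so exit the unshare'd shell in #1 first:

```bash
# from #2, after the shell in #1 has exited (so sandbox is empty)
rmdir /sys/fs/cgroup/sandbox

# other-procs still has live processes in it; you'd have to echo each PID back
# into /sys/fs/cgroup/cgroup.procs before rmdir would succeed on it
cat /sys/fs/cgroup/other-procs/cgroup.procs
```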
190 |
191 | And now we can call this a container. You have handcrafted a container. A container is literally nothing more than what we did together. There are other sorts of technologies that will accompany containers, like runtimes and daemons, but the containers themselves are just a combination of chroot, namespaces, and cgroups! Using these features together, we allow Bob, Alice, and Eve to run whatever code they want and the only people they can mess with are themselves.
192 |
193 | So while this is a container in its most basic sense, we haven't broached more advanced topics like networking, deploying, bundling, or anything else that something like Docker takes care of for us. But now you know at the most basic level what a container is, what it does, and how you _could_ do this yourself, but you'll be grateful that Docker does it for you. On to the next lesson!
194 |
195 | [move-to-v2]: https://medium.com/@charles.vissol/cgroup-v2-in-details-8c138088f9ba#aa07
196 | [v1]: https://btholt.github.io/complete-intro-to-containers/cgroups
197 | [kernel]: https://docs.kernel.org/admin-guide/cgroup-v2.html#controllers
198 | [fork-bomb]: https://en.wikipedia.org/wiki/Fork_bomb
199 | [hackers]: https://youtu.be/Rn2cf_wJ4f4
200 |
--------------------------------------------------------------------------------
/lessons/02-crafting-containers-by-hand/meta.json:
--------------------------------------------------------------------------------
1 | {
2 | "icon": "hand-holding-heart"
3 | }
--------------------------------------------------------------------------------
/lessons/03-docker/A-docker-images.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how to work with Docker images without Docker by unpacking, exporting,
4 | and creating a new isolated environment manually through commands. Understand
5 | the core concepts behind Docker such as namespace isolation, cgroups
6 | limitation, and chroot environment while exploring functionalities like
7 | networking and volumes.
8 | keywords:
9 | - Docker images
10 | - Docker Hub
11 | - container environment
12 | - namespace isolation
13 | - cgroups
14 | - chroot
15 | - environment setup
16 | ---
17 |
18 | These pre-made containers are called _images_. They basically dump out the state of the container, package that up, and store it so you can use it later. So let's go nab one of these images and run it! We're going to do it first without Docker to show you that you actually already know what's going on.
19 |
20 | First thing, let's go grab a container off of Docker Hub. Let's grab a recent Alpine Linux image.
21 |
22 | ### Docker Images without Docker
23 |
24 | ```bash
25 | # start docker container with docker running in it connected to host docker daemon
26 | docker run -ti -v /var/run/docker.sock:/var/run/docker.sock --privileged --rm --name docker-host docker:26.0.1-cli
27 |
28 | # run stock alpine container
29 | docker run --rm -dit --name my-alpine alpine:3.19.1 sh
30 |
31 | # export running container's file system
32 | docker export -o dockercontainer.tar my-alpine
33 |
34 | # make container-root directory, export contents of container into it
35 | mkdir container-root
36 | tar xf dockercontainer.tar -C container-root/
37 |
38 | # make a contained user, mount in name spaces
39 | unshare --mount --uts --ipc --net --pid --fork --user --map-root-user chroot $PWD/container-root ash # this also does chroot for us
40 | mount -t proc none /proc
41 | mount -t sysfs none /sys
42 | mount -t tmpfs none /tmp
43 |
44 | # here's where you'd do all the cgroup rules making with the settings you wanted to
45 | # we're not going to since we did it all in the last lesson
46 | ```
47 |
48 | So, this isn't totally it. Docker does a lot more for you than just this, like networking, volumes, and other things, but suffice to say this is the core of what Docker is doing for you: creating a new environment that's isolated by namespaces and limited by cgroups, and chroot'ing you into it. So why did we go through all this ceremony? Well, it's because I want you to understand what Docker is doing for you, and to know that you _could_ do it by hand, but since there's a tool that does it for you, you don't want to. I hold a strong personal belief that people need to understand their tools and what they do for them. Every tool you add to your environment adds complexity but should also add ease. If you don't understand the complexity the tool is solving, you resent it and don't get to fully appreciate nor take advantage of what the tool can offer.
49 |
50 | So how often will you do what we just did? Never. 99.9% of container-utilizers have no idea this is what's happening under the hood. But now that you know, you can embrace the complexity that Docker adds because you can see why you need it.
51 |
--------------------------------------------------------------------------------
/lessons/03-docker/B-docker-images-with-docker.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how to use Docker to run and interact with container images like Alpine
4 | and Ubuntu, execute commands within containers, manage running containers in
5 | the background, and clean up containers efficiently.
6 | keywords:
7 | - Docker
8 | - container images
9 | - Alpine
10 | - Ubuntu
11 | - running containers
12 | - manage containers
13 | - clean up containers
14 | ---
15 |
16 | ### Docker Images with Docker
17 |
18 | So it's much easier to do what we did with Docker. Run this command:
19 |
20 | ```bash
21 | docker run --interactive --tty alpine:3.19.1
22 | # or, to be shorter: docker run -it alpine:3.19.1
23 | ```
24 |
25 | A bit easier to remember, right? This will drop you into an Alpine ash shell inside of a container as the root user of that container. When you're done, just run `exit` or hit CTRL+D. Notice that this will grab the [alpine][alpine] image from Docker Hub for you and run it. The `run` part of the command is telling Docker you're going to be executing a container (as opposed to building it.) The `-it` part says you want to be dropped into the container interactively so you can run commands and inspect the container. By default containers run and then exit as soon as they're done. Go ahead and try `docker run alpine:3.19.1`. It'll look like it did nothing, but it actually starts the container and then, because it has nothing defined for it to do, it just exits.
26 |
27 | So what if you wanted it to execute something? Try this:
28 |
29 | ```bash
30 | docker run alpine:3.19.1 ls
31 | ```
32 |
33 | Or let's switch to Ubuntu now, since it's more familiar to most. We'll talk about Alpine later on in-depth.
34 |
35 | ```bash
36 | docker run ubuntu:jammy ls
37 | ```
38 |
39 | The `ls` part at the end is what you pass into the container to be run. As you can see here, it executes the command, outputs the results, and shuts down the container. The same idea works for something long-lived like a Node.js server: since a server doesn't exit on its own, the container will keep running until the server crashes or exits itself.
40 |
41 | So now what if we want to detach the container running from the foreground? Let's try that.
42 |
43 | ```bash
44 | docker run --detach -it ubuntu:jammy # or, to be shorter: docker run -dit ubuntu:jammy
45 | ```
46 |
47 | So it prints a long hash out and then nothing. Oh no! What happened to it!? Well, it's running in the background. So how do we get ahold of it?
48 |
49 | ```bash
50 | docker ps
51 | ```
52 |
53 | This will print out all the running containers that Docker is managing for you. You should see your container there. So copy the ID or the name and say:
54 |
55 | ```bash
56 | docker attach <ID or name> # e.g. `docker attach 20919c49d6e5` would attach to that container
57 | ```
58 |
59 | This allows you to attach a shell to a running container and mess around with it. Useful if you need to inspect something or see running logs. Feel free to type `exit` to get out of here. Run `docker run -dit ubuntu:jammy` one more time. Let's kill this container without attaching to it. Run `docker ps`, get the IDs or names of the containers you want to kill and say:
60 |
61 | ```bash
62 | docker kill <IDs or names> # e.g. `docker kill fae0f0974d3d 803e1721dad3 20919c49d6e5` would kill those three containers
63 | ```
64 |
65 | ## --name and --rm
66 |
67 | Let's make it a bit easier to keep track of these. Try this:
68 |
69 | ```bash
70 | docker run -dit --name my-ubuntu ubuntu:jammy
71 | docker kill my-ubuntu
72 | ```
73 |
74 | Now you can refer to these by a name you set. But now if you tried it again, it'd say that `my-ubuntu` exists. If you run `docker ps --all` you'll see that the container exists even if it's been stopped. That's because Docker keeps this metadata around until you tell it to stop doing that. You can run `docker rm my-ubuntu` which will free up that name or you can run `docker container prune` to free up all existing stopped containers (and free up some disk space.)
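
Putting that together, the whole flow looks roughly like this:

```bash
docker run -dit --name my-ubuntu ubuntu:jammy
docker kill my-ubuntu
docker ps --all     # my-ubuntu is still listed, just in an Exited state
docker rm my-ubuntu # frees up the name
# or remove every stopped container at once:
# docker container prune
```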
75 |
76 | In the future you can just do
77 |
78 | ```bash
79 | docker run --rm -dit --name my-ubuntu ubuntu:jammy
80 | docker kill my-ubuntu
81 | ```
82 |
83 | This will automatically clean up the container when it's done.
84 |
85 | [alpine]: https://www.alpinelinux.org/
86 |
--------------------------------------------------------------------------------
/lessons/03-docker/C-javascript-on-docker.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how to run Node.js, Deno, Bun, and other runtimes in containers using
4 | Docker images. Explore different Linux distros like Debian, Alpine, and CoreOS
5 | for your containerized applications.
6 | keywords:
7 | - Node.js
8 | - Docker
9 | - containers
10 | - Linux distros
11 | - Deno
12 | - Bun
13 | - runtimes
14 | ---
15 |
16 | ## Node.js on Containers
17 |
18 | So now what if we wanted to run a container that has Node.js in it? The default Ubuntu container doesn't have Node.js installed. Let's use a different container!
19 |
20 | ```bash
21 | docker run -it node:20
22 | ```
23 |
24 | The version we're using here is Node.js version 20. If you run this as-is, it'll drop you directly into Node.js. What version of Linux do you think this is? Let's find out!
25 |
26 | ```bash
27 | docker run -it node:20 cat /etc/issue
28 | ```
29 |
30 | It's Debian! They chose Debian, which is a perfectly great distro to use (it's what Ubuntu is based on.)
31 |
32 | What if we wanted to be dropped into bash of that container? Easy! You already know how!
33 |
34 | ```bash
35 | docker run -it node:20 bash
36 | ```
37 |
38 | Remember, after we identify the container ([node][node]), anything we put after gets evaluated instead of the default command identified by the container (in the `node` container's case, it runs the command `node` by default). This allows us to run whatever command we want! In this case, we're executing `bash`, which puts us directly into a bash shell.
39 |
40 | We'll get into later how to select which Linux distros you should use but for now this is just a fun exercise.
41 |
42 | Just for fun, let's try one of the other Linux distros that you can use with Node.js
43 |
44 | ```bash
45 | docker run -it node:20-alpine cat /etc/issue
46 | ```
47 |
48 | This one still has Node.js version 20 on it but it's using a much slimmer version of Linux on it, Alpine. We'll talk a lot about Alpine later but know that it's possible.
49 |
50 | ## Deno
51 |
52 | ```bash
53 | docker run -it denoland/deno:centos-1.42.4
54 | docker run -it denoland/deno:centos-1.42.4 deno
55 | ```
56 |
57 | This will allow you to run Deno, an alternative JavaScript runtime to Node.js. The first command should log out "Welcome to Deno!" and then exit.
58 |
59 | The operating system in this image, CentOS, is another good candidate for your containers' Linux distro; it's a Red Hat (IBM) product in the same family as Fedora.
60 |
61 | The second command will actually get you into the Deno REPL to play around with Deno.
62 |
63 | ## Bun
64 |
65 | ```bash
66 | docker run -it oven/bun:1.1.3 bun repl
67 | docker run -it oven/bun:1.1.3 cat /etc/issue
68 | ```
69 |
70 | Like above, the first command will get you into Bun, another JS runtime based on Safari's JavaScript engine JavaScriptCore (as opposed to Chrome's V8.)
71 |
72 | The second command will let you see that by default Bun uses Debian.
73 |
74 | ## A few other runtimes
75 |
76 | ```bash
77 | # you don't have to run all of these, just wanted to show you the variety of what's available
78 | docker run -it ruby:3.3
79 | docker run -it golang:1.22.2
80 | docker run -it rust:1.77.2
81 | docker run -it php:8.2
82 | docker run -it python:3.12.3
83 | ```
84 |
85 | Here are just a few, but as you can imagine, just about every runtime has a pre-made container for it. And in case yours doesn't, I'll show you how to make one!
86 |
87 | [node]: https://hub.docker.com/_/node
88 |
--------------------------------------------------------------------------------
/lessons/03-docker/D-tags.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how to manage Docker container versions, from using the latest tag to
4 | specifying specific versions for Node.js and exploring Alpine Linux for
5 | minimalistic container deployments. Discover the benefits of choosing
6 | lightweight Alpine containers over larger Ubuntu or Debian images for faster
7 | deployment, reduced storage costs, and enhanced security.
8 | keywords:
9 | - Docker containers
10 | - version management
11 | - Alpine Linux
12 | - Node.js
13 | - container deployment
14 | - security
15 | - minimalist containers
16 | ---
17 |
18 | So far we've just been running containers with random tags that I chose. If you run `docker run -it node` the tag implicitly is using the `latest` tag. When you say `docker run -it node`, it's the same as saying `docker run -it node:latest`. The `:latest` is the tag. This allows you to run different versions of the same container, just like you can install React version 17 or React version 18: sometimes you don't want the latest. Let's say you have a legacy application at your job and it depends on running on Node.js 20 (update your app, Node.js is already past end-of-life); then you can say
19 |
20 | ```bash
21 | docker run -it node:20 bash
22 | ```
23 |
24 | Once in the shell, run `node --version` and you'll see the Node.js version is 20.x.x! Neat! This is helpful because now we can fix our Node.js version to the one our app expects. Hop back over to [the Docker Hub page for the node container][node]. Take a look at all the versions of the node container you can download. Let's try another one.
25 |
26 | ```bash
27 | docker run node:20-alpine cat /etc/issue
28 | ```
29 |
30 | You'll see this is running an entirely different OS altogether: Alpine! [Alpine Linux][alpine] is a very, very tiny distro of Linux made for containers, specifically because it is tiny. Alpine containers are bare bones: if you want _anything_ in them, you're going to have to do it yourself. This is in opposition to the Ubuntu and Debian containers: they ship the kitchen sink with them, which is both convenient and much bigger in size. Alpine images are about five megabytes whereas Ubuntu is close to two hundred megabytes. As you can imagine, this can make a difference in how fast you can deploy and can cost significantly less in terms of storage and network traffic. It's also in general better to have fewer unnecessary things in your containers: less is more in terms of security. If an attacker tries to execute a Python exploit on your container but your container doesn't have Python, then their attack won't work.
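
You can see the difference yourself by pulling both and comparing (exact numbers will vary by version and platform):

```bash
docker pull alpine:3.19.1
docker pull ubuntu:jammy
docker images # compare the SIZE column for the two images
```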
31 |
32 | We'll get more into how to ship containers to production, but I'll leave you with this pro-tip: have a development container that has all the bells, whistles, debugging tools, etc. that you need. Then have a production container that's as minimal as it possibly can be. You'll get the best of both worlds.
33 |
34 | [node]: https://hub.docker.com/_/node/
35 | [alpine]: https://hub.docker.com/_/alpine
36 |
--------------------------------------------------------------------------------
/lessons/03-docker/E-docker-cli.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Docker CLI
3 | description: >-
4 | Explore Docker CLI features like pull, push, inspect, and more. Learn how to
5 | manage containers efficiently with commands such as pause, unpause, exec,
6 | import, export, history, info, top, rm, rmi, logs, restart, and search.
7 | keywords:
8 | - Docker CLI
9 | - container management
10 | - Docker commands
11 | - Docker features
12 | - Docker container operations
13 | ---
14 |
15 | Let's take a look at some more cool features of the Docker CLI.
16 |
17 | ### pull / push
18 |
19 | `pull` allows you to pre-fetch a container image to run later.
20 |
21 | ```bash
22 | # this just downloads and caches the image, it doesn't do anything else with it
23 | docker pull jturpin/hollywood
24 |
25 | # notice it's already loaded and cached here; it doesn't redownload it
26 | docker run -it jturpin/hollywood hollywood
27 | ```
28 |
29 | That will pull the hollywood container from the user jturpin's account. The second line will execute this fun container, which is just meant to look like a hacker's screen in a movie (it doesn't really do anything other than look cool.)
30 |
31 | > Note: The `jturpin/hollywood` image has been deprecated. These steps should still work, but if you have issues, you can replace that image with `bcbcarl/hollywood`.
32 |
33 | `push` allows you to push containers to whatever registry you're connected to (probably normally Docker Hub or something like Azure Container Registry or GitHub Container Registry).
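
A rough sketch of what that looks like with Docker Hub, assuming you've already run `docker login` (`<your-username>` and `my-alpine` are placeholders for the example, not part of the course project):

```bash
# tag an image you have locally with your registry account name, then push it
docker tag alpine:3.19.1 <your-username>/my-alpine:latest
docker push <your-username>/my-alpine:latest
```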
34 |
35 | ### inspect
36 |
37 | ```bash
38 | docker inspect node:20
39 | ```
40 |
41 | This will dump out a lot of info about the image or container you give it. Helpful when figuring out what's going on with one.
42 |
43 | ### pause / unpause
44 |
45 | As they sound, these pause or unpause all the processes in a container. Feel free to try:
46 |
47 | ```bash
48 | docker run -dit --name hw --rm jturpin/hollywood hollywood
49 | docker ps # see container running
50 | docker pause hw
51 | docker ps # see container paused
52 | docker unpause hw
53 | docker ps # see container running again
54 | docker kill hw # see container is gone
55 | ```
56 |
57 | ### exec
58 |
59 | This allows you to execute a command against a running container. This is different from `docker run` because `docker run` will start a new container whereas `docker exec` runs the command in an already-running container.
60 |
61 | ```bash
62 | docker run -dit --name hw --rm jturpin/hollywood hollywood
63 |
64 | # see it output all the running processes of the container
65 | docker exec hw ps aux
66 | ```
67 |
68 | If you haven't seen `ps aux` before, it's a really useful way to see what's running on your computer. Try running `ps aux` on your macOS or Linux computer to see everything running.
69 |
70 | ### import / export
71 |
72 | `export` allows you to dump out your container's file system to a tarball (which we did above.) You can bring a tarball back in as an image with `import`.
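
A quick sketch of the round trip (the image name `my-imported-image` is made up for the example):

```bash
# export a running container's file system to a tarball
docker run -dit --name my-ubuntu --rm ubuntu:jammy
docker export -o my-container.tar my-ubuntu
docker kill my-ubuntu

# import that tarball back in as a new image
docker import my-container.tar my-imported-image:latest
docker images my-imported-image
```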
73 |
74 | ### history
75 |
76 | We'll get into layers in a bit, but this allows you to see how a Docker image's layer composition has changed over time and how recently.
77 |
78 | ```bash
79 | docker history node:20
80 | ```
81 |
82 | ### info
83 |
84 | Dumps a bunch of info about the host system. Useful if you're on a VM somewhere and not sure what the environment is.
85 |
86 | ```bash
87 | docker info
88 | ```
89 |
90 | ### top
91 |
92 | Allows you to see processes running on a container (similar to what we did above)
93 |
94 | ```bash
95 | docker run -dit --name my-mongo --rm mongo
96 | docker top my-mongo # you should see MongoDB running
97 | docker kill my-mongo
98 | ```
99 |
100 | ### rm / rmi
101 |
102 | If you run `docker ps --all` it'll show all containers you've stopped in addition to the ones you're running. If you want to remove something from this list, you can do `docker rm <id or name>`.
103 |
104 | You can run `docker container prune` to remove _all_ of the stopped containers.
105 |
106 | If you want to remove an image from your computer (to save space or whatever) you can run `docker rmi mongo` and it'll delete the image from your computer. This isn't a big deal since you can always download it again.
107 |
108 | ### logs
109 |
110 | Very useful to see the output of one of your running containers.
111 |
112 | ```bash
113 | docker run --name my-mongo --rm -dit mongo
114 | docker logs my-mongo # see all the logs
115 | docker kill my-mongo
116 | ```
117 |
118 | ### restart
119 |
120 | Pretty self-explanatory: this will restart a running container.
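
For example:

```bash
docker run -dit --name my-ubuntu ubuntu:jammy
docker restart my-ubuntu
docker ps # the STATUS column shows it was just started again
docker kill my-ubuntu
docker rm my-ubuntu
```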
121 |
122 | ### search
123 |
124 | If you want to see if a container exists on Docker Hub (or whatever registry you're connected to), this will allow you to take a look.
125 |
126 | ```bash
127 | docker search python # see all the various flavors of Python containers you can run
128 | docker search node # see all the various flavors of Node.js containers you can run
129 | ```
130 |
--------------------------------------------------------------------------------
/lessons/03-docker/meta.json:
--------------------------------------------------------------------------------
1 | {
2 | "icon": "fish"
3 | }
--------------------------------------------------------------------------------
/lessons/04-dockerfiles/A-intro-to-dockerfiles.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how to build Docker containers with a Dockerfile, the key instructions
4 | involved, and the concept of disposable containers. Discover the power of
5 | creating container images incrementally by leveraging existing images in the
6 | Docker ecosystem.
7 | keywords:
8 | - Docker containers
9 | - Dockerfile
10 | - building containers
11 | - disposable containers
12 | - docker run command
13 | - container versioning
14 | ---
15 |
16 | So far we've been focusing a lot on running containers and haven't much dug into building them. This is on purpose because most of the benefit of containers for developers comes from the running of containers. If you learn one thing, it should be how to run them. In fact I'll even venture to say that _most_ developers really only ever need to know how to run them. But you, you're going to learn how to write them. It's an extra superpower.
17 |
18 | That said, let's learn to build our own containers. We'll again be using Docker for this though there are other ways to do this. Docker has a special file called a `Dockerfile` which allows you to outline how a container will be built. Each line in a Dockerfile is a new directive for how to change your Docker container.
19 |
20 | A _big key_ with Docker containers is that they're supposed to be disposable. You should be able to create them and throw them away as many times as necessary. In other words: adopt a mindset of making everything short-lived. There are other, better tools for long-running, custom containers.
21 |
22 | The (imperfect) analogy that people use sometimes is your containers should be "[cattle, not pets][cattle]". You design containers so you can easily create and destroy them as much as necessary. The analogy here is that you name your pets and take special care of them whereas you have a thousand cattle and can't name or take special care of them, just the herd.
23 |
24 | Let's make the most basic Dockerfile ever. Let's make a new folder, maybe on your desktop. Put a file in there called `Dockerfile` (no extension.) In your file, put this.
25 |
26 | ## The most basic Dockerfile-based Container
27 |
28 | ```dockerfile
29 | FROM node:20
30 |
31 | CMD ["node", "-e", "console.log(\"hi lol\")"]
32 | ```
33 |
34 | [⛓️ Link to the Dockerfile][dockerfile]
35 |
36 | The first things on each line (`FROM` and `CMD` in this case) are called _instructions_. They don't technically have to be all caps but it's convention to do so so that the file is easier to read. Each one of these instructions incrementally changes the container from the state it was in previously, adding what we call a _layer_.
37 |
38 | Let's go ahead and build our container. Run (from inside of the directory of where your Dockerfile is)
39 |
40 | ```bash
41 | docker build .
42 | ```
43 |
44 | You should see it output a bunch of stuff and it'll leave you with the hash of an image. After each instruction, you'll see a hash similar to the ones we've been using for the IDs for the containers. You know why that is? It's because each one of those layers is in-and-of itself a valid container image! This ends up being important later and we'll discuss it in a bit.
45 |
46 | Our container has two instructions in its Dockerfile, but actually it has many, many more. How? The first instruction, `FROM node:20`, actually means _start_ with the `node` container. That container itself [comes from another Dockerfile][docker-node] which builds its own container, which itself [comes from another Dockerfile][buildpack], which ultimately comes from the [Debian][debian] image.
47 |
48 | This is something very powerful about Docker: you can use images to build other images and build on the work of others. Instead of having to worry about how to install Debian and all the necessary items to build Node.js from its source, we can just start with a well-put-together image from the community.
49 |
50 | Okay, so we start with `node:20` and then we add the `CMD` instruction. There will only ever be one of these in effect in a Dockerfile. If you have multiple it'll just take the last one. This is what you want Docker to do when someone runs the container. In our case, we're running `node -e "console.log('hi lol')"` from within the container. `node -e`, if you don't know, will run whatever is inside of the quotes with Node.js. In this case, we're logging out `hi lol` to the console.
51 |
52 | You _can_ put `CMD node -e "console.log('hi lol')"` as that last line and it'll work, but it's not the preferred way of doing it. The array (exec) form we used doesn't actually go through a shell, which is simpler and usually safer. I do it this way because the docs strongly encourage you to do it this way.
53 |
54 | So, in essence, our container nabs a `node:20` container and then has it execute a `node` command when you run it. Let's try it. Grab the hash from your build and run
55 |
56 | ```bash
57 | docker run <ID>
58 | ```
59 |
60 | It's a little inconvenient to always have to refer to it by ID, it'd be easier if it had a name. So let's do that! Try
61 |
62 | ```bash
63 | docker build . --tag my-node-app ## or -t instead of --tag
64 | docker run my-node-app
65 | ```
66 |
67 | Much easier to remember the name rather than a hash. If you want to version it yourself, you can totally do this:
68 |
69 | ```bash
70 | docker build -t my-node-app:1 .
71 | docker run my-node-app:1
72 | ```
73 |
74 | Now change your `Dockerfile` so that it logs out `wat` instead of `hi lol`. After you do that, run:
75 |
76 | ```bash
77 | docker build -t my-node-app:2 .
78 | docker run my-node-app:2
79 | docker run my-node-app:1
80 | ```
81 |
82 | You can version your containers and hold on to older ones, just in case!
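
If you want to see the tags you've built so far:

```bash
docker images my-node-app # lists just the my-node-app tags, e.g. 1 and 2
```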
83 |
84 | [buildpack]: https://github.com/docker-library/buildpack-deps
85 | [debian]: https://hub.docker.com/_/debian/
86 | [docker-node]: https://github.com/nodejs/docker-node/blob/master/Dockerfile-debian.template
87 | [cattle]: http://cloudscaling.com/blog/cloud-computing/the-history-of-pets-vs-cattle/
88 | [dockerfile]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/intro-to-dockerfiles/Dockerfile
89 |
--------------------------------------------------------------------------------
/lessons/04-dockerfiles/B-build-a-nodejs-app.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Build a Node.js App
3 | description: >-
4 | Learn how to set up a basic Node.js application inside Docker with detailed
5 | steps on copying files, exposing ports, and user permissions. Enhance your
6 | Dockerfile skills by optimizing file structures and using instructions like
7 | COPY, USER, and WORKDIR effectively.
8 | keywords:
9 | - Dockerfile
10 | - Node.js application
11 | - Docker setup
12 | - copy files in Docker
13 | - expose ports in Docker
14 | - user permissions in Docker
15 | - WORKDIR instruction
16 | ---
17 |
18 | So now let's dig into some more advance things you can do with a Dockerfile. Let's first make our project a real Node.js application. Make a file called `index.js` and put this in there.
19 |
20 | ```javascript
21 | const http = require("http");
22 |
23 | http
24 | .createServer(function (request, response) {
25 | console.log("request received");
26 | response.end("omg hi", "utf-8");
27 | })
28 | .listen(3000);
29 | console.log("server started");
30 | ```
31 |
32 | [⛓️ Link to Node.js file][node-file]
33 |
34 | This is more-or-less the most barebones Node.js app you can write. It just responds to HTTP traffic on port 3000. Go ahead and try running it on your local computer (outside of Docker) by running `node index.js`. Open [localhost:3000][localhost] in your browser to give it a shot.
35 |
36 | Okay, so let's get this running _inside_ Docker now. First thing is we have to copy this file from your local file system into the container. We'll use a new instruction, `COPY`. Modify your Dockerfile to say:
37 |
38 | ```dockerfile
39 | FROM node:20
40 |
41 | COPY index.js index.js
42 |
43 | CMD ["node", "index.js"]
44 | ```
45 |
46 | [⛓️ Link to Dockerfile][dockerfile-1]
47 |
48 | This will copy your index.js file from your file system into the Docker file system (the first index.js is the source and the second index.js is the destination of that file inside the container.)
49 |
50 | We then modified the `CMD` to start the server when we finally do run the container. Now run
51 |
52 | ```bash
53 | docker build -t my-node-app .
54 | docker run --name my-app --rm my-node-app
55 | ```
56 |
57 | You might need to open another terminal and type `docker kill my-app`.
58 |
59 | Now your Node.js app is running inside of a container managed by Docker! Hooray! But one problem: how do we access it? If you open [localhost:3000][localhost] now, it doesn't work! We have to tell Docker to expose the port. So let's do that now. Stop your container from running and run it again like this.
60 |
61 | Try stopping your server now. Your normal CTRL+C won't work. Node.js doesn't handle SIGINT (which is what CTRL+C sends) by default. Instead you either have to handle it yourself inside of your Node.js code (preferable for real apps) or you can tell Docker to handle it with the `--init` flag. This uses a package called [tini][tini] to handle shutdown signals for you.
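
For reference, handling it yourself is only a few lines; a minimal sketch you could add to an app like this one (not part of the course's index.js):

```javascript
// handle SIGINT (what CTRL+C sends) so the process can shut down cleanly
process.on("SIGINT", () => {
  console.log("shutting down");
  process.exit(0);
});
```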
62 |
63 | ```bash
64 | docker run --init --publish 3000:3000 --rm my-node-app # or you can use -p instead of --publish
65 | ```
66 |
67 | The `publish` part allows you to forward a port out of a container to the host computer. In this case we're forwarding port `3000` (which is what the Node.js server was listening on) to port `3000` on the host machine. The first `3000` represents the port on the host machine and the second `3000` represents what port is being used in the container. If you did `docker run --publish 8000:3000 my-node-app`, you'd open `localhost:8000` to see the server (running on port `3000` inside the container).
68 |
69 | Next, let's organize ourselves a bit better. Right now we're putting our app into the root directory of our container and running it as the root user. This is both messy and unsafe. If there's an exploit for Node.js that gets released, it means that whoever uses that exploit on our Node.js server will be doing so as root, which means they can do whatever they want. Ungood. So let's fix that. We'll put the directory inside our home directory under a different user.
70 |
71 | ```dockerfile
72 | FROM node:20
73 |
74 | USER node
75 |
76 | COPY index.js /home/node/code/index.js
77 |
78 | CMD ["node", "/home/node/code/index.js"]
79 | ```
80 |
81 | The `USER` instruction lets us switch from being the root user to a different user, one called "node" which the `node:20` image has already made for us. We could make our own user too using bash commands but let's just use the one the node image gave us. (More or less you'd run `RUN useradd -ms /bin/bash lolcat` to add a lolcat user.)
82 |
83 | Notice we're now copying inside of the user's home directory. This is because they'll have proper permissions to interact with those files whereas they may not if we were outside of their home directory. You'll save yourself a lot of permission wrangling if you put it in a home directory. But we'll have to add a flag to the `COPY` command to make sure the user owns those files. We'll do that with `--chown=node:node` where the first `node` is the user and the second `node` is the user group.
84 |
85 | It's no big deal that the "code" directory doesn't exist, `COPY` will create it.
86 |
87 | ### A Quick Note on COPY vs ADD
88 |
89 | The two commands `COPY` and `ADD` do very similar things with a few key differences. `ADD` can also accept, in addition to local files, URLs to download things off the Internet and it will also automatically unzip any tar files it downloads or adds. `COPY` will just copy local files. Use `COPY` unless you need to unzip something or are downloading something.
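
A hypothetical illustration (these file names and the URL are made up, not part of the project):

```dockerfile
# COPY only copies local files and directories
COPY local-file.txt /home/node/code/local-file.txt

# ADD can also fetch a URL (downloads are NOT auto-extracted)
ADD https://example.com/some-file.txt /home/node/code/some-file.txt

# ADD auto-extracts local tar archives into the destination directory
ADD my-archive.tar.gz /home/node/code/
```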
90 |
91 | ---
92 |
93 | Great. Let's make everything a bit more succinct by setting a working directory.
94 |
95 | ```dockerfile
96 | FROM node:20
97 |
98 | USER node
99 |
100 | WORKDIR /home/node/code
101 |
102 | COPY --chown=node:node index.js .
103 |
104 | CMD ["node", "index.js"]
105 | ```
106 |
107 | [⛓️ Link to Dockerfile][dockerfile-2]
108 |
109 | `WORKDIR` works as if you had `cd`'d into that directory, so now all paths are relative to that. And again, if it doesn't exist, it will create it for you.
110 |
111 | Now we just tell `COPY` to copy the file into the same directory. Since we're giving it a directory instead of a file name, it'll just assume we want the same name. You could rename it here if you wanted.
112 |
113 | [localhost]: http://localhost:3000
114 | [tini]: https://github.com/krallin/tini
115 | [node-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/build-a-nodejs-app/index.js
116 | [dockerfile-1]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/build-a-nodejs-app/Dockerfile
117 | [dockerfile-2]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/build-a-nodejs-app/better.Dockerfile
118 |
--------------------------------------------------------------------------------
/lessons/04-dockerfiles/C-build-a-more-complicated-nodejs-app.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Build a More Complicated Node.js App
3 | description: >-
4 | Learn how to containerize a Node.js app using Fastify, npm, and Docker. Follow
5 | steps for npm installation, Dockerfile creation, and handling permissions
6 | issues within the container.
7 | keywords:
8 | - Node.js
9 | - Fastify
10 | - npm install
11 | - Docker
12 | - containerize
13 | - Dockerfile
14 | - permissions issues
15 | ---
16 |
17 | Okay, all looking good so far. Let's make this app go one step further. Let's have it have an npm install step! In the directory where your app is, put this:
18 |
19 | ```javascript
20 | // this is the sample app from fastify.dev
21 |
22 | // Require the framework and instantiate it
23 | const fastify = require("fastify")({ logger: true });
24 |
25 | // Declare a route
26 | fastify.get("/", function handler(request, reply) {
27 | reply.send({ hello: "world" });
28 | });
29 |
30 | // Run the server!
31 | fastify.listen({ port: 8080, host: "0.0.0.0" }, (err) => {
32 | if (err) {
33 | fastify.log.error(err);
34 | process.exit(1);
35 | }
36 | });
37 | ```
38 |
39 | [⛓️ Link to the code][node-file]
40 |
41 | This is a [Fastify][fastify] server. Fastify is a server-side framework (like Express) for Node.js and one I've used several times. This is going to require that we `npm install` the dependencies. So in your project do the following
42 |
43 | ```bash
44 | npm init -y # this will create a package.json for you without asking any questions
45 | npm install fastify
46 | ```
47 |
48 | [⛓️ Link to the package.json][package-file]
49 |
50 | Now try running `node index.js` to run the Node.js server. You should see it running and logging out info whenever you hit an endpoint. Cool, so now that we have a full-featured Node.js app, let's containerize it.
51 |
52 | If we tried to build it and run it right now it'd fail because we didn't `npm install` the dependencies. So now right after the `COPY` we'll add a `RUN`.
53 |
54 | ```dockerfile
55 | FROM node:20
56 |
57 | USER node
58 |
59 | WORKDIR /home/node/code
60 |
61 | COPY --chown=node:node . .
62 |
63 | RUN npm ci
64 |
65 | CMD ["node", "index.js"]
66 | ```
67 |
68 | ```bash
69 | docker build -t more-complicated-app .
70 | docker run -it -p 8080:8080 --name my-app --rm --init more-complicated-app
71 | ```
72 |
73 | We changed the `COPY` to copy everything in the directory. Right now you probably have a `node_modules` but if you're building a container directly from a repo it won't copy the `node_modules` so we have to operate under the assumption that those won't be there. Feel free even to delete them if you want.
74 |
75 | Let's go ahead and add a `.dockerignore` file to the root of the project that prevents Docker from copying the `node_modules`. This has the same format as a `.gitignore`.
76 |
77 | ```
78 | node_modules/
79 | .git/
80 | ```
81 |
82 | We then added a `RUN` instruction to run a command inside of the container. If you're not familiar with `npm ci` it's very similar to `npm install` with a few key differences: it'll follow the `package-lock.json` exactly (where `npm install` will ignore it and update it if newer patch versions of your dependencies are available) and it'll automatically delete `node_modules` if it exists. `npm ci` is made for situations like this.
83 |
84 | Now if you try to build again, it _may_ fail with permissions issues. Why? Well, when you have `WORKDIR` create a directory, it does so as root (depending on which version of Docker you're using), which means that the node user won't have enough permissions to modify that directory. We could either use `RUN` to change the ownership of that directory or we could use `RUN` to make the directory in the first place as the node user. Let's do the latter.
85 |
86 | Generally it's encouraged to not rely on `WORKDIR` to get it right and just do it yourself.
87 |
88 | ```dockerfile
89 | FROM node:20
90 |
91 | USER node
92 |
93 | RUN mkdir /home/node/code
94 |
95 | WORKDIR /home/node/code
96 |
97 | COPY --chown=node:node . .
98 |
99 | RUN npm ci
100 |
101 | CMD ["node", "index.js"]
102 | ```
103 |
104 | [⛓️ Link to the Dockerfile][dockerfile-file]
105 |
106 | ```bash
107 | docker build -t more-complicated-app .
108 | docker run -it -p 8080:8080 --name my-app --rm --init more-complicated-app
109 | ```
110 |
111 | Now try building and running your container. It should work now! Yay!
112 |
113 | > **NOTE:** Make sure you don't bind your app to host `localhost` (like if you put `localhost` instead of `0.0.0.0` in the host in our Fastify app.) This will make it so the app is only available _inside_ the container. If you see `connection reset` when you're expecting a response, this is a good candidate for what's happening (because this definitely didn't _just_ happen to me 😂.) You need to have the `host: "0.0.0.0"` in your Node.js app.
114 |
115 | [node-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/build-a-more-complicated-nodejs-app/index.js
116 | [package-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/build-a-more-complicated-nodejs-app/package.json
117 | [dockerfile-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/build-a-more-complicated-nodejs-app/Dockerfile
118 | [fastify]: https://fastify.dev/
119 |
--------------------------------------------------------------------------------
/lessons/04-dockerfiles/D-a-note-on-expose.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Understanding the usage and limitations of the `EXPOSE` instruction in Docker,
4 | its intended purpose to expose container ports to the host machine, and the
5 | optional `-P` flag for mapping ports to random host ports. Considerations
6 | include documentation benefits and deliberate port mapping.
7 | keywords:
8 | - Docker EXPOSE instruction
9 | - Docker port mapping
10 | - Dockerfile port documentation
11 | ---
12 |
13 | This was a point of confusion for me so I'm going to try to clear it up for you. There is an instruction called `EXPOSE <port number>` whose intended use is to expose ports from within the container to the host machine. However, if we don't do the `-p 3000:3000`, the port still isn't published, so in reality this instruction doesn't do much. You don't need `EXPOSE`.
14 |
15 | There are two caveats to that. The first is that it could be useful documentation to say "I know this Node.js service listens on port 3000 and now anyone who reads this Dockerfile will know that too." I would counter that I don't think the Dockerfile is the best place for that documentation.
16 |
17 | The second caveat is that instead of `-p 3000:3000` you can do `-P`. This will take all of the ports you exposed using `EXPOSE` and will map them to random ports on the host. You can see what ports it chose by using `docker ps`. It'll say something like `0.0.0.0:32769->3000/tcp` so you can see in this case it chose `32769`. Again, I'd prefer to be deliberate about which ports are being mapped.
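
To make that concrete, here's a hedged sketch (it assumes a Dockerfile that contains `EXPOSE 3000` and an app listening on port 3000; the host port Docker picks will differ on your machine):

```bash
docker build -t expose-demo .
docker run -d --rm --name expose-demo -P expose-demo
docker ps --filter name=expose-demo   # PORTS will show something like 0.0.0.0:32769->3000/tcp
docker stop expose-demo
```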
18 |
--------------------------------------------------------------------------------
/lessons/04-dockerfiles/E-layers.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how Docker optimizes build processes by reusing cached layers and
4 | rebuilding only what has changed, like with the COPY instruction. Discover
5 | strategies to speed up container-building, such as breaking COPY into multiple
6 | instructions for efficient npm ci runs in Node.js applications.
7 | keywords:
8 | - Docker optimization
9 | - container building
10 | - npm ci optimization
11 | - speed up Docker builds
12 | ---
13 |
14 | Go make any change to your Node.js app. Now re-run your build process. Docker is smart enough to see that your `FROM`, `RUN`, and `WORKDIR` instructions haven't changed and wouldn't change if you ran them again, so it uses the same layers it cached from the previous build, but it can see that your `COPY` is different since files changed between last time and this time, so it begins the build process there and re-runs all instructions after that. Pretty smart, right? This is the same mechanism Docker uses when you pull a new container to download it in pieces. Each one of those pieces corresponds to a layer.
15 |
16 | So which part of container-building takes the longest? `RUN npm ci`. Anything that has to hit the network is going to take the longest, without a doubt. The shame is that our `package.json` hasn't changed since the previous iteration; we just changed something in our `index.js`. So how do we make it so we only re-run our `npm ci` when `package.json` changes? Break it into two `COPY` instructions!
17 |
18 | ```Dockerfile
19 | FROM node:20
20 |
21 | USER node
22 |
23 | RUN mkdir /home/node/code
24 |
25 | WORKDIR /home/node/code
26 |
27 | COPY --chown=node:node package-lock.json package.json ./
28 |
29 | RUN npm ci
30 |
31 | COPY --chown=node:node . .
32 |
33 | CMD ["node", "index.js"]
34 | ```
35 |
36 | [⛓️ Link to the Dockerfile][dockerfile-file]
37 |
38 | ```bash
39 | docker build -t layers .
40 | docker run -it -p 8080:8080 --name my-app --rm --init layers
41 | ```
42 |
43 | The first `COPY` pulls just the `package.json` and the `package-lock.json`, which is just enough to do the `npm ci`. After that we nab the rest of the files. Now if you make changes to your source files you avoid doing a full npm install. This is useful and recommended for any dependency installation: apt-get, pip, cargo, gems, etc., as well as any long-running command like building something from source.
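
If you want to watch the cache in action, make a trivial change to a source file (not `package.json`) and rebuild; this is just a sketch of what to expect:

```bash
echo "// trivial change" >> index.js
docker build -t layers .
# everything through RUN npm ci should be reported as cached;
# only the final COPY and anything after it actually re-runs
```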
44 |
45 | [dockerfile-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/layers/Dockerfile
46 |
--------------------------------------------------------------------------------
/lessons/04-dockerfiles/meta.json:
--------------------------------------------------------------------------------
1 | {
2 | "icon": "file-lines"
3 | }
--------------------------------------------------------------------------------
/lessons/05-making-tiny-containers/A-alpine-linux.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how to make your Node.js app container faster, cheaper, and more secure
4 | by optimizing its size. Reduce vulnerabilities and hosting costs with tips on
5 | minimizing container size using Alpine Linux in Docker.
6 | keywords:
7 | - Node.js
8 | - container optimization
9 | - Alpine Linux
10 | - Docker
11 | - security
12 | - cost-effective hosting
13 | ---
14 |
15 | We've now built a nice little container for our Node.js app and we absolutely could ship it as-is to production. However there's a few things we can do to make things even faster, cheaper, and more secure.
16 |
17 | ## Making your containers smaller
18 |
19 | Making your containers smaller is a good thing for a few reasons. For one, everything tends to get a bit cheaper. Moving containers across the Internet takes time and bits to do. If you can make those containers smaller, things will go faster and you'll require less space on your servers. Often private container registries (like a personal Docker Hub; Azure Container Registry is a good example) charge you by how much storage you're using.
20 |
21 | Beyond that, having less _things_ in your container means you're less susceptible to bugs. Let's say there's a Python exploit that's going around that allows hackers to get root access to your container. If you don't have Python in your container, you're not vulnerable! And obviously if you do have Python installed (even if you're not using it) you're vulnerable. So let's see how to make your container a bit smaller.
22 |
23 | In your previous Dockerfile, change the first line (the `FROM`) to use the Alpine variant of the Node.js image:
24 |
25 | ```dockerfile
26 | FROM node:20-alpine
27 |
28 | USER node
29 |
30 | RUN mkdir /home/node/code
31 |
32 | WORKDIR /home/node/code
33 |
34 | COPY --chown=node:node package-lock.json package.json ./
35 |
36 | RUN npm ci
37 |
38 | COPY --chown=node:node . .
39 |
40 | CMD ["node", "index.js"]
41 | ```
42 |
43 | [⛓️ Link to the Dockerfile][dockerfile-file]
44 |
45 | Our image size (compare the `"Size"` field in `docker inspect my-app`) dropped from 1.1GB to 150MB just like that. We shed quite a bit of cruft that we didn't need from Debian and we didn't need to change anything else in our Dockerfile. Honestly, that's unusual. When you strip _everything_ out, typically you'll have to go back and add some things back in. But in this case we're golden!
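
If you want to see the numbers yourself, something like this works (this assumes you kept the `my-app` tag from earlier lessons; use whatever tags you actually built):

```bash
docker images                            # compare the SIZE column across your tags
docker inspect -f '{{ .Size }}' my-app   # the raw size in bytes
```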
46 |
47 | Alpine, if you remember, is a bare bones alternative to Debian. It's built on Busybox Linux which is a 2MB distro of Linux (Alpine is 5MB.) `node:20-alpine` itself is about `133MB` and `node:latest` is about 1.0GB.
48 |
49 | When should you select Alpine? My general feeling (this is a Brian Holt opinion, not a community one, so take it with a grain of salt) is that the "end destination" container is where Alpine is most useful. It cuts all the cruft out, which is super helpful for end-deployment sorts of scenarios due to security and size, but it can also be annoying for development scenarios because it lacks just about everything necessary for those, making you hand-install everything you need. In the "middle scenarios" where it's not really the destination and the container is just another tool in your development system (whether that's a multi-stage build or a development container) I'll reach for Ubuntu or Debian.
50 |
51 | [dockerfile-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/alpine-linux/Dockerfile
52 |
--------------------------------------------------------------------------------
/lessons/05-making-tiny-containers/B-making-our-own-alpine-nodejs-container.md:
--------------------------------------------------------------------------------
1 | ---
2 | title: Making Our Own Alpine Node.js Container
3 | description: >-
4 | Learn how to create a custom Node.js Alpine container by installing system
5 | dependencies and setting up a minimal Linux container with Node.js and npm.
6 | Explore steps to optimize the container size and user setup, mirroring
7 | practices from official containers.
8 | keywords:
9 | - Node.js Alpine container
10 | - Dockerfile tutorial
11 | - system dependencies installation
12 | - Alpine Linux setup
13 | - custom container optimization
14 | ---
15 |
16 | ## Making our own Node.js Alpine container
17 |
18 | Let's take this exercise a bit further. Let's actually make our own Node.js Alpine container. NOTE: I'd suggest **always** using the official one. They'll keep it up to date with security fixes and they're _real_ good at making containers. Better than I am, anyway. But this is a good exercise for us to go through to learn how to install system dependencies.
19 |
20 | Start with this in a new Dockerfile.
21 |
22 | ```dockerfile
23 | FROM alpine:3.19
24 |
25 | RUN apk add --update nodejs npm
26 | ```
27 |
28 | `alpine:latest` would nab you the latest Alpine (3.19 as of writing). If you run into issues with versions, continue with `alpine:3.19` instead of `alpine:latest`; otherwise feel free to truck on with `alpine:latest`.
29 |
30 | `RUN apk add --update nodejs npm` will use the Alpine package manager to grab Node.js and npm (they're bundled separately for Alpine.)
31 |
32 | ```bash
33 | docker build -t my-node .
34 | ```
35 |
36 | If you encounter an error like this
37 |
38 | ```bash
39 | /home/node/code/node_modules/@hapi/hapi/lib/core.js:51
40 | actives = new WeakMap(); // Active requests being processed
41 | ^
42 |
43 | SyntaxError: Unexpected token =
44 | ```
45 |
46 | Try using `nodejs-current` instead of `nodejs`
47 |
48 | ```dockerfile
49 | RUN apk add --update nodejs-current npm
50 | ```
51 |
52 | Okay, so now do `docker build -t my-node .` and then try `docker run --rm --name my-app -it my-node`. In here you should have a pretty bare bones Linux container, but both `node -v` and `npm -v` should work. I checked and my container is already only 72MB.
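
A couple of optional sanity checks from the host, assuming you tagged the image `my-node` as above:

```bash
docker run --rm my-node node -v   # should print a Node.js version
docker run --rm my-node npm -v    # should print an npm version
docker images my-node             # check the SIZE column
```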
53 |
54 | Keep in mind that Alpine does not use bash for its shell; it uses a different shell called `ash` or often just `sh`. It's similar enough to bash but there are some differences. It's not really the point of this class so we'll keep the focus on learning just what's necessary.
55 |
56 | Let's next make our `node` user.
57 |
58 | ```dockerfile
59 | FROM alpine:3.19
60 |
61 | RUN apk add --update nodejs npm
62 |
63 | RUN addgroup -S node && adduser -S node -G node
64 |
65 | USER node
66 | ```
67 |
68 | I'm mimicking what the Node.js official container does, which is make a user group of `node` with one user in it, `node`. Feel free to name them different things if you feel so inclined. Notice we could conceivably combine the two `RUN` instructions together but it's generally best practice to keep "ideas" separate. The first `RUN` installs dependencies, the second one creates the `node` user. Up to you how you do it, neither is wrong per se.
69 |
70 | Now we can just copy the rest from the previous Dockerfile! Let's do that.
71 |
72 | ```dockerfile
73 | FROM alpine:3.19
74 |
75 | RUN apk add --update nodejs npm
76 |
77 | RUN addgroup -S node && adduser -S node -G node
78 |
79 | USER node
80 |
81 | RUN mkdir /home/node/code
82 |
83 | WORKDIR /home/node/code
84 |
85 | COPY --chown=node:node package-lock.json package.json ./
86 |
87 | RUN npm ci
88 |
89 | COPY --chown=node:node . .
90 |
91 | CMD ["node", "index.js"]
92 | ```
93 |
94 | [⛓️ Link to the Dockerfile][dockerfile-file]
95 |
96 | It works! We're down to 89MB (compared to 150MB-ish with the official `node:20-alpine` container). Honestly, I'm not entirely sure what we cut out from the other `node:20-alpine` container but it's probably important. Again, I'd stick to the official containers where they exist. But hey, we learned how to add a user and install system dependencies! Let's make it even smaller, because why the hell not.
97 |
98 | [dockerfile-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/make-our-own-alpine-nodejs-container/Dockerfile
99 |
--------------------------------------------------------------------------------
/lessons/05-making-tiny-containers/C-multi-stage-builds.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how to optimize Docker images using multistage builds with Node.js and
4 | Alpine, reducing container size significantly by eliminating unnecessary
5 | dependencies like npm. Follow a tutorial on building a Dockerfile with
6 | multiple stages and leveraging Alpine for smaller, more efficient containers.
7 | keywords:
8 | - Docker multistage build
9 | - Node.js Alpine Docker image
10 | - optimize Docker image size
11 | ---
12 |
13 | Hey, we're already half-way to ridiculous, let's make our image EVEN SMALLER. Technically we only need `npm` to build our app, right? We don't actually need it to run our app. Docker allows you to have what it calls multistage builds, where it uses one container to build your app and another to run it. This can be useful if you have big dependencies to build your app but you don't need those dependencies to actually run the app. A C++ or Rust app might be a good example of that: they need big tool chains to compile the apps but the resulting binaries are smaller and don't need those tools to actually run them. Or one perhaps more applicable to you is that you don't need the TypeScript or Sass compiler in production, just the compiled files. We'll actually do that here in a sec, but let's start here with eliminating `npm`.
14 |
15 | Make a new Dockerfile, call it `Dockerfile`.
16 |
17 | ```dockerfile
18 | # build stage
19 | FROM node:20 AS node-builder
20 | RUN mkdir /build
21 | WORKDIR /build
22 | COPY package-lock.json package.json ./
23 | RUN npm ci
24 | COPY . .
25 |
26 | # runtime stage
27 | FROM alpine:3.19
28 | RUN apk add --update nodejs
29 | RUN addgroup -S node && adduser -S node -G node
30 | USER node
31 | RUN mkdir /home/node/code
32 | WORKDIR /home/node/code
33 | COPY --from=node-builder --chown=node:node /build .
34 | CMD ["node", "index.js"]
35 | ```
36 |
37 | Notice we have two `FROM` instructions. This is how you can tell it's multistage. The last container made will be the final one that gets labeled and shipped. Notice we're starting in the full `node:20` container since we're not going to ship that container, so we can use the kitchen sink to build the app before copying it into a smaller container.
38 |
39 | After building everything in the build stage (you can have more than two stages, by the way) we move on to the runtime container. In this one we're using Alpine due to its size and security benefits. Everything else looks similar to what we were doing before, just now we're copying from the build container instead of the host machine.
40 |
41 | The two real key differences are that we don't `apk add npm` and we're doing `COPY --from=node-builder`, which means we're copying from the first stage. We do `FROM node:20 AS node-builder` so we can refer to that stage by name, which simplifies reading the Dockerfile.
42 |
43 | As you may imagine, this means you can copy from any previous stage or if you leave `--from` off it'll come from the host machine.
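
One handy aside while we're here: because we named the first stage, you can build and inspect just that stage with `--target`. A hedged sketch:

```bash
docker build --target node-builder -t my-multi-builder .
docker run -it --rm my-multi-builder bash   # the node:20 base includes bash, so you can poke around
```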
44 |
45 | So try it now!
46 |
47 | ```bash
48 | docker build -t my-multi .
49 | docker run -it -p 8080:8080 --name my-app --rm --init my-multi
50 | ```
51 |
52 | Still works! And our container size is down to a cool 72MB as compared to 89MB when we included npm, 150MB when we used `node:20-alpine` and 1.1GB when we used `node:20`.
53 |
54 | Pretty amazing, right? Honestly, how worthwhile is it doing micro-optimizations like this? Not very. We had to do a decent amount of work to shave 50% off the final size and now we're stuck maintaining it. I'd rather just start with `FROM node:20-alpine` and call it a day. We get all their wisdom for free and we're not stuck with a longer Dockerfile than we need. But it is definitely worth going from 1.1GB to 150MB!
55 |
56 | ## A note on container sizes
57 |
58 | A last note here: file size isn't everything. It's at best weakly correlated with security, it's just a fun metric to measure. In theory you'll save some money on bandwidth but I have to guess you'll spend more engineering salaries making containers tiny than you'll save on bandwidth. I'd much rather have `node:20` and have it be maintained by security professionals than trying to do it myself. Just keep that in mind: it can be a fool's errand to chase shaving bytes off your containers.
59 |
--------------------------------------------------------------------------------
/lessons/05-making-tiny-containers/D-distroless.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn about the differences between Alpine and Distroless for Docker
4 | containers, focusing on edge cases with Alpine and the stripped-down nature of
5 | Distroless. Explore alternative options like Wolfi, Red Hat's Universal Base
6 | Image Micro, and Google's Distroless projects, emphasizing security and
7 | minimalism.
8 | keywords:
9 | - Alpine
10 | - Distroless
11 | - Docker containers
12 | - security
13 | - minimalism
14 | ---
15 |
16 | You may not want to use Alpine. [This blog post goes into depth][blog] but let me sum it up with two points:
17 |
18 | 1. Alpine made some design choices that have some extremely rare edge cases that can cause failures and be _very_ hard to diagnose. This arises from their choice of replacing the typical `glibc` with `musl`. Read the blog post if you want to know more. Suffice to say, unless you're running Kubernetes at a large scale this shouldn't concern you; lots of people run Alpine and never see issues.
19 | 1. Now Alpine isn't the only option!
20 |
21 | The four projects to look to here are [Wolfi (an open source project)][wolfi], [Red Hat's Universal Base Image Micro][ubi], [Debian's slim variant][slim], and [Google's Distroless][distroless].
22 |
23 | You would be set with any of these. We are going to focus on Distroless because it is currently the most popular but feel free to experiment!
24 |
25 | "Distroless" is a bit of a lie as it still based on Debian, but to their point, they've stripped away essentially everything except what is 100% necessary to run your containers. This means you need to install _everything_ you need to get running. It means no package manager. It means it is truly as barebones as it can get.
26 |
27 | Let's build a Node.js distroless image.
28 |
29 | ```dockerfile
30 | # build stage
31 | FROM node:20 AS node-builder
32 | WORKDIR /build
33 | COPY package-lock.json package.json ./
34 | RUN npm ci
35 | COPY . .
36 |
37 | # runtime stage
38 | FROM gcr.io/distroless/nodejs20
39 | COPY --from=node-builder --chown=node:node /build /app
40 | WORKDIR /app
41 | CMD ["index.js"]
42 | ```
43 |
44 | [⛓️ Link to the Dockerfile][dockerfile-file]
45 |
46 | ```bash
47 | docker build -t my-distroless .
48 | docker run -it -p 8080:8080 --name my-app --rm --init my-distroless
49 | ```
50 |
51 | The size (according to my computer) was about 175MB, so not necessarily any smaller than Alpine, but it is indeed using a Debian-derivative Linux instead of Alpine which does exclude a class of rare-but-possible bugs! These days I tend to use Distroless images but honestly I'm fine with anything you choose here. Probably by the time you _need_ something other than an Alpine image you will have 100x surpassed my knowledge and skills with containers or have a whole dev ops org to attend to these nuances.
52 |
53 | One note with the Dockerfile: notice we _just_ give it the Node.js file and _not_ the Node.js command. The Distroless container locks it down so it can only run Node.js apps and cannot be given any other command. Just another way they are hyper-focused for security in production.
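
If you're curious how that works, the base image sets the entrypoint for you; you can peek at it on the image you just built (the exact path in the output may differ):

```bash
docker inspect -f '{{ .Config.Entrypoint }}' my-distroless
# prints something like [/nodejs/bin/node]
```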
54 |
55 | [blog]: https://martinheinz.dev/blog/92
56 | [wolfi]: https://wolfi.dev
57 | [distroless]: https://github.com/GoogleContainerTools/distroless
58 | [ubi]: https://catalog.redhat.com/software/base-images
59 | [node-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/distroless/index.js
60 | [dockerfile-file]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/distroless/Dockerfile
61 | [slim]: https://hub.docker.com/_/debian
62 |
--------------------------------------------------------------------------------
/lessons/05-making-tiny-containers/E-static-asset-project.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how to build a front end website using Astro, React, TypeScript, and
4 | Tailwind with step-by-step instructions. Create a multi-stage Dockerfile to
5 | build and serve the project with NGINX, simplifying static file serving for
6 | your users.
7 | keywords:
8 | - Astro
9 | - React
10 | - TypeScript
11 | - Tailwind
12 | - Dockerfile
13 | - NGINX
14 | - static assets
15 | ---
16 |
17 | We're going to do a project now! Feel free to attempt the project first and then follow along with me as I code the answer.
18 |
19 | We're going to construct a very basic front end website with Astro, React, TypeScript, and Tailwind. Why these? Because I want it to have a lot of dependencies and a big build step. This class isn't about any of these things but if you want to take a class on React, my [intro][intro] and [intermediate][intermediate] classes are available on Frontend Masters.
20 |
21 | You have two choices here: you can either create your own Astro project with `npx create-astro@latest` or you can just use my copy of it. I added Tailwind and React to mine but you don't necessarily need to as it doesn't really affect building the project.
22 |
23 | Also feel free to use your own static asset project or favorite static assets framework. As long as `npm run build` works and you make sure to get the path right for where the assets are to be served from, it doesn't matter.
24 |
25 | [⛓️ Link to the Project][project]
26 |
27 | > Do note I have the complete Dockerfile in there under `solution.Dockerfile`. Only glance at it once you've tried to build it yourself.
28 |
29 | You should have your project ready to go now.
30 |
31 | To make sure this works right now, run `npm run dev` in your console and make sure the app starts okay. You should see a splash screen. Once you're ready to build it, run `npm run build` to have it build for production.
32 |
33 | The project is to make a multi-stage Dockerfile that builds the project in one container and then serves it from a different container using NGINX. If you're not familiar with NGINX, fear not! It is a static file server, which is to say it takes HTML, CSS, JS, images, fonts, etc. and serves them to your users. It handles all the serving and file headers for you. Using it can be accomplished in a few steps. You'll use the `nginx:latest` (or `nginx:alpine`! up to you) container and copy **just the newly built files, not everything** (they're in the `dist` directory inside of the Astro app) to `/usr/share/nginx/html` and NGINX will take care of the rest. The `nginx` container defines a `CMD` in it and if you don't override it, it starts NGINX for you. Give it a shot! Once you've tried, come back here and we'll do the solution together.
34 |
35 | > NGINX runs on port 80 by default, so you probably want to route that to something like 8080 on your host machine (otherwise you'd have to run it as root, which no one wants to do.) In other words, use `-p 8080:80` when you start Docker.
36 |
37 | Scroll down to see my answer.
38 |
39 |
40 |
41 | Done? If you gave it a shot, your Dockerfile probably shouldn't be very long. Let's see what I came up with.
42 |
43 | ```Dockerfile
44 | FROM node:20 AS node-builder
45 | WORKDIR /app
46 | COPY . .
47 | RUN npm ci
48 | RUN npm run build
49 |
50 | # you could totally use nginx:alpine here too
51 | FROM nginx:latest
52 | COPY --from=node-builder /app/dist /usr/share/nginx/html
53 | ```
54 |
55 | Now if you run this, it should work:
56 |
57 | ```bash
58 | docker build -t my-static-app .
59 | docker run -it -p 8080:80 --name my-app --rm --init my-static-app
60 | ```
61 |
62 | It should be working now! Hooray! Hopefully you're starting to see the power of what Docker can unlock for you.
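
If you want a quick sanity check from another terminal while the container is running, something like this should do it:

```bash
curl -I http://localhost:8080
# expect an HTTP 200 with a "Server: nginx" header
```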
63 |
64 | [intro]: https://frontendmasters.com/courses/complete-react-v8/
65 | [intermediate]: https://frontendmasters.com/courses/intermediate-react-v5/
66 | [project]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/static-asset-project
67 |
--------------------------------------------------------------------------------
/lessons/05-making-tiny-containers/meta.json:
--------------------------------------------------------------------------------
1 | {
2 | "icon": "minimize"
3 | }
--------------------------------------------------------------------------------
/lessons/06-docker-features/A-bind-mounts.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn about using volumes and bind mounts in Docker containers to handle
4 | stateful operations, along with the differences between them. Bind mounts
5 | enable flexible file access between the host computer and container, offering
6 | a practical solution for testing or development scenarios.
7 | keywords:
8 | - Docker
9 | - volumes
10 | - bind mounts
11 | - stateful containers
12 | - containerization
13 | - NGINX
14 | - Dockerfile
15 | ---
16 |
17 | So far we've been dealing with self-contained containers. Normally this is all you ever want: containers that can spin up and spin down as frequently as they need to. They're ephemeral, temporary, and disposable. None of these containers are "snowflakes". When I say snowflakes, picture you're running a server that's serving a Wordpress site. Imagine setting up this server, SSH'ing into the server, and setting everything up to be just right and tuned to the exact way you need it. This would be a snowflake server: if someone goes and deletes this server, you're screwed. You have to go and spend a bunch of time re-setting up this server. This is exactly the sort of thing we're trying to avoid with containers. These are the "pet" containers we talked about earlier. We want to make our servers easy to reproduce whenever we want so we can spin up and spin down servers at will. These are the "cattle" containers we talked about.
18 |
19 | However, not everything can fit neatly into a container all the time. Sometimes our containers need to be stateful in some capacity. Sometimes our containers need to read and write to the host. This is fundamentally at odds with the idea of a stateless, able-to-create-and-destroy-anytime container that we've been adhering to thus far. So what are we to do?
20 |
21 | Enter volumes and bind mounts. Both of these are methods of reading and writing to the host but with slight-but-important differences of when to use which. We'll go over both.
22 |
23 | ## Bind Mounts
24 |
25 | Let's start here because it's easier to see the use case for. Bind mounts allow you to mount files from your host computer into your container. This allows you to use containers in a much more flexible way than previously possible: you don't have to know what files the container will have _when you build it_; you can determine those files _when you run it_.
26 |
27 | Let's go over an example of how this could be useful.
28 |
29 | In the previous project, we used the NGINX image to build a container with our static assets baked into the container. In general this is what I recommend you do, since now we can ship that container anywhere and it'll just work. It's totally self-contained. But what if we just want to run an NGINX container locally to test stuff out? Sure, we could make a new Dockerfile and write it, but wouldn't it be cool if we could just use the NGINX container directly? We can! Let's try it. Go back to your static site project from the previous lesson. Let's use the `nginx` container to serve directly from it.
30 |
31 | ```bash
32 | # from the root directory of your Astro app
33 | docker run --mount type=bind,source="$(pwd)"/dist,target=/usr/share/nginx/html -p 8080:80 nginx:latest
34 | ```
35 |
36 | [⛓️ Link to the Project][project] (run the docker command from this directory)
37 |
38 | > Make sure you have built the project. If you did the previous lesson you should have this already built. If you don't see the `dist/` directory, run `npm install && npm run build`. You should see the `dist` directory after that.
39 |
40 | This is how you do bind mounts. It's a bit verbose but necessary. Let's dissect it.
41 |
42 | - We use the `--mount` flag to identify we're going to be mounting something in from the host.
43 | - As far as I know the only two types are `bind` and `volume`. Here we're using bind because we want to mount in some piece of already existing data from the host.
44 | - In the source, we identify what part of the host we want to make readable-and-writable to the container. It has to be an absolute path (e.g. we can't say `"./dist"`) which is why we use `"$(pwd)"` to get the **p**resent **w**orking **d**irectory to make it an absolute path.
45 | - The target is where we want those files to be mounted in the container. Here we're putting it in the spot that NGINX is expecting.
46 | - As a side note, you can mount as many mounts as you care to, and you can mix bind and volume mounts. NGINX has a default config that we're using but if we used another bind mount to mount an NGINX config to `/etc/nginx/nginx.conf` it would use that instead (see the sketch just after this list).
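
Here's a sketch of that last point, mounting a config alongside the assets (this assumes you have an `nginx.conf` sitting in your current directory; the file name is just a placeholder):

```bash
docker run \
  --mount type=bind,source="$(pwd)"/dist,target=/usr/share/nginx/html \
  --mount type=bind,source="$(pwd)"/nginx.conf,target=/etc/nginx/nginx.conf \
  -p 8080:80 nginx:latest
```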
47 |
48 | Again, it's preferable to bake your own container so you don't have to ship the container and the code separately; you'd rather just ship one thing that you can run without much ritual or ceremony. But this is a useful trick to have in your pocket. It's kind of like [serve][serve] but with real NGINX.
49 |
50 | [storage]: https://docs.docker.com/storage/
51 | [project]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/static-asset-project
52 | [serve]: https://github.com/vercel/serve
53 |
--------------------------------------------------------------------------------
/lessons/06-docker-features/B-volumes.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn about the differences between bind mounts and volumes in Docker, how to
4 | persist data using volumes for containers, and create a Node.js app with
5 | Docker volumes. Understand the benefits of using volumes over bind mounts in
6 | Docker for data persistence and manageability.
7 | keywords:
8 | - Docker bind mounts vs volumes
9 | - persist data in Docker containers
10 | - create Node.js app with Docker volumes
11 | ---
12 |
13 | Bind mounts are great for when you need to share data between your host and your container, as we just learned. Volumes, on the other hand, are so that your containers can maintain state between runs. So if you have a container that runs and the next time it runs it needs the results from the previous time it ran, volumes are going to be helpful. Volumes can not only be shared by the same container-type between runs but also between different containers. If you have two containers and you want to consolidate your logs in one place, volumes could help with that.
14 |
15 | The key here is this: bind mounts are file systems managed by the host. They're just normal files in your host being mounted into a container. Volumes are different because they're a new file system that Docker manages and mounts into your container. These Docker-managed file systems are not visible to the host system (they can be found but they're designed not to be.)
16 |
17 | Let's make a quick Node.js app that reads from a file that has a number in it, prints it, writes it to a volume, and finishes. Create a new Node.js project.
18 |
19 | ```bash
20 | mkdir docker-volume
21 | cd docker-volume
22 | touch index.js Dockerfile
23 | ```
24 |
25 | Inside that index.js file, put this:
26 |
27 | ```javascript
28 | const fs = require("fs").promises;
29 | const path = require("path");
30 |
31 | const dataPath = path.join(process.env.DATA_PATH || "./data.txt");
32 |
33 | fs.readFile(dataPath)
34 | .then((buffer) => {
35 | const data = buffer.toString();
36 | console.log(data);
37 | writeTo(+data + 1);
38 | })
39 | .catch((e) => {
40 | console.log("file not found, writing '0' to a new file");
41 | writeTo(0);
42 | });
43 |
44 | const writeTo = (data) => {
45 | fs.writeFile(dataPath, data.toString()).catch(console.error);
46 | };
47 | ```
48 |
49 | Don't worry too much about the index.js. It looks for a file at `$DATA_PATH` if that's set, or `./data.txt` if it isn't, and if the file exists it reads it, logs it, and writes back to the data file after incrementing the number. If you just run it right now, it'll create a `data.txt` file with 0 in it. If you run it again, it'll have `1` in there and so on. So let's make this work with volumes.
50 |
51 | ```dockerfile
52 | FROM node:20-alpine
53 | COPY --chown=node:node . /src
54 | WORKDIR /src
55 | CMD ["node", "index.js"]
56 | ```
57 |
58 | Now run
59 |
60 | ```bash
61 | docker build -t incrementor .
62 | docker run --rm incrementor
63 | ```
64 |
65 | Every time you run this it'll be the same thing. That's because nothing is persisted once the container finishes. We need something that can live between runs. We could use bind mounts and it would work, but this data is only designed to be used and written to within Docker, which makes volumes preferable and recommended by Docker. If you use volumes, Docker can handle backups, cleanups, and more security for you. If you use bind mounts, you're on your own.
66 |
67 | So, without having to rebuild your container, try this
68 |
69 | ```bash
70 | docker run --rm --env DATA_PATH=/data/num.txt --mount type=volume,src=incrementor-data,target=/data incrementor
71 | ```
72 |
73 | Now you should be able to run it multiple times and everything should work! We use the `--env` flag to set the DATA_PATH to be where we want `index.js` to write the file and we use `--mount` to mount a named volume called `incrementor-data`. You can leave the name out and it'll be an anonymous volume that will persist beyond the container, but Docker won't automatically choose the right one on future runs. Awesome!
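
A few standard commands are useful for poking at that named volume:

```bash
docker volume ls                        # incrementor-data should be listed
docker volume inspect incrementor-data  # shows where Docker keeps it and other metadata
docker volume rm incrementor-data       # removes it once no container is using it
```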
74 |
75 | ## named pipes, tmpfs, and wrap up
76 |
77 | Prefer to use volumes when you can, use bind mounts where it makes sense. If you're still unclear, the [official Docker storage](https://docs.docker.com/engine/storage/) docs are pretty good on the subject.
78 |
79 | There are two more that we didn't talk about, `tmpfs` and `npipe`. The former is Linux only and the latter is Windows only (we're not going over Windows containers at all in this workshop.) `tmpfs` imitates a file system but actually keeps everything in memory. This is useful for mounting in secrets like database keys or anything that wouldn't be persisted between container launches but you don't want to add to the Dockerfile. The latter is useful for mounting third party tools for Windows containers. If you need more info than that, refer to the docs. I've never directly used either.
80 |
--------------------------------------------------------------------------------
/lessons/06-docker-features/C-dev-containers.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how to use containers to streamline setting up development environments,
4 | illustrated with a Ruby on Rails example. Explore the benefits of using
5 | Dockerfiles and bind mounts for efficient app development. Discover tools like
6 | DevContainer CLI, Visual Studio, IntelliJ, and GitHub Codespaces that support
7 | dev containers.
8 | keywords:
9 | - containers
10 | - development environments
11 | - Dockerfiles
12 | - bind mounts
13 | - DevContainer CLI
14 | - Visual Studio
15 | - GitHub Codespaces
16 | ---
17 |
18 | So far we've been talking about taking an app and using containers to prepare the apps to run. This is an obvious use case for them and one you're going to use a lot. But let's talk about a different use case for them: building development environments for your apps.
19 |
20 | Let's paint a picture. Let's say you got a new job with a company and they're a Ruby shop (if you know Ruby, pretend you don't for a sec.) When you arrive, you're going to be handed a very long, likely-out-of-date, complicated README, and you'll struggle to get the proper version of Ruby set up, the correct dependencies installed, and Mercury into retrograde (just kidding.) Suffice to say, it's a not-fun struggle to get new apps working locally, particularly if it's in a stack that you're not familiar with. Shouldn't there be a better way? There is! (I feel like I'm selling knives on an infomercial.)
21 |
22 | Containers! What we can do is define a Dockerfile that sets up all our dependencies so that it's 100% re-createable with zero knowledge of how it works to everyone that approaches it. With bind mounts, we can mount our local code into the container so that we can edit locally and have it propagate into the development container. Let's give it a shot!
23 |
24 | ## Ruby on Rails
25 |
26 | I am not a Rails developer but I will confess I have always had an admiration for talented Rails developers. On one hand, I really don't like all the black magic that Rails entails. I feel like you whisper an arcane incantation into the CLI and on the other side a new website manifests itself from the ether. On the other hand, a really good Rails dev can make stuff so much faster than me because they can wield that sorcery so well.
27 |
28 | So let's say we got added to a new Rails project and had to go set it up. Open this project in VS Code.
29 |
30 | [⛓️ Link to the project][project]
31 |
32 | If you do this in VS Code, it should show you a prompt in the bottom to reopen in a dev container. Say yes.
33 |
34 | 
35 |
36 | If you miss the notification or want to do it later, you can either do in the [Command Palette][command] with the command "Dev Containers: Open Workspace in Container" or with the `><` UI element in the bottom left of VS Code and clicking "Reopen in Container".
37 |
38 | 
39 |
40 | This should build the container, set up all the Ruby dependencies, and put you in a container. From here, you can open the terminal and see that you're now inside a Linux container. Run `rails server` and the port will automatically be forwarded for you so you can open `localhost:3000` in your own browser. There you go! Rails running with very little thought about it on our part. This is even running SQLite for us. You can make pretty complicated dev environments (using Docker Compose, we'll talk about that later); this was just a simple example.
41 |
42 | Personally, setting this up by hand took a good 30 mins of messing around, but with a dev container it was just instant, and that's kind of the magic: it's a ready-made dev environment to go.
43 |
44 | > Just to be super clear, your dev containers and production containers will be different. You wouldn't want to ship your dev environment to production. So in these cases your project may have multiple Dockerfiles doing different things.
45 |
46 | ## Dev Containers Outside of VS Code
47 |
48 | While dev containers were a decidedly Microsoft / GitHub initiative to start, they have opened it up into an open standard and other companies can now use dev containers. Here are a few other tools that work with dev containers.
49 |
50 | - [DevContainer CLI][cli] – Run dev containers directly from the CLI so you can use them without any IDE needed to manage them (see the sketch just after this list). Maintained by Microsoft and GitHub.
51 | - [Visual Studio][vs]
52 | - [JetBrain IntelliJ][jetbrains]
53 | - [GitHub Codespaces][gh] – Any time you open a project with a dev container in it in Codespaces, Codespaces will automatically use that dev container for you.
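
As a hedged sketch of what the CLI flow looks like (command names are from the project's docs at the time of writing; check the repo if they've changed):

```bash
npm install -g @devcontainers/cli
devcontainer up --workspace-folder .                  # build and start the dev container
devcontainer exec --workspace-folder . rails server   # run a command inside it
```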
54 |
55 | [project]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/dev-containers
56 | [command]: https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette
57 | [cli]: https://github.com/devcontainers/cli
58 | [vs]: https://devblogs.microsoft.com/cppblog/dev-containers-for-c-in-visual-studio/
59 | [jetbrains]: https://blog.jetbrains.com/idea/2023/06/intellij-idea-2023-2-eap-6/#SupportforDevContainers
60 | [gh]: https://docs.github.com/en/codespaces/overview
61 |
--------------------------------------------------------------------------------
/lessons/06-docker-features/D-networking-with-docker.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how to perform manual networking with Docker, understanding Docker
4 | Compose and Kubernetes roles. Explore connecting a Node.js app to a MongoDB
5 | database using Docker networks. Discover basic networking concepts within
6 | Docker, such as bridge networks, and connect containers through custom
7 | networks.
8 | keywords:
9 | - Docker networking
10 | - manual networking
11 | - Node.js app
12 | - MongoDB database
13 | - bridge networks
14 | - connecting containers
15 | - basic Docker networking
16 | ---
17 |
18 | This is not going to be a deep dive into how networking works. Networking is a deep, deep pool of knowledge and merits entire courses to understand. Just worrying about networking is some people's jobs due to the immense surface area of the subject. Instead, I want to just peek under the covers of how to do manual networking with Docker so you can understand what Docker Compose and Kubernetes do for you.
19 |
20 | So why do we care about networking? Many reasons! Let's make our Node.js app a bit more complicated. What if it had a database? Let's connect it to a running MongoDB database. We _could_ start this MongoDB database inside of the same container and this might be fine for development on the smallest app, but it'd be better and easier if we could just use the [mongo][mongo] container directly. But if I have two containers running at the same time (the app container and the MongoDB container) how do they talk to each other? Networking!
21 |
22 | There are several ways of doing networking within Docker and all of them work differently depending which operating system you're on. Again, this is a deep subject and we're just going to skim the surface. We're going to deal with the simplest, the bridge networks. There is a default bridge network running all the time. If you want to check this out, run `docker network ls`. You'll see something like this:
23 |
24 | ```bash
25 | $ docker network ls
26 | NETWORK ID NAME DRIVER SCOPE
27 | xxxxxxxxxxxx bridge bridge local
28 | xxxxxxxxxxxx host host local
29 | xxxxxxxxxxxx none null local
30 | ```
31 |
32 | The bridge network is the one that exists all the time and we could attach to it if we want to, but again Docker recommends against it so we'll create our own. There's also the host network, which is the network of the host computer itself. The last network, with the `null` driver, is one that you'd use if you wanted to use some other provider or if you wanted to do it manually yourself.
33 |
34 | ```bash
35 | # create the network
36 | docker network create --driver=bridge app-net
37 |
38 | # start the mongodb server
39 | docker run -d --network=app-net -p 27017:27017 --name=db --rm mongo:7
40 | ```
41 |
42 | I'm having you run a specific version of MongoDB, v7, because I know the package to interact with it is already available on Ubuntu. Feel free to use v8+ if you know it's available. We also added a few flags. The `--name` flag allows us to refer specifically to that one running container, and even better, it allows us to use that name as its address on the network. We'll see that in a sec. The other one, since we're using `--name`, is `--rm`. If we didn't use that, we'd have to run `docker rm db` before restarting our `db` container, since when Docker stops a container it doesn't delete it or its logs and metadata until you tell it to. The `--rm` means toss all that stuff as soon as the container finishes and free up that name again.
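
If you want to confirm the network exists and that `db` is attached to it, these standard commands work:

```bash
docker network ls --filter name=app-net
docker network inspect app-net   # the "Containers" section should list db
```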
43 |
44 | Now, for fun, we can use _another_ MongoDB container (because it has the `mongosh` client on it in addition to having the MongoDB server).
45 |
46 | ```bash
47 | docker run -it --network=app-net --rm mongo:7 mongosh --host db
48 | ```
49 |
50 | This will be one instance of a MongoDB container connecting to a different container over our Docker network. Cool, right? So let's make our Node.js app read and write to MongoDB!
51 |
52 | ## Connecting our Node.js App to MongoDB
53 |
54 | This isn't a course on MongoDB or anything; it's more to show you how to connect one app container to a database container, as well as set you up for the next lesson, Docker Compose. And this sort of method works just as well for any DB: MySQL, Postgres, Redis, etc.
55 |
56 | So first thing, let's add some logic to our app that reads and writes to MongoDB:
57 |
58 | ```javascript
59 | const fastify = require("fastify")({ logger: true });
60 | const { MongoClient } = require("mongodb");
61 | const url = process.env.MONGO_CONNECTION_STRING || "mongodb://localhost:27017";
62 | const dbName = "dockerApp";
63 | const collectionName = "count";
64 |
65 | async function start() {
66 | const client = await MongoClient.connect(url);
67 | const db = client.db(dbName);
68 | const collection = db.collection(collectionName);
69 |
70 | fastify.get("/", async function handler(request, reply) {
71 | const count = await collection.countDocuments();
72 | return { success: true, count };
73 | });
74 |
75 | fastify.get("/add", async function handler(request, reply) {
76 | const res = await collection.insertOne({});
77 | return { acknowledged: res.acknowledged };
78 | });
79 |
80 | fastify.listen({ port: 8080, host: "0.0.0.0" }, (err) => {
81 | if (err) {
82 | fastify.log.error(err);
83 | process.exit(1);
84 | }
85 | });
86 | }
87 |
88 | start().catch((err) => {
89 | console.log(err);
90 | process.exit(1);
91 | });
92 | ```
93 |
94 | [⛓️ Link to the project files][project]
95 |
96 | > Open the project files to this above directory so you can get the right dependencies and Dockerfile. Make sure you run `npm install`.
97 |
98 | This is pretty similar to the project we ran before in the Layers section. We're just reading and writing to MongoDB now in the Node.js server, but otherwise we're keeping everything else the same, including the same Dockerfile.
99 |
100 | You could absolutely run this locally if you have MongoDB running on your host machine since the default connection string will connect to a local MongoDB. But we also left it open so we can feed the app an environment variable to point it at a different container.
101 |
102 | So build the container and run it using the following commands:
103 |
104 | ```bash
105 | docker build --tag=my-app-with-mongo .
106 | docker run -p 8080:8080 --network=app-net --init --env MONGO_CONNECTION_STRING=mongodb://db:27017 my-app-with-mongo
107 | ```
108 |
109 | Okay so we added a new endpoint and modified one. The first one is `/add` which will add an empty object (MongoDB will add an `_id` to it so it's not totally empty). It will then return how many items it successfully added to MongoDB (hopefully 1!). And then we modified the `/` route to return the count of items in the database. Great! This is how the basics of networking work in Docker.
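
With both containers running, you can exercise the endpoints from your host (the responses shown are illustrative):

```bash
curl http://localhost:8080/add   # {"acknowledged":true}
curl http://localhost:8080/      # {"success":true,"count":1}
```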
110 |
111 | One key thing here that we need to discuss: if you shut down that one Docker container, where is your data going to go? Well, it'll disappear. How do you mitigate this? Usually with some sort of volume that lives beyond the container, and usually by having more than one container of MongoDB running. It's beyond the scope of this course but you already have the tools you need to be able to do that.
112 |
113 | Congrats! You've done basic networking in Docker. Now let's go use other tools to make this easier for us.
114 |
115 | [mongo]: https://hub.docker.com/_/mongo
116 | [project]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/networking-with-docker
117 |
--------------------------------------------------------------------------------
/lessons/06-docker-features/meta.json:
--------------------------------------------------------------------------------
1 | {
2 | "icon": "box"
3 | }
--------------------------------------------------------------------------------
/lessons/07-multi-container-projects/A-docker-compose.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how to use Docker Compose to coordinate multiple containers for
4 | development environments efficiently. Simplify defining relationships between
5 | containers using a YAML file, making it easy to manage complex setups with one
6 | command. Explore CI/CD integration possibilities and enhance development
7 | productivity by leveraging Docker Compose features.
8 | keywords:
9 | - Docker Compose
10 | - multiple containers
11 | - development environments
12 | - CI/CD scenarios
13 | - YAML file
14 | - containers relationship
15 | - productivity
16 | ---
17 |
18 | This may be one of the most useful features you learn about Docker. We've been mixing various different facets of deploying your app to production and creating development environments. This feature in particular is geared much more toward development environments. Many times when you're developing containers you're not in just a single container environment (though that does happen too.) When that happens, you need to coordinate multiple containers when you're doing local dev, and you've seen in the previous chapter, networking, that it's possible if a bit annoying.
19 |
20 | With Docker Compose we simplify this a lot. Docker Compose allows us to coordinate multiple containers and do so with one YAML file. This is great if you're developing a Node.js app and it requires a database, caching, or even if you have two+ separate apps in two+ separate containers that depend on each other, or all the above! Docker Compose makes it really simple to define the relationship between these containers and get them all running with one `docker compose up`.
21 |
22 | > If you see any commands out there with `docker-compose` (key being the `-` in there) it's from Docker Compose v1 which is not supported anymore. We are using Docker Compose v2 here. For our purposes there isn't much difference.
23 |
24 | Do note that Docker does say that Docker Compose is suitable for production environments if you have a single instance running multiple containers. This is atypical for the most part: if you have multiple containers, typically you want the ability to have many instances.
25 |
26 | In addition to working very well for dev, Docker Compose is very useful in CI/CD scenarios when you want GitHub Actions or some CI/CD provider to spin up multiple environments to quickly run some tests.
27 |
28 | Okay so let's get our previous app working: the one with a MongoDB database being connected to by a Node.js app. Create a new file in the root directory of your project called `docker-compose.yml` and put this in there:
29 |
30 | ```yml
31 | services:
32 | api:
33 | build: api
34 | ports:
35 | - "8080:8080"
36 | links:
37 | - db
38 | environment:
39 | MONGO_CONNECTION_STRING: mongodb://db:27017
40 | db:
41 | image: mongo:7
42 | web:
43 | build: web
44 | ports:
45 | - "8081:80"
46 | ```
47 |
48 | This should feel familiar even if it's new to you. This is basically all of the CLI configurations we were giving to the two containers but captured in a YAML file.
49 |
50 | In `services` we define the containers we need for this particular app. We have three: the `api` container (which is our Node.js app), the `db` container which is MongoDB, and a `web` container which we'll talk about in a moment. We then identify where the Dockerfile is with `build`, which ports to expose in `ports`, and the `environment` variables using that field.
51 |
52 | The one interesting one here is the `links` field. In this one we're saying that the `api` container needs to be connected to the `db` container. This means Docker will start the `db` container first and then network it to the `api` container. This works the same way as what we were doing in the previous lesson.
53 |
54 | The `db` container is pretty simple: it's just the `mongo` container from Docker Hub. This is actually smart enough to expose 27017 as the port and to make a volume to keep the data around between restarts so we don't actually have to do anything for that. If you needed any other containers, you'd just put them here in services.
55 |
56 | We then have a frontend React.js app that is being built by Parcel.js and served by NGINX.
57 |
58 | There's a lot more to compose files than what I've shown you here but I'll let you explore that on your own time. [Click here][compose] to see the docs to see what else is possible.
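
A few day-to-day commands worth knowing (all standard Docker Compose v2):

```bash
docker compose up -d          # start everything in the background
docker compose logs -f api    # follow the logs for just the api service
docker compose down           # stop and remove the containers (add -v to drop volumes too)
```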
59 |
60 | This will start and work now; just run `docker compose up` and it'll get going. I just want to do one thing: let's make our app even more productive to develop on. Go to your Dockerfile for the app and make it read as such:
61 |
62 | > If you change something and want to make sure it builds, make sure to run `docker compose up --build`. Docker Compose isn't watching for changes when you run up.
63 |
64 | ```dockerfile
65 | FROM node:latest
66 |
67 | RUN npm i -g nodemon
68 |
69 | USER node
70 |
71 | RUN mkdir /home/node/code
72 |
73 | WORKDIR /home/node/code
74 |
75 | COPY --chown=node:node package-lock.json package.json ./
76 |
77 | RUN npm ci
78 |
79 | COPY --chown=node:node . .
80 |
81 | CMD ["nodemon", "index.js"]
82 | ```
83 |
84 | Now we can write our code and every time we save it'll restart the server from within the container. This will make this super productive to work with!
85 |
86 | While we're about to get to Kubernetes, which will handle bigger deployment scenarios than Docker Compose can, you can use `docker compose up --scale web=10` to scale up your web container to 10 concurrently running containers. This won't work at the moment because they're all trying to publish the same port on the host, but we could use something like NGINX or HAProxy to load balance amongst the containers. It's a bit more advanced use case and less useful for Compose since at that point you should probably just use Kubernetes or something similar. We'll approach it in the Kubernetes chapter.
87 |
88 | [compose]: https://docs.docker.com/compose/compose-file/#compose-file-structure-and-examples
89 |
--------------------------------------------------------------------------------
/lessons/07-multi-container-projects/B-kubernetes.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn about containers and Kubernetes, where containers are the building
4 | blocks and Kubernetes orchestrates them. Understand fundamental concepts like
5 | control planes, nodes, pods, services, and deployments. Explore how to set up
6 | Kubernetes using tools like kubectl, minikube, and Docker Desktop for local
7 | development.
8 | keywords:
9 | - containers
10 | - Kubernetes
11 | - orchestration
12 | - control plane
13 | - nodes
14 | - pods
15 | - services
16 | ---
17 |
18 | I like to tell people that containers are the "simple" (simple is a relative term here) part and Kubernetes is the "hard" (hard isn't relative; Kubernetes is really hard) part. So if this feels hard, it's because it is.
19 |
20 | NOTE: Because Kubernetes is long, it's often abbreviated as k8s (k, then eight letters, then s.)
21 |
22 | So let's talk about use cases. Containers by themselves are useful for many, many use cases like production apps, machine learning, setting up environments, developer environments, and one-off experimentations. Kubernetes builds on containers (read: you need to know containers to use Kubernetes.) Kubernetes is a container orchestration tool. It allows you to manage large, complicated clusters of containers across multiple different hosts. It's a complicated tool that solves complicated problems. As such, we are going to do a hello world so you can understand what it is, what it can do, and then leave you to explore more on your own.
23 |
24 | So let's go over a few fundamental concepts here.
25 |
26 | - The **control plane** is a server that coordinates everything else. This is the brain of your cluster. Some cloud providers actually won't charge you to run the control plane. You will sometimes see this referred to as the "master node" but it has since been renamed.
27 | - **Nodes** (not to be confused with Node.js) are the worker servers that are actually going to be running your containers. One node can run one or multiple containers. If you're running machine learning and you need big, beefy servers to churn through the learning, your node may only run one container. If you're running a Node.js server like we are, you'll have many containers on one node.
28 | - Technically, a Node is just a deploy target. It could itself be a VM or a container, or as we said it could be a metal-and-silicon server. It's not really important. Just think of it as a destination for containers.
29 | - A **pod** is basically an atom to a cluster: it's a thing that can't be divided and thus needs to be deployed together. Imagine if you had several types of containers that all worked together as one unit and wouldn't work without each other. In this case, you'd put those into a pod. In many cases, and what we're going to do today, it's one-container-one-pod. Our app stands alone and thus can be deployed independently. We'll keep the MongoDB pod and app pod separate because they can scale individually.
30 | - A **service** is a group of pods that make up one backend (services can be other things but bear with me for a second), so to speak. Think of one service as one microservice made up of many pods. Pods are scaling up and down all the time and thus it's unreliable to rely on a single pod's IP. So if I tell the User service to rely on a specific IP for the Admin service, that IP might disappear as that pod is scaled up and down. Enter services. This is a reliable entry point so that these services can talk to each other independent of the relative scale of each other. Like you can have one-container-one-pod, you can have one-pod-one-service as well, which means you can have one-container-one-pod-one-service. Services can be more than a backend: they can be machine learning nodes, databases, caches, etc.
31 | - A **deployment** is where you describe what you want the state of your pods to be and then Kubernetes works to get your cluster into that state. (There's a small sketch of a deployment and a service right after this list.)
32 |
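To make those last two concepts a little more concrete, here's a rough sketch of what a deployment and a service look like in YAML. Don't worry about typing this in; the names, image, and ports are made up, and in the next lesson we'll generate real config from our docker-compose.yml instead of writing it by hand.

```yml
# a minimal, hypothetical deployment plus the service that fronts its pods
apiVersion: apps/v1
kind: Deployment
metadata:
  name: api
spec:
  replicas: 3 # desired state: three pods running this container
  selector:
    matchLabels:
      app: api
  template:
    metadata:
      labels:
        app: api
    spec:
      containers:
        - name: api
          image: my-api:latest
          ports:
            - containerPort: 8080
---
apiVersion: v1
kind: Service
metadata:
  name: api
spec:
  selector:
    app: api # the stable entry point in front of whatever pods carry this label
  ports:
    - port: 8080
      targetPort: 8080
```
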
33 | Here's the sad part: doing this in the Windows Subsystem for Linux is tough. If you're following along in Windows, I'd say just grab a coffee and watch how this works. It's not important that you actually do this. If you're comfortable in PowerShell, it works well from there or if you can connect to a true Linux VM, it'll work well from there too. Otherwise, just relax while I do this from macOS.
34 |
35 | So you're going to need at least one new CLI: `kubectl`. `kubectl` ([see here for how to install][kubectl]) is the tool that allows you to control _any_ Kubernetes cluster, be it local or in the cloud. It's the single unified CLI for managing Kubernetes. I definitely pronounce this as "cube cuddle" because it makes me happy.
36 |
37 | After that, you need to make a choice between `minikube` and using Docker Desktop's built-in Kubernetes support. If it's all the same to you, I'd suggest using Docker Desktop's because it's easier to use.
38 |
39 | - Docker Desktop ships with very simple Kubernetes support. It's nice to learn on but has some limitations. If you need to do more complicated things, get minikube. To enable Kubernetes on Docker Desktop, open the preferences of Docker Desktop, navigate to the Kubernetes tab, enable it, accept when it asks if it can restart itself, and then wait a few minutes.
40 | - `minikube` ([see here for how to install][minikube]) is a development tool to get your Kubernetes cluster running on your local computer. You will only ever use this locally.
41 |
42 | You can have both installed, by the way. These will be called **contexts**. To switch between the two, you can `kubectl config use-context minikube` or `kubectl config use-context docker-desktop`. You can also shorten `use-context` to `use`.
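
If you ever forget which contexts you have or which one is currently active, kubectl can tell you:

```bash
kubectl config get-contexts    # lists all contexts; the active one has a * in the CURRENT column
kubectl config current-context # prints just the active context
kubectl config use-context docker-desktop
```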
43 |
44 | If you're using minikube, make sure you run `minikube start`.
45 |
46 | If you're using Docker Desktop, you will need to enable Kubernetes.
47 |
48 | 
49 |
50 | Click the gear in the top right of the Docker Desktop app.
51 |
52 | 
53 |
54 | Navigate to the Kubernetes menu and click enable Kubernetes. You will likely have to restart Docker.
55 |
56 | Do a `kubectl cluster-info` to make sure. You should see your control plane running.
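
A couple of other quick sanity checks you can run once your local cluster is up:

```bash
kubectl get nodes   # for Docker Desktop or minikube you should see a single node listed
kubectl version     # shows both the client (kubectl) and server (cluster) versions
```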
57 |
58 | [kubectl]: https://kubernetes.io/docs/tasks/tools/install-kubectl/
59 | [minikube]: https://kubernetes.io/docs/tasks/tools/install-minikube/
60 |
--------------------------------------------------------------------------------
/lessons/07-multi-container-projects/C-kompose.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Learn how to use Kompose to convert a docker-compose.yml configuration to
4 | Kubernetes, scale services using Kubernetes, set up LoadBalancer for exposing
5 | services, and interact with Kubernetes cluster using kubectl. Explore
6 | deployment options on Azure AKS, Amazon EKS, and Google GKE.
7 | keywords:
8 | - Kompose
9 | - Kubernetes scaling
10 | - LoadBalancer
11 | - kubectl
12 | - Azure AKS
13 | - Amazon EKS
14 | - Google GKE
15 | ---
16 |
17 | The next tool we're going to use is called [Kompose][kompose]. I'm showing you this tool because it's how I start out with Kubernetes when I have a project that I want to use with it. Kompose converts a docker-compose.yml configuration to a Kubernetes configuration. I find this to be much more approachable than starting with the myriad configurations you need to get Kubernetes going.
18 |
19 | [Click here][install-kompose] to see how to install Kompose on your platform. I did `brew install kompose` with Homebrew.
20 |
21 | So first let's modify our docker-compose.yml a bit to make it work for Kompose.
22 |
23 | ```yml
24 | services:
25 | api:
26 | build: api
27 | ports:
28 | - "8080:8080"
29 | links:
30 | - db
31 | depends_on:
32 | - db
33 | environment:
34 | MONGO_CONNECTION_STRING: mongodb://db:27017
35 | labels:
36 | kompose.service.type: nodeport
37 | kompose.image-pull-policy: Never
38 | db:
39 | image: mongo:7
40 | ports:
41 | - "27017:27017"
42 | web:
43 | build: web
44 | links:
45 | - api
46 | depends_on:
47 | - api
48 | labels:
49 | kompose.service.type: LoadBalancer
50 | kompose.service.expose: true
51 | kompose.image-pull-policy: Never
52 | ports:
53 | - "8081:80"
54 | ```
55 |
56 | [⛓️ Link to the project][project]
57 |
58 | > I went ahead here and modified the NGINX config to handle all inbound traffic. We could expose two services but in reality we want NGINX to be our front door and then allow our API to scale independently. I also modified the Node.js app to have correct paths relative to NGINX routes.
59 |
60 | We add the `NodePort` type to the api service so that we can scale this part of our infra up and Kubernetes will make it bind to different ports. Any app can reach the api on port 8080, but behind that we could have 50 scaled-up instances that Kubernetes is spreading traffic across.
61 |
62 | We add the `LoadBalancer` label to web so that Kubernetes will know to expose this particular service to the outside world. What this actually does for you is spin up a load balancer that will distribute the load amongst all of your running pods. Do note that this is one of three ways to expose a service to the outside world (by default everything is only exposed internally). The other two are NodePort and using an ingress controller. [This is a great explainer][ingress] if you're curious. For now LoadBalancer is perfect. It's actually just a NodePort under the hood in Kubernetes, but once you deploy to GCP, AWS, or Azure they'll use their own flavor of load balancer for you. You can also handle this yourself but that's _way_ outside the scope of this course.
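
Later, once everything is applied, you can check how each service ended up exposed:

```bash
kubectl get services   # the TYPE column shows ClusterIP / NodePort / LoadBalancer,
                       # and PORT(S) shows what's reachable from outside the cluster
```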
63 |
64 | Lastly, we need to be explicit about the port MongoDB exposes. Locally Docker was able to take care of it, but Kubernetes needs us to be super explicit about what's exposed and what's not.
65 |
66 | > They used to let you do `kompose up` but now they don't. You have to convert the config and then apply the configs.
67 |
68 | ```bash
69 | kompose convert --build local
70 | ```
71 |
72 | Okay, now that you've done this, run
73 |
74 | ```bash
75 | kubectl apply -f '*.yaml'
76 | ```
77 |
78 | > If you see an error, make sure you have the quotes, they're needed, and make sure that your docker-compose.yml file doesn't have .yaml for its extension.
79 |
80 | To get a bird's eye view of everything running, run `kubectl get all`. Critically, we want to see STATUS: Running on all three of our services. If you're seeing something like ErrImagePull, it means your containers probably aren't being pulled locally and you'll need to debug that.
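
If something isn't coming up, these are the commands I reach for first (the pod name below is just a placeholder; copy the real one from `kubectl get pods`):

```bash
kubectl get pods                  # find the pod that isn't Running
kubectl describe pod <pod-name>   # the Events section at the bottom usually says why
kubectl logs <pod-name>           # see what the container itself printed
```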
81 |
82 | Let's do some Kubernetes magic now. Run `kubectl scale --replicas=5 deployment/api` and then run `kubectl get all`. Just like that, you have five instances of our Node.js app running and Kubernetes smartly routing traffic to each. If one of them becomes unhealthy, Kubernetes will automatically tear it down and spin up a new one. By setting up Kubernetes, you get a lot of cool stuff for free. If your computer is starting to warm up, feel free to run `kubectl scale --replicas=1 deployment/api` to scale down. You can scale the database the same way too. The load balancer won't scale like that, but again, that's because Kubernetes expects the cloud provider to handle it for you.
83 |
84 | Once you're done toying, run `kubectl delete all --all`. This will tear down everything.
85 |
86 | ## To the cloud!
87 |
88 | What's super fun is that kubectl is the same tool you'd use to control your production deployment. So everything you just learned would work against Azure, AWS, GCP, etc. All you have to do is change the context from minikube or docker-desktop to Azure, AWS, or GCP. I'm not going to do that, but I'll drop the tutorials here so you can play around yourself (there's also a small sketch after these links of what switching contexts looks like). Do note these are often not free and if you're not careful, Kubernetes can get expensive!
89 |
90 | - [Azure AKS][aks]
91 | - [Amazon EKS][aws]
92 | - [Google GKE][gcp]
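
As a rough idea of what "changing the context" looks like in practice, each cloud's CLI just adds a new context to your kubeconfig for you. These commands are real, but the resource group, cluster, and region names here are made up:

```bash
# Azure AKS
az aks get-credentials --resource-group my-rg --name my-cluster
# Amazon EKS
aws eks update-kubeconfig --region us-west-2 --name my-cluster
# Google GKE
gcloud container clusters get-credentials my-cluster --region us-central1

kubectl config get-contexts   # the cloud cluster now shows up as another context
```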
93 |
94 | [ingress]: https://medium.com/google-cloud/kubernetes-nodeport-vs-loadbalancer-vs-ingress-when-should-i-use-what-922f010849e0
95 | [localhost]: http://localhost:3000
96 | [aks]: https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough
97 | [aws]: https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html
98 | [gcp]: https://cloud.google.com/kubernetes-engine/docs/quickstart
99 | [kompose]: https://kompose.io/
100 | [install-kompose]: https://kompose.io/installation/
101 | [project]: https://github.com/btholt/project-files-for-complete-intro-to-containers-v2/blob/main/kubernetes
102 |
--------------------------------------------------------------------------------
/lessons/07-multi-container-projects/meta.json:
--------------------------------------------------------------------------------
1 | {
2 | "icon": "boxes-stacked"
3 | }
--------------------------------------------------------------------------------
/lessons/08-wrap-up/A-docker-alternatives.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Explore alternatives to Docker for container building, runtime tools,
4 | runtimes, orchestrators, and desktop apps. Learn about tools like Podman,
5 | Buildah, containerd, gVisor, OpenShift, Nomad, and more as options in the
6 | container ecosystem.
7 | keywords:
8 | - Docker alternatives
9 | - container tools
10 | - container runtimes
11 | - orchestration tools
12 | - Podman
13 | - containerd
14 | - Kubernetes alternatives
15 | ---
16 | So far we have only talked about Docker and there's a pretty good reason for that: for _personal_ use or _developer_ use, Docker is pretty much the indisputable champion. It has all the developer experience bells and whistles, all the mindshare of developers everywhere, and even all the other tools we are about to talk about like to tout their Docker compatibility. However, it is good to keep in mind that Docker is a for-profit company and thus they are trying to align your incentives to theirs and vice versa. It's good to know what else exists out there.
17 |
18 | I don't have a lot of experience with any of these as I've exclusively used Docker my whole career, but I wanted to get these names out in front of you so you recognize what they are.
19 |
20 | ## Container Builders
21 |
22 | This is what would replace `docker build`. What tools out there exist for building containers?
23 |
24 | - [Buildah][buildah] – Generally can read a Dockerfile without any problems. It also has the neat ability to use the host's package manager instead of having to include one inside your Docker container. Supported by Red Hat / IBM.
25 |
26 | ## Container Runtime Tools
27 |
28 | This is what would replace `docker run`. This is the toolset that orchestrates the runtime that actually runs the container.
29 |
30 | - [Podman][podman] – Docker relies on having a daemon (a background process) to run its containers. Think of it like a client and server. Podman is daemonless: it builds containers to be run directly by the host OS without a daemon in the middle. Podman also has Podman Compose for Docker Compose situations. Supported by Red Hat / IBM.
31 | - [Colima][colima] – A project to make running containers on macOS and Linux easier by cutting down on setup. Still uses Docker's tools under the hood; it's just a tool to make interacting with them easier.
32 | - [rkt][rkt] – This was a project from CoreOS, who got bought by Red Hat, who got bought by IBM. Along the way rkt got deprecated so this project isn't maintained. Just wanted to mention it because my last course made mention of it.
33 |
34 | ## Container Runtimes
35 |
36 | This is the actual code executing your container.
37 |
38 | - [containerd][containerd] – Docker actually uses containerd inside of itself by default, but you can use containerd without Docker and you can use Docker with other runtimes. containerd is a [CNCF][cncf] project that handles the running of containers, and Docker is a tool that wraps that to add all the Docker stuff on top of it. [Docker has a deeper discussion if you want to know more][docker-containerd].
39 | - [gVisor][gvisor] – Google's container runtime with a particular focus on security. A hot topic at the moment, I see a lot of companies moving stuff to gVisor.
40 | - [Kata][kata] – I know way less about Kata, but they use full-on VMs to separate their "containers" as opposed to just using container features to separate them. From reading their docs, their intent is to be mixed-and-matched with actual containers and only used for containers that need the strongest separation.
41 |
42 | ## Container Orchestrators
43 |
44 | These are alternatives to Kubernetes (and somewhat Docker Compose)
45 |
46 | - [Apache Mesos][mesos] – The less I say about Mesos the better as it's very complicated and it's a tool I don't know or use. It's been around for a long time and therefore has some core believers in it. It predates Kubernetes even. Apache actually tried to stop development on Mesos and people revolted so now they still maintain it.
47 | - [Docker Swarm][swarm] – Before Kubernetes really won out, Docker was pushing Swarm hard, its own version of Kubernetes. Nowadays, unless you're already planning on using Swarm, use Compose and Kubernetes.
48 | - [OpenShift][openshift] – OpenShift is Red Hat's layer on top of Kubernetes, so indeed it is using Kubernetes underneath the hood. It includes things in it that Kubernetes lacks like CI/CD.
49 | - [Nomad][nomad] – I'm a big fan of HashiCorp and the products they've made. I think they do a great job making DevOps tools approachable by developers and that's why they're so popular. Nomad is a tool that takes Kubernetes and strips it down to be as simple as it can be.
50 | - [Rancher][rancher] – A project by SUSE (the team who makes SUSE Linux) that itself also wraps Kubernetes but with extra stuff in it.
51 |
52 | ## Desktop Apps
53 |
54 | Alternatives to the Docker Desktop app.
55 |
56 | - [Podman Desktop][podman-desktop] – Since Red Hat / IBM makes Podman to run containers, they made a desktop app like Docker Desktop to be able to use Podman and Buildah the same way.
57 | - [Rancher Desktop][rancher-desktop] – Rancher provides a desktop app like Docker Desktop to build and run containers for devs. It uses Docker and [Rancher][rancher] and is maintained by SUSE (who make SUSE Linux.)
58 |
59 | [podman]: https://podman.io/
60 | [buildah]: https://buildah.io/
61 | [podman-desktop]: https://podman-desktop.io/
62 | [cncf]: https://www.cncf.io/
63 | [containerd]: https://containerd.io/
64 | [docker-containerd]: https://www.docker.com/blog/containerd-vs-docker/
65 | [rkt]: https://github.com/rkt/rkt
66 | [kata]: https://katacontainers.io/
67 | [rancher-desktop]: https://rancherdesktop.io/
68 | [rancher]: https://www.rancher.com/
69 | [mesos]: https://mesos.apache.org/
70 | [openshift]: https://docs.openshift.com/
71 | [nomad]: https://www.nomadproject.io/
72 | [gvisor]: https://gvisor.dev/
73 | [swarm]: https://docs.docker.com/engine/swarm/
74 | [colima]: https://github.com/abiosoft/colima
75 |
--------------------------------------------------------------------------------
/lessons/08-wrap-up/B-conclusion.md:
--------------------------------------------------------------------------------
1 | ---
2 | description: >-
3 | Congratulations on completing the Complete Intro to Containers, version 2!
4 | This course covers containers, Docker, Kubernetes, and related tools, making
5 | systems-level concepts approachable. Topics include Dockerfiles, container
6 | optimization, networking, Docker Compose, Kubernetes, and more, providing
7 | essential knowledge in today's container-driven workflows.
8 | keywords:
9 | - containers
10 | - Docker
11 | - Kubernetes
12 | - Dockerfiles
13 | - container optimization
14 | - networking
15 | - DevOps
16 | ---
17 | That's it! Congratulations on completing the Complete Intro to Containers, version 2. On one hand, I feel like this course is pretty dense and a lot of systems-level stuff is thrown at you in a short amount of time. On the other hand, I feel like all of this stuff is actually more approachable than it seems at first impression. I started doing this stuff when I worked at Microsoft because I wanted to be able to sound intelligent when I spoke to the smart people making Azure work, and it turns out I really like it. Ever since, I've always been in and around cloud stuff and it really started with my love for mucking around with containers.
18 |
19 | Let's review what we talked about:
20 |
21 | - What containers are
22 | - chroot and code jails
23 | - Linux namespaces and how to limit processes on the same OS
24 | - cgroups and how to limit resources to processes
25 | - What an image is
26 | - How to build a JavaScript project in Docker
27 | - Docker Desktop and dev tools to use with Docker
28 | - Building Docker images
29 | - Dockerfiles in depth
30 | - How to build containers both for production performance and so they rebuild quickly
31 | - How to make smaller containers
32 | - Alpine and alternative Linux distros
33 | - How to do multi stage builds
34 | - Distroless and other alternatives to Alpine
35 | - A project on building your own static asset server
36 | - Bind mounts and volumes
37 | - Dev containers
38 | - Networking in Docker
39 | - Docker Compose and multi container setups
40 | - Kubernetes and Kompose
41 | - What other tools are out there
42 |
43 | That's a lot of stuff! Congrats, you are now _ahead_ of the curve on containers and this will serve you your whole career. Containers aren't going anywhere; they're just becoming a bigger part of our workflows. Every day you interact with dozens if not hundreds of containers in some way. I see that only increasing as everything becomes a deploy target, from fridges to watches to billboards.
44 |
45 | Thanks, and as always, please let me know how you liked the course!
46 |
47 | ❤️ [Brian Holt][brian]
48 |
49 | [brian]: https://www.twitter.com/holtbt
50 |
--------------------------------------------------------------------------------
/lessons/08-wrap-up/meta.json:
--------------------------------------------------------------------------------
1 | {
2 | "icon": "graduation-cap"
3 | }
--------------------------------------------------------------------------------
/next.config.js:
--------------------------------------------------------------------------------
1 | import { readFileSync } from "fs";
2 | import path from "path";
3 |
4 | const buffer = readFileSync(path.join(process.cwd(), "./course.json"));
5 | const course = JSON.parse(buffer);
6 | const BASE_URL = course?.productionBaseUrl || "";
7 |
8 | const config = {
9 | output: "export",
10 | basePath: BASE_URL,
11 | env: {
12 | BASE_URL,
13 | },
14 | };
15 |
16 | if (process.env.NODE_ENV === "development" && BASE_URL) {
17 | config.redirects = async () => {
18 | console.log(`ℹ️ ignore the warning 'Specified "redirects" will not automatically work with "output: export"'. This redirect only happens in development mode.`)
19 | return [
20 | {
21 | source: "/",
22 | destination: BASE_URL,
23 | basePath: false,
24 | permanent: false,
25 | },
26 | ];
27 | }
28 | }
29 |
30 | export default config;
31 |
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "private": true,
3 | "type": "module",
4 | "license": "(CC-BY-NC-4.0 OR Apache-2.0)",
5 | "author": "Brian Holt ",
6 | "scripts": {
7 | "dev": "next dev",
8 | "build": "next build && npm run csv",
9 | "start": "next start",
10 | "csv": "node csv/index.js",
11 | "seo": "node summary/index.js"
12 | },
13 | "dependencies": {
14 | "@fortawesome/fontawesome-free": "^6.5.1",
15 | "gray-matter": "^4.0.3",
16 | "highlight.js": "^11.9.0",
17 | "marked": "^11.1.1",
18 | "marked-highlight": "^2.1.1",
19 | "next": "^14.0.4",
20 | "react": "^18.2.0",
21 | "react-dom": "^18.2.0",
22 | "title-case": "^4.2.0"
23 | },
24 | "devDependencies": {
25 | "chatgpt": "^5.2.5",
26 | "convert-array-to-csv": "^2.0.0",
27 | "dotenv": "^16.3.1"
28 | }
29 | }
30 |
--------------------------------------------------------------------------------
/pages/_app.js:
--------------------------------------------------------------------------------
1 | import Head from "next/head";
2 | import "@fortawesome/fontawesome-free/css/all.css";
3 |
4 | import "highlight.js/styles/a11y-light.css";
5 | import "../styles/variables.css";
6 | import "../styles/footer.css";
7 | import "../styles/courses.css";
8 |
9 | import Layout from "../components/layout";
10 |
11 | export default function App({ Component, pageProps }) {
12 |   return (
13 |     <Layout>
14 |       <Head>
15 |         {/* NOTE: the original <link> tags were lost when this file was extracted;
16 |             these favicon links are a best-guess reconstruction based on the files
17 |             in public/images */}
18 |         <link
19 |           rel="apple-touch-icon"
20 |           href={`${process.env.BASE_URL}/images/apple-touch-icon.png`}
21 |         />
22 |         <link
23 |           rel="icon"
24 |           type="image/png"
25 |           sizes="32x32"
26 |           href={`${process.env.BASE_URL}/images/favicon-32x32.png`}
27 |         />
28 |         <link
29 |           rel="icon"
30 |           type="image/png"
31 |           sizes="16x16"
32 |           href={`${process.env.BASE_URL}/images/favicon-16x16.png`}
33 |         />
34 |         <link
35 |           rel="shortcut icon"
36 |           href={`${process.env.BASE_URL}/images/favicon.ico`}
37 |         />
38 |       </Head>
39 |       <Component {...pageProps} />
40 |     </Layout>
41 |   );
42 | }
43 |
--------------------------------------------------------------------------------
/pages/index.js:
--------------------------------------------------------------------------------
1 | import Head from "next/head";
2 | import Link from "next/link";
3 |
4 | import { getLessons } from "../data/lesson";
5 |
6 | import Corner from "../components/corner";
7 | import getCourseConfig from "../data/course";
8 |
9 | export default function Lessons({ sections }) {
10 | const courseInfo = getCourseConfig();
11 | return (
12 | <>
13 |       <Head>
14 |         <title>{courseInfo.title}</title>
15 |       </Head>
16 |
17 |
18 |
19 |
23 |
24 |
25 |