23 | );
24 | }
25 |
26 | export default App;
27 |
--------------------------------------------------------------------------------
/containerize-react-app/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | "target": "es5",
4 | "lib": [
5 | "dom",
6 | "dom.iterable",
7 | "esnext"
8 | ],
9 | "allowJs": true,
10 | "skipLibCheck": true,
11 | "esModuleInterop": true,
12 | "allowSyntheticDefaultImports": true,
13 | "strict": true,
14 | "forceConsistentCasingInFileNames": true,
15 | "noFallthroughCasesInSwitch": true,
16 | "module": "esnext",
17 | "moduleResolution": "node",
18 | "resolveJsonModule": true,
19 | "isolatedModules": true,
20 | "noEmit": true,
21 | "jsx": "react-jsx"
22 | },
23 | "include": [
24 | "src"
25 | ]
26 | }
27 |
--------------------------------------------------------------------------------
/notes-rest-api/reverse-proxy/nginx.conf:
--------------------------------------------------------------------------------
1 | worker_processes 1;
2 |
3 | events {
4 | worker_connections 1024;
5 | }
6 |
7 | http {
8 | server {
9 | listen 80;
10 |
11 | location /api/notebooks {
12 | proxy_pass http://notebooks;
13 | proxy_set_header Host $host;
14 | proxy_set_header X-Real-IP $remote_addr;
15 | }
16 |
17 | location /api/notes {
18 | proxy_pass http://notes;
19 | proxy_set_header Host $host;
20 | proxy_set_header X-Real-IP $remote_addr;
21 | }
22 |
23 | location / {
24 | try_files $uri $uri/ =404;
25 | }
26 | }
27 | }
--------------------------------------------------------------------------------
/notes-rest-api/notes-backend/src/server.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const mongoose = require('mongoose');
3 | const bodyParser = require('body-parser');
4 | const { noteRouter } = require('./routes');
5 |
6 | const app = express();
7 |
8 | app.use(bodyParser.json());
9 | app.use('/api/notes', noteRouter);
10 |
11 | const port = process.env.PORT;
12 |
13 | mongoose
14 | .connect(process.env.DB_URL)
15 | .then(() => {
16 | console.log('Connected to MongoDB! Starting server.');
17 |
18 | app.listen(port, () => {
19 | console.log(`Notes server listening on port ${port}`);
20 | });
21 | })
22 | .catch((err) => {
23 | console.error('Something went wrong!');
24 | console.error(err);
25 | });
26 |
--------------------------------------------------------------------------------
/notes-rest-api/notebooks-backend/src/server.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const mongoose = require('mongoose');
3 | const bodyParser = require('body-parser');
4 | const { notebookRouter } = require('./routes');
5 |
6 | const app = express();
7 |
8 | app.use(bodyParser.json());
9 | app.use('/api/notebooks', notebookRouter);
10 |
11 | const port = process.env.PORT;
12 |
13 | mongoose
14 | .connect(process.env.DB_URL)
15 | .then(() => {
16 | console.log('Connected to MongoDB! Starting server.');
17 |
18 | app.listen(port, () => {
19 | console.log(`Notebooks server listening on port ${port}`);
20 | });
21 | })
22 | .catch((err) => {
23 | console.error('Something went wrong!');
24 | console.error(err);
25 | });
26 |
--------------------------------------------------------------------------------
/containerize-react-app/src/App.css:
--------------------------------------------------------------------------------
1 | .App {
2 | text-align: center;
3 | }
4 |
5 | .App-logo {
6 | height: 40vmin;
7 | pointer-events: none;
8 | }
9 |
10 | @media (prefers-reduced-motion: no-preference) {
11 | .App-logo {
12 | animation: App-logo-spin infinite 20s linear;
13 | }
14 | }
15 |
16 | .App-header {
17 | background-color: #282c34;
18 | min-height: 100vh;
19 | display: flex;
20 | flex-direction: column;
21 | align-items: center;
22 | justify-content: center;
23 | font-size: calc(10px + 2vmin);
24 | color: white;
25 | }
26 |
27 | .App-link {
28 | color: #61dafb;
29 | }
30 |
31 | @keyframes App-logo-spin {
32 | from {
33 | transform: rotate(0deg);
34 | }
35 | to {
36 | transform: rotate(360deg);
37 | }
38 | }
39 |
--------------------------------------------------------------------------------
/_exercises/06-containerize_react_app/project_overview.md:
--------------------------------------------------------------------------------
1 | # Containerizing a React Application with Docker
2 |
3 | Welcome! In this guide, we’ll dive into the process of containerizing a React application using Docker, focusing on the powerful concept of multistage builds. 🚀 Before we jump into the details, let’s outline what you’ll try to accomplish.
4 |
5 | ## Overview
6 |
7 | Here’s what you should aim to implement in this exercise:
8 |
9 | 1. **Create a simple React app** using the Create React App utility.
10 | 2. **Write a multistage Dockerfile** specifically designed to build your React application.
11 | 3. **Extend the Dockerfile** to copy the static files generated during the build process.
12 | 4. **Set up an Nginx server** to serve the static files effectively.
13 |
14 | We encourage you to give this a try on your own before peeking at the step-by-step solution recordings. Challenge yourself to see how far you can get!
15 |
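If you want a reference point before watching the recordings, here is a minimal sketch of such a multistage Dockerfile. The base-image tags and the `build` output folder are assumptions based on a typical Create React App setup, not necessarily the course's exact solution:

```dockerfile
# Stage 1: build the static files with the Node toolchain (tag assumed)
FROM node:22 AS build
WORKDIR /app
COPY package*.json ./
RUN npm install
COPY . .
RUN npm run build

# Stage 2: copy only the build output into a lean NGINX image (tag assumed)
FROM nginx:1.27.0
COPY --from=build /app/build /usr/share/nginx/html
EXPOSE 80
```

The point of the two stages is that the Node toolchain and `node_modules` never reach the final image; only the static files do.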
--------------------------------------------------------------------------------
/containerize-express-app/src/index.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const bodyParser = require('body-parser');
3 |
4 | const app = express();
5 | const port = 3000;
6 | const users = [];
7 |
8 | app.use(bodyParser.json());
9 |
10 | app.get('/', (req, res) => {
11 | res.send('Hello world!');
12 | });
13 |
14 | // Get registered users
15 | app.get('/users', (req, res) => {
16 | return res.json({ users });
17 | })
18 |
19 | // Register a new user
20 | app.post('/users', (req, res) => {
21 | const newUserId = req.body.userId;
22 | if (!newUserId) {
23 | return res.status(400).send('Missing userId.');
24 | }
25 |
26 | if (users.includes(newUserId)) {
27 | return res.status(400).send('userId already exists.');
28 | }
29 |
30 | users.push(newUserId);
31 | return res.status(201).send('User registered.');
32 | });
33 |
34 | app.listen(port, () => {
35 | console.log(`Server listening on port ${port}`);
36 | });
--------------------------------------------------------------------------------
/notes-rest-api/notes-backend/compose.yaml:
--------------------------------------------------------------------------------
1 | name: notes-backend
2 |
3 | services:
4 | notes:
5 | build:
6 | context: .
7 | target: development
8 | ports:
9 | - 3001:80
10 | environment:
11 | - PORT=80
12 | - DB_URL=mongodb://${NOTES_DB_USER}:${NOTES_DB_PASSWORD}@notes-db/${NOTES_DB_NAME}
13 | develop:
14 | watch:
15 | - action: sync
16 | path: ./src
17 | target: /app/src
18 | networks:
19 | - notes-net
20 | depends_on:
21 | - notes-db
22 | notes-db:
23 | image: mongodb/mongodb-community-server:7.0-ubuntu2204
24 | env_file:
25 | - .env
26 | volumes:
27 | - type: volume
28 | source: notes-data
29 | target: /data/db
30 | - type: bind
31 | source: ./db-config/mongo-init.js
32 | target: /docker-entrypoint-initdb.d/mongo-init.js
33 | read_only: true
34 | networks:
35 | - notes-net
36 |
37 | volumes:
38 | notes-data:
39 |
40 | networks:
41 | notes-net:
42 |
--------------------------------------------------------------------------------
/_exercises/02-customizing_nginx/project_overview.md:
--------------------------------------------------------------------------------
1 | # Project Introduction: Customizing an NGINX Server
2 |
3 | Welcome to the project where we'll dive into customizing an NGINX server! 🎉 Whether you're already a bit familiar with Docker or just starting out, this project is designed to help you grasp foundational concepts while having some fun along the way.
4 |
5 | ## Overview
6 |
7 | In this project, your goal is to create and customize an NGINX server using Docker. Before peeking at the step-by-step guide, feel free to give it a shot on your own! Here’s a quick summary of the main steps you should aim to follow:
8 |
9 | 1. Run a Docker container based on the NGINX image with the specific tag `1.27.0`.
10 | 2. Access an interactive shell within your running NGINX container.
11 | 3. Install a text editor of your choice (like Vim).
12 | 4. Modify the `/usr/share/nginx/html/index.html` file to deliver your custom content.
13 |
14 | Take some time to attempt these steps yourself, and see how far you can get before watching the solution videos!
15 |
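If you want to check your commands against a reference, here is one possible sequence; the container name and host port are arbitrary choices:

```bash
# 1. Run an NGINX container pinned to tag 1.27.0
docker run -d --name web-server -p 8080:80 nginx:1.27.0

# 2. Open an interactive shell inside the running container
docker exec -it web-server bash

# 3. and 4. Inside the container: install an editor and edit the page
apt-get update && apt-get install -y vim
vim /usr/share/nginx/html/index.html
```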
--------------------------------------------------------------------------------
/key-value-app/start-backend.sh:
--------------------------------------------------------------------------------
1 | source .env.db
2 |
3 | # Connectivity
4 | source .env.network
5 | LOCALHOST_PORT=3000
6 | CONTAINER_PORT=3000
7 |
8 | BACKEND_IMAGE_NAME=key-value-backend
9 | BACKEND_CONTAINER_NAME=backend
10 |
11 | MONGODB_HOST=mongodb
12 |
13 | if [ "$(docker ps -aq -f name=$BACKEND_CONTAINER_NAME)" ]; then
14 | echo "A container with the name $BACKEND_CONTAINER_NAME already exists."
15 | echo "The container will be removed when stopped."
16 | echo "To stop the container, run: docker kill $BACKEND_CONTAINER_NAME"
17 | exit 1
18 | fi
19 |
20 | docker build -t $BACKEND_IMAGE_NAME \
21 | -f backend/Dockerfile.dev \
22 | backend
23 |
24 | docker run --rm -d --name $BACKEND_CONTAINER_NAME \
25 | -e KEY_VALUE_DB=$KEY_VALUE_DB \
26 | -e KEY_VALUE_USER=$KEY_VALUE_USER \
27 | -e KEY_VALUE_PASSWORD=$KEY_VALUE_PASSWORD \
28 | -e MONGODB_HOST=$MONGODB_HOST \
29 | -e PORT=$CONTAINER_PORT \
30 | -p $LOCALHOST_PORT:$CONTAINER_PORT \
31 | -v ./backend/src:/app/src \
32 | --network $NETWORK_NAME \
33 | $BACKEND_IMAGE_NAME
--------------------------------------------------------------------------------
/key-value-app/backend/src/server.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const mongoose = require('mongoose');
3 | const bodyParser = require('body-parser');
4 | const { keyValueRouter } = require('./routes/store');
5 | const { healthRouter } = require('./routes/health');
6 |
7 | const port = process.env.PORT;
8 | const app = express();
9 |
10 | app.use(bodyParser.json());
11 | app.use('/health', healthRouter);
12 | app.use('/store', keyValueRouter);
13 |
14 | console.log('Connecting to DB');
15 | mongoose
16 | .connect(
17 | `mongodb://${process.env.MONGODB_HOST}/${process.env.KEY_VALUE_DB}`,
18 | {
19 | auth: {
20 | username: process.env.KEY_VALUE_USER,
21 | password: process.env.KEY_VALUE_PASSWORD,
22 | },
23 | connectTimeoutMS: 500,
24 | }
25 | )
26 | .then(() => {
27 | app.listen(port, () => {
28 | console.log(`Listening on port ${port}`);
29 | });
30 | console.log('Connected to DB');
31 | })
32 | .catch((err) => {
33 | console.error('Something went wrong!');
34 | console.error(err);
35 | });
36 |
--------------------------------------------------------------------------------
/notes-rest-api/notebooks-backend/compose.yaml:
--------------------------------------------------------------------------------
1 | name: notebooks-backend
2 |
3 | services:
4 | notebooks:
5 | build:
6 | context: .
7 | target: development
8 | ports:
9 | - 3000:80
10 | environment:
11 | - PORT=80
12 | - DB_URL=mongodb://${NOTEBOOKS_DB_USER}:${NOTEBOOKS_DB_PASSWORD}@notebooks-db/${NOTEBOOKS_DB_NAME}
13 | develop:
14 | watch:
15 | - action: sync
16 | path: ./src
17 | target: /app/src
18 | networks:
19 | - notebooks-net
20 | depends_on:
21 | - notebooks-db
22 | notebooks-db:
23 | image: mongodb/mongodb-community-server:7.0-ubuntu2204
24 | env_file:
25 | - .env
26 | volumes:
27 | - type: volume
28 | source: notebooks-data
29 | target: /data/db
30 | - type: bind
31 | source: ./db-config/mongo-init.js
32 | target: /docker-entrypoint-initdb.d/mongo-init.js
33 | read_only: true
34 | networks:
35 | - notebooks-net
36 |
37 | volumes:
38 | notebooks-data:
39 |
40 | networks:
41 | notebooks-net:
42 |
--------------------------------------------------------------------------------
/key-value-app/cleanup.sh:
--------------------------------------------------------------------------------
1 | # 1. Stop and remove mongodb containers
2 | # 2. Stop and remove app containers
3 | # 3. Remove volumes
4 | # 4. Remove networks
5 |
6 | source .env.db
7 | source .env.volume
8 | source .env.network
9 |
10 | if [ "$(docker ps -aq -f name=$DB_CONTAINER_NAME)" ]; then
11 | echo "Removing DB container $DB_CONTAINER_NAME"
12 | docker kill $DB_CONTAINER_NAME # && docker rm $DB_CONTAINER_NAME - Add if not running with --rm
13 | else
14 | echo "A container with the name $DB_CONTAINER_NAME does not exist. Skipping container deletion."
15 | fi
16 |
17 | if [ "$(docker volume ls -q -f name=$VOLUME_NAME)" ]; then
18 | echo "Removing volume $VOLUME_NAME"
19 | docker volume rm $VOLUME_NAME
20 | else
21 | echo "A volume with the name $VOLUME_NAME does not exist. Skipping volume deletion."
22 | fi
23 |
24 | if [ "$(docker network ls -q -f name=$NETWORK_NAME)" ]; then
25 | echo "Removing network $NETWORK_NAME"
26 | docker network rm $NETWORK_NAME
27 | else
28 | echo "A network with the name $NETWORK_NAME does not exist. Skipping network deletion."
29 | fi
--------------------------------------------------------------------------------
/compose/compose.yaml:
--------------------------------------------------------------------------------
1 | name: key-value-app
2 |
3 | services:
4 | backend:
5 | build:
6 | context: backend
7 | dockerfile: Dockerfile.dev
8 | ports:
9 | - 3000:3000
10 | env_file:
11 | - .env.db-key-value
12 | environment:
13 | - MONGODB_HOST=db
14 | - PORT=3000
15 | depends_on:
16 | - db
17 | networks:
18 | - key-value-net
19 | develop:
20 | watch:
21 | - action: sync
22 | path: ./backend/src
23 | target: /app/src
24 | db:
25 | image: mongodb/mongodb-community-server:7.0-ubuntu2204
26 | ports:
27 | - 27017:27017
28 | env_file:
29 | - .env.db-root-creds
30 | - .env.db-key-value
31 | volumes:
32 | - type: bind
33 | source: ./db-config/mongo-init.js
34 | target: /docker-entrypoint-initdb.d/mongo-init.js
35 | read_only: true
36 | - type: volume
37 | source: mongodb-data
38 | target: /data/db
39 | networks:
40 | - key-value-net
41 |
42 | volumes:
43 | mongodb-data:
44 |
45 | networks:
46 | key-value-net:
47 |
--------------------------------------------------------------------------------
/_exercises/04-containerize_express_app/project_overview.md:
--------------------------------------------------------------------------------
1 | # Containerizing an Express Application with Docker
2 |
3 | Welcome to our journey into containerization! In this exercise, we'll be learning how to build a Docker image for a simple Express application. Before diving into the details, I encourage you to give it a shot on your own. Here's a high-level overview of what you'll be doing:
4 |
5 | ## Overview
6 |
7 | In this project, your goal is to containerize a basic back-end API built with Node.js and Express. Before peeking at the details, here’s a quick summary of the main steps you should try to implement:
8 |
9 | 1. **Set Up Your Express Application**: Create a basic Express app with a couple of endpoints.
10 | 2. **Write a Dockerfile**: Define the Dockerfile to build your application image.
11 | 3. **Build the Docker Image**: Use Docker commands to create your application image.
12 | 4. **Run a Container**: Launch a container based on the image you built.
13 | 5. **Test Your API**: Hit your endpoints to ensure everything is working smoothly.
14 |
15 | Give these steps a try on your own before checking the solution recordings! 🚀
16 |
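As a rough reference for step 2, a Dockerfile for an app like this could look as follows; the base-image tag and file layout are assumptions, so adapt them to your project:

```dockerfile
# Base image tag is an assumption; pick the Node version you target
FROM node:22
WORKDIR /app

# Install dependencies first so this layer is cached between builds
COPY package*.json ./
RUN npm install

# Copy the application source and start the server
COPY src ./src
EXPOSE 3000
CMD ["node", "src/index.js"]
```

From there, `docker build -t express-app .` and `docker run -d -p 3000:3000 express-app` cover steps 3 and 4.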
--------------------------------------------------------------------------------
/containerize-react-app/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "containerize-react-app",
3 | "version": "0.1.0",
4 | "private": true,
5 | "dependencies": {
6 | "@testing-library/jest-dom": "^5.17.0",
7 | "@testing-library/react": "^13.4.0",
8 | "@testing-library/user-event": "^13.5.0",
9 | "@types/jest": "^27.5.2",
10 | "@types/node": "^16.18.101",
11 | "@types/react": "^18.3.3",
12 | "@types/react-dom": "^18.3.0",
13 | "react": "^18.3.1",
14 | "react-dom": "^18.3.1",
15 | "react-scripts": "5.0.1",
16 | "typescript": "^4.9.5",
17 | "web-vitals": "^2.1.4"
18 | },
19 | "scripts": {
20 | "start": "react-scripts start",
21 | "build": "react-scripts build",
22 | "test": "react-scripts test",
23 | "eject": "react-scripts eject"
24 | },
25 | "eslintConfig": {
26 | "extends": [
27 | "react-app",
28 | "react-app/jest"
29 | ]
30 | },
31 | "browserslist": {
32 | "production": [
33 | ">0.2%",
34 | "not dead",
35 | "not op_mini all"
36 | ],
37 | "development": [
38 | "last 1 chrome version",
39 | "last 1 firefox version",
40 | "last 1 safari version"
41 | ]
42 | }
43 | }
44 |
--------------------------------------------------------------------------------
/compose/backend/src/server.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const mongoose = require('mongoose');
3 | const bodyParser = require('body-parser');
4 | const { keyValueRouter } = require('./routes/store');
5 | const { healthRouter } = require('./routes/health');
6 |
7 | const port = process.env.PORT;
8 | const app = express();
9 |
10 | app.use(bodyParser.json());
11 | app.get('/', (req, res) => {
12 | return res.json({
13 | message: 'Welcome to our Key-Value store',
14 | });
15 | });
16 | app.use('/health', healthRouter);
17 | app.use('/store', keyValueRouter);
18 |
19 | console.log('Connecting to DB');
20 | mongoose
21 | .connect(
22 | `mongodb://${process.env.MONGODB_HOST}/${process.env.KEY_VALUE_DB}`,
23 | {
24 | auth: {
25 | username: process.env.KEY_VALUE_USER,
26 | password: process.env.KEY_VALUE_PASSWORD,
27 | },
28 | connectTimeoutMS: 500,
29 | }
30 | )
31 | .then(() => {
32 | app.listen(port, () => {
33 | console.log(`Listening on port ${port}`);
34 | });
35 | console.log('Connected to DB!');
36 | })
37 | .catch((err) => {
38 | console.error('Something went wrong!');
39 | console.error(err);
40 | });
41 |
--------------------------------------------------------------------------------
/_exercises/09-key_value_app/project_overview.md:
--------------------------------------------------------------------------------
1 | # Building a Key-Value REST API with Docker
2 |
3 | Welcome to our hands-on project where we will dive into creating a key-value REST API! 🚀 This project will enhance your skills in Docker and working with multiple containers, as we utilize an Express-based application and a MongoDB database. Let’s roll up our sleeves and get to work!
4 |
5 | ## Overview
6 |
7 | Before we jump into the step-by-step guide, let’s take a moment to see what you should try to implement on your own:
8 |
9 | 1. **Set up a Docker environment** for both the Express application and the MongoDB database.
10 | 2. **Define a user-defined network** to allow communication between the application and the database containers.
11 | 3. **Create volumes** for data persistence with the MongoDB container.
12 | 4. **Develop API endpoints** for storing, retrieving, updating, and deleting key-value pairs.
13 | 5. **Handle errors** with appropriate status codes based on the API logic.
14 | 6. **Include a health check endpoint** to verify that your service is up and running.
15 |
16 | Now, it's your turn! Give it a shot and try implementing the solution above before checking out the detailed solution recordings.
17 |
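If the Docker resources are the sticking point, steps 2 and 3 boil down to two commands. The names below are illustrative (the project's scripts read theirs from `.env` files):

```bash
# User-defined network so the app can reach the DB by container name
docker network create key-value-net

# Named volume so MongoDB data survives container restarts
docker volume create mongodb-data
```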
--------------------------------------------------------------------------------
/key-value-app/start-db.sh:
--------------------------------------------------------------------------------
1 | MONGODB_IMAGE="mongodb/mongodb-community-server"
2 | MONGODB_TAG="7.0-ubuntu2204"
3 | source .env.db
4 |
5 | # Root credentials
6 | ROOT_USER="root-user"
7 | ROOT_PASSWORD="root-password"
8 |
9 | # Connectivity
10 | source .env.network
11 | LOCALHOST_PORT=27017
12 | CONTAINER_PORT=27017
13 |
14 | # Storage
15 | source .env.volume
16 | VOLUME_CONTAINER_PATH="/data/db"
17 |
18 | source setup.sh
19 |
20 | if [ "$(docker ps -q -f name=$DB_CONTAINER_NAME)" ]; then
21 | echo "A container with the name $DB_CONTAINER_NAME already exists."
22 | echo "The container will be removed when stopped."
23 | echo "To stop the container, run: docker kill $DB_CONTAINER_NAME"
24 | exit 1
25 | fi
26 |
27 | docker run --rm -d --name $DB_CONTAINER_NAME \
28 | -e MONGODB_INITDB_ROOT_USERNAME=$ROOT_USER \
29 | -e MONGODB_INITDB_ROOT_PASSWORD=$ROOT_PASSWORD \
30 | -e KEY_VALUE_DB=$KEY_VALUE_DB \
31 | -e KEY_VALUE_USER=$KEY_VALUE_USER \
32 | -e KEY_VALUE_PASSWORD=$KEY_VALUE_PASSWORD \
33 | -p $LOCALHOST_PORT:$CONTAINER_PORT \
34 | -v $VOLUME_NAME:$VOLUME_CONTAINER_PATH \
35 | -v ./db-config/mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro \
36 | --network $NETWORK_NAME \
37 | $MONGODB_IMAGE:$MONGODB_TAG
--------------------------------------------------------------------------------
/_exercises/11-notes_app/project_overview.md:
--------------------------------------------------------------------------------
1 | # Building a Notes REST API with Multiple Services
2 |
3 | Welcome to our project focused on creating a Notes REST API! In this exercise, we’ll be diving into some cool concepts like working with multiple services and containers while ensuring our application’s resilience. 🛠️ Before we jump into the detailed steps, here’s a high-level overview of what you’ll be implementing. I encourage you to attempt this on your own before checking the step-by-step guide below.
4 |
5 | ## Overview
6 |
7 | In this project, you will:
8 |
9 | 1. **Set Up Your Environment**: Use Docker Compose to create your project structure.
10 | 2. **Create the Notebooks Service**:
11 | - Implement the API for creating, retrieving, updating, and deleting notebooks.
12 | - Ensure validation for request bodies, including error handling for 400 and 404 responses.
13 | 3. **Create the Notes Service**:
14 | - Develop the API for notes, ensuring the request body contains the necessary fields (title, content) and handle optional notebook IDs.
15 | - Set up similar endpoints as the Notebooks service.
16 | 4. **Implement NGINX Reverse Proxy**: Configure NGINX to act as a single entry point for the APIs.
17 | 5. **Manage Service Outages**: Discuss and implement a simple strategy to handle cases when one service is down, ensuring the application can still function (one possible NGINX sketch follows this list).
18 | 6. **Test Your API**: Run tests to make sure all endpoints are working as expected.
19 |
20 | Take the time to try implementing this before diving into the guide!
21 |
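For step 5, here is one simple strategy, sketched as an assumption rather than the course's definitive solution: resolve upstream names at request time (so NGINX can start even when a backend is down) and answer with a controlled 503 when a service is unreachable:

```nginx
# Docker's embedded DNS; resolving at request time keeps NGINX running
# even if a backend container is currently down
resolver 127.0.0.11 valid=5s;

location /api/notes {
    set $notes_upstream http://notes;
    proxy_pass $notes_upstream;
    proxy_intercept_errors on;
    error_page 502 503 504 = @service_down;
}

location @service_down {
    default_type application/json;
    return 503 '{"error": "Service temporarily unavailable."}';
}
```

An outage in one backend then degrades only its own routes, while the rest of the API keeps serving requests.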
--------------------------------------------------------------------------------
/containerize-react-app/public/index.html:
--------------------------------------------------------------------------------
(HTML markup stripped during extraction. This is the standard Create React App public/index.html; the only recoverable content is the page title, "React App".)
--------------------------------------------------------------------------------
/_exercises/10-docker_compose/07-hot_reloading_watch.md:
--------------------------------------------------------------------------------
1 | # Hot Reloading with Docker Compose
2 |
3 | Welcome to our guide on implementing hot reloading in your Docker Compose project! In this section, we'll explore how to enable fast feedback during development by automatically syncing file changes in your backend services. 🚀
4 |
5 | ## Overview
6 |
7 | Before we dive into the step-by-step guide, here's a high-level summary of what you should aim to implement:
8 |
9 | 1. **Create a bind mount** or configure a watch setting in your Docker Compose file.
10 | 2. **Define the paths** you wish to sync from your local machine to your Docker container.
11 | 3. **Run Docker Compose** with the watch flag to enable hot reloading.
12 | 4. **Verify changes** made in the code are reflected in your running containers.
13 |
14 | Take a moment to try implementing this on your own. Just follow the steps outlined above, and see if you can get hot reloading working before checking the detailed guide below!
15 |
16 | ## Step-by-Step Guide
17 |
18 | 1. Open your Docker Compose file and locate the `develop` key.
19 | 2. Inside the `develop` configuration, create a `watch` section listing the paths to sync (see the sketch after this list).
20 | 3. Specify your **source path** (e.g., `./backend/src`) and **target path** inside the container (e.g., `/app/src`).
21 | 4. Add any folders you wish to ignore using the `ignore` option (e.g., `node_modules`).
22 | 5. Save the changes to your Docker Compose file.
23 | 6. Open your terminal and run:
24 | ```bash
25 | docker-compose up --watch
26 | ```
27 | 7. Modify your code (e.g., add a new line in `server.js`) and save it.
28 | 8. Check the Docker logs to see if the changes are reflected.
29 | 9. Use a tool like Postman to test the endpoint and verify that your updates are live.
30 |
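Putting steps 2 to 4 together, the relevant fragment of the Compose file could look like this; the service name and paths follow the examples above and should be adapted to your project:

```yaml
services:
  backend:
    # ...build, ports, environment, etc.
    develop:
      watch:
        - action: sync
          path: ./backend/src
          target: /app/src
          ignore:
            - node_modules
```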
31 | ## Conclusion
32 |
33 | Congratulations on enabling hot reloading in your Docker Compose project! This feature will save you significant time and effort during development by providing immediate feedback for code changes. Keep practicing and experimenting with Docker Compose to deepen your knowledge. You're on the right track! 🌟
34 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # The Complete Docker and Kubernetes Course: From Zero to Hero
2 |
3 | This repository is part of my full Docker and Kubernetes course! Check the link right below for a great discount.
4 |
5 | ### Course link (with a big discount 🙂): https://www.lauromueller.com/courses/docker-kubernetes
6 |
7 | **Check my other courses:**
8 |
9 | - 👉 The Definitive Helm Course: From Beginner to Master - https://www.lauromueller.com/courses/definitive-helm-course
10 | - 👉 Mastering Terraform: From Beginner to Expert - https://www.lauromueller.com/courses/mastering-terraform
11 | - 👉 Mastering GitHub Actions: From Beginner to Expert - https://www.lauromueller.com/courses/mastering-github-actions
12 | - 👉 Write better code: 20 code smells and how to get rid of them - https://www.lauromueller.com/courses/writing-clean-code
13 |
14 | Welcome everyone! I'm very happy to see you around, and I hope this repository brings lots of value to those learning more about Docker and Kubernetes. Make sure to check the link above for a great discount on the course on Udemy, where I not only provide theoretical explanations of all the concepts here, but also walk in detail through the entire coding of the examples in this repository.
15 |
16 | Here are a few tips for you to best navigate the contents of this repository:
17 |
18 | 1. The folder `_exercises` contains practical steps for you to try to implement the hands-on labs we go through throughout the course. They are organized based on the Docker sections of the course, so you can easily match the folder and the section by title, as well as the exercise file and the corresponding hands-on lab video in the course.
19 | 2. Each section in the course has one or more folders associated with it, and each folder contains all the files and code for the respective topic. Sometimes, sections have more than one folder, but it should be very straightforward to identify the correct folder based on the lecture titles, as I tried to keep the naming as consistent as possible.
20 |
21 | ## Additional Links:
22 |
23 | - The Kubernetes course's code repository (also part of the bundle): https://github.com/lm-academy/kubernetes-course
24 |
--------------------------------------------------------------------------------
/containerize-react-app/README.md:
--------------------------------------------------------------------------------
1 | # Getting Started with Create React App
2 |
3 | This project was bootstrapped with [Create React App](https://github.com/facebook/create-react-app).
4 |
5 | ## Available Scripts
6 |
7 | In the project directory, you can run:
8 |
9 | ### `npm start`
10 |
11 | Runs the app in the development mode.\
12 | Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
13 |
14 | The page will reload if you make edits.\
15 | You will also see any lint errors in the console.
16 |
17 | ### `npm test`
18 |
19 | Launches the test runner in the interactive watch mode.\
20 | See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests) for more information.
21 |
22 | ### `npm run build`
23 |
24 | Builds the app for production to the `build` folder.\
25 | It correctly bundles React in production mode and optimizes the build for the best performance.
26 |
27 | The build is minified and the filenames include the hashes.\
28 | Your app is ready to be deployed!
29 |
30 | See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment) for more information.
31 |
32 | ### `npm run eject`
33 |
34 | **Note: this is a one-way operation. Once you `eject`, you can’t go back!**
35 |
36 | If you aren’t satisfied with the build tool and configuration choices, you can `eject` at any time. This command will remove the single build dependency from your project.
37 |
38 | Instead, it will copy all the configuration files and the transitive dependencies (webpack, Babel, ESLint, etc) right into your project so you have full control over them. All of the commands except `eject` will still work, but they will point to the copied scripts so you can tweak them. At this point you’re on your own.
39 |
40 | You don’t have to ever use `eject`. The curated feature set is suitable for small and middle deployments, and you shouldn’t feel obligated to use this feature. However we understand that this tool wouldn’t be useful if you couldn’t customize it when you are ready for it.
41 |
42 | ## Learn More
43 |
44 | You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).
45 |
46 | To learn React, check out the [React documentation](https://reactjs.org/).
47 |
--------------------------------------------------------------------------------
/_exercises/10-docker_compose/04-volumes_networks.md:
--------------------------------------------------------------------------------
1 | # Setting Up Volumes and Networks in Docker Compose 🌐
2 |
3 | ## Overview
4 |
5 | In this exercise, we will wrap up the setup of our database service by creating a volume for persistent storage and defining a network. The goal is for you to implement this solution on your own, enhancing your understanding of Docker Compose. Before diving into the step-by-step guide, here's a brief summary of what you will accomplish:
6 |
7 | 1. Define a volume for MongoDB data in your Docker Compose file.
8 | 2. Set up a network for the database service.
9 | 3. Configure the services in the Docker Compose file to utilize the created volume and network.
10 | 4. Verify the successful creation of both the volume and the network.
11 |
12 | Challenge yourself to implement these steps before checking the detailed guide below!
13 |
14 | ## Step-by-Step Guide
15 |
16 | 1. **Open your Docker Compose file** (`docker-compose.yaml`).
17 | 2. **Create a volume**:
18 |    - Add a `volumes` section at the top level and define your volume name (e.g., `mongodb-data`; see the sketch after this list).
19 | 3. **Set up a network**:
20 | - Similarly, add a `networks` section and define your network (e.g., `key-value-net`).
21 | 4. **Configure the service**:
22 | - In the service configuration (e.g., MongoDB), specify the `volume` you created under the service's `volumes` list, and point its source to the defined volume name with the target set to `/data/db`.
23 | - Under the same service, reference the network in the `networks` section.
24 | 5. **Run Docker Compose**: Open your terminal and run `docker-compose up -d` to create everything based on your configuration.
25 | 6. **Verify creation**: Once the command completes, check your Docker volumes and networks to confirm they have been created successfully:
26 | - Use `docker volume ls` to see your volumes.
27 | - Use `docker network ls` to see your networks.
28 |
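Taken together, the relevant parts of the Compose file look roughly like this, using the names from the examples above:

```yaml
services:
  db:
    image: mongodb/mongodb-community-server:7.0-ubuntu2204
    volumes:
      - type: volume
        source: mongodb-data
        target: /data/db
    networks:
      - key-value-net

volumes:
  mongodb-data:

networks:
  key-value-net:
```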
29 | ## Conclusion
30 |
31 | In this exercise, we've successfully defined both a volume for persistent data storage and a network for our database service using Docker Compose. Remember that both resources need to be utilized in at least one service in order to be created. Keep experimenting with Docker Compose, and you'll keep uncovering powerful features that will help you in your development journey. Happy coding! 🚀
32 |
--------------------------------------------------------------------------------
/_exercises/10-docker_compose/09-docker_compose_cli_getting_help.md:
--------------------------------------------------------------------------------
1 | # Getting Help with Docker Compose CLI
2 |
3 | ## Overview
4 |
5 | In this exercise, we'll explore how to effectively use the Docker Compose CLI to get help with commands and options. The goal is to familiarize yourself with the `--help` option, which is a valuable tool when navigating the Docker Compose commands. Before diving into the step-by-step guide, why not challenge yourself to implement the solution on your own? Here’s a quick summary of what you’ll try:
6 |
7 | 1. Start a Docker Compose service in detached mode.
8 | 2. Use the `docker-compose stats --help` command to learn about the stats command and its options.
9 | 3. Run `docker-compose stats` to view live statistics for your containers.
10 | 4. Experiment with passing different options and specifying individual services.
11 | 5. Clean up by taking down all the services and volumes.
12 |
13 | Give it a shot! 🚀
14 |
15 | ## Step-by-Step Guide
16 |
17 | 1. **Start Your Services**: Open your terminal and run the following command to start your services in detached mode:
18 | ```bash
19 | docker-compose up -d
20 | ```
21 | 2. **Access Help Information**:
22 | To understand the available options for the stats command, type:
23 |
24 | ```bash
25 | docker-compose stats --help
26 | ```
27 |
28 | Take note of the options you can use.
29 |
30 | 3. **View Statistics**:
31 | Now let's see the live statistics of your running containers:
32 |
33 | ```bash
34 | docker-compose stats
35 | ```
36 |
37 | 4. **Filter Statistics by Service**:
38 | If you want to get stats for a specific service, you can specify it like this:
39 |
40 | ```bash
41 | docker-compose stats <service-name>
42 | ```
43 |
44 | 5. **Stop and Clean Up**:
45 | Once you’re done, make sure to stop all services and clean up by removing the containers, networks, and volumes:
46 | ```bash
47 | docker-compose down --volumes
48 | ```
49 |
50 | ## Conclusion
51 |
52 | That’s a wrap! We’ve covered how to utilize the help feature in the Docker Compose CLI for better navigation and management of your containers. Remember, the `--help` flag is available for all commands and can be your best ally when exploring new features. Keep practicing these commands, and you'll enhance your Docker skills in no time! 🌟
53 |
--------------------------------------------------------------------------------
/_exercises/03-introduction_images/04-build_dockerfile_nginx.md:
--------------------------------------------------------------------------------
1 | # Implementing a Dockerfile for NGINX
2 |
3 | In this guide, we’ll learn how to create a Dockerfile to automate the setup of an NGINX server with the necessary configurations and dependencies.
4 |
5 | ## Overview
6 |
7 | Before diving into the step-by-step process, try to implement this on your own! Here’s a quick rundown of what you should aim to do:
8 |
9 | 1. Create a new directory and add a Dockerfile inside it.
10 | 2. Define the base image as NGINX version 1.27.0.
11 | 3. Install any required dependencies using the `apt-get` command.
12 | 4. Build the Docker image and tag it appropriately.
13 | 5. Run the Docker container and confirm the installation of your dependencies.
14 |
15 | Give it a shot, and once you think you’re done, you can check below for the step-by-step guide to see how closely it matches your implementation! 🚀
16 |
17 | ## Step-by-Step Guide
18 |
19 | 1. **Create a new folder**: Start by creating an empty folder dedicated to your Docker project.
20 | 2. **Write the Dockerfile**: Open a text editor (like VS Code) and create a new file named `Dockerfile`.
21 |
22 | 3. **Define the base image**:
23 |
24 | ```dockerfile
25 | FROM nginx:1.27.0
26 | ```
27 |
28 | 4. **Install dependencies**: In your Dockerfile, add the following line to install Vim:
29 |
30 | ```dockerfile
31 | RUN apt-get update && apt-get install -y vim
32 | ```
33 |
34 | 5. **Build the Docker image**: Open your terminal, navigate to the folder containing your Dockerfile, and run:
35 |
36 | ```bash
37 | docker build -t web-server-image .
38 | ```
39 |
40 | 6. **Run the Docker container**: Execute the following command:
41 |
42 | ```bash
43 | docker run -d web-server-image
44 | ```
45 |
46 | 7. **Verify installation**: Check that Vim is installed by running:
47 | ```bash
48 | docker exec -it <container-id> vim
49 | ```
50 |
51 | ## Conclusion
52 |
53 | In this guide, we learned how to efficiently create a Dockerfile for NGINX, automating the installation of necessary dependencies and building a custom Docker image. Remember to practice these steps several times to gain confidence in your skills. The more you experiment with Docker, the more familiar you will become with its functionalities and best practices. Keep learning and exploring! 🌟
54 |
--------------------------------------------------------------------------------
/_exercises/03-introduction_images/01-introduction_docker_hub.md:
--------------------------------------------------------------------------------
1 | # Understanding Docker Hub and Container Images
2 |
3 | ## Overview
4 |
5 | In this exercise, we aim to explore Docker Hub in greater detail and understand how to efficiently work with container images. The goal is to familiarize yourself with the basics of Docker Hub, image tags, and the importance of version control in your projects. Before diving into the step-by-step guide, try to implement the solution yourself using the following steps:
6 |
7 | 1. Access Docker Hub at hub.docker.com.
8 | 2. Search for popular images, such as 'Ubuntu' or 'NGINX'.
9 | 3. Examine the details of the image, including available tags.
10 | 4. Understand the relationship between different tags and the same underlying images.
11 | 5. Learn how to pin image versions for stability.
12 |
13 | Take a moment to try out these steps on your own. Once you’re comfortable, check out the step-by-step guide below!
14 |
15 | ## Step-by-Step Guide
16 |
17 | 1. **Access Docker Hub**: Open your web browser and go to [hub.docker.com](https://hub.docker.com).
18 |
19 | 2. **Search for an Image**:
20 |
21 | - Type "Ubuntu" or "NGINX" into the search bar and hit enter.
22 | - Browse the results and select an image that interests you.
23 |
24 | 3. **View Image Details**:
25 |
26 | - After selecting an image, scroll through the details to understand its features.
27 | - Look for documentation and information on how to use the image effectively.
28 |
29 | 4. **Check Available Tags**:
30 |
31 | - Click on the "Tags" tab to view the list of available tags for that image.
32 | - Notice how different tags may refer to the same underlying image by comparing their hash values.
33 |
34 | 5. **Pin Image Versions**:
35 |    - Take note of the recommended practice to pin the version of the images you use to avoid potential breaking changes when new versions are published (see the example after this list).
36 | - Remember to check for updates regularly to ensure you have the latest security patches.
37 |
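For instance, compare a floating tag with a pinned one:

```bash
# Floating tag: the image this resolves to changes as new versions ship
docker pull nginx:latest

# Pinned tag: reproducible pulls across machines and over time
docker pull nginx:1.27.0
```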
38 | ## Conclusion
39 |
40 | In today's lecture, we learned how to navigate Docker Hub and work with container images. Understanding the relationship between tags and underlying images is crucial, as is the practice of version pinning to maintain stability in our projects. Keep experimenting with Docker Hub and the various images available to deepen your understanding! 🚀
41 |
--------------------------------------------------------------------------------
/_exercises/01-running_containers/04-docker_help_command.md:
--------------------------------------------------------------------------------
1 | # Understanding Docker Help Command
2 |
3 | Welcome to this session on the Docker Help command! In this lecture, we'll explore the `--help` flag in the Docker CLI and learn how you can utilize it effectively. The `--help` option is often overlooked but can be incredibly useful for finding command syntax, options, and examples without having to search the internet. 🌐
4 |
5 | ## Overview
6 |
7 | Before diving into the details, let's take a moment to think through what you’ll be implementing in this exercise. Here’s a summarized list of steps to consider:
8 |
9 | 1. Open your terminal and type `docker --help` to view common Docker commands.
10 | 2. Experiment by appending `--help` to specific commands, like `docker run --help`, to dig deeper into available options.
11 | 3. Check the syntax and understand the order of flags and commands.
12 | 4. Familiarize yourself with the various flags and aliases to enhance your command-line skills.
13 | 5. Reflect on how this information could assist you in building and running your Docker images.
14 |
15 | Try to go through these steps on your own before looking at the guide below. It’s a great opportunity to learn by doing!
16 |
17 | ## Step-by-Step Guide
18 |
19 | Here’s a straightforward guide to using the `--help` command in Docker:
20 |
21 | 1. **Open Your Terminal:** Start your command line interface.
22 | 2. **Run the Command:** Type `docker --help` and hit Enter. This will show you a list of available commands and options.
23 | 3. **Explore Command Help:** For any specific command, like `docker run`, execute `docker run --help`. This provides you with details on how to use that command, including the proper syntax and options.
24 | 4. **Review the Options:** Scroll through the output to see which flags are available, their descriptions, and any aliases you can use.
25 | 5. **Practice Frequently:** Regularly use the `--help` option when navigating through Docker commands to reinforce your learning.
26 |
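The two commands from this lecture, side by side:

```bash
docker --help       # top-level list of Docker commands and global options
docker run --help   # detailed usage, flags, and aliases for `docker run`
```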
27 | ## Conclusion
28 |
29 | In this lecture, we’ve highlighted the importance of the `--help` flag in the Docker CLI. This feature not only provides crucial information about commands but also serves as a handy reference to understand flag syntax and options. As you continue your learning journey, don’t forget to utilize this powerful tool to enhance your Docker skills! 🚀 Keep practicing, and you'll be a pro in no time!
30 |
--------------------------------------------------------------------------------
/_exercises/03-introduction_images/03-managing_images_cli.md:
--------------------------------------------------------------------------------
1 | # Managing Docker Images from the Command Line
2 |
3 | ## Overview
4 |
5 | In this session, we're diving into the world of Docker image management through the command line interface (CLI)! By the end of this exercise, you should be able to tag, pull, remove, and push images in Docker seamlessly. Here’s a quick rundown of what you’ll want to tackle:
6 |
7 | 1. Understand image tagging and the significance of different image sizes.
8 | 2. Pull multiple tags of a Docker image.
9 | 3. Create a Dockerfile and build an image from it.
10 | 4. Tag your image appropriately and push it to Docker Hub.
11 |
12 | Before you scroll down to the step-by-step guide, why not give these tasks a try on your own? 😊 You might surprise yourself with what you can achieve!
13 |
14 | ## Step-by-Step Guide
15 |
16 | 1. **Explore Image Tags**: Start with a sample image, like "node", and familiarize yourself with tags like `lts-slim`, `lts-alpine`, and others. Understand their sizes and vulnerabilities.
17 |
18 | 2. **List Local Images**: Use the command `docker images` to see the images on your local machine.
19 |
20 | 3. **Remove Images**: Identify any images you no longer need and try removing them with `docker image rm <image>`. If an image can't be deleted, force it with `docker image rm -f <image>`.
21 |
22 | 4. **Pull All Tags**: Fetch multiple tags of an image using the command `docker pull --all-tags <image>`, but keep it to lighter images like "hello-world" to avoid unnecessary downloads.
23 |
24 | 5. **Create a Dockerfile**: In your working directory, create a basic Dockerfile with an Ubuntu image and a simple command.
25 |
26 | 6. **Build Your Image**: Use `docker build -t <name>:<tag> .` to build your image from the Dockerfile.
27 |
28 | 7. **Tag Your Image**: Tag your newly created image using `docker tag <name>:<tag> <username>/<name>:<tag>`.
29 |
30 | 8. **Push to Docker Hub**: Finally, use `docker push <username>/<name>:<tag>` to push your tagged image to Docker Hub. A concrete end-to-end example follows this list.
31 |
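Here is what steps 6 to 8 can look like end to end, with purely illustrative image and account names:

```bash
# Build an image from the Dockerfile in the current directory
docker build -t my-app:0.1.0 .

# Re-tag it under your Docker Hub username (name is illustrative)
docker tag my-app:0.1.0 my-username/my-app:0.1.0

# Push the tagged image to Docker Hub
docker push my-username/my-app:0.1.0
```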
32 | ## Conclusion
33 |
34 | In this lecture, we covered crucial operations on managing Docker images, including tagging, pulling, removing, and pushing images through the command line. Mastering these skills forms a foundation for efficient Docker workflows. Keep practicing these commands, play around with different images, and explore Docker Hub! Your journey in containerization has only just begun. 🚀
35 |
--------------------------------------------------------------------------------
/_exercises/01-running_containers/01-running_your_first_container.md:
--------------------------------------------------------------------------------
1 | # Running Your First Docker Container
2 |
3 | ## Overview
4 |
5 | In this exercise, we will learn how to run our very first Docker container using the NGINX web server. By doing this, you'll not only get familiar with Docker commands but also see your container in action through your browser! 🎉
6 |
7 | Here’s a high-level summary of what we'll be doing:
8 |
9 | 1. Pull the NGINX image from Docker Hub.
10 | 2. Run the NGINX container and observe the logs.
11 | 3. Start the container in detached mode to free up your terminal.
12 | 4. Map the local port to the container’s port for HTTP requests.
13 | 5. Verify that the container is running by accessing it through a web browser.
14 |
15 | Take a moment to try implementing this on your own before moving to the step-by-step guide below!
16 |
17 | ## Step-by-Step Guide
18 |
19 | Follow these steps to successfully run your first Docker container:
20 |
21 | 1. **Pull the NGINX Image**: Open your terminal and execute the command:
22 |
23 | ```
24 | docker pull nginx
25 | ```
26 |
27 | 2. **Run the Container**: Start the NGINX container by running:
28 |
29 | ```
30 | docker run nginx
31 | ```
32 |
33 | (Note: For now, you'll see logs; proceed to the next step for a better experience.)
34 |
35 | 3. **Run in Detached Mode**: Stop the previous container using `CTRL+C` and run it again in detached mode:
36 |
37 | ```
38 | docker run -d nginx
39 | ```
40 |
41 | 4. **Map Local and Container Ports**: Execute the following command to map your local port 8080 to the container's port 80 and to name the container `web-server`:
42 |
43 | ```
44 | docker run -d -p 8080:80 --name web-server nginx
45 | ```
46 |
47 | 5. **Verify the Container**: Open your web browser and go to `http://localhost:8080`. You should see the NGINX welcome page! 🎈
48 |
49 | 6. **Check Running Containers**: In the terminal, run:
50 |
51 | ```
52 | docker ps
53 | ```
54 |
55 | to see your running container.
56 |
57 | 7. **Stop the Container**: Stop the running container gracefully by using:
58 | ```
59 | docker stop web-server
60 | ```
61 |
62 | ## Conclusion
63 |
64 | Congratulations on running your first Docker container! 🎊 You’ve learned how to pull an image, run a container, map ports, and verify the setup via a web browser. Remember to practice these commands as we will cover more advanced topics in the upcoming lectures. Keep exploring and happy Docking!
65 |
--------------------------------------------------------------------------------
/_exercises/10-docker_compose/08-docker_compose_cli.md:
--------------------------------------------------------------------------------
1 | # Managing Docker Compose Projects Efficiently
2 |
3 | ## Overview
4 |
5 | In this lecture, we focus on enhancing our understanding of the Docker Compose Command Line Interface (CLI) to manage our Docker Compose projects more effectively. We’ll explore various commands that can help you manage your services, containers, and logs. Before diving into the details, let's see if you can implement the solution on your own! Try to follow these steps:
6 |
7 | 1. Use the `docker-compose help` command to list available commands and options.
8 | 2. Experiment with the `docker-compose up` command in detached mode.
9 | 3. View the containers related to your Compose project using `docker-compose ps`.
10 | 4. Access the logs of a specific service with `docker-compose logs [service_name]`.
11 | 5. Learn to start, stop, and remove individual services and containers.
12 | 6. Discover how to manage volumes and orphan containers using the `docker-compose down` command.
13 |
14 | Give it a shot! Challenge yourself to implement these steps before looking at the guide! 💪
15 |
16 | ## Step-by-Step Guide
17 |
18 | 1. **Explore Available Commands**:
19 |
20 | - Open your terminal and navigate to your project folder.
21 | - Run `docker-compose help` to view available commands and options.
22 |
23 | 2. **Start Services in Detached Mode**:
24 |
25 | - Start your services using `docker-compose up -d` to run in detached mode.
26 |
27 | 3. **Check Active Containers**:
28 |
29 | - Use `docker-compose ps` to see the status of the containers associated with your project.
30 |
31 | 4. **View Logs for a Service**:
32 |
33 | - Run `docker-compose logs [service_name]` (replace `[service_name]` with your actual service name) to see the logs.
34 |
35 | 5. **Manage Individual Services**:
36 |
37 | - Stop a service with `docker-compose stop [service_name]`.
38 | - Start it again with `docker-compose start [service_name]`.
39 |
40 | 6. **Remove Containers and Volumes**:
41 | - Use `docker-compose down` to stop and remove containers.
42 | - If you also want to remove unused volumes, run `docker-compose down -v`.
43 |
44 | ## Conclusion
45 |
46 | Throughout this lecture, we delved into the various commands of the Docker Compose CLI that allow for effective management of your Docker services. With practice, you'll find it easier to navigate your projects and control your containers. Keep experimenting and don't hesitate to explore the Docker documentation for more detailed insights! Happy learning! 🚀
47 |
--------------------------------------------------------------------------------
/_exercises/07-volumes/02-bind_mounts.md:
--------------------------------------------------------------------------------
1 | # Understanding Docker Bind Mounts
2 |
3 | ## Overview
4 |
5 | In this lecture, we dive into the exciting world of Docker bind mounts. The goal is to enable automatic hot reloading of your local development changes within a Docker container, making your development experience much smoother and efficient. Before you jump into the step-by-step guide, here's a quick summary of what to implement:
6 |
7 | 1. Create a development Dockerfile that runs a development server instead of a production build.
8 | 2. Change the command in the Dockerfile to use CMD to run the development server.
9 | 3. Build the Docker image from this Dockerfile.
10 | 4. Use the `docker run` command with bind mounts for your local source and public directories.
11 | 5. Confirm that your application supports hot reloading by modifying files and observing live updates in the browser.
12 |
13 | Give it a shot! Try implementing these steps on your own before checking the detailed guide below. 🚀
14 |
15 | ## Step-by-Step Guide
16 |
17 | 1. **Create a Development Dockerfile:**
18 |
19 | - Copy your existing Dockerfile and name it `Dockerfile.dev`.
20 |    - Replace any build commands with the command to run your development server (e.g., `CMD ["npm", "start"]`). See the sketch after this list.
21 |
22 | 2. **Build the Docker Image:**
23 |
24 | - Open your terminal and run:
25 | ```bash
26 | docker build -t react-app:dev -f Dockerfile.dev .
27 | ```
28 |
29 | 3. **Run the Docker Container:**
30 |
31 | - With the terminal ready, execute the `docker run` command, adding mounts for your local source and public folders:
32 | ```bash
33 | docker run -d -p 3000:3000 -v $(pwd)/public:/app/public -v $(pwd)/src:/app/src react-app:dev
34 | ```
35 |
36 | 4. **Verify the Container is Running:**
37 |
38 | - Use the below command to check the currently running containers:
39 | ```bash
40 | docker ps
41 | ```
42 | - Check the logs to ensure everything is running smoothly.
43 |
44 | 5. **Test Hot Reloading:**
45 | - Open your browser and navigate to `http://localhost:3000`.
46 | - Modify a file in your local `src` directory and refresh the browser to see the changes applied instantly.
47 |
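For reference, a `Dockerfile.dev` along the lines of step 1 might look like this; the base tag and port are assumptions for a typical Create React App setup:

```dockerfile
FROM node:22
WORKDIR /app
COPY package*.json ./
RUN npm install
COPY . .
EXPOSE 3000
# Development server with hot reloading instead of a production build
CMD ["npm", "start"]
```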
48 | ## Conclusion
49 |
50 | Today, we learned how to set up Docker bind mounts to facilitate hot reloading in our React applications. This not only streamlines our development process by automatically applying changes but also maintains the advantages of containerized environments. Remember, practice is key—continue experimenting with Docker in your projects!
51 |
--------------------------------------------------------------------------------
/containerize-react-app/src/logo.svg:
--------------------------------------------------------------------------------
(SVG markup stripped during extraction: the React logo displayed by the app.)
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/01-docker_layered_system.md:
--------------------------------------------------------------------------------
1 | # Understanding the Layered Architecture of Docker
2 |
3 | ## Overview
4 |
5 | In this exercise, the focus is on exploring Docker's layered architecture using a Dockerfile from a containerized Express app project. Before diving into the step-by-step guide, give yourself a chance to implement the solution on your own. Here’s a brief outline of what you’ll be doing:
6 |
7 | 1. Retrieve the Dockerfile from the Express app project (or download the zipped resources).
8 | 2. Execute the Docker build command to create the image.
9 | 3. Use the Docker history command to visualize the layers of your image.
10 | 4. Investigate the Dockerfile of the base image (Node 22) to understand the contributing factors to image size.
11 | 5. Reflect on ways to optimize your image size based on the findings.
12 |
13 | Now, it’s your turn! Try to implement the solution yourself before checking out the detailed guide below. 🚀
14 |
15 | ## Step-by-Step Guide
16 |
17 | 1. **Download the Resources**: If you didn't complete the project, download the zipped folder from the lecture resources and extract it.
18 | 2. **Open Terminal**: Navigate to the folder where you have extracted the files.
19 | 3. **Build the Docker Image**: Run the following command to build your Docker image:
20 | ```bash
21 | docker build -t express-app .
22 | ```
23 | This will create an image tagged as `express-app` from the files in the current directory.
24 | 4. **Check Your Docker Images**: Use the command below to view your images:
25 | ```bash
26 | docker images
27 | ```
28 | 5. **View Image Layers**: To examine the layers of your image, run:
29 | ```bash
30 | docker history express-app
31 | ```
32 | 6. **Explore the Node 22 Dockerfile**:
33 | - Search for the Node 22 Dockerfile online or on Docker Hub.
34 | - Review the commands to see how they relate to the layers in your Express app image.
35 | 7. **Analyze Image Size**: Pay attention to which layers are contributing significantly to the final image size and consider potential optimizations.
36 |
37 | ## Conclusion
38 |
39 | In this lecture, we uncovered the fascinating concept of Docker's layered architecture. We learned that each command in a Dockerfile creates a distinct layer in the final image and how this architecture can lead to improved caching and faster build times. Understanding these layers not only helps in efficient image management but also provides insights into potential optimizations. Keep experimenting with Docker and don’t hesitate to dive deeper into these concepts as you continue your learning journey! 💻
40 |
--------------------------------------------------------------------------------
/notes-rest-api/notebooks-backend/src/routes.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const mongoose = require('mongoose');
3 | const { Notebook } = require('./models');
4 |
5 | const notebookRouter = express.Router();
6 |
7 | const validateId = (req, res, next) => {
8 | const { id } = req.params;
9 |
10 | if (!mongoose.Types.ObjectId.isValid(id)) {
11 | return res.status(404).json({ error: 'Notebook not found.' });
12 | }
13 |
14 | next();
15 | };
16 |
17 | notebookRouter.post('/', async (req, res) => {
18 | try {
19 | const { name, description } = req.body;
20 |
21 | if (!name) {
22 | return res.status(400).json({ error: "'name' field is required." });
23 | }
24 |
25 | const notebook = new Notebook({ name, description });
26 | await notebook.save();
27 | res.status(201).json({ data: notebook });
28 | } catch (err) {
29 | res.status(500).json({ error: err.message });
30 | }
31 | });
32 |
33 | notebookRouter.get('/', async (req, res) => {
34 | try {
35 | const notebooks = await Notebook.find();
36 | return res.status(200).json({ data: notebooks });
37 | } catch (err) {
38 | res.status(500).json({ error: err.message });
39 | }
40 | });
41 |
42 | notebookRouter.get('/:id', validateId, async (req, res) => {
43 | try {
44 | const notebook = await Notebook.findById(req.params.id);
45 |
46 | if (!notebook) {
47 | return res.status(404).json({ error: 'Notebook not found.' });
48 | }
49 |
50 | return res.status(200).json({ data: notebook });
51 | } catch (err) {
52 | res.status(500).json({ error: err.message });
53 | }
54 | });
55 |
56 | notebookRouter.put('/:id', validateId, async (req, res) => {
57 | try {
58 | const { name, description } = req.body;
59 |
60 | const notebook = await Notebook.findByIdAndUpdate(
61 | req.params.id,
62 | { name, description },
63 | { new: true }
64 | );
65 |
66 | if (!notebook) {
67 | return res.status(404).json({ error: 'Notebook not found.' });
68 | }
69 |
70 | return res.json({ data: notebook });
71 | } catch (err) {
72 | res.status(500).json({ error: err.message });
73 | }
74 | });
75 |
76 | notebookRouter.delete('/:id', validateId, async (req, res) => {
77 | try {
78 | const notebook = await Notebook.findByIdAndDelete(req.params.id);
79 |
80 | if (!notebook) {
81 | return res.status(404).json({ error: 'Notebook not found.' });
82 | }
83 |
84 | return res.sendStatus(204);
85 | } catch (err) {
86 | res.status(500).json({ error: err.message });
87 | }
88 | });
89 |
90 | module.exports = {
91 | notebookRouter,
92 | };
93 |
--------------------------------------------------------------------------------
/_exercises/10-docker_compose/02-handling_environment_variables.md:
--------------------------------------------------------------------------------
1 | # Setting Up Environment Variables with Docker Compose
2 |
3 | ## Overview
4 |
5 | In this exercise, we’ll focus on efficiently setting up environment variables for our database service in Docker Compose. It’s a great opportunity to practice separating sensitive data from your codebase, which is essential for maintaining security in your applications. Here’s a quick checklist of what we will implement:
6 |
7 | 1. Define environment variables using the `environment` key in the Compose file.
8 | 2. Create a `.env` file to store sensitive information securely.
9 | 3. Load multiple `.env` files to manage different sets of credentials.
10 | 4. Ensure that the `.env` files are properly ignored from the repository.
11 |
12 | Before diving into the step-by-step guide, I encourage you to try implementing these steps on your own! Give it a shot, and then check back for the full instructions.
13 |
14 | ## Step-by-Step Guide
15 |
16 | Here’s how you can set up your environment variables in Docker Compose:
17 |
18 | 1. **Define Environment Variables Directly**: Open your Docker Compose file and under your service, use the `environment` key to define your variables. For example:
19 |
20 | ```yaml
21 | environment:
22 |      - MONGO_INITDB_ROOT_USERNAME=root
23 |      - MONGO_INITDB_ROOT_PASSWORD=root_password
24 | ```
25 |
26 | 2. **Create a `.env` File**: To avoid exposing sensitive credentials, create a file named `.env.db` and add your environment variables there:
27 |
28 | ```plaintext
29 | MONGO_INITDB_ROOT_USERNAME=root
30 | MONGO_INITDB_ROOT_PASSWORD=root_password
31 | ```
32 |
33 | 3. **Load the `.env` File with Docker Compose**: In your Docker Compose configuration, specify the `.env` file:
34 |
35 | ```yaml
36 | env_file:
37 | - .env.db
38 | ```
39 |
40 | 4. **Organize Credentials**: If managing multiple databases, consider creating separate `.env` files for each:
41 |
42 | - `.env.db-root-creds` for root credentials.
43 | - `.env.db-key-value` for key-value store credentials.
44 |
45 | 5. **Verify Ignored Files**: Ensure that your `.env` files are included in your `.gitignore`, so they’re not tracked by version control.
46 |
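47 | Putting the pieces together, a database service using both approaches might look like this sketch (the service name and the non-sensitive variable are illustrative):
48 |
49 | ```yaml
50 | services:
51 |   db:
52 |     image: mongo
53 |     env_file:
54 |       - .env.db # credentials stay out of the Compose file and the repo
55 |     environment:
56 |       - MONGO_INITDB_DATABASE=app_db # non-sensitive values can stay inline
57 | ```
58 |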
47 | ## Conclusion
48 |
49 | Today, we explored how to set environment variables in Docker Compose while keeping sensitive information secure. By utilizing `.env` files, we enhance our application's security and maintain cleaner code. Make sure to practice this setup regularly, as it’s a crucial skill for working with Docker and database management! Keep up the great work, and let’s continue our learning journey! 🚀
50 |
--------------------------------------------------------------------------------
/_exercises/07-volumes/01-volumes_motivation.md:
--------------------------------------------------------------------------------
1 | # Understanding Data Persistence with Docker Volumes
2 |
3 | ## Overview
4 |
5 | In today’s exercise, we’re going to explore data persistence in Docker using volumes. This is an important concept because it allows your data to exist beyond the life cycle of a container. Before diving into the step-by-step guide, give yourself a chance to implement the solution on your own. Here’s a quick summary of what we’ll cover:
6 |
7 | 1. Run a simple NGINX container in detached mode with a specific name.
8 | 2. Connect to the running container and create a file in a temporary directory.
9 | 3. Stop and restart the container to check if the file persists.
10 | 4. Understand the limitations of container storage without using volumes.
11 | 5. Introduce Docker volumes for better data management.
12 |
13 | Go ahead and try to implement these steps before checking the guide below! 🚀
14 |
15 | ## Step-by-Step Guide
16 |
17 | 1. **Run the NGINX Container**:
18 | - Execute the command to run an NGINX container in detached mode:
19 | ```bash
20 | docker run -d --name web-server nginx:1.27.0
21 | ```
22 | 2. **Access the Container**:
23 | - Use the Docker exec command to enter the running container:
24 | ```bash
25 | docker exec -it web-server /bin/sh
26 | ```
27 | 3. **Create a File**:
28 | - Navigate to the `/tmp` directory and create a file:
29 | ```bash
30 | cd /tmp
31 | echo "Hello World" > hello.txt
32 | ```
33 | 4. **Exit and Stop the Container**:
34 | - Exit from the container shell and then stop the container:
35 | ```bash
36 | exit
37 | docker stop web-server
38 | ```
39 | 5. **Start the Container Again**:
40 | - Restart the container and check if your file is still there:
41 | ```bash
42 | docker start web-server
43 | docker exec -it web-server /bin/sh
44 | cat /tmp/hello.txt
45 | ```
46 | 6. **Remove the Container and Check Again**:
47 | - Remove the container and run a new one to see if the data persists without using volumes:
48 | ```bash
49 | docker rm web-server
50 | docker run -d --name web-server nginx:1.27.0
51 | docker exec -it web-server /bin/sh
52 | ls /tmp
53 | ```
54 |
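55 | As a preview of the fix, a named volume keeps data alive across container removals. A minimal sketch (the volume name is illustrative):
56 |
57 | ```bash
58 | docker volume create web-data
59 | docker run -d --name web-server -v web-data:/tmp nginx:1.27.0
60 | # /tmp now lives in the volume, so hello.txt would survive `docker rm web-server`
61 | ```
62 |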
55 | ## Conclusion
56 |
57 | In this session, we’ve learned about the basic concept of data persistence in Docker, especially focusing on the limitations of container storage. By trying out the steps for creating, stopping, and restarting containers, you should have a clearer understanding of when and why to use Docker volumes for persistent data storage. Keep experimenting, and remember that mastering these concepts takes practice! 🌟
58 |
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/15-optimizing_typescript_dockerfile.md:
--------------------------------------------------------------------------------
1 | # Optimizing TypeScript Dockerfile
2 |
3 | Welcome! In this guide, we’re going to dive into an exciting exercise focused on enhancing the Dockerfile we created during the multi-stage builds section of the course. This will not only help you solidify your understanding of Docker but also teach you how to optimize your Dockerfile for better efficiency and performance. 💪
4 |
5 | ## Overview
6 |
7 | Before we jump into the step-by-step guide, let’s take a moment to think about how you might approach this exercise. Here’s a high-level summary of what we want to implement:
8 |
9 | 1. **Review the existing Dockerfile** structure and identify areas for improvement.
10 | 2. **Separate the build and production dependencies** by creating a distinct dependencies stage in your Dockerfile.
11 | 3. **Implement multi-stage builds**, ensuring we're only copying necessary files from each stage.
12 | 4. **Test the optimized image** to verify that it's working as expected.
13 |
14 | Try to implement the solution based on these steps before checking out the detailed guide below. You’ll learn a lot by attempting it yourself!
15 |
16 | ## Step-by-Step Guide
17 |
18 | Here’s how to optimize your TypeScript Dockerfile effectively:
19 |
20 | 1. **Create a new dependencies stage** by adding a new stage using the `FROM node:22-alpine` image.
21 | 2. **Set the work directory** in this dependencies stage to keep your files organized.
22 | 3. **Copy the `package.json` and `package-lock.json`** files into the dependencies stage to install only the production dependencies.
23 | - Run the command: `npm ci --only=production` to install only what's needed for runtime.
24 | 4. **Modify the existing build stage** to reference the node modules from the newly created dependencies stage instead of copying from itself.
25 | 5. **Ensure that your build stage still handles copying the source code** and executing the build command.
26 | 6. **Run your Docker build command**, tagging the image as something appropriate like `express-ts`.
27 | 7. **Test your container** by running it and mapping the necessary ports to ensure your application responds as expected.
28 |
29 | By following these steps, you'll create a more efficient Dockerfile that leverages Docker's multi-stage build capabilities.
30 |
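31 | Here is a minimal sketch of the resulting three-stage layout (stage names, paths, and the `dist` output folder are assumptions; adjust them to your project):
32 |
33 | ```dockerfile
34 | # Stage 1: production dependencies only
35 | FROM node:22-alpine AS deps
36 | WORKDIR /app
37 | COPY package*.json ./
38 | RUN npm ci --only=production
39 |
40 | # Stage 2: full install plus the TypeScript build
41 | FROM node:22-alpine AS build
42 | WORKDIR /app
43 | COPY package*.json ./
44 | RUN npm ci
45 | COPY . .
46 | RUN npm run build
47 |
48 | # Stage 3: runtime image assembled from the two stages above
49 | FROM node:22-alpine
50 | WORKDIR /app
51 | COPY --from=deps /app/node_modules ./node_modules
52 | COPY --from=build /app/dist ./dist
53 | CMD ["node", "dist/index.js"]
54 | ```
55 |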
31 | ## Conclusion
32 |
33 | Congratulations on taking this important step in optimizing your Dockerfile! 🎉 By implementing a multi-stage build and correctly handling dependencies, you are now creating images that are not only more efficient but also more secure. Keep practicing these techniques, and don't hesitate to explore more advanced concepts as you continue to learn about Docker!
34 |
--------------------------------------------------------------------------------
/_exercises/01-running_containers/02-docker_cli_part1.md:
--------------------------------------------------------------------------------
1 | # Mastering Docker CLI Commands - Part 1
2 |
3 | ## Overview
4 |
5 | In this session, we're diving deep into Docker command-line interface (CLI) commands, focusing on how to manage containers and images effectively. Before jumping into the steps, why not give it a shot yourself? Here’s a brief outline of what you’ll aim to implement:
6 |
7 | 1. Check existing images with `docker images`.
8 | 2. Pull the latest Ubuntu image using `docker pull ubuntu`.
9 | 3. Run an Ubuntu container and observe its behavior.
10 | 4. Start a new Nginx container in detached mode.
11 | 5. Manage existing containers by starting, stopping, and removing them.
12 | 6. Use filters to find specific containers and perform cleanup operations.
13 |
14 | Try implementing these steps on your own, and once you're ready, check out the detailed guide below! 🚀
15 |
16 | ## Step-by-Step Guide
17 |
18 | 1. **Check Existing Images**: Run `docker images` to see which images you have stored locally.
19 | 2. **Pull Ubuntu Image**: Execute `docker pull ubuntu` to download the latest Ubuntu image from Docker Hub.
20 |
21 | 3. **Run an Ubuntu Container**: Type `docker run ubuntu` to start the container. Notice that it will exit immediately since there's no command to keep it alive.
22 |
23 | 4. **Run Nginx Container**: Use `docker run -d --name web_server nginx` to start a new Nginx container in detached mode. This will allow it to run continuously.
24 |
25 | 5. **Check Running Containers**: Run `docker ps` to see active containers and `docker ps -a` to see all containers, both running and exited.
26 |
27 | 6. **Start a Stopped Container**: To restart an existing but stopped container, use `docker start <container_id>`, where `<container_id>` is the ID of the stopped container.
28 |
29 | 7. **Stop a Running Container**: Execute `docker stop <container_id>` to gracefully stop an active container.
30 |
31 | 8. **Remove Containers**: Clean up by running `docker rm <container_id>` for any containers you no longer need.
32 |
33 | 9. **Filtering Containers**: Utilize filters with `docker ps --filter "name=web_server"` to find specific containers by name.
34 |
35 | 10. **Cleanup**: To stop all running containers at once, combine commands like this: `docker stop $(docker ps -q)`. For removing all stopped containers, use `docker rm $(docker ps -a -q)`.
36 |
37 | ## Conclusion
38 |
39 | We’ve covered a lot about managing Docker containers and images using the CLI. Remember, practice makes perfect! By working with these commands, you’ll build a solid foundation for using Docker effectively. Keep experimenting and learning more about the Docker ecosystem, and don’t hesitate to reach out if you have any questions. Happy Dockering! 🐳
40 |
--------------------------------------------------------------------------------
/compose/backend/src/routes/store.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const { KeyValue } = require('../models/keyValue');
3 |
4 | const keyValueRouter = express.Router();
5 |
6 | keyValueRouter.post('/', async (req, res) => {
7 | const { key, value } = req.body;
8 |
9 | if (!key || !value) {
10 | return res
11 | .status(400)
12 | .json({ error: 'Both "key" and "value" are required' });
13 | }
14 | try {
15 | const existingKey = await KeyValue.findOne({ key });
16 |
17 | if (existingKey) {
18 | return res.status(400).json({ error: 'Key already exists' });
19 | }
20 |
21 | const keyValue = new KeyValue({ key, value });
22 | await keyValue.save();
23 |
24 | return res
25 | .status(201)
26 | .json({ message: 'Key-Value pair stored successfully' });
27 | } catch (err) {
28 | res.status(500).json({ error: 'Internal server error' });
29 | }
30 | });
31 |
32 | keyValueRouter.get('/:key', async (req, res) => {
33 | const { key } = req.params;
34 |
35 | try {
36 | const keyValue = await KeyValue.findOne({ key });
37 |
38 | if (!keyValue) {
39 | return res.status(404).json({ error: 'Key not found' });
40 | }
41 |
42 | return res.status(200).json({ key, value: keyValue.value });
43 | } catch (err) {
44 | res.status(500).json({ message: 'Internal server error' });
45 | }
46 | });
47 |
48 | keyValueRouter.put('/:key', async (req, res) => {
49 | const { key } = req.params;
50 | const { value } = req.body;
51 |
52 | if (!value) {
53 | return res.status(400).json({ error: '"value" is required' });
54 | }
55 |
56 | try {
57 | const keyValue = await KeyValue.findOneAndUpdate(
58 | { key },
59 | { value },
60 | { new: true }
61 | );
62 |
63 | if (!keyValue) {
64 | return res.status(404).json({ error: 'Key not found' });
65 | }
66 |
67 | return res.status(200).json({
68 | message: 'Key-value pair updated successfully',
69 | key: keyValue.key,
70 | value: keyValue.value,
71 | });
72 | } catch (err) {
73 | res.status(500).json({ message: 'Internal server error' });
74 | }
75 | });
76 |
77 | keyValueRouter.delete('/:key', async (req, res) => {
78 | const { key } = req.params;
79 |
80 | try {
81 | const keyValue = await KeyValue.findOneAndDelete({ key });
82 |
83 | if (!keyValue) {
84 | return res.status(404).json({ error: 'Key not found' });
85 | }
86 |
87 | return res
88 | .status(200)
89 | .json({ message: 'Key-value pair deleted successfully' });
90 | } catch (err) {
91 | res.status(500).json({ message: 'Internal server error' });
92 | }
93 | });
94 |
95 | module.exports = {
96 | keyValueRouter,
97 | };
98 |
--------------------------------------------------------------------------------
/key-value-app/backend/src/routes/store.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const { KeyValue } = require('../models/keyValue');
3 |
4 | const keyValueRouter = express.Router();
5 |
6 | keyValueRouter.post('/', async (req, res) => {
7 | const { key, value } = req.body;
8 |
9 | if (!key || !value) {
10 | return res
11 | .status(400)
12 | .json({ error: 'Both "key" and "value" are required' });
13 | }
14 | try {
15 | const existingKey = await KeyValue.findOne({ key });
16 |
17 | if (existingKey) {
18 | return res.status(400).json({ error: 'Key already exists' });
19 | }
20 |
21 | const keyValue = new KeyValue({ key, value });
22 | await keyValue.save();
23 |
24 | return res
25 | .status(201)
26 | .json({ message: 'Key-Value pair stored successfully' });
27 | } catch (err) {
28 | res.status(500).json({ error: 'Internal server error' });
29 | }
30 | });
31 |
32 | keyValueRouter.get('/:key', async (req, res) => {
33 | const { key } = req.params;
34 |
35 | try {
36 | const keyValue = await KeyValue.findOne({ key });
37 |
38 | if (!keyValue) {
39 | return res.status(404).json({ error: 'Key not found' });
40 | }
41 |
42 | return res.status(200).json({ key, value: keyValue.value });
43 | } catch (err) {
44 | res.status(500).json({ message: 'Internal server error' });
45 | }
46 | });
47 |
48 | keyValueRouter.put('/:key', async (req, res) => {
49 | const { key } = req.params;
50 | const { value } = req.body;
51 |
52 | if (!value) {
53 | return res.status(400).json({ error: '"value" is required' });
54 | }
55 |
56 | try {
57 | const keyValue = await KeyValue.findOneAndUpdate(
58 | { key },
59 | { value },
60 | { new: true }
61 | );
62 |
63 | if (!keyValue) {
64 | return res.status(404).json({ error: 'Key not found' });
65 | }
66 |
67 | return res.status(200).json({
68 | message: 'Key-value pair updated successfully',
69 | key: keyValue.key,
70 | value: keyValue.value,
71 | });
72 | } catch (err) {
73 | res.status(500).json({ message: 'Internal server error' });
74 | }
75 | });
76 |
77 | keyValueRouter.delete('/:key', async (req, res) => {
78 | const { key } = req.params;
79 |
80 | try {
81 | const keyValue = await KeyValue.findOneAndDelete({ key });
82 |
83 | if (!keyValue) {
84 | return res.status(404).json({ error: 'Key not found' });
85 | }
86 |
87 | return res
88 | .status(200)
89 | .json({ message: 'Key-value pair deleted successfully' });
90 | } catch (err) {
91 | res.status(500).json({ message: 'Internal server error' });
92 | }
93 | });
94 |
95 | module.exports = {
96 | keyValueRouter,
97 | };
98 |
--------------------------------------------------------------------------------
/_exercises/03-introduction_images/02-dockerhub_login_cli.md:
--------------------------------------------------------------------------------
1 | # Docker Fundamentals: Docker Hub Login & CLI Usage
2 |
3 | ## Overview
4 |
5 | In this exercise, we're diving into how to create a Docker Hub account, log in using the CLI, and manage Docker images. Before we proceed with the step-by-step guide, I encourage you to try implementing the solution on your own! Here’s a quick summary of the steps:
6 |
7 | 1. Create a Docker Hub account using either email or via GitHub/Google.
8 | 2. Log in to the Docker CLI with your username and use a personal access token instead of a password.
9 | 3. Use `docker search` to find an image like Ubuntu.
10 | 4. Download images using `docker pull`, specifying the version tag if desired.
11 | 5. Check your downloaded images using `docker images`.
12 |
13 | Now, go ahead and give it a shot! Remember, practice makes perfect! 😄
14 |
15 | ## Step-by-Step Guide
16 |
17 | 1. **Creating a Docker Hub Account**:
18 |
19 | - Go to Docker Hub and click on "Sign Up."
20 | - Enter your email, username, and password, or sign up using GitHub or Google.
21 | - Complete the sign-up process and verify your account if necessary.
22 |
23 | 2. **Logging in via the CLI**:
24 |
25 | - Open your terminal.
26 | - Run the command `docker login`.
27 | - Enter your username.
28 | - Instead of your password, use a personal access token:
29 | - Go to Docker Hub, navigate to your profile, and find the "Security" section.
30 | - Create a new access token, name it (like "Docker-CLI"), and set the permissions.
31 | - Copy the generated token, return to your terminal, paste it, and press enter.
32 | - If successful, you'll see "login succeeded."
33 |
34 | 3. **Searching for Images**:
35 |
36 | - Ensure Docker is running by using the command `docker info`.
37 |    - Use `docker search ubuntu` to see available Ubuntu images.
38 |
39 | 4. **Pulling Images**:
40 |
41 | - To download the latest Ubuntu image, run `docker pull ubuntu`.
42 | - If you want a specific version, use `docker pull ubuntu:24.04`, substituting "24.04" for any version you prefer.
43 | - Check if the images were downloaded successfully using `docker images`.
44 |
45 | 5. **Verifying and Managing Images**:
46 | - Run `docker images` to see a list of your downloaded images, including their IDs and sizes.
47 | - Notice how different tags may reference the same image.
48 |
49 | ## Conclusion
50 |
51 | Today, we explored how to set up your Docker Hub account, log in via the CLI, and work with Docker images. This foundational knowledge will help you navigate through Docker and manage containers more efficiently. Keep experimenting with different commands, and don’t forget to check back for more advanced topics in our next lecture. Happy Dockering! 🚀
52 |
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/09-multistage_distroless.md:
--------------------------------------------------------------------------------
1 | # Multi-Stage Builds with Distroless Images
2 |
3 | Welcome to another exciting session! In this lecture, we dive into the world of multi-stage builds using distroless images with Node.js. 🎉 If you're ready to implement a clean and efficient Docker setup, you've come to the right place!
4 |
5 | ## Overview
6 |
7 | In this exercise, you're going to learn how to use multi-stage builds effectively. We'll focus on installing dependencies in one stage and running your application in a distroless image in the next. Before you dive into the step-by-step guide, here’s a quick summary to get you started:
8 |
9 | 1. Define the first stage and name it "build" to install dependencies using a Node.js base image.
10 | 2. Set up the work directory and copy your `package.json` files into it.
11 | 3. Run `npm ci` to install your application’s dependencies.
12 | 4. Create a second stage using a distroless image.
13 | 5. Copy the installed `node_modules` folder from the first stage to the second.
14 | 6. Copy your application source code to the second stage.
15 | 7. Specify the command to run your application.
16 |
17 | Now, why not give it a shot on your own before peeking at the step-by-step guide? Challenge yourself! 💪
18 |
19 | ## Step-by-Step Guide
20 |
21 | Here’s how to implement the solution we discussed:
22 |
23 | 1. Start with a Dockerfile that specifies the first stage using Node.js (e.g., `FROM node:22-alpine AS build`).
24 | 2. Set your working directory to `/app` (use `WORKDIR /app`).
25 | 3. Copy your `package.json` and `package-lock.json` files into the work directory (`COPY package*.json ./`).
26 | 4. Install your dependencies with `RUN npm ci` (a clean, lockfile-exact alternative to `npm install`).
27 | 5. Add another stage by specifying the distroless base image (e.g., `FROM gcr.io/distroless/nodejs`).
28 | 6. Set the work directory again (`WORKDIR /app`).
29 | 7. Use the `COPY --from=build /app/node_modules ./node_modules` command to copy the `node_modules` folder from the first stage.
30 | 8. Copy your application's source code to this stage (`COPY . .`).
31 | 9. Specify the command to run your application, e.g., `CMD ["your-app-file.js"]`.
32 |
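33 | Assembled, the Dockerfile might look like this sketch (the distroless tag and the entry file name are assumptions; substitute your own):
34 |
35 | ```dockerfile
36 | # Stage 1: install dependencies on a full Node.js image
37 | FROM node:22-alpine AS build
38 | WORKDIR /app
39 | COPY package*.json ./
40 | RUN npm ci
41 |
42 | # Stage 2: run on a distroless image (no shell, no package manager)
43 | FROM gcr.io/distroless/nodejs22-debian12
44 | WORKDIR /app
45 | COPY --from=build /app/node_modules ./node_modules
46 | COPY . .
47 | # The distroless Node.js images use `node` as their entrypoint,
48 | # so CMD only needs the script to run
49 | CMD ["index.js"]
50 | ```
51 |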
33 | With these steps, you will create an efficient Docker image that utilizes multi-stage builds nicely!
34 |
35 | ## Conclusion
36 |
37 | Great work on exploring multi-stage builds and distroless images! We've learned how to separate the installation of dependencies from the running of our application, creating a more efficient and secure environment. Keep practicing these concepts, as they will serve you well in your Docker journey. Remember, the key takeaway is understanding how to leverage multiple stages to streamline your Docker builds. Keep up the great work, and let's keep that curiosity alive! 🚀
38 |
--------------------------------------------------------------------------------
/_exercises/08-advanced_topics/05-user_defined_networks.md:
--------------------------------------------------------------------------------
1 | # User-Defined Networks in Docker
2 |
3 | Welcome! In this guide, we'll explore how to create and manage user-defined networks in Docker, which will allow our containers to communicate with one another seamlessly. 🤝
4 |
5 | ## Overview
6 |
7 | Before diving into the details, let’s set the stage. Your goal is to implement user-defined networks that enable containers to connect and interact more effectively. Here’s a high-level plan for you to try on your own before checking the step-by-step guide:
8 |
9 | 1. **Use the Docker network command with the help flag** to see available options.
10 | 2. **Create your user-defined network** using `docker network create`.
11 | 3. **Connect a container to the new network** using the `docker network connect` command.
12 | 4. **Inspect the connections** to verify that the container is indeed linked to the network.
13 | 5. **Test the communication** between the connected containers.
14 | 6. **Clean up** by removing any containers and networks used during the exercise.
15 |
16 | Now, give it a shot! Implement the solution above and see how it goes before looking at the detailed instructions below.
17 |
18 | ## Step-by-Step Guide
19 |
20 | 1. **Open your terminal.**
21 | 2. **List Docker networks:** Run `docker network ls`.
22 | 3. **Create a new network:**
23 | ```bash
24 | docker network create app-net
25 | ```
26 | 4. **Spin up a web server container:**
27 | ```bash
28 | docker run -d --name web-server nginx
29 | ```
30 | 5. **Connect the web server to your new network:**
31 | ```bash
32 | docker network connect app-net web-server
33 | ```
34 | 6. **Inspect the network to confirm connection:**
35 | ```bash
36 | docker network inspect app-net
37 | ```
38 | 7. **Start another container and connect it to the network:**
39 | ```bash
40 | docker run -it --network app-net alpine sh
41 | ```
42 | 8. **Install curl in the Alpine container:**
43 | ```bash
44 | apk add curl
45 | ```
46 | 9. **Test communication with the web server:**
47 | ```bash
48 | curl web-server
49 | ```
50 | 10. **Exit the interactive container.**
51 | 11. **Remove all containers:**
52 | ```bash
53 | docker rm $(docker ps -aq)
54 | ```
55 | 12. **Remove the created network:**
56 | ```bash
57 | docker network rm app-net
58 | ```
59 |
60 | ## Conclusion
61 |
62 | In this session, we learned how to create user-defined networks in Docker, which not only allows for container communication but also provides isolation between networks. Remember, utilizing networks effectively is crucial for building scalable and organized applications. Keep practicing these skills, and don’t hesitate to experiment with more complex network setups! You’ve got this! 🚀
63 |
--------------------------------------------------------------------------------
/_exercises/08-advanced_topics/04-default_bridge_network.md:
--------------------------------------------------------------------------------
1 | # Networking in Docker: Understanding the Default Bridge Network
2 |
3 | ## Overview
4 |
5 | Welcome back! In this session, we're diving into an essential aspect of Docker: networking. Our focus will be on the default bridge network, how it works, and some of its limitations. Before we jump into the detailed steps, let’s take a moment to think through what you can try to implement on your own.
6 |
7 | Here’s a summary of the main steps:
8 |
9 | 1. Run a container using the default bridge network.
10 | 2. Inspect the bridge network to check the connected container details.
11 | 3. Attempt to access the container using its name and then by its IP address.
12 | 4. Understand and note the limitations of using the default bridge network.
13 |
14 | Give these steps a try before comparing your results with the guidelines below! 🐳
15 |
16 | ## Step-by-Step Guide
17 |
18 | 1. **Run a Container**: Start by running a container (like Nginx) in detached mode and giving it a name, such as `web_server`.
19 |
20 | ```bash
21 | docker run -d --name web_server nginx:1.27.0
22 | ```
23 |
24 | 2. **Inspect the Bridge Network**: Execute the command to inspect the bridge network.
25 |
26 | ```bash
27 | docker network inspect bridge
28 | ```
29 |
30 | 3. **Get Container IP**: Look for the container's IP address in the inspection output.
31 |
32 | 4. **Run another Container**: Start a new container (like Ubuntu) and verify it runs.
33 |
34 | ```bash
35 | docker run -it ubuntu:24.04 /bin/bash
36 | ```
37 |
38 | 5. **Install curl**: Within the Ubuntu container, update the package list and install curl.
39 |
40 | ```bash
41 | apt update && apt install curl -y
42 | ```
43 |
44 | 6. **Test Connectivity**: Try to reach the Nginx container first by its IP address using curl.
45 |
46 | ```bash
47 | curl <container_ip>
48 | ```
49 |
50 | 7. **Verify Name Resolution**: Now, try to access the Nginx container using its name:
51 |
52 | ```bash
53 | curl web_server
54 | ```
55 |
56 | You'll notice that this won't work due to DNS resolution limitations in the default bridge network.
57 |
58 | 8. **Cleanup**: Exit the Ubuntu container and remove both it and the Nginx container to clean up your environment.
59 | ```bash
60 | docker rm -f <ubuntu_container_id> web_server
61 | ```
62 |
63 | ## Conclusion
64 |
65 | In this session, we explored Docker's default bridge network. We learned how to connect containers and the importance of IP addresses, as relying on them can lead to difficulties if the containers are recreated. It’s crucial to understand these networking concepts as they form the foundation for more advanced Docker networking practices. Keep practicing and continue to deepen your understanding! 🚀
66 |
--------------------------------------------------------------------------------
/_exercises/01-running_containers/03-docker_cli_part2.md:
--------------------------------------------------------------------------------
1 | # Docker CLI: Working with Containers and Images
2 |
3 | Welcome to the next part of our Docker journey! In this session, we will dive deeper into some practical commands you'll use frequently when working with Docker containers and images.
4 |
5 | ## Overview
6 |
7 | In this exercise, the main objective is to understand how to interact with Docker containers, run commands inside them, and build simple images. By the end, you should be able to:
8 |
9 | 1. Run an NGINX container in detached mode.
10 | 2. View the logs of the running container.
11 | 3. Execute commands inside the container to interact with it.
12 | 4. Create a simple Dockerfile and build a custom image.
13 |
14 | Before peeking at the step-by-step guide, I encourage you to give this a try on your own! 🚀
15 |
16 | ## Step-by-Step Guide
17 |
18 | Here’s how to accomplish the tasks:
19 |
20 | 1. **Run an NGINX Container**: Use the `docker run` command with the appropriate flags to start the NGINX container in detached mode, mapping the ports correctly.
21 |
22 | ```bash
23 | docker run -d -p 80:80 --name web_server nginx
24 | ```
25 |
26 | 2. **Check Running Containers**: Verify that your container is running with:
27 |
28 | ```bash
29 | docker ps
30 | ```
31 |
32 | 3. **View Logs**: To check the logs of your running NGINX container, use:
33 |
34 | ```bash
35 | docker logs web_server
36 | ```
37 |
38 | 4. **Follow Logs Live**: To follow the logs as they occur, add the `-f` flag:
39 |
40 | ```bash
41 | docker logs -f web_server
42 | ```
43 |
44 | 5. **Execute Shell Inside Container**: If you need to run commands inside the NGINX container, use:
45 |
46 | ```bash
47 | docker exec -it web_server sh
48 | ```
49 |
50 | 6. **Create a Dockerfile**: Create a new file named `Dockerfile` and add the following:
51 |
52 | ```dockerfile
53 | FROM ubuntu:latest
54 | CMD ["echo", "Hello from my first Docker image!"]
55 | ```
56 |
57 | 7. **Build the Custom Image**: Run the build command in the directory where your Dockerfile is located:
58 |
59 | ```bash
60 | docker build -t my_first_image .
61 | ```
62 |
63 | 8. **Run Your Custom Image**: Finally, execute your newly built image:
64 | ```bash
65 | docker run my_first_image
66 | ```
67 |
68 | ## Conclusion
69 |
70 | You’ve learned some essential Docker commands today, such as running containers, viewing logs, and building custom images. This foundational knowledge is critical for diving deeper into Docker's powerful capabilities. Remember, the goal isn't to memorize everything but rather to understand the basics so you can practice and apply them as we progress through the course. Keep experimenting and don’t hesitate to ask questions when something isn’t clear! Happy learning! 🎉
71 |
--------------------------------------------------------------------------------
/_exercises/07-volumes/04-volumes_cli.md:
--------------------------------------------------------------------------------
1 | # Managing Docker Volumes with CLI
2 |
3 | Welcome to our guide on managing Docker volumes using the Command Line Interface (CLI)! 🎉 In this lecture, we'll dive deeper into how you can interact with volumes in Docker and ensure your containers are working smoothly with shared data.
4 |
5 | ## Overview
6 |
7 | Before you check out the step-by-step guide, why not give it a shot yourself? Here’s a quick rundown of what you’ll be working on:
8 |
9 | 1. **List Your Docker Volumes**: Start by checking the existing volumes with `docker volume ls`.
10 | 2. **Inspect a Volume**: Use `docker volume inspect ` to get details about a specific volume.
11 | 3. **Create a New Volume**: Create a new volume using `docker volume create `.
12 | 4. **Remove a Volume**: Attempt to remove a volume and understand why it may fail if it's still in use.
13 | 5. **Clean Up Dangling Volumes**: Learn how to remove unused volumes with filtering options.
14 |
15 | We encourage you to implement these steps before looking at our guide! You might surprise yourself with how much you can accomplish. 😊
16 |
17 | ## Step-by-Step Guide
18 |
19 | ### 1. List Existing Volumes
20 |
21 | Run the following command to see all your Docker volumes:
22 |
23 | ```bash
24 | docker volume ls
25 | ```
26 |
27 | ### 2. Inspect a Specific Volume
28 |
29 | To get more details on a specific volume, use:
30 |
31 | ```bash
32 | docker volume inspect <volume_name>
33 | ```
34 |
35 | Replace `<volume_name>` with the name of the volume you want to inspect.
36 |
37 | ### 3. Create a New Volume
38 |
39 | To create a new Docker volume, run:
40 |
41 | ```bash
42 | docker volume create <volume_name>
43 | ```
44 |
45 | Again, replace `<volume_name>` with your chosen name for the volume.
46 |
47 | ### 4. Attempt to Remove a Volume
48 |
49 | If you'd like to delete a volume, you can try:
50 |
51 | ```bash
52 | docker volume rm <volume_name>
53 | ```
54 |
55 | If you encounter an error, this means the volume is currently in use by one or more containers!
56 |
57 | ### 5. Stop and Remove Related Containers
58 |
59 | If a volume is in use, first stop the containers using the volume:
60 |
61 | ```bash
62 | docker stop $(docker ps -a -q)
63 | ```
64 |
65 | Then remove the containers:
66 |
67 | ```bash
68 | docker rm $(docker ps -a -q)
69 | ```
70 |
71 | ### 6. Clean Up Dangling Volumes
72 |
73 | To remove volumes that aren't associated with any containers, use:
74 |
75 | ```bash
76 | docker volume rm $(docker volume ls -f dangling=true -q)
77 | ```
78 |
79 | ## Conclusion
80 |
81 | And that's a wrap on managing Docker volumes! 🎉 Remember, the key commands you learned are essential for effectively managing data across your containers. Keep practicing these commands, and you'll become more comfortable navigating Docker in no time!
82 |
--------------------------------------------------------------------------------
/_exercises/08-advanced_topics/01-setting_cpus_containers.md:
--------------------------------------------------------------------------------
1 | # Setting CPU Constraints for Docker Containers
2 |
3 | ## Overview
4 |
5 | In this session, we'll explore how to set CPU constraints for your Docker containers to ensure that they don't overwhelm system resources. This is crucial, as unrestricted CPU usage can lead to performance issues in your environment. Before diving into the step-by-step guide, I encourage you to give the following implementation a try on your own.
6 |
7 | Here are the main steps to follow:
8 |
9 | 1. Use the `docker run` command to check the options for CPU settings.
10 | 2. Run a simple Docker container with specific CPU settings.
11 | 3. Experiment with CPU shares and compare how they manage resources under constraints.
12 | 4. Explore the `CPU quota` and `CPU period` settings for advanced resource management.
13 |
14 | Give it a go, and see how it works out for you. If you’re stuck, check below for the detailed guide!
15 |
16 | ## Step-by-Step Guide
17 |
18 | 1. **Inspect Docker CPU Options:**
19 | - Run `docker run --help` and filter for CPU options using `grep CPU`.
20 | 2. **Set up a Container with CPU Limits:**
21 |
22 | - Start a container in detached mode, naming it something like `cpu_decimals`, and set the `--cpus` option to specify decimal values (like `0.5`) for CPU usage.
23 |    - Use a command that keeps the container busy, such as `sh -c "while true; do :; done"` (see the sketch after this list).
24 |
25 | 3. **Monitor CPU Usage:**
26 |
27 | - While your container is running, check its CPU usage by executing `docker stats`.
28 |
29 | 4. **Use CPU Shares to Set Relative Weights:**
30 |
31 | - Stop the previous container using `docker kill cpu_decimals`.
32 | - Run another container with the `--cpu-shares` option. Play around with different values (like `512` vs `2048`) to see how they affect CPU allocation when resources are scarce.
33 |
34 | 5. **Experiment with CPU Quota and Period:**
35 |
36 | - Another approach is to define `--cpu-quota` and `--cpu-period` for further resource limitations. For instance, set the `--cpu-period` to `100000` microseconds and the `--cpu-quota` to `75000` for a 75% limit.
37 | - Run the container and again check `docker stats` to see the effect.
38 |
39 | 6. **Cleanup:**
40 | - Make sure to stop all running containers with `docker kill $(docker ps -q)` when you finish experimenting.
41 |
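42 | The three approaches side by side, as a sketch (the `ubuntu` image and the container names are illustrative):
43 |
44 | ```bash
45 | # Hard cap: at most half of one CPU, regardless of contention
46 | docker run -d --name cpu_decimals --cpus 0.5 ubuntu sh -c "while true; do :; done"
47 |
48 | # Relative weight: only matters when CPUs are contended
49 | docker run -d --name cpu_shares --cpu-shares 512 ubuntu sh -c "while true; do :; done"
50 |
51 | # Quota/period: 75000 of every 100000 microseconds, i.e. a 75% ceiling
52 | docker run -d --name cpu_quota --cpu-period 100000 --cpu-quota 75000 ubuntu sh -c "while true; do :; done"
53 |
54 | # Observe the differences, then clean up
55 | docker stats
56 | docker kill $(docker ps -q)
57 | ```
58 |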
42 | ## Conclusion
43 |
44 | Today, we learned about various methods to impose CPU constraints on Docker containers, ensuring they operate efficiently and don’t hog the system resources. From using `--cpus` and `--cpu-shares` to the more intricate settings like `--cpu-quota` and `--cpu-period`, these tools are essential for managing performance in a resource-limited environment. Keep practicing these techniques, and don't hesitate to explore more Docker functionalities as you continue learning! 🚀
45 |
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/03-dockerignore.md:
--------------------------------------------------------------------------------
1 | # Docker Ignore Files: Exclude Unwanted Files in Your Build
2 |
3 | Welcome! Today, we'll explore how to effectively use `.dockerignore` files to help you exclude unnecessary files from your Docker build context. This is particularly useful for keeping your images lean by avoiding test files and other large files that you don’t need in your production environment. 🐳
4 |
5 | ## Overview
6 |
7 | Before diving into a detailed guide, let's lay out the main steps involved in creating and utilizing a Docker Ignore file:
8 |
9 | 1. **Set up your project directory.** Create a structure that includes test files and source code.
10 | 2. **Create a `.dockerignore` file.** List the files and patterns you want Docker to ignore during the build process.
11 | 3. **Build your Docker image.** Ensure the build context does not include the ignored files.
12 | 4. **Verify the image contents.** Check that the undesired files are indeed excluded.
13 |
14 | Try to implement these steps yourself before looking at the detailed guide below! You may find surprises and learn quite a bit in the process! 🚀
15 |
16 | ## Step-by-Step Guide
17 |
18 | 1. **Set Up Your Project Directory:**
19 |
20 | - Create a folder for your project.
21 | - Inside this folder, create a structure resembling the following:
22 | ```
23 | /project
24 | /source
25 | /component1
26 | - component1.js
27 | - component1.test.js
28 | /component2
29 | - component2.js
30 | - component2.test.js
31 | - index.js
32 | ```
33 |
34 | 2. **Create a `.dockerignore` File:**
35 |
36 | - In the root of your project directory, create a file named `.dockerignore`.
37 | - Add lines to specify which files or patterns to ignore:
38 | ```
39 | some-large-file
40 | **/*.test.js
41 | ```
42 |
43 | 3. **Build Your Docker Image:**
44 |
45 | - Open your terminal and navigate to your project directory.
46 |    - Run the following command to build your Docker image (a minimal Dockerfile for this project is sketched after this list):
47 | ```bash
48 | docker build -t your-image-name .
49 | ```
50 |
51 | 4. **Verify the Image Contents:**
52 | - Determine which files are included by running:
53 | ```bash
54 | docker run --rm -it your-image-name sh
55 | ```
56 | - Inside the container, navigate to `/app/source/component1` and `/app/source/component2` to check that the test files are missing.
57 |
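58 | The build in step 3 assumes a minimal Dockerfile at the project root, along the lines of this sketch (the base image and paths are illustrative):
59 |
60 | ```dockerfile
61 | FROM node:22-alpine
62 | WORKDIR /app
63 | # Files matched by .dockerignore never reach the build context,
64 | # so this COPY cannot include the test files or the large file
65 | COPY . .
66 | CMD ["node", "source/index.js"]
67 | ```
68 |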
58 | ## Conclusion
59 |
60 | In this session, we discussed how to create a `.dockerignore` file and the conceptual benefit it brings to your Docker image builds. By excluding unnecessary files from your build context, you not only streamline the build process but also ensure that your production images remain clutter-free and efficient. Keep practicing, and soon you’ll find managing Docker builds becomes second nature!
61 |
--------------------------------------------------------------------------------
/_exercises/10-docker_compose/06-managing_service_dependencies.md:
--------------------------------------------------------------------------------
1 | # Managing Service Dependencies in Docker Compose
2 |
3 | ## Overview
4 |
5 | In this exercise, we will learn how to manage service dependencies in a Docker Compose environment. The main goal is to ensure that the backend service only starts once its dependencies, like the database service, are fully operational. 🚀 Here’s a quick summary of the steps to implement this on your own:
6 |
7 | 1. Create a `docker-compose.yml` file.
8 | 2. Define a backend service and a database service within this file.
9 | 3. Specify the `depends_on` configuration for the backend service to ensure it waits for the database service to be ready.
10 | 4. (Optional) Include additional dependencies, such as a caching service.
11 | 5. Use the terminal to bring the services up with `docker-compose up --build`.
12 | 6. Monitor the logs to verify that the services are running and dependencies are satisfied.
13 |
14 | Try to follow these steps on your own before checking out the detailed guide below!
15 |
16 | ## Step-by-Step Guide
17 |
18 | 1. **Create Your Docker Compose File**: Start by creating a `docker-compose.yml` file in your project directory. This file will contain the configuration for all your services.
19 | 2. **Define Your Services**:
20 |
21 | - Add a `backend` service.
22 | - Add a `database` service where the backend service will store and retrieve data.
23 |
24 | 3. **Add Dependency Configuration**:
25 |
26 |    - Under the `backend` service configuration, include the line `depends_on:` followed by the name of your `database` service. This tells Docker Compose that the backend service relies on the database being up before it can start (see the sketch after this list).
27 |
28 | 4. **Add Additional Dependencies (Optional)**:
29 |
30 | - If you need any more services, such as caching, add them under the same `depends_on` section.
31 |
32 | 5. **Run Docker Compose**:
33 |
34 | - Open your terminal and navigate to your project folder.
35 | - Run the command `docker-compose up --build`. This will build your images and start the services while considering dependencies.
36 |
37 | 6. **Monitor and Verify**:
38 | - As the services come up, observe the logs in your terminal. They will show you the status of each service and confirm that the backend is successfully connecting to the database.
39 | - To test your backend, send a request to one of its endpoints (like saving or retrieving data) to ensure everything is operational.
40 |
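41 | A sketch of the dependency wiring described above (the image names and the optional cache service are illustrative):
42 |
43 | ```yaml
44 | services:
45 |   backend:
46 |     build: ./backend
47 |     depends_on: # Compose starts these before the backend
48 |       - database
49 |       - cache # optional extra dependency from step 4
50 |   database:
51 |     image: mongo
52 |   cache:
53 |     image: redis
54 | ```
55 |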
41 | ## Conclusion
42 |
43 | In this lecture, we explored how to manage service dependencies in Docker Compose effectively. By using the `depends_on` configuration, we ensured that our backend service waits for the database service to be available before starting. This not only enhances the reliability of our application but also streamlines the development process. Keep experimenting with Docker Compose and integrate more features as you grow! Happy coding! 💻
44 |
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/05-env_vars_cli.md:
--------------------------------------------------------------------------------
1 | # Environment Variables in Docker: Implementation Guide 🌟
2 |
3 | ## Overview
4 |
5 | In this exercise, we’ll dive into using environment variables in Docker to configure your applications without modifying the Dockerfile each time. You'll learn how to set default values, override them during container launch, and explore using `.env` files for better management.
6 |
7 | Before checking out the step-by-step guide, let's try to implement the solution on your own! Here’s a quick summary of what you’ll do:
8 |
9 | 1. Clean up your Docker environment by removing existing containers and images.
10 | 2. Set up a Dockerfile and specify a default environment variable.
11 | 3. Build and run the Docker image.
12 | 4. Override the default environment variable when starting the container.
13 | 5. Experiment with different port assignments and verify the results.
14 |
15 | Give it a shot! Once you’re done, feel free to look at the guide below for further details.
16 |
17 | ## Step-by-Step Guide
18 |
19 | 1. **Clean the Docker Environment**:
20 |
21 |    - Use `docker kill $(docker ps -q)` to stop all running containers.
22 |    - Run `docker rm $(docker ps -aq)` to remove all containers.
23 | - Use `docker rmi -f $(docker images -q)` to force-remove all images (if necessary).
24 |
25 | 2. **Set Default Environment Variables in Dockerfile**:
26 |
27 | - Open your Dockerfile and add an environment variable like this:
28 | ```
29 | ENV APP_NAME="my awesome application"
30 | ```
31 |
32 | 3. **Build the Docker Image**:
33 |
34 | - Execute the command:
35 | ```bash
36 | docker build -t express:5000 .
37 | ```
38 |
39 | 4. **Run the Docker Container**:
40 |
41 | - Start your container by mapping the necessary ports:
42 | ```bash
43 | docker run -d -p 5000:5000 --name awesome_app express:5000
44 | ```
45 |
46 | 5. **Override Environment Variables**:
47 |
48 | - Run the container again, this time overriding the default port:
49 | ```bash
50 | docker run -e PORT=5001 -d -p 5001:5001 --name express-5001 express:5000
51 | ```
52 |
53 | 6. **Check the Output**:
54 |
55 | - Use `curl http://localhost:5001` to confirm your setup is working as expected.
56 | - You can also check logs using `docker logs express-5001` to see the running outputs.
57 |
58 | 7. **Experiment with Additional Variables**:
59 | - Try running containers with different environment variables and ports, such as:
60 | ```bash
61 | docker run -e PORT=8080 -d -p 8080:8080 --name express-8080 express:5000
62 | ```
63 |
64 | ## Conclusion
65 |
66 | Great job! 🎉 You've just learned how to manage environment variables with Docker, set defaults, and override them dynamically. This flexibility is key in making your Docker containers more adaptable and robust for development. Keep exploring and practicing, as there's always more to learn in the world of Docker!
67 |
--------------------------------------------------------------------------------
/_exercises/10-docker_compose/05-backend_service.md:
--------------------------------------------------------------------------------
1 | # Setting Up Your Back End Service with Docker
2 |
3 | Welcome to the lecture on setting up your back end service! 🌟 In this exercise, we’ll explore how to create a back end service using Docker, building our own image locally. Let’s try to implement the solution ourselves before diving into the detailed steps. Below is a brief overview of what we’ll be doing:
4 |
5 | ## Overview
6 |
7 | In this exercise, you will be implementing a back end service that leverages Docker to build your own image locally. Here are the main steps you should aim to follow:
8 |
9 | 1. Define your back end service using a Dockerfile and a `.dockerignore` file.
10 | 2. Pass the build option in your Docker configuration.
11 | 3. Set up the required ports and environment variables.
12 | 4. Attach your container to the existing network for database access.
13 | 5. Ensure your application connects to MongoDB correctly.
14 |
15 | Go ahead and give it a shot! Once you feel ready, check out the step-by-step guide below for complete instructions.
16 |
17 | ## Step-by-Step Guide
18 |
19 | 1. **Define the Back End Service**:
20 |
21 | - Use your `docker-compose.yml` file to set up the back end service.
22 | - Instead of using a pre-built image, specify the build context pointing to your back end directory.
23 |
24 | 2. **Set Build Options**:
25 |
26 | - Use the `build` option rather than `image`.
27 |    - Specify the path for your Dockerfile if it doesn't follow the default naming convention.
28 |
29 | 3. **Configure Ports**:
30 |
31 | - Set up the port mapping to allow external access. For example, map port `3000` on your host to port `3000` on your container.
32 |
33 | 4. **Use Environment Variables**:
34 |
35 | - Include your `.env` file for key-value pairs required for database authentication.
36 | - Define variables like `MONGO_DB_HOST` which should point to your database container.
37 |
38 | 5. **Attach to Network**:
39 |
40 | - Ensure your back end container is connected to the existing network so it can communicate with the database using `DB` as the hostname.
41 |
42 | 6. **Open Your Source Code**:
43 |
44 | - Go to your `server.js` file and ensure the connection settings match your environment variables, specifically for MongoDB.
45 |
46 | 7. **Combine Environment Variables** (if needed):
47 | - Use the environment keyword to define multiple sets of environment variables that your application may require.
48 |
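49 | A sketch of the resulting service definition (the network and variable names are assumptions based on the steps above; adjust them to your setup):
50 |
51 | ```yaml
52 | services:
53 |   backend:
54 |     build:
55 |       context: ./backend
56 |       dockerfile: Dockerfile # only needed for non-default file names
57 |     ports:
58 |       - "3000:3000"
59 |     env_file:
60 |       - .env
61 |     environment:
62 |       - MONGO_DB_HOST=db # the database container's hostname
63 |     networks:
64 |       - app-net
65 |
66 | networks:
67 |   app-net:
68 | ```
69 |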
49 | After you have set everything up, you can start your service with Docker Compose and test to see if everything is functioning as expected.
50 |
51 | ## Conclusion
52 |
53 | In this lecture, we learned how to configure a back end service using Docker by building our own image locally and managing environment variables. Don’t forget to keep practicing and exploring more about Docker. The more you dive into it, the more comfortable you’ll become! Keep up the great work! 🚀
54 |
--------------------------------------------------------------------------------
/_exercises/08-advanced_topics/06-host_network.md:
--------------------------------------------------------------------------------
1 | # Docker Fundamentals: Using Host Network and Port Binding
2 |
3 | ## Overview
4 |
5 | In this exercise, we’ll explore how to use Docker's host network, specifically focusing on container management and port bindings. The goal here is to understand how to effectively run containers using the host network and manage port conflicts. Before diving into the step-by-step guide, consider giving the following tasks a try on your own:
6 |
7 | 1. Run an NGINX container using the Docker host network.
8 | 2. Inspect the container to check its network settings.
9 | 3. Attempt to run a second NGINX container on the same port and analyze the results.
10 | 4. Create a user-defined Docker network and run an NGINX container with port mapping.
11 | 5. Test the port mapping to ensure that traffic is correctly routed to the container.
12 |
13 | Take a moment to implement these steps before looking at the detailed guide. It’s a great way to learn! 🚀
14 |
15 | ## Step-by-Step Guide
16 |
17 | Let's break down the process step by step:
18 |
19 | 1. **Running an NGINX Container:**
20 |
21 | - Use the command:
22 | ```bash
23 | docker run --network host nginx:1.27.0
24 | ```
25 | - This command runs an NGNX container connected to the host network.
26 |
27 | 2. **Inspecting the Container:**
28 |
29 | - To inspect the container, use:
30 | ```bash
31 | docker inspect <container_id>
32 | ```
33 | - Check the network settings to verify it is connected to the host network.
34 |
35 | 3. **Running a Second NGINX Container:**
36 |
37 | - Try to run another NGNX container on the same host network:
38 | ```bash
39 | docker run --network host nginx:1.27.0
40 | ```
41 | - Use the command `docker ps -a` to view any errors regarding the second container.
42 |
43 | 4. **Creating a User-Defined Network:**
44 |
45 | - Create a new network:
46 | ```bash
47 | docker network create app-net
48 | ```
49 |    - Run the NGINX container with port mapping:
50 | ```bash
51 | docker run --network app-net -p 80:80 nginx:1.27.0
52 | ```
53 |
54 | 5. **Testing Port Mapping:**
55 |
56 | - Use curl to test if the container responds:
57 | ```bash
58 | curl http://localhost
59 | ```
60 |
61 | 6. **Cleaning Up:**
62 | - Remove your containers:
63 | ```bash
64 | docker rm $(docker ps -a -q)
65 | ```
66 | - For cleaning up the network:
67 | ```bash
68 | docker network rm app-net
69 | ```
70 |
71 | ## Conclusion
72 |
73 | In this lecture, we dove into the concept of Docker's host network, the implications of running containers with this network, and the importance of managing port bindings to avoid conflicts. We also explored how user-defined networks offer a more isolated and secure environment for your containers. Keep practicing these concepts to deepen your understanding and skillset. Happy Dockering! 🐳
74 |
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/14-optimizing_images_runtime_deps.md:
--------------------------------------------------------------------------------
1 | # Optimizing Docker Images by Installing Only Runtime Dependencies
2 |
3 | Welcome to the guide on optimizing your Docker images by focusing specifically on your application's runtime dependencies. This session is all about ensuring that your Docker containers are lean, efficient, and secure! 🐳
4 |
5 | ## Overview
6 |
7 | In this exercise, we will learn how to craft Docker images that only include the necessary dependencies required for your application to run. This practice will help reduce the overall size of your images, speed up the build processes, and enhance security by minimizing the number of installed packages.
8 |
9 | Before looking at the step-by-step guide, I encourage you to give the implementation a try on your own! Here’s a quick summary of the main steps you’ll want to follow:
10 |
11 | 1. Analyze your current Docker setup to identify unnecessary development dependencies.
12 | 2. Modify your Dockerfile to use the `--only=production` flag when installing dependencies.
13 | 3. Test your Docker image to confirm it operates as expected.
14 | 4. Check the size of your Docker image before and after the change to see the improvements.
15 |
16 | Take a moment to try these steps yourself before consulting the detailed guide below!
17 |
18 | ## Step-by-Step Guide
19 |
20 | 1. **Access Your Dockerfile**: Start by reviewing your existing Dockerfile where you define your application’s dependencies.
21 | 2. **Identify Dependencies**: Note the packages listed in your `package.json` file and recognize which ones are purely for development (like TypeScript and testing frameworks).
22 |
23 | 3. **Modify the Dockerfile**: Change the command for installing dependencies. Use the command:
24 |
25 |    ```Dockerfile
26 | RUN npm install --only=production
27 | ```
28 |
29 |    This ensures only runtime dependencies are installed. (On newer npm versions, the equivalent flag is `--omit=dev`.)
30 |
31 | 4. **Rebuild Your Docker Image**: Use the following command to build your Docker image again:
32 |
33 |    ```bash
34 |    docker build -t <image_name>:<tag> -f Dockerfile .
35 | ```
36 |
37 | 5. **Run Your Docker Container**: Spin up a container from your optimized Docker image:
38 |
39 |    ```bash
40 |    docker run --rm -it <image_name>:<tag>
41 | ```
42 |
43 | 6. **Validate**: Test your application to ensure everything is functioning properly, and list your installed dependencies to confirm the reduction in size and unnecessary packages.
44 |
45 | 7. **Check Image Size**: Compare the size of the new image with the previous version (a comparison sketch is included at the end of this guide).
46 |
47 | ## Conclusion
48 |
49 | By focusing on installing only the necessary runtime dependencies, you have not only reduced the size of your Docker images but also sped up your build process and enhanced the overall security of your application. This small yet impactful change can lead to better performance and maintainability in your projects. Keep this practice in mind as you continue your Docker journey! 🌟
50 |
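51 | To make the before/after comparison in step 7 concrete, here's a sketch; the `my-app` image name and tags are assumptions, so substitute whatever you used:
52 |
53 | ```bash
54 | # Build once with all dependencies, once with runtime-only dependencies
55 | docker build -t my-app:full .
56 | # ...switch the install command to --only=production, then:
57 | docker build -t my-app:runtime-only .
58 | # Compare the two sizes side by side
59 | docker images my-app --format 'table {{.Tag}}\t{{.Size}}'
60 | ```
61 |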
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/13-optimizing_images_order.md:
--------------------------------------------------------------------------------
1 | # Optimizing Docker Images: Understanding Instruction Order
2 |
3 | Welcome! In this guide, we'll dive into the important topic of optimizing Docker images by rearranging the order of instructions in your Dockerfile. Our goal is to ensure you can effectively leverage Docker's caching mechanism to speed up your builds! 🚀 Before we get started with the step-by-step guide, here’s an overview of what you should aim to implement.
4 |
5 | ## Overview
6 |
7 | To optimize your Docker build process, focus on the order of commands in your Dockerfile. Here’s a simple list of what you should try to do:
8 |
9 | 1. Create a new Dockerfile named `Dockerfile.order`.
10 | 2. Copy the necessary commands from your existing Dockerfile into `Dockerfile.order`.
11 | 3. Start with stable commands (like dependency installations), followed by commands that are likely to change (like source code).
12 | 4. Test the differences in build times by modifying your source code and watching how the cache behaves.
13 | 5. Save and tag your final optimized Docker image properly.
14 |
15 | We encourage you to give this a try before going through the detailed steps below!
16 |
17 | ## Step-by-Step Guide
18 |
19 | 1. **Create the Dockerfile**: Open your IDE and create a new Dockerfile named `Dockerfile.order`.
20 |
21 | 2. **Copy Commands**: Start by copying the same commands you have in your original Dockerfile into this new one.
22 |
23 | 3. **Rearrange Instructions**:
24 |
25 | - First, copy the `package.json` and other dependency files.
26 | - Then, run the command to install dependencies (e.g., `npm ci`).
27 |    - Finally, add the commands to copy your application source code (a full reordered Dockerfile is sketched at the end of this guide).
28 |
29 | 4. **Run Build Command**: Use the terminal to build your Docker image by specifying the new Dockerfile. For example:
30 |
31 | ```bash
32 | docker build -t image_order:good -f Dockerfile.order .
33 | ```
34 |
35 | 5. **Test Changes**: Modify a line within your application code (e.g., add an exclamation mark in `index.js`). Then, rebuild the image to see how build time is affected.
36 |
37 | 6. **Review Cache Usage**: Check the output to see if Docker is able to leverage the cache effectively for your dependency installation.
38 |
39 | 7. **Tagging Images**: Be sure to tag your images appropriately based on the changes you made (for instance, using `good` for the optimized version and `not-so-good` for the inefficient one).
40 |
41 | ## Conclusion
42 |
43 | In summary, the key takeaway here is to place the commands that are less likely to change at the top of your Dockerfile, such as dependency installations, while leaving the commands that are more prone to change—like your application code—at the bottom. This strategy allows Docker to effectively cache the image layers, resulting in significantly reduced build times. So, let’s keep practicing this optimization technique and enhance our Docker skills further! Happy coding! 🎉
44 |
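45 | For reference, here's a sketch of what the reordered `Dockerfile.order` might look like (the base image and file layout are assumptions based on earlier exercises), written as a shell heredoc so you can paste it into a terminal:
46 |
47 | ```bash
48 | cat > Dockerfile.order <<'EOF'
49 | # Stable layers first: base image and dependency manifests
50 | FROM node:22-alpine
51 | WORKDIR /app
52 | COPY package*.json ./
53 | RUN npm ci
54 | # Frequently changing layers last: application source code
55 | COPY . .
56 | CMD ["node", "index.js"]
57 | EOF
58 | docker build -t image_order:good -f Dockerfile.order .
59 | ```
60 |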
--------------------------------------------------------------------------------
/_exercises/03-introduction_images/05-copy_local_html.md:
--------------------------------------------------------------------------------
1 | # Creating and Copying from a Local HTML File in Docker
2 |
3 | Welcome! In this guide, we’ll walk through the process of copying an `index.html` file from your local machine into a Docker image using a Dockerfile. This is a great practice to understand how to manage files within Docker containers, especially when customizing a web server like NGINX. 🌐
4 |
5 | ## Overview
6 |
7 | Before diving into the step-by-step guide, let's try to get a sense of what you'll implement. Here’s a high-level summary of the steps you should take:
8 |
9 | 1. Create your own `index.html` file in the Dockerfile's context directory.
10 | 2. Use the Dockerfile to copy this file into the appropriate directory inside the Docker image.
11 | 3. Change the ownership of the `index.html` file to the correct user and group (`nginx:nginx`).
12 | 4. Build the Docker image with your changes.
13 | 5. Run a container from your newly created image.
14 |
15 | Give it a shot! Try to implement these steps on your own before moving on to the detailed guide below.
16 |
17 | ## Step-by-Step Guide
18 |
19 | 1. **Create Your `index.html`**:
20 |
21 | - Start by creating a new `index.html` file in the directory where your Docker file is located. Add your custom HTML content with a welcoming message.
22 |
23 | 2. **Update the Dockerfile**:
24 |
25 |    - In your Dockerfile, use the `COPY` command to copy your `index.html` to the NGINX HTML directory:
26 | ```Dockerfile
27 | COPY index.html /usr/share/nginx/html/index.html
28 | ```
29 |
30 | 3. **Change Ownership**:
31 |
32 |    - Ensure that the HTML file has the right owner by adding the `RUN` command to change ownership to NGINX (the complete Dockerfile is sketched at the end of this guide):
33 | ```Dockerfile
34 | RUN chown nginx:nginx /usr/share/nginx/html/index.html
35 | ```
36 |
37 | 4. **Build the Docker Image**:
38 |
39 | - Run the following command to build your Docker image:
40 | ```bash
41 | docker build -t web_server_image .
42 | ```
43 |
44 | 5. **Run the Docker Container**:
45 |
46 | - Start your container with:
47 | ```bash
48 | docker run -d -p 80:80 web_server_image
49 | ```
50 |
51 | 6. **Test Your Setup**:
52 |
53 | - Use `curl http://localhost` to check if your custom HTML file is served correctly. If you encounter a `403 Forbidden` error, check the ownership of the file.
54 |
55 | 7. **Clean Up**:
56 |    - If you change your Dockerfile or the HTML file, don't forget to rebuild the image and rerun the container.
57 |
58 | ## Conclusion
59 |
60 | Congratulations on getting through this process! 🎉 You've successfully created a Docker file that copies an HTML file from your local machine into a Docker image and serves it through NGINX. Remember to always consider file ownership when working with Docker, as it can save you from confusing errors in the future. Keep practicing, and soon you'll be comfortable creating and managing Docker files and images with ease!
61 |
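62 | Putting steps 2 and 3 together, the complete Dockerfile is only a few lines. Here's a sketch (the NGINX tag is an assumption), again as a heredoc you can paste directly:
63 |
64 | ```bash
65 | cat > Dockerfile <<'EOF'
66 | FROM nginx:1.27.0
67 | # Copy the custom page and hand ownership to the nginx user
68 | COPY index.html /usr/share/nginx/html/index.html
69 | RUN chown nginx:nginx /usr/share/nginx/html/index.html
70 | EOF
71 | docker build -t web_server_image .
72 | ```
73 |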
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/06-env_files.md:
--------------------------------------------------------------------------------
1 | # Using .env Files for Simplified Docker Environment Variables
2 |
3 | ## Overview
4 |
5 | In this guide, we'll explore how to efficiently manage environment variables in Docker using .env files. Instead of adding each variable in the Docker run command, we'll learn how to create a more readable and structured approach using these files. Before you dive into the step-by-step guide, here’s a quick summary of what you’ll be doing:
6 |
7 | 1. Understand the importance of using .env files for managing environment variables.
8 | 2. Create a `.env` file for production settings.
9 | 3. Create a `.env` file for development settings.
10 | 4. Update the `.dockerignore` file to prevent sensitive information from being included in the Docker image.
11 | 5. Run your containers using the `.env` files you created.
12 |
13 | Give it a go! Try implementing the solution on your own before checking the detailed instructions below. 💪
14 |
15 | ## Step-by-Step Guide
16 |
17 | 1. **Update the `.dockerignore` File**: Open your `.dockerignore` file and add a line to exclude all `.env` files to protect sensitive information:
18 |
19 | ```
20 | *.env
21 | ```
22 |
23 | 2. **Create a Production `.env` File**:
24 |
25 | - Name it `.env.prod`.
26 | - Add the following lines, adjusting port and app name as necessary:
27 | ```
28 | PORT=9000
29 | APP_NAME="my prod app"
30 | ```
31 |
32 | 3. **Run the Production Container**: Use the command below to start your container with the production settings:
33 |
34 | ```bash
35 | docker run --env-file .env.prod -d -p 9000:9000 --name express-prod express
36 | ```
37 |
38 | 4. **Confirm Setup**: Check that the server is running on port 9000:
39 |
40 | ```bash
41 | docker logs express-prod
42 | curl http://localhost:9000
43 | ```
44 |
45 | 5. **Create a Development `.env` File**:
46 |
47 | - Name it `.env.dev`.
48 | - Use similar syntax to set the development environment:
49 | ```
50 | PORT=3000
51 | APP_NAME="my dev app"
52 | ```
53 |
54 | 6. **Run the Development Container**: Execute the command to run your development container:
55 |
56 | ```bash
57 | docker run --env-file .env.dev -d -p 3000:3000 --name express-dev express
58 | ```
59 |
60 | 7. **Verify Development Setup**: Confirm that your development app is running correctly on port 3000 (an extra check is sketched at the end of this guide):
61 | ```bash
62 | docker logs express-dev
63 | curl http://localhost:3000
64 | ```
65 |
66 | By following these steps, you'll have a more organized and secure way to manage your Docker environment variables. 🎉
67 |
68 | ## Conclusion
69 |
70 | In this session, we learned how to use .env files to handle environment variables more effectively in Docker. By creating separate files for production and development environments, we minimized clutter in our Docker commands and improved security by keeping sensitive information out of our images. Keep practicing with these concepts, as they are key to working with Docker efficiently.
71 |
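72 | If you want to double-check that the variables from each `.env` file actually reached the containers, here's a quick sketch (container names as used above):
73 |
74 | ```bash
75 | # Print only the variables we set via the env files
76 | docker exec express-prod env | grep -E '^(PORT|APP_NAME)='
77 | docker exec express-dev env | grep -E '^(PORT|APP_NAME)='
78 | ```
79 |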
--------------------------------------------------------------------------------
/_exercises/08-advanced_topics/03-restart_policies.md:
--------------------------------------------------------------------------------
1 | # Restart Policies in Docker: Managing Container Restarts
2 |
3 | ## Overview
4 |
5 | In this exercise, we will dive into Docker's restart policies that keep our containers running even when they stop or crash. Understanding these policies is essential for ensuring that your applications remain available and resilient. You will explore four primary restart policies: no restart, on-failure, always, and unless-stopped. Before you check out the detailed instructions below, let's try to implement this on your own! Here’s a quick summary of what you'll want to accomplish:
6 |
7 | 1. Start a container without any restart policy.
8 | 2. Implement the `on-failure` restart policy.
9 | 3. Learn to limit the number of restart attempts.
10 | 4. Use the `always` restart policy.
11 | 5. Differentiate between the `always` and `unless-stopped` policies.
12 |
13 | Feel free to give these steps a go before peeking at the step-by-step guide! 🚀
14 |
15 | ## Step-by-Step Guide
16 |
17 | ### 1. Start without a Restart Policy
18 |
19 | - Make sure no containers are running to start fresh. You can do this by using `docker ps` to check.
20 | - Run a container using the BusyBox image with the command `sleep 3; exit 1`. Use a name like `no_restart`.
21 | ```bash
22 |   docker run --name no_restart busybox sh -c "sleep 3; exit 1"
23 | ```
24 |
25 | ### 2. Implement `on-failure`
26 |
27 | - Remove the previous container:
28 | ```bash
29 |   docker rm no_restart
30 | ```
31 | - Now, run the container again with an `on-failure` restart policy:
32 | ```bash
33 |   docker run --name restart_on_failure --restart on-failure busybox sh -c "sleep 3; exit 1"
34 | ```
35 |
36 | ### 3. Limit Restart Attempts
37 |
38 | - Remove the previous container with `docker rm -f restart_on_failure`, then specify how many times Docker should try restarting:
39 | ```bash
40 |   docker run --name restart_on_failure --restart on-failure:3 busybox sh -c "sleep 3; exit 1"
41 | ```
42 | - Verify the restart count by inspecting the container:
43 | ```bash
44 |   docker inspect restart_on_failure | grep -i restart
45 | ```
46 |
47 | ### 4. Use the `always` Policy
48 |
49 | - Now, run another container with the `always` policy:
50 | ```bash
51 | docker run --name restart_always --restart always busybox sh -c "sleep 3; exit 1"
52 | ```
53 |
54 | ### 5. Understand `unless-stopped`
55 |
56 | - Finally, create a container with the `unless-stopped` policy (a side-by-side comparison is sketched at the end of this guide):
57 | ```bash
58 | docker run --name restart_unless_stopped --restart unless-stopped busybox sh -c "sleep 3; exit 1"
59 | ```
60 |
61 | ## Conclusion
62 |
63 | In this session, we explored various Docker restart policies, including how to implement them and the differences between them. These policies are fundamental for maintaining the uptime of your applications in production environments. Keep practicing creating and managing these containers, as understanding restart mechanisms will significantly aid your development workflow. Remember, Docker has plenty of features to explore, and every little bit of practice helps! Happy learning! 🌟
64 |
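65 | To compare the policies side by side, here's a sketch using the container names from this guide; `RestartCount` keeps growing for `always` and `unless-stopped`, while `on-failure:3` gives up after three attempts:
66 |
67 | ```bash
68 | # Status overview of all containers
69 | docker ps -a --format 'table {{.Names}}\t{{.Status}}'
70 | # Restart count and policy for a single container
71 | docker inspect restart_always --format '{{.RestartCount}} restarts, policy: {{.HostConfig.RestartPolicy.Name}}'
72 | # Key difference: after a manual `docker stop`, an `always` container comes
73 | # back when the Docker daemon restarts; `unless-stopped` stays down.
74 | ```
75 |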
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/11-multistage_update_dockerfile.md:
--------------------------------------------------------------------------------
1 | # Updating the Dockerfile for TypeScript Integration
2 |
3 | Welcome! This guide is aimed at helping you navigate through updating a Dockerfile to support your TypeScript integration. If you’re eager to get hands-on with Docker and TypeScript, you’re in the right place! Let’s break it down together.
4 |
5 | ## Overview
6 |
7 | In this exercise, we're focusing on refining our Dockerfile to accommodate the changes introduced by integrating TypeScript into our project. The goal is to set up our Docker environment in a way that properly builds our TypeScript files into JavaScript and sets up the necessary configuration.
8 |
9 | Here’s a quick summary of the steps you should try on your own before diving into the detailed guide:
10 |
11 | 1. Update your Docker Ignore file to exclude the `dist` directory.
12 | 2. Adjust the `COPY` commands in your Dockerfile to ensure the TypeScript source files and configuration are included properly.
13 | 3. Execute the build command for TypeScript within the Dockerfile to generate the `dist` directory.
14 | 4. Ensure that your Docker run command maps the correct ports and starts the server as expected.
15 |
16 | Take a moment to attempt these steps on your own – it's a great way to solidify your understanding! 💪
17 |
18 | ## Step-by-Step Guide
19 |
20 | Follow these steps to successfully update your Dockerfile for TypeScript:
21 |
22 | 1. **Edit the `.dockerignore` file**: Add the `dist` directory to prevent it from being copied into the Docker container. This ensures that we only work with the source files needed for the build.
23 |
24 | 2. **Update the Dockerfile**:
25 |
26 | - Move the `COPY` command for the source directory above the build command.
27 | - Add a `COPY` command for the `tsconfig.json` file to ensure TypeScript has the configuration it needs.
28 |    - Add a `COPY --from` command so that `node_modules` also ends up in the final stage, guaranteeing everything needed at runtime is present.
29 |
30 | 3. **Build the TypeScript files**: Add a `RUN npm run build` instruction to your Dockerfile after copying the source files, to generate the `dist` directory.
31 |
32 | 4. **Set up the CMD instruction**: Make sure to define the CMD instruction correctly to execute your application. The final command should look something like `node dist/index.js`, adhering to the structure of a distroless Docker image.
33 |
34 | 5. **Verify your setup**: Run the build command and then the run command in Docker. Check for any errors and see if the server starts correctly. Test connectivity by accessing the application via `curl http://localhost:3000`. (A sketch of the full Dockerfile is included at the end of this guide.)
35 |
36 | ## Conclusion
37 |
38 | Congratulations on successfully updating your Dockerfile to accommodate TypeScript! 🎉 We went through some essential steps to make sure our Docker environment is correctly set up for the TypeScript integration. Don't forget to keep practicing and experimenting with these configurations; it's the best way to learn. If you have questions, feel free to reach out in the Q&A section!
39 |
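40 | For orientation, here's a sketch of what the finished Dockerfile might look like after these changes; the stage name, paths, and the distroless tag are assumptions, so adapt them to your project:
41 |
42 | ```bash
43 | cat > Dockerfile <<'EOF'
44 | FROM node:22-alpine AS build
45 | WORKDIR /app
46 | COPY package*.json tsconfig.json ./
47 | RUN npm ci
48 | COPY src ./src
49 | RUN npm run build
50 |
51 | FROM gcr.io/distroless/nodejs22-debian12
52 | WORKDIR /app
53 | COPY --from=build /app/node_modules ./node_modules
54 | COPY --from=build /app/dist ./dist
55 | # The distroless Node image already uses `node` as its entrypoint
56 | CMD ["dist/index.js"]
57 | EOF
58 | ```
59 |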
--------------------------------------------------------------------------------
/_exercises/10-docker_compose/01-run_mongodb_docker_compose.md:
--------------------------------------------------------------------------------
1 | # Running MongoDB with Docker Compose
2 |
3 | Welcome! In this session, we’re going to dive into using Docker Compose to set up a MongoDB database. This is a fantastic opportunity to practice your Docker skills, and I encourage you to try implementing the steps on your own before looking at the details below. Ready? Let’s go!
4 |
5 | ## Overview
6 |
7 | In this exercise, you will create a MongoDB database using Docker Compose instead of Docker run commands. Here’s a high-level summary of what you’ll need to do:
8 |
9 | 1. Create a new file named `compose.yml` at the top level of your project folder.
10 | 2. Configure the `compose.yml` file to declare services, specifying the MongoDB image and the desired port mappings.
11 | 3. Use the terminal to run `docker-compose up` and observe the logs.
12 | 4. Verify that your MongoDB container is running and check the created networks.
13 | 5. Access the MongoDB shell to confirm the database setup.
14 | 6. Finally, shut down the container gracefully.
15 |
16 | Take a moment to attempt these steps on your own. Once you’ve given it a shot, you can refer to the step-by-step guide below for assistance. 🚀
17 |
18 | ## Step-by-Step Guide
19 |
20 | Here’s a concise guide to walk you through the process:
21 |
22 | 1. **Create `compose.yml`:**
23 |
24 | - In your project folder, create a file called `compose.yml`.
25 |
26 | 2. **Define Services in `compose.yml`:**
27 |
28 | - Open `compose.yml` and define a service for your MongoDB. Set the image to `mongo:7.0-ubuntu2204`, and add port mappings for 27017.
29 |
30 | Example `compose.yml`:
31 |
32 | ```yaml
33 | version: '3.8'
34 | services:
35 | mongodb:
36 | image: mongo:7.0-ubuntu2204
37 | ports:
38 | - '27017:27017'
39 | ```
40 |
41 | 3. **Run Docker Compose:**
42 |
43 | - Open your terminal and navigate to your project directory where the `compose.yml` file is located.
44 | - Run the command:
45 | ```bash
46 | docker-compose up
47 | ```
48 |
49 | 4. **Check Running Containers and Networks:**
50 |
51 | - In another terminal window, run:
52 | ```bash
53 | docker ps
54 | ```
55 |    - This will show your running MongoDB container. (A sketch for inspecting the Compose-created network is included at the end of this guide.)
56 |
57 | 5. **Access MongoDB Shell:**
58 |
59 | - To access the MongoDB shell, run:
60 | ```bash
61 |      docker run -it --network compose_default --rm mongo:7.0-ubuntu2204 mongosh --host mongodb --port 27017
62 | ```
63 | - Check that your databases are accessible!
64 |
65 | 6. **Stop the Container:**
66 | - Finally, stop the MongoDB container gracefully by running:
67 | ```bash
68 | docker-compose down
69 | ```
70 |
71 | ## Conclusion
72 |
73 | Congratulations on successfully setting up MongoDB with Docker Compose! 🎉 You’ve just taken a big step towards mastering Docker and simplifying your development workflow. Keep experimenting, and don’t hesitate to revisit these steps if you need more practice. The more you play around, the more comfortable you’ll become with Docker tools.
74 |
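75 | To inspect the network Compose created for you (step 4), here's a sketch; the `compose_default` name assumes your project folder is called `compose`, since Compose prefixes the network with the folder name:
76 |
77 | ```bash
78 | docker network ls
79 | # List the containers attached to the default network
80 | docker network inspect compose_default --format '{{range .Containers}}{{.Name}} {{end}}'
81 | ```
82 |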
--------------------------------------------------------------------------------
/_exercises/08-advanced_topics/02-setting_memory.md:
--------------------------------------------------------------------------------
1 | # Memory Management in Docker Containers
2 |
3 | Welcome! In this section, we'll explore how to effectively allocate and manage memory for your Docker containers. Understanding these concepts is crucial for ensuring your applications run smoothly without consuming excessive resources. Let's dive into how to set memory limits, reservations, and swaps for your containers! 🚀
4 |
5 | ## Overview
6 |
7 | Before we break it down step-by-step, here’s a high-level summary of what you should try to implement in this exercise. I encourage you to give it a go yourself before referring to the detailed guide below:
8 |
9 | 1. Ensure there are no running containers with `docker ps -q` and remove them.
10 | 2. Run a MongoDB container with specific memory limits.
11 | 3. Experiment with different memory settings: limits, reservations, and swap.
12 | 4. Use the `docker stats` command to monitor memory usage.
13 | 5. Inspect containers that have exited to understand why they were terminated.
14 |
15 | Ready to dive in? Give these steps a try, and once you're done, check out the step-by-step guide for more clarity!
16 |
17 | ## Step-by-Step Guide
18 |
19 | 1. **Cleanup Running Containers**: Start by ensuring no other containers are running. Use the command `docker ps -q` and remove any active containers.
20 |
21 | 2. **Start a MongoDB Container**: Run a MongoDB container in detached mode, utilizing the following command:
22 |
23 |    ```bash
24 | docker run -d --name mongodb --memory="100m" mongo:7.0-ubuntu2204
25 | ```
26 |
27 | 3. **Monitor Memory Usage**: While the container is running, use `docker stats` to observe the memory usage.
28 |
29 | 4. **Adjust Memory Settings**:
30 |
31 | - Stop and remove the MongoDB container.
32 | - Experiment with different memory limits by setting:
33 |      ```bash
34 | docker run -d --name mongodb --memory="20m" mongo:7.0-ubuntu2204
35 | ```
36 |    - This should result in the container being OOM-killed, which you can check via `docker inspect` (a sketch is included at the end of this guide).
37 |
38 | 5. **Set Memory Reservation**:
39 |
40 | - Use a command that sets memory reservations alongside the limit:
41 |      ```bash
42 | docker run -d --name mongodb --memory="100m" --memory-reservation="80m" mongo:7.0-ubuntu2204
43 | ```
44 |
45 | 6. **Explore Memory Swap Options**: Introduce a memory swap:
46 |
47 |    ```bash
48 | docker run -d --name mongodb --memory="20m" --memory-swap="200m" mongo:7.0-ubuntu2204
49 | ```
50 |
51 | 7. **Check Container Status**: Use `docker ps` and `docker stats` to confirm that the container is running and observe memory behavior.
52 |
53 | 8. **Final Cleanup**: Stop and remove the MongoDB container once you're finished testing.
54 |
55 | ## Conclusion
56 |
57 | Understanding how to allocate and manage memory in Docker is essential for building robust applications. By setting limits and reservations, you can prevent your containers from monopolizing host resources, ensuring a smoother and more efficient deployment. Keep experimenting with these settings as you continue your Docker journey! 👍
58 |
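59 | For step 4, here's a sketch of how to confirm that the 20m container was killed by the kernel's OOM killer rather than exiting on its own:
60 |
61 | ```bash
62 | docker inspect mongodb --format 'OOMKilled={{.State.OOMKilled}} ExitCode={{.State.ExitCode}}'
63 | # OOMKilled=true (typically with exit code 137) means the memory limit was hit
64 | ```
65 |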
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/12-optimizing_images_image_size.md:
--------------------------------------------------------------------------------
1 | # Optimizing Docker Images: Reducing Image Size
2 |
3 | Welcome! In this guide, we'll explore strategies to optimize Docker images, particularly focusing on reducing image sizes while maintaining functionality. Let's dive in!
4 |
5 | ## Overview
6 |
7 | In this exercise, we'll cover best practices to create Docker images that are smaller, faster, and more efficient. Before jumping to the detailed instructions, I encourage you to try to implement these steps on your own:
8 |
9 | 1. Create a new clean directory for your project.
10 | 2. Initialize a new Node.js project and install necessary dependencies.
11 | 3. Write your first Dockerfile using a suitable base image.
12 | 4. Build your Docker image and observe the size.
13 | 5. Experiment with different base images like `node:slim` and `node:alpine`.
14 |
15 | Give it a go! After you try these steps, you can refer to the step-by-step guide below for detailed instructions. 🚀
16 |
17 | ## Step-by-Step Guide
18 |
19 | 1. **Create a New Directory**:
20 |
21 | - Open your terminal and create a new clean directory. This will help keep your workspace organized.
22 |
23 | 2. **Initialize a Node.js Project**:
24 |
25 | - Navigate to the new directory and run:
26 | ```bash
27 | npm init -y
28 | ```
29 | - Install required dependencies:
30 | ```bash
31 | npm install express@4.19.2 --save-exact
32 | npm install typescript@5.5.3 @types/express@4.17.21 --save-dev --save-exact
33 | ```
34 |
35 | 3. **Create Your Dockerfile**:
36 |
37 | - In your IDE, create a `Dockerfile.size`. Start with the following content:
38 | ```Dockerfile
39 | FROM node:22
40 | WORKDIR /app
41 | COPY package*.json ./
42 | RUN npm install
43 | COPY index.js ./
44 | CMD ["node", "index.js"]
45 | ```
46 | - Create an `index.js` file with a simple console log statement:
47 | ```javascript
48 | console.log('Hello World');
49 | ```
50 |
51 | 4. **Build Your Docker Image**:
52 |
53 | - In the terminal, build your Docker image:
54 | ```bash
55 | docker build -t image-size:vanilla -f Dockerfile.size .
56 | ```
57 | - Check the image size using:
58 | ```bash
59 | docker images
60 | ```
61 |
62 | 5. **Experiment with Smaller Base Images**:
63 | - Modify your Dockerfile to use smaller base images, first changing it to:
64 | ```Dockerfile
65 | FROM node:22-slim
66 | ```
67 | - Rebuild your Docker image and check the size.
68 |    - Repeat this process with `FROM node:22-alpine` to see the impact on both size and build time (a side-by-side comparison is sketched at the end of this guide).
69 |
70 | ## Conclusion
71 |
72 | Throughout this exercise, we’ve highlighted how optimizing Docker images can significantly affect build times and image sizes. By selecting appropriate base images and understanding the implications of development dependencies, you can create images that are efficient and tailored to your needs. Keep experimenting with different configurations, and you'll become more adept at optimizing Docker images. Happy coding! 🐳
73 |
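74 | To line the three variants up side by side, here's a sketch (the tags are assumptions; edit the `FROM` line between builds as described in step 5):
75 |
76 | ```bash
77 | docker build -t image-size:vanilla -f Dockerfile.size .
78 | # ...change FROM to node:22-slim, then:
79 | docker build -t image-size:slim -f Dockerfile.size .
80 | # ...change FROM to node:22-alpine, then:
81 | docker build -t image-size:alpine -f Dockerfile.size .
82 | docker images image-size --format 'table {{.Tag}}\t{{.Size}}'
83 | ```
84 |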
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/10-multistage_adding_typescript.md:
--------------------------------------------------------------------------------
1 | # Integrating TypeScript into Our Docker Workflow
2 |
3 | Welcome! In this guide, we’ll dive into integrating TypeScript into our Docker project using multi-stage builds. This will be a hands-on exercise, so get ready to roll up your sleeves! Before you check out the step-by-step instructions, I encourage you to give it a shot yourself. Here’s a quick overview of what you should aim to accomplish:
4 |
5 | ## Overview
6 |
7 | ### What You'll Do:
8 |
9 | 1. **Install TypeScript** as a development dependency in your project.
10 | 2. **Initialize the TypeScript project** to create the necessary configuration file.
11 | 3. **Rename your main JavaScript file** from `index.js` to `index.ts` and adjust the import statements accordingly.
12 | 4. **Add a build script** in the `package.json` to compile TypeScript files into JavaScript.
13 | 5. **Set up the output directory** for compiled files in your TypeScript configuration.
14 | 6. **Run your application** to ensure everything works smoothly after the changes.
15 |
16 | Try to implement these steps before moving on to the detailed guide. Let's see what you can do! 🚀
17 |
18 | ## Step-by-Step Guide
19 |
20 | 1. **Install TypeScript:**
21 | Open your terminal and run the following commands to add TypeScript as a development dependency:
22 |
23 | ```bash
24 | npm install --save-dev typescript@5.5.3 @types/express@4.17.21
25 | ```
26 |
27 | 2. **Initialize TypeScript:**
28 | Initialize your TypeScript project by running:
29 |
30 | ```bash
31 | npx tsc --init
32 | ```
33 |
34 | 3. **Rename the Main File:**
35 | Change `index.js` to `index.ts`. This will let TypeScript know that this file is meant for TypeScript rather than JavaScript.
36 |
37 | 4. **Update Import Statements:**
38 | Change the `require` statement to the `import` syntax in `index.ts`:
39 |
40 |    ```typescript
41 | import express from 'express';
42 | ```
43 |
44 | 5. **Modify the TypeScript Configuration:**
45 | Open the `tsconfig.json` file and uncomment the `outDir` option, setting it to `dist`:
46 |
47 | ```json
48 | "outDir": "./dist"
49 | ```
50 |
51 | 6. **Add a Build Script:**
52 | In your `package.json`, add a new script for building the TypeScript project:
53 |
54 | ```json
55 | "scripts": {
56 | "build": "tsc"
57 | }
58 | ```
59 |
60 | 7. **Build and Run:**
61 | Execute the build command in your terminal:
62 |
63 | ```bash
64 | npm run build
65 | ```
66 |
67 | Now run your application using the compiled JavaScript:
68 |
69 | ```bash
70 | PORT=3000 node dist/index.js
71 | ```
72 |
73 |    Check that your application responds with "hello from express" when accessed (a compact rebuild-and-check loop is sketched at the end of this guide).
74 |
75 | ## Conclusion
76 |
77 | In this exercise, we learned how to integrate TypeScript into our Docker project with multi-stage builds. You successfully installed TypeScript, updated your project configuration, and ensured that your application runs as expected. Keep practicing and exploring TypeScript, as it adds a lot of value to your development process. Happy coding! 🌟
78 |
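79 | Once everything is wired up, the whole rebuild-and-verify loop fits in a few lines; here's a sketch (port and output directory as configured above):
80 |
81 | ```bash
82 | npm run build                           # compile TypeScript into dist/
83 | PORT=3000 node dist/index.js &          # start the compiled server in the background
84 | sleep 1 && curl http://localhost:3000   # expect: hello from express
85 | ```
86 |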
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/02-build_context.md:
--------------------------------------------------------------------------------
1 | # Understanding Build Contexts in Docker
2 |
3 | Welcome to the session where we dive into the fascinating world of Docker build contexts! 🎉 In this lecture, we’re going to explore what build contexts are, how they relate to Docker images, and why they matter in your Docker workflow. Before we get into the details, let's give you a chance to implement the solution on your own!
4 |
5 | ## Overview
6 |
7 | This lecture focuses on understanding the concept of build contexts in Docker. Here’s a quick roadmap for you to follow as you try to implement it yourself:
8 |
9 | 1. Create a simple `index.js` file that logs a message.
10 | 2. Write a Dockerfile that uses the Node.js Alpine version.
11 | 3. Set the working directory and copy the necessary files into the Docker container.
12 | 4. Run the Docker build command and observe the size of your image.
13 | 5. Experiment with larger files to see how Docker handles build contexts.
14 | 6. Modify the Dockerfile to copy only the required files.
15 | 7. Utilize a `.dockerignore` file to optimize your build context.
16 |
17 | Give it a shot! See if you can implement these steps before checking out the detailed guide below.
18 |
19 | ## Step-by-Step Guide
20 |
21 | Here's a clear and concise guide to help you on this journey:
22 |
23 | 1. **Create the `index.js` file:**
24 |
25 | - Inside your project directory, create a file named `index.js` containing `console.log('Hello from Node!');`.
26 |
27 | 2. **Write the Dockerfile:**
28 | - Create a file named `Dockerfile` with the following content:
29 | ```dockerfile
30 | FROM node:22-alpine
31 | WORKDIR /app
32 | COPY . .
33 | CMD ["node", "index.js"]
34 | ```
35 | 3. **Build your Docker image:**
36 |
37 | - Open a terminal in your project folder and run:
38 | ```bash
39 | docker build -t hello-from-node .
40 | ```
41 |
42 | 4. **Observe the Docker build output:**
43 |
44 | - Check how the image size reflects the context you provided and the files it includes.
45 |
46 | 5. **Test with large files:**
47 |
48 |    - Create a large file (e.g., using `mkfile -n 5g large-file` on macOS or `fallocate -l 5G large-file` on Linux) and build your image again to see how it affects the build context (a measurement sketch is included at the end of this guide).
49 |
50 | 6. **Optimize your Dockerfile:**
51 |
52 | - Modify your Dockerfile to copy only the required file instead of the entire context:
53 | ```dockerfile
54 | COPY index.js .
55 | ```
56 |
57 | 7. **Create a `.dockerignore` file (Optional):**
58 | - If you have large files or directories that aren't needed for your build, create a `.dockerignore` file to specify those files (for example, `large-file`).
59 |
60 | ## Conclusion
61 |
62 | Today, we learned about Docker build contexts and how they can significantly influence your Docker workflows. We discovered that the context is not just the set of files in your directory; it also impacts how efficiently Docker builds images. By managing your build context properly and using a `.dockerignore` file, you can create smaller, faster images. Keep experimenting and practicing, and remember, the more you explore, the better you’ll get! 🚀
63 |
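64 | If you want to put numbers on the context size (step 5), BuildKit prints it during the build; here's a sketch assuming the image name used above:
65 |
66 | ```bash
67 | fallocate -l 1G large-file   # on macOS: mkfile -n 1g large-file
68 | docker build -t hello-from-node . 2>&1 | grep -i 'transferring context'
69 | echo "large-file" > .dockerignore
70 | docker build -t hello-from-node . 2>&1 | grep -i 'transferring context'
71 | ```
72 |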
--------------------------------------------------------------------------------
/_exercises/03-introduction_images/06-images_vs_containers.md:
--------------------------------------------------------------------------------
1 | # Understanding Images and Containers in Docker
2 |
3 | Welcome to the guide on understanding the key differences between Docker images and containers! 🐳 This session is going to help clarify a topic that often confuses many newcomers to Docker. Before diving into the detailed steps, let's give you a chance to explore the solution on your own.
4 |
5 | ## Overview
6 |
7 | In this exercise, you will learn how to differentiate between Docker images and containers by creating and managing them effectively. The goal is to create a simple web server using Docker, and understand how modifying an image affects the containers created from it.
8 |
9 | Here’s a brief summary of the main steps you should try to implement the solution:
10 |
11 | 1. **Clean Your Docker Environment:** Stop and remove any existing containers.
12 | 2. **Modify the HTML File:** Customize an `index.html` file for your web server.
13 | 3. **Build a New Docker Image:** Use the Docker CLI to build your image with appropriate tagging.
14 | 4. **Run Your Docker Containers:** Start containers from the new image and verify they serve the updated content.
15 | 5. **Manage Versioning:** Understand how changing the image affects existing containers and learn to create new versions.
16 |
17 | Go ahead and give it a try! Implement the solution before checking the detailed steps below.
18 |
19 | ## Step-by-Step Guide
20 |
21 | 1. **Clean Your Docker Environment:**
22 |
23 | - Stop all running containers:
24 | ```bash
25 | docker stop $(docker ps -q)
26 | ```
27 | - Remove all containers:
28 | ```bash
29 | docker rm $(docker ps -aq)
30 | ```
31 | - Confirm that you have a clean slate with:
32 | ```bash
33 | docker images
34 | docker ps -a
35 | ```
36 |
37 | 2. **Modify the HTML File:**
38 |
39 | - Open your `index.html`, and change the welcome message to something unique (e.g., "Welcome to the blue NGINX").
40 |
41 | 3. **Build a New Docker Image:**
42 |
43 | - Run the build command in the terminal:
44 | ```bash
45 | docker build -t web_server_image:blue .
46 | ```
47 |
48 | 4. **Run Your Docker Containers:**
49 |
50 | - Start a new container:
51 | ```bash
52 | docker run -d -p 3000:80 --name blue web_server_image:blue
53 | ```
54 | - Verify that it works using curl:
55 | ```bash
56 | curl http://localhost:3000
57 | ```
58 |
59 | 5. **Manage Versioning:**
60 |    - Change your `index.html` again (e.g., "Welcome to the updated blue NGINX") and rebuild the image; a verification sketch is included at the end of this guide:
61 | ```bash
62 | docker build -t web_server_image:blue .
63 | ```
64 | - Start a new container for the updated image:
65 | ```bash
66 | docker run -d -p 3001:80 --name blue1 web_server_image:blue
67 | ```
68 |
69 | ## Conclusion
70 |
71 | Throughout this session, we learned how images serve as blueprints for creating containers and how changes to an image do not affect existing containers. Remember, whenever you want to push new code, it's typically better to build a new image and run new containers instead of trying to update the existing ones. Keep practicing these concepts to solidify your understanding of Docker! 🚀
72 |
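73 | Here's a quick sketch that verifies the point of step 5: the old container keeps serving the old content, and the two containers point at different image IDs:
74 |
75 | ```bash
76 | curl http://localhost:3000   # first container: original message
77 | curl http://localhost:3001   # new container: updated message
78 | docker inspect blue blue1 --format '{{.Name}} -> {{.Image}}'
79 | ```
80 |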
--------------------------------------------------------------------------------
/_exercises/07-volumes/03-named_volumes.md:
--------------------------------------------------------------------------------
1 | # Docker Fundamentals: Understanding Named Volumes
2 |
3 | Welcome! In this session, we’ll dive into the concept of named volumes in Docker. You’ll learn how these volumes allow containers to share data seamlessly, enhancing both persistence and collaboration among multiple containers. Before we jump in, let's give you a chance to try it out on your own!
4 |
5 | ## Overview
6 |
7 | In this exercise, we will implement named volumes to share data between multiple containers. Your task is to create a volume, mount it to an Nginx container, and observe how modifications in one container reflect in others, illustrating the power of Docker volumes.
8 |
9 | Here’s a quick summary of what you’ll be doing:
10 |
11 | 1. Create a named volume (let's call it `website-data`).
12 | 2. Run an Nginx container and mount the created volume.
13 | 3. Spin up additional Nginx containers that also mount the same volume.
14 | 4. Modify a file in one container and check the changes in all containers to observe data sharing.
15 | 5. Explore the possibilities of horizontal scaling by adding more containers that reference the same volume.
16 |
17 | Give it a shot! Try implementing the solution yourself before checking the step-by-step guide below! 🚀
18 |
19 | ## Step-by-Step Guide
20 |
21 | 1. **Create the volume**:
22 |
23 | ```bash
24 | docker volume create website-data
25 | ```
26 |
27 | 2. **Run the first Nginx container** and mount the volume:
28 |
29 | ```bash
30 | docker run -d --name website-main -p 3000:80 -v website-data:/usr/share/nginx/html nginx:1.27.0
31 | ```
32 |
33 | 3. **Run additional Nginx containers** (each with a different port) using the same volume:
34 |
35 | ```bash
36 | docker run -d --name website-readonly1 -p 3001:80 -v website-data:/usr/share/nginx/html:ro nginx:1.27.0
37 | docker run -d --name website-readonly2 -p 3002:80 -v website-data:/usr/share/nginx/html:ro nginx:1.27.0
38 | docker run -d --name website-readonly3 -p 3003:80 -v website-data:/usr/share/nginx/html:ro nginx:1.27.0
39 | ```
40 |
41 | 4. **Modify a file inside the main Nginx container**:
42 |
43 | - Access the container:
44 | ```bash
45 | docker exec -it website-main /bin/sh
46 | ```
47 | - Replace the content of `index.html` with:
48 | ```bash
49 |      echo "Hello World" > /usr/share/nginx/html/index.html
50 | ```
51 | - Exit the container.
52 |
53 | 5. **Check the modifications in the read-only containers**:
54 |    - Use curl to check each container or access them directly via your browser (accessible at localhost:3001, 3002, and 3003); a loop version is sketched at the end of this guide.
55 |
56 | By performing these steps, you'll clearly see how data in one volume can be shared across several containers, reinforcing the concept of data persistence and easy scalability.
57 |
58 | ## Conclusion
59 |
60 | Congratulations on successfully implementing your first named volume! 🎉 You've experienced firsthand how volumes help share data among containers and persist data outside the container lifecycle. These concepts are fundamental when building scalable applications in Docker. Keep practicing, and feel free to explore further use cases of named volumes in your projects. The more you experiment, the more proficient you’ll become!
61 |
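62 | And here's the loop version of the final check, as a small sketch (ports as used above):
63 |
64 | ```bash
65 | # Every replica should serve the updated file from the shared volume
66 | for port in 3000 3001 3002 3003; do curl -s "http://localhost:$port"; done
67 | ```
68 |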
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/07-cmd_entrypoint.md:
--------------------------------------------------------------------------------
1 | # Understanding Docker Commands and Entry Points
2 |
3 | ## Overview
4 |
5 | In this exercise, we will dive into the differences between the CMD and ENTRYPOINT instructions in a Dockerfile. Your task will be to create and experiment with different Dockerfiles to see how each command works in practice. Before moving on to the guide below, give these steps a try on your own:
6 |
7 | 1. Create a Dockerfile using CMD to echo a message.
8 | 2. Build and run a container from this Dockerfile.
9 | 3. Create a second Dockerfile using ENTRYPOINT to echo a message.
10 | 4. Build and run the container from the ENTRYPOINT Dockerfile.
11 | 5. Combine CMD and ENTRYPOINT in a third Dockerfile and see how they interact.
12 |
13 | Take some time to implement this before checking out the detailed steps below. You might surprise yourself with what you learn! 🚀
14 |
15 | ## Step-by-Step Guide
16 |
17 | 1. **Create a Dockerfile.cmd:**
18 | - Start with the `FROM alpine:3.20` line.
19 | - Add a CMD instruction to echo a message, e.g., `CMD ["echo", "Hello from CMD in Dockerfile.cmd"]`.
20 | 2. **Build the CMD Dockerfile:**
21 |
22 | - Open your terminal and navigate to the directory containing your Dockerfile.
23 | - Run the command:
24 | ```bash
25 | docker build -t cmd-example -f Dockerfile.cmd .
26 | ```
27 |
28 | 3. **Run the CMD Container:**
29 |
30 | - Use:
31 | ```bash
32 | docker run cmd-example
33 | ```
34 |
35 | 4. **Create a Dockerfile.entrypoint With ENTRYPOINT:**
36 |
37 | - In this Dockerfile, change the instruction to `ENTRYPOINT ["echo", "Hello from ENTRYPOINT in Dockerfile.entrypoint"]`.
38 |
39 | 5. **Build the ENTRYPOINT Dockerfile:**
40 |
41 | - Similar to CMD, run:
42 | ```bash
43 | docker build -t entrypoint-example -f Dockerfile.entrypoint .
44 | ```
45 |
46 | 6. **Run the ENTRYPOINT Container:**
47 |
48 | - Use:
49 | ```bash
50 | docker run entrypoint-example
51 | ```
52 |
53 | 7. **Create a Combined Dockerfile (No Extension):**
54 |
55 | - Start with `FROM alpine:3.20`.
56 | - Set an ENTRYPOINT, then use CMD to set a default message.
57 | - Example:
58 | ```dockerfile
59 | ENTRYPOINT ["echo"]
60 | CMD ["Default message"]
61 | ```
62 |
63 | 8. **Build the Combined Dockerfile:**
64 |
65 | - Execute:
66 | ```bash
67 | docker build -t cmd-entrypoint-example .
68 | ```
69 |
70 | 9. **Run the Combined Container:**
71 |
72 | - Simply run:
73 | ```bash
74 | docker run cmd-entrypoint-example
75 | ```
76 |
77 | 10. **Test Custom Messages:**
78 |     - Override the CMD default by passing a custom message (more override examples are sketched at the end of this guide):
79 | ```bash
80 | docker run cmd-entrypoint-example "Hello from my custom message!"
81 | ```
82 |
83 | ## Conclusion
84 |
85 | In this lecture, we explored the differences between CMD and ENTRYPOINT commands in Dockerfiles. We learned that CMD provides a default command that can be overridden, while ENTRYPOINT works as a fixed command that can be extended with additional parameters. By combining both, you can create more flexible and powerful Docker images. Keep experimenting and practicing these concepts as you continue your journey with Docker!
86 |
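87 | A few more override examples to round out step 10, as a sketch (image names as built above):
88 |
89 | ```bash
90 | # Trailing arguments replace CMD entirely:
91 | docker run cmd-example echo "this replaces the default CMD"
92 | # ENTRYPOINT can only be swapped explicitly:
93 | docker run --entrypoint ls entrypoint-example /
94 | ```
95 |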
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/04-env_vars_introduction.md:
--------------------------------------------------------------------------------
1 | # Exploring Environment Variables in Docker 🐳
2 |
3 | ## Overview
4 |
5 | In this exercise, we're diving into the crucial aspect of Docker: environment variables. The goal is for you to implement a simple Express application that utilizes environment variables to configure various settings, like the port. Before you check the step-by-step guide, give it a try on your own! Here’s a quick summary of the main steps to guide your exploration:
6 |
7 | 1. **Initialize a new npm project**.
8 | 2. **Install Express.js**.
9 | 3. **Create a basic Express application** that listens for requests.
10 | 4. **Create a Dockerfile** for containerization.
11 | 5. **Define an environment variable** for the application's port.
12 | 6. **Build and run your Docker container**, mapping ports as necessary.
13 | 7. **Verify the application is functioning by accessing the appropriate endpoint**.
14 |
15 | Take a shot at implementing these steps, and only refer to the guide if you get stuck!
16 |
17 | ## Step-by-Step Guide
18 |
19 | 1. **Set Up Your Project**:
20 |
21 | - Open your terminal in an empty folder.
22 | - Run `npm init -y` to create a new npm project.
23 | - Install Express.js with the command:
24 | ```bash
25 | npm install express@4.19.2 --save-exact
26 | ```
27 |
28 | 2. **Create Application Files**:
29 |
30 | - Create a `source` directory.
31 |    - Inside, create an `index.js` file with the following code:
32 |
33 | ```javascript
34 | const express = require('express');
35 | const app = express();
36 | const port = process.env.PORT || 3000;
37 |
38 | app.get('/', (req, res) => {
39 | res.send('Hello from Express!');
40 | });
41 |
42 | app.listen(port, () => {
43 | console.log(`Server listening on port ${port}`);
44 | });
45 | ```
46 |
47 | 3. **Set Up a Dockerfile**:
48 |
49 | - Create a `Dockerfile` in your project root with the following contents:
50 | ```dockerfile
51 | FROM node:22-alpine
52 | WORKDIR /app
53 | COPY package*.json ./
54 | RUN npm ci
55 | COPY . .
56 | CMD ["node", "source/index.js"]
57 | ```
58 |
59 | 4. **Create a `.dockerignore`**:
60 |
61 | - Add `node_modules` to the `.dockerignore` file to prevent unnecessary copying into the image.
62 |
63 | 5. **Add Environment Variable**:
64 |
65 | - Update the Dockerfile to include:
66 | ```dockerfile
67 | ENV PORT=5000
68 | ```
69 |
70 | 6. **Build the Docker Image**:
71 |
72 | - Run the following command in your terminal:
73 | ```bash
74 | docker build -t express-app-env-vars .
75 | ```
76 |
77 | 7. **Run Your Docker Container**:
78 | - Start your container with a command like:
79 | ```bash
80 | docker run -d -p 5000:5000 express-app-env-vars
81 | ```
82 |    - Test it by sending a request to `localhost:5000`. (A sketch of overriding the port at run time is included at the end of this guide.)
83 |
84 | ## Conclusion
85 |
86 | Congratulations! You’ve successfully built an Express application configured with environment variables and run it within a Docker container. Remember, environment variables play a crucial role in configuring applications across different environments, so mastering them is essential. Keep experimenting and practicing, as this knowledge will serve you well in your Docker journey. 🚀
87 |
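88 | One more experiment worth trying: the `ENV PORT=5000` default baked into the image can be overridden per container at run time; here's a sketch:
89 |
90 | ```bash
91 | docker run -d -p 8080:8080 -e PORT=8080 express-app-env-vars
92 | curl http://localhost:8080   # the app now listens on 8080 inside the container
93 | ```
94 |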
--------------------------------------------------------------------------------
/_exercises/10-docker_compose/03-bind_mounts.md:
--------------------------------------------------------------------------------
1 | # Working with Bind Mounts in Docker
2 |
3 | Welcome to this guide on using bind mounts in Docker! In this session, we'll delve into how to create and use a script to initialize a key-value database with the required user roles. Before we get started, I encourage you to take a shot at implementing the solution on your own. Ready for the challenge? Here’s a quick overview of the steps you’ll need to follow:
4 |
5 | ## Overview
6 |
7 | 1. Create a new folder named `db-config`.
8 | 2. Write a script named `mongo-init.js` to initialize your key-value database and set user roles.
9 | 3. Add a bind mount volume in your `compose.yaml` file to connect the script from your host to the container.
10 | 4. Confirm that the script runs successfully and initializes the database as expected.
11 | 5. Make any necessary adjustments and rerun your Docker containers to ensure everything is functioning properly.
12 |
13 | Give these steps a try before checking out the detailed guide below! 🌟
14 |
15 | ## Step-by-Step Guide
16 |
17 | 1. **Set Up the Script**:
18 |
19 |    - Create a folder called `db-config` in your project directory.
20 |    - Inside this folder, create a file named `mongo-init.js` (the lowercase name must match the path used in the volume mount below).
21 |    - In your `mongo-init.js`, use the following code snippet to set up your database:
22 |
23 | ```javascript
24 |    // This script runs once on the first container start (it is mounted
25 |    // into /docker-entrypoint-initdb.d). Switch to the key_value_db
26 |    // database, then create an application user with read/write access.
27 |    db = db.getSiblingDB('key_value_db');
28 |
29 |    db.createUser({
30 |      user: 'key_value_user',
31 |      pwd: 'key_value_password',
32 |      roles: [{ role: 'readWrite', db: 'key_value_db' }],
33 |    });
34 |
35 | ```
36 |
37 | 2. **Update the Docker Compose Configuration**:
38 |
39 | - Open your `compose.yaml` file.
40 | - Add a new volume to bind mount the `Mongo-init.js` script. You can do this in two ways, either the shorthand or the more explicit method:
41 | **Shorthand Syntax**:
42 | ```yaml
43 | volumes:
44 |      - ./db-config/mongo-init.js:/docker-entrypoint-initdb.d/mongo-init.js:ro
45 | ```
46 | **Explicit Syntax**:
47 | ```yaml
48 |    volumes:
49 |      # long (explicit) syntax for a service-level bind mount
50 |      - type: bind
51 |        source: ./db-config/mongo-init.js
52 |        target: /docker-entrypoint-initdb.d/mongo-init.js
53 |        read_only: true
54 | ```
55 |
56 | 3. **Run Your Docker Compose**:
57 |
58 | - Open your terminal and run the command:
59 | ```bash
60 | docker-compose up
61 | ```
62 | - Check the logs to see if your initialization script executed successfully.
63 |
64 | 4. **Verify Database Initialization**:
65 |
66 | - You can confirm the script’s execution by running:
67 | ```bash
68 |      docker-compose logs
69 | ```
70 | - You should see an initialization message in your logs.
71 |
72 | 5. **Test Database Access**:
73 | - Run a command to connect to your database and check if the collections have been set up correctly. Use the credentials defined in your script to ensure you can authenticate.
74 |
75 | ## Conclusion
76 |
77 | Great job on working through this process of setting up bind mounts in Docker! You've implemented a script to initialize a key-value database, which is a crucial step in database management. As we continue exploring Docker, remember that practice is key. Keep experimenting and playing around with these concepts, and you'll solidify your understanding in no time!
78 |
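79 | For step 5, here's a full connection command as a sketch; the network name assumes the default Compose network from the earlier exercises, and the credentials come from the init script above:
80 |
81 | ```bash
82 | docker run -it --rm --network compose_default mongo:7.0-ubuntu2204 \
83 |   mongosh --host mongodb --port 27017 \
84 |   -u key_value_user -p key_value_password --authenticationDatabase key_value_db
85 | ```
86 |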
--------------------------------------------------------------------------------
/_exercises/05-images_deep_dive/08-multistage_motivation.md:
--------------------------------------------------------------------------------
1 | # Multi-Stage Builds in Docker: Understanding the Concept and Implementation
2 |
3 | Hello! In this session, we’ll dive into a crucial topic in Docker: multi-stage builds. This powerful feature allows you to optimize your Docker images, leading to smaller, faster, and more secure containers. 🚀 Before we jump into the specifics, I encourage you to take a shot at implementing the solution yourself! Here’s a quick overview of the steps you’ll be working with.
4 |
5 | ## Overview
6 |
7 | Here’s a summary of the main steps you'll want to follow to implement multi-stage builds:
8 |
9 | 1. **Create a New Directory**: Start with an empty directory for your project.
10 | 2. **Initialize an NPM Project**: Set up your Node.js project and install necessary dependencies.
11 | 3. **Write Your Source Code**: Create a simple `index.js` file with basic express code.
12 | 4. **Construct Your Dockerfile**: Develop a Dockerfile that includes multiple stages for building and running your application.
13 | 5. **Build and Run Your Image**: Test the entire setup by building the Docker image and running it.
14 |
15 | Try to work through these steps on your own before looking at the detailed guide below!
16 |
17 | ## Step-by-Step Guide
18 |
19 | 1. **Set Up Your Project**:
20 |
21 | - Create a new empty directory called `multi-stage-builds`.
22 | - Inside this directory, initialize a new Node.js project using `npm init`.
23 |
24 | 2. **Install Dependencies**:
25 |
26 | - Install Express in your project with the command: `npm install express@4.19.2`.
27 |
28 | 3. **Create Directory Structure**:
29 |
30 |    - Create a folder named `src`.
31 |    - Inside `src`, create an `index.js` file for your application code.
32 |
33 | 4. **Write Your Application Code**:
34 |
35 | - Open `index.js` and add the following boilerplate code:
36 |
37 | ```javascript
38 | const express = require('express');
39 | const app = express();
40 |
41 | app.get('/', (req, res) => res.send('Hello from Express!'));
42 |
43 | const port = process.env.PORT || 3000;
44 | app.listen(port, () => console.log(`Server listening on port ${port}`));
45 | ```
46 |
47 | 5. **Setup the `.dockerignore` File**:
48 |
49 | - Create a `.dockerignore` file in your project root, adding `node_modules` to exclude it from the build context.
50 |
51 | 6. **Create Your Dockerfile**:
52 |
53 | - Start with the following basic structure:
54 | ```dockerfile
55 | FROM node:22-alpine
56 | WORKDIR /app
57 | COPY package*.json ./
58 | RUN npm install
59 |    COPY src/ ./src
60 |    CMD ["node", "src/index.js"]
61 | ```
62 |
63 | 7. **Build Your Docker Image**:
64 |
65 | - Run the build command:
66 | ```bash
67 | docker build -t express-image .
68 | ```
69 |
70 | 8. **Run Your Docker Container**:
71 |
72 | - Spin up your container with:
73 | ```bash
74 | docker run --rm -d -p 3000:3000 --name express-container express-image
75 | ```
76 |
77 | 9. **Test Your Application**:
78 |
79 | - Open a browser and navigate to `http://localhost:3000` to see your application in action.
80 |
81 | 10. **Experiment with Distroless Images (Optional)**:
82 |     - Try modifying your Dockerfile to use a distroless image and see how it impacts your build process (a working multi-stage sketch with a distroless runtime stage is included at the end of this guide).
83 |
84 | ## Conclusion
85 |
86 | In this lecture, we explored the concept of multi-stage builds in Docker, which helps create more efficient and secure images. By separating the build and run processes, we can take advantage of smaller, streamlined containers. Keep practicing these concepts and applying them in your projects. There’s always more to learn!
87 |
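88 | If you take on the optional step 10, here's a sketch of a working two-stage variant with a distroless runtime stage (the distroless tag is an assumption), written as a heredoc:
89 |
90 | ```bash
91 | cat > Dockerfile <<'EOF'
92 | # Stage 1: install dependencies with the full Node toolchain
93 | FROM node:22-alpine AS build
94 | WORKDIR /app
95 | COPY package*.json ./
96 | RUN npm ci --omit=dev
97 | COPY src/ ./src
98 |
99 | # Stage 2: copy the result into a minimal runtime image
100 | FROM gcr.io/distroless/nodejs22-debian12
101 | WORKDIR /app
102 | COPY --from=build /app ./
103 | CMD ["src/index.js"]
104 | EOF
105 | docker build -t express-image:distroless .
106 | ```
107 |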
--------------------------------------------------------------------------------
/notes-rest-api/notes-backend/src/routes.js:
--------------------------------------------------------------------------------
1 | const express = require('express');
2 | const mongoose = require('mongoose');
3 | const axios = require('axios');
4 | const { Note } = require('./models');
5 |
6 | const notebooksApiUrl = process.env.NOTEBOOKS_API_URL;
7 | const noteRouter = express.Router();
8 |
9 | const validateId = (req, res, next) => {
10 | const { id } = req.params;
11 |
12 | if (!mongoose.Types.ObjectId.isValid(id)) {
13 | return res.status(404).json({ error: 'Note not found.' });
14 | }
15 |
16 | next();
17 | };
18 |
19 | noteRouter.post('/', async (req, res) => {
20 | try {
21 | const { title, content, notebookId } = req.body;
22 |
23 | let validatedNotebookId = null;
24 |
25 | if (!notebookId) {
26 | console.info({
27 | message: 'Notebook ID not provided. Storing note without notebook.',
28 | });
29 | } else if (!mongoose.Types.ObjectId.isValid(notebookId)) {
30 | return res.status(400).json({ error: 'Notebook not found', notebookId });
31 | } else {
32 | try {
33 | await axios.get(`${notebooksApiUrl}/${notebookId}`);
34 | } catch (err) {
35 | const jsonError = err.toJSON();
36 |
37 | if (jsonError.status === 404) {
38 | return res
39 | .status(400)
40 | .json({ error: 'Notebook not found', notebookId });
41 | } else {
42 | console.error({
43 | message:
44 | 'Error verifying the notebook ID. Upstream notebooks service not available. Storing note with provided ID for later validation.',
45 | notebookId,
46 | error: err.message,
47 | });
48 | }
49 | } finally {
50 | validatedNotebookId = notebookId;
51 | }
52 | }
53 |
54 | if (!title || !content) {
55 | return res
56 | .status(400)
57 | .json({ error: "'title', 'content' fields are required." });
58 | }
59 |
60 | const note = new Note({ title, content, notebookId: validatedNotebookId });
61 | await note.save();
62 | res.status(201).json({ data: note });
63 | } catch (err) {
64 | res.status(500).json({ error: err.message });
65 | }
66 | });
67 |
68 | noteRouter.get('/', async (req, res) => {
69 | try {
70 | const notes = await Note.find();
71 | return res.status(200).json({ data: notes });
72 | } catch (err) {
73 | res.status(500).json({ error: err.message });
74 | }
75 | });
76 |
77 | noteRouter.get('/:id', validateId, async (req, res) => {
78 | try {
79 | const note = await Note.findById(req.params.id);
80 |
81 | if (!note) {
82 | return res.status(404).json({ error: 'Note not found.' });
83 | }
84 |
85 | return res.status(200).json({ data: note });
86 | } catch (err) {
87 | res.status(500).json({ error: err.message });
88 | }
89 | });
90 |
91 | noteRouter.put('/:id', validateId, async (req, res) => {
92 | try {
93 | const { title, content } = req.body;
94 |
95 | const note = await Note.findByIdAndUpdate(
96 | req.params.id,
97 | { title, content },
98 | { new: true }
99 | );
100 |
101 | if (!note) {
102 | return res.status(404).json({ error: 'Note not found.' });
103 | }
104 |
105 | return res.json({ data: note });
106 | } catch (err) {
107 | res.status(500).json({ error: err.message });
108 | }
109 | });
110 |
111 | noteRouter.delete('/:id', validateId, async (req, res) => {
112 | try {
113 | const note = await Note.findByIdAndDelete(req.params.id);
114 |
115 | if (!note) {
116 | return res.status(404).json({ error: 'Note not found.' });
117 | }
118 |
119 | return res.sendStatus(204);
120 | } catch (err) {
121 | res.status(500).json({ error: err.message });
122 | }
123 | });
124 |
125 | module.exports = {
126 | noteRouter,
127 | };
128 |
--------------------------------------------------------------------------------
/multistage-builds/tsconfig.json:
--------------------------------------------------------------------------------
1 | {
2 | "compilerOptions": {
3 | /* Visit https://aka.ms/tsconfig to read more about this file */
4 |
5 | /* Projects */
6 | // "incremental": true, /* Save .tsbuildinfo files to allow for incremental compilation of projects. */
7 | // "composite": true, /* Enable constraints that allow a TypeScript project to be used with project references. */
8 | // "tsBuildInfoFile": "./.tsbuildinfo", /* Specify the path to .tsbuildinfo incremental compilation file. */
9 | // "disableSourceOfProjectReferenceRedirect": true, /* Disable preferring source files instead of declaration files when referencing composite projects. */
10 | // "disableSolutionSearching": true, /* Opt a project out of multi-project reference checking when editing. */
11 | // "disableReferencedProjectLoad": true, /* Reduce the number of projects loaded automatically by TypeScript. */
12 |
13 | /* Language and Environment */
14 | "target": "es2016", /* Set the JavaScript language version for emitted JavaScript and include compatible library declarations. */
15 | // "lib": [], /* Specify a set of bundled library declaration files that describe the target runtime environment. */
16 | // "jsx": "preserve", /* Specify what JSX code is generated. */
17 | // "experimentalDecorators": true, /* Enable experimental support for legacy experimental decorators. */
18 | // "emitDecoratorMetadata": true, /* Emit design-type metadata for decorated declarations in source files. */
19 | // "jsxFactory": "", /* Specify the JSX factory function used when targeting React JSX emit, e.g. 'React.createElement' or 'h'. */
20 | // "jsxFragmentFactory": "", /* Specify the JSX Fragment reference used for fragments when targeting React JSX emit e.g. 'React.Fragment' or 'Fragment'. */
21 | // "jsxImportSource": "", /* Specify module specifier used to import the JSX factory functions when using 'jsx: react-jsx*'. */
22 | // "reactNamespace": "", /* Specify the object invoked for 'createElement'. This only applies when targeting 'react' JSX emit. */
23 | // "noLib": true, /* Disable including any library files, including the default lib.d.ts. */
24 | // "useDefineForClassFields": true, /* Emit ECMAScript-standard-compliant class fields. */
25 | // "moduleDetection": "auto", /* Control what method is used to detect module-format JS files. */
26 |
27 | /* Modules */
28 | "module": "commonjs", /* Specify what module code is generated. */
29 | // "rootDir": "./", /* Specify the root folder within your source files. */
30 | // "moduleResolution": "node10", /* Specify how TypeScript looks up a file from a given module specifier. */
31 | // "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
32 | // "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */
33 | // "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */
34 | // "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */
35 | // "types": [], /* Specify type package names to be included without being referenced in a source file. */
36 | // "allowUmdGlobalAccess": true, /* Allow accessing UMD globals from modules. */
37 | // "moduleSuffixes": [], /* List of file name suffixes to search when resolving a module. */
38 | // "allowImportingTsExtensions": true, /* Allow imports to include TypeScript file extensions. Requires '--moduleResolution bundler' and either '--noEmit' or '--emitDeclarationOnly' to be set. */
39 | // "resolvePackageJsonExports": true, /* Use the package.json 'exports' field when resolving package imports. */
40 | // "resolvePackageJsonImports": true, /* Use the package.json 'imports' field when resolving imports. */
41 | // "customConditions": [], /* Conditions to set in addition to the resolver-specific defaults when resolving imports. */
42 | // "resolveJsonModule": true, /* Enable importing .json files. */
43 | // "allowArbitraryExtensions": true, /* Enable importing files with any extension, provided a declaration file is present. */
44 | // "noResolve": true, /* Disallow 'import's, 'require's or '<reference>'s from expanding the number of files TypeScript should add to a project. */
45 |
46 | /* JavaScript Support */
47 | // "allowJs": true, /* Allow JavaScript files to be a part of your program. Use the 'checkJS' option to get errors from these files. */
48 | // "checkJs": true, /* Enable error reporting in type-checked JavaScript files. */
49 | // "maxNodeModuleJsDepth": 1, /* Specify the maximum folder depth used for checking JavaScript files from 'node_modules'. Only applicable with 'allowJs'. */
50 |
51 | /* Emit */
52 | // "declaration": true, /* Generate .d.ts files from TypeScript and JavaScript files in your project. */
53 | // "declarationMap": true, /* Create sourcemaps for d.ts files. */
54 | // "emitDeclarationOnly": true, /* Only output d.ts files and not JavaScript files. */
55 | // "sourceMap": true, /* Create source map files for emitted JavaScript files. */
56 | // "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */
57 | // "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */
58 | "outDir": "dist", /* Specify an output folder for all emitted files. */
59 | // "removeComments": true, /* Disable emitting comments. */
60 | // "noEmit": true, /* Disable emitting files from a compilation. */
61 | // "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */
62 | // "downlevelIteration": true, /* Emit more compliant, but verbose and less performant JavaScript for iteration. */
63 | // "sourceRoot": "", /* Specify the root path for debuggers to find the reference source code. */
64 | // "mapRoot": "", /* Specify the location where debugger should locate map files instead of generated locations. */
65 | // "inlineSources": true, /* Include source code in the sourcemaps inside the emitted JavaScript. */
66 | // "emitBOM": true, /* Emit a UTF-8 Byte Order Mark (BOM) in the beginning of output files. */
67 | // "newLine": "crlf", /* Set the newline character for emitting files. */
68 | // "stripInternal": true, /* Disable emitting declarations that have '@internal' in their JSDoc comments. */
69 | // "noEmitHelpers": true, /* Disable generating custom helper functions like '__extends' in compiled output. */
70 | // "noEmitOnError": true, /* Disable emitting files if any type checking errors are reported. */
71 | // "preserveConstEnums": true, /* Disable erasing 'const enum' declarations in generated code. */
72 | // "declarationDir": "./", /* Specify the output directory for generated declaration files. */
73 |
74 | /* Interop Constraints */
75 | // "isolatedModules": true, /* Ensure that each file can be safely transpiled without relying on other imports. */
76 | // "verbatimModuleSyntax": true, /* Do not transform or elide any imports or exports not marked as type-only, ensuring they are written in the output file's format based on the 'module' setting. */
77 | // "isolatedDeclarations": true, /* Require sufficient annotation on exports so other tools can trivially generate declaration files. */
78 | // "allowSyntheticDefaultImports": true, /* Allow 'import x from y' when a module doesn't have a default export. */
79 | "esModuleInterop": true, /* Emit additional JavaScript to ease support for importing CommonJS modules. This enables 'allowSyntheticDefaultImports' for type compatibility. */
80 | // "preserveSymlinks": true, /* Disable resolving symlinks to their realpath. This correlates to the same flag in node. */
81 | "forceConsistentCasingInFileNames": true, /* Ensure that casing is correct in imports. */
82 |
83 | /* Type Checking */
84 | "strict": true, /* Enable all strict type-checking options. */
85 | // "noImplicitAny": true, /* Enable error reporting for expressions and declarations with an implied 'any' type. */
86 | // "strictNullChecks": true, /* When type checking, take into account 'null' and 'undefined'. */
87 | // "strictFunctionTypes": true, /* When assigning functions, check to ensure parameters and the return values are subtype-compatible. */
88 | // "strictBindCallApply": true, /* Check that the arguments for 'bind', 'call', and 'apply' methods match the original function. */
89 | // "strictPropertyInitialization": true, /* Check for class properties that are declared but not set in the constructor. */
90 | // "noImplicitThis": true, /* Enable error reporting when 'this' is given the type 'any'. */
91 | // "useUnknownInCatchVariables": true, /* Default catch clause variables as 'unknown' instead of 'any'. */
92 | // "alwaysStrict": true, /* Ensure 'use strict' is always emitted. */
93 | // "noUnusedLocals": true, /* Enable error reporting when local variables aren't read. */
94 | // "noUnusedParameters": true, /* Raise an error when a function parameter isn't read. */
95 | // "exactOptionalPropertyTypes": true, /* Interpret optional property types as written, rather than adding 'undefined'. */
96 | // "noImplicitReturns": true, /* Enable error reporting for codepaths that do not explicitly return in a function. */
97 | // "noFallthroughCasesInSwitch": true, /* Enable error reporting for fallthrough cases in switch statements. */
98 | // "noUncheckedIndexedAccess": true, /* Add 'undefined' to a type when accessed using an index. */
99 | // "noImplicitOverride": true, /* Ensure overriding members in derived classes are marked with an override modifier. */
100 | // "noPropertyAccessFromIndexSignature": true, /* Enforces using indexed accessors for keys declared using an indexed type. */
101 | // "allowUnusedLabels": true, /* Disable error reporting for unused labels. */
102 | // "allowUnreachableCode": true, /* Disable error reporting for unreachable code. */
103 |
104 | /* Completeness */
105 | // "skipDefaultLibCheck": true, /* Skip type checking .d.ts files that are included with TypeScript. */
106 | "skipLibCheck": true /* Skip type checking all .d.ts files. */
107 | }
108 | }
109 |
--------------------------------------------------------------------------------