├── .gitignore ├── README.md ├── _exercises ├── 01-running_containers_k8s │ ├── 01-creating_pod.md │ ├── 02-managing_pod_kubectl.md │ ├── 03-exposing_pod.md │ ├── 04-code_color_api_v1.0.0.md │ └── 05-from_dockerfile_to_pod.md ├── 02-object_management_yaml_manifests │ ├── 01-imperative_config_files-create_nginx.md │ ├── 02-imperative_config_files-create_svc.md │ ├── 03-generating_manifests.md │ ├── 04-shortcomings_imperative_config_files.md │ ├── 05-declarative_management.md │ ├── 06-imperative_to_declarative.md │ └── 07-multiple_objects_same_file.md ├── 03-replicasets_deployments │ ├── 01-create_replicaset.md │ ├── 02-replicaset_shortcomings_updating_pods.md │ ├── 03-replicaset_shortcomings_existing_pods.md │ ├── 04-create_deployment.md │ ├── 05-update_pod_template.md │ ├── 06-exploring_rollouts.md │ ├── 07-kubectl_scale.md │ └── 08-failed_rollouts.md ├── 04-services │ ├── 01-color_api_add_hostname.md │ ├── 02-code_traffic_generator.md │ ├── 03-deploy_color_api_traffic_generator.md │ ├── 04-clusterip_service.md │ ├── 05-nodeport_service.md │ ├── 06-nodeport_service_linux.md │ └── 07-externalname_service.md ├── 05-resource_management │ ├── 01-labels_selectors_kubectl.md │ ├── 02-labels_selectors_match_expressions.md │ ├── 03-namespaces.md │ ├── 04-cross_namespace_service.md │ ├── 05-resource_quotas.md │ ├── 06-requests_limits.md │ ├── 07-rollouts_requests_limits.md │ ├── 08-extend_color_api_probes.md │ ├── 09-startup_probes.md │ ├── 10-add_dedicated_startup_probe.md │ ├── 11-liveness_probes.md │ └── 12-readiness_probe.md ├── 06-storage_persistence │ ├── 01-emptydir.md │ ├── 02-create_pv_pvc.md │ ├── 03-create_pod_fix_local_path.md │ ├── 04-delete_pvc_pv.md │ ├── 05-dynamic_provision_pv.md │ ├── 06-statefulsets_create_pvs.md │ ├── 07-statefulsets_create_ss.md │ ├── 08-stateful_set_dynamic_pvs.md │ └── 09-headless_svc.md ├── 07-configuration_management │ ├── 01-color_api_pass_color_env_variable.md │ ├── 02-configmap_env_vars.md │ ├── 03-configmap_volumes.md │ ├── 04-secrets_env_variables.md │ └── 05-secrets_volumes.md ├── 08-deploy_mongodb_database │ └── project_overview.md ├── 09-security_fundamentals │ ├── 01-overview_minikube_user_clusterroles.md │ ├── 02-create_users_alice_bob.md │ ├── 03-configure_user_credentials.md │ ├── 04-exploring_api_resources.md │ ├── 05-setup_permissions_read_pods_bob.md │ ├── 06-clusterroles_clusterrolebindings.md │ ├── 07-permissions_subresources.md │ ├── 08-default_service_accounts.md │ ├── 09-create_own_service_account.md │ ├── 10-cleanup.md │ ├── 11-deny_all_ingress.md │ ├── 12-allow_curl.md │ ├── 13-podselector.md │ ├── 14-combining_selectors.md │ ├── 15-egress.md │ ├── 16-network_policies_namespace.md │ ├── 17-pod_security_standards.md │ └── 18-pod_security_standards_documentation.md ├── 10-kustomize │ ├── 01-first_kustomize_project.md │ ├── 02-bases_overlays.md │ ├── 03-transformations.md │ ├── 04-configmap_generator.md │ ├── 05-secret_generator.md │ ├── 06-introduction_patches.md │ ├── 07-strategic_merge_patches.md │ └── 08-json_patches.md └── 11-deploy_color_api_gke │ └── project_introduction.md ├── config-maps ├── green-color-api.yaml ├── green-config.yaml ├── red-color-api.yaml └── red-config.yaml ├── containers ├── color-api │ ├── .dockerignore │ ├── Dockerfile │ ├── package-lock.json │ ├── package.json │ └── src │ │ ├── db │ │ └── color.js │ │ ├── index.js │ │ ├── routes │ │ ├── api.js │ │ ├── health.js │ │ └── root.js │ │ └── utils.js └── traffic-generator │ ├── Dockerfile │ └── traffic-gen.sh ├── deployments └── nginx-depl.yaml ├── 
headless-service ├── color-ss.yaml ├── debug.yaml └── svc.yaml ├── health-probes ├── color-api-pod.yaml └── readiness-probes │ ├── color-api-depl.yaml │ ├── color-api-svc.yaml │ └── traffic-generator.yaml ├── kustomize ├── dev-ns.yaml ├── nginx-app │ ├── base │ │ ├── kustomization.yaml │ │ ├── nginx-depl.yaml │ │ ├── nginx-svc.yaml │ │ ├── reverse-proxy-depl.yaml │ │ └── reverse-proxy-pod.yaml │ └── overlays │ │ ├── dev │ │ ├── db-init.js │ │ ├── kustomization.yaml │ │ ├── mount-db-init.patch.yaml │ │ ├── remove-resources.patch.json │ │ ├── remove-resources.patch.yaml │ │ └── use-latest-tag.patch.yaml │ │ └── prod │ │ └── kustomization.yaml └── prod-ns.yaml ├── labels-selectors ├── color-api.yaml └── color-depl.yaml ├── namespaces ├── color-api.yaml ├── dev-ns.yaml └── traffic-generator.yaml ├── network-policies ├── color-api.yaml ├── curl.yaml ├── curl2.yaml ├── curl3.yaml ├── dev-ns.yaml ├── policies │ ├── allow-color-api.yaml │ ├── allow-curl.yaml │ ├── allow-egress-dns.yaml │ └── deny-all.yaml └── traffic-gen.yaml ├── object-management ├── nginx-all.yaml ├── nginx-pod.yaml └── nginx-svc.yaml ├── pod-security-standards ├── baseline-pod.yaml ├── namespaces │ ├── baseline.yaml │ └── privileged.yaml ├── privileged-pod.yaml └── restricted-pod.yaml ├── proj-gke ├── color-api │ ├── _base │ │ ├── color-api.yaml │ │ ├── color-svc.yaml │ │ ├── kustomization.yaml │ │ └── network-policies │ │ │ └── allow-external.yaml │ ├── dev │ │ ├── ingress.yaml │ │ ├── kustomization.yaml │ │ ├── managed-cert.yaml │ │ └── use-dev-image.yaml │ └── prod │ │ ├── increase-replica-count.yaml │ │ ├── ingress.yaml │ │ ├── kustomization.yaml │ │ └── managed-cert.yaml ├── color-db │ ├── _base │ │ ├── kustomization.yaml │ │ ├── mongo-init.js │ │ ├── mongodb-ss.yaml │ │ ├── mongodb-svc.yaml │ │ └── network-policies │ │ │ └── allow-colorapi.yaml │ ├── dev │ │ └── kustomization.yaml │ └── prod │ │ ├── kustomization.yaml │ │ └── use-premium-storage.yaml ├── namespaces │ ├── dev.yaml │ └── prod.yaml └── shared-config │ ├── _network-policies │ ├── deny-ingress.yaml │ └── kustomization.yaml │ ├── dev │ └── kustomization.yaml │ └── prod │ └── kustomization.yaml ├── proj-mongodb ├── color-api.yaml ├── color-svc.yaml ├── mongodb-colordb-creds.yaml ├── mongodb-init-colordb.yaml ├── mongodb-root-creds.yaml ├── mongodb-ss.yaml ├── mongodb-svc.yaml └── traffic-generator.yaml ├── rbac ├── clusterroles │ ├── pod-admin-crb.yaml │ └── pod-admin.yaml ├── csr.yaml ├── pods.yaml ├── roles │ ├── dev-pod-reader-rb.yaml │ ├── dev-pod-reader.yaml │ └── ns.yaml └── service-account │ └── pod-inspector.yaml ├── replica-sets ├── nginx-pod.yaml └── nginx-rs.yaml ├── resource-quotas ├── color-api-depl.yaml ├── color-api-pod.yaml ├── dev-ns.yaml └── prod-ns.yaml ├── secrets └── demo-pod.yaml ├── services ├── color-api-clusterip.yaml ├── color-api-depl.yaml ├── color-api-nodeport.yaml ├── google-extname.yaml └── traffic-generator.yaml ├── stateful-sets ├── pvs.yaml └── stateful-set.yaml └── storage-persistence ├── dynamic.yml ├── empty-dir-example.yaml └── local-vol-example.yaml /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | .env* 4 | *.csr 5 | *.key 6 | *.crt 7 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # The Complete Docker and Kubernetes Course: From Zero to Hero 2 | 3 | This repository is part of my Docker and Kubernetes full course! 
Check right below for the link with a great discount 4 | 5 | ### Course link (with a big discount 🙂): https://www.lauromueller.com/courses/docker-kubernetes 6 | 7 | **Check my other courses:** 8 | 9 | - 👉 The Definitive Helm Course: From Beginner to Master - https://www.lauromueller.com/courses/definitive-helm-course 10 | - 👉 Mastering Terraform: From Beginner to Expert - https://www.lauromueller.com/courses/mastering-terraform 11 | - 👉 Mastering GitHub Actions: From Beginner to Expert - https://www.lauromueller.com/courses/mastering-github-actions 12 | - 👉 Write better code: 20 code smells and how to get rid of them - https://www.lauromueller.com/courses/writing-clean-code 13 | 14 | Welcome everyone! I'm very happy to see you around, and I hope this repository brings lots of value to those learning more about Docker and Kubernetes. Make sure to check the link above for a great discount on the course on Udemy, where I not only provide theoretical explanations around all the concepts here, but also go in detail through the entire coding of the examples in this repository. 15 | 16 | Here are a few tips for you to best navigate the contents of this repository: 17 | 18 | 1. The folder `_exercises` contains practical steps for you to try to implement the hands-on labs we go through throughout the course. They are organized based on the Kubernetes sections of the course, so you can easily match the folder and the section by title, as well as the exercise file and the corresponding hands-on lab video in the course. 19 | 2. Each section in the course has one or more folders associated with it, and each folder contains all the files and code for the respective topic. Sometimes, sections have more than one folder, but it should be very straightforward to identify the correct folder based on the lecture titles, as I tried to keep the naming as consistent as possible. 20 | 21 | ## Additional Links: 22 | 23 | - Docker's code repository (also part of the bundle): https://github.com/lm-academy/docker-course 24 | -------------------------------------------------------------------------------- /_exercises/01-running_containers_k8s/01-creating_pod.md: -------------------------------------------------------------------------------- 1 | # Creating and Managing Pods in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we'll explore how to create and manage pods using the `kubectl` command-line utility. By the end of this session, you should have a good understanding of the basic commands needed to spin up your first pod. Before diving into the step-by-step process, give it a try on your own! Here’s a quick summary of the steps you'll be attempting: 6 | 7 | 1. Ensure Kubernetes is running and verify the `kubectl` version. 8 | 2. Check your current context to ensure it's set to `minikube`. 9 | 3. Use the `kubectl run` command to create a pod with an NGINX image. 10 | 4. Confirm the pod is running and in a ready state. 11 | 12 | Have a go at implementing this before you peek into the guide below! 🚀 13 | 14 | ## Step-by-Step Guide 15 | 16 | Here’s how to create and manage your pod in a clear step-by-step format: 17 | 18 | 1. **Check Kubernetes Setup**: Run the command `kubectl version` to check if Kubernetes is up and running. You should receive a valid response from the server. 19 | 2.
**Verify Context**: Ensure you're set to the `minikube` context by executing: 20 | ```bash 21 | kubectl config current-context 22 | ``` 23 | If it doesn't return `minikube`, switch to the context with: 24 | ```bash 25 | kubectl config use-context minikube 26 | ``` 27 | 3. **Run the Pod**: Use the following command to create an NGINX pod (version 1.27.0): 28 | ```bash 29 | kubectl run my-nginx --image=nginx:1.27.0 30 | ``` 31 | Replace `my-nginx` with your preferred pod name. 32 | 4. **Check the Pod Status**: After running the command, verify your pod's status by using: 33 | ```bash 34 | kubectl get pods 35 | ``` 36 | You should see your pod listed as 'Running' and ready. 37 | 38 | ## Conclusion 39 | 40 | Congratulations on successfully spinning up your first pod! 🎉 You’ve now gone through the basics of using `kubectl` to create and manage pods in Kubernetes. Keep practicing these commands and exploring other features of Kubernetes to deepen your understanding. The more you experiment, the more proficient you’ll become! 41 | -------------------------------------------------------------------------------- /_exercises/01-running_containers_k8s/02-managing_pod_kubectl.md: -------------------------------------------------------------------------------- 1 | # Managing Pods with kubectl 2 | 3 | ## Overview 4 | 5 | In this exercise, we’ll dive into managing pods using `kubectl` in Kubernetes. You’ll get hands-on experience with commands that allow you to gather detailed information about your pods and effectively communicate with them. Before you check the step-by-step guide below, why not challenge yourself to implement the following steps on your own? 6 | 7 | 1. Retrieve a list of all pods in your Kubernetes cluster using `kubectl get pods`. 8 | 2. Use the `kubectl describe` command to get detailed information about a specific pod. 9 | 3. Create a new pod, ensuring you can access a shell within it. 10 | 4. Install a utility like `curl` in your pod. 11 | 5. Use the pod's IP address to make requests to another service (your `nginx` pod). 12 | 6. Delete the pods once you’re done exploring. 13 | 14 | Give it a shot! Try to perform these steps on your own before diving into the detailed guide below. 🤓 15 | 16 | ## Step-by-Step Guide 17 | 18 | 1. **Get Pods**: 19 | Run the command to list all the pods in your cluster: 20 | 21 | ```sh 22 | kubectl get pods 23 | ``` 24 | 25 | 2. **Describe a Pod**: 26 | To gather more information about a specific pod, use the describe command: 27 | 28 | ```sh 29 | kubectl describe pod <pod-name> 30 | ``` 31 | 32 | Replace `<pod-name>` with the actual name of the pod you want to inspect. 33 | 34 | 3. **Create a New Pod**: 35 | Create a pod based on the `alpine:3.20` image with an interactive shell: 36 | 37 | ```sh 38 | kubectl run <pod-name> --image=alpine:3.20 -it -- /bin/sh 39 | ``` 40 | 41 | Replace `<pod-name>` with the appropriate value. 42 | 43 | 4. **Install Curl**: 44 | Once inside the container shell, update package lists and install `curl`: 45 | 46 | ```sh 47 | apk update 48 | apk add curl 49 | ``` 50 | 51 | 5. **Make Requests**: 52 | With `curl` installed, you can now use the pod's IP address to communicate with the nginx pod (use `kubectl describe pod my-nginx` to retrieve the private IP address of the nginx pod): 53 | 54 | ```sh 55 | curl <nginx-pod-ip> 56 | ``` 57 | 58 | 6.
**Delete the Pod**: 59 | Once you've finished testing, you can delete the pod with: 60 | ```sh 61 | kubectl delete pod <pod-name> 62 | ``` 63 | 64 | ## Conclusion 65 | 66 | In this lecture, we covered how to gather detailed information about Kubernetes pods using `kubectl`, create and manage pods effectively, and establish communication with them. We also explored some tools like `curl` for testing connectivity. Keep practicing these commands, as getting comfortable with them will greatly improve your abilities in Kubernetes. Happy learning! 🚀 67 | -------------------------------------------------------------------------------- /_exercises/01-running_containers_k8s/03-exposing_pod.md: -------------------------------------------------------------------------------- 1 | # Exposing Pods and Services in Kubernetes 2 | 3 | Welcome! In this session, we'll dive into exposing pods and containers in Kubernetes by creating a service. This exercise will help you understand how to provide a stable endpoint for communication between pods, which is important since pod IP addresses can change frequently. Before we jump into the step-by-step guide, here’s a quick overview of what we’ll be doing. 🤓 4 | 5 | ## Overview 6 | 7 | In this exercise, your goal is to expose a pod (in our case, an NGINX pod) so that it can be accessed via a stable name or IP address. Take a moment to try the following steps on your own before checking the detailed guide: 8 | 9 | 1. Ensure you have an NGINX pod running. 10 | 2. Use the `kubectl expose pod` command to create a service. 11 | 3. Check that the service has been created successfully. 12 | 4. Test accessing the service using its Cluster IP address and its name. 13 | 5. Clean up by deleting the service and pods after testing. 14 | 15 | Give it a try! Remember, the practice is where the real learning happens. 🚀 16 | 17 | ## Step-by-Step Guide 18 | 19 | Here’s a straightforward guide to help you through the process: 20 | 21 | 1. **List the running pods**: 22 | Run `kubectl get pods` to confirm you have an NGINX pod up and running. 23 | 24 | 2. **Expose the NGINX pod**: 25 | Use the command: 26 | 27 | ```bash 28 | kubectl expose pod <pod-name> --type=NodePort --port=80 29 | ``` 30 | 31 | Replace `<pod-name>` with the actual name of your NGINX pod. 32 | 33 | 3. **Verify the service creation**: 34 | Run `kubectl get services` to make sure your service was created successfully. You should see a reference to your NGINX service. 35 | 36 | 4. **Send a test request**: 37 | Create another pod (such as an alpine pod) to test communication with your service. Inside the alpine pod, use curl: 38 | 39 | ```bash 40 | curl <service-cluster-ip> 41 | ``` 42 | 43 | Additionally, you can use: 44 | 45 | ```bash 46 | curl <service-name> 47 | ``` 48 | 49 | Both should return a response indicating the service is up. 50 | 51 | 5. **Clean up resources**: 52 | After testing, don’t forget to delete your service and any pods you created: 53 | ```bash 54 | kubectl delete service <service-name> 55 | kubectl delete pod <nginx-pod-name> 56 | kubectl delete pod <alpine-pod-name> 57 | ``` 58 | 59 | ## Conclusion 60 | 61 | Congratulations on successfully exposing pods using services in Kubernetes! By creating a stable endpoint for communication, you can ensure your pods can interact without depending on ephemeral IP addresses. Keep practicing, and don’t hesitate to explore the various types of services available in Kubernetes for deeper knowledge. There’s always more to learn!
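As a reference, the `kubectl expose` command from step 2 creates a Service object behind the scenes. A roughly equivalent manifest — a sketch, assuming your pod is named `my-nginx` and still carries the `run: my-nginx` label that `kubectl run` adds by default — would look like this:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: my-nginx # kubectl expose reuses the pod name for the service
spec:
  type: NodePort
  selector:
    run: my-nginx # kubectl expose copies the pod's labels into the selector
  ports:
    - port: 80 # from --port; targetPort defaults to the same value
```

Requests sent to the service's stable name or Cluster IP are then routed to any pod matching the selector.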
62 | -------------------------------------------------------------------------------- /_exercises/01-running_containers_k8s/05-from_dockerfile_to_pod.md: -------------------------------------------------------------------------------- 1 | # From Dockerfile to Pod: Your Guide to Running Applications in Kubernetes 2 | 3 | Welcome! In this guide, we're going to dive into how to take an application from your Dockerfile, build it locally, push it to Docker Hub, and finally run it in a Kubernetes pod. 🐳 It's an exciting journey, and by the end, you'll be able to see your application live in action! 4 | 5 | ## Overview 6 | 7 | Before we jump into the step-by-step process, I challenge you to try implementing the solution on your own! Here's a quick summary of what you'll need to do: 8 | 9 | 1. Ensure you have the application code locally, either from the GitHub repository or resources provided. 10 | 2. Build your Docker image using the Dockerfile. 11 | 3. Log in to Docker Hub. 12 | 4. Push your Docker image to your Docker Hub repository. 13 | 5. Create a Kubernetes pod using your image. 14 | 6. Verify that your application is running by checking the logs. 15 | 16 | Give it a shot and see if you can complete these steps before referring to the detailed guide below! 17 | 18 | ## Step-by-Step Guide 19 | 20 | 1. **Set Up Your Environment**: Make sure you're in the directory of your application, which contains your Dockerfile. 21 | 2. **Build the Docker Image**: Run the command: 22 | ```bash 23 | docker build -t <dockerhub-username>/color-api:1.0.0 . 24 | ``` 25 | Replace `<dockerhub-username>` with your actual Docker Hub username. 26 | 3. **Log In to Docker Hub**: Use the command: 27 | ```bash 28 | docker login 29 | ``` 30 | Enter your Docker Hub credentials or a personal access token if prompted. 31 | 4. **Push Your Docker Image**: Upload your image with: 32 | ```bash 33 | docker push <dockerhub-username>/color-api:1.0.0 34 | ``` 35 | 5. **Create a Kubernetes Pod**: Run the following command: 36 | ```bash 37 | kubectl run color-api --image=<dockerhub-username>/color-api:1.0.0 38 | ``` 39 | 6. **Check the Pod Status**: Use: 40 | ```bash 41 | kubectl get pods 42 | ``` 43 | To confirm that your pod is running. 44 | 7. **View Logs**: You can verify the application is working by checking the logs: 45 | ```bash 46 | kubectl logs color-api 47 | ``` 48 | 49 | Don't forget to clean up your resources afterward to maintain a tidy environment! 50 | 51 | ## Conclusion 52 | 53 | Congratulations on making it through this process! 🌟 You have now learned how to take an application from a Dockerfile, build it, push it to Docker Hub, and run it in Kubernetes. Remember, practice makes perfect, so keep experimenting and learning more about Docker and Kubernetes. 54 | -------------------------------------------------------------------------------- /_exercises/02-object_management_yaml_manifests/01-imperative_config_files-create_nginx.md: -------------------------------------------------------------------------------- 1 | # Creating NGINX Pods and Services in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we're diving into the world of Kubernetes by learning how to create and manage NGINX pods using imperative commands along with configuration files. This is a great opportunity to solidify your understanding of both concepts before you dive into the step-by-step guide. Are you ready? 👍 6 | 7 | Here’s a quick overview of what you'll be implementing: 8 | 9 | 1. Create an NGINX pod using imperative commands. 10 | 2. Write a configuration file for the NGINX pod. 11 | 3.
Define the necessary metadata and specifications in the YAML file. 12 | 4. Create a service to expose the NGINX pod. 13 | 5. Verify that the pod and service are running correctly. 14 | 15 | Go ahead and give it a try before checking the detailed steps below! 16 | 17 | ## Step-by-Step Guide 18 | 19 | Let's break down the process of creating an NGINX pod along with exposing it via a service: 20 | 21 | 1. **Open your IDE:** Start by creating a new folder for your project (you can name it `object-management`). 22 | 2. **Create the Pod Configuration File:** 23 | - Create a new file named `nginx-pod.yaml`. 24 | - Define the API version with `apiVersion: v1`. 25 | - Declare the kind of resource with `kind: Pod`. 26 | - Add metadata (name the pod as `nginx-pod` and include a simple label, e.g., `app: nginx`). 27 | ```yaml 28 | metadata: 29 | name: nginx-pod 30 | labels: 31 | app: nginx 32 | ``` 33 | 3. **Define the Pod Specification:** 34 | - In the `spec` section, add the containers list. 35 | - Specify the image as `nginx:1.27.0` and give your container a name (`nginx-container`). 36 | - You can optionally add a ports array (`containerPort: 80`). 37 | 4. **Create the Pod:** 38 | - Open your terminal and run the command: 39 | ``` 40 | kubectl create -f nginx-pod.yaml 41 | ``` 42 | 5. **Verify the Pod:** 43 | - Check if the pod is running using: 44 | ``` 45 | kubectl get pods 46 | ``` 47 | - Get additional details about the pod with: 48 | ``` 49 | kubectl describe pod nginx-pod 50 | ``` 51 | 6. **Expose the Pod as a Service:** 52 | - Use the command to expose it: 53 | ``` 54 | kubectl expose pod nginx-pod --type=ClusterIP --port=80 --target-port=80 55 | ``` 56 | 7. **Confirm the Service:** 57 | - Check that the service was created successfully: 58 | ``` 59 | kubectl get svc 60 | ``` 61 | 62 | ## Conclusion 63 | 64 | Great job! Today, we've explored how to create an NGINX pod and then expose it using a service in Kubernetes. By practicing these steps, you're getting hands-on experience with both imperative command usage and writing Kubernetes configuration files. Keep experimenting and applying what you've learned as you continue your Kubernetes journey! 🚀 65 | -------------------------------------------------------------------------------- /_exercises/02-object_management_yaml_manifests/02-imperative_config_files-create_svc.md: -------------------------------------------------------------------------------- 1 | # Creating Node Port Services in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this lesson, we'll be diving into how to create a Node Port service in Kubernetes using a configuration file. The goal is to understand the structure and components needed for your service and to practice creating one before you dive into the detailed guide. Here’s a summary of the main steps to implement the solution: 6 | 7 | 1. Create a configuration file for the service. 8 | 2. Define the API version and kind of object. 9 | 3. Set up the metadata section with the service name and labels. 10 | 4. Specify the service type as Node Port and define the ports. 11 | 5. Set up the selector to connect the service with the appropriate pods. 12 | 6. Deploy the service using the `kubectl create` command. 13 | 14 | We encourage you to try implementing these steps yourself before checking the detailed instructions below! 😊 15 | 16 | ## Step-by-Step Guide 17 | 18 | 1. **Create a Configuration File:** 19 | - Start by creating a new file named `nginx-svc.yaml` (or similar). 20 | 2. 
**Define API Version and Kind:** 21 | 22 | ```yaml 23 | apiVersion: v1 24 | kind: Service 25 | ``` 26 | 27 | 3. **Set Up Metadata:** 28 | 29 | ```yaml 30 | metadata: 31 | name: nginx 32 | labels: 33 | app: nginx 34 | ``` 35 | 36 | 4. **Specify Service Type and Ports:** 37 | 38 | ```yaml 39 | spec: 40 | type: NodePort 41 | ports: 42 | - port: 80 43 | protocol: TCP 44 | targetPort: # Replace with the correct port if needed 45 | ``` 46 | 47 | 5. **Add Selector:** 48 | 49 | ```yaml 50 | selector: 51 | app: nginx 52 | ``` 53 | 54 | 6. **Deploy the Service:** 55 | Run the command in your terminal: 56 | 57 | ``` 58 | kubectl create -f nginx-svc.yaml 59 | ``` 60 | 61 | 7. **Verify the Service:** 62 | Use the command `kubectl get services` to check that your service is up and running. 63 | 64 | 8. **Test Your Service:** 65 | Curl the service's IP address to validate that it's redirecting requests to the right pod! 66 | 67 | ## Conclusion 68 | 69 | Great job on creating your Node Port service! 🎉 You have now learned how to define a service using a configuration file, including setting its metadata, specifications, and selectors. This knowledge equips you to manage your Kubernetes resources more effectively. Continue experimenting and practicing, as working with configuration files is a fundamental skill in Kubernetes management. Keep up the great work, and let's keep learning together! 70 | -------------------------------------------------------------------------------- /_exercises/02-object_management_yaml_manifests/03-generating_manifests.md: -------------------------------------------------------------------------------- 1 | # Generating YAML Configuration Files with Dry Run in Kubernetes 2 | 3 | Welcome! In this session, we will explore how to generate a YAML configuration file from an imperative `kubectl` command using a helpful flag called "dry run." 🎉 This method allows us to see how our configuration file will look before actually applying it to the Kubernetes cluster. Let’s dive in! 4 | 5 | ## Overview 6 | 7 | Before you jump right into the guide, why not give this a shot on your own? Here's a quick overview of what you will be aiming to implement: 8 | 9 | 1. Use the `kubectl run` command with the `--dry-run=client` flag to generate a YAML file for a pod. 10 | 2. Ensure the container configuration (like image name and version) is specified. 11 | 3. Save the generated YAML content to a file. 12 | 4. Apply the newly created configuration file using `kubectl create`. 13 | 5. Repeat the process for the `kubectl expose` command to generate a service configuration. 14 | 15 | Try to implement these steps yourself first! If you get stuck or want to double-check, you can refer to the detailed step-by-step guide below. 16 | 17 | ## Step-by-Step Guide 18 | 19 | 1. **Open your terminal** where you'll run `kubectl` commands. 20 | 2. **Generate the pod configuration** by executing: 21 | ```bash 22 | kubectl run color-api --image=lm-academy/color-api:1.0.0 --dry-run=client -o yaml > color-api.yaml 23 | ``` 24 | This command will create a YAML file named `color-api.yaml` that contains the configuration for your pod. 25 | 3. **Review the contents of the file** to ensure all fields (like API version, kind, labels, and container specs) are set correctly. 26 | 4. **Apply the configuration** using: 27 | ```bash 28 | kubectl create -f color-api.yaml 29 | ``` 30 | 5. 
**Expose the pod as a service** by running: 31 | ```bash 32 | kubectl expose pod color-api --type=NodePort --port=80 --dry-run=client -o yaml > color-api-service.yaml 33 | ``` 34 | 6. **Check the generated service configuration** in the `color-api-service.yaml` file. 35 | 7. **Create the service** using: 36 | ```bash 37 | kubectl create -f color-api-service.yaml 38 | ``` 39 | 40 | And that's it! 🎊 You’ve successfully transitioned from using imperative commands to working with configuration files. 41 | 42 | ## Conclusion 43 | 44 | In this lecture, we explored the powerful `--dry-run=client` flag in `kubectl`, allowing us to generate YAML configuration files for our Kubernetes applications without needing to push commands to the server right away. Mastering this technique enables a smoother workflow when dealing with Kubernetes configurations. Keep practicing, and soon you'll be customizing configurations with ease! 45 | -------------------------------------------------------------------------------- /_exercises/02-object_management_yaml_manifests/04-shortcomings_imperative_config_files.md: -------------------------------------------------------------------------------- 1 | # Understanding the Shortcomings of Imperative Configuration Files in Kubernetes 2 | 3 | Welcome! In this session, we're diving into the limitations of using imperative configuration files in Kubernetes and how they can lead us toward the declarative approach. 🌟 It's important to recognize these limitations, as it will help you improve your Kubernetes management skills. 4 | 5 | ## Overview 6 | 7 | Before we jump into the details, let's summarize what we'll be implementing in this exercise. The goal is to understand the differences between the configuration you've written and what Kubernetes actually uses, particularly when modifying the image of a pod. Here's what you should try to do: 8 | 9 | 1. Review the existing configuration of the nginx pod. 10 | 2. Attempt to change the image of the nginx pod in its configuration file. 11 | 3. Run the `kubectl replace` command with the updated configuration. 12 | 4. Observe the error messages and understand what they indicate. 13 | 5. Learn how to properly apply changes by deleting and recreating the pod. 14 | 15 | Give these steps a shot before looking at the detailed guide. It's a great way to learn through hands-on practice! 16 | 17 | ## Step-by-Step Guide 18 | 19 | 1. **Review the Current Configuration**: Use the command `kubectl describe pod nginx-pod` to see the current configuration of your nginx pod. 20 | 2. **Modify the Configuration File**: Open the nginx configuration file and change the image from `1.27.0` to an Alpine version. 21 | 3. **Attempt to Update**: In your terminal, run the command: 22 | ``` 23 | kubectl replace -f nginx-pod.yaml 24 | ``` 25 | 4. **Analyze the Error**: If you see an error regarding changing fields other than the image, take note of what it's saying about missing fields. 26 | 5. **Delete and Recreate the Pod**: If you get an error, delete the nginx pod using: 27 | ``` 28 | kubectl delete pod nginx-pod 29 | ``` 30 | After that, recreate it with: 31 | ``` 32 | kubectl create -f nginx-pod.yaml 33 | ``` 34 | 6. **Verify the Change**: Run `kubectl describe pod nginx-pod` again to confirm that the image change has been successfully applied. 35 |
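For step 2, the change is a single line in the pod manifest. A sketch, assuming the `nginx-pod.yaml` file from the previous lectures (your field names may differ slightly):

```yaml
# nginx-pod.yaml (excerpt)
spec:
  containers:
    - name: nginx-container
      image: nginx:1.27.0-alpine # previously nginx:1.27.0
      ports:
        - containerPort: 80
```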
36 | 37 | ## Conclusion 38 | 39 | Through this exercise, we explored the challenges that come with imperative configuration management in Kubernetes. Understanding the difference between what's defined in your files and what Kubernetes manages behind the scenes is crucial. By switching to a declarative approach, Kubernetes can efficiently manage status updates and maintain object integrity without needing constant deletions and recreations. Keep practicing these concepts, and soon, you'll feel much more comfortable with Kubernetes! 39 | -------------------------------------------------------------------------------- /_exercises/02-object_management_yaml_manifests/05-declarative_management.md: -------------------------------------------------------------------------------- 1 | # Declarative Management in Kubernetes 2 | 3 | Welcome to our session on declarative management in Kubernetes! In this exercise, we’ll dive into how to manage Kubernetes objects using the `apply` command and explore the powerful features it offers. Before we get started with the step-by-step guide, I challenge you to implement the solution yourself. 🚀 4 | 5 | ## Overview 6 | 7 | In this exercise, you will learn how to manage Kubernetes objects declaratively using the `kubectl apply` command. To get started, try to follow these steps: 8 | 9 | 1. Create a new nginx pod using a YAML configuration file. 10 | 2. Verify that the pod is running in your cluster. 11 | 3. Update the image of the pod and apply the changes. 12 | 4. Check the status of the pod after the update. 13 | 5. Use the `kubectl diff` command to see differences before applying changes. 14 | 6. Delete the pod or service using the `kubectl delete` command. 15 | 16 | Give it a go! See if you can work through these steps before looking at the detailed guide below. 17 | 18 | ## Step-by-Step Guide 19 | 20 | 1. **Create a Pod**: 21 | 22 | - Write the YAML configuration for an nginx pod and save it as `nginxpod.yaml`. 23 | - Run the command: 24 | ```bash 25 | kubectl apply -f nginxpod.yaml 26 | ``` 27 | - Confirm that the pod is created: 28 | ```bash 29 | kubectl get pods 30 | ``` 31 | 32 | 2. **Update the Pod**: 33 | 34 | - Modify the `nginxpod.yaml` file to change the image version (e.g., from `1.27.0` to `1.27.0-alpine`). 35 | - Apply the changes: 36 | ```bash 37 | kubectl apply -f nginxpod.yaml 38 | ``` 39 | - Verify that the pod has been updated. 40 | 41 | 3. **Check Differences**: 42 | 43 | - Run the diff command to see any differences between your configuration and the current state. 44 | ```bash 45 | kubectl diff -f nginxpod.yaml 46 | ``` 47 | 48 | 4. **Delete Resources**: 49 | 50 | - To delete the nginx pod, run: 51 | ```bash 52 | kubectl delete -f nginxpod.yaml 53 | ``` 54 | 55 | 5. **Manage Multiple Objects**: 56 | - If you have multiple configurations in a directory, you can apply them all at once by running: 57 | ```bash 58 | kubectl apply -f . 59 | ``` 60 | 61 | ## Conclusion 62 | 63 | Congratulations on exploring declarative management in Kubernetes! By using the `kubectl apply` and `kubectl diff` commands, you’re empowered to manage your cluster's state more effectively. Don’t forget to practice more by trying different configurations and operations. Keep experimenting and expanding your Kubernetes skills!
🌟 64 | -------------------------------------------------------------------------------- /_exercises/02-object_management_yaml_manifests/06-imperative_to_declarative.md: -------------------------------------------------------------------------------- 1 | # Transitioning from Imperative to Declarative in Kubernetes 2 | 3 | In this exercise, we're going to explore how to transition from an imperative approach to a declarative approach in Kubernetes. 4 | 5 | ## Overview 6 | 7 | The primary focus will be on using the `kubectl apply` command to manage our resources effectively. Before diving into the step-by-step guide, here’s a quick summary of what you should aim to implement: 8 | 9 | 1. Create a pod using the imperative `kubectl create` command. 10 | 2. Confirm that the pod is created using `kubectl get pods`. 11 | 3. Use the `kubectl apply` command to apply the same pod configuration with the nginx pod file. 12 | 4. Check the pod’s configuration to verify the presence of the last applied configuration annotation. 13 | 5. Optionally, learn how to use the `--save-config` flag for future resource creations. 14 | 15 | Take a moment to try implementing these steps on your own before checking out the detailed guide below. Don't hesitate to experiment! 🚀 16 | 17 | ## Step-by-Step Guide 18 | 19 | Here’s a straightforward guide to help you transition your command usage: 20 | 21 | 1. **Create a Pod**: 22 | ```bash 23 | kubectl create -f nginx-pod.yaml 24 | ``` 25 | 2. **Check the Created Pod**: 26 | 27 | ```bash 28 | kubectl get pods 29 | ``` 30 | 31 | 3. **Apply the Configuration**: 32 | 33 | ```bash 34 | kubectl apply -f nginx-pod.yaml 35 | ``` 36 | 37 | 4. **Verify the Last Applied Configuration**: 38 | 39 | ```bash 40 | kubectl get pod nginx -o yaml 41 | ``` 42 | 43 | Look for the `last-applied-configuration` annotation. 44 | 45 | 5. **Optionally Use the Save Config Flag**: 46 | If you didn't use the `--save-config` option during creation, you can still proceed with `kubectl apply` as it will patch the configuration automatically. 47 | 48 | 6. **Clean Up**: 49 | Don't forget to delete any objects defined in your configuration files to keep your environment tidy: 50 | ```bash 51 | kubectl delete -f nginx-pod.yaml 52 | ``` 53 | 54 | ## Conclusion 55 | 56 | By moving from imperative commands to using `kubectl apply`, you not only streamline your resource management but also embrace a more declarative way of working with Kubernetes. This transition allows Kubernetes to maintain the last applied configuration for seamless updates. Keep practicing, and don’t hesitate to try out different configurations! Remember, getting comfortable with these commands will make you more proficient in Kubernetes management. 57 | -------------------------------------------------------------------------------- /_exercises/02-object_management_yaml_manifests/07-multiple_objects_same_file.md: -------------------------------------------------------------------------------- 1 | # Managing Multiple Kubernetes Objects in a Single File 2 | 3 | ## Overview 4 | 5 | In this exercise, we're going to explore how to define multiple Kubernetes objects in a single YAML file. This can help keep your configurations organized and make it easier to manage complex applications. Before diving into the step-by-step guide, here's a quick list of what you can try to implement on your own: 6 | 7 | 1. Create a new YAML file for an NGINX pod and service. 8 | 2. 
Use three dashes (`---`) to separate the pod and service definitions within the same file. 9 | 3. Apply the changes using the `kubectl apply` command. 10 | 4. Experiment with changes to either the pod or service to see how updates can be handled. 11 | 12 | I encourage you to give this a shot before referring to the detailed guide below. 💪 13 | 14 | ## Step-by-Step Guide 15 | 16 | 1. **Open your IDE**: Start by creating a new YAML file to store both your NGINX pod and service definitions. 17 | 2. **Define the NGINX Pod**: Copy and paste the pod definition into your new YAML file. 18 | 3. **Separate with Dashes**: After the pod definition, add three dashes (`---`) to indicate the start of a new resource. 19 | 4. **Define the NGINX Service**: Paste the service definition below the separator. 20 | 5. **Save the File**: Ensure that your YAML file is saved (e.g., `nginx.yml`). 21 | 6. **Check for Existing Resources**: Run `kubectl get pods` and `kubectl get svc` in your terminal to confirm there are no existing NGINX pods or services. 22 | 7. **Apply the Configuration**: Use the command `kubectl apply -f nginx.yml` to create both the pod and service resources. 23 | 8. **Experiment with Updates**: Try making changes to either the pod or service configuration, and apply those changes using the `kubectl apply` command again to see how it affects your resources. 24 | 25 | ## Conclusion 26 | 27 | Congratulations on learning how to manage multiple Kubernetes objects within a single YAML file! This approach provides a neat way to bundle related configurations together, making your life easier as you work with more complex applications. Remember, you can also break things into different files if you prefer more targeted updates later on. Keep practicing and exploring, as the more you work with Kubernetes, the more comfortable you'll become! 🚀 28 | -------------------------------------------------------------------------------- /_exercises/03-replicasets_deployments/02-replicaset_shortcomings_updating_pods.md: -------------------------------------------------------------------------------- 1 | # Understanding the Shortcomings of Updating Pods with Replica Sets 2 | 3 | ## Overview 4 | 5 | In this exercise, we will delve into the challenges and limitations of using replica sets for managing updates and pods in Kubernetes. Before diving into the step-by-step guide, I encourage you to try implementing the solution yourself. Here’s a quick summary of what you'll be tackling: 6 | 7 | 1. Verify that your replica set and pods are running. 8 | 2. Modify the replica set configuration to update the image. 9 | 3. Use the `kubectl apply` command to apply the changes. 10 | 4. Monitor the pod and replica set status to see what happens. 11 | 5. Explore how to properly update the pods by deleting the existing ones. 12 | 13 | Give it a shot, and let's see how far you can get before checking the guide! 🚀 14 | 15 | ## Step-by-Step Guide 16 | 17 | 1. **Check Running Pods**: Begin by confirming that your replica set and the corresponding pods are operational. You can do this by running: 18 | 19 | ```bash 20 | kubectl get pods 21 | kubectl get replicaset 22 | ``` 23 | 24 | 2. **Update the Image**: Modify the replica set configuration file to change the image version (e.g., from `1.27.0` to `1.27.0-alpine`). 25 | 26 | 3. **Apply the Changes**: Use the following command to apply your changes: 27 | 28 | ```bash 29 | kubectl apply -f <replicaset-file>.yaml 30 | ``` 31 | 32 | 4.
**Monitor Changes**: In a new terminal, run the following commands to watch for changes in pods and the replica set: 33 | 34 | ```bash 35 | kubectl get pod --watch 36 | kubectl get replicaset --watch 37 | ``` 38 | 39 | 5. **Investigate Pod Status**: After applying your changes, check the pod status. Use the command: 40 | 41 | ```bash 42 | kubectl describe pod <pod-name> | grep Image 43 | ``` 44 | 45 | This will show you which image is currently being used by the pod. 46 | 47 | 6. **Delete Existing Pods**: Since the changes won't take effect automatically, delete one of the existing pods: 48 | 49 | ```bash 50 | kubectl delete pod <pod-name> 51 | ``` 52 | 53 | 7. **Watch for New Pod Creation**: Once a pod is deleted, the replica set should automatically create a new pod with the updated image. 54 | 55 | 8. **Confirm the New Image**: Finally, check that the new pod is indeed running the updated image: 56 | ```bash 57 | kubectl describe pod <pod-name> | grep Image 58 | ``` 59 | 60 | ## Conclusion 61 | 62 | In this exercise, we've explored how replica sets handle pod updates and the necessity of deleting existing pods to apply new configurations. Remember, relying solely on replica sets can complicate the update process, which is why Kubernetes uses higher abstractions like deployments to facilitate smoother updates. Keep experimenting and practicing, as hands-on experience is key to mastering Kubernetes! 63 | -------------------------------------------------------------------------------- /_exercises/03-replicasets_deployments/03-replicaset_shortcomings_existing_pods.md: -------------------------------------------------------------------------------- 1 | # Replicaset Shortcomings and Existing Pods in Kubernetes 2 | 3 | Welcome! In this guide, we're diving into the nuances of managing pods in a Kubernetes ReplicaSet, specifically focusing on what happens when you create a pod that matches the selection criteria of an existing ReplicaSet. Let’s explore this topic together, and I invite you to try implementing the solution on your own before looking at the step-by-step guide! 4 | 5 | ### Overview 6 | 7 | In this exercise, you will learn how to manage pods within a ReplicaSet and understand the automatic behaviors of Kubernetes when pod counts exceed the desired state. Here’s a quick summary of what we’ll cover: 8 | 9 | 1. Create a new nginx pod that matches the selection criteria of an existing ReplicaSet. 10 | 2. Observe how the ReplicaSet reacts to the additional pod. 11 | 3. Understand the importance of selection criteria and management of pods. 12 | 4. Clean up and remove the unnecessary pod to maintain clear configurations. 13 | 14 | I encourage you to give these steps a shot on your own before proceeding to the detailed guide! 🚀 15 | 16 | ### Step-by-Step Guide 17 | 18 | 1. **Create a New YAML File for the Pod**: In your IDE, set up a new YAML file for an nginx pod with the appropriate metadata, labels, and container specs (a possible manifest is sketched right after this list). 19 | 2. **Apply the Pod Definition**: Use the `kubectl apply` command in your terminal to create the pod using the new YAML file. 20 | 3. **Monitor the ReplicaSet and Pods**: Keep an eye on both the ReplicaSet and pod states by using `kubectl get replicaset` and `kubectl get pods --watch`. 21 | 4. **Observe the Behavior**: Notice how the ReplicaSet manages the pod counts, including terminating any excess pods that match its selector. 22 | 5. **Remove the Unwanted Pod**: Once you've finished testing, delete the manually created pod to clean up your environment.
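A possible manifest for step 1 — a sketch that assumes the existing ReplicaSet selects pods via an `app: nginx` label; adjust the label (and the hypothetical pod name) to whatever your ReplicaSet's `matchLabels` actually uses:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: manual-nginx # a hypothetical name for the manually created pod
  labels:
    app: nginx # matching the ReplicaSet's selector is what triggers the behavior below
spec:
  containers:
    - name: nginx
      image: nginx:1.27.0
```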
23 | 24 | ### Conclusion 25 | 26 | In this session, we explored how Kubernetes manages pods within a ReplicaSet when additional pods with matching selectors are created. Understanding this behavior ensures that we can effectively maintain our cluster without conflicts. Keep practicing these concepts, and don’t hesitate to try out different scenarios to deepen your understanding! Happy learning! 🌟 27 | -------------------------------------------------------------------------------- /_exercises/03-replicasets_deployments/05-update_pod_template.md: -------------------------------------------------------------------------------- 1 | # Updating Pod Templates in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we will focus on updating a pod template in Kubernetes from an older image version to a newer one. Your goal is to implement the following changes using your IDE before reviewing the step-by-step guide: 6 | 7 | 1. Open your IDE and locate the pod template image. 8 | 2. Update the image version from `1.27.0` to `1.27.0-alpine`. 9 | 3. Review the changes that will occur using the `kubectl apply` command. 10 | 4. Monitor the pods and replica sets to track the changes during the update process. 11 | 5. Confirm that the new image version is applied correctly. 12 | 13 | I encourage you to try implementing these steps on your own before diving into the detailed guide below! 🚀 14 | 15 | ## Step-by-Step Guide 16 | 17 | Let's walk through the steps to update your pod template: 18 | 19 | 1. **Open your IDE**: Launch your integrated development environment and access the pod template file. 20 | 2. **Update the Image**: Scroll to the image declaration and change the version from `1.27.0` to `1.27.0-alpine`. 21 | 22 | 3. **Identify Changes**: Use the command `kubectl apply --dry-run=client -f <deployment-file>.yaml` to see what changes will be applied without actually making them. 23 | 24 | 4. **Prepare the Terminal**: Split your terminal into two or three instances. Use one to monitor pods, one for replica sets, and one for executing commands. 25 | 26 | 5. **View Existing Pods and Replica Sets**: Execute the following commands: 27 | 28 | - `kubectl get pods --watch` to watch the current pods. 29 | - `kubectl get rs --watch` to watch the current replica sets. 30 | 31 | 6. **Apply Changes**: Run the command `kubectl apply -f <deployment-file>.yaml` to apply your updates. 32 | 33 | 7. **Monitor Logs**: Observe the logs and the changes in the pods and replica sets as the rolling update occurs. 34 | 35 | 8. **Describe the Deployment**: Execute `kubectl describe deploy <deployment-name>` to review the events and confirm the updates. 36 | 37 | 9. **Confirm the New Image**: Use `kubectl get pods` and then `kubectl describe pod <pod-name>` to check that the new image version is correctly applied. 38 | 39 | ## Conclusion 40 | 41 | You've successfully updated your pod template in Kubernetes and monitored the changes through a rolling update! 🎉 This exercise is critical for understanding how Kubernetes manages deployments and updates. Remember, practice is key! Keep experimenting with different configurations and commands to deepen your understanding. 42 | -------------------------------------------------------------------------------- /_exercises/03-replicasets_deployments/07-kubectl_scale.md: -------------------------------------------------------------------------------- 1 | # Scaling Deployments in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we will explore how to use the `kubectl scale` command to adjust the number of replicas in a Kubernetes deployment.
The ability to scale your applications dynamically can be essential for managing resources effectively. 🌱 While we're going to look at the scale command, remember that this method is primarily for temporary changes and should not replace your configuration files. 6 | 7 | Here’s a quick overview of the main steps to implement the scaling of your deployment: 8 | 9 | 1. Retrieve your current deployment status using `kubectl get deploy`. 10 | 2. Use the `kubectl scale` command to set the desired number of replicas. 11 | 3. Verify the change by running `kubectl get deploy` again. 12 | 4. Remember that this change isn't permanent in your configuration files. 13 | 5. (Optional) Scale your deployment to zero and then back to the desired replicas if necessary for pod recovery. 14 | 15 | Give it a try! Implement these steps on your own before checking out the detailed guide below. 16 | 17 | ## Step-by-Step Guide 18 | 19 | 1. **Get Current Deployments**: Open your command line and run the command `kubectl get deploy`. This shows you the current state of your deployments along with the number of replicas. 20 | 2. **Scale the Deployment**: Run the command `kubectl scale deployment nginx --replicas=3` to scale your chosen deployment (in this case, nginx) to three replicas. You can change the number accordingly. 21 | 22 | 3. **Verify the Changes**: After scaling, check the deployment again by running `kubectl get deploy`. You should see the updated number of replicas. 23 | 24 | 4. **Return to the Original State**: If you were to apply the original configuration file with `kubectl apply -f your-deployment-file.yaml`, be aware that it will reset the replicas back to what’s specified in the file. 25 | 26 | 5. **Special Case for Pod Recovery**: If your pods are unhealthy, you can scale down to zero replicas with `kubectl scale deployment nginx --replicas=0` and then back up to your desired number like `kubectl scale deployment nginx --replicas=5`. 27 | 28 | ## Conclusion 29 | 30 | Scaling deployments in Kubernetes is an essential skill that allows you to manage your application's compute resources effectively. While the `kubectl scale` command is useful for temporary adjustments, always remember to keep your deployment configurations up-to-date for consistency. 🚀 Keep practicing these commands and exploring other Kubernetes functionalities; there's a lot more to learn! 31 | -------------------------------------------------------------------------------- /_exercises/03-replicasets_deployments/08-failed_rollouts.md: -------------------------------------------------------------------------------- 1 | # Understanding Failed Rollouts in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we will delve into what happens when a Kubernetes deployment encounters an invalid configuration, specifically focusing on how it affects pod status and the deployment itself. The goal is to understand how to troubleshoot these issues effectively. Before you look at the detailed guide, try to implement the solution based on the following steps: 6 | 7 | 1. Create a deployment with an intentionally invalid image tag (a sketch follows below). 8 | 2. Describe the deployment to check the status of the pods. 9 | 3. Investigate the individual pods to identify error messages. 10 | 4. Roll back the deployment to the previous working version if necessary. 11 | 5. Correct the invalid configuration and reapply the deployment. 12 | 13 | Give this a shot before diving into the step-by-step guide! You might surprise yourself with what you can accomplish. 🎉
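For step 1 of the guide below, the deployment's pod template just needs an image tag that does not exist. A minimal sketch — the names are assumptions, and the broken tag simply drops the dots from `1.27.0`:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1270 # invalid tag: should be nginx:1.27.0
```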
14 | 15 | ## Step-by-Step Guide 16 | 17 | 1. **Create an Invalid Deployment**: 18 | 19 | - Intentionally set an incorrect image tag in your deployment YAML file, e.g., forget a dot in the tag. 20 | 21 | 2. **Apply Your Configuration**: 22 | 23 | - Use the command `kubectl apply -f <deployment-file>.yaml` to apply the invalid deployment. 24 | 25 | 3. **Check Deployment Status**: 26 | 27 | - Run `kubectl describe deployment <deployment-name>` to get an overview of the deployment's status. 28 | 29 | 4. **Investigate Pod Errors**: 30 | 31 | - List all pods with `kubectl get pods`. Look for any pods showing an "ImagePullBackOff" status. 32 | - Use `kubectl describe pod <pod-name>` to get detailed error messages. 33 | 34 | 5. **Identify the Issue**: 35 | 36 | - Look for messages indicating issues with image pulls or invalid tags, making a note of any errors. 37 | 38 | 6. **Roll Back Deployment (if necessary)**: 39 | 40 | - If things go awry during deployment, run `kubectl rollout undo deployment/<deployment-name>` to revert to the previous version. 41 | 42 | 7. **Fix the Invalid Configuration**: 43 | 44 | - Correct the image tag in your YAML configuration file. 45 | 46 | 8. **Reapply the Deployment**: 47 | 48 | - Run `kubectl apply -f <deployment-file>.yaml` again to apply the corrected configuration. 49 | 50 | 9. **Check Your Work**: 51 | - Verify that the deployment is now healthy by checking pod statuses and using `kubectl rollout status deployment/<deployment-name>`. 52 | 53 | ## Conclusion 54 | 55 | By working through these steps, you've explored how Kubernetes handles invalid configurations and learned several methods for troubleshooting deployment issues. Remember, the key takeaway is knowing how to leverage the rolling update strategy effectively to minimize downtime and ensure smoother deployments. Keep experimenting and practicing these concepts to enhance your Kubernetes skills. You're doing great! 🌟 56 | -------------------------------------------------------------------------------- /_exercises/04-services/01-color_api_add_hostname.md: -------------------------------------------------------------------------------- 1 | # Enhancing the Color API with Hostname and Formatting Options 2 | 3 | Welcome! In today's exercise, we're diving into making some notable enhancements to our Color API. We're excited to implement new features that will not only make our API more informative but also provide flexible response formats. 🚀 4 | 5 | ## Overview 6 | 7 | In this session, you’ll be adding additional functionality to the Color API. The aim is to make it return the hostname of the machine running the application and allow users to choose between JSON and plain text formats for the API response. Here’s a quick summary of the main steps you’ll need to tackle: 8 | 9 | 1. Create a new route for the `/api` endpoint. 10 | 2. Implement JSON response to include both the color and the hostname. 11 | 3. Add the OS module to fetch the hostname. 12 | 4. Introduce query parameters to allow users to specify the desired response format (JSON or plain text). 13 | 5. Test the API using Docker after building a new image. 14 | 15 | Before you dive into the step-by-step guide, give it your best shot at implementing the solution yourself! You'll learn much more by trying it firsthand. 16 | 17 | ## Step-by-Step Guide 18 | 19 | Let's get started with the enhancements: 20 | 21 | 1. **Create the API Endpoint**: Set up a new endpoint that listens for requests on `/api`. 22 | 2.
**Define the Response**: Use `response.json()` to send back a JSON response that includes: 25 | 26 | - A hardcoded value for color (e.g., "blue"). 27 | - The hostname fetched with the `OS` module. 28 | 29 | 3. **Import the OS Module**: 30 | 31 | - Use Node's built-in `os` module to retrieve the current hostname. 32 | - Add this information to the JSON response. 33 | 34 | 4. **Format Options**: 35 | 36 | - Implement logic to check query parameters, enabling users to specify if they want the response in JSON or plain text. 37 | - Based on the user’s input, return the appropriate format. 38 | 39 | 5. **Docker Container**: 40 | - Build your Docker image with appropriate tagging. 41 | - Run your container and test the API with `curl` commands to ensure everything is functioning correctly. 42 | 43 | ## Conclusion 44 | 45 | Great job on enhancing the Color API! You've successfully added the functionality to retrieve the hostname and offered users options for response formats. This hands-on experience solidifies your understanding of developing and deploying APIs. Keep experimenting and practicing, as there's always more to discover in the world of APIs! 46 | -------------------------------------------------------------------------------- /_exercises/04-services/04-clusterip_service.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Fundamentals: Implementing Cluster IP Services 2 | 3 | ## Overview 4 | 5 | In this exercise, we're going to focus on creating a Cluster IP service in our cluster, which will help us manage internal communication effectively. 6 | 7 | ### Try to Implement the Following Steps: 8 | 9 | 1. Create a YAML file for your Cluster IP service. 10 | 2. Define the service metadata, including the name and labels. 11 | 3. Specify the pod selector that the service will use to route traffic. 12 | 4. Set the service ports and ensure the type is defined as Cluster IP. 13 | 5. Apply the service configuration using `kubectl apply`. 14 | 6. Verify that the service is running and check its stable IP address. 15 | 16 | 7. Update your traffic generator to point to the service's Cluster IP. 17 | 8. Monitor the traffic and observe how it handles pod changes and load balancing. 18 | 19 | Take a moment to go through these steps on your own before checking the detailed guide below. It’s a great way to reinforce your learning! 💪 20 | 21 | ## Step-by-Step Guide 22 | 23 | 1. **Create a YAML File**: 24 | 25 | - Navigate to your services folder in the IDE. 26 | - Create a new file named `color-api-cluster-ip.yaml`. 27 | 28 | 2. **Define Service Metadata**: 29 | 30 | - Set the API version to `v1`. 31 | - Define the kind as `Service`. 32 | - Add the name under the metadata section as `color-api-cluster-ip` and include labels such as `app: color-api`. 33 | 34 | 3. **Set Pod Selector**: 35 | 36 | - Under the `spec` section, add a `selector` that matches the labels of your pods (e.g., `app: color-api`). 37 | 38 | 4. **Specify Service Ports**: 39 | 40 | - Define the ports section and set both the service `port` and the `targetPort` values to `80`. Explicitly mention the service type as `ClusterIP`. 41 | 42 | 5. **Apply the Configuration**: 43 | 44 | - Save your file. 45 | - In the terminal, apply the configuration with: 46 | ```bash 47 | kubectl apply -f color-api-cluster-ip.yaml 48 | ``` 49 | 50 | 6. **Verify the Service**: 51 | 52 | - Check that the service is running by executing: 53 | ```bash 54 | kubectl get svc 55 | ``` 56 | 57 | 7.
## Conclusion

In this session, we learned how to set up a ClusterIP service in Kubernetes, enabling efficient internal communication within our cluster. Remember, using the service name instead of the Cluster IP allows for more stability, especially during pod or service restarts. Keep experimenting with what you've learned and continue to practice these concepts! 🚀

--------------------------------------------------------------------------------
/_exercises/04-services/05-nodeport_service.md:
--------------------------------------------------------------------------------

# NodePort Service in Kubernetes

## Overview

In this exercise, we're diving into the implementation of a NodePort service in Kubernetes. The goal is to enable external access to our applications running within the cluster, while understanding the differences between ClusterIP and NodePort services. Before peeking at the step-by-step guide, give the implementation a try yourself! Here's a quick summary of the steps you should undertake:

1. Review the services and pods currently running in your cluster.
2. Create a new YAML file by copying the ClusterIP service definition.
3. Change the service type from ClusterIP to NodePort and set a specific port for external access.
4. Apply the new NodePort service configuration.
5. Test the service to confirm that you can access your pods from outside the cluster.

Are you ready? Let's see how you can set this up on your own! 🚀

## Step-by-Step Guide

1. **Check Current Resources**:

   - Use the commands `kubectl get pods` and `kubectl get services` to ensure everything is running smoothly in your cluster.

2. **Create NodePort Definition**:

   - In your IDE, create a new YAML file (or copy the existing ClusterIP service definition).
   - Change the service name to `node-port` and set the service type to `NodePort` (a manifest sketch follows this guide).

3. **Set NodePort**:

   - Specifically indicate the port you want to expose (for example, use `30007`).

4. **Apply the New Service**:

   - In your terminal, run the command: `kubectl apply -f <your-file>.yaml` to create the new NodePort service.

5. **Verify the Service**:

   - Check the services again with `kubectl get services` to confirm that the NodePort service is listed and has a cluster IP.

6. **Access the Service**:

   - If you're using Minikube on Mac or Windows, run the command: `minikube service <service-name> --url` to get the URL for accessing your service.

7. **Test**:
   - Open your web browser and navigate to the service URL to see if you can communicate with your pods from outside the cluster.
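Here's a rough sketch of how the NodePort manifest might end up looking. The selector mirrors the ClusterIP example, and `30007` is the example port from step 3:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: node-port
spec:
  type: NodePort
  selector:
    app: color-api
  ports:
    - port: 80
      targetPort: 80
      nodePort: 30007
```

If you omit `nodePort`, Kubernetes picks a free port from the default range (30000-32767) for you.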
## Conclusion

Congratulations on successfully setting up a NodePort service! 🎉 This task has illustrated how we can expose our applications to the outside world while contrasting it with the more restricted ClusterIP service. Remember, while NodePort is useful for development, managing security is essential when transitioning to production. Keep practicing and experimenting with Kubernetes, and you'll find yourself becoming more proficient in no time!

--------------------------------------------------------------------------------
/_exercises/05-resource_management/02-labels_selectors_match_expressions.md:
--------------------------------------------------------------------------------

# Kubernetes Fundamentals: Understanding Labels, Selectors, and Match Expressions

## Overview

In this section, we'll focus on implementing a deployment in Kubernetes, specifically using labels, selectors, and match expressions. The goal is to help you gain a practical understanding of how to manage your Kubernetes pods effectively. Before diving into the detailed guide, I encourage you to try to implement the solution on your own! Here are the key steps you'll need to follow:

1. Delete any existing resources in the label selectors folder.
2. Create a new deployment file (`color-depl.yaml`) and define the deployment API version and kind.
3. Configure the container specifications, including image and port settings.
4. Add relevant labels for environment and tier to your deployment.
5. Set up match labels for your deployment to specify which pods to manage.
6. Utilize match expressions for advanced selection criteria.
7. Apply your changes using `kubectl` and check the results.

Try working through these steps on your own and see how it goes before checking the step-by-step guide! 🚀

## Step-by-Step Guide

1. **Clean Up**: Start by removing any resources you've created in the label selectors folder to begin fresh.
2. **Create the Deployment File**: In your IDE, create a new file named `color-depl.yaml`. Set the `apiVersion` to `apps/v1` and define the `kind` as `Deployment`.
3. **Define Container Specifications**: Specify the container details, including the image (e.g., `lmacademy/color-api:1.1.0`) and set the container port (let's use port 80).
4. **Add Labels**: Add labels to your deployment for organization. For example, you might set `environment: local` and `tier: backend`.
5. **Set Match Labels**: Utilize match labels to map the pods that the deployment should manage. Copy the key-value pairs from your labels into the `matchLabels` section.
6. **Implement Match Expressions**: Add match expressions to allow for more complex selection logic. For instance, you could create rules to only manage pods with specific properties (see the sketch after this guide).
7. **Apply Your Deployment**: Use the terminal to apply your changes by running `kubectl apply -f .` and monitor the pods created by your deployment.
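To see how these pieces fit together, here's a minimal sketch of such a deployment. The replica count and the exact expression are illustrative assumptions; adapt them to your own rules:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: color-api
  labels:
    environment: local
    tier: backend
spec:
  replicas: 3
  selector:
    matchLabels:
      environment: local
    matchExpressions:
      # Manage only pods whose tier is one of these values.
      - key: tier
        operator: In
        values:
          - backend
          - frontend
  template:
    metadata:
      labels:
        environment: local
        tier: backend
    spec:
      containers:
        - name: color-api
          image: lmacademy/color-api:1.1.0
          ports:
            - containerPort: 80
```

When `matchLabels` and `matchExpressions` are both present, a pod must satisfy all of them combined, so make sure the pod template's labels pass every rule, or the apply will be rejected.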
## Conclusion

In this section, we've learned about the importance of labels, selectors, and match expressions when configuring deployments in Kubernetes. Understanding these concepts will significantly enhance your ability to manage your applications and resources efficiently. Keep practicing these skills, and don't hesitate to explore additional complexities in your Kubernetes setups! Happy learning! 🌱

--------------------------------------------------------------------------------
/_exercises/05-resource_management/04-cross_namespace_service.md:
--------------------------------------------------------------------------------

# Communicating Across Namespaces in Kubernetes

## Overview

In this exercise, we'll be exploring how to communicate with services that reside in different namespaces within a Kubernetes cluster. You'll implement a solution to expose a pod through a service in the `dev` namespace and generate traffic to it from another namespace. Before diving into the step-by-step guide, take a moment to try implementing this solution on your own. Here's a quick summary of the main steps to follow:

1. Create a service definition for the `color API` pod in the `dev` namespace.
2. Set up a traffic generator pod in the default namespace.
3. Use the fully qualified domain name (FQDN) to communicate with the service in the `dev` namespace.
4. Verify the logs to ensure that traffic is being correctly directed to the `color API` service.

Give it a shot! It's a great opportunity to practice what you've learned, and when you're ready, check out the step-by-step guide below. 🚀

## Step-by-Step Guide

1. **Create the Service Definition**:

   - Create a YAML file for your service.
   - Define the API version as `v1`, kind as `Service`, and set the port and target port to `80`.
   - Specify the service type as `ClusterIP` and include labels that match your pod.

2. **Specify the Namespace**:

   - In the metadata section of your service, set the namespace to `dev`.

3. **Define the Traffic Generator Pod**:

   - Create a YAML file for the traffic generator pod.
   - Omit the namespace to deploy it in the default namespace.
   - Set the necessary parameters, including the endpoint and interval for generating traffic.

4. **Create a Fully Qualified Domain Name (FQDN)**:

   - Ensure that the traffic generator uses the FQDN format: `service-name.namespace.svc.cluster.local` to communicate with the service in `dev` (see the sketch after this guide).

5. **Apply the Configuration**:

   - Use `kubectl apply` to apply your configuration files. If the `dev` namespace doesn't exist yet, it will need to be created first.

6. **Verify the Setup**:
   - Check the status of your pods and services using `kubectl get pods` and `kubectl get services`.
   - Look at the logs of the traffic generator pod to confirm it's successfully communicating with the `color API`.
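As an illustration, a traffic generator pod in the default namespace could reference the `dev` service like this. Note that the image tag and the argument layout are assumptions here; adapt them to however your own traffic generator is invoked:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: traffic-generator
  # No namespace in the metadata, so this pod lands in "default".
spec:
  containers:
    - name: traffic-generator
      image: lmacademy/traffic-generator:1.0.0
      args:
        # FQDN: <service-name>.<namespace>.svc.cluster.local
        - color-api.dev.svc.cluster.local
        - '0.5'
```

Within the same namespace, the short name `color-api` would resolve on its own; the full `.dev.svc.cluster.local` suffix is what lets a pod in `default` reach a service in `dev`.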
## Conclusion

In this lecture, we covered how to set up communication between services across different namespaces in a Kubernetes cluster. By using fully qualified domain names, you can easily route traffic to services that aren't in the same namespace. Remember to practice these concepts regularly, as they are crucial for managing services within Kubernetes. Keep exploring, and good luck with your learning journey! 🌟

--------------------------------------------------------------------------------
/_exercises/05-resource_management/05-resource_quotas.md:
--------------------------------------------------------------------------------

# Kubernetes Fundamentals: Resource Quotas in Kubernetes

## Overview

In this exercise, we will learn how to implement resource quotas in Kubernetes namespaces. Resource quotas help manage the resources allocated to your applications, ensuring that you don't exceed certain limits. Before diving into the step-by-step guide, we encourage you to try implementing this on your own! Here's a high-level summary of the steps to follow:

1. Create two namespaces: `dev` and `prod`.
2. Define resource quotas for both namespaces.
   - Set requests and limits for CPU and memory for the `dev` namespace.
   - Set scaled requests and limits for the `prod` namespace.
3. Apply the configurations using `kubectl`.
4. Verify that resources are correctly allocated and limits are set.

Give it a shot! 🚀 Challenge yourself to implement these steps before referencing the detailed guide below.

## Step-by-Step Guide

1. **Create Namespaces**: In an empty directory, create a YAML file for each namespace (one for `dev` and one for `prod`).

   ```yaml
   apiVersion: v1
   kind: Namespace
   metadata:
     name: dev
   ---
   apiVersion: v1
   kind: Namespace
   metadata:
     name: prod
   ```

2. **Define Resource Quotas**: Combine the resource quota definitions for both namespaces into a single file.

   ```yaml
   apiVersion: v1
   kind: ResourceQuota
   metadata:
     name: dev-quota
     namespace: dev
   spec:
     hard:
       requests.cpu: '1'
       requests.memory: '1Gi'
       limits.cpu: '2'
       limits.memory: '2Gi'
   ---
   apiVersion: v1
   kind: ResourceQuota
   metadata:
     name: prod-quota
     namespace: prod
   spec:
     hard:
       requests.cpu: '2'
       requests.memory: '2Gi'
       limits.cpu: '4'
       limits.memory: '4Gi'
   ```

3. **Apply Resource Quotas**: Use the terminal command to apply the configurations:

   ```bash
   kubectl apply -f your_file_name.yaml
   ```

4. **Verify Quotas**: Check that the quotas are set properly:

   ```bash
   kubectl get resourcequota --all-namespaces
   kubectl describe resourcequota dev-quota --namespace=dev
   kubectl describe resourcequota prod-quota --namespace=prod
   ```

5. **Explore Further**: Experiment with creating pods in both namespaces that utilize the specified resource limits to see how Kubernetes enforces these quotas (a sketch of such a pod follows this guide).
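For that experiment, a test pod might look like the sketch below. The nginx image is an arbitrary choice, and the request and limit values are picked to fit comfortably inside the `dev` quota defined above:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: quota-test
  namespace: dev
spec:
  containers:
    - name: nginx
      image: nginx:1.27.0
      resources:
        requests:
          cpu: 500m
          memory: 512Mi
        limits:
          cpu: '1'
          memory: 1Gi
```

Once a namespace has a CPU and memory quota, pods that don't declare requests and limits for those resources are rejected outright, so every pod you create in `dev` or `prod` now needs a `resources` section.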
## Conclusion

In this lesson, we tackled the concept of resource quotas in Kubernetes, focusing on how to define and apply them within different namespaces. Remember, being aware of resource limits helps maintain efficient resource usage across your cluster. Keep exploring and practicing this concept to deepen your understanding! Happy learning! 🎉

--------------------------------------------------------------------------------
/_exercises/05-resource_management/07-rollouts_requests_limits.md:
--------------------------------------------------------------------------------

# Implementing Resource Requests and Limits in Kubernetes Deployments

## Overview

In this exercise, we will explore how to create a Kubernetes deployment while ensuring that resource requests and limits are effectively managed. This is crucial for preventing resource exhaustion in your cluster's namespace.

Before looking at the step-by-step guide, here's what you'll need to do:

1. Create a Kubernetes deployment with a specified number of replicas.
2. Set appropriate resource requests and limits for the containers in your deployment.
3. Understand the implications of resource quotas on your deployment updates.
4. Simulate an update to your deployment image and manage the rollout process while adhering to resource quotas.

We encourage you to try these steps on your own before referring to the detailed guide below! 💪

## Step-by-Step Guide

1. **Create a Deployment**:

   - Define your deployment using `apps/v1`.
   - Name it `color-api-deployment` and set the namespace to `dev`.
   - Set the replica count to a manageable number, like 4.

2. **Configure Metadata**:

   - Assign labels and define metadata for your deployment template.
   - Ensure the `selector` matches your deployment's labels.

3. **Set Resource Requests and Limits**:

   - For each pod, specify CPU and memory requests and limits, keeping within your resource quota (e.g., 200m CPU and 256Mi memory).

4. **Apply Changes**:

   - Use the terminal to apply your changes.
   - First, create the namespace, then the deployment.

5. **Update Deployment**:

   - Change the image version in your deployment configuration.
   - Attempt to apply the update and monitor the rollout status.

6. **Handle Resource Quotas**:

   - If the rollout fails due to exceeded quotas, review and adjust your resource specifications accordingly.

7. **Clean Up**:
   - Once you're done experimenting, clean up your resources to keep your environment tidy.

## Conclusion

In this lecture, we have discussed the importance of managing resource requests and limits when deploying applications in Kubernetes. Understanding how to effectively use resource quotas will help you prevent deployment issues and ensure smooth application rollouts. Remember, always monitor your resource usage and keep learning as you go! 🌟

--------------------------------------------------------------------------------
/_exercises/05-resource_management/08-extend_color_api_probes.md:
--------------------------------------------------------------------------------

# Extending the Color API with Probes

Welcome! In this session, we'll extend our Color API with some practical endpoints to manage startup, readiness, and liveness probes. This will make it easier for us to control the behavior of these probes using environment variables. Let's dive into what you'll aim to implement! 🙌

## Overview

In this exercise, you will enhance the Color API by adding functionality for startup, liveness, and readiness probes. Below are the main steps you'll want to follow. Before checking the step-by-step guide, give it a shot on your own:

1. **Create Environment Variables**: Set up environment variables that control the behavior of the startup, liveness, and readiness probes.
2. **Implement Delays**: Introduce logic to delay the startup of the application based on the environment variable.
3. **Add Endpoints**: Create `/ready` and `/health` endpoints to respond to readiness and liveness probes.
4. **Use Randomness for Readiness**: Implement logic in the readiness endpoint to randomly fail half of the time.
5. **Test Your Implementation**: Ensure that your application handles different scenarios based on the variables you set.

Before moving on, take a moment to implement these steps on your own. It's a great way to learn!

## Step-by-Step Guide

1. 
**Set Environment Variables**: 20 | - Create variables like `fail_startup`, `fail_liveness`, and `fail_readiness` in your environment configuration. 21 | 2. **Update Logic for Probes**: 22 | 23 | - In your Color API, modify the logic to check these environment variables when determining probe results. 24 | - For `fail_startup`, introduce a startup delay if the variable is set to true. 25 | - For `fail_liveness` and `fail_readiness`, return appropriate HTTP responses based on the variable values. 26 | 27 | 3. **Create New Endpoints**: 28 | 29 | - Add a `/ready` endpoint that checks the `fail_readiness` variable and responds with a 503 status if it's set to true. 30 | - Add a `/health` endpoint using similar logic for the `fail_liveness` variable. 31 | 32 | 4. **Implement Randomness**: 33 | 34 | - Use `Math.random()` to create a 50% chance of failing the readiness probe when `fail_readiness` is true. 35 | 36 | 5. **Build and Push Your Container**: 37 | - Build your updated Docker container using a command like `docker build -t yourusername/color-api:v1.2.0 .` 38 | - Push the new version with `docker push yourusername/color-api:v1.2.0`. 39 | 40 | ## Conclusion 41 | 42 | Great job! By adding these new endpoints and enhancing the Color API with probe management, you’ve taken an important step in understanding how to control application behavior in Kubernetes. Keep exploring, experimenting, and practicing these concepts—there’s always more to discover! 43 | -------------------------------------------------------------------------------- /_exercises/05-resource_management/09-startup_probes.md: -------------------------------------------------------------------------------- 1 | # Health Probes in Kubernetes 2 | 3 | Welcome! In this session, we’ll be diving deep into the fascinating world of health probes in Kubernetes, specifically focusing on startup probes. This is a crucial concept that helps ensure our applications are reliable and resilient. Let's get started! 🚀 4 | 5 | ## Overview 6 | 7 | Before jumping into the step-by-step guide, it's a great idea to try implementing the solution on your own. Here’s a brief summary of what you should attempt: 8 | 9 | 1. Set up a new Kubernetes pod for a color API using the correct image version (1.2.0). 10 | 2. Define a startup probe to monitor the health of your pod. 11 | 3. Set parameters like failure threshold and probe periods. 12 | 4. Experiment with environment variables to delay the startup and observe how the startup probe reacts. 13 | 5. Verify the pod's status and examine the behavior when the startup probe fails. 14 | 15 | Give it a try! Once you've made your attempts, follow the step-by-step guide below to solidify your understanding. 16 | 17 | ## Step-by-Step Guide 18 | 19 | 1. **Create New Directory**: Start by creating a new directory for your health probes. 20 | 2. **Create Pod Configuration**: 21 | - Name your file `color_api_pod.yaml` 22 | - Define the API version, kind, and container specifications including the image `lmacademy/color-api:v1.2.0`. 23 | 3. **Define Startup Probe**: 24 | - In your pod definition, create a `startupProbe`. 25 | - Set it to perform an HTTP GET request to the `/health` endpoint at port 80. 26 | 4. **Configure Resource Limits** (optional but recommended): 27 | - Add resource limits for CPU and memory. 28 | 5. **Set Probe Parameters**: 29 | - Define failure threshold (e.g., 2). 30 | - Set the period between probe checks (e.g., 3 seconds). 31 | 6. 
**Apply Your Pod**: Save your configuration and apply it using `kubectl apply -f color_api_pod.yaml`. 32 | 7. **Monitor Pod Status**: Use `kubectl get pod -w` to watch the pod’s status as it changes. 33 | 8. **Experiment with Startup Delay**: 34 | - Change the value of `delay_startup` to `true` to test how the startup probe reacts. 35 | - Monitor for failure messages and container restarts. 36 | 9. **Revert the Changes**: Set `delay_startup` back to `false` and observe the pod returning to a healthy state. 37 | 38 | ## Conclusion 39 | 40 | To summarize, we explored the concept of startup probes in Kubernetes, learned how to configure them properly, and observed their effect on pod behavior. Understanding health probes is essential for building robust applications that can self-correct, so keep experimenting and practicing! Don't hesitate to continue learning and testing—the more you practice, the better you’ll get! 41 | -------------------------------------------------------------------------------- /_exercises/05-resource_management/10-add_dedicated_startup_probe.md: -------------------------------------------------------------------------------- 1 | # Implementing Dedicated Startup Probes in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we’re going to tackle the implementation of dedicated startup probes in Kubernetes. The main goal here is to ensure that our application has a robust way to manage its startup and health checks independently. Before diving into the step-by-step guide, I encourage you to try this out on your own! Here’s a quick summary of the main steps you’ll need to take: 6 | 7 | 1. Identify the existing probe implementation and its limitations. 8 | 2. Create a dedicated endpoint for startup probes. 9 | 3. Update the health check logic to differentiate between startup, readiness, and liveness probes. 10 | 4. Build and push the updated Docker image. 11 | 5. Verify that the probes work correctly. 12 | 13 | Give it a shot! Once you’ve given it your best effort, check out the step-by-step guide below. 14 | 15 | ## Step-by-Step Guide 16 | 17 | 1. **Review Current Probes**: Look at your application’s current health check endpoints and understand where the bug exists regarding the dual use of the health endpoint for startup and liveness probes. 18 | 19 | 2. **Add Dedicated Startup Endpoint**: Implement a new endpoint specifically for startup probes. This endpoint should return a simple message (like "OK") to serve as a successful response for startup checks. 20 | 21 | 3. **Adjust Probe Logic**: Make sure that your application’s readiness and liveness probes can operate independently of the startup probe. This means updating the probe configuration in your deployment specs accordingly. 22 | 23 | 4. **Build the Docker Image**: Navigate to your IDE and run the Docker build command, tagging it appropriately, for example: 24 | 25 | ``` 26 | docker build -t lmacademy/color-api:1.2.1 . 27 | ``` 28 | 29 | 5. **Push the Docker Image**: Use the Docker push command to upload your newly built image to the repository: 30 | 31 | ``` 32 | docker push lmacademy/color-api:1.2.1 33 | ``` 34 | 35 | 6. **Test Your Changes**: After everything is set up, deploy your application and ensure that all probes are functioning as intended! 36 | 37 | ## Conclusion 38 | 39 | By setting up dedicated startup probes, you improve the reliability and clarity of your application's health checks in Kubernetes. 
This exercise reinforces the importance of separating concerns in your probes to ensure that failures in one do not inadvertently affect others. Keep practicing and don't hesitate to explore other aspects of Kubernetes as you continue your learning journey! 🚀

--------------------------------------------------------------------------------
/_exercises/05-resource_management/11-liveness_probes.md:
--------------------------------------------------------------------------------

# Understanding Liveness Probes in Kubernetes

Welcome! In this guide, we'll dive into the concept of liveness probes in Kubernetes and how to implement them in your applications. This exercise aims to provide you with hands-on experience, so feel free to try it out yourself before referring to the detailed steps below. 🚀

## Overview

To get started with implementing liveness probes, here's what you should aim to accomplish:

1. Ensure you've defined a startup probe that checks the application's health.
2. Use image version 1.2.1 of the Color API so the up endpoint is available for checks.
3. Implement your liveness probe to hit the health endpoint instead of the up endpoint.
4. Adjust the probe's failure threshold and timing as necessary.
5. Experiment with creating, monitoring, and deleting pods that use these probes.

We encourage you to give it a go! Try implementing these steps yourself before popping over to the step-by-step guide.

## Step-by-Step Guide

1. **Set up the Startup Probe**:

   - Fix and define your startup probe to check the up endpoint.
   - Ensure you're using image version 1.2.1 of the Color API so the necessary endpoints are available.

2. **Create the Pod**:
   - Use the terminal command `kubectl apply` to create your pod.
   - Monitor the pod's health status.
3. **Implement the Liveness Probe**:

   - Change the probe to check the health endpoint instead of the up endpoint.
   - Set a failure threshold (e.g., 3) and a delay (e.g., 10 seconds) between probes.

4. **Add Environment Variables**:

   - Introduce an environment variable, such as `fail_liveness`, and set it to true to simulate failure behavior.

5. **Create and Monitor Your Liveness Probe**:

   - Once you've made your changes, recreate the pod using `kubectl apply`.
   - Watch its status and see how the liveness probe behaves in real-time.

6. **Observe Container Behavior**:

   - Note how many times the container restarts if the liveness probe fails.
   - Consider the implications for real-world applications.

7. **Clean Up**:
   - Once done, remember to delete your pod to keep your environment tidy.

## Conclusion

In this lecture, we explored how to use liveness probes to ensure your containers are functioning healthily in Kubernetes. We learned that failed liveness probes can trigger container restarts, which helps maintain application stability. Keep practicing these concepts as they are vital for managing Kubernetes applications effectively. Remember, the key takeaway here is that liveness probes help us ensure our applications are running smoothly!
🌟 52 | -------------------------------------------------------------------------------- /_exercises/05-resource_management/12-readiness_probe.md: -------------------------------------------------------------------------------- 1 | # Readiness Probes in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we will dive into the concept of readiness probes in Kubernetes and how they affect the management of pods. The goal is to help you understand what happens when readiness probes fail and how to implement this feature in your deployments and services. 6 | 7 | Here's a quick outline of what you'll aim to accomplish: 8 | 9 | 1. Create a deployment with readiness probes configured. 10 | 2. Set up a service that interfaces with the deployment. 11 | 3. Observe how failing readiness probes affect traffic routing to your pods. 12 | 4. Use a traffic generator to simulate traffic and observe the behavior of your deployment. 13 | 14 | Before looking at the step-by-step guide, give it a shot on your own and see if you can implement the solution! 15 | 16 | ## Step-by-Step Guide 17 | 18 | 1. **Create a Deployment:** 19 | 20 | - Create a file named `color-api-deployment.yaml`. 21 | - Specify the API version and kind (Deployment). 22 | - Define metadata such as name and labels. 23 | - Set the replicas to 6. 24 | - Include container specifications and environment variables, ensuring to set the `fail-readiness` variable to true. 25 | 26 | 2. **Configure the Readiness Probe:** 27 | 28 | - In the deployment specification, set the readiness probe to query the `/ready` endpoint. 29 | - Configure the threshold and period settings for your readiness check. 30 | 31 | 3. **Create a Service:** 32 | 33 | - Create a service definition in the same or a separate YAML file. 34 | - Ensure your service selects the appropriate pods that match the deployment labels. 35 | 36 | 4. **Set Up a Traffic Generator:** 37 | 38 | - Create a separate YAML file for the traffic generator. 39 | - Configure parameters like the endpoint to hit and delays for traffic generation. 40 | 41 | 5. **Apply Your Configuration:** 42 | 43 | - Use `kubectl apply` to deploy your configurations to the Kubernetes cluster. 44 | - Check the status of your pods to see if they are healthy or unhealthy based on your readiness probes. 45 | 46 | 6. **Observe Traffic Routing:** 47 | 48 | - Use logs from the traffic generator to ensure it is only sending requests to healthy pods. 49 | - Verify that unhealthy pods are not receiving any traffic. 50 | 51 | 7. **Clean Up:** 52 | - Delete all resources you created for this exercise to maintain a clean environment. 53 | 54 | ## Conclusion 55 | 56 | In this lecture, we explored readiness probes and their important role in ensuring that only healthy pods receive traffic. This feature is critical for maintaining the reliability of your applications running on Kubernetes. Remember, setting up these probes not only helps in automatic remediation but also improves user experience by avoiding service interruptions. Keep practicing, and you'll become more proficient with Kubernetes! 57 | -------------------------------------------------------------------------------- /_exercises/06-storage_persistence/01-emptydir.md: -------------------------------------------------------------------------------- 1 | # Working with EmptyDir in Kubernetes 2 | 3 | Welcome to the guide on leveraging the EmptyDir volume in Kubernetes! 🌟 This README will help you understand how to use this ephemeral storage type effectively. 
Before diving into the details, there’s a challenge for you: try to implement the solution on your own based on the overview below! 4 | 5 | ## Overview 6 | 7 | In this exercise, we'll explore how to create and manage a Kubernetes pod that utilizes an EmptyDir volume. The key points we’ll look at include: 8 | 9 | 1. **Create a clean folder** and define a new YAML file for your EmptyDir example. 10 | 2. **Set up the Pod Definition**, including the EmptyDir volume configuration. 11 | 3. **Implement volume mounts** to allow containers to access the EmptyDir volume. 12 | 4. **Test the ephemeral nature** of an EmptyDir by checking file persistence across container restarts. 13 | 5. **Create two containers** within the same pod, designating one as a writer and the other as a reader of the shared volume. 14 | 15 | Now, give this a shot on your own! Once you've tried your hand at implementing it, check out the step-by-step guide below. 16 | 17 | ## Step-by-Step Guide 18 | 19 | 1. **Create a new folder** for your project and navigate into it. 20 | 2. **Create a YAML file** named `empty-dir-example.yaml` and define your pod with the following configurations: 21 | - Define the pod with `apiVersion: v1`, and `kind: Pod`. 22 | - Set the image to `busybox:1.36.1`. 23 | 3. **Define the EmptyDir volume** in the `volumes` section of your pod definition: 24 | ```yaml 25 | volumes: 26 | - name: temporary-storage 27 | emptyDir: {} 28 | ``` 29 | 4. **Add volume mounts** in your container definition to specify where the EmptyDir volume should be accessed: 30 | ```yaml 31 | volumeMounts: 32 | - name: temporary-storage 33 | mountPath: /user/share/temp 34 | ``` 35 | 5. **Deploy your pod** by running `kubectl apply -f empty-dir-example.yaml` in the terminal. 36 | 6. **Test if the setup works** by using `kubectl exec` to enter the container and create files in the mounted directory. 37 | 7. **Check file persistence** by deleting the pod and re-creating it to see if the mounted files are deleted. 38 | 8. **Create a second container** within the same pod, allowing one container to write and the other to read from the same EmptyDir volume: 39 | - Set the reader container with the `readOnly` flag set to `true`. 40 | 41 | ## Conclusion 42 | 43 | You've learned how to implement and manage EmptyDir volumes in Kubernetes! This type of storage is transient and tied to the pod lifecycle, making it essential to consider when handling data in your applications. Keep practicing and exploring other volume types as we continue our Kubernetes journey together! 💻 44 | -------------------------------------------------------------------------------- /_exercises/06-storage_persistence/04-delete_pvc_pv.md: -------------------------------------------------------------------------------- 1 | # Managing Persistent Volumes and Claims in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we’ll explore the implications of deleting Persistent Volume Claims (PVCs) and Persistent Volumes (PVs) in a Kubernetes environment. Before diving into the step-by-step guide, take some time to think through the following main actions you’ll want to implement: 6 | 7 | 1. List your current Pods, PVCs, and PVs. 8 | 2. Delete the Pods using the appropriate command. 9 | 3. Delete the Persistent Volume Claim. 10 | 4. Check the status of the Persistent Volume after deleting the PVC. 11 | 5. Assess whether files remain accessible after deleting the PV and PVC. 
I encourage you to try implementing this yourself first before checking out the detailed steps below. It's a great way to reinforce your learning! 😊

## Step-by-Step Guide

1. **List Existing Resources**: Start by using the command `kubectl get pods,pv,pvc` to see your existing Pods, PVCs, and PVs.
2. **Delete the Pods**: Use the command `kubectl delete pod <pod-name>` to delete both Pods. Be sure to replace `<pod-name>` with each of your actual Pod names, and include the `--force` flag to ensure they're deleted.

3. **Delete the Persistent Volume Claim**: Execute `kubectl delete pvc <pvc-name>` to remove the Persistent Volume Claim.

4. **Check the Status of the Persistent Volume**: Run `kubectl get pv` to see that the status of the Persistent Volume has changed to 'Released'. Remember, with the retained policy, this PV will not be automatically available for use again.

5. **Verify File Persistence**: Use `minikube ssh` and navigate to the directory where you mounted the volume to check if your files (like `hello.txt`) are still present.

6. **Delete Everything (Optional)**: If you choose to delete everything, use `kubectl delete pod <pod-name> --force` for the Pod along with `kubectl delete pvc <pvc-name>` to remove the PVC and check that the PV no longer exists.

## Conclusion

Today, we delved into what happens when we delete Persistent Volume Claims and Persistent Volumes in Kubernetes. We highlighted that while deleting a PVC affects its associated volume's status, files may still persist based on the configuration. Understanding these nuances is crucial for managing data effectively in your Kubernetes clusters. As you continue your journey with Kubernetes, keep practicing these concepts, as hands-on experience will greatly enhance your understanding and skills—keep it up! 🚀

--------------------------------------------------------------------------------
/_exercises/06-storage_persistence/06-statefulsets_create_pvs.md:
--------------------------------------------------------------------------------

# Creating Persistent Volumes and Stateful Sets in Kubernetes

## Overview

In this guide, we will dive into the process of creating Persistent Volumes (PVs) and Stateful Sets in Kubernetes to effectively manage stateful applications. Before we jump into the step-by-step instructions, here's a brief overview of what you should aim to implement:

1. SSH into your MiniKube and create directories for your persistent volumes.
2. Configure the permissions for those directories.
3. Create a YAML file to define your persistent volumes.
4. Apply the configuration to create the persistent volumes in your cluster.
5. Prepare to define your Stateful Set to utilize these persistent volumes.

We encourage you to try implementing these steps yourself before checking the detailed guide that follows. Let's take it on—ready when you are! 💪

## Step-by-Step Guide

1. **SSH into MiniKube**:
   - Access your MiniKube environment.
2. **Create Directories**:
   - Inside your `mount` directory, create three folders: `ss-0`, `ss-1`, and `ss-2`.
3. **Set Permissions**:

   - Change the permissions of these directories to `777` to ensure they are writable.

4. **Create Persistent Volume YAML**:

   - Open your preferred IDE and create a new directory named `stateful_sets`.
   - In this directory, create a file called `PVs.yaml` to define your persistent volumes (a sketch follows this guide).
   - Copy and paste the necessary configuration from a local volume example, adjusting the names (for example, `ss-0`, `ss-1`, and `ss-2`) so each volume maps to one of the directories you created.

5. **Apply Persistent Volumes**:

   - Use the command `kubectl apply -f PVs.yaml` in the terminal to create the persistent volumes.
   - Verify that all persistent volumes are in the "available" status.

6. **Prepare for Stateful Set Definition**:
   - Get ready to define and apply your Stateful Set, which you will do in the next steps.
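Here's a minimal sketch of what one of the three volume definitions could look like. The capacity, reclaim policy, and the `/mnt/ss-0` path are assumptions; mirror whatever your local volume example uses and repeat the block (separated by `---`) for `ss-1` and `ss-2`:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: ss-0
spec:
  capacity:
    storage: 128Mi
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  # Assumed location of the directories created inside Minikube.
  hostPath:
    path: /mnt/ss-0
```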
## Conclusion

In this session, we explored how to create Persistent Volumes and set up Stateful Sets in Kubernetes. These concepts are crucial for managing stateful applications effectively, providing stable identities and storage for your Pods. Keep practicing these techniques, as mastering them will greatly enhance your Kubernetes skills. Happy coding! 🚀

--------------------------------------------------------------------------------
/_exercises/06-storage_persistence/07-statefulsets_create_ss.md:
--------------------------------------------------------------------------------

# Creating Stateful Sets in Kubernetes

## Overview

In this exercise, we're going to focus on creating a StatefulSet in Kubernetes. A StatefulSet is a powerful feature that allows us to manage stateful applications by providing unique, persistent identities to our pods. The goal here is to help you understand how to define and create a StatefulSet, link it with Persistent Volume Claims (PVCs), and manage individual pod stability.

Here's a quick overview of what you should aim to implement:

1. Create a StatefulSet definition with the appropriate API version and kind.
2. Define the metadata for the StatefulSet, including its name.
3. Specify the number of replicas and configure the selector.
4. Set up the volume claim templates to enable volume management for the pods.
5. Apply the StatefulSet configuration and observe the pod behavior.

Before diving into the step-by-step guide, I encourage you to give it a shot and try implementing the solution on your own!

## Step-by-Step Guide

1. **Create StatefulSet Definition**:

   - Start by defining the StatefulSet with `apiVersion: apps/v1` and `kind: StatefulSet`.

2. **Set Metadata**:

   - Add metadata to the StatefulSet, including an easy-to-understand name like `demo-statefulset`.

3. **Define Spec**:

   - Within the spec, set the `serviceName`, specify the desired number of replicas (e.g., 2), and describe the `selector` for managing the pods.

4. **Template and Volume Claim**:

   - Add a section for pod templates similar to Deployments, and include a volume claim template that specifies how to manage persistent storage (a sketch follows this guide).

5. **Apply the Configuration**:

   - Run the command to apply your StatefulSet. Verify that the pods are created with stable, predictable names, such as `demo-ss-0` and `demo-ss-1`.

6. **Validate and Experiment**:
   - Use commands like `kubectl get pods` and `kubectl describe statefulset <statefulset-name>` to ensure everything is running as expected.
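Putting those pieces together, the definition could look roughly like the sketch below. The `demo-ss` name, the busybox image, and the storage size are illustrative assumptions:

```yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: demo-ss
spec:
  serviceName: demo-svc
  replicas: 2
  selector:
    matchLabels:
      app: demo-ss
  template:
    metadata:
      labels:
        app: demo-ss
    spec:
      containers:
        - name: busybox
          image: busybox:1.36.1
          command: ['sh', '-c', 'sleep 3600']
          volumeMounts:
            - name: data
              mountPath: /data
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 128Mi
```

Each replica gets its own claim (`data-demo-ss-0`, `data-demo-ss-1`), which is how pods keep their storage across restarts and rescheduling.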
## Conclusion

Congratulations! You've now learned how to create a StatefulSet in Kubernetes and manage its associated persistent storage effectively. Remember that StatefulSets are ideal when you need stable identities for your pods, which offers significant benefits for applications that manage state. Keep practicing, and don't hesitate to explore the official Kubernetes documentation for more advanced configurations and options! 🚀

--------------------------------------------------------------------------------
/_exercises/06-storage_persistence/09-headless_svc.md:
--------------------------------------------------------------------------------

# Understanding Headless Services in Kubernetes

## Overview

In this exercise, we'll explore the concept of headless services in Kubernetes and their significance, especially in relation to stateful sets. A headless service allows us to reach specific pods directly, providing a stable DNS entry instead of dealing with load balancing. This is particularly useful for applications that maintain some state, like databases.

Before jumping into the step-by-step guide, here's a quick summary of the main steps to implement a headless service and a stateful set:

1. Create a `service.yaml` file to define the headless service.
2. Set the `clusterIP` option to `None` in the service definition.
3. Create a stateful set definition in another YAML file.
4. Ensure the service name matches the one defined in the stateful set.
5. Apply both YAML files to create the resources in Kubernetes.
6. Use curl within a debug pod to test connectivity to specific pods via their DNS entries.

Now, take a moment to try following these steps on your own before looking at the detailed guide below! 🚀

## Step-by-Step Guide

1. **Create a Headless Service**:

   - Create a YAML file named `service.yaml`.
   - Define the API version as `v1`, the kind as `Service`, and set the service name (e.g., `color-service`).
   - Set `clusterIP` to `None` to indicate that it is a headless service (a sketch follows this guide).

2. **Create a Stateful Set**:

   - Create another YAML file for the stateful set (e.g., `statefulset.yaml`).
   - Define the service name to match the headless service.
   - Configure the replicas and ensure the pod specifications are defined appropriately.

3. **Apply the Configurations**:

   - Use `kubectl apply -f service.yaml` to create the headless service.
   - Use `kubectl apply -f statefulset.yaml` to create the stateful set with pods.

4. **Verify the Setup**:

   - Check the services and pods using `kubectl get services` and `kubectl get pods`.
   - Ensure that the headless service does not have a `ClusterIP` assigned.

5. **Test Connectivity**:

   - Create a debug pod using an Alpine image with curl capabilities.
   - Execute curl commands to access specific pods directly using their DNS names (e.g., `color-ss-0.color-service`).

6. **Clean Up**:
   - Remove the created resources with `kubectl delete` commands.
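The service definition itself is tiny. Here's a sketch, where the `app: color-ss` selector is an assumption standing in for whatever labels your stateful set's pods carry:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: color-service
spec:
  # Setting clusterIP to None is what makes the service headless:
  # DNS returns the individual pod addresses instead of one virtual IP.
  clusterIP: None
  selector:
    app: color-ss
  ports:
    - port: 80
```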
## Conclusion

Today, we've delved into headless services within Kubernetes and how they interact with stateful sets. By providing direct access to individual pods, headless services enable applications that need consistent data access across instances. Don't hesitate to keep experimenting and practicing with these concepts, especially in real-world scenarios! 💻

--------------------------------------------------------------------------------
/_exercises/07-configuration_management/02-configmap_env_vars.md:
--------------------------------------------------------------------------------

# Implementing Environment Variables with Config Maps in Kubernetes

## Overview

In this exercise, we're diving into how to use Config Maps to pass data as environment variables to your Kubernetes containers. The goal is to create a Config Map with some configuration data and then use that data as environment variables in a container. Before you look at the step-by-step guide, here's a quick summary of what you will be doing:

1. Create a `red-config.yaml` file with color configuration in a Config Map.
2. Apply the Config Map using `kubectl`.
3. Create a `red-color-api.yaml` file for your pod configuration.
4. Use the Config Map values as environment variables in your pod manifests.
5. Deploy the pod and verify that the color is being served correctly.

Take a moment to try these steps on your own before checking the detailed guide below! 🌟

## Step-by-Step Guide

1. **Create the Config Map:**

   - Create a file named `red-config.yaml` and define a Config Map with a color key set to "red".
   - Ensure that your Config Map is using the `apiVersion` of `v1` and `kind` of `ConfigMap` (a sketch follows this guide).

2. **Apply the Config Map:**

   - Use the command `kubectl apply -f red-config.yaml` to create the Config Map in Kubernetes.

3. **Set Up the Pod Configuration:**

   - Create a new file called `red-color-api.yaml`.
   - Define the pod specifications, including name, labels, and container image.
   - Specify the container port (usually 80).

4. **Configure Environment Variables:**

   - Choose one of two methods to pass environment variables:
     - Method 1: Use the `envFrom` option to load all values from the Config Map, but be cautious about naming conventions.
     - Method 2: Map necessary environment variables individually for better decoupling.

5. **Deploy the Pod:**

   - Apply the pod configuration using `kubectl apply -f red-color-api.yaml`.
   - Check the status of your pod using `kubectl get pods` and see if it's running.

6. **Expose the Pod:**

   - Use the command `kubectl expose pod red-color-api --type=NodePort --port=80`.
   - Access the service using the URL provided by Minikube.

7. **Verify:**
   - Open your web browser to check if the service is correctly returning "red" as expected.
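As a reference for steps 1 through 4, here's a sketch of the Config Map alongside a pod that consumes one key individually (method 2). The Config Map name, the environment variable name `COLOR`, and the image tag are assumptions; align them with your own files and with whatever variable your Color API actually reads:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: red-config
data:
  color: red
---
apiVersion: v1
kind: Pod
metadata:
  name: red-color-api
  labels:
    app: color-api
spec:
  containers:
    - name: red-color-api
      image: lmacademy/color-api:1.1.0
      ports:
        - containerPort: 80
      env:
        # Pull a single key out of the Config Map (method 2).
        - name: COLOR
          valueFrom:
            configMapKeyRef:
              name: red-config
              key: color
```

With method 1, you would replace the `env` block with an `envFrom` entry referencing `red-config`, at the cost of importing every key under its original name.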
## Conclusion

In summary, using Config Maps to manage environment variables allows you to easily change configurations without altering your application code. You've learned two approaches for binding environment variables from a Config Map and how to expose your pod services. Remember to keep practicing what you've learned here! The more you engage with these concepts, the more comfortable you will become with Kubernetes. Happy coding! 🚀

--------------------------------------------------------------------------------
/_exercises/07-configuration_management/05-secrets_volumes.md:
--------------------------------------------------------------------------------

# Passing Secrets as Files and Volume Mounts in Kubernetes

## Overview

In this exercise, we will explore how to securely pass secrets to our containers in Kubernetes by using volume mounts. The goal is to understand how to create a volume definition, mount it to the container, and manage access to the secret values appropriately. Before diving into the step-by-step guide, here's a brief outline of what you'll be trying to implement:

1. Remove unneeded commands and set up a basic container that runs for a while.
2. Define a volume in the pod specification to hold your secrets.
3. Mount the volume to the container at the desired path.
4. Access the secrets to ensure they have been correctly mounted.
5. Explore how to limit access to the secrets for better security.

Take a moment to think through these steps and see if you can implement the solution on your own before checking the detailed guide below! 🚀

## Step-by-Step Guide

1. **Create a Basic Pod Configuration**:

   - Define a pod that uses a base image like BusyBox and runs a sleep command to keep it alive.

2. **Add Volume Definition**:

   - Under the pod specification, create a volumes section and define your secret volume. For example:
     ```yaml
     volumes:
       - name: db-secrets
         secret:
           secretName: db-creds
     ```

3. **Mount the Volume**:

   - Inside the container specification, add a volume mount that specifies where the secrets will be available:
     ```yaml
     volumeMounts:
       - name: db-secrets
         mountPath: /etc/db
     ```

4. **Deploy the Pod**:

   - Use `kubectl apply -f <your-file>.yaml` to create the pod and then check its status with `kubectl get pods`.

5. **Access the Pod**:

   - Use `kubectl exec -it <pod-name> -- /bin/sh` to get a shell in the container.
   - Navigate to `/etc/db` to verify if the secrets are mounted correctly, and use `cat` to read their contents.

6. **Delete the Pod and Secrets**:
   - Once you're done testing, clean up by deleting the Pod and any secrets you've created using `kubectl delete pod <pod-name>` and `kubectl delete secret db-creds`.

## Conclusion

In this lecture, we've covered how to pass secrets to containers in Kubernetes using volume mounts. You've learned about securely managing your secrets and the importance of controlling who has access to them. Remember that managing permissions is critical to maintaining the security of your applications. Keep practicing these concepts as you dive deeper into Kubernetes!

--------------------------------------------------------------------------------
/_exercises/08-deploy_mongodb_database/project_overview.md:
--------------------------------------------------------------------------------

# Project Overview: Color API and MongoDB Integration

Welcome to our project focused on persisting data from our Color API! 🌈 In this exercise, we'll be implementing a solution that connects our Color API to a MongoDB database, allowing color information to be stored and retrieved efficiently.

### Overview

Before diving into the detailed steps, here's a high-level summary of what you'll be working on:

1. Deploy a MongoDB Stateful Set with a headless service.
2. Create a persistent volume claim for dynamic volume provisioning in Minikube.
3. Set up necessary config maps and secrets for database connection.
4. Update the Color API code to handle database connections through environment variables.
5. Utilize MongoDB for database interactions in the Color API.
6. Extend the REST API to include relevant paths for color data management. 
15 | 7. Test the API using a NodePort service to interact with external users. 16 | 17 | Take a moment to implement this solution on your own! You might be surprised at how much you can achieve without immediate guidance. 18 | -------------------------------------------------------------------------------- /_exercises/09-security_fundamentals/01-overview_minikube_user_clusterroles.md: -------------------------------------------------------------------------------- 1 | # Understanding Role-Based Access Control in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we will be exploring the concept of Role-Based Access Control (RBAC) in Kubernetes, which is crucial for managing user permissions and access to resources within your cluster. We will manually set up two users—Alice and Bob—using x509 certificates as their authentication method, and we will see how to connect users to roles through contexts and role bindings. 6 | 7 | Before diving into the step-by-step guide, I encourage you to try implementing the solution on your own! Here’s a quick summary of the main steps to give you a head start: 8 | 9 | 1. Create a directory for your Kubernetes setup. 10 | 2. Generate x509 certificates for the users Alice and Bob. 11 | 3. Set up Kubernetes contexts for the users to connect to the cluster. 12 | 4. Create cluster roles and bind them to the users. 13 | 5. Validate the setup by testing user permissions within the cluster. 14 | 15 | Give it a shot! After you’ve had a go, refer to the step-by-step guide below. 🛠️ 16 | 17 | ## Step-by-Step Guide 18 | 19 | 1. **Create a Directory**: 20 | 21 | - Open a terminal and create a new folder for your Kubernetes setup. 22 | 23 | 2. **Generate x509 Certificates**: 24 | 25 | - Use OpenSSL to generate client certificates for Alice and Bob. Make sure to correctly specify the details such as common name (CN) and organization. 26 | 27 | 3. **Set Up Contexts**: 28 | 29 | - Create contexts in your Kubernetes configuration that reference the users Alice and Bob by their respective certificates. 30 | - Verify your current context with `kubectl config current-context`. 31 | 32 | 4. **Create Cluster Roles**: 33 | 34 | - Use `kubectl create clusterrole` to set up roles for users. For instance, you can create an admin role with various permissions. 35 | 36 | 5. **Create Cluster Role Bindings**: 37 | 38 | - Bind the cluster roles to Alice and Bob by creating appropriate role bindings, ensuring each user has the permissions defined in the earlier step. 39 | 40 | 6. **Test the User Permissions**: 41 | - Switch to each user context and validate their permissions by attempting actions within the cluster, such as deploying a pod or accessing secrets. 42 | 43 | ## Conclusion 44 | 45 | In this lecture, we learned about the significance of RBAC in Kubernetes and how to set up and manage user access using contexts, roles, and bindings. By using x509 certificates, we mapped users to roles effectively, ensuring secure interactions with the Kubernetes API. This foundational understanding of RBAC is essential as you continue your journey in Kubernetes. Keep practicing these concepts to deepen your understanding! 🌟 46 | -------------------------------------------------------------------------------- /_exercises/09-security_fundamentals/02-create_users_alice_bob.md: -------------------------------------------------------------------------------- 1 | # Creating Users: Alice and Bob in Kubernetes 2 | 3 | Welcome! 
In today's session, we're going to dive into creating users in Kubernetes by generating private keys and certificate signing requests for Alice and Bob. Let's get our hands dirty with some practical steps! 🛠️

## Overview

In this exercise, we'll implement the following steps to create users Alice and Bob:

1. **Install OpenSSL** if not already installed on your system.
2. **Generate RSA private keys** for both Alice and Bob.
3. **Create certificate signing requests (CSRs)** for Alice and Bob using their private keys.
4. **Prepare a CSR YAML file** to define the user attributes and signing settings in Kubernetes.
5. **Apply the CSR** using Kubernetes command-line tools.
6. **Approve the CSRs** to finalize user creation.
7. **Retrieve and save public certificates** for Alice and Bob.

Take a moment to try these steps on your own before checking the guided instructions below. It's a great way to learn by doing!

## Step-by-Step Guide

Here's a concise guide to help you through the implementation:

1. **Install OpenSSL** on your machine. You can find installation instructions online if you don't have it.
2. Open your terminal and generate Alice's private key:
   ```bash
   openssl genrsa -out alice.key 2048
   ```
3. Do the same for Bob:
   ```bash
   openssl genrsa -out bob.key 2048
   ```
4. Create a certificate signing request for Alice:
   ```bash
   openssl req -new -key alice.key -out alice.csr -subj "/CN=Alice/O=admins"
   ```
5. Repeat for Bob:
   ```bash
   openssl req -new -key bob.key -out bob.csr -subj "/CN=Bob/O=dev"
   ```
6. Open your IDE and create a file named `csr.yaml`. Add the necessary YAML configuration to define both CSRs, including the signer name and expiration settings (a sketch follows this list).
7. Apply the CSR file in Kubernetes:
   ```bash
   kubectl apply -f csr.yaml
   ```
8. Approve the CSRs:
   ```bash
   kubectl certificate approve alice
   kubectl certificate approve bob
   ```
9. Retrieve Alice's certificate:
   ```bash
   kubectl get csr alice -o jsonpath='{.status.certificate}' | base64 --decode > alice.crt
   ```
10. Repeat for Bob to get his certificate:
    ```bash
    kubectl get csr bob -o jsonpath='{.status.certificate}' | base64 --decode > bob.crt
    ```
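For step 6, the manifest could look roughly like this sketch. The one-day `expirationSeconds` is an assumed value, and each `request` field must hold the base64-encoded CSR (for example, the output of `cat alice.csr | base64 | tr -d '\n'`):

```yaml
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: alice
spec:
  # base64-encoded contents of alice.csr go here.
  request: <base64-encoded alice.csr>
  signerName: kubernetes.io/kube-apiserver-client
  expirationSeconds: 86400
  usages:
    - client auth
---
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
  name: bob
spec:
  request: <base64-encoded bob.csr>
  signerName: kubernetes.io/kube-apiserver-client
  expirationSeconds: 86400
  usages:
    - client auth
```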
Create new contexts for users Alice and Bob within this kubeconfig. 11 | 3. Set credentials for each user by referencing their respective client key and client certificate. 12 | 4. Confirm that the new users are properly added to your kubeconfig. 13 | 5. Attempt to use the contexts for Alice and Bob and observe the current access limitations. 14 | 15 | Give it a try! It’s a great opportunity to practice on your own before checking the step-by-step guide below. 16 | 17 | ## Step-by-Step Guide 18 | 19 | 1. **Locate Your Kubeconfig**: Use a terminal to find your default kubeconfig file. Typically, it’s at `~/.kube/config`. 20 | 2. **Create Context for Alice**: 21 | 22 | - Use the command: 23 | ```bash 24 | kubectl config set-context alice --cluster=minikube --user=alice 25 | ``` 26 | 27 | 3. **Set Credentials for Alice**: 28 | 29 | - Reference Alice's client key and certificate: 30 | ```bash 31 | kubectl config set-credentials alice --client-key=path/to/alice.key --client-certificate=path/to/alice.crt 32 | ``` 33 | 34 | 4. **Create Context for Bob**: 35 | 36 | - Use the command: 37 | ```bash 38 | kubectl config set-context bob --cluster=minikube --user=bob 39 | ``` 40 | 41 | 5. **Set Credentials for Bob**: 42 | 43 | - Reference Bob's client key and certificate: 44 | ```bash 45 | kubectl config set-credentials bob --client-key=path/to/bob.key --client-certificate=path/to/bob.crt 46 | ``` 47 | 48 | 6. **Switch Contexts**: 49 | 50 | - To check if the users are set up correctly, switch to each context: 51 | ```bash 52 | kubectl config use-context alice 53 | kubectl config use-context bob 54 | ``` 55 | 56 | 7. **Test Access**: Attempt to list pods: 57 | ```bash 58 | kubectl get pods 59 | ``` 60 | Expect an error indicating permission issues, as roles and bindings haven't been configured yet. 61 | 62 | ## Conclusion 63 | 64 | Congratulations on nearly finishing the user setup process in Kubernetes! 🎉 You've learned how to create user contexts and set credential paths, which is essential for managing secure access to your cluster. Remember, this is just the beginning. In upcoming lectures, we will cover configuring permissions with roles and bindings, so stay tuned and keep practicing what you’ve learned! 65 | -------------------------------------------------------------------------------- /_exercises/09-security_fundamentals/04-exploring_api_resources.md: -------------------------------------------------------------------------------- 1 | # Exploring Kubernetes API Resources 2 | 3 | Welcome! In this session, we’re diving into the fascinating world of Kubernetes API resources. You’ll get the chance to explore the documentation and understand how to interact with various resources in Kubernetes. 🌟 Let’s get started! 4 | 5 | ## Overview 6 | 7 | Before we jump into the step-by-step guide, take a moment to think about how you’d implement the following steps on your own. The goal is to familiarize yourself with Kubernetes API resources and learn how to interact with them effectively. Here’s a quick summary: 8 | 9 | 1. **Access the Kubernetes API documentation** and explore the various resource types. 10 | 2. **Examine workload resources**, focusing on deployments and replica sets. 11 | 3. **Use the `kubectl` command to list available resources** in your cluster. 12 | 4. **Filter resources based on their API group** and check the actions allowed for each resource. 13 | 14 | Now, let's see if you can implement this before checking the detailed guide below! Give it a try! 
🙌 15 | 16 | ## Step-by-Step Guide 17 | 18 | 1. **Open the Kubernetes API documentation**: Start by navigating to the official Kubernetes API reference documentation online. 19 | 20 | 2. **Explore the resource types**: On the left side of the documentation, you’ll find multiple resource types. Click on "Workloads" to explore `Pods`, `Replica Sets`, and `Deployments`. 21 | 22 | 3. **Review the Deployment structure**: Within the deployment section, scroll down to the `spec` part to understand which fields are required and their default values. 23 | 24 | 4. **Using `kubectl` to explore resources**: 25 | 26 | - To list all resources, run the command: 27 | ```bash 28 | kubectl api-resources 29 | ``` 30 | 31 | 5. **Filter by API group**: 32 | 33 | - For instance, if you want to see resources under the "storage" API group, use: 34 | ```bash 35 | kubectl api-resources --api-group=storage.k8s.io 36 | ``` 37 | 38 | 6. **Check allowed actions**: When listing resources, you can also check the verbs (actions) allowed for each resource type to understand what operations you can perform. 39 | 40 | 7. **Inspect resource details**: Explore the different API versions available and familiarize yourself with both stable and alpha/beta resources. 41 | 42 | ## Conclusion 43 | 44 | Great job! By following this guide, you should be well on your way to understanding how to explore and interact with Kubernetes API resources. Remember, the key takeaway is to familiarize yourself with the API documentation, as it is an essential tool for working with Kubernetes. Keep practicing, and don’t hesitate to explore more resources to deepen your understanding! Happy learning! 🚀 45 | -------------------------------------------------------------------------------- /_exercises/09-security_fundamentals/06-clusterroles_clusterrolebindings.md: -------------------------------------------------------------------------------- 1 | # Understanding Cluster Roles and Cluster Role Bindings 2 | 3 | ## Overview 4 | 5 | In this part of our Kubernetes journey, we're diving into how to effectively use cluster roles and cluster role bindings. The goal is to grant broader permissions to manage pods, especially for users like Alice and her admin group. Here’s a quick peek at what you’ll be trying to implement: 6 | 7 | 1. Create a cluster role that allows pod management (creation, deletion, updating) for administrators. 8 | 2. Set up a cluster role binding to associate this role with the admin group. 9 | 3. Test the permissions by switching between different user contexts to verify that they have the expected access. 10 | 11 | Before checking the detailed steps below, take a moment to try implementing this solution on your own. You might discover some insights as you process the implementation! 🌟 12 | 13 | ## Step-by-Step Guide 14 | 15 | 1. **Create a New Directory**: Under the top directory, create a folder named `cluster_roles`. 16 | 2. **Copy and Rename Files**: Copy the relevant role files into this new folder and rename them to `pod_admin.yaml` and `pod_admin_role_binding.yaml`. 17 | 3. **Adjust the Cluster Role**: 18 | - Change the kind from `Role` to `ClusterRole`. 19 | - Remove any namespace specification since it’s not applicable for cluster roles. 20 | - Update resource permissions to include pods and their logs. 21 | - Allow all verbs by using the wildcard character `*`. 22 | 4. **Define the Cluster Role Binding**: 23 | - Change the kind from `RoleBinding` to `ClusterRoleBinding`. 
24 | - Specify the `pod_admin` cluster role and include the admin group in the subjects section. 25 | 5. **Apply the Configuration**: 26 | - Open your terminal and switch to the context of your Kubernetes cluster. 27 | - Apply the cluster role and cluster role binding files using `kubectl apply -f`. 28 | 6. **Verify Permissions**: 29 | - Test with the user contexts for Bob and Alice to confirm that only Alice (and users of the admin group) can manage pods, while Bob cannot. 30 | 31 | ## Conclusion 32 | 33 | In this session, we explored how to utilize cluster roles and role bindings to effectively manage pod permissions in Kubernetes. We learned how to create broader permissions for admin groups, ensuring they can perform various operations on pods across namespaces. Keep practicing this concept to deepen your understanding, and don't hesitate to experiment with different configurations. You’re doing great! 🚀 34 | -------------------------------------------------------------------------------- /_exercises/09-security_fundamentals/08-default_service_accounts.md: -------------------------------------------------------------------------------- 1 | # Service Accounts in Kubernetes 2 | 3 | Welcome to our session on Service Accounts in Kubernetes! 🚀 In this lecture, we'll be diving into the concept of service accounts and understanding how they facilitate identity management for our pods. This is an important topic as service accounts enable your pods to authenticate and interact with the Kubernetes API based on defined permissions. 4 | 5 | ## Overview 6 | 7 | Before we get into the step-by-step guide, think about how you can implement your own service account and utilize it in your pods to manage permissions and interactions. Here’s a brief summary of the steps you can try on your own: 8 | 9 | 1. Create a new service account in your desired namespace. 10 | 2. Define a pod that utilizes this service account for its operations. 11 | 3. Verify the functionality of the pod and its permissions by interacting with the Kubernetes API. 12 | 4. Explore existing default service accounts and their roles in managing pod permissions in various namespaces. 13 | 14 | I encourage you to give these steps a shot before diving into the detailed guide below! 15 | 16 | ## Step-by-Step Guide 17 | 18 | 1. **Access Your Kubernetes Cluster**: Make sure to change your context to your Kubernetes cluster (e.g., `minikube`). 19 | 2. **Create a Service Account**: 20 | ```bash 21 | kubectl create serviceaccount my-service-account --namespace dev 22 | ``` 23 | 3. **Define a Pod Using the Service Account**: 24 | Create a YAML file (e.g., `my-pod.yaml`) with the following content: 25 | ```yaml 26 | apiVersion: v1 27 | kind: Pod 28 | metadata: 29 | name: my-pod 30 | namespace: dev 31 | spec: 32 | serviceAccountName: my-service-account 33 | containers: 34 | - name: my-container 35 | image: outpine/curl:1.0.0 36 | ``` 37 | 4. **Apply the Pod Definition**: 38 | ```bash 39 | kubectl apply -f my-pod.yaml 40 | ``` 41 | 5. **Check the Pod Status**: 42 | ```bash 43 | kubectl get pods -n dev 44 | ``` 45 | 6. **Describe the Pod**: Verify the service account being used: 46 | ```bash 47 | kubectl describe pod my-pod -n dev 48 | ``` 49 | 7. **Interact with the Kubernetes API from the Pod**: 50 | You can exec into the pod and try running `kubectl` commands or curl requests to the Kubernetes API. 
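For reference, here is a minimal sketch of what that interaction could look like (the token and CA certificate paths below are the standard locations where Kubernetes mounts service account credentials; expect `403 Forbidden` responses until the service account is bound to a role):

```bash
# open a shell inside the pod
kubectl exec -it my-pod -n dev -- sh

# inside the container: read the mounted service account token
TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)

# call the API server via its in-cluster DNS name
curl --cacert /var/run/secrets/kubernetes.io/serviceaccount/ca.crt \
  -H "Authorization: Bearer $TOKEN" \
  https://kubernetes.default.svc/api/v1/namespaces/dev/pods
```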
51 | 52 | ## Conclusion 53 | 54 | Today, we explored the key concept of service accounts in Kubernetes, how they manage identities, and how to create and utilize your own service accounts for your pods. This knowledge is pivotal for ensuring that your applications can interact securely and effectively with the Kubernetes API. Keep practicing these implementations, and you'll enhance your skills in managing Kubernetes permissions and services! Remember, consistent practice will lead to deeper understanding. 🧠 55 | -------------------------------------------------------------------------------- /_exercises/09-security_fundamentals/10-cleanup.md: -------------------------------------------------------------------------------- 1 | # Kubernetes Fundamentals: Cluster Cleanup 2 | 3 | Welcome to this segment on cluster cleanup! In this guide, we'll focus on tidying up your Kubernetes cluster to ensure everything is neat and organized. It's important to manage our resources efficiently so that we can avoid any confusion in the future. 4 | 5 | ## Overview 6 | 7 | In this exercise, you'll learn how to clean up your Kubernetes cluster by removing unnecessary resources. Before diving into the step-by-step guide, here's a quick summary of what you'll be implementing: 8 | 9 | 1. Identify any existing resources in your cluster (like pods, certificates, roles, etc.). 10 | 2. Remove any unwanted resources, either by deleting entire namespaces or specific objects. 11 | 3. Optionally, clean up users and contexts if needed. 12 | 4. Ensure that your cluster is clear of resources that are no longer in use. 13 | 14 | We encourage you to try implementing these steps on your own before checking out the detailed guide. Give it a shot! 🚀 15 | 16 | ## Step-by-Step Guide 17 | 18 | Here's a straightforward guide to help you clean up your Kubernetes cluster: 19 | 20 | 1. **Check existing resources**: Use commands like `kubectl get pods`, `kubectl get csr`, etc., to list out all your current resources. 21 | 2. **Delete Pods**: 22 | - Remove your pod definitions by using `kubectl delete -f <file.yaml>` (adding `--force` if needed). 23 | 3. **Delete Certificate Signing Requests (CSRs)**: 24 | - Run `kubectl delete csr <csr-name>` to remove any CSRs present. 25 | 4. **Delete Roles and Role Bindings**: 26 | - If you have specific roles to delete, use `kubectl delete -f <file.yaml>` for each relevant role binding. 27 | 5. **Delete Service Accounts**: 28 | - Similar to roles, delete any service accounts with `kubectl delete -f <file.yaml>`. 29 | 6. **Remove Namespaces** (if empty): 30 | - Utilize `kubectl delete namespace <namespace>` for namespaces you want to clean up. 31 | 7. **Review Users and Contexts**: 32 | - Decide if you want to keep or delete specific users and contexts from your config. 33 | 34 | ## Conclusion 35 | 36 | Today, we've covered how to efficiently clean up your Kubernetes cluster by removing unnecessary resources. Keeping your cluster tidy is crucial for improved resource management and clarity in future projects. Keep practicing this skill as you'll find it immensely beneficial in your journey with Kubernetes.
37 | -------------------------------------------------------------------------------- /_exercises/09-security_fundamentals/12-allow_curl.md: -------------------------------------------------------------------------------- 1 | # Implementing Network Policies to Allow Traffic in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we aim to implement a network policy that allows traffic from pods labeled `app: curl` to reach our application pods labeled `app: color-api`. This is a great opportunity to enhance your understanding of Kubernetes network policies! Before diving into the step-by-step guide, try to outline the solution yourself by following these main steps: 6 | 7 | 1. Define a new network policy resource named `allow-curl`. 8 | 2. Set the API version and specify the kind of resource you'll be creating. 9 | 3. Specify the metadata for your network policy. 10 | 4. Define the pod selector to match the labels of the pods you want to control. 11 | 5. Establish the ingress rules that define what is allowed to access your application pods. 12 | 6. Apply your network policy and test the configuration with curl commands. 13 | 14 | Give it a shot, and once you're ready, check out the detailed step-by-step guide below! 🚀 15 | 16 | ## Step-by-Step Guide 17 | 18 | 1. **Create a YAML file**: Start by creating a file named `allow-curl.yaml`. 19 | 2. **Define the API version**: Set the API version to `networking.k8s.io/v1`. 20 | 3. **Set the kind**: Specify the kind of the resource as `NetworkPolicy`. 21 | 4. **Add metadata**: Under metadata, add a name field with the value `allow-curl` (Kubernetes resource names cannot contain spaces). 22 | 5. **Define the spec**: In the spec section, include a pod selector that matches the pod labels, setting it to `app: color-api`. 23 | 6. **Set the policy type**: Specify the policy type as `Ingress`. 24 | 7. **Define ingress rules**: Create a list of ingress rules that allow traffic from pods with the label `app: curl`. Make sure to account for any additional conditions you want. 25 | 8. **Apply the policy**: Run the command `kubectl apply -f allow-curl.yaml` to deploy your policy. 26 | 9. **Test the setup**: Use curl commands within the allowed pod to verify proper communication with the color API. 27 | 28 | Remember, if the changes don't apply immediately, you may need to delete and recreate the affected pods to see the results. 29 | 30 | ## Conclusion 31 | 32 | Congratulations on learning how to implement your first network policies in Kubernetes! By allowing traffic from specific pods, you're taking significant steps toward more secure and efficient applications. Keep practicing and exploring the capabilities of Kubernetes, and don't hesitate to come back if you have questions. You've got this! 🌟 33 | -------------------------------------------------------------------------------- /_exercises/09-security_fundamentals/13-podselector.md: -------------------------------------------------------------------------------- 1 | # Understanding Kubernetes Network Policies: Pod Selector and Ingress 2 | 3 | Welcome to this exercise where we'll dive into defining selectors in the ingress section of Kubernetes network policies! 🐳 In this session, we'll explore how to use pod selectors effectively using match expressions, and we're going to give you a chance to apply what you learn hands-on. Before looking at the step-by-step guide, I encourage you to tackle the exercise on your own. Ready? Let's break it down!
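As a quick taste before the overview, here is a rough sketch of the kind of selector we'll end up writing. The policy name and labels are illustrative (borrowed from this chapter's `curl` and `color-api` pods) rather than taken from the repository files:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-selected-pods # hypothetical name
spec:
  podSelector:
    matchLabels:
      app: color-api
  policyTypes:
    - Ingress
  ingress:
    - from:
        # expressions within one matchExpressions list are AND-ed;
        # separate entries under `from` would be OR-ed instead
        - podSelector:
            matchExpressions:
              - key: app
                operator: In
                values: ['curl']
              - key: tier
                operator: In
                values: ['backend']
```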
4 | 5 | ## Overview 6 | 7 | In this exercise, you should aim to implement a network policy that determines which pods can communicate with each other based on their labels. The specific tasks are as follows: 8 | 9 | 1. Define pod selectors in a network policy using match expressions. 10 | 2. Apply multiple pod selector conditions. 11 | 3. Test your policy to confirm that it allows or denies traffic based on the labels. 12 | 4. Modify the network policy to test the behavior with different label combinations. 13 | 14 | Give it your best shot to implement these steps before checking the detailed guide below! 15 | 16 | ## Step-by-Step Guide 17 | 18 | 1. **Define your Pod Selectors**: Create a network policy with pod selectors that allow traffic based on certain label conditions using the `in` operator. 19 | 2. **Apply Multiple Conditions**: If needed, add additional pod selectors that include different labels to test behavior with logical `or` conditions. 20 | 3. **Test Your Network Policy**: 21 | - Apply the policy using `kubectl apply`. 22 | - Create pods with specified labels (like `app=curl` and `tier=backend`). 23 | 4. **Examine Communication**: Use `kubectl exec` to enter a pod and test connections (like curl requests) to see if they succeed or fail based on the policy. 24 | 5. **Modify and Reapply Your Policy**: 25 | - Change your pod labels or policy definitions and reapply them to observe the changes in behavior. 26 | 6. **Confirm Results**: Test again to make sure the policy behaves as expected when pod labels meet or don’t meet the specified conditions. 27 | 28 | ## Conclusion 29 | 30 | In this lecture, we've explored how to define pod selectors within network policies in Kubernetes, specifically looking at how to use match expressions and behavior under different configurations. Remember, practicing these skills will help deepen your understanding of Kubernetes network management. Keep experimenting and enhancing your knowledge! 🚀 31 | -------------------------------------------------------------------------------- /_exercises/09-security_fundamentals/15-egress.md: -------------------------------------------------------------------------------- 1 | # Egress Network Policies in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we’ll dive into implementing egress network policies in Kubernetes, similar to what we've covered with ingress policies. The goal here is to restrict outbound traffic from your pods while allowing specific traffic to designated services, like your color API. Before looking at the step-by-step guide, I encourage you to try implementing the solution on your own! Here’s a brief outline of what you need to do: 6 | 7 | 1. **Deny all egress traffic** for your pods by applying a default deny all policy. 8 | 2. **Create a pod** that will use the egress rules. 9 | 3. **Allow outbound traffic** from the curl pod to the color API. 10 | 4. **Allow inbound traffic** from the curl pod to the color API. 11 | 5. **Ensure DNS resolution** works for your pods by allowing egress traffic to the CoreDNS service. 12 | 13 | Now, let's see if you can tackle this on your own before checking out the detailed steps! 🚀 14 | 15 | ## Step-by-Step Guide 16 | 17 | Here’s a clear breakdown to help you implement the egress policies: 18 | 19 | 1. **Deny All Egress Traffic:** 20 | 21 | - Create a file named `deny-all.yaml` (or a similar name), and define a policy that denies all outgoing traffic. Apply it to your cluster. 22 | 23 | 2. 
**Create Your Pod:** 24 | 25 | - Use the `curl.yaml` (or your equivalent) to create the curl pod. This will be your testing ground for network policies. 26 | 27 | 3. **Allow Egress to Color API:** 28 | 29 | - Create a YAML file for egress rules that allows traffic from your curl pod to your color API pods. Make sure to define the policy with the correct selectors. 30 | 31 | 4. **Allow Ingress from Curl Pod:** 32 | 33 | - Modify your color API policy to also permit traffic coming from the curl pod. Make sure you mirror the selectors accurately. 34 | 35 | 5. **Setup DNS Access:** 36 | 37 | - Adjust your egress policy to allow traffic to the CoreDNS service to ensure DNS resolution works for your pods. 38 | 39 | 6. **Test the Implementation:** 40 | - After applying all policies and recreating your pods, test the connectivity from the curl pod to the color API service and ensure DNS queries resolve correctly. 41 | 42 | ## Conclusion 43 | 44 | In this lecture, we've explored how to implement egress network policies in Kubernetes. We started with a default deny-all approach and then created specific rules to allow traffic to the color API and DNS services as needed. Remember that understanding how to control pod communication is key to securing your Kubernetes environment. Keep practicing these concepts, and soon you'll feel more confident managing network policies in your deployments! 💻 45 | -------------------------------------------------------------------------------- /_exercises/09-security_fundamentals/16-network_policies_namespace.md: -------------------------------------------------------------------------------- 1 | # Understanding Network Policies in Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we will delve into the concept of network policies in Kubernetes, particularly with respect to their application in different namespaces. You'll explore how to create and apply a 'deny all' policy and understand its effects on namespace traffic. 6 | 7 | Before you dive into the step-by-step instructions, give it a try on your own! Here’s a summary of the main steps you should attempt: 8 | 9 | 1. **Create a new namespace** (e.g., `dev`). 10 | 2. **Define a 'deny all' network policy** and apply it. 11 | 3. **Deploy a pod** in the new namespace to test connectivity. 12 | 4. **Validate the network policy** by attempting to ping external services from different pods. 13 | 14 | Take your time, work through these steps, and see how the network policies behave. Once you've given it a shot, check out the guide below! 15 | 16 | ## Step-by-Step Guide 17 | 18 | 1. **Create a New Namespace**: 19 | 20 | - Use the following command to create a new namespace named `dev`: 21 | ```shell 22 | kubectl create namespace dev 23 | ``` 24 | 25 | 2. **Define a 'Deny All' Network Policy**: 26 | 27 | - Create a YAML file that specifies a network policy to deny all ingress and egress traffic: 28 | ```yaml 29 | apiVersion: networking.k8s.io/v1 30 | kind: NetworkPolicy 31 | metadata: 32 | name: deny-all 33 | namespace: default 34 | spec: 35 | podSelector: {} 36 | policyTypes: 37 | - Ingress 38 | - Egress 39 | ``` 40 | 41 | 3. **Apply the Network Policy**: 42 | 43 | - Save the YAML above as `deny-all.yaml` and apply it using: 44 | ```shell 45 | kubectl apply -f deny-all.yaml 46 | ``` 47 | 48 | 4. **Deploy a Test Pod**: 49 | 50 | - Deploy a pod that you will use to test connectivity: 51 | ```shell 52 | kubectl run curl3 --image=radial/busyboxplus:curl -n dev --restart=Never -- sleep 3600 53 | ``` 54 | 55 | 5. 
**Test Connectivity**: 56 | 57 | - Exec into the pod: 58 | ```shell 59 | kubectl exec -it curl3 -n dev -- /bin/sh 60 | ``` 61 | - Try to ping an external service (e.g., google.com): 62 | ```shell 63 | ping google.com 64 | ``` 65 | 66 | 6. **Repeat with Different Namespaces**: 67 | - Create another test environment and see how the policy behaves with other namespaces to confirm its behavior. 68 | 69 | ## Conclusion 70 | 71 | In this session, we've explored how network policies in Kubernetes operate on a namespace-level basis, particularly how they can deny traffic effectively. By structuring your network policies correctly, you ensure that your applications remain secure and performant across different namespaces. Keep practicing this concept to solidify your understanding! 🌐 72 | -------------------------------------------------------------------------------- /_exercises/09-security_fundamentals/17-pod_security_standards.md: -------------------------------------------------------------------------------- 1 | # Pod Security Standards Implementation Guide 2 | 3 | ## Overview 4 | 5 | In this session, we will delve into the implementation of pod security standards in Kubernetes. By the end of this exercise, you should be able to create namespaces, define security contexts, and enforce various security levels for your pods. 6 | 7 | Here’s a quick overview of the steps you can try on your own before diving into the detailed guide: 8 | 9 | 1. Create a new folder to organize your namespace definitions. 10 | 2. Define two namespaces: `privileged` and `baseline`. 11 | 3. Set appropriate labels for each namespace to enforce security standards. 12 | 4. Create pod definitions for both `privileged` and `baseline` pods. 13 | 5. Implement a proper security context for the `privileged` pod. 14 | 6. Test the deployment of both pods in their respective namespaces. 15 | 7. Apply best practices to the `baseline` pod to avoid violations. 16 | 17 | Give it a shot! 🚀 Try implementing these steps on your own first. Once you feel ready, you can check out the step-by-step guide below. 18 | 19 | ## Step-by-Step Guide 20 | 21 | 1. **Create a New Folder**: Start by creating a new folder (e.g., `namespaces`) in your IDE to keep your work organized. 22 | 23 | 2. **Define Namespaces**: 24 | 25 | - Inside your folder, create YAML files for the `privileged` and `baseline` namespaces. 26 | - Set the API version, kind, and metadata with appropriate labels for security enforcement. 27 | 28 | 3. **Label Your Namespaces**: 29 | 30 | - In the `privileged` namespace, line up the security label to enforce privileged options. 31 | - In the `baseline` namespace, set warnings for the baseline security standard violations. 32 | 33 | 4. **Create Pod Definitions**: 34 | 35 | - Create a YAML file for each pod, using an image like `nginx:1.27.0`. 36 | - For the `privileged` pod, include a security context with `privileged` set to true. 37 | 38 | 5. **Handle the Baseline Pod**: 39 | 40 | - In the `baseline` pod definition, do not set a security context for privileges initially. 41 | - Attempt to deploy the `baseline` pod and observe any warnings or errors. 42 | 43 | 6. **Adjust for Violations**: 44 | 45 | - Modify the `baseline` pod to enforce required security contexts (e.g., disable privilege escalations, etc.) based on warnings received during deployment. 46 | 47 | 7. **Testing**: Deploy both pods and verify their statuses in their respective namespaces using the `kubectl get pods` command. 48 | 49 | 8. 
**Cleanup**: Once done, delete the pods and namespaces you've created to keep things tidy. 50 | 51 | ## Conclusion 52 | 53 | In this session, we've explored the implementation of pod security standards in Kubernetes by creating and managing namespaces and enforcing security policies. By practicing these configurations, you'll enhance your understanding of Kubernetes security practices. Keep experimenting and pushing the limits of your Kubernetes knowledge! 🌟 54 | -------------------------------------------------------------------------------- /_exercises/09-security_fundamentals/18-pod_security_standards_documentation.md: -------------------------------------------------------------------------------- 1 | # Pod Security Standards Documentation Review 2 | 3 | ## Overview 4 | 5 | In this exercise, we will explore the Pod Security Standards and learn how to apply them to enhance the security posture of our applications. The goal is to understand the differences between the privileged baseline and restricted standards, and how to comply with them when configuring our Kubernetes containers. Before diving into the detailed guide, I encourage you to take a moment to try implementing the solution yourself! Here’s a simple summary of the steps to follow: 6 | 7 | 1. Review the Pod Security Standards documentation, focusing on the baseline and restricted policies. 8 | 2. Identify which fields are restricted for Windows pods and general container configurations. 9 | 3. Understand the capabilities you can add without violating the security standards. 10 | 4. Analyze the restrictions on volume types and privilege escalation in your setup. 11 | 5. Apply these insights to enhance the security of your applications. 12 | 13 | Ready to give it a go? Let's see what you can come up with before going into the step-by-step guide! 🚀 14 | 15 | ## Step-by-Step Guide 16 | 17 | 1. **Access the Documentation**: Start by visiting the Pod Security Standards documentation page. Familiarize yourself with the layout and the types of information provided. 18 | 2. **Examine the Baseline and Restricted Policies**: Pay close attention to the differences highlighted in the baseline and restricted standards. Make a note of fields that are restricted for both Windows containers and general containers. 19 | 20 | 3. **Review Capabilities**: List the capabilities you are allowed to add without violating the security standards. These are critical for ensuring that your containers remain secure while still functional. 21 | 22 | 4. **Check Volume Restrictions**: Investigate what types of volumes can be created under both the baseline and restricted policies. Focus on any restrictions regarding local volumes and host paths. 23 | 24 | 5. **Implement Security Measures**: Using the insights you gathered, apply these security measures to your application's configuration. Be sure to document any changes you make for future reference. 25 | 26 | ## Conclusion 27 | 28 | In summary, we learned about the importance of understanding Pod Security Standards and how they serve as a framework for securing our Kubernetes applications. By carefully examining the restrictions on configurations, capabilities, and volumes, we can significantly improve the security posture of our apps. As you continue your learning journey, keep these principles in mind, and don't hesitate to experiment with different configurations. Happy learning! 
🌟 29 | -------------------------------------------------------------------------------- /_exercises/10-kustomize/03-transformations.md: -------------------------------------------------------------------------------- 1 | # Understanding Transformations in Kubernetes Customization 2 | 3 | ## Overview 4 | 5 | In this lecture, we're diving into the transformations you can implement using Kustomize, particularly focusing on how to adjust resource names, labels, and images within overlays. The goal is to give your applications meaningful identifiers and keep your deployments organized. Here's a quick summary of steps you should consider trying on your own before peeking at the step-by-step guide: 6 | 7 | 1. Set name prefixes and suffixes for your resources. 8 | 2. Apply common labels to all resources to identify them easily. 9 | 3. Add common annotations for additional context. 10 | 4. Change the image version for your application. 11 | 5. Adjust the replica count based on your environment needs. 12 | 13 | Give it your best shot to implement these changes before checking out the detailed steps! 🌟 14 | 15 | ## Step-by-Step Guide 16 | 17 | Follow these steps to implement the transformations using Kustomize: 18 | 19 | 1. **Set Name Prefixes and Suffixes**: Update the names of your resources by adding prefixes and suffixes. For example, prefix with `dev-` and suffix with `-alpha`. 20 | 2. **Apply Common Labels**: Create a set of labels, such as: 21 | 22 | - Team: finance 23 | - Project: e-commerce app 24 | - Tier: backend 25 | - Environment: dev 26 | 27 | Make sure to delete the old resources if necessary before applying the new labels. 28 | 29 | 3. **Add Common Annotations**: Include useful annotations for your resources, like a maintainer contact (e.g., `finance@company.org`) and repository links. 30 | 31 | 4. **Change the Image Version**: Modify the image version in your kustomization file to reflect the appropriate version for your development environment (e.g., `1.27.1`). 32 | 33 | 5. **Adjust Replica Count**: Set a lower replica count where you want to conserve resources, and a higher one where you need capacity. For example, set the nginx deployment to four replicas in your production overlay. 34 | 35 | 6. **Inspect Your Changes**: Use the command `kubectl apply -k ./nginx-app/overlays/dev` to apply your changes and verify they have taken effect. 36 | 37 | ## Conclusion 38 | 39 | In this session, we explored the powerful transformations you can achieve with Kustomize. By modifying resource names, applying common labels and annotations, updating image versions, and adjusting replica counts, you can efficiently manage your Kubernetes environments. Keep pushing forward with your learning journey, and don't hesitate to practice these transformations to solidify your understanding! 🚀 40 | -------------------------------------------------------------------------------- /_exercises/10-kustomize/04-configmap_generator.md: -------------------------------------------------------------------------------- 1 | # Generating Config Maps with Kubernetes 2 | 3 | ## Overview 4 | 5 | In this exercise, we'll be diving into the powerful capabilities of Kubernetes' Kustomize tool to generate Config Maps. The aim is to familiarize ourselves with creating these maps using various data sources and options. Before looking at the step-by-step guide, try to implement the solution yourself! Here's a quick summary to get you started: 6 | 7 | 1. 
Open your `kustomization.yaml` file in your development overlay. 8 | 2. Define the `configMapGenerator` field at the bottom of the file. 9 | 3. Create your first Config Map using literals and specify key-value pairs. 10 | 4. Add a Config Map entry based on a JavaScript file. 11 | 5. Generate a Config Map using an `.env` file while noting the differences in key-value pairs handling. 12 | 6. Explore Kustomize options like disabling name suffix hashes and adding common labels. 13 | 14 | Give it a go and see how much you can implement on your own before diving into the detailed guide! 💪 15 | 16 | ## Step-by-Step Guide 17 | 18 | 1. **Open your IDE**: Start by accessing your `kustomization.yaml` file located in the dev overlay. 19 | 2. **Define Config Map Generator**: 20 | - Create a field called `configMapGenerator` towards the bottom of the file. 21 | - Provide a list of generators (you can have multiple). 22 | 3. **Create Config Map from Literals**: 23 | - For example, name it `feature-flags-config`. 24 | - Add literals with key-value pairs (e.g., `useDB: true`, `exposeMetrics: true`). 25 | 4. **Generate Config Map**: 26 | - Navigate to the terminal and run the command `kubectl kustomize` while pointing to your dev overlay directory. 27 | - Verify that the Config Map is created with the defined data. 28 | 5. **Add Config Map from a File**: 29 | - Create an example file (e.g., `db-init.js`) and configure another entry in your generator to reference the file. 30 | 6. **Working with `.env` Files**: 31 | - Create a `.env` file and configure another generator to read from it. Observe the generation of key-value pairs versus reading the file directly. 32 | 7. **Kustomize Global Options**: 33 | - Learn how to disable name suffix hashes globally and how to set common labels for all generated Config Maps. 34 | 8. **Wrap Up the Task**: 35 | - Walk through the process, confirm all changes, and rerun the command to view the results. 36 | 37 | ## Conclusion 38 | 39 | In this lecture, we explored the Config Map generator in Kustomize, learning how to create and manage these configurations from various sources. The ability to generate multiple Config Maps based on different parameters allows for a more dynamic and flexible approach to managing configurations in your applications. Keep practicing these steps, and we'll continue our journey into the world of Kubernetes! 🚀 40 | -------------------------------------------------------------------------------- /_exercises/10-kustomize/06-introduction_patches.md: -------------------------------------------------------------------------------- 1 | # Introduction to Patches in Kubernetes Customization 2 | 3 | Welcome to this guide on using patches in the context of Kustomize! In this session, we'll explore how to gain more granular control over the changes you make to specific resources in your Kubernetes base. 🛠️ Before diving into the step-by-step implementation, let's first give you a chance to try it out on your own. 4 | 5 | ## Overview 6 | 7 | In this exercise, you'll modify a Kubernetes deployment and manage various configurations effectively using patches. Here's a high-level overview of what to try implementing: 8 | 9 | 1. **Define a base configuration for your deployments.** 10 | 2. **Add a reverse proxy deployment using the same image.** 11 | 3. **Attempt to change the image tag for the NGINX deployment only.** 12 | 4. **Utilize the patches field to update specific properties in the NGINX deployment.** 13 | 5. 
**Test and validate the changes by inspecting the output of your deployments.** 14 | 15 | Take a moment to see if you can implement this on your own before looking at the step-by-step guide below! 16 | 17 | ## Step-by-Step Guide 18 | 19 | Here's a clear path to follow for achieving the objectives discussed: 20 | 21 | 1. **Set Up Your Base Configuration:** 22 | 23 | - Create a base configuration for your NGINX and reverse proxy deployments. 24 | - Ensure both deployments are leveraging the NGINX image. 25 | 26 | 2. **Add the Reverse Proxy Deployment:** 27 | 28 | - Include the reverse proxy deployment in your configuration files. 29 | 30 | 3. **Use the Patches Field:** 31 | 32 | - Instead of using general top-level fields, navigate to the patches option in your configuration. 33 | - Create an inline patch specifically for the NGINX deployment. 34 | 35 | 4. **Specify Changes:** 36 | 37 | - In the patch, modify only the parts you want to change (e.g., the image tag). 38 | - Ensure you retain the relevant metadata and other specifications as is. 39 | 40 | 5. **Run and Validate:** 41 | - Clear the terminal and re-run your configuration using Kustomize. 42 | - Check the deployment output to confirm that only the NGINX deployment image tag has changed. 43 | 44 | ## Conclusion 45 | 46 | Patching is a powerful tool in Kustomize, allowing you to make targeted modifications to your deployments with precision. By leveraging the patches field, you can manage specifics while leaving other configurations intact. Keep experimenting with the patches functionality, as there's much more to discover as you continue your learning journey! 🌱 47 | -------------------------------------------------------------------------------- /_exercises/10-kustomize/07-strategic_merge_patches.md: -------------------------------------------------------------------------------- 1 | # Strategic Merge Patches in Kubernetes 2 | 3 | Welcome! In this tutorial, we'll dive into the fascinating world of strategic merge patches in Kubernetes. The goal is to help you learn how to create scalable and maintainable patches by using separate YAML files instead of inline patches in the kustomization file. 4 | 5 | ## Overview 6 | 7 | Before we jump into the step-by-step guide, here's a quick overview of what you should aim to implement. The main focus will be on moving from inline patches in your `kustomization.yaml` file to separate YAML files for better organization and scalability. If you think you can tackle this on your own, give it a try first! Here are the steps you should aim to follow: 8 | 9 | 1. Create a separate YAML file for your patch. 10 | 2. Name the file so it clearly reads as a patch (e.g., `update-resources.patch.yaml`). 11 | 3. Modify your `kustomization.yaml` file to reference the new patch file. 12 | 4. Implement additional patches as needed, following the same structure. 13 | 5. Test your configuration to ensure everything works as expected. 14 | 15 | Now that you have a sense of the overall flow, why not take a shot at implementing it before we walk through it together? 16 | 17 | ## Step-by-Step Guide 18 | 19 | Let's break it down step by step: 20 | 21 | 1. **Create Patch File**: Start by moving your inline patch content into a new, separate YAML file. 22 | 2. **Fix Naming**: Give the file a name that clearly marks it as a patch (e.g., `update-resources.patch.yaml`). 23 | 24 | 3. 
**Update `kustomization.yaml`**: Open your `kustomization.yaml` file, and instead of using an inline patch, reference your newly created file: 26 | 27 | ```yaml 28 | patches: 29 | - path: update-resources.patch.yaml 30 | ``` 31 | 32 | 4. **Add More Patches**: Keep adding more patches as you see fit (for example, `use-latest-tag.patch.yaml`). Ensure each patch is clearly defined. 33 | 34 | 5. **Test Deployment**: Return to your terminal, clear the screen, and run your Kustomize command to apply the changes. Check for any issues. 35 | 36 | 6. **Ordering Conflicts**: Understand that patch order matters; if conflicting patches are applied, the last one will take precedence. 37 | 38 | 7. **Confirm Working Configuration**: Ensure that everything behaves as expected by running your deployment again. 39 | 40 | Congrats! 🎉 You've handled the patches in a much more organized manner. 41 | 42 | ## Conclusion 43 | 44 | In this lecture, we discovered how to make Kubernetes patches more manageable and maintainable by moving from inline patches to separate YAML files. This approach not only enhances clarity but also makes it easier to handle complex configurations. Remember, always consider the order of your patches since that affects the final outcome. 45 | 46 | Keep experimenting with these techniques, and don't hesitate to explore further. Happy patching! 🚀 47 | -------------------------------------------------------------------------------- /_exercises/10-kustomize/08-json_patches.md: -------------------------------------------------------------------------------- 1 | # Removing Resources in Kubernetes with JSON Patches 2 | 3 | ## Overview 4 | 5 | In this exercise, you'll learn how to effectively use JSON patches to remove specific elements from your Kubernetes configurations. While strategic merge patches are useful, they can't always achieve removal operations easily. We'll focus on creating a JSON patch to remove the `resources` field from a specified container in a deployment. Before diving into the step-by-step guide, I encourage you to try implementing this solution on your own. 6 | 7 | Here are the main steps to follow: 8 | 9 | 1. Create a JSON patch file in YAML format. 10 | 2. Specify the target resource (API group, version, kind, and name) where the patch is referenced in your `kustomization.yaml`. 11 | 3. Define the operation to remove a specific path in the target resource. 12 | 4. Apply the patch and verify that the specified field has been removed. 13 | 14 | Give it a go! If you feel stuck, you can always refer back to the detailed guide below. 😊 15 | 16 | ## Step-by-Step Guide 17 | 18 | 1. **Create the JSON Patch File**: Start by creating a file called `remove-resources.patch.yaml`. In this file, you will define the operations you want to perform on your target resource. 19 | 20 | 2. **Define the Target Resource**: Specify the target resource where the patch is referenced in your `kustomization.yaml`. For example, to target an `nginx` deployment, the entry might look something like: 21 | 22 | ```yaml 23 | patches: 24 | - path: remove-resources.patch.yaml 25 | target: 26 | group: apps 27 | version: v1 28 | kind: Deployment 29 | name: nginx 30 | ``` 31 | 32 | 3. **Specify the Operation**: Within the patch file itself, define the operation that removes the `resources` field. A JSON patch is a list of operations, each with an `op` and a `path` in forward-slash format: 33 | 34 | ```yaml 35 | - op: remove 36 | path: /spec/template/spec/containers/0/resources 37 | ``` 38 | 39 | 4. **Apply the Patch**: Use your terminal to apply the patch to your deployment configuration. 
You might use a command like: 40 | 41 | ```bash 42 | kubectl apply -k ./nginx-app/overlays/dev 43 | ``` 44 | 45 | Make sure to adjust the overlay path to your setup; you can also run `kubectl kustomize` against the same overlay first to preview the rendered output. 46 | 47 | 5. **Verify the Operation**: After applying the patch, check your deployment configuration to ensure that the `resources` section has been removed successfully. 48 | 49 | 6. **Test with Other Resources**: Consider applying similar patches to other deployments or using label selectors for a broader application. Review if the resources maintain compatibility with the modifications. 50 | 51 | ## Conclusion 52 | 53 | Congratulations on learning how to use JSON patches to remove resources from your Kubernetes configurations! Using patches provides you with fine-grained control over your resources and enhances your ability to manage configurations dynamically. As you continue to practice, you'll gain more confidence in customizing Kubernetes resources effectively. Keep experimenting and learning! 54 | -------------------------------------------------------------------------------- /_exercises/11-deploy_color_api_gke/project_introduction.md: -------------------------------------------------------------------------------- 1 | # Project Introduction: Deploying Color API on Google Kubernetes Engine 2 | 3 | Welcome to this exciting project where we'll deploy our Color API in a managed Google Kubernetes Engine (GKE) cluster! 🎉 In this exercise, our focus will be on bringing together everything we've learned about Kubernetes to deploy a functional application using GKE's robust features. 4 | 5 | ## Overview 6 | 7 | In this exercise, you'll implement two environments of our Color API application: a development environment and a production ("prod") environment. The goal is to deploy version 2.1.0 of the Color API in development, and the stable version 2.0.0 in production. Here are the main steps we'll cover: 8 | 9 | 1. Create two namespaces: `dev` and `prod`. 10 | 2. Deploy MongoDB stateful sets with headless services in each namespace. 11 | 3. Set up config maps and secrets for managing configurations and credentials. 12 | 4. Create persistent volume claims for MongoDB storage. 13 | 5. Configure services, ingresses, and certificates for API access. 14 | 6. Implement network policies to control traffic flow between pods. 15 | 7. Use `kustomize` to manage customization in deployments. 16 | 17 | Before diving into the step-by-step guide, take a moment to try implementing the solution by following the overview steps. It's a great way to reinforce your learning!
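If you'd like a concrete starting point, here is a minimal sketch of the two namespace manifests from step 1 (the file names are illustrative; any labels you need for things like pod security or network policies would be layered on top):

```yaml
# namespaces/dev.yaml (illustrative)
apiVersion: v1
kind: Namespace
metadata:
  name: dev
---
# namespaces/prod.yaml (illustrative)
apiVersion: v1
kind: Namespace
metadata:
  name: prod
```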
18 | -------------------------------------------------------------------------------- /config-maps/green-color-api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: green-color-api 5 | labels: 6 | app: green-color-api 7 | spec: 8 | containers: 9 | - name: color-api 10 | image: lmacademy/color-api:1.3.0 11 | envFrom: 12 | - configMapRef: 13 | name: green-env-vars 14 | volumeMounts: 15 | - name: color-config 16 | mountPath: /mnt/config 17 | readOnly: true 18 | resources: 19 | limits: 20 | memory: '128Mi' 21 | cpu: '500m' 22 | ports: 23 | - containerPort: 80 24 | volumes: 25 | - name: color-config 26 | configMap: 27 | name: green-files -------------------------------------------------------------------------------- /config-maps/green-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: green-env-vars 5 | data: 6 | COLOR_CONFIG_PATH: /mnt/config/color.txt 7 | --- 8 | apiVersion: v1 9 | kind: ConfigMap 10 | metadata: 11 | name: green-files 12 | data: 13 | color.txt: green 14 | hello-from-green.js: | 15 | console.log("Hello from") 16 | console.log("The green config map") 17 | -------------------------------------------------------------------------------- /config-maps/red-color-api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: red-color-api 5 | labels: 6 | app: red-color-api 7 | spec: 8 | containers: 9 | - name: color-api 10 | image: lmacademy/color-api:1.3.0 11 | envFrom: 12 | - configMapRef: 13 | name: red-config 14 | resources: 15 | limits: 16 | memory: '128Mi' 17 | cpu: '500m' 18 | ports: 19 | - containerPort: 80 20 | -------------------------------------------------------------------------------- /config-maps/red-config.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: red-config 5 | data: 6 | DEFAULT_COLOR: red 7 | -------------------------------------------------------------------------------- /containers/color-api/.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules -------------------------------------------------------------------------------- /containers/color-api/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:22-alpine3.20 2 | 3 | WORKDIR /app 4 | COPY package*.json . 
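# Copying the package manifests before the sources lets Docker reuse the cached npm ci layer when only src changes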
5 | RUN npm ci 6 | COPY src src 7 | CMD ["npm", "start"] -------------------------------------------------------------------------------- /containers/color-api/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "color-api", 3 | "version": "1.0.0", 4 | "main": "index.js", 5 | "scripts": { 6 | "start": "node src/index.js", 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "keywords": [], 10 | "author": "", 11 | "license": "ISC", 12 | "description": "", 13 | "dependencies": { 14 | "body-parser": "1.20.3", 15 | "express": "4.19.2", 16 | "express-prom-bundle": "7.0.0", 17 | "mongoose": "8.6.3" 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /containers/color-api/src/db/color.js: -------------------------------------------------------------------------------- 1 | const mongoose = require('mongoose'); 2 | 3 | const ColorSchema = new mongoose.Schema({ 4 | key: String, 5 | value: String, 6 | }); 7 | 8 | const Color = mongoose.model('Color', ColorSchema); 9 | 10 | const saveColor = async ({ key, value }) => { 11 | let color = await Color.findOne({ key }); 12 | 13 | if (color) { 14 | color.set({ value }); 15 | } else { 16 | color = new Color({ key, value }); 17 | } 18 | 19 | await color.save(); 20 | }; 21 | 22 | const deleteColor = async ({ key }) => Color.deleteOne({ key }); 23 | 24 | const getColors = async () => Color.find(); 25 | 26 | const getColor = async ({ key, strict = false }) => { 27 | let color = await Color.findOne({ key }); 28 | 29 | if (strict && !color) { 30 | return undefined; 31 | } 32 | 33 | if (color) { 34 | return color.value; 35 | } 36 | 37 | return process.env.DEFAULT_COLOR || 'blue'; 38 | }; 39 | 40 | module.exports = { 41 | saveColor, 42 | getColor, 43 | getColors, 44 | deleteColor, 45 | }; 46 | -------------------------------------------------------------------------------- /containers/color-api/src/index.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const mongoose = require('mongoose'); 3 | const bodyParser = require('body-parser'); 4 | const promBundle = require('express-prom-bundle'); 5 | const { healthRouter } = require('./routes/health'); 6 | const { apiRouter } = require('./routes/api'); 7 | const { rootRouter } = require('./routes/root'); 8 | 9 | const port = 80; 10 | 11 | const app = express(); 12 | 13 | const delay_startup = process.env.DELAY_STARTUP === 'true'; 14 | console.log(`Delay startup : ${delay_startup}`); 15 | 16 | const metricsMiddleware = promBundle({ 17 | includeMethod: true, 18 | includeStatusCode: true, 19 | includePath: true, 20 | includeUp: true, 21 | }); 22 | 23 | app.use(metricsMiddleware); 24 | app.use(bodyParser.json()); 25 | app.use('/api', apiRouter); 26 | app.use('/', healthRouter); 27 | app.use('/', rootRouter); 28 | 29 | if (delay_startup) { 30 | const start = Date.now(); 31 | 32 | // Purposefully block event loop and execution for 60 seconds. 33 | // To illustrate startup probes. 
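// Nothing is served while this loop blocks, so probes fired during this window will fail.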
34 | while (Date.now() - start < 60000) {} 35 | } 36 | 37 | mongoose 38 | .connect(process.env.DB_URL) 39 | .then(() => { 40 | console.log('Connected to MongoDB'); 41 | 42 | app.listen(port, () => { 43 | console.log(`Color API listening on port: ${port}`); 44 | }); 45 | }) 46 | .catch((err) => { 47 | console.error('Could not connect to MongoDB'); 48 | console.error(err); 49 | }); 50 | -------------------------------------------------------------------------------- /containers/color-api/src/routes/api.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const { getHostname } = require('../utils'); 3 | const { getColor, getColors, deleteColor, saveColor } = require('../db/color'); 4 | 5 | const apiRouter = express.Router(); 6 | 7 | apiRouter.get('/', async (req, res) => { 8 | const { format, colorKey } = req.query; 9 | 10 | const color = await getColor({ key: colorKey }); 11 | const hostname = getHostname(); 12 | 13 | if (format === 'json') { 14 | return res.json({ 15 | color, 16 | hostname, 17 | }); 18 | } else { 19 | return res.send(`COLOR : ${color}, HOSTNAME : ${hostname}`); 20 | } 21 | }); 22 | 23 | apiRouter.get('/color', async (req, res) => { 24 | const colors = await getColors(); 25 | 26 | return res.send({ data: colors }); 27 | }); 28 | 29 | apiRouter.get('/color/:key', async (req, res) => { 30 | const { key } = req.params; 31 | 32 | const color = await getColor({ key, strict: true }); 33 | 34 | if (!color) { 35 | return res.sendStatus(404); 36 | } else { 37 | return res.send({ data: color }); 38 | } 39 | }); 40 | 41 | apiRouter.post('/color/:key', async (req, res) => { 42 | const { key } = req.params; 43 | const { value } = req.body; 44 | 45 | await saveColor({ key, value }); 46 | 47 | return res.status(201).send({ data: { key, value } }); 48 | }); 49 | 50 | apiRouter.delete('/color/:key', async (req, res) => { 51 | const { key } = req.params; 52 | 53 | await deleteColor({ key }); 54 | 55 | return res.sendStatus(204); 56 | }); 57 | 58 | module.exports = { 59 | apiRouter, 60 | }; 61 | -------------------------------------------------------------------------------- /containers/color-api/src/routes/health.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | 3 | const healthRouter = express.Router(); 4 | const fail_liveness = process.env.FAIL_LIVENESS === 'true'; 5 | const fail_readiness = 6 | process.env.FAIL_READINESS === 'true' ? 
Math.random() < 0.5 : false; 7 | 8 | console.log(`Fail liveness : ${fail_liveness}`); 9 | console.log(`Fail readiness : ${fail_readiness}`); 10 | 11 | healthRouter.get('/ready', (req, res) => { 12 | if (fail_readiness) { 13 | return res.sendStatus(503); 14 | } 15 | 16 | return res.send('ok'); 17 | }); 18 | 19 | healthRouter.get('/up', (req, res) => { 20 | return res.send('ok'); 21 | }); 22 | 23 | healthRouter.get('/health', (req, res) => { 24 | if (fail_liveness) { 25 | return res.sendStatus(503); 26 | } 27 | 28 | return res.send('ok'); 29 | }); 30 | 31 | module.exports = { 32 | healthRouter, 33 | }; 34 | -------------------------------------------------------------------------------- /containers/color-api/src/routes/root.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const { getHostname } = require('../utils'); 3 | const { getColor } = require('../db/color'); 4 | 5 | const rootRouter = express.Router(); 6 | 7 | rootRouter.get('/', async (req, res) => { 8 | const { colorKey } = req.query; 9 | 10 | const color = await getColor({ key: colorKey }); 11 | const hostname = getHostname(); 12 | 13 | res.send(`
Hello from color-api! 14 | Hostname: ${hostname}
`); 15 | }); 16 | 17 | module.exports = { 18 | rootRouter, 19 | }; 20 | -------------------------------------------------------------------------------- /containers/color-api/src/utils.js: -------------------------------------------------------------------------------- 1 | const os = require('os'); 2 | 3 | const getHostname = () => os.hostname(); 4 | 5 | module.exports = { 6 | getHostname, 7 | }; 8 | -------------------------------------------------------------------------------- /containers/traffic-generator/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine:3.20 2 | 3 | WORKDIR /app 4 | 5 | RUN apk add --no-cache curl 6 | 7 | COPY traffic-gen.sh traffic-gen.sh 8 | 9 | RUN chmod +x traffic-gen.sh 10 | 11 | ENTRYPOINT [ "./traffic-gen.sh" ] -------------------------------------------------------------------------------- /containers/traffic-generator/traffic-gen.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | if [ "$#" -lt 2 ]; then 4 | echo "Usage: $0 <target> <interval>" 5 | exit 1 6 | fi 7 | 8 | TARGET=$1 9 | INTERVAL=$2 10 | 11 | echo "Sending requests to $TARGET every $INTERVAL seconds." 12 | 13 | while true; do 14 | REQUEST_TIME=$(date +"%Y-%m-%d %H:%M:%S") 15 | RESPONSE=$(curl -s "$TARGET") 16 | 17 | echo "[$REQUEST_TIME] $RESPONSE" 18 | 19 | sleep $INTERVAL 20 | done -------------------------------------------------------------------------------- /deployments/nginx-depl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx-deployment 5 | labels: 6 | app: nginx 7 | spec: 8 | replicas: 5 9 | selector: 10 | matchLabels: 11 | app: nginx 12 | template: 13 | metadata: 14 | labels: 15 | app: nginx 16 | spec: 17 | containers: 18 | - name: nginx 19 | image: nginx:1.27.1-alpine 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /headless-service/color-ss.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: color-ss 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: color-api 9 | serviceName: color-svc 10 | replicas: 3 11 | template: 12 | metadata: 13 | labels: 14 | app: color-api 15 | spec: 16 | containers: 17 | - name: color-api 18 | image: lmacademy/color-api:1.2.1 19 | ports: 20 | - containerPort: 80 21 | name: web 22 | volumeMounts: 23 | - name: dummy-data 24 | mountPath: /tmp/data 25 | volumeClaimTemplates: 26 | - metadata: 27 | name: dummy-data 28 | spec: 29 | accessModes: ['ReadWriteOnce'] 30 | storageClassName: standard 31 | resources: 32 | requests: 33 | storage: 1Gi 34 | -------------------------------------------------------------------------------- /headless-service/debug.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: curl 5 | labels: 6 | name: curl 7 | spec: 8 | containers: 9 | - name: curl 10 | image: lmacademy/alpine-curl:1.0.0 11 | resources: 12 | limits: 13 | memory: '128Mi' 14 | cpu: '500m' 15 | -------------------------------------------------------------------------------- /headless-service/svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: color-svc 5 | spec: 6 | clusterIP: None 7 | ports: 8 | - port: 80
9 | targetPort: 80 10 | selector: 11 | app: color-api -------------------------------------------------------------------------------- /health-probes/color-api-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: color-api-pod 5 | labels: 6 | name: color-api-pod 7 | spec: 8 | containers: 9 | - name: color-api-pod 10 | image: lmacademy/color-api:1.2.1 11 | ports: 12 | - containerPort: 80 13 | resources: 14 | limits: 15 | cpu: '500m' 16 | memory: '512Mi' 17 | env: 18 | - name: DELAY_STARTUP 19 | value: 'false' 20 | - name: FAIL_LIVENESS 21 | value: 'false' 22 | startupProbe: 23 | httpGet: 24 | path: /up 25 | port: 80 26 | failureThreshold: 2 27 | periodSeconds: 3 28 | livenessProbe: 29 | httpGet: 30 | path: /health 31 | port: 80 32 | failureThreshold: 3 33 | periodSeconds: 10 34 | -------------------------------------------------------------------------------- /health-probes/readiness-probes/color-api-depl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: color-api 5 | labels: 6 | app: color-api 7 | spec: 8 | replicas: 6 9 | selector: 10 | matchLabels: 11 | app: color-api 12 | template: 13 | metadata: 14 | labels: 15 | app: color-api 16 | spec: 17 | containers: 18 | - name: color-api 19 | image: lmacademy/color-api:1.2.1 20 | ports: 21 | - containerPort: 80 22 | resources: 23 | limits: 24 | cpu: '500m' 25 | memory: '512Mi' 26 | env: 27 | - name: DELAY_STARTUP 28 | value: 'false' 29 | - name: FAIL_LIVENESS 30 | value: 'false' 31 | - name: FAIL_READINESS 32 | value: 'true' 33 | startupProbe: 34 | httpGet: 35 | path: /up 36 | port: 80 37 | failureThreshold: 2 38 | periodSeconds: 3 39 | livenessProbe: 40 | httpGet: 41 | path: /health 42 | port: 80 43 | failureThreshold: 3 44 | periodSeconds: 10 45 | readinessProbe: 46 | httpGet: 47 | path: /ready 48 | port: 80 49 | failureThreshold: 2 50 | periodSeconds: 5 51 | -------------------------------------------------------------------------------- /health-probes/readiness-probes/color-api-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: color-api-svc 5 | spec: 6 | selector: 7 | app: color-api 8 | ports: 9 | - port: 80 10 | targetPort: 80 11 | -------------------------------------------------------------------------------- /health-probes/readiness-probes/traffic-generator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: traffic-generator 5 | labels: 6 | app: traffic-generator 7 | spec: 8 | containers: 9 | - name: traffic-generator 10 | image: lmacademy/traffic-generator:1.0.0 11 | args: 12 | - 'color-api-svc/api' 13 | - '0.5' 14 | resources: 15 | limits: 16 | cpu: '500m' 17 | memory: '512Mi' 18 | -------------------------------------------------------------------------------- /kustomize/dev-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: dev 5 | -------------------------------------------------------------------------------- /kustomize/nginx-app/base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - 
nginx-depl.yaml 6 | - nginx-svc.yaml 7 | - reverse-proxy-depl.yaml 8 | - reverse-proxy-pod.yaml 9 | -------------------------------------------------------------------------------- /kustomize/nginx-app/base/nginx-depl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: nginx 9 | template: 10 | metadata: 11 | labels: 12 | app: nginx 13 | spec: 14 | containers: 15 | - name: nginx 16 | image: nginx:1.27.0 17 | resources: 18 | limits: 19 | memory: '128Mi' 20 | cpu: '500m' 21 | ports: 22 | - containerPort: 80 23 | -------------------------------------------------------------------------------- /kustomize/nginx-app/base/nginx-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: nginx-svc 5 | spec: 6 | selector: 7 | app: nginx 8 | ports: 9 | - port: 80 10 | targetPort: 80 11 | -------------------------------------------------------------------------------- /kustomize/nginx-app/base/reverse-proxy-depl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: reverse-proxy 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: reverse-proxy 9 | template: 10 | metadata: 11 | labels: 12 | app: reverse-proxy 13 | spec: 14 | containers: 15 | - name: nginx 16 | image: nginx:1.27.0 17 | resources: 18 | limits: 19 | memory: '128Mi' 20 | cpu: '500m' 21 | ports: 22 | - containerPort: 80 23 | -------------------------------------------------------------------------------- /kustomize/nginx-app/base/reverse-proxy-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: reverse-proxy-pod 5 | labels: 6 | app: reverse-proxy-standalone 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:1.27.0 11 | resources: 12 | limits: 13 | memory: '128Mi' 14 | cpu: '500m' 15 | ports: 16 | - containerPort: 80 17 | -------------------------------------------------------------------------------- /kustomize/nginx-app/overlays/dev/db-init.js: -------------------------------------------------------------------------------- 1 | console.log('Initializing DB'); 2 | console.log('Performing some operations'); 3 | console.log('Successfully initialized'); -------------------------------------------------------------------------------- /kustomize/nginx-app/overlays/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - ../../base 6 | 7 | namePrefix: dev- 8 | nameSuffix: -alpha 9 | 10 | commonLabels: 11 | project: ecommerce-app 12 | tier: backend 13 | env: dev 14 | 15 | commonAnnotations: 16 | team: finance 17 | maintainer: 'finance@ourcompany.org' 18 | repository: 'git-repo' 19 | 20 | replicas: 21 | - name: nginx 22 | count: 2 23 | 24 | configMapGenerator: 25 | - name: feature-flag-config 26 | literals: 27 | - use_db=true 28 | - expose_metrics=true 29 | - some_other_feature=true 30 | - name: db-init-config 31 | files: 32 | - db-init.js 33 | 34 | secretGenerator: 35 | - name: local-config 36 | envs: 37 | - .env.local 38 | type: Opaque 39 | 40 | patches: 41 | - path: use-latest-tag.patch.yaml 42 | - path: mount-db-init.patch.yaml 43 | - path: 
remove-resources.patch.json 44 | target: 45 | group: apps 46 | version: v1 47 | kind: Deployment 48 | -------------------------------------------------------------------------------- /kustomize/nginx-app/overlays/dev/mount-db-init.patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: nginx 10 | volumeMounts: 11 | - name: db-config 12 | mountPath: /db/config 13 | volumes: 14 | - name: db-config 15 | configMap: 16 | name: db-init-config 17 | -------------------------------------------------------------------------------- /kustomize/nginx-app/overlays/dev/remove-resources.patch.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "op": "remove", 4 | "path": "/spec/template/spec/containers/0/resources" 5 | } 6 | ] 7 | -------------------------------------------------------------------------------- /kustomize/nginx-app/overlays/dev/remove-resources.patch.yaml: -------------------------------------------------------------------------------- 1 | - op: remove 2 | path: /spec/template/spec/containers/0/resources 3 | -------------------------------------------------------------------------------- /kustomize/nginx-app/overlays/dev/use-latest-tag.patch.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: nginx 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:latest 11 | -------------------------------------------------------------------------------- /kustomize/nginx-app/overlays/prod/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | replicas: 5 | - name: nginx 6 | count: 4 7 | 8 | resources: 9 | - ../../base 10 | -------------------------------------------------------------------------------- /kustomize/prod-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: prod 5 | -------------------------------------------------------------------------------- /labels-selectors/color-api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: color-backend 5 | labels: 6 | app: color-api 7 | environment: local 8 | tier: backend 9 | spec: 10 | containers: 11 | - name: color-backend 12 | image: lmacademy/color-api:1.1.0 13 | ports: 14 | - containerPort: 80 15 | --- 16 | apiVersion: v1 17 | kind: Pod 18 | metadata: 19 | name: color-frontend 20 | labels: 21 | app: color-api 22 | environment: local 23 | tier: frontend 24 | spec: 25 | containers: 26 | - name: color-nginx 27 | image: nginx:1.27.0 28 | ports: 29 | - containerPort: 80 30 | -------------------------------------------------------------------------------- /labels-selectors/color-depl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: color-api 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: color-api 10 | environment: local 11 | tier: backend 12 | matchExpressions: 13 | - key: managed 14 | operator: Exists 15 | template: 16 | metadata: 17 | labels: 18 | app: 
color-api 19 | environment: local 20 | tier: backend 21 | managed: 'deployment' 22 | spec: 23 | containers: 24 | - name: color-api 25 | image: lmacademy/color-api:1.1.0 26 | ports: 27 | - containerPort: 80 28 | -------------------------------------------------------------------------------- /namespaces/color-api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: color-api 5 | namespace: dev 6 | labels: 7 | app: color-api 8 | spec: 9 | containers: 10 | - name: color-api 11 | image: lmacademy/color-api:1.1.0 12 | ports: 13 | - containerPort: 80 14 | --- 15 | apiVersion: v1 16 | kind: Service 17 | metadata: 18 | name: color-api-svc 19 | namespace: dev 20 | labels: 21 | app: color-api 22 | spec: 23 | type: ClusterIP 24 | selector: 25 | app: color-api 26 | ports: 27 | - port: 80 28 | targetPort: 80 29 | -------------------------------------------------------------------------------- /namespaces/dev-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: dev 5 | -------------------------------------------------------------------------------- /namespaces/traffic-generator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: traffic-generator 5 | labels: 6 | app: traffic-generator 7 | spec: 8 | containers: 9 | - name: traffic-generator 10 | image: lmacademy/traffic-generator:1.0.0 11 | args: 12 | - 'color-api-svc.dev.svc.cluster.local/api' 13 | - '1' -------------------------------------------------------------------------------- /network-policies/color-api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: color-api 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: color-api 9 | template: 10 | metadata: 11 | labels: 12 | app: color-api 13 | spec: 14 | containers: 15 | - name: color-api 16 | image: lmacademy/color-api:1.2.0 17 | resources: 18 | limits: 19 | memory: '128Mi' 20 | cpu: '500m' 21 | ports: 22 | - containerPort: 80 23 | --- 24 | apiVersion: v1 25 | kind: Service 26 | metadata: 27 | name: color-svc 28 | spec: 29 | selector: 30 | app: color-api 31 | ports: 32 | - port: 80 33 | targetPort: 80 34 | -------------------------------------------------------------------------------- /network-policies/curl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: curl 5 | labels: 6 | app: curl 7 | spec: 8 | containers: 9 | - name: curl 10 | image: lmacademy/alpine-curl:1.0.0 11 | resources: 12 | limits: 13 | memory: '128Mi' 14 | cpu: '500m' 15 | -------------------------------------------------------------------------------- /network-policies/curl2.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: curl2 5 | labels: 6 | app: curl2 7 | tier: backend 8 | spec: 9 | containers: 10 | - name: curl2 11 | image: lmacademy/alpine-curl:1.0.0 12 | resources: 13 | limits: 14 | memory: '128Mi' 15 | cpu: '500m' 16 | -------------------------------------------------------------------------------- /network-policies/curl3.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | 
name: curl3 5 | namespace: dev 6 | labels: 7 | app: curl3 8 | spec: 9 | containers: 10 | - name: curl3 11 | image: lmacademy/alpine-curl:1.0.0 12 | resources: 13 | limits: 14 | memory: '128Mi' 15 | cpu: '500m' 16 | -------------------------------------------------------------------------------- /network-policies/dev-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: dev 5 | -------------------------------------------------------------------------------- /network-policies/policies/allow-color-api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: allow-color-api 5 | spec: 6 | podSelector: 7 | matchLabels: 8 | app: curl 9 | policyTypes: 10 | - Egress 11 | egress: 12 | - to: 13 | - podSelector: 14 | matchLabels: 15 | app: color-api 16 | -------------------------------------------------------------------------------- /network-policies/policies/allow-curl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: allow-curl 5 | spec: 6 | podSelector: 7 | matchLabels: 8 | app: color-api 9 | policyTypes: 10 | - Ingress 11 | ingress: 12 | - from: 13 | - namespaceSelector: 14 | matchLabels: 15 | kubernetes.io/metadata.name: dev 16 | podSelector: 17 | matchLabels: 18 | app: traffic-generator 19 | - podSelector: 20 | matchLabels: 21 | app: curl 22 | -------------------------------------------------------------------------------- /network-policies/policies/allow-egress-dns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: allow-egress-dns 5 | spec: 6 | podSelector: {} 7 | policyTypes: 8 | - Egress 9 | egress: 10 | - to: 11 | - namespaceSelector: 12 | matchLabels: 13 | kubernetes.io/metadata.name: kube-system 14 | podSelector: 15 | matchLabels: 16 | k8s-app: kube-dns 17 | -------------------------------------------------------------------------------- /network-policies/policies/deny-all.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: deny-all 5 | spec: 6 | podSelector: {} 7 | policyTypes: 8 | - Ingress 9 | - Egress 10 | -------------------------------------------------------------------------------- /network-policies/traffic-gen.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: traffic-generator 5 | namespace: dev 6 | labels: 7 | app: traffic-generator 8 | spec: 9 | containers: 10 | - name: traffic-generator 11 | image: lmacademy/traffic-generator:1.0.0 12 | args: 13 | - 'color-svc.default.svc.cluster.local' 14 | - '0.5' 15 | resources: 16 | limits: 17 | memory: '128Mi' 18 | cpu: '500m' 19 | -------------------------------------------------------------------------------- /object-management/nginx-all.yaml: -------------------------------------------------------------------------------- 1 | # For documentation, equivalent to the imperative command: 2 | # kubectl run nginx-pod --image=nginx:1.27.0 3 | 4 | apiVersion: v1 5 | kind: Pod 6 | metadata: 7 | name: nginx-pod 8 | labels: 9 | app: nginx 10 | spec: 11 | containers: 12 | - image: 
nginx:1.27.0-alpine 13 | name: nginx-container 14 | ports: 15 | - containerPort: 80 16 | 17 | --- 18 | # For documentation, equivalent to the imperative command: 19 | # kubectl expose pod nginx-pod --type=NodePort --port=80 20 | 21 | apiVersion: v1 22 | kind: Service 23 | metadata: 24 | name: nginx-svc 25 | labels: 26 | app: nginx 27 | spec: 28 | type: NodePort 29 | ports: 30 | - port: 80 31 | protocol: TCP 32 | targetPort: 80 33 | selector: 34 | app: nginx 35 | -------------------------------------------------------------------------------- /object-management/nginx-pod.yaml: -------------------------------------------------------------------------------- 1 | # For documentation, equivalent to the imperative command: 2 | # kubectl run nginx-pod --image=nginx:1.27.0 3 | 4 | apiVersion: v1 5 | kind: Pod 6 | metadata: 7 | name: nginx-pod 8 | labels: 9 | app: nginx 10 | spec: 11 | containers: 12 | - image: nginx:1.27.0-alpine 13 | name: nginx-container 14 | ports: 15 | - containerPort: 80 16 | -------------------------------------------------------------------------------- /object-management/nginx-svc.yaml: -------------------------------------------------------------------------------- 1 | # For documentation, equivalent to the imperative command: 2 | # kubectl expose pod nginx-pod --type=NodePort --port=80 3 | 4 | apiVersion: v1 5 | kind: Service 6 | metadata: 7 | name: nginx-svc 8 | labels: 9 | app: nginx 10 | spec: 11 | type: NodePort 12 | ports: 13 | - port: 80 14 | protocol: TCP 15 | targetPort: 80 16 | selector: 17 | app: nginx 18 | -------------------------------------------------------------------------------- /pod-security-standards/baseline-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx-baseline 5 | labels: 6 | name: nginx-baseline 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:1.27.0 11 | resources: 12 | limits: 13 | memory: '128Mi' 14 | cpu: '500m' 15 | ports: 16 | - containerPort: 80 17 | -------------------------------------------------------------------------------- /pod-security-standards/namespaces/baseline.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: baseline 5 | labels: 6 | pod-security.kubernetes.io/enforce: baseline 7 | pod-security.kubernetes.io/warn: restricted 8 | -------------------------------------------------------------------------------- /pod-security-standards/namespaces/privileged.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: privileged 5 | labels: 6 | pod-security.kubernetes.io/enforce: privileged 7 | pod-security.kubernetes.io/warn: baseline -------------------------------------------------------------------------------- /pod-security-standards/privileged-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx-privileged 5 | labels: 6 | name: nginx-privileged 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:1.27.0 11 | resources: 12 | limits: 13 | memory: '128Mi' 14 | cpu: '500m' 15 | ports: 16 | - containerPort: 80 17 | securityContext: 18 | privileged: true 19 | -------------------------------------------------------------------------------- /pod-security-standards/restricted-pod.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx-restricted 5 | labels: 6 | name: nginx-restricted 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginxinc/nginx-unprivileged:1.27.1 11 | resources: 12 | limits: 13 | memory: '128Mi' 14 | cpu: '500m' 15 | ports: 16 | - containerPort: 80 17 | securityContext: 18 | allowPrivilegeEscalation: false 19 | capabilities: 20 | drop: ['ALL'] 21 | runAsNonRoot: true 22 | seccompProfile: 23 | type: RuntimeDefault 24 | -------------------------------------------------------------------------------- /proj-gke/color-api/_base/color-api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: color-api 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: color-api 9 | template: 10 | metadata: 11 | labels: 12 | app: color-api 13 | spec: 14 | containers: 15 | - name: color-api 16 | image: lmacademy/color-api:2.0.0 17 | resources: 18 | requests: 19 | memory: '64Mi' 20 | cpu: '250m' 21 | limits: 22 | memory: '128Mi' 23 | cpu: '500m' 24 | env: 25 | - name: DB_USER 26 | valueFrom: 27 | secretKeyRef: 28 | key: USERNAME 29 | name: mongodb-colordb-creds 30 | - name: DB_PASSWORD 31 | valueFrom: 32 | secretKeyRef: 33 | key: PASSWORD 34 | name: mongodb-colordb-creds 35 | - name: DB_URL 36 | value: 'mongodb://$(DB_USER):$(DB_PASSWORD)@mongodb-ss-0.mongodb-svc:27017/colordb' 37 | ports: 38 | - containerPort: 80 39 | -------------------------------------------------------------------------------- /proj-gke/color-api/_base/color-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: color-svc 5 | spec: 6 | type: LoadBalancer 7 | selector: 8 | app: color-api 9 | ports: 10 | - port: 80 11 | targetPort: 80 12 | --- 13 | apiVersion: v1 14 | kind: Service 15 | metadata: 16 | name: color-svc-np 17 | spec: 18 | type: NodePort 19 | selector: 20 | app: color-api 21 | ports: 22 | - port: 80 23 | targetPort: 80 24 | -------------------------------------------------------------------------------- /proj-gke/color-api/_base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - color-api.yaml 6 | - color-svc.yaml 7 | - ./network-policies/allow-external.yaml 8 | 9 | -------------------------------------------------------------------------------- /proj-gke/color-api/_base/network-policies/allow-external.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: allow-external 5 | spec: 6 | podSelector: 7 | matchLabels: 8 | app: color-api 9 | policyTypes: 10 | - Ingress 11 | ingress: 12 | - {} 13 | -------------------------------------------------------------------------------- /proj-gke/color-api/dev/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: dev-ingress 5 | annotations: 6 | kubernetes.io/ingress.global-static-ip-name: color-api-dev 7 | kubernetes.io/ingress.class: 'gce' 8 | networking.gke.io/managed-certificates: dev-mc 9 | spec: 10 | rules: 11 | - host: dev.socolourful.com 12 | http: 13 | paths: 14 | - path: /* 15 | 
pathType: ImplementationSpecific 16 | backend: 17 | service: 18 | name: color-svc-np 19 | port: 20 | number: 80 21 | -------------------------------------------------------------------------------- /proj-gke/color-api/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: dev 5 | 6 | resources: 7 | - ../_base 8 | - managed-cert.yaml 9 | - ingress.yaml 10 | 11 | patches: 12 | - path: use-dev-image.yaml 13 | -------------------------------------------------------------------------------- /proj-gke/color-api/dev/managed-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.gke.io/v1 2 | kind: ManagedCertificate 3 | metadata: 4 | name: dev-mc 5 | spec: 6 | domains: 7 | - dev.socolourful.com 8 | -------------------------------------------------------------------------------- /proj-gke/color-api/dev/use-dev-image.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: color-api 5 | spec: 6 | template: 7 | spec: 8 | containers: 9 | - name: color-api 10 | image: lmacademy/color-api:2.1.0-dev 11 | -------------------------------------------------------------------------------- /proj-gke/color-api/prod/increase-replica-count.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: color-api 5 | spec: 6 | replicas: 3 7 | -------------------------------------------------------------------------------- /proj-gke/color-api/prod/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: prod-ingress 5 | annotations: 6 | kubernetes.io/ingress.global-static-ip-name: color-api-prod 7 | kubernetes.io/ingress.class: 'gce' 8 | networking.gke.io/managed-certificates: prod-mc 9 | spec: 10 | rules: 11 | - host: socolourful.com 12 | http: 13 | paths: 14 | - path: /* 15 | pathType: ImplementationSpecific 16 | backend: 17 | service: 18 | name: color-svc-np 19 | port: 20 | number: 80 21 | - host: www.socolourful.com 22 | http: 23 | paths: 24 | - path: /* 25 | pathType: ImplementationSpecific 26 | backend: 27 | service: 28 | name: color-svc-np 29 | port: 30 | number: 80 31 | -------------------------------------------------------------------------------- /proj-gke/color-api/prod/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: prod 5 | 6 | resources: 7 | - ../_base 8 | - managed-cert.yaml 9 | - ingress.yaml 10 | 11 | patches: 12 | - path: increase-replica-count.yaml 13 | -------------------------------------------------------------------------------- /proj-gke/color-api/prod/managed-cert.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.gke.io/v1 2 | kind: ManagedCertificate 3 | metadata: 4 | name: prod-mc 5 | spec: 6 | domains: 7 | - socolourful.com 8 | - www.socolourful.com 9 | -------------------------------------------------------------------------------- /proj-gke/color-db/_base/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: 
kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - mongodb-ss.yaml 6 | - mongodb-svc.yaml 7 | - ./network-policies/allow-colorapi.yaml 8 | 9 | configMapGenerator: 10 | - name: mongodb-init-colordb 11 | files: 12 | - mongo-init.js 13 | -------------------------------------------------------------------------------- /proj-gke/color-db/_base/mongo-init.js: -------------------------------------------------------------------------------- 1 | const dbName = process.env.DB_NAME; 2 | const dbUser = process.env.DB_USER; 3 | const dbPassword = process.env.DB_PASSWORD; 4 | 5 | db = db.getSiblingDB(dbName); 6 | 7 | console.log(`INITIALIZING : ${dbName}`); 8 | console.log(`INITIALIZING : Creating user ${dbUser}`); 9 | 10 | db.createUser({ 11 | user: dbUser, 12 | pwd: dbPassword, 13 | roles: [ 14 | { 15 | role: 'readWrite', 16 | db: dbName, 17 | }, 18 | ], 19 | }); 20 | 21 | console.log(`INITIALIZING : Success`); 22 | -------------------------------------------------------------------------------- /proj-gke/color-db/_base/mongodb-ss.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: mongodb-ss 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: mongodb 9 | serviceName: mongodb-svc 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | app: mongodb 15 | spec: 16 | containers: 17 | - name: mongo 18 | image: mongo:8.0.0 19 | ports: 20 | - containerPort: 27017 21 | resources: 22 | requests: 23 | memory: '4Gi' 24 | cpu: '1' 25 | limits: 26 | memory: '8Gi' 27 | cpu: '2' 28 | env: 29 | - name: MONGO_INITDB_ROOT_USERNAME 30 | valueFrom: 31 | secretKeyRef: 32 | key: USERNAME 33 | name: mongodb-root-creds 34 | - name: MONGO_INITDB_ROOT_PASSWORD 35 | valueFrom: 36 | secretKeyRef: 37 | key: PASSWORD 38 | name: mongodb-root-creds 39 | - name: DB_NAME 40 | value: colordb 41 | - name: DB_USER 42 | valueFrom: 43 | secretKeyRef: 44 | key: USERNAME 45 | name: mongodb-colordb-creds 46 | - name: DB_PASSWORD 47 | valueFrom: 48 | secretKeyRef: 49 | key: PASSWORD 50 | name: mongodb-colordb-creds 51 | volumeMounts: 52 | - name: mongodb-init-config 53 | mountPath: /docker-entrypoint-initdb.d 54 | affinity: 55 | nodeAffinity: 56 | requiredDuringSchedulingIgnoredDuringExecution: 57 | nodeSelectorTerms: 58 | - matchExpressions: 59 | - key: topology.kubernetes.io/zone 60 | operator: In 61 | values: 62 | - europe-west4-a 63 | - europe-west4-b 64 | - europe-west4-c 65 | - europe-west1-b 66 | - europe-west1-c 67 | - europe-west1-d 68 | volumes: 69 | - name: mongodb-init-config 70 | configMap: 71 | name: mongodb-init-colordb 72 | volumeClaimTemplates: 73 | - metadata: 74 | name: mongodb-data 75 | spec: 76 | accessModes: ['ReadWriteOnce'] 77 | storageClassName: standard-rwo 78 | resources: 79 | requests: 80 | storage: 10Gi 81 | -------------------------------------------------------------------------------- /proj-gke/color-db/_base/mongodb-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mongodb-svc 5 | spec: 6 | clusterIP: None 7 | selector: 8 | app: mongodb 9 | ports: 10 | - port: 27017 11 | targetPort: 27017 12 | -------------------------------------------------------------------------------- /proj-gke/color-db/_base/network-policies/allow-colorapi.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: 
NetworkPolicy 3 | metadata: 4 | name: allow-colorapi 5 | spec: 6 | podSelector: 7 | matchLabels: 8 | app: mongodb 9 | policyTypes: 10 | - Ingress 11 | ingress: 12 | - from: 13 | - podSelector: 14 | matchLabels: 15 | app: color-api 16 | -------------------------------------------------------------------------------- /proj-gke/color-db/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: dev 5 | 6 | resources: 7 | - ../_base 8 | -------------------------------------------------------------------------------- /proj-gke/color-db/prod/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: prod 5 | 6 | resources: 7 | - ../_base 8 | 9 | patches: 10 | - path: use-premium-storage.yaml 11 | target: 12 | version: v1 13 | group: apps 14 | kind: StatefulSet 15 | -------------------------------------------------------------------------------- /proj-gke/color-db/prod/use-premium-storage.yaml: -------------------------------------------------------------------------------- 1 | - op: replace 2 | path: /spec/volumeClaimTemplates/0/spec/storageClassName 3 | value: premium-rwo 4 | -------------------------------------------------------------------------------- /proj-gke/namespaces/dev.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: dev -------------------------------------------------------------------------------- /proj-gke/namespaces/prod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: prod -------------------------------------------------------------------------------- /proj-gke/shared-config/_network-policies/deny-ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: NetworkPolicy 3 | metadata: 4 | name: deny-ingress 5 | spec: 6 | podSelector: {} 7 | policyTypes: 8 | - Ingress 9 | -------------------------------------------------------------------------------- /proj-gke/shared-config/_network-policies/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | resources: 5 | - deny-ingress.yaml 6 | -------------------------------------------------------------------------------- /proj-gke/shared-config/dev/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: dev 5 | 6 | resources: 7 | - ../_network-policies 8 | 9 | generatorOptions: 10 | disableNameSuffixHash: true 11 | 12 | secretGenerator: 13 | - name: mongodb-root-creds 14 | envs: 15 | - .env.root-creds.dev 16 | - name: mongodb-colordb-creds 17 | envs: 18 | - .env.colordb-creds.dev 19 | -------------------------------------------------------------------------------- /proj-gke/shared-config/prod/kustomization.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: kustomize.config.k8s.io/v1beta1 2 | kind: Kustomization 3 | 4 | namespace: prod 5 | 6 | resources: 7 | - ../_network-policies 8 | 
9 | generatorOptions: 10 | disableNameSuffixHash: true 11 | 12 | secretGenerator: 13 | - name: mongodb-root-creds 14 | envs: 15 | - .env.root-creds.prod 16 | - name: mongodb-colordb-creds 17 | envs: 18 | - .env.colordb-creds.prod 19 | -------------------------------------------------------------------------------- /proj-mongodb/color-api.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: color-api 5 | labels: 6 | app: color-api 7 | spec: 8 | selector: 9 | matchLabels: 10 | app: color-api 11 | template: 12 | metadata: 13 | labels: 14 | app: color-api 15 | spec: 16 | containers: 17 | - name: color-api 18 | image: lmacademy/color-api:2.0.0 19 | resources: 20 | limits: 21 | memory: '128Mi' 22 | cpu: '500m' 23 | env: 24 | - name: DB_USER 25 | valueFrom: 26 | secretKeyRef: 27 | key: username 28 | name: mongodb-colordb-creds 29 | - name: DB_PASSWORD 30 | valueFrom: 31 | secretKeyRef: 32 | key: password 33 | name: mongodb-colordb-creds 34 | - name: DB_HOST 35 | value: 'mongodb-ss-0.mongodb-svc.default.svc.cluster.local' 36 | - name: DB_PORT 37 | value: '27017' 38 | - name: DB_NAME 39 | value: 'colordb' 40 | - name: DB_URL 41 | value: 'mongodb://$(DB_USER):$(DB_PASSWORD)@$(DB_HOST):$(DB_PORT)/$(DB_NAME)' 42 | ports: 43 | - containerPort: 80 44 | -------------------------------------------------------------------------------- /proj-mongodb/color-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: color-svc 5 | spec: 6 | type: NodePort 7 | selector: 8 | app: color-api 9 | ports: 10 | - port: 80 11 | targetPort: 80 12 | nodePort: 30007 13 | -------------------------------------------------------------------------------- /proj-mongodb/mongodb-colordb-creds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: mongodb-colordb-creds 5 | labels: 6 | app: mongodb 7 | type: Opaque 8 | data: 9 | username: Y29sb3JkYl91c2Vy # colordb_user 10 | password: Y29sb3JkYl9wYXNzd29yZA== # colordb_password 11 | -------------------------------------------------------------------------------- /proj-mongodb/mongodb-init-colordb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: mongodb-init-colordb 5 | labels: 6 | app: mongodb 7 | data: 8 | mongo-init.js: | 9 | const dbName = process.env.DB_NAME; 10 | const dbUser = process.env.DB_USER; 11 | const dbPassword = process.env.DB_PASSWORD; 12 | 13 | db = db.getSiblingDB(dbName); 14 | 15 | console.log(`INITIALIZING : ${dbName}`); 16 | console.log(`INITIALIZING : Creating user ${dbUser}`); 17 | 18 | db.createUser({ 19 | user: dbUser, 20 | pwd: dbPassword, 21 | roles: [ 22 | { 23 | role: 'readWrite', 24 | db: dbName 25 | } 26 | ] 27 | }); 28 | 29 | console.log(`INITIALIZING : Success`); 30 | -------------------------------------------------------------------------------- /proj-mongodb/mongodb-root-creds.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: mongodb-root-creds 5 | labels: 6 | app: mongodb 7 | type: Opaque 8 | data: 9 | username: cm9vdF91c2Vy # root_user 10 | password: cm9vdF9wYXNzd29yZA== # root_password 11 | 
-------------------------------------------------------------------------------- /proj-mongodb/mongodb-ss.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: mongodb-ss 5 | spec: 6 | selector: 7 | matchLabels: 8 | app: mongodb 9 | serviceName: mongodb-svc 10 | replicas: 1 11 | template: 12 | metadata: 13 | labels: 14 | app: mongodb 15 | spec: 16 | containers: 17 | - name: mongo 18 | image: mongo:8.0.0 19 | ports: 20 | - containerPort: 27017 21 | env: 22 | - name: MONGO_INITDB_ROOT_USERNAME 23 | valueFrom: 24 | secretKeyRef: 25 | key: username 26 | name: mongodb-root-creds 27 | - name: MONGO_INITDB_ROOT_PASSWORD 28 | valueFrom: 29 | secretKeyRef: 30 | key: password 31 | name: mongodb-root-creds 32 | - name: DB_NAME 33 | value: colordb 34 | - name: DB_USER 35 | valueFrom: 36 | secretKeyRef: 37 | key: username 38 | name: mongodb-colordb-creds 39 | - name: DB_PASSWORD 40 | valueFrom: 41 | secretKeyRef: 42 | key: password 43 | name: mongodb-colordb-creds 44 | volumeMounts: 45 | - mountPath: /data/db 46 | name: mongodb-data 47 | - mountPath: /docker-entrypoint-initdb.d 48 | name: mongodb-init-config 49 | volumes: 50 | - name: mongodb-init-config 51 | configMap: 52 | name: mongodb-init-colordb 53 | volumeClaimTemplates: 54 | - metadata: 55 | name: mongodb-data 56 | spec: 57 | accessModes: ['ReadWriteOnce'] 58 | storageClassName: standard 59 | resources: 60 | requests: 61 | storage: 10Gi 62 | -------------------------------------------------------------------------------- /proj-mongodb/mongodb-svc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: mongodb-svc 5 | labels: 6 | app: mongodb 7 | spec: 8 | clusterIP: None 9 | selector: 10 | app: mongodb 11 | ports: 12 | - port: 27017 13 | targetPort: 27017 14 | -------------------------------------------------------------------------------- /proj-mongodb/traffic-generator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: traffic-generator 5 | labels: 6 | name: traffic-generator 7 | spec: 8 | containers: 9 | - name: traffic-generator 10 | image: lmacademy/traffic-generator:1.0.0 11 | args: 12 | - 'color-svc/api?colorKey=primary' 13 | - '0.5' 14 | resources: 15 | limits: 16 | memory: '128Mi' 17 | cpu: '500m' 18 | -------------------------------------------------------------------------------- /rbac/clusterroles/pod-admin-crb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRoleBinding 3 | metadata: 4 | name: pod-admin 5 | roleRef: 6 | kind: ClusterRole 7 | name: pod-admin 8 | apiGroup: rbac.authorization.k8s.io 9 | subjects: 10 | - kind: Group 11 | name: admin 12 | apiGroup: rbac.authorization.k8s.io 13 | -------------------------------------------------------------------------------- /rbac/clusterroles/pod-admin.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: ClusterRole 3 | metadata: 4 | name: pod-admin 5 | rules: 6 | - apiGroups: [''] 7 | resources: ['pods', 'pods/log', 'pods/exec', 'pods/attach'] 8 | verbs: ['*'] 9 | -------------------------------------------------------------------------------- /rbac/csr.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: certificates.k8s.io/v1 2 | kind: CertificateSigningRequest 3 | metadata: 4 | name: alice 5 | spec: 6 | request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1pUQ0NBVTBDQVFBd0lERU9NQXdHQTFVRUF3d0ZZV3hwWTJVeERqQU1CZ05WQkFvTUJXRmtiV2x1TUlJQgpJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBMEp1SnVSZ2hKbEdzTFlOQWk3bkVSQk5uCmhsNWtBTDFrWFhaMkt2NkFSc0NBNmxjWTFGOWRvbDIzdEcwSlY4aEhyQWhuVTRsbGlJUkRJbDEveHhZVDdaZ0cKRHdFS25uanRzUWNUdENQa3pMYVE3QVJ3ejY0OFJkUEppbXRkMTdSeXJ2NFp4STBwRnlXenRVVEFRZjJKUTEyeApNWXpaOGxSWkJGb3IwaXdGd05xTk41TUlFMnBtK2o5VUttSzQyTFpxN0tETVhuc2c2UmxqR0RPbStkb3VubC9HCkN5UUlSeGRLUnJrUHF4cVMxZ2ZrSDBNM1h3dkFJNFI3QjRyblpMaXBTRm1HWGV3TEtDcHZRQXFsVnArY0VOOWQKblQ4SE02TkdsNExOMGk0SGFmaURESHpKN0ZZQmwwZitaTWhVeHg3L3JjUjR6ZWdQVEdkREE4ekttWkNQYVFJRApBUUFCb0FBd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFNRlluSVQwYjRINGxFZGt6UURxYTU5V3dnUlc1L21sCnVzU3dGdkF5MUpFOG16VFJuTVArOU1tZFRteEtzSUJDY0w5WktxSXRUZFpxNk9obytVZmJWVGZ6azNGQ0k3SVQKU3ViOWxKRG50VzFHaVVLak1vYVlFeW5OSVJVOFVNYVZ3UlBMNzU1dXpyeEZPdytvWFBTNVZseVdsVG5qazZ0RQp6aDdGQkROZ2tsQkgxcEpLeUZFUHg3QVM2UXJ2T3c4aVlIY01zejF2TjFaZE9Fc0g4SHNma2pRTzlMRnNiYmdOCm5UMGRleDkyUU4wVGl6WnZ2L0VrN2FDUGpSRCtOUSszVlkvOGhnamQ5U0VHNXFIbFhoREJ6MzlPUCtsVk0yZ04KcHFZczZ2anpWdUtyWHBXdmt3eE9zNGo2NE9JSUR1SC9TeE1rTzFON0E4UDlPNzhlai95akxrYz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUgUkVRVUVTVC0tLS0tCg== 7 | signerName: kubernetes.io/kube-apiserver-client 8 | expirationSeconds: 86400 9 | usages: 10 | - client auth 11 | --- 12 | apiVersion: certificates.k8s.io/v1 13 | kind: CertificateSigningRequest 14 | metadata: 15 | name: bob 16 | spec: 17 | request: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0KTUlJQ1lUQ0NBVWtDQVFBd0hERU1NQW9HQTFVRUF3d0RZbTlpTVF3d0NnWURWUVFLREFOa1pYWXdnZ0VpTUEwRwpDU3FHU0liM0RRRUJBUVVBQTRJQkR3QXdnZ0VLQW9JQkFRQ1dwRHFMTVF6Y2ZhVzN4RUI1WmRXeHJkSWhhcjNvCm5lcTZmTkNwS08wVFFoL0RGL0tsMmhKcjIrdFJHeHFRZElua2xMMENsOVl3Y2FRMXQyQnBUN0lpaEdXbGFabnIKS1pXbnAyKzF2YU1hdXFOU0g4REJtclZ5eTdXTzZCOHU2Mzl0bEFIVW4yKzA4cDZpQjlMWXpyOWxPVVFyWXcrZQorTzFZcW9BNjl4MVdEelF0NUowa3RSdGhWQ1h3RTNVaTBobmQ2TEZtcWp1dy9RelY3eWlsQVJadVVsdVpVRTJzCjJPSU9oRWZDY2twZG1POWFpc04rUzVodHlndDh0RngyeWZrVU9QaXZPZVFRQnN1VEdzVEVFWkVQTkJUMy8zbDgKU1E4ZnhEQ1NJdG9aK3ROMzQ2UXZ3UmZrM01EaHRhdTRSNFpCUWUvVDNKNnZha3Y0VlQ4dkhma0JBZ01CQUFHZwpBREFOQmdrcWhraUc5dzBCQVFzRkFBT0NBUUVBTWlYblk3L29qOWpVYmVLQjBmTDJkM1A5bk4vUTAzenhndUdRCjRGL2g4eWI2d1Vadmx2ZG90Y1FQRVc5c2k1SnN4SU50WHB3VkFSMkVNZG54ejM3eUlEL3JDcDMrVVM2S0VvSkQKYkdoMFV5b3BVZU9MRFc2cEhjSTY4a1MvZi9LU0xJb210Vnk4V1ZwYk1MSkZ5bENSaEN3VXB0T3B5WWVSRnY4bAp2dmpFYlhPWDExQmFUZ1BkQ2w4OXJXWTlWRUhGeDBHbUhvdFJITzZmYmUwT3Q1MVhGWWNxUnFsWlRPREdkYzlzCkdjNHFwa0FHL0wrMzBpdE1CSHl4VFZGZ2pTL1JsdWJaVEQyWHlSTjJnQlg4Z2hBaEtmMDlLNGN4UDJBZTFwVVgKUVpsVkFIckRQbXdjR2gwVTYvYmtXVXhMN3lOVWJFbjlzNjkvRlZqK1ZUNjdicmhMTlE9PQotLS0tLUVORCBDRVJUSUZJQ0FURSBSRVFVRVNULS0tLS0K 18 | signerName: kubernetes.io/kube-apiserver-client 19 | expirationSeconds: 86400 20 | usages: 21 | - client auth 22 | -------------------------------------------------------------------------------- /rbac/pods.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: nginx 5 | namespace: dev 6 | labels: 7 | name: nginx 8 | spec: 9 | containers: 10 | - name: nginx 11 | image: nginx:1.27.0 12 | resources: 13 | limits: 14 | memory: '128Mi' 15 | cpu: '500m' 16 | ports: 17 | - containerPort: 80 18 | --- 19 | apiVersion: v1 20 | kind: Pod 21 | metadata: 22 | name: nginx 23 | namespace: 
prod 24 | labels: 25 | name: nginx 26 | spec: 27 | containers: 28 | - name: nginx 29 | image: nginx:1.27.0 30 | resources: 31 | limits: 32 | memory: '128Mi' 33 | cpu: '500m' 34 | ports: 35 | - containerPort: 80 36 | --- 37 | apiVersion: v1 38 | kind: Pod 39 | metadata: 40 | name: alpine-curl 41 | namespace: dev 42 | labels: 43 | name: alpine-curl 44 | spec: 45 | serviceAccountName: pod-inspector 46 | containers: 47 | - name: curl 48 | image: lmacademy/alpine-curl:1.0.0 49 | resources: 50 | limits: 51 | memory: '128Mi' 52 | cpu: '500m' 53 | ports: 54 | - containerPort: 80 55 | -------------------------------------------------------------------------------- /rbac/roles/dev-pod-reader-rb.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: RoleBinding 3 | metadata: 4 | namespace: dev 5 | name: pod-reader 6 | roleRef: 7 | kind: Role 8 | name: pod-reader 9 | apiGroup: rbac.authorization.k8s.io 10 | subjects: 11 | - kind: User 12 | name: bob 13 | apiGroup: rbac.authorization.k8s.io 14 | - kind: ServiceAccount 15 | name: pod-inspector 16 | namespace: dev 17 | -------------------------------------------------------------------------------- /rbac/roles/dev-pod-reader.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: rbac.authorization.k8s.io/v1 2 | kind: Role 3 | metadata: 4 | namespace: dev 5 | name: pod-reader 6 | rules: 7 | - apiGroups: [''] 8 | resources: ['pods'] 9 | verbs: ['get', 'list'] 10 | -------------------------------------------------------------------------------- /rbac/roles/ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: dev 5 | --- 6 | apiVersion: v1 7 | kind: Namespace 8 | metadata: 9 | name: prod 10 | -------------------------------------------------------------------------------- /rbac/service-account/pod-inspector.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ServiceAccount 3 | metadata: 4 | namespace: default 5 | name: pod-inspector 6 | --- 7 | apiVersion: v1 8 | kind: ServiceAccount 9 | metadata: 10 | namespace: dev 11 | name: pod-inspector 12 | -------------------------------------------------------------------------------- /replica-sets/nginx-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: sole-nginx 5 | labels: 6 | app: nginx 7 | spec: 8 | containers: 9 | - name: nginx 10 | image: nginx:1.27.0 11 | ports: 12 | - containerPort: 80 13 | -------------------------------------------------------------------------------- /replica-sets/nginx-rs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: nginx-replicaset 5 | spec: 6 | replicas: 3 7 | selector: 8 | matchLabels: 9 | app: nginx 10 | template: 11 | metadata: 12 | labels: 13 | app: nginx 14 | spec: 15 | containers: 16 | - name: nginx 17 | image: nginx:1.27.0-alpine 18 | ports: 19 | - containerPort: 80 20 | -------------------------------------------------------------------------------- /resource-quotas/color-api-depl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: color-api-depl 5 | namespace: dev 6 | labels: 
7 | app: color-api 8 | spec: 9 | replicas: 4 10 | selector: 11 | matchLabels: 12 | app: color-api 13 | template: 14 | metadata: 15 | labels: 16 | app: color-api 17 | spec: 18 | containers: 19 | - name: color-api 20 | image: lmacademy/color-api:1.1.0 21 | resources: 22 | requests: 23 | cpu: '200m' 24 | memory: '256Mi' 25 | limits: 26 | cpu: '500m' 27 | memory: '512Mi' 28 | ports: 29 | - containerPort: 80 30 | -------------------------------------------------------------------------------- /resource-quotas/color-api-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: color-api 5 | namespace: dev 6 | labels: 7 | app: color-api 8 | spec: 9 | containers: 10 | - name: color-api 11 | image: lmacademy/color-api:1.1.0 12 | resources: 13 | requests: 14 | cpu: '200m' 15 | memory: '256Mi' 16 | limits: 17 | cpu: '500m' 18 | memory: '512Mi' 19 | ports: 20 | - containerPort: 80 21 | -------------------------------------------------------------------------------- /resource-quotas/dev-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: dev 5 | --- 6 | apiVersion: v1 7 | kind: ResourceQuota 8 | metadata: 9 | namespace: dev 10 | name: dev-quota 11 | spec: 12 | hard: 13 | requests.cpu: '2' 14 | requests.memory: '2Gi' 15 | limits.cpu: '4' 16 | limits.memory: '4Gi' 17 | -------------------------------------------------------------------------------- /resource-quotas/prod-ns.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Namespace 3 | metadata: 4 | name: prod 5 | --- 6 | apiVersion: v1 7 | kind: ResourceQuota 8 | metadata: 9 | namespace: prod 10 | name: prod-quota 11 | spec: 12 | hard: 13 | requests.cpu: '2' 14 | requests.memory: '2Gi' 15 | limits.cpu: '4' 16 | limits.memory: '4Gi' 17 | -------------------------------------------------------------------------------- /secrets/demo-pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: busybox 5 | labels: 6 | name: busybox 7 | spec: 8 | containers: 9 | - name: busybox 10 | image: busybox:1.36.1 11 | command: 12 | - 'sh' 13 | - '-c' 14 | - 'sleep 1800' 15 | resources: 16 | limits: 17 | memory: '128Mi' 18 | cpu: '500m' 19 | volumeMounts: 20 | - name: db-secrets 21 | mountPath: /etc/db 22 | volumes: 23 | - name: db-secrets 24 | secret: 25 | secretName: db-creds 26 | items: 27 | - key: password 28 | path: dev/password 29 | -------------------------------------------------------------------------------- /services/color-api-clusterip.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: color-api-clusterip 5 | labels: 6 | app: color-api 7 | spec: 8 | type: ClusterIP 9 | selector: 10 | app: color-api 11 | ports: 12 | - port: 80 13 | targetPort: 80 14 | -------------------------------------------------------------------------------- /services/color-api-depl.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: color-api-deployment 5 | labels: 6 | app: color-api 7 | spec: 8 | replicas: 5 9 | selector: 10 | matchLabels: 11 | app: color-api 12 | template: 13 | metadata: 14 | labels: 15 | app: color-api 16 | spec: 17 | containers: 
18 | - name: color-api 19 | image: lmacademy/color-api:1.1.0 20 | ports: 21 | - containerPort: 80 22 | -------------------------------------------------------------------------------- /services/color-api-nodeport.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: color-api-nodeport 5 | labels: 6 | app: color-api 7 | spec: 8 | type: NodePort 9 | selector: 10 | app: color-api 11 | ports: 12 | - port: 80 13 | targetPort: 80 14 | nodePort: 30007 15 | -------------------------------------------------------------------------------- /services/google-extname.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: my-external-svc 5 | spec: 6 | type: ExternalName 7 | externalName: google.com -------------------------------------------------------------------------------- /services/traffic-generator.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: traffic-generator 5 | labels: 6 | app: traffic-generator 7 | spec: 8 | containers: 9 | - name: traffic-generator 10 | image: lmacademy/traffic-generator:1.0.0 11 | args: 12 | - 'color-api-clusterip/api' 13 | - '0.5' 14 | -------------------------------------------------------------------------------- /stateful-sets/pvs.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: ss-0 5 | spec: 6 | capacity: 7 | storage: 128Mi 8 | accessModes: 9 | - ReadWriteOnce 10 | storageClassName: local-storage 11 | local: 12 | path: /mnt/disks/ss-0 13 | nodeAffinity: 14 | required: 15 | nodeSelectorTerms: 16 | - matchExpressions: 17 | - key: kubernetes.io/hostname 18 | operator: In 19 | values: ['minikube'] 20 | --- 21 | apiVersion: v1 22 | kind: PersistentVolume 23 | metadata: 24 | name: ss-1 25 | spec: 26 | capacity: 27 | storage: 128Mi 28 | accessModes: 29 | - ReadWriteOnce 30 | storageClassName: local-storage 31 | local: 32 | path: /mnt/disks/ss-1 33 | nodeAffinity: 34 | required: 35 | nodeSelectorTerms: 36 | - matchExpressions: 37 | - key: kubernetes.io/hostname 38 | operator: In 39 | values: ['minikube'] 40 | --- 41 | apiVersion: v1 42 | kind: PersistentVolume 43 | metadata: 44 | name: ss-2 45 | spec: 46 | capacity: 47 | storage: 128Mi 48 | accessModes: 49 | - ReadWriteOnce 50 | storageClassName: local-storage 51 | local: 52 | path: /mnt/disks/ss-2 53 | nodeAffinity: 54 | required: 55 | nodeSelectorTerms: 56 | - matchExpressions: 57 | - key: kubernetes.io/hostname 58 | operator: In 59 | values: ['minikube'] -------------------------------------------------------------------------------- /stateful-sets/stateful-set.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: StatefulSet 3 | metadata: 4 | name: demo-ss 5 | spec: 6 | serviceName: busybox 7 | replicas: 2 8 | selector: 9 | matchLabels: 10 | app: busybox 11 | template: 12 | metadata: 13 | labels: 14 | app: busybox 15 | spec: 16 | containers: 17 | - name: busybox 18 | image: busybox:1.36.1 19 | command: 20 | - 'sh' 21 | - '-c' 22 | - 'sleep 3600' 23 | resources: 24 | limits: 25 | memory: '128Mi' 26 | cpu: '500m' 27 | volumeMounts: 28 | - name: local-volume 29 | mountPath: /mnt/local 30 | volumeClaimTemplates: 31 | - metadata: 32 | name: local-volume 33 | spec: 34 | 
        accessModes:
          - ReadWriteOnce
        storageClassName: standard
        resources:
          requests:
            storage: 128Mi
--------------------------------------------------------------------------------
/storage-persistence/dynamic.yml:
--------------------------------------------------------------------------------
# With a dynamic provisioner (here minikube's 'standard' StorageClass), this
# claim alone is enough: a matching PersistentVolume is created on demand.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: dynamic-pv-example
spec:
  resources:
    requests:
      storage: 1Gi
  volumeMode: Filesystem
  storageClassName: standard
  accessModes:
    - ReadWriteOnce
--------------------------------------------------------------------------------
/storage-persistence/empty-dir-example.yaml:
--------------------------------------------------------------------------------
# Two containers in the same pod share an emptyDir volume; the reader mounts it
# read-only. The volume's contents live only as long as the pod does.
apiVersion: v1
kind: Pod
metadata:
  name: empty-dir-demo
  labels:
    name: empty-dir-demo
spec:
  containers:
    - name: empty-dir-writer
      image: busybox:1.36.1
      command:
        - 'sh'
        - '-c'
        - 'sleep 3600'
      resources:
        limits:
          memory: '128Mi'
          cpu: '500m'
      volumeMounts:
        - name: temporary-storage
          mountPath: /usr/share/temp
    - name: empty-dir-reader
      image: busybox:1.36.1
      command:
        - 'sh'
        - '-c'
        - 'sleep 3600'
      resources:
        limits:
          memory: '128Mi'
          cpu: '500m'
      volumeMounts:
        - name: temporary-storage
          mountPath: /temp
          readOnly: true
  volumes:
    - name: temporary-storage
      emptyDir: {}
--------------------------------------------------------------------------------
/storage-persistence/local-vol-example.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: PersistentVolume
metadata:
  name: local-volume
spec:
  capacity:
    storage: 1Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  storageClassName: local-storage
  local:
    path: /mnt/disks/local1
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values: ['minikube']
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: local-volume-claim
spec:
  resources:
    requests:
      storage: 1Gi
  volumeMode: Filesystem
  accessModes:
    - ReadWriteOnce
  storageClassName: local-storage
---
apiVersion: v1
kind: Pod
metadata:
  name: local-vol-pod
  labels:
    name: local-vol-pod
spec:
  containers:
    - name: local-vol
      image: busybox:1.36.1
      command:
        - 'sh'
        - '-c'
        - 'sleep 3600'
      resources:
        limits:
          memory: '128Mi'
          cpu: '500m'
      volumeMounts:
        - name: local-volume
          mountPath: /mnt/local
  volumes:
    - name: local-volume
      persistentVolumeClaim:
        claimName: local-volume-claim
---
# Both pods bind the same ReadWriteOnce claim; this works because the local
# volume's node affinity places both pods on the same node (RWO is per-node).
apiVersion: v1
kind: Pod
metadata:
  name: local-vol-pod2
  labels:
    name: local-vol-pod2
spec:
  containers:
    - name: local-vol
      image: busybox:1.36.1
      command:
        - 'sh'
        - '-c'
        - 'sleep 3600'
      resources:
        limits:
          memory: '128Mi'
          cpu: '500m'
      volumeMounts:
        - name: local-volume
          mountPath: /mnt/local2
  volumes:
    - name: local-volume
      persistentVolumeClaim:
        claimName: local-volume-claim
--------------------------------------------------------------------------------
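A minimal verification sketch, assuming a running minikube cluster; the commands below are illustrative and not files in the repository:

kubectl apply -f resource-quotas/dev-ns.yaml                 # namespace + quota
kubectl apply -f resource-quotas/color-api-depl.yaml -n dev  # 4 replicas fit within the quota
kubectl describe resourcequota dev-quota -n dev              # compare Used against Hard

kubectl apply -f storage-persistence/empty-dir-example.yaml
kubectl exec empty-dir-demo -c empty-dir-writer -- sh -c 'echo hi > /usr/share/temp/f'
kubectl exec empty-dir-demo -c empty-dir-reader -- cat /temp/f   # prints "hi"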