├── modules ├── 10. Orchestration and Clustering │ ├── app │ │ ├── app │ │ │ ├── __init__.py │ │ │ └── main.py │ │ ├── text.txt │ │ ├── requirements.txt │ │ ├── configmap.yaml │ │ ├── service.yaml │ │ ├── replicaset.yaml │ │ ├── docker-compose.yaml │ │ ├── .docker │ │ │ └── docker-compose.yaml │ │ ├── pod-volume.yaml │ │ ├── pod.yaml │ │ ├── pod-configmap.yaml │ │ ├── verify.sh │ │ ├── Dockerfile │ │ ├── deployment.yaml │ │ └── README.md │ ├── containers-workshop │ │ ├── finish.md │ │ ├── wait-init.sh │ │ ├── setup.sh │ │ ├── step1 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ ├── step2 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ ├── step6 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ ├── step7 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ ├── step3 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ ├── step4 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ ├── step5 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ ├── index.json │ │ ├── intro.md │ │ └── README.md │ ├── Part1 │ │ ├── image │ │ │ └── content │ │ │ │ └── 1692933167567.png │ │ └── README.md │ ├── Part4 │ │ └── README.md │ ├── Part2 │ │ └── README.md │ ├── Part3 │ │ └── README.md │ ├── README.md │ └── Part5 │ │ └── README.md ├── 01. Linux Basics │ ├── A. 
Automation and Scripting Languages - Bash │ │ ├── linux-bash-workshop │ │ │ ├── setup.sh │ │ │ ├── finish.md │ │ │ ├── step1 │ │ │ │ ├── verify.sh │ │ │ │ └── text.md │ │ │ ├── step2 │ │ │ │ ├── verify.sh │ │ │ │ └── text.md │ │ │ ├── step3 │ │ │ │ ├── verify.sh │ │ │ │ └── text.md │ │ │ ├── step4 │ │ │ │ ├── verify.sh │ │ │ │ └── text.md │ │ │ ├── step8 │ │ │ │ ├── text.md │ │ │ │ └── verify.sh │ │ │ ├── step7 │ │ │ │ ├── verify.sh │ │ │ │ └── text.md │ │ │ ├── intro.md │ │ │ ├── step9 │ │ │ │ ├── text.md │ │ │ │ └── verify.sh │ │ │ ├── step10 │ │ │ │ ├── text.md │ │ │ │ └── verify.sh │ │ │ ├── step5 │ │ │ │ ├── verify.sh │ │ │ │ └── text.md │ │ │ ├── step6 │ │ │ │ ├── text.md │ │ │ │ └── verify.sh │ │ │ └── index.json │ │ ├── README.md │ │ ├── structure.json │ │ └── linux-cli-workshop │ │ │ ├── finish.md │ │ │ ├── step5 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ │ ├── step6 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ │ ├── step7 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ │ ├── step10 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ │ ├── step11 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ │ ├── step1 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ │ ├── step4 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ │ ├── step8 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ │ ├── step9 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ │ ├── step3 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ │ ├── step2 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ │ ├── intro.md │ │ │ └── index.json │ ├── workshop.md │ └── linux.md ├── 03. Git │ ├── SCM_RSSchool.pdf │ ├── gh-cross-check-guide-imgs │ │ ├── step-02.png │ │ ├── step-03.png │ │ ├── step-04.png │ │ ├── step-05.png │ │ ├── step-07.png │ │ ├── step-08.png │ │ ├── step-09.png │ │ ├── step-10.gif │ │ ├── step-11.gif │ │ ├── step-06_01.png │ │ └── step-06_02.png │ ├── README.md │ ├── gh-cross-check-guide.md │ └── cv-project.md ├── 05. CICD │ ├── CICD_RSSchool.pdf │ ├── README.md │ └── workshop.md ├── 02. 
Networking Basics │ ├── image.png │ ├── Networking Basics.pptx │ ├── 02.1 Networking Workshop │ │ ├── Networking Workshop.pptx │ │ └── README.md │ └── README.md ├── 09. Containers vs VMs │ ├── img │ │ ├── VMs.png │ │ └── Containers.png │ ├── README.md │ ├── 9.2. Use cases │ │ └── README.md │ ├── 9.0. The basics │ │ └── README.md │ └── 9.1. VMs and Containers │ │ └── README.md ├── 08. Configuration Management │ ├── cm.png │ ├── ansible-workshop │ │ ├── README.md │ │ ├── wait-init.sh │ │ ├── finish.md │ │ ├── step1 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ ├── step5 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ ├── step3 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ ├── step2 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ ├── setup.sh │ │ ├── step4 │ │ │ ├── verify.sh │ │ │ └── text.md │ │ ├── intro.md │ │ └── index.json │ └── workshop.md ├── 07. Infrastructure as Code (IaC) │ ├── Infrastructur as Code.pptx │ └── README.md ├── 00. DevOps Intro │ └── README.md ├── structure.json ├── 04. Cloud │ ├── Cloud Practitioner Essentials │ │ └── README.md │ ├── Cloud Practitioner Quest │ │ └── README.md │ └── AWS Fundamentals │ │ └── README.md ├── README.md └── 12. Final Project │ └── README.md ├── structure.json ├── LICENSE └── README.md /modules/10. Orchestration and Clustering/app/app/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/app/text.txt: -------------------------------------------------------------------------------- 1 | GREAT, YOU DID IT! -------------------------------------------------------------------------------- /structure.json: -------------------------------------------------------------------------------- 1 | { 2 | "items": [ 3 | { "path": "modules" } 4 | ] 5 | } -------------------------------------------------------------------------------- /modules/10. 
Orchestration and Clustering/app/requirements.txt: -------------------------------------------------------------------------------- 1 | fastapi 2 | uvicorn 3 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/setup.sh: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /modules/03. Git/SCM_RSSchool.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/03. Git/SCM_RSSchool.pdf -------------------------------------------------------------------------------- /modules/05. CICD/CICD_RSSchool.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/05. CICD/CICD_RSSchool.pdf -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/app/configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ConfigMap 3 | metadata: 4 | name: my-config -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/app/service.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Service 3 | metadata: 4 | name: my-firts-loadbalancer -------------------------------------------------------------------------------- /modules/02. Networking Basics/image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/02. 
Networking Basics/image.png -------------------------------------------------------------------------------- /modules/09. Containers vs VMs/img/VMs.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/09. Containers vs VMs/img/VMs.png -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/app/replicaset.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: ReplicaSet 3 | metadata: 4 | name: fastapi-replicaset -------------------------------------------------------------------------------- /modules/08. Configuration Management/cm.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/08. Configuration Management/cm.png -------------------------------------------------------------------------------- /modules/09. Containers vs VMs/img/Containers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/09. Containers vs VMs/img/Containers.png -------------------------------------------------------------------------------- /modules/02. Networking Basics/Networking Basics.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/02. Networking Basics/Networking Basics.pptx -------------------------------------------------------------------------------- /modules/03. Git/gh-cross-check-guide-imgs/step-02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/03. 
Git/gh-cross-check-guide-imgs/step-02.png -------------------------------------------------------------------------------- /modules/03. Git/gh-cross-check-guide-imgs/step-03.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/03. Git/gh-cross-check-guide-imgs/step-03.png -------------------------------------------------------------------------------- /modules/03. Git/gh-cross-check-guide-imgs/step-04.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/03. Git/gh-cross-check-guide-imgs/step-04.png -------------------------------------------------------------------------------- /modules/03. Git/gh-cross-check-guide-imgs/step-05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/03. Git/gh-cross-check-guide-imgs/step-05.png -------------------------------------------------------------------------------- /modules/03. Git/gh-cross-check-guide-imgs/step-07.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/03. Git/gh-cross-check-guide-imgs/step-07.png -------------------------------------------------------------------------------- /modules/03. Git/gh-cross-check-guide-imgs/step-08.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/03. Git/gh-cross-check-guide-imgs/step-08.png -------------------------------------------------------------------------------- /modules/03. 
Git/gh-cross-check-guide-imgs/step-09.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/03. Git/gh-cross-check-guide-imgs/step-09.png -------------------------------------------------------------------------------- /modules/03. Git/gh-cross-check-guide-imgs/step-10.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/03. Git/gh-cross-check-guide-imgs/step-10.gif -------------------------------------------------------------------------------- /modules/03. Git/gh-cross-check-guide-imgs/step-11.gif: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/03. Git/gh-cross-check-guide-imgs/step-11.gif -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/README.md: -------------------------------------------------------------------------------- 1 | # KillerCoda challenges 2 | 3 | - Bash scripting challenges 4 | - CLI commands challenges 5 | -------------------------------------------------------------------------------- /modules/03. Git/gh-cross-check-guide-imgs/step-06_01.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/03. Git/gh-cross-check-guide-imgs/step-06_01.png -------------------------------------------------------------------------------- /modules/03. Git/gh-cross-check-guide-imgs/step-06_02.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/03. 
Git/gh-cross-check-guide-imgs/step-06_02.png -------------------------------------------------------------------------------- /modules/08. Configuration Management/ansible-workshop/README.md: -------------------------------------------------------------------------------- 1 | # RSSchool Ansible Workshop 2 | ### [Workshop Link](https://killercoda.com/rsschool/course/modules/ansible-workshop) -------------------------------------------------------------------------------- /modules/07. Infrastructure as Code (IaC)/Infrastructur as Code.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/07. Infrastructure as Code (IaC)/Infrastructur as Code.pptx -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/structure.json: -------------------------------------------------------------------------------- 1 | { 2 | "items": [ 3 | { "path": "linux-bash-workshop" }, 4 | { "path": "linux-cli-workshop"} 5 | ] 6 | } -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/finish.md: -------------------------------------------------------------------------------- 1 | ### Thanks for participating in this Workshop! 2 | 3 | You know now how to deal with Containers and Kubernetes orchestration. 4 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/finish.md: -------------------------------------------------------------------------------- 1 | ## Damn, you have done, warrior! 2 | 3 | Thank you for being patient. I knew you were smart enough to complete it. -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. 
Automation and Scripting Languages - Bash/linux-cli-workshop/finish.md: -------------------------------------------------------------------------------- 1 | ## Damn, you have done, warrior! 2 | 3 | Thank you for being patient. I knew you were smart enough to complete it. 4 | -------------------------------------------------------------------------------- /modules/02. Networking Basics/02.1 Networking Workshop/Networking Workshop.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/02. Networking Basics/02.1 Networking Workshop/Networking Workshop.pptx -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/Part1/image/content/1692933167567.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/rolling-scopes-school/devops/HEAD/modules/10. Orchestration and Clustering/Part1/image/content/1692933167567.png -------------------------------------------------------------------------------- /modules/08. Configuration Management/ansible-workshop/wait-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # waits for background init to finish 3 | 4 | echo "Installing scenario..." 5 | while [ ! -f /tmp/finished ]; do sleep 1; done 6 | echo DONE -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/wait-init.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # waits for background init to finish 3 | 4 | echo "Installing scenario..." 5 | while [ ! -f /tmp/finished ]; do sleep 1; done 6 | echo DONE -------------------------------------------------------------------------------- /modules/08. 
Configuration Management/ansible-workshop/finish.md: -------------------------------------------------------------------------------- 1 | ### Thanks for participating in this Workshop! 2 | 3 | You solved this workshop and you were able to deploy and configure WordPress with a LAMP stack, thanks for participating! -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step1/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ $(bash /scripts/hello.sh) != "Hello, World!" || $(bash /scripts/numbers.sh) != "1 is odd!" ]]; 4 | then exit 1 5 | fi -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/app/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | # Write your docker compose here 2 | api: 3 | image: myfirstcontainer:latest 4 | container_name: fastapi-application 5 | environment: 6 | PORT: 8000 7 | ports: 8 | - '8000:8000' 9 | restart: "no" 10 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step5/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # check all proper files exist 4 | if ! ls /task1/sun_star | sha256sum | grep b288d79f8f1a4961ae038a63840b4cc1dab32b1c4487e9c35636c42e990d5e95; then 5 | exit 1 6 | fi 7 | 8 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step6/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # check all files have right permissions 4 | if ! 
stat --format=%n%A /task1/permissions/* | sort | sha256sum | grep 115ce2706c21f0f25a5965f8703741bde11c17e1aad577d5b041cee006d0a659; then 5 | exit 1 6 | fi -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step7/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # check all files contain right content 4 | if ! cat /task1/canto_1/{of_lines,head_32,tail_32} | sha256sum | grep 4fb72915dbf11cc25888f696d98f49f4eff3b574101d2dba5f6d958cff53d360; then 5 | exit 1 6 | fi -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step10/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # check all files contain right content 4 | if ! cat /task1/it_will_be_tough/{emails,mac_addresses,website}_result | sha256sum | grep 5e0b633cd61fe4c44171dff8addec74fd61829ee5e9bba0f794a125c87c4e6ac; then 5 | exit 1 6 | fi -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step11/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # check all files contain right content 4 | if ! cat /task1/it_will_be_tough/{emails,mac_addresses,website}_result | sha256sum | grep 5e0b633cd61fe4c44171dff8addec74fd61829ee5e9bba0f794a125c87c4e6ac; then 5 | exit 1 6 | fi -------------------------------------------------------------------------------- /modules/08. 
Configuration Management/ansible-workshop/step1/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | LOGFILE=/root/ks/step1-verify.log 4 | set -e # exit once any command fails 5 | 6 | { 7 | date 8 | docker ps | grep db1 9 | docker ps | grep web 10 | 11 | } >> ${LOGFILE} 2>&1 12 | 13 | echo "done" # let Validator know success -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | echo starting... 4 | git clone https://github.com/rolling-scopes-school/devops.git 5 | #cd devops/modules/10. Containers/app 6 | cd devops/ 7 | git checkout feature/containers 8 | cd modules/10. Containers/app 9 | mkdir logs 10 | echo done > /tmp/finished 11 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step1/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | ls /task1/test_dir_1 > /expected/step1 4 | ls /task1/test_dir_2 >> /expected/step1 5 | ls /task1/test_dir_3 >> /expected/step1 6 | 7 | if ! sha256sum /expected/step1 | grep 237eef37d83d9063571fd81d7803c1f9cfa1346524222722216d7faf9d1d9fea; then 8 | exit 1 9 | fi -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step4/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # check all proper files exist 4 | if ! 
cat /task1/metamorphosis/kill_bill /task1/metamorphosis/lord_of_the_rings /task1/metamorphosis/the_matrix | sha256sum | grep a7224d98504a416aa80fa08e62e071a8e802d4279cc8629e3210547294b4762b; then 5 | exit 1 6 | fi 7 | 8 | -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/app/.docker/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | services: 2 | api: 3 | build: 4 | context: . 5 | target: dev-envs 6 | container_name: fastapi-application 7 | environment: 8 | PORT: 8000 9 | ports: 10 | - '8000:8000' 11 | volumes: 12 | - /var/run/docker.sock:/var/run/docker.sock 13 | restart: "no" 14 | -------------------------------------------------------------------------------- /modules/00. DevOps Intro/README.md: -------------------------------------------------------------------------------- 1 | - **Goal:** Understand the fundamental principles of DevOps, recognize the benefits and key practices of DevOps, gain insight into continuous integration, continuous delivery, and deployment pipelines, comprehend the importance of collaboration, automation, and monitoring, and apply knowledge to improve software development processes and ensure high-quality products. 2 | -------------------------------------------------------------------------------- /modules/05. CICD/README.md: -------------------------------------------------------------------------------- 1 | # Continuous Integration, Delivery and Deployment. 2 | 3 | In this part of the course, you going to explore CI/CD, whre you going to learn how to understand the flow on how the applications may be build, deliver and deploy from the SCM engine to a productive environment. 4 | 5 | In this lesson you will find the pdf presentation and the workshop. 6 | 7 | Thanks and good luck! -------------------------------------------------------------------------------- /modules/10. 
Orchestration and Clustering/app/pod-volume.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: fastapi-pod-volume 5 | spec: 6 | containers: 7 | - name: api 8 | image: localhost:5000/myfirstcontainer:latest 9 | imagePullPolicy: IfNotPresent 10 | ports: 11 | - containerPort: 8000 12 | env: 13 | - name: PORT 14 | value: "8000" -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/app/pod.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: fastapi-pod 5 | spec: 6 | containers: 7 | - name: api 8 | image: localhost:5000/myfirstcontainer:latest 9 | imagePullPolicy: IfNotPresent 10 | ports: 11 | - containerPort: 8000 12 | env: 13 | - name: PORT 14 | value: "8000" -------------------------------------------------------------------------------- /modules/08. Configuration Management/ansible-workshop/step5/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ansible-inventory --graph -i dynamic_docker.yaml | grep web 3 | 4 | LOGFILE=/root/ks/step2-verify.log 5 | set -e # exit once any command fails 6 | 7 | { 8 | date 9 | curl -I http://127.0.0.1/wp-admin/install.php | grep 200 10 | 11 | } >> ${LOGFILE} 2>&1 12 | 13 | echo "done" # let Validator know success -------------------------------------------------------------------------------- /modules/10. 
Orchestration and Clustering/app/pod-configmap.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Pod 3 | metadata: 4 | name: fastapi-pod-configmap 5 | spec: 6 | containers: 7 | - name: api 8 | image: localhost:5000/myfirstcontainer:latest 9 | imagePullPolicy: IfNotPresent 10 | ports: 11 | - containerPort: 8000 12 | envFrom: 13 | - configMapRef: 14 | name: my-config -------------------------------------------------------------------------------- /modules/structure.json: -------------------------------------------------------------------------------- 1 | { 2 | "items": [ 3 | { "path": "01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop" }, 4 | { "path": "01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop" }, 5 | { "path": "08. Configuration Management/ansible-workshop" }, 6 | { "path": "10. Orchestration and Clustering/containers-workshop" } 7 | ] 8 | } 9 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step1/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `#!/bin/bash` - so-called [shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)) 4 | 5 | # Task description: 6 | 7 | 1. Create a script in /scripts/hello.sh, which prints 'Hello, World!' into the console. 8 | 2. Create a script in /scripts/numbers.sh, which prints '1 is odd!' into the console. 9 | -------------------------------------------------------------------------------- /modules/08. 
Configuration Management/ansible-workshop/step3/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ansible-inventory --graph -i dynamic_docker.yaml | grep web 3 | 4 | LOGFILE=/root/ks/step2-verify.log 5 | set -e # exit once any command fails 6 | 7 | { 8 | date 9 | cat /root/ansible-workshop/roles/prerequisites/tasks/main.yaml | grep Install 10 | 11 | } >> ${LOGFILE} 2>&1 12 | 13 | echo "done" # let Validator know success -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/step1/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # exit once any command fails 4 | 5 | { 6 | http_status=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:80) 7 | 8 | # Check if the HTTP status code is 200 9 | if [ "$http_status" -eq 200 ]; then 10 | echo "done" # let Validator know success 11 | else 12 | exit 1 13 | fi 14 | 15 | } 16 | -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/step2/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # exit once any command fails 4 | 5 | { 6 | http_status=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:8000) 7 | 8 | # Check if the HTTP status code is 200 9 | if [ "$http_status" -eq 200 ]; then 10 | echo "done" # let Validator know success 11 | else 12 | exit 1 13 | fi 14 | 15 | } 16 | -------------------------------------------------------------------------------- /modules/10. 
Orchestration and Clustering/containers-workshop/step6/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # exit once any command fails 4 | 5 | { 6 | http_status=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:2850/file) 7 | 8 | # Check if the HTTP status code is 200 9 | if [ "$http_status" -eq 200 ]; then 10 | echo "done" # let Validator know success 11 | else 12 | exit 1 13 | fi 14 | 15 | } -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/step7/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # exit once any command fails 4 | 5 | { 6 | http_status=$(curl -s -o /dev/null -w "%{http_code}" http://localhost:9000/file) 7 | 8 | # Check if the HTTP status code is 200 9 | if [ "$http_status" -eq 200 ]; then 10 | echo "done" # let Validator know success 11 | else 12 | exit 1 13 | fi 14 | 15 | } -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/step3/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # exit once any command fails 4 | 5 | { 6 | CURRENT_REPLICAS=$(kubectl get replicaset fastapi-replicaset -o=jsonpath='{.status.availableReplicas}') 7 | 8 | # Check if the current number of replicas matches the expected number 9 | if [[ "$CURRENT_REPLICAS" -eq 2 ]]; then 10 | echo "done" 11 | else 12 | exit 1 13 | fi 14 | 15 | } -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step8/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # check all files contain right content 4 | if ! 
cat /task1/canto_1/{of_lines,head_32,tail_32} | sha256sum | grep 4fb72915dbf11cc25888f696d98f49f4eff3b574101d2dba5f6d958cff53d360; then 5 | exit 1 6 | fi 7 | 8 | if ! sha256sum /task1/words | grep 9c267737dbbeae862b055832e29c0bc5d7609ed1a2cfff22f775b5cea0ead43d; then 9 | exit 1 10 | fi -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step9/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # check all files contain right content 4 | if ! cat /task1/exec/{perfect,imperfect}.log | sha256sum | grep 39aa1b7a745bd0e1c3a2fecf0555343ef86363856b23f352cb78390c9a8cd8a6; then 5 | exit 1 6 | fi 7 | 8 | if ! sha256sum /task1/exec/errors.log | grep 1631c5e1499f61e068a2bfacddad94d0f1b607496c9010e8aa1b5adf02e20d1d; then 9 | exit 1 10 | fi -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/app/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # exit once any command fails 4 | 5 | { 6 | CURRENT_REPLICAS=$(kubectl get replicaset fastapi-replicaset -o=jsonpath='{.status.availableReplicas}') 7 | 8 | # Check if the current number of replicas matches the expected number 9 | if [[ "$CURRENT_REPLICAS" -eq 2 ]]; then 10 | echo "done" 11 | exit 0 12 | else 13 | exit 1 14 | fi 15 | 16 | } 17 | 18 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step3/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # check all proper files exist 4 | if ! 
ls /task1/secrets | sha256sum | grep 767a275f1a828e6e0b4f908edd6eaf37a4276e82e903a60613809e23ff927ff3; then 5 | exit 1 6 | fi 7 | 8 | # check copied content is the same 9 | if ! sha256sum /task1/super_secret_data | grep d54de3ffd6d72716e85ceeda4562de1f56b4300b55ceba614225915003fb82c0; then 10 | exit 1 11 | fi 12 | -------------------------------------------------------------------------------- /modules/08. Configuration Management/ansible-workshop/step2/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ansible-inventory --graph -i dynamic_docker.yaml | grep web 3 | 4 | LOGFILE=/root/ks/step2-verify.log 5 | set -e # exit once any command fails 6 | 7 | { 8 | date 9 | ansible-inventory --graph -i dynamic_docker.yaml | grep "@web" 10 | ansible-inventory --graph -i dynamic_docker.yaml | grep "@db" 11 | 12 | } >> ${LOGFILE} 2>&1 13 | 14 | echo "done" # let Validator know success -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step2/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | EXPECTED_DIR=$(ls /task1) 4 | # check only divine_comedy dir exists 5 | if [ "$EXPECTED_DIR" != "divine_comedy" ]; then 6 | exit 1 7 | fi 8 | 9 | cat /task1/divine_comedy/chapter_{1..3} > /expected/step2 10 | 11 | if ! sha256sum /expected/step2 | grep bac848fb87099d3a699f6577df5bf569c8539f1423d152796873d2b7f35212fa; then 12 | echo "not exist" 13 | exit 1 14 | fi -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step2/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ $(bash /scripts/hello.sh JoHn) != "Hello, JoHn!" 
4 | || $(bash /scripts/hello.sh andaRiel) != "Hello, andaRiel!" 5 | || $(bash /scripts/hello.sh) != "Hello, World!" 6 | ]]; 7 | then exit 1 8 | fi 9 | 10 | if [[ $(bash /scripts/numbers.sh 42) != "42 is even!" 11 | || $(bash /scripts/numbers.sh 69) != "69 is odd!" 12 | ]]; 13 | then exit 1 14 | fi -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/step7/text.md: -------------------------------------------------------------------------------- 1 | 2 | ## Part 5: Advanced Kubernetes Concepts 3 | 4 | Based on the content that you can [find in the repo](https://github.com/rolling-scopes-school/devops/modules/10. Containers/Part5), complete the following task to create your first Helm Chart 5 | 6 | 1. **It's time to use all you have learnt in this journey. Your last task is to create a Helm Chart to expose the application in port 9000, this Helm chart needs to include a volume as done in the previous task** 7 | -------------------------------------------------------------------------------- /modules/04. Cloud/Cloud Practitioner Essentials/README.md: -------------------------------------------------------------------------------- 1 | ## AWS Cloud Practitioner Essentials - 6h. 2 | This course is for individuals who seek an overall understanding of the Amazon Web Services (AWS) Cloud, independent of specific technical roles. You will learn about AWS Cloud concepts, AWS services, security, architecture, pricing, and support to build your AWS Cloud knowledge. This course also helps you prepare for the AWS Certified Cloud Practitioner exam. https://explore.skillbuilder.aws/learn/course/internal/view/elearning/134/aws-cloud-practitioner-essentials 3 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A.
Automation and Scripting Languages - Bash/linux-bash-workshop/step3/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ $(bash /scripts/hello.sh Helen) != "Hello, Helen! This username has 5 letters." 4 | || $(bash /scripts/hello.sh Baal) != "Hello, Baal! This username has 4 letters." 5 | || $(bash /scripts/hello.sh) != "Hello, World!" 6 | ]]; 7 | then exit 1 8 | fi 9 | 10 | if [[ $(bash /scripts/numbers.sh 42 15) != "Maximum is 42, minimum is 15" 11 | || $(bash /scripts/numbers.sh 5 13) != "Maximum is 13, minimum is 5" 12 | ]]; 13 | then exit 1 14 | fi -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/step6/text.md: -------------------------------------------------------------------------------- 1 | ## Part 5: Advanced Kubernetes Concepts 2 | 3 | Based on the content that you can [find in the repo](https://github.com/rolling-scopes-school/devops/modules/10. Containers/Part5), follow the following steps to configure the application using volumes: 4 | 5 | 1. **Fill pod-volume.yaml to add a volume to expose the current directory. Then using kubectl create the pod and ensure it's running. Then expose your pod using (Pod local path must be /app):** 6 | 7 | `kubectl port-forward fastapi-pod-volume 2850:8000`{{exec}} 8 | -------------------------------------------------------------------------------- /modules/10. 
Orchestration and Clustering/containers-workshop/step4/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # exit once any command fails 4 | 5 | { 6 | EXTERNAL_IP=$(kubectl get svc -o jsonpath='{.items[1].spec.clusterIPs[0]}') 7 | 8 | # Check if the current number of replicas matches the expected number 9 | http_status=$(curl -s -o /dev/null -w "%{http_code}" http://$EXTERNAL_IP:3500) 10 | 11 | # Check if the HTTP status code is 200 12 | if [ "$http_status" -eq 200 ]; then 13 | echo "done" # let Validator know success 14 | else 15 | exit 1 16 | fi 17 | 18 | } -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/step5/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e # exit once any command fails 4 | 5 | { 6 | EXTERNAL_IP=$(kubectl get svc -o jsonpath='{.items[1].spec.clusterIPs[1]}') 7 | 8 | # Check if the current number of replicas matches the expected number 9 | http_status=$(curl -s -o /dev/null -w "%{http_code}" http://$EXTERNAL_IP:4000) 10 | 11 | # Check if the HTTP status code is 200 12 | if [ "$http_status" -eq 200 ]; then 13 | echo "done" # let Validator know success 14 | else 15 | exit 1 16 | fi 17 | 18 | } -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step4/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ $(bash /scripts/hello.sh mePhistO) != "Hello, Mephisto! This username has 8 letters." 4 | || $(bash /scripts/hello.sh bAAl) != "Hello, Baal! This username has 4 letters." 5 | || $(bash /scripts/hello.sh) != "Hello, World!" 
6 | ]]; 7 | then exit 1 8 | fi 9 | 10 | if [[ $(bash /scripts/numbers.sh 15 42) != "Sum is 57, product is 630, average is 0.36" 11 | || $(bash /scripts/numbers.sh 5 13) != "Sum is 18, product is 65, average is 0.38" 12 | ]]; 13 | then exit 1 14 | fi -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/app/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax = docker/dockerfile:1.4 2 | 3 | FROM tiangolo/uvicorn-gunicorn-fastapi:python3.9-slim AS builder 4 | 5 | WORKDIR /app 6 | 7 | COPY requirements.txt ./ 8 | RUN pip install -r requirements.txt 9 | 10 | COPY ./app ./app 11 | 12 | FROM builder as dev-envs 13 | 14 | RUN < `#!/bin/bash` - so-called [shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)) 4 | 5 | # Task description: 6 | 7 | 1. Modify /scripts/hello.sh: 8 | 1.1 It should print a 'name' argument if the 'name' is provided, f.e. `hello.sh John_Doe` will print "Hello, John_Doe!" 9 | 1.2 Otherwise, It still prints 'Hello, World!' 10 | 2. Modify /scripts/number.sh: 11 | 2.1 It should take a number argument and print 'N is odd!' or 'N is even!', where N is provided number. 12 | 2.2 F.e. `numbers.sh 5` should print '5 is odd!' 13 | 2.3 Domain: -10000 <= N <= 1000 14 | 15 | -------------------------------------------------------------------------------- /modules/08. Configuration Management/ansible-workshop/setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | echo starting... 4 | 5 | pip install ansible 6 | 7 | cat >/root/docker-compose.yml < /tmp/finished 35 | -------------------------------------------------------------------------------- /modules/10. 
Orchestration and Clustering/containers-workshop/step5/text.md: -------------------------------------------------------------------------------- 1 | ## Part 5: Advanced Kubernetes Concepts 2 | 3 | Based on the content that you can [find in the repo](https://github.com/rolling-scopes-school/devops/modules/10. Containers/Part5), follow the following steps to configure the application using configmaps: 4 | 5 | **Fill configmap.yaml to add a configmap with a environment variable CUSTOM_MESSAGE with today's date. Then using kubectl create the configmap and the pod and ensure it's running. Then create a custom loadbalancer taking as example service.yaml to expose kubernetes port 8000 through machine port 4000.** 6 | 7 | > To run your configuration use pod-configmap.yaml and the steps you learned in previous steps. 8 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step3/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `#!/bin/bash` - so-called [shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)) 4 | 5 | # Task description: 6 | 7 | 1. Modify /scripts/hello.sh: 8 | 1.1 If name exists, it should also print 'This username has N letters.', f.e. `hello.sh John_Doe` will print "Hello, John_Doe! This username has 8 letters." 9 | 1.2 Otherwise, It still prints 'Hello, World!' 10 | 2. Modify /scripts/numbers.sh: 11 | 2.1 It should take two numbers, A and B 12 | 2.2 It should print max and min argument, f.e. `numbers.sh 42 15` should print 'Maximum is 42, minimum is 15' 13 | 2.3 Domain: -10000 <= A <= 10000, -10000 <= B <= 10000 -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. 
Automation and Scripting Languages - Bash/linux-bash-workshop/step4/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `#!/bin/bash` - so-called [shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)) 4 | 5 | # Task description: 6 | 7 | 1. Modify /scripts/hello.sh: 8 | 1.1 If name exists, it should be normalized and capitalised, f.e. `hello.sh geORGe` will print "Hello, George! This username has 6 letters." 9 | 1.2 Otherwise, It still prints 'Hello, World!' 10 | 2. Modify /scripts/numbers.sh: 11 | 2.1 It should take two numbers, A and B 12 | 2.2 It should print their sum, product and average (with scale = 2), f.e. `numbers.sh 12 5` should print 'Sum is 17, product is 60, average is 2.40' 13 | 2.3 Domain: -10000 <= A <= 10000, -10000 <= B <= 10000 -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step8/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `#!/bin/bash` - so-called [shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)) 4 | 5 | # Task description: 6 | 7 | 1. Create /scripts/address.sh. It should take an argument and print a message if it is: 8 | 1.1 IPv4 address -> prints "IPv4" 9 | 1.2 IPv6 address -> prints "IPv6" 10 | 1.3 MAC address -> prints "MAC" 11 | 1.4 If it doesn't match, prints "Unknown" 12 | 2. Modify /scripts/numbers.sh: 13 | 2.1 It should take a number A (more than 0 and below 100) 14 | 2.2 It should print number by words, f.e. `numbers.sh 15` should print 'fifteen' whereas `numbers.sh 67` should print 'sixty seven' 15 | 2.3 Domain: 0 < A < 100 -------------------------------------------------------------------------------- /modules/08. 
Configuration Management/ansible-workshop/step1/text.md: -------------------------------------------------------------------------------- 1 | ***NOTE***: Installing the scenario might take some minutes, please wait. Also by default we are going to use `nano` command-line editor. Some tasks will open this editor automatically, you should copy the code, paste it in the editor and save it with `CTRL+S` and `CTRL+X` 2 | 3 | First check that Ansible is installed validating the version and validate that Docker plugin is also installed. 4 | 5 | Check Ansible version 6 | 7 | ```sh 8 | ansible --version 9 | ```{{exec}} 10 | 11 | Check Docker plugin 12 | 13 | ```sh 14 | ansible-galaxy collection list | grep -i docker 15 | ```{{exec}} 16 | 17 | Then, check the running Docker containers and there should be 2 (db1, web1). 18 | 19 | ```sh 20 | docker ps 21 | ```{{exec}} -------------------------------------------------------------------------------- /modules/08. Configuration Management/ansible-workshop/step4/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | ansible-inventory --graph -i dynamic_docker.yaml | grep web 3 | 4 | LOGFILE=/root/ks/step2-verify.log 5 | set -e # exit once any command fails 6 | 7 | { 8 | date 9 | cat /root/ansible-workshop/roles/apache/templates/apache.conf.j2 | grep "{{ http_host }}" 10 | cat /root/ansible-workshop/roles/apache/handlers/main.yaml | grep Apache 11 | cat /root/ansible-workshop/roles/apache/tasks/main.yaml | grep Apache 12 | cat /root/ansible-workshop/roles/mysql/tasks/main.yaml | grep MySQL 13 | cat /root/ansible-workshop/roles/wordpress/templates/wp-config.php.j2 | grep WordPress 14 | cat /root/ansible-workshop/roles/wordpress/tasks/main.yaml | grep WordPress 15 | 16 | } >> ${LOGFILE} 2>&1 17 | 18 | echo "done" # let Validator know success -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. 
Automation and Scripting Languages - Bash/linux-bash-workshop/step7/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ $(bash /scripts/hello.sh moN3keY_Shin-E) != "Hello, Monkeyshine! This username has 11 letters: 5 vowels and 6 consonants. Nothing special!" 4 | || $(bash /scripts/hello.sh cA4^lliga=n) != "Hello, Calligan! This username has 8 letters: 3 vowels and 5 consonants. Nothing special!" 5 | || $(bash /scripts/hello.sh b4e4e) != "Hello, Bee! This username has 3 letters: 2 vowels and 1 consonants. The rarest name!" 6 | || $(bash /scripts/hello.sh) != "Hello, World!" 7 | ]]; 8 | then exit 1 9 | fi 10 | 11 | if [[ $(bash /scripts/numbers.sh 1 6 9) != "X1=X2=-3.00" 12 | || $(bash /scripts/numbers.sh 1 6 5) != "X1=-1.00, X2=-5.00" 13 | || $(bash /scripts/numbers.sh 1 6 10) != "There are no roots." 14 | ]]; 15 | then exit 1 16 | fi -------------------------------------------------------------------------------- /modules/02. Networking Basics/02.1 Networking Workshop/README.md: -------------------------------------------------------------------------------- 1 | # Networking Workshop: Subnetting Fundamentals 2 | 3 | With knowledge in both DevOps and cloud technologies, an understanding of network subnetting is essential. 4 | 5 | ## What you will study in this topic: 6 | 7 | ✅ Classful Subnetting 8 | ✅ Classless Subnetting 9 | ✅ Classful Subnetting Example 10 | ✅ Classless Subnetting Example 11 | ✅ Visualizing subnets using the Box Method 12 | ✅ Tasks 1-4 13 | ✅ Q&A. 14 | 15 | 16 | ### Useful links 17 | - [A Beginners Guide to Subnetting ](https://www.packetcoders.io/a-beginners-guide-to-subnetting/) 18 | - [What is Subnetting? 
- Subnetting Mastery ](https://www.youtube.com/watch?v=BWZ-MHIhqjM&list=PLIFyRwBY_4bQUE4IB5c4VPRyDoLgOdExE) 19 | 20 | ### Cross-check 21 | - [Link to the cross-check documentation](https://docs.app.rs.school/#/platform/cross-check-flow) -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/step4/text.md: -------------------------------------------------------------------------------- 1 | ## Part 4: Kubernetes Deployment and Services 2 | 3 | Based on the content that you can [find in the repo](https://github.com/rolling-scopes-school/devops/modules/10. Containers/Part4), follow the following steps to run your first kubernetes deployment: 4 | 5 | 1. Verify your kubectl installation 6 | 7 | `kubectl version`{{exec}} 8 | 2. Apply the deployment definition to create the Pod in your Kubernetes cluster 9 | 10 | `kubectl apply -f deployment.yaml`{{exec}} 11 | 3. Verify that the deployment is running and ready: 12 | 13 | `kubectl get pods`{{exec}} 14 | 4. Delete your pod 15 | 16 | `kubectl delete pod YOUR_POD_NAME`{{exec}} 17 | 5. **Now, complete the service.yaml file to create a LoadBalancer Service for the previous deployment, be sure that you expose the deployment in port 3500.** 18 | 19 | `kubectl apply -f service.yaml`{{exec}} -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/intro.md: -------------------------------------------------------------------------------- 1 | # Hello. 2 | 3 | You are going to learn some basics to work with bash scripts on a Linux system 4 | 5 | There are a few hints how to be successful, learning it: 6 | 7 | 1. First of all - save your solutions, because you have only one hour to complete it. When time is up, you will start from the very beginning, so to be able to skip solved tasks, just copy your saved commands. 8 | 2.
Don't be impatient. If you tried, and you failed too many times - get some rest, don't panic and have no doubts. Eventually, you will finish it. Everyone is different. For some of us it 9 | needs less time than for the others. I believe you can do it. 10 | 3. Don't ignore BASH documentation, use in-built command manuals and helps, be creative and explore each command in details. Be attentive and you will be rewarded. 11 | 4. There is no goal for a true samurai, there is only a path. -------------------------------------------------------------------------------- /modules/05. CICD/workshop.md: -------------------------------------------------------------------------------- 1 | # CI/CD Workshop. 2 | 3 | In this workshop, you'll not build a pipeline, but instead, you are going to debug a pipeline, and check what is missing or what is wrong in the pipeline. 4 | 5 | You'll be using the demo reviewed in the presentation and you will have to identify what is happening with the pipeline. 6 | 7 | ## Prerequisites 8 | 9 | 1. Fork this [repository](https://github.com/ivdgonzalezco/go-demo) where the pipeline challenge awaits. 10 | 2. Go to the Actions tab where the pipeline is ready to be run. 11 | 12 | So your tasks are: 13 | 14 | 1. Run the pipeline to check where it is failing. 15 | 2. Solve the errors and issues and document them in a Markdown file. 16 | 3. Upload all the evidence to your GitHub repository, detailing all the issues the pipeline has and how you solved them, in a Markdown file. Also, upload a screenshot of the pipeline working and a link to the GitHub Actions execution in the same file. -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/intro.md: -------------------------------------------------------------------------------- 1 | # Hello.
2 | 3 | You are going to learn some basic commands to work with files and command-line on a Linux system 4 | 5 | There are a few hints how to be successful, learning it: 6 | 7 | 1. First of all - save your solutions, because you have only one hour to complete it. When time is up, you will start from the very beginning, so to be able to skip solved tasks, just copy your saved commands. 8 | 2. Don't be impatient. If you tried, and you failed too many times - get some rest, don't panic and have no doubts. Eventually, you will finish it. Everyone is different. For some of us it 9 | needs less time than for the others. I believe you can do it. 10 | 3. Don't ignore BASH documentation, use in-built command manuals and helps, be creative and explore each command in details. Be attentive and you will be rewarded. 11 | 4. There is no goal for a true samurai, there is only a path. -------------------------------------------------------------------------------- /modules/02. Networking Basics/README.md: -------------------------------------------------------------------------------- 1 | # Networking Basics 2 | 3 | With knowledge in both DevOps and cloud technologies, an understanding of networking is essential. 4 | 5 | ## What you will study in this topic: 6 | 7 | ✅ Introduction to Computer Networking: 8 | ✅ Layer 1: Physical Layer - Functions and Responsibilities 9 | ✅ Layer 2: Data Link Layer - Functions and Responsibilities 10 | ✅ Layer 3: Network Layer 11 | ✅ Layer 4: Transport Layer 12 | ✅ Layer 5: Overview of the Session Layer 13 | ✅ Layer 6: Overview of the Presentation Layer 14 | ✅ Layer 7: Overview of the Application Layer 15 | ✅ Q&A.
16 | 17 | 18 | ### Useful links 19 | - [Networking Basics (Tutorial)](https://skillsforall.com/course/networking-basics?courseLang=en-US) 20 | - [Learn How Computer Networks Work with a Free 9-Hour Course](https://www.freecodecamp.org/news/free-computer-networking-course/) 21 | 22 | ### Cross-check 23 | - [Link to the cross-check documentation](https://docs.app.rs.school/#/platform/cross-check-flow) -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step7/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `#!/bin/bash` - so-called [shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)) 4 | 5 | # Task description: 6 | 7 | 1. Modify /scripts/hello.sh: 8 | 1.1 If name exists, sanitize it. Remove all non-letter symbols, f.e. `hello.sh pa8tri_ck` will print 'Hello, Patrick! This username has 7 letters: 2 vowels and 5 consonants. Nothing special!' 9 | 1.2 Otherwise (without a name), It still prints 'Hello, World!' 10 | 2. Modify /scripts/numbers.sh: 11 | 2.1 It should take three arguments. 12 | 2.2 Those arguments are coefficients A, B and C of [quadratic equation](https://en.wikipedia.org/wiki/Quadratic_equation) Ax2 + Bx + C = 0. 13 | 2.3 It should print the roots, scaled by 2, f.e. 14 | - `numbers.sh 2 12 7` should print 'X1=-0.65, X2=-5.35' 15 | - `numbers.sh 1 12 36` should print 'X1=X2=-6.00' 16 | - `numbers.sh 5 2 15` should print 'There are no roots' -------------------------------------------------------------------------------- /modules/09. Containers vs VMs/README.md: -------------------------------------------------------------------------------- 1 | ### Module description 2 | In this module, we'll explore the differences between containers and virtual machines (VMs). Containers are lightweight and efficient, while VMs provide hardware-level isolation. 
By the end, you'll make informed decisions on which technology suits your needs best. 3 | 4 | ### Methodology 5 | Within this module, you will find multiple chapters; please follow the numbered sequence in order. 6 | 7 | - 9.0. The basics 8 | - 9.1. VMs and Containers 9 | - 9.2. Use cases 10 | 11 | ### Education materials 12 | * [Containers vs. Virtual Machines (VMs): What’s the Difference? - IBM](https://www.ibm.com/blog/containers-vs-vms/) 13 | * [Containers vs virtual machines (VMs): What is the difference? - CircleCI](https://circleci.com/blog/containers-vs-virtual-machines/) 14 | * [Containers vs. virtual machines - Atlassian](https://www.atlassian.com/microservices/cloud-computing/containers-vs-vms) 15 | 16 | # Test 17 | Since this module is purely theoretical, the assessment method for this module is through a chapter-wise quiz. -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step9/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `#!/bin/bash` - so-called [shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)) 4 | 5 | # Task description: 6 | 7 | 1. Create /scripts/user_conf.sh. It should read prompt from a user: 8 | - First name 9 | - Last name 10 | - Age 11 | 1.1 It should create a JSON file /scripts/user_conf.json which looks like below 12 | 1.2 Indentation equals 2 whitespaces. 13 | 1.3 All values should be strings 14 | 1.4 Every call on this script should override the result file. 15 | 16 | ```json 17 | { 18 | "user": { 19 | "first_name": "John", 20 | "last_name": "Doe", 21 | "age": "19" 22 | } 23 | } 24 | ``` 25 | 26 | 2. Create /scripts/cards.sh: 27 | 2.1 It should take an argument - string. 28 | 2.2 It should verify if this string is a valid bank card number (contains only digits and follow this pattern XXXX-XXXX-XXXX-XXXX). 
29 | 2.3 It should print 'Card number is valid/is not valid', f.e. `cards.sh 1ABC-1234-2345-3456` should print 'Card is not valid' -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step9/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cat > /scripts/json_result.json << EOD 4 | { 5 | "user": { 6 | "first_name": "John", 7 | "last_name": "Jane", 8 | "age": "23" 9 | } 10 | } 11 | EOD 12 | 13 | bash /scripts/user_conf.sh << EOD 14 | John 15 | Jane 16 | 23 17 | EOD 18 | 19 | if [[ $(cat /scripts/json_result.json) != $(cat /scripts/user_conf.json) ]]; 20 | then exit 1 21 | fi 22 | 23 | if [[ $(bash /scripts/cards.sh "4215-3106-5750-2197") != "Card number is valid" 24 | || $(bash /scripts/cards.sh "42A5-3106-5750-2197") != "Card number is not valid" 25 | || $(bash /scripts/cards.sh "4215-3106=5750-2197") != "Card number is not valid" 26 | || $(bash /scripts/cards.sh "4215-3106-5750-21975") != "Card number is not valid" 27 | || $(bash /scripts/cards.sh "4215-3106-5750-2197-1234") != "Card number is not valid" 28 | || $(bash /scripts/cards.sh "4215-3106-5750") != "Card number is not valid" 29 | || $(bash /scripts/cards.sh "4215-3106-575-2197") != "Card number is not valid" 30 | ]]; 31 | then exit 1 32 | fi -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step10/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `#!/bin/bash` - so-called [shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)) 4 | 5 | # Task description: 6 | 7 | 1. Modify /scripts/user_conf.sh. 
It should not only read prompt from previous task, but also a format in range of json/xml/yaml 8 | 1.1 Now, being depended on chosen format, it should be able to create an XML file /scripts/user_conf.xml or YAML file /scripts/user_conf.yml that look like below 9 | 1.2 Indentation equals 2 whitespaces. 10 | ```xml 11 | 12 | John 13 | Doe 14 | 19 15 | 16 | ``` 17 | 18 | ```yaml 19 | user: 20 | first-name: John 21 | last-name: Doe 22 | age: 19 23 | ``` 24 | 25 | 2. **(Optional)** Modify /scripts/cards.sh: 26 | 2.1 Apply [Luhn Algorithm](https://en.wikipedia.org/wiki/Luhn_algorithm) to your validation. 27 | 2.2 It should print the same result, but this time it should check card number's validity using this algorithm. 28 | 2.3 Consider last digit as a checksum digit. -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step1/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `COMMAND --help` - print usage of command and possible options 4 | > `pwd` - print working (current) directory 5 | > `cd [FILE]` - change working (current) directory 6 | > `ls [OPTION...] [FILE]...` - list all files in FILE directories OR in a current directory 7 | > `touch [OPTION...] FILE...` - update the access and modification times of each FILE to the current time, although if 8 | > FILE doesn't exist, it will be created 9 | > `mkdir [OPTION...] DIRECTORY...` - create all DIRECTORIES, if they do not already exist 10 | > `{[FROM]..[TO]..[STEP]}` - create a list of integer numbers with a step, f.e. {1..10} creates a list of values: 1 2 3 11 | > 4 5 6 7 8 9 10 12 | 13 | # Task description: 14 | 15 | 1. Create directories /task1/test_dir_1, /task1/test_dir_2, /task1/test_dir_3. 16 | 2. In /task1/test_dir_1 create directories: 1, 2, 3, 4, 5, 6 17 | 3. 
In /task1/test_dir_2 create directories: 01, 02, 03, 04, 05, 06 18 | 4. In /task1/test_dir_3 create files: file_01, file_03, file_05, file_07, file_09 19 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step5/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ $(bash /scripts/hello.sh moNkeYShinE) != "Hello, Monkeyshine! This username has 11 letters: 5 vowels and 6 consonants." 4 | || $(bash /scripts/hello.sh cAlligan) != "Hello, Calligan! This username has 8 letters: 3 vowels and 5 consonants." 5 | || $(bash /scripts/hello.sh) != "Hello, World!" 6 | ]]; 7 | then exit 1 8 | fi 9 | 10 | if [[ $(bash /scripts/numbers.sh 3 4 5) != "Sides are valid." 11 | || $(bash /scripts/numbers.sh 45 117 73) != "Sides are valid." 12 | || $(bash /scripts/numbers.sh 4 4 2) != "Sides are valid. This triangle is isoceles." 13 | || $(bash /scripts/numbers.sh 7 7 7) != "Sides are valid. This triangle is equilateral." 14 | ]]; 15 | then exit 1 16 | fi 17 | 18 | if [[ $(bash /scripts/numbers.sh 0 0 0) != "Sides are not valid." 19 | || $(bash /scripts/numbers.sh 0 6 6) != "Sides are not valid." 20 | || $(bash /scripts/numbers.sh 1 2 3) != "Sides are not valid." 21 | || $(bash /scripts/numbers.sh 2 3 19) != "Sides are not valid." 
22 | ]]; 23 | then exit 1 24 | fi -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2023 Rolling Scopes School 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step5/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `#!/bin/bash` - so-called [shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)) 4 | > 5 | 6 | # Task description: 7 | 8 | 1. Modify /scripts/hello.sh: 9 | 1.1 If name exists, the script also should print quantity of vowels and consonants, f.e. `hello.sh geORGe` will print "Hello, George! 
This username has 6 letters: 3 vowels and 3 consonants." 10 | 1.2 Otherwise, It still prints 'Hello, World!' 11 | 2. Modify /scripts/numbers.sh: 12 | 2.1 It should take three numbers - A, B and C 13 | 2.2 Those numbers are sides of triangle. 14 | 2.3 It should print if it is possible to draw a triangle with those sides, f.e. `numbers.sh 12 14 19` should print 'Sides are valid' and `numbers.sh 2 3 19` should print 'Sides are not valid.' 15 | 2.4. If these are sides of equilateral or isoceles triangle ([wiki page](https://en.wikipedia.org/wiki/Triangle)), put an additional message with this property, f.e. 'Sides are valid. This triangle is equilateral/isoceles.' 16 | 2.5 Domain: 0 <= A <= 10000, 0 <= B <= 10000, 0 <= C <= 10000 -------------------------------------------------------------------------------- /modules/07. Infrastructure as Code (IaC)/README.md: -------------------------------------------------------------------------------- 1 | # Infrastructure as Code (IaC) 2 | 3 | With knowledge in both DevOps and cloud technologies, an understanding of infrastructure as a code (IaC) and configuration management is essential. 4 | 5 | ## What you will study in this topic: 6 | 7 | ✅ An introduction to infrastructure as code 8 | ✅ Key concepts of infrastructure as code 9 | ✅ Benefits of infrastructure as code 10 | ✅ Tools of infrastructure as code 11 | ✅ Demonstration 12 | ✅ Best practices for implementing Infrastructure as Code 13 | ✅ Q&A. 14 | 15 | 16 | ### Useful links 17 | - [What is Infrastructure as Code? (Tutorial).](https://www.freecodecamp.org/news/what-is-infrastructure-as-code/) 18 | - [AWS Infrastructure as Code Workshops. 
](https://www.workshops.aws/categories/Infrastructure%20as%20Code) 19 | - [Azure Infrastructure as Code Workshop ](https://azuredevcollege.com/iac-basics-workshop/#azure-resource-manager ) 20 | - [Getting Started with Terraform for Google Cloud ](https://www.cloudskillsboost.google/course_templates/443) 21 | 22 | 23 | ### Cross-check 24 | - [Link to the cross-check documentation](https://docs.app.rs.school/#/platform/cross-check-flow) -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step7/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `COMMAND --help` - print usage of command and possible options 4 | > `pwd` - print working (current) directory 5 | > `cd [FILE]` - change working (current) directory 6 | > `ls [OPTION]... [FILE]...` - list all files in FILE directories OR in a current directory 7 | > `cat [OPTION]... [FILE]...` - concatenate FILE(s) to standard output. With no FILE, or when FILE is -, read 8 | > standard input. 9 | > `head [OPTION]... [FILE]...` - print the first 10 lines of each FILE to standard output. 10 | > `tail [OPTION]... [FILE]...` - print the last 10 lines of each FILE to standard output. 11 | > `less [OPTION]... FILE` - open file content in a read mode with scrolling 12 | 13 | # Task description: 14 | 15 | 1. Remove all data from /task1 16 | 2. Copy file from /dante to /task1 17 | 3. Using less, find all lines with word 'of' in the /task1/divine_comedy_canto_1. Write line numbers with comma delimiter in /task1/canto_1/of_lines. F.e. 1,12,45,62,117 18 | 4. Using head, print first 32 bytes of file /task1/divine_comedy_canto_1. Write it into /task1/canto_1/head_32. 19 | 5. Using tail, print last 32 bytes of file /task1/divine_comedy_canto_1. Write it into /task1/canto_1/tail_32. 20 | 6.
Make sure your files end with a new line -------------------------------------------------------------------------------- /modules/08. Configuration Management/ansible-workshop/intro.md: -------------------------------------------------------------------------------- 1 | Let's learn some basic commands to work with Ansible. 2 | 3 | We will deploy WordPress with LAMP Stack (Linux, Apache, MySQL and PHP) using Ansible. The goal is to familiarize yourself with Ansible, from verifying the tool and requirements, to create multiple roles with `tasks`{{}}, `handlers`{{}} and `templates`{{}}, and use them in a playbook with variables to configure different Instances/ Virtual Machines/ Containers. 4 | 5 | In this KillerCoda scenario, you will follow the below steps: 6 | 7 | 1. Validate Ansible and Docker plugin installation 8 | 2. Create a dynamic inventory and gather the containers in different groups 9 | 3. Create multiple roles for each component of the application (Apache, MySQL, WordPress, Prerequisites) 10 | 4. Create the main Playbook and a variables file to configure WordPress 11 | 5. Validate functionality 12 | 13 | Ansible has already been installed in this environment and there are 2 Docker containers with the following structure: 14 | 15 | ``` 16 | Docker/ 17 | ├── web1 18 | └── db1 19 | ``` 20 | 21 | The idea of each container is to represent a unique host (Server/Virtual Machine/Instance), and let us group them into different groups based on the VM type. 22 | 23 | To group them, each container was created with a label that corresponds to its type (e.g. type=web/db). 24 | -------------------------------------------------------------------------------- /modules/10.
Orchestration and Clustering/containers-workshop/step1/text.md: -------------------------------------------------------------------------------- 1 | # Part 1: Introduction to Containers and Docker 2 | 3 | Based on the content that you can [find in the repo](https://github.com/rolling-scopes-school/devops/modules/9.Containers/Part1), follow the following steps to run your first docker container: 4 | 5 | > **Take a moment in step 6 to go to verify button and check what happens, Can you explain it?** 6 | 7 | 1. Verify your docker installation 8 | 9 | `docker --version`{{exec}} 10 | 2. Go to /app 11 | 12 | `cd devops/modules/10. Containers/app`{{exec}} 13 | 3. Build the docker image using dockerfile 14 | 15 | `docker build -t myfirstcontainer:latest .`{{exec}} 16 | 4. Run your recently created docker image 17 | 18 | `containerid=$(docker run -d --name mycontainer -p 80:80 myfirstcontainer:latest)`{{exec}} 19 | 5. List running containers 20 | 21 | `docker ps`{{exec}} 22 | 6. Stop your docker container 23 | 24 | `docker stop $containerid`{{exec}} 25 | 7. Start your docker container 26 | 27 | `docker start $containerid`{{exec}} 28 | 8. Kill your docker container 29 | 30 | `docker kill $containerid`{{exec}} 31 | 9. Delete your docker container 32 | 33 | `docker rm $containerid`{{exec}} 34 | 10. Run your recently created docker image to validate the task 35 | 36 | `docker run -d --name mycontainer -p 80:80 myfirstcontainer:latest`{{exec}} 37 | -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/step2/text.md: -------------------------------------------------------------------------------- 1 | ## Part 2: Docker Compose and Multi-Container Applications 2 | 3 | Based on the content that you can [find in the repo](https://github.com/rolling-scopes-school/devops/modules/10. Containers/Part2), follow the following steps to run your first docker container using docker-compose: 4 | 5 | 1.
Verify your docker compose installation 6 | 7 | `docker-compose --version`{{exec}} 8 | 2. **Write a docker compose file in docker-compose.yaml to expose the container we build, remember that the container should be exposed in port 8000.** 9 | 3. Run the docker compose app with prepared file 10 | 11 | `docker-compose up`{{exec}} 12 | 4. CTRL+C to close application logs 13 | 5. Run the docker compose app with prepared file in daemon mode 14 | 15 | `docker-compose up -d`{{exec}} 16 | 6. List running containers and copy your container id 17 | 18 | `docker ps`{{exec}} 19 | `export containerid=`{{copy}} 20 | 7. Stop your docker container using your id 21 | 22 | `docker stop $containerid`{{exec}} 23 | 8. Start your docker container using your id 24 | 25 | `docker start $containerid`{{exec}} 26 | 9. Kill your docker container using your id 27 | 28 | `docker kill $containerid`{{exec}} 29 | 10. Delete your docker container using your id 30 | 31 | `docker rm $containerid`{{exec}} 32 | 11. Run your recently created docker image to validate the task 33 | 34 | `docker-compose up -d`{{exec}} 35 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A.
Automation and Scripting Languages - Bash/linux-bash-workshop/step10/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | cat > /scripts/json_result.json << EOD 4 | { 5 | "user": { 6 | "first_name": "John", 7 | "last_name": "Jane", 8 | "age": "23" 9 | } 10 | } 11 | EOD 12 | 13 | bash /scripts/user_conf.sh << EOD 14 | John 15 | Jane 16 | 23 17 | json 18 | EOD 19 | 20 | if [[ $(cat /scripts/json_result.json) != $(cat /scripts/user_conf.json) ]]; 21 | then exit 1 22 | fi 23 | 24 | cat > /scripts/json_result.xml << EOD 25 | 26 | John 27 | Jane 28 | 23 29 | 30 | EOD 31 | 32 | bash /scripts/user_conf.sh << EOD 33 | John 34 | Jane 35 | 23 36 | xml 37 | EOD 38 | 39 | if [[ $(cat /scripts/json_result.xml) != $(cat /scripts/user_conf.xml) ]]; 40 | then exit 1 41 | fi 42 | 43 | cat > /scripts/json_result.yml << EOD 44 | user: 45 | firstName: John 46 | lastName: Jane 47 | age: 23 48 | EOD 49 | 50 | bash /scripts/user_conf.sh << EOD 51 | John 52 | Jane 53 | 23 54 | yaml 55 | EOD 56 | 57 | if [[ $(cat /scripts/json_result.yml) != $(cat /scripts/user_conf.yml) ]]; 58 | then exit 1 59 | fi 60 | 61 | #if [[ $(bash /scripts/cards.sh) "4175-0050-2516-4005" != "Card number is valid." 62 | # || $(bash /scripts/cards.sh) "4175-0050-5377-2596" != "Card number is valid." 63 | # || $(bash /scripts/cards.sh) "4026-8235-9548-6943" != "Card number is not valid." 64 | # ]]; 65 | # then exit 1 66 | #fi -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step4/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `COMMAND --help` - print usage of command and possible options 4 | > `pwd` - print working (current) directory 5 | > `cd [FILE]` - change working (current) directory 6 | > `ls [OPTION...] 
[FILE]...` - list all files in FILE directories OR in a current directory 7 | > `touch [OPTION...] FILE...` - update the access and modification times of each FILE to the current time, although if 8 | > FILE doesn't exist, it will be created 9 | > `mkdir [OPTION...] DIRECTORY...` - create all DIRECTORIES, if they do not already exist 10 | > `{[FROM]..[TO]..[STEP]}` - create a list of integer numbers with a step, f.e. {1..10} creates a list of values: 1 2 3 11 | > 4 5 6 7 8 9 10 12 | > `rm [OPTION...] FILE...` - remove (unlink) FILES. 13 | > `rmdir [OPTION...] DIRECTORY...` - remove the DIRECTORIES, if they are empty. 14 | > `mv [OPTION]... [-T] SOURCE DEST` or `mv [OPTION]... SOURCE... DIRECTORY` - rename SOURCE to DEST, or move SOURCE(s) 15 | > to DIRECTORY. 16 | > `cp [OPTION]... [-T] SOURCE DEST` or `mv [OPTION]... SOURCE... DIRECTORY` - copy SOURCE to DEST, or multiple SOURCE(s) 17 | > to DIRECTORY 18 | 19 | # Task description: 20 | 21 | 1. Remove all data from /task1 22 | 2. Copy all files from /encoded_names to /task1/metamorphosis 23 | 3. Change working directory to /task1/metamorphosis 24 | 4. Copy filename to its content for every file 25 | 5. Rename files deleting all digits from their names 26 | -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/step3/text.md: -------------------------------------------------------------------------------- 1 | ## Part 3: Introduction to Kubernetes 2 | 3 | Based on the content that you can [find in the repo](https://github.com/rolling-scopes-school/devops/modules/10. Containers/Part3), follow the following steps to run your first container using Kubernetes: 4 | 5 | 1. Start a local registry 6 | 7 | `docker run -d -p 5000:5000 --name local-registry registry:2`{{exec}} 8 | 3. Tag to push to local registry 9 | 10 | `docker tag myfirstcontainer:latest localhost:5000/myfirstcontainer:latest`{{exec}} 11 | 4. 
Push to local registry 12 | 13 | `docker push localhost:5000/myfirstcontainer:latest`{{exec}} 14 | 1. Verify your kubectl installation 15 | 16 | `kubectl version`{{exec}} 17 | 5. Apply the Pod definition to create the Pod in your Kubernetes cluster 18 | 19 | `kubectl apply -f pod.yaml`{{exec}} 20 | 6. Verify that the Pod is running and ready: 21 | 22 | `kubectl get pods`{{exec}} 23 | 7. To access the container logs, use 24 | 25 | `kubectl logs fastapi-pod`{{exec}} 26 | 8. Expose the port of your pod to test if it is running 27 | 28 | `kubectl port-forward fastapi-pod 8300:8000 &`{{exec}} 29 | 9. Check application with curl 30 | 31 | `curl localhost:8300`{{exec}} 32 | 10. Delete your pod 33 | 34 | `kubectl delete pod fastapi-pod`{{exec}} 35 | 11. **Now, complete the replicaset.yaml file to create two replicas of the fastapi application and run it (Take as example the pod.yaml file with image and imagepullpolicy). Click on validate once you are ready.** 36 | 37 | `kubectl apply -f replicaset.yaml`{{exec}} 38 | -------------------------------------------------------------------------------- /modules/08.
Configuration Management/ansible-workshop/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "RSSchool Ansible Workshop", 3 | "description": "Let's learn some basic commands to work with Ansible deploying WordPress", 4 | "details": { 5 | "intro": { 6 | "text": "intro.md", 7 | "background": "setup.sh", 8 | "foreground": "wait-init.sh" 9 | }, 10 | "steps": [ 11 | { 12 | "title": "Validate Ansible installation and Docker containers", 13 | "text": "step1/text.md", 14 | "verify": "step1/verify.sh" 15 | }, 16 | { 17 | "title": "Create Ansible Dynamic Inventory", 18 | "text": "step2/text.md", 19 | "verify": "step2/verify.sh" 20 | }, 21 | { 22 | "title": "Ansible Project Structure and Prerequisites Role", 23 | "text": "step3/text.md", 24 | "verify": "step3/verify.sh" 25 | }, 26 | { 27 | "title": "Ansible Roles", 28 | "text": "step4/text.md", 29 | "verify": "step4/verify.sh" 30 | }, 31 | { 32 | "title": "Create and Run the Main Playbook", 33 | "text": "step5/text.md", 34 | "verify": "step5/verify.sh" 35 | } 36 | ], 37 | "finish": { 38 | "text": "finish.md" 39 | } 40 | }, 41 | "backend": { 42 | "imageid": "ubuntu" 43 | } 44 | } -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. 
Automation and Scripting Languages - Bash/linux-bash-workshop/step8/verify.sh: -------------------------------------------------------------------------------- 1 | for ADDRESS in "0.0.0.0" "1.2.3.4" "13.24.35.46" "123.234.1.5" "255.255.255.255" ; 2 | do 3 | if [[ $(bash /scripts/address.sh ${ADDRESS}) != "IPv4" ]]; then 4 | exit 1 5 | fi 6 | done 7 | 8 | for ADDRESS in "-1.0.0.0" "1.256.3.4" "13.24.35" "123.234.1.5.21" "a.b.c.d" ; 9 | do 10 | if [[ $(bash /scripts/address.sh ${ADDRESS}) != "Unknown" ]]; then 11 | exit 1 12 | fi 13 | done; 14 | 15 | for ADDRESS in "2001:0db8:0000:0000:0000:8a2e:0370:7334" "1111:0db8:2222:3333:4444:8a2e:5555:9999" ; 16 | do 17 | if [[ $(bash /scripts/address.sh ${ADDRESS}) != "IPv6" ]]; then 18 | exit 1 19 | fi 20 | done 21 | 22 | for ADDRESS in "0db8:0000:0000:0000:8a2e:0370:7334" "1111:0db8:2222:3333:4444:8a2e:5555:9999:1234" "1111:0db8:22h2:3333:4444:8a2e:5555:9999" ; 23 | do 24 | if [[ $(bash /scripts/address.sh ${ADDRESS}) != "Unknown" ]]; then 25 | exit 1 26 | fi 27 | done 28 | 29 | for ADDRESS in "12:34:56:78:ab:11" "32:34:56:98:cf:32" "12:34:ea:78:ab:45" ; 30 | do 31 | if [[ $(bash /scripts/address.sh ${ADDRESS}) != "MAC" ]]; then 32 | exit 1 33 | fi 34 | done 35 | 36 | for ADDRESS in "12:34:56:78:gb:11" "32:34:56:-98:cf:32" "12:34:ea:78:ab" "32:34:56:98:cf:32:41" ; 37 | do 38 | if [[ $(bash /scripts/address.sh ${ADDRESS}) != "Unknown" ]]; then 39 | exit 1 40 | fi 41 | done 42 | 43 | if [[ $(bash /scripts/numbers.sh 12) != "twelve" 44 | || $(bash /scripts/numbers.sh 17) != "seventeen" 45 | || $(bash /scripts/numbers.sh 38) != "thirty eight" 46 | || $(bash /scripts/numbers.sh 79) != "seventy nine" 47 | ]]; 48 | then exit 1 49 | fi -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. 
Automation and Scripting Languages - Bash/linux-cli-workshop/step3/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `COMMAND --help` - print usage of command and possible options 4 | > `pwd` - print working (current) directory 5 | > `cd [FILE]` - change working (current) directory 6 | > `ls [OPTION...] [FILE]...` - list all files in FILE directories OR in a current directory 7 | > `touch [OPTION...] FILE...` - update the access and modification times of each FILE to the current time, although if 8 | > FILE doesn't exist, it will be created 9 | > `mkdir [OPTION...] DIRECTORY...` - create all DIRECTORIES, if they do not already exist 10 | > `{[FROM]..[TO]..[STEP]}` - create a list of integer numbers with a step, f.e. {1..10} creates a list of values: 1 2 3 11 | > 4 5 6 7 8 9 10 12 | > `rm [OPTION...] FILE...` - remove (unlink) FILES. 13 | > `rmdir [OPTION...] DIRECTORY...` - remove the DIRECTORIES, if they are empty. 14 | > `mv [OPTION]... [-T] SOURCE DEST` or `mv [OPTION]... SOURCE... DIRECTORY` - rename SOURCE to DEST, or move SOURCE(s) 15 | > to DIRECTORY. 16 | > `cp [OPTION]... [-T] SOURCE DEST` or `mv [OPTION]... SOURCE... DIRECTORY` - copy SOURCE to DEST, or multiple SOURCE(s) 17 | > to DIRECTORY 18 | 19 | # Task description: 20 | 21 | 1. Remove all data from /task1 22 | 2. There are three files somewhere in /findme. You should find them all. 23 | 2.1. When you find a file, you should copy it to /task1/secrets directory. Don't change origin filename. 24 | 3. After you find them, you should create file /task1/super_secret_data and copy codes from files you have found to it. 25 | Codes should be pasted in alphabetical order. 26 | -------------------------------------------------------------------------------- /modules/03. 
Git/README.md: -------------------------------------------------------------------------------- 1 | ### Module description 2 | Discover the collaborative approach in software development by using a version control system and how to cooperate with other people or teams in a proper way. Within the module, you’ll learn Git fundamentals, its core concepts, how it enables you to efficiently manage source code and development history. In addition, you’ll get acquainted with GitHub as one of the most popular collaboration platforms, learn how to make a contribution to a project which is maintained by several teammates by using branching strategy and pull request workflow. 3 | 4 | ### Education materials 5 | * [Learn Git Branching](https://learngitbranching.js.org/) 6 | * [What is version control?](https://www.atlassian.com/git/tutorials/what-is-version-control) 7 | * [A step-by-step guide to Git](https://opensource.com/article/18/1/step-step-guide-git) 8 | * [How to write the perfect pull request](https://github.blog/2015-01-21-how-to-write-the-perfect-pull-request/) 9 | * [Visualizing Git](http://git-school.github.io/visualizing-git/) 10 | * [Lecture RS School.Lithuania. Git](https://youtu.be/nZdrmhN90j8) 11 | 12 | ### Test 13 | At the end of this module you should pass the **"Git Quiz"** 14 | 15 | #### Preconditions for test: 16 | * Tests submitted in RS School App could be solved after authorization in the application. 17 | * The minimum passing score is **75%** of the maximum possible number of points. 18 | * You can take the test **5 times**, the last result is counted. 19 | * You can try the test even more times, but the score for the test will be reduced by half (from the original score). 20 | * The result of passing the test will be displayed immediately, it will be added to the score page next day after passing. 21 | 22 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. 
Automation and Scripting Languages - Bash/linux-bash-workshop/step6/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `#!/bin/bash` - so-called [shebang](https://en.wikipedia.org/wiki/Shebang_(Unix)) 4 | 5 | # Task description: 6 | 7 | 1. Modify /scripts/hello.sh: 8 | 1.1 If name exists, check its rarity. 9 | - If the name is in the list of rare names [ Rafferty, Zebedee, Romilly, Bee ], it should print 'The rarest name!', f.e. `hello.sh zebedee` will print "Hello, Zebedee! This username has 7 letters: 4 vowels and 3 consonants. The rarest name!" 10 | - If the name is in the list of uncommon names [ Grover, Ajax, Ottilie, Lorcan], it should print 'Pretty uncommon name!', f.e. `hello.sh aJax` will print "Hello, Ajax! This username has 4 letters: 2 vowels and 2 consonants. Pretty uncommon name!" 11 | - Otherwise, it should print 'Nothing special!', f.e. `hello.sh jane` will print "Hello, Jane! This username has 4 letters: 2 vowels and 2 consonants. Nothing special!" 12 | 1.2 Otherwise (without a name), It still prints 'Hello, World!' 13 | 2. Modify /scripts/numbers.sh: 14 | 2.1 It should take three arguments - A(day), B(month) and C(year) 15 | 2.2 Those arguments are day, month and year of Gregorian calendar, respectively. 16 | 2.3 It should print if it is a valid date or not, f.e. `numbers.sh 12 7 2005` should print 'Valid date.' whereas `numbers.sh 5 14 2005` should print 'Invalid date.' 17 | 2.4 Any year is considered as valid. Leap year could be skipped. 18 | 2.5 Domain: -10000 <= A <= 10000, -10000 <= B <= 10000, -999999 <= C <= 999999 19 | 3. **(Optional)** Modify this date validation. Add a check to verify if the date is the 29 of February, year value should be a [Leap Year](https://en.wikipedia.org/wiki/Leap_year) -------------------------------------------------------------------------------- /modules/09. Containers vs VMs/9.2. 
Use cases/README.md: -------------------------------------------------------------------------------- 1 | ### Use Cases for VMs vs. Use Cases for Containers 2 | Both containers and virtual machines (VMs) offer distinct advantages and drawbacks, and your choice should align with your specific requirements. 3 | 4 | When it comes to selecting the appropriate technology for your workloads, VMs excel in scenarios where applications necessitate full access to the operating system's resources and functionality. VMs are ideal for situations where you need to run multiple applications on servers or manage a diverse array of operating systems. If you have an existing monolithic application that doesn't require refactoring into microservices, VMs will continue to fulfill your needs effectively. 5 | 6 | On the other hand, containers prove to be a superior choice when your primary objective is to maximize the number of applications or services running on a minimal number of servers while prioritizing portability. If you're embarking on the development of a new application and intend to employ a microservices architecture for scalability and portability, containers are the preferred solution. Containers truly shine in the realm of cloud-native application development, particularly within a microservices architecture. 7 | 8 | It's worth noting that you can also run containers within a virtual machine, making the decision less of an "either/or" and more of an evaluation to determine which technology aligns best with your workload requirements. 9 | 10 | In summary: 11 | 12 | - VMs enable companies to optimize their infrastructure resources by increasing the number of virtual machines achievable from a finite pool of hardware and software resources. 13 | - Containers empower companies to maximize their development resources, facilitating the adoption of microservices and DevOps practices. 
14 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step11/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `COMMAND --help` - print usage of command and possible options 4 | > `pwd` - print working (current) directory 5 | > `cd [FILE]` - change working (current) directory 6 | > `ls [OPTION]... [FILE]...` - list all files in FILE directories OR in a current directory 7 | > `sed [OPTION]... {script-only-if-no-other-script} [input-file]...` - in-built stream editor, can process and modify stream of symbols 8 | 9 | # Task description: 10 | 11 | 1. Remove all data from /task1 12 | 2. Create directory /task1/sed_results 13 | 3. Copy file from /dante to /task1/sed_results 14 | 4. Print first 20 lines of divine_comedy_canto_1. Replace all vowels with '_' (underscore). Write them into /task1/sed_results/beatbox_cheatsheet 15 | 5. Print first 20 lines of divine_comedy_canto_1. Replace all consonants with '.' (dot). Write them into /task1/sed_results/hard_pronunciation 16 | 6. Print first 20 lines of divine_comedy_canto_1. Replace all vowels with '-' (dash), all consonants with '.' (dot). Remove all other symbols. Write them into /task1/sed_results/i_am_samuel_morse 17 | 7. Print first 20 lines of divine_comedy_canto_1. Double all doubled consonants, f.e. 'well' -> 'wellll', 'passed' -> 'passssed'. Uppercase all double vowels, f.e. 'oozy' -> 'OOzy', 'fleet' -> 'flEEt'. Write them into /task1/sed_results/dr_feelgood 18 | 8. Print first 20 lines of divine_comedy_canto_1. Add leading and trailing '-' (dash) to each word. Remove all spaces and commas. Write it to /task1/sed_results/fancy_kebab 19 | 9. Print first 20 lines of divine_comedy_canto_1. Add two leading spaces to lines that start with vowels. Replace all dots that are placed at the end of the line with '!!!' 
(three exclamation marks). Write it into /task1/sed_results/from_the_start_to_the_end -------------------------------------------------------------------------------- /modules/08. Configuration Management/workshop.md: -------------------------------------------------------------------------------- 1 | # Ansible Workshop: Building WordPress with LAMP Stack (Linux, Apache, MySQL and PHP) 2 | ## Task 3 | 4 | Your task is to follow the KillerCoda workshop. Familiarize yourself with Ansible, from verifying the tool and requirements, to create multiple roles with `tasks`, `handlers` and `templates`. Use them in a playbook with variables to configure different Instances/ Virtual Machines/ Containers. 5 | 6 | **NOTE: The scenario will use Docker containers to simulate multiple servers.** 7 | 8 | In the KillerCoda scenario, you will follow the below steps: 9 | 10 | 1. Validate Ansible and Docker plugin installation 11 | 2. Create a dynamic inventory and gather the containers in different groups 12 | 3. Create multiple roles for each component of the application (Apache, MySQL, WordPress, Prerequisites) 13 | 4. Create the main Playbook and a variables file to configure WordPress 14 | 5. Validate functionality 15 | 16 | ### [Workshop Link](https://killercoda.com/rsschool/course/modules/ansible-workshop) 17 | 18 | ## How to do the task? 19 | ### 1. Sign into Killercoda 20 | You could use your Github account 21 | ### 2. Take the entire workshop - [Workshop Link](https://killercoda.com/rsschool/course/modules/ansible-workshop) 22 | Once you finish it, do not close the environment! 23 | ### 3. Execute an `echo` command with your github name in the KillerCoda environment console 24 | For example `$echo rsschool-repository` 25 | ### 4. Validate your participation! 26 | Take a screenshot of the completion screen `Thanks for participating in this Workshop!` and your github name in the KillerCoda environment console. Besides, it should contain the date and time. 27 | ### 5. 
Cross-check 28 | Upload a screenshot to the RS School portal. Your result will be cross-checked by another student of the course, and you will have to check someone else result. 29 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step10/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `COMMAND --help` - print usage of command and possible options 4 | > `pwd` - print working (current) directory 5 | > `cd [FILE]` - change working (current) directory 6 | > `ls [OPTION]... [FILE]...` - list all files in FILE directories OR in a current directory 7 | > `cat [OPTION]... [FILE]...` - concatenate FILE(s) to standard output. With no FILE, or when FILE is -, read 8 | > **_standard input_**. 9 | > `|` - redirect **_standard output_** to **_standard input_**. Then you can use any command to receive and handle it. F.e. `echo Hola! | sha256sum` 10 | > `grep [OPTION]... PATTERNS [FILE]...` - Search for PATTERNS in each FILE. When FILE is '-', read standard input. 11 | > `sort [OPTION]... [FILE]...` - Write sorted concatenation of all FILE(s) to standard output. With no FILE, or when FILE is -, read standard input. 12 | 13 | # NOTES: 14 | - The standard input (STDIN) device is the keyboard. 15 | - The standard output (STDOUT) device is the screen. 16 | 17 | # Task description: 18 | 19 | 1. Remove all data from /task1 20 | 2. Copy files from /grep_challenges to /task1/it_will_be_tough 21 | 3. Write all emails with domains 'gmail' and 'outlook' which contain two vowels, following back-to-back from /task1/it_will_be_tough/emails to file /task1/it_will_be_tough/emails_result. F.e sh**au**n_omall**ey**@gmail.com or stipe_m**io**cic@outlook.com. Emails should be sorted alphabetically in ascending order. 22 | 4. 
Write all mac-addresses from /task1/it_will_be_tough/mac_addresses to file /task1/it_will_be_tough/mac_addresses_result. Addresses should be sorted alphabetically in descending order. 23 | 5. Write all URL links from img tags under 'src' attribute in /task1/it_will_be_tough/website.html to file /task1/it_will_be_tough/website_result. Links should be sorted alphabetically in ascending order -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/app/README.md: -------------------------------------------------------------------------------- 1 | ## Compose sample application 2 | 3 | > **This is an example application taken from [Github Repo](https://github.com/docker/awesome-compose/tree/master/fastapi)** 4 | 5 | ### Use with Docker Development Environments 6 | 7 | You can open this sample in the Dev Environments feature of Docker Desktop version 4.12 or later. 8 | 9 | [Open in Docker Dev Environments `Open in Docker Dev Environments`](https://open.docker.com/dashboard/dev-envs?url=https://github.com/docker/awesome-compose/tree/master/fastapi) 10 | 11 | ### Python/FastAPI application 12 | 13 | Project structure: 14 | 15 | ``` 16 | ├── compose.yaml 17 | ├── Dockerfile 18 | ├── requirements.txt 19 | ├── app 20 |    ├── main.py 21 |    ├── __init__.py 22 | 23 | ``` 24 | 25 | [_compose.yaml_](compose.yaml) 26 | 27 | ``` 28 | services: 29 | api: 30 | build: .
31 | container_name: fastapi-application 32 | environment: 33 | PORT: 8000 34 | ports: 35 | - '8000:8000' 36 | restart: "no" 37 | 38 | ``` 39 | 40 | ## Deploy with docker compose 41 | 42 | ```shell 43 | docker-compose up -d --build 44 | ``` 45 | 46 | ## Expected result 47 | 48 | Listing containers must show one container running and the port mapping as below: 49 | 50 | ``` 51 | $ docker ps 52 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 53 | 7087a6e79610 5c1778a60cf8 "/start.sh" About a minute ago Up About a minute 80/tcp, 0.0.0.0:8000->8000/tcp, :::8000->8000/tcp fastapi-application 54 | ``` 55 | 56 | After the application starts, navigate to `http://localhost:8000` in your web browser and you should see the following json response: 57 | 58 | ``` 59 | { 60 | "message": "OK" 61 | } 62 | ``` 63 | 64 | Stop and remove the containers 65 | 66 | ``` 67 | $ docker compose down 68 | ``` 69 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step8/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `COMMAND --help` - print usage of command and possible options 4 | > `pwd` - print working (current) directory 5 | > `cd [FILE]` - change working (current) directory 6 | > `ls [OPTION]... [FILE]...` - list all files in FILE directories OR in a current directory 7 | > `cat [OPTION]... [FILE]...` - concatenate FILE(s) to standard output. With no FILE, or when FILE is -, read 8 | > **_standard input_**. 9 | > `echo [SHORT-OPTION]... [STRING]...` - display a line of text to **_standard output_** 10 | > `>` - redirect **_standard output_** to another source (file, device). Source content will be overwritten by redirected. If file doesn't exist, it will be created. 11 | > `>>` - redirect **_standard output_** to another source (file, device). 
Redirected content will be appended to source content. If file doesn't exist, it will be created. 12 | > `<` - redirect **_standard input_** from another source (file, device). 13 | 14 | # NOTES: 15 | - The standard input (STDIN) device is the keyboard. 16 | - The standard output (STDOUT) device is the screen. 17 | 18 | # Task description: 19 | 20 | 1. Remove all data from /task1 21 | 2. Copy file from /dante to /task1 22 | 3. Using less, find all lines with word 'of' in the /task1/divine_comedy_canto_1. This time, write line numbers into a file /task1/canto_1/of_lines using `echo` and redirection 23 | 4. Using head, print first 32 bytes of file /task1/divine_comedy_canto_1. This time, write it into /task1/canto_1/head_32, using redirection. 24 | 5. Using tail, print last 32 bytes of file /task1/divine_comedy_canto_1. This time, write it into /task1/canto_1/tail_32, using redirection. 25 | 6. Write all file names from /words that start with vowels, contain 6 letters and do not end with vowels to a file /task1/words, using redirection. 26 | 7. Make sure your files end with a new line 27 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-bash-workshop/step6/verify.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [[ $(bash /scripts/hello.sh ZeBEdeE) != "Hello, Zebedee! This username has 7 letters: 4 vowels and 3 consonants. The rarest name!" 4 | || $(bash /scripts/hello.sh rafFErty) != "Hello, Rafferty! This username has 8 letters: 3 vowels and 5 consonants. The rarest name!" 5 | || $(bash /scripts/hello.sh aJax) != "Hello, Ajax! This username has 4 letters: 2 vowels and 2 consonants. Pretty uncommon name!" 6 | || $(bash /scripts/hello.sh gRoVer) != "Hello, Grover! This username has 6 letters: 2 vowels and 4 consonants. Pretty uncommon name!" 7 | || $(bash /scripts/hello.sh jAne) != "Hello, Jane!
This username has 4 letters: 2 vowels and 2 consonants. Nothing special!" 8 | || $(bash /scripts/hello.sh) != "Hello, World!" 9 | ]]; 10 | then exit 1 11 | fi 12 | 13 | if [[ $(bash /scripts/numbers.sh 31 1 2005) != "Valid date." 14 | || $(bash /scripts/numbers.sh 29 2 1998) != "Valid date." 15 | || $(bash /scripts/numbers.sh 31 3 2001) != "Valid date." 16 | || $(bash /scripts/numbers.sh 30 4 2001) != "Valid date." 17 | || $(bash /scripts/numbers.sh 31 5 2001) != "Valid date." 18 | || $(bash /scripts/numbers.sh 30 6 2001) != "Valid date." 19 | || $(bash /scripts/numbers.sh 31 7 2001) != "Valid date." 20 | || $(bash /scripts/numbers.sh 31 8 2001) != "Valid date." 21 | || $(bash /scripts/numbers.sh 30 9 2001) != "Valid date." 22 | || $(bash /scripts/numbers.sh 31 10 2001) != "Valid date." 23 | || $(bash /scripts/numbers.sh 30 11 2001) != "Valid date." 24 | || $(bash /scripts/numbers.sh 31 12 2001) != "Valid date." 25 | || $(bash /scripts/numbers.sh 32 01 1996) != "Invalid date." 26 | || $(bash /scripts/numbers.sh -1 5 2001) != "Invalid date." 27 | || $(bash /scripts/numbers.sh 30 2 1998) != "Invalid date." 28 | || $(bash /scripts/numbers.sh 0 15 1995) != "Invalid date." 29 | ]]; 30 | then exit 1 31 | fi 32 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/workshop.md: -------------------------------------------------------------------------------- 1 | # Linux Workshop: investigating linux CLI abilities and writing powerful scripts 2 | ## Task 3 | 4 | Your task is to follow the two KillerCoda workshops. Familiarize yourself with Linux CLI terminal 5 | and Bash-scripting. 6 | 7 | In these KillerCoda scenarios, you will follow the below steps: 8 | 9 | 1. 
Learn a lot of in-built tools of CLI (terminal) and gain experience with: 10 | - creating folders, files, list, move and rename them and review their content 11 | - managing file permissions 12 | - mastering pipes (|), redirections (<, <<, >, >>), `grep`, `sed` and others 13 | 2. Learn basics of Bash-scripting and gain experience with: 14 | - creating simple scripts to automate your work 15 | - managing business flows with operators (if, else, switch, for) 16 | - using variables, math calculations and many other things 17 | 18 | ### [Linux CLI Workshop](https://killercoda.com/rsschool/course/modules/linux-cli-workshop) 19 | 20 | ### [Linux Bash Workshop](https://killercoda.com/rsschool/course/modules/linux-bash-workshop) 21 | 22 | ## How to do the task? 23 | ### 1. Sign in to KillerCoda 24 | You could use your GitHub account 25 | ### 2. Take the entire workshop 26 | Once you finish it, do not close the environment! 27 | ### 3. Execute an `echo` command with your GitHub name in the KillerCoda environment console 28 | For example `$echo rsschool-repository` 29 | ### 4. Validate your participation! 30 | Take a screenshot of the completion screen and your GitHub name in the KillerCoda environment console. Besides, it should contain the date and time. 31 | ### 5. Cross-check 32 | Upload a screenshot to the RS School portal. Your result will be cross-checked by another student of the course, and you will have to check someone else's result. 33 | 34 | #### In case of questions, problems or suggestions 35 | 36 | Please, feel free to reach out to me or anyone from RS School. My discord - memphis9664, [GitHub](https://github.com/memphis35). -------------------------------------------------------------------------------- /modules/01. Linux Basics/A.
Automation and Scripting Languages - Bash/linux-cli-workshop/step6/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `COMMAND --help` - print usage of command and possible options 4 | > `pwd` - print working (current) directory 5 | > `cd [FILE]` - change working (current) directory 6 | > `ls [OPTION]... [FILE]...` - list all files in FILE directories OR in a current directory 7 | > `stat [OPTION]... FILE...` - Display file or file system status 8 | > `chmod [OPTION]... MODE[,MODE]... FILE...` or 9 | > `chmod [OPTION]... OCTAL-MODE FILE...` - change the mode of each FILE to 10 | > MODE 11 | 12 | # OCTAL-MODE binary table 13 | 14 | | r (read) | w (write) | x (execute) | binary_code | integer_code | 15 | |----------|-----------|-------------|-------------|--------------| 16 | | 0 | 0 | 1 | 001 (--x) | 1 | 17 | | 0 | 1 | 0 | 010 (-w-) | 2 | 18 | | 0 | 1 | 1 | 011 (-wx) | 3 | 19 | | 1 | 0 | 0 | 100 (r--) | 4 | 20 | | 1 | 0 | 1 | 101 (r-x) | 5 | 21 | | 1 | 1 | 0 | 110 (rw-) | 6 | 22 | | 1 | 1 | 1 | 111 (rwx) | 7 | 23 | 24 | # Task description: 25 | 26 | 1. Remove all data from /task1 27 | 2. Copy files from /permissions to /task1/permissions 28 | 3. Change working directory to /task1/permissions 29 | 4. Make file with name 'r_only' -r--r--r-- 30 | 5. Make file with name 'owner_rwx_group_other_r' -rwxr--r-- 31 | 6. Make file with name 'group_rwx_owner_other_r' -r--rwxr-- 32 | 7. Make file with name 'other_rwx_owner_group_read' -r--r--rwx 33 | 8. Make file with name 'all_rwx' -rwxrwxrwx 34 | 9. Make file with name 'all_rw' -rw-rw-rw- 35 | 10. Make file with name 'owner_rx_group_rw_other_wx' -r-xrw--wx 36 | 11. Make file with name 'owner_r_group_x_other_w' -r----x-w- 37 | 12. Make file with name 'fully_blocked' ---------- 38 | -------------------------------------------------------------------------------- /modules/10. 
Orchestration and Clustering/containers-workshop/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "RSSchool Containers Workshop", 3 | "description": "Let's learn how to manipulate containers with Docker and orchestrate with Kubernetes", 4 | "details": { 5 | "intro": { 6 | "text": "intro.md", 7 | "background": "setup.sh", 8 | "foreground": "wait-init.sh" 9 | }, 10 | "steps": [ 11 | { 12 | "title": "Part 1: Introduction to Containers and Docker", 13 | "text": "step1/text.md", 14 | "verify": "step1/verify.sh" 15 | }, 16 | { 17 | "title": "Part 2: Docker Compose and Multi-Container Applications", 18 | "text": "step2/text.md", 19 | "verify": "step2/verify.sh" 20 | }, 21 | { 22 | "title": "Part 3: Introduction to Kubernetes", 23 | "text": "step3/text.md", 24 | "verify": "step3/verify.sh" 25 | }, 26 | { 27 | "title": "Part 4: Kubernetes Deployment and Services", 28 | "text": "step4/text.md", 29 | "verify": "step4/verify.sh" 30 | }, 31 | { 32 | "title": "Part 5: Advanced Kubernetes Concepts", 33 | "text": "step5/text.md", 34 | "verify": "step5/verify.sh" 35 | }, 36 | { 37 | "title": "Part 5: Advanced Kubernetes Concepts", 38 | "text": "step6/text.md", 39 | "verify": "step6/verify.sh" 40 | }, 41 | { 42 | "title": "Part 5: Advanced Kubernetes Concepts", 43 | "text": "step7/text.md", 44 | "verify": "step7/verify.sh" 45 | } 46 | ], 47 | "finish": { 48 | "text": "finish.md" 49 | } 50 | }, 51 | "backend": { 52 | "imageid": "kubernetes-kubeadm-1node" 53 | } 54 | } -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. 
Automation and Scripting Languages - Bash/linux-cli-workshop/step9/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `COMMAND --help` - print usage of command and possible options 4 | > `pwd` - print working (current) directory 5 | > `cd [FILE]` - change working (current) directory 6 | > `ls [OPTION]... [FILE]...` - list all files in FILE directories OR in a current directory 7 | > `cat [OPTION]... [FILE]...` - concatenate FILE(s) to standard output. With no FILE, or when FILE is -, read 8 | > **_standard input_**. 9 | > `>` - redirect **_standard output_** to another source (file, device). Source content will be overwritten by redirected. If file doesn't exist, it will be created. 10 | > `>>` - redirect **_standard output_** to another source (file, device). Redirected content will be appended to source content. If file doesn't exist, it will be created. 11 | > `<` - redirect **_standard input_** from another source (file, device). 12 | > `bash [GNU long option] [option] script-file ...` - execute script-file with Bourne-again Shell 13 | 14 | # NOTES: 15 | - The standard input (STDIN) device is the keyboard. 16 | - The standard output (STDOUT) device is the screen. 17 | 18 | # Task description: 19 | 20 | 1. Remove all data from /task1 21 | 2. You know how to use `touch` and `nano` as a text editor. This time create file /task1/dont_touch_and_echo_it/haiku, with content: 22 | > The old pond 23 | A frog leaps in. 24 | Sound of the water. 25 | 26 | but don't use `touch`, `echo` and any text editors. Instead of this, find another way with power of **_redirection_**. 27 | 3. Copy shell script from /scripts/perfect_script.sh to /task1/exec/ 28 | 4. Run it, make sure it prints a few messages to the console. 29 | 5. Now run it again, but this time redirect all output to /task1/exec/perfect.log 30 | 6. Copy shell script from /scripts/imperfect_script.sh to /task1/exec/ 31 | 7. 
Run it, make sure it prints a few messages to the console and print an error as well. 32 | 8. Now run it again, but this time redirect all output to /task1/exec/imperfect.log, except error messages. Redirect them to /task1/exec/errors.log. -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step2/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `COMMAND --help` - print usage of command and possible options 4 | > `pwd` - print working (current) directory 5 | > `cd [FILE]` - change working (current) directory 6 | > `ls [OPTION...] [FILE]...` - list all files in FILE directories OR in a current directory 7 | > `touch [OPTION...] FILE...` - update the access and modification times of each FILE to the current time, although if 8 | > FILE doesn't exist, it will be created 9 | > `mkdir [OPTION...] DIRECTORY...` - create all DIRECTORIES, if they do not already exist 10 | > `{[FROM]..[TO]..[STEP]}` - create a list of integer numbers with a step, f.e. {1..10} creates a list of values: 1 2 3 11 | > 4 5 6 7 8 9 10 12 | > `rm [OPTION...] FILE...` - remove (unlink) FILES. 13 | > `rmdir [OPTION...] DIRECTORY...` - remove the DIRECTORIES, if they are empty. 14 | 15 | # Task description: 16 | 17 | 1. Remove all data from /task1 18 | 2. Create /task1/divine_comedy directory 19 | 3. Create three files: chapter_1, chapter_2, chapter_3 20 | 4. Open /task1/chapter_1 with nano* editor and type two lines 21 | 22 | > In the midway of this our mortal life, 23 | > I found me in a gloomy wood, astray 24 | 25 | 5. Open /task1/chapter_2 with nano* editor and type two lines 26 | 27 | > Gone from the path direct: and even to tell 28 | > It were no easy task, how savage wild 29 | 30 | 6. 
Open /task1/chapter_3 with nano* editor and type two lines 31 | 32 | > That forest, how robust and rough its growth, 33 | > Which to remember only, my dismay 34 | 35 | ## NOTE: 36 | 37 | please, be careful with symbols and don't type extra spaces at the end of the line, but each file should end with a new 38 | line 39 | 40 | # *NANO: simple console text editor 41 | 42 | Super-compact, old-school, useful text editor, which lets you edit file content with ease 43 | If you want to edit file with nano, use `nano FILEPATH`. 44 | To save changes, use `ctrl + S`. 45 | To exit, even without saving changes, use `ctrl + X`. 46 | More details about how to use nano as a pro [here](https://www.nano-editor.org/dist/latest/nano.html) -------------------------------------------------------------------------------- /modules/04. Cloud/AWS Fundamentals/README.md: -------------------------------------------------------------------------------- 1 | # AWS Fundamentals course 2 | 3 | ## Introduction 4 | Welcome to our AWS Fundamentals course, designed to help you gain the knowledge about AWS Basics. We expect that you will need to dedicate 5-10 hours per week to complete the assignments. The total duration of the course is 2 weeks. 5 | 6 | ## Week #1 7 | - AWS Cloud Practitioner Essentials - 6h. 8 | 9 | This course is for individuals who seek an overall understanding of the Amazon Web Services (AWS) Cloud, independent of specific technical roles. You will learn about AWS Cloud concepts, AWS services, security, architecture, pricing, and support to build your AWS Cloud knowledge. This course also helps you prepare for the AWS Certified Cloud Practitioner exam. 10 | 11 | [Course Link](https://explore.skillbuilder.aws/learn/course/internal/view/elearning/134/aws-cloud-practitioner-essentials) 12 | 13 | **Evaluation methodology**: Upload a screenshot once you passed every quiz per module. 14 | 15 | ## Week #2 16 | - AWS General Immersion Day – 9h.
17 | In this General Immersion Day workshop, through a mix of service explanation and hands-on labs led by AWS, you will learn about AWS foundational services as well as key concepts for AWS security measures and architecture best practices. 18 | 19 | The hands-on labs are largely divided into basic and advanced modules. 20 | 21 | In basic modules, you can learn various features of each AWS foundational service. In advanced modules, you can learn how to connect each service organically to create architecture like 3-tier web application. 22 | 23 | [Workshop link](https://catalog.workshops.aws/general-immersionday/en-US) 24 | 25 | **Evaluation methodology**: Before cleaning up the environment as instructed per module, upload screenshots of created resources 26 | 27 | ## Optional 28 | - Practitioner Quest - 9h 29 | 30 | AWS Cloud Quest: Cloud Practitioner is a role-playing learning game that helps you develop practical cloud skills through interactive learning and hands-on activities using AWS services. 31 | 32 | [Course Link](https://explore.skillbuilder.aws/learn/course/internal/view/elearning/11458/aws-cloud-quest-cloud-practitioner) 33 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. 
Automation and Scripting Languages - Bash/linux-bash-workshop/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "RSSchool Linux Bash Workshop", 3 | "description": "Let's learn how to create efficient bash scripts with ease", 4 | "details": { 5 | "intro": { 6 | "text": "intro.md", 7 | "background": "setup.sh" 8 | }, 9 | "steps": [ 10 | { 11 | "title": "Bash scripts basics: the simplest script", 12 | "text": "step1/text.md", 13 | "verify": "step1/verify.sh" 14 | }, 15 | { 16 | "title": "Bash scripts basics: varargs, variable substitution", 17 | "text": "step2/text.md", 18 | "verify": "step2/verify.sh" 19 | }, 20 | { 21 | "title": "Bash scripts basics: if-else, arithmetic expressions", 22 | "text": "step3/text.md", 23 | "verify": "step3/verify.sh" 24 | }, 25 | { 26 | "title": "Bash scripts basics: string transformation, command substitution", 27 | "text": "step4/text.md", 28 | "verify": "step4/verify.sh" 29 | }, 30 | { 31 | "title": "Bash scripts basics: for", 32 | "text": "step5/text.md", 33 | "verify": "step5/verify.sh" 34 | }, 35 | { 36 | "title": "Bash scripts basics: switch-case", 37 | "text": "step6/text.md", 38 | "verify": "step6/verify.sh" 39 | }, 40 | { 41 | "title": "Bash scripts basics: command substitution", 42 | "text": "step7/text.md", 43 | "verify": "step7/verify.sh" 44 | }, 45 | { 46 | "title": "Bash scripts basics: multiple if-else, regexp", 47 | "text": "step8/text.md", 48 | "verify": "step8/verify.sh" 49 | }, 50 | { 51 | "title": "Bash scripts basics: final task - read and save", 52 | "text": "step9/text.md", 53 | "verify": "step9/verify.sh" 54 | }, 55 | { 56 | "title": "Bash scripts basics: final task - switch-case with fallback error", 57 | "text": "step10/text.md", 58 | "verify": "step10/verify.sh" 59 | } 60 | ], 61 | "finish": { 62 | "text": "finish.md" 63 | } 64 | }, 65 | "backend": { 66 | "imageid": "ubuntu" 67 | } 68 | } 69 | 
-------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/step5/text.md: -------------------------------------------------------------------------------- 1 | # Helpful commands: 2 | 3 | > `COMMAND --help` - print usage of command and possible options 4 | > `pwd` - print working (current) directory 5 | > `cd [FILE]` - change working (current) directory 6 | > `ls [OPTION...] [FILE]...` - list all files in FILE directories OR in a current directory 7 | > `touch [OPTION...] FILE...` - update the access and modification times of each FILE to the current time, although if 8 | > FILE doesn't exist, it will be created 9 | > `mkdir [OPTION...] DIRECTORY...` - create all DIRECTORIES, if they do not already exist 10 | > `{[FROM]..[TO]..[STEP]}` - create a list of integer numbers with a step, f.e. {1..10} creates a list of values: 1 2 3 11 | > 4 5 6 7 8 9 10 12 | > `rm [OPTION...] FILE...` - remove (unlink) FILES. 13 | > `rmdir [OPTION...] DIRECTORY...` - remove the DIRECTORIES, if they are empty. 14 | > `mv [OPTION]... [-T] SOURCE DEST` or `mv [OPTION]... SOURCE... DIRECTORY` - rename SOURCE to DEST, or move SOURCE(s) 15 | > to DIRECTORY. 16 | > `cp [OPTION]... [-T] SOURCE DEST` or `cp [OPTION]... SOURCE... DIRECTORY` - copy SOURCE to DEST, or multiple SOURCE(s) 17 | > to DIRECTORY 18 | 19 | # NOTES: how to use special symbols 20 | 21 | There are special symbols * (any number of symbols), ? (only one symbol) and [] (set of symbols) you can use to mask 22 | your query. 23 | F.e., I want to list all files in the working directory that start with 'a', so I can try to do it this 24 | way `ls ./a*`. 25 | Or, I want to find all files with '.xml' extension. Like this `ls ./*.xml`. 26 | Or, I want to find files that 27 | have 4 letters and contain a vowel at the second position. Like this `?[aeiouy]??`. 28 | You've got the idea. Now, break the challenge.
29 | 30 | # Task description: 31 | 32 | 1. Remove all data from /task1 33 | 2. Copy files from /words to /task1/starts_scum with names that start from 's', 'c', 'u', 'm' 34 | 3. Copy files from /words to /task1/ends_trace with names that end on 't', 'r', 'a', 'c', 'e' 35 | 4. Copy files from /words to /task1/contains_ch_th_sh with names that contain 'ch', 'th', 'sh' 36 | 5. Copy files from /words to /task1/five_letters with names that contain only 5 letters 37 | 6. Move all files from previous directories (p.2-5) to /task1/sun_star that start with 's', 'u', 'n' and ends with ' 38 | s', 't', 'a', 'r' 39 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/A. Automation and Scripting Languages - Bash/linux-cli-workshop/index.json: -------------------------------------------------------------------------------- 1 | { 2 | "title": "RSSchool Linux CLI Workshop", 3 | "description": "Let's learn how to use linux terminal easily and efficiently", 4 | "details": { 5 | "intro": { 6 | "text": "intro.md", 7 | "background": "setup.sh" 8 | }, 9 | "steps": [ 10 | { 11 | "title": "Command-line basics, pt. 1: mkdir, touch", 12 | "text": "step1/text.md", 13 | "verify": "step1/verify.sh" 14 | }, 15 | { 16 | "title": "Command-line basics, pt. 2: rmdir, rm, nano text editor", 17 | "text": "step2/text.md", 18 | "verify": "step2/verify.sh" 19 | }, 20 | { 21 | "title": "Command-line basics, pt. 3: cp", 22 | "text": "step3/text.md", 23 | "verify": "step3/verify.sh" 24 | }, 25 | { 26 | "title": "Command-line basics, pt. 4: mv", 27 | "text": "step4/text.md", 28 | "verify": "step4/verify.sh" 29 | }, 30 | { 31 | "title": "Command-line basics, pt. 5: conditional cp & mv", 32 | "text": "step5/text.md", 33 | "verify": "step5/verify.sh" 34 | }, 35 | { 36 | "title": "Command-line basics, pt. 6: file permissions", 37 | "text": "step6/text.md", 38 | "verify": "step6/verify.sh" 39 | }, 40 | { 41 | "title": "Command-line basics, pt. 
7: cat, less, head, tail", 42 | "text": "step7/text.md", 43 | "verify": "step7/verify.sh" 44 | }, 45 | { 46 | "title": "Command-line basics, pt. 8: input, output, redirection", 47 | "text": "step8/text.md", 48 | "verify": "step8/verify.sh" 49 | }, 50 | { 51 | "title": "Command-line basics, pt. 9. input, output, redirection", 52 | "text": "step9/text.md", 53 | "verify": "step9/verify.sh" 54 | }, 55 | { 56 | "title": "Command-line basics, pt. 10: pipes, grep, sort", 57 | "text": "step10/text.md", 58 | "verify": "step10/verify.sh" 59 | }, 60 | { 61 | "title": "Command-line basics, pt. 11: sed, redirection", 62 | "text": "step11/text.md", 63 | "verify": "step11/verify.sh" 64 | } 65 | ], 66 | "finish": { 67 | "text": "finish.md" 68 | } 69 | }, 70 | "backend": { 71 | "imageid": "ubuntu" 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/intro.md: -------------------------------------------------------------------------------- 1 | Welcome to the Docker and Kubernetes Workshop! In this workshop, we will embark on a journey through the world of containerization and container orchestration, equipping you with the skills and knowledge needed to harness the power of Docker and Kubernetes. 2 | 3 | ## Workshop Overview 4 | 5 | ### Workshop Agenda 6 | 7 | **Part 1: Introduction to Containers and Docker** 8 | 9 | * Dive into the world of containers and understand why they have revolutionized modern application deployment. Learn how Docker simplifies containerization and enhances portability. 10 | 11 | **Part 2: Docker Compose and Multi-Container Applications** 12 | 13 | * Explore Docker Compose for managing multi-container applications effortlessly. Discover how to define, link, and scale containers within a single application. 
14 | 15 | **Part 3: Introduction to Kubernetes** 16 | 17 | * Uncover the fundamentals of Kubernetes, the industry-standard container orchestration platform. Set up your own Kubernetes cluster, and learn how to deploy and manage pods. 18 | 19 | **Part 4: Kubernetes Deployment and Services** 20 | 21 | * Master the art of Kubernetes Deployments and Services. Understand how to ensure application availability, perform rolling updates, and balance loads efficiently. 22 | 23 | **Part 5: Advanced Kubernetes Concepts** 24 | 25 | * Delve into advanced Kubernetes concepts like ConfigMaps, Secrets, StatefulSets, and Helm for effective application configuration and management. 26 | 27 | ### Prerequisites 28 | 29 | To make the most of this workshop, participants should have: 30 | 31 | * Basic knowledge of Linux and command-line operations. 32 | * Familiarity with software development and basic networking concepts. 33 | * A laptop with administrative rights (for local installations). 34 | 35 | ### Workshop Format 36 | 37 | Our workshop combines hands-on experience with in-depth theoretical knowledge. You'll not only learn about Docker and Kubernetes but also put that knowledge to work through practical exercises and demonstrations. 38 | 39 | ### What You'll Gain 40 | 41 | By the end of this workshop, you will: 42 | 43 | * Understand the core concepts of containers and containerization. 44 | * Be proficient in Docker, Docker Compose, and Kubernetes. 45 | * Have hands-on experience deploying and managing containerized applications. 46 | * Gain insights into container orchestration best practices. 47 | -------------------------------------------------------------------------------- /modules/09. Containers vs VMs/9.0. The basics/README.md: -------------------------------------------------------------------------------- 1 | ### What Is Virtualization? 
2 | Virtualization is a technology that allows you to create virtual instances of computer hardware or software within a single physical system. It enables the emulation of multiple virtual environments, such as virtual machines (VMs), operating systems, storage devices, or network resources, on a single physical server or host. These virtual instances, often called "virtualization instances" or "virtualized environments", operate independently and can run different operating systems and applications. 3 | 4 | Virtualization offers several benefits, including efficient resource utilization, hardware consolidation, easier management, scalability, and improved disaster recovery options. It has become a fundamental technology in modern IT infrastructure, enabling the efficient allocation and sharing of computing resources in data centers and cloud environments. Popular virtualization platforms include VMware, Microsoft Hyper-V, KVM, and VirtualBox. 5 | 6 | ### What Is a Hypervisor? 7 | The software that enables the creation and management of virtual computing environments is called a hypervisor. It’s a lightweight software or firmware layer that sits between the physical hardware and the virtualized environments and allows multiple operating systems to run concurrently on a single physical machine. The hypervisor abstracts and partitions the underlying hardware resources, such as central processing units (CPUs), memory, storage, and networking, and allocates them to the virtual environments. You can think of the hypervisor as the middleman that pulls resources from the raw materials of your infrastructure and directs them to the various computing instances. 8 | 9 | There are two types of hypervisors: 10 | 11 | - Type 1, bare-metal hypervisors, run directly on the hardware. 12 | - Type 2 hypervisors operate within a host operating system. 
13 | 14 | Hypervisors are fundamental to virtualization technology, enabling efficient utilization and management of computing resources. 15 | 16 | ### Binaries 17 | In general, binaries are non-text files made up of ones and zeros that tell a processor how to execute a program. 18 | 19 | ### Libraries 20 | Libraries are sets of prewritten code that a program can use to do either common or specialized things. They allow developers to avoid rewriting the same code over and over. 21 | 22 | ### Kernels 23 | Kernels are the ringleaders of the OS. They’re the core programming at the center that controls all other parts of the operating system. 24 | -------------------------------------------------------------------------------- /modules/README.md: -------------------------------------------------------------------------------- 1 | # RS School & Course Introduction 2 | 3 | ### Module description 4 | This week you will learn 5 | * what is the Rolling scopes 6 | * how to use the Rolling Scopes School app, 7 | * what type of tasks you will face, 8 | * how to submit your education tasks, 9 | * and so on. 10 | 11 | # Who are we? 12 | **The Rolling Scopes** is an independent international community of developers. 13 | 14 | **RS School** is a free-of-charge and community-based education program run by the Rolling Scopes developer community since 2013. 15 | Thousands of graduated trainees in 11 countries and 500+ mentors around the world. 16 | 17 | #### What are our principles? 18 | - **Open to everyone.** Free courses, no obligations, and no contracts. No age limit. Only students’ time and dedication are required. Students can repeatedly attend courses. 19 | - **Open source philosophy.** [Our Learning Management System](https://github.com/rolling-scopes/rsschool-app) platform and education materials are publicly available on GitHub and YouTube. 
20 | - **Teach it forward** According to this principle, students study at school for free, but we request that they return as mentors to pass on their knowledge to the next generation of students. 21 | 22 | ## RS AWS Club 23 | **RS AWS Club** - part of The Rolling Scopes focused on AWS technologies. We meet regularly to share ideas, answer questions, and learn about new services and best practices. 24 | 25 | ### Useful links 26 | - [Video: Rolling Scopes School in Poland. Story of my journey.](https://wearecommunity.io/events/hitchhiker-s-guide-to-epam-in-poland/talks/18089) 27 | - [Video: Intro about RS Community and NodeJS Course](https://www.youtube.com/watch?v=PG7ZBHSi09k) 28 | - [Official documentation](https://docs.rs.school/#/en/) 29 | - The [RS APP documentation](https://docs.app.rs.school/#/) helps you to orientate in the RS School app, know how to submit any type of task, what is the cross-check and so on.. 30 | 31 | ### Test 32 | At the end of this module, you should pass the **"RS app intro"** test. 33 | 34 | #### Preconditions for test: 35 | * Tests submitted in RS School App could be solved after authorization in the application. 36 | * The minimum passing score is **80%** of the maximum possible number of points. 37 | * You can take the test **3 times**, the last result is counted. 38 | * You can try the test even more times, but the score for the test will be reduced by half (from the original score). 39 | * The result of passing the test will be displayed immediately, it will be added to the score page next day after passing. 40 | 41 | -------------------------------------------------------------------------------- /modules/03. Git/gh-cross-check-guide.md: -------------------------------------------------------------------------------- 1 | # How to create GitHub repository and add evidence of completing task to it 2 | 3 | ## Register GitHub account 4 | 5 | 1) Open in your browser https://github.com/. 
6 | 2) Click "Sign Up" button in right top corner. 7 | ![Step 2](./gh-cross-check-guide-imgs/step-02.png) 8 | 3) Enter your e-mail, click "Continue", enter your password, click "Continue", enter your username, click "Continue", answer the question - you could type "n", click "Continue". Click "Verify", solve a puzzle and click "Submit". After that click "Create account" button. 9 | ![Step 3](./gh-cross-check-guide-imgs/step-03.png) 10 | 4) Check your e-mail inbox for GitHub launch code. 11 | ![Step 4](./gh-cross-check-guide-imgs/step-04.png) 12 | 5) Open e-mail and click "Open GitHub" button or link down below. 13 | ![Step 5](./gh-cross-check-guide-imgs/step-05.png) 14 | 6.1) Enter code from e-mail to the form. 15 | ![Step 6-01](./gh-cross-check-guide-imgs/step-06_01.png) 16 | 6.2) Registration is completed. 17 | ![Step 6-02](./gh-cross-check-guide-imgs/step-06_02.png) 18 | 19 | ## Creating repository 20 | 1) You could click plus button at the top (1) and click at "New repository" (2). 21 | ![Step 7](./gh-cross-check-guide-imgs/step-07.png) 22 | 2) Input course name `rss-devops-2023q3` to repository name and check "Add a README file" checkbox, after that click "Create repository" button. 23 | ![Step 8](./gh-cross-check-guide-imgs/step-08.png) 24 | 3) That's it. Repository has been created. :) 25 | 26 | ## Adding evidence of completing task to repository 27 | First of all we need to create branch for current task. 28 | Click at the current branch name `main` (1), input new branch name (2), e.g. `linux` and click "Create branch: linux". 29 | ![Step 9](./gh-cross-check-guide-imgs/step-09.png) 30 | After that you will be at `linux` branch and ready to upload files. 31 | 32 | Create a folder on your computer and name it after the task, e.g. `linux`. Place screenshots or other needed files in this folder. Select folder in your Explorer and just drag-n-drop it to the GitHub page of your repository with selected branch of the task.
33 | Provide commit message after files are uploaded and click "Commit changes". 34 | ![Step 10](./gh-cross-check-guide-imgs/step-10.gif) 35 | 36 | Now we should create a Pull Request to a main branch and a link of this Pull Request should be submitted to RSS App. 37 | So we have a message in our repository that `linux` branch has changes and there is a "Compare & pull request" button. Click it. 38 | Provide Pull Request title as task name, e.g. `linux` and click "Create pull request". 39 | Copy a link in address bar and submit it at "Cross-check: Submit" page of RSS App 40 | ![Step 11](./gh-cross-check-guide-imgs/step-11.gif) 41 | Now students could check you files during cross-check at Pull Request page in "Files" tab. -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/Part4/README.md: -------------------------------------------------------------------------------- 1 | ## Part 4: Kubernetes Deployment and Services 2 | 3 | ### 1. Deployments and Rolling Updates 4 | 5 | #### Using Deployments for Declarative Application Updates 6 | 7 | Deployments are a higher-level abstraction over pods and ReplicaSets. They allow you to declaratively manage the desired state of your application. Deployments handle scaling, rolling updates, and rollbacks. 8 | 9 | Example Deployment definition: 10 | 11 | ```yaml 12 | apiVersion: apps/v1 13 | kind: Deployment 14 | metadata: 15 | name: nginx-deployment 16 | spec: 17 | replicas: 3 18 | selector: 19 | matchLabels: 20 | app: nginx 21 | template: 22 | metadata: 23 | labels: 24 | app: nginx 25 | spec: 26 | containers: 27 | - name: nginx-container 28 | image: nginx:1.19 29 | ``` 30 | 31 | #### Performing Rolling Updates and Rollbacks 32 | 33 | Rolling updates allow you to update your application while maintaining availability. Kubernetes replaces pods gradually, ensuring minimal downtime. If an update causes issues, you can roll back to the previous version. 
34 | 35 | To perform a rolling update: 36 | 37 | ```bash 38 | kubectl set image deployment/nginx-deployment nginx-container=nginx:1.20 39 | ``` 40 | 41 | To roll back to the previous version: 42 | 43 | ```bash 44 | kubectl rollout undo deployment/nginx-deployment 45 | ``` 46 | 47 | ### 2. Services and Networking 48 | 49 | #### Exposing Applications Within a Cluster using Services 50 | 51 | Services provide network access to a set of pods. They abstract the underlying network architecture and enable pods to communicate with each other regardless of their physical location within the cluster. 52 | 53 | Example Service definition: 54 | 55 | ```yaml 56 | apiVersion: v1 57 | kind: Service 58 | metadata: 59 | name: nginx-service 60 | spec: 61 | selector: 62 | app: nginx 63 | ports: 64 | - protocol: TCP 65 | port: 80 66 | targetPort: 80 67 | type: ClusterIP 68 | ``` 69 | 70 | #### ClusterIP, NodePort, and LoadBalancer Services 71 | 72 | * **ClusterIP:** Default service type. Exposes the service on an internal IP within the cluster. 73 | * **NodePort:** Exposes the service on the same port across all nodes in the cluster. Suitable for external access to the service. 74 | * **LoadBalancer:** Automatically provisions an external load balancer (cloud-provider specific) and assigns a fixed, external IP to the service. 75 | 76 | ### 3. Ingress and Load Balancing 77 | 78 | #### Configuring Ingress Controllers for External Access 79 | 80 | Ingress provides an external entry point to services within the cluster. It allows you to define rules for routing external traffic to services based on hostnames and paths. 
81 | 82 | Example Ingress definition: 83 | 84 | ```yaml 85 | apiVersion: networking.k8s.io/v1 86 | kind: Ingress 87 | metadata: 88 | name: my-ingress 89 | spec: 90 | rules: 91 | - host: myapp.example.com 92 | http: 93 | paths: 94 | - path: / 95 | pathType: Prefix 96 | backend: 97 | service: 98 | name: nginx-service 99 | port: 100 | number: 80 101 | ``` 102 | -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/containers-workshop/README.md: -------------------------------------------------------------------------------- 1 | # Containers 2 | 3 | Welcome to the Docker and Kubernetes Workshop! In this workshop, we will embark on a journey through the world of containerization and container orchestration, equipping you with the skills and knowledge needed to harness the power of Docker and Kubernetes. 4 | 5 | ## Workshop Overview 6 | 7 | ### Workshop Agenda 8 | 9 | **Part 1: Introduction to Containers and Docker** 10 | 11 | * Dive into the world of containers and understand why they have revolutionized modern application deployment. Learn how Docker simplifies containerization and enhances portability. 12 | 13 | **Part 2: Docker Compose and Multi-Container Applications** 14 | 15 | * Explore Docker Compose for managing multi-container applications effortlessly. Discover how to define, link, and scale containers within a single application. 16 | 17 | **Part 3: Introduction to Kubernetes** 18 | 19 | * Uncover the fundamentals of Kubernetes, the industry-standard container orchestration platform. Set up your own Kubernetes cluster, and learn how to deploy and manage pods. 20 | 21 | **Part 4: Kubernetes Deployment and Services** 22 | 23 | * Master the art of Kubernetes Deployments and Services. Understand how to ensure application availability, perform rolling updates, and balance loads efficiently. 
24 | 25 | **Part 5: Advanced Kubernetes Concepts** 26 | 27 | * Delve into advanced Kubernetes concepts like ConfigMaps, Secrets, StatefulSets, and Helm for effective application configuration and management. 28 | 29 | ### Prerequisites 30 | 31 | To make the most of this workshop, participants should have: 32 | 33 | * Basic knowledge of Linux and command-line operations. 34 | * Familiarity with software development and basic networking concepts. 35 | * A laptop with administrative rights (for local installations). 36 | 37 | ### Workshop Format 38 | 39 | Our workshop combines hands-on experience with in-depth theoretical knowledge. You'll not only learn about Docker and Kubernetes but also put that knowledge to work through practical exercises and demonstrations. 40 | 41 | ### What You'll Gain 42 | 43 | By the end of this workshop, you will: 44 | 45 | * Understand the core concepts of containers and containerization. 46 | * Be proficient in Docker, Docker Compose, and Kubernetes. 47 | * Have hands-on experience deploying and managing containerized applications. 48 | * Gain insights into container orchestration best practices 49 | 50 | ## How to do the task? 51 | 52 | ### 1. Sign into Killercoda 53 | 54 | You could use your Github account 55 | 56 | ### 2. Take the entire workshop - [Workshop Link](https://killercoda.com/rsschool/course/modules/containers-workshop) 57 | 58 | Once you finish it, do not close the environment! 59 | 60 | ### 3. Execute an `echo` command with your github name in the KillerCoda environment console 61 | 62 | For example `$echo rsschool-repository` 63 | 64 | ### 4. Validate your participation! 65 | 66 | Take a screenshot of the completion screen `Thanks for participating in this Workshop!` and your github name in the KillerCoda environment console. Besides, it should contain the date and time. 67 | 68 | ### 5. Cross-check 69 | 70 | Upload a screenshot to the RS School portal. 
Your result will be cross-checked by another student of the course, and you will have to check someone else's result.
37 | 38 | ```yaml 39 | web: 40 | build: ./webapp 41 | ports: 42 | - "8080:80" 43 | depends_on: 44 | - db 45 | db: 46 | image: postgres 47 | ``` 48 | 49 | In this example, the `web` service depends on the `db` service, ensuring that the database container is started before the web container. 50 | 51 | #### Environment Variables and Secrets in Docker Compose 52 | 53 | You can define environment variables for services in the `docker-compose.yml` file to configure application behavior. Secrets can also be managed using Docker Compose in conjunction with Docker's secret management. 54 | 55 | ```yaml 56 | web: 57 | image: myapp 58 | environment: 59 | DATABASE_URL: postgres://user:password@db:5432/mydb 60 | ``` 61 | 62 | This sets the `DATABASE_URL` environment variable for the `web` service. 63 | 64 | #### Running and Scaling Multi-Container Applications 65 | 66 | To start the entire multi-container application defined in the `docker-compose.yml` file, use the following command: 67 | 68 | ```bash 69 | docker-compose up 70 | ``` 71 | 72 | You can also scale services using Docker Compose to create multiple instances of a service: 73 | 74 | ```bash 75 | docker-compose up --scale web=3 76 | ``` 77 | 78 | ### 3. Data Management and Volumes 79 | 80 | #### Understanding Docker Volumes for Persistent Data Storage 81 | 82 | Docker volumes are used to store and manage data outside of containers. They enable data persistence even when containers are stopped or removed. 83 | 84 | #### Managing Data Between Containers and the Host 85 | 86 | By using volume mappings, you can share data between containers and the host system. This is useful for cases where you want data changes to persist across container restarts. 87 | 88 | ```yaml 89 | web: 90 | image: myapp 91 | volumes: 92 | - ./app-data:/app/data 93 | ``` 94 | 95 | In this example, the `./app-data` directory on the host is mapped to the `/app/data` directory in the container. 
96 | -------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/Part3/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Part 3: Introduction to Kubernetes 3 | 4 | ### 1. Introduction to Kubernetes 5 | 6 | #### What is Kubernetes and Why is it Important? 7 | 8 | Kubernetes is an open-source container orchestration platform that automates the deployment, scaling, and management of containerized applications. It abstracts the underlying infrastructure and provides tools for deploying, maintaining, and scaling applications with ease. 9 | 10 | Kubernetes is crucial for managing complex applications composed of multiple containers, handling scaling, load balancing, and ensuring high availability. 11 | 12 | #### Kubernetes Components: Master and Nodes 13 | 14 | Kubernetes architecture consists of two main components: 15 | 16 | 1. **Control Plane (Master):** The control plane manages the overall state of the cluster. It includes components such as the API server, etcd (key-value store), scheduler, controller manager, and more. 17 | 2. **Nodes:** Nodes are the worker machines that run containerized applications. Each node hosts pods, which are the smallest deployable units in Kubernetes. Nodes are managed by the control plane. 18 | 19 | ### 2. Setting Up a Kubernetes Cluster 20 | 21 | #### Installing Kubernetes Locally 22 | 23 | For local development and learning purposes, you can use Minikube, which sets up a single-node Kubernetes cluster on your local machine. Here's how to do it: 24 | 25 | 1. **Install Hypervisor (VirtualBox, KVM, or Docker Desktop):** Minikube requires a hypervisor to create a virtual machine for Kubernetes. Install a supported hypervisor based on your operating system. 26 | 2. **[Install Minikube](https://minikube.sigs.k8s.io/docs/start/):** Download and install Minikube from the official website or package manager for your OS. 27 | 3. 
**Start Minikube:** Open a terminal and run the following command to start Minikube with the default driver (VirtualBox): 28 | 29 | ```bash 30 | minikube start 31 | ``` 32 | 4. **Interact with Minikube:** Once Minikube is running, you can interact with your local Kubernetes cluster using `kubectl`, the Kubernetes command-line tool. Configure `kubectl` to use the Minikube cluster: 33 | 34 | ```bash 35 | kubectl config use-context minikube 36 | ``` 37 | 38 | Verify the cluster is running: 39 | 40 | ```bash 41 | kubectl cluster-info 42 | ``` 43 | 5. **Explore Kubernetes:** You now have a local Kubernetes cluster running. You can create and manage pods, deployments, services, and other Kubernetes resources using `kubectl`. 44 | 45 | ### 3. Pods and ReplicaSets 46 | 47 | #### Creating Pods 48 | 49 | A pod is the smallest deployable unit in Kubernetes. It encapsulates one or more containers, shared storage, and network resources. Containers within a pod share the same network namespace and can communicate using `localhost`. 50 | 51 | Example pod definition: 52 | 53 | ```yaml 54 | apiVersion: v1 55 | kind: Pod 56 | metadata: 57 | name: my-pod 58 | spec: 59 | containers: 60 | - name: nginx-container 61 | image: nginx 62 | ``` 63 | 64 | #### Using ReplicaSets for Scaling and Load Distribution 65 | 66 | ReplicaSets ensure that a specified number of pod replicas are running at all times. They are used for scaling and maintaining the desired number of instances of a pod. 67 | 68 | Example ReplicaSet definition: 69 | 70 | ```yaml 71 | apiVersion: apps/v1 72 | kind: ReplicaSet 73 | metadata: 74 | name: nginx-replicaset 75 | spec: 76 | replicas: 3 77 | selector: 78 | matchLabels: 79 | app: nginx 80 | template: 81 | metadata: 82 | labels: 83 | app: nginx 84 | spec: 85 | containers: 86 | - name: nginx-container 87 | image: nginx 88 | ``` 89 | -------------------------------------------------------------------------------- /modules/10. 
Orchestration and Clustering/README.md: -------------------------------------------------------------------------------- 1 | # ContainersPart 1: Introduction to Containers and Docker 2 | 3 | 1. **Introduction to Containers** 4 | * What are containers and why are they important? 5 | * Container vs. Virtual Machine (VM) comparison. 6 | 2. **Getting Started with Docker** 7 | * Installing Docker on various platforms (Windows, macOS, Linux). 8 | * Docker architecture: Docker Engine, images, and containers. 9 | 3. **Working with Docker Images** 10 | * Understanding Docker images and layers. 11 | * Building custom Docker images using Dockerfiles. 12 | 4. **Running Containers** 13 | * Pulling and running Docker images from Docker Hub. 14 | * Container lifecycle: starting, stopping, and removing containers. 15 | * Mapping ports and volumes between host and container. 16 | 17 | ## Part 2: Docker Compose and Multi-Container Applications 18 | 19 | 1. **Introduction to Docker Compose** 20 | * What is Docker Compose and why is it used? 21 | * Writing a `docker-compose.yml` file. 22 | 2. **Managing Multi-Container Applications** 23 | * Defining and linking multiple containers using Docker Compose. 24 | * Environment variables and secrets in Docker Compose. 25 | * Running and scaling multi-container applications. 26 | 3. **Data Management and Volumes** 27 | * Understanding Docker volumes for persistent data storage. 28 | * Managing data between containers and the host. 29 | 30 | ## Part 3: Introduction to Kubernetes 31 | 32 | 1. **Introduction to Kubernetes** 33 | * What is Kubernetes and why is it important? 34 | * Kubernetes components: master and nodes. 35 | 2. **Setting Up a Kubernetes Cluster** 36 | * Installing Kubernetes using kubeadm or managed solutions (like Minikube). 37 | * Configuring `kubectl` for cluster communication. 38 | 3. **Pods and ReplicaSets** 39 | * Creating pods, the smallest deployable units in Kubernetes. 
40 | * Using ReplicaSets for scaling and load distribution. 41 | 42 | ## Part 4: Kubernetes Deployment and Services 43 | 44 | 1. **Deployments and Rolling Updates** 45 | * Using Deployments for declarative application updates. 46 | * Performing rolling updates and rollbacks. 47 | 2. **Services and Networking** 48 | * Exposing applications within a cluster using Services. 49 | * ClusterIP, NodePort, and LoadBalancer Services. 50 | 3. **Ingress and Load Balancing** 51 | * Configuring Ingress controllers for external access. 52 | 53 | ## Part 5: Advanced Kubernetes Concepts 54 | 55 | 1. **ConfigMaps and Secrets** 56 | * Managing configuration data using ConfigMaps. 57 | * Handling sensitive data using Secrets. 58 | 2. **Persistent Volumes and StatefulSets** 59 | * Managing stateful applications using StatefulSets. 60 | * Configuring Persistent Volumes for data persistence. 61 | 3. **Introduction to Helm** 62 | * Packaging, sharing, and deploying applications using Helm charts. 63 | 64 | ## How to do the task? 65 | 66 | ### 1. Sign into Killercoda 67 | 68 | You could use your Github account 69 | 70 | ### 2. Take the entire workshop - [Workshop Link](https://killercoda.com/rsschool/course/modules/containers-workshop) 71 | 72 | Once you finish it, do not close the environment! 73 | 74 | ### 3. Execute an `echo` command with your github name in the KillerCoda environment console 75 | 76 | For example `$echo rsschool-repository` 77 | 78 | ### 4. Validate your participation! 79 | 80 | Take a screenshot of the completion screen `Thanks for participating in this Workshop!` and your github name in the KillerCoda environment console. Besides, it should contain the date and time. 81 | 82 | ### 5. Cross-check 83 | 84 | Upload a screenshot to the RS School portal. Your result will be cross-checked by another student of the course, and you will have to check someone else result. 85 | -------------------------------------------------------------------------------- /modules/10. 
Orchestration and Clustering/Part5/README.md: -------------------------------------------------------------------------------- 1 | 2 | ## Part 5: Advanced Kubernetes Concepts 3 | 4 | ### 1. ConfigMaps and Secrets 5 | 6 | #### Managing Configuration Data using ConfigMaps 7 | 8 | ConfigMaps allow you to decouple configuration data from application code. You can store configuration settings, environment variables, and other configuration data in a ConfigMap. 9 | 10 | Creating a ConfigMap: 11 | 12 | ```bash 13 | kubectl create configmap my-config --from-literal=key1=value1 --from-literal=key2=value2 14 | ``` 15 | 16 | Using ConfigMap in a pod: 17 | 18 | ```yaml 19 | apiVersion: v1 20 | kind: Pod 21 | metadata: 22 | name: my-pod 23 | spec: 24 | containers: 25 | - name: nginx-container 26 | image: nginx 27 | envFrom: 28 | - configMapRef: 29 | name: my-config 30 | ``` 31 | 32 | #### Handling Sensitive Data using Secrets 33 | 34 | Secrets are used to store sensitive information like passwords, API keys, and tokens. They are base64 encoded and can be mounted as volumes or exposed as environment variables in pods. 35 | 36 | Creating a Secret: 37 | 38 | ```bash 39 | kubectl create secret generic my-secret --from-literal=username=user --from-literal=password=pass 40 | ``` 41 | 42 | Using Secret in a pod: 43 | 44 | ```yaml 45 | apiVersion: v1 46 | kind: Pod 47 | metadata: 48 | name: my-pod 49 | spec: 50 | containers: 51 | - name: nginx-container 52 | image: nginx 53 | envFrom: 54 | - secretRef: 55 | name: my-secret 56 | ``` 57 | 58 | ### 2. Persistent Volumes and StatefulSets 59 | 60 | #### Managing Stateful Applications using StatefulSets 61 | 62 | StatefulSets are used to manage stateful applications that require stable network identities and persistent storage. Each pod in a StatefulSet has a unique, stable hostname and persistent storage. 
63 | 64 | Creating a StatefulSet: 65 | 66 | ```yaml 67 | apiVersion: apps/v1 68 | kind: StatefulSet 69 | metadata: 70 | name: web 71 | spec: 72 | serviceName: "web" 73 | replicas: 3 74 | selector: 75 | matchLabels: 76 | app: nginx 77 | template: 78 | metadata: 79 | labels: 80 | app: nginx 81 | spec: 82 | containers: 83 | - name: nginx-container 84 | image: nginx 85 | ``` 86 | 87 | #### Configuring Persistent Volumes for Data Persistence 88 | 89 | Persistent Volumes (PVs) are used to provide persistent storage resources to pods. Persistent Volume Claims (PVCs) are used by pods to request storage resources. 90 | 91 | Example Persistent Volume Claim: 92 | 93 | ```yaml 94 | apiVersion: v1 95 | kind: PersistentVolumeClaim 96 | metadata: 97 | name: my-pvc 98 | spec: 99 | accessModes: 100 | - ReadWriteOnce 101 | resources: 102 | requests: 103 | storage: 1Gi 104 | ``` 105 | 106 | Using the PVC in a pod: 107 | 108 | ```yaml 109 | apiVersion: v1 110 | kind: Pod 111 | metadata: 112 | name: my-pod 113 | spec: 114 | containers: 115 | - name: nginx-container 116 | image: nginx 117 | volumeMounts: 118 | - mountPath: "/app/data" 119 | name: data-volume 120 | volumes: 121 | - name: data-volume 122 | persistentVolumeClaim: 123 | claimName: my-pvc 124 | ``` 125 | 126 | ### 3. Introduction to Helm 127 | 128 | #### Packaging, Sharing, and Deploying Applications using Helm Charts 129 | 130 | > For installation steps of Helm, check [this guide](https://helm.sh/docs/intro/install/) 131 | 132 | Helm is a package manager for Kubernetes that helps you define, install, and upgrade even the most complex Kubernetes applications. 133 | 134 | A Helm chart is a package that contains all the resources necessary to run a set of microservices. It includes templates, values, and optionally, pre-packaged Kubernetes manifests. 
135 | 136 | Creating a Helm chart structure: 137 | 138 | ```bash 139 | helm create mychart 140 | ``` 141 | 142 | Installing a Helm chart: 143 | 144 | ```bash 145 | helm install my-release ./mychart 146 | ``` 147 | 148 | Upgrading a Helm chart: 149 | 150 | ```bash 151 | helm upgrade my-release ./mychart 152 | ``` 153 | -------------------------------------------------------------------------------- /modules/12. Final Project/README.md: -------------------------------------------------------------------------------- 1 | ## Individual Project Description 2 | 3 | The objective of the final project is to apply all the knowledge and skills you've acquired throughout this course. You will be required to choose an open-source application and set up a CI/CD pipeline, along with appropriate DevOps practices for this application. The choice of application is up to you, and it can be one of the following: 4 | 5 | 1. **WordPress**: A popular blogging platform that will help you understand how to manage a PHP application with a MySQL database. 6 | 7 | 2. **Jenkins**: A powerful, open-source automation server that enables developers to reliably build, test, and deploy their software. 8 | 9 | 3. **Ghost**: A Node.js-based blogging platform, a simpler alternative to WordPress. 10 | 11 | 4. **Rocket.Chat**: An open-source web chat platform, similar to Slack. 12 | 13 | 5. **Redmine**: A Ruby on Rails-based project management tool. 14 | 15 | 6. **Odoo**: A comprehensive suite of business applications including CRM, e-Commerce, accounting, inventory, PoS, project management, etc. 16 | 17 | 7. **Nextcloud**: A suite of client-server software for creating and using file hosting services. 18 | 19 | 8. **Magento**: An e-commerce platform built on open source technology which provides online merchants with a flexible shopping cart system. 
20 | 21 | Your project should demonstrate a thorough understanding of the core DevOps concepts such as CI/CD, Infrastructure as Code, configuration management, and containerization. You are expected to containerize the application, handle its deployment using DevOps practices, and manage data persistence when the applications require a database. 22 | 23 | ## Assessment Criteria 24 | 25 | The project will be assessed on a scale of 0 to 100, with the following breakdown: 26 | 27 | 1. **Continuous Integration/Continuous Deployment (CI/CD) Pipeline (30 points)**: Setup a complete CI/CD pipeline for the application. This includes automated testing and deployment. 28 | 29 | 2. **Infrastructure as Code (IaC) (20 points)**: Implement IaC for the infrastructure needed for the application. This includes servers, databases, networks, etc. 30 | 31 | 3. **Configuration Management (20 points)**: Use a configuration management tool to automate the configuration of servers and applications. 32 | 33 | 4. **Containerization (20 points)**: Use containers for the application and its services. Use a container orchestration tool to manage them. 34 | 35 | 5. **Monitoring and Logging (10 points)**: Implement monitoring and logging for the application and the infrastructure. 36 | 37 | Remember, the objective is not just to deploy the application, but to demonstrate the best practices in DevOps that you have learned throughout the course. 38 | 39 | ## Presentation Criteria 40 | 41 | 1. **Presentation (20 points)**: This includes clarity of communication, quality of visuals, and effectiveness in conveying the key points of the project. Students should be able to clearly explain their approach, the tools they used, and the results they achieved. 42 | 43 | 2. **Documentation (20 points)**: The documentation should be clear, comprehensive, and well-structured. It should provide an overview of the project, explain the setup and deployment processes, and include a guide for users or developers. 
This can be in the form of a README file, Wiki pages, or a separate document. 44 | 45 | 3. **Code Quality (20 points)**: The quality of the code and configuration scripts will be assessed based on readability, organization, and adherence to best practices. 46 | 47 | 4. **Scalability & Security (20 points)**: Consideration should be given to how the application could scale and how security considerations have been addressed in the project. 48 | 49 | 5. **Troubleshooting and Error Handling (20 points)**: The project should demonstrate robustness through appropriate error handling and should include a troubleshooting guide to address common issues that might arise during the deployment or operation of the application. 50 | 51 | Remember, the goal is not just to create a functioning project, but also to demonstrate your ability to create a well-documented, scalable, and secure application deployment using the principles and practices of DevOps. The presentation and documentation are your opportunity to highlight what you have learned and accomplished. -------------------------------------------------------------------------------- /modules/08. Configuration Management/ansible-workshop/step3/text.md: -------------------------------------------------------------------------------- 1 | ## Ansible Roles 2 | 3 | Ansible roles are a way to organize and encapsulate reusable parts of Ansible playbooks. They provide a structured and modular approach to writing configurations and allow you to share and reuse configurations across different playbooks and projects. Roles help make your Ansible playbooks more maintainable, scalable, and easier to understand. 4 | 5 | Once we have the dynamic inventory and grouped the hosts, we are going to create our main `playbook`{{}} and four `roles`{{}}: `apache`{{}}, `mysql`{{}}, `wordpress`{{}} and `prerequisites`{{}}. 
6 | 7 | ## Ansible Project Structure 8 | At the end we should have the following directory structure: 9 | 10 | ```yaml 11 | ansible-workshop/ 12 | ├── roles/ 13 | │ ├── apache/ 14 | │ │ ├── tasks/ 15 | │ │ │ └── main.yaml 16 | │ │ ├── templates/ 17 | │ │ │ └── apache.conf.j2 18 | │ │ └── handlers/ 19 | │ │ └── main.yaml 20 | │ ├── mysql/ 21 | │ │ └── tasks/ 22 | │ │ └── main.yaml 23 | │ ├── prerequisites/ 24 | │ │ └── tasks/ 25 | │ │ └── main.yaml 26 | │ └── wordpress/ 27 | │ ├── tasks/ 28 | │ │ └── main.yaml 29 | │ └── templates/ 30 | │ └── wp-config.php.j2 31 | ├── vars/ 32 | │ └── default.yaml 33 | └── main.yaml 34 | ``` 35 | 36 | It's important to clarify why all roles have a `task`{{}} subfolder and `main.yaml`{{}} name. This is because Ansible by default will look in each directory within a role for a `main.yaml`{{}} file when the roles tasks are used within a playbook. The same way with the handlers, a `main.yaml`{{}} file will be looked by default. You can see more information about role folder and best-practices in the following link: https://docs.ansible.com/ansible/latest/playbook_guide/playbooks_reuse_roles.html 37 | 38 | Let's start creating the folders: 39 | 40 | ```sh 41 | mkdir ansible-workshop 42 | cd ansible-workshop 43 | mkdir roles 44 | mkdir vars 45 | cd roles 46 | mkdir -p apache/tasks 47 | mkdir -p apache/templates 48 | mkdir -p apache/handlers 49 | mkdir -p mysql/tasks 50 | mkdir -p prerequisites/tasks 51 | mkdir -p wordpress/tasks 52 | mkdir -p wordpress/templates 53 | ```{{exec}} 54 | 55 | ## Prerequisites Role 56 | 57 | Once we have all the folders, lets create the `prerequisites role`{{}}. In this role, we will define installation tasks that needs to be executed in all hosts. 58 | 59 | We are going to use the `apt`{{}} and `pip`{{}} parameters to install packages. The `loop`{{}} parameter allows us to execute the same command using the `item`{{}} variable as iterator. 
To declare variables within a task, you need to use the format `{{ variable_name }}`{{}}. 60 | 61 | Besides, it may be useful to run only specific parts of it instead of running the entire playbook. You can do this with Ansible tags. 62 | 63 | Finally, the `when`{{}} is like an if statement to filter which hosts will execute this task. As you can see, we are using the custom variable that we created in the Dynamic Inventory `server_type`{{}}. With this approach, we can invoke the role to all Ansible hosts. 64 | 65 | ## Solution 66 | 67 | Let's create the main.yaml 68 | 69 | ```sh 70 | cd /root/ansible-workshop/roles/prerequisites/tasks 71 | nano main.yaml 72 | ```{{exec}} 73 | 74 | ```yaml 75 | # /root/ansible-workshop/roles/prerequisites/tasks/main.yaml 76 | --- 77 | - name: Install prerequisites 78 | apt: name=aptitude update_cache=yes state=latest force_apt_get=yes 79 | tags: prerequisites 80 | 81 | - name: Install Web Packages 82 | apt: name={{ item }} update_cache=yes state=latest 83 | loop: [ 'apache2', 'php', 'php-mysql', 'libapache2-mod-php', 'mysql-client' ] 84 | when: server_type == 'web' 85 | tags: prerequisites 86 | 87 | - name: Install DB Packages 88 | apt: name={{ item }} update_cache=yes state=latest 89 | loop: [ 'mysql-server', 'mysql-client', 'python3-mysqldb', 'libmysqlclient-dev' ] 90 | when: server_type == 'db' 91 | tags: prerequisites 92 | 93 | - name: Install pymysql python package 94 | pip: 95 | name: pymysql 96 | when: server_type == 'db' 97 | tags: prerequisites 98 | 99 | - name: Install PHP Extensions 100 | apt: name={{ item }} update_cache=yes state=latest 101 | loop: "{{ php_modules }}" 102 | when: server_type == 'web' 103 | tags: prerequisites 104 | ```{{copy}} 105 | 106 | -------------------------------------------------------------------------------- /modules/03. Git/cv-project.md: -------------------------------------------------------------------------------- 1 | # "CV. 
Markdown" 2 | 3 | ## Task 4 | 5 | Your task is to create a markdown document. Familiarize yourself with markdown syntax and functionality using this guide: https://guides.github.com/features/mastering-markdown/ 6 | 7 | The document should contain your resume, structured specifically for a Junior DevOps Engineer position. 8 | 9 | Here are some tips and requirements: 10 | 11 | ``` 12 | What should a Junior DevOps Engineer Resume contain? 13 | 14 | ! Important: Write your resume in English if you are able to 15 | 16 | 1. First Name, Last Name (real ones) 17 | 2. Contact Info (include several ways to contact you) 18 | 3. Summary (your career goals, aspirations, and what's important to you in a role. Showcase your enthusiasm and ability to learn quickly. As a junior, you are expected to constantly absorb new knowledge from a variety of sources). 19 | 4. Skills (e.g., knowledge in cloud platforms like AWS, GCP or Azure, containerization tools like Docker, scripting languages like Python or Bash, Infrastructure as Code (IaC) tools like Terraform, CI/CD tools like Jenkins, version control systems like Git etc.) 20 | 5. Code and Configuration Examples (latest samples of your work. If available, include Infrastructure as Code (IaC) examples, scripts, or other relevant configurations you've done). 21 | 6. Experience (for a Junior DevOps Engineer, this includes all relevant experiences: internships, projects from courses, freelance projects, or personal projects. Include links to the source code, if available) 22 | 7. Education (include relevant courses, seminars, lectures, online learning. A degree in Computer Science or similar is common, but not always required. Certifications in relevant tools or platforms can be very beneficial) 23 | 8. English (provide information about your English language skills and any practice you've had, how long it lasted, etc.) 24 | 25 | ``` 26 | 27 | **NOTE!** You are recommended to use real data. 
However, using fictitious data is also acceptable. 28 | 29 | The document should be deployed on GitHub Pages (https://pages.github.com/), it will happen automatically as soon as you create a `gh-pages` branch. After that your page will be available at URL like this one: https://your-github-account.github.io/rsschool-cv/cv 30 | 31 | ### Repository Requirements: 32 | 33 | 1. Make a public repository called `rsschool-cv` on your GitHub account. 34 | 2. There should be only one file in the `main` branch - a `README.md` file with a link like this https://your-github-account.github.io/rsschool-cv/cv in it 35 | 3. The CV document itself called `cv.md` should be in the branch `gh-pages`. 36 | 4. Once you are done, open a Pull Request `gh-pages`->`main`. **Do not merge this Pull Request!** 37 | 38 | ### Commits Requirements 39 | 40 | - A minimum of 3 commits 41 | - [Commit names according to the guideline](https://docs.rs.school/#/en/git-convention) 42 | 43 | ### PR (Pull Request) Requirements 44 | 45 | 1. PR name should contain **the task name as "Markdown & Git"** and probably additional info. 46 | 2. Changes **must not contain commented code, unnecessary files, changes from other branches and generated files** like \*.bundle.js. Please review your changes before contributing. .editorconfig, .gitignore, etc. can be included. 47 | 3. Comments in the PR are a good practice. 48 | 4. [How to write the perfect Pull Request](https://github.com/blog/1943-how-to-write-the-perfect-pull-request) 49 | 50 | ### Criteria 51 | 52 | - +50 points if the requirements for the task are met 53 | - +50 points if repository, commit and PR requirements are met 54 | 55 | ### FAQ 56 | 57 | 1. Question: I’ve got 0 for the task 58 | Answer: The page with your CV is absent at: https://your-github-account.github.io/rsschool-cv/cv 59 | 60 | 2. Question: I’ve got 50 and status "Failed repository requirements: ...." 
61 | Answer: You do not have "rsschool-cv" repository with "gh-pages" branch and "cv.md" file there. Or there is no “README.md” file in "main" branch. 62 | 63 | 3. Question: I’ve got 50 and status "Failed commit requirements: ...." 64 | Answer: Your "gh-pages" branch has less than 3 commits or not all commits are made according to the rules: https://docs.rs.school/#/en/git-convention All commits that do not follow the rules will be written in the status. 65 | You can check your commits here: https://github.com/your-github-account/rsschool-cv/commits/gh-pages 66 | "Merge ..." or "Initial commit" commits were ignored. 67 | 68 | 4. Question: I’ve got 50 and status "Failed PR requirements:...." 69 | Answer: The main reason, in the majority of cases, is that you haven’t made a Pull Request from ‘gh-pages’ to ‘main’. The other reason - there is no task title ("Markdown & Git") in your Pull Request. You can look at your PR here: https://github.com/your-github-account/rsschool-cv/pulls?utf8=%E2%9C%93&q=is%3Apr 70 | -------------------------------------------------------------------------------- /modules/08. Configuration Management/ansible-workshop/step5/text.md: -------------------------------------------------------------------------------- 1 | ## Variables files 2 | 3 | Once we have all the roles, let's move forward with the `vars`{{}} folder. 4 | 5 | It is used to store variable files. It is part of the standard directory structure for Ansible and is used to keep variables separate from the tasks and templates, promoting better organization and maintainability. 6 | 7 | Inside the vars folder, you can create one or more YAML files containing variables for specific configurations. These variables can be used within the tasks, templates, and other files in the role. By using variable files, you can easily customize the behavior of the role for different hosts, environments, or configurations. 8 | 9 | ```yaml 10 | ansible-workshop/ 11 | ├── roles/ 12 | │ └── ... 
13 | ├── vars/ 14 | │ └── default.yaml 15 | ``` 16 | 17 | * The `php_modules`{{}} variable is an array of php modules used in `prerequisites` role. 18 | * The `mysql_`{{}} variables are used to configure the MySQL server and also modify the Wordpress configuration template. 19 | * Here is important to highlight `mysql_host`{{}} variable because it is created based on another variable. In this case, `hostvars[groups['db'][0]]`{{}} allows us to retrieve a variable from a host, which is filtered to be first host of the groups `db`{{}}. To get the `ansible_host`{{}} value. This variable has the hostname of the db1 container, which is required to configure WordPress db host connection. 20 | * The `http_`{{}} variables are used for Apache and WordPress configuration templates 21 | 22 | ```sh 23 | cd /root/ansible-workshop/vars 24 | nano default.yaml 25 | ```{{exec}} 26 | 27 | ```yaml 28 | # ansible-workshop/vars/default.yaml 29 | --- 30 | #System Settings 31 | php_modules: [ 'php-curl', 'php-gd', 'php-mbstring', 'php-xml', 'php-xmlrpc', 'php-soap', 'php-intl', 'php-zip' ] 32 | 33 | #MySQL Settings 34 | mysql_root_password: "mysql_root_password" 35 | mysql_db: "wordpress" 36 | mysql_user: "rsschool" 37 | mysql_password: "password" 38 | mysql_host: "{{ hostvars[groups['db'][0]]['ansible_host'] }}" 39 | 40 | #HTTP Settings 41 | http_host: "rsschool" 42 | http_conf: "rsschool.conf" 43 | http_port: "80" 44 | 45 | ```{{copy}} 46 | 47 | ## Main Playbook 48 | 49 | At this point we should have already the following structure: 50 | 51 | ```yaml 52 | ansible-workshop 53 | roles 54 | apache 55 | tasks 56 | main.yaml 57 | templates 58 | apache.conf.j2 59 | handlers 60 | main.yaml 61 | mysql 62 | tasks 63 | main.yaml 64 | prerequisites 65 | tasks 66 | main.yaml 67 | wordpress 68 | tasks 69 | main.yaml 70 | templates 71 | wp-config.php.j2 72 | vars 73 | default.yaml 74 | main.yaml 75 | 76 | ``` 77 | 78 | This main playbook has 4 key arguments: 79 | * `hosts`{{}}: This specifies the 
target hosts or groups of hosts on which the task will be executed. The value "all" indicates that the task will be applied to all hosts in the inventory. 80 | * `gather_facts`{{}}: This is a boolean value that determines whether Ansible should gather facts about the target hosts before executing the task. 81 | * `vars_files`{{}}: This is a list of paths to external variable files that should be included before executing the task. In this case, the task includes the variables defined in the file vars/default.yaml. These variables will be available for use in this task and in the invoked roles. 82 | * `roles`{{}}: This is a list of roles that will be applied to the hosts defined in the "hosts" section. 83 | 84 | 85 | ***NOTE***: The playbook execution order follows a specific sequence of steps, allowing for a structured and organized way to apply configurations and automation tasks to remote hosts. 86 | 87 | Let's create the main playbook: 88 | 89 | ```sh 90 | cd /root/ansible-workshop/ 91 | nano main.yaml 92 | ```{{exec}} 93 | 94 | ```yaml 95 | # /root/ansible-workshop/main.yaml 96 | --- 97 | - name: Prepare all servers 98 | hosts: all 99 | gather_facts: no 100 | vars_files: 101 | - vars/default.yaml 102 | roles: 103 | - prerequisites 104 | 105 | - name: Execute apache configuration 106 | hosts: web 107 | gather_facts: no 108 | vars_files: 109 | - vars/default.yaml 110 | roles: 111 | - apache 112 | 113 | - name: Execute MySQL configuration 114 | hosts: db 115 | gather_facts: no 116 | vars_files: 117 | - vars/default.yaml 118 | roles: 119 | - mysql 120 | 121 | - name: Execute Wordpress configuration 122 | hosts: web 123 | gather_facts: no 124 | vars_files: 125 | - vars/default.yaml 126 | roles: 127 | - wordpress 128 | 129 | ```{{copy}} 130 | 131 | 132 | ## Apply the Ansible Playbook 133 | 134 | The final step of our workshop is deploy our main playbook, the `ansible-playbook`{{}} command will handle this operation and the `-i`{{}} flag to specify the inventory file 
135 | 136 | ```sh 137 | cd /root/ansible-workshop/ 138 | ansible-playbook -i /root/dynamic_docker.yaml main.yaml 139 | ```{{exec}} 140 | 141 | This playbook will take a few minutes to complete 142 | 143 | ## Validate WordPress functionality 144 | 145 | Once it finishes, let's validate the WordPress functionality! We should get an HTTP 200 146 | 147 | ```sh 148 | curl -I http://127.0.0.1/wp-admin/install.php 149 | ```{{exec}} 150 | -------------------------------------------------------------------------------- /modules/08. Configuration Management/ansible-workshop/step2/text.md: -------------------------------------------------------------------------------- 1 | ## Dynamic Inventory 2 | 3 | Once we validated the requirements, we will create the dynamic inventory in the `dynamic_docker.yaml` file. 4 | 5 | Dynamic inventory information is generated dynamically at runtime rather than being defined in a static inventory file. Instead of manually maintaining an inventory file, dynamic inventory allows Ansible to retrieve host information from external sources. Ansible can draw inventory, group, and variable information from sources like AWS, GCP, OpenStack, and more. 6 | 7 | The idea is to create an inventory with groups based on the label *type* of each container. 8 | 9 | We will use the Docker plugin following the official documentation: https://docs.ansible.com/ansible/latest/collections/community/docker/docker_containers_inventory.html 10 | 11 | ## Gathering Dynamic Inventory Hosts Data 12 | 13 | First, we will define the Docker plugin and the host, by referencing the Docker Sock (where the local Docker daemon is listening by default). Besides, enable the verbose_output to later use the `ansible-inventory` command and identify all the available dynamic inventory data per container. 
14 | 15 | ```sh 16 | cd /root/ 17 | nano dynamic_docker.yaml 18 | ```{{exec}} 19 | 20 | ***NOTE:*** To use nano, you should copy the code, paste it in the editor and save it with `CTRL+S` and then `CTRL+X` 21 | 22 | ```yaml 23 | # dynamic_docker.yaml 24 | --- 25 | plugin: community.docker.docker_containers 26 | docker_host: unix://var/run/docker.sock 27 | verbose_output: true 28 | ```{{copy}} 29 | 30 | At this point, to retrieve all the dynamic inventory data per container and based on that build our groups, we can use the `ansible-inventory` command. The `-i`{{}} flag lets us use a specific dynamic inventory file, and the `--list`{{}} flag will output all hosts info. 31 | 32 | ```sh 33 | ansible-inventory -i dynamic_docker.yaml --list 34 | ```{{exec}} 35 | 36 | This will give us an extensive response of all docker containers dynamic inventory data. If we try to find the labels in this json response, we could find them in the following order: `container_name -> docker_config -> Labels`. For example the db1 container should have the following structure: 37 | 38 | ```json 39 | { 40 | "db1": { 41 | ... 42 | "docker_config": { 43 | ... 44 | "Labels": { 45 | ... 46 | "com.docker.compose.version": "1.25.0", 47 | "type": "db" 48 | } 49 | } 50 | } 51 | } 52 | ``` 53 | 54 | ## Creating Dynamic Inventory Groups 55 | 56 | Based on this information, we need to create two different groups, one for `web`{{}} and the other one for `db`{{}}. To achieve this, we can use the `keyed_group`{{}} parameter. 57 | 58 | With `keyed_groups`{{}}, you can define groups in your inventory file based on the values of specific variables associated with each host. The variables used as keys must be present in the dynamic inventory data. For this scenario, we will use the key as `docker_config.Labels.type` and Ansible will detect the `web`{{}} and `db`{{}} values. The `leading_separator`{{}} parameter will let us omit a leading underscore (or other separator) if no prefix is given. 
59 | 60 | ```sh 61 | cd /root/ 62 | nano dynamic_docker.yaml 63 | ```{{exec}} 64 | 65 | ```yaml 66 | keyed_groups: 67 | - prefix: '' 68 | separator: '' 69 | key: 'docker_config.Labels.type' 70 | leading_separator: false 71 | ```{{copy}} 72 | 73 | Let's validate the Inventory graph: 74 | 75 | ```sh 76 | ansible-inventory --graph -i dynamic_docker.yaml 77 | ```{{exec}} 78 | The tree graph shows us 3 groups, `db`, `ungrouped` and `web`, and their corresponding hosts/containers. 79 | 80 | ```sh 81 | @all: 82 | |--@db: 83 | | |--db1 84 | |--@ungrouped: 85 | |--@web: 86 | | |--web1 87 | ```{{copy}} 88 | 89 | ***NOTE:*** You can interact with the `keyed_group` by modifying the `prefix`, `separator` and `key` to get a better understanding of the inventory behavior. *Just don't forget to roll back your changes once you are ready to continue!* 90 | 91 | ## Adding Custom Variables 92 | 93 | Before moving forward, in this dynamic inventory file, you can also define custom variables for each host using the `compose`{{}} parameter, and use them in the playbook execution. These variables are created based on dynamic inventory data. For example, we will add a variable called `server_type`{{}} to identify the group that the host belongs to. 94 | 95 | ```sh 96 | cd /root/ 97 | nano dynamic_docker.yaml 98 | ```{{exec}} 99 | 100 | ```yaml 101 | compose: 102 | server_type: 'docker_config.Labels.type' 103 | ```{{copy}} 104 | 105 | ## Solution 106 |
107 | Click to expand 108 | 109 | Create the dynamic inventory file 110 | 111 | ```sh 112 | cd /root/ 113 | nano dynamic_docker.yaml 114 | ```{{exec}} 115 | 116 | Add the code to the file and save it. 117 | 118 | ```yaml 119 | # dynamic_docker.yaml 120 | --- 121 | plugin: community.docker.docker_containers 122 | docker_host: unix://var/run/docker.sock 123 | verbose_output: true 124 | keyed_groups: 125 | - prefix: '' 126 | separator: '' 127 | key: 'docker_config.Labels.type' 128 | leading_separator: false 129 | compose: 130 | server_type: 'docker_config.Labels.type' 131 | ```{{copy}} 132 | 133 | Validate the inventory is working as expected. 134 | 135 | ```sh 136 | ansible-inventory --graph -i dynamic_docker.yaml 137 | ```{{exec}} 138 | 139 |
-------------------------------------------------------------------------------- /modules/10. Orchestration and Clustering/Part1/README.md: -------------------------------------------------------------------------------- 1 | ## Part 1: Introduction to Containers and Docker 2 | 3 | ### 1. Introduction to Containers 4 | 5 | #### What are Containers and Why are They Important? 6 | 7 | Containers are lightweight, portable, and self-sufficient units that package everything needed to run a piece of software, including the code, runtime, libraries, and system tools. They provide consistent environments across different stages of the software development and deployment lifecycle, ensuring that applications behave the same way in various environments. 8 | 9 | > " Imagine you're a chef, and you have a bunch of different recipes you want to cook. Now, each recipe needs its own kitchen with all the right ingredients and utensils. But you don't want the flavors or ingredients from one recipe to mix up with another, right? 10 | > 11 | > Docker containers are like mini kitchens for each recipe. Each container holds a different recipe and has everything it needs to cook that recipe perfectly. It's like having your own little cooking space that's separate from the others. 12 | > 13 | > When you want to cook a recipe, you open its container and start cooking. And the best part? You can cook without worrying that the ingredients or flavors will get mixed up with the other recipes. When you're done, you clean up the container, and it's ready for the next recipe. 14 | > 15 | > Just like you keep your recipes organized and separate, Docker containers keep computer programs isolated and tidy. They're like special cooking stations for programs, making sure they don't interfere with each other and everything stays nice and organized." 
Simple explanation by Chatgpt 16 | 17 | Key benefits of containers: 18 | 19 | * **Isolation:** Containers isolate applications from each other and from the host system, enhancing security and preventing conflicts. 20 | * **Portability:** Containers can run consistently on different environments, from a developer's laptop to production servers. 21 | * **Resource Efficiency:** Containers share the host OS kernel, making them more efficient compared to traditional virtual machines. 22 | * **Quick Deployment:** Containers can be created and started quickly, enabling fast application deployment and scaling. 23 | 24 | #### Container vs. Virtual Machine (VM) Comparison 25 | 26 | ![Containers vs VMs](image/content/1692933167567.png) 27 | 28 | Ref: [https://www.infoworld.com/article/3204171/what-is-docker-the-spark-for-the-container-revolution.html](https://www.infoworld.com/article/3204171/what-is-docker-the-spark-for-the-container-revolution.html) 29 | 30 | | Aspect | Containers | Virtual Machines | 31 | | ------------------------ | ----------------------------- | ----------------------- | 32 | | **Isolation** | Lightweight process isolation | Full OS isolation | 33 | | **Performance** | Minimal overhead | Overhead due to full OS | 34 | | **Resource Usage** | Efficient resource sharing | Heavier resource usage | 35 | | **Startup Time** | Seconds | Minutes | 36 | | **Footprint** | Small | Larger | 37 | 38 | ### 2. Getting Started with Docker 39 | 40 | #### Installing Docker on Various Platforms 41 | 42 | Docker can be installed on different platforms. Visit the official Docker website for installation instructions tailored to your platform: [Mac](https://docs.docker.com/desktop/install/mac-install/), [Windows](https://docs.docker.com/desktop/install/windows-install/), or [Linux](https://docs.docker.com/desktop/install/linux-install/). 43 | 44 | #### Docker Architecture: Docker Engine, Images, and Containers 45 | 46 | Docker consists of three main components: 47 | 48 | 1. 
**Docker Engine:** The core component responsible for building, running, and managing containers. It includes a server, a REST API, and a command-line interface (`docker` command). 49 | 2. **Docker Images:** Images are read-only templates that define the application's file system and runtime. They are used to create containers. Images can be shared through Docker Hub or private registries. 50 | 3. **Docker Containers:** Containers are instances of Docker images. They run applications in isolated environments with their own filesystem, networking, and isolated process space. 51 | 52 | ### 3. Working with Docker Images 53 | 54 | #### Understanding Docker Images and Layers 55 | 56 | Docker images are composed of layers. Each layer represents a specific instruction in the Dockerfile. Layers are cached and shared between images, making image creation and distribution efficient. 57 | 58 | #### Building Custom Docker Images using Dockerfiles 59 | 60 | A Dockerfile is a script that defines the steps to create a Docker image. It includes instructions for installing dependencies, copying files, setting environment variables, and more. 61 | 62 | Example Dockerfile for a simple Node.js application: 63 | 64 | ```plaintext 65 | # Use an official Node.js runtime image as the base 66 | FROM node:14 67 | 68 | # Set the working directory in the container 69 | WORKDIR /usr/src/app 70 | 71 | # Copy package.json and package-lock.json to the container 72 | COPY package*.json ./ 73 | 74 | # Install application dependencies 75 | RUN npm install 76 | 77 | # Copy the rest of the application code 78 | COPY . . 79 | 80 | # Expose a port that the container will listen on 81 | EXPOSE 8080 82 | 83 | # Define the command to run the application 84 | CMD ["node", "app.js"] 85 | ``` 86 | 87 | #### Best Practices for Creating Efficient Images 88 | 89 | * Use minimal base images. 90 | * Combine multiple commands into a single `RUN` instruction to reduce layer count. 
91 | * Clean up unnecessary files in the same `RUN` instruction to minimize image size. 92 | * Use `.dockerignore` to exclude irrelevant files from the build context. 93 | * Leverage multi-stage builds to create small final images. 94 | * Avoid running unnecessary services in the container. 95 | 96 | ### 4. Running Containers 97 | 98 | #### Pulling and Running Docker Images from Docker Hub 99 | 100 | You can pull and run existing Docker images from Docker Hub using the `docker run` command. 101 | 102 | Example: 103 | 104 | ```bash 105 | docker run -d -p 8080:80 nginx 106 | ``` 107 | 108 | This command pulls the NGINX image from Docker Hub and runs it as a detached container, mapping port 8080 on the host to port 80 in the container. 109 | 110 | #### Container Lifecycle: Starting, Stopping, and Removing Containers 111 | 112 | * `docker start `: Starts a stopped container. 113 | * `docker stop `: Stops a running container gracefully. 114 | * `docker kill `: Stops a running container forcefully. 115 | * `docker rm `: Removes a stopped container. 116 | 117 | #### Mapping Ports and Volumes Between Host and Container 118 | 119 | * Port Mapping: Use the `-p` flag with the `docker run` command to map host ports to container ports. 120 | * Volume Mapping: Use the `-v` flag to map host directories to directories inside the container. This allows data persistence between container restarts. 121 | 122 | Example: 123 | 124 | ```yaml 125 | docker run -d -p 8080:80 -v ~/myapp:/app myapp-container 126 | ``` 127 | 128 | This command maps port 8080 on the host to port 80 in the container and maps the `~/myapp` directory on the host to the `/app` directory in the container. 129 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # RS School. 
DevOps from Zero to Hero educational program 2 | **Description:** This program aims to guide aspiring individuals through the fundamentals of DevOps, culminating in the knowledge and practical skills necessary for a Junior Systems Engineer role in the Cloud/DevOps field. We'll tackle different core concepts, methodologies, and tools used in the industry, along with interactive sessions to strengthen your learning. 3 | 4 | ### Course Goal 5 | - Gain enough knowledge and experience to qualify for a Junior Systems Engineer position in the Cloud/DevOps field. 6 | - Develop the necessary skills to excel in job interviews. 7 | - Build a portfolio showcasing your abilities and knowledge in DevOps. 8 | 9 | ### Admission to the Course 10 | - The course is open to all interested individuals. 11 | - This course is offered free of charge. 12 | 13 | ### Prerequisites 14 | 15 | Before starting the course, the student must: 16 | - Have basic knowledge of Computer Science. 17 | - Be able to commit to studying for at least 10 hours per week. 18 | - Have a working knowledge of a programming language, preferably Python or JavaScript. 19 | - Have a basic understanding of operating systems, especially Linux. 20 | 21 | ### Learning Path 22 | 23 | #### Week #0: RS School & DevOps Introduction 24 | - Module "RS School & Course Introduction" 25 | - Module "Introduction to DevOps and its Principles" 26 | - Module "Junior DevOps Engineer Job Requirements and Responsibilities 27 | - Live Q&A session. Real-world case studies discussion 28 | 29 | #### Week #1-2: Linux Basics & Automation and Scripting with Bash 30 | 31 | - **Goal:** Gain a foundational understanding of Linux operating system basics and develop proficiency in Automation and Scripting using Bash. 32 | - **Modules:** 33 | - [01. Linux Basics](modules/01.%20Linux%20Basics/linux.md) 34 | - [A. 
Automation and Scripting Languages - Bash](modules/01.%20Linux%20Basics/workshop.md) 35 | 36 | #### Week #3: Networking Basics 37 | 38 | - **Goal:** Understand the fundamentals of computer networking and develop a strong foundation in networking concepts. 39 | - **Module:** 40 | - [02. Networking Basics](modules/02.%20Networking%20Basics/README.md) 41 | - [02.1 Networking Workshop](modules/02.%20Networking%20Basics/02.1%20Networking%20Workshop/README.md) 42 | 43 | 44 | #### Week #4: Git 45 | 46 | - **Goal:** Master version control systems and become proficient in using Git for effective collaboration and code management. 47 | - **Module:** 48 | - [03. Git](modules/03.%20Git/README.md) 49 | 50 | 51 | #### Week #5: Cloud 52 | 53 | - **Goal:** Gain a comprehensive understanding of cloud computing, including its fundamentals, types, and major cloud service providers like AWS, GCP, and Azure. 54 | - **Modules:** 55 | - [04. Cloud: AWS Fundamentals](modules/04.%20Cloud/AWS%20Fundamentals/README.md) 56 | - [04. Cloud: AWS Cloud Practitioner Essentials](modules/04.%20Cloud/AWS%20Cloud%20Practitioner%20Essentials/README.md) 57 | - [04. Cloud: AWS Cloud Practitioner Quests](modules/04.%20Cloud/AWS%20Cloud%20Practitioner%20Quests/README.md) 58 | 59 | 60 | #### Week #6.5: AWS General Immersion Day 61 | 62 | - **Goal:** Immerse yourself in AWS technologies and gain hands-on experience through AWS General Immersion Day. 63 | - **Resource:** [AWS Inmersion Day](https://catalog.workshops.aws/general-immersionday/) 64 | 65 | 66 | #### Week #7: CI/CD 67 | 68 | - **Goal:** Gain a deep understanding of Continuous Integration and Continuous Deployment (CI/CD) concepts, and learn how to create automated pipelines for code development, testing, and deployment using industry-standard tools. 69 | - **Practical Assignment:** Set up a CI/CD pipeline. 70 | - **Mentor Activities:** Weekly meetings with students. 71 | - **Module:** 72 | - [05. 
CICD](modules/05.%20CICD/README.md) 73 | 74 | #### Week #8: Databases 75 | 76 | - **Goal:** Acquire fundamental knowledge of databases, including SQL and NoSQL, along with insights into database management systems. 77 | - **Mentor Activities:** Weekly meetings with students. 78 | 79 | #### Week #9: Infrastructure as Code (IaC) 80 | 81 | - **Goal:** Understand the concept of Infrastructure as Code (IaC) and learn how to automate the provisioning and management of infrastructure using code. 82 | - **Mentor Activities:** Weekly meetings with students. 83 | - **Module:** 84 | - [07. Infrastructure as Code (IaC)](modules/07.%20Infrastructure%20as%20Code%20(IaC)/README.md) 85 | 86 | 87 | #### Week #10: Configuration Management 88 | 89 | - **Goal:** Comprehend the concept of Configuration Management, its advantages, and learn how to utilize a widely used tool like Ansible. 90 | - **Mentor Activities:** Weekly meetings with students. 91 | - **Module:** 92 | - [08. Configuration Management](modules/08.%20Configuration%20Management/README.md) 93 | 94 | 95 | #### Week #11: Virtualization vs Containerization 96 | 97 | - **Goal:** Grasp the distinctions between virtualization and containerization, and their applications within the DevOps landscape. 98 | - **Mentor Activities:** Weekly meetings with students. 99 | - **Module:** 100 | - [09. Containers vs VMs](modules/09.%20Containers%20vs%20VMs/README.md) 101 | 102 | #### Week #12: Orchestration and Clustering 103 | 104 | - **Goal:** Acquire a foundational understanding of orchestration, clustering, and become familiar with popular tools like Kubernetes and Docker Swarm. 105 | - **Mentor Activities:** Weekly meetings with students. 106 | - **Module: 10. Orchestration and Clustering** 107 | - [10. 
Orchestration and Clustering](modules/10.%20Orchestration%20and%20Clustering/README.md) 108 | 109 | #### Week #13: Students Presentation & Online Meetup 110 | 111 | - **Online Meetup:** 112 | - Real-world Case Studies 113 | - Guest Lectures: Insights into real-world applications, current industry trends, and potential challenges. 114 | 115 | #### Week #14 - 15: Final Project 116 | 117 | - **Final Project:** 118 | - [Final Project](modules/final-project/README.md) 119 | 120 | #### Week #16: Final Project Presentation 121 | 122 | - **Final Project Presentation:** 123 | - [Final Project Presentation](modules/final-project/README.md) 124 | 125 | #### Week #17: Mock Interview, Career Guidance, and Job Assistance 126 | - **Live Session:** 127 | - Career Guidance 128 | - Resume Building 129 | - Interview Preparation 130 | - **Mock Interview:** 131 | - Conducted by a mentor (not their assigned one). 132 | - **Note:** 133 | - Students are expected to actively participate in the mock interview. 134 | 135 | ### Communication 136 | 137 | #### [Discord Server](https://discord.gg/uWvFU2RAba) 138 | Our Discord server is the main platform for communication. We have created two categories of channels - RS-AWS-Club-EN and RS-AWS-Club-RU, for English and Russian-speaking members respectively. 139 | 140 | #### [Q&A Sessions](https://discord.gg/uWvFU2RAba) 141 | We have regular Q&A sessions every Tuesday (**TBD**) at 15:00 CET (**TBD**) in English. 142 | 143 | ### AWS Accounts 144 | 145 | - Important! You use your personal [AWS accounts](https://aws.amazon.com/) to complete the courses. 146 | - We occasionally host [AWS workshops](https://aws.amazon.com/getting-started/hands-on/) for which free accounts are provided. 147 | 148 | ## FAQ 149 | 150 | ### Where is the communication taking place? 151 | 152 | In the [Discord chat](https://discord.gg/uWvFU2RAba). 153 | 154 | ### Where can I ask a question? 
155 | 156 | Questions can be asked in the [Discord chat](https://discord.gg/uWvFU2RAba). 157 | 158 | #### Can I study the learning modules and do projects in advance? 159 | 160 | Sure! 161 | 162 | ### Is it necessary to watch the webinars? 163 | 164 | No. After self-studying the materials, you can watch the webinar to consolidate the information or ask questions on the [Discord server](https://discord.gg/uWvFU2RAba). 165 | The webinar recordings can be found on our [YouTube channel](https://www.youtube.com/@RSSchool). 166 | -------------------------------------------------------------------------------- /modules/09. Containers vs VMs/9.1. VMs and Containers/README.md: -------------------------------------------------------------------------------- 1 | ### What Are Virtual Machines (VMs)? 2 | Virtualization enables the creation of computer-generated computers known as virtual machines (VMs). These are essentially autonomous virtual computers that run on a single set of hardware or a pool of hardware resources. Each virtual machine operates in isolation, forming a self-contained environment complete with its own virtualized hardware components, including CPU, memory, storage, and network interfaces. The role of the hypervisor is to allocate and manage these resources, ensuring equitable distribution and preventing any interference between individual VMs. 3 | 4 | One important characteristic of VMs is that each requires its own operating system (OS). Consequently, VMs have the flexibility to run different operating systems, facilitating the coexistence of diverse software environments and applications on the same physical machine without conflicts. VMs provide a layer of isolation, safeguarding against issues or failures in one VM from affecting others sharing the same hardware. 
Additionally, they offer valuable advantages for testing and development purposes, as developers can create snapshots of VMs to capture specific system states for experimentation or easy rollbacks. VMs also simplify resource scaling and backup creation through the straightforward ability to migrate or clone instances. 5 | 6 | With the advent of cost-effective virtualization technology and the availability of cloud computing services, organizations of all sizes have enthusiastically embraced VMs as a means to reduce costs and enhance operational efficiency within their IT departments. 7 | 8 | ![VMs](../img/VMs.png) 9 | 10 | VMs, on the other hand, can consume a significant amount of system resources. Each VM not only operates a complete copy of an operating system but also emulates a virtual representation of all the necessary hardware components required by the OS to function. This is why VMs are occasionally referred to as "monolithic," as they are self-contained units typically employed to run applications constructed as single, extensive files. (The term "monolithic" will become clearer as you explore containers further below.) This accumulation of resources, including RAM and CPU cycles, can be substantial. While VMs remain cost-effective in comparison to running separate physical computers, they may prove to be excessive for specific use cases, particularly for applications. This need for efficiency enhancement ultimately gave rise to the development of containers. 11 | 12 | #### Benefits of VMs 13 | - Apps have access to all available OS resources. 14 | - Established and reliable functionality. 15 | - Strong and effective management tools. 16 | - Familiar security tools and controls. 17 | - Capability to run diverse operating systems on a single physical machine. 18 | - Cost-effective compared to maintaining separate physical machines. 
19 | 20 | #### Cons 21 | - Iteration speed: Virtual machines are time consuming to build and regenerate because they encompass a full stack system. Any modifications to a virtual machine snapshot can take significant time to regenerate and validate they behave as expected. 22 | 23 | - Storage size cost: Virtual machines can take up a lot of storage space. They can quickly grow to several gigabytes in size. This can lead to disk space shortage issues on the virtual machines host machine. 24 | 25 | #### Popular VM Providers 26 | - VMware Workstation Player 27 | - VirtualBox 28 | - Xen Project 29 | - Microsoft Hyper-V 30 | 31 | ### What Are Containers? 32 | Containers, unlike virtual machines (VMs) that virtualize an entire computer, focus solely on virtualizing the operating system (OS). 33 | 34 | Containers are deployed on top of a physical server alongside its host OS, which is typically Linux or Windows. Each container shares the same host OS kernel and often the binaries and libraries, optimizing resource utilization. These shared components are set to read-only mode. 35 | 36 | What makes containers more efficient? Sharing OS resources, such as libraries, significantly reduces the need to replicate the entire operating system code. As a result, a single operating system installation can support multiple workloads, making containers lightweight and highly portable. They typically only occupy megabytes in size and start within seconds. In practical terms, containers allow you to host two to three times as many applications on a single server compared to VMs. VMs, in contrast, take minutes to boot and are significantly larger, measured in gigabytes instead of megabytes compared to an equivalent container. 37 | 38 | While container technology has been around for some time, Docker's launch in 2013 made containers the industry standard for application and software development. 
Technologies like Docker and Kubernetes create isolated environments for applications, resolving the issue of environment inconsistency commonly encountered in software development and deployment, often referred to as the "works on my machine" problem. 39 | 40 | Developers typically write code on their local machines and then deploy it to a server. Any disparities between these environments—such as software versions, permissions, or database access—can lead to bugs. Containers allow developers to create a self-contained, portable unit containing all the necessary dependencies to run consistently in any environment, whether it's local, development, testing, or production. This portability is a key advantage of containers. 41 | 42 | Containers also offer scalability, enabling the deployment and management of multiple instances of a containerized application in parallel. This allows for efficient resource allocation and the ability to adapt quickly to changing demand. 43 | 44 | The rise of container technology also gave birth to microservices architectures for application development. Containers allowed applications to be broken down into smaller, purpose-specific components or "services" that could be developed and deployed independently, rather than in one monolithic unit. 45 | 46 | For instance, consider an app that enables customers to purchase items worldwide. It may consist of various services, such as a search bar, a shopping cart, and a buy button. Each of these "services" can exist in its own container. If, for instance, the search bar experiences high load and encounters issues, it won't affect the entire application. This approach is what enables the smooth operation of massive digital events such as major e-commerce deals. 47 | 48 | ![VMs](../img/Containers.png) 49 | 50 | #### Container Tools 51 | Linux Containers (LXC): Often referred to as LXC, this technology represents the original Linux container solution. 
LXC operates as a method for Linux operating system-level virtualization, allowing the execution of multiple isolated Linux systems on a single host. 52 | 53 | Docker: Initially conceived as an effort to enhance LXC containers for individual applications, Docker brought about a revolutionary transformation in the container landscape. It introduced significant improvements to enhance the mobility and versatility of containers. Over time, Docker evolved into an independent container runtime environment and became a prominent utility in the Linux ecosystem, facilitating the effortless creation, transportation, and execution of containers with exceptional efficiency. 54 | 55 | Kubernetes: While not a container technology in itself, Kubernetes assumes a pivotal role as a container orchestrator. In the realm of cloud-native architecture and microservices, where applications deploy numerous containers—ranging from hundreds to thousands or even billions—Kubernetes automates the comprehensive management of these containers. While Kubernetes relies on complementary tools like Docker for seamless operation, its significance in the container space is so substantial that it deserves mention in any discussion about containers. 56 | 57 | #### Benefits of Containers 58 | - Reduced IT management resources. 59 | - Faster spin ups. 60 | - Smaller size means one physical machine can host many containers. 61 | - Reduced and simplified security updates. 62 | - Less code to transfer, migrate, and upload workloads. 63 | 64 | #### Cons: 65 | - Shared host exploits: Containers all share the same underlying hardware system below the operating system layer, it is possible that an exploit in one container could break out of the container and affect the shared hardware. Most popular container runtimes have public repositories of pre-built containers. 
There is a security risk in using one of these public images as they may contain exploits or may be vulnerable to being hijacked by nefarious actors. 66 | -------------------------------------------------------------------------------- /modules/01. Linux Basics/linux.md: -------------------------------------------------------------------------------- 1 | # Linux Introduction 2 | 3 | ## Theoretical part 4 | 5 | To enhance your understanding of the topic, you have to follow the link that contains a presentation. 6 | 7 | ### [Linux Basics](https://www.canva.com/design/DAFnptjvWN0/81lhACyF4NXhUnEv4bUE9A/edit?utm_content=DAFnptjvWN0&utm_campaign=designshare&utm_medium=link2&utm_source=sharebutton) 8 | 9 | #### Course goal 10 | 11 | Our course is designed to provide you with both theoretical and practical knowledge in various aspects of Linux, including: 12 | 13 | - OS configuration (access management, security) 14 | - User management 15 | - Software installation and configuration 16 | - Networking 17 | - Linux utilities (for troubleshooting and bash scripting) 18 | - Disk and space management 19 | - Understanding specified Linux directories 20 | By the end of the course, you'll have a solid foundation in Linux and be able to perform essential tasks with confidence. 21 | 22 | #### What is Linux 23 | 24 | Linux is a Unix-like computer operating system that is assembled under the model of free and open-source software development and distribution. The defining component of Linux is the Linux kernel, which is a computer program that is the core of a computer's operating system, with complete control over everything in the system. The kernel handles the rest of start-up as well as input/output requests from software, translating them into data-processing instructions for the central processing unit. It also handles memory and peripherals like keyboards, monitors, printers, and speakers. 
25 | 26 | #### Linux distributions 27 | 28 | Linux distributions are variations of the Linux operating system that are built on top of the Linux kernel. There are currently over 300 Linux distributions, with most of them being actively developed and maintained. These distros are designed to provide users with different options and choices when it comes to their computing needs. 29 | 30 | Some of the most popular Linux distributions include: 31 | 32 | 1. Ubuntu 33 | 2. Debian 34 | 3. Fedora 35 | 4. CentOS 36 | 5. Arch Linux 37 | 6. openSUSE 38 | 7. Lubuntu 39 | 8. Linux Mint 40 | 9. Manjaro 41 | 10. SteamOS 42 | 43 | These are just a few examples of the many Linux distributions available. Each distro has its own unique features, strengths, and weaknesses, and choosing the right one for your needs can be a daunting task. However, with so many options available, there is sure to be a distro that is perfect for you. 44 | 45 | #### Shell 46 | 47 | A shell is a software program that serves as an intermediary between a user and the operating system (OS) kernel. It provides a command-line interface for interacting with the OS and executing commands. 48 | The architecture of a shell is relatively simple and resembles a pipeline. It processes input from the user, parses and expands symbols, and then executes commands using either built-in shell commands or external commands. 49 | 50 | There are several types of shells available, including: 51 | 52 | 1. The Bourne Shell (sh) 53 | 2. The C Shell (csh) 54 | 3. The Korn Shell (ksh) 55 | 4. The GNU Bourne-Again Shell (bash) 56 | 57 | Each shell has its own set of features and capabilities, and users can choose the one that best suits their needs. 58 | 59 | #### File systems 60 | 61 | Linux file systems are an essential part of the Linux operating system, and there are several types of file systems that can be used in Linux. Here are some key things to know about Linux file systems: 62 | 63 | 1. 
Types: ext2, ext3, ext4, XFS, JFS, Btrfs 64 | 2. Structure: Hierarchical, with a root directory (/) and subdirectories and files below it 65 | 3. Permissions: Read, write, execute, for owner, group, and others 66 | 4. Ownership: Each file and directory has an owner, which is the user or group that created it 67 | 5. Links: Symbolic links allow for easy access to files or directories located in different parts of the file system 68 | 6. Can be mounted: Allowing access to files on remote servers or external storage devices 69 | 7. Security: File permissions, access control lists (ACLs), and encryption 70 | 8. Maintenance: Regular tasks include checking for errors, updating metadata, and defragmenting the file system 71 | 9. Backup: Regularly backing up the file system protects against data loss 72 | 10. Recovery: Tools like fsck, e2fsck, and xfs_repair can be used to recover data in case of a file system failure. 73 | 74 | These are just a few of the key things to know about Linux file systems. Understanding how file systems work and how to manage them is essential for using Linux effectively and ensuring the security and integrity of data. 75 | 76 | #### LVM 77 | 78 | LVM (Logical Volume Manager) is a device mapper and logical volume management system for Linux. It allows you to create logical volumes, which are virtual disk devices that can be used to store data. LVM provides several benefits over traditional disk partitioning, including flexibility, scalability, reduced complexity, and improved performance. It is commonly used in Linux servers and workstations, as well as in cloud computing environments. 79 | 80 | #### SWAP 81 | 82 | Swap is a feature in Linux that temporarily transfers data from RAM to a reserved space on a hard drive when RAM is full. This frees up memory for other uses, but swap is slower than RAM and can impact performance if used excessively. It's best to have enough physical memory to avoid relying on swap. 
83 | 84 | #### Disk Quotas 85 | 86 | Disk quotas can be used to restrict disk space and alert a system administrator before a user consumes too much space or a partition becomes full. Quotas can be set for individual users or user groups, allowing for separate management of user-specific files and project files. Additionally, quotas can control the number of inodes, which allows for control over the number of files that can be created. 87 | 88 | #### Boot Loaders 89 | 90 | When a computer is turned off, its software remains on non-volatile memory. When powered on, the computer executes a small program stored in ROM to access nonvolatile devices and load the operating system and data into RAM. This program is called a bootstrap loader or boot loader. 91 | Two popular boot loaders are LILO and GRUB. LILO is standard on Linux distributions but is older and less powerful. GRUB is easier to administer and supports network booting and MD5 passwords. 92 | 93 | #### Runlevels 94 | 95 | A "runlevel" is a way to describe the state of a computer system after it has finished booting up. It's like a mode or a setting that the system is in. 96 | In traditional Linux systems, there are seven runlevels, numbered from 0 to 6. Each runlevel has a different purpose or function. For example, runlevel 3 is used for multi-user mode, while runlevel 5 is used for graphical mode. 97 | The default runlevel is the one that the system starts up in when it boots up. In most Linux systems, the default runlevel is 3, 4, or 5. 98 | In systemd, the concept of runlevels is replaced by "targets". A target is a specific state or mode that the system can be in. For example, the "graphical.target" is a target that represents the graphical mode. 99 | 100 | ## Practical part 101 | 102 | You should learn about the shell, take the KillerCoda workshops. 
In these KillerCoda scenarios, you will follow the below steps: 103 | 104 | ### Mandatory task 105 | 106 | Learn how to get started with a Linux system by taking [The Linux foundations course](https://killercoda.com/pawelpiwosz/course/linuxFundamentals) by Pawel Piwosz 107 | 108 | 1. Listing files and directories 109 | 2. How to use the man command for help 110 | 3. Working with directories 111 | 4. Basic operations on files 112 | 5. Pipes and redirections 113 | 6. Reading files 114 | 7. Copying and moving files 115 | 8. Understanding system performance 116 | 9. Finding processes in the system 117 | 10. Creating aliases 118 | 11. Working with users 119 | 12. Navigating through history 120 | 13. Elevating privileges 121 | 14. Working with logs 122 | 15. Streams 123 | 16. Crontab 124 | 17. Getting basic information about files 125 | 18. Working with links and understanding the difference between soft and hard links 126 | 19. Explaining inodes 127 | 20. Working with file permissions 128 | 129 | ### Optional task 130 | 131 | Become a Linux administrator with [The Linux advanced course](https://killercoda.com/pawelpiwosz/course/linuxAdvanced) by Pawel Piwosz (you may choose to complete this optional task and earn extra points) 132 | 133 | 1. gzip - Archive files with gzip 134 | 2. tar - Archive files with tar 135 | 3. Compression tools 136 | 4. Compressed tar 137 | 5. Installing packages 138 | 6. Packages sources 139 | 7. Configuration of apt 140 | 8. Learn more about packages 141 | 9. Cleaning the cache 142 | 10. Manage packages with dpkg 143 | 11. Boot process and kernel 144 | 12. Work with services 145 | 13. Ulimits 146 | 14. Test ulimits in practice 147 | 148 | ## How to complete the task 149 | 150 | 1. Sign in to KillerCoda: 151 | - Access the KillerCoda platform. 152 | - Use your GitHub account to sign in. 153 | 154 | 2. Complete the entire workshop: 155 | - Go through all the workshop materials and exercises. 
156 | - Make sure to complete all the required tasks and activities. 157 | - Do not close the KillerCoda environment after finishing the workshop. 158 | 159 | 3. Execute an `echo` command with your GitHub name in the KillerCoda environment console: 160 | - In the KillerCoda environment console, enter the following command: 161 | 162 | ```bash 163 | echo <your-github-name> 164 | ``` 165 | 166 | - Replace `<your-github-name>` with your actual GitHub username. 167 | - For example: `$ echo rsschool-repository` 168 | 169 | 4. Validate your participation: 170 | - Once you have executed the `echo` command, take a screenshot of the completion screen in the KillerCoda environment console. 171 | - The screenshot should clearly show your GitHub name, the date, and time. 172 | - Make sure all the required information is visible in the screenshot. 173 | 174 | 5. Cross-check: 175 | - Upload the screenshot to the RS School portal. 176 | - Your submission will be cross-checked by another student from the course. 177 | - Additionally, you will be assigned to check someone else's result. 178 | - Follow the instructions provided in the RS School portal to complete the cross-check process. 179 | 180 | Note: It is important to follow all the instructions accurately and provide the necessary evidence to validate your participation in the workshop. 181 | 182 | ## Contacts 183 | 184 | Please feel free to reach out to me or anyone from RS School. My [Discord](https://discordapp.com/users/731965143411327006) and [GitHub](https://github.com/sid-brest) 185 | -------------------------------------------------------------------------------- /modules/08. Configuration Management/ansible-workshop/step4/text.md: -------------------------------------------------------------------------------- 1 | ## Apache Role 2 | 3 | Let's create the `Apache role`{{}}. 
4 | 5 | This role will have a different structure: 6 | 7 | ```yaml 8 | roles/ 9 | ├── apache/ 10 | │ ├── tasks/ 11 | │ │ └── main.yaml 12 | │ ├── templates/ 13 | │ │ └── apache.conf.j2 14 | │ └── handlers/ 15 | │ └── main.yaml 16 | └── ... 17 | ``` 18 | 19 | In this case, we have introduced two new folders, `templates`{{}} and `handlers`{{}}. 20 | 21 | ### Template Folder 22 | 23 | It is used to store `Jinja2`{{}} templates that are used to dynamically generate configuration files or text files based on variables and data provided in the Ansible playbook or inventory. Templates are a powerful feature that allows you to create flexible and customizable configurations for different environments or hosts. 24 | 25 | When you use a template in your Ansible role, you can leverage Jinja2 templating syntax to insert variable values, conditional statements, and loops into the resulting configuration file. This approach makes it easier to manage complex configurations, especially when different hosts require slightly different settings. 26 | 27 | 28 | In this template we use 2 variables: 29 | - `{{ http_port }}`{{}} 30 | - `{{ http_host }}`{{}} 31 | 32 | ***NOTE***: These variables are going to be defined later 33 | 34 | ```sh 35 | cd /root/ansible-workshop/roles/apache/templates/ 36 | nano apache.conf.j2 37 | ```{{exec}} 38 | 39 | ```jinja 40 | <VirtualHost *:{{ http_port }}> 41 | ServerAdmin webmaster@localhost 42 | ServerName {{ http_host }} 43 | ServerAlias www.{{ http_host }} 44 | DocumentRoot /var/www/{{ http_host }}/wordpress 45 | ErrorLog ${APACHE_LOG_DIR}/error.log 46 | CustomLog ${APACHE_LOG_DIR}/access.log combined 47 | 48 | <Directory /var/www/{{ http_host }}/wordpress> 49 | Options -Indexes 50 | AllowOverride All 51 | </Directory> 52 | 53 | <IfModule mod_dir.c> 54 | DirectoryIndex index.php index.html index.cgi index.pl index.xhtml index.htm 55 | </IfModule> 56 | 57 | </VirtualHost> 58 | 59 | ```{{copy}} 60 | 61 | ### Handlers Folder 62 | 63 | It is used to store `handler`{{}} tasks. Handlers are special tasks in Ansible that are only executed if they are notified by other tasks in the role. 
When a task in the role triggers a notification, the corresponding handler task(s) will be run at the end of the role's execution. 64 | 65 | Handlers are often used to restart services or perform other actions that require synchronization with other tasks in the role. For example, if you make changes to a configuration file using a template (as discussed above), you might want to notify a handler task to restart the associated service after the configuration file is updated. 66 | 67 | We are going to use only 1 handler called `Restart Apache`{{}}. 68 | 69 | ```sh 70 | cd /root/ansible-workshop/roles/apache/handlers/ 71 | nano main.yaml 72 | ```{{exec}} 73 | 74 | ```yaml 75 | # /root/ansible-workshop/roles/apache/handlers/main.yaml 76 | --- 77 | - name: Restart Apache 78 | service: 79 | name: apache2 80 | state: restarted 81 | 82 | ```{{copy}} 83 | 84 | 85 | ### Tasks Folder 86 | 87 | Finally, we create the task file to configure Apache, use the Template and finally use notify parameter to invoke the handler. 
88 | 89 | ```sh 90 | cd /root/ansible-workshop/roles/apache/tasks 91 | nano main.yaml 92 | ```{{exec}} 93 | 94 | ```yaml 95 | # /root/ansible-workshop/roles/apache/tasks/main.yaml 96 | --- 97 | - name: Create document root 98 | file: 99 | path: "/var/www/{{ http_host }}" 100 | state: directory 101 | owner: "www-data" 102 | group: "www-data" 103 | mode: '0755' 104 | tags: web 105 | 106 | - name: Set up Apache VirtualHost 107 | template: 108 | src: "templates/apache.conf.j2" 109 | dest: "/etc/apache2/sites-available/{{ http_conf }}" 110 | tags: web 111 | 112 | - name: Enable rewrite module 113 | shell: /usr/sbin/a2enmod rewrite 114 | tags: web 115 | 116 | - name: Enable new site 117 | shell: /usr/sbin/a2ensite {{ http_conf }} 118 | tags: web 119 | 120 | - name: Disable default Apache site 121 | shell: /usr/sbin/a2dissite 000-default.conf 122 | notify: Restart Apache 123 | tags: web 124 | 125 | ```{{copy}} 126 | 127 | 128 | ## MySQL Role 129 | This role is less complex than the Apache role, but it has more tasks. 130 | 131 | ```yaml 132 | roles/ 133 | └── mysql/ 134 | └── tasks/ 135 | └── main.yaml 136 | ``` 137 | 138 | We will use different modules to configure a MySQL server to be used for WordPress. 
139 | 140 | ```sh 141 | cd /root/ansible-workshop/roles/mysql/tasks 142 | nano main.yaml 143 | ```{{exec}} 144 | 145 | ```yaml 146 | # /root/ansible-workshop/roles/mysql/tasks/main.yaml 147 | --- 148 | - name: Start MySQL service 149 | shell: service mysql start 150 | 151 | - name: Set the root password 152 | mysql_user: 153 | name: root 154 | password: "{{ mysql_root_password }}" 155 | login_unix_socket: /var/run/mysqld/mysqld.sock 156 | tags: db 157 | 158 | - name: Remove all anonymous user accounts 159 | mysql_user: 160 | name: '' 161 | host_all: yes 162 | state: absent 163 | login_user: root 164 | login_password: "{{ mysql_root_password }}" 165 | tags: db 166 | 167 | - name: Remove the MySQL test database 168 | mysql_db: 169 | name: test 170 | state: absent 171 | login_user: root 172 | login_password: "{{ mysql_root_password }}" 173 | tags: db 174 | 175 | - name: Creates database for WordPress 176 | mysql_db: 177 | name: "{{ mysql_db }}" 178 | state: present 179 | login_user: root 180 | login_password: "{{ mysql_root_password }}" 181 | tags: db 182 | 183 | - name: Create MySQL user for WordPress 184 | mysql_user: 185 | name: "{{ mysql_user }}" 186 | password: "{{ mysql_password }}" 187 | priv: "{{ mysql_db }}.*:ALL" 188 | state: present 189 | login_user: root 190 | login_password: "{{ mysql_root_password }}" 191 | 192 | host: '%' 193 | tags: db 194 | 195 | - name: Enable remote login to mysql 196 | lineinfile: 197 | path: /etc/mysql/mysql.conf.d/mysqld.cnf 198 | regexp: '^bind-address' 199 | line: 'bind-address = 0.0.0.0' 200 | backup: yes 201 | 202 | - name: Restart mysql 203 | shell: service mysql restart 204 | 205 | ```{{copy}} 206 | 207 | 208 | ## Wordpress Role 209 | 210 | Let's create the `Wordpress role`{{}}. 
As you can see, we have a template here: 211 | 212 | ```yaml 213 | roles/ 214 | └── wordpress/ 215 | ├── tasks/ 216 | │ └── main.yaml 217 | └── templates/ 218 | └── wp-config.php.j2 219 | ``` 220 | 221 | Let's start with the task 222 | 223 | ```sh 224 | cd /root/ansible-workshop/roles/wordpress/tasks 225 | nano main.yaml 226 | ```{{exec}} 227 | 228 | ```yaml 229 | # /root/ansible-workshop/roles/wordpress/tasks/main.yaml 230 | --- 231 | - name: Download and unpack latest WordPress 232 | unarchive: 233 | src: https://wordpress.org/latest.tar.gz 234 | dest: "/var/www/{{ http_host }}" 235 | remote_src: yes 236 | creates: "/var/www/{{ http_host }}/wordpress" 237 | tags: [ wordpress ] 238 | 239 | - name: Set ownership 240 | file: 241 | path: "/var/www/{{ http_host }}" 242 | state: directory 243 | recurse: yes 244 | owner: www-data 245 | group: www-data 246 | tags: [ wordpress ] 247 | 248 | - name: Set permissions for directories 249 | shell: "/usr/bin/find /var/www/{{ http_host }}/wordpress/ -type d -exec chmod 750 {} \\;" 250 | tags: [ wordpress ] 251 | 252 | - name: Set permissions for files 253 | shell: "/usr/bin/find /var/www/{{ http_host }}/wordpress/ -type f -exec chmod 640 {} \\;" 254 | tags: [ wordpress ] 255 | 256 | - name: Set up wp-config 257 | template: 258 | src: "templates/wp-config.php.j2" 259 | dest: "/var/www/{{ http_host }}/wordpress/wp-config.php" 260 | tags: [ wordpress ] 261 | 262 | ```{{copy}} 263 | 264 | Finally, the template 265 | 266 | ```sh 267 | cd /root/ansible-workshop/roles/wordpress/templates/ 268 | nano wp-config.php.j2 269 | ```{{exec}} 270 | 271 | ```jinja 272 |