├── .circleci
│   └── config.yml
├── .github
│   └── workflows
│       └── ci_workflow.yml
├── .gitignore
├── .travis.yml
├── CONTRIBUTING.md
├── LICENSE
├── README-zh_CN.md
├── README.md
├── certificates
│   ├── aws-cloud-practitioner.md
│   ├── aws-cloud-sysops-associate.md
│   ├── aws-solutions-architect-associate.md
│   ├── azure-fundamentals-az-900.md
│   ├── cka.md
│   └── ckad.md
├── coding
│   └── python
│       └── binary_search.py
├── credits.md
├── exercises
│   └── shell
│       └── solutions
│           └── directories_comparison.md
├── faq.md
├── images
│   ├── Go.png
│   ├── HR.png
│   ├── ansible.png
│   ├── aws.png
│   ├── aws
│   │   └── identify_load_balancer.png
│   ├── azure.png
│   ├── bash.png
│   ├── big-data.png
│   ├── certificates.png
│   ├── cicd.png
│   ├── cloud.png
│   ├── containers.png
│   ├── databases.png
│   ├── design.png
│   ├── design
│   │   ├── cdn-no-downtime.png
│   │   ├── development
│   │   │   └── git_fsmonitor.png
│   │   ├── input-process-output.png
│   │   ├── producers_consumers_fix.png
│   │   └── producers_consumers_issue.png
│   ├── devops.png
│   ├── devops_exercises.png
│   ├── devops_resources.png
│   ├── distributed.png
│   ├── distributed
│   │   ├── distributed_design_lb.png
│   │   └── distributed_design_standby.png
│   ├── dns.png
│   ├── elastic.png
│   ├── exercises.png
│   ├── general.png
│   ├── git.png
│   ├── googlecloud.png
│   ├── hardware.png
│   ├── how_they_devops.png
│   ├── infraverse.png
│   ├── jenkins.png
│   ├── jenkins
│   │   └── jenkins-to-kibana.png
│   ├── kubernetes.png
│   ├── linux_master.jpeg
│   ├── logos
│   │   ├── argo.png
│   │   ├── circleci.png
│   │   ├── kafka.png
│   │   └── linux.png
│   ├── mongo.png
│   ├── monitoring.png
│   ├── network.png
│   ├── openshift.png
│   ├── openstack.png
│   ├── os.png
│   ├── perl.png
│   ├── programming.png
│   ├── prometheus.png
│   ├── puppet.png
│   ├── python.png
│   ├── regex.png
│   ├── security.png
│   ├── sql.png
│   ├── storage.png
│   ├── system_design_notebook.png
│   ├── terraform.png
│   ├── testing.png
│   ├── virtualization.png
│   └── you.png
├── prepare_for_interview.md
├── scripts
│   ├── count_questions.sh
│   ├── question_utils.py
│   ├── random_question.py
│   ├── run_ci.sh
│   └── update_question_number.py
├── tests
│   ├── scripts_question_utils_unittest.py
│   ├── syntax_checker_unittest.py
│   ├── syntax_lint.py
│   └── testcases
│       ├── testcase1.md
│       ├── testcase2.md
│       └── testcase3.md
└── topics
    ├── ansible
    │   ├── README.md
    │   ├── my_first_playbook.md
    │   ├── my_first_task.md
    │   ├── solutions
    │   │   ├── my_first_playbook.md
    │   │   ├── my_first_task.md
    │   │   └── update_upgrade_task.md
    │   └── update_upgrade_task.md
    ├── argo
    │   └── README.md
    ├── aws
    │   ├── README.md
    │   ├── exercises
    │   │   ├── access_advisor
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── alb_multiple_target_groups
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── app_load_balancer
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── asg_dynamic_scaling_policy
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── aurora_db
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── auto_scaling_groups_basics
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── basic_s3_ci
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── budget_setup
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── create_ami
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── create_efs
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── create_role
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── create_spot_instances
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── create_user
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── creating_records
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── credential_report
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── ebs_volume_creation
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── ec2_iam_roles
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── ecs_task
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── elastic_beanstalk_simple
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── elastic_ip
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── elastic_network_interfaces
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── elasticache
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── health_checks
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── hello_function
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── hibernate_instance
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── launch_ec2_web_instance
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── mysql_db
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── network_load_balancer
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── new_vpc
    │   │   │   ├── exercise.md
    │   │   │   ├── main.tf
    │   │   │   ├── pulumi
    │   │   │   │   └── __main__.py
    │   │   │   ├── solution.md
    │   │   │   └── terraform
    │   │   │       └── main.tf
    │   │   ├── no_application
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── password_policy_and_mfa
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── placement_groups
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── register_domain
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── route_53_failover
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── s3
    │   │   │   └── new_bucket
    │   │   │       ├── exercise.md
    │   │   │       ├── pulumi
    │   │   │       │   └── __main__.py
    │   │   │       ├── solution.md
    │   │   │       └── terraform
    │   │   │           └── main.tf
    │   │   ├── sample_cdk
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── security_groups
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── snapshots
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── subnets
    │   │   │   ├── exercise.md
    │   │   │   ├── pulumi
    │   │   │   │   └── __main__.py
    │   │   │   ├── solution.md
    │   │   │   └── terraform
    │   │   │       └── main.tf
    │   │   ├── url_function
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   └── web_app_lambda_dynamodb
    │   │       ├── exercise.md
    │   │       └── terraform
    │   │           └── main.tf
    │   └── images
    │       └── lambda
    │           └── aws_lambda_direct_access.png
    ├── azure
    │   └── README.md
    ├── cicd
    │   ├── README.md
    │   ├── ci_for_open_source_project.md
    │   ├── deploy_to_kubernetes.md
    │   ├── remove_builds.md
    │   ├── remove_jobs.md
    │   └── solutions
    │       ├── deploy_to_kubernetes
    │       │   ├── Jenkinsfile
    │       │   ├── README.md
    │       │   ├── deploy.yml
    │       │   ├── helloworld.yml
    │       │   ├── html
    │       │   │   ├── css
    │       │   │   │   ├── normalize.css
    │       │   │   │   └── skeleton.css
    │       │   │   ├── images
    │       │   │   │   └── favicon.png
    │       │   │   └── index.html
    │       │   └── inventory
    │       ├── remove_builds_solution.groovy
    │       └── remove_jobs_solution.groovy
    ├── circleci
    │   └── README.md
    ├── cloud
    │   └── README.md
    ├── cloud_slack_bot.md
    ├── containers
    │   ├── README.md
    │   ├── commit_image.md
    │   ├── containerized_db.md
    │   ├── containerized_db_persistent_storage.md
    │   ├── containerized_web_server.md
    │   ├── image_layers.md
    │   ├── multi_stage_builds.md
    │   ├── run_forest_run.md
    │   ├── running_containers.md
    │   ├── sharing_images.md
    │   ├── solutions
    │   │   ├── commit_image.md
    │   │   ├── containerized_db.md
    │   │   ├── containerized_db_persistent_storage.md
    │   │   ├── containerized_web_server.md
    │   │   ├── image_layers.md
    │   │   ├── multi_stage_builds.md
    │   │   ├── run_forest_run.md
    │   │   ├── running_containers.md
    │   │   ├── sharing_images.md
    │   │   └── working_with_images.md
    │   ├── working_with_images.md
    │   └── write_containerfile_run_container.md
    ├── databases
    │   ├── solutions
    │   │   └── table_for_message_board_system.md
    │   └── table_for_message_board_system.md
    ├── devops
    │   ├── README.md
    │   ├── containerize_app.md
    │   ├── ha_hello_world.md
    │   └── solutions
    │       ├── containerize_app.md
    │       └── ha_hello_world.md
    ├── dns
    │   └── README.md
    ├── eflk.md
    ├── flask_container_ci
    │   ├── README.md
    │   ├── app
    │   │   ├── __init__.py
    │   │   ├── config.py
    │   │   ├── main.py
    │   │   └── tests.py
    │   ├── requirements.txt
    │   ├── tests.py
    │   └── users.json
    ├── flask_container_ci2
    │   ├── README.md
    │   ├── app
    │   │   ├── __init__.py
    │   │   ├── config.py
    │   │   ├── main.py
    │   │   └── tests.py
    │   ├── requirements.txt
    │   └── tests.py
    ├── git
    │   ├── README.md
    │   ├── branch_01.md
    │   ├── commit_01.md
    │   ├── solutions
    │   │   ├── branch_01_solution.md
    │   │   ├── commit_01_solution.md
    │   │   └── squashing_commits.md
    │   └── squashing_commits.md
    ├── grafana
    │   └── README.md
    ├── jenkins_pipelines.md
    ├── jenkins_scripts.md
    ├── kafka
    │   └── README.md
    ├── kubernetes
    │   ├── CKA.md
    │   ├── README.md
    │   ├── exercises
    │   │   ├── labels_and_selectors
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   ├── node_selectors
    │   │   │   ├── exercise.md
    │   │   │   └── solution.md
    │   │   └── taints_101
    │   │       ├── exercise.md
    │   │       └── solution.md
    │   ├── images
    │   │   ├── cluster_architecture_exercise.png
    │   │   ├── cluster_architecture_solution.png
    │   │   ├── service_exercise.png
    │   │   └── service_solution.png
    │   ├── killing_containers.md
    │   ├── pods_01.md
    │   ├── replicaset_01.md
    │   ├── replicaset_02.md
    │   ├── replicaset_03.md
    │   ├── services_01.md
    │   └── solutions
    │       ├── killing_containers.md
    │       ├── pods_01_solution.md
    │       ├── replicaset_01_solution.md
    │       ├── replicaset_02_solution.md
    │       ├── replicaset_03_solution.md
    │       └── services_01_solution.md
    ├── linux
    │   └── README.md
    ├── misc
    │   └── elk_kibana_aws.md
    ├── openshift
    │   ├── README.md
    │   ├── projects_101.md
    │   └── solutions
    │       ├── my_first_app.md
    │       └── projects_101.md
    ├── os
    │   ├── fork_101.md
    │   ├── fork_102.md
    │   └── solutions
    │       ├── fork_101_solution.md
    │       └── fork_102_solution.md
    ├── perl
    │   └── README.md
    ├── pipeline_deploy_image_to_k8.md
    ├── programming
    │   ├── grep_berfore_and_after.md
    │   └── web_scraper.md
    ├── python
    │   ├── advanced_data_types.md
    │   ├── compress_string.md
    │   ├── data_types.md
    │   ├── reverse_string.md
    │   └── solutions
    │       ├── advanced_data_types_solution.md
    │       ├── data_types_solution.md
    │       └── reverse_string.md
    ├── security
    │   └── README.md
    ├── shell
    │   ├── README.md
    │   ├── argument_check.md
    │   ├── basic_date.md
    │   ├── count_chars.md
    │   ├── directories_comparison.md
    │   ├── empty_files.md
    │   ├── factors.md
    │   ├── files_size.md
    │   ├── great_day.md
    │   ├── hello_world.md
    │   ├── host_status.md
    │   ├── num_of_args.md
    │   ├── print_arguments.md
    │   ├── solutions
    │   │   ├── basic_date.md
    │   │   ├── count_chars.md
    │   │   ├── directories_comparison.md
    │   │   ├── empty_files.md
    │   │   ├── factors.md
    │   │   ├── files_size.md
    │   │   ├── great_day.md
    │   │   ├── hello_world.md
    │   │   ├── host_status.md
    │   │   ├── num_of_args.md
    │   │   └── sum.md
    │   └── sum.md
    ├── soft_skills
    │   └── README.md
    ├── software_development
    │   └── README.md
    ├── sql
    │   ├── improve_query.md
    │   └── solutions
    │       └── improve_query.md
    ├── terraform
    │   ├── README.md
    │   └── exercises
    │       ├── launch_ec2_instance
    │       │   ├── exercise.md
    │       │   └── solution.md
    │       ├── launch_ec2_web_instance
    │       │   └── exercise.md
    │       ├── s3_bucket_rename
    │       │   ├── exercise.md
    │       │   └── solution.md
    │       └── terraform_local_provider
    │           ├── exercise.md
    │           └── solution.md
    └── zuul
        └── README.md
/.circleci/config.yml:
--------------------------------------------------------------------------------
1 | # Use the latest 2.1 version of CircleCI pipeline process engine.
2 | # See: https://circleci.com/docs/2.0/configuration-reference
3 | version: 2.1
4 |
5 | # Define a job to be invoked later in a workflow.
6 | # See: https://circleci.com/docs/2.0/configuration-reference/#jobs
7 | jobs:
8 | say-hello:
9 | # Specify the execution environment. You can specify an image from Dockerhub or use one of our Convenience Images from CircleCI's Developer Hub.
10 | # See: https://circleci.com/docs/2.0/configuration-reference/#docker-machine-macos-windows-executor
11 | docker:
12 | - image: cimg/base:stable
13 | # Add steps to the job
14 | # See: https://circleci.com/docs/2.0/configuration-reference/#steps
15 | steps:
16 | - checkout
17 | - run:
18 | name: "Say hello"
19 | command: "echo Hello, World!"
20 |
21 | # Invoke jobs via workflows
22 | # See: https://circleci.com/docs/2.0/configuration-reference/#workflows
23 | workflows:
24 | say-hello-workflow:
25 | jobs:
26 | - say-hello
27 |
--------------------------------------------------------------------------------
/.github/workflows/ci_workflow.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | pull_request:
5 | branches: [ master ]
6 |
7 | jobs:
8 | ci:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: actions/checkout@v3
12 | - name: Install flake8
13 | run: pip install flake8
14 | - name: Give executable permissions to run_ci.sh inside the scripts directory
15 | run: chmod a+x scripts/run_ci.sh
16 | - name: Run the ci script inside the scripts folder
17 | run: sh scripts/run_ci.sh
18 | shell: bash
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | docs/_build/
68 |
69 | # PyBuilder
70 | target/
71 |
72 | # Jupyter Notebook
73 | .ipynb_checkpoints
74 |
75 | # pyenv
76 | .python-version
77 |
78 | # celery beat schedule file
79 | celerybeat-schedule
80 |
81 | # SageMath parsed files
82 | *.sage.py
83 |
84 | # Environments
85 | .env
86 | .venv
87 | env/
88 | venv/
89 | ENV/
90 | env.bak/
91 | venv.bak/
92 |
93 | *.pyc
94 |
95 | # JetBrains IDEs
96 | .idea
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: "python"
2 | python:
3 | - "3.8"
4 | install:
5 | - pip install flake8
6 | script:
7 | - flake8 --max-line-length=100 .
8 | - python tests/syntax_lint.py
9 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | ## How to contribute
2 |
3 | Use pull requests to contribute to the project.
4 |
5 | Stick to the following format:
6 |
7 | <details>
8 | <summary>[Question]</summary><br><b>
9 |
10 | [Answer]
11 | </b></details>
12 |
13 | * If you added several questions and would like to know how many questions there are, you can use the script "count_questions.sh" in the scripts directory.
14 |
15 | ## What to avoid
16 |
17 | * Avoid adding installation questions. Those are the worst type of questions...
18 | * Don't copy questions and answers from other sources. Their authors probably worked hard to create them.
19 | * If you add new images, make sure they are free and can be used.
20 |
21 | ## Before submitting the pull request
22 |
23 | You can test your changes locally with the script `run_ci.sh` in the scripts directory.
24 |
--------------------------------------------------------------------------------
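
For reference, this is how a new question in the expected format can be appended to README.md (the question text below is only a placeholder):

```
# Append a question in the expected format to README.md
# ("What is DevOps?" is a placeholder question)
cat >> README.md <<'EOF'
<details>
<summary>What is DevOps?</summary><br><b>

[Answer]
</b></details>
EOF

# Count the questions, including the new one
bash scripts/count_questions.sh
```
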
/certificates/azure-fundamentals-az-900.md:
--------------------------------------------------------------------------------
1 | ## AZ-900
2 |
3 |
4 | What is cloud computing?
5 |
6 | [Wikipedia](https://en.wikipedia.org/wiki/Cloud_computing): "Cloud computing is the on-demand availability of computer system resources, especially data storage (cloud storage) and computing power, without direct active management by the user"
7 |
8 |
9 |
10 | What types of clouds (or cloud deployments) are there?
11 |
12 | * Public - Cloud services that share computing resources among multiple customers
13 | * Private - Cloud services whose computing resources are limited to a specific customer or organization, managed either by a third party or by the organization itself
14 | * Hybrid - A combination of public and private clouds
15 |
16 |
--------------------------------------------------------------------------------
/certificates/cka.md:
--------------------------------------------------------------------------------
1 | ## Certified Kubernetes Administrator (CKA)
2 |
3 | ### Pods
4 |
5 |
6 | Deploy a pod called web-1985 using the nginx:alpine image
7 |
8 | `kubectl run web-1985 --image=nginx:alpine --restart=Never`
9 |
10 |
11 |
12 | How to find out on which node a certain pod is running?
13 |
14 | `kubectl get po -o wide`
15 |
16 |
--------------------------------------------------------------------------------
/certificates/ckad.md:
--------------------------------------------------------------------------------
1 | ## Certified Kubernetes Application Developer (CKAD)
2 |
3 | ### Core Concepts
4 |
5 | ### Pods
6 |
7 |
8 | Deploy a pod called web-1985 using the nginx:alpine image
9 |
10 | `kubectl run web-1985 --image=nginx:alpine --restart=Never`
11 |
12 |
13 |
14 | How to find out on which node a certain pod is running?
15 |
16 | `kubectl get po -o wide`
17 |
18 |
19 | ### Namespaces
20 |
21 |
22 | List all namespaces
23 |
24 | `kubectl get ns`
25 |
26 |
27 |
28 | List all the pods in the namespace 'neverland'
29 |
30 | `kubectl get po -n neverland`
31 |
32 |
33 |
34 | List all the pods in all the namespaces
35 |
36 | `kubectl get po --all-namespaces`
37 |
38 |
--------------------------------------------------------------------------------
/coding/python/binary_search.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import random
4 | from typing import List
5 |
6 |
7 | def binary_search(arr: List[int], lb: int, ub: int, target: int) -> int:
8 | """
9 | A Binary Search Example which has O(log n) time complexity.
10 | """
11 | if lb <= ub:
12 | mid: int = lb + (ub - lb) // 2
13 | if arr[mid] == target:
14 | return mid
15 | elif arr[mid] < target:
16 | return binary_search(arr, mid + 1, ub, target)
17 | else:
18 | return binary_search(arr, lb, mid - 1, target)
19 | else:
20 | return -1
21 |
22 |
23 | if __name__ == '__main__':
24 | rand_num_li: List[int] = sorted([random.randint(1, 50) for _ in range(10)])
25 | target: int = random.randint(1, 50)
26 | print("List: {}\nTarget: {}\nIndex: {}".format(
27 | rand_num_li, target,
28 | binary_search(rand_num_li, 0, len(rand_num_li) - 1, target)))
29 |
--------------------------------------------------------------------------------
/credits.md:
--------------------------------------------------------------------------------
1 | ## Credits
2 |
3 | Jenkins logo created by Ksenia Nenasheva and published through jenkins.io is licensed under cc by-sa 3.0
4 | Git Logo by Jason Long is licensed under the Creative Commons Attribution 3.0 Unported License
5 | Terraform logo created by Hashicorp®
6 | Docker logo created by Docker®
7 | The Python logo is a trademark of the Python Software Foundation®
8 | Puppet logo created by Puppet®
9 | Bash logo created by Prospect One
10 | OpenStack logo created by and a trademark of The OpenStack Foundation®
11 | Linux, Kubernetes and Prometheus logos are trademarks of The Linux Foundation®
12 | Mongo logo is a trademark of Mongo®
13 | Distributed logo by Flatart
14 | Challenge icon by Elizabeth Arostegui in Technology Mix
15 | "Question you ask" (man raising hand) and "Database" icons by [Webalys](https://www.iconfinder.com/webalys)
16 | Testing logo by [Flatart](https://www.iconfinder.com/Flatart)
17 | Google Cloud Platform logo created by Google®
18 | VirtualBox Logo created by dAKirby309, under the Creative Commons Attribution-Noncommercial 4.0 License.
19 | Certificates logo by Flatart
20 | Storage icon by Dinosoftlab
21 | CI/CD icon made by Freepik from www.flaticon.com
22 |
--------------------------------------------------------------------------------
/exercises/shell/solutions/directories_comparison.md:
--------------------------------------------------------------------------------
1 | ## Directories Comparison
2 |
3 | ### Objectives
4 |
5 | 1. Given two directories as arguments, output any difference between the two directories
6 |
7 | ### Solution
8 |
9 | ```
10 | #!/usr/bin/env bash
11 |
12 |
13 | help () {
14 |     echo "Usage: compare <dir1> <dir2>"
15 | echo
16 | }
17 |
18 | validate_args() {
19 | # Ensure that 2 arguments are passed
20 | if [ $# != 2 ]
21 | then
22 | help
23 | exit 1
24 | fi
25 |
26 | i=1
27 | for dir in "$@"
28 | do
29 | # Validate existence of directories
30 | if [ ! -d "$dir" ]
31 | then
32 | echo "Directory $dir does not exist"
33 | exit 1
34 | fi
35 | echo "Directory $i: $dir"
36 | i=$((i + 1))
37 | done
38 | echo
39 | }
40 |
41 | compare() {
42 | echo "Comparing directories..."
43 | echo
44 | diff -r "$1" "$2"
45 |
46 | if [ $? -eq 0 ]
47 | then
48 | echo "No difference"
49 | fi
50 |
51 | exit 0
52 | }
53 |
54 | while getopts ":h" option; do
55 | case $option in
56 | h) # display Help
57 | help
58 | exit 0;;
59 | \?) # invalid option
60 | echo "Error: Invalid option"
61 | exit 1;;
62 | esac
63 | done
64 |
65 | validate_args "$@"
66 | compare "$1" "$2"
67 |
68 |
69 | ```
--------------------------------------------------------------------------------
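
A quick way to try the solution, assuming it is saved as `compare.sh` (the directory names below are arbitrary):

```
# Two directories that differ by a single file
mkdir -p /tmp/dir_a /tmp/dir_b
echo "only in a" > /tmp/dir_a/extra.txt

chmod +x compare.sh
./compare.sh /tmp/dir_a /tmp/dir_b   # diff -r reports extra.txt only in /tmp/dir_a
./compare.sh /tmp/dir_a              # wrong number of arguments: prints usage, exits 1
```
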
/images/Go.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/Go.png
--------------------------------------------------------------------------------
/images/HR.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/HR.png
--------------------------------------------------------------------------------
/images/ansible.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/ansible.png
--------------------------------------------------------------------------------
/images/aws.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/aws.png
--------------------------------------------------------------------------------
/images/aws/identify_load_balancer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/aws/identify_load_balancer.png
--------------------------------------------------------------------------------
/images/azure.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/azure.png
--------------------------------------------------------------------------------
/images/bash.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/bash.png
--------------------------------------------------------------------------------
/images/big-data.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/big-data.png
--------------------------------------------------------------------------------
/images/certificates.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/certificates.png
--------------------------------------------------------------------------------
/images/cicd.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/cicd.png
--------------------------------------------------------------------------------
/images/cloud.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/cloud.png
--------------------------------------------------------------------------------
/images/containers.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/containers.png
--------------------------------------------------------------------------------
/images/databases.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/databases.png
--------------------------------------------------------------------------------
/images/design.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/design.png
--------------------------------------------------------------------------------
/images/design/cdn-no-downtime.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/design/cdn-no-downtime.png
--------------------------------------------------------------------------------
/images/design/development/git_fsmonitor.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/design/development/git_fsmonitor.png
--------------------------------------------------------------------------------
/images/design/input-process-output.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/design/input-process-output.png
--------------------------------------------------------------------------------
/images/design/producers_consumers_fix.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/design/producers_consumers_fix.png
--------------------------------------------------------------------------------
/images/design/producers_consumers_issue.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/design/producers_consumers_issue.png
--------------------------------------------------------------------------------
/images/devops.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/devops.png
--------------------------------------------------------------------------------
/images/devops_exercises.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/devops_exercises.png
--------------------------------------------------------------------------------
/images/devops_resources.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/devops_resources.png
--------------------------------------------------------------------------------
/images/distributed.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/distributed.png
--------------------------------------------------------------------------------
/images/distributed/distributed_design_lb.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/distributed/distributed_design_lb.png
--------------------------------------------------------------------------------
/images/distributed/distributed_design_standby.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/distributed/distributed_design_standby.png
--------------------------------------------------------------------------------
/images/dns.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/dns.png
--------------------------------------------------------------------------------
/images/elastic.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/elastic.png
--------------------------------------------------------------------------------
/images/exercises.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/exercises.png
--------------------------------------------------------------------------------
/images/general.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/general.png
--------------------------------------------------------------------------------
/images/git.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/git.png
--------------------------------------------------------------------------------
/images/googlecloud.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/googlecloud.png
--------------------------------------------------------------------------------
/images/hardware.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/hardware.png
--------------------------------------------------------------------------------
/images/how_they_devops.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/how_they_devops.png
--------------------------------------------------------------------------------
/images/infraverse.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/infraverse.png
--------------------------------------------------------------------------------
/images/jenkins.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/jenkins.png
--------------------------------------------------------------------------------
/images/jenkins/jenkins-to-kibana.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/jenkins/jenkins-to-kibana.png
--------------------------------------------------------------------------------
/images/kubernetes.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/kubernetes.png
--------------------------------------------------------------------------------
/images/linux_master.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/linux_master.jpeg
--------------------------------------------------------------------------------
/images/logos/argo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/logos/argo.png
--------------------------------------------------------------------------------
/images/logos/circleci.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/logos/circleci.png
--------------------------------------------------------------------------------
/images/logos/kafka.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/logos/kafka.png
--------------------------------------------------------------------------------
/images/logos/linux.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/logos/linux.png
--------------------------------------------------------------------------------
/images/mongo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/mongo.png
--------------------------------------------------------------------------------
/images/monitoring.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/monitoring.png
--------------------------------------------------------------------------------
/images/network.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/network.png
--------------------------------------------------------------------------------
/images/openshift.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/openshift.png
--------------------------------------------------------------------------------
/images/openstack.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/openstack.png
--------------------------------------------------------------------------------
/images/os.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/os.png
--------------------------------------------------------------------------------
/images/perl.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/perl.png
--------------------------------------------------------------------------------
/images/programming.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/programming.png
--------------------------------------------------------------------------------
/images/prometheus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/prometheus.png
--------------------------------------------------------------------------------
/images/puppet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/puppet.png
--------------------------------------------------------------------------------
/images/python.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/python.png
--------------------------------------------------------------------------------
/images/regex.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/regex.png
--------------------------------------------------------------------------------
/images/security.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/security.png
--------------------------------------------------------------------------------
/images/sql.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/sql.png
--------------------------------------------------------------------------------
/images/storage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/storage.png
--------------------------------------------------------------------------------
/images/system_design_notebook.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/system_design_notebook.png
--------------------------------------------------------------------------------
/images/terraform.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/terraform.png
--------------------------------------------------------------------------------
/images/testing.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/testing.png
--------------------------------------------------------------------------------
/images/virtualization.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/virtualization.png
--------------------------------------------------------------------------------
/images/you.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/images/you.png
--------------------------------------------------------------------------------
/scripts/count_questions.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 |
3 | echo $(( $(grep -E "\[Exercise\]|<details>" -c README.md topics/*/README.md | awk -F: '{ s+=$2 } END { print s }' )))
4 |
--------------------------------------------------------------------------------
/scripts/random_question.py:
--------------------------------------------------------------------------------
1 | import random
2 | import optparse
3 |
4 |
5 | def main():
6 | """Reads through README.md for question/answer pairs and adds them to a
7 | list to randomly select from and quiz yourself.
8 |     Supports skipping questions with no documented answer with the -s flag
9 | """
10 | parser = optparse.OptionParser()
11 | parser.add_option("-s", "--skip", action="store_true",
12 | help="skips questions without an answer.",
13 | default=False)
14 | options, args = parser.parse_args()
15 |
16 | with open('README.md', 'r') as f:
17 | text = f.read()
18 |
19 | questions = []
20 |
21 | while True:
22 |         question_start = text.find('<details>') + 9
23 |         question_end = text.find('</summary>')
24 |         answer_end = text.find('</details>')
25 |
26 | if answer_end == -1:
27 | break
28 |
29 |         question = text[question_start: question_end].replace('<summary>', '').replace('\n', '')
30 | answer = text[question_end + 17: answer_end]
31 | questions.append((question, answer))
32 | text = text[answer_end + 1:]
33 |
34 | num_questions = len(questions)
35 |
36 | while True:
37 | try:
38 |             question, answer = questions[random.randint(0, num_questions - 1)]
39 |
40 | if options.skip and not answer.strip():
41 | continue
42 |
43 | if input(f'Q: {question} ...Show answer? "y" for yes: ').lower() == 'y':
44 | print('A: ', answer)
45 |
46 | except KeyboardInterrupt:
47 | break
48 |
49 | print("\nGoodbye! See you next time.")
50 |
51 |
52 | if __name__ == '__main__':
53 | main()
54 |
--------------------------------------------------------------------------------
/scripts/run_ci.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # These are the same steps we are running in Travis CI
3 |
4 | python $(dirname "$0")/../tests/syntax_lint.py
5 | flake8 --max-line-length=100 . && echo "PEP8 Passed"
6 |
--------------------------------------------------------------------------------
/scripts/update_question_number.py:
--------------------------------------------------------------------------------
1 | """
2 | Meant to be used like this:
3 |
4 | python scripts/update_question_number.py
5 |
6 | """
7 | import pathlib
8 | from scripts.question_utils import get_question_list, get_challenges_count
9 |
10 | LINE_FLAG = b":bar_chart:"
11 |
12 | p = pathlib.Path(__file__).parent.parent.joinpath('README.md')
13 |
14 |
15 | with open(p, 'rb') as f:
16 | file = f.readlines()
17 |
18 |
19 | file_list = [line.rstrip() for line in file]
20 |
21 | question_list = get_question_list(file_list)
22 | question_count = len(question_list)
23 | total_count = question_count + get_challenges_count()
24 | print(question_count)
25 | print(get_challenges_count())
26 | print(total_count)
27 | for line in file:
28 | if LINE_FLAG in line:
29 | file[file.index(line)] = b':bar_chart: There are currently **%s** questions\r\n' %\
30 | str(total_count).encode()
31 | break
32 |
33 | with open(p, 'wb') as f:
34 | f.writelines(file)
35 |
--------------------------------------------------------------------------------
/tests/scripts_question_utils_unittest.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from pathlib import Path
3 | from typing import List
4 | from scripts.question_utils import get_answered_questions, get_question_list
5 |
6 |
7 | def open_test_case_file(n: int) -> List[bytes]:
8 | tests_path = Path(__file__).parent.joinpath()
9 |
10 | with open(f'{tests_path}/testcases/testcase{n}.md', 'rb') as f:
11 | file_list = [line.rstrip() for line in f.readlines()]
12 | return file_list
13 |
14 |
15 | class QuestionCount(unittest.TestCase):
16 |
17 | def test_case_1(self):
18 | raw_list = open_test_case_file(1)
19 | question_list = get_question_list(raw_list)
20 | answers = get_answered_questions(question_list)
21 |
22 | self.assertEqual(len(question_list), 11)
23 | self.assertEqual(len(answers), 3)
24 |
25 | def test_case_2(self):
26 | raw_list = open_test_case_file(2)
27 | question_list = get_question_list(raw_list)
28 | answers = get_answered_questions(question_list)
29 |
30 | self.assertEqual(len(question_list), 16)
31 | self.assertEqual(len(answers), 11)
32 |
--------------------------------------------------------------------------------
/tests/syntax_checker_unittest.py:
--------------------------------------------------------------------------------
1 | """
2 | WIP
3 |
4 | Yes, we do write tests for our tests.
5 | """
6 | from pathlib import Path
7 | from typing import List
8 | from unittest import TestCase
9 | from tests import syntax_lint
10 |
11 |
12 | def open_test_case_file(n: int) -> List[bytes]:
13 | tests_path = Path(__file__).parent.joinpath()
14 |
15 | with open(f'{tests_path}/testcases/testcase{n}.md', 'rb') as f:
16 | file_list = [line.rstrip() for line in f.readlines()]
17 | return file_list
18 |
19 |
20 | test_case_1 = open_test_case_file(1)
21 | test_case_2 = open_test_case_file(2)
22 | test_case_3 = open_test_case_file(3)
23 |
24 |
25 | class TestSyntax(TestCase):
26 |
27 | def test_details_count_case1(self):
28 | self.assertTrue(syntax_lint.count_details(test_case_1))
29 |
30 | def test_details_count_case2(self):
31 | self.assertTrue(syntax_lint.count_details(test_case_2))
32 |
33 | def test_details_errors_1(self):
34 | syntax_lint.check_details_tag(test_case_1)
35 | self.assertFalse(syntax_lint.errors)
36 |
37 | def test_details_errors_2(self):
38 | syntax_lint.check_details_tag(test_case_2)
39 | self.assertFalse(syntax_lint.errors)
40 | #
41 | # def test_details_error_exist_1(self):
42 | # syntax_checker.check_details_tag(test_case_3)
43 | # print(syntax_checker.errors)
44 | # self.assertEqual(len(syntax_checker.errors), 3)
45 |
--------------------------------------------------------------------------------
/topics/ansible/my_first_playbook.md:
--------------------------------------------------------------------------------
1 | ## Ansible - My First Playbook
2 |
3 | 1. Write a playbook that will:
4 | a. Install the package zlib
5 | b. Create the file `/tmp/some_file`
6 | 2. Run the playbook on a remote host
7 |
--------------------------------------------------------------------------------
/topics/ansible/my_first_task.md:
--------------------------------------------------------------------------------
1 | ## Ansible - My First Task
2 |
3 | 1. Write a task to create the directory `/tmp/new_directory`
4 |
--------------------------------------------------------------------------------
/topics/ansible/solutions/my_first_playbook.md:
--------------------------------------------------------------------------------
1 | ## My first playbook - Solution
2 |
3 | 1. `vi first_playbook.yml`
4 |
5 | ```
6 | - name: Install zlib and create a file
7 | hosts: some_remote_host
8 | tasks:
9 | - name: Install zlib
10 | package:
11 | name: zlib
12 | state: present
13 | become: yes
14 | - name: Create the file /tmp/some_file
15 | file:
16 | path: '/tmp/some_file'
17 | state: touch
18 | ```
19 |
20 | 2. First, edit the inventory file: `vi /etc/ansible/hosts`
21 |
22 | ```
23 | [some_remote_host]
24 | some.remoted.host.com
25 | ```
26 |
27 | Run the playbook
28 |
29 | `ansible-playbook first_playbook.yml`
30 |
--------------------------------------------------------------------------------
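
Instead of editing the global `/etc/ansible/hosts`, the inventory can also live next to the playbook and be passed explicitly; a sketch, assuming the inventory file is named `hosts`:

```
# Validate the playbook, dry-run it, then apply it
ansible-playbook -i hosts first_playbook.yml --syntax-check
ansible-playbook -i hosts first_playbook.yml --check
ansible-playbook -i hosts first_playbook.yml
```
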
/topics/ansible/solutions/my_first_task.md:
--------------------------------------------------------------------------------
1 | ## My First Task - Solution
2 |
3 | ```
4 | - name: Create a new directory
5 | file:
6 | path: "/tmp/new_directory"
7 | state: directory
8 | ```
9 |
--------------------------------------------------------------------------------
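
The same directory can also be created without a playbook, as an ad-hoc command (the `all` host pattern is an assumption; use whatever group your inventory defines):

```
# Ad-hoc equivalent of the task above
ansible all -m file -a "path=/tmp/new_directory state=directory"
```
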
/topics/ansible/solutions/update_upgrade_task.md:
--------------------------------------------------------------------------------
1 | ## Update and Upgrade apt packages task - Solution
2 |
3 | ```
4 | - name: "update and upgrade apt packages."
5 | become: yes
6 | apt:
7 | upgrade: yes
8 | update_cache: yes
9 | ```
10 |
--------------------------------------------------------------------------------
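
For Debian/Ubuntu hosts, an ad-hoc equivalent would look roughly like this (`all` is an assumed host pattern):

```
# -b (become) escalates privileges, matching "become: yes" in the task
ansible all -b -m apt -a "upgrade=yes update_cache=yes"
```
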
/topics/ansible/update_upgrade_task.md:
--------------------------------------------------------------------------------
1 | ## Ansible - Update and upgrade APT packages task
2 |
3 | 1. Write a task to update and upgrade apt packages
4 |
--------------------------------------------------------------------------------
/topics/aws/exercises/access_advisor/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS IAM - Access Advisor
2 |
3 | ### Objectives
4 |
5 | Go to the Access Advisor and answer the following questions regarding one of the users:
6 |
7 | 1. Are there services this user never accessed?
8 | 2. What was the last service the user has accessed?
9 | 3. What is the Access Advisor used for?
10 |
11 | ## Solution
12 |
13 | Click [here to view the solution](solution.md)
--------------------------------------------------------------------------------
/topics/aws/exercises/access_advisor/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS IAM - Access Advisor
2 |
3 | ### Objectives
4 |
5 | Go to the Access Advisor and answer the following questions regarding one of the users:
6 |
7 | 1. Are there services this user never accessed?
8 | 2. What was the last service the user has accessed?
9 | 3. What is the Access Advisor used for?
10 |
11 | ### Solution
12 |
13 | 1. Go to AWS IAM service and click on "Users" under "Access Management"
14 | 2. Click on one of the users
15 | 3. Click on the "Access Advisor" tab
16 | 4. Check which service was last accessed and which was never accessed
17 |
18 | Access Advisor is good for evaluating whether there are services the user accesses rarely or never. This can help in deciding whether some permissions should be revoked or modified.
19 |
--------------------------------------------------------------------------------
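
The same last-accessed data can be pulled with the AWS CLI instead of the console; a sketch, with a placeholder user ARN:

```
# Start a service-last-accessed report job for one IAM user
JOB_ID=$(aws iam generate-service-last-accessed-details \
  --arn arn:aws:iam::123456789012:user/some-user \
  --query 'JobId' --output text)

# Retrieve the report once the job completes
aws iam get-service-last-accessed-details --job-id "$JOB_ID"
```
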
/topics/aws/exercises/alb_multiple_target_groups/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS ELB - ALB Multiple Target Groups
2 |
3 | ### Requirements
4 |
5 | Two EC2 instances with a simple web application that shows the web page with the string "Hey, it's a me, `<instance ID>`!"
6 | One EC2 instance with a simple web application that shows the web page with the string "Hey, it's only a test..." under the endpoint /test
7 |
8 | ### Objectives
9 |
10 | 1. Create an application load balancer for the two instances you have, with the following properties
11 | 1. healthy threshold: 3
12 | 2. unhealthy threshold: 3
13 | 3. interval: 10 seconds
14 | 2. Create another target group for the third instance
15 | 1. Traffic should be forwarded to this group based on the "/test" path
16 |
--------------------------------------------------------------------------------
/topics/aws/exercises/alb_multiple_target_groups/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS ELB - ALB Multiple Target Groups
2 |
3 | ### Requirements
4 |
5 | Two EC2 instances with a simple web application that shows the web page with the string "Hey, it's a me, `<instance ID>`!"
6 | One EC2 instance with a simple web application that shows the web page with the string "Hey, it's only a test..." under the endpoint /test
7 |
8 | ### Objectives
9 |
10 | 1. Create an application load balancer for the two instances you have, with the following properties
11 | 1. healthy threshold: 3
12 | 2. unhealthy threshold: 3
13 | 3. interval: 10 seconds
14 | 2. Create another target group for the third instance
15 | 1. Traffic should be forwarded to this group based on the "/test" path
16 |
17 | ### Solution
18 |
19 | #### Console
20 |
21 | 1. Go to EC2 service
22 | 2. Click in the left side menu on "Load balancers" under "Load balancing"
23 | 3. Click on "Create load balancer"
24 | 4. Choose "Application Load Balancer"
25 | 5. Insert a name for the LB
26 | 6. Choose an AZ where you want the LB to operate
27 | 7. Choose a security group
28 | 8. Under "Listeners and routing" click on "Create target group" and choose "Instances"
29 | 1. Provide a name for the target group
30 | 2. Set healthy threshold to 3
31 | 3. Set unhealthy threshold to 3
32 | 4. Set interval to 10 seconds
33 | 5. Click on "Next" and choose two out of three instances you've created
34 | 6. Click on "Create target group"
35 | 9. Refresh target groups and choose the one you've just created
36 | 10. Click on "Create load balancer" and wait for it to be provisioned
37 |
38 | 11. In the left side menu click on "Target Groups" under "Load Balancing"
39 | 12. Click on "Create target group"
40 | 13. Set it up with the same properties as the previous target group, but this time add the third instance that you didn't include in the previous target group
41 | 14. Go back to your ALB and under "Listeners" click on "Edit rules" under your current listener
42 | 1. Add a rule where if the path is "/test" then traffic should be forwarded to the second target group you've created
43 | 2. Click on "Save"
44 | 15. Test it by going to the browser and inserting the load balancer address with "/test" appended to it
45 |
--------------------------------------------------------------------------------
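
The path-based rule from steps 14-15 can also be created with the AWS CLI; a sketch, with placeholder ARNs held in variables:

```
# Forward requests whose path matches /test to the second target group
aws elbv2 create-rule \
  --listener-arn "$LISTENER_ARN" \
  --priority 10 \
  --conditions Field=path-pattern,Values='/test' \
  --actions Type=forward,TargetGroupArn="$TEST_TG_ARN"
```
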
/topics/aws/exercises/app_load_balancer/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS ELB - Application Load Balancer
2 |
3 | ### Requirements
4 |
5 | Two EC2 instances with a simple web application that shows the web page with the string "Hey, it's a me, `<instance ID>`!"
6 |
7 | ### Objectives
8 |
9 | 1. Create an application load balancer for the two instances you have, with the following properties
10 | 1. healthy threshold: 3
11 | 2. unhealthy threshold: 3
12 | 3. interval: 10 seconds
13 | 2. Verify load balancer is working (= you get reply from both instances at different times)
14 |
--------------------------------------------------------------------------------
/topics/aws/exercises/app_load_balancer/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS ELB - Application Load Balancer
2 |
3 | ### Requirements
4 |
5 | Two EC2 instances with a simple web application that shows the web page with the string "Hey, it's a me, `<instance ID>`!"
6 |
7 | ### Objectives
8 |
9 | 1. Create an application load balancer for the two instances you have, with the following properties
10 | 1. healthy threshold: 3
11 | 2. unhealthy threshold: 3
12 | 3. interval: 10 seconds
13 | 2. Verify load balancer is working (= you get reply from both instances at different times)
14 |
15 | ### Solution
16 |
17 | #### Console
18 |
19 | 1. Go to EC2 service
20 | 2. Click in the left side menu on "Load balancers" under "Load balancing"
21 | 3. Click on "Create load balancer"
22 | 4. Choose "Application Load Balancer"
23 | 5. Insert a name for the LB
24 | 6. Choose an AZ where you want the LB to operate
25 | 7. Choose a security group
26 | 8. Under "Listeners and routing" click on "Create target group" and choose "Instances"
27 | 1. Provide a name for the target group
28 | 2. Set healthy threshold to 3
29 | 3. Set unhealthy threshold to 3
30 | 4. Set interval to 10 seconds
31 |    5. Click on "Next" and choose the two instances you've created
32 | 6. Click on "Create target group"
33 | 9. Refresh target groups and choose the one you've just created
34 | 10. Click on "Create load balancer" and wait for it to be provisioned
35 | 11. Copy the DNS address and paste it in the browser. If you refresh, you should see a different message depending on which instance the traffic was routed to
36 |
--------------------------------------------------------------------------------
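
The health-check properties from step 8 map one-to-one to AWS CLI flags; a sketch, with a placeholder name and VPC ID:

```
# Target group with the thresholds and interval used above
aws elbv2 create-target-group \
  --name web-tg \
  --protocol HTTP --port 80 \
  --vpc-id "$VPC_ID" \
  --healthy-threshold-count 3 \
  --unhealthy-threshold-count 3 \
  --health-check-interval-seconds 10
```
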
/topics/aws/exercises/asg_dynamic_scaling_policy/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS Auto Scaling Groups - Dynamic Scaling Policy
2 |
3 | ### Requirements
4 |
5 | 1. Existing Auto Scaling Group with maximum capacity set to at least 3
6 | 2. One running EC2 instance with a maximum of 4 CPUs
7 |
8 | ### Objectives
9 |
10 | 1. Create a dynamic scaling policy with the following properties
11 | 1. Track average CPU utilization
12 | 2. Target value should be 70%
13 | 2. Increase the CPU utilization to at least 70%
14 |    1. Do you see a change in the number of instances?
15 | 3. Decrease CPU utilization to less than 70%
16 |    1. Do you see a change in the number of instances?
17 |
--------------------------------------------------------------------------------
/topics/aws/exercises/asg_dynamic_scaling_policy/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS Auto Scaling Groups - Dynamic Scaling Policy
2 |
3 | ### Requirements
4 |
5 | 1. Existing Auto Scaling Group with maximum capacity set to at least 3
6 | 2. One running EC2 instance with max of 4 CPUs
7 |
8 | ### Objectives
9 |
10 | 1. Create a dynamic scaling policy with the following properties
11 | 1. Track average CPU utilization
12 | 2. Target value should be 70%
13 | 2. Increase the CPU utilization to at least 70%
14 | 1. Do you see change in number of instances?
15 | 1. Decrease CPU utilization to less than 70%
16 | 1. Do you see change in number of instances?
17 |
18 | ### Solution
19 |
20 | #### Console
21 |
22 | 1. Go to EC2 service -> Auto Scaling Groups and click on the "Automatic scaling" tab
23 | 2. Choose "Target tracking scaling" under "Policy Type"
24 | 3. Set metric type to Average CPU utilization
25 | 4. Set target value to 70% and click on "Create"
26 |
27 | 1. If you are using Amazon Linux 2, you can stress the instance with the following:
28 |
29 | ```
30 | sudo amazon-linux-extras install epel -y
31 | sudo yum install stress -y
32 | stress -c 4 # assuming you have 4 CPUs
33 | ```
34 | 2. Yes, an additional EC2 instance was added
35 |
36 | 1. Simply stop the stress command
37 | 2. Yes, one of the EC2 instances was terminated
38 |
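39 | #### CLI
40 |
41 | Creating the same policy from the CLI should look similar to this sketch (the group name is a placeholder):
42 |
43 | ```
44 | aws autoscaling put-scaling-policy --auto-scaling-group-name my-asg \
45 |     --policy-name cpu-target-70 --policy-type TargetTrackingScaling \
46 |     --target-tracking-configuration \
47 |     '{"PredefinedMetricSpecification": {"PredefinedMetricType": "ASGAverageCPUUtilization"}, "TargetValue": 70.0}'
48 | ```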
--------------------------------------------------------------------------------
/topics/aws/exercises/aurora_db/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS Databases - Aurora DB
2 |
3 | ### Objectives
4 |
5 | 1. Create an Aurora database with the following properties
6 | * Edition: MySQL
7 | * Instance type: db.t3.small
8 | * A reader node in a different AZ
9 | * Public access should be enabled
10 | * Port should be set to 3306
11 | * DB name: 'db'
12 | * Backup retention: 10 days
13 |
14 | 2. How many instances does your DB cluster have?
15 |
--------------------------------------------------------------------------------
/topics/aws/exercises/aurora_db/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS Databases - Aurora DB
2 |
3 | ### Objectives
4 |
5 | 1. Create an Aurora database with the following properties
6 | * Edition: MySQL
7 | * Instance type: db.t3.small
8 | * A reader node in a different AZ
9 | * Public access should be enabled
10 | * Port should be set to 3306
11 | * DB name: 'db'
12 | * Backup retention: 10 days
13 |
14 | 2. How many instances does your DB cluster have?
15 |
16 | ### Solution
17 |
18 | #### Console
19 |
20 | 1. Go to RDS service
21 | 2. Click on "Databases" in the left side menu and click on the "Create database" button
22 | 3. Choose "standard create"
23 | 4. Choose "Aurora DB"
24 | 5. Choose "MySQL" edition and "Provisioned" as capacity type
25 | 6. Choose "single-master"
26 | 7. Specify Credentials (master username and password)
27 | 8. Choose DB instance type: Burstable classes, db.t3.small
28 | 9. Choose "Create an Aurora Replica or Reader node in a different AZ"
29 | 10. Choose a default VPC and subnet
30 | 11. Check "Yes" for public access
31 | 12. Database port should be 3306
32 | 13. For authentication, choose "Password and IAM database authentication"
33 | 14. Set initial database name as "db"
34 | 15. Increase backup retention period to 10 days
35 | 16. Click on "Create database" button
36 |
37 | 1. Two instances - one reader and one writer
38 |
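39 | #### CLI
40 |
41 | A rough CLI sketch of the same setup (identifiers and credentials are placeholders; the second create-db-instance call adds the reader in another AZ):
42 |
43 | ```
44 | aws rds create-db-cluster --db-cluster-identifier my-aurora --engine aurora-mysql \
45 |     --master-username admin --master-user-password <password> \
46 |     --database-name db --port 3306 --backup-retention-period 10
47 | aws rds create-db-instance --db-instance-identifier my-aurora-writer \
48 |     --db-cluster-identifier my-aurora --engine aurora-mysql \
49 |     --db-instance-class db.t3.small --publicly-accessible
50 | aws rds create-db-instance --db-instance-identifier my-aurora-reader \
51 |     --db-cluster-identifier my-aurora --engine aurora-mysql \
52 |     --db-instance-class db.t3.small --publicly-accessible --availability-zone <other-az>
53 | ```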
--------------------------------------------------------------------------------
/topics/aws/exercises/auto_scaling_groups_basics/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS Auto Scaling Groups - Basics
2 |
3 | ### Requirements
4 |
5 | Zero EC2 instances running
6 |
7 | ### Objectives
8 |
9 | A. Create a scaling group for web servers with the following properties:
10 | * Amazon Linux 2 AMI
11 | * t2.micro as the instance type
12 | * user data:
13 | ```
14 | yum install -y httpd
15 | systemctl start httpd
16 | systemctl enable httpd
17 | ```
18 |
19 | B. Were new instances created since you created the auto scaling group? How many? Why?
20 | C. Change desired capacity to 2. Did it launch more instances?
21 | D. Change back the desired capacity to 1. What is the result of this action?
22 |
--------------------------------------------------------------------------------
/topics/aws/exercises/auto_scaling_groups_basics/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS Auto Scaling Groups - Basics
2 |
3 | ### Requirements
4 |
5 | Zero EC2 instances running
6 |
7 | ### Objectives
8 |
9 | A. Create a scaling group for web servers with the following properties:
10 | * Amazon Linux 2 AMI
11 | * t2.micro as the instance type
12 | * user data:
13 | ```
14 | yum install -y httpd
15 | systemctl start httpd
16 | systemctl enable httpd
17 | ```
18 |
19 | B. Were new instances created since you created the auto scaling group? How many? Why?
20 | C. Change desired capacity to 2. Did it launch more instances?
21 | D. Change back the desired capacity to 1. What is the result of this action?
22 |
23 | ### Solution
24 |
25 | #### Console
26 |
27 | A.
28 | 1. Go to EC2 service
29 | 2. Click on "Auto Scaling Groups" under "Auto Scaling"
30 | 3. Click on "Create Auto Scaling Group"
31 | 4. Insert a name
32 | 5. Click on "Create a launch template"
33 | 1. Insert a name and a version for the template
34 | 2. Select an AMI to use (Amazon Linux 2)
35 | 3. Select t2.micro instance type
36 | 4. Select a key pair
37 | 5. Attach a security group
38 | 6. Under "Advanced" insert the user data
39 | 7. Click on "Create"
40 | 6. Choose the launch template you've just created and click on "Next"
41 | 7. Choose "Adhere to launch template"
42 | 8. Choose in which AZs to launch and click on "Next"
43 | 9. Link it to ALB (if you don't have one, create it)
44 | 10. Mark ELB health check in addition to EC2. Click on "Next" until you reach the review page and click on "Create auto scaling group"
45 |
46 | B. One instance was launched to meet the criteria of the auto scaling group we've created. The reason it launched only one is that "Desired capacity" is set to 1.
47 | C. Change it by going to your auto scaling group -> Details -> Edit -> "2 desired capacity". This should create another instance if only one is running
48 | D. Reducing desired capacity back to 1 will terminate one of the instances (assuming 2 are running).
49 |
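50 | #### CLI
51 |
52 | A rough CLI sketch of the same flow (template.json is a hypothetical file holding the launch template data; note the user data must be base64-encoded inside it):
53 |
54 | ```
55 | aws ec2 create-launch-template --launch-template-name web \
56 |     --launch-template-data file://template.json
57 | aws autoscaling create-auto-scaling-group --auto-scaling-group-name web-asg \
58 |     --launch-template LaunchTemplateName=web \
59 |     --min-size 1 --max-size 3 --desired-capacity 1 \
60 |     --vpc-zone-identifier "subnet-xxxx,subnet-yyyy"
61 | # Changing the desired capacity (objectives C and D):
62 | aws autoscaling set-desired-capacity --auto-scaling-group-name web-asg --desired-capacity 2
63 | aws autoscaling set-desired-capacity --auto-scaling-group-name web-asg --desired-capacity 1
64 | ```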
--------------------------------------------------------------------------------
/topics/aws/exercises/basic_s3_ci/exercise.md:
--------------------------------------------------------------------------------
1 | # Basic CI with S3
2 |
3 | ## Objectives
4 |
5 | 1. Create a new S3 bucket
6 | 2. Add an index.html file to the bucket and make it a static website
7 | 3. Create a GitHub repo and put the index.html there
8 | 4. Make sure to connect your AWS account to GitHub
9 | 5. Create a CI pipeline in AWS to publish the updated index.html from GitHub to the bucket every time someone pushes a change to a specific branch of the repo
10 |
--------------------------------------------------------------------------------
/topics/aws/exercises/basic_s3_ci/solution.md:
--------------------------------------------------------------------------------
1 | # Basic CI with S3
2 |
3 | ## Objectives
4 |
5 | 1. Create a new S3 bucket
6 | 2. Add an index.html file to the bucket and make it a static website
7 | 3. Create a GitHub repo and put the index.html there
8 | 4. Make sure to connect your AWS account to GitHub
9 | 5. Create a CI pipeline in AWS to publish the updated index.html from GitHub to the bucket every time someone pushes a change to a specific branch of the repo
10 |
11 | ## Solution
12 |
13 | ### Manual
14 |
15 | #### Create S3 bucket
16 |
17 | 1. Go to S3 service in AWS console
18 | 2. Insert bucket name and choose region
19 | 3. Uncheck "block public access" to make it public
20 | 4. Click on "Create bucket"
21 |
22 | #### Static website hosting
23 |
24 | 1. Navigate to the newly created bucket and click on "properties" tab
25 | 2. Click on "Edit" in "Static Website Hosting" section
26 | 3. Check "Enable" for "Static web hosting"
27 | 4. Set "index.html" as index document and "error.html" as error document.
28 |
29 | #### S3 bucket permissions
30 |
31 | 1. Click on "Permissions" tab in the newly created S3 bucket
32 | 2. Click on Bucket Policy -> Edit -> Policy Generator. Click on "Generate Policy" for "GetObject"
33 | 3. Copy the generated policy, go back to the "Permissions" tab and replace the current policy with the generated one
34 |
35 | #### GitHub Source
36 |
37 | 1. Go to the Developer Tools console and create a new connection (GitHub)
38 |
39 | #### Create a CI pipeline
40 |
41 | 1. Go to CodePipeline in AWS console
42 | 2. Click on "Create Pipeline" -> Insert a pipeline name -> Click on Next
43 | 3. Choose the newly created source (GitHub) under sources
44 | 4. Select repository name and branch name
45 | 5. Select "AWS CodeBuild" as build provider
46 | 6. Select "Managed Image", "standard" runtime and "new service role"
47 | 7. In deploy stage choose the newly created S3 bucket and for deploy provider choose "Amazon S3"
48 | 8. Review the pipeline and click on "Create pipeline"
49 |
50 | #### Test the pipeline
51 |
52 | 1. Clone the project from GitHub
53 | 2. Make changes to index.html and commit them (git commit -a)
54 | 3. Push the new change, verify that the newly created AWS pipeline was triggered and check the content of the site
55 |
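56 | ### CLI
57 |
58 | The bucket-related steps from the CLI should look similar to this sketch (my-ci-bucket is a placeholder; bucket names must be globally unique):
59 |
60 | ```
61 | aws s3 mb s3://my-ci-bucket --region eu-west-1
62 | aws s3 website s3://my-ci-bucket --index-document index.html --error-document error.html
63 | aws s3 cp index.html s3://my-ci-bucket/
64 | ```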
--------------------------------------------------------------------------------
/topics/aws/exercises/budget_setup/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS - Budget Setup
2 |
3 | ### Objectives
4 |
5 | Setup a cost budget in your AWS account based on your needs.
6 |
--------------------------------------------------------------------------------
/topics/aws/exercises/budget_setup/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS - Budget Setup
2 |
3 | ### Objectives
4 |
5 | Setup a cost budget in your AWS account based on your needs.
6 |
7 | ### Solution
8 |
9 | 1. Go to "Billing"
10 | 2. Click on "Budgets" in the menu
11 | 3. Click on "Create a budget"
12 | 4. Choose "Cost Budget" and click on "Next"
13 | 5. Choose the values that work for you. For example, recurring monthly budget with a specific amount
14 | 6. Insert a budget name and Click on "Next"
15 | 7. Set up an alert by clicking on "Add an alert threshold"
16 | 1. Set a threshold (e.g. 75% of budgeted amount)
17 | 2. Set an email where a notification will be sent
18 | 8. Click on "Next" until you can click on "Create a budget"
19 |
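20 | From the CLI, creating a budget should look similar to the following sketch, where budget.json and notifications.json are hypothetical files holding the budget definition and the alert subscribers:
21 |
22 | ```
23 | aws budgets create-budget --account-id <account-id> \
24 |     --budget file://budget.json \
25 |     --notifications-with-subscribers file://notifications.json
26 | ```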
--------------------------------------------------------------------------------
/topics/aws/exercises/create_ami/exercise.md:
--------------------------------------------------------------------------------
1 | ## EC2 - Create an AMI
2 |
3 | ### Requirements
4 |
5 | One running EC2 instance
6 |
7 | ### Objectives
8 |
9 | 1. Make some changes in the operating system of your instance (create files, modify files, ...)
10 | 2. Create an AMI image from running EC2 instance
11 | 3. Launch a new instance using the custom AMI you've created
12 |
--------------------------------------------------------------------------------
/topics/aws/exercises/create_ami/solution.md:
--------------------------------------------------------------------------------
1 | ## EC2 - Create an AMI
2 |
3 | ### Requirements
4 |
5 | One running EC2 instance
6 |
7 | ### Objectives
8 |
9 | 1. Make some changes in the operating system of your instance (create files, modify files, ...)
10 | 2. Create an AMI image from running EC2 instance
11 | 3. Launch a new instance using the custom AMI you've created
12 |
13 | ### Solution
14 |
15 | 1. Connect to your EC2 instance (ssh, console, ...)
16 | 2. Make some changes in the operating system
17 | 3. Go to EC2 service
18 | 4. Right click on the instance where you made some changes -> Image and templates -> Create image
19 | 5. Give the image a name and click on "Create image"
20 | 6. Launch new instance and choose the image you've just created
21 |
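22 | From the CLI, the same flow should look similar to (the instance and AMI IDs are placeholders):
23 |
24 | ```
25 | aws ec2 create-image --instance-id i-xxxx --name my-custom-ami
26 | # Once the image is available, launch a new instance from it:
27 | aws ec2 run-instances --image-id ami-xxxx --instance-type t2.micro
28 | ```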
--------------------------------------------------------------------------------
/topics/aws/exercises/create_efs/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS - Create EFS
2 |
3 | ### Requirements
4 |
5 | Two EC2 instances in different availability zones
6 |
7 | ### Objectives
8 |
9 | 1. Create an EFS with the following properties
10 | 1. Set lifecycle management to 60 days
11 | 2. The mode should match a use case of scaling to high levels of throughput and I/O operations per second
12 | 2. Mount the EFS in both of your EC2 instances
13 |
--------------------------------------------------------------------------------
/topics/aws/exercises/create_efs/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS - Create EFS
2 |
3 | ### Requirements
4 |
5 | Two EC2 instances in different availability zones
6 |
7 | ### Objectives
8 |
9 | 1. Create an EFS with the following properties
10 | 1. Set lifecycle management to 60 days
11 | 2. The mode should match a use case of scaling to high levels of throughput and I/O operations per second
12 | 2. Mount the EFS in both of your EC2 instances
13 |
14 | ### Solution
15 |
16 | 1. Go to EFS console
17 | 2. Click on "Create file system"
18 | 3. Click on "Customize"
19 | 1. Set lifecycle management to "60 days since last access"
20 | 2. Set Performance mode to "MAX I/O" due to the requirement of "Scaling to high levels of throughput"
21 | 3. Click on "Next"
22 | 4. Choose security group to attach (if you don't have any, create one and make sure it has a rule to allow NFS traffic) and click on "Next" until you are able to review and create it
23 | 5. SSH into your EC2 instances
24 | 1. Run `sudo yum install -y amazon-efs-utils`
25 | 2. Run `mkdir efs`
26 | 3. If you go to your EFS page and click on "Attach", you can see the different ways to mount the EFS on your instances
27 | 1. The command to mount the EFS should be similar to `sudo mount -t efs -o tls <fs-id>:/ efs` - copy and paste it in your EC2 instance's OS
28 |
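29 | From the CLI, creating the file system should look similar to this sketch (IDs are placeholders; create-mount-target is repeated once per AZ):
30 |
31 | ```
32 | aws efs create-file-system --performance-mode maxIo --tags Key=Name,Value=my-efs
33 | aws efs put-lifecycle-configuration --file-system-id fs-xxxx \
34 |     --lifecycle-policies TransitionToIA=AFTER_60_DAYS
35 | aws efs create-mount-target --file-system-id fs-xxxx \
36 |     --subnet-id subnet-xxxx --security-groups sg-xxxx
37 | ```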
--------------------------------------------------------------------------------
/topics/aws/exercises/create_role/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS - Create a Role
2 |
3 | ### Objectives
4 |
5 | Create a basic role to provide EC2 service with Full IAM access permissions.
6 | In the end, run from the CLI (or CloudShell) the command to verify the role was created.
7 |
8 | ### Solution
9 |
10 | Click [here](solution.md) to view the solution
--------------------------------------------------------------------------------
/topics/aws/exercises/create_role/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS - Create a Role
2 |
3 | ### Objectives
4 |
5 | Create a basic role to provide EC2 service with Full IAM access permissions.
6 | In the end, run from the CLI (or CloudShell) the command to verify the role was created.
7 |
8 | ### Solution
9 |
10 | 1. Go to AWS console -> IAM
11 | 2. Click in the left side menu on "Access Management" -> Roles
12 | 3. Click on "Create role"
13 | 4. Choose "AWS service" as the type of trusted entity and then choose "EC2" as a use case. Click on "Next"
14 | 5. In the permissions page, check "IAMFullAccess" and click on "Next" until you get to the "Review" page
15 | 6. In the "Review" page, give the role a name (e.g. IAMFullAccessEC2), provide a short description and click on "Create role"
16 | 7. `aws iam list-roles` will list all the roles in the account, including the one we've just created.
17 |
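18 | From the CLI, the same role can be created with something similar to the following sketch, where trust.json is a hypothetical file holding the standard EC2 trust policy:
19 |
20 | ```
21 | cat > trust.json <<EOF
22 | {
23 |   "Version": "2012-10-17",
24 |   "Statement": [{"Effect": "Allow", "Principal": {"Service": "ec2.amazonaws.com"}, "Action": "sts:AssumeRole"}]
25 | }
26 | EOF
27 | aws iam create-role --role-name IAMFullAccessEC2 --assume-role-policy-document file://trust.json
28 | aws iam attach-role-policy --role-name IAMFullAccessEC2 \
29 |     --policy-arn arn:aws:iam::aws:policy/IAMFullAccess
30 | ```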
--------------------------------------------------------------------------------
/topics/aws/exercises/create_spot_instances/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - Spot Instances
2 | ### Objectives
3 |
4 | A. Create two Spot instances using a Spot Request with the following properties:
5 |
6 | * Amazon Linux 2 AMI
7 | * 2 instances as target capacity (at any given point of time) while each one has 2 vCPUs and 3 GiB RAM
8 |
9 | B. Create a single Spot instance using Amazon Linux 2 and t2.micro
10 |
--------------------------------------------------------------------------------
/topics/aws/exercises/create_spot_instances/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - Spot Instances
2 | ### Objectives
3 |
4 | A. Create two Spot instances using a Spot Request with the following properties:
5 |
6 | * Amazon Linux 2 AMI
7 | * 2 instances as target capacity (at any given point of time) while each one has 2 vCPUs and 3 GiB RAM
8 |
9 | B. Create a single Spot instance using Amazon Linux 2 and t2.micro
10 |
11 | ### Solution
12 |
13 | A. Create Spot Fleets:
14 |
15 | 1. Go to EC2 service
16 | 2. Click on "Spot Requests"
17 | 3. Click on "Request Spot Instances" button
18 | 4. Set the following values for parameters:
19 | * Amazon Linux 2 AMI
20 | * Total target capacity -> 2
21 | * Check "Maintain target capacity"
22 | * vCPUs: 2
23 | * Memory: 3 GiB RAM
24 | 5. Click on Launch
25 |
26 | B. Create a single Spot instance:
27 |
28 | 1. Go to EC2 service
29 | 2. Click on "Instances"
30 | 3. Click on "Launch Instances"
31 | 4. Choose "Amazon Linux 2 AMI" and click on "Next"
32 | 5. Choose t2.micro and click on "Next: Configure Instance Details"
33 | 6. Select "Request Spot instances"
34 | 7. Set Maximum price above current price
35 | 8. Click on "Review and Launch"
36 |
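37 | From the CLI, part B should look similar to the sketch below (the AMI ID is a placeholder; part A would use `aws ec2 request-spot-fleet` with a config file):
38 |
39 | ```
40 | aws ec2 run-instances --image-id ami-xxxx --instance-type t2.micro \
41 |     --instance-market-options MarketType=spot
42 | ```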
--------------------------------------------------------------------------------
/topics/aws/exercises/create_user/exercise.md:
--------------------------------------------------------------------------------
1 | ## IAM AWS - Create a User
2 |
3 | ### Objectives
4 |
5 | As you probably know at this point, it's not recommended to work with the root account in AWS. For this reason, you are going to create a new user which you'll use regularly as the admin user.
6 |
7 | 1. Create a user with password credentials
8 | 2. Add the newly created user to a group called "admin" and attach to it the policy called "Administrator Access"
9 | 3. Make sure the user has a tag with the key `Role` and the value `DevOps`
10 |
--------------------------------------------------------------------------------
/topics/aws/exercises/create_user/solution.md:
--------------------------------------------------------------------------------
1 | ## IAM AWS - Create a User
2 |
3 | ### Objectives
4 |
5 | As you probably know at this point, it's not recommended to work with the root account in AWS. For this reason, you are going to create a new user which you'll use regularly as the admin user.
6 |
7 | 1. Create a user with password credentials
8 | 2. Add the newly created user to a group called "admin" and attach to it the policy called "Administrator Access"
9 | 3. Make sure the user has a tag with the key `Role` and the value `DevOps`
10 |
11 |
12 | ### Solution
13 |
14 | 1. Go to the AWS IAM service
15 | 2. Click on "Users" in the right side menu (right under "Access Management")
16 | 3. Click on the button "Add users"
17 | 4. Insert the user name (e.g. mario)
18 | 5. Select the credential type: "Password"
19 | 6. Set console password to custom and click on "Next"
20 | 7. Click on "Add user to group"
21 | 8. Insert "admin" as group name
22 | 9. Check the "AdministratorAccess" policy and click on "Create group"
23 | 10. Click on "Next: Tags"
24 | 11. Add a tag with the key `Role` and the value `DevOps`
25 | 12. Click on "Review" and then click on "Create user"
26 |
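27 | From the CLI, the same flow should look similar to this sketch:
28 |
29 | ```
30 | aws iam create-user --user-name mario --tags Key=Role,Value=DevOps
31 | aws iam create-login-profile --user-name mario --password '<password>'
32 | aws iam create-group --group-name admin
33 | aws iam attach-group-policy --group-name admin \
34 |     --policy-arn arn:aws:iam::aws:policy/AdministratorAccess
35 | aws iam add-user-to-group --group-name admin --user-name mario
36 | ```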
--------------------------------------------------------------------------------
/topics/aws/exercises/creating_records/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS Route 53 - Creating Records
2 |
3 | ### Requirements
4 |
5 | At least one registered domain
6 |
7 | ### Objectives
8 |
9 | 1. Create the following record for your domain:
10 | 1. Record name: foo
11 | 2. Record type: A
12 | 3. Set some IP in the value field
13 |
14 | 2. Verify from the shell that you are able to use the record you've created to look up the IP address using the domain name
15 |
--------------------------------------------------------------------------------
/topics/aws/exercises/creating_records/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS Route 53 - Creating Records
2 |
3 | ### Requirements
4 |
5 | At least one registered domain
6 |
7 | ### Objectives
8 |
9 | 1. Create the following record for your domain:
10 | 1. Record name: foo
11 | 2. Record type: A
12 | 3. Set some IP in the value field
13 |
14 | 2. Verify from the shell that you are able to use the record you've created to look up the IP address using the domain name
15 |
16 | ### Solution
17 |
18 | 1. Go to Route 53 service -> Hosted zones
19 | 2. Click on your domain name
20 | 3. Click on "Create record"
21 | 4. Insert "foo" in "Record name"
22 | 5. Set "Record type" to A
23 | 6. In "Value" insert "201.7.20.22"
24 | 7. Click on "Create records"
25 |
26 | 1. In your shell, type `nslookup foo.` or `dig foo.` (appending your domain name) to verify the record resolves to the IP address you've set
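27 |
28 | From the CLI, creating the record should look similar to this sketch (the hosted zone ID is a placeholder and example.com stands in for your domain):
29 |
30 | ```
31 | aws route53 change-resource-record-sets --hosted-zone-id <zone-id> --change-batch \
32 | '{"Changes": [{"Action": "CREATE", "ResourceRecordSet": {"Name": "foo.example.com",
33 |   "Type": "A", "TTL": 300, "ResourceRecords": [{"Value": "201.7.20.22"}]}}]}'
34 | ```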
--------------------------------------------------------------------------------
/topics/aws/exercises/ebs_volume_creation/solution.md:
--------------------------------------------------------------------------------
25 | 6. Click on "Actions" -> Attach volume -> choose your EC2 instance and click on "Attach"
26 | 7. Terminate your instance
27 | 8. The default EBS volume (created when you launched the instance for the first time) will be deleted (unless you unchecked "Delete on termination"), but the volume you've created as part of this exercise will remain
28 |
29 | Note: don't forget to remove the EBS volume you've created in this exercise
30 |
--------------------------------------------------------------------------------
/topics/aws/exercises/ec2_iam_roles/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - IAM Roles
2 |
3 | ### Requirements
4 |
5 | 1. Running EC2 instance without any IAM roles (so if you connect to the instance and try to run AWS commands, it fails)
6 | 2. IAM role with "IAMReadOnlyAccess" policy
7 |
8 | ### Objectives
9 |
10 | 1. Attach a role (and if such a role doesn't exist, create it) with the "IAMReadOnlyAccess" policy to the EC2 instance
11 | 2. Verify you can run AWS commands in the instance
12 |
--------------------------------------------------------------------------------
/topics/aws/exercises/ec2_iam_roles/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - IAM Roles
2 |
3 | ### Requirements
4 |
5 | 1. Running EC2 instance without any IAM roles (so if you connect to the instance and try to run AWS commands, it fails)
6 | 2. IAM role with "IAMReadOnlyAccess" policy
7 |
8 | ### Objectives
9 |
10 | 1. Attach a role (and if such a role doesn't exist, create it) with the "IAMReadOnlyAccess" policy to the EC2 instance
11 | 2. Verify you can run AWS commands in the instance
12 |
13 | ### Solution
14 |
15 | #### Console
16 |
17 | 1. Go to EC2 service
18 | 2. Click on the instance to which you would like to attach the IAM role
19 | 3. Click on "Actions" -> "Security" -> "Modify IAM Role"
20 | 4. Choose the IAM role with "IAMReadOnlyAccess" policy and click on "Save"
21 | 5. Running AWS commands now in the instance should work fine (e.g. `aws iam list-users`)
22 |
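23 | From the CLI, attaching the role should look similar to the following sketch (the role must be wrapped in an instance profile; names and IDs are placeholders):
24 |
25 | ```
26 | aws iam create-instance-profile --instance-profile-name ReadOnlyProfile
27 | aws iam add-role-to-instance-profile --instance-profile-name ReadOnlyProfile \
28 |     --role-name <role-with-IAMReadOnlyAccess>
29 | aws ec2 associate-iam-instance-profile --instance-id i-xxxx \
30 |     --iam-instance-profile Name=ReadOnlyProfile
31 | ```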
--------------------------------------------------------------------------------
/topics/aws/exercises/ecs_task/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS Containers - Run Tasks
2 |
3 | Note: this costs money
4 |
5 | ### Objectives
6 |
7 | Create a task in ECS to launch in Fargate.
8 |
9 | The task itself can be a sample app.
10 |
--------------------------------------------------------------------------------
/topics/aws/exercises/ecs_task/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS Containers - Run Tasks
2 |
3 | Note: this costs money
4 |
5 | ### Objectives
6 |
7 | Create a task in ECS to launch in Fargate.
8 |
9 | The task itself can be a sample app.
10 |
11 | ### Solution
12 |
13 | #### Console
14 |
15 | 1. Go to Elastic Container Service page
16 | 2. Click on "Get Started"
17 | 3. Choose "sample-app"
18 | 4. Verify it's using Fargate and not ECS (EC2 Instance) and click on "Next"
19 | 5. Select "None" in Load balancer type and click on "Next"
20 | 6. Insert cluster name (e.g. my_cluster) and click on "Next"
21 | 7. Review everything and click on "Create"
22 | 8. Wait for everything to complete
23 |
24 | 1. Go to clusters page and check the status of the task (it will take a couple of seconds/minutes before changing to "Running")
25 |
26 | 1. Click on the task and you'll see the launch type is Fargate
27 |
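28 | #### CLI
29 |
30 | A rough CLI sketch of the same idea (taskdef.json is a hypothetical Fargate task definition; the subnet ID is a placeholder):
31 |
32 | ```
33 | aws ecs create-cluster --cluster-name my_cluster
34 | aws ecs register-task-definition --cli-input-json file://taskdef.json
35 | aws ecs run-task --cluster my_cluster --launch-type FARGATE \
36 |     --task-definition sample-app \
37 |     --network-configuration "awsvpcConfiguration={subnets=[subnet-xxxx],assignPublicIp=ENABLED}"
38 | ```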
--------------------------------------------------------------------------------
/topics/aws/exercises/elastic_beanstalk_simple/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS Elastic Beanstalk - Node.js
2 |
3 | ### Requirements
4 |
5 | 1. Having a running node.js application on AWS Elastic Beanstalk platform
6 |
7 | ### Objectives
8 |
9 | 1. Create an AWS Elastic Beanstalk application with the basic properties
10 | a. No ALB, no database, just use the default platform settings
11 |
12 | ### Out of scope
13 |
14 | 1. Having ALB attached in place
15 | 2. Having custom domain name in place
16 | 3. Having automated pipelines in place
17 | 4. Having blue-green deployment in place
18 | 5. Writing the Node.js application
--------------------------------------------------------------------------------
/topics/aws/exercises/elastic_beanstalk_simple/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS Elastic Beanstalk - Node.js
2 |
3 | ### Prerequisites
4 | 1. Make sure the node.js application has an _npm start_ command specified in the __package.json__ file, like the following example
5 | ```
6 | {
7 |   "name": "application-name",
8 |   "version": "0.0.1",
9 |   "private": true,
10 |   "scripts": {
11 |     "start": "node app"
12 |   },
13 |   "dependencies": {
14 |     "express": "3.1.0",
15 |     "jade": "*",
16 |     "mysql": "*",
17 |     "async": "*",
18 |     "node-uuid": "*"
19 |   }
20 | }
21 | ```
22 | 2. Zip the application, and make sure not to zip the parent folder, only the files inside it, like:
23 |
24 | ```
25 | \Parent - (exclude the folder itself from the zip)
26 | - file1 - (include in zip)
27 | - subfolder1 (include in zip)
28 | - file2 (include in zip)
29 | - file3 (include in zip)
30 | ```
31 |
32 | ### Solution
33 |
34 | 1. Create a "New Environment"
35 | 2. Select Environment => _Web Server Environment_
36 | 3. Fill the Create a web server environment section
37 | a. Fill the "Application Name"
38 | 4. Fill the Environment information section
39 | a. Fill the "Environment Name"
40 | b. Domain - "Leave for autogenerated value"
41 | 5. Platform
42 | a. Choose Platform => _node.js_
43 | 6. Application Code => upload the Zipped Code from your local computer
44 | 7. Create Environment
45 | 8. Wait for the environment to come up
46 | 9. Check the website
47 | a. Navigate to the _Applications_ tab
48 | b. Select the recently created node.js app
49 | c. Click on the highlighted URL
50 |
51 | ### Documentation
52 | [Elastic Beanstalk / Node.js getting started](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/nodejs-getstarted.html)
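53 |
54 | ### CLI
55 |
56 | With the EB CLI installed, the same flow should look similar to the following sketch (run from the application directory; names are placeholders):
57 |
58 | ```
59 | eb init -p node.js my-app
60 | eb create my-env
61 | eb open
62 | ```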
--------------------------------------------------------------------------------
/topics/aws/exercises/elastic_ip/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - Elastic IP
2 |
3 | ### Requirements
4 |
5 | * An EC2 instance with public IP (not elastic IP)
6 |
7 | ### Objectives
8 |
9 | 1. Write down the public IP of your EC2 instance somewhere and stop & start the instance. Is the public IP address the same? Why?
10 | 2. Handle this situation so you have the same public IP even after stopping and starting the instance
11 |
--------------------------------------------------------------------------------
/topics/aws/exercises/elastic_ip/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - Elastic IP
2 |
3 | ### Requirements
4 |
5 | * An EC2 instance with public IP (not elastic IP)
6 |
7 | ### Objectives
8 |
9 | 1. Write down the public IP of your EC2 instance somewhere and stop & start the instance. Is the public IP address the same? Why?
10 | 2. Handle this situation so you have the same public IP even after stopping and starting the instance
11 |
12 | ### Solution
13 |
14 | 1. Go to EC2 service -> Instances
15 | 1. Write down current public IP address
16 | 2. Click on "Instance state" -> Stop instance -> Stop
17 | 3. Click on "Instance state" -> Start Instance
18 | 4. Yes, the public IP address has changed
19 | 2. Let's use an Elastic IP address
20 | 1. In EC2 service, under "Network & Security" click on "Elastic IP"
21 | 2. Click on the "Allocate elastic IP address" button
22 | 3. Make sure you select "Amazon's pool of IPv4 addresses" and click on "Allocate"
23 | 4. Click on "Actions" and then "Associate Elastic IP address"
24 | 1. Select "instance", choose your instance and provide its private IP address
25 | 2. Click on "Associate"
26 | 5. Now, if we go back to the instance page, we can see it is using the Elastic IP address as its public IP
27 |
28 | Note: to remove it, use "disassociate" option and don't forget to also release it so you won't be billed.
29 |
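30 | From the CLI, the same flow should look similar to (IDs are placeholders):
31 |
32 | ```
33 | aws ec2 allocate-address --domain vpc
34 | aws ec2 associate-address --instance-id i-xxxx --allocation-id eipalloc-xxxx
35 | # Cleanup - disassociate and release so you won't be billed:
36 | aws ec2 disassociate-address --association-id eipassoc-xxxx
37 | aws ec2 release-address --allocation-id eipalloc-xxxx
38 | ```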
--------------------------------------------------------------------------------
/topics/aws/exercises/elastic_network_interfaces/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - Elastic Network Interfaces
2 |
3 | ### Requirements
4 |
5 | * An EC2 instance with network interface
6 |
7 | ### Objectives
8 |
9 | A. Create a network interface and attach it to the EC2 instance that already has one network interface
10 | B. Explain why anyone would use two network interfaces
11 |
--------------------------------------------------------------------------------
/topics/aws/exercises/elastic_network_interfaces/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - Elastic Network Interfaces
2 |
3 | ### Requirements
4 |
5 | * An EC2 instance with network interface
6 |
7 | ### Objectives
8 |
9 | A. Create a network interface and attach it to the EC2 instance that already has one network interface
10 | B. Explain why anyone would use two network interfaces
11 |
12 | ### Solution
13 |
14 | A.
15 | 1. Go to EC2 service
16 | 2. Click on "Network Interfaces" under "Network & Security"
17 | 3. Click on "Create network interface"
18 | 4. Provide a description
19 | 5. Choose a subnet (one that is in the same AZ as the instance)
20 | 6. Optionally attach a security group and click on "Create network interface"
21 | 7. Click on "Actions" -> "Attach" and choose the instance to attach it to
22 | 8. If you go now to "Instances" page you'll see your instance has two network interfaces
23 |
24 | B.
25 | 1. You can move the second network interface between instances. This allows us to create a kind of failover mechanism between the instances.
26 |
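27 | From the CLI, part A should look similar to the sketch below (IDs are placeholders; device-index 1 attaches it as the second interface):
28 |
29 | ```
30 | aws ec2 create-network-interface --subnet-id subnet-xxxx \
31 |     --description "secondary interface" --groups sg-xxxx
32 | aws ec2 attach-network-interface --network-interface-id eni-xxxx \
33 |     --instance-id i-xxxx --device-index 1
34 | ```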
--------------------------------------------------------------------------------
/topics/aws/exercises/elasticache/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS ElastiCache
2 |
3 | ### Objectives
4 |
5 | 1. Create ElastiCache Redis
6 | * Instance type should be "cache.t2.micro"
7 | * Replicas should be 0
8 |
--------------------------------------------------------------------------------
/topics/aws/exercises/elasticache/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS ElastiCache
2 |
3 | ### Objectives
4 |
5 | 1. Create ElastiCache Redis
6 | * Instance type should be "cache.t2.micro"
7 | * Replicas should be 0
8 |
9 | ### Solution
10 |
11 | #### Console
12 |
13 | 1. Go to ElastiCache service
14 | 2. Click on "Get Started Now"
15 | 3. Choose "Redis"
16 | 4. Insert a name and description
17 | 5. Choose "cache.t2.micro" an node type
18 | 6. Set number of replicas to 0
19 | 7. Create new subnet group
20 | 8. Click on "Create"
21 |
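22 | #### CLI
23 |
24 | From the CLI, the same setup should look similar to (a single node means zero replicas):
25 |
26 | ```
27 | aws elasticache create-cache-cluster --cache-cluster-id my-redis \
28 |     --engine redis --cache-node-type cache.t2.micro --num-cache-nodes 1
29 | ```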
--------------------------------------------------------------------------------
/topics/aws/exercises/health_checks/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS Route 53 - Health Checks
2 |
3 | ### Requirements
4 |
5 | 3 web instances in different AZs.
6 |
7 | ### Objectives
8 |
9 | 1. For each instance create a health check with the following properties:
10 | 1. Name it after the AZ where the instance resides
11 | 2. Failure threshold should be 5
12 |
13 | 2. Edit the security group of one of your instances and remove HTTP rules.
14 | 1. Did it change the status of the health check?
15 |
--------------------------------------------------------------------------------
/topics/aws/exercises/health_checks/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS Route 53 - Health Checks
2 |
3 | ### Requirements
4 |
5 | 3 web instances in different AZs.
6 |
7 | ### Objectives
8 |
9 | 1. For each instance create a health check with the following properties:
10 | 1. Name it after the AZ where the instance resides
11 | 2. Failure threshold should be 5
12 |
13 | 2. Edit the security group of one of your instances and remove HTTP rules.
14 | 1. Did it change the status of the health check?
15 |
16 | ### Solution
17 |
18 | #### Console
19 |
20 | 1. Go to Route 53
21 | 2. Click on "Health Checks" in the left-side menu
22 | 3. Click on "Create health check"
23 | 4. Insert the name: the AZ where the instance resides (e.g. us-east-2a)
24 | 5. What to monitor: endpoint
25 | 6. Insert the IP address of the instance
26 | 7. Insert the endpoint /health if your web instance supports that endpoint
27 | 8. In advanced configuration, set Failure threshold to 5
28 | 9. Click on "next" and then on "Create health check"
29 | 10. Repeat steps 1-9 for the other two instances you have
30 |
31 | 1. Go to security group of one of your instances
32 | 2. Click on "Actions" -> Edit inbound rules -> Delete HTTP based rules
33 | 3. Go back to health checks page and after a couple of seconds you should see that the status becomes "unhealthy"
34 |
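35 | #### CLI
36 |
37 | Creating one of the health checks from the CLI should look similar to this sketch (the IP is a placeholder; repeat per instance):
38 |
39 | ```
40 | aws route53 create-health-check --caller-reference us-east-2a-$(date +%s) \
41 |     --health-check-config "IPAddress=<instance-ip>,Port=80,Type=HTTP,ResourcePath=/health,FailureThreshold=5"
42 | ```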
--------------------------------------------------------------------------------
/topics/aws/exercises/hello_function/exercise.md:
--------------------------------------------------------------------------------
1 | # Hello Function
2 |
3 | Create a basic AWS Lambda function that, when given a name, will return "Hello <name>"
4 |
5 | ## Solution
6 |
7 | Click [here](solution.md) to view the solution.
--------------------------------------------------------------------------------
/topics/aws/exercises/hello_function/solution.md:
--------------------------------------------------------------------------------
1 | ## Hello Function - Solution
2 |
3 | ### Exercise
4 |
5 | Create a basic AWS Lambda function that, when given a name, will return "Hello <name>"
6 |
7 | ### Solution
8 |
9 | #### Define a function
10 |
11 | 1. Go to Lambda console panel and click on `Create function`
12 | 1. Give the function a name like `BasicFunction`
13 | 2. Select `Python3` runtime
14 | 3. Now, to handle the function's permissions, we can attach an IAM role to our function, either by using an existing role or by creating a new one. I selected "Create a new role from AWS policy templates"
15 | 4. In "Policy Templates" select "Simple Microservice Permissions"
16 |
17 | 1. Next, you should see a text editor where you will insert code similar to the following
18 |
19 | #### Function's code
20 | ```
21 | import json
22 |
23 |
24 | def lambda_handler(event, context):
25 | firstName = event['name']
26 | return 'Hello ' + firstName
27 | ```
28 | 2. Click on "Create Function"
29 |
30 | #### Define a test
31 |
32 | 1. Now let's test the function. Click on "Test".
33 | 2. Select "Create new test event"
34 | 3. Set the "Event name" to whatever you'd like. For example "TestEvent"
35 | 4. Provide keys to test
36 |
37 | ```
38 | {
39 | "name": 'Spyro'
40 | }
41 | ```
42 | 5. Click on "Create"
43 |
44 | #### Test the function
45 |
46 | 1. Choose the test event you've created (`TestEvent`)
47 | 2. Click on the `Test` button
48 | 3. You should see something similar to `Execution result: succeeded`
49 | 4. If you go to AWS CloudWatch, you should see a related log stream
50 |
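51 | You can also invoke the function from the CLI with something similar to the sketch below (the extra flag is needed for JSON payloads in AWS CLI v2):
52 |
53 | ```
54 | aws lambda invoke --function-name BasicFunction \
55 |     --cli-binary-format raw-in-base64-out \
56 |     --payload '{"name": "Spyro"}' response.json
57 | cat response.json
58 | ```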
--------------------------------------------------------------------------------
/topics/aws/exercises/hibernate_instance/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - Hibernate an Instance
2 |
3 | ### Objectives
4 |
5 | 1. Create an instance that supports hibernation
6 | 2. Hibernate the instance
7 | 3. Start the instance
8 | 4. How can you prove, from the OS perspective, that the instance was hibernated?
9 |
--------------------------------------------------------------------------------
/topics/aws/exercises/hibernate_instance/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - Hibernate an Instance
2 |
3 | ### Objectives
4 |
5 | 1. Create an instance that supports hibernation
6 | 2. Hibernate the instance
7 | 3. Start the instance
8 | 4. How can you prove, from the OS perspective, that the instance was hibernated?
9 |
10 | ### Solution
11 |
12 | 1. Create an instance that supports hibernation
13 | 1. Go to EC2 service
14 | 2. Go to instances and create an instance
15 | 3. In "Configure instance" make sure to check "Enable hibernation as an additional stop behavior"
16 | 4. In "Add storage", make sure to encrypt EBS and make sure the size > instance RAM size (because hibernation saves the RAM state)
17 | 5. Review and Launch
18 |
19 | 2. Hibernate the instance
20 | 1. Go to the instance page
21 | 2. Click on "Instance state" -> "Hibernate instance" -> Hibernate
22 |
23 | 3. Instance state -> Start
24 |
25 | 4. Run the "uptime" command. Since hibernation preserves the in-memory state, the reported uptime keeps counting across hibernation, unlike a regular stop and start where it resets
26 |
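27 | From the CLI, hibernating and starting should look similar to the following sketch (the instance must have been launched with hibernation enabled; the ID is a placeholder):
28 |
29 | ```
30 | aws ec2 stop-instances --instance-ids i-xxxx --hibernate
31 | aws ec2 start-instances --instance-ids i-xxxx
32 | uptime   # run inside the instance - the uptime keeps counting across hibernation
33 | ```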
--------------------------------------------------------------------------------
/topics/aws/exercises/launch_ec2_web_instance/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS - Launch EC2 Web Instance
2 |
3 | ### Objectives
4 |
5 | Launch one EC2 instance with the following requirements:
6 |
7 | 1. Amazon Linux 2 image
8 | 2. Instance type: pick one that has 1 vCPU and 1 GiB memory
9 | 3. Instance storage should be deleted upon the termination of the instance
10 | 4. When the instance starts, it should install:
11 | 1. Install the httpd package
12 | 2. Start the httpd service
13 | 3. Make sure the content of /var/www/html/index.html is `I made it! This is awesome!`
14 | 5. It should have the tag: "Type: web" and the name of the instance should be "web-1"
15 | 6. HTTP traffic (port 80) should be accepted from anywhere
16 |
--------------------------------------------------------------------------------
/topics/aws/exercises/launch_ec2_web_instance/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS - Launch EC2 Web Instance
2 |
3 | ### Objectives
4 |
5 | Launch one EC2 instance with the following requirements:
6 |
7 | 1. Amazon Linux 2 image
8 | 2. Instance type: pick one that has 1 vCPU and 1 GiB memory
9 | 3. Instance storage should be deleted upon the termination of the instance
10 | 4. When the instance starts, it should install:
11 | 1. Install the httpd package
12 | 2. Start the httpd service
13 | 3. Make sure the content of /var/www/html/index.html is `I made it! This is awesome!`
14 | 5. It should have the tag: "Type: web" and the name of the instance should be "web-1"
15 | 6. HTTP traffic (port 80) should be accepted from anywhere
16 |
17 | ### Solution
18 |
19 | 1. Choose a region close to you
20 | 2. Go to EC2 service
21 | 3. Click on "Instances" in the menu and click on "Launch instances"
22 | 4. Choose image: Amazon Linux 2
23 | 5. Choose instance type: t2.micro
24 | 6. Make sure "Delete on Termination" is checked in the storage section
25 | 7. Under the "User data" field, insert the following:
26 |
27 | ```
28 | yum update -y
29 | yum install -y httpd
30 | systemctl start httpd
31 | systemctl enable httpd
32 | echo "
I made it! This is is awesome!
" > /var/www/html/index.html
33 | ```
34 | 8. Add tags with the following keys and values:
35 | * key "Type" and the value "web"
36 | * key "Name" and the value "web-1"
37 | 9. In the security group section, add a rule to accept HTTP traffic (TCP) on port 80 from anywhere
38 | 10. Click on "Review" and then click on "Launch" after reviewing.
39 | 11. If you don't have a key pair, create one and download it.
40 |
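41 | From the CLI, the same launch should look similar to this sketch (the AMI and security group IDs are placeholders and user-data.sh is a hypothetical file holding the script above):
42 |
43 | ```
44 | aws ec2 run-instances --image-id <amazon-linux-2-ami> --instance-type t2.micro \
45 |     --security-group-ids sg-xxxx --user-data file://user-data.sh \
46 |     --tag-specifications 'ResourceType=instance,Tags=[{Key=Name,Value=web-1},{Key=Type,Value=web}]'
47 | ```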
--------------------------------------------------------------------------------
/topics/aws/exercises/mysql_db/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS Databases - MySQL DB
2 |
3 | ### Objectives
4 |
5 | 1. Create a MySQL database with the following properties
6 | * Instance type: db.t2.micro
7 | * gp2 storage
8 | * Storage Auto scaling should be enabled and threshold should be set to 500 GiB
9 | * Public access should be enabled
10 | * Port should be set to 3306
11 | * DB name: 'db'
12 | * Backup retention: 10 days
13 |
14 | 2. Create read replica for the database you've created
15 |
--------------------------------------------------------------------------------
/topics/aws/exercises/mysql_db/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS Databases - MySQL DB
2 |
3 | ### Objectives
4 |
5 | 1. Create a MySQL database with the following properties
6 | * Instance type: db.t2.micro
7 | * gp2 storage
8 | * Storage Auto scaling should be enabled and threshold should be set to 500 GiB
9 | * Public access should be enabled
10 | * Port should be set to 3306
11 | * DB name: 'db'
12 | * Backup retention: 10 days
13 |
14 | 2. Create read replica for the database you've created
15 |
16 | ### Solution
17 |
18 | #### Console
19 |
20 | 1. Go to RDS service
21 | 2. Click on "Databases" in the left side menu and click on the "Create database" button
22 | 3. Choose "standard create"
23 | 4. Choose "MySQL" and the recommended version
24 | 5. Choose "Production" template
25 | 6. Specify DB instance identifier
26 | 7. Specify Credentials (master username and password)
27 | 8. Choose DB instance type: Burstable classes, db.t2.micro
28 | 9. Choose "gp2" as storage
29 | 10. Enable storage autoscaling: maximum storage threshold of 500 GiB
30 | 11. Choose "Do not create a standby instance"
31 | 12. Choose a default VPC and subnet
32 | 13. Check "Yes" for public access
33 | 14. Choose "No preference" for AZ
34 | 15. Database port should be 3306
35 | 16. For authentication, choose "Password and IAM database authentication"
36 | 17. Set initial database name as "db"
37 | 18. Increase backup retention period to 10 days
38 | 19. Click on "Create database" button
39 |
40 | 1. Go to the database under "Databases" in the left side menu
41 | 2. Click on "Actions" -> Create read replica
42 | 3. Click on "Create read replica"
43 |
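44 | #### CLI
45 |
46 | A rough CLI sketch of the same setup (credentials are placeholders):
47 |
48 | ```
49 | aws rds create-db-instance --db-instance-identifier mysql-db --engine mysql \
50 |     --db-instance-class db.t2.micro --storage-type gp2 --allocated-storage 20 \
51 |     --max-allocated-storage 500 --publicly-accessible --port 3306 \
52 |     --db-name db --backup-retention-period 10 \
53 |     --master-username admin --master-user-password <password>
54 | aws rds create-db-instance-read-replica --db-instance-identifier mysql-db-replica \
55 |     --source-db-instance-identifier mysql-db
56 | ```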
--------------------------------------------------------------------------------
/topics/aws/exercises/network_load_balancer/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS ELB - Network Load Balancer
2 |
3 | ### Requirements
4 |
5 | Two running EC2 instances
6 |
7 | ### Objectives
8 |
9 | 1. Create a network load balancer
10 | 1. healthy threshold: 3
11 | 2. unhealthy threshold: 3
12 | 3. interval: 10 seconds
13 | 4. Listener should be using TCP protocol on port 80
14 |
--------------------------------------------------------------------------------
/topics/aws/exercises/network_load_balancer/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS ELB - Network Load Balancer
2 |
3 | ### Requirements
4 |
5 | Two running EC2 instances
6 |
7 | ### Objectives
8 |
9 | 1. Create a network load balancer
10 | 1. healthy threshold: 3
11 | 2. unhealthy threshold: 3
12 | 3. interval: 10 seconds
13 | 4. Listener should be using TCP protocol on port 80
14 |
15 | ### Solution
16 |
17 | #### Console
18 |
19 | 1. Go to EC2 service
20 | 2. Click in the left side menu on "Load balancers" under "Load balancing"
21 | 3. Click on "Create load balancer"
22 | 4. Choose "Network Load Balancer"
23 | 5. Insert a name for the LB
24 | 6. Choose AZs where you want the LB to operate
25 | 7. Choose a security group
26 | 8. Under "Listeners and routing" click on "Create target group" and choose "Instances"
27 | 1. Provide a name for the target group
28 | 2. Set healthy threshold to 3
29 | 3. Set unhealthy threshold to 3
30 | 4. Set interval to 10 seconds
31 | 5. Set protocol to TCP and port to 80
32 | 6. Click on "Next" and choose two instances you have
33 | 7. Click on "Create target group"
34 | 9. Refresh target groups and choose the one you've just created
35 | 10. Click on "Create load balancer" and wait for it to be provisioned
36 |
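37 | #### CLI
38 |
39 | The CLI version differs from the ALB sketch mainly in the type and protocol (IDs are placeholders):
40 |
41 | ```
42 | aws elbv2 create-target-group --name web-nlb-tg --protocol TCP --port 80 \
43 |     --vpc-id vpc-xxxx --target-type instance \
44 |     --healthy-threshold-count 3 --unhealthy-threshold-count 3 \
45 |     --health-check-interval-seconds 10
46 | aws elbv2 create-load-balancer --name web-nlb --type network \
47 |     --subnets subnet-xxxx subnet-yyyy
48 | aws elbv2 create-listener --load-balancer-arn <nlb-arn> --protocol TCP --port 80 \
49 |     --default-actions Type=forward,TargetGroupArn=<tg-arn>
50 | ```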
--------------------------------------------------------------------------------
/topics/aws/exercises/new_vpc/exercise.md:
--------------------------------------------------------------------------------
1 | # My First VPC
2 |
3 | ## Objectives
4 |
5 | 1. Create a new VPC
6 | 1. It should have a CIDR that supports using at least 60,000 hosts
7 | 2. It should be named "exercise-vpc"
8 |
9 | ## Solution
10 |
11 | Click [here](solution.md) to view the solution
--------------------------------------------------------------------------------
/topics/aws/exercises/new_vpc/pulumi/__main__.py:
--------------------------------------------------------------------------------
1 | import pulumi
2 | import pulumi_awsx as awsx
3 |
4 | vpc = awsx.ec2.Vpc("exercise-vpc", cidr_block="10.0.0.0/16")
5 |
6 | pulumi.export("vpc_id", vpc.vpc_id)
7 | pulumi.export("publicSubnetIds", vpc.public_subnet_ids)
8 | pulumi.export("privateSubnetIds", vpc.private_subnet_ids)
9 |
10 | # Run 'pulumi up' to create it
11 |
--------------------------------------------------------------------------------
/topics/aws/exercises/new_vpc/solution.md:
--------------------------------------------------------------------------------
1 | # My First VPC
2 |
3 | ## Objectives
4 |
5 | 1. Create a new VPC
6 | 1. It should have a CIDR that supports using at least 60,000 hosts
7 | 2. It should be named "exercise-vpc"
8 |
9 | ## Solution
10 |
11 | ### Console
12 |
13 | 1. Under "Virtual Private Cloud" click on "Your VPCs"
14 | 2. Click on "Create VPC"
15 | 3. Insert a name - "exercise-vpc"
16 | 4. Insert IPv4 CIDR block: 10.0.0.0/16
17 | 5. Keep "Tenancy" at Default
18 | 6. Click on "Create VPC"
19 |
20 | ### Terraform
21 |
22 | Click [here](terraform/main.tf) to view the solution
23 |
24 | ### Pulumi - Python
25 |
26 | Click [here](pulumi/__main__.py) to view the solution
27 |
28 | ### Verify Solution
29 |
30 | To verify you've created the VPC, you can run: `aws ec2 describe-vpcs --filters Name=tag:Name,Values=exercise-vpc`
--------------------------------------------------------------------------------
/topics/aws/exercises/new_vpc/terraform/main.tf:
--------------------------------------------------------------------------------
1 | resource "aws_vpc" "exercise-vpc" {
2 | cidr_block = "10.0.0.0/16"
3 |
4 | tags = {
5 | Name = "exercise-vpc"
6 | }
7 | }
8 |
9 | output "vpc-id" {
10 | value = aws_vpc.exercise-vpc.id
11 | }
--------------------------------------------------------------------------------
/topics/aws/exercises/no_application/exercise.md:
--------------------------------------------------------------------------------
1 | ## No Application :'(
2 |
3 | ### Objectives
4 |
5 | Explain what might be possible reasons for the following issues:
6 |
7 | 1. Getting "time out" when trying to reach an application running on EC2 instance
8 | 2. Getting "connection refused" error
9 |
--------------------------------------------------------------------------------
/topics/aws/exercises/no_application/solution.md:
--------------------------------------------------------------------------------
1 | ## No Application :'(
2 |
3 | ### Objectives
4 |
5 | Explain what might be possible reasons for the following issues:
6 |
7 | 1. Getting "time out" when trying to reach an application running on EC2 instance
8 | 2. Getting "connection refused" error
9 |
10 | ### Solution
11 |
12 | 1. 'Time out' can be due to one of the following:
13 |
14 | * Security group doesn't allow access
15 | * No host (yes, I know. Not the first thing to check and yet...)
16 | * Operating system firewall blocking traffic
17 |
18 | 2. 'Connection refused' can happen due to one of the following:
19 |
20 | * Application didn't launch properly or has some issue (it doesn't listen on the designated port)
21 | * Firewall replied with a reject instead of dropping the packets
22 |
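23 | A quick way to tell the two apart is curl - a timeout hangs until the deadline, while a refused connection fails immediately:
24 |
25 | ```
26 | curl -m 5 http://<instance-ip>
27 | # "Connection timed out" -> packets are being dropped (security group, no host, OS firewall)
28 | # "Connection refused"  -> the host replied, but nothing listens on the port (or the firewall rejects)
29 | ```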
--------------------------------------------------------------------------------
/topics/aws/exercises/password_policy_and_mfa/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS IAM - Password Policy & MFA
2 |
3 | Note: DON'T perform this exercise unless you understand what you are doing and what is the outcome of applying these changes to your account
4 |
5 | ### Objectives
6 |
7 | 1. Create password policy with the following settings:
8 | 1. Minimum password length of 8 characters
9 | 2. At least one number
10 | 3. Prevent password reuse
11 |
12 | 2. Then enable MFA for the account.
13 |
--------------------------------------------------------------------------------
/topics/aws/exercises/password_policy_and_mfa/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS IAM - Password Policy & MFA
2 |
3 | Note: DON'T perform this exercise unless you understand what you are doing and what is the outcome of applying these changes to your account
4 |
5 | ### Objectives
6 |
7 | 1. Create password policy with the following settings:
8 | 1. Minimum password length of 8 characters
9 | 2. At least one number
10 | 3. Prevent password reuse
11 |
12 | 2. Then enable MFA for the account.
13 |
14 | ### Solution
15 |
16 | Password Policy:
17 |
18 | 1. Go to IAM service in AWS
19 | 2. Click on "Account settings" under "Access management"
20 | 3. Click on "Change password policy"
21 | 1. Check "Enforce minimum password length" and set it to 8 characters
22 | 2. Check "Require at least one number"
23 | 3. Check "Prevent password reuse"
24 | 4. Click on "Save changes"
25 |
26 | MFA:
27 |
28 | 1. Click on the account name
29 | 2. Click on "My Security Credentials"
30 | 3. Expand "Multi-factor authentication (MFA)" and click on "Activate MFA"
31 | 4. Choose one of the devices
32 | 5. Follow the instructions to set it up and click on "Assign MFA"
33 |
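34 | The password policy part can also be applied from the CLI with something similar to:
35 |
36 | ```
37 | aws iam update-account-password-policy --minimum-password-length 8 \
38 |     --require-numbers --password-reuse-prevention 5   # remembers the last 5 passwords
39 | ```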
--------------------------------------------------------------------------------
/topics/aws/exercises/placement_groups/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - Placement Groups
2 |
3 | ### Objectives
4 |
5 | A. Create a placement group. It should be one with a low latency network. Make sure to launch an instance as part of this placement group.
6 | B. Create another placement group. This time high availability is a priority
7 |
--------------------------------------------------------------------------------
/topics/aws/exercises/placement_groups/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - Placement Groups
2 |
3 | ### Objectives
4 |
5 | A. Create a placement group. It should be one with a low latency network. Make sure to launch an instance as part of this placement group.
6 | B. Create another placement group. This time high availability is a priority
7 |
8 | ### Solution
9 |
10 | A.
11 | 1. Go to EC2 service
12 | 2. Click on "Placement Groups" under "Network & Security"
13 | 3. Click on "Create placement group"
14 | 4. Give it a name and choose the "Cluster" placement strategy because the requirement is low latency network
15 | 5. Click on "Create group"
16 | 6. Go to "Instances" and click on "Launch an instance". Choose any properties you would like, just make sure to check "Add instance to placement group" and choose the placement group you've created
17 |
18 | B.
19 | 1. Go to EC2 service
20 | 2. Click on "Placement Groups" under "Network & Security"
21 | 3. Click on "Create placement group"
22 | 4. Give it a name and choose the "Spread" placement strategy because the requirement is high availability as top priority
23 | 5. Click on "Create group"
24 |
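25 | From the CLI, both groups should look similar to the sketch below (the AMI ID is a placeholder; cluster groups require a supported instance type):
26 |
27 | ```
28 | aws ec2 create-placement-group --group-name low-latency --strategy cluster
29 | aws ec2 create-placement-group --group-name high-availability --strategy spread
30 | # Launching an instance into the cluster group:
31 | aws ec2 run-instances --image-id ami-xxxx --instance-type c5.large \
32 |     --placement GroupName=low-latency
33 | ```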
--------------------------------------------------------------------------------
/topics/aws/exercises/register_domain/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS Route 53 - Register Domain
2 |
3 | ### Objectives
4 |
5 | Note: registering a domain costs money. Don't do this exercise unless you understand that you are going to register a domain and it's going to cost you money.
6 |
7 | 1. Register your own custom domain using AWS Route 53
8 | 2. What is the type of your domain?
9 | 3. How many records does your domain have?
10 |
--------------------------------------------------------------------------------
/topics/aws/exercises/register_domain/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS Route 53 - Register Domain
2 |
3 | ### Objectives
4 |
5 | Note: registering a domain costs money. Don't do this exercise unless you understand that you are going to register a domain and it's going to cost you money.
6 |
7 | 1. Register your own custom domain using AWS Route 53
8 | 2. What is the type of your domain?
9 | 3. How many records does your domain have?
10 |
11 | ### Solution
12 |
13 | 1. Go to Route 53 service page
14 | 2. Click in the menu on "Registered Domains" under "Domains"
15 | 3. Click on "Register Domain"
16 | 4. Insert your domain
17 | 5. Check if it's available. If it is, add it to the cart
18 |
19 | Note: registering a domain costs money. Don't click on "Continue" unless you understand that you are going to register a domain and it's going to cost you money.
20 |
21 | 6. Click on "Continue" and fill in your contact information
22 | 7. Choose if you want to renew it in the future automatically. Accept the terms and click on "Complete Order"
23 | 8. Go to hosted zones and you should see your newly registered domain there
24 |
25 | 1. The domain type is "Public"
26 |
27 | 1. The domain has 2 DNS records: NS and SOA
28 |
--------------------------------------------------------------------------------
/topics/aws/exercises/route_53_failover/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS Route 53 - Failover
2 |
3 | ### Requirements
4 |
5 | A running EC2 web instance with a health check defined for it in Route 53
6 |
7 | ### Objectives
8 |
9 | 1. Create a failover record that will fail over to another record if a health check isn't passing
10 | 1. Make sure TTL is 30
11 | 2. Associate the failover record with the health check you have
12 |
--------------------------------------------------------------------------------
/topics/aws/exercises/route_53_failover/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS Route 53 - Failover
2 |
3 | ### Requirements
4 |
5 | A running EC2 web instance with an health check defined for it in Route 53
6 |
7 | ### Objectives
8 |
9 | 1. Create a failover record that will failover to another record if an health check isn't passing
10 | 1. Make sure TTL is 30
11 | 2. Associate the failover record with the health check you have
12 |
13 | ### Solution
14 |
15 | #### Console
16 |
17 | 1. Go to Route 53 service
18 | 2. Click on "Hosted Zones" in the left-side menu
19 | 3. Click on your hosted zone
20 | 4. Click on "Created record"
21 | 5. Insert "failover" in record name and set record type to A
22 | 6. Insert the IP of your instance
23 | 7. Set the routing policy to failover
24 | 8. Set TTL to 30
25 | 9. Associate with an health check
26 | 10. Add another record with the same properties as the previous one
27 | 11. Click on "Create records"
28 | 12. Go to your EC2 instance and edit its security group to remove the HTTP rules
29 | 13. Use your web app and if you print the hotsname of your instance then you will notice, a failover was performed and a different EC2 instance is used
30 |
--------------------------------------------------------------------------------
/topics/aws/exercises/s3/new_bucket/exercise.md:
--------------------------------------------------------------------------------
1 | # Create buckets
2 |
3 | ## Objectives
4 |
5 | 1. Create the following buckets:
6 | 1. Private bucket
7 | 1. eu-west-2 region
8 | 2. Upload a single file to the bucket. Any file.
9 | 2. Public bucket
10 | 1. eu-west-1 region
11 | 2. Versioning should be enabled
12 |
13 | ## Solution
14 |
15 | Click [here](solution.md) to view the solution
--------------------------------------------------------------------------------
/topics/aws/exercises/s3/new_bucket/pulumi/__main__.py:
--------------------------------------------------------------------------------
1 | import pulumi_aws as aws
2 |
3 | # Private Bucket
4 | private_bucket = aws.s3.Bucket("my-first-private-bucket",
5 | acl="private",
6 | tags={
7 | "Environment": "Exercise",
8 | "Name": "My First Private Bucket"},
9 | region="eu-west-2"
10 | )
11 |
12 | # Bucket Object
13 | aws.s3.BucketObject("bucketObject",
14 | key="some_object_key",
15 | bucket=private_bucket.id,
16 | content="object content")
17 |
18 | # Public Bucket
19 | aws.s3.Bucket("my-first-public-bucket",
20 | acl="private",
21 | tags={
22 | "Environment": "Exercise",
23 | "Name": "My First Public Bucket"},
24 | region="eu-west-1",
25 | versioning=aws.s3.BucketVersioningArgs(enabled=True))
26 |
--------------------------------------------------------------------------------
/topics/aws/exercises/s3/new_bucket/solution.md:
--------------------------------------------------------------------------------
1 | # Create buckets
2 |
3 | ## Objectives
4 |
5 | 1. Create the following buckets:
6 | 1. Private bucket
7 | 1. eu-west-2 region
8 | 2. Upload a single file to the bucket. Any file.
9 | 2. Public bucket
10 | 1. eu-west-1 region
11 | 2. Versioning should be enabled
12 |
13 | ## Solution
14 |
15 | ### Console
16 |
17 | For the first bucket:
18 |
19 | 1. Go to S3 service in the AWS console. If not in buckets page, click on "buckets" in the left side menu
20 | 2. Click on "Create bucket"
21 | 3. Give a globally unique name for your bucket
22 | 4. Choose the region "eu-west-2"
23 | 5. Click on "Create bucket"
24 | 6. Click on the bucket name
25 | 7. Under "objects" click on "Upload" -> "Add files" -> Choose file to upload -> Click on "Upload"
26 |
27 | For the second bucket:
28 |
29 | 1. Go to S3 service in the AWS console. If not in buckets page, click on "buckets" in the left side menu
30 | 2. Click on "Create bucket"
31 | 3. Give a globally unique name for your bucket
32 | 4. Choose the region "eu-west-1"
33 | 5. Make sure to uncheck the box for "Private bucket" to make it public
34 | 6. Make sure to check the enable box for "Bucket Versioning"
35 | 7. Click on "Create bucket"
36 |
37 | ### Terraform
38 |
39 | Click [here](terraform/main.tf) to view the solution
40 |
41 | ### Pulumi - Python
42 |
43 | Click [here](pulumi/__main__.py) to view the solution
--------------------------------------------------------------------------------
/topics/aws/exercises/s3/new_bucket/terraform/main.tf:
--------------------------------------------------------------------------------
1 | resource "aws_s3_bucket" "private_bucket" {
2 | bucket = "my-first-private-bucket"
3 | region = "eu-west-2"
4 | acl = "private"
5 |
6 | tags = {
7 | Name = "My First Private Bucket"
8 | Environment = "Exercise"
9 | }
10 | }
11 |
12 | resource "aws_s3_bucket_acl" "private_bucket_acl" {
13 | bucket = aws_s3_bucket.private_bucket.id
14 | acl = "private"
15 | }
16 |
17 | resource "aws_s3_bucket" "public_bucket" {
18 | bucket = "my-first-public-bucket"
19 | region = "eu-west-1"
20 |
21 | tags = {
22 | Name = "My First Public Bucket"
23 | Environment = "Exercise"
24 | }
25 |
26 | versioning {
27 | enabled = true
28 | }
29 | }
30 |
31 | resource "aws_s3_bucket_acl" "public_bucket_acl" {
32 | bucket = aws_s3_bucket.public_bucket.id
33 | acl = "public-read"
34 | }
35 |
36 | resource "aws_s3_bucket_object" "bucket_object" {
37 | bucket = "my-first-private-bucket"
38 | key = "some_object_key"
39 | content = "object content"
40 | }
--------------------------------------------------------------------------------
/topics/aws/exercises/sample_cdk/exercise.md:
--------------------------------------------------------------------------------
1 | ### Set up a CDK Project
2 |
3 | Initialize a CDK project and set up files required to build a CDK project.
4 |
5 | ## Solution
6 |
7 | Click [here](solution.md) to view the solution.
--------------------------------------------------------------------------------
/topics/aws/exercises/security_groups/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - Security Groups
2 |
3 | ### Requirements
4 |
5 | For this exercise you'll need:
6 |
7 | 1. EC2 instance with web application
8 | 2. Security group inbound rules that allow HTTP traffic
9 |
10 | ### Objectives
11 |
12 | 1. List the security groups you have in your account, in the region you are using
13 | 2. Remove the HTTP inbound traffic rule
14 | 3. Can you still access the application? What do you see/get?
15 | 4. Add back the rule
16 | 5. Can you access the application now?
17 |
18 | ## Solution
19 |
20 | Click [here to view to solution](solution.md)
--------------------------------------------------------------------------------
/topics/aws/exercises/security_groups/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - Security Groups
2 |
3 | ### Requirements
4 |
5 | For this exercise you'll need:
6 |
7 | 1. EC2 instance with web application
8 | 2. Security group inbound rules that allow HTTP traffic
9 |
10 | ### Objectives
11 |
12 | 1. List the security groups you have in your account, in the region you are using
13 | 2. Remove the HTTP inbound traffic rule
14 | 3. Can you still access the application? What do you see/get?
15 | 4. Add back the rule
16 | 5. Can you access the application now?
17 |
18 | ### Solution
19 |
20 | #### Console
21 |
22 | 1. Go to EC2 service - > Click on "Security Groups" under "Network & Security"
23 | You should see at least one security group. One of them is called "default"
24 | 2. Click on the security group with HTTP rules and click on "Edit inbound rules".
25 | Remove the HTTP related rules and click on "Save rules"
26 | 3. No. There is a time out because we removed the rule allowing HTTP traffic.
27 | 4. Click on the security group -> edit inbound rules and add the following rule:
28 | * Type: HTTP
29 | * Port range: 80
30 | * Source: Anywhere -> 0.0.0.0/0
31 | 5. yes
32 |
33 | #### CLI
34 |
35 | 1. `aws ec2 describe-security-groups` -> by default, there is one security group called "default", in a new account
36 | 2. Remove the rule:
37 |
38 | ```
39 | aws ec2 revoke-security-group-ingress \
40 | --group-name someHTTPSecurityGroup
41 | --protocol tcp \
42 | --port 80 \
43 | --cidr 0.0.0.0/0
44 | ```
45 | 3. No. There is a time out because we removed the rule allowing HTTP traffic.
46 | 4. Add the rule we remove:
47 |
48 | ```
49 | aws ec2 authorize-security-group-ingress \
50 | --group-name someHTTPSecurityGroup
51 | --protocol tcp \
52 | --port 80 \
53 | --cidr 0.0.0.0/0
54 | ```
55 | 5. yes
56 |
--------------------------------------------------------------------------------
/topics/aws/exercises/snapshots/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - EBS Snapshots
2 |
3 | ### Requirements
4 |
5 | EBS Volume
6 |
7 | ### Objectives
8 |
9 | A. Create a snapshot of an EBS volume
10 | B. Verify the snapshot was created
11 | C. Move the data to another region
12 | D. Create a volume out of it in a different AZ
13 |
14 | ## Solution
15 |
16 | Click [here to view to solution](solution.md)
--------------------------------------------------------------------------------
/topics/aws/exercises/snapshots/solution.md:
--------------------------------------------------------------------------------
1 | ## AWS EC2 - EBS Snapshots
2 |
3 | ### Requirements
4 |
5 | EBS Volume
6 |
7 | ### Objectives
8 |
9 | A. Create a snapshot of an EBS volume
10 | B. Verify the snapshot was created
11 | C. Move the data to another region
12 | D. Create a volume out of it in a different AZ
13 |
14 | ### Solution
15 |
16 | A.
17 | 1. Go to EC2 service
18 | 2. Click on "Volumes" under "Elastic Block Store"
19 | 3. Right click on the chosen volume -> Create snapshot
20 | 4. Insert a description and click on "Create Snapshot"
21 |
22 | B.
23 | 1. Click on "Snapshots" under "Elastic Block Store"
24 | 2. You should see the snapshot you've created
25 |
26 | C.
27 | 1. Select the snapshot and click on Actions -> Copy
28 | 2. Select a region to where the snapshot will be copied
29 |
30 | D.
31 | 1. Select the snapshot and click on Actions -> Create volume
32 | 2. Choose a different AZ
33 | 3. Click on "Create Volume"
34 |
--------------------------------------------------------------------------------
/topics/aws/exercises/subnets/exercise.md:
--------------------------------------------------------------------------------
1 | ## AWS VPC - Subnets
2 |
3 | ### Requirements
4 |
5 | 1. Single newly created VPC
6 | 2. Region with more than two availability zones
7 |
8 | ### Objectives
9 |
10 | 1. Create a subnet in your newly created VPC
11 | 1. CIDR: 10.0.0.0/24
12 | 2. Name: NewSubnet1
13 | 2. Create additional subnet
14 | 1. CIDR: 10.0.1.0/24
15 | 2. Name: NewSubnet2
16 | 3. Different AZ compared to previous subnet
17 | 3. Create additional subnet
18 | 1. CIDR: 10.0.2.0/24
19 | 2. Name: NewSubnet3
20 | 3. Different AZ compared to previous subnets
21 |
22 | ## Solution
23 |
24 | Click [here to view to solution](solution.md)
--------------------------------------------------------------------------------
/topics/aws/exercises/subnets/pulumi/__main__.py:
--------------------------------------------------------------------------------
1 | import pulumi_aws as aws
2 |
3 | availableZones = aws.get_availability_zones(state="available")
4 |
5 | aws.ec2.Subnet("NewSubnet1",
6 | vpc_id=aws.vpc["main"]["id"],
7 | cidr_block="10.0.0.0/24",
8 | availability_zone=availableZones.names[0],
9 | tags={"Name": "NewSubnet1"}
10 | )
11 |
12 | aws.ec2.Subnet("NewSubnet2",
13 | vpc_id=aws.vpc["main"]["id"],
14 | cidr_block="10.0.1.0/24",
15 | availability_zone=availableZones.names[1],
16 | tags={"Name": "NewSubnet2"}
17 | )
18 |
19 | aws.ec2.Subnet("NewSubnet3",
20 | vpc_id=aws.vpc["main"]["id"],
21 | cidr_block="10.0.2.0/24",
22 | availability_zone=availableZones.names[2],
23 | tags={"Name": "NewSubnet3"}
24 | )
25 |
26 | # Run "pulumi up"
27 |
--------------------------------------------------------------------------------
/topics/aws/exercises/subnets/solution.md:
--------------------------------------------------------------------------------
1 | # AWS VPC - Subnets
2 |
3 | ## Requirements
4 |
5 | 1. Single newly created VPC
6 | 2. Region with more than two availability zones
7 |
8 | ## Objectives
9 |
10 | 1. Create a subnet in your newly created VPC
11 | 1. CIDR: 10.0.0.0/24
12 | 1. Name: NewSubnet1
13 | 2. Create additional subnet
14 | 1. CIDR: 10.0.1.0/24
15 | 2. Name: NewSubnet2
16 | 3. Different AZ compared to previous subnet
17 | 3. Create additional subnet
18 | 4. CIDR: 10.0.2.0/24
19 | 5. Name: NewSubnet3
20 | 6. Different AZ compared to previous subnets
21 |
22 | ## Solution
23 |
24 | ### Console
25 |
26 | 1. Click on "Subnets" under "Virtual Private Cloud"
27 | 2. Make sure you filter by your newly created VPC (to not see the subnets in all other VPCs). You can do this in the left side menu
28 | 3. Click on "Create subnet"
29 | 4. Choose your newly created VPC
30 | 5. Set the subnet name to "NewSubnet1"
31 | 6. Choose AZ
32 | 7. Set CIDR to 10.0.0.0/24
33 | 8. Click on "Add new subnet"
34 | 9. Set the subnet name to "NewSubnet2"
35 | 10. Choose a different AZ
36 | 11. Set CIDR to 10.0.1.0/24
37 | 12. Click on "Add new subnet"
38 | 13. Set the subnet name to "NewSubnet3"
39 | 14. Choose a different AZ
40 | 15. Set CIDR to 10.0.2.0/24
41 |
42 | ### Terraform
43 |
44 | Click [here](terraform/main.tf) to view the solution
45 |
46 | ### Pulumi - Python
47 |
48 | Click [here](pulumi/__main__.py) to view the solution
--------------------------------------------------------------------------------
/topics/aws/exercises/subnets/terraform/main.tf:
--------------------------------------------------------------------------------
1 | # Variables
2 |
3 | variable "vpc_id" {
4 | type = string
5 | }
6 |
7 | # AWS Subnets
8 |
9 | resource "aws_subnet" "NewSubnet1" {
10 | cidr_block = "10.0.0.0/24"
11 | vpc_id = var.vpc_id
12 | availability_zone = data.aws_availability_zones.all.names[0]
13 | tags = {
14 | Purpose: exercise
15 | Name: "NewSubnet1"
16 | }
17 | }
18 |
19 | resource "aws_subnet" "NewSubnet2" {
20 | cidr_block = "10.0.1.0/24"
21 | vpc_id = var.vpc_id
22 | availability_zone = data.aws_availability_zones.all.names[1]
23 | tags = {
24 | Purpose: exercise
25 | Name: "NewSubnet2"
26 | }
27 | }
28 |
29 | resource "aws_subnet" "NewSubnet3" {
30 | cidr_block = "10.0.2.0/24"
31 | vpc_id = var.vpc_id
32 | availability_zone = data.aws_availability_zones.all.names[2]
33 | tags = {
34 | Purpose: exercise
35 | Name: "NewSubnet3"
36 | }
37 | }
38 |
39 | # Outputs
40 |
41 | output "NewSubnet1-id" {
42 | value = aws_subnet.NewSubnet1.id
43 | }
44 | output "NewSubnet2-id" {
45 | value = aws_subnet.NewSubnet2.id
46 | }
47 | output "NewSubnet3-id" {
48 | value = aws_subnet.NewSubnet3.id
49 | }
--------------------------------------------------------------------------------
/topics/aws/exercises/url_function/exercise.md:
--------------------------------------------------------------------------------
1 | ## URL Function
2 |
3 | Create a basic AWS Lambda function that will be triggered when you enter a URL in the browser
4 |
5 | ## Solution
6 |
7 | Click [here to view to solution](solution.md)
--------------------------------------------------------------------------------
/topics/aws/exercises/web_app_lambda_dynamodb/exercise.md:
--------------------------------------------------------------------------------
1 | # Web App with DB
2 |
3 | ## Objectives
4 |
5 | Implement the following architecture:
6 |
7 |
8 |
9 | ## Solution
10 |
11 | Click [here](solution.md) to view the solution
--------------------------------------------------------------------------------
/topics/aws/exercises/web_app_lambda_dynamodb/terraform/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "us-west-1"
3 | }
4 |
5 | resource "aws_dynamodb_table" "users" {
6 | name = "users"
7 | hash_key = "id"
8 |
9 | attribute {
10 | name = "id"
11 | type = "S"
12 | }
13 |
14 | attribute {
15 | name = "login"
16 | type = "S"
17 | }
18 |
19 | global_secondary_index {
20 | hash_key =
21 |
22 | }
23 | }
24 |
--------------------------------------------------------------------------------
/topics/aws/images/lambda/aws_lambda_direct_access.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/topics/aws/images/lambda/aws_lambda_direct_access.png
--------------------------------------------------------------------------------
/topics/cicd/ci_for_open_source_project.md:
--------------------------------------------------------------------------------
1 | ## CI for Open Source Project
2 |
3 | 1. Choose an open source project from Github and fork it
4 | 2. Create a CI pipeline/workflow for the project you forked
5 | 3. The CI pipeline/workflow will include anything that is relevant to the project you forked. For example:
6 | * If it's a Python project, you will run PEP8
7 | * If the project has unit tests directory, you will run these unit tests as part of the CI
8 | 4. In a separate file, describe what is running as part of the CI and why you chose to include it. You can also describe any thoughts, dilemmas, challenge you had
9 |
10 | ### Bonus
11 |
12 | Containerize the app of the project you forked using any container engine you would like (e.g. Docker, Podman).
13 | Once you successfully ran the application in a container, submit the Dockerfile to the original project (but be prepared that the maintainer might not need/want that).
14 |
15 | ### Suggestions for Projects
16 |
17 | The following is a list of projects without CI (at least at the moment):
18 |
19 | Note: I wrote a script to find these (except the first project on the list, of course) based on some parameters in case you wonder why these projects specifically are listed.
20 |
21 | * [This one](https://github.com/bregman-arie/devops-exercises) - We don't have CI! help! :)
22 | * [image retrieval platform](https://github.com/skx6/image_retrieval_platform)
23 | * [FollowSpot](https://github.com/jenbrissman/FollowSpot)
24 | * [Pyrin](https://github.com/mononobi/pyrin)
25 | * [food-detection-yolov5](https://github.com/lannguyen0910/food-detection-yolov5)
26 | * [Lifely](https://github.com/sagnik1511/Lifely)
27 |
--------------------------------------------------------------------------------
/topics/cicd/deploy_to_kubernetes.md:
--------------------------------------------------------------------------------
1 | ## Deploy to Kubernetes
2 |
3 | * Write a pipeline that will deploy an "hello world" web app to Kubernete
4 | * The CI/CD system (where the pipeline resides) and the Kubernetes cluster should be on separate systems
5 | * The web app should be accessible remotely and only with HTTPS
6 |
--------------------------------------------------------------------------------
/topics/cicd/remove_builds.md:
--------------------------------------------------------------------------------
1 | ### Jenkins - Remove Jobs
2 |
3 | #### Objective
4 |
5 | Learn how to write a Jenkins script that interacts with builds by removing builds older than X days.
6 |
7 | #### Instructions
8 |
9 | 1. Pick up (or create) a job which has builds older than X days
10 | 2. Write a script to remove only the builds that are older than X days
11 |
12 | #### Hints
13 |
14 | X can be anything. For example, remove builds that are older than 3 days. Just make sure that you don't simply remove all the builds (since that's different from the objective).
15 |
--------------------------------------------------------------------------------
/topics/cicd/remove_jobs.md:
--------------------------------------------------------------------------------
1 | ### Jenkins - Remove Jobs
2 |
3 | #### Objective
4 |
5 | Learn how to write a Jenkins script to remove Jenkins jobs
6 |
7 | #### Instructions
8 |
9 | 1. Create three jobs called: test-job, test2-job and prod-job
10 | 2. Write a script to remove all the jobs that include the string "test"
11 |
--------------------------------------------------------------------------------
/topics/cicd/solutions/deploy_to_kubernetes/Jenkinsfile:
--------------------------------------------------------------------------------
1 | pipeline {
2 |
3 | agent any
4 |
5 | stages {
6 |
7 | stage('Checkout Source') {
8 | steps {
9 | git url:'https://github.com//.git',
10 | // credentialsId: 'creds_github',
11 | branch:'master'
12 | }
13 | }
14 |
15 | stage("Build image") {
16 | steps {
17 | script {
18 | myapp = docker.build("/helloworld:${env.BUILD_ID}")
19 | }
20 | }
21 | }
22 |
23 | stage("Push image") {
24 | steps {
25 | script {
26 | docker.withRegistry('https://registry.hub.docker.com', 'dockerhub') {
27 | myapp.push("latest")
28 | myapp.push("${env.BUILD_ID}")
29 | }
30 | }
31 | }
32 | }
33 |
34 |
35 | stage('Deploy App') {
36 | steps {
37 | script {
38 | sh 'ansible-playbook deploy.yml'
39 | }
40 | }
41 | }
42 |
43 | }
44 |
45 | }
46 |
--------------------------------------------------------------------------------
/topics/cicd/solutions/deploy_to_kubernetes/README.md:
--------------------------------------------------------------------------------
1 | ## Deploy to Kubernetes
2 |
3 | Note: this exercise can be solved in various ways. The solution described here is just one possible way.
4 |
5 | 1. Install Jenkins on one system (follow up the standard Jenkins installation procedure)
6 | 2. Deploy Kubernetes on a remote host (minikube can be an easy way to achieve it)
7 | 3. Create a simple web app or [page](html)
8 |
9 | 4. Create Kubernetes [resoruces](helloworld.yml) - Deployment, Service and Ingress (for HTTPS access)
10 | 5. Create an [Ansible inventory](inventory) and insert the address of the Kubernetes cluster
11 | 6. Write [Ansible playbook](deploy.yml) to deploy the Kubernetes resources and also generate
12 | 7. Create a [pipeline](Jenkinsfile)
13 |
14 | 8. Run the pipeline :)
15 | 9. Try to access the web app remotely
16 |
--------------------------------------------------------------------------------
/topics/cicd/solutions/deploy_to_kubernetes/deploy.yml:
--------------------------------------------------------------------------------
1 | - name: Apply Kubernetes YAMLs
2 | hosts: kubernetes
3 | tasks:
4 | - name: Ensure SSL related directories exist
5 | file:
6 | path: "{{ item }}"
7 | state: directory
8 | loop:
9 | - "/etc/ssl/crt"
10 | - "/etc/ssl/csr"
11 | - "/etc/ssl/private"
12 |
13 | - name: Generate an OpenSSL private key.
14 | openssl_privatekey:
15 | path: /etc/ssl/private/privkey.pem
16 |
17 | - name: generate openssl certficate signing requests
18 | openssl_csr:
19 | path: /etc/ssl/csr/hello-world.app.csr
20 | privatekey_path: /etc/ssl/private/privkey.pem
21 | common_name: hello-world.app
22 |
23 | - name: Generate a Self Signed OpenSSL certificate
24 | openssl_certificate:
25 | path: /etc/ssl/crt/hello-world.app.crt
26 | privatekey_path: /etc/ssl/private/privkey.pem
27 | csr_path: /etc/ssl/csr/hello-world.app.csr
28 | provider: selfsigned
29 |
30 | - name: Create k8s secret
31 | command: "kubectl create secret tls tls-secret --cert=/etc/ssl/crt/hello-world.app.crt --key=/etc/ssl/private/privkey.pem"
32 | register: result
33 | failed_when:
34 | - result.rc == 2
35 |
36 | - name: Deploy web app
37 | k8s:
38 | state: present
39 | definition: "{{ lookup('file', './helloworld.yml') }}"
40 | kubeconfig: '/home/abregman/.kube/config'
41 | namespace: 'default'
42 | wait: true
43 |
--------------------------------------------------------------------------------
/topics/cicd/solutions/deploy_to_kubernetes/helloworld.yml:
--------------------------------------------------------------------------------
1 | ---
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | name: hello-blue-whale
6 | spec:
7 | replicas: 3
8 | selector:
9 | matchLabels:
10 | app: hello-world-app
11 | version: blue
12 | template:
13 | metadata:
14 | name: hello-blue-whale-pod
15 | labels:
16 | app: hello-world-app
17 | version: blue
18 | spec:
19 | containers:
20 | - name: hello-whale-container
21 | image: abregman2/helloworld:latest
22 | imagePullPolicy: Always
23 | ports:
24 | - containerPort: 80
25 | - containerPort: 443
26 | ---
27 | apiVersion: v1
28 | kind: Service
29 | metadata:
30 | name: hello-world
31 | labels:
32 | app: hello-world-app
33 | spec:
34 | ports:
35 | - port: 80
36 | targetPort: 80
37 | protocol: TCP
38 | name: http
39 | selector:
40 | app: hello-world-app
41 | ---
42 | apiVersion: networking.k8s.io/v1
43 | kind: Ingress
44 | metadata:
45 | name: example-ingress
46 | annotations:
47 | cert-manager.io/cluster-issuer: selfsigned-issuer
48 | nginx.ingress.kubernetes.io/rewrite-target: /
49 | kubernetes.io/ingress.class: nginx
50 | spec:
51 | tls:
52 | - hosts:
53 | - hello-world.app
54 | secretName: shhh
55 | rules:
56 | - host: hello-world.app
57 | http:
58 | paths:
59 | - path: /
60 | pathType: Prefix
61 | backend:
62 | service:
63 | name: hello-world
64 | port:
65 | number: 80
66 |
--------------------------------------------------------------------------------
/topics/cicd/solutions/deploy_to_kubernetes/html/images/favicon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/topics/cicd/solutions/deploy_to_kubernetes/html/images/favicon.png
--------------------------------------------------------------------------------
/topics/cicd/solutions/deploy_to_kubernetes/html/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
7 |
8 | Hello World :)
9 |
10 |
11 |
12 |
14 |
15 |
16 |
18 |
19 |
20 |
22 |
23 |
24 |
25 |
27 |
28 |
29 |
30 |
31 |
32 |
34 |
35 |
36 |
37 |
Hello World :)
38 |
39 |
40 |
41 |
42 |
44 |
45 |
46 |
--------------------------------------------------------------------------------
/topics/cicd/solutions/deploy_to_kubernetes/inventory:
--------------------------------------------------------------------------------
1 | [kubernetes]
2 | x.x.x.x
3 |
--------------------------------------------------------------------------------
/topics/cicd/solutions/remove_builds_solution.groovy:
--------------------------------------------------------------------------------
1 | def removeOldBuilds(buildDirectory, days = 14) {
2 |
3 | def wp = new File("${buildDirectory}")
4 | def currentTime = new Date()
5 | def backTime = currentTime - days
6 |
7 | wp.list().each { fileName ->
8 | folder = new File("${buildDirectory}/${fileName}")
9 | if (folder.isDirectory()) {
10 | def timeStamp = new Date(folder.lastModified())
11 | if (timeStamp.before(backTime)) {
12 | folder.delete()
13 | }
14 | }
15 | }
16 | }
17 |
--------------------------------------------------------------------------------
/topics/cicd/solutions/remove_jobs_solution.groovy:
--------------------------------------------------------------------------------
1 | def jobs = Jenkins.instance.items.findAll { job -> job.name =~ /"test"/ }
2 |
3 | jobs.each { job ->
4 | println job.name
5 | //job.delete()
6 | }
7 |
--------------------------------------------------------------------------------
/topics/circleci/README.md:
--------------------------------------------------------------------------------
1 | # Circle CI
2 |
3 | ## Circle CI Questions
4 |
5 | ### Circle CI 101
6 |
7 |
8 | What is Circle CI?
9 |
10 | [Circle CI](https://circleci.com): "CircleCI is a continuous integration and continuous delivery platform that can be used to implement DevOps practices."
11 |
12 |
13 |
14 | What are some benefits of Circle CI?
15 |
16 | [Circle CI Docs](https://circleci.com/docs/about-circleci): "SSH into any job to debug your build issues.
17 | Set up parallelism in your .circleci/config.yml file to run jobs faster.
18 | Configure caching with two simple keys to reuse data from previous jobs in your workflow.
19 | Configure self-hosted runners for unique platform support.
20 | Access Arm resources for the machine executor.
21 | Use orbs, reusable packages of configuration, to integrate with third parties.
22 | Use pre-built Docker images in a variety of languages.
23 | Use the API
24 | to retrieve information about jobs and workflows.
25 | Use the CLI to access advanced tools locally.
26 | Get flaky test detection with test insights."
27 |
28 |
29 |
30 |
31 | What is an Orb?
32 |
33 | [Circle CI Docs](https://circleci.com/developer/orbs): "Orbs are shareable packages of CircleCI configuration you can use to simplify your builds"
34 |
35 | They can come from the public registry or defined privately as part of an organization.
36 |
37 |
38 | ### Circle CI Hands-On 101
39 |
40 |
41 | Where (in what location in the project) Circle CI pipelines are defined?
42 |
43 | `.circleci/config.yml`
44 |
--------------------------------------------------------------------------------
/topics/cloud_slack_bot.md:
--------------------------------------------------------------------------------
1 | ## Cloud Slack Bot
2 |
3 | Create a slack bot to manage cloud instances. You can choose whatever cloud provider you want (e.g. Openstack, AWS, GCP, Azure)
4 | You should provide:
5 |
6 | * Instructions on how to use it
7 | * Source code of the slack bot
8 | * A running slack bot account or a deployment script so we can test it
9 |
10 | The bot should be able to support:
11 |
12 | * Creating new instances
13 | * Removing existing instances
14 | * Starting an instance
15 | * Stopping an instance
16 | * Displaying the status of an instance
17 | * List all available instances
18 |
19 | The bot should also be able to show help message.
20 |
--------------------------------------------------------------------------------
/topics/containers/commit_image.md:
--------------------------------------------------------------------------------
1 | # Create Images on The Fly
2 |
3 | ## Requirements
4 |
5 | Have at least one image locally (run `podman image ls` to confirm).
6 | If you don't have images locally, run simply `podman pull nginx:alpine`.
7 |
8 | ## Objectives
9 |
10 | 1. Run a container using a web server image (e.g. httpd, nginx, ...)
11 | - Bind container's port 80 to local port 80
12 | - Run it in detached mode
13 | - Name should nginx_container
14 | 2. Verify the web server runs and accessible
15 | 3. Create an HTML file with the following content and copy it to the container to the container to path where it will be accessed as an index file
16 |
17 | ```
18 |
19 |
20 | It's a me
21 |
22 |
23 |
Mario
24 |
25 | ```
26 |
27 | 4. Create an image out of the running container and call it "nginx_mario"
28 | 5. Tag the container with "mario" tag
29 | 6. Remove the original container (container_nginx) and verify it was removed
30 | 7. Create a new container out of the image you've created (the same way as the original container)
31 | 8. Run `curl 127.0.0.1:80`. What do you see?
32 | 9. Run `podman diff` on the new image. Explain the output
33 |
34 | ## Solution
35 |
36 | Click [here to view the solution](solutions/commit_image.md)
37 |
--------------------------------------------------------------------------------
/topics/containers/containerized_db.md:
--------------------------------------------------------------------------------
1 | ## Containerized DB
2 |
3 | 1. Run a container with a database of any type of you prefer (MySql, PostgreSQL, Mongo, etc.)
4 | 2. Verify the container is running
5 | 3. Access the container and create a new table (or collection, depends on which DB type you chose) for students
6 | 4. Insert a row (or document) of a student
7 | 5. Verify the row/document was added
8 |
9 | Click [here for the solution](solutions/containerized_db.md)
10 |
--------------------------------------------------------------------------------
/topics/containers/containerized_db_persistent_storage.md:
--------------------------------------------------------------------------------
1 | # Containerized DB with Persistent Storage
2 |
3 | 1. Run a container with a database of any type of you prefer (MySql, PostgreSQL, Mongo, etc.)
4 | 1. Use a mount point on the host for the database instead of using the container storage for that
5 | 2. Explain why using the host storage instead of the container one might be a better choice
6 | 2. Verify the container is running
7 |
--------------------------------------------------------------------------------
/topics/containers/containerized_web_server.md:
--------------------------------------------------------------------------------
1 | # Containerized Web Server
2 |
3 | 1. Run a containerized web server in the background and bind its port (8080) to a local port
4 | 2. Verify the port (8080) is bound
5 | 3. Reach the webserver from your local host
6 | 4. Now run the same web application but bound it to the local port 8080
7 |
8 | Click [here for the solution](solutions/containerized_web_server.md)
9 |
--------------------------------------------------------------------------------
/topics/containers/image_layers.md:
--------------------------------------------------------------------------------
1 | ## Layer by Layer
2 |
3 | ### Objective
4 |
5 | Learn about image layers
6 |
7 | ### Requirements
8 |
9 | Make sure Docker is installed on your system and the service is started
10 |
11 | ```
12 | # Fedora/RHEL/CentOS
13 | rpm -qa | grep docker
14 | systemctl status docker
15 | ```
16 |
17 | ### Instructions
18 |
19 | 1. Write a Dockefile. Any Dockefile! :) (just make sure it's a valid one)
20 | 2. Build an image using the Dockerfile you've wrote
21 | 3. Which of the instructions you've used, created new layers and which added image metadata?
22 | 4. What ways are there to confirm your answer to the last question?
23 | 5. Can you reduce the size of the image you've created?
24 |
--------------------------------------------------------------------------------
/topics/containers/multi_stage_builds.md:
--------------------------------------------------------------------------------
1 | ## Multi-Stage Builds
2 |
3 | ### Objective
4 |
5 | Learn about multi-stage builds
6 |
7 | ### Instructions
8 |
9 | 1. Without actually building an image or running any container, use the following Dockerfile and convert it to use multi-stage:
10 |
11 | ```
12 | FROM nginx
13 | RUN apt-get update \
14 | && apt-get install -y curl python build-essential \
15 | && apt-get install -y nodejs \
16 | && apt-get clean -y
17 | RUN mkdir -p /my_app
18 | ADD ./config/nginx/docker.conf /etc/nginx/nginx.conf
19 | ADD ./config/nginx/k8s.conf /etc/nginx/nginx.conf.k8s
20 | ADD app/ /my_cool_app
21 | WORKDIR /my_cool_app
22 | RUN npm install -g ember-cli
23 | RUN npm install -g bower
24 | RUN apt-get update && apt-get install -y git \
25 | && npm install \
26 | && bower install \
27 | RUN ember build — environment=prod
28 | CMD [ “/root/nginx-app.sh”, “nginx”, “-g”, “daemon off;” ]
29 | ```
30 |
31 | 2. What are the benefits of using multi-stage builds?
32 |
--------------------------------------------------------------------------------
/topics/containers/run_forest_run.md:
--------------------------------------------------------------------------------
1 | ## Run, Forest, Run!
2 |
3 | ### Objective
4 |
5 | Learn what restart policies do and how to use them
6 |
7 | ### Requirements
8 |
9 | Make sure Docker is installed on your system and the service is started
10 |
11 | ```
12 | # Fedora/RHEL/CentOS
13 | rpm -qa | grep docker
14 | systemctl status docker
15 | ```
16 |
17 | ### Instructions
18 |
19 | 1. Run a container with the following properties:
20 | * image: alpine
21 | * name: forest
22 | * restart policy: always
23 | * command to execute: sleep 15
24 | 2. Run `docker container ls` - Is the container running? What about after 15 seconds, is it still running? why?
25 | 3. How then can we stop the container from running?
26 | 4. Remove the container you've created
27 | 5. Run the same container again but this time with `sleep 600` and verify it runs
28 | 6. Restart the Docker service. Is the container still running? why?
29 | 8. Update the policy to `unless-stopped`
30 | 9. Stop the container
31 | 10. Restart the Docker service. Is the container running? why?
32 |
--------------------------------------------------------------------------------
/topics/containers/running_containers.md:
--------------------------------------------------------------------------------
1 | ## Running Containers
2 |
3 | ### Objective
4 |
5 | Learn how to run, stop and remove containers
6 |
7 | ### Requirements
8 |
9 | Make sure Podman or Docker (or any other containers engine) is installed on your system
10 |
11 | ### Instructions
12 |
13 | 1. Run a container using the latest nginx image
14 | 2. List the containers to make sure the container is running
15 | 3. Run another container but this time use ubuntu latest and attach to the terminal of the container
16 | 4. List again the containers. How many containers are running?
17 | 5. Stop the containers
18 | 6. Remove the containers
19 |
--------------------------------------------------------------------------------
/topics/containers/sharing_images.md:
--------------------------------------------------------------------------------
1 | # Sharing Images
2 |
3 | ## Requirements
4 |
5 | Have at least one image locally (run `podman image ls` to confirm).
6 | If you don't have images locally, run simply `podman pull httpd`.
7 |
8 | ## Objectives
9 |
10 | 1. Choose an image and create an archive out of it
11 | 2. Check the archive size. Is it different than the image size? If yes, what's the difference? If not, why?
12 | 3. Copy the generated archive to a remote host
13 | 4. Load the image
14 | 5. Verify it was loaded and exists on the remote host
15 |
16 | ## Solution
17 |
18 | Click [here to view the solution](solutions/sharing_images.md)
19 |
--------------------------------------------------------------------------------
/topics/containers/solutions/containerized_db.md:
--------------------------------------------------------------------------------
1 | # Containerized DB
2 |
3 | 1. Run a container with a database of any type of you prefer (MySql, PostgreSQL, Mongo, etc.)
4 | 2. Verify the container is running
5 | 3. Access the container and create a new table (or collection, depends on which DB type you chose) for students
6 | 4. Insert a row (or document) of a student
7 | 5. Verify the row/document was added
8 |
9 |
10 | ## Solution
11 |
12 | ```
13 | # Run the container
14 | podman run --name mysql -e MYSQL_USER=mario -e MYSQL_PASSWORD=tooManyMushrooms -e MYSQL_DATABASE=university -e MYSQL_ROOT_PASSWORD=MushroomsPizza -d mysql
15 |
16 | # Verify it's running
17 | podman ps
18 |
19 | # Add student row to the database
20 | podman exec -it mysql /bin/bash
21 | mysql -u root
22 | use university;
23 | CREATE TABLE Students (id int NOT NULL, name varchar(255) DEFAULT NULL, PRIMARY KEY (id));
24 | insert into Projects (id, name) values (1,'Luigi');
25 | select * from Students;
26 | ```
27 |
--------------------------------------------------------------------------------
/topics/containers/solutions/containerized_db_persistent_storage.md:
--------------------------------------------------------------------------------
1 | # Containerized DB with Persistent Storage
2 |
3 | 1. Run a container with a database of any type of you prefer (MySql, PostgreSQL, Mongo, etc.)
4 | 1. Use a mount point on the host for the database instead of using the container storage for that
5 | 2. Explain why using the host storage instead of the container one might be a better choice
6 | 2. Verify the container is running
7 |
8 |
9 | ## Solution
10 |
11 | ```
12 | # Create the directory for the DB on host
13 | mkdir -pv ~/local/mysql
14 | sudo semanage fcontext -a -t container_file_t '/home/USERNAME/local/mysql(/.*)?'
15 | sudo restorecon -R /home/USERNAME/local/mysql
16 |
17 | # Run the container
18 | podman run --name mysql -e MYSQL_USER=mario -e MYSQL_PASSWORD=tooManyMushrooms -e MYSQL_DATABASE=university -e MYSQL_ROOT_PASSWORD=MushroomsPizza -d mysql -v /home/USERNAME/local/mysql:/var/lib/mysql/db
19 |
20 | # Verify it's running
21 | podman ps
22 | ```
23 |
24 | It's better to use the storage host because in case the container ever gets removed (or storage reclaimed) you have the DB data still available.
25 |
--------------------------------------------------------------------------------
/topics/containers/solutions/containerized_web_server.md:
--------------------------------------------------------------------------------
1 | # Containerized Web Server
2 |
3 | 1. Run a containerized web server in the background and bind its port (8080) to a local port
4 | 2. Verify the port (8080) is bound
5 | 3. Reach the webserver from your local host
6 | 4. Now run the same web application but bound it to the local port 8080
7 |
8 | ## Solution
9 |
10 | ```
11 | $ podman run -d -p 8080 httpd # run the container and bind the port 8080 to a local port
12 | $ podman port -l 8080 # show to which local port the port 8080 on the container, binds to
13 | 0.0.0.0:41203
14 | $ curl http://0.0.0.0:41203 # use the port from the output of the previous command
15 |
16 | !DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
17 |
18 |
19 |
20 | Test Page for the HTTP Server on Red Hat Enterprise Linux
21 |
22 |
23 | $ podman run -d -p 8080:8080 httpd
24 | ```
25 |
--------------------------------------------------------------------------------
/topics/containers/solutions/image_layers.md:
--------------------------------------------------------------------------------
1 | ## Layer by Layer
2 |
3 | ### Objective
4 |
5 | Learn about image layers
6 |
7 | ### Requirements
8 |
9 | Make sure Docker is installed on your system and the service is started
10 |
11 | ```
12 | # Fedora/RHEL/CentOS
13 | rpm -qa | grep docker
14 | systemctl status docker
15 | ```
16 |
17 | ### Instructions
18 |
19 | 1. Write a Dockefile. Any Dockefile! :) (just make sure it's a valid one)
20 |
21 | ```
22 | FROM ubuntu
23 | EXPOSE 212
24 | ENV foo=bar
25 | WORKDIR /tmp
26 | RUN dd if=/dev/zero of=some_file bs=1024 count=0 seek=1024
27 | RUN dd if=/dev/zero of=some_file bs=1024 count=0 seek=1024
28 | RUN dd if=/dev/zero of=some_file bs=1024 count=0 seek=1024
29 | ```
30 |
31 | 2. Build an image using the Dockerfile you've wrote
32 |
33 | `docker image build -t super_cool_app:latest .`
34 |
35 | 3. Which of the instructions you've used, created new layers and which added image metadata?
36 |
37 | ```
38 | FROM, RUN -> new layer
39 | EXPOSE, ENV, WORKDIR -> metadata
40 | ```
41 |
42 | 4. What ways are there to confirm your answer to the last question?
43 |
44 | You can run `docker image history super_cool_app`. It will show you each instruction and its size. Usually instructions that create new layers has non-zero size, but this is not something you can rely on by itself since, some run commands can have size of zero in `docker image history` output (e.g. `ls -l`).
45 |
46 | You can also use `docker image inspect super_cool_appl` and see if in the output, under "RootFS", there are the number of layers that matches the instructions that should create new layers.
47 |
48 | 5. Can you reduce the size of the image you've created?
49 |
50 | yes, for example, use all the RUN instructions as a single RUN instruction this way:
51 |
52 | `RUN dd if=/dev/zero of=some_file bs=1024 count=0 seek=1024 && dd if=/dev/zero of=some_file bs=1024 count=0 seek=1024 && dd if=/dev/zero of=some_file bs=1024 count=0 seek=1024`
53 |
54 | The change in size might not be dramatic in this case, but in some cases it will make a big impact on the image size.
55 |
--------------------------------------------------------------------------------
/topics/containers/solutions/multi_stage_builds.md:
--------------------------------------------------------------------------------
1 | ## Multi-Stage Builds
2 |
3 | ### Objective
4 |
5 | Learn about multi-stage builds
6 |
7 | ### Instructions
8 |
9 | 1. Without actually building an image or running any container, use the following Dockerfile and convert it to use multi-stage:
10 |
11 | ```
12 | FROM nginx
13 | RUN apt-get update \
14 | && apt-get install -y curl python build-essential \
15 | && apt-get install -y nodejs \
16 | && apt-get clean -y
17 | RUN mkdir -p /my_app
18 | ADD ./config/nginx/docker.conf /etc/nginx/nginx.conf
19 | ADD ./config/nginx/k8s.conf /etc/nginx/nginx.conf.k8s
20 | ADD app/ /my_cool_app
21 | WORKDIR /my_cool_app
22 | RUN npm install -g ember-cli
23 | RUN npm install -g bower
24 | RUN apt-get update && apt-get install -y git \
25 | && npm install \
26 | && bower install \
27 | RUN ember build — environment=prod
28 | CMD [ “/root/nginx-app.sh”, “nginx”, “-g”, “daemon off;” ]
29 | ```
30 |
31 | 2. What are the benefits of using multi-stage builds?
32 |
33 | ### Solution
34 |
35 | 1. One possible solution (the emphasize is on passing the app from the first stage):
36 |
37 | ```
38 | FROM node:6
39 | RUN mkdir -p /my_cool_app
40 | RUN npm install -g ember-cli
41 | RUN npm install -g bower
42 | WORKDIR /my_cool_app
43 | RUN npm install
44 | ADD app/ /my_cool_app
45 | RUN bower install
46 | RUN ember build — environment=prod
47 |
48 | FROM nginx
49 | RUN mkdir -p /my_cool_app
50 | ADD ./config/nginx/docker.conf /etc/nginx/nginx.conf
51 | ADD ./config/nginx/k8s.conf /etc/nginx/nginx.conf.k8s
52 | # Copy build artifacts from the first stage
53 | COPY — from=0 /my_cool_app/dist /my_cool_app/dist
54 | WORKDIR /my_cool_app
55 | CMD [ “/root/nginx-app.sh”, “nginx”, “-g”, “daemon off;” ]
56 | ```
57 |
58 | 2. Multi-stages builds allow you to produce smaller container images by splitting the build process into multiple stages as we did above. The app image doesn't contain anything related to the build process except the actual app.
59 |
--------------------------------------------------------------------------------
/topics/containers/solutions/run_forest_run.md:
--------------------------------------------------------------------------------
1 | ## Run, Forest, Run!
2 |
3 | ### Objective
4 |
5 | Learn what restart policies do and how to use them
6 |
7 | ### Requirements
8 |
9 | Make sure Docker is installed on your system and the service is started
10 |
11 | ```
12 | # Fedora/RHEL/CentOS
13 | rpm -qa | grep docker
14 | systemctl status docker
15 | ```
16 |
17 | ### Instructions
18 |
19 | 1. Run a container with the following properties:
20 | * image: alpine
21 | * name: forest
22 | * restart policy: always
23 | * command to execute: sleep 15
24 |
25 | `docker run --restart always --name forest alpine sleep 15`
26 |
27 | 2. Run `docker container ls` - Is the container running? What about after 15 seconds, is it still running? why?
28 |
29 |
30 | It runs even after it completes to run `sleep 15` because the restart policy is "always". This means that Docker will keep restarting the **same** container even after it exists.
31 |
32 |
33 | 3. How then can we stop the container from running?
34 |
35 | The restart policy doesn't apply when the container is stopped with the command `docker container stop`
36 |
37 | 4. Remove the container you've created
38 |
39 | ```
40 | docker container stop forest
41 | docker container rm forest
42 | ```
43 |
44 | 5. Run the same container again but this time with `sleep 600` and verify it runs
45 |
46 | ```
47 | docker run --restart always --name forest alpine sleep 600
48 | docker container ls
49 | ```
50 |
51 | 6. Restart the Docker service. Is the container still running? why?
52 |
53 | ```
54 | sudo systemctl restart docker
55 | ```
56 | Yes, it's still running due to the restart policy `always` which means Docker will always bring up the container after it exists or stopped (not with the stop command).
57 |
58 | 8. Update the policy to `unless-stopped`
59 |
60 | `docker update --restart unless-stopped forest`
61 |
62 | 9. Stop the container
63 |
64 | `docker container stop forest`
65 |
66 | 10. Restart the Docker service. Is the container running? why?
67 |
68 | ```
69 | sudo systemctl restart docker
70 | ```
71 | No, the container is not running. This is because we changed the policy to `unless-stopped` which will run the container unless it was in stopped status. Since before the restart we stopped the container, Docker didn't continue running it after the restart.
72 |
--------------------------------------------------------------------------------
/topics/containers/solutions/running_containers.md:
--------------------------------------------------------------------------------
1 | ## Running Containers
2 |
3 | ### Objective
4 |
5 | Learn how to run, stop and remove containers
6 |
7 | ### Requirements
8 |
9 | Make sure Podman or Docker (or any other containers engine) is installed on your system
10 |
11 | ### Instructions
12 |
13 | 1. Run a container using the latest nginx image - `podman container run nginx:latest`
14 | 2. List the containers to make sure the container is running - `podman container ls`
15 | 3. Run another container but this time use ubuntu latest and attach to the terminal of the container - `podman container run -it ubuntu:latest /bin/bash`
16 | 4. List again the containers. How many containers are running? - `podman container ls` -> 2
17 | 5. Stop the containers - WARNING: the following will stop all the containers on the host: `podman stop $(podman container ls -q)` or for each container `podman stop [container id/name]`
18 | 6. Remove the containers - WARNING: the following will remove other containers as well if such are running: `podman rm $(podman container ls -q -a)` or for each container `podman rm [container id/name]`
19 |
--------------------------------------------------------------------------------
/topics/containers/solutions/sharing_images.md:
--------------------------------------------------------------------------------
1 | # Sharing Images
2 |
3 | ## Requirements
4 |
5 | Have at least one image locally (run `podman image ls` to confirm).
6 | If you don't have images locally, run simply `podman pull httpd`.
7 |
8 | ## Objectives
9 |
10 | 1. Choose an image and create an archive out of it
11 | 2. Check the archive size. Is it different than the image size? If yes, what's the difference? If not, why?
12 | 3. Copy the generated archive to a remote host
13 | 4. Load the image
14 | 5. Verify it was loaded and exists on the remote host
15 |
16 | ## Solution
17 |
18 | ```
19 | # Save image as an archive
20 | podman save -o httpd.tar httpd
21 |
22 | # Check archive and image sizes
23 | du -sh httpd.tar # output: 143MB
24 | podman image ls | grep httpd # output: 149MB
25 | # The archive is obviously smaller than the image itself (6MB difference)
26 |
27 | # Copy the archive to a remote host
28 | rsync -azc httpd.tar USER@REMOTE_HOST_FQDN:/tmp/
29 |
30 | # Load the image
31 | podman load -i /tmp/httpd.tar
32 |
33 | # Verify it exists on the system after loading
34 | podman image ls
35 | ```
36 |
--------------------------------------------------------------------------------
/topics/containers/solutions/working_with_images.md:
--------------------------------------------------------------------------------
1 | ## Working with Images - Solution
2 |
3 | ### Objective
4 |
5 | Learn how to work with containers images
6 |
7 | ### Requirements
8 |
9 | Make sure Podman, Docker (or any other containers engine) is installed on your system
10 |
11 | ### Instructions
12 |
13 | 1. List the containers images in your environment - `podman image ls`
14 | 2. Pull the latest ubuntu image - `podman image pull ubuntu:latest`
15 | 3. Run a container with the image you just pulled - `podman container run -it ubuntu:latest /bin/bash`
16 | 4. Remove the image. Did it work? - No. There is a running container which is using the image we try to remove
17 | 5. Do whatever is needed in order to remove the image - `podman rm ; podman image rm ubuntu`
18 |
--------------------------------------------------------------------------------
/topics/containers/working_with_images.md:
--------------------------------------------------------------------------------
1 | ## Working with Images
2 |
3 | ### Objective
4 |
5 | Learn how to work with containers images
6 |
7 | ### Requirements
8 |
9 | Make sure Podman or Docker (or any other containers engine) is installed on your system
10 |
11 | ### Instructions
12 |
13 | 1. List the containers images in your environment
14 | 2. Pull the latest ubuntu image
15 | 3. Run a container with the image you just pulled
16 | 4. Remove the image. Did it work?
17 | 5. Do whatever is needed in order to remove the image
18 |
--------------------------------------------------------------------------------
/topics/containers/write_containerfile_run_container.md:
--------------------------------------------------------------------------------
1 | # Write a Containerfile and run a container
2 |
3 | ## Objectives
4 |
5 | 1. Create an image:
6 | * Use centos or ubuntu as the base image
7 | * Install apache web server
8 | * Deploy any web application you want
9 | * Add https support (using HAProxy as reverse-proxy)
10 | 2. Once you wrote the Containerfile and created an image, run the container and test the application. Describe how did you test it and provide output
11 | 3. Describe one or more weakness of your Containerfile. Is it ready to be used in production?
12 |
--------------------------------------------------------------------------------
/topics/databases/solutions/table_for_message_board_system.md:
--------------------------------------------------------------------------------
1 | ## Database Table for Message Board System
2 |
3 | ### Instructions
4 |
5 | Design a database table for a message board system. It should include the following information:
6 |
7 | * Personal details
8 | * Who saw the message and when
9 | * Replies
10 | * Tagged people in the message
11 | * Message categories
12 |
13 | Notes:
14 |
15 | * No SQL is needed
16 | * You should include: table names, field names, data types and mention the foreign keys used.
17 |
18 | ### Solution
19 |
20 | Note: This is just one possible design
21 | 2nd Note: PK = primary key, FK = Foreign key
22 |
23 | ----- People -----
24 | ID int PK
25 | FirstName varchar(255)
26 | LastName varchar(255)
27 | DOB date
28 | Gender varchar(1)
29 | Phone varchar(10)
30 |
31 | | \
32 | | \
33 | | \
34 | v \
35 | \
36 | --- Messages --- v
37 | ID int PK
38 | MessageBoardID FK --- MessageTags ---
39 | --- MessageBoards --- PeopleID int FK ID int PK
40 | ID int PK ----> MsgDate datetime ---> MessageID FK
41 | Board text Message text PeopleID int Fk
42 | MessageID (FK)
43 | ^ |
44 | | |
45 | |______|
46 |
47 |
--------------------------------------------------------------------------------
/topics/databases/table_for_message_board_system.md:
--------------------------------------------------------------------------------
1 | ## Database Table for Message Board System
2 |
3 | ### Instructions
4 |
5 | Design a database table for a message board system. It should include the following information:
6 |
7 | * Personal details
8 | * Who saw the message and when
9 | * Replies
10 | * Tagged people in the message
11 | * Message categories
12 |
13 | Notes:
14 |
15 | * No SQL is needed
16 | * You should include: table names, field names, data types and mention the foreign keys used.
17 |
--------------------------------------------------------------------------------
/topics/devops/containerize_app.md:
--------------------------------------------------------------------------------
1 | ## Containerize an Application
2 |
3 | 1. Clone an open source project you would like to containerize. A couple of suggestions:
4 |
5 | ```
6 | https://github.com/bregman-arie/node-hello-world
7 | https://github.com/bregman-arie/flask-hello-world
8 | ```
9 | 2. Write a Dockerfile you'll use for building an image of the application (you can use any base image you would like)
10 | 3. Build an image using the Dockerfile you've just wrote
11 | 4. Verify the image exists
12 | 5. [Optional] Push the image you've just built to a registry
13 | 6. Run the application
14 | 7. Verify the app is running
15 |
--------------------------------------------------------------------------------
/topics/devops/ha_hello_world.md:
--------------------------------------------------------------------------------
1 | ## Highly Available "Hello World"
2 |
3 | Set up an highly available "Hello World" application with the following instructions:
4 |
5 | * Use a containerized Load Balancer
6 | * Provision two virtual machines (this is where the app will run)
7 | * The page, when visited, should show "Hello World! I'm host X" - X should be the name of the virtual machine
8 |
--------------------------------------------------------------------------------
/topics/devops/solutions/containerize_app.md:
--------------------------------------------------------------------------------
1 | ## Containerize an Application
2 |
3 | 1. Clone an open source project you would like to containerize. A couple of suggestions:
4 |
5 | ```
6 | https://github.com/bregman-arie/node-hello-world
7 | https://github.com/bregman-arie/flask-hello-world
8 | ```
9 |
10 | `git clone https://github.com/bregman-arie/node-hello-world`
11 |
12 | 2. Write a Dockerfile you'll use for building an image of the application (you can use any base image you would like)
13 |
14 | ```
15 | FROM alpine
16 | LABEL maintainer="your name/email"
17 | RUN apk add --update nodejs npm
18 | COPY . /src
19 | WORKDIR /src
20 | RUN npm install
21 | EXPOSE 3000
22 | ENTRYPOINT ["node", "./app.js"]
23 | ```
24 |
25 | 3. Build an image using the Dockerfile you've just written
26 |
27 | `docker image build -t web_app:latest .`
28 |
29 | 4. Verify the image exists
30 |
31 | `docker image ls`
32 |
33 | 5. [Optional] Push the image you've just built to a registry
34 |
35 | ```
36 | docker login
37 | docker image tag web_app:latest <username>/web_app:latest
38 | # Verify with "docker image ls"
39 | docker image push <username>/web_app:latest
40 | ```
41 |
42 | 6. Run the application
43 |
44 | ```
45 | docker container run -d -p 80:3000 web_app:latest
46 | ```
47 |
48 | 7. Verify the app is running
49 |
50 | ```
51 | docker container ls
52 | docker logs <CONTAINER_ID>
53 | # In the browser, go to 127.0.0.1:80
54 | ```
55 |
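56 | A quick check from the terminal as well (assuming the container from step 6 is still up and `curl` is installed):
57 |
58 | ```
59 | curl -s 127.0.0.1:80   # should print the app's "Hello World" response
60 | ```
61 |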
--------------------------------------------------------------------------------
/topics/devops/solutions/ha_hello_world.md:
--------------------------------------------------------------------------------
1 | ## Highly Available "Hello World"
2 |
3 | Set up a highly available "Hello World" application with the following instructions:
4 |
5 | * Use a containerized Load Balancer
6 | * Provision two virtual machines (this is where the app will run)
7 | * The page, when visited, should show "Hello World! I'm host X" - X should be the name of the virtual machine
8 |
9 | ### Solution
10 |
11 | 1. Provision two VMs
12 |
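13 | One possible way to continue (the tools used here - Vagrant, netcat, HAProxy - and the IPs are illustrative choices, not the only way to do it):
14 |
15 | ```
16 | # 1. Provision two VMs, e.g. with Vagrant (one Vagrantfile defining vm1 and vm2)
17 | vagrant up
18 |
19 | # 2. On each VM, serve a page that includes the VM's name
20 | #    (any web app works; a netcat one-liner is enough for this exercise)
21 | while true; do
22 |     echo -e "HTTP/1.1 200 OK\n\nHello World! I'm host $(hostname)" | nc -l -p 8080 -q 1
23 | done &
24 |
25 | # 3. Run a containerized load balancer (HAProxy) pointing at the two VMs
26 | cat > haproxy.cfg << EOF
27 | defaults
28 |     mode http
29 |     timeout connect 5s
30 |     timeout client 30s
31 |     timeout server 30s
32 | frontend http
33 |     bind *:80
34 |     default_backend web
35 | backend web
36 |     server vm1 192.168.56.11:8080 check
37 |     server vm2 192.168.56.12:8080 check
38 | EOF
39 | docker run -d --name lb -p 80:80 \
40 |     -v $PWD/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro haproxy
41 |
42 | # 4. Refreshing the page via the load balancer should alternate between the two hosts
43 | ```
44 |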
--------------------------------------------------------------------------------
/topics/eflk.md:
--------------------------------------------------------------------------------
1 | ## ELK + Filebeat
2 |
3 | Set up the following using any log you would like:
4 |
5 | * Run the following: elasticsearch, logstash, kibana and filebeat (each running in its own container)
6 | * Make filebeat transfer a log to logstash for processing
7 | * Once logstash is done processing, index the data with elasticsearch
8 | * Finally, make sure the data is available in Kibana
9 |
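10 | One possible way to set this up with plain `docker` commands (the image versions, log path and names are illustrative; the configs are trimmed to the minimum):
11 |
12 | ```
13 | docker network create elk
14 |
15 | docker run -d --name elasticsearch --net elk -p 9200:9200 \
16 |     -e discovery.type=single-node \
17 |     docker.elastic.co/elasticsearch/elasticsearch:7.17.0
18 |
19 | docker run -d --name kibana --net elk -p 5601:5601 \
20 |     -e ELASTICSEARCH_HOSTS=http://elasticsearch:9200 \
21 |     docker.elastic.co/kibana/kibana:7.17.0
22 |
23 | # Minimal logstash pipeline: receive from beats, index into elasticsearch
24 | cat > logstash.conf << EOF
25 | input { beats { port => 5044 } }
26 | output { elasticsearch { hosts => ["http://elasticsearch:9200"] } }
27 | EOF
28 | docker run -d --name logstash --net elk \
29 |     -v $PWD/logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro \
30 |     docker.elastic.co/logstash/logstash:7.17.0
31 |
32 | # Minimal filebeat config: ship /var/log/syslog (or any log you like) to logstash
33 | cat > filebeat.yml << EOF
34 | filebeat.inputs:
35 |   - type: log
36 |     paths: ["/var/log/syslog"]
37 | output.logstash:
38 |   hosts: ["logstash:5044"]
39 | EOF
40 | docker run -d --name filebeat --net elk --user root \
41 |     -v $PWD/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro \
42 |     -v /var/log:/var/log:ro \
43 |     docker.elastic.co/beats/filebeat:7.17.0
44 |
45 | # Kibana at 127.0.0.1:5601 -> create an index pattern to verify the data arrived
46 | ```
47 |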
--------------------------------------------------------------------------------
/topics/flask_container_ci/app/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
--------------------------------------------------------------------------------
/topics/flask_container_ci/app/config.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
4 | import os
5 |
6 | basedir = os.path.abspath(os.path.dirname(__file__))
7 |
8 | SECRET_KEY = 'shhh'
9 | CSRF_ENABLED = True
10 |
11 | SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
12 |
--------------------------------------------------------------------------------
/topics/flask_container_ci/app/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
4 | from flask import Flask
5 | from flask import make_response
6 |
7 | import json
8 | from flask_wtf.csrf import CSRFProtect
9 | from werkzeug.exceptions import NotFound
10 |
11 | # Enable Cross-Site Request Forgery (CSRF) protection
12 | # for all routes via Flask-WTF's CSRFProtect.
13 | app = Flask(__name__)
14 | CSRFProtect(app)
15 |
16 | with open("./users.json", "r") as f:
17 |     users = json.load(f)
18 |
19 |
20 | @app.route("/", methods=['GET'])
21 | def index():
22 |     return pretty_json({
23 |         "resources": {
24 |             "users": "/users",
25 |             "user": "/users/<username>",
26 |         },
27 |         "current_uri": "/"
28 |     })
29 |
30 |
31 | @app.route("/users", methods=['GET'])
32 | def all_users():
33 |     return pretty_json(users)
34 |
35 |
36 | @app.route("/users/<username>", methods=['GET'])
37 | def user_data(username):
38 |     if username not in users:
39 |         raise NotFound
40 |
41 |     return pretty_json(users[username])
42 |
43 |
44 | @app.route("/users/<username>/something", methods=['GET'])
45 | def user_something(username):
46 |     raise NotImplementedError()
47 |
48 |
49 | def pretty_json(arg):
50 |     response = make_response(json.dumps(arg, sort_keys=True, indent=4))
51 |     response.headers['Content-type'] = "application/json"
52 |     return response
53 |
54 |
55 | def create_test_app():
56 |     # A separate app instance for tests, with CSRF
57 |     # protection enabled the same way as the main app.
58 |     app = Flask(__name__)
59 |     CSRFProtect(app)
60 |     return app
61 |
62 |
63 | if __name__ == "__main__":
64 |     app.run(port=5000)
65 |
--------------------------------------------------------------------------------
/topics/flask_container_ci/app/tests.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
4 | import os
5 | import unittest
6 |
7 | from config import basedir
8 | from app import app
9 | from app import db
10 |
11 |
12 | class TestCase(unittest.TestCase):
13 |
14 |     def setUp(self):
15 |         app.config['TESTING'] = True
16 |         app.config['WTF_CSRF_ENABLED'] = False
17 |         app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(
18 |             basedir, 'test.db')
19 |         self.app = app.test_client()
20 |         db.create_all()
21 |
22 |     def tearDown(self):
23 |         db.session.remove()
24 |         db.drop_all()
25 |
26 |
27 | if __name__ == '__main__':
28 |     unittest.main()
29 |
--------------------------------------------------------------------------------
/topics/flask_container_ci/requirements.txt:
--------------------------------------------------------------------------------
1 | flask
2 |
--------------------------------------------------------------------------------
/topics/flask_container_ci/tests.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
4 | import unittest
5 |
6 | from app import main
7 |
8 |
9 | class TestCase(unittest.TestCase):
10 |
11 |     def setUp(self):
12 |         self.app = main.app.test_client()
13 |
14 |     def test_main_page(self):
15 |         response = self.app.get('/', follow_redirects=True)
16 |         self.assertEqual(response.status_code, 200)
17 |
18 |     def test_users_page(self):
19 |         response = self.app.get('/users', follow_redirects=True)
20 |         self.assertEqual(response.status_code, 200)
21 |
22 |
23 | if __name__ == '__main__':
24 |     unittest.main()
25 |
--------------------------------------------------------------------------------
/topics/flask_container_ci/users.json:
--------------------------------------------------------------------------------
1 | {
2 | "geralt" : {
3 | "id": "whitewolf",
4 | "name": "Geralt of Rivia",
5 | "description": "Traveling monster slayer for hire"
6 | },
7 | "lara_croft" : {
8 | "id": "m31a3n6sion",
9 | "name": "Lara Croft",
10 | "description": "Highly intelligent and athletic English archaeologist"
11 | },
12 | "mario" : {
13 | "id": "smb3igiul",
14 | "name": "Mario",
15 | "description": "Italian plumber who really likes mushrooms"
16 | },
17 | "gordon_freeman" : {
18 | "id": "nohalflife3",
19 | "name": "Gordon Freeman",
20 | "description": "Physicist with great shooting skills"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/topics/flask_container_ci2/app/__init__.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
--------------------------------------------------------------------------------
/topics/flask_container_ci2/app/config.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
4 | import os
5 |
6 | basedir = os.path.abspath(os.path.dirname(__file__))
7 |
8 | SECRET_KEY = 'shhh'
9 | CSRF_ENABLED = True
10 |
11 | SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
12 |
--------------------------------------------------------------------------------
/topics/flask_container_ci2/app/main.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
4 | from flask import Flask
5 | from flask import make_response
6 |
7 | import json
8 | from flask_wtf.csrf import CSRFProtect
9 |
10 | # Enable Cross-Site Request Forgery (CSRF) protection
11 | # for all routes via Flask-WTF's CSRFProtect.
12 | app = Flask(__name__)
13 | CSRFProtect(app)
14 |
15 |
16 | @app.route("/", methods=['GET'])
17 | def index():
18 |     return pretty_json({
19 |         "resources": {
20 |             "matrix": "/matrix/<matrix>",
21 |             "column": "/columns/<matrix>/<column_number>",
22 |             "row": "/rows/<matrix>/<row_number>",
23 |         },
24 |         "current_uri": "/",
25 |         "example": "/matrix/'123n456n789'",
26 |     })
27 |
28 |
29 | @app.route("/matrix/<matrix>", methods=['GET'])
30 | def matrix(matrix):
31 |     # TODO: return matrix, each row in a new line
32 |     pass
33 |
34 |
35 | @app.route("/columns/<matrix>/<column_number>", methods=['GET'])
36 | def column(matrix, column_number):
37 |     # TODO: return column based on given column number
38 |     pass
39 |
40 |
41 | @app.route("/rows/<matrix>/<row_number>", methods=['GET'])
42 | def row(matrix, row_number):
43 |     # TODO: return row based on given row number
44 |     pass
45 |
46 |
47 | def pretty_json(arg):
48 |     response = make_response(json.dumps(arg, sort_keys=True, indent=4))
49 |     response.headers['Content-type'] = "application/json"
50 |     return response
51 |
52 |
53 | if __name__ == "__main__":
54 |     app.run(port=5000)
55 |
--------------------------------------------------------------------------------
/topics/flask_container_ci2/app/tests.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
4 | import os
5 | import unittest
6 |
7 | from config import basedir
8 | from app import app
9 | from app import db
10 |
11 |
12 | class TestCase(unittest.TestCase):
13 |
14 |     def setUp(self):
15 |         app.config['TESTING'] = True
16 |         app.config['WTF_CSRF_ENABLED'] = False
17 |         app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(
18 |             basedir, 'test.db')
19 |         self.app = app.test_client()
20 |         db.create_all()
21 |
22 |     def tearDown(self):
23 |         db.session.remove()
24 |         db.drop_all()
25 |
26 |
27 | if __name__ == '__main__':
28 |     unittest.main()
29 |
--------------------------------------------------------------------------------
/topics/flask_container_ci2/requirements.txt:
--------------------------------------------------------------------------------
1 | flask
2 |
--------------------------------------------------------------------------------
/topics/flask_container_ci2/tests.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # coding=utf-8
3 |
4 | import unittest
5 |
6 | from app import main
7 |
8 |
9 | class TestCase(unittest.TestCase):
10 |
11 |     def setUp(self):
12 |         self.app = main.app.test_client()
13 |
14 |     def test_main_page(self):
15 |         response = self.app.get('/', follow_redirects=True)
16 |         self.assertEqual(response.status_code, 200)
17 |
18 |     def test_matrix(self):
19 |         response = self.app.get('/matrix/123n459,789', follow_redirects=True)
20 |         # Change this when the matrix route is implemented and returns the actual matrix
21 |         self.assertEqual(response.status_code, 500)
22 |
23 |
24 | if __name__ == '__main__':
25 |     unittest.main()
26 |
--------------------------------------------------------------------------------
/topics/git/branch_01.md:
--------------------------------------------------------------------------------
1 | ## Branch 01
2 |
3 | ### Objective
4 |
5 | Learn how to work with Git Branches
6 |
7 | ### Instructions
8 |
9 | 1. Pick up a Git repository (or create a new one) with at least one commit
10 | 2. Create a new branch called "dev"
11 | 3. Modify one of the files in the repository
12 | 4. Create a new commit
13 | 5. Verify the commit you created is only in "dev" branch
14 |
15 | ### After you complete the exercise
16 |
17 | Answer the following:
18 |
19 | 1. Why are branches useful? Give an example of a real-world scenario for using branches
20 |
--------------------------------------------------------------------------------
/topics/git/commit_01.md:
--------------------------------------------------------------------------------
1 | ## Git Commit 01
2 |
3 | ### Objective
4 |
5 | Learn how to commit changes in Git repositories
6 |
7 | ### Instructions
8 |
9 | 1. Create a new directory
10 | 2. Make it a git repository
11 | 3. Create a new file called `file` with the content "hello commit"
12 | 4. Commit your new file
13 | 5. Run a git command to verify your commit was recorded
14 |
15 | ### After you complete the exercise
16 |
17 | Answer the following:
18 |
19 | * What are the benefits of commits?
20 | * Is there another way to verify a commit was created?
21 |
--------------------------------------------------------------------------------
/topics/git/solutions/branch_01_solution.md:
--------------------------------------------------------------------------------
1 | ## Branch 01 - Solution
2 |
3 | ```
4 | cd some_repository
5 | echo "master branch" > file1
6 | git add file1
7 | git commit -a -m "added file1"
8 | git checkout -b dev
9 | echo "dev branch" > file2
10 | git add file2
11 | git commit -a -m "added file2"
12 | ```
13 |
14 | Verify:
15 |
16 | ```
17 | git log   # you should see two commits
18 | git checkout master
19 | git log   # you should see one commit
20 | ```
21 |
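22 | To check directly that the new commit exists only in the "dev" branch, you can also list the commits that are reachable from dev but not from master:
23 |
24 | ```
25 | git log master..dev   # should show only the "added file2" commit
26 | ```
27 |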
--------------------------------------------------------------------------------
/topics/git/solutions/commit_01_solution.md:
--------------------------------------------------------------------------------
1 | ## Git Commit 01 - Solution
2 |
3 | ```
4 | mkdir my_repo && cd my_repo
5 | git init
6 | echo "hello_commit" > file
7 | git add file
8 | git commit -a -m "It's my first commit. Exciting!"
9 | git log
10 | ```
11 |
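12 | As for the follow-up question in the exercise - a couple of other ways to verify the commit was recorded:
13 |
14 | ```
15 | git show HEAD   # displays the latest commit along with its diff
16 | git status      # should report "nothing to commit, working tree clean"
17 | ```
18 |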
--------------------------------------------------------------------------------
/topics/git/solutions/squashing_commits.md:
--------------------------------------------------------------------------------
1 | ## Git - Squashing Commits - Solution
2 |
3 |
4 | 1. In a git repository, create a new file with the content "Mario" and commit the change
5 |
6 | ```
7 | echo "Mario" > new_file
8 | git add new_file
9 | git commit -a -m "New file"
10 | ```
11 |
12 | 2. Change the content of the file you just created to "Mario & Luigi" and create another commit
13 |
14 | ```
15 | echo "Mario & Luigi" > new_file
16 | git commit -a -m "Added Luigi"
17 | ```
18 |
19 | 3. Verify you have two separate commits - `git log`
20 |
21 | 4. Squash the two commits you've created into one commit
22 |
23 | ```
24 | git rebase -i HEAD~2
25 | ```
26 |
27 | You should see something similar to:
28 |
29 | ```
30 | pick 5412076 New file
31 | pick 4016808 Added Luigi
32 | ```
33 |
34 | Change `pick` to `squash` for the second commit:
35 |
36 |
37 | ```
38 | pick 5412076 New file
39 | squash 4016808 Added Luigi
40 | ```
41 |
42 | Save it and provide a commit message for the squashed commit
43 |
44 | ### After you complete the exercise
45 |
46 | Answer the following:
47 |
48 | * What is the reason for squashing commits? - the history becomes cleaner and it's easier to track changes, without commits like "removed a character", for example.
49 | * Is it possible to squash more than 2 commits? - yes
50 |
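51 | One more check worth doing after the rebase:
52 |
53 | ```
54 | git log --oneline   # the two commits should now appear as a single squashed commit
55 | ```
56 |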
--------------------------------------------------------------------------------
/topics/git/squashing_commits.md:
--------------------------------------------------------------------------------
1 | ## Git - Squashing Commits
2 |
3 | ### Objective
4 |
5 | Learn how to squash commits
6 |
7 | ### Instructions
8 |
9 | 1. In a git repository, create a new file with the content "Mario" and create a new commit
10 | 2. Change the content of the file you just created to "Mario & Luigi" and create another commit
11 | 3. Verify you have two separate commits
12 | 4. Squash the latest two commits into one commit
13 |
14 | ### After you complete the exercise
15 |
16 | Answer the following:
17 |
18 | * What is the reason for squashing commits?
19 | * Is it possible to squash more than 2 commits?
20 |
--------------------------------------------------------------------------------
/topics/jenkins_pipelines.md:
--------------------------------------------------------------------------------
1 | ## Jenkins Pipelines
2 |
3 | Write/Create the following Jenkins pipelines:
4 |
5 | * A pipeline which will run unit tests upon git push to a certain repository
6 | * A pipeline which will do the following:
7 |
8 | * Provision an instance (can also be a container)
9 | * Configure the instance as an Apache web server
10 | * Deploy a web application on the provisioned instance
11 |
--------------------------------------------------------------------------------
/topics/jenkins_scripts.md:
--------------------------------------------------------------------------------
1 | ## Jenkins Scripts
2 |
3 | Write the following scripts:
4 |
5 | * Remove all the jobs which include the string "REMOVE_ME" in their name
6 | * Remove builds older than 14 days
7 |
8 | ### Answer
9 |
10 | * [Remove jobs which include specific string](jenkins/scripts/jobs_with_string.groovy)
11 | * [Remove builds older than 14 days](jenkins/scripts/old_builds.groovy)
12 |
--------------------------------------------------------------------------------
/topics/kafka/README.md:
--------------------------------------------------------------------------------
1 | # Apache Kafka
2 |
3 | ## Kafka Exercises
4 |
5 | |Name|Topic|Objective & Instructions|Solution|Comments|
6 | |--------|--------|------|----|----|
7 |
8 | ## Kafka Self Assessment
9 |
10 | * [Kafka 101](#kafka-101)
11 |
12 |
13 | ### Kafka 101
14 |
15 |
16 | What is Kafka?
17 |
18 | [kafka.apache.org](https://kafka.apache.org): "Apache Kafka is an open-source distributed event streaming platform used by thousands of companies for high-performance data pipelines, streaming analytics, data integration, and mission-critical applications."
19 |
20 | In other words, Kafka is a sort of distributed log where you can store events, read them and distribute them to different services, all at high scale and in real time.
21 |
22 |
23 |
24 | What is Kafka used for?
25 |
26 | - Real-time e-commerce
27 | - Banking
28 | - Health Care
29 | - Automotive (traffic alerts, hazard alerts, ...)
30 | - Real-time Fraud Detection
31 |
32 |
33 |
34 | What is a "Producer" in regards to Kafka?
35 |
36 | An application that publishes data to the Kafka cluster.
37 |
38 |
39 |
40 | ### Kafka Architecture
41 |
42 |
43 | What's in a Kafka cluster?
44 |
45 | - Broker: a server with a Kafka process running on it. Such a server has local storage. A single Kafka cluster usually contains multiple brokers.
46 |
47 |
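48 | To see a producer (and a consumer) in action, you can use the console tools that ship with Kafka. The broker address and topic name below assume a local single-broker setup:
49 |
50 | ```
51 | # Create a topic and publish a couple of events (each line typed into the producer is one event)
52 | bin/kafka-topics.sh --create --topic demo --bootstrap-server localhost:9092
53 | bin/kafka-console-producer.sh --topic demo --bootstrap-server localhost:9092
54 | > first event
55 | > second event
56 |
57 | # Read the events back
58 | bin/kafka-console-consumer.sh --topic demo --from-beginning --bootstrap-server localhost:9092
59 | ```
60 |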
--------------------------------------------------------------------------------
/topics/kubernetes/exercises/labels_and_selectors/exercise.md:
--------------------------------------------------------------------------------
1 | # Labels and Selectors 101
2 |
3 | ## Objectives
4 |
5 | 1. How to list all the Pods with the label "app=web"?
6 | 2. How to list all objects labeled as "env=staging"?
7 | 3. How to list all deployments from "env=prod" and "type=web"?
8 |
9 | ## Solution
10 |
11 | Click [here](solution.md) to view the solution.
--------------------------------------------------------------------------------
/topics/kubernetes/exercises/labels_and_selectors/solution.md:
--------------------------------------------------------------------------------
1 | # Labels and Selectors 101
2 |
3 | ## Objectives
4 |
5 | 1. How to list all the Pods with the label "app=web"?
6 | 2. How to list all objects labeled as "env=staging"?
7 | 3. How to list all deployments from "env=prod" and "type=web"?
8 |
9 | ## Solution
10 |
11 | `k get po -l app=web`
12 | `k get all -l env=staging`
13 | `k get deploy -l env=prod,type=web`
14 |
15 | Note: `k` is a common alias for `kubectl` (`alias k=kubectl`).
--------------------------------------------------------------------------------
/topics/kubernetes/exercises/node_selectors/exercise.md:
--------------------------------------------------------------------------------
1 | # Node Selectors
2 |
3 | ## Objectives
4 |
5 | 1. Apply the label "hw=max" on one of the nodes in your cluster
6 | 2. Create and run a Pod called `some-pod` with the image `redis` and configure it to use the selector `hw=max`
7 | 3. Explain why node selectors might be limited
8 |
9 |
10 | ## Solution
11 |
12 | Click [here](solution.md) to view the solution
--------------------------------------------------------------------------------
/topics/kubernetes/exercises/node_selectors/solution.md:
--------------------------------------------------------------------------------
1 | # Node Selectors
2 |
3 | ## Objectives
4 |
5 | 1. Apply the label "hw=max" on one of the nodes in your cluster
6 | 2. Create and run a Pod called `some-pod` with the image `redis` and configure it to use the selector `hw=max`
7 | 3. Explain why node selectors might be limited
8 |
9 |
10 | ## Solution
11 |
12 |
13 |
14 | 1. `kubectl label nodes some-node hw=max`
15 | 2.
16 |
17 | ```
18 | kubectl run some-pod --image=redis --dry-run=client -o yaml > pod.yaml
19 |
20 | vi pod.yaml
21 |
22 | spec:
23 | nodeSelector:
24 | hw: max
25 |
26 | kubectl apply -f pod.yaml
27 | ```
28 |
29 | 3. Assume you would like to run your Pod on all the nodes with either `hw` set to max or to min, instead of just max. This is not possible with node selectors, which are quite simplistic, and this is where you might want to consider `node affinity`.
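30 |
31 | For contrast, a node affinity version of the same scheduling rule, extended to allow both values (a sketch; the field names follow the standard Pod spec):
32 |
33 | ```
34 | spec:
35 |   affinity:
36 |     nodeAffinity:
37 |       requiredDuringSchedulingIgnoredDuringExecution:
38 |         nodeSelectorTerms:
39 |         - matchExpressions:
40 |           - key: hw
41 |             operator: In
42 |             values:
43 |             - max
44 |             - min
45 | ```
46 |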
--------------------------------------------------------------------------------
/topics/kubernetes/exercises/taints_101/exercise.md:
--------------------------------------------------------------------------------
1 | # Taints 101
2 |
3 | ## Objectives
4 |
5 | 1. Check if one of the nodes in the cluster has taints (doesn't matter which node)
6 | 2. Create a taint on one of the nodes in your cluster with key of "app" and value of "web" and effect of "NoSchedule"
7 | 1. Explain what it does exactly
8 | 2. Verify it was applied
9 | 3. Run a Pod that will be able to run on the node on which you applied the taint
10 |
11 | ## Solution
12 |
13 | Click [here](solution.md) to view the solution.
--------------------------------------------------------------------------------
/topics/kubernetes/exercises/taints_101/solution.md:
--------------------------------------------------------------------------------
1 | # Taints 101
2 |
3 | ## Objectives
4 |
5 | 1. Check if one of the nodes in the cluster has taints (doesn't matter which node)
6 | 2. Create a taint on one of the nodes in your cluster with key of "app" and value of "web" and effect of "NoSchedule"
7 | 1. Explain what it does exactly
8 | 2. Verify it was applied
9 | 3. Run a Pod that will be able to run on the node on which you applied the taint
10 |
11 | ## Solution
12 |
13 | 1. `kubectl describe no minikube | grep -i taints`
14 | 2. `kubectl taint node minikube app=web:NoSchedule`
15 | 1. Pods that don't have a toleration for the taint `app=web:NoSchedule` will not be scheduled on node `minikube`
16 | 2. `kubectl describe no minikube | grep -i taints`
17 | 3.
18 |
19 | ```
20 | kubectl run some-pod --image=redis
21 | kubectl edit po some-pod
22 | ```
23 |
24 | Add the following toleration to the Pod spec:
25 |
26 | ```
27 | tolerations:
28 | - effect: NoSchedule
29 |   key: app
30 |   operator: Equal
31 |   value: web
32 | ```
33 |
34 | Save and exit. The Pod should be running.
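35 |
36 | Alternatively, the toleration can be declared when the Pod is created instead of added with `kubectl edit` afterwards (a sketch of the full manifest):
37 |
38 | ```
39 | apiVersion: v1
40 | kind: Pod
41 | metadata:
42 |   name: some-pod
43 | spec:
44 |   containers:
45 |   - name: redis
46 |     image: redis
47 |   tolerations:
48 |   - effect: NoSchedule
49 |     key: app
50 |     operator: Equal
51 |     value: web
52 | ```
53 |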
--------------------------------------------------------------------------------
/topics/kubernetes/images/cluster_architecture_exercise.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/topics/kubernetes/images/cluster_architecture_exercise.png
--------------------------------------------------------------------------------
/topics/kubernetes/images/cluster_architecture_solution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/topics/kubernetes/images/cluster_architecture_solution.png
--------------------------------------------------------------------------------
/topics/kubernetes/images/service_exercise.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/topics/kubernetes/images/service_exercise.png
--------------------------------------------------------------------------------
/topics/kubernetes/images/service_solution.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/topics/kubernetes/images/service_solution.png
--------------------------------------------------------------------------------
/topics/kubernetes/killing_containers.md:
--------------------------------------------------------------------------------
1 | ## "Killing" Containers
2 |
3 | 1. Run Pod with a web service (e.g. httpd)
4 | 2. Verify the web service is running with the `ps` command
5 | 3. Check how many restarts the pod has performed
6 | 4. Kill the web service process
7 | 5. Check how many restarts the pod has performed
8 | 6. Verify again the web service is running
9 |
10 | ## After you complete the exercise
11 |
12 | * Why did the "RESTARTS" count rise?
13 |
--------------------------------------------------------------------------------
/topics/kubernetes/pods_01.md:
--------------------------------------------------------------------------------
1 | ## Pods 01
2 |
3 | #### Objective
4 |
5 | Learn how to create pods
6 |
7 | #### Instructions
8 |
9 | 1. Choose a container image (e.g. redis, nginx, mongo, etc.)
10 | 2. Create a pod (in the default namespace) using the image you chose
11 | 3. Verify the pod is running
12 |
--------------------------------------------------------------------------------
/topics/kubernetes/replicaset_01.md:
--------------------------------------------------------------------------------
1 | ## ReplicaSet 101
2 |
3 | #### Objective
4 |
5 | Learn how to create and view ReplicaSets
6 |
7 | #### Instructions
8 |
9 | 1. Create a ReplicaSet with 2 replicas. The app can be anything.
10 | 2. Verify a ReplicaSet was created and there are 2 replicas
11 | 3. Delete one of the Pods the ReplicaSet has created
12 | 4. If you list all the Pods now, what will you see?
13 | 5. Remove the ReplicaSet you've created
14 | 6. Verify you've deleted the ReplicaSet
15 |
--------------------------------------------------------------------------------
/topics/kubernetes/replicaset_02.md:
--------------------------------------------------------------------------------
1 | ## ReplicaSet 102
2 |
3 | #### Objective
4 |
5 | Learn how to operate ReplicaSets
6 |
7 | #### Instructions
8 |
9 | 1. Create a ReplicaSet with 2 replicas. The app can be anything.
10 | 2. Verify a ReplicaSet was created and there are 2 replicas
11 | 3. Remove the ReplicaSet but NOT the pods it created
12 | 4. Verify you've deleted the ReplicaSet but the Pods are still running
13 |
--------------------------------------------------------------------------------
/topics/kubernetes/replicaset_03.md:
--------------------------------------------------------------------------------
1 | ## ReplicaSet 103
2 |
3 | #### Objective
4 |
5 | Learn how labels are used by ReplicaSets
6 |
7 | #### Instructions
8 |
9 | 1. Create a ReplicaSet with 2 replicas. Make sure the label used for the selector and in the Pods is "type=web"
10 | 2. Verify a ReplicaSet was created and there are 2 replicas
11 | 3. List the Pods running
12 | 4. Remove the label (type=web) from one of the Pods created by the ReplicaSet
13 | 5. List the Pods running. Are there more Pods running after removing the label? Why?
14 | 6. Verify the ReplicaSet indeed created a new Pod
15 |
--------------------------------------------------------------------------------
/topics/kubernetes/services_01.md:
--------------------------------------------------------------------------------
1 | ## Services 01
2 |
3 | #### Objective
4 |
5 | Learn how to create services
6 |
7 | #### Instructions
8 |
9 | 1. Create a pod running nginx
10 | 2. Create a service for the pod you've just created
11 | 3. Verify the app is reachable
12 |
--------------------------------------------------------------------------------
/topics/kubernetes/solutions/killing_containers.md:
--------------------------------------------------------------------------------
1 | ## "Killing" Containers - Solution
2 |
3 | 1. Run Pod with a web service (e.g. httpd) - `kubectl run web --image registry.redhat.io/rhscl/httpd-24-rhel7`
4 | 2. Verify the web service is running with the `ps` command - `kubectl exec web -- ps`
5 | 3. Check how many restarts the pod has performed - `kubectl get po web`
6 | 4. Kill the web service process -`kubectl exec web -- kill 1`
7 | 5. Check how many restarts the pod has performed - `kubectl get po web`
8 | 6. Verify again the web service is running - `kubectl exec web -- ps`
9 |
10 | ## After you complete the exercise
11 |
12 | * Why did the "RESTARTS" count rise? - `because we killed the process and Kubernetes identified that the container wasn't running properly, so it restarted the container`
13 |
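14 | A more direct way to read just the restart counter (a jsonpath sketch):
15 |
16 | ```
17 | kubectl get po web -o jsonpath='{.status.containerStatuses[0].restartCount}'
18 | ```
19 |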
--------------------------------------------------------------------------------
/topics/kubernetes/solutions/pods_01_solution.md:
--------------------------------------------------------------------------------
1 | ## Pods 01 - Solution
2 |
3 | ```
4 | kubectl run nginx --image=nginx --restart=Never
5 | kubectl get pods
6 | ```
7 |
--------------------------------------------------------------------------------
/topics/kubernetes/solutions/replicaset_01_solution.md:
--------------------------------------------------------------------------------
1 | ## ReplicaSet 01 - Solution
2 |
3 | 1. Create a ReplicaSet with 2 replicas. The app can be anything.
4 |
5 | ```
6 | cat >> rs.yaml << EOF
7 | apiVersion: apps/v1
8 | kind: ReplicaSet
9 | metadata:
10 |   name: web
11 |   labels:
12 |     type: web
13 | spec:
14 |   replicas: 2
15 |   selector:
16 |     matchLabels:
17 |       type: web
18 |   template:
19 |     metadata:
20 |       labels:
21 |         type: web
22 |     spec:
23 |       containers:
24 |       - name: httpd
25 |         image: httpd
26 | EOF
27 | kubectl apply -f rs.yaml
28 | ```
29 |
30 | 2. Verify a ReplicaSet was created and there are 2 replicas
31 |
32 | ```
33 | kubectl get rs web
34 | ```
35 |
36 | 3. Delete one of the Pods the ReplicaSet has created
37 |
38 | ```
39 | kubectl delete po <POD_NAME>
40 | ```
41 |
42 | 4. If you list all the Pods now, what will you see?
43 |
44 | ```
45 | The same number of Pods. Since we defined 2 replicas, the ReplicaSet will make sure to create another Pod that will replace the one you've deleted.
46 | ```
47 |
48 | 5. Remove the ReplicaSet you've created
49 |
50 | ```
51 | kubectl delete -f rs.yaml
52 | ```
53 |
54 | 6. Verify you've deleted the ReplicaSet
55 |
56 | ```
57 | kubectl get rs
58 | # OR a more specific way: kubectl get -f rs.yaml
59 | ```
60 |
--------------------------------------------------------------------------------
/topics/kubernetes/solutions/replicaset_02_solution.md:
--------------------------------------------------------------------------------
1 | ## ReplicaSet 02 - Solution
2 |
3 | 1. Create a ReplicaSet with 2 replicas. The app can be anything.
4 |
5 | ```
6 | cat >> rs.yaml <> rs.yaml < running_pods.txt
43 | ```
44 |
45 | 4. Remove the label (type=web) from one of the Pods created by the ReplicaSet
46 |
47 | ```
48 | kubectl label pod type-
49 | ```
50 |
51 | 5. List the Pods running. Are there more Pods running after removing the label? Why?
52 |
53 | ```
54 | Yes, there is an additional Pod running because once the label (used as a matching selector) was removed, the Pod became independant meaning, it's not controlled by the ReplicaSet anymore and the ReplicaSet was missing replicas based on its definition so, it created a new Pod.
55 | ```
56 |
57 | 6. Verify the ReplicaSet indeed created a new Pod
58 |
59 | ```
60 | kubectl describe rs web
61 | ```
62 |
--------------------------------------------------------------------------------
/topics/kubernetes/solutions/services_01_solution.md:
--------------------------------------------------------------------------------
1 | ## Services 01 - Solution
2 |
3 | ```
4 | kubectl run nginx --image=nginx --restart=Never --port=80 --labels="app=dev-nginx"
5 |
6 | cat << EOF > nginx-service.yaml
7 | apiVersion: v1
8 | kind: Service
9 | metadata:
10 | name: nginx-service
11 | spec:
12 | selector:
13 | app: dev-nginx
14 | ports:
15 | - protocol: TCP
16 | port: 80
17 |       targetPort: 80
18 | EOF
19 | kubectl apply -f nginx-service.yaml
20 | ```
21 |
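22 | To verify the app is reachable through the service (step 3 of the exercise), one option is a throwaway Pod that queries the service by name (busybox is an illustrative choice):
23 |
24 | ```
25 | kubectl run tmp --rm -it --restart=Never --image=busybox -- wget -qO- http://nginx-service
26 | ```
27 |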
--------------------------------------------------------------------------------
/topics/misc/elk_kibana_aws.md:
--------------------------------------------------------------------------------
1 | # Elasticsearch, Kibana and AWS
2 |
3 | Your task is to build an elasticsearch cluster along with Kibana dashboard on one of the following clouds:
4 |
5 | * AWS
6 | * OpenStack
7 | * Azure
8 | * GCP
9 |
10 | You have to describe (preferably with some drawings) how you are going to set it up.
11 | Please describe in detail:
12 |
13 | - How you scale it up or down
14 | - How you quickly (in less than 20 minutes) provision the cluster
15 | - How you apply a security policy for access control
16 | - How you transfer the logs from the app to ELK
17 | - How you deal with multiple apps running in different regions
18 |
19 | # Solution
20 |
21 | This is one out of many possible solutions. It relies heavily on AWS.
22 |
23 | * Create a VPC with a subnet so we can place the Elasticsearch node(s) in an internal environment only.
24 | If required, we will also set up NAT for public access.
25 |
26 | * Create an IAM role for the access to the cluster. Also, create a separate role for admin access.
27 |
28 | * To provision the solution quickly, we will use the managed Elasticsearch service from AWS for the production deployment.
29 | This way we also cover multiple AZs. As for authentication, we either use Amazon Cognito or the organization's LDAP server.
30 |
31 | * To transfer data, we will have to install a Logstash agent on the instances. The agent will be responsible
32 | for pushing the data to Elasticsearch.
33 |
34 | * For monitoring we will use:
35 |
36 | * CloudWatch to monitor cluster resource utilization
37 | * CloudWatch metrics dashboards
38 |
39 | * If access is required from multiple regions, we will transfer all the data to S3, which will allow us to view the data
40 | from different regions and consolidate it in one dashboard
41 |
--------------------------------------------------------------------------------
/topics/openshift/projects_101.md:
--------------------------------------------------------------------------------
1 | ## OpenShift - Projects 101
2 |
3 | ### Objectives
4 |
5 | In a newly deployed cluster (preferably) perform the following:
6 |
7 | 1. Log in to the OpenShift cluster
8 | 2. List all the projects
9 | 3. Create a new project called 'neverland'
10 | 4. Check the overview status of the current project
11 |
--------------------------------------------------------------------------------
/topics/openshift/solutions/my_first_app.md:
--------------------------------------------------------------------------------
1 | ## OpenShift - My First Application
2 |
3 | ### Objectives
4 |
5 | 1. Create a MySQL application
6 | 2. Describe which OpenShift objects were created
7 |
8 | ### Solution
9 |
10 | 1. `oc new-app mysql`
11 | 2. The following objects were created:
12 | * ImageStream:
13 |
--------------------------------------------------------------------------------
/topics/openshift/solutions/projects_101.md:
--------------------------------------------------------------------------------
1 | ## OpenShift - Projects 101
2 |
3 | ### Objectives
4 |
5 | In a newly deployed cluster (preferably) perform the following:
6 |
7 | 1. Log in to the OpenShift cluster
8 | 2. List all the projects
9 | 3. Create a new project called 'neverland'
10 | 4. Check the overview status of the current project
11 |
12 | ### Solution
13 |
14 | ```
15 | oc login -u YOUR_USER -p YOUR_PASSWORD_OR_TOKEN
16 | oc get projects # Empty output in new cluster
17 | oc new-project neverland
18 | oc status
19 | ```
20 |
--------------------------------------------------------------------------------
/topics/os/fork_101.md:
--------------------------------------------------------------------------------
1 | ## Fork 101
2 |
3 | Answer the questions given the following program (without running it):
4 |
5 | ```
6 | #include <stdio.h>
7 | #include <unistd.h>
8 | int main()
9 | {
10 |     fork();
11 |     printf("\nyay\n");
12 |     return 0;
13 | }
14 | ```
15 |
16 | 1. How many times will the word "yay" be printed?
17 | 2. How many processes will be created?
18 |
--------------------------------------------------------------------------------
/topics/os/fork_102.md:
--------------------------------------------------------------------------------
1 | ## Fork 102
2 |
3 | Answer the questions given the following program (without running it):
4 |
5 | ```
6 | #include <stdio.h>
7 | #include <unistd.h>
8 |
9 | int main()
10 | {
11 |     fork();
12 |     fork();
13 |     printf("\nyay\n");
14 |     return 0;
15 | }
16 | ```
17 |
18 | 1. How many times will the word "yay" be printed?
19 | 2. How many processes will be created?
20 |
--------------------------------------------------------------------------------
/topics/os/solutions/fork_101_solution.md:
--------------------------------------------------------------------------------
1 | ## Fork 101 - Solution
2 |
3 | 1. 2 - after the single fork() there are two processes (parent and child), and each prints "yay" once
4 | 2. 2 - two processes in total: the original one and the child created by fork()
5 |
--------------------------------------------------------------------------------
/topics/os/solutions/fork_102_solution.md:
--------------------------------------------------------------------------------
1 | ## Fork 102 - Solution
2 |
3 | 1. 4 - each fork() doubles the number of processes, so two forks give 2^2 = 4 processes, each printing "yay" once
4 | 2. 4 - four processes in total: the original one plus the three spawned by the two fork() calls
5 |
--------------------------------------------------------------------------------
/topics/pipeline_deploy_image_to_k8.md:
--------------------------------------------------------------------------------
1 | ## Build & Publish Docker Images to Kubernetes Cluster
2 |
3 | Write a pipeline, on any CI/CD system you prefer, that will build an image out of a given Dockerfile and publish that image to a running Kubernetes cluster.
4 |
--------------------------------------------------------------------------------
/topics/programming/grep_berfore_and_after.md:
--------------------------------------------------------------------------------
1 | Implement the following grep command in Python (numbers can be different): `grep error -A 2 -B 2 some_file`
2 |
--------------------------------------------------------------------------------
/topics/programming/web_scraper.md:
--------------------------------------------------------------------------------
1 | ## Web Scraper
2 |
3 | 1. Pick a web site to scrape
4 | 2. Using any language you would like, write a web scraper to save some data from the site you chose
5 | 3. Save the results to a database (doesn't matter which database, just pick one)
6 |
7 |
8 | * Note: if you don't know which site to pick, have a look [here](http://toscrape.com)
9 |
--------------------------------------------------------------------------------
/topics/python/advanced_data_types.md:
--------------------------------------------------------------------------------
1 | ## (Advanced) Identify the data type
2 |
3 | For each of the following, identify what is the data type of the result variable
4 |
5 | 1. a = {'a', 'b', 'c'}
6 | 2. b = {'1': '2'}
7 | 3. c = ([1, 2, 3])
8 | 4. d = (1, 2, 3)
9 | 5. e = True+True
10 |
--------------------------------------------------------------------------------
/topics/python/compress_string.md:
--------------------------------------------------------------------------------
1 | ## Compress String
2 |
3 | 1. Write a function that gets a string and compresses it
4 | - 'aaaabbccc' -> 'a4b2c3'
5 | - 'abbbc' -> 'a1b3c1'
6 | 2. Write a function that decompresses a given string
7 | - 'a4b2c3' -> 'aaaabbccc'
8 | - 'a1b3c1' -> 'abbbc'
9 |
--------------------------------------------------------------------------------
/topics/python/data_types.md:
--------------------------------------------------------------------------------
1 | ## Data Types
2 |
3 | For each of the following, identify what is the data type of the result variable
4 |
5 | 1. a = [1, 2, 3, 4, 5]
6 | 2. b = "Hello, is it me you looking for?"
7 | 3. e = 100
8 | 4. f = '100'
9 | 5. i = 0.100
10 | 6. i = True
11 |
12 | Bonus question: how to find out in Python what is the data type of certain variable?
13 |
--------------------------------------------------------------------------------
/topics/python/reverse_string.md:
--------------------------------------------------------------------------------
1 | ## Reverse a String
2 |
3 | Write code that reverses a string
4 |
--------------------------------------------------------------------------------
/topics/python/solutions/advanced_data_types_solution.md:
--------------------------------------------------------------------------------
1 | ## (Advanced) Identify the data type
2 |
3 | For each of the following, identify what is the data type of the result variable
4 |
5 | 1. a = {'a', 'b', 'c'} -> set
6 | 2. b = {'1': '2'} -> dict
7 | 3. c = ([1, 2, 3]) -> list
8 | 4. d = (1, 2, 3) -> tuple
9 | 5. e = True+True -> int
10 |
--------------------------------------------------------------------------------
/topics/python/solutions/data_types_solution.md:
--------------------------------------------------------------------------------
1 | ## Data Types - Solution
2 |
3 | 1. a = [1, 2, 3, 4, 5] -> list
4 | 2. b = "Hello, is it me you looking for?" -> string
5 | 3. e = 100 -> int
6 | 4. f = '100' -> string
7 | 5. i = 0.100 -> float
8 | 6. i = True -> bool
9 |
10 | ### Bonus question - Answer
11 |
12 | `type(...)`
13 |
--------------------------------------------------------------------------------
/topics/python/solutions/reverse_string.md:
--------------------------------------------------------------------------------
1 | ## Reverse a String - Solution
2 |
3 | ```
4 | my_string[::-1]
5 | ```
6 |
7 | A more visual way is:
8 | Careful: this approach is very slow.
9 |
10 | ```
11 | def reverse_string(string):
12 |     temp = ""
13 |     for char in string:
14 |         temp = char + temp
15 |     return temp
16 | ```
17 |
--------------------------------------------------------------------------------
/topics/shell/argument_check.md:
--------------------------------------------------------------------------------
1 | ## Argument Check
2 |
3 | ### Objectives
4 |
5 | Note: assume the script is executed with an argument
6 |
7 | 1. Write a script that will check if a given argument is the string "pizza"
8 | 1. If it's the string "pizza" print "with pineapple?"
9 | 2. If it's not the string "pizza" print "I want pizza!"
10 |
11 | ### Solution
12 |
13 | ```
14 | #!/usr/bin/env bash
15 |
16 | arg_value=${1:-default}
17 |
18 | if [ "$arg_value" = "pizza" ]; then
19 |     echo "with pineapple?"
20 | else
21 |     echo "I want pizza!"
22 | fi
23 | ```
24 |
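25 | Example runs (the script name `check.sh` is just for illustration):
26 |
27 | ```
28 | $ ./check.sh pizza
29 | with pineapple?
30 | $ ./check.sh burger
31 | I want pizza!
32 | ```
33 |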
--------------------------------------------------------------------------------
/topics/shell/basic_date.md:
--------------------------------------------------------------------------------
1 | ## Basic Date
2 |
3 | ### Objectives
4 |
5 | 1. Write a script that will put the current date in a file called "the_date.txt"
6 |
--------------------------------------------------------------------------------
/topics/shell/count_chars.md:
--------------------------------------------------------------------------------
1 | ## Count Chars
2 |
3 | ### Objectives
4 |
5 | 1. Read input from the user until you get empty string
6 | 2. For each of the lines you read, count the number of characters and print it
7 |
8 | ### Constraints
9 |
10 | 1. You must use a while loop
11 | 2. Assume at least three lines of input
12 |
--------------------------------------------------------------------------------
/topics/shell/directories_comparison.md:
--------------------------------------------------------------------------------
1 | ## Directories Comparison
2 |
3 | ### Objectives
4 |
5 | 1. You are given two directories as arguments and the output should be any difference between the two directories
6 |
--------------------------------------------------------------------------------
/topics/shell/empty_files.md:
--------------------------------------------------------------------------------
1 | ## Empty Files
2 |
3 | ### Objectives
4 |
5 | 1. Write a script to remove all the empty files in a given directory (including nested directories)
6 |
--------------------------------------------------------------------------------
/topics/shell/factors.md:
--------------------------------------------------------------------------------
1 | ## Shell Scripting - Factors
2 |
3 | ### Objectives
4 |
5 | Write a script that when given a number, will:
6 |
7 | * Check if the number has 2 as factor, if yes it will print "one factor"
8 | * Check if the number has 3 as factor, if yes it will print "one factor...actually two!"
9 | * If none of them (2 and 3) is a factor, print the number itself
10 |
--------------------------------------------------------------------------------
/topics/shell/files_size.md:
--------------------------------------------------------------------------------
1 | ## Files Size
2 |
3 | ### Objectives
4 |
5 | 1. Print the name and size of every file and directory in current path
6 |
7 | Note: use at least one for loop!
8 |
--------------------------------------------------------------------------------
/topics/shell/great_day.md:
--------------------------------------------------------------------------------
1 | ## Great Day
2 |
3 | ### Objectives
4 |
5 | 1. Write a script that will print "Today is a great day!" unless it's given a day name; in that case it should print "Today is <day>"
6 |
7 | Note: no need to check whether the given argument is actually a valid day
8 |
--------------------------------------------------------------------------------
/topics/shell/hello_world.md:
--------------------------------------------------------------------------------
1 | ## Shell Scripting - Hello World
2 |
3 | ### Objectives
4 |
5 | 1. Define a variable with the string 'Hello World'
6 | 2. Print the value of the variable you've defined and redirect the output to the file "amazing_output.txt"
7 |
--------------------------------------------------------------------------------
/topics/shell/host_status.md:
--------------------------------------------------------------------------------
1 | ## It's Alive!
2 |
3 | ### Objectives
4 |
5 | 1. Write a script to determine whether a given host is down or up
6 |
--------------------------------------------------------------------------------
/topics/shell/num_of_args.md:
--------------------------------------------------------------------------------
1 | ## Number of Arguments
2 |
3 | ### Objectives
4 |
5 | * Write a script that will print "Got it: <argument>" in case of one argument
6 | * In case no arguments were provided, it will print "Usage: ./<script_name>"
7 | * In case of more than one argument, print "hey hey...too many!"
8 |
--------------------------------------------------------------------------------
/topics/shell/print_arguments.md:
--------------------------------------------------------------------------------
1 | ## Shell Scripting - Print Arguments
2 |
3 | ### Objectives
4 |
5 | You should include everything mentioned here in one shell script
6 |
7 | 1. Print the first argument passed to the script
8 | 2. Print the number of arguments passed to the script
9 | 3.
10 |
--------------------------------------------------------------------------------
/topics/shell/solutions/basic_date.md:
--------------------------------------------------------------------------------
1 | ## Basic Date
2 |
3 | ### Objectives
4 |
5 | 1. Write a script that will put the current date in a file called "the_date.txt"
6 |
7 | ### Solution
8 |
9 | ```
10 | #!/usr/bin/env bash
11 |
12 | date > the_date.txt
13 | ```
14 |
--------------------------------------------------------------------------------
/topics/shell/solutions/count_chars.md:
--------------------------------------------------------------------------------
1 | ## Count Chars
2 |
3 | ### Objectives
4 |
5 | 1. Read input from the user until you get empty string
6 | 2. For each of the lines you read, count the number of characters and print it
7 |
8 | ### Constraints
9 |
10 | 1. You must use a while loop
11 | 2. Assume at least three lines of input
12 |
13 | ### Solution
14 |
15 | ```
16 | #!/usr/bin/env bash
17 |
18 | echo -n "Please insert your input: "
19 |
20 | while read -r line && [ -n "$line" ]; do
21 |     echo -n "$line" | wc -c
22 |     echo -n "Please insert your input: "
23 | done
24 | ```
25 |
--------------------------------------------------------------------------------
/topics/shell/solutions/directories_comparison.md:
--------------------------------------------------------------------------------
1 | ## Directories Comparison
2 |
3 | ### Objectives
4 |
5 | 1. You are given two directories as arguments and the output should be any difference between the two directories
6 |
7 | ### Solution
8 |
9 | Suppose the name of the bash script is `dirdiff.sh`
10 |
11 | ```
12 | #!/bin/bash
13 |
14 | if test $# -ne 2
15 | then
16 |     echo -e "USAGE: ./dirdiff.sh directory1 directory2"
17 |     exit 1
18 | fi
19 |
20 | # diff -r compares the two directories recursively (file names and contents);
21 | # -q reports only which files differ instead of printing full diffs
22 | if diff -rq "$1" "$2" > /dev/null
23 | then
24 |     echo -e "No difference between the 2 directories"
25 |     exit 0
26 | fi
27 |
28 | diff -rq "$1" "$2"
29 |
30 | ```
--------------------------------------------------------------------------------
/topics/shell/solutions/empty_files.md:
--------------------------------------------------------------------------------
1 | ## Empty Files
2 |
3 | ### Objectives
4 |
5 | 1. Write a script to remove all the empty files in a given directory (including nested directories)
6 |
7 | ### Solution
8 |
9 | ```
10 | #!/bin/bash
11 |
12 | # find handles the recursion (nested directories), and -empty matches
13 | # only zero-length regular files, so directories are never removed
14 | find "${1:-.}" -type f -empty -delete
15 | ```
16 |
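17 | To preview what would be removed before actually deleting anything (same expression with `-print` instead of `-delete`; the path is illustrative):
18 |
19 | ```
20 | find /path/to/dir -type f -empty -print
21 | ```
22 |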
--------------------------------------------------------------------------------
/topics/shell/solutions/factors.md:
--------------------------------------------------------------------------------
1 | ## Shell Scripting - Factors
2 |
3 | ### Objectives
4 |
5 | Write a script that when given a number, will:
6 |
7 | * Check if the number has 2 as factor, if yes it will print "one factor"
8 | * Check if the number has 3 as factor, if yes it will print "one factor...actually two!"
9 | * If none of them (2 and 3) is a factor, print the number itself
10 |
11 | ### Solution
12 |
13 | ```
14 | #!/usr/bin/env bash
15 |
16 | (( $1 % 2 )) || res="one factor"
17 | (( $1 % 3 )) || res="one factor...actually two!"
18 |
19 | echo ${res:-$1}
20 | ```
21 |
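22 | Example runs (the script name is just for illustration):
23 |
24 | ```
25 | $ ./factors.sh 4
26 | one factor
27 | $ ./factors.sh 9
28 | one factor...actually two!
29 | $ ./factors.sh 7
30 | 7
31 | ```
32 |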
--------------------------------------------------------------------------------
/topics/shell/solutions/files_size.md:
--------------------------------------------------------------------------------
1 | ## Files Size
2 |
3 | ### Objectives
4 |
5 | 1. Print the name and size of every file and directory in current path
6 |
7 | Note: use at least one for loop!
8 |
9 | ### Solution
10 |
11 | ```
12 | #!/usr/bin/env bash
13 |
14 | for i in *; do
15 |     echo "$i: $(du -sh "$i" | cut -f1)"
16 | done
17 | ```
18 |
--------------------------------------------------------------------------------
/topics/shell/solutions/great_day.md:
--------------------------------------------------------------------------------
1 | ## Great Day
2 |
3 | ### Objectives
4 |
5 | 1. Write a script that will print "Today is a great day!" unless it's given a day name; in that case it should print "Today is <day>"
6 |
7 | Note: no need to check whether the given argument is actually a valid day
8 |
9 | ### Solution
10 |
11 | ```
12 | #!/usr/bin/env bash
13 |
14 | echo "Today is ${1:-a great day!}"
15 | ```
16 |
--------------------------------------------------------------------------------
/topics/shell/solutions/hello_world.md:
--------------------------------------------------------------------------------
1 | ## Shell Scripting - Hello World
2 |
3 | ### Objectives
4 |
5 | 1. Define a variable with the string 'Hello World'
6 | 2. Print the value of the variable you've defined and redirect the output to the file "amazing_output.txt"
7 |
8 | ### Solution
9 |
10 | ```
11 | #!/usr/bin/env bash
12 |
13 | HW_STR="Hello World"
14 | echo $HW_STR > amazing_output.txt
15 | ```
16 |
--------------------------------------------------------------------------------
/topics/shell/solutions/host_status.md:
--------------------------------------------------------------------------------
1 | ## It's Alive!
2 |
3 | ### Objectives
4 |
5 | 1. Write a script to determine whether a given host is down or up
6 |
7 | ### Solution
8 |
9 | ```
10 | #!/usr/bin/env bash
11 | SERVERIP=<server-ip>  # replace with the IP address of the host to check
12 | NOTIFYEMAIL=test@example.com
13 |
14 | ping -c 3 "$SERVERIP" > /dev/null 2>&1
15 | if [ $? -ne 0 ]
16 | then
17 |     # Use mailer here:
18 |     mailx -s "Server $SERVERIP is down" -t "$NOTIFYEMAIL" < /dev/null
19 | fi
20 | ```
21 |
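22 | If you just want an up/down answer on the terminal, a one-liner sketch that takes the host as its first argument (`-W 2` caps the wait per ping at two seconds on Linux):
23 |
24 | ```
25 | ping -c 1 -W 2 "$1" > /dev/null 2>&1 && echo "$1 is up" || echo "$1 is down"
26 | ```
27 |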
--------------------------------------------------------------------------------
/topics/shell/solutions/num_of_args.md:
--------------------------------------------------------------------------------
1 | ## Number of Arguments
2 |
3 | ### Objectives
4 |
5 | * Write a script that will print "Got it: <argument>" in case of one argument
6 | * In case no arguments were provided, it will print "Usage: ./<script_name>"
7 | * In case of more than one argument, print "hey hey...too many!"
8 |
9 | ### Solution
10 |
11 | ```
12 | #!/usr/bin/env bash
13 |
14 | set -eu
15 |
16 | main() {
17 |     case $# in
18 |         0) printf "%s\n" "Usage: ./<script_name>"; return 1 ;;
19 |         1) printf "%s\n" "Got it: $1"; return 0 ;;
20 |         *) printf "%s\n" "hey hey...too many!"; return 1 ;;
21 |     esac
22 | }
23 |
24 | main "$@"
25 | ```
26 |
27 |
--------------------------------------------------------------------------------
/topics/shell/solutions/sum.md:
--------------------------------------------------------------------------------
1 | ## Sum
2 |
3 | ### Objectives
4 |
5 | 1. Write a script that gets two numbers and prints their sum
6 | 2. Make sure the input is valid (= you got two numbers from the user)
7 | 3. Test the script by running and passing it two numbers as arguments
8 |
9 | ### Constraints
10 |
11 | 1. Use functions
12 |
13 | ### Solution
14 |
15 | ```
16 | #!/usr/bin/env bash
17 |
18 | re='^[0-9]+$'
19 |
20 | if ! [[ $1 =~ $re && $2 =~ $re ]]; then
21 |     echo "Oh no...I need two numbers"
22 |     exit 2
23 | fi
24 |
25 | function sum {
26 |     echo $(( $1 + $2 ))
27 | }
28 |
29 | sum "$1" "$2"
30 | ```
31 |
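32 | Example runs (the script name is just for illustration):
33 |
34 | ```
35 | $ ./sum.sh 2 3
36 | 5
37 | $ ./sum.sh 2 three
38 | Oh no...I need two numbers
39 | ```
40 |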
--------------------------------------------------------------------------------
/topics/shell/sum.md:
--------------------------------------------------------------------------------
1 | ## Sum
2 |
3 | ### Objectives
4 |
5 | 1. Write a script that gets two numbers and prints their sum
6 | 2. Make sure the input is valid (= you got two numbers from the user)
7 | 3. Test the script by running and passing it two numbers as arguments
8 |
9 | ### Constraints
10 |
11 | 1. Use functions
12 |
--------------------------------------------------------------------------------
/topics/sql/improve_query.md:
--------------------------------------------------------------------------------
1 | ## Comparisons vs. Functions
2 |
3 | 1. Improve the following query
4 |
5 | ```
6 | SELECT count(*)
7 | FROM shawarma_purchases
8 | WHERE
9 | YEAR(purchased_at) == '2017'
10 | ```
11 |
--------------------------------------------------------------------------------
/topics/sql/solutions/improve_query.md:
--------------------------------------------------------------------------------
1 | ## Comparisons vs. Functions - Solution
2 |
3 | ```
4 | SELECT count(*)
5 | FROM shawarma_purchases
6 | WHERE
7 | purchased_at >= '2017-01-01' AND
8 |   purchased_at < '2018-01-01'
9 | ```
10 |
11 | Comparing directly on the column (instead of wrapping it in `YEAR()`) lets the database use an index on `purchased_at`, and the half-open date range correctly covers the whole year, including timestamps on December 31st.
12 |
--------------------------------------------------------------------------------
/topics/terraform/exercises/launch_ec2_instance/exercise.md:
--------------------------------------------------------------------------------
1 | # Launch EC2 instance
2 |
3 | ## Requirements
4 |
5 | * AWS account
6 |
7 | ## Objectives
8 |
9 | 1. Write Terraform configuration for launching an EC2 instance
10 | 2. Run the commands to apply the configuration and create the EC2 instance
11 | 3. What happens if you run `terraform apply` again?
12 | 4. Destroy the instance you've created with Terraform
--------------------------------------------------------------------------------
/topics/terraform/exercises/launch_ec2_instance/solution.md:
--------------------------------------------------------------------------------
1 | # Launch EC2 instance
2 |
3 | ## Requirements
4 |
5 | * AWS account
6 |
7 | ## Objectives
8 |
9 | 1. Write Terraform configuration for launching an EC2 instance
10 | 2. Run the commands to apply the configuration and create the EC2 instance
11 | 3. What happens if you run `terraform apply` again?
12 | 4. Destroy the instance you've created with Terraform
13 |
14 | ## Solution
15 |
16 | ```
17 | mkdir exercise && cd exercise
18 |
19 | cat << EOT > main.tf
20 | terraform {
21 | required_providers {
22 | aws = {
23 | source = "hashicorp/aws"
24 | version = "~> 4.16"
25 | }
26 | }
27 |
28 | required_version = ">= 1.2.0"
29 | }
30 |
31 | provider "aws" {
32 | region = "us-west-2"
33 | }
34 |
35 | resource "aws_instance" "app_server" {
36 | ami = "ami-830c94e3"
37 | instance_type = "t2.micro"
38 |
39 | tags = {
40 | Name = "ExampleAppServerInstance"
41 | }
42 | }
43 | EOT
44 |
45 | terraform init
46 | terraform validate
47 | terraform plan
48 |
49 | # You should see this line at the end: Plan: 1 to add, 0 to change, 0 to destroy
50 |
51 | terraform apply -auto-approve
52 |
53 | # You should see output similar to:
54 | # aws_instance.app_server: Creation complete after 49s [id=i-004651a9d4427d236]
55 |
56 | # Running 'terraform apply' again won't change anything:
57 | # Terraform compares the actual infrastructure with your
58 | # configuration and finds no difference. You should see the following line:
59 | # Apply complete! Resources: 0 added, 0 changed, 0 destroyed.
60 |
61 | # Remove instance
62 | terraform destroy -auto-approve
63 |
64 | # Destroy complete! Resources: 1 destroyed.
65 | ```
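66 | Note that `ami-830c94e3` is specific to `us-west-2`; applying it in another region will fail. A hedged sketch of looking the AMI up with a data source instead (the name filter is illustrative):
67 |
68 | ```terraform
69 | # Look up a recent Amazon Linux 2 AMI instead of hardcoding an ID (name filter is illustrative)
70 | data "aws_ami" "amazon_linux" {
71 |   most_recent = true
72 |   owners      = ["amazon"]
73 |
74 |   filter {
75 |     name   = "name"
76 |     values = ["amzn2-ami-hvm-*-x86_64-gp2"]
77 |   }
78 | }
79 |
80 | resource "aws_instance" "app_server" {
81 |   ami           = data.aws_ami.amazon_linux.id
82 |   instance_type = "t2.micro"
83 | }
84 | ```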
--------------------------------------------------------------------------------
/topics/terraform/exercises/launch_ec2_web_instance/exercise.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/bharatraut/devops_exercise/5753b49432bfa73b893cdaa7c84ef247b1a9ea40/topics/terraform/exercises/launch_ec2_web_instance/exercise.md
--------------------------------------------------------------------------------
/topics/terraform/exercises/s3_bucket_rename/exercise.md:
--------------------------------------------------------------------------------
1 | # Rename S3 Bucket
2 |
3 | ## Requirements
4 |
5 | * An existing S3 bucket tracked by Terraform.
6 | If you don't have one, you can use the following block and run `terraform apply`:
7 |
8 | ```terraform
9 | resource "aws_s3_bucket" "some_bucket" {
10 | bucket = "some-old-bucket"
11 | }
12 | ```
13 |
14 | ## Objectives
15 |
16 | 1. Rename an existing S3 bucket and make sure it's still tracked by Terraform
17 |
18 | ## Solution
19 |
20 | Click [here to view the solution](solution.md)
--------------------------------------------------------------------------------
/topics/terraform/exercises/s3_bucket_rename/solution.md:
--------------------------------------------------------------------------------
1 | # Rename S3 Bucket
2 |
3 | ## Requirements
4 |
5 | * An existing S3 bucket tracked by Terraform.
6 | If you don't have one, you can use the following block and run `terraform apply`:
7 |
8 | ```terraform
9 | resource "aws_s3_bucket" "some_bucket" {
10 | bucket = "some-old-bucket"
11 | }
12 | ```
13 |
14 | ## Objectives
15 |
16 | 1. Rename an existing S3 bucket and make sure it's still tracked by Terraform
17 |
18 | ## Solution
19 |
20 | ```sh
21 | # A bucket name is immutable in AWS so we'll have to create a new bucket
22 | aws s3 mb s3://some-new-bucket-123
23 |
24 | # Sync old bucket to new bucket
25 | aws s3 sync s3://some-old-bucket s3://some-new-bucket-123
26 |
27 | # Remove the old bucket from Terraform's state
28 | terraform state rm aws_s3_bucket.some_bucket
29 |
30 | # Import new bucket to Terraform's state
31 | terraform import aws_s3_bucket.some_bucket some-new-bucket-123
32 |
33 | : '
34 | aws_s3_bucket.some_bucket: Refreshing state... [id=some-new-bucket-123]
35 |
36 | Import successful!
37 | The resources that were imported are shown above. These resources are now in
38 | your Terraform state and will henceforth be managed by Terraform.
39 | '
40 |
41 | # Modify the Terraform definition to include the new name
42 | # resource "aws_s3_bucket" "some_bucket" {
43 | # bucket = "some-new-bucket-123"
44 | # }
45 |
46 | # Remove old bucket
47 | aws s3 rm s3://some-old-bucket --recursive
48 | aws s3 rb s3://some-old-bucket
49 | ```
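50 | After updating the resource block to the new bucket name, it's worth verifying that state and configuration agree:
51 |
52 | ```sh
53 | terraform plan
54 | # Expected: No changes. Your infrastructure matches the configuration.
55 | ```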
--------------------------------------------------------------------------------
/topics/terraform/exercises/terraform_local_provider/exercise.md:
--------------------------------------------------------------------------------
1 | # Local Provider
2 |
3 | ## Objectives
4 |
5 | Learn how to use and run Terraform basic commands
6 |
7 | 1. Create a directory called "my_first_run"
8 | 2. Inside the directory create a file called "main.tf" with the following content
9 |
10 | ```terraform
11 | resource "local_file" "mario_local_file" {
12 | content = "It's a me, Mario!"
13 | filename = "/tmp/who_is_it.txt"
14 | }
15 | ```
16 | 3. Run `terraform init`. What did it do?
17 | 4. Run `terraform plan`. What is Terraform going to perform?
18 | 5. Finally, run `terraform apply` and verify the file was created
19 |
20 | ## Solution
21 |
22 | Click [here to view the solution](solution.md)
23 |
--------------------------------------------------------------------------------
/topics/terraform/exercises/terraform_local_provider/solution.md:
--------------------------------------------------------------------------------
1 | # Local Provider
2 |
3 | ## Objectives
4 |
5 | Learn how to use and run Terraform basic commands
6 |
7 | 1. Create a directory called "my_first_run"
8 | 2. Inside the directory create a file called "main.tf" with the following content
9 |
10 | ```terraform
11 | resource "local_file" "mario_local_file" {
12 | content = "It's a me, Mario!"
13 | filename = "/tmp/who_is_it.txt"
14 | }
15 | ```
16 | 3. Run `terraform init`. What did it do?
17 | 4. Run `terraform plan`. What is Terraform going to perform?
18 | 5. Finally, run `terraform apply` and verify the file was created
19 |
20 | ## Solution
21 |
22 | ```sh
23 | # Create a directory
24 | mkdir my_first_run && cd my_first_run
25 |
26 | # Create the file 'main.tf'
27 | cat << EOT > main.tf
28 | resource "local_file" "mario_local_file" {
29 | content = "It's a me, Mario!"
30 | filename = "/tmp/who_is_it.txt"
31 | }
32 | EOT
33 |
34 | # Run 'terraform init'
35 | terraform init
36 | # Running 'ls -la' you'll see it created '.terraform' and '.terraform.lock.hcl'
37 | # In addition, it initialized (downloaded and installed) the relevant provider plugins. In this case, "hashicorp/local"
38 |
39 | # Run 'terraform plan'
40 | terraform plan
41 | # It shows what Terraform is going to perform once you run 'terraform apply'
42 |
43 | : << 'terraform_plan_output'
44 | Terraform will perform the following actions:
45 |
46 | # local_file.mario_local_file will be created
47 | + resource "local_file" "mario_local_file" {
48 | + content = "It's a me, Mario!"
49 | + directory_permission = "0777"
50 | + file_permission = "0777"
51 | + filename = "/tmp/who_is_it.txt"
52 | + id = (known after apply)
53 | }
54 |
55 | Plan: 1 to add, 0 to change, 0 to destroy.
56 | terraform_plan_output
57 |
58 | # Apply main.tf (it's better to run without -auto-approve if you are new to Terraform)
59 | terraform apply -auto-approve
60 |
61 | ls /tmp/who_is_it.txt
62 | # /tmp/who_is_it.txt
63 | ```
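64 | Since `local_file` is a managed resource, destroying it also removes the file, which makes cleanup a one-liner:
65 |
66 | ```sh
67 | terraform destroy -auto-approve
68 | ls /tmp/who_is_it.txt 2> /dev/null || echo "file removed"
69 | ```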
--------------------------------------------------------------------------------
/topics/zuul/README.md:
--------------------------------------------------------------------------------
1 | # Zuul
2 |
3 | ## Questions
4 |
5 | ### Basics
6 |
7 |
8 | Describe briefly what Zuul is
9 |
10 | From [Zuul's docs](https://zuul-ci.org/docs/zuul/about.html): "Zuul is a Project Gating System. That’s like a CI or CD system, but the focus is on testing the future state of code repositories...
11 |
12 | Zuul itself is a service which listens to events from various code review systems, executes jobs based on those events, and reports the results back to the code review system."
13 |
14 |
15 |
16 | What is Nodepool and how is it related to Zuul?
17 |
18 | "Nodepool is a system for managing test node resources. It supports launching single-use test nodes from cloud providers as well as managing access to pre-defined pre-existing nodes."
19 |
20 | "Zuul uses a separate component called Nodepool to provide the resources to run jobs. Nodepool works with several cloud providers as well as statically defined nodes (again, simultaneously)."
21 |
22 |
23 |
24 | What is a Pipeline in Zuul?
25 |
26 | A pipeline in Zuul is a workflow. The workflow can be triggered by different events: when a change is submitted to a project, when it's merged, etc.
27 | A pipeline can be applied to one or more projects (= repositories in a hosted or private source control system). See the example pipeline sketch at the end of this file.
28 |
29 |
30 |
31 | What is a project in Zuul?
32 |
33 | A project in Zuul corresponds to a git repository in one of the source connections Zuul is configured with (e.g., a Gerrit or GitHub repository). Pipelines and jobs are attached to projects.
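34 | For illustration, a minimal sketch of a pipeline definition in Zuul's YAML configuration (shape based on Zuul's docs; the names and triggers are illustrative):
35 |
36 | ```yaml
37 | # Illustrative sketch of a Zuul pipeline reporting results to Gerrit
38 | - pipeline:
39 |     name: check
40 |     manager: independent
41 |     trigger:
42 |       gerrit:
43 |         - event: patchset-created
44 |     success:
45 |       gerrit:
46 |         Verified: 1
47 |     failure:
48 |       gerrit:
49 |         Verified: -1
50 | ```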
--------------------------------------------------------------------------------