├── .gitignore
├── CONTRIBUTING.md
├── CREDITS.md
├── LICENSE
├── README.md
├── coding-challenges
└── README.md
├── devops-challenges
├── README.md
└── code
│ ├── q15
│ ├── q15-handler.js
│ └── q15-serverless.yml
│ ├── q16
│ ├── Dockerfile
│ ├── README.md
│ ├── app.py
│ ├── main.yml
│ ├── requirements.txt
│ └── tests
│ │ └── test_app.py
│ ├── q17
│ ├── README.md
│ ├── lambda
│ │ ├── handler.py
│ │ └── requirements.txt
│ └── terraform
│ │ └── main.tf
│ ├── q2
│ ├── backup.tar.gz
│ └── runscript.sh
│ ├── q23
│ ├── .gitignore
│ ├── README.md
│ ├── policies
│ │ ├── s3.rego
│ │ └── s3_test.rego
│ └── terraform
│ │ ├── .terraform.lock.hcl
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ ├── tfplan
│ │ ├── tfplan.json
│ │ └── variables.tf
│ ├── q24
│ ├── README.md
│ ├── main.tf
│ └── packer.json
│ ├── q25
│ ├── README.md
│ ├── fetch_sec.sh
│ ├── main.tf
│ └── policy.hcl
│ ├── q26
│ ├── README.md
│ ├── cf.yaml
│ └── infra.tf
│ ├── q27
│ ├── README.md
│ └── chaos.json
│ ├── q28
│ ├── README.md
│ ├── grafana.json
│ └── prometheus.yaml
│ ├── q29
│ ├── README.md
│ ├── apps
│ │ ├── app1
│ │ │ └── Dockerfile
│ │ └── app2
│ │ │ └── Dockerfile
│ ├── terraform
│ │ └── main.tf
│ └── traefik
│ │ └── traefik.yaml
│ ├── q30
│ ├── .github
│ │ └── workflows
│ │ │ └── ansible.yml
│ ├── README.md
│ ├── ansible.cfg
│ ├── inventory.ini
│ └── playbooks
│ │ ├── backup_config.yml
│ │ ├── set_acl.yml
│ │ ├── set_ospf.yml
│ │ └── set_vlan.yml
│ ├── q31
│ ├── README.md
│ ├── dep-vol.yml
│ ├── deployment.yml
│ ├── sa.yml
│ ├── secret.yml
│ └── service.yml
│ ├── q32
│ ├── README.md
│ ├── docker-compose.yml
│ └── v2
│ │ └── docker-compose.yml
│ ├── q33
│ ├── README.md
│ ├── ansible.cfg
│ ├── inventory.ini
│ ├── main.yml
│ └── roles
│ │ ├── app_server
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── app_config.j2
│ │ ├── db_server
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── db_config.j2
│ │ └── load_balancer
│ │ ├── tasks
│ │ └── main.yml
│ │ └── templates
│ │ └── haproxy.cfg.j2
│ ├── q34
│ ├── README.md
│ └── charts
│ │ └── my-python-app
│ │ ├── Chart.yaml
│ │ ├── templates
│ │ ├── deployment.yaml
│ │ └── service.yaml
│ │ └── values.yaml
│ ├── q35
│ ├── README.md
│ ├── charts
│ │ ├── backend
│ │ │ ├── Chart.yml
│ │ │ ├── job.yml
│ │ │ ├── templates
│ │ │ │ ├── deployment.yml
│ │ │ │ └── service.yml
│ │ │ └── values.yml
│ │ └── frontend
│ │ │ ├── Chart.yml
│ │ │ ├── templates
│ │ │ ├── deployment.yml
│ │ │ └── service.yml
│ │ │ └── values.yml
│ └── helmfile
│ │ ├── helmfile.yaml
│ │ └── releases
│ │ ├── backend.yml
│ │ └── frontend.yml
│ ├── q36
│ ├── README.md
│ └── automate.sh
│ ├── q37
│ ├── README.md
│ ├── Tiltfile
│ ├── backend.yml
│ ├── backend
│ │ ├── Dockerfile
│ │ └── app.py
│ ├── database
│ │ └── Dockerfile
│ ├── docker-compose.yml
│ ├── frontend.yml
│ └── frontend
│ │ ├── Dockerfile
│ │ └── src
│ │ ├── README.md
│ │ ├── index.js
│ │ └── package.json
│ ├── q38
│ ├── README.md
│ └── roles
│ │ ├── jumphost
│ │ ├── tasks
│ │ │ └── main.yml
│ │ └── templates
│ │ │ └── sshd_config.j2
│ │ ├── playbook.yml
│ │ ├── production_inventory.ini
│ │ └── production_servers
│ │ └── tasks
│ │ └── main.yml
│ ├── q39
│ ├── README.md
│ ├── backup.sh
│ ├── config.env
│ └── restore.sh
│ ├── q40
│ ├── README.md
│ ├── manifests
│ │ └── site.pp
│ ├── modules
│ │ ├── apache
│ │ │ └── manifests
│ │ │ │ └── init.pp
│ │ └── users
│ │ │ └── manifests
│ │ │ └── init.pp
│ └── spec
│ │ ├── classes
│ │ └── apache_spec.rb
│ │ └── spec_helper.rb
│ ├── q41
│ ├── README.md
│ ├── ansible.cfg
│ ├── inventory.ini
│ ├── playbook.yml
│ └── roles
│ │ └── webserver
│ │ ├── molecule
│ │ └── default
│ │ │ ├── converge.yml
│ │ │ ├── molecule.yml
│ │ │ └── verify.yml
│ │ └── tasks
│ │ └── main.yml
│ ├── q42
│ ├── .github
│ │ └── workflows
│ │ │ └── ci.yml
│ ├── Makefile
│ ├── README.md
│ ├── terraform
│ │ ├── main.tf
│ │ ├── outputs.tf
│ │ └── variables.tf
│ └── tests
│ │ ├── go.mod
│ │ └── main_test.go
│ └── qinit.sh
├── dsa-challenges
├── README.md
├── Sorting Arrays I
│ ├── Challenges.md
│ └── Lesson.md
└── Sorting Arrays II
│ └── Lesson.md
├── images
└── Tech-Vault.png
├── misc
├── README.md
├── go
│ ├── count_questions.go
│ └── random.go
└── py
│ ├── question_count.py
│ ├── randomiser.py
│ └── requirements.txt
└── quiz
└── networking.md
/.gitignore:
--------------------------------------------------------------------------------
1 | ## Python gitignore template
2 |
3 | # Byte-compiled / optimized / DLL files
4 | __pycache__/
5 | *.py[cod]
6 | *$py.class
7 |
8 | # C extensions
9 | *.so
10 |
11 | # Distribution / packaging
12 | .Python
13 | build/
14 | develop-eggs/
15 | dist/
16 | downloads/
17 | eggs/
18 | .eggs/
19 | lib/
20 | lib64/
21 | parts/
22 | sdist/
23 | var/
24 | wheels/
25 | share/python-wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | MANIFEST
30 |
31 | # PyInstaller
32 | # Usually these files are written by a python script from a template
33 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
34 | *.manifest
35 | *.spec
36 |
37 | # Installer logs
38 | pip-log.txt
39 | pip-delete-this-directory.txt
40 |
41 | # Unit test / coverage reports
42 | htmlcov/
43 | .tox/
44 | .nox/
45 | .coverage
46 | .coverage.*
47 | .cache
48 | nosetests.xml
49 | coverage.xml
50 | *.cover
51 | *.py,cover
52 | .hypothesis/
53 | .pytest_cache/
54 | cover/
55 |
56 | # Translations
57 | *.mo
58 | *.pot
59 |
60 | # Django stuff:
61 | *.log
62 | local_settings.py
63 | db.sqlite3
64 | db.sqlite3-journal
65 |
66 | # Flask stuff:
67 | instance/
68 | .webassets-cache
69 |
70 | # Scrapy stuff:
71 | .scrapy
72 |
73 | # Sphinx documentation
74 | docs/_build/
75 |
76 | # PyBuilder
77 | .pybuilder/
78 | target/
79 |
80 | # Jupyter Notebook
81 | .ipynb_checkpoints
82 |
83 | # IPython
84 | profile_default/
85 | ipython_config.py
86 |
87 | # pyenv
88 | # For a library or package, you might want to ignore these files since the code is
89 | # intended to run in multiple environments; otherwise, check them in:
90 | # .python-version
91 |
92 | # pipenv
93 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
94 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
95 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
96 | # install all needed dependencies.
97 | #Pipfile.lock
98 |
99 | # poetry
100 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
101 | # This is especially recommended for binary packages to ensure reproducibility, and is more
102 | # commonly ignored for libraries.
103 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
104 | #poetry.lock
105 |
106 | # pdm
107 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
108 | #pdm.lock
109 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
110 | # in version control.
111 | # https://pdm.fming.dev/#use-with-ide
112 | .pdm.toml
113 |
114 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
115 | __pypackages__/
116 |
117 | # Celery stuff
118 | celerybeat-schedule
119 | celerybeat.pid
120 |
121 | # SageMath parsed files
122 | *.sage.py
123 |
124 | # Environments
125 | .env
126 | .venv
127 | env/
128 | venv/
129 | ENV/
130 | env.bak/
131 | venv.bak/
132 |
133 | # Spyder project settings
134 | .spyderproject
135 | .spyproject
136 |
137 | # Rope project settings
138 | .ropeproject
139 |
140 | # mkdocs documentation
141 | /site
142 |
143 | # mypy
144 | .mypy_cache/
145 | .dmypy.json
146 | dmypy.json
147 |
148 | # Pyre type checker
149 | .pyre/
150 |
151 | # pytype static type analyzer
152 | .pytype/
153 |
154 | # Cython debug symbols
155 | cython_debug/
156 |
157 | # PyCharm
158 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
159 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
160 | # and can be added to the global gitignore or merged into this file. For a more nuclear
161 | # option (not recommended) you can uncomment the following to ignore the entire idea folder.
162 | #.idea/
163 |
164 | .DS_Store
165 |
--------------------------------------------------------------------------------
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # How to Contribute
2 |
3 | 1. Fork the repository
4 | 2. Make changes in a branch
5 | 3. Raise a pull request to this repository
6 | 4. Wait for the pull request to be reviewed by maintainers
7 | 5. Your changes are in!
8 |
9 |
10 | ## How to add a collapsible for answers in Markdown
11 |
12 | Copy the below code:
13 |
14 | ```
15 | <details>
16 | <summary>
17 | Add question here
18 | </summary>
19 |
20 | Answer goes here
21 |
22 | </details>
23 | ```
24 |
--------------------------------------------------------------------------------
/CREDITS.md:
--------------------------------------------------------------------------------
1 | The following acknowledges the Maintainers for this repository, those who have Contributed to this repository (via bug reports, code, design, ideas, project management, translation, testing, etc.), and any other References utilized.
2 |
3 | ## Maintainers
4 |
5 | The following individuals are responsible for curating the list of issues, responding to pull requests, and ensuring regular releases happen.
6 |
7 | -
8 |
9 | ## Contributors
10 |
11 | Thank you to all the people who have already contributed to this repository via bug reports, code, design, ideas, project management, translation, testing, etc.
12 |
13 | ## References
14 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright [yyyy] [name of copyright owner]
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
--------------------------------------------------------------------------------
/coding-challenges/README.md:
--------------------------------------------------------------------------------
1 | # Hands-on challenges
2 |
3 | 1. Build a Simple REST API with CRUD Operations and Sorting Algorithm (in Golang but you may also do a similar project in Python or Java)
4 |
5 | ```bash
6 |
7 | **Context**
8 |
9 | Welcome to AlphaTech, where we build simplified software solutions. You're a part of the backend team and we're eager to see how you adapt to our stack. Our primary language is Golang and we're currently working on a series of microservices.
10 |
11 | **The Project**
12 |
13 | We're in the process of developing a service that requires fast read and write operations. As a quick POC (Proof of Concept), you are tasked to build a REST API in Golang that performs CRUD (Create, Read, Update, Delete) operations on a list of products. Additionally, implement a sorting algorithm that sorts these products based on price.
14 |
15 | **Requirements**
16 |
17 | REST API: Your API should have endpoints to:
18 |
19 | - Create a product
20 | - Read a product
21 | - Update a product
22 | - Delete a product
23 | - List all products
24 |
25 | Data Structure: Use a simple struct for a product, which includes:
26 |
27 | - ID
28 | - Name
29 | - Price
30 |
31 | Sorting Algorithm: Implement a sorting function that sorts the products based on price. You can use any sorting algorithm.
32 |
33 | Test Cases: Write test cases to validate your sorting algorithm.
34 |
35 | Bonus: Implement pagination on the list all products endpoint.
36 |
37 | Documentation: Provide a README that explains how to run your code and use your API.
38 |
39 | **Constraints**
40 |
41 | - Stick to Golang's standard library. You can use a package like Gorilla Mux for routing.
42 | - Keep your application as stateless as possible.
43 | - Please make sure to comment your code.
44 | **Success Criteria**
45 | - All endpoints should work without any errors.
46 | - Sorting algorithm should correctly sort the products in ascending order based on price.
47 | - Test cases should cover basic scenarios and edge cases for the sorting algorithm.
48 |
49 | ```
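Once implemented, a quick smoke test of the API might look like the following. The port, paths, and JSON shape are illustrative assumptions, not part of the brief:

```bash
# Hypothetical endpoint layout; adjust to match your router.
curl -X POST localhost:8080/products \
  -H 'Content-Type: application/json' \
  -d '{"id":1,"name":"Widget","price":9.99}'   # Create
curl localhost:8080/products/1                  # Read
curl -X PUT localhost:8080/products/1 \
  -H 'Content-Type: application/json' \
  -d '{"name":"Widget","price":7.49}'           # Update
curl localhost:8080/products                    # List (sorted by price)
curl -X DELETE localhost:8080/products/1        # Delete
```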
50 |
51 | 2. Build a REST API with CRUD operations for 2 models (you can use a database of your choice, e.g. MongoDB) and JWT Authentication.
52 |
53 | ```bash
54 | **Context**
55 |
56 | AlphaTech is developing a new microservice for a blogging platform. They want to implement a secure authentication mechanism using JWT to ensure only authenticated users can create, edit, and delete blog posts. User passwords must be hashed for security.
57 |
58 | **Requirements**
59 |
60 | JWT Authentication:
61 |
62 | - Implement endpoints for user sign up, login, logout and delete.
63 | - Upon successful authentication (login), issue a JWT containing user information and necessary claims (e.g. user ID, token, iat, exp)
64 |
65 | REST API: Your API should have endpoints to:
66 |
67 | - Create a user
68 | - Read a user (protected route)
69 | - Update a user (protected route)
70 | - Delete a user (protected route)
71 |
72 | Protected routes:
73 |
74 | - Create a blog
75 | - Read a blog
76 | - Update a blog
77 | - Delete a blog
78 | - List all blogs of user
79 |
80 |
81 |
82 | Data Structures: use simple structs for a user and a blog, which include:
83 |
84 | User:
85 | - ID
86 | - Username
87 | - Password
88 |
89 | Blog:
90 | - ID
91 | - Title
92 | - Description
93 | - Author ID (user's ID)
94 |
95 | Security:
96 | - Use a library like BCrypt to hash passwords securely before storing in the database.
97 | - Use JWT securely, including proper signing and token validation.
98 |
99 | Searching Algorithm: Implement a search function that returns all blogs containing the search query; make sure it is case-insensitive.
100 |
101 | Bonus:
102 | - Implement Rate Limiting middleware.
103 | - Implement a Regular Expression pattern for password validation.
104 |
105 | Documentation: Provide a README that explains how to run your code and use your API and include instructions on how to interact with the authentication endpoints.
106 |
107 | You can use a backend framework of your choice (e.g. Quarkus, Spring Boot, Flask, ExpressJS). Keep your application as stateless as possible. Please make sure to comment your code.
108 | **Success Criteria**
109 | - All endpoints should work without any errors.
110 | - Searching algorithm should correctly return the blogs that contain the search query.
111 | - Test cases should cover basic scenarios and edge cases for the searching algorithm.
112 |
113 | ```
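For orientation, the authentication flow could be exercised roughly like this once the service is running. Endpoint names, port, and payloads are assumptions for illustration, and `jq` is assumed to be installed:

```bash
# Sign up, log in, then call a protected route with the issued JWT.
curl -X POST localhost:8080/auth/signup -H 'Content-Type: application/json' \
  -d '{"username":"alice","password":"S3cure!pass"}'
TOKEN=$(curl -s -X POST localhost:8080/auth/login -H 'Content-Type: application/json' \
  -d '{"username":"alice","password":"S3cure!pass"}' | jq -r '.token')
curl localhost:8080/blogs -H "Authorization: Bearer $TOKEN"   # protected route
```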
114 |
115 | 3. Implement a Basic Linux Command Line Interface (CLI) Tool
116 |
117 | ```bash
118 | **Context**
119 |
120 | To broaden the scope of learning and provide hands-on experience with command line tools, we want to introduce a challenge focused on building a basic Linux Command Line Interface (CLI) tool. This will allow beginners to get familiar with common Linux commands and serve as a starting point for working with the terminal.
121 |
122 | **The Project**
123 |
124 | You are tasked with developing a simple CLI tool that performs basic file management tasks. Users should be able to interact with the tool via the command line to perform actions such as creating files, listing directory contents, renaming files, and deleting files.
125 |
126 | **Requirements**
127 |
128 | 1. **CLI Commands**:
129 | - Implement commands for basic file management tasks, including:
130 | - `create`: Create a new file.
131 | - `list`: List the contents of a directory.
132 | - `rename`: Rename a file.
133 | - `delete`: Delete a file.
134 |
135 | Note: When using the `create` command, if the folders leading to the file you want to create don't exist yet, the command should create those folders as well. For instance, if you're creating a file at `lib/admin/index.ts`, but the `lib` and `admin` folders are missing, the `create` command should create them along with the file.
136 |
137 | 2. **Command Syntax**:
138 | - Define a clear and intuitive syntax for each command, along with any required arguments or options.
139 |
140 | 3. **File Management**:
141 | - Ensure that file management operations are performed safely and accurately.
142 |
143 | 4. **Error Handling**:
144 | - Implement proper error handling to provide informative error messages to users.
145 |
146 | 5. **Documentation**:
147 | - Create a `help` command which provides comprehensive details about the different commands in the CLI tool.
148 |
149 | **Bonus**:
150 | - Implement additional commands for more advanced file management tasks (e.g., copying files, moving files).
151 | - Add support for working with directories (e.g., creating directories, navigating between directories).
152 | - Implement interactive mode for the CLI tool to enhance user experience.
153 |
154 | **Constraints**
155 |
156 | - Keep the project simple and beginner-friendly.
157 | - Use a programming language commonly used for building command line tools (e.g., Python).
158 | - Do not use Bash; the challenge would be too easy with it.
159 | - Ensure that the CLI tool is easy to install and run on a Linux system.
160 |
161 | **Success Criteria**
162 |
163 | - All commands should function correctly without errors.
164 | - File management operations should be performed accurately and safely.
165 | - The CLI tool should provide clear and helpful feedback to users, including error messages when necessary.
166 | - Documentation should be comprehensive, easy for beginners to understand, and include installation instructions for the tool.
167 | ```
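To make the expected command syntax concrete, a session with the finished tool might look like this. The tool name `filecli` and the exact argument style are hypothetical:

```bash
filecli create lib/admin/index.ts   # also creates the missing lib/ and admin/ folders
filecli list lib/admin              # list directory contents
filecli rename lib/admin/index.ts lib/admin/main.ts
filecli delete lib/admin/main.ts
filecli help                        # show usage for every command
```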
168 |
169 | 4. Build a chat application using Socket.IO (WebSockets)
170 |
171 | ```bash
172 | **Context**
173 |
174 | AlphaTech is planning to build a real-time chat application to facilitate communication among users. The application should support instant messaging and group chats in real-time.
175 |
176 | **The Project**
177 |
178 | You are assigned to develop a real-time chat application using Socket.IO for real-time communication and ReactJS for the frontend interface. Users should be able to create accounts, join chat rooms, send messages, and receive messages instantly without the need for page refresh.
179 |
180 | **Requirements**
181 |
182 | Frontend (ReactJS):
183 |
184 | - Create a user-friendly interface for the chat application.
185 | - Implement components for displaying chat rooms, messages, user lists, and message input.
186 | - Include features for joining chat rooms, sending messages, and receiving messages in real-time.
187 | - Use React Router for navigation between different pages, such as the login page, chat room page, and user profile page.
188 |
189 | Backend (Node.js with ExpressJS and Socket.IO):
190 |
191 | - Set up a Node.js backend with ExpressJS to handle HTTP requests and serve the frontend application.
192 | - Use Socket.IO for real-time communication between clients and the server.
193 | - Implement authentication endpoints for user registration, login, and logout.
194 | - Create endpoints for managing chat rooms, sending messages, and fetching message history.
195 |
196 | Database (MongoDB):
197 |
198 | - Set up a MongoDB database to store user data, chat room data, and message history.
199 |
200 | Security:
201 |
202 | - Implement secure authentication mechanisms using JWT tokens to authenticate users.
203 | - Ensure that only authenticated users can access chat rooms and send messages.
204 | - Protect sensitive routes and endpoints from unauthorized access.
205 |
206 | Bonus Features:
207 |
208 | - Add support for creating private chat rooms and inviting specific users to join.
209 | - Implement message encryption to secure message content during transmission.
210 | - Include features for sending multimedia files such as images and videos in chat messages.
211 | - Implement typing indicators and read receipts to enhance the user experience.
212 |
213 | Documentation:
214 |
215 | - Provide a README that explains how to run both the frontend and backend applications.
216 | - Include instructions on setting up the MongoDB database and configuring the frontend and backend environments.
217 |
218 | **Constraints**
219 |
220 | - Use Socket.IO for real-time communication between clients and the server.
221 | - Ensure that the application is scalable and can handle multiple concurrent users.
222 | - Keep the project well-organized and maintainable by following best practices and conventions.
223 |
224 | **Success Criteria**
225 |
226 | - Users should be able to create accounts, join chat rooms, send messages, and receive messages in real-time.
227 | - The application should provide a smooth and responsive user experience without noticeable latency.
228 | - User data and chat history should be stored securely in the MongoDB database.
229 | - Documentation should be clear and comprehensive, enabling users to set up and run the application easily.
230 | ```
231 |
232 | 5. Build a Web Scraper Tool that can scrape various pages and store in database.
233 |
234 | Note: Use websites that allow web scraping, so that their terms of service and applicable law are respected:
235 |
236 | [Scrape This Site](https://www.scrapethissite.com/pages/),
237 | [Books to Scrape](https://books.toscrape.com/)
238 |
239 | ```bash
240 | **Context**
241 |
242 | AlphaTech wants to stay updated with the latest information related to their industry and competitors. They are interested in developing a web scraper that can fetch data from various websites and aggregate them into a centralized database for analysis and monitoring.
243 |
244 | **Requirements**
245 |
246 | Website Scraping:
247 |
248 | - Implement a web scraping mechanism capable of traversing websites and collecting data.
249 | - Use HTML parsing libraries or techniques (e.g., BeautifulSoup, Scrapy) to extract structured data from web pages.
250 |
251 | Data Storage:
252 |
253 | - Choose an appropriate database system (e.g., MySQL, PostgreSQL, MongoDB) for storing and querying data efficiently.
254 |
255 | Error Handling:
256 |
257 | - Implement error handling mechanisms to deal with various issues encountered during web scraping, such as network errors, HTTP errors, and content parsing errors.
258 | - Provide logging and reporting functionalities to track errors and debug issues effectively.
259 |
260 | Bonus Features:
261 |
262 | - Add support for custom user-defined rules and filters to scrape specific types of information based on keywords.
263 | - Develop a user interface for browsing and searching data in the database, with features such as filtering, sorting, and pagination.
264 |
265 | Documentation:
266 |
267 | - Provide a README clearly explaining how to run the web scraper and how to configure the database.
268 |
269 | **Constraints**
270 |
271 | - Adhere to web scraping best practices and ethical guidelines, respecting website terms of service and copyright laws.
272 | - Ensure scalability and performance optimization to handle large volumes of data.
273 | - Consider privacy and data security implications when storing and processing data, especially if personal or sensitive information is involved.
274 |
275 | **Success Criteria**
276 |
277 | - The web scraper should be able to fetch data from various websites and store them in a database accurately and efficiently.
278 | - The system should handle errors and exceptions gracefully, providing informative error messages and logs for troubleshooting.
279 | - Documentation should be comprehensive, covering installation instructions, usage guidelines, and best practices for configuring and running the web scraper.
280 | ```
281 |
282 | 6. Build an educational coding game for beginners.
283 |
284 | ```bash
285 | **Context**
286 |
287 | AlphaTech seeks to inspire the next generation of coders by making the journey into programming more interactive and fun. We aim to develop an educational game that not only introduces but also solidifies fundamental programming concepts in an engaging manner.
288 |
289 | **The Project**
290 |
291 | Your mission is to create an interactive game that serves as the first stepping stone for beginners into the world of coding. Through a series of thoughtfully designed puzzles and challenges, players will explore and understand the core principles of programming.
292 |
293 | **Requirements**
294 |
295 | - Create levels that teach basic concepts like variables, loops, and functions.
296 | - Design a user interface that is intuitive for beginners, including children.
297 | - Provide instant feedback and hints to guide players through each level.
298 | - Track player progress and adjust the difficulty accordingly.
299 |
300 | Bonus Features:
301 |
302 | - Integrate a code editor where players can write and execute code to solve puzzles.
303 |
304 | Documentation:
305 |
306 | - Offer a comprehensive guide on how to play the game, with examples of educational outcomes.
307 |
308 | **Success Criteria**
309 |
310 | - The game is engaging and educational for players with little to no prior programming experience.
311 | - Users should be able to play the coding game without errors.
312 | - The game should illustrate, with real-world examples, how programming concepts apply outside the game.
313 | ```
314 |
--------------------------------------------------------------------------------
/devops-challenges/README.md:
--------------------------------------------------------------------------------
1 | # DevOps Challenges
2 |
3 | ## Task level
4 |
5 | The tasks below have been ranked by seniority level; these are recommendations only and may differ based on your own experience.
6 |
7 | - Junior Engineer
8 | - Mid Engineer
9 | - Senior Engineer
10 |
11 | - [DevOps Challenges](#devops-challenges)
12 | - [1. K8s deployment (Junior)](#1-k8s-deployment-junior)
13 | - [2. Linux Automation](#2-linux-automation-juniormid)
14 | - [3. K8s enhancements (multiple choice)](#3-k8s-enhancements-juniormid)
15 | - [4. SSL Certificates](#4-ssl-certificates-junior)
16 | - [5. Service roles in K8s](#5-service-roles-in-k8s)
17 | - [6. Three Tier Terraform Architecture](#6-three-tier-terraform-architecture)
18 | - [7. Ansible Basics](#7-ansible-basics)
19 | - [8. CI/CD Pipeline Exercise with GitHub Actions](#8-cicd-pipeline-exercise-with-github-actions)
20 | - [9. Docker Multi-Stage Build Exercise](#9-docker-multi-stage-build-exercise)
21 | - [10. Terraform modules exercise](#10-terraform-modules-exercise)
22 | - [11. Docker compose exercise (Advanced multi-container)](#11-docker-compose-exercise-advanced-multi-container)
23 | - [12. AWS Cloud Security](#12-aws-cloud-security)
24 | - [13. Azure Security Challenge](#13-azure-security-challenge)
25 | - [14. GCP Security Challenge](#14-gcp-security-challenge)
26 | - [15. AWS Serverless Challenge](#15-aws-serverless-challenge)
27 | - [16. DevOps Troubleshooting Challenge: The Broken CI/CD Pipeline](#16-devops-troubleshooting-challenge-the-broken-cicd-pipeline)
28 | - [17. AWS Lambda/Terraform Troubleshooting](#17-aws-lambdaterraform-troubleshooting)
29 | - [18. DevOps Troubleshooting Challenge with Docker Compose](#18-devops-troubleshooting-challenge-with-docker-compose)
30 | - [19. Linux Monitoring Automation](#19-linux-monitoring-automation)
31 | - [20. ArgoCD Deployment Challenge](#20-argocd-deployment-challenge)
32 | - [21. Terraform Kubernetes Cluster Deployment Challenge](#21-terraform-kubernetes-cluster-deployment-challenge)
33 | - [22. Istio with Kubernetes](#22-istio-with-kubernetes)
34 | - [23. OPA with Terraform (Security as Code)](#23-opa-with-terraform-security-as-code)
35 | - [24. Create Immutable AMIs with Packer](#24-create-immutable-amis-with-packer)
36 | - [25. Secrets Management with HashiCorp Vault](#25-secrets-management-with-hashicorp-vault)
37 | - [26. Infrastructure as Code Refactor](#26-infrastructure-as-code-refactor)
38 | - [27. Chaos Engineering](#27-chaos-engineering)
39 | - [28. Monitoring and Alerting](#28-monitoring-and-alerting)
40 | - [29. Traefik with ECS](#29-traefik-with-ecs)
41 | - [30. Network Automation with Ansible](#30-network-automation-with-ansible)
42 | - [31. K8s with Secrets Management Challenge](#31-k8s-with-secrets-management-challenge)
43 | - [32. Docker-Compose Challenge (2)](#32-docker-compose-challenge-2)
44 | - [33. Ansible Advanced (1)](#33-ansible-advanced-1)
45 | - [34. Helm Deployment Challenge](#34-helm-deployment-challenge)
46 | - [35. Advanced Helm Challenge](#35-advanced-helm-challenge)
47 | - [36. Bash Automation Challenge](#36-bash-automation-challenge)
48 | - [37. Development with Tiltfiles](#37-development-with-tiltfiles)
49 | - [38. Secure Access to Production Systems](#38-secure-access-to-production-systems)
50 | - [39. Automated Backup and Restore Process](#39-automated-backup-and-restore-process)
51 | - [40. Configuration Management with Puppet](#40-configuration-management-with-puppet)
52 |
53 | ## 1. K8s deployment (Junior)
54 |
55 | ```bash
56 |
57 | Scenario:
58 |
59 | A company wants to roll out a web service built on Kubernetes. To make this happen, complete a file stub located at /home/ubuntu/tech-vault-q1/special-definition.yml with the steps that do the following:
60 |
61 | - Creates a new namespace named "CyberCo".
62 | - Deploys a new "redis" image (sourced from Dockerhub) using the "buster" tag, under a deployment named "cache-db", within the "CyberCo" namespace.
63 | - Scales the "cache-db" deployment to have 2 replicas.
64 | - Opens port "6379" on the "cache-db" containers.
65 |
66 | **Notes:**
67 |
68 | - The finalized solution will be assessed in a fresh, isolated environment. Make sure all the configurations are stored in the /home/ubuntu/tech-vault directory.
69 | - All tasks must be completed with a single `sudo execute` command, launched from the question directory. (Hint: use alias)
70 | - You have sudo permissions if needed.
71 |
72 | ```
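The graded artefact is the YAML stub, but as an informal sanity check the equivalent imperative commands might look like this. Note that Kubernetes requires lowercase namespace names, so a lowercase variant of "CyberCo" is shown:

```bash
kubectl create namespace cyberco
kubectl create deployment cache-db --image=redis:buster --port=6379 -n cyberco
kubectl scale deployment cache-db --replicas=2 -n cyberco
```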
73 |
74 | ## 2. Linux Automation (Junior/Mid)
75 |
76 | ```bash
77 |
78 | Scenario:
79 |
80 | Complete the file stub located at `q2/runscript.sh` with one or more commands to achieve the following tasks:
81 |
82 | - Extract the archive located at `/q2/backup.tar.gz`.
83 | - Set permission "0664" for all the files that were just extracted.
84 | - Set permission "0775" for all the directories that were just extracted.
85 | - Change the owner to "anonymous" and the group to "no-team" for all the extracted files and directories.
86 | - Create a new archive with the adjusted files and directories, naming it `/tmp/fixed-archive.tar.gz`.
87 |
88 | **Notes:**
89 |
90 | - Your solution will be evaluated in a new, clean setup. Make sure all work is performed in the `/q2` directory.
91 | - Execute all tasks with a single `sudo activate` command run from within the question directory. (Hint: use alias)
92 | - You have sudo permissions, if needed.
93 |
94 | ```
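One possible shape for `runscript.sh`; extracting into an `extracted/` working directory is an assumption, since the task does not specify a destination:

```bash
#!/usr/bin/env bash
set -euo pipefail

mkdir -p /q2/extracted
tar -xzf /q2/backup.tar.gz -C /q2/extracted            # extract the backup
find /q2/extracted -type f -exec chmod 0664 {} +       # files -> 0664
find /q2/extracted -type d -exec chmod 0775 {} +       # directories -> 0775
chown -R anonymous:no-team /q2/extracted               # new owner and group
tar -czf /tmp/fixed-archive.tar.gz -C /q2/extracted .  # repackage the result
```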
95 |
96 | ## 3. K8s enhancements (Junior/Mid)
97 |
98 | ```bash
99 |
100 | Scenario:
101 |
102 | While working on Kubernetes cluster enhancements, you need to set up a recurring task that makes a call to a remote API. Which of the following commands is correct to perform this action?
103 |
104 | Pick ONE option:
105 |
106 | 1. `kubectl run cronjob task --image=toolbox --schedule="*/1 * * * *" -- curl -s https://api.cyber-widget.com/refresh`
107 |
108 | 2. `kubecmd create periodic-task --image=toolbox --timing="/1 * * * *" -- curl -s https://api.cyber-widget.com/refresh`
109 |
110 | 3. `kubecmd run periodic task --image=toolbox --timing="*/1 * * * *" -- curl -s https://api.cyber-widget.com/refresh`
111 |
112 | 4. `kubectl create cronjob task --image=toolbox --schedule="*/1 * * * *" -- curl -s https://api.cyber-widget.com/refresh`
113 |
114 | **Notes:**
115 |
116 | - The correct answer will be evaluated on its ability to perform the task as described.
117 |
118 | ```
119 |
120 | ## 4. SSL Certificates (Junior)
121 |
122 | ```bash
123 |
124 | Scenario:
125 |
126 | In the directory `/home/ubuntu/tech-vault-q4/`, you have an SSL certificate named `security.crt`, a private key named `secrecy.key`, and a file stub `taskrunner.sh`. Complete the `taskrunner.sh` file with steps to accomplish the following:
127 |
128 | - **Task 1**: Create a new User entry in Kubeconfig named `operator` using the `security.crt` and `secrecy.key` located in `/home/ubuntu/tech-vault-q4/`.
129 |
130 | - **Task 2**: Create a new Context entry named `operator` in Kubeconfig for the newly created User `operator`.
131 |
132 | **Notes**
133 |
134 | - The completed solution will be evaluated in a fresh environment. Make sure all your changes are in the `taskrunner.sh` file located in `/home/ubuntu/tech-vault-q4/`. Manual changes will not be preserved.
135 |
136 | - Run `sudo execute` from the question directory to test the solution. (Hint: use alias)
137 |
138 | - Sudo privileges are available, if needed.
139 |
140 | ```
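A minimal sketch of what `taskrunner.sh` could contain; the certificate paths come from the task, while the cluster name passed to `set-context` is a placeholder:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Task 1: add a kubeconfig user entry backed by the provided certificate and key
kubectl config set-credentials operator \
  --client-certificate=/home/ubuntu/tech-vault-q4/security.crt \
  --client-key=/home/ubuntu/tech-vault-q4/secrecy.key

# Task 2: add a context entry for that user (cluster name is a placeholder)
kubectl config set-context operator --cluster=kubernetes --user=operator
```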
141 |
142 | ## 5. Service roles in K8s
143 |
144 | ```bash
145 |
146 | Scenario:
147 |
148 | You are required to complete the definition file located at `/home/ubuntu/tech-vault-q5/cluster-role-def.yml`. The file should have configurations to perform the following actions:
149 |
150 | - **Step 1**: Create a new Service Account named `chief-admin`.
151 |
152 | - **Step 2**: Create a new Cluster Role named `monitor`, which gives permissions on all possible API groups, resources, and verbs.
153 |
154 | - **Step 3**: Create a Cluster Role Binding named `chiefadmin-monitor` that binds the newly created Service Account `chief-admin` with the Cluster Role `monitor`.
155 |
156 | ### Notes
157 |
158 | - The completed solution will be verified in a fresh, untouched environment. Ensure all your changes are confined to the `cluster-role-def.yml` file in `/home/ubuntu/tech-vault-q5`.
159 |
160 | - Run `sudo validate` from the question directory to apply your changes and solve the task.
161 |
162 | - You have sudo access, if necessary.
163 |
164 | ```
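A sketch of what `cluster-role-def.yml` might contain, written here as a shell heredoc so it can be pasted into a terminal; the ServiceAccount namespace (`default`) is an assumption:

```bash
cat > /home/ubuntu/tech-vault-q5/cluster-role-def.yml <<'EOF'
apiVersion: v1
kind: ServiceAccount
metadata:
  name: chief-admin
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: monitor
rules:
  - apiGroups: ["*"]
    resources: ["*"]
    verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: chiefadmin-monitor
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: monitor
subjects:
  - kind: ServiceAccount
    name: chief-admin
    namespace: default
EOF
```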
165 |
166 |
167 |
168 | ## 6. Three Tier Terraform Architecture
169 |
170 | ```bash
171 |
172 | ## Terraform Exercise 1: Create an AWS S3 Bucket
173 |
174 | ### Objective
175 |
176 | Your task is to write a Terraform script that does the following:
177 |
178 | - **Step 1**: Create an AWS S3 bucket named `tf-test-bucket-yourname`.
179 |
180 | - **Step 2**: Enable versioning on the S3 bucket.
181 |
182 | - **Step 3**: Create a folder within that S3 bucket and name it `uploads`.
183 |
184 | ### Notes
185 |
186 | - Make sure you save your Terraform script as `s3_bucket.tf` in the directory `/home/ubuntu/2023-terraform-s3-creation`.
187 |
188 | - Run `terraform apply` to ensure your Terraform script executes without errors.
189 |
190 | - You have AWS credentials set up in your environment.
191 |
192 | ---
193 |
194 | ## Terraform Exercise 2: Set up a VPC
195 |
196 | ### Objective
197 |
198 | Write a Terraform script to set up a Virtual Private Cloud (VPC) in AWS with the following:
199 |
200 | - **Step 1**: Create a VPC with CIDR block `10.0.0.0/16`.
201 |
202 | - **Step 2**: Create a subnet in that VPC with CIDR block `10.0.1.0/24`.
203 |
204 | - **Step 3**: Attach an internet gateway to the VPC.
205 |
206 | ### Notes
207 |
208 | - Save your Terraform script in a file named `aws_vpc.tf` in the directory `/home/ubuntu/2023-terraform-vpc-setup`.
209 |
210 | - Make sure you run `terraform init` and `terraform apply` to validate that the script works.
211 |
212 | - AWS credentials are assumed to be configured.
213 |
214 | ---
215 |
216 | ## Terraform Exercise 3: Managing EC2 Instances
217 |
218 | ### Objective
219 |
220 | Your objective is to create a Terraform script that automates the following:
221 |
222 | - **Step 1**: Launch an EC2 instance with type `t2.micro`.
223 |
224 | - **Step 2**: Make sure to use an Amazon Linux 2 AMI.
225 |
226 | - **Step 3**: Tag the instance with "Environment: Dev".
227 |
228 | ### Notes
229 |
230 | - Your Terraform script should be saved as `ec2_instance.tf` in the directory `/home/ubuntu/terraform-tech-vault`.
231 |
232 | - Ensure to run `terraform init` and `terraform apply` to verify your script.
233 |
234 | - AWS credentials should be available in your environment.
235 |
236 |
237 | ```
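As a starting point for Exercise 1 only, a rough sketch follows. The provider region is a placeholder, and the "folder" is created as an empty `uploads/` object, which is one common convention since S3 has no real folders:

```bash
mkdir -p /home/ubuntu/2023-terraform-s3-creation
cd /home/ubuntu/2023-terraform-s3-creation
cat > s3_bucket.tf <<'EOF'
provider "aws" {
  region = "eu-west-1" # placeholder region
}

resource "aws_s3_bucket" "tf_test" {
  bucket = "tf-test-bucket-yourname"
}

resource "aws_s3_bucket_versioning" "tf_test" {
  bucket = aws_s3_bucket.tf_test.id
  versioning_configuration {
    status = "Enabled"
  }
}

# An empty object whose key ends in "/" behaves as a folder in the console.
resource "aws_s3_object" "uploads_folder" {
  bucket = aws_s3_bucket.tf_test.id
  key    = "uploads/"
}
EOF
terraform init && terraform apply
```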
238 |
239 | ## 7. Ansible Basics
240 |
241 | ```bash
242 |
243 | ## Ansible Exercise 1: Configure a Web Server
244 |
245 | ### Objective
246 |
247 | Your task is to write an Ansible playbook that automates the following tasks on a remote Ubuntu server:
248 |
249 | - **Step 1**: Update the package manager cache.
250 |
251 | - **Step 2**: Install the Apache web server package (`apache2`).
252 |
253 | - **Step 3**: Enable and start the Apache service.
254 |
255 | - **Step 4**: Deploy a simple `index.html` file to the document root `/var/www/html/`. The HTML should display "Hello, Ansible!"
256 |
257 | ### Requirements
258 |
259 | - The playbook should be idempotent, meaning it can be run multiple times without changing the outcome.
260 |
261 | - The target hosts should be defined in your Ansible inventory.
262 |
263 | ### Notes
264 |
265 | - Save your Ansible playbook in a file named `web_server_setup.yml` in the directory `/home/ubuntu/tech-vault-ansible`.
266 |
267 | - Run `ansible-playbook web_server_setup.yml` to make sure your playbook executes without errors.
268 |
269 | - It's assumed that you've already configured SSH keys for authentication to the target server.
270 |
271 | ---
272 | ```
273 |
274 | ```bash
275 |
276 | Question Architecture
277 | +------------------------+
278 | | Control Node |
279 | | (Your Workstation) |
280 | +-----------+------------+
281 | |
282 | | SSH & Ansible Commands
283 | |
284 | +-----------v------------+
285 | | Target Node(s) |
286 | | (Remote Ubuntu Server)|
287 | +------------------------+
288 | | | |
289 | | | |
290 | v v v
291 | Update Package Cache Install Apache
292 | |
293 | v
294 | Enable and Start Apache Service
295 | |
296 | v
297 | Deploy index.html to /var/www/html
298 |
299 |
300 | ```
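A minimal sketch of `web_server_setup.yml`, written as a heredoc; the inventory group name `web` is an assumption and should match your own inventory:

```bash
mkdir -p /home/ubuntu/tech-vault-ansible
cat > /home/ubuntu/tech-vault-ansible/web_server_setup.yml <<'EOF'
---
- hosts: web
  become: true
  tasks:
    - name: Update the apt cache
      ansible.builtin.apt:
        update_cache: true

    - name: Install Apache
      ansible.builtin.apt:
        name: apache2
        state: present

    - name: Enable and start Apache
      ansible.builtin.service:
        name: apache2
        state: started
        enabled: true

    - name: Deploy index.html
      ansible.builtin.copy:
        content: "<h1>Hello, Ansible!</h1>"
        dest: /var/www/html/index.html
EOF
ansible-playbook /home/ubuntu/tech-vault-ansible/web_server_setup.yml
```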
301 |
302 | ## 8. CI/CD Pipeline Exercise with GitHub Actions
303 |
304 | ```bash
305 |
306 | ## CI/CD Pipeline Exercise with GitHub Actions
307 |
308 | ### Context
309 | You are responsible for the CI/CD pipeline of a Python web application. The application is built with Flask and the source code is stored in a GitHub repository.
310 |
311 | ### Objective
312 | Create a GitHub Actions workflow that accomplishes the following:
313 |
314 | 1. Trigger the workflow on every push to the `main` branch and on pull requests targeting the `main` branch.
315 | 2. Use a Python 3.x environment.
316 | 3. Install the required Python packages defined in a `requirements.txt` file.
317 | 4. Run unit tests located in a `tests` folder.
318 | 5. If the unit tests are successful and the workflow was triggered by a push to `main`, deploy the application to a cloud server of your choice (AWS, Azure, etc.).
319 |
320 | ### Constraints
321 | - For the sake of the exercise, you can use a single YAML configuration file for the GitHub Actions workflow.
322 | - Assume that you have the necessary credentials to deploy to your chosen cloud service.
323 |
324 | ### Deliverables
325 | - The GitHub Actions YAML file.
326 | - Brief documentation explaining your workflow steps and any environment variables or secrets you used.
327 |
328 |
329 | ```
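A skeleton workflow covering the test half of the pipeline might look like this; the deploy job is left as a stub because the target cloud (and its credentials/secrets) is the solver's choice:

```bash
mkdir -p .github/workflows
cat > .github/workflows/ci.yml <<'EOF'
name: CI
on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: "3.x"
      - run: pip install -r requirements.txt
      - run: python -m pytest tests

  deploy:
    needs: test
    if: github.event_name == 'push' && github.ref == 'refs/heads/main'
    runs-on: ubuntu-latest
    steps:
      - run: echo "Deploy step depends on your chosen cloud provider"
EOF
```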
330 |
331 | ## 9. Docker Multi-Stage Build Exercise
332 |
333 | ```bash
334 |
335 | ### Context
336 | You're a DevOps engineer working on a Node.js application. Your company is keen on optimising Docker images for production use.
337 |
338 | ### Objective
339 | Create a Dockerfile that accomplishes the following:
340 |
341 | 1. Utilizes multi-stage builds for development and production.
342 | 2. In the first stage, named `builder`, use a Node.js image to install all dependencies and build the application. Assume that the build command is `npm run build`.
343 | 3. In the second stage, named `production`, use a smaller base image like `node:alpine` to set up the production environment. Copy only the essential files and folders from the `builder` stage.
344 | 4. Ensure that the production stage runs as a non-root user for added security.
345 | 5. Expose port `3000` for the application.
346 | 6. Make sure that the application starts with the command `npm start`.
347 |
348 | ### Constraints
349 | - Your Dockerfile should be optimized for size and security.
350 | - You can assume that a `.dockerignore` file is already set up to exclude unnecessary files.
351 |
352 | ### Bonus
353 | - Include health checks in your Dockerfile.
354 | - Use BuildKit features for added optimization, if you're familiar with them.
355 |
356 | ### Deliverables
357 | - The Dockerfile.
358 | - A brief README explaining the steps taken, any arguments used, and why you chose a particular base image or strategy.
359 |
360 |
361 | ```
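A hedged sketch of the multi-stage Dockerfile; the `dist/` build output directory and the Node 20 base images are assumptions about the application:

```bash
cat > Dockerfile <<'EOF'
# Stage 1: install dependencies and build the app
FROM node:20 AS builder
WORKDIR /app
COPY package*.json ./
RUN npm ci
COPY . .
RUN npm run build

# Stage 2: minimal production image running as a non-root user
FROM node:20-alpine AS production
ENV NODE_ENV=production
WORKDIR /app
COPY --from=builder /app/package*.json ./
RUN npm ci --omit=dev
COPY --from=builder /app/dist ./dist
USER node
EXPOSE 3000
CMD ["npm", "start"]
EOF
```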
362 |
363 | ## 10. Terraform modules exercise
364 |
365 | ```bash
366 |
367 | ## Terraform Module Exercise
368 |
369 | ### Context
370 | You're a DevOps engineer tasked with managing cloud resources at your organisation. The team has decided to use Terraform for infrastructure as code, and you've been assigned to lead the initiative.
371 |
372 | ### Objective
373 | Write a Terraform configuration that accomplishes the following:
374 |
375 | 1. Utilizes Terraform modules to create an AWS VPC.
376 | 2. Inside this VPC, deploy an EC2 instance and an RDS instance.
377 | 3. Use outputs to display essential information about the deployed resources, such as IPs and DNS names.
378 | 4. Implement remote state management using AWS S3 and state locking with DynamoDB.
379 | 5. Make sure to use variables to make your modules reusable.
380 | 6. Use locals to define any constant values or computations that are reused within the configuration.
381 |
382 | ### Constraints
383 | - Your Terraform configuration should adhere to best practices like proper formatting, commenting, and resource naming conventions.
384 | - Ensure your code is idempotent, meaning running it multiple times won't cause changes unless the actual configuration has changed.
385 |
386 | ### Bonus
387 | - Implement a basic level of security by using AWS security groups to restrict traffic.
388 | - Use Terraform workspaces to manage different environments (e.g., staging, production).
389 |
390 | ### Deliverables
391 | - The Terraform configuration files.
392 | - A README explaining:
393 | - How to use the modules.
394 | - Any prerequisites or dependencies.
395 | - How to initialize and apply the configuration.
396 | - Any assumptions or design choices made.
397 |
398 | ```
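For point 4 in particular, a remote-state backend block might be sketched as follows; the bucket, key, and DynamoDB table names are placeholders that must already exist:

```bash
cat > backend.tf <<'EOF'
terraform {
  backend "s3" {
    bucket         = "my-terraform-state-bucket"   # placeholder, created beforehand
    key            = "envs/dev/terraform.tfstate"
    region         = "eu-west-1"
    dynamodb_table = "terraform-locks"             # placeholder, enables state locking
    encrypt        = true
  }
}
EOF
terraform init   # re-run init whenever the backend configuration changes
```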
399 |
400 | ## 11. Docker compose exercise (Advanced multi-container)
401 |
402 | ```bash
403 |
404 | ### Context
405 | You are a DevOps engineer working on a new web application that uses a Python Flask API backend and a Redis cache. You've been tasked with containerizing this application using Docker and defining the multi-container environment using Docker Compose.
406 |
407 | ### Objective
408 | Create a `docker-compose.yml` file that:
409 |
410 | 1. Defines two services: one for the Flask API and another for the Redis cache.
411 | 2. Uses multi-stage builds for the Flask API to minimize the image size.
412 | 3. Utilizes environment variables to pass configurations to your services.
413 | 4. Mounts volumes to persist data and improve development experience.
414 | 5. Uses networks to isolate and secure communication between services.
415 | 6. Ensures the Flask API waits for the Redis service to be fully operational before starting.
416 |
417 | ### Constraints
418 | - Make sure to use version `3` or above for the Docker Compose file format.
419 | - Follow best practices for Dockerfile and Compose file design (e.g., avoid using the `root` user, use `.dockerignore`, etc.)
420 | - Your services should restart automatically if they fail.
421 |
422 | ### Bonus
423 | - Use overrides (`docker-compose.override.yml`) to manage settings that are specific to a development environment.
424 | - Implement a healthcheck for your services.
425 |
426 | ### Deliverables
427 | - The `docker-compose.yml` file.
428 | - Any additional Dockerfiles or script files used.
429 | - A README outlining:
430 | - How to bring up and down the environment.
431 | - How to scale services.
432 | - Any design choices or assumptions you made.
433 |
434 | ```
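A compact sketch of the compose file; the service names, ports, mounted paths, and the healthcheck-based wait are one reasonable interpretation of the brief rather than the required answer:

```bash
cat > docker-compose.yml <<'EOF'
version: "3.9"
services:
  api:
    build: .                        # multi-stage Dockerfile for the Flask app
    environment:
      REDIS_URL: redis://cache:6379/0
    ports:
      - "5000:5000"
    volumes:
      - ./app:/app                  # convenient for local development
    depends_on:
      cache:
        condition: service_healthy  # wait until Redis reports healthy
    restart: unless-stopped
    networks: [backend]

  cache:
    image: redis:7-alpine
    volumes:
      - redis-data:/data
    healthcheck:
      test: ["CMD", "redis-cli", "ping"]
      interval: 5s
      retries: 5
    restart: unless-stopped
    networks: [backend]

volumes:
  redis-data:

networks:
  backend:
EOF
```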
435 |
436 | ## 12. AWS Cloud Security
437 |
438 | ```bash
439 |
440 | ### Context
441 | You are a DevOps engineer in a company that is migrating its on-premises applications to AWS. You've been tasked with ensuring the security posture of your cloud environment.
442 |
443 | ### Objective
444 | Create an Infrastructure as Code (IaC) template using Terraform that accomplishes the following:
445 |
446 | 1. Sets up a VPC (Virtual Private Cloud) with private and public subnets.
447 | 2. Deploys an EC2 instance into the private subnet.
448 | 3. Sets up a Security Group that allows only necessary ports to be open.
449 | 4. Uses IAM Roles to grant the EC2 instance only the permissions it needs (Least Privilege).
450 | 5. Sets up CloudTrail to log API calls for your account.
451 | 6. Enable encryption for any storage services you use (EBS, S3, etc.)
452 |
453 | ### Constraints
454 | - Use Terraform to create your infrastructure.
455 | - Make use of modules to make your code re-usable.
456 | - Incorporate best practices for AWS security (e.g., enable VPC flow logs, disable root user, etc.)
457 |
458 | ### Bonus
459 | - Implement AWS Config to enforce security policies.
460 | - Set up a CloudWatch Alarm that triggers if any unauthorized actions are performed.
461 | - Use AWS Secrets Manager to store any sensitive information.
462 |
463 | ### Deliverables
464 | - Terraform files (.tf) for your infrastructure.
465 | - A README file that explains:
466 | - How to deploy your infrastructure.
467 | - Security best practices that you implemented.
468 | - Any assumptions or design choices you made.
469 |
470 | ```
471 |
472 |
473 | ## 13. Azure Security Challenge
474 |
475 | ```bash
476 |
477 | ### Context
478 | You are a DevOps engineer at a software company that's planning to use Azure services for hosting a new web application. You have been given the responsibility to ensure the security of the application and its surrounding environment.
479 |
480 | ### Objective
481 | Create an Azure ARM template or use Terraform to accomplish the following tasks:
482 |
483 | 1. Create a Virtual Network with a defined range of IP addresses.
484 | 2. Deploy a Virtual Machine into the Virtual Network, and make sure it is not directly accessible from the Internet.
485 | 3. Set up Network Security Groups (NSGs) to restrict inbound and outbound traffic to the Virtual Machine.
486 | 4. Implement Azure Key Vault to manage application secrets.
487 | 5. Enable Azure Monitor and Azure Security Center to collect performance and security metrics.
488 | 6. Set up Azure Active Directory and implement RBAC (Role-Based Access Control).
489 |
490 | ### Constraints
491 |
492 | - You must use Infrastructure as Code (IaC) for all deployments.
493 | - Wherever possible, apply the principle of Least Privilege.
494 | - Enable Azure Multi-Factor Authentication for critical roles.
495 |
496 | ### Bonus
497 |
498 | - Enable Azure DDoS Protection Standard for the Virtual Network.
499 | - Implement Azure Policy to enforce organizational requirements.
500 | - Create an Azure Logic App to send notifications if high-severity security incidents are detected.
501 |
502 | ### Deliverables
503 | - All the code files used for setting up the environment.
504 | - A README file that explains:
505 | - Steps to deploy the environment.
506 | - Best practices you've implemented.
507 | - Any assumptions or design choices you've made.
508 |
509 |
510 |
511 | ```
512 |
513 | ## 14. GCP Security Challenge
514 |
515 | ```bash
516 |
517 | ### Context
518 | You're a DevOps engineer at a start-up that's rapidly scaling its infrastructure on GCP. Your boss has asked you to ensure that the GCP environment adheres to industry security standards.
519 |
520 | ### Objective
521 | Use Terraform, Deployment Manager, or your preferred Infrastructure as Code (IaC) tool to accomplish the following tasks:
522 |
523 | 1. Create a custom VPC with a private subnet and a public subnet.
524 | 2. Deploy a Compute Engine instance within the private subnet and ensure it is not accessible from the public internet.
525 | 3. Implement firewall rules to control the inbound and outbound traffic within the VPC.
526 | 4. Set up Cloud Key Management Service (KMS) to encrypt sensitive application data.
527 | 5. Enable Stackdriver (Cloud Monitoring and Cloud Logging) for monitoring and logging activities.
528 | 6. Implement Identity and Access Management (IAM) with roles that follow the principle of Least Privilege.
529 |
530 | ### Constraints
531 | - All resources must be deployed using IaC.
532 | - Use Service Accounts to enable secure communication between different GCP services.
533 | - Enable two-factor authentication for accounts with admin access.
534 |
535 | ### Bonus
536 | - Implement a Cloud Function that triggers an alert based on a security condition (e.g., repeated login failures, firewall rule changes).
537 | - Use Cloud Security Command Center to continuously monitor and secure your environment.
538 | - Apply VPC Service Controls to limit the risk of data exfiltration.
539 |
540 | ### Deliverables
541 | - All code files used for setting up the environment.
542 | - A README file that explains:
543 | - The steps needed to deploy the environment.
544 | - Security best practices that were implemented.
545 | - Any assumptions or design choices you've made.
546 |
547 |
548 | ```
549 |
550 | ## 15. AWS Serverless Challenge
551 |
552 | ```bash
553 |
554 | Create a Serverless REST API that allows users to manage a "to-do" list. Each to-do item should have a title and a status (completed or not).
555 |
556 | ## Requirements
557 |
558 | 1. DynamoDB Table: Create a DynamoDB table that will store the to-do items
559 |
560 | 2. AWS Lambda Functions: Implement Lambda functions for the CRUD operations:
561 | - Create a new item
562 | - Read an item by ID
563 | - Update an item by ID
564 | - Delete an item by ID
565 | - List all items
566 |
567 | 3. API Gateway: Create an API Gateway to expose these Lambda functions via HTTP endpoints.
568 |
569 |
570 | 4. IAM Roles: Make sure to assign appropriate IAM roles to your Lambda functions.
571 |
572 | 5. Terraform: Write the Terraform files to deploy these resources.
573 |
574 | ## Files to Submit
575 |
576 | - main.tf (plus any other .tf files) - This should contain the Terraform configuration to set up your infrastructure.
577 | - handler.js (or any language you are comfortable with) - This should contain the code for your Lambda functions.
578 |
579 |
580 | ```
581 |
582 | ## 16. DevOps Troubleshooting Challenge: The Broken CI/CD Pipeline
583 |
584 | [All files located here](./code/q16/README.md)
585 |
586 | ```bash
587 |
588 | ## Objective
589 | You've just been onboarded onto a new project and found out the CI/CD pipeline hasn't been working for days. Your task is to identify and fix whatever is wrong with it. The pipeline is supposed to test the code, build a Docker image, and then push it to a container registry.
590 |
591 | ## Starting State
592 | GitHub Repository: A GitHub repo containing a simple Python Flask app.
593 | GitHub Actions: A .github/workflows/main.yml file that defines the pipeline.
594 | DockerHub: Where the Docker image should be pushed.
595 | Unit Tests: Some failing, some passing.
596 |
597 | ## Requirements
598 |
599 | Identify Issues: List the problems you discover in the existing pipeline.
600 | Fix Unit Tests: Ensure all unit tests are passing.
601 | GitHub Actions: Make sure the pipeline successfully tests the code and builds the Docker image.
602 | DockerHub: Ensure the pipeline pushes the Docker image to DockerHub.
603 | Documentation: Update the README.md to reflect any steps needed to fix the pipeline or any dependencies that must be installed.
604 |
605 | project-root/
606 | |-- .github/
607 | | |-- workflows/
608 | | | |-- main.yml
609 | |-- tests/
610 | | |-- test_app.py
611 | |-- Dockerfile
612 | |-- app.py
613 | |-- requirements.txt
614 | |-- README.md
615 |
616 |
617 | ```
618 |
619 |
620 | Q16 Answers - ONLY LOOK once done (or for interviewers)
621 |
622 | 1. main.yml: Typo in the Docker build command: dockr instead of docker.
623 |
624 | 2. Dockerfile: Typo in the pip install command: -no-cache-dir instead of --no-cache-dir.
625 |
626 | 3. test_app.py: Incorrect comparison of bytes and string in the test assertion.
627 |
628 | 4. README.md: No mention of setting up GitHub Actions secrets for DockerHub.
629 |
630 |
631 |
632 | ## 17. AWS Lambda/Terraform Troubleshooting
633 |
634 | [All files located here](./code/q17/README.md)
635 |
636 | ```bash
637 |
638 | project-root/
639 | |-- terraform/
640 | | |-- main.tf
641 | | |-- variables.tf
642 | | |-- outputs.tf
643 | |-- lambda/
644 | | |-- handler.py
645 | | |-- requirements.txt
646 | |-- README.md
647 |
648 | Welcome to SuperCoolTech, a leading tech company that specialises in cloud-native solutions. You are part of the DevOps team and are responsible for managing and maintaining the infrastructure. Everything at SuperCoolTech is deployed as code, and you use Terraform extensively for provisioning AWS resources.
649 |
650 | Late one evening, you receive a notification that there are issues with a newly deployed Lambda function. The Lambda function is throwing errors, and there are issues with the S3 bucket where it's supposed to store data. You suspect there might also be some IAM role issues, but you're not sure yet.
651 |
652 | **Your task** is to identify and resolve the issues as quickly as possible. You have a brief window late at night to fix this, as that is the least traffic period, and you need to ensure minimal disruption.
653 |
654 | ## Your Mission
655 |
656 | Fork the existing project repo and clone it locally.
657 | Navigate through the Terraform files, Lambda function code, and other project components to identify what's wrong.
658 | Fix the broken parts and ensure that the Terraform configuration is idempotent and applies without errors.
659 | Test to confirm that the Lambda function is now working as expected and the S3 bucket is correctly configured.
660 | Document the changes you've made and what each change accomplishes.
661 |
662 | ## Constraints
663 |
664 | You cannot change the Terraform provider settings.
665 | You are limited to the current AWS services and can't introduce a new service for this task.
666 | All changes should be implemented via code (Infrastructure as Code).
667 |
668 | ## Success Criteria
669 |
670 | Terraform code applies without any errors.
671 | Lambda function executes successfully and performs its task.
672 | S3 bucket correctly configured and accessible by the Lambda function.
673 |
674 | ```
675 |
676 |
677 | Q17 Answers - ONLY LOOK once done (or for interviewers)
678 |
679 | 1. main.tf: s3_key for aws_lambda_function is wrong, as the zip file doesn't exist.
680 |
681 | 2. handler.py: Missing an import for json.
682 |
683 | 3. README.md: No mention of deploying the Lambda function code to the S3 bucket.
684 |
685 | 4. main.tf: IAM role permissions not sufficient for Lambda to write logs.
686 |
687 |
688 |
689 | ## 18. DevOps Troubleshooting Challenge with Docker Compose
690 |
691 | ```bash
692 |
693 | **Context**
694 |
695 | You have been provided with a Docker Compose YAML file that supposedly sets up a basic stack including an Nginx web server, a PostgreSQL database, and a Redis cache. The development team has reported that they are unable to get the services up and running, and they have passed the YAML file to you for investigation.
696 |
697 | **Your Task**
698 |
699 | Your task is to identify the issues that prevent the stack from running as expected. You are responsible for:
700 |
701 | Identifying the errors in the Docker Compose YAML file.
702 | Fixing the issues to ensure all services are up and running.
703 | Documenting the changes you've made and explaining why you made them.
704 |
705 | Provided Docker Compose File
706 | Here is the Docker Compose file you've been provided:
707 |
708 | #######################################################
709 | version: '3'
710 | services:
711 | web:
712 | image: nginx
713 | ports:
714 | - "8080:80"
715 | depends_on:
716 | - databse
717 | database:
718 | image: postgres
719 | environment:
720 | POSTGRES_DB: mydatabase
721 | POSTGRES_USER: user
722 | POSTGRES_PASSWORD: password
723 | redis:
724 | image: redis
725 | ports:
726 | - "6379:6739"
727 | #######################################################
728 |
729 | **Evaluation Criteria**
730 |
731 | - Successful deployment of services.
732 | - Clear documentation explaining the errors and the steps you took to correct them.
733 |
734 | ```
735 |
736 | ## 19. Linux Monitoring Automation
737 |
738 | ```bash
739 |
740 | **Context**
741 |
742 | You're the new DevOps Engineer at XYZ Corp. The team has been having issues keeping track of system metrics for their Linux-based application servers. Your task is to write a script that will be run periodically to collect some system metrics and output them to a log file.
743 |
744 | **Your Task**
745 |
746 | Your Bash script should do the following:
747 |
748 | - Capture the current CPU usage.
749 | - Capture the current Memory usage.
750 | - Capture the Disk usage for the / partition.
751 | - Output all these metrics with timestamps to a log file located at /var/log/sys_metrics.log.
752 |
753 | **Script Requirements**
754 |
755 | - The script must be executable.
756 | - All output must be appended to the log file; do not overwrite the previous logs.
757 |
758 | Example Log File Output:
759 |
760 | #######################################################
761 |
762 | [Timestamp] CPU: x%, Memory: x%, Disk: x%
763 | [Timestamp] CPU: x%, Memory: x%, Disk: x%
764 |
765 | #######################################################
766 |
767 | **Evaluation Criteria**
768 |
769 | Accurate capture of system metrics.
770 | Successful logging of metrics to the log file.
771 | Proper error handling and script execution permissions.
772 |
773 | **Hints**
774 | You can use commands like top, free, and df, together with utilities like awk, to capture the system metrics.
775 |
776 | Please note that the user running the script must have permission to write to /var/log/sys_metrics.log (e.g., via sudo); alternatively, execute the script as a superuser.
777 |
778 | ```
779 |
780 | ## 20. ArgoCD Deployment Challenge
781 |
782 | ```bash
783 |
784 | **Scenario**
785 |
786 | You've just joined the DevOps team at Quantum Corp. The team is working on automating the deployment process for a new Kubernetes-based application. They've chosen ArgoCD as their GitOps tool. Your task is to set up an ArgoCD pipeline to deploy an example application from a Git repository to a Kubernetes cluster.
787 |
788 | **Requirements**
789 |
790 | - Fork the sample application repository provided by the company. (For the sake of this example, let's assume it's a simple Nginx application on a K8s deployment.)
791 |
792 | - Install ArgoCD on a Kubernetes cluster. You can use Minikube or Rancher Desktop for local development.
793 |
794 | - Create an ArgoCD Application that deploys the sample application from the forked repository.
795 |
796 | - Make a change in the application and push it to the Git repository. Validate that ArgoCD picks up this change and deploys it automatically.
797 |
798 | - Set up a Rollback strategy for the ArgoCD Application.
799 |
800 | - Document the steps you've taken to complete the above tasks, and any scripts or YAML definitions you've created.
801 |
802 | **Evaluation Criteria**
803 |
804 | - Proper installation and configuration of ArgoCD.
805 | - Successful GitOps-based deployment of the application.
806 | - Ability to automatically update the application based on Git commits.
807 | - Successful rollback of a deployment.
808 | - Proper documentation and code quality.
809 |
810 | ```
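
For reference, an ArgoCD `Application` manifest for this kind of GitOps deployment typically looks like the sketch below; the repository URL, path, and target namespace are placeholders, not values prescribed by the challenge:

```yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: nginx-sample
  namespace: argocd              # ArgoCD's own namespace
spec:
  project: default
  source:
    repoURL: https://github.com/your-org/sample-nginx-app.git   # placeholder fork URL
    targetRevision: main
    path: manifests              # placeholder path to the K8s manifests in the repo
  destination:
    server: https://kubernetes.default.svc
    namespace: default
  syncPolicy:
    automated:
      prune: true
      selfHeal: true             # auto-sync keeps the cluster matching Git
```

With automated sync enabled, a rollback can be done by reverting the Git commit, or manually with `argocd app rollback <app-name> <revision>` after pausing auto-sync.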
811 |
812 | ## 21. Terraform Kubernetes Cluster Deployment Challenge
813 |
814 | ```bash
815 |
816 | **Scenario**
817 |
818 | Welcome to RocketCorp's DevOps team! We're migrating our applications to Kubernetes and are relying on Terraform to manage our infrastructure. Your task is to deploy a Kubernetes cluster along with some essential resources using Terraform.
819 |
820 | **Requirements**
821 |
822 | - Initialize a new Terraform project in your own Git repository.
823 |
824 | - Use a Terraform module to create a Kubernetes cluster on a cloud provider of your choice (AWS, GCP, Azure).
825 |
826 | Once the cluster is up, use Terraform to deploy the following Kubernetes resources:
827 |
828 | - A `Namespace` named `rocket-app`
829 | - A `Deployment` running the Nginx image under the `rocket-app` namespace
830 | - A `Service` of type `LoadBalancer` to expose the Nginx deployment
831 | - Output the LoadBalancer's IP or domain name at the end of the Terraform execution.
832 | - Document all the steps in a README.md, explaining how to run your code to bring up the Kubernetes cluster and how to tear it down.
833 |
834 | **Additional Constraints**
835 |
836 | - Ensure that your Terraform code is idempotent.
837 | - Add comments to your Terraform files explaining your choices.
838 |
839 | **Evaluation Criteria**
840 |
841 | - Successful creation of a Kubernetes cluster.
842 | - Successful deployment of specified Kubernetes resources.
843 | - Idempotent code.
844 | - Code quality and organization.
845 | - Detailed README.md explaining the steps to run the code.
846 |
847 | Please submit the URL of your Git repository, containing all your Terraform files and a README.md, for review as a PR in this repository.
848 |
849 | ```
850 |
851 | ## 22. Istio with Kubernetes
852 |
853 | ```bash
854 |
855 | **Scenario:**
856 |
857 | Create a simple Istio project where you'll deploy a couple of microservices on a Kubernetes cluster with Istio installed. The main idea is to use Istio to manage traffic between these services.
858 |
859 | **Prerequisites**
860 |
861 | - Kubernetes cluster up and running (use a local Minikube or Rancher Desktop cluster)
862 | - Istio installed on the cluster
863 |
864 | **Files Needed**
865 | 1. service-a-deployment.yaml
866 |
867 | #######################################################
868 | apiVersion: apps/v1
869 | kind: Deployment
870 | metadata:
871 | name: service-a
872 | spec:
873 | replicas: 1
874 | selector:
875 | matchLabels:
876 | app: service-a
877 | template:
878 | metadata:
879 | labels:
880 | app: service-a
881 | spec:
882 | containers:
883 | - name: service-a
884 | image: nginx
885 | ports:
886 | - containerPort: 80
887 | #######################################################
888 |
889 | 2. service-b-deployment.yaml
890 |
891 | #######################################################
892 | apiVersion: apps/v1
893 | kind: Deployment
894 | metadata:
895 | name: service-b
896 | spec:
897 | replicas: 1
898 | selector:
899 | matchLabels:
900 | app: service-b
901 | template:
902 | metadata:
903 | labels:
904 | app: service-b
905 | spec:
906 | containers:
907 | - name: service-b
908 | image: nginx
909 | ports:
910 | - containerPort: 80
911 | #######################################################
912 |
913 | Your task is to create the routing rules using Istio.
914 |
915 | Test the routing by accessing the service endpoint multiple times. You should see that traffic is distributed according to the weights defined in the virtual service.
916 |
917 | ```
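
As a starting point for the routing rules, a weighted `VirtualService` can split traffic between the two workloads. This sketch assumes ClusterIP Services named `service-a` and `service-b` exist in front of the deployments above, and the 80/20 split is only an example:

```yaml
apiVersion: networking.istio.io/v1beta1
kind: VirtualService
metadata:
  name: service-split
spec:
  hosts:
    - service-a                  # requests addressed to service-a are split below
  http:
    - route:
        - destination:
            host: service-a
          weight: 80             # 80% stays on service-a
        - destination:
            host: service-b
          weight: 20             # 20% is shifted to service-b
```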
918 |
919 | ## 23. OPA with Terraform (Security as Code)
920 |
921 | Solution here >> [Solution](./code/q23/README.md) >> Only look once you have attempted the question.
922 |
923 | ```bash
926 |
927 | **Context**
928 |
929 | You're a DevOps engineer at a growing startup. The company uses Terraform to manage its cloud infrastructure, and the management has decided to enforce specific policies for cloud resources to maintain a certain standard and compliance. Open Policy Agent (OPA) has been chosen to evaluate these policies.
930 |
931 | **Objective**
932 |
933 | - Use Terraform to provision an AWS S3 bucket.
934 |
935 | Write OPA policies that validate:
936 | - The S3 bucket must have versioning enabled.
937 | - The S3 bucket must not be publicly accessible.
938 | - Integrate the OPA policies within the Terraform flow, so that terraform apply will validate against your policies.
939 |
940 | **Constraints**
941 |
942 | - You have to use Terraform for provisioning resources.
943 | - OPA policies should be written in Rego.
944 | - The task should be accomplishable via a simple terraform apply command, assuming OPA is correctly configured.
945 |
946 | **Prerequisites**
947 |
948 | - Terraform installed
949 | - OPA installed
950 | - AWS CLI configured
951 |
952 | **Deliverables**
953 |
954 | - Terraform configuration files (main.tf, variables.tf, outputs.tf).
955 | - OPA policy file(s) in Rego format.
956 |
957 | A README explaining:
958 | - How to set up the environment.
959 | - How to run the Terraform configuration and OPA policies.
960 | - How to verify that the policies are enforced.
961 |
962 | BONUS:
963 | - Write some tests for the OPA policy (in Rego)
964 |
965 | **Notes**
966 |
967 | - Hint: You may use the OPA Playground to assist with writing and testing your policies.
968 | For the sake of this exercise, you can assume that you have AWS credentials set up, and you're familiar with basic OPA and Terraform commands. If the OPA policies fail, Terraform should not provision the S3 bucket.
969 |
970 | ```
971 |
972 | ## 24. Create Immutable AMIs with Packer
973 |
974 | Solution here >> [Solution](./code/q24/README.md) >> Only look once you have attempted the question.
975 |
976 | ```bash
977 |
978 | **Context**
979 |
980 | You're tasked to create a system where EC2 instances are disposable and can be replaced easily without manual intervention. You decide to go with Immutable Infrastructure by creating custom AMIs using Packer.
981 |
982 | **Objective**
983 |
984 | - Create a Packer script to build an Amazon Machine Image (AMI) with a web server (e.g., Nginx) pre-installed.
985 | - Deploy an EC2 instance using the generated AMI via Terraform.
986 | - Validate that the EC2 instance is serving the web server correctly.
987 |
988 | **Constraints**
989 |
990 | - Use Packer to create the AMI.
991 | - Deploy the EC2 instance with Terraform.
992 | - The instance should be in a public subnet and accessible over HTTP.
993 |
994 | **Deliverables**
995 |
996 | - Packer script file (packer.json).
997 | - Terraform files for infrastructure deployment.
998 | - README with instructions on how to build the AMI and deploy the EC2 instance.
999 |
1000 | ```
1001 |
1002 | ## 25. Secrets Management with HashiCorp Vault
1003 |
1004 | Solution here >> [Solution](./code/q25/README.md) >> Only look once you have attempted the question.
1005 |
1006 | ```bash
1007 |
1008 | **Context**
1009 |
1010 | You're responsible for the secure storage and management of secrets. You choose HashiCorp Vault as your tool for this task.
1011 |
1012 | **Objective**
1013 |
1014 | - Deploy a Vault server using Docker or directly on an EC2 instance.
1015 | - Initialize and unseal the Vault.
1016 | - Store a secret (e.g., database credentials) in the Vault.
1017 | - Use Terraform to deploy an EC2 instance.
1018 | - Configure the EC2 instance to read the secret from the Vault and write it to a text file.
1019 |
1020 | **Constraints**
1021 |
1022 | - Deploy Vault either as a Docker container or directly on an EC2 instance.
1023 | - Use Terraform for deploying any infrastructure.
1024 | - The secret should only be accessible by the EC2 instance and not exposed to the outside world.
1025 |
1026 | **Deliverables**
1027 |
1028 | - Vault configuration files, if any.
1029 | - Terraform files for Vault and/or EC2 instance deployment.
1030 | - README explaining how to deploy Vault, store a secret, and how the EC2 instance fetches it.
1031 |
1032 | ```
1033 |
1034 | ## 26. Infrastructure as Code Refactor
1035 |
1036 | Solution here >> [Solution](./code/q26/README.md) >> Only look once you have attempted the question.
1037 |
1038 | ```bash
1039 |
1040 | **Context**
1041 |
1042 | Your organisation has AWS CloudFormation templates. Your task is to migrate those to Terraform.
1043 |
1044 | **Objective**
1045 |
1046 | - Take an existing CloudFormation template.
1047 | - Convert it to a Terraform configuration.
1048 | - Deploy resources using both and compare.
1049 |
1050 | ```
1051 |
1052 | ## 27. Chaos Engineering
1053 |
1054 | Solution here >> [Solution](./code/q27/README.md) >> Only look once you have attempted the question.
1055 |
1056 | ```bash
1057 |
1058 | **Context**
1059 |
1060 | You are the lead engineer responsible for the resilience of a microservice-based system deployed on AWS. To ensure that the system can handle failure gracefully, you have decided to introduce chaos into the system using Chaos Monkey.
1061 |
1062 | **Objective**
1063 |
1064 | - Install and configure Chaos Monkey on an AWS EC2 instance.
1065 | - Use Chaos Monkey to terminate instances in a specified Auto Scaling Group.
1066 | - Monitor the behavior of the system using AWS CloudWatch when instances are terminated.
1067 |
1068 | **Constraints**
1069 |
1070 | - Assume you have full access to the AWS environment.
1071 | - You can use any programming/scripting languages you are comfortable with.
1072 |
1073 | **Deliverables**
1074 |
1075 | - Configuration file used for Chaos Monkey.
1076 | - CloudWatch alert and monitoring configuration.
1077 | - Brief documentation explaining the steps taken, and observations during the chaos tests.
1078 |
1079 | ```
1080 |
1081 |
1082 | ## 28. Monitoring and Alerting
1083 |
1084 | Solution here >> [Solution](./code/q28/README.md) >> Only look once you have attempted the question.
1085 |
1086 | ```bash
1087 |
1088 | **Context**
1089 |
1090 | You are responsible for ensuring that a Python Flask web application is operating efficiently. You have decided to set up monitoring and alerting using Prometheus and Grafana, both of which are installed in a Kubernetes cluster.
1091 |
1092 | **Objective**
1093 |
1094 | - Configure Prometheus to scrape metrics from the Flask application, which exposes metrics at /metrics.
1095 | - Set up a Grafana dashboard to visualize these metrics.
1096 | - Create a Grafana alert to notify you when CPU usage goes above 80%.
1097 |
1098 | **Constraints**
1099 |
1100 | - Assume you have full access to the Kubernetes cluster.
1101 | - You can use any programming/scripting languages you are comfortable with.
1102 |
1103 | **Deliverables**
1104 |
1105 | - Prometheus configuration file.
1106 | - Grafana dashboard and alert configuration.
1107 | - Documentation detailing your monitoring and alerting setup.
1108 |
1109 | ```
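
A minimal Prometheus scrape job for the Flask app's `/metrics` endpoint might look like the following; the target address is a placeholder for wherever the Flask Service is reachable inside the cluster (a `kubernetes_sd_configs` block driven by pod annotations is the more dynamic alternative):

```yaml
# prometheus.yml (fragment)
global:
  scrape_interval: 15s

scrape_configs:
  - job_name: flask-app
    metrics_path: /metrics
    static_configs:
      - targets:
          - flask-app.default.svc.cluster.local:5000   # placeholder Service DNS name and port
```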
1110 |
1111 | ## 29. Traefik with ECS
1112 |
1113 | Solution here >> [Solution](./code/q29/README.md) >> Only look once you have attempted the question.
1114 |
1115 | ```bash
1116 |
1117 | **Context**
1118 |
1119 | You're working for a startup that wants to deploy a microservices-based application on AWS ECS. They've chosen Traefik as their reverse proxy and load balancer due to its ease of use and configuration options. Your task is to set up the infrastructure and deployment pipeline.
1120 |
1121 | **Objective**
1122 |
1123 | - Create an ECS cluster using Terraform.
1124 | - Deploy at least two different services (could be simple web apps) in the ECS cluster.
1125 | - Implement Traefik as the reverse proxy for routing traffic to these services.
1126 | - Ensure SSL termination at the Traefik level.
1127 | - Make sure the services are accessible only via Traefik and not directly.
1128 |
1129 | **Constraints**
1130 |
1131 | - Use AWS ECS for container orchestration.
1132 | - Use Traefik v2.x for reverse proxy.
1133 | - Use Terraform for infrastructure as code.
1134 | - For SSL, you can use Let's Encrypt or any other method you're comfortable with.
1135 |
1136 | **Deliverables**
1137 |
1138 | - Terraform code files.
1139 | - ECS task and service definitions in JSON or YAML format.
1140 | - A Traefik configuration file.
1141 |
1142 | Brief documentation explaining:
1143 |
1144 | - Your setup and architecture.
1145 | - Any challenges you faced and how you overcame them.
1146 | - Instructions for deploying and tearing down the infrastructure.
1147 |
1148 | ```
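
For orientation, a Traefik v2 static configuration with HTTP-to-HTTPS redirection and Let's Encrypt SSL termination could start from a sketch like this; the e-mail address, ECS cluster name, and region are placeholders:

```yaml
# traefik.yaml (static configuration, fragment)
entryPoints:
  web:
    address: ":80"
    http:
      redirections:
        entryPoint:
          to: websecure
          scheme: https          # force all traffic onto TLS
  websecure:
    address: ":443"

certificatesResolvers:
  letsencrypt:
    acme:
      email: ops@example.com     # placeholder contact address
      storage: /etc/traefik/acme.json
      httpChallenge:
        entryPoint: web

providers:
  ecs:
    clusters:
      - my-ecs-cluster           # placeholder ECS cluster name
    region: us-east-1
    exposedByDefault: false      # only route to tasks that opt in via labels
```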
1149 |
1150 |
1151 | ## 30. Network Automation with Ansible
1152 |
1153 | Solution here >> [Solution](./code/q30/README.md) >> Only look once you have attempted the question.
1154 |
1155 | ```bash
1156 |
1157 | **Context**
1158 |
1159 | You're responsible for automating the network configurations of an organization with multiple routers and switches. Traditionally, changes have been done manually, leading to a lot of human errors and inefficiency.
1160 |
1161 | **Objective**
1162 |
1163 | Automate the network configurations using Ansible.
1164 |
1165 | 1. Setup Ansible to manage network devices.
1166 | 2. Automate the following configurations:
1167 | - VLAN setups
1168 | - OSPF routing protocol configurations
1169 | - Access Control Lists (ACLs)
1170 | 3. Write a playbook that:
1171 | - Backs up the current configurations
1172 | - Applies the new configurations
1173 | - Rolls back in case of failure
1174 | 4. Integrate this with a CI/CD pipeline to automate the deployment.
1175 |
1176 | **Constraints**
1177 |
1178 | - Use Ansible for automation.
1179 | - Target devices are Cisco routers and switches.
1180 | - For CI/CD, use Jenkins or any CI/CD tool you're comfortable with.
1181 |
1182 | **Deliverables**
1183 |
1184 | - Ansible playbooks for each automation task.
1185 | - A CI/CD pipeline configuration file.
1186 | - Brief documentation explaining:
1187 | - How to set up the project
1188 | - Any credentials or environment variables used
1189 |
1190 |
1191 | For this project, you'd typically need:
1192 |
1193 | - Ansible installed on a control node.
1194 | - Network access to the routers/switches.
1195 | - Relevant credentials to access these devices.
1196 | - The Ansible playbooks would interact with the network devices via protocols like SSH, SNMP, etc. You can manage the playbooks' source code in a Git repo and use Jenkins or another CI/CD tool to deploy changes automatically.
1197 |
1198 | ```
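
A small sketch of what the backup-plus-VLAN part of such a playbook could look like, assuming the `cisco.ios` collection is installed and an inventory group named `switches`:

```yaml
# playbooks/set_vlan.yml (sketch)
- name: Back up and configure VLANs on Cisco switches
  hosts: switches                        # assumed inventory group
  gather_facts: false
  connection: ansible.netcommon.network_cli

  tasks:
    - name: Back up the current running configuration
      cisco.ios.ios_config:
        backup: yes                      # saves a copy under the playbook's backup/ directory

    - name: Ensure VLAN 20 exists
      cisco.ios.ios_vlans:
        config:
          - vlan_id: 20
            name: app-servers
        state: merged
```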
1199 |
1200 | Back to the top of the page >> [Top](#devops-challenges)
1201 |
1202 | ## 31. K8s with Secrets Management Challenge
1203 |
1204 | Solution here >> [Solution](./code/q31/README.md) >> Only look once you have attempted the question.
1205 |
1206 | ```
1207 |
1208 | ## Overview
1209 |
1210 | This challenge involves setting up Kubernetes deployments and services, either on a cloud-based Kubernetes service like EKS/AKS/GKE, or locally via Minikube.
1211 |
1212 | ### Objectives
1213 |
1214 | 1. Create a Kubernetes deployment and service.
1215 | - **Bonus**: Implement a Horizontal Pod Autoscaler (HPA).
1216 |
1217 | 2. The deployment must use a secret named `KEY_SECRET`.
1218 |
1219 | 3. Mount this secret in the deployment.
1220 |
1221 | 4. `KEY_SECRET` should be used in the container in two ways:
1222 | - As a volume mount.
1223 | - As an environment variable.
1224 |
1225 | ---
1226 |
1227 | ## Requirements
1228 |
1229 | - You may use simple applications like `nginx`, `httpd`, etc.
1230 | - Feel free to use any programming language or open-source tools.
1231 | - Additional code can be written in Terraform or as scripts.
1232 |
1233 | ### Criteria
1234 |
1235 | 1. Provide all YAML manifest files, pipelines, and scripts used.
1236 | 2. Code should be clean and well-documented.
1237 | 3. Document your steps in a `README.md` file.
1238 | 4. Use either public cloud-based Kubernetes (EKS, AKS, GKE) or local Kubernetes (Minikube).
1239 | 5. Create a dedicated service account for the deployment.
1240 | 6. Always maintain at least 2 running pods.
1241 | 7. Utilize a NodePort type service for the deployment.
1242 | 8. At most 1 pod should be unavailable during rolling updates (Hint: Use the `maxUnavailable` parameter wisely).
1243 |
1244 | ### Bonus Points
1245 |
1246 | - Scan containers before deployment; fail the pipeline if severity is high or critical.
1247 | - Containers should not run as root.
1248 |
1249 | ```
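
To illustrate objective 4 (consuming the same secret both as a volume and as an environment variable), a Deployment fragment along these lines works; the Secret, service account, and container names here are assumptions:

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: web
spec:
  replicas: 2                             # always keep at least 2 pods running
  selector:
    matchLabels:
      app: web
  template:
    metadata:
      labels:
        app: web
    spec:
      serviceAccountName: web-sa          # dedicated service account (assumed name)
      containers:
        - name: web
          image: nginx
          env:
            - name: KEY_SECRET            # consumed as an environment variable
              valueFrom:
                secretKeyRef:
                  name: key-secret        # assumed Secret name
                  key: KEY_SECRET
          volumeMounts:
            - name: key-secret-vol        # ...and mounted as a file
              mountPath: /etc/secrets
              readOnly: true
      volumes:
        - name: key-secret-vol
          secret:
            secretName: key-secret
```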
1250 |
1251 | ## 32. Docker-Compose Challenge (2)
1252 |
1253 | Solution here >> [Solution](./code/q32/README.md) >> Only look once you have attempted the question.
1254 |
1255 | ```bash
1256 |
1257 | Set up a basic local development environment using `docker-compose`.
1258 |
1259 | This environment will include three services:
1260 |
1261 | 1. **Backend Service**: Utilize a `ruby` image, updated to the latest version. Bind the local `backend` directory to `/project/backend` within the container. This service should also expose port `6000` for UDP traffic, facilitating local DHCP services. Use the latest `centos` OS version available.
1262 |
1263 | 2. **Frontend Service**: Employ an `aspnet` image, also using the latest version. Bind the local `frontend` directory to `/project/frontend` in the container. This service should expose port `8080` from the container to port `80` externally using TCP. Connect this service to an external network named `fend-ingress`.
1264 |
1265 | 3. **Database Service**: Run a `mysql` image, opting for the latest stable version. This service should be part of a local network called `database`.
1266 |
1267 | Ensure that the `backend` service's port mapping uses the extended syntax, while the volume definitions for both `backend` and `frontend` services employ the concise syntax.
1268 |
1269 | The services should initiate in the sequence: `db`, followed by `backend`, and finally `frontend`.
1270 |
1271 |
1272 | ```
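
If the "extended" versus "concise" syntax wording is unfamiliar, this fragment shows both forms on a hypothetical backend service; it only illustrates the syntax, not the full solution:

```yaml
services:
  backend:
    image: ruby:latest
    volumes:
      - ./backend:/project/backend   # concise (short) volume syntax
    ports:
      - target: 6000                 # extended (long) port syntax
        published: 6000
        protocol: udp
```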
1273 |
1274 | ## 33. Ansible Advanced (1)
1275 |
1276 | Solution here >> [Solution](./code/q33/README.md) >> Only look once you have attempted the question.
1277 |
1278 | ```bash
1279 |
1280 | **Ansible Role-Based Deployment Exercise**
1281 |
1282 | **Context**
1283 |
1284 | You're a DevOps engineer at a startup that's transitioning from manual configurations to automated infrastructure. The team has selected Ansible as the automation tool. You're responsible for automating the setup and management of a three-tier architecture: load balancer, application server, and database server.
1285 |
1286 | **Objective**
1287 |
1288 | Use Ansible to provision a load balancer (HAProxy), application server (running a Node.js app), and a database server (MySQL).
1289 |
1290 | Create Ansible roles for:
1291 | - Load Balancer
1292 | - Application Server
1293 | - Database Server
1294 | Make sure the configurations are idempotent and can be re-run multiple times without causing errors.
1295 |
1296 | **Constraints**
1297 |
1298 | - You must use Ansible for provisioning and configuration.
1299 | - Divide the project into roles for better manageability.
1300 | - The Ansible playbook should be run with a single command.
1301 |
1302 | **Prerequisites**
1303 |
1304 | - Ansible installed
1305 | - Vagrant or a cloud provider to host your virtual machines
1306 | - SSH access to the virtual machines
1307 |
1308 | **Deliverables**
1309 |
1310 | - Ansible roles for the Load Balancer, Application Server, and Database Server.
1311 | - An Ansible playbook that uses these roles.
1312 |
1313 | A README explaining:
1314 |
1315 | - How to set up the environment.
1316 | - How to run the Ansible playbook and roles.
1317 | - How to verify the setup works as expected.
1318 |
1319 | Notes
1320 |
1321 | - This project aims to test your ability to create reusable Ansible roles and manage a multi-tier architecture.
1322 | - Feel free to create your roles locally, or use Ansible Galaxy to pull pre-configured roles, then modify them to suit your needs.
1323 | - The configurations should be secure, taking into account things like firewall rules, minimal permissions, etc.
1324 |
1325 | ```
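
The single-command entry point is usually a top-level playbook that maps inventory groups to the roles, roughly like the sketch below (group and role names are assumptions), run with `ansible-playbook -i inventory.ini main.yml`:

```yaml
# main.yml (sketch)
- name: Configure the database tier
  hosts: db_servers                # assumed inventory group
  become: true
  roles:
    - db_server

- name: Configure the application tier
  hosts: app_servers
  become: true
  roles:
    - app_server

- name: Configure the load balancer
  hosts: load_balancers
  become: true
  roles:
    - load_balancer
```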
1326 |
1327 | ## 34. Helm Deployment Challenge
1328 |
1329 | Solution here >> [Solution](./code/q34/README.md) >> Only look once you have attempted the question.
1330 |
1331 | ```bash
1332 |
1333 | # Helm Deployment Exercise
1334 |
1335 | ## Context
1336 |
1337 | You're a DevOps engineer at a software development company that's leveraging Kubernetes for its new microservices-based architecture. You've been given the task of packaging, sharing, and deploying services using Helm, the package manager for Kubernetes.
1338 |
1339 | ## Objective
1340 |
1341 | - **Package an existing Kubernetes service into a Helm chart.**
1342 | - **Deploy a Redis cluster using a community Helm chart.**
1343 | - **Override specific values in the Redis Helm chart according to given requirements.**
1344 | - **Create a Helm chart for a custom Node.js application and deploy it.**
1345 |
1346 | ## Requirements
1347 |
1348 | ### Task 1: Package Kubernetes Service
1349 |
1350 | You have an existing Kubernetes deployment and service (YAML files) for a simple Python Flask application. Your task is to convert these YAML files into a Helm chart.
1351 |
1352 | ### Task 2: Deploy Redis Cluster
1353 |
1354 | - Use a Helm chart from the community charts to deploy a Redis cluster into the same Kubernetes cluster.
1355 | - The Redis cluster should have at least 1 master and 2 slave nodes.
1356 |
1357 | ### Task 3: Customize Redis Cluster
1358 |
1359 | - Override the default password for the Redis cluster.
1360 | - Set resource limits and requests for CPU and Memory on Redis nodes.
1361 |
1362 | ### Task 4: Custom Node.js App
1363 |
1364 | - Create a Helm chart for deploying a custom Node.js application that connects to the Redis cluster.
1365 | - The Node.js application should have environment variables to connect to the Redis cluster.
1366 |
1367 | ## Constraints
1368 |
1369 | - Helm should be used for all deployments.
1370 | - All Helm charts should be stored in a centralized Helm repository.
1371 | - Use Helm 3.x for this project.
1372 | - Work with any Kubernetes distribution of your choice (EKS, AKS, GKE, Minikube, etc.).
1373 |
1374 | ## Deliverables
1375 |
1376 | - Helm charts for all the services.
1377 | - A README.md file that explains:
1378 | - How to install the Helm charts.
1379 | - How to customize the Helm charts.
1380 | - How to verify that the deployments were successful.
1381 |
1382 | ## Bonus Points
1383 |
1384 | - Implement Helm hooks for database schema migration.
1385 | - Use Helm to rollback one of the deployments to its previous version.
1386 |
1387 |
1388 | ```
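
For Task 3, overriding chart values is typically done with a separate values file passed via `-f`. The exact keys depend on which community Redis chart you pick; the fragment below assumes the Bitnami `redis` chart layout, so double-check against that chart's `values.yaml`:

```yaml
# values-override.yaml (key names assume the Bitnami redis chart)
auth:
  password: "change-me"            # override the default Redis password
master:
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 250m
      memory: 256Mi
replica:
  replicaCount: 2                  # 1 master + 2 replicas
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
    limits:
      cpu: 250m
      memory: 256Mi
```

Applied with something like `helm install redis bitnami/redis -f values-override.yaml`.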
1389 |
1390 | ## 35. Advanced Helm Challenge
1391 |
1392 | Solution here >> [Solution](./code/q35/README.md) >> Only look once you have attempted the question.
1393 |
1394 | ```bash
1395 |
1396 | # Advanced Helm and Helmfile Project Scenario
1397 |
1398 | ## Context
1399 | You are a DevOps engineer at a company that heavily utilizes Kubernetes for its applications. You are tasked to improve the deployment process using advanced Helm features and Helmfile for orchestrating multiple releases.
1400 |
1401 | ## Objective
1402 |
1403 | 1. Create Helm charts for a frontend and a backend application.
1404 | 2. Create a Helmfile that orchestrates these charts with different environments (dev, staging, and prod).
1405 | 3. Use Helm hooks to run database migrations during the release process.
1406 | 4. Enable monitoring on these applications using Helm's built-in support for Prometheus and Grafana.
1407 |
1408 | ## Requirements
1409 |
1410 | - Each application (frontend and backend) must have its own Helm chart.
1411 | - Helmfile should manage the releases.
1412 | - Implement a database migration script that will run as a Helm hook.
1413 | - Use a community Helm chart for setting up Prometheus and Grafana.
1414 |
1415 | ## Constraints
1416 |
1417 | - You should be using Helm v3.x
1418 | - Helmfile should be properly installed and configured.
1419 |
1420 | ## Deliverables
1421 |
1422 | - Helm charts for frontend and backend.
1423 | - Helmfile configuration(s).
1424 | - Any script(s) used for database migrations.
1425 | - A README.md file documenting the steps taken, and how to run and manage the project.
1426 |
1427 | ## Evaluation Criteria
1428 |
1429 | - Following best practices for Helm and Helmfile.
1430 | - Clarity and maintainability of code.
1431 | - Successful deployment of both frontend and backend using Helmfile.
1432 | - Working Prometheus and Grafana setup.
1433 |
1434 | ## Bonus Points
1435 |
1436 | - Demonstrate how to rollback a failed release using Helmfile.
1437 | - Show how to securely manage sensitive data using Helm secrets or any other secure storage.
1438 |
1439 | Good luck! This task is designed to gauge your understanding of Helm and Helmfile, focusing on their advanced features for managing complex deployments. Feel free to ask if you have any questions or face any challenges! 🚀
1440 |
1441 |
1442 | ```
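
A Helmfile that orchestrates the two charts across environments might start from a sketch like this; the chart paths and per-environment values files are assumptions about your repository layout:

```yaml
# helmfile.yaml (sketch)
environments:
  dev: {}
  staging: {}
  prod: {}

releases:
  - name: backend
    namespace: "{{ .Environment.Name }}"          # one namespace per environment
    chart: ./charts/backend                       # assumed local chart path
    values:
      - environments/{{ .Environment.Name }}/backend.yaml

  - name: frontend
    namespace: "{{ .Environment.Name }}"
    chart: ./charts/frontend
    values:
      - environments/{{ .Environment.Name }}/frontend.yaml
```

Deployed per environment with `helmfile -e dev apply` (or `staging`/`prod`).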
1443 |
1444 | ## 36. Bash Automation Challenge
1445 |
1446 | Solution here >> [Solution](./code/q36/README.md) >> Only look once you have attempted the question.
1447 |
1448 | ```bash
1449 |
1450 | ## Context
1451 |
1452 | You're an automation engineer, and your team often needs to set up new environments for testing and development. They rely on a mix of different databases and web servers for their tasks. Manually installing and configuring these components is time-consuming and error-prone. You decide to create a Bash script to automate this process.
1453 |
1454 | ## Objective
1455 |
1456 | Your task is to create a Bash script that can install, configure, and manage the following software components on a Ubuntu machine:
1457 |
1458 | - MySQL
1459 | - PostgreSQL
1460 | - Nginx
1461 | - Apache HTTP Server
1462 |
1463 | ### Specific Requirements
1464 |
1465 | 1. **Interactivity**:
1466 | - The script should interactively ask the user which software they want to install.
1467 | - Offer a menu of choices and allow multiple selections.
1468 |
1469 | 2. **Installation and Configuration**:
1470 | - For MySQL and PostgreSQL, set the root password, create a database, and create a user.
1471 | - For Nginx and Apache, configure a virtual host.
1472 |
1473 | 3. **Logging**:
1474 | - All actions, errors, and user inputs should be logged to a log file with a timestamp.
1475 |
1476 | 4. **Status Checks**:
1477 | - Add an option in the script to check the status of all installed services.
1478 |
1479 | 5. **Uninstallation**:
1480 | - Provide an option to uninstall any of the installed software and remove any configurations made by the script.
1481 |
1482 | ## Constraints
1483 |
1484 | - The Bash script must be compatible with Ubuntu.
1485 | - No third-party scripting languages or packages; stick to Bash and standard Ubuntu utilities.
1486 |
1487 | ## Deliverables
1488 |
1489 | - Your Bash script.
1490 | - A `README.md` file explaining:
1491 | - How to set up the environment.
1492 | - How to run the script.
1493 | - How to verify that the software components are installed and configured correctly.
1494 |
1495 | ## Bonus Points
1496 |
1497 | - **Error Handling**:
1498 | - Robust error handling and informative error messages.
1499 |
1500 | - **Dry-Run Mode**:
1501 | - Add a 'dry-run' mode that only displays the commands that would be executed, without actually running them.
1502 |
1503 | ```
1504 |
1505 | ## 37. Development with Tiltfiles
1506 |
1507 | Solution here >> [Solution](./code/q37/README.md) >> Only look once you have attempted the question.
1508 |
1509 | ```bash
1510 |
1511 | ## Context
1512 |
1513 | You're a DevOps Engineer responsible for setting up local development environments. The engineering team works on a microservices architecture, and there's a need to simplify the development workflow. Tilt has been chosen as the tool to automate the local setup and make the developer experience smoother.
1514 |
1515 | ## Objective
1516 |
1517 | - Use Tilt to set up a local development environment.
1518 | - The environment should include three microservices: a `frontend`, a `backend`, and a `database`.
1519 | - `frontend` should be a simple React application.
1520 | - `backend` should be a simple REST API built using Python's Flask.
1521 | - `database` should be a MongoDB instance.
1522 |
1523 | ## Requirements
1524 |
1525 | - Each microservice should run in its own Docker container.
1526 | - The Tiltfile should be configured to live reload the `frontend` and `backend` services upon code changes.
1527 | - Implement a way to seed initial data into the MongoDB database.
1528 | - Ensure the services are inter-communicable and can talk to each other.
1529 |
1530 | ## Constraints
1531 |
1532 | - You have to use Docker for containerization.
1533 | - The configuration should be done using a Tiltfile.
1534 |
1535 | ## Prerequisites
1536 |
1537 | - Tilt installed
1538 | - Docker installed
1539 |
1540 | ## Deliverables
1541 |
1542 | - Source code for `frontend` and `backend` services.
1543 | - Dockerfiles for each service.
1544 | - The main Tiltfile to orchestrate the local environment.
1545 | - A README.md file that explains:
1546 | - How to set up the environment.
1547 | - How to run the local development setup.
1548 | - How to verify that everything is running correctly.
1549 |
1550 | ## Bonus Points
1551 |
1552 | - Use Tilt extensions to further simplify the development workflow.
1553 | - Include logging and monitoring solutions for your local environment using Tilt's built-in features.
1554 |
1555 | ```
1556 |
1557 | ## 38. Secure Access to Production Systems
1558 |
1559 | Solution here >> [Solution](./code/q38/README.md) >> Only look once you have attempted the question.
1560 |
1561 | ```bash
1562 |
1563 | # Secure Access to Production Systems
1564 |
1565 | ## Scenario
1566 |
1567 | You are a DevOps Engineer at an emerging tech company. As the company grows, securing access to production servers becomes increasingly crucial. However, you also need to ensure that the flow of development and deployment doesn't slow down. Your task is to set up a system that allows secure access to production servers.
1568 |
1569 | ## Objectives
1570 |
1571 | - **SSH Jump Host**: Set up a jump host that will act as an intermediary between the developers and the production servers. Developers will SSH into the jump host first, and from there, access the production servers.
1572 | - **Ansible for Access Control**: Use Ansible to automate the process of updating SSH keys and other access controls for the production servers. This automation should run through the jump host.
1573 |
1574 | ## Constraints
1575 |
1576 | - The jump host should be a minimal, hardened system, with only essential services running.
1577 | - You should use Ansible roles and playbooks to manage configurations.
1578 | - Document how to add or remove a user's access.
1579 |
1580 | ## Prerequisites
1581 |
1582 | - Ansible installed on your local machine.
1583 | - Two or more remote servers (Production Servers).
1584 | - One remote server for the Jump Host.
1585 | - SSH access to all servers.
1586 |
1587 | ## Deliverables
1588 |
1589 | 1. Ansible Playbooks and roles for setting up the jump host and managing SSH access.
1590 | 2. A secure and minimal setup on the jump host.
1591 | 3. Documentation on:
1592 | - How to set up the environment.
1593 | - How to run the Ansible playbooks.
1594 | - How to verify that the setup is secure and functional.
1595 |
1596 | ## Notes
1597 |
1598 | - Assume that you have root access to all the servers involved.
1599 | - The configuration should be idempotent. Running the Ansible playbooks multiple times should not change the state of the system beyond the first successful run.
1600 |
1601 |
1602 | ```
1603 |
1604 |
1605 | ## 39. Automated Backup and Restore Process
1606 |
1607 | Solution here >> [Solution](./code/q39/README.md) >> Only look once you have attempted the question.
1608 |
1609 | ```bash
1610 |
1611 | # Automated Backup and Restore Process
1612 |
1613 | ## Scenario
1614 |
1615 | Your mission is to implement an automated backup process for critical systems and databases. Time is money, and we need to make sure we can get our systems back up and running ASAP if something goes awry.
1616 |
1617 | ## Objectives
1618 |
1619 | 1. **Automate Database Backups**: Use bash scripting to automate backups for a MySQL database.
1620 | 2. **Remote Storage**: Store the database backups in a remote location like S3 or Azure Blob Storage.
1621 | 3. **Automate Restore Process**: Create a bash script that can restore the database from a backup.
1622 |
1623 | ## Tasks
1624 |
1625 | ### Automate Database Backups
1626 |
1627 | 1. Create a bash script that will backup a MySQL database and save it locally.
1628 | 2. The script should log its activities, so you know if something went wrong.
1629 |
1630 | ### Remote Storage
1631 |
1632 | 1. Modify your bash script to upload the backup to a remote storage service like S3 or Azure Blob Storage.
1633 | 2. Secure the backup with encryption before sending it off to remote storage.
1634 |
1635 | ### Automate Restore Process
1636 |
1637 | 1. Create a bash script to restore the database from a backup stored in S3 or Azure Blob Storage.
1638 | 2. The script should log its activities and any errors.
1639 |
1640 | ### Bonus
1641 |
1642 | - Implement email notifications for successful backups and restores, and for failures.
1643 |
1644 | ## Deliverables
1645 |
1646 | 1. Bash script for automated backups.
1647 | 2. Bash script for automated restores.
1648 | 3. A README guide on how to set up and use your scripts.
1649 |
1650 |
1651 | ```
1652 |
1653 | ## 40. Configuration Management with Puppet
1654 |
1655 | Solution here >> [Solution](./code/q40/README.md) >> Only look once you have attempted the question.
1656 |
1657 | ```bash
1658 |
1659 | # Configuration Management with Puppet
1660 |
1661 | ## Scenario
1662 | You are responsible for maintaining a large fleet of servers that need to have standardized configurations. You've decided to use Puppet for configuration management. Additionally, you want to adopt a test-driven approach to ensure reliability.
1663 |
1664 | ## Objectives
1665 |
1666 | 1. **Set up Puppet Master and Agent**: Configure a Puppet master and at least one Puppet agent. Validate successful communication between them.
1667 |
1668 | 2. **Implement Standard Configurations**: Create Puppet manifests to standardize the following configurations across all servers:
1669 | - Install and manage a web server (e.g., Apache or Nginx).
1670 | - Set up user accounts with specific permissions.
1671 | - Ensure specific packages are installed or removed.
1672 |
1673 | 3. **Test-Driven Development (TDD)**: Before deploying any changes to configurations, write tests to validate them. Use `rspec-puppet` or a similar testing framework for this.
1674 |
1675 | 4. **Monitoring**: Make sure to include monitoring checks to verify configurations.
1676 |
1677 | 5. **Documentation**: Provide a README file with setup instructions, how to run your Puppet configurations, and how to execute tests.
1678 |
1679 | ## Bonus
1680 | - **Scalability**: Show how your solution can scale to handle more servers.
1681 | - **Security**: Implement some basic security practices like firewall rules or file integrity checks.
1682 |
1683 | ## Tips
1684 | - Make sure to document your steps well.
1685 | - Try to automate as much as possible.
1686 |
1687 | ## Deliverables
1688 | - All Puppet manifests.
1689 | - Testing scripts.
1690 | - A README.md with instructions.
1691 |
1692 |
1693 | ```
1694 |
1695 | ## 41. Advanced Ansible with Molecule Testing
1696 |
1697 | Solution here >> [Solution](./code/q41/README.md) >> Only look once you have attempted the question.
1698 |
1699 | ```bash
1700 |
1701 | # Advanced Ansible with Molecule Testing
1702 |
1703 | ### Scenario:
1704 | You're tasked with automating server provisioning and configurations for a new microservices application. To ensure the Ansible roles and playbooks work as expected, you also need to integrate a testing framework called Molecule.
1705 |
1706 | ### Objectives:
1707 | 1. Create Ansible roles for configuring a web server, a database, and a caching layer.
1708 | 2. Utilize Ansible Vault for sensitive information.
1709 | 3. Use Molecule to test the Ansible roles.
1710 | 4. Provide documentation on how to run the Ansible playbooks and Molecule tests.
1711 |
1712 | ### Skills Required:
1713 | - Ansible
1714 | - Ansible Vault
1715 | - Molecule
1716 | - YAML
1717 | - Bash Scripting
1718 | - Git
1719 | - Docker (optional for Molecule testing)
1720 |
1721 | ### Tasks:
1722 | 1. Install Ansible, Molecule, and any other required dependencies.
1723 | 2. Initialize a new Ansible role for each of the three layers (web server, database, caching).
1724 | 3. Write playbooks that utilize these roles.
1725 | 4. Encrypt sensitive data using Ansible Vault.
1726 | 5. Write Molecule scenarios to test the functionality of each Ansible role.
1727 | 6. Use Git to version control your code.
1728 | 7. Write a README file explaining how to execute the Ansible playbooks and run the Molecule tests.
1729 |
1730 | ### Bonus:
1731 | 1. Integrate this Ansible project into a CI/CD pipeline.
1732 | 2. Utilize advanced Ansible features like loops, conditionals, and handlers.
1733 |
1734 | ### Testing:
1735 | - Use Molecule to perform the tests on your Ansible roles.
1736 | - Make sure to cover idempotency tests, and syntax checks.
1737 |
1738 | ### Deliverables:
1739 | - Ansible roles and playbooks
1740 | - Molecule test scenarios
1741 | - README file
1742 | - Git repository link
1743 |
1744 |
1745 | ```
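
Each role's Molecule scenario is driven by a `molecule.yml`; a minimal Docker-based sketch (the test image and scenario layout are assumptions, and the Docker driver requires the Molecule Docker driver plugin to be installed) looks like this:

```yaml
# molecule/default/molecule.yml (sketch)
dependency:
  name: galaxy
driver:
  name: docker                    # needs the Molecule Docker driver plugin installed
platforms:
  - name: instance
    image: ubuntu:22.04           # assumed test image
    pre_build_image: true
provisioner:
  name: ansible
verifier:
  name: ansible                   # verify with an Ansible-based verify.yml
```

Run `molecule test` inside the role directory to exercise the full create/converge/idempotence/verify/destroy sequence.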
1746 |
1747 | ## 42. Terraform with Terratest Challenge
1748 |
1749 | Solution here >> [Solution](./code/q42/README.md) >> Only look once you have attempted the question.
1750 |
1751 | ```bash
1752 |
1753 | ## Terraform Infrastructure Deployment with Terratest for Testing
1754 |
1755 | ### Scenario:
1756 | You are tasked with creating a scalable and maintainable AWS infrastructure for a new web application. The team wants to be sure that the infrastructure is stable and meets all the requirements before and after deployment. Hence, you'll write automated tests using Terratest to validate the Terraform code.
1757 |
1758 | ### Objectives:
1759 | 1. Create AWS infrastructure using Terraform:
1760 | - VPC, Subnets, EC2 instances, Load Balancer
1761 | 2. Use remote backends like S3 to store the Terraform state files.
1762 | 3. Write tests using Terratest to validate the infrastructure.
1763 | - Test if the VPC is created.
1764 | - Test if the number of EC2 instances matches the requirement.
1765 | - Test if the Load Balancer is distributing traffic.
1766 | 4. Automate the testing process using a CI/CD pipeline.
1767 |
1768 | ### Prerequisites:
1769 | - Basic understanding of AWS services
1770 | - Familiarity with Terraform and Go language
1771 |
1772 | ### Tools Needed:
1773 | - Terraform
1774 | - AWS CLI
1775 | - Go
1776 | - Terratest
1777 | - A CI/CD tool like Jenkins or GitHub Actions
1778 |
1779 | ### Bonus:
1780 | 1. Use a Makefile to streamline the test execution.
1781 | 2. Integrate Slack notifications for test results.
1782 |
1783 |
1784 | ```
1785 |
--------------------------------------------------------------------------------
/devops-challenges/code/q15/q15-handler.js:
--------------------------------------------------------------------------------
1 | 'use strict';
2 |
3 | module.exports.create = async (event) => {
4 | // Your logic for CREATE
5 | };
6 |
7 | module.exports.get = async (event) => {
8 | // Your logic for GET BY ID
9 | };
10 |
11 | module.exports.list = async (event) => {
12 | // Your logic for LIST ALL
13 | };
14 |
15 | module.exports.update = async (event) => {
16 | // Your logic for UPDATE
17 | };
18 |
19 | module.exports.delete = async (event) => {
20 | // Your logic for DELETE
21 | };
22 |
--------------------------------------------------------------------------------
/devops-challenges/code/q15/q15-serverless.yml:
--------------------------------------------------------------------------------
1 | service: todo-api
2 |
3 | provider:
4 | name: aws
5 | runtime: nodejs14.x
6 | stage: dev
7 | region: us-east-1
8 |
9 | functions:
10 | create:
11 | handler: handler.create
12 | events:
13 | - http:
14 | path: todos
15 | method: post
16 |
17 | get:
18 | handler: handler.get
19 | events:
20 | - http:
21 | path: todos/{id}
22 | method: get
23 |
24 | list:
25 | handler: handler.list
26 | events:
27 | - http:
28 | path: todos
29 | method: get
30 |
31 | update:
32 | handler: handler.update
33 | events:
34 | - http:
35 | path: todos/{id}
36 | method: put
37 |
38 | delete:
39 | handler: handler.delete
40 | events:
41 | - http:
42 | path: todos/{id}
43 | method: delete
44 |
--------------------------------------------------------------------------------
/devops-challenges/code/q16/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.8-slim
2 |
3 | WORKDIR /app
4 |
5 | ADD requirements.txt .
6 |
7 | RUN pip install -no-cache-dir -r requirements.txt
8 |
9 | ADD . .
10 |
11 | CMD ["python", "app.py"]
12 |
--------------------------------------------------------------------------------
/devops-challenges/code/q16/README.md:
--------------------------------------------------------------------------------
1 | # Broken CI/CD Pipeline Project
2 |
3 | This is a broken CI/CD pipeline project. Your task is to fix it.
4 |
5 | ## Setup
6 |
7 | 1. Fork this repository.
8 | 2. Clone your fork.
9 | 3. Fix the issues.
10 |
11 | ## Run Locally
12 |
13 | 1. Install dependencies: `pip install -r requirements.txt`
14 | 2. Run the app: `python app.py`
15 |
--------------------------------------------------------------------------------
/devops-challenges/code/q16/app.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 |
3 | app = Flask(__name__)
4 |
5 | @app.route('/')
6 | def hello_world():
7 | return 'Hello, world!'
8 |
9 | if __name__ == '__main__':
10 | app.run(host='0.0.0.0', port=8000)
11 |
--------------------------------------------------------------------------------
/devops-challenges/code/q16/main.yml:
--------------------------------------------------------------------------------
1 | name: CI/CD Pipeline
2 |
3 | on: [push]
4 |
5 | jobs:
6 | build:
7 | runs-on: ubuntu-latest
8 |
9 | steps:
10 | - uses: actions/checkout@v2
11 |
12 | - name: Run Tests
13 | run: |
14 | pip install -r requirements.txt
15 | python -m unittest tests/test_app.py
16 |
17 | - name: Build Docker Image
18 | run: |
19 | dockr build . -t broken-image
20 |
21 | - name: Push to DockerHub
22 | run: |
23 | echo "${{ secrets.DOCKER_PASSWORD }}" | docker login -u "${{ secrets.DOCKER_USERNAME }}" --password-stdin
24 | docker push broken-image
25 |
--------------------------------------------------------------------------------
/devops-challenges/code/q16/requirements.txt:
--------------------------------------------------------------------------------
1 | flask==1.1.2
2 |
--------------------------------------------------------------------------------
/devops-challenges/code/q16/tests/test_app.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from app import app
3 |
4 | class TestApp(unittest.TestCase):
5 |
6 | def setUp(self):
7 | self.app = app.test_client()
8 |
9 | def test_index(self):
10 | response = self.app.get('/')
11 | self.assertEqual(response.status_code, 200)
12 | self.assertEqual(response.data, b'Hello, World!')
13 |
--------------------------------------------------------------------------------
/devops-challenges/code/q17/README.md:
--------------------------------------------------------------------------------
1 | # Broken Terraform and Lambda Project
2 |
3 | This is a project that has issues with Terraform and a simple AWS Lambda function.
4 |
5 | ## Setup
6 |
7 | 1. Fork this repository.
8 | 2. Clone your fork.
9 | 3. Run `terraform init` and `terraform apply` in the `terraform/` directory.
10 | 4. Debug and fix the issues.
11 |
12 | ## Run
13 |
14 | 1. Execute `terraform apply` to create the resources.
15 | 2. Test the Lambda function to make sure it's working as expected.
16 |
17 |
--------------------------------------------------------------------------------
/devops-challenges/code/q17/lambda/handler.py:
--------------------------------------------------------------------------------
1 | import json
2 |
3 | def handler(event, context):
4 | return {
5 | 'statusCode': 200,
6 | 'body': json.dumps('Hello from Lambda!')
7 | }
8 |
--------------------------------------------------------------------------------
/devops-challenges/code/q17/lambda/requirements.txt:
--------------------------------------------------------------------------------
1 | boto3==1.18.67
2 |
--------------------------------------------------------------------------------
/devops-challenges/code/q17/terraform/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "us-east-1"
3 | }
4 |
5 | resource "aws_s3_bucket" "my_bucket" {
6 | bucket = "my-super-cool-bucket"
7 | acl = "private"
8 | }
9 |
10 | resource "aws_lambda_function" "my_lambda" {
11 | function_name = "my_lambda"
12 |
13 | s3_bucket = aws_s3_bucket.my_bucket.bucket
14 | s3_key = "lambda_function_payload.zip"
15 |
16 | handler = "handler.handler"
17 | runtime = "python3.8"
18 |
19 | role = aws_iam_role.iam_for_lambda.arn
20 | }
21 |
22 | resource "aws_iam_role" "iam_for_lambda" {
23 | name = "iam_for_lambda"
24 |
25 | assume_role_policy = jsonencode({
26 | Version = "2012-10-17",
27 | Statement = [
28 | {
29 | Action = "sts:AssumeRole"
30 | Effect = "Allow"
31 | Principal = {
32 | Service = "lambda.amazonaws.com"
33 | }
34 | }
35 | ]
36 | })
37 | }
38 |
--------------------------------------------------------------------------------
/devops-challenges/code/q2/backup.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/moabukar/tech-vault/edf8ee9f875cfd1a22bcab3223f58120cf293ca7/devops-challenges/code/q2/backup.tar.gz
--------------------------------------------------------------------------------
/devops-challenges/code/q2/runscript.sh:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/moabukar/tech-vault/edf8ee9f875cfd1a22bcab3223f58120cf293ca7/devops-challenges/code/q2/runscript.sh
--------------------------------------------------------------------------------
/devops-challenges/code/q23/.gitignore:
--------------------------------------------------------------------------------
1 | # Local .terraform directories
2 | **/.terraform/*
3 |
4 | # .tfstate files
5 | *.tfstate
6 | *.tfstate.*
7 |
8 | # Crash log files
9 | crash.log
10 | crash.*.log
11 |
12 | # Exclude all .tfvars files, which are likely to contain sensitive data, such as
13 | # password, private keys, and other secrets. These should not be part of version
14 | # control as they are data points which are potentially sensitive and subject
15 | # to change depending on the environment.
16 | *.tfvars
17 | *.tfvars.json
18 |
19 | # Ignore override files as they are usually used to override resources locally and so
20 | # are not checked in
21 | override.tf
22 | override.tf.json
23 | *_override.tf
24 | *_override.tf.json
25 |
26 | # Include override files you do wish to add to version control using negated pattern
27 | # !example_override.tf
28 |
29 | # Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
30 | # example: *tfplan*
31 |
32 | # Ignore CLI configuration files
33 | .terraformrc
34 | terraform.rc
35 |
--------------------------------------------------------------------------------
/devops-challenges/code/q23/README.md:
--------------------------------------------------------------------------------
1 | # Solution to Q23 (OPA with TF)
2 |
3 | ```bash
4 | opa-terraform-challenge/
5 | │
6 | ├── policies/
7 | │   ├── s3.rego
8 | │   └── s3_test.rego
9 | ├── terraform/
10 | │   ├── main.tf
11 | │   ├── variables.tf
12 | │   └── outputs.tf
13 | └── README.md
14 | ```
15 | 
16 | ## Running the Solution
17 |
18 | - `export AWS_ACCESS_KEY_ID="anaccesskey"`
19 | - `export AWS_SECRET_ACCESS_KEY="asecretkey"`
20 |
21 | ### Initialize Terraform:
22 |
23 | - `cd terraform`
24 | - `terraform init`
25 |
26 | ### Run OPA policy check:
27 |
28 | - Generate a JSON plan: `terraform plan -out=tfplan && terraform show -json tfplan > tfplan.json`
29 | - Run the policy against it: `opa eval --data ../policies/s3.rego --input tfplan.json "data.terraform.deny"`
30 | 
31 |
32 | ### Run Terraform:
33 |
34 | - `terraform apply`
35 | 
36 | You can automate the OPA check in your CI/CD pipeline so that it runs before `terraform apply`.
37 |
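38 | For example, a CI job could gate `terraform apply` on the policy result. A minimal sketch, assuming the layout above and that `jq` is available:
39 | 
40 | ```bash
41 | cd terraform
42 | terraform plan -out=tfplan
43 | terraform show -json tfplan > tfplan.json
44 | denials=$(opa eval --data ../policies/s3.rego --input tfplan.json "data.terraform.deny" \
45 |   | jq '.result[0].expressions[0].value | length')
46 | if [ "$denials" -eq 0 ]; then
47 |   terraform apply tfplan
48 | else
49 |   echo "OPA policy check failed: $denials violation(s)" >&2
50 |   exit 1
51 | fi
52 | ```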
--------------------------------------------------------------------------------
/devops-challenges/code/q23/policies/s3.rego:
--------------------------------------------------------------------------------
1 | package terraform
2 |
3 | default allow = false
4 | 
5 | allow {
6 |     count(deny) == 0
7 | }
8 | 
9 | deny[reason] {
10 |     rc := input.resource_changes[_]
11 |     rc.type == "aws_s3_bucket"
12 |     rc.change.actions[_] == "create"
13 |     not rc.change.after.versioning[0].enabled
14 | 
15 |     reason := "S3 buckets must be created with versioning enabled"
16 | }
17 | 
18 | deny[reason] {
19 |     rc := input.resource_changes[_]
20 |     rc.type == "aws_s3_bucket"
21 |     rc.change.actions[_] == "create"
22 |     rc.change.after.acl != "private"
23 | 
24 |     reason := "S3 buckets must use a private ACL"
25 | }
26 |
--------------------------------------------------------------------------------
/devops-challenges/code/q23/policies/s3_test.rego:
--------------------------------------------------------------------------------
1 | package terraform
2 |
3 | # Test to verify S3 bucket with versioning enabled
4 | test_allow_versioning {
5 |     count(deny) == 0 with input as {
6 | "resource_changes": [{
7 | "type": "aws_s3_bucket",
8 | "change": {
9 | "actions": ["create"],
10 | "after": {
11 | "versioning": [{"enabled": true}],
12 | "acl": "private"
13 | }
14 | }
15 | }]
16 | }
17 | }
18 |
19 | # Test to verify S3 bucket with versioning disabled
20 | test_deny_versioning {
21 |     count(deny) > 0 with input as {
22 | "resource_changes": [{
23 | "type": "aws_s3_bucket",
24 | "change": {
25 | "actions": ["create"],
26 | "after": {
27 | "versioning": [{"enabled": false}],
28 | "acl": "private"
29 | }
30 | }
31 | }]
32 | }
33 | }
34 |
35 | # Test to verify S3 bucket with wrong ACL
36 | test_deny_wrong_acl {
37 |     count(deny) > 0 with input as {
38 | "resource_changes": [{
39 | "type": "aws_s3_bucket",
40 | "change": {
41 | "actions": ["create"],
42 | "after": {
43 | "versioning": [{"enabled": true}],
44 | "acl": "public-read"
45 | }
46 | }
47 | }]
48 | }
49 | }
50 |
51 | # Test to verify S3 bucket with everything right
52 | test_allow_everything_right {
53 |     count(deny) == 0 with input as {
54 | "resource_changes": [{
55 | "type": "aws_s3_bucket",
56 | "change": {
57 | "actions": ["create"],
58 | "after": {
59 | "versioning": [{"enabled": true}],
60 | "acl": "private"
61 | }
62 | }
63 | }]
64 | }
65 | }
66 |
--------------------------------------------------------------------------------
/devops-challenges/code/q23/terraform/.terraform.lock.hcl:
--------------------------------------------------------------------------------
1 | # This file is maintained automatically by "terraform init".
2 | # Manual edits may be lost in future updates.
3 |
4 | provider "registry.terraform.io/hashicorp/aws" {
5 | version = "5.19.0"
6 | hashes = [
7 | "h1:rgsqMIwX/2b2Ghrfd3lPasPoHupkWsEA+fcXod60+v8=",
8 | "zh:03aa0f857c6dfce5f46c9bf3aad45534b9421e68983994b6f9dd9812beaece9c",
9 | "zh:0639818c5bf9f9943667f39ec38bb945c9786983025dff407390133fa1ca5041",
10 | "zh:0b82ad42ced8fb4a138eaf2fd37cf6059ca0bb482114b35fb84f22fc1500324a",
11 | "zh:173e8c19a9f1d8f6457c80f4a73a92f420a81d650fc4ad0f97a5dc4b9485bba8",
12 | "zh:42913a40ddfe9b4f3c78ad2e3cdc1dcfd48151bc132dc6b49fc32cd6da79db21",
13 | "zh:452db5caca2e53d5f7090979d518e77aa5fd98385514b11ee2ce76a46e89cb53",
14 | "zh:9b12af85486a96aedd8d7984b0ff811a4b42e3d88dad1a3fb4c0b580d04fa425",
15 | "zh:a12377ade89ee18d9be116436e411e8396898bd70b21ab027c161c785e86238d",
16 | "zh:aa9e4746ba49044ad5b4dda57fcdba7bc16fe65f696766fb2c55c30a27abf844",
17 | "zh:adfaee76d283f1c321fad2e4154be88d57da8c2ecfdca9516c8920bd2ece36ed",
18 | "zh:bf6fbc6d60661c03ed2214173c1deced908dc62480dd41e67ac399fa4abd7467",
19 | "zh:cb685da03ad00d1a27891f3d366d75e8795ac81f1b427888b434e6832ca40633",
20 | "zh:e0432c78dfaf2baebe2bf5c0ad8087f547c69c2c5a00e4c1dcd5a6344ce726df",
21 | "zh:e0ec9ccb8d34d6d0d8bf7f8628c223951832b4d50ea8887fc711fa854b3a28b4",
22 | "zh:f274397ada4ef3c1dce2f70e719c8ccf19fc4e7a2e3f45d018764c6267fd7157",
23 | ]
24 | }
25 |
--------------------------------------------------------------------------------
/devops-challenges/code/q23/terraform/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "us-east-1"
3 | }
4 |
5 | resource "aws_s3_bucket" "this" {
6 | bucket = "my-tf-test-bucket"
7 | acl = "private"
8 |
9 | versioning {
10 | enabled = false
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/devops-challenges/code/q23/terraform/outputs.tf:
--------------------------------------------------------------------------------
1 | output "bucket_id" {
2 | value = aws_s3_bucket.this.id
3 | }
4 |
--------------------------------------------------------------------------------
/devops-challenges/code/q23/terraform/tfplan:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/moabukar/tech-vault/edf8ee9f875cfd1a22bcab3223f58120cf293ca7/devops-challenges/code/q23/terraform/tfplan
--------------------------------------------------------------------------------
/devops-challenges/code/q23/terraform/tfplan.json:
--------------------------------------------------------------------------------
1 | {"format_version":"1.1","terraform_version":"1.2.5","planned_values":{"outputs":{"bucket_id":{"sensitive":false}},"root_module":{"resources":[{"address":"aws_s3_bucket.this","mode":"managed","type":"aws_s3_bucket","name":"this","provider_name":"registry.terraform.io/hashicorp/aws","schema_version":0,"values":{"acl":"private","bucket":"my-tf-test-bucket","force_destroy":false,"tags":null,"timeouts":null,"versioning":[{"enabled":false,"mfa_delete":false}]},"sensitive_values":{"cors_rule":[],"grant":[],"lifecycle_rule":[],"logging":[],"object_lock_configuration":[],"replication_configuration":[],"server_side_encryption_configuration":[],"tags_all":{},"versioning":[{}],"website":[]}}]}},"resource_changes":[{"address":"aws_s3_bucket.this","mode":"managed","type":"aws_s3_bucket","name":"this","provider_name":"registry.terraform.io/hashicorp/aws","change":{"actions":["create"],"before":null,"after":{"acl":"private","bucket":"my-tf-test-bucket","force_destroy":false,"tags":null,"timeouts":null,"versioning":[{"enabled":false,"mfa_delete":false}]},"after_unknown":{"acceleration_status":true,"arn":true,"bucket_domain_name":true,"bucket_prefix":true,"bucket_regional_domain_name":true,"cors_rule":true,"grant":true,"hosted_zone_id":true,"id":true,"lifecycle_rule":true,"logging":true,"object_lock_configuration":true,"object_lock_enabled":true,"policy":true,"region":true,"replication_configuration":true,"request_payer":true,"server_side_encryption_configuration":true,"tags_all":true,"versioning":[{}],"website":true,"website_domain":true,"website_endpoint":true},"before_sensitive":false,"after_sensitive":{"cors_rule":[],"grant":[],"lifecycle_rule":[],"logging":[],"object_lock_configuration":[],"replication_configuration":[],"server_side_encryption_configuration":[],"tags_all":{},"versioning":[{}],"website":[]}}}],"output_changes":{"bucket_id":{"actions":["create"],"before":null,"after_unknown":true,"before_sensitive":false,"after_sensitive":false}},"configuration":{"provider_config":{"aws":{"name":"aws","full_name":"registry.terraform.io/hashicorp/aws","expressions":{"region":{"constant_value":"us-east-1"}}}},"root_module":{"outputs":{"bucket_id":{"expression":{"references":["aws_s3_bucket.this.id","aws_s3_bucket.this"]}}},"resources":[{"address":"aws_s3_bucket.this","mode":"managed","type":"aws_s3_bucket","name":"this","provider_config_key":"aws","expressions":{"acl":{"constant_value":"private"},"bucket":{"constant_value":"my-tf-test-bucket"},"versioning":[{"enabled":{"constant_value":false}}]},"schema_version":0}]}},"relevant_attributes":[{"resource":"aws_s3_bucket.this","attribute":["id"]}]}
2 |
--------------------------------------------------------------------------------
/devops-challenges/code/q23/terraform/variables.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/moabukar/tech-vault/edf8ee9f875cfd1a22bcab3223f58120cf293ca7/devops-challenges/code/q23/terraform/variables.tf
--------------------------------------------------------------------------------
/devops-challenges/code/q24/README.md:
--------------------------------------------------------------------------------
1 | # Immutable AMI with Packer Challenge
2 |
3 | ## Steps to create AMI
4 |
5 | 1. Run `packer validate packer.json` to validate the Packer configuration.
6 | 2. Run `packer build packer.json` to build the AMI.
7 |
8 | ## Steps to deploy EC2 instance
9 |
10 | 1. Initialize Terraform with `terraform init`.
11 | 2. Apply the configuration with `terraform apply`.
12 |
13 | Replace the `ami` field in `main.tf` with your own AMI ID.
14 |
15 | - To run Packer and Terraform, set up your AWS credentials first. Validate the Packer template with `packer validate packer.json`, build the AMI with `packer build packer.json`, then deploy the EC2 instance with `terraform init` followed by `terraform apply`.
16 |
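17 | After the Packer build finishes, you can look up the resulting AMI ID (assuming the `nginx-ami` name used in `packer.json`) with:
18 | 
19 | ```bash
20 | aws ec2 describe-images --owners self \
21 |   --filters "Name=name,Values=nginx-ami" \
22 |   --query 'Images[0].ImageId' --output text
23 | ```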
--------------------------------------------------------------------------------
/devops-challenges/code/q24/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 |   region = "us-west-2" # match the region where the Packer AMI is built
3 | }
4 |
5 | resource "aws_instance" "my_instance" {
6 | ami = "ami-0abcdef1234567890" # Replace with your AMI ID created with Packer
7 | instance_type = "t2.micro"
8 |
9 | tags = {
10 | Name = "nginx-server"
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/devops-challenges/code/q24/packer.json:
--------------------------------------------------------------------------------
1 | {
2 | "builders": [{
3 | "type": "amazon-ebs",
4 | "region": "us-west-2",
5 | "source_ami_filter": {
6 | "filters": {
7 | "virtualization-type": "hvm",
8 | "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*",
9 | "root-device-type": "ebs"
10 | },
11 | "owners": ["099720109477"],
12 | "most_recent": true
13 | },
14 | "instance_type": "t2.micro",
15 | "ssh_username": "ubuntu",
16 | "ami_name": "nginx-ami"
17 | }],
18 | "provisioners": [
19 | {
20 | "type": "shell",
21 | "inline": [
22 | "sudo apt-get update",
23 | "sudo apt-get install -y nginx"
24 | ]
25 | }
26 | ]
27 | }
28 |
--------------------------------------------------------------------------------
/devops-challenges/code/q25/README.md:
--------------------------------------------------------------------------------
1 | # Secrets Management with HashiCorp Vault
2 |
3 |
4 | 
5 |
6 | ## Steps to Setup Vault
7 |
8 | 1. Start Vault in dev mode: `vault server -dev` (dev mode starts already initialized and unsealed)
9 | 2. In another terminal, point the CLI at it: `export VAULT_ADDR='http://127.0.0.1:8200'`
10 | 3. For a non-dev server, initialize with `vault operator init` and unseal with `vault operator unseal`
11 |
12 | ## Steps to Create Policy and Role
13 |
14 | 1. Initialize Terraform: `terraform init`
15 | 2. Apply Terraform: `terraform apply`
16 |
17 | ## Steps to Fetch Secret
18 |
19 | Run the `fetch_sec.sh` script to fetch the secret.
20 |
21 |
22 | ```sh
23 | # Setting up Vault (dev mode for simplicity):
24 | vault server -dev
25 | 
26 | vault kv put secret/my_secret value="supersecret"
27 | 
28 | 
29 |
30 | ```
31 |
--------------------------------------------------------------------------------
/devops-challenges/code/q25/fetch_sec.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Fetch role_id and secret_id from Vault (these commands should be adapted to your environment)
4 | role_id=$(vault read -field=role_id auth/approle/role/my_role/role-id)
5 | secret_id=$(vault write -f -field=secret_id auth/approle/role/my_role/secret-id)
6 |
7 | # Log in to Vault and get a token
8 | vault_token=$(vault write -field=token auth/approle/login role_id="$role_id" secret_id="$secret_id")
9 |
10 | # Use the token to read the secret
11 | VAULT_TOKEN="$vault_token" vault kv get -field=value secret/my_secret
12 |
--------------------------------------------------------------------------------
/devops-challenges/code/q25/main.tf:
--------------------------------------------------------------------------------
1 | provider "vault" {}
2 |
3 | resource "vault_policy" "my_policy" {
4 | name = "my_policy"
5 |   policy = file("policy.hcl")
6 | }
7 |
8 | resource "vault_auth_backend" "approle" {
9 | type = "approle"
10 | }
11 |
12 | resource "vault_auth_backend_role" "my_role" {
13 | backend = vault_auth_backend.approle.path
14 | role_name = "my_role"
15 | policies = [vault_policy.my_policy.name]
16 | }
17 |
--------------------------------------------------------------------------------
/devops-challenges/code/q25/policy.hcl:
--------------------------------------------------------------------------------
1 | path "secret/my_secret" {
2 | capabilities = ["read"]
3 | }
4 |
--------------------------------------------------------------------------------
/devops-challenges/code/q26/README.md:
--------------------------------------------------------------------------------
1 | # DevOps Challenge q26
2 |
3 |
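4 | The same stack is defined twice: `cf.yaml` (CloudFormation) and `infra.tf` (Terraform). A minimal sketch of deploying either, assuming AWS credentials are configured and the placeholder AMI ID has been replaced:
5 | 
6 | ```bash
7 | # CloudFormation
8 | aws cloudformation deploy --template-file cf.yaml --stack-name q26-demo
9 | 
10 | # Terraform
11 | terraform init && terraform apply
12 | ```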
--------------------------------------------------------------------------------
/devops-challenges/code/q26/cf.yaml:
--------------------------------------------------------------------------------
1 | Resources:
2 | MyEC2Instance:
3 | Type: "AWS::EC2::Instance"
4 | Properties:
5 | InstanceType: t2.micro
6 | ImageId: ami-abc12345
7 |
8 | MyS3Bucket:
9 | Type: "AWS::S3::Bucket"
10 |
--------------------------------------------------------------------------------
/devops-challenges/code/q26/infra.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "us-west-1"
3 | }
4 |
5 | resource "aws_instance" "my_instance" {
6 | ami = "ami-abc12345"
7 | instance_type = "t2.micro"
8 | }
9 |
10 | resource "aws_s3_bucket" "my_bucket" {
11 | bucket = "my-bucket"
12 | acl = "private"
13 | }
14 |
--------------------------------------------------------------------------------
/devops-challenges/code/q27/README.md:
--------------------------------------------------------------------------------
1 | # DevOps Challenge q27
2 |
3 | ## Solution
4 |
5 | - Chaos Monkey Installation: You could use a tool like Chaos Monkey to introduce failures in your system. Assume it's installed in an AWS EC2 instance.
6 |
7 | - Configuration: Configure Chaos Monkey to randomly terminate instances in a specified Auto Scaling Group.
8 |
9 | - Monitoring: Set up basic monitoring using CloudWatch to check how the system reacts when Chaos Monkey introduces failures.
10 |
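11 | For the CloudWatch piece, a single alarm on the Auto Scaling Group is enough to see how the remaining instances absorb load when one is terminated. A rough sketch (the ASG name and SNS topic ARN are placeholders):
12 | 
13 | ```bash
14 | aws cloudwatch put-metric-alarm \
15 |   --alarm-name chaos-asg-high-cpu \
16 |   --namespace AWS/EC2 \
17 |   --metric-name CPUUtilization \
18 |   --dimensions Name=AutoScalingGroupName,Value=YOUR_ASG_NAME \
19 |   --statistic Average --period 60 --evaluation-periods 3 \
20 |   --threshold 80 --comparison-operator GreaterThanThreshold \
21 |   --alarm-actions arn:aws:sns:us-west-1:123456789012:chaos-alerts
22 | ```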
--------------------------------------------------------------------------------
/devops-challenges/code/q27/chaos.json:
--------------------------------------------------------------------------------
1 | {
2 | "accountName": "test",
3 | "cloudProvider": "aws",
4 | "credentials": {
5 | "accessKey": "ACCESS_KEY",
6 | "secretKey": "SECRET_KEY"
7 | },
8 | "regions": ["us-west-1"],
9 | "chaosTarget": "Auto Scaling Group Name",
10 | "meanTimeBetweenKillsInWorkDays": 3,
11 | "minTimeBetweenKillsInWorkDays": 1
12 | }
13 |
--------------------------------------------------------------------------------
/devops-challenges/code/q28/README.md:
--------------------------------------------------------------------------------
1 | # DevOps Challenge q28
2 |
--------------------------------------------------------------------------------
/devops-challenges/code/q28/grafana.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "High CPU Usage",
3 | "conditions": [
4 | {
5 | "evaluator": {
6 | "params": [80],
7 | "type": "gt"
8 | },
9 | "operator": {
10 | "type": "and"
11 | },
12 | "query": {
13 | "model": {
14 | "target": "cpu_usage"
15 | },
16 | "params": ["5m"]
17 | },
18 | "reducer": {
19 | "params": [],
20 | "type": "avg"
21 | },
22 | "type": "query"
23 | }
24 | ]
25 | }
26 |
--------------------------------------------------------------------------------
/devops-challenges/code/q28/prometheus.yaml:
--------------------------------------------------------------------------------
1 | # Prometheus config
2 | scrape_configs:
3 | - job_name: 'flask_app'
4 | static_configs:
5 | - targets: ['flask_app:5000']
6 |
7 |
--------------------------------------------------------------------------------
/devops-challenges/code/q29/README.md:
--------------------------------------------------------------------------------
1 | # DevOps Challenge q29
2 |
3 | ## Folder structure
4 |
5 | ```bash
6 | project-folder/
7 | ├── apps/
8 | │ ├── app1/
9 | │ │ └── Dockerfile
10 | │ └── app2/
11 | │ └── Dockerfile
12 | ├── terraform/
13 | │ ├── main.tf
14 | │ ├── variables.tf
15 | │ └── outputs.tf
16 | └── traefik/
17 |     └── traefik.yaml
18 | ```
19 |
20 |
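21 | The ECS task definition references a `my-app-image` image, so the app images need to be pushed to a registry the cluster can pull from. A rough sketch using ECR (account ID and region are placeholders):
22 | 
23 | ```bash
24 | aws ecr create-repository --repository-name app1
25 | aws ecr get-login-password --region us-east-1 \
26 |   | docker login --username AWS --password-stdin 123456789012.dkr.ecr.us-east-1.amazonaws.com
27 | docker build -t app1 ./apps/app1
28 | docker tag app1:latest 123456789012.dkr.ecr.us-east-1.amazonaws.com/app1:latest
29 | docker push 123456789012.dkr.ecr.us-east-1.amazonaws.com/app1:latest
30 | ```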
--------------------------------------------------------------------------------
/devops-challenges/code/q29/apps/app1/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nginx:alpine
2 | COPY ./index.html /usr/share/nginx/html/index.html
3 |
--------------------------------------------------------------------------------
/devops-challenges/code/q29/apps/app2/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nginx:alpine
2 | COPY ./index.html /usr/share/nginx/html/index.html
3 |
--------------------------------------------------------------------------------
/devops-challenges/code/q29/terraform/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "us-east-1"
3 | }
4 |
5 | resource "aws_ecs_cluster" "my_cluster" {
6 | name = "my-cluster"
7 | }
8 |
9 | # ECS task definition for the app
10 | resource "aws_ecs_task_definition" "app_task" {
11 | family = "my-app"
12 | network_mode = "awsvpc"
13 | requires_compatibilities = ["FARGATE"]
14 | cpu = "256"
15 | memory = "512"
16 | execution_role_arn = aws_iam_role.ecs_execution_role.arn
17 |
18 | container_definitions = jsonencode([{
19 | name = "my-app"
20 | image = "my-app-image"
21 | portMappings = [{
22 | containerPort = 80
23 | hostPort = 80
24 | }]
25 | }])
26 | }
27 |
28 | # ECS task definition for Traefik
29 | resource "aws_ecs_task_definition" "traefik_task" {
30 | family = "traefik"
31 | network_mode = "awsvpc"
32 | requires_compatibilities = ["FARGATE"]
33 | cpu = "256"
34 | memory = "512"
35 | execution_role_arn = aws_iam_role.ecs_execution_role.arn
36 |
37 | container_definitions = jsonencode([{
38 | name = "traefik"
39 | image = "traefik:v2.4"
40 | portMappings = [{
41 | containerPort = 80
42 | hostPort = 80
43 | }, {
44 | containerPort = 443
45 | hostPort = 443
46 | }, {
47 | containerPort = 8080
48 | hostPort = 8080
49 | }]
50 | }])
51 | }
52 |
53 | # ECS Service
54 | resource "aws_ecs_service" "my_service" {
55 | name = "my-service"
56 | cluster = aws_ecs_cluster.my_cluster.id
57 | task_definition = aws_ecs_task_definition.app_task.arn
58 | launch_type = "FARGATE"
59 | desired_count = 1
60 |
61 | network_configuration {
62 | subnets = ["subnet-xxxxxxx", "subnet-yyyyyyy"]
63 | }
64 | }
65 | 
66 | # Execution role used by the task definitions above (pull images, write logs)
67 | resource "aws_iam_role" "ecs_execution_role" {
68 |   name = "ecs-execution-role"
69 | 
70 |   assume_role_policy = jsonencode({
71 |     Version = "2012-10-17"
72 |     Statement = [{
73 |       Action = "sts:AssumeRole"
74 |       Effect = "Allow"
75 |       Principal = {
76 |         Service = "ecs-tasks.amazonaws.com"
77 |       }
78 |     }]
79 |   })
80 | }
81 | 
82 | resource "aws_iam_role_policy_attachment" "ecs_execution" {
83 |   role       = aws_iam_role.ecs_execution_role.name
84 |   policy_arn = "arn:aws:iam::aws:policy/service-role/AmazonECSTaskExecutionRolePolicy"
85 | }
86 | 
--------------------------------------------------------------------------------
/devops-challenges/code/q29/traefik/traefik.yaml:
--------------------------------------------------------------------------------
1 | entryPoints:
2 | web:
3 | address: ":80"
4 | websecure:
5 | address: ":443"
6 |
7 | providers:
8 | docker:
9 | exposedByDefault: false
10 |
11 | api:
12 | insecure: true
13 |
--------------------------------------------------------------------------------
/devops-challenges/code/q30/.github/workflows/ansible.yml:
--------------------------------------------------------------------------------
1 | name: Run Ansible Playbooks
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | run-ansible:
10 | runs-on: ubuntu-latest
11 |
12 | steps:
13 | - name: Checkout Repo
14 | uses: actions/checkout@v2
15 |
16 | - name: Setup Ansible
17 | run: |
18 | sudo apt-get update
19 | sudo apt-get install -y ansible
20 |
21 | - name: Run Backup Config Playbook
22 | run: ansible-playbook -i inventory.ini playbooks/backup_config.yml
23 |
24 | - name: Run Set VLAN Playbook
25 | run: ansible-playbook -i inventory.ini playbooks/set_vlan.yml
26 |
27 | - name: Run Set OSPF Playbook
28 | run: ansible-playbook -i inventory.ini playbooks/set_ospf.yml
29 |
30 | - name: Run Set ACL Playbook
31 | run: ansible-playbook -i inventory.ini playbooks/set_acl.yml
32 |
--------------------------------------------------------------------------------
/devops-challenges/code/q30/README.md:
--------------------------------------------------------------------------------
1 | # DevOps Challenge q30 - Network Automation with GitHub Actions and Ansible
2 |
3 | ## Prerequisites
4 |
5 | - Ansible installed on your local machine
6 | - Networking devices (routers, switches, etc.) that Ansible can connect to
7 |
8 | ## Setup
9 | - Inventory Setup: Update the `inventory.ini` with the details of your network devices.
10 | - Ansible Playbooks: Customize the Ansible playbooks as per your network requirements.
11 |
12 | - Push to GitHub: Push your changes to GitHub.
13 |
14 | - After pushing the code to GitHub, the GitHub Actions workflow specified in .github/workflows/ansible.yml will automatically run.
15 |
16 | ```bash
17 | [routers]
18 | 192.168.1.1
19 |
20 | [switches]
21 | 192.168.1.2
22 |
23 |
24 | ```
25 |
26 | ## Directory Structure:
27 |
28 | ```bash
29 | network-automation-project/
30 | ├── ansible.cfg
31 | ├── inventory.ini
32 | └── playbooks/
33 | ├── backup_config.yml
34 | ├── set_vlan.yml
35 | ├── set_ospf.yml
36 | └── set_acl.yml
37 |
38 | ```
39 |
40 |
41 |
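42 | Before pushing, you can dry-run a playbook locally against the same inventory to catch obvious issues (requires SSH reachability to the devices):
43 | 
44 | ```bash
45 | ansible-playbook -i inventory.ini playbooks/backup_config.yml --check --diff
46 | ```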
--------------------------------------------------------------------------------
/devops-challenges/code/q30/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | host_key_checking = False
3 |
--------------------------------------------------------------------------------
/devops-challenges/code/q30/inventory.ini:
--------------------------------------------------------------------------------
1 | [routers]
2 | 192.168.1.1 ansible_ssh_user=YOUR_SSH_USER ansible_ssh_pass=YOUR_SSH_PASSWORD
3 | 
4 | [switches]
5 | 192.168.1.2 ansible_ssh_user=YOUR_SSH_USER ansible_ssh_pass=YOUR_SSH_PASSWORD
6 | 
9 |
--------------------------------------------------------------------------------
/devops-challenges/code/q30/playbooks/backup_config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Backup router configurations
3 | hosts: routers
4 | tasks:
5 | - name: Backup config
6 | ios_config:
7 | backup: yes
8 |
--------------------------------------------------------------------------------
/devops-challenges/code/q30/playbooks/set_acl.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set ACL
3 | hosts: routers
4 | tasks:
5 | - name: Configure ACL
6 | ios_config:
7 | lines:
8 | - access-list 1 permit 192.168.1.0 0.0.0.255
9 |
--------------------------------------------------------------------------------
/devops-challenges/code/q30/playbooks/set_ospf.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Configure OSPF
3 | hosts: routers
4 | tasks:
5 | - name: Set OSPF
6 | ios_config:
7 | lines:
8 | - router ospf 1
9 | - network 0.0.0.0 255.255.255.255 area 0
10 |
--------------------------------------------------------------------------------
/devops-challenges/code/q30/playbooks/set_vlan.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set VLAN
3 | hosts: routers
4 | tasks:
5 |     - name: Create VLAN
6 |       ios_config:
7 |         parents: vlan 10
8 |         lines:
9 |           - name VLAN10
10 |
--------------------------------------------------------------------------------
/devops-challenges/code/q31/README.md:
--------------------------------------------------------------------------------
1 | # DevOps Challenge q31
2 |
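3 | The manifests are applied in dependency order: the service account and secret first, then one of the two deployment variants, then the service. A minimal sketch:
4 | 
5 | ```bash
6 | kubectl apply -f sa.yml
7 | kubectl apply -f secret.yml
8 | kubectl apply -f deployment.yml   # env-var variant; use dep-vol.yml for the volume mount
9 | kubectl apply -f service.yml
10 | 
11 | # Verify the secret is visible to a pod
12 | kubectl exec deploy/k8stest -- env | grep SECRET_    # env-var variant
13 | kubectl exec deploy/k8stest -- ls /etc/foo           # volume-mount variant
14 | ```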
--------------------------------------------------------------------------------
/devops-challenges/code/q31/dep-vol.yml:
--------------------------------------------------------------------------------
1 | ## deployment - mounted a secret using a volume mount and using a dedicated service account.
2 | ## with 1 pod maxUnavailable
3 | apiVersion: apps/v1
4 | kind: Deployment
5 | metadata:
6 | labels:
7 | app: k8stest
8 | name: k8stest
9 | spec:
10 | replicas: 4
11 | selector:
12 | matchLabels:
13 | app: k8stest
14 | strategy:
15 | type: RollingUpdate
16 | rollingUpdate:
17 | maxSurge: 1
18 | maxUnavailable: 25%
19 | template:
20 | metadata:
21 | labels:
22 | app: k8stest
23 | spec:
24 | serviceAccountName: k8s-sa
25 | containers:
26 | - image: nginx
27 | name: nginx
28 | volumeMounts:
29 | - name: k8s-vol
30 | mountPath: /etc/foo
31 | readOnly: true
32 | volumes:
33 | - name: k8s-vol
34 | secret:
35 | secretName: key-secret
36 |
--------------------------------------------------------------------------------
/devops-challenges/code/q31/deployment.yml:
--------------------------------------------------------------------------------
1 | ## deployment - mounted a secret using an env variable and using a dedicated service account.
2 | apiVersion: apps/v1
3 | kind: Deployment
4 | metadata:
5 | labels:
6 | app: k8stest
7 | name: k8stest
8 | spec:
9 | replicas: 4
10 | selector:
11 | matchLabels:
12 | app: k8stest
13 | strategy:
14 | type: RollingUpdate
15 | rollingUpdate:
16 | maxSurge: 1
17 | maxUnavailable: 25%
18 | template:
19 | metadata:
20 | labels:
21 | app: k8stest
22 | spec:
23 | serviceAccountName: k8s-sa
24 | containers:
25 | - image: nginx
26 | name: nginx
27 | env:
28 | - name: SECRET_USER
29 | valueFrom:
30 | secretKeyRef:
31 | name: key-secret
32 | key: user
33 | - name: SECRET_PASS
34 | valueFrom:
35 | secretKeyRef:
36 | name: key-secret
37 | key: pass
38 |
--------------------------------------------------------------------------------
/devops-challenges/code/q31/sa.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ServiceAccount
3 | metadata:
4 | name: k8s-sa
5 |
--------------------------------------------------------------------------------
/devops-challenges/code/q31/secret.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | data:
3 | pass: UDQ1NQ==
4 | user: YWRtaW4=
5 | kind: Secret
6 | metadata:
7 | name: key-secret
8 |
--------------------------------------------------------------------------------
/devops-challenges/code/q31/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | labels:
5 | app: k8stest
6 | name: k8stestservice
7 | spec:
8 | ports:
9 | - port: 80
10 | protocol: TCP
11 | targetPort: 80
12 | selector:
13 | app: k8stest
14 | type: NodePort
17 |
--------------------------------------------------------------------------------
/devops-challenges/code/q32/README.md:
--------------------------------------------------------------------------------
1 | # DevOps Challenge q32
2 |
--------------------------------------------------------------------------------
/devops-challenges/code/q32/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: "centos8.4.2105"
2 | networks:
3 | network1:
4 | external: true
5 | name: frontend-ingress
6 | network2:
7 | external: false
8 | name: database
9 | services:
10 | backend:
11 | depends_on: database
12 | image: ruby:3.0
13 | volumes:
14 | - backend:/project/backend
15 | ports:
16 | - target: 6000
17 | published: 6000
18 | protocol: udp
19 | mode: host
20 | networks:
21 | - database
22 |
23 | frontend:
24 | depends_on: backend
25 | image: aspnet:4.8
26 | volumes:
27 | - frontend:/project/frontend
28 | ports:
29 | - target: 8080
30 | published: 80
31 | protocol: tcp
32 | mode: ingress
33 | networks:
34 | - fend-ingress
35 |
36 | db:
37 | image: mysql:5.7.37
38 | networks:
39 | - database
40 |
--------------------------------------------------------------------------------
/devops-challenges/code/q32/v2/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3.8'
2 |
3 | services:
4 | db:
5 | image: mysql:5.7.37
6 | networks:
7 | - database
8 | environment:
9 | MYSQL_ROOT_PASSWORD: root_password_here
10 | ports:
11 | - target: 3306
12 | published: 3306
13 | protocol: tcp
14 | volumes:
15 | - db_data:/var/lib/mysql
16 |
17 | backend:
18 | image: ruby:3.0
19 | networks:
20 | - database
21 | volumes:
22 | - ./backend:/project/backend
23 | ports:
24 | - target: 6000
25 | published: 6000
26 | protocol: udp
27 | depends_on:
28 | - db
29 |
30 | frontend:
31 | image: mcr.microsoft.com/dotnet/framework/aspnet:4.8
32 | networks:
33 | - fend-ingress
34 | volumes:
35 | - ./frontend:/project/frontend
36 | ports:
37 | - target: 8080
38 | published: 80
39 | protocol: tcp
40 | depends_on:
41 | - backend
42 |
43 | networks:
44 | database:
45 | fend-ingress:
46 | external: true
47 |
48 | volumes:
49 | db_data:
50 |
--------------------------------------------------------------------------------
/devops-challenges/code/q33/README.md:
--------------------------------------------------------------------------------
1 | # DevOps Challenge q33 - Ansible Advanced
2 |
3 | # Runbook for XYZ Infrastructure Playbook
4 |
5 | ## Pre-requisites
6 |
7 | - Make sure Ansible is installed on the control node.
8 | - Make sure all target nodes are accessible from the control node via SSH.
9 | - Have AWS CLI installed and properly configured if you're deploying to AWS.
10 | - Your `inventory.ini` should be updated with the IPs of your servers.
11 |
12 | ## Steps to Run the Playbook
13 |
14 | ### 1. Navigate to Your Project Directory
15 |
16 | Open a terminal and navigate to the directory where your Ansible project is located.
17 |
18 | ```bash
19 | cd /path/to/ansible_project
20 | ```
21 |
22 | ### 2. Test Ansible Host Communication
23 |
24 | ```bash
25 | ansible all -m ping
26 | ```
27 |
28 | ### 3. Execute the Playbook
29 |
30 | ```bash
31 | ansible-playbook -i inventory.ini main.yml
32 | ```
33 |
34 | If you want to run the playbook only on a specific set of hosts, you can do so with this command:
35 |
36 | ```bash
37 | ansible-playbook --limit app_servers main.yml
38 | ```
39 |
40 | ### 4. Verify Configuration
41 |
42 | - For HAProxy: Check the status by visiting http://your_load_balancer_ip/haproxy?stats
43 | - For App Servers: Check the generated config file /path/to/app/config.js
44 | - For DB Servers: Log in to MySQL and check if the settings are applied.
45 |
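46 | One quick way to run these checks from the control node (using the same placeholder paths as above):
47 | 
48 | ```bash
49 | curl -s http://your_load_balancer_ip/haproxy?stats | head
50 | ansible app_servers -m command -a "cat /path/to/app/config.js"
51 | ansible db_servers -m command -a "grep bind-address /etc/mysql/my.cnf"
52 | ```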
--------------------------------------------------------------------------------
/devops-challenges/code/q33/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory = ./inventory.ini
3 | remote_user = ubuntu
4 |
--------------------------------------------------------------------------------
/devops-challenges/code/q33/inventory.ini:
--------------------------------------------------------------------------------
1 | [load_balancer]
2 | load_balancer_ip ansible_host=your_load_balancer_ip
3 |
4 | [app_servers]
5 | app_server_1 ansible_host=your_app_server_1_ip
6 | app_server_2 ansible_host=your_app_server_2_ip
7 |
8 | [db_servers]
9 | db_server ansible_host=your_db_server_ip
10 |
--------------------------------------------------------------------------------
/devops-challenges/code/q33/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: load_balancer
3 | roles:
4 | - load_balancer
5 |
6 | - hosts: app_servers
7 | roles:
8 | - app_server
9 |
10 | - hosts: db_servers
11 | roles:
12 | - db_server
13 |
--------------------------------------------------------------------------------
/devops-challenges/code/q33/roles/app_server/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install Node.js
3 | apt:
4 | name: nodejs
5 | state: present
6 |
7 | - name: Deploy App Configuration
8 | template:
9 | src: app_config.j2
10 | dest: /path/to/app/config.js
11 |
--------------------------------------------------------------------------------
/devops-challenges/code/q33/roles/app_server/templates/app_config.j2:
--------------------------------------------------------------------------------
1 | {
2 | "server": {
3 | "port": 3000
4 | },
5 | "database": {
6 | "host": "{{ hostvars['db_server']['ansible_host'] }}",
7 | "port": 3306
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/devops-challenges/code/q33/roles/db_server/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install MySQL
3 | apt:
4 | name: mysql-server
5 | state: present
6 |
7 | - name: Deploy DB Configuration
8 | template:
9 | src: db_config.j2
10 | dest: /etc/mysql/my.cnf
11 |
--------------------------------------------------------------------------------
/devops-challenges/code/q33/roles/db_server/templates/db_config.j2:
--------------------------------------------------------------------------------
1 | [mysqld]
2 | bind-address = 0.0.0.0
3 |
--------------------------------------------------------------------------------
/devops-challenges/code/q33/roles/load_balancer/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install HAProxy
3 | apt:
4 | name: haproxy
5 | state: present
6 |
7 | - name: Configure HAProxy
8 | template:
9 | src: haproxy.cfg.j2
10 | dest: /etc/haproxy/haproxy.cfg
11 |
--------------------------------------------------------------------------------
/devops-challenges/code/q33/roles/load_balancer/templates/haproxy.cfg.j2:
--------------------------------------------------------------------------------
1 | global
2 | log /dev/log local0
3 | log /dev/log local1 notice
4 | daemon
5 |
6 | defaults
7 | log global
8 | mode http
9 | option httplog
10 | option dontlognull
11 | timeout connect 5000
12 | timeout client 50000
13 | timeout server 50000
14 |
15 | frontend http_front
16 | bind *:80
17 | stats uri /haproxy?stats
18 | default_backend http_back
19 |
20 | backend http_back
21 | balance roundrobin
22 | {% for host in groups['app_servers'] %}
23 | server {{ host }} {{ hostvars[host]['ansible_host'] }}:80 check
24 | {% endfor %}
25 |
--------------------------------------------------------------------------------
/devops-challenges/code/q34/README.md:
--------------------------------------------------------------------------------
1 | # Runbook for Deploying Helm Charts
2 |
3 | ## Prerequisites
4 |
5 | - Make sure you have Helm v3.x installed.
6 | - Make sure you have access to a Kubernetes cluster.
7 |
8 | ## Steps
9 |
10 | ### Packaging Python Flask App
11 |
12 | 1. **Navigate to the Helm chart directory**
13 |
14 | ```bash
15 | cd helm-project/charts/my-python-app
16 | ```
17 |
18 | 2. **Lint the chart to catch issues**
19 |
20 | ```bash
21 | helm lint
22 | ```
23 |
24 | 3. **Package the chart**
25 |
26 | ```bash
27 | helm package .
28 | ```
29 |
30 | ### Deploying Redis Cluster
31 |
32 | 1. **Add the Bitnami Helm repo**
33 |
34 | ```bash
35 | helm repo add bitnami https://charts.bitnami.com/bitnami
36 | ```
37 |
38 | 2. **Install Redis with overridden values**
39 |
40 | ```bash
41 | helm install my-redis bitnami/redis --set usePassword=false
42 | ```
43 |
44 | ### Deploy Custom Python App
45 |
46 | 1. **Navigate to your Helm project directory**
47 |
48 | ```bash
49 | cd helm-project
50 | ```
51 |
52 | 2. **Deploy the chart**
53 |
54 | ```bash
55 | helm install my-python-app ./charts/my-python-app
56 | ```
57 |
58 | ## Verification
59 |
60 | - To check if your Python app is running, use:
61 |
62 | ```bash
63 | kubectl get pods
64 | ```
65 |
66 | - To check if Redis is running, use:
67 |
68 | ```bash
69 | helm ls
70 | ```
71 |
72 | ## Rollback (Bonus)
73 |
74 | To roll back a release, you can use:
75 | ```bash
76 | helm rollback <release-name> [revision]
77 | ```
78 |
79 |
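80 | To hit the app locally once the pods are Running (the Service is named after the release, here `my-python-app`):
81 | 
82 | ```bash
83 | kubectl port-forward svc/my-python-app 8080:80
84 | # in another terminal:
85 | curl -s http://localhost:8080/
86 | ```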
--------------------------------------------------------------------------------
/devops-challenges/code/q34/charts/my-python-app/Chart.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: my-python-app
3 | version: 0.1.0
4 |
--------------------------------------------------------------------------------
/devops-challenges/code/q34/charts/my-python-app/templates/deployment.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: {{ .Release.Name }}
5 | spec:
6 |   replicas: {{ .Values.replicaCount }}
7 |   selector:
8 |     matchLabels:
9 |       app: {{ .Release.Name }}
10 |   template:
11 |     metadata:
12 |       labels:
13 |         app: {{ .Release.Name }}
14 |     spec:
15 |       containers:
16 |         - name: {{ .Release.Name }}
17 |           image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
18 | 
--------------------------------------------------------------------------------
/devops-challenges/code/q34/charts/my-python-app/templates/service.yaml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: {{ .Release.Name }}
5 | spec:
6 | type: {{ .Values.service.type }}
7 | ports:
8 | - port: {{ .Values.service.port }}
9 | selector:
10 | app: {{ .Release.Name }}
11 |
--------------------------------------------------------------------------------
/devops-challenges/code/q34/charts/my-python-app/values.yaml:
--------------------------------------------------------------------------------
1 | replicaCount: 1
2 |
3 | image:
4 | repository: my-python-app
5 | tag: latest
6 |
7 | service:
8 | type: ClusterIP
9 | port: 80
10 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/README.md:
--------------------------------------------------------------------------------
1 | # Advanced Helm Project Runbook
2 |
3 | This runbook outlines the steps for deploying advanced Helm charts using Helmfile.
4 |
5 | ## Prerequisites
6 |
7 | - Helm v3.x installed
8 | - Helmfile installed
9 | - Access to a Kubernetes cluster
10 |
11 | ## Steps
12 |
13 | ### Preparing Helm and Helmfile
14 |
15 | 1. **Navigate to the project directory**
16 | ```bash
17 | cd advanced-helm-project
18 | ```
19 |
20 | 2. **Update Helm repository**
21 | ```bash
22 | helm repo update
23 | ```
24 |
25 | ### Deploying Charts with Helmfile
26 |
27 | 1. **Navigate to the Helmfile directory**
28 | ```bash
29 | cd helmfile
30 | ```
31 |
32 | 2. **Sync all defined releases**
33 | ```bash
34 | helmfile sync
35 | ```
36 |
37 | 3. **Alternatively, apply only a single release**
38 | ```bash
39 |    helmfile --selector name=my-frontend sync
40 | ```
41 |
42 | 4. **Apply per environment**
43 | ```bash
44 | helmfile -e dev apply
45 | helmfile -e staging apply
46 | helmfile -e prod apply
47 | ```
48 |
49 | 5. **Add the Grafana & Prometheus chart repositories**
50 | ```bash
51 |
52 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
53 | helm repo add grafana https://grafana.github.io/helm-charts
54 |
55 | ```
56 |
57 | ## Rollback
58 |
59 | If something goes wrong, you can roll back an individual release with Helm:
60 |
61 | ```bash
62 | helm rollback <release-name> <revision>
63 | ```
64 |
65 | ## Verify your Deployment
66 |
67 | ```bash
68 | kubectl get pods
69 | ```
70 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/charts/backend/Chart.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: backend
3 | description: A simple backend chart
4 | version: 0.1.0
5 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/charts/backend/job.yml:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: my-backend-migration
5 | annotations:
6 | "helm.sh/hook": pre-install,pre-upgrade
7 | "helm.sh/hook-weight": "-5"
8 | "helm.sh/hook-delete-policy": hook-succeeded
9 | spec:
10 | template:
11 | spec:
12 | containers:
13 | - name: my-backend-migration
14 | image: my-backend-image
15 | command: ["your-migration-command"]
16 | restartPolicy: Never
17 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/charts/backend/templates/deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: backend-deployment
5 | spec:
6 | replicas: {{ .Values.replicaCount }}
7 | ...
8 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/charts/backend/templates/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: backend-service
5 | ...
6 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/charts/backend/values.yml:
--------------------------------------------------------------------------------
1 | replicaCount: 1
2 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/charts/frontend/Chart.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v2
2 | name: frontend
3 | description: A simple frontend chart
4 | version: 0.1.0
5 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/charts/frontend/templates/deployment.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: frontend-deployment
5 | spec:
6 | replicas: {{ .Values.replicaCount }}
7 | ...
8 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/charts/frontend/templates/service.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: frontend-service
5 | ...
6 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/charts/frontend/values.yml:
--------------------------------------------------------------------------------
1 | replicaCount: 1
2 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/helmfile/helmfile.yaml:
--------------------------------------------------------------------------------
1 | helmDefaults:
2 | wait: true
3 |
4 | environments:
5 | dev:
6 | values:
7 | - env/dev.yaml
8 | staging:
9 | values:
10 | - env/staging.yaml
11 | prod:
12 | values:
13 | - env/prod.yaml
14 |
15 | releases:
16 | - name: my-frontend
17 | namespace: my-namespace
18 |     chart: ../charts/frontend
19 |     values:
20 |       - releases/frontend.yml
21 |
22 | - name: my-backend
23 | namespace: my-namespace
24 |     chart: ../charts/backend
25 |     values:
26 |       - releases/backend.yml
27 |
28 | - name: prometheus
29 | namespace: monitoring
30 | chart: prometheus-community/prometheus
31 |
32 | - name: grafana
33 | namespace: monitoring
34 | chart: grafana/grafana
35 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/helmfile/releases/backend.yml:
--------------------------------------------------------------------------------
1 | # Overrides for backend
2 | replicaCount: 3
3 |
--------------------------------------------------------------------------------
/devops-challenges/code/q35/helmfile/releases/frontend.yml:
--------------------------------------------------------------------------------
1 | # Overrides for frontend
2 | replicaCount: 2
3 |
--------------------------------------------------------------------------------
/devops-challenges/code/q36/README.md:
--------------------------------------------------------------------------------
1 | # Bash Automation Script for Software Installation
2 |
3 | This is a Bash script to automate the installation of MySQL, PostgreSQL, Nginx, and Apache on an Ubuntu machine.
4 |
5 | ## Setup
6 |
7 | 1. Clone this repository or download the `automate.sh` script.
8 |
9 | 2. Give execute permission to the script:
10 |
11 | ```bash
12 | chmod +x automate.sh
13 | ```
14 |
15 | ## How to Run
16 |
17 | 1. Run the script with sudo permissions:
18 |
19 | ```bash
20 | sudo ./automate.sh
21 | ```
22 |
23 | 2. Follow the interactive prompts to choose which software to install.
24 |
25 | ## Verifying Installation
26 |
27 | You can use the following commands to check the status of the services:
28 |
29 | - MySQL: `systemctl status mysql`
30 | - PostgreSQL: `systemctl status postgresql`
31 | - Nginx: `systemctl status nginx`
32 | - Apache: `systemctl status apache2`
33 |
34 | ## Log File
35 |
36 | The actions, errors, and user inputs are logged into `automate.log` with timestamps.
37 |
38 | ## Note
39 |
40 | Make sure to run the script only on an Ubuntu machine.
41 |
--------------------------------------------------------------------------------
/devops-challenges/code/q36/automate.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | LOGFILE="automate.log"
4 |
5 | function log {
6 | echo "[$(date +%Y-%m-%d:%H:%M:%S)] $1" >> $LOGFILE
7 | }
8 |
9 | function install_mysql {
10 | log "Installing MySQL..."
11 | sudo apt-get update
12 | sudo apt-get install -y mysql-server
13 | sudo mysql_secure_installation
14 | log "MySQL installed."
15 | }
16 |
17 | function install_postgresql {
18 | log "Installing PostgreSQL..."
19 | sudo apt-get update
20 | sudo apt-get install -y postgresql postgresql-contrib
21 | log "PostgreSQL installed."
22 | }
23 |
24 | function install_nginx {
25 | log "Installing Nginx..."
26 | sudo apt-get update
27 | sudo apt-get install -y nginx
28 | sudo ufw allow 'Nginx Full'
29 | log "Nginx installed."
30 | }
31 |
32 | function install_apache {
33 | log "Installing Apache..."
34 | sudo apt-get update
35 | sudo apt-get install -y apache2
36 | sudo ufw allow 'Apache'
37 | log "Apache installed."
38 | }
39 |
40 | function check_status {
41 | log "Checking service status..."
42 | systemctl status mysql
43 | systemctl status postgresql
44 | systemctl status nginx
45 | systemctl status apache2
46 | }
47 |
48 | echo "What do you want to install?"
49 | echo "1. MySQL"
50 | echo "2. PostgreSQL"
51 | echo "3. Nginx"
52 | echo "4. Apache"
53 | echo "5. Check Status"
54 |
55 | read -p "Enter your choice (1-5): " choice
56 |
57 | case $choice in
58 | 1) install_mysql ;;
59 | 2) install_postgresql ;;
60 | 3) install_nginx ;;
61 | 4) install_apache ;;
62 | 5) check_status ;;
63 | *) echo "Invalid choice" ;;
64 | esac
65 |
--------------------------------------------------------------------------------
/devops-challenges/code/q37/README.md:
--------------------------------------------------------------------------------
1 | # Tilt DevOps Project Runbook
2 |
3 | ## Pre-requisites
4 |
5 | - [Tilt](https://docs.tilt.dev/install.html)
6 | - [Docker](https://docs.docker.com/get-docker/)
7 |
8 | ## Getting Started
9 |
10 | 1. Make sure you have Tilt and Docker installed on your machine.
11 |
12 | ```bash
13 |
14 | # macOS
15 |
16 | brew install tilt-dev/tap/tilt
17 |
18 | # Linux
19 | curl -fsSL https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.sh | bash
20 |
21 | # Windows
22 | iex ((new-object net.webclient).DownloadString('https://raw.githubusercontent.com/tilt-dev/tilt/master/scripts/install.ps1'))
23 |
24 | To verify installation, run:
25 |
26 | tilt version
27 |
28 | ```
29 |
30 | ## How to Run
31 |
32 | 1. Start Tilt:
33 |
34 | ```bash
35 | tilt up
36 | ```
37 |
38 | 2. Open Tilt dashboard in your browser to monitor the progress:
39 |
40 | ```bash
41 | http://localhost:10350/
42 | ```
43 |
44 | 3. You should be able to access the frontend at `http://localhost:3000` and the backend at `http://localhost:5000`.
45 |
46 | ## How to Verify
47 |
48 | 1. Make a change in your frontend or backend code. The changes should be reflected automatically.
49 |
50 | 2. Check the database. Initial data should be seeded.
51 |
52 | 3. Visit the Tilt dashboard to monitor logs and service status.
53 |
--------------------------------------------------------------------------------
/devops-challenges/code/q37/Tiltfile:
--------------------------------------------------------------------------------
1 | # docker_compose('docker-compose.yml')
2 |
3 | # k8s_yaml('k8s.yaml')
4 |
5 | # local_resource(
6 | # 'db_init',
7 | # 'mongoimport --host=database --db=mydb --collection=mycollection --file=initial_data.json',
8 | # resource_deps=['database']
9 | # )
10 |
11 | # # Live update settings
12 | # local_resource(
13 | # 'frontend',
14 | # 'npm run build',
15 | # deps=['frontend/src/'],
16 | # resource_deps=['frontend']
17 | # )
18 |
19 | # local_resource(
20 | # 'backend',
21 | # 'flask run',
22 | # deps=['backend/'],
23 | # resource_deps=['backend']
24 | # )
25 | #####################
26 |
27 |
28 | k8s_yaml('backend.yml')
29 | docker_build('backend', 'backend',
30 | live_update=[
31 | sync('./backend/src/', '/app/src/')
32 | ]
33 | )
34 |
35 | # Frontend service
36 | k8s_yaml('frontend.yml')
37 | docker_build('frontend', 'frontend',
38 | live_update=[
39 | sync('./frontend/src/', '/app/src/')
40 | ]
41 | )
42 |
43 |
44 | ## K8s
45 | k8s_resource('backend', port_forwards='5000:5000')
46 | k8s_resource('frontend', port_forwards='3000:3000')
47 |
48 |
49 |
50 |
51 |
--------------------------------------------------------------------------------
/devops-challenges/code/q37/backend.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: backend
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | app: backend
10 | template:
11 | metadata:
12 | labels:
13 | app: backend
14 | spec:
15 | containers:
16 | - name: backend
17 | image: backend:latest
18 | ---
19 | apiVersion: v1
20 | kind: Service
21 | metadata:
22 | name: backend
23 | spec:
24 | selector:
25 | app: backend
26 | ports:
27 | - protocol: TCP
28 | port: 80
29 | targetPort: 5000
30 |
--------------------------------------------------------------------------------
/devops-challenges/code/q37/backend/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM python:3.9-alpine
2 |
3 | WORKDIR /app
4 | COPY . .
5 |
6 | RUN pip install flask
7 |
8 | EXPOSE 5000
9 |
10 | CMD ["python", "app.py"]
11 |
--------------------------------------------------------------------------------
/devops-challenges/code/q37/backend/app.py:
--------------------------------------------------------------------------------
1 | from flask import Flask
2 | app = Flask(__name__)
3 |
4 | @app.route('/')
5 | def hello_world():
6 | return 'Hello from Backend!!!'
7 |
8 | if __name__ == '__main__':
9 | app.run(host='0.0.0.0', port=5000)
10 |
--------------------------------------------------------------------------------
/devops-challenges/code/q37/database/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM mongo:4.4
2 |
3 | CMD ["mongod"]
4 |
--------------------------------------------------------------------------------
/devops-challenges/code/q37/docker-compose.yml:
--------------------------------------------------------------------------------
1 | version: '3'
2 | services:
3 | backend:
4 | build: ./backend
5 | ports:
6 | - "6000:6000"
7 | frontend:
8 | build: ./frontend
9 | ports:
10 | - "8080:8080"
11 |
--------------------------------------------------------------------------------
/devops-challenges/code/q37/frontend.yml:
--------------------------------------------------------------------------------
1 | apiVersion: apps/v1
2 | kind: Deployment
3 | metadata:
4 | name: frontend
5 | spec:
6 | replicas: 2
7 | selector:
8 | matchLabels:
9 | app: frontend
10 | template:
11 | metadata:
12 | labels:
13 | app: frontend
14 | spec:
15 | containers:
16 | - name: frontend
17 | image: frontend:latest
18 | ---
19 | apiVersion: v1
20 | kind: Service
21 | metadata:
22 | name: frontend
23 | spec:
24 | selector:
25 | app: frontend
26 | ports:
27 | - protocol: TCP
28 | port: 80
29 | targetPort: 3000
30 |
--------------------------------------------------------------------------------
/devops-challenges/code/q37/frontend/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:18-alpine
2 |
3 | WORKDIR /app
4 | COPY src/package*.json ./
5 | RUN npm install
6 | RUN npm install -g serve
7 | COPY . .
8 |
9 | EXPOSE 3000
10 |
11 | CMD ["serve", "-s", "src", "-l", "3000"]
12 |
--------------------------------------------------------------------------------
/devops-challenges/code/q37/frontend/src/README.md:
--------------------------------------------------------------------------------
1 | Test usage for live updates
2 |
--------------------------------------------------------------------------------
/devops-challenges/code/q37/frontend/src/index.js:
--------------------------------------------------------------------------------
1 | import React from 'react';
2 | import ReactDOM from 'react-dom';
3 |
4 | const App = () => {
5 |   return <h1>Hello from Frontend!</h1>;
6 | };
7 |
8 | ReactDOM.render(<App />, document.getElementById('root'));
9 |
--------------------------------------------------------------------------------
/devops-challenges/code/q37/frontend/src/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "src",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "index.js",
6 | "scripts": {
7 | "test": "echo \"Error: no test specified\" && exit 1"
8 | },
9 | "author": "",
10 | "license": "ISC"
11 | }
12 |
--------------------------------------------------------------------------------
/devops-challenges/code/q38/README.md:
--------------------------------------------------------------------------------
1 | # Secure Access to Production Systems
2 |
3 | ## Setting Up
4 |
5 | 1. Install Ansible on your machine.
6 | 2. Update `production_inventory.ini` with your Jump Host and Production Server IPs.
7 | 3. Update variables for authorized users and keys as needed in role variables.
8 |
9 | ## How to Run
10 |
11 | 1. Navigate to the `roles` directory.
12 | 2. Run the Ansible playbook:
13 | ```
14 | ansible-playbook -i production_inventory.ini playbook.yml
15 | ```
16 | 3. SSH into the jump host to verify if the setup is working:
17 | ```
18 | ssh your_user@jump_host_ip
19 | ```
20 |
21 | ## How to Verify
22 |
23 | 1. SSH into the Jump Host, and then SSH into one of the production servers.
24 | 2. Verify that only authorized keys can access the production servers.
25 | 3. Check the jump host to ensure that only essential services are running.
26 |
27 |
--------------------------------------------------------------------------------
/devops-challenges/code/q38/roles/jumphost/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure only essential services are running
3 | ansible.builtin.service:
4 | name: "{{ item }}"
5 | state: stopped
6 | loop:
7 | - apache2
8 | - nginx
9 | - mysql
10 |
11 | - name: Harden SSH config
12 | template:
13 | src: sshd_config.j2
14 | dest: /etc/ssh/sshd_config
15 |   notify: Restart SSHD  # requires a "Restart SSHD" handler in the role (e.g. handlers/main.yml)
16 |
--------------------------------------------------------------------------------
/devops-challenges/code/q38/roles/jumphost/templates/sshd_config.j2:
--------------------------------------------------------------------------------
1 | # Only include essential SSHD config settings here
2 | Port 22
3 | Protocol 2
4 | PermitRootLogin no
5 | AllowUsers {{ authorized_users | join(' ') }}
6 |
--------------------------------------------------------------------------------
/devops-challenges/code/q38/roles/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: jumphost
3 | roles:
4 | - jumphost
5 |
6 | - hosts: production
7 | roles:
8 | - production_servers
9 |
--------------------------------------------------------------------------------
/devops-challenges/code/q38/roles/production_inventory.ini:
--------------------------------------------------------------------------------
1 | [jumphost]
2 | jump_host_ip ansible_ssh_user=root
3 |
4 | [production]
5 | prod_server_ip ansible_ssh_user=root
6 |
--------------------------------------------------------------------------------
/devops-challenges/code/q38/roles/production_servers/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Copy authorized keys for access
3 | authorized_key:
4 | user: "{{ user }}"
5 | key: "{{ lookup('file', item) }}"
6 | loop: "{{ public_keys }}"
7 |
--------------------------------------------------------------------------------
/devops-challenges/code/q39/README.md:
--------------------------------------------------------------------------------
1 | # Automated Backup and Restore Process
2 |
3 | ## Prerequisites
4 |
5 | - AWS CLI configured
6 | - MySQL Client
7 | - Bash environment
8 |
9 | ## Setup
10 |
11 | 1. Clone this repository.
12 | 2. Set your AWS credentials and MySQL connection details in the `config.env` file.
13 |
14 | ## How to Run
15 |
16 | ### Backup
17 |
18 | 1. Open your terminal and navigate to the project directory.
19 | 2. Run `bash backup.sh`.
20 |
21 | ### Restore
22 |
23 | 1. Open your terminal and navigate to the project directory.
24 | 2. Run `bash restore.sh`.
25 |
26 | ## Logs
27 |
28 | Check the `logs/` directory for backup and restore logs.
29 |
30 |
--------------------------------------------------------------------------------
/devops-challenges/code/q39/backup.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source config.env
3 |
4 | # Create a timestamp
5 | TIMESTAMP=$(date +%Y%m%d%H%M%S)
6 |
7 | # Backup
8 | mysqldump -u "$DB_USER" -p"$DB_PASS" "$DB_NAME" > "./backup-$TIMESTAMP.sql"
9 |
10 | # Log (create the logs directory if it does not exist yet)
11 | mkdir -p logs && echo "Backup created at $TIMESTAMP" >> logs/backup.log
12 |
13 | # Upload to S3
14 | aws s3 cp "./backup-$TIMESTAMP.sql" "s3://$S3_BUCKET/"
15 |
16 | # Log
17 | echo "Backup uploaded to S3 at $TIMESTAMP" >> logs/backup.log
18 |
--------------------------------------------------------------------------------
/devops-challenges/code/q39/config.env:
--------------------------------------------------------------------------------
1 | # AWS S3 bucket details
2 | S3_BUCKET=my-s3-bucket
3 |
4 | # MySQL details
5 | DB_HOST=localhost
6 | DB_USER=root
7 | DB_PASS=password
8 | DB_NAME=mydatabase
9 |
--------------------------------------------------------------------------------
/devops-challenges/code/q39/restore.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source config.env
3 |
4 | # Ask for the backup file
5 | echo "Enter the timestamp of the backup you want to restore:"
6 | read BACKUP_TIME
7 |
8 | # Download from S3
9 | aws s3 cp "s3://$S3_BUCKET/backup-$BACKUP_TIME.sql" .
10 |
11 | # Restore
12 | mysql -u "$DB_USER" -p"$DB_PASS" "$DB_NAME" < "backup-$BACKUP_TIME.sql"
13 |
14 | # Log (create the logs directory if it does not exist yet)
15 | mkdir -p logs && echo "Backup restored from $BACKUP_TIME" >> logs/restore.log
16 |
--------------------------------------------------------------------------------
/devops-challenges/code/q40/README.md:
--------------------------------------------------------------------------------
1 | # Configuration Management with Puppet
2 |
3 | ## Getting Started
4 |
5 | 1. **Install Puppet Master and Agent**: Follow the instructions [here](https://puppet.com/docs/puppet/latest/puppet_index.html).
6 |
7 | 2. **Clone this repository**:
8 | ```bash
9 | git clone https://github.com/your-repo.git
10 | ```
11 | 3. **Navigate to project directory**:
12 | ```bash
13 | cd puppet-config-management
14 | ```
15 |
16 | ## How to Run
17 |
18 | 1. **Apply Puppet Manifests**:
19 | ```bash
20 | sudo puppet apply manifests/site.pp
21 | ```
22 |
23 | 2. **Run Tests**:
24 | ```bash
25 | rspec spec/classes/apache_spec.rb
26 | ```
27 |
28 | ## How to Verify
29 |
30 | 1. **Check Web Server**: Access the web server to ensure it's running.
31 |
32 | 2. **Check Users**: Validate that the user accounts have been created with specified permissions.
33 |
34 | 3. **Run Tests**: Ensure that all your rspec tests pass.
35 |
36 |
--------------------------------------------------------------------------------
/devops-challenges/code/q40/manifests/site.pp:
--------------------------------------------------------------------------------
1 | node default {
2 | include apache
3 | include users
4 | }
5 |
--------------------------------------------------------------------------------
/devops-challenges/code/q40/modules/apache/manifests/init.pp:
--------------------------------------------------------------------------------
1 | class apache {
2 | package { 'httpd':
3 | ensure => present,
4 | }
5 | service { 'httpd':
6 | ensure => running,
7 | enable => true,
8 | require => Package['httpd'],
9 | }
10 | }
11 |
--------------------------------------------------------------------------------
/devops-challenges/code/q40/modules/users/manifests/init.pp:
--------------------------------------------------------------------------------
1 | class users {
2 | user { 'john':
3 | ensure => present,
4 | managehome => true,
5 | }
6 | }
7 |
--------------------------------------------------------------------------------
/devops-challenges/code/q40/spec/classes/apache_spec.rb:
--------------------------------------------------------------------------------
1 | require 'spec_helper'
2 |
3 | describe 'apache', :type => :class do
4 | it { should contain_package('httpd').with_ensure('present') }
5 | it { should contain_service('httpd').with({
6 | 'ensure' => 'running',
7 | 'enable' => 'true',
8 | })
9 | }
10 | end
11 |
--------------------------------------------------------------------------------
/devops-challenges/code/q40/spec/spec_helper.rb:
--------------------------------------------------------------------------------
1 | require 'rspec-puppet'
2 |
3 | RSpec.configure do |c|
4 | c.module_path = '/etc/puppet/modules'
5 | c.manifest_dir = '/etc/puppet/manifests'
6 | end
7 |
--------------------------------------------------------------------------------
/devops-challenges/code/q41/README.md:
--------------------------------------------------------------------------------
1 | # Advanced Ansible Project with Molecule Testing
2 |
3 | ## Setup and Pre-requisites
4 |
5 | 1. Install Ansible and Molecule.
6 |
7 | ```bash
8 | pip install ansible molecule docker
9 | ```
10 |
11 | 2. Clone this repository.
12 |
13 | ```bash
14 | git clone https://github.com/your-github/advanced-ansible-molecule.git
15 | ```
16 |
17 | ## How to Run
18 |
19 | 1. Run the Ansible playbook.
20 |
21 | ```bash
22 | ansible-playbook playbook.yml
23 | ```
24 |
25 | 2. To test the role using Molecule, navigate to the role directory.
26 |
27 | ```bash
28 | cd roles/webserver
29 | ```
30 |
31 | 3. Run the Molecule tests.
32 |
33 | ```bash
34 | molecule test
35 | ```
36 |
37 | ## Important Notes
38 |
39 | - Make sure to replace the IP address in the `inventory.ini` file with the IP of your test server.
40 | - The Molecule tests will run on a Docker container.
41 |
--------------------------------------------------------------------------------
/devops-challenges/code/q41/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory = inventory.ini
3 | remote_user = ubuntu
4 |
--------------------------------------------------------------------------------
/devops-challenges/code/q41/inventory.ini:
--------------------------------------------------------------------------------
1 | [webservers]
2 | web ansible_host=your_web_server_ip
3 |
--------------------------------------------------------------------------------
/devops-challenges/code/q41/playbook.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: webservers
3 | roles:
4 | - webserver
5 |
--------------------------------------------------------------------------------
/devops-challenges/code/q41/roles/webserver/molecule/default/converge.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Converge
3 | hosts: all
4 | roles:
5 | - role: webserver
6 |
--------------------------------------------------------------------------------
/devops-challenges/code/q41/roles/webserver/molecule/default/molecule.yml:
--------------------------------------------------------------------------------
1 | ---
2 | dependency:
3 | name: galaxy
4 | driver:
5 | name: docker
6 | platforms:
7 | - name: ubuntu
8 | image: geerlingguy/docker-ubuntu1804-ansible
9 |
--------------------------------------------------------------------------------
/devops-challenges/code/q41/roles/webserver/molecule/default/verify.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Verify
3 | hosts: all
4 | tasks:
5 | - name: Check if nginx is installed
6 | command: nginx -v
7 | register: nginx_version
8 | changed_when: false
9 |
10 | - name: Assert nginx installed
11 | assert:
12 | that:
13 | - "'nginx version' in nginx_version.stderr"
14 |
--------------------------------------------------------------------------------
/devops-challenges/code/q41/roles/webserver/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install nginx
3 | apt:
4 | name: nginx
5 | state: present
6 | become: true
7 |
8 | - name: Start nginx
9 | systemd:
10 | name: nginx
11 | state: started
12 | become: true
13 |
--------------------------------------------------------------------------------
/devops-challenges/code/q42/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: "CI"
2 |
3 | on:
4 | push:
5 | branches:
6 | - main
7 |
8 | jobs:
9 | test:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - name: Checkout code
13 | uses: actions/checkout@v2
14 |
15 | - name: Setup Go
16 | uses: actions/setup-go@v2
17 | with:
18 | go-version: 1.15
19 |
20 | - name: Test
21 | run: cd tests && go test
22 |
--------------------------------------------------------------------------------
/devops-challenges/code/q42/Makefile:
--------------------------------------------------------------------------------
1 | test:
2 | cd tests && go test
3 |
--------------------------------------------------------------------------------
/devops-challenges/code/q42/README.md:
--------------------------------------------------------------------------------
1 | # Terraform with Terratest Example Project
2 |
3 | This repository demonstrates how to set up an AWS instance using Terraform and how to test it using Terratest.
4 |
5 | ## Prerequisites
6 |
7 | - [Terraform](https://www.terraform.io/downloads.html) >= 0.14
8 | - [Go](https://golang.org/dl/) >= 1.15
9 | - [AWS CLI](https://aws.amazon.com/cli/) configured with Admin-level credentials
10 | - [GitHub Account](https://github.com/) (Optional, if you wish to set up CI/CD)
11 |
12 | ## Project Structure
13 |
14 | ```bash
15 | .
16 | ├── .github
17 | │ └── workflows
18 | │ └── ci.yml
19 | ├── Makefile
20 | ├── README.md
21 | ├── terraform
22 | │ ├── main.tf
23 | │ ├── outputs.tf
24 | │ └── variables.tf
25 | └── tests
26 | ├── go.mod
27 | └── main_test.go
28 | ```
29 |
30 |
31 | ## How to Run
32 |
33 | ### Terraform Setup
34 |
35 | 1. Navigate to the `terraform` directory:
36 |
37 | ```bash
38 | cd terraform
39 | ```
40 |
41 | 2. Initialize Terraform:
42 |
43 | ```bash
44 | terraform init
45 | ```
46 |
47 | 3. Apply the Terraform code:
48 |
49 | ```bash
50 | terraform apply
51 | ```
52 |
53 | ### Test Setup
54 |
55 | 1. Navigate to the `tests` directory:
56 |
57 | ```bash
58 | cd tests
59 | ```
60 |
61 | 2. Run the tests:
62 |
63 | ```bash
64 | go test
65 | ```
66 |
67 | Or you can use the provided Makefile to run the tests:
68 |
69 | ```bash
70 | make test
71 | ```
72 |
--------------------------------------------------------------------------------
/devops-challenges/code/q42/terraform/main.tf:
--------------------------------------------------------------------------------
1 | provider "aws" {
2 | region = "us-east-1"
3 | }
4 |
5 | resource "aws_instance" "example" {
6 | ami = "ami-0c55b159cbfafe1f0"
7 | instance_type = "t2.micro"
8 |
9 | tags = {
10 | Name = "example-instance"
11 | }
12 | }
13 |
--------------------------------------------------------------------------------
/devops-challenges/code/q42/terraform/outputs.tf:
--------------------------------------------------------------------------------
1 | output "instance_id" {
2 | value = aws_instance.example.id
3 | }
4 |
--------------------------------------------------------------------------------
/devops-challenges/code/q42/terraform/variables.tf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/moabukar/tech-vault/edf8ee9f875cfd1a22bcab3223f58120cf293ca7/devops-challenges/code/q42/terraform/variables.tf
--------------------------------------------------------------------------------
/devops-challenges/code/q42/tests/go.mod:
--------------------------------------------------------------------------------
1 | module terraform-terratest/tests
2 |
3 | go 1.15
4 |
5 | require github.com/gruntwork-io/terratest v0.34.0
6 |
--------------------------------------------------------------------------------
/devops-challenges/code/q42/tests/main_test.go:
--------------------------------------------------------------------------------
1 | package test
2 |
3 | import (
4 | "strings"
5 | "testing"
6 |
7 | "github.com/gruntwork-io/terratest/modules/terraform"
8 | )
9 |
10 | func TestTerraformWithTerratest(t *testing.T) {
11 | t.Parallel()
12 |
13 | options := &terraform.Options{
14 | // Set the path to the Terraform code that will be tested.
15 | TerraformDir: "../terraform",
16 |
17 | // Variables to pass to our Terraform code using VAR=value environment variables
18 | EnvVars: map[string]string{
19 | "AWS_DEFAULT_REGION": "us-east-1",
20 | },
21 | }
22 |
23 | defer terraform.Destroy(t, options)
24 | terraform.InitAndApply(t, options)
25 |
26 | // Validate your code works as expected
27 | instanceID := terraform.Output(t, options, "instance_id")
28 | 	if !strings.HasPrefix(instanceID, "i-") {
29 | t.Fatalf("Instance ID not found")
30 | }
31 | }
32 |
--------------------------------------------------------------------------------
/devops-challenges/code/qinit.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | echo "Enter the question number (e.g., 26, 27):"
4 | read question_number
5 |
6 | folder_name="q${question_number}"
7 |
8 | # Check if folder already exists
9 | if [ -d "$folder_name" ]; then
10 | echo "Folder ${folder_name} already exists."
11 | exit 1
12 | fi
13 |
14 | mkdir "$folder_name"
15 |
16 | cd "$folder_name" || exit 1
17 |
18 | touch README.md
19 |
20 | echo "# DevOps Challenge ${folder_name}" > README.md
21 |
22 | # Go back to the original directory
23 | cd ..
24 |
25 | # Confirmation message
26 | echo "Folder ${folder_name} with README.md has been created."
27 |
--------------------------------------------------------------------------------
/dsa-challenges/README.md:
--------------------------------------------------------------------------------
1 | # 📚 Data Structures and Algorithms Challenges
2 |
3 | Welcome to the **Data Structures and Algorithms Challenges** folder! 🎉
4 |
5 | This project is designed to provide you with valuable lessons and challenges to help you master data structures and algorithms.
6 |
7 | ## 📁 Folder Structure
8 | In each folder, you'll find:
9 | - **Lesson**: An informative lesson that explores key concepts and techniques related to the data structure or algorithm.
10 | - **Checklist**: A list of problems you can solve to practice your skills. ✅
11 |
12 | ## 🚧 Work in Progress
13 | This folder is a **work in progress**! 🚀 We will be continuously pushing new content, so stay tuned for updates. The lessons will be sorted from **beginner to advanced**, ensuring a smooth learning curve for everyone.
14 |
15 | ## 💼 Importance in Technical Interviews
16 | Mastering data structures and algorithms is crucial for technical interviews, as they form the backbone of many coding challenges you'll encounter. 🧠 By honing these skills, you'll be better equipped to:
17 | - Solve complex problems efficiently.
18 | - Think critically and develop optimal solutions.
19 | - Impress interviewers with your technical prowess! 💪
20 |
21 | Happy coding! 👩‍💻👨‍💻
22 |
--------------------------------------------------------------------------------
/dsa-challenges/Sorting Arrays I/Challenges.md:
--------------------------------------------------------------------------------
1 | # 🚀 Sorting Arrays I - Challenges
2 |
3 | Welcome to the **Sorting Challenges** folder! This repository contains a collection of problems designed to help you practice and apply various sorting algorithms.
4 |
5 | ### 📋 Problem Checklist
6 | Here are some sorting-related problems for you to tackle. **Remember:** The goal of these challenges is to implement the sorting algorithms we've learned rather than using pre-built sorting functions. I recommend practicing all four sorting algorithms and not just one!
7 |
8 | 1. [Sort the People](https://leetcode.com/problems/sort-the-people/description/)
9 | 2. [Relative Sort Array](https://leetcode.com/problems/relative-sort-array/description/)
10 | 3. [Pancake Sorting](https://leetcode.com/problems/pancake-sorting/description/)
11 | 4. [Find Target Indices After Sorting Array](https://leetcode.com/problems/find-target-indices-after-sorting-array/description/)
12 | 5. [Maximum Number of Coins You Can Get](https://leetcode.com/problems/maximum-number-of-coins-you-can-get/description/)
13 | 6. [How Many Numbers Are Smaller Than the Current Number](https://leetcode.com/problems/how-many-numbers-are-smaller-than-the-current-number/description/)
14 |
15 | ### 📖 Tips for Success
16 | - **Understand the Algorithms**: Make sure you understand how each sorting algorithm works before attempting the problems.
17 | - **Practice Different Algorithms**: Try to solve each problem using multiple sorting algorithms to deepen your understanding.
18 | - **Review Your Solutions**: After completing a problem, review your solution and consider how you could optimize it further.
19 |
20 | Happy coding! 💻✨
--------------------------------------------------------------------------------
/dsa-challenges/Sorting Arrays I/Lesson.md:
--------------------------------------------------------------------------------
1 | # 🚀 Sorting Arrays I - Lesson
2 |
3 | ## 1. Introduction
4 | Sorting allows us to:
5 | - Find the nth greatest or smallest number.
6 | - Group items by quality.
7 | - Find the median and the largest or smallest elements.
8 |
9 | There are different sorting algorithms, which can be categorized as follows (a short Python example after the list illustrates stability and in-place sorting):
10 |
11 | 1. **Sorting by Comparison**
12 | - Algorithms that sort elements by comparing them.
13 |
14 | 2. **Sorting by Distribution**
15 | - Algorithms that utilize the distribution of the data to sort.
16 |
17 | 3. **Stable vs Non-Stable Sorting**
18 | - **Stable Sorting**: Maintains the relative order of equal elements.
19 | - **Non-Stable Sorting**: Does not guarantee the order of equal elements.
20 |
21 | 4. **In-Place vs Out-of-Place Sorting**
22 | - **In-Place Sorting**: Requires only a small, constant amount of extra space.
23 | - **Out-of-Place Sorting**: Requires additional space proportional to the size of the input.
24 |
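As a minimal sketch of the last two distinctions, the snippet below uses Python's built-in `sorted` (stable, out-of-place: it returns a new list) and `list.sort` (stable; it reorders the existing list object in place). The sample records are made up for illustration:

```python
# Records: (name, score). Scores repeat, so stability is observable.
records = [("alice", 2), ("bob", 1), ("carol", 2), ("dave", 1)]

# sorted() is stable and out-of-place: it builds and returns a NEW list.
by_score = sorted(records, key=lambda r: r[1])
print(by_score)
# [('bob', 1), ('dave', 1), ('alice', 2), ('carol', 2)]
# Ties keep their original order: 'bob' before 'dave', 'alice' before 'carol'.

# list.sort() is stable and sorts in place: the same list object is reordered,
# and the method returns None instead of a new list.
same_list = records
records.sort(key=lambda r: r[1])
print(same_list is records)  # True – no new list was returned
```
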
25 | ## 2. Comparison Sorting
26 | Some examples include bubble sort, selection sort, and insertion sort.
27 |
28 | ### A. Bubble Sort
29 | Bubble Sort is the simplest sorting algorithm that works by repeatedly swapping adjacent elements if they are in the wrong order.
30 |
31 | #### Pseudo Code in Python
32 | ```python
33 | def bubble_sort(arr):
34 | n = len(arr)
35 |
36 | # Traverse through all array elements
37 | for i in range(n):
38 | swapped = False # Flag to check if a swap happened
39 |
40 | # Last i elements are already sorted, no need to check them
41 | for j in range(0, n-i-1):
42 | if arr[j] > arr[j+1]:
43 | # Swap if the element is greater than the next element
44 | arr[j], arr[j+1] = arr[j+1], arr[j]
45 | swapped = True # A swap occurred
46 |
47 | # If no elements were swapped in the inner loop, break out early
48 | if not swapped:
49 | break
50 |
51 | return arr
52 | ```
53 |
54 | #### Time and Space Complexity of Bubble Sort
55 | - **Best Case Time Complexity**: O(n) – This happens when the array is already sorted, and no swaps are needed.
56 | - **Average Case Time Complexity**: O(n²) – This occurs when the elements are in random order.
57 | - **Worst Case Time Complexity**: O(n²) – This happens when the array is sorted in reverse order, meaning every element needs to be swapped.
58 | - **Space Complexity**: O(1) – Bubble sort sorts the array in place, so it only requires a constant amount of extra memory regardless of the input size.
59 | - **Stability**: Stable – Bubble sort does not change the relative order of equal elements.
60 | - **In-Place**: Yes – Since it doesn’t use extra memory to store another copy of the input array.
61 |
62 | ### B. Selection Sort
63 |
64 | Selection Sort is a comparison-based sorting algorithm. It divides the array into two parts:
65 | - The **sorted part** on the left.
66 | - The **unsorted part** on the right.
67 |
68 | Initially, the sorted part is empty, and the unsorted part contains all elements. The algorithm repeatedly selects the smallest (or largest, depending on sorting order) element from the unsorted part and swaps it with the leftmost unsorted element, moving the boundary between the sorted and unsorted parts one element to the right.
69 |
70 | #### Pseudo Code in Python
71 | ```python
72 | def selection_sort(arr):
73 | n = len(arr)
74 | for i in range(n):
75 | min_idx = i
76 | for j in range(i+1, n):
77 | if arr[j] < arr[min_idx]:
78 | min_idx = j
79 | arr[i], arr[min_idx] = arr[min_idx], arr[i]
80 | return arr
81 | ```
82 |
83 | #### Time and Space Complexity of Selection Sort
84 |
85 | - **Best Case Time Complexity**: O(n²) – Selection Sort does not optimize for sorted arrays. It always scans the entire unsorted part of the array to find the minimum element, even if the array is already sorted.
86 | - **Average Case Time Complexity**: O(n²) – Regardless of the initial arrangement of elements, the algorithm will perform the same number of comparisons.
87 | - **Worst Case Time Complexity**: O(n²) – The worst case occurs when the array is in reverse order, but the time complexity remains O(n²) due to the nature of the algorithm, which compares each element with the remaining unsorted part.
88 | - **Space Complexity**: O(1) – Since Selection Sort sorts the array in place, no additional memory is required other than a few variables for index tracking and swapping.
89 |
90 | ### C. Insertion Sort
91 |
92 | **Insertion Sort** is a simple comparison-based sorting algorithm that works similarly to sorting playing cards in your hands. The array is divided into a "sorted" and "unsorted" part, and elements from the unsorted part are picked and placed at the correct position in the sorted part.
93 |
94 | #### How It Works:
95 | 1. We assume the first element is sorted.
96 | 2. Starting from the second element, we pick the element and compare it with the elements in the sorted portion (the left part of the array).
97 | 3. If the picked element is smaller than the compared element, it is shifted to the right until the correct position is found.
98 | 4. This process is repeated until all elements are sorted.
99 |
100 | #### Pseudo Code in Python
101 | ```python
102 | def insertion_sort(arr):
103 | # Traverse from 1 to len(arr)
104 | for i in range(1, len(arr)):
105 | key = arr[i]
106 | j = i - 1
107 |
108 | # Move elements of arr[0..i-1], that are greater than key, to one position ahead
109 | while j >= 0 and arr[j] > key:
110 | arr[j + 1] = arr[j]
111 | j -= 1
112 |
113 |         # Place the key right after the element just smaller than it
114 | arr[j + 1] = key
115 |
116 | return arr
117 | ```
118 |
119 | #### Time and Space Complexity of Insertion Sort
120 |
121 | - **Best Case Time Complexity**: O(n) – The best case occurs when the array is already sorted. The algorithm only compares each element once, without any shifts.
122 | - **Average Case Time Complexity**: O(n²) – In the average case, elements are in random order, so multiple shifts will be required for each insertion.
123 | - **Worst Case Time Complexity**: O(n²) – The worst case occurs when the array is sorted in reverse order, requiring the maximum number of comparisons and shifts for each element.
124 | - **Space Complexity**: O(1) – Insertion Sort uses only a constant amount of extra memory (for variables like `key` and index tracking).
125 |
126 | ## 3. Distribution Sorting
127 |
128 | One of the most common distribution sorting algorithms is **Counting Sort**.
129 |
130 | ### A. Counting Sort
131 |
132 | **Counting Sort** is an efficient algorithm when the range of numbers is small compared to the input size. It works by counting the occurrences of each value in the input array and using those counts to place elements in the correct order.
133 |
134 | #### Python Code Implementation
135 | ```python
136 | def counting_sort(nums):
137 | # Find the maximum value in the array
138 | maximum = max(nums)
139 |
140 | # Create a count array to store the count of each unique element
141 | count = [0] * (maximum + 1)
142 |
143 | # Store the count of each element in the array
144 | for num in nums:
145 | count[num] += 1
146 |
147 | # Rebuild the original array using the count array
148 | target = 0
149 | for index, value in enumerate(count):
150 | for _ in range(value):
151 | nums[target] = index
152 | target += 1
153 |
154 | return nums
155 | ```
156 |
157 | #### Time and Space Complexity
158 |
159 | - **Best, Average, and Worst Case Time Complexity**: O(n + k) – Where `n` is the number of elements in the input array and `k` is the range (maximum value in the array).
160 | - **Space Complexity**: O(n + k) – The count array takes O(k) space; the stable variant (sketched below) also uses an O(n) output array, while the in-place rebuild above needs only the count array.
161 |
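The in-place rebuild above is simple but not stable, because the original elements are discarded and regenerated from their counts. A minimal sketch of the stable, prefix-sum variant (the form typically used as the per-digit pass of radix sort), assuming non-negative integers:

```python
def counting_sort_stable(nums):
    """Stable counting sort for non-negative integers."""
    if not nums:
        return nums

    maximum = max(nums)
    count = [0] * (maximum + 1)

    # Count occurrences of each value
    for num in nums:
        count[num] += 1

    # Prefix sums: count[v] becomes the index just past the last slot for value v
    for value in range(1, maximum + 1):
        count[value] += count[value - 1]

    # Walk the input right-to-left so equal values keep their relative order
    output = [0] * len(nums)
    for num in reversed(nums):
        count[num] -= 1
        output[count[num]] = num

    return output
```

Because equal keys land in the output array in their original order, this version is safe to use when each value carries extra data that must not be scrambled.
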
162 | ## 4. Conclusion
163 |
164 | Understanding various sorting algorithms provides a strong foundation for solving diverse data manipulation challenges. Sorting not only helps us organize data efficiently but also serves as a building block for other algorithms and applications, such as searching, data analysis, and optimization. In this lesson, we covered different approaches:
165 |
166 | 1. **Comparison Sorting**: Algorithms like **Bubble Sort**, **Selection Sort**, and **Insertion Sort** provide intuitive ways to sort data through element comparisons. Although their time complexity is typically O(n²), they can be useful for small datasets or when a simple implementation is preferred.
167 |
168 | 2. **Distribution Sorting**: Algorithms such as **Counting Sort** excel when the range of input data is limited. By leveraging the distribution of values, they achieve linear time complexity, making them ideal for sorting large datasets with a known value range.
169 |
170 | 3. **Stable vs Non-Stable Sorting**: Sorting stability can be crucial when maintaining the relative order of equal elements is important, such as in applications like radix sort or sorting linked lists.
171 |
172 | 4. **In-Place vs Out-of-Place Sorting**: Depending on memory constraints, choosing an in-place algorithm like insertion sort, which requires minimal extra space, can be beneficial.
--------------------------------------------------------------------------------
/dsa-challenges/Sorting Arrays II/Lesson.md:
--------------------------------------------------------------------------------
1 | # 🚀 Sorting Arrays II - Lesson
2 |
3 | ## 1. Introduction
4 | Advanced sorting algorithms are essential for efficiently handling larger datasets and specific types of data distributions. In this lesson, we will explore four advanced sorting algorithms: **Merge Sort**, **Bucket Sort**, **Quick Sort** and **Cyclic Sort**.
5 |
6 | ## 2. Merge Sort
7 |
8 | **Merge Sort** is a divide-and-conquer sorting algorithm that works by dividing an array into smaller subarrays, sorting each subarray, and then merging the sorted subarrays back together to form the final sorted array.
9 |
10 | ### How It Works:
11 | 1. If the array has one or zero elements, it is already sorted.
12 | 2. Divide the array into two halves.
13 | 3. Recursively apply merge sort to each half.
14 | 4. Merge the two sorted halves back into a single sorted array.
15 |
16 | ### Pseudo Code in Python
17 | ```python
18 | def merge_sort(arr):
19 | if len(arr) <= 1:
20 | return arr
21 |
22 | mid = len(arr) // 2
23 | left_half = merge_sort(arr[:mid])
24 | right_half = merge_sort(arr[mid:])
25 |
26 | return merge(left_half, right_half)
27 |
28 | def merge(left, right):
29 | sorted_array = []
30 | i = j = 0
31 |
32 | # Merge the two halves
33 | while i < len(left) and j < len(right):
34 | if left[i] < right[j]:
35 | sorted_array.append(left[i])
36 | i += 1
37 | else:
38 | sorted_array.append(right[j])
39 | j += 1
40 |
41 | # Append remaining elements
42 | sorted_array.extend(left[i:])
43 | sorted_array.extend(right[j:])
44 |
45 | return sorted_array
46 | ```
47 |
48 | ### Time and Space Complexity of Merge Sort
49 | - **Best Case Time Complexity**: O(n log n) – This occurs in all cases because the array is always divided in half.
50 | - **Average Case Time Complexity**: O(n log n) – The same reasoning as the best case.
51 | - **Worst Case Time Complexity**: O(n log n) – The merging process always requires linear time regardless of the arrangement of elements.
52 | - **Space Complexity**: O(n) – Merge sort requires additional space for the temporary arrays used during the merge process.
53 |
54 | ## 3. Bucket Sort
55 |
56 | **Bucket Sort** is a distribution-based sorting algorithm that works by distributing the elements of an array into a number of buckets. Each bucket is then sorted individually, either using another sorting algorithm or recursively applying bucket sort.
57 |
58 | ### How It Works:
59 | 1. Create an empty array of buckets.
60 | 2. Distribute the input elements into buckets based on a specific range.
61 | 3. Sort each bucket individually (using another sorting algorithm or recursively).
62 | 4. Concatenate the sorted buckets to produce the final sorted array.
63 |
64 | ### Pseudo Code in Python
65 | ```python
66 | def bucket_sort(arr):
67 | # Create empty buckets
68 | num_buckets = 10
69 | buckets = [[] for _ in range(num_buckets)]
70 |
71 | # Place elements into buckets
72 | for num in arr:
73 | index = int(num * num_buckets) # Assuming numbers are in [0, 1)
74 | buckets[min(index, num_buckets - 1)].append(num)
75 |
76 | # Sort each bucket and concatenate
77 | sorted_array = []
78 | for bucket in buckets:
79 | sorted_array.extend(sorted(bucket)) # Using built-in sort for simplicity
80 |
81 | return sorted_array
82 | ```
83 |
84 | ### Time and Space Complexity of Bucket Sort
85 | - **Best Case Time Complexity**: O(n + k) – When the elements are uniformly distributed, and each bucket has a small number of elements (where `k` is the number of buckets).
86 | - **Average Case Time Complexity**: O(n + k) – The same reasoning as the best case.
87 | - **Worst Case Time Complexity**: O(n²) – This can occur when all elements are placed into a single bucket (e.g., all elements are the same), leading to a call to a quadratic sorting algorithm within that bucket.
88 | - **Space Complexity**: O(n + k) – Additional space is required for the buckets.
89 |
90 | ## 4. Quick Sort
91 |
92 | **Quick Sort** is an efficient, comparison-based sorting algorithm that uses a divide-and-conquer strategy to sort an array. It is one of the most popular sorting algorithms due to its average-case time complexity of O(n log n) and practical performance on large datasets. Quick Sort works by selecting a "pivot" element and partitioning the array into two subarrays based on this pivot.
93 |
94 | ### How It Works:
95 | 1. **Choose a Pivot:** A pivot element is selected from the array. This can be any element, but common choices include the first element, the last element, or a randomly selected element.
96 | 2. **Partition the Array:** Rearrange the elements such that elements smaller than the pivot are on the left and elements larger than the pivot are on the right.
97 | 3. **Recursively Sort:** Recursively apply Quick Sort to the left and right subarrays.
98 | 4. **Combine:** The array is sorted once all recursive calls are completed.
99 |
100 | ### Pseudo Code in Python
101 | ```python
102 | def quick_sort(arr):
103 | # Base case: If the array has 1 or 0 elements, it is already sorted
104 | if len(arr) <= 1:
105 | return arr
106 |
107 | # Choose the pivot (using the last element here)
108 | pivot = arr[-1]
109 |
110 | # Partition the array into three lists
111 | left = [x for x in arr[:-1] if x <= pivot] # Elements less than or equal to pivot
112 | right = [x for x in arr[:-1] if x > pivot] # Elements greater than pivot
113 |
114 | # Recursively sort the left and right sub-arrays and combine with pivot
115 | return quick_sort(left) + [pivot] + quick_sort(right)
116 | ```
117 |
118 | ### Time and Space Complexity of Quick Sort
119 | - **Best Case Time Complexity**: O(n log n) – This occurs when the pivot divides the array into two equal halves at each step.
120 | - **Average Case Time Complexity**: O(n log n) – On average, the algorithm performs well with most types of input data.
121 | - **Worst Case Time Complexity**: O(n²) – The worst-case occurs when the smallest or largest element is always chosen as the pivot, leading to unbalanced partitions (e.g., sorted or reverse-sorted arrays).
122 | - **Space Complexity**: O(log n) – For the classic in-place, partition-based implementation, only the recursion stack is needed; the simplified version above also builds new sub-lists, so it uses O(n) extra space.
123 | - **Stability**: Not stable – Equal elements may not retain their original relative order.
124 | - **In-Place**: Yes, in the classic partition-based implementation (see the sketch below); the simplified list-comprehension version above is out-of-place.
125 |
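For reference, here is a minimal sketch of the classic in-place variant using the Lomuto partition scheme; it sorts within the original list, so the only extra space is the recursion stack:

```python
def quick_sort_inplace(arr, low=0, high=None):
    """In-place quick sort using the Lomuto partition scheme."""
    if high is None:
        high = len(arr) - 1
    if low < high:
        p = partition(arr, low, high)
        quick_sort_inplace(arr, low, p - 1)   # Sort the part left of the pivot
        quick_sort_inplace(arr, p + 1, high)  # Sort the part right of the pivot
    return arr


def partition(arr, low, high):
    pivot = arr[high]  # Use the last element as the pivot
    i = low - 1        # Boundary of the "<= pivot" region
    for j in range(low, high):
        if arr[j] <= pivot:
            i += 1
            arr[i], arr[j] = arr[j], arr[i]
    arr[i + 1], arr[high] = arr[high], arr[i + 1]  # Move the pivot to its final position
    return i + 1
```
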
126 | ## 5. Cyclic Sort
127 |
128 | Cyclic Sort is an efficient sorting algorithm designed for arrays where the elements are in a specific range from 1 to n. The algorithm works by placing each element in its correct position in a single pass through the array.
129 |
130 | ### How It Works
131 | 1. Traverse the array from the beginning.
132 | 2. For each element, if it is not in the correct position (i.e., the element `arr[i]` is not equal to `i + 1`), swap it with the element at its correct position (`arr[arr[i] - 1]`).
133 | 3. Repeat this process until all elements are in their correct positions.
134 |
135 | ### Pseudo Code in Python
136 | ```python
137 | def cyclic_sort(arr):
138 | i = 0
139 | while i < len(arr):
140 | correct_idx = arr[i] - 1
141 | if arr[i] != arr[correct_idx]:
142 | # Swap the elements
143 | arr[i], arr[correct_idx] = arr[correct_idx], arr[i]
144 | else:
145 | i += 1
146 | return arr
147 | ```
148 |
149 | ### Time and Space Complexity of Cyclic Sort
150 | - **Best Case Time Complexity**: O(n) – The algorithm traverses the array once, placing all elements in their correct positions with minimal swaps.
151 | - **Average Case Time Complexity**: O(n) – In the average case, each element is moved to its correct position in a linear number of operations.
152 | - **Worst Case Time Complexity**: O(n) – Even in the worst case, each element is swapped at most once, making the total number of operations linear with respect to the input size.
153 | - **Space Complexity**: O(1) – Cyclic Sort sorts the array in place, so it requires a constant amount of extra memory.
154 | - **Stability**: Not stable – The algorithm may alter the relative order of equal elements during sorting.
155 | - **In-Place**: Yes – Sorting is performed without using extra space proportional to the input size.
156 |
157 | ## 6. Conclusion
158 |
159 | Advanced sorting algorithms like **Merge Sort**, **Bucket Sort**, **Quick Sort**, and **Cyclic Sort** play a crucial role in optimizing performance across different scenarios. Each algorithm has unique strengths, making them suitable for specific data distributions and requirements:
160 |
161 | - **Merge Sort** excels in handling large datasets with consistent time complexity, providing stability and guaranteeing a predictable O(n log n) performance.
162 | - **Bucket Sort** leverages the uniform distribution of elements to achieve linear time complexity, making it efficient for sorting floating-point numbers or datasets with a known range.
163 | - **Quick Sort** is favored for its in-place sorting and average-case efficiency, although it requires careful choice of pivot to avoid worst-case scenarios.
164 | - **Cyclic Sort** shines in scenarios where elements are in a limited range and need to be placed in a specific order with minimal swaps.
--------------------------------------------------------------------------------
/images/Tech-Vault.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/moabukar/tech-vault/edf8ee9f875cfd1a22bcab3223f58120cf293ca7/images/Tech-Vault.png
--------------------------------------------------------------------------------
/misc/README.md:
--------------------------------------------------------------------------------
1 | # Useful scripts
2 |
3 | ## Random Question Generator
4 |
5 | This script will go through the list of questions and generate a random question for you to answer.
6 |
7 | ```bash
8 | python3 randomiser.py
9 | ```
10 |
11 | Keep pressing enter to get a new question.
12 |
13 | Type 'x' and press enter to exit.
16 |
17 | ## Repo Question Count
18 |
19 | This script will show you how many questions we currently have in the repo.
20 |
21 | ```bash
22 | python3 question_count.py
23 | ```
26 |
--------------------------------------------------------------------------------
/misc/go/count_questions.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "bufio"
5 | "fmt"
6 | "log"
7 | "net/http"
8 | "strings"
9 | )
10 |
11 | func main() {
12 | 	// URL of the repo README
13 | url := "https://raw.githubusercontent.com/moabukar/tech-interview-questions/main/README.md"
14 |
15 | resp, err := http.Get(url)
16 | if err != nil {
17 | log.Fatal(err)
18 | }
19 | defer resp.Body.Close()
20 |
21 | scanner := bufio.NewScanner(resp.Body)
22 | 	questionCount := -18 // start below zero to offset "- " bullet lines in the README that are not questions
23 |
24 | for scanner.Scan() {
25 | line := scanner.Text()
26 | if strings.HasPrefix(line, "- ") {
27 | questionCount++
28 | }
29 | }
30 |
31 | if err := scanner.Err(); err != nil {
32 | log.Fatal(err)
33 | }
34 |
35 | fmt.Printf("There are %d questions in the repo.\n", questionCount)
36 | }
37 |
--------------------------------------------------------------------------------
/misc/go/random.go:
--------------------------------------------------------------------------------
1 | package main
2 |
3 | import (
4 | "fmt"
5 | "io/ioutil"
6 | "math/rand"
7 | "net/http"
8 | "regexp"
9 | "strings"
10 | "time"
11 | )
12 |
13 | func getQuestionsFromRepo() ([]string, error) {
14 | url := "https://raw.githubusercontent.com/moabukar/tech-vault/main/README.md"
15 | resp, err := http.Get(url)
16 | if err != nil {
17 | return nil, err
18 | }
19 | defer resp.Body.Close()
20 |
21 | body, err := ioutil.ReadAll(resp.Body)
22 | if err != nil {
23 | return nil, err
24 | }
25 |
26 | content := string(body)
27 | 	re := regexp.MustCompile(`<summary>(.*?)</summary>`)
28 | matches := re.FindAllStringSubmatch(content, -1)
29 |
30 | var questions []string
31 | for _, match := range matches {
32 | questions = append(questions, match[1])
33 | }
34 |
35 | rand.Seed(time.Now().UnixNano())
36 | rand.Shuffle(len(questions), func(i, j int) {
37 | questions[i], questions[j] = questions[j], questions[i]
38 | })
39 |
40 | return questions, nil
41 | }
42 |
43 | func main() {
44 | questions, err := getQuestionsFromRepo()
45 | if err != nil {
46 | fmt.Println("Error:", err)
47 | return
48 | }
49 |
50 | for _, question := range questions {
51 | var userInput string
52 | fmt.Printf("Question: %s\nPress any key to continue or 'x' to exit.\n", question)
53 | fmt.Scanln(&userInput)
54 | if strings.ToLower(userInput) == "x" {
55 | break
56 | }
57 | }
58 | }
59 |
--------------------------------------------------------------------------------
/misc/py/question_count.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import re
3 |
4 | def count_questions_in_repo():
5 | # Fetch the content of the README.md from the repo
6 | url = "https://raw.githubusercontent.com/moabukar/tech-vault/main/README.md"
7 | response = requests.get(url)
8 |
9 | if response.status_code != 200:
10 | print("Failed to fetch the content.")
11 | return
12 |
13 | content = response.text
14 |
15 | # Use regex to find all questions
16 |     pattern = r'<summary>(.*?)</summary>'
17 | questions = re.findall(pattern, content)
18 |
19 | # Count and display the number of questions
20 | print(f"Total number of questions in the repo: {len(questions)}")
21 |
22 | if __name__ == "__main__":
23 | count_questions_in_repo()
24 |
--------------------------------------------------------------------------------
/misc/py/randomiser.py:
--------------------------------------------------------------------------------
1 | import requests
2 | import re
3 | import random
4 | import keyboard
5 |
6 | def get_questions_from_repo():
7 | url = "https://raw.githubusercontent.com/moabukar/tech-vault/main/README.md"
8 | response = requests.get(url)
9 | content = response.text
10 |
11 |     questions = re.findall(r'<summary>(.*?)</summary>', content)
12 |
13 | random.shuffle(questions)
14 |
15 | return questions
16 |
17 | def main():
18 | questions = get_questions_from_repo()
19 |
20 | for question in questions:
21 | user_input = input(f"Question: {question}\nPress any key to continue or 'x' to exit.\n").strip()
22 | if user_input.lower() == 'x':
23 | break
24 |
25 |
26 | ## Uses method of just tapping x to leave the question
27 |
28 | # def main():
29 | # questions = get_questions_from_repo()
30 |
31 | # for question in questions:
32 | # print(f"Question: {question}")
33 | # print("Press any key to continue or 'x' to exit.")
34 |
35 | # # Capture a single key press
36 | # event = keyboard.read_event(suppress=True)
37 |
38 | # if event.name.lower() == 'x':
39 | # break
40 |
41 |
42 | if __name__ == "__main__":
43 | main()
44 |
--------------------------------------------------------------------------------
/misc/py/requirements.txt:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/moabukar/tech-vault/edf8ee9f875cfd1a22bcab3223f58120cf293ca7/misc/py/requirements.txt
--------------------------------------------------------------------------------
/quiz/networking.md:
--------------------------------------------------------------------------------
1 | # Networking & Linux quiz
2 |
3 |
4 | Which of these protocols make use of a reliable transport protocol at Layer 4?
5 |
6 | - HTTP
7 | - NTP
8 | - DHCP
9 | - HTTPS
10 |
11 |
12 |
13 |
14 |
15 |
16 | Which of the following addresses would be a valid MAC/hardware address for an ethernet card?
17 |
18 | - FF:FF:FF:FF:FF:FF
19 | - 00:1F:22:01:23:45
20 | - 00:1G:22:01:23:45
21 | - AAAA:1111::2222
22 |
23 |
24 |
25 |
26 |
27 |
28 | How can you transfer a file over the network?
29 |
30 | - nice
31 | - rsync
32 | - nc
33 | - scp
34 |
35 |
36 |
37 |
38 | Which of these is true about an SSL certificate for a particular domain name?
39 |
40 | - The certificate includes the DNS Server information for the domain name
41 | - The certificate includes the digital signature of the certificate authority (CA) signing the key
42 | - The certificate includes a list of revoked certificates for the domain
43 | - The certificate includes the public key associated with the domain
44 |
45 |
46 |
47 |
48 |
49 | Which one of these is NOT the job of a package manager?
50 |
51 | - Download packages
52 | - Resolve dependencies
53 | - Upload packages to a repository
54 | - Search in a repository
55 |
56 |
57 |
58 |
59 |
60 | Which of these can be used to run programs on a recurring schedule?
61 |
62 | - scheduler
63 | - jobman
64 | - cron
65 | - systemd
66 |
67 |
68 |
69 |
70 | Which variable name in bash stores the return code of the last command?
71 |
72 |
73 | - $SRET_CODE
74 | - $RETURN
75 | - $$
76 | - $?
77 |
78 |
79 |
80 |
81 | Which signal is sent to a process when using the command 'kill -9 <pid>'?
82 |
83 |
84 | - SIGQUIT
85 | - SIGABRT
86 | - SIGKILL
87 | - SIGTERM
88 |
89 |
90 |
91 |
92 | When using public key authentication for password-less SSH login, what purpose does the 'authorized_keys' file in '~/.ssh' serve?
93 |
94 | - The file stores the public keys of the users allowed to SSH into the server
95 | - The file stores the public key of SSH
96 | - The file stores the private key for the SSH Server
97 | - The file stores the private keys of the users allowed to SSH into the server
98 |
99 |
100 |
101 |
102 |
103 | If you want to host a web server, which ports would you typically open in your firewall?
104 |
105 | - 80
106 | - 8080
107 | - 443
108 | - 56
109 |
110 |
111 |
112 |
113 | How can a computer receive its IP address configuration?
114 |
115 | - NTP
116 | - ARP
117 | - DNS
118 | - DHCP
119 |
120 |
121 |
122 |
123 | Which of the following commands could be used to show load averages?
124 |
125 | - ps aux
126 | - cat /proc/cpuinfo
127 | - top
128 | - uptime
129 |
130 |
131 |
--------------------------------------------------------------------------------