├── .gitignore ├── CNAME ├── README.md ├── _config.yml.bak ├── about.md ├── ansible └── local │ ├── create-file-from-template │ ├── README.md │ ├── ansible.cfg │ ├── inventory.ini │ ├── playbook.yml │ └── templates │ │ └── vars.env.j2 │ └── http-requests │ ├── README.md │ ├── ansible.cfg │ ├── inventory.ini │ └── playbooks │ └── get-requests.yml ├── docker ├── code-server │ ├── standalone │ │ ├── Makefile │ │ ├── README.md │ │ └── docker-compose.yaml │ └── with-docker │ │ ├── README.md │ │ └── docker-compose.yaml ├── docker-compose-env-file │ ├── README.md │ ├── config │ │ ├── .env │ │ └── .env_extras │ └── docker-compose.yml ├── docker-compose-extends │ ├── Dockerfile │ ├── README.md │ ├── base.Dockerfile │ ├── boot.sh │ ├── common-services.yml │ ├── docker-compose.yml │ ├── main.py │ └── requirements.txt ├── ethereum │ ├── README.md │ └── docker-compose.yaml ├── flatnotes │ ├── Makefile │ └── docker-compose.yaml ├── gitea │ ├── README.md │ ├── configs │ │ └── gitea-runner │ │ │ └── config.yaml │ └── docker-compose.yaml ├── kafka-cluster-metrics │ ├── README.md │ ├── configs │ │ ├── grafana │ │ │ ├── dashboards │ │ │ │ └── kafka-metrics.json │ │ │ └── provisioning │ │ │ │ ├── dashboards.yml │ │ │ │ └── datasources.yml │ │ └── prometheus │ │ │ └── prometheus.yml │ ├── docker-compose.yaml │ └── python-client │ │ ├── Dockerfile │ │ ├── consume.py │ │ ├── produce.py │ │ ├── requirements.txt │ │ └── run.sh ├── kafka-single-node │ └── docker-compose.yaml ├── kafka-three-node-cluster-kraft │ ├── README.md │ ├── docker-compose.yaml │ └── gen_uuid.py ├── kafka-three-node-cluster │ ├── Makefile │ └── docker-compose.yaml ├── kafka │ ├── README.md │ ├── docker-compose.yaml │ └── python-client │ │ ├── Dockerfile │ │ ├── consume.py │ │ ├── produce.py │ │ ├── requirements.txt │ │ └── run.sh ├── localstack │ ├── Makefile │ ├── README.md │ └── docker-compose.yaml ├── mailhog │ ├── README.md │ ├── docker-compose.yaml │ └── send_mail.py ├── mysql │ ├── README.md │ └── docker-compose.yaml ├── nodejs-express-mongodb │ ├── Dockerfile │ ├── README.md │ ├── docker-compose.yaml │ ├── index.js │ ├── models │ │ └── model.js │ ├── package.json │ └── routes │ │ └── routes.js ├── postgresql │ └── docker-compose.yaml ├── rabbitmq-python │ ├── Makefile │ ├── consumer │ │ ├── Dockerfile │ │ └── consumer.py │ ├── docker-compose.yaml │ └── publisher │ │ ├── Dockerfile │ │ └── publisher.py ├── redis │ ├── README.md │ └── docker-compose.yaml ├── spark-cluster │ ├── .env │ ├── README.md │ ├── docker-compose.yaml │ └── workspace │ │ ├── application.py │ │ └── data.txt ├── traefik-http │ ├── README.md │ └── docker-compose.yml └── traefik-https │ ├── README.md │ ├── config │ └── traefik.toml │ └── docker-compose.yml ├── helm └── README.md ├── index.md_bak ├── kind ├── 3-node-cluster │ ├── README.md │ └── kind.yaml ├── Makefile └── README.md ├── kubernetes ├── argocd │ ├── README.md │ └── kind-config.yaml ├── grafana-prometheus │ ├── README.md │ └── kind-config.yaml └── ingress-nginx │ ├── README.md │ ├── deployment │ ├── deployment.yaml │ ├── ingress.yaml │ └── service.yaml │ └── kind-config.yaml └── terraform ├── README.md ├── aws-localstack ├── dynamodb │ ├── README.md │ ├── main.tf │ └── provider.tf └── kinesis │ ├── main.tf │ └── provider.tf ├── aws ├── ec2-instance │ ├── README.md │ ├── iam.tf │ ├── locals.tf │ ├── main.tf │ ├── outputs.tf │ ├── provider.tf │ ├── security.tf │ ├── terraform.tfvars │ └── variables.tf ├── modules │ ├── ec2 │ │ ├── README.md │ │ ├── example │ │ │ ├── main.tf │ │ │ ├── provider.tf │ │ │ ├── 
terraform.tfvars │ │ │ └── variables.tf │ │ ├── iam.tf │ │ ├── locals.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── screenshots │ │ │ └── ec2-module-aws-console-screenshot.png │ │ ├── security.tf │ │ └── variables.tf │ └── vpc │ │ ├── example │ │ ├── main.tf │ │ ├── outputs.tf │ │ ├── providers.tf │ │ ├── terraform.tfvars │ │ └── variables.tf │ │ ├── main.tf │ │ ├── outputs.tf │ │ └── variables.tf └── vpc │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── provider.tf │ └── variables.tf ├── docker └── nginx │ ├── README.md │ └── main.tf ├── kafka-topics ├── README.md ├── docker-compose.yaml ├── example │ ├── main.tf │ └── variables.tf ├── main.tf ├── outputs.tf ├── providers.tf └── variables.tf ├── kind-kubernetes ├── README.md ├── helm.tf ├── main.tf ├── outputs.tf ├── providers.tf ├── terraform.tfvars └── variables.tf ├── local-exec ├── for-loops │ ├── README.md │ ├── main.tf │ └── provider.tf └── time-sleep │ ├── README.md │ ├── main.tf │ └── provider.tf └── mysql ├── paynetworx-provider ├── README.md ├── docker-compose.yml ├── example │ ├── main.tf │ ├── outputs.tf │ ├── terraform.tfvars │ └── variables.tf ├── main.tf ├── outputs.tf ├── provider.tf └── variables.tf └── petoju-provider ├── README.md ├── docker-compose.yml ├── main.tf ├── outputs.tf ├── providers.tf ├── terraform.tfvars └── variables.tf /.gitignore: -------------------------------------------------------------------------------- 1 | terraform/*/*/.terraform* 2 | terraform/*/*/terraform.tfstate* 3 | terraform/*/*/*/.terraform* 4 | terraform/*/*/*/terraform.tfstate* 5 | terraform/*/*/*/*/.terraform* 6 | terraform/*/*/*/*/terraform.tfstate* 7 | docker/localstack/volume/* 8 | -------------------------------------------------------------------------------- /CNAME: -------------------------------------------------------------------------------- 1 | quickstarts.ruan.dev -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # quick-starts 2 | Collection of quick starts on docker, terraform, ansible, etc 3 | 4 | ![image](https://github.com/ruanbekker/quick-starts/assets/567298/1b92e58e-40e0-44cb-9fd2-3452baf67325) 5 | 6 | 7 | ## Index 8 | 9 | The following quick-starts are available: 10 | 11 | - [Ansible](./ansible) 12 | - [ansible/local](./ansible/local/) 13 | - [Docker](./docker) 14 | - [code-server: with-docker](./docker/code-server/with-docker) 15 | - [code-server: standalone](./docker/code-server/standalone) 16 | - [docker-compose-env-file](./docker/docker-compose-env-file) 17 | - [docker-compose-extends](./docker/docker-compose-extends) 18 | - [flatnotes](./docker/flatnotes) 19 | - [gitea](./docker/gitea/) 20 | - [kafka-single-node](./docker/kafka-single-node) 21 | - [kafka-three-node](./docker/kafka-three-node-cluster) 22 | - [kafka-three-node-kraft](./docker/kafka-three-node-cluster-kraft) 23 | - [kafka-cluster](./docker/kafka) 24 | - [kafka-cluster: with metrics](./docker/kafka-cluster-metrics) 25 | - [mailhog](./docker/mailhog/) 26 | - [nodejs-express-mongodb](./docker/nodejs-express-mongodb) 27 | - [redis](./docker/redis) 28 | - [spark-cluster](./docker/spark-cluster) 29 | - [traefik-http](./docker/traefik-http) 30 | - [traefik-https: with http challenge](./docker/traefik-https) 31 | - [Helm](./helm) 32 | - [Kind](./kind) 33 | - [3-node-cluster](./kind/3-node-cluster) 34 | - [Kubernetes](./kubernetes) 35 | - [argocd](./kubernetes/argocd) 36 | - 
[grafana-prometheus](./kubernetes/grafana-prometheus) 37 | - [ingress-nginx](./kubernetes/ingress-nginx) 38 | - [Terraform](./terraform) 39 | - [aws](./terraform/aws) 40 | - [aws-localstack](./terraform/aws-localstack) 41 | - [docker](./terraform/docker) 42 | - [kafka-topics](./terraform/kafka-topics) 43 | - [kind-kubernetes](./terraform/kind-kubernetes) 44 | - [local-exec](./terraform/local-exec) 45 | 46 | 47 | More will be added soon, but in the meantime you can see more examples on [ruan.dev/projects](https://ruan.dev/projects/) 48 | 49 | ## Stargazers over time 50 | 51 | [![Stargazers over time](https://starchart.cc/ruanbekker/quick-starts.svg)](https://starchart.cc/ruanbekker/quick-starts) 52 | -------------------------------------------------------------------------------- /_config.yml.bak: -------------------------------------------------------------------------------- 1 | # https://github.com/jekyll/minima/blob/master/_config.yml 2 | # https://github.com/MichaelCurrin/jekyll-blog-demo 3 | # https://talk.jekyllrb.com/t/minima-header-html-functionality/6721 4 | # https://www.chrishasz.com/yaght/general/working-with-minima 5 | title: Quick Starts 6 | description: > 7 | Collection of quick starts on docker, terraform, ansible, etc. 8 | 9 | author: 10 | name: Ruan Bekker 11 | 12 | twitter_username: ruanbekker 13 | github_username: ruanbekker 14 | rss: RSS 15 | 16 | url: https://quickstarts.ruan.dev 17 | baseurl: / 18 | permalink: pretty 19 | 20 | theme: minima 21 | plugins: 22 | - jekyll-feed 23 | - jekyll-seo-tag 24 | 25 | header_files: 26 | - about.md 27 | 28 | # collections: 29 | # tools: 30 | # output: true 31 | # languages: 32 | # output: true 33 | 34 | # defaults: 35 | # - scope: 36 | # path: "" 37 | # values: 38 | # layout: page 39 | 40 | # exclude: 41 | # - docs/ 42 | # - vendor/ 43 | # - Gemfile 44 | # - LICENSE 45 | # - Makefile 46 | # - README.md 47 | # - sample*.png 48 | 49 | # liquid: 50 | # error_mode: strict 51 | 52 | # strict_front_matter: true 53 | -------------------------------------------------------------------------------- /about.md: -------------------------------------------------------------------------------- 1 | --- 2 | title: About 3 | layout: page 4 | permalink: /about/ 5 | --- 6 | 7 | This is the base Jekyll theme. You can find out more info about customizing your Jekyll theme, as well as basic Jekyll usage documentation at [jekyllrb.com](https://jekyllrb.com/) 8 | 9 | Source code: 10 | 11 | [![jekyll - jekyll](https://img.shields.io/static/v1?label=jekyll&message=jekyll&color=blue&logo=github)](https://github.com/jekyll/jekyll) 12 | 13 | [![jekyll - minima](https://img.shields.io/static/v1?label=jekyll&message=minima&color=blue&logo=github)](https://github.com/jekyll/minima) 14 | -------------------------------------------------------------------------------- /ansible/local/create-file-from-template/README.md: -------------------------------------------------------------------------------- 1 | # Ansible Template Example 2 | 3 | This quick-start shows you how to generate a file from a template using variables to determine the outcome.
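The variables defined in `playbook.yml` drive the conditionals in the template, and you can override them at run time with `--extra-vars` instead of editing the playbook. A minimal sketch (the values here are only examples), rendering the `prod` branch of the template so that `API_SECRET` gets populated:

```bash
# override playbook vars from the CLI; env=prod triggers the
# {% if env == 'prod' %} branch in templates/vars.env.j2
ansible-playbook playbook.yml -e env=prod -e api_secret=example-secret
```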
4 | 5 | ## Usage 6 | 7 | To run the playbook: 8 | 9 | ```bash 10 | $ ansible-playbook playbook.yml 11 | 12 | PLAY [generates file from template playbook] ********************************************************************* 13 | TASK [generate config from template] ***************************************************************************** 14 | changed: [laptop] 15 | PLAY RECAP ******************************************************************************************************* 16 | laptop : ok=1 changed=1 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 17 | ``` 18 | 19 | Now we can see that the template from `./templates/vars.env.j2` was parsed with our variables from our `playbook.yml` and the `vars.env` file was created: 20 | 21 | ```bash 22 | $ cat vars.env 23 | APP_OWNER=ruan 24 | APP_ENV=dev 25 | APP_IDS='AAB,ABC,ADF' 26 | APP_IPS=10.0.1.3,192.168.10.4,172.16.18.2 27 | EXTRA_PASS=foobar! 28 | API_SECRET=null 29 | ``` 30 | 31 | ## Resources 32 | 33 | - https://docs.ansible.com/ansible/latest/collections/ansible/builtin/template_module.html 34 | -------------------------------------------------------------------------------- /ansible/local/create-file-from-template/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = inventory.ini 3 | deprecation_warnings=False 4 | interpreter_python=/usr/bin/python3 5 | -------------------------------------------------------------------------------- /ansible/local/create-file-from-template/inventory.ini: -------------------------------------------------------------------------------- 1 | [localhost] 2 | laptop ansible_connection=local 3 | 4 | [localhost:vars] 5 | ansible_connection=local 6 | ansible_python_interpreter=/usr/bin/python3 7 | -------------------------------------------------------------------------------- /ansible/local/create-file-from-template/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | name: generates file from template playbook 4 | gather_facts: False 5 | vars: 6 | env: dev 7 | app_ids: "'AAB,ABC,ADF'" 8 | app_ip_address: "192.168.10.4" 9 | include_extra_vars: true 10 | user: nobody 11 | group: nobody 12 | api_secret: asdadasd 13 | passw: "foobar!" 14 | tasks: 15 | - name: generate config from template 16 | template: 17 | src: "templates/vars.env.j2" 18 | dest: vars.env 19 | #owner: "{{ user }}" 20 | #group: "{{ group }}" 21 | mode: 0644 22 | -------------------------------------------------------------------------------- /ansible/local/create-file-from-template/templates/vars.env.j2: -------------------------------------------------------------------------------- 1 | APP_OWNER=ruan 2 | APP_ENV={{ env }} 3 | APP_IDS={{ app_ids }} 4 | APP_IPS=10.0.1.3,{{ app_ip_address }},172.16.18.2 5 | {% if include_extra_vars %} 6 | EXTRA_PASS={{ passw }} 7 | {% endif %} 8 | {% if env == 'prod' %} 9 | API_SECRET={{ api_secret }} 10 | {% else %} 11 | API_SECRET=null 12 | {% endif %} 13 | -------------------------------------------------------------------------------- /ansible/local/http-requests/README.md: -------------------------------------------------------------------------------- 1 | # Ansible HTTP Requests 2 | 3 | This quick-start demonstrates how to use the ansible [uri](https://docs.ansible.com/ansible/latest/collections/ansible/builtin/uri_module.html) module to make http requests. The examples below are all using the `local` connection method.
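The uri module can also be exercised as an ad-hoc command before writing a playbook. A minimal sketch, assuming Ansible is installed locally and using the same test API as the playbook below:

```bash
# ad-hoc GET request over the local connection;
# return_content=yes includes the response body in the result
ansible localhost -c local -m ansible.builtin.uri \
  -a "url=https://jsonplaceholder.typicode.com/todos/1 return_content=yes"
```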
4 | 5 | ## Usage 6 | 7 | To run the HTTP **GET** request playbook: 8 | 9 | ```bash 10 | $ ansible-playbook playbooks/get-requests.yml 11 | 12 | PLAY [get requests playbook] ************************************************************************************* 13 | TASK [make a get request] **************************************************************************************** 14 | ok: [laptop] 15 | 16 | TASK [return response] ******************************************************************************************* 17 | ok: [laptop] => { 18 | "msg": { 19 | "completed": false, 20 | "id": 1, 21 | "title": "delectus aut autem", 22 | "userId": 1 23 | } 24 | } 25 | 26 | PLAY RECAP ******************************************************************************************************* 27 | laptop : ok=2 changed=0 unreachable=0 failed=0 skipped=0 rescued=0 ignored=0 28 | ``` 29 | 30 | ## Resources 31 | 32 | - https://docs.ansible.com/ansible/latest/collections/ansible/builtin/uri_module.html -------------------------------------------------------------------------------- /ansible/local/http-requests/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = inventory.ini 3 | deprecation_warnings=False 4 | interpreter_python=/usr/bin/python3 5 | -------------------------------------------------------------------------------- /ansible/local/http-requests/inventory.ini: -------------------------------------------------------------------------------- 1 | [localhost] 2 | laptop ansible_connection=local 3 | 4 | [localhost:vars] 5 | ansible_connection=local 6 | ansible_python_interpreter=/usr/bin/python3 7 | -------------------------------------------------------------------------------- /ansible/local/http-requests/playbooks/get-requests.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | name: get requests playbook 4 | gather_facts: False 5 | vars: 6 | api_baseurl: "jsonplaceholder.typicode.com" 7 | api_path: "todos/1" 8 | 9 | tasks: 10 | - name: make a get request 11 | uri: 12 | url: "https://{{ api_baseurl }}/{{ api_path }}" 13 | method: GET 14 | body_format: json 15 | headers: 16 | Content-Type: application/json 17 | register: response 18 | 19 | - name: return response 20 | debug: 21 | msg: "{{ response.json }}" 22 | -------------------------------------------------------------------------------- /docker/code-server/standalone/Makefile: -------------------------------------------------------------------------------- 1 | # Thanks: https://gist.github.com/mpneuried/0594963ad38e68917ef189b4e6a269db 2 | .PHONY: help 3 | 4 | help: ## This help. 
5 | @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) 6 | 7 | .DEFAULT_GOAL := help 8 | 9 | # DOCKER TASKS 10 | up: ## Runs the containers in detached mode 11 | docker-compose up -d --build 12 | 13 | clean: ## Stops and removes all containers 14 | docker-compose down 15 | 16 | logs: ## View the logs from the containers 17 | docker-compose logs -f 18 | 19 | open: ## Opens tabs in container 20 | open http://localhost:8443/ 21 | -------------------------------------------------------------------------------- /docker/code-server/standalone/README.md: -------------------------------------------------------------------------------- 1 | # code-server 2 | 3 | Runs VSCode in a UI 4 | 5 | ## Usage 6 | 7 | Make targets available: 8 | 9 | ```bash 10 | make 11 | 12 | help This help. 13 | up Runs the containers in detached mode 14 | clean Stops and removes all containers 15 | logs View the logs from the containers 16 | open Opens tabs in container 17 | ``` 18 | 19 | Run: 20 | 21 | ```bash 22 | make up 23 | ``` 24 | 25 | Access code-server: 26 | 27 | ```bash 28 | make open 29 | ``` 30 | 31 | Tear down: 32 | 33 | ```bash 34 | make clean 35 | ``` 36 | 37 | 38 | -------------------------------------------------------------------------------- /docker/code-server/standalone/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # More info: https://hub.docker.com/r/linuxserver/code-server 3 | version: "3.9" 4 | services: 5 | code-server: 6 | image: lscr.io/linuxserver/code-server:4.13.0 7 | container_name: code-server 8 | restart: unless-stopped 9 | environment: 10 | - PUID=1000 11 | - PGID=1000 12 | - TZ=Africa/Johannesburg 13 | - PASSWORD=quickstart 14 | - SUDO_PASSWORD=quickstart 15 | - DEFAULT_WORKSPACE=/config/workspace 16 | volumes: 17 | - code-server-volume:/config 18 | ports: 19 | - 8443:8443 20 | logging: 21 | driver: "json-file" 22 | options: 23 | max-size: "1m" 24 | max-file: "1" 25 | 26 | volumes: 27 | code-server-volume: 28 | driver: local 29 | -------------------------------------------------------------------------------- /docker/code-server/with-docker/README.md: -------------------------------------------------------------------------------- 1 | # code-server-with-docker 2 | 3 | This is a CodeServer container with Docker-in-Docker 4 | 5 | ## Usage 6 | 7 | Start the stack: 8 | 9 | ```bash 10 | docker-compose up -d 11 | ``` 12 | 13 | Get the docker binary: 14 | 15 | ```bash 16 | curl -Ls https://download.docker.com/linux/static/stable/x86_64/docker-20.10.24.tgz | tar -xz 17 | sudo mv docker/docker /usr/bin/docker 18 | rm -rf docker 19 | ``` 20 | 21 | At the moment our docker host is set to: 22 | 23 | ```bash 24 | env | grep DOCKER_HOST 25 | DOCKER_HOST=tcp://docker:2376 26 | ``` 27 | 28 | And we can test that with: 29 | 30 | ```bash 31 | docker ps 32 | docker pull nginx 33 | ``` 34 | -------------------------------------------------------------------------------- /docker/code-server/with-docker/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # More info: https://hub.docker.com/r/linuxserver/code-server 3 | version: "3.9" 4 | services: 5 | code-server: 6 | image: lscr.io/linuxserver/code-server:4.13.0 7 | container_name: code-server 8 | restart: unless-stopped 9 | environment: 10 | - PUID=1000 11 | - PGID=1000 12 | - TZ=Africa/Johannesburg 13 | - PASSWORD=quickstart 14 | - SUDO_PASSWORD=quickstart 15 | - 
DEFAULT_WORKSPACE=/config/workspace 16 | - DOCKER_HOST=tcp://docker:2376 17 | - DOCKER_TLS_CERTDIR=/certs/client 18 | - DOCKER_TLS_VERIFY="1" 19 | - DOCKER_MACHINE_NAME=docker 20 | - DOCKER_CERT_PATH=/certs/client 21 | networks: 22 | - code-server 23 | volumes: 24 | - code-server-volume:/config 25 | - certs:/certs/client 26 | depends_on: 27 | - docker 28 | ports: 29 | - 8443:8443 30 | logging: 31 | driver: "json-file" 32 | options: 33 | max-size: "1m" 34 | max-file: "1" 35 | 36 | docker: 37 | image: docker:20.10-dind 38 | privileged: yes 39 | networks: 40 | - code-server 41 | volumes: 42 | - certs:/certs/client 43 | logging: 44 | driver: "json-file" 45 | options: 46 | max-size: "1m" 47 | max-file: "1" 48 | 49 | networks: 50 | code-server: 51 | name: code-server 52 | 53 | volumes: 54 | code-server-volume: 55 | driver: local 56 | certs: 57 | driver: local 58 | -------------------------------------------------------------------------------- /docker/docker-compose-env-file/README.md: -------------------------------------------------------------------------------- 1 | # docker-compose env-file 2 | 3 | Example on how to use env_file in docker-compose 4 | 5 | ## Use Cases 6 | 7 | There will be 3 scenarios: 8 | 9 | 1. The app and db containers share the same environment variables from `config/.env` 10 | 2. With the extra `environment` section on the app container we can overwrite the value of `APP_DEBUG` from the `config/.env` file 11 | 3. We have an extra `config/.env_extras` which is read only by the app container 12 | 13 | ## Run it 14 | 15 | Run the containers: 16 | 17 | ```bash 18 | docker-compose up -d 19 | ``` 20 | 21 | Run the `env` command in the `app` container to view its environment variables: 22 | 23 | ```bash 24 | docker exec app env 25 | 26 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 27 | HOSTNAME=b3ac14fa53fc 28 | APP_ENV=test 29 | APP_CONNECTION=remote 30 | APP_DEBUG=false 31 | MYSQL_USER=app 32 | MYSQL_PASSWORD=password 33 | MYSQL_DATABASE=appdb 34 | EXTRAS=foobar 35 | HOME=/root 36 | ``` 37 | 38 | Run the `env` command in the `db` container to view its environment variables: 39 | 40 | ```bash 41 | docker exec db env 42 | 43 | PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin 44 | HOSTNAME=57b646da8263 45 | APP_ENV=test 46 | APP_CONNECTION=remote 47 | APP_DEBUG=true 48 | MYSQL_USER=app 49 | MYSQL_PASSWORD=password 50 | MYSQL_DATABASE=appdb 51 | HOME=/root 52 | ``` 53 | 54 | Stop the containers: 55 | 56 | ```bash 57 | docker-compose down 58 | ``` 59 | 60 | ## Resources 61 | 62 | - https://docs.docker.com/compose/environment-variables/ -------------------------------------------------------------------------------- /docker/docker-compose-env-file/config/.env: -------------------------------------------------------------------------------- 1 | APP_ENV=test 2 | APP_CONNECTION=remote 3 | APP_DEBUG=true 4 | MYSQL_USER=app 5 | MYSQL_PASSWORD=password 6 | MYSQL_DATABASE=appdb 7 | -------------------------------------------------------------------------------- /docker/docker-compose-env-file/config/.env_extras: -------------------------------------------------------------------------------- 1 | EXTRAS=foobar 2 | -------------------------------------------------------------------------------- /docker/docker-compose-env-file/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | services: 4 | app: 5 | image: busybox 6 | container_name: app 7 | command: tail -f /dev/null 8 |
env_file: 9 | - config/.env 10 | - config/.env_extras 11 | environment: 12 | - APP_DEBUG=false 13 | depends_on: 14 | - db 15 | 16 | db: 17 | image: busybox 18 | container_name: db 19 | command: tail -f /dev/null 20 | env_file: 21 | - config/.env 22 | -------------------------------------------------------------------------------- /docker/docker-compose-extends/Dockerfile: -------------------------------------------------------------------------------- 1 | # base 2 | FROM python:3.8-alpine 3 | 4 | ENV PYTHONUNBUFFERED=1 5 | ENV PYTHONDONTWRITEBYTECODE=1 6 | 7 | # dependencies 8 | #RUN apk --no-cache add gcc make g++ zlib-dev 9 | ADD requirements.txt /deps/requirements.txt 10 | RUN pip install -r /deps/requirements.txt 11 | 12 | # add the code 13 | WORKDIR /src 14 | ADD boot.sh /src/boot.sh 15 | RUN chmod +x /src/boot.sh 16 | 17 | ADD main.py /src/main.py 18 | 19 | # command 20 | CMD ["/src/boot.sh"] 21 | 22 | -------------------------------------------------------------------------------- /docker/docker-compose-extends/README.md: -------------------------------------------------------------------------------- 1 | # Docker Compose Extends Quick-Start 2 | 3 | This quick-start shows how docker-compose's `extends` option enables sharing of common configurations among different files. 4 | 5 | ## Use Case 6 | 7 | There might be a couple of services that follow the same build pattern, or build file, or perhaps just configuration options. 8 | 9 | As one example, we can define what our build context looks like in our `common-services.yml`, e.g.: 10 | 11 | ```yaml 12 | base: 13 | build: 14 | context: . 15 | dockerfile: base.Dockerfile 16 | ``` 17 | 18 | And in our main `docker-compose.yml` we reference the service and compose file to extend our compose: 19 | 20 | ```yaml 21 | webapp: 22 | container_name: webapp 23 | extends: 24 | file: common-services.yml 25 | service: base 26 | ``` 27 | 28 | There we can also provide overrides, or add just the config that we want. 29 | 30 | ## Example 31 | 32 | This will deploy a python flask and redis application: 33 | 34 | ```bash 35 | docker-compose up --build -d 36 | ``` 37 | 38 | We can inspect the `webapp` container to see if it has the labels and logging config from the `base` service from the `common-services.yml`: 39 | 40 | ```bash 41 | docker inspect webapp 42 | ``` 43 | 44 | Which returns something like this: 45 | 46 | ```json 47 | [ 48 | { 49 | "Id": "825ea8eeac63c2ee33a334c301984d507d24f7f672be32801ed2d1b623ef4c8b", 50 | "HostConfig": { 51 | "LogConfig": { 52 | "Type": "json-file", 53 | "Config": { 54 | "max-size": "1m" 55 | } 56 | }, 57 | "PortBindings": { 58 | "8000/tcp": [ 59 | { 60 | "HostIp": "", 61 | "HostPort": "8000" 62 | } 63 | ] 64 | }, 65 | }, 66 | "Config": { 67 | "Env": [ 68 | "REDIS_HOST=redis", 69 | "PYTHONDONTWRITEBYTECODE=1" 70 | ], 71 | "Cmd": [ 72 | "/src/boot.sh" 73 | ], 74 | "Labels": { 75 | "com.docker.compose.project": "docker-extends", 76 | "com.docker.compose.project.config_files": "docker-compose.yml", 77 | "com.docker.compose.service": "webapp", 78 | "project.name": "quick-starts" 79 | } 80 | }, 81 | ... 82 | } 83 | } 84 | ] 85 | ``` 86 | 87 | So we can confirm that the config was merged.
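Rather than scanning the full `docker inspect` output, you can filter for just the merged fields with a Go template, for example:

```bash
# print only the labels and the logging config that were merged in
# from the base service in common-services.yml
docker inspect webapp --format '{{ json .Config.Labels }}'
docker inspect webapp --format '{{ json .HostConfig.LogConfig }}'
```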
88 | 89 | ## Access the Application 90 | 91 | Accessing the application: 92 | 93 | ```bash 94 | curl http://localhost:8000 95 | ``` 96 | 97 | The first request will respond with: 98 | 99 | ``` 100 | This quick-start application has been accessed only 1 time 101 | ``` 102 | 103 | And the second request will respond with: 104 | 105 | ``` 106 | This quick-start application has been accessed 2 times 107 | ``` 108 | 109 | ## Resources 110 | 111 | - https://github.com/Yelp/docker-compose/blob/master/docs/extends.md 112 | -------------------------------------------------------------------------------- /docker/docker-compose-extends/base.Dockerfile: -------------------------------------------------------------------------------- 1 | # base 2 | FROM python:3.8-alpine 3 | 4 | ENV PYTHONUNBUFFERED=1 5 | ENV PYTHONDONTWRITEBYTECODE=1 6 | 7 | # dependencies 8 | #RUN apk --no-cache add gcc make g++ zlib-dev 9 | ADD requirements.txt /deps/requirements.txt 10 | RUN pip install -r /deps/requirements.txt 11 | 12 | # add the code 13 | WORKDIR /src 14 | ADD boot.sh /src/boot.sh 15 | RUN chmod +x /src/boot.sh 16 | 17 | ADD main.py /src/main.py 18 | 19 | # command 20 | #CMD ["/src/boot.sh"] 21 | 22 | -------------------------------------------------------------------------------- /docker/docker-compose-extends/boot.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | gunicorn main:app \ 3 | --workers 2 \ 4 | --threads 2 \ 5 | --bind 0.0.0.0:8000 \ 6 | --capture-output \ 7 | --access-logfile '-' \ 8 | --error-logfile '-' 9 | -------------------------------------------------------------------------------- /docker/docker-compose-extends/common-services.yml: -------------------------------------------------------------------------------- 1 | services: 2 | base: 3 | build: 4 | context: . 
5 | dockerfile: base.Dockerfile 6 | labels: 7 | project.name: "quick-starts" 8 | logging: 9 | driver: "json-file" 10 | options: 11 | max-size: "1m" 12 | 13 | logs: 14 | labels: 15 | project.name: "quick-starts" 16 | logging: 17 | driver: "json-file" 18 | options: 19 | max-size: "1m" 20 | 21 | -------------------------------------------------------------------------------- /docker/docker-compose-extends/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | services: 4 | webapp: 5 | container_name: webapp 6 | extends: 7 | file: common-services.yml 8 | service: base 9 | environment: 10 | - REDIS_HOST=redis 11 | command: /src/boot.sh 12 | ports: 13 | - 8000:8000 14 | depends_on: 15 | - redis 16 | 17 | redis: 18 | image: redis:6 19 | container_name: redis 20 | extends: 21 | file: common-services.yml 22 | service: logs 23 | 24 | -------------------------------------------------------------------------------- /docker/docker-compose-extends/main.py: -------------------------------------------------------------------------------- 1 | from flask import Flask, g, request 2 | from redis import Redis 3 | import datetime 4 | import time 5 | import logging 6 | from logging.config import dictConfig 7 | import os 8 | 9 | dictConfig({ 10 | 'version': 1, 11 | 'formatters': { 12 | 'default': { 13 | 'format': '%(asctime)s.%(msecs)03d, %(levelname)s, %(message)s', 14 | 'datefmt': '%Y-%m-%dT%H:%M:%S' 15 | }, 16 | }, 17 | 'handlers': { 18 | 'stdout': { 19 | 'class': "logging.StreamHandler", 20 | 'stream': 'ext://sys.stdout', 21 | 'formatter': 'default' 22 | } 23 | }, 24 | 'root': { 25 | 'handlers': ['stdout'], 26 | 'level': os.getenv('APP_LOG_LEVEL', 'INFO')}, 27 | }) 28 | 29 | app = Flask(__name__) 30 | redis = Redis(host=os.environ['REDIS_HOST'], port=6379) 31 | 32 | @app.before_request 33 | def start_timer(): 34 | g.start = time.time() 35 | 36 | @app.after_request 37 | def log_request(response): 38 | if ( 39 | request.path == "/favicon.ico" 40 | or request.path.startswith("/static") 41 | ): 42 | return response 43 | 44 | now = time.time() 45 | duration = round(now - g.start, 6) 46 | ip_address = request.headers.get("X-Forwarded-For", request.remote_addr) 47 | host = request.host.split(":", 1)[0] 48 | params = dict(request.args) 49 | request_id = request.headers.get("X-Request-ID", "") 50 | log_params = { 51 | "method": request.method, 52 | "path": request.path, 53 | "status": response.status_code, 54 | "duration": duration, 55 | "ip": ip_address, 56 | "host": host, 57 | "params": params, 58 | "request_id": request_id 59 | } 60 | app.logger.info(log_params) 61 | return response 62 | 63 | @app.route('/', methods=['GET']) 64 | def hello(): 65 | res = redis.incr('hits') 66 | if res == 1: 67 | return f'This quick-start application has been accessed only {res} time\n' 68 | else: 69 | return f'This quick-start application has been accessed {res} times\n' 70 | 71 | if __name__ == "__main__": 72 | app.run(debug=False) 73 | -------------------------------------------------------------------------------- /docker/docker-compose-extends/requirements.txt: -------------------------------------------------------------------------------- 1 | flask<2.0 2 | redis 3 | markupsafe==2.0.1 4 | gunicorn==20.0.4 5 | -------------------------------------------------------------------------------- /docker/ethereum/README.md: -------------------------------------------------------------------------------- 1 | # ethereum quick-start 2 | 3 | This spins up geth and 
prysm in the sepolia testnet. 4 | 5 | ## Usage 6 | 7 | Create the shared secret: 8 | 9 | ```bash 10 | openssl rand -hex 32 | tr -d "\n" > "jwt.hex" 11 | ``` 12 | 13 | Create the wallet password for the validator: 14 | 15 | ```bash 16 | mkdir -p data/prysm-wallet 17 | openssl rand -hex 32 | tr -d "\n" > data/prysm-wallet/password.txt 18 | ``` 19 | 20 | Create the wallet: 21 | 22 | ```bash 23 | docker run -it -v $PWD/data/prysm-wallet:/wallet gcr.io/prysmaticlabs/prysm/validator:stable \ 24 | wallet create --wallet-dir=/wallet --keymanager-kind=derived \ 25 | --wallet-password-file=/wallet/password.txt \ 26 | --accept-terms-of-use --sepolia 27 | ``` 28 | 29 | Start the containers: 30 | 31 | ```bash 32 | docker-compose up -d 33 | ``` 34 | 35 | View the logs: 36 | 37 | ```bash 38 | docker-compose logs -f 39 | ``` 40 | 41 | View the sync progress: 42 | 43 | ```bash 44 | curl -s -XPOST -H 'Content-Type: application/json' --data '{"jsonrpc":"2.0","method":"eth_syncing","params":[],"id":1}' http://127.0.0.1:8545 45 | ``` 46 | 47 | View the current block number: 48 | 49 | ```bash 50 | curl -s -XPOST -H "Content-type: application/json" -d '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' localhost:8545 | jq -r '.result' | tr -d '\n' | xargs -0 printf "%d" 51 | ``` 52 | 53 | ## Resources 54 | 55 | - [Go-Eth](https://geth.ethereum.org/docs) 56 | - [Prysm](https://docs.prylabs.network/docs/getting-started) 57 | - [Ethereum JSON-RPC API](https://ethereum.org/en/developers/docs/apis/json-rpc) 58 | -------------------------------------------------------------------------------- /docker/ethereum/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | geth: 5 | image: ethereum/client-go:v1.11.6 6 | container_name: geth 7 | restart: always 8 | stop_grace_period: 1m 9 | ports: 10 | - 8545:8545 11 | - 8546:8546 12 | - 9191:9191 13 | - 30303:30303 14 | - 30304:30304 15 | volumes: 16 | - ./data/geth:/data 17 | - ./jwt.hex:/jwt/jwt.hex:ro 18 | command: > 19 | --sepolia 20 | --syncmode light 21 | --datadir=/data 22 | --http 23 | --http.api eth,net,web3,txpool,engine,admin 24 | --http.addr 0.0.0.0 25 | --http.port 8545 26 | --http.corsdomain '*' 27 | --ws 28 | --ws.api eth,web3,net 29 | --authrpc.addr 0.0.0.0 30 | --authrpc.port 8551 31 | --authrpc.jwtsecret /jwt/jwt.hex 32 | --authrpc.vhosts '*' 33 | --metrics 34 | --metrics.addr=0.0.0.0 35 | --metrics.port=9191 36 | networks: 37 | - ethnet 38 | 39 | prysm-beacon: 40 | image: gcr.io/prysmaticlabs/prysm/beacon-chain:stable 41 | container_name: prysm-beacon 42 | restart: always 43 | volumes: 44 | - ./data/prysm-beacon:/data 45 | - ./jwt.hex:/jwt/jwt.hex:ro 46 | command: > 47 | --sepolia 48 | --datadir=/data 49 | --rpc-host=0.0.0.0 50 | --execution-endpoint=http://geth:8551 51 | --checkpoint-sync-url=https://checkpoint-sync.sepolia.ethpandaops.io 52 | --genesis-beacon-api-url=https://checkpoint-sync.sepolia.ethpandaops.io 53 | --jwt-secret=/jwt/jwt.hex 54 | --genesis-state=/genesis.ssz 55 | --suggested-fee-recipient=0x9A675eDF85532D9f3924Ff18251c5fEcB7398AC7 56 | --accept-terms-of-use 57 | networks: 58 | - ethnet 59 | 60 | prysm-validator: 61 | image: gcr.io/prysmaticlabs/prysm/validator:stable 62 | container_name: prysm-validator 63 | restart: unless-stopped 64 | depends_on: 65 | - prysm-beacon 66 | ports: 67 | - 4242:4242 68 | volumes: 69 | - ./data/prysm-wallet:/wallet 70 | - ./data/prysm-validator:/validatorDB 71 | command: 72 | - 
--beacon-rpc-provider=prysm-beacon:4000 73 | - --monitoring-host=0.0.0.0 74 | - --accept-terms-of-use 75 | - --suggested-fee-recipient=0x9A675eDF85532D9f3924Ff18251c5fEcB7398AC7 76 | - --wallet-dir=/wallet 77 | - --wallet-password-file=/wallet/password.txt 78 | - --datadir=/validatorDB 79 | - --sepolia 80 | networks: 81 | - ethnet 82 | 83 | networks: 84 | ethnet: 85 | name: ethnet 86 | -------------------------------------------------------------------------------- /docker/flatnotes/Makefile: -------------------------------------------------------------------------------- 1 | # Thanks: https://gist.github.com/mpneuried/0594963ad38e68917ef189b4e6a269db 2 | .PHONY: help 3 | 4 | help: ## This help. 5 | @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) 6 | 7 | .DEFAULT_GOAL := help 8 | 9 | # DOCKER TASKS 10 | up: ## Runs the containers in detached mode 11 | docker-compose up -d --build 12 | 13 | clean: ## Stops and removes all containers 14 | docker-compose down 15 | 16 | logs: ## View the logs from the containers 17 | docker-compose logs -f 18 | 19 | open: ## Opens tabs in container 20 | open http://localhost:8080/ 21 | -------------------------------------------------------------------------------- /docker/flatnotes/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | flatnotes: 5 | image: dullage/flatnotes:v3 6 | container_name: flatnotes 7 | restart: unless-stopped 8 | environment: 9 | PUID: 1000 10 | GUID: 1000 11 | FLATNOTES_AUTH_TYPE: "password" 12 | FLATNOTES_USERNAME: "admin" 13 | FLATNOTES_PASSWORD: "password" 14 | FLATNOTES_SECRET_KEY: "FmbqYkE3R1Sq8SDNj14dGCGL+R+PQjeC" 15 | volumes: 16 | - flatnotes-data-volume:/data 17 | - flatnotes-index-volume:/data/.flatnotes 18 | ports: 19 | - 8080:8080 20 | logging: 21 | driver: "json-file" 22 | options: 23 | max-size: "1m" 24 | max-file: "1" 25 | 26 | volumes: 27 | flatnotes-data-volume: 28 | driver: local 29 | flatnotes-index-volume: 30 | driver: local 31 | -------------------------------------------------------------------------------- /docker/gitea/README.md: -------------------------------------------------------------------------------- 1 | # gitea-quick-start 2 | 3 | This is a quick-start on docker for gitea and gitea actions. 4 | 5 | ## Usage 6 | 7 | Boot the stack: 8 | 9 | ```bash 10 | docker compose up -d 11 | ``` 12 | 13 | Generate a runner token: 14 | 15 | ```bash 16 | docker exec -it --user git gitea gitea --config /data/gitea/conf/app.ini actions generate-runner-token 17 | ``` 18 | 19 | Then you can define the value as an environment variable `GITEA_RUNNER_REGISTRATION_TOKEN` for your gitea-runner container. 
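As a minimal sketch of that flow (the compose file ships with `GITEA_RUNNER_REGISTRATION_TOKEN: ""`, so paste the generated token into that value first), re-create only the runner so that it registers against the Gitea instance:

```bash
# after setting the token in docker-compose.yaml, re-create the runner
docker compose up -d --force-recreate gitea-runner
# confirm in the logs that the runner registered successfully
docker logs gitea-runner
```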
20 | 21 | The [quickstart](https://docs.gitea.com/usage/actions/quickstart#use-actions) docs show that you need to enable Actions for your repository: 22 | 23 | - Settings 24 | - Repository 25 | - Advanced Settings 26 | - Then enable "Actions: Enable Repository Actions" 27 | 28 | Commit a workflow yaml file `.gitea/workflows/default.yml`: 29 | 30 | ```yaml 31 | name: quickstart gitea actions 32 | run-name: ${{ gitea.actor }} is testing out actions 33 | on: [push] 34 | jobs: 35 | quickstart-actions: 36 | container: 37 | image: alpine:latest 38 | runs-on: quickstart 39 | steps: 40 | - name: checks out repository 41 | uses: actions/checkout@v4 42 | - name: list files in the repository 43 | run: | 44 | ls ${{ github.workspace }} 45 | ``` 46 | 47 | Screenshot: 48 | 49 | ![image](https://github.com/ruanbekker/quick-starts/assets/567298/b7c6f34d-27fe-4619-81fd-dfbf8e11e53c) 50 | 51 | 52 | ## Resources 53 | 54 | - [gitea actions quickstart](https://docs.gitea.com/usage/actions/quickstart) 55 | - [gitea config - actions](https://docs.gitea.com/administration/config-cheat-sheet#actions-actions) 56 | - [gitea actions - act runner](https://docs.gitea.com/usage/actions/act-runner) 57 | - [gitea actions kubernetes example](https://gitea.com/gitea/helm-chart/issues/459#issuecomment-750723) 58 | 59 | 60 | -------------------------------------------------------------------------------- /docker/gitea/configs/gitea-runner/config.yaml: -------------------------------------------------------------------------------- 1 | # Example configuration file, it's safe to copy this as the default config file without any modification. 2 | 3 | # You don't have to copy this file to your instance, 4 | # just run `./act_runner generate-config > config.yaml` to generate a config file. 5 | 6 | log: 7 | # The level of logging, can be trace, debug, info, warn, error, fatal 8 | level: info 9 | 10 | runner: 11 | # Where to store the registration result. 12 | file: .runner 13 | # Execute how many tasks concurrently at the same time. 14 | capacity: 1 15 | # Extra environment variables to run jobs. 16 | envs: 17 | A_TEST_ENV_NAME_1: a_test_env_value_1 18 | A_TEST_ENV_NAME_2: a_test_env_value_2 19 | # Extra environment variables to run jobs from a file. 20 | # It will be ignored if it's empty or the file doesn't exist. 21 | env_file: .env 22 | # The timeout for a job to be finished. 23 | # Please note that the Gitea instance also has a timeout (3h by default) for the job. 24 | # So the job could be stopped by the Gitea instance if it's timeout is shorter than this. 25 | timeout: 3h 26 | # Whether skip verifying the TLS certificate of the Gitea instance. 27 | insecure: false 28 | # The timeout for fetching the job from the Gitea instance. 29 | fetch_timeout: 5s 30 | # The interval for fetching the job from the Gitea instance. 31 | fetch_interval: 2s 32 | # The labels of a runner are used to determine which jobs the runner can run, and how to run them. 33 | # Like: ["macos-arm64:host", "ubuntu-latest:docker://node:16-bullseye", "ubuntu-22.04:docker://node:16-bullseye"] 34 | # If it's empty when registering, it will ask for inputting labels. 35 | # If it's empty when execute `deamon`, will use labels in `.runner` file. 36 | labels: ['quickstart'] 37 | 38 | cache: 39 | # Enable cache server to use actions/cache. 40 | enabled: true 41 | # The directory to store the cache data. 42 | # If it's empty, the cache data will be stored in $HOME/.cache/actcache. 43 | dir: "" 44 | # The host of the cache server.
45 | # It's not for the address to listen, but the address to connect from job containers. 46 | # So 0.0.0.0 is a bad choice, leave it empty to detect automatically. 47 | host: "" 48 | # The port of the cache server. 49 | # 0 means to use a random available port. 50 | port: 0 51 | # The external cache server URL. Valid only when enable is true. 52 | # If it's specified, act_runner will use this URL as the ACTIONS_CACHE_URL rather than start a server by itself. 53 | # The URL should generally end with "/". 54 | external_server: "" 55 | 56 | container: 57 | # Specifies the network to which the container will connect. 58 | # Could be host, bridge or the name of a custom network. 59 | # If it's empty, act_runner will create a network automatically. 60 | network: "" 61 | # Whether to use privileged mode or not when launching task containers (privileged mode is required for Docker-in-Docker). 62 | privileged: false 63 | # And other options to be used when the container is started (eg, --add-host=my.gitea.url:host-gateway). 64 | options: 65 | # The parent directory of a job's working directory. 66 | # If it's empty, /workspace will be used. 67 | workdir_parent: 68 | # Volumes (including bind mounts) can be mounted to containers. Glob syntax is supported, see https://github.com/gobwas/glob 69 | # You can specify multiple volumes. If the sequence is empty, no volumes can be mounted. 70 | # For example, if you only allow containers to mount the `data` volume and all the json files in `/src`, you should change the config to: 71 | # valid_volumes: 72 | # - data 73 | # - /src/*.json 74 | # If you want to allow any volume, please use the following configuration: 75 | # valid_volumes: 76 | # - '**' 77 | valid_volumes: [] 78 | # overrides the docker client host with the specified one. 79 | # If it's empty, act_runner will find an available docker host automatically. 80 | # If it's "-", act_runner will find an available docker host automatically, but the docker host won't be mounted to the job containers and service containers. 81 | # If it's not empty or "-", the specified docker host will be used. An error will be returned if it doesn't work. 82 | docker_host: "" 83 | # Pull docker image(s) even if already present 84 | force_pull: false 85 | 86 | host: 87 | # The parent directory of a job's working directory. 88 | # If it's empty, $HOME/.cache/act/ will be used. 
89 | workdir_parent: 90 | -------------------------------------------------------------------------------- /docker/gitea/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | gitea: 5 | image: gitea/gitea:1.21.1 6 | container_name: gitea 7 | restart: unless-stopped 8 | environment: 9 | - USER_UID=1000 10 | - USER_GID=1000 11 | - GITEA__database__DB_TYPE=postgres 12 | - GITEA__database__HOST=gitea-db:5432 13 | - GITEA__database__NAME=gitea 14 | - GITEA__database__USER=gitea 15 | - GITEA__database__PASSWD=gitea 16 | - GITEA__mailer__ENABLED=true 17 | - GITEA__mailer__FROM=gitea@localhost 18 | - GITEA__mailer__PROTOCOL=smtp 19 | - GITEA__mailer__SMTP_ADDR=gitea-mail:1025 20 | - GITEA__mailer__USER= 21 | - GITEA__mailer__PASSWD= 22 | # docker run -it --rm gitea/gitea:1 gitea generate secret SECRET_KEY 23 | - GITEA__security__SECRET_KEY=replace-me 24 | # docker run -it --rm gitea/gitea:1 gitea generate secret INTERNAL_TOKEN 25 | - GITEA__security__INTERNAL_TOKEN=replace-me 26 | - GITEA__actions__enabled=true 27 | - GITEA__server__local_root_url=http://gitea:3000 28 | # https://docs.gitea.com/installation/install-with-docker 29 | networks: 30 | - quick-starts 31 | depends_on: 32 | gitea-db: 33 | condition: service_healthy 34 | gitea-mail: 35 | condition: service_started 36 | volumes: 37 | - gitea-data-volume:/data 38 | - /etc/timezone:/etc/timezone:ro 39 | - /etc/localtime:/etc/localtime:ro 40 | ports: 41 | - 3000:3000 42 | - 222:22 43 | logging: 44 | driver: "json-file" 45 | options: 46 | max-size: "1m" 47 | max-file: "1" 48 | 49 | gitea-runner: 50 | image: gitea/act_runner:nightly 51 | container_name: gitea-runner 52 | restart: unless-stopped 53 | environment: 54 | # docker run --entrypoint="" --rm -it gitea/act_runner:latest act_runner generate-config > configs/gitea-runner/config.yaml 55 | CONFIG_FILE: /config.yaml 56 | # gitea --config /etc/gitea/app.ini actions generate-runner-token 57 | # docker exec -it --user git gitea gitea --config /data/gitea/conf/app.ini actions generate-runner-token 58 | GITEA_RUNNER_REGISTRATION_TOKEN: "" 59 | GITEA_RUNNER_NAME: "default-runner" 60 | GITEA_RUNNER_LABELS: "quickstart" 61 | GITEA_INSTANCE_URL: "http://gitea:3000" 62 | networks: 63 | - quick-starts 64 | volumes: 65 | - ./configs/gitea-runner/config.yaml:/config.yaml 66 | - gitea-runner-data-volume:/data 67 | - /var/run/docker.sock:/var/run/docker.sock 68 | logging: 69 | driver: "json-file" 70 | options: 71 | max-size: "1m" 72 | max-file: "1" 73 | 74 | gitea-db: 75 | image: postgres:14 76 | container_name: gitea-db 77 | restart: unless-stopped 78 | environment: 79 | - POSTGRES_USER=gitea 80 | - POSTGRES_PASSWORD=gitea 81 | - POSTGRES_DB=gitea 82 | networks: 83 | - quick-starts 84 | volumes: 85 | - gitea-db-data-volume:/var/lib/postgresql/data 86 | healthcheck: 87 | test: ["CMD-SHELL", "pg_isready -U gitea"] 88 | interval: 10s 89 | timeout: 5s 90 | retries: 5 91 | logging: 92 | driver: "json-file" 93 | options: 94 | max-size: "1m" 95 | max-file: "1" 96 | 97 | gitea-mail: 98 | image: mailhog/mailhog:v1.0.1 99 | container_name: gitea-mail 100 | ports: 101 | - 1025:1025 # SMTP 102 | - 8025:8025 # UI 103 | networks: 104 | - quick-starts 105 | logging: 106 | driver: "json-file" 107 | options: 108 | max-size: "1m" 109 | max-file: "1" 110 | 111 | volumes: 112 | gitea-data-volume: 113 | driver: local 114 | gitea-runner-data-volume: 115 | driver: local 116 | gitea-db-data-volume: 117 | driver: local 118 | 119 | networks: 120 
| quick-starts: 121 | name: quick-starts 122 | -------------------------------------------------------------------------------- /docker/kafka-cluster-metrics/README.md: -------------------------------------------------------------------------------- 1 | # kafka-metrics in grafana 2 | 3 | This will boot up a 3 node kafka cluster with grafana, prometheus and the kafka-exporter 4 | 5 | ## Quick Start 6 | 7 | Boot the stack: 8 | 9 | ```bash 10 | docker-compose up --build -d 11 | ``` 12 | 13 | Tail the producer logs: 14 | 15 | ```bash 16 | docker logs -f producer 17 | ``` 18 | 19 | Tail the consumer logs: 20 | 21 | ```bash 22 | docker logs -f consumer 23 | ``` 24 | 25 | Access Grafana: 26 | - http://localhost:3000 27 | -------------------------------------------------------------------------------- /docker/kafka-cluster-metrics/configs/grafana/provisioning/dashboards.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | providers: 4 | - name: 'dashboards' 5 | orgId: 1 6 | type: file 7 | disableDeletion: false 8 | updateIntervalSeconds: 60 9 | allowUiUpdates: false 10 | options: 11 | path: /var/lib/grafana/dashboards 12 | foldersFromFilesStructure: true 13 | -------------------------------------------------------------------------------- /docker/kafka-cluster-metrics/configs/grafana/provisioning/datasources.yml: -------------------------------------------------------------------------------- 1 | apiVersion: 1 2 | 3 | datasources: 4 | - name: Prometheus 5 | type: prometheus 6 | access: proxy 7 | orgId: 1 8 | url: http://prometheus:9090 9 | version: 1 10 | editable: true 11 | isDefault: false 12 | -------------------------------------------------------------------------------- /docker/kafka-cluster-metrics/configs/prometheus/prometheus.yml: -------------------------------------------------------------------------------- 1 | global: 2 | scrape_interval: 5s 3 | external_labels: 4 | namespace: local 5 | 6 | scrape_configs: 7 | - job_name: prometheus 8 | static_configs: 9 | - targets: [ 'localhost:9090' ] 10 | labels: 11 | container: 'prometheus' 12 | 13 | - job_name: kafka-exporter 14 | static_configs: 15 | - targets: [ 'kafka-exporter:9308' ] 16 | labels: 17 | container: 'kafka-exporter' 18 | 19 | -------------------------------------------------------------------------------- /docker/kafka-cluster-metrics/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3.9" 3 | 4 | x-logging: 5 | &default-logging 6 | driver: "json-file" 7 | options: 8 | max-size: "1m" 9 | max-file: "1" 10 | 11 | services: 12 | zookeeper: 13 | platform: linux/amd64 14 | image: confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-7.4.0} 15 | container_name: zookeeper 16 | restart: unless-stopped 17 | ports: 18 | - '32181:32181' 19 | - '2888:2888' 20 | - '3888:3888' 21 | environment: 22 | ZOOKEEPER_SERVER_ID: 1 23 | ZOOKEEPER_CLIENT_PORT: 32181 24 | ZOOKEEPER_TICK_TIME: 2000 25 | ZOOKEEPER_INIT_LIMIT: 5 26 | ZOOKEEPER_SYNC_LIMIT: 2 27 | ZOOKEEPER_SERVERS: zookeeper:2888:3888 28 | healthcheck: 29 | test: echo stat | nc localhost 32181 30 | interval: 10s 31 | timeout: 10s 32 | retries: 3 33 | networks: 34 | - kafka 35 | logging: *default-logging 36 | 37 | broker-1: 38 | platform: linux/amd64 39 | image: confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-7.4.0} 40 | container_name: broker-1 41 | restart: unless-stopped 42 | ports: 43 | - '9091:9091' 44 | depends_on: 45 | - zookeeper 46 | environment: 
47 | KAFKA_BROKER_ID: 1 48 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181 49 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT 50 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 51 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://broker-1:29091,EXTERNAL://localhost:9091 52 | KAFKA_DEFAULT_REPLICATION_FACTOR: 1 53 | KAFKA_NUM_PARTITIONS: 3 54 | #KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 55 | #KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 56 | #KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 57 | #KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 58 | KAFKA_JMX_PORT: 19101 59 | KAFKA_JMX_HOSTNAME: localhost 60 | healthcheck: 61 | test: nc -vz localhost 9091 62 | interval: 10s 63 | timeout: 10s 64 | retries: 3 65 | networks: 66 | - kafka 67 | logging: *default-logging 68 | 69 | broker-2: 70 | platform: linux/amd64 71 | image: confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-7.4.0} 72 | container_name: broker-2 73 | restart: unless-stopped 74 | ports: 75 | - '9092:9092' 76 | depends_on: 77 | - zookeeper 78 | environment: 79 | KAFKA_BROKER_ID: 2 80 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181 81 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT 82 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 83 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://broker-2:29092,EXTERNAL://localhost:9092 84 | KAFKA_DEFAULT_REPLICATION_FACTOR: 1 85 | KAFKA_NUM_PARTITIONS: 3 86 | #KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 87 | #KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 88 | #KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 89 | #KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 90 | KAFKA_JMX_PORT: 19102 91 | KAFKA_JMX_HOSTNAME: localhost 92 | healthcheck: 93 | test: nc -vz localhost 9092 94 | interval: 10s 95 | timeout: 10s 96 | retries: 3 97 | networks: 98 | - kafka 99 | logging: *default-logging 100 | 101 | broker-3: 102 | platform: linux/amd64 103 | image: confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-7.4.0} 104 | container_name: broker-3 105 | restart: unless-stopped 106 | ports: 107 | - '9093:9093' 108 | depends_on: 109 | - zookeeper 110 | environment: 111 | KAFKA_BROKER_ID: 3 112 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181 113 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT 114 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 115 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://broker-3:29093,EXTERNAL://localhost:9093 116 | KAFKA_DEFAULT_REPLICATION_FACTOR: 1 117 | KAFKA_NUM_PARTITIONS: 3 118 | #KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 119 | #KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 120 | #KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 121 | #KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 122 | KAFKA_JMX_PORT: 19103 123 | KAFKA_JMX_HOSTNAME: localhost 124 | healthcheck: 125 | test: nc -vz localhost 9093 126 | interval: 10s 127 | timeout: 10s 128 | retries: 3 129 | networks: 130 | - kafka 131 | logging: *default-logging 132 | 133 | producer: 134 | platform: linux/amd64 135 | container_name: producer 136 | build: ./python-client/ 137 | restart: always 138 | environment: 139 | - ACTION=producer 140 | - BOOTSTRAP_SERVERS=broker-1:29091,broker-2:29092,broker-3:29093 141 | - TOPIC=my-topic-2 142 | - PYTHONUNBUFFERED=1 # https://github.com/docker/compose/issues/4837#issuecomment-302765592 143 | networks: 144 | - kafka 145 | depends_on: 146 | - zookeeper 147 | - broker-1 148 | - broker-2 149 | - broker-3 150 | logging: *default-logging 151 | 152 | consumer: 153 | platform: linux/amd64 154 | container_name: consumer 155 | build: ./python-client/ 156 | restart: always 157 | environment: 158 
| - ACTION=consumer 159 | - BOOTSTRAP_SERVERS=broker-1:29091,broker-2:29092,broker-3:29093 160 | - TOPIC=my-topic-2 161 | - CONSUMER_GROUP=cg-group-id 162 | - PYTHONUNBUFFERED=1 # https://github.com/docker/compose/issues/4837#issuecomment-302765592 163 | networks: 164 | - kafka 165 | depends_on: 166 | - zookeeper 167 | - broker-1 168 | - broker-2 169 | - broker-3 170 | - producer 171 | logging: *default-logging 172 | 173 | prometheus: 174 | image: prom/prometheus:${PROMETHEUS_VERSION:-v2.42.0} 175 | container_name: prometheus 176 | restart: unless-stopped 177 | command: 178 | - '--config.file=/etc/prometheus/prometheus.yml' 179 | - '--log.level=error' 180 | - '--storage.tsdb.path=/prometheus' 181 | - '--storage.tsdb.retention.time=7d' 182 | - '--web.console.libraries=/usr/share/prometheus/console_libraries' 183 | - '--web.console.templates=/usr/share/prometheus/consoles' 184 | - '--web.external-url=http://localhost:9090' 185 | volumes: 186 | - ./configs/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml 187 | ports: 188 | - 9090:9090 189 | networks: 190 | - kafka 191 | logging: *default-logging 192 | 193 | grafana: 194 | image: grafana/grafana:${GRAFANA_VERSION:-9.4.3} 195 | container_name: grafana 196 | restart: unless-stopped 197 | environment: 198 | - GF_AUTH_ANONYMOUS_ENABLED=true 199 | - GF_AUTH_ANONYMOUS_ORG_ROLE=Admin 200 | - GF_USERS_DEFAULT_THEME=light 201 | - GF_LOG_MODE=console 202 | - GF_LOG_LEVEL=critical 203 | - GF_PANELS_ENABLE_ALPHA=true 204 | volumes: 205 | - ./configs/grafana/provisioning/dashboards.yml:/etc/grafana/provisioning/dashboards/provisioning-dashboards.yaml:ro 206 | - ./configs/grafana/provisioning/datasources.yml:/etc/grafana/provisioning/datasources/provisioning-datasources.yaml:ro 207 | - ./configs/grafana/dashboards/kafka-metrics.json:/var/lib/grafana/dashboards/kafka-metrics.json:ro 208 | depends_on: 209 | - prometheus 210 | ports: 211 | - 3000:3000 212 | networks: 213 | - kafka 214 | logging: *default-logging 215 | 216 | kafka-exporter: 217 | image: danielqsj/kafka-exporter:${EXPORTER_VERSION:-latest} 218 | container_name: kafka-exporter 219 | restart: unless-stopped 220 | depends_on: 221 | - broker-1 222 | - broker-2 223 | - broker-3 224 | ports: 225 | - 9308:9308 226 | command: 227 | - '--kafka.server=broker-1:29091' 228 | - '--kafka.server=broker-2:29092' 229 | - '--kafka.server=broker-3:29093' 230 | - '--zookeeper.server=zookeeper:32181' 231 | networks: 232 | - kafka 233 | logging: *default-logging 234 | 235 | networks: 236 | kafka: 237 | name: kafka 238 | -------------------------------------------------------------------------------- /docker/kafka-cluster-metrics/python-client/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM --platform=amd64 python:3.8-alpine 2 | RUN pip install kafka-python Faker 3 | ENV ACTION produce 4 | ENV BOOTSTRAP_SERVERS "broker-1:29091,broker-2:29092,broker-3:29093" 5 | ADD consume.py /src/consume.py 6 | ADD produce.py /src/produce.py 7 | ADD run.sh /src/run.sh 8 | CMD ["sh", "/src/run.sh", "$ACTION"] 9 | -------------------------------------------------------------------------------- /docker/kafka-cluster-metrics/python-client/consume.py: -------------------------------------------------------------------------------- 1 | # https://kafka-python.readthedocs.io/en/master/#kafkaproducer 2 | import json 3 | import os 4 | import time 5 | from datetime import datetime 6 | from kafka import KafkaConsumer 7 | 8 | TOPIC = os.environ.get('TOPIC', 'foobar') 9 | CONSUMER_GROUP 
= os.environ.get('CONSUMER_GROUP', 'cg-group-id') 10 | BOOTSTRAP_SERVERS = os.environ.get('BOOTSTRAP_SERVERS', 'localhost:9091,localhost:9092,localhost:9093').split(',') 11 | 12 | print('start') 13 | 14 | def setup_consumer(): 15 | try: 16 | consumer = KafkaConsumer( 17 | TOPIC, 18 | bootstrap_servers = BOOTSTRAP_SERVERS, 19 | auto_offset_reset = 'latest', # 'earliest' # https://stackoverflow.com/a/64829426 | https://github.com/confluentinc/confluent-kafka-python/issues/137 20 | enable_auto_commit = True, 21 | group_id = CONSUMER_GROUP, 22 | value_deserializer = lambda x : json.loads(x.decode('utf-8')) 23 | ) 24 | return consumer 25 | 26 | except Exception as e: 27 | # NoBrokersAvailable is raised while the brokers are still starting up 28 | print('waiting for brokers to become available: {}'.format(e)) 29 | return 'not-ready' 30 | 31 | def time_delta(received_time): 32 | now = datetime.now().strftime("%s") 33 | return int(now) - received_time 34 | 35 | print('starting consumer, checking if brokers are available') 36 | consumer = 'not-ready' 37 | 38 | while consumer == 'not-ready': 39 | print('brokers not available yet') 40 | time.sleep(5) 41 | consumer = setup_consumer() 42 | 43 | print('brokers are available and ready to consume messages') 44 | 45 | for message in consumer: 46 | try: 47 | print(message.value) 48 | #print(f"Received message at: {message.timestamp}") 49 | #now = datetime.now().strftime("%s") 50 | #print(f"Current timestamp {now}") 51 | except Exception as e: 52 | print('exception occurred in consumption') 53 | print(e) 54 | 55 | # Close the consumer 56 | print('closing consumer') 57 | consumer.close() 58 | 59 | -------------------------------------------------------------------------------- /docker/kafka-cluster-metrics/python-client/produce.py: -------------------------------------------------------------------------------- 1 | # https://kafka-python.readthedocs.io/en/master/#kafkaproducer 2 | import json 3 | import uuid 4 | import os 5 | import time 6 | import random 7 | from faker import Faker 8 | from kafka import KafkaProducer 9 | 10 | fake = Faker() 11 | 12 | TOPIC = os.environ.get('TOPIC', 'foobar') 13 | BOOTSTRAP_SERVERS = os.environ.get('BOOTSTRAP_SERVERS', 'localhost:9091,localhost:9092,localhost:9093').split(',') 14 | 15 | def create_transaction(counter): 16 | message = { 17 | 'sequence_id': counter, 18 | 'user_id': str(fake.random_int(min=20000, max=100000)), 19 | 'transaction_id': str(uuid.uuid4()), 20 | 'product_id': str(uuid.uuid4().fields[-1])[:5], 21 | 'address': str(fake.street_address() + ' | ' + fake.city() + ' | ' + fake.country_code()), 22 | 'signup_at': str(fake.date_time_this_month()), 23 | 'platform_id': str(random.choice(['Mobile', 'Laptop', 'Tablet'])), 24 | 'message': 'transaction made by userid {}'.format(str(uuid.uuid4().fields[-1])) 25 | } 26 | return message 27 | 28 | def setup_producer(): 29 | try: 30 | producer = KafkaProducer( 31 | bootstrap_servers=BOOTSTRAP_SERVERS, 32 | value_serializer=lambda v: json.dumps(v).encode('utf-8') 33 | ) 34 | return producer 35 | except Exception as e: 36 | # NoBrokersAvailable is raised while the brokers are still starting up 37 | print('waiting for brokers to become available: {}'.format(e)) 38 | return 'not-ready' 39 | 40 | print('setting up producer, checking if brokers are available') 41 | producer = 'not-ready' 42 | 43 | while producer == 'not-ready': 44 | print('brokers not available yet') 45 | time.sleep(5) 46 | producer = setup_producer() 47 | 48 | print('brokers are available and ready to produce messages') 49 | counter = 0 50 | 51 | while True: 52 | counter = counter + 1 53 | json_message =
create_transaction(counter) 54 | producer.send(TOPIC, json_message) 55 | print('message sent to kafka with sequence id of {}'.format(counter)) 56 | time.sleep(2) 57 | 58 | producer.close() -------------------------------------------------------------------------------- /docker/kafka-cluster-metrics/python-client/requirements.txt: -------------------------------------------------------------------------------- 1 | kafka-python 2 | Faker -------------------------------------------------------------------------------- /docker/kafka-cluster-metrics/python-client/run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -x 3 | 4 | if [ "$ACTION" = "producer" ] 5 | then 6 | echo "starting $ACTION" 7 | env | grep BOOTSTRAP 8 | python3 /src/produce.py 9 | fi 10 | 11 | if [ "$ACTION" = "consumer" ] 12 | then 13 | echo "starting $ACTION" 14 | env | grep BOOTSTRAP 15 | python3 /src/consume.py 16 | fi 17 | 18 | if [ "$ACTION" = "shell" ] 19 | then 20 | sleep 10000000 21 | fi 22 | -------------------------------------------------------------------------------- /docker/kafka-single-node/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | zookeeper: 5 | image: confluentinc/cp-zookeeper:7.3.0 6 | hostname: zookeeper 7 | container_name: zookeeper 8 | environment: 9 | ZOOKEEPER_SERVER_ID: 1 10 | ZOOKEEPER_CLIENT_PORT: 2181 11 | ZOOKEEPER_TICK_TIME: 2000 12 | networks: 13 | - kafka-network 14 | logging: 15 | driver: "json-file" 16 | options: 17 | max-size: "1m" 18 | max-file: "1" 19 | 20 | kafka-broker: 21 | image: confluentinc/cp-kafka:7.3.0 22 | hostname: kafka-broker 23 | container_name: kafka-broker 24 | ports: 25 | - 29092:29092 26 | - 9092:9092 27 | - 9101:9101 28 | depends_on: 29 | - zookeeper 30 | environment: 31 | KAFKA_BROKER_ID: 1 32 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181 33 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT 34 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 35 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka-broker:29092,EXTERNAL://localhost:9092 # internal (kafka-broker:29092) external (localhost:9092) 36 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 37 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 38 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 39 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 40 | KAFKA_JMX_PORT: 9101 41 | KAFKA_JMX_HOSTNAME: localhost 42 | networks: 43 | - kafka-network 44 | healthcheck: 45 | test: nc -vz localhost 9092 46 | interval: 10s 47 | timeout: 10s 48 | retries: 3 49 | logging: 50 | driver: "json-file" 51 | options: 52 | max-size: "1m" 53 | max-file: "1" 54 | 55 | networks: 56 | kafka-network: 57 | name: kafka-network 58 | -------------------------------------------------------------------------------- /docker/kafka-three-node-cluster-kraft/README.md: -------------------------------------------------------------------------------- 1 | # kafka-three-node-cluster-kraft 2 | 3 | Kafka cluster using docker compose with KRaft mode (Kafka Raft metadata mode), which eliminates the need for ZooKeeper.
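All three brokers in the compose file share one pre-generated, base64-encoded `CLUSTER_ID`. If you want to generate your own id, the repo's `gen_uuid.py` helper does it, and the `kafka-storage` tool that ships with Kafka is an alternative (a sketch, assuming the `confluentinc/cp-kafka:7.4.3` image used by this stack):

```bash
# generate a KRaft cluster id with the bundled helper script
python3 gen_uuid.py

# or generate one with kafka-storage from the cp-kafka image
docker run --rm --entrypoint kafka-storage confluentinc/cp-kafka:7.4.3 random-uuid
```

If you regenerate the id, update the `CLUSTER_ID` value on all three brokers.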
4 | 5 | ## Usage 6 | 7 | Boot the cluster with: 8 | 9 | ```bash 10 | docker-compose up -d 11 | ``` 12 | 13 | Run a client container: 14 | 15 | ```bash 16 | docker run -it --network=kafka -e BOOTSTRAP_SERVERS="kafka1:9092,kafka2:9092,kafka3:9092" --entrypoint="" confluentinc/cp-kafka:7.4.3 bash 17 | ``` 18 | 19 | Create a topic named `foobar`: 20 | 21 | ```bash 22 | kafka-topics --bootstrap-server "$BOOTSTRAP_SERVERS" --create --topic foobar --partitions 1 --replication-factor 3 --if-not-exists 23 | # Created topic foobar. 24 | ``` 25 | 26 | List topics: 27 | 28 | ```bash 29 | kafka-topics --list --bootstrap-server "$BOOTSTRAP_SERVERS" 30 | # foobar 31 | ``` 32 | 33 | Describe the topic named `foobar`: 34 | 35 | ```bash 36 | kafka-topics --describe --bootstrap-server "$BOOTSTRAP_SERVERS" --topic foobar 37 | # Topic: foobar TopicId: V5vltO3WTZi2p1vyUY7mIw PartitionCount: 1 ReplicationFactor: 3 Configs: 38 | # Topic: foobar Partition: 0 Leader: 1 Replicas: 1,2,3 Isr: 1,2,3 39 | ``` 40 | 41 | Produce a message to the topic: 42 | 43 | ```bash 44 | echo "test1" | kafka-console-producer --bootstrap-server "$BOOTSTRAP_SERVERS" --topic foobar 45 | ``` 46 | 47 | Consume messages from the topic: 48 | 49 | ```bash 50 | kafka-console-consumer --bootstrap-server "$BOOTSTRAP_SERVERS" --topic foobar --from-beginning 51 | # test1 52 | # Processed a total of 1 messages 53 | ``` 54 | -------------------------------------------------------------------------------- /docker/kafka-three-node-cluster-kraft/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3.9" 3 | 4 | services: 5 | kafka1: 6 | image: confluentinc/cp-kafka:7.4.3 7 | hostname: kafka1 8 | container_name: kafka1 9 | ports: 10 | - "9092:9092" 11 | environment: 12 | # python3 gen_uuid.py 13 | CLUSTER_ID: 0vUY1FN2TMaCijaY6MOatw 14 | KAFKA_BROKER_ID: 1 15 | KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 16 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka1:9092 17 | KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER 18 | KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka1:9093,2@kafka2:9093,3@kafka3:9093 19 | KAFKA_NODE_ID: 1 20 | KAFKA_PROCESS_ROLES: broker,controller 21 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 22 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 23 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 24 | KAFKA_LOG_DIRS: /var/lib/kafka/data 25 | volumes: 26 | - kafka1-data:/var/lib/kafka/data 27 | networks: 28 | - kafka 29 | logging: 30 | driver: "json-file" 31 | options: 32 | max-size: "1m" 33 | 34 | kafka2: 35 | image: confluentinc/cp-kafka:7.4.3 36 | hostname: kafka2 37 | container_name: kafka2 38 | ports: 39 | - "9093:9092" 40 | environment: 41 | CLUSTER_ID: 0vUY1FN2TMaCijaY6MOatw 42 | KAFKA_BROKER_ID: 2 43 | KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 44 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka2:9092 45 | KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER 46 | KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka1:9093,2@kafka2:9093,3@kafka3:9093 47 | KAFKA_NODE_ID: 2 48 | KAFKA_PROCESS_ROLES: broker,controller 49 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 50 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 51 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 52 | KAFKA_LOG_DIRS: /var/lib/kafka/data 53 | volumes: 54 | - kafka2-data:/var/lib/kafka/data 55 | networks: 56 | - kafka 57 | logging: 58 | driver: "json-file" 59 | options: 60 | max-size: "1m" 61 | 62 | kafka3: 63 | image: confluentinc/cp-kafka:7.4.3 64 | hostname: kafka3 65 | container_name: kafka3 66 
| ports: 67 | - "9094:9092" 68 | environment: 69 | CLUSTER_ID: 0vUY1FN2TMaCijaY6MOatw 70 | KAFKA_BROKER_ID: 3 71 | KAFKA_LISTENERS: PLAINTEXT://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093 72 | KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://kafka3:9092 73 | KAFKA_CONTROLLER_LISTENER_NAMES: CONTROLLER 74 | KAFKA_CONTROLLER_QUORUM_VOTERS: 1@kafka1:9093,2@kafka2:9093,3@kafka3:9093 75 | KAFKA_NODE_ID: 3 76 | KAFKA_PROCESS_ROLES: broker,controller 77 | KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 78 | KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 79 | KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 80 | KAFKA_LOG_DIRS: /var/lib/kafka/data 81 | volumes: 82 | - kafka3-data:/var/lib/kafka/data 83 | networks: 84 | - kafka 85 | logging: 86 | driver: "json-file" 87 | options: 88 | max-size: "1m" 89 | 90 | networks: 91 | kafka: 92 | name: kafka 93 | 94 | volumes: 95 | kafka1-data: {} 96 | kafka2-data: {} 97 | kafka3-data: {} 98 | -------------------------------------------------------------------------------- /docker/kafka-three-node-cluster-kraft/gen_uuid.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | import base64 3 | 4 | # Original UUID 5 | original_uuid = uuid.uuid4() 6 | 7 | # Convert to bytes 8 | uuid_bytes = original_uuid.bytes 9 | 10 | # Encode to base64 11 | encoded_uuid = base64.urlsafe_b64encode(uuid_bytes).decode('utf-8').rstrip("=") 12 | 13 | print(encoded_uuid) 14 | 15 | -------------------------------------------------------------------------------- /docker/kafka-three-node-cluster/Makefile: -------------------------------------------------------------------------------- 1 | # Thanks: https://gist.github.com/mpneuried/0594963ad38e68917ef189b4e6a269db 2 | .PHONY: help 3 | 4 | help: ## This help. 5 | @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) 6 | 7 | .DEFAULT_GOAL := help 8 | 9 | # DOCKER TASKS 10 | up: ## Runs the containers in detached mode 11 | docker-compose up -d 12 | 13 | clean: ## Stops and removes all containers 14 | docker-compose down 15 | 16 | logs: ## View the logs from the containers 17 | docker-compose logs -f 18 | 19 | open: ## Opens tabs in container 20 | open http://localhost:8080/ 21 | -------------------------------------------------------------------------------- /docker/kafka-three-node-cluster/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3.9" 3 | 4 | services: 5 | zookeeper: 6 | platform: linux/amd64 7 | image: confluentinc/cp-zookeeper:${CONFLUENT_PLATFORM_VERSION:-7.4.0} 8 | container_name: zookeeper 9 | restart: unless-stopped 10 | ports: 11 | - '32181:32181' 12 | - '2888:2888' 13 | - '3888:3888' 14 | environment: 15 | ZOOKEEPER_SERVER_ID: 1 16 | ZOOKEEPER_CLIENT_PORT: 32181 17 | ZOOKEEPER_TICK_TIME: 2000 18 | ZOOKEEPER_INIT_LIMIT: 5 19 | ZOOKEEPER_SYNC_LIMIT: 2 20 | ZOOKEEPER_SERVERS: zookeeper:2888:3888 21 | healthcheck: 22 | test: echo stat | nc localhost 32181 23 | interval: 10s 24 | timeout: 10s 25 | retries: 3 26 | networks: 27 | - kafka 28 | logging: 29 | driver: "json-file" 30 | options: 31 | max-size: "1m" 32 | 33 | kafka-ui: # https://github.com/provectus/kafka-ui/blob/master/documentation/compose/kafka-ui.yaml 34 | container_name: kafka-ui 35 | image: provectuslabs/kafka-ui:latest 36 | ports: 37 | - 8080:8080 38 | depends_on: 39 | - broker-1 40 | - broker-2 41 | - broker-3 42 | environment: 43 | KAFKA_CLUSTERS_0_NAME: broker-1 44 | KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS: 
broker-1:29091 45 | KAFKA_CLUSTERS_0_METRICS_PORT: 19101 46 | KAFKA_CLUSTERS_1_NAME: broker-2 47 | KAFKA_CLUSTERS_1_BOOTSTRAPSERVERS: broker-2:29092 48 | KAFKA_CLUSTERS_1_METRICS_PORT: 19102 49 | KAFKA_CLUSTERS_2_NAME: broker-3 50 | KAFKA_CLUSTERS_2_BOOTSTRAPSERVERS: broker-3:29093 51 | KAFKA_CLUSTERS_2_METRICS_PORT: 19103 52 | DYNAMIC_CONFIG_ENABLED: 'true' 53 | networks: 54 | - kafka 55 | logging: 56 | driver: "json-file" 57 | options: 58 | max-size: "1m" 59 | 60 | broker-1: 61 | platform: linux/amd64 62 | image: confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-7.4.0} 63 | container_name: broker-1 64 | restart: unless-stopped 65 | ports: 66 | - '9091:9091' 67 | depends_on: 68 | - zookeeper 69 | environment: 70 | KAFKA_BROKER_ID: 1 71 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181 72 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT 73 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 74 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://broker-1:29091,EXTERNAL://localhost:9091 75 | KAFKA_DEFAULT_REPLICATION_FACTOR: 3 76 | KAFKA_NUM_PARTITIONS: 3 77 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 78 | KAFKA_JMX_PORT: 19101 79 | KAFKA_JMX_HOSTNAME: localhost 80 | healthcheck: 81 | test: nc -vz localhost 9091 82 | interval: 10s 83 | timeout: 10s 84 | retries: 3 85 | networks: 86 | - kafka 87 | logging: 88 | driver: "json-file" 89 | options: 90 | max-size: "1m" 91 | 92 | broker-2: 93 | platform: linux/amd64 94 | image: confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-7.4.0} 95 | container_name: broker-2 96 | restart: unless-stopped 97 | ports: 98 | - '9092:9092' 99 | depends_on: 100 | - zookeeper 101 | environment: 102 | KAFKA_BROKER_ID: 2 103 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181 104 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT 105 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 106 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://broker-2:29092,EXTERNAL://localhost:9092 107 | KAFKA_DEFAULT_REPLICATION_FACTOR: 3 108 | KAFKA_NUM_PARTITIONS: 3 109 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 110 | KAFKA_JMX_PORT: 19102 111 | KAFKA_JMX_HOSTNAME: localhost 112 | healthcheck: 113 | test: nc -vz localhost 9092 114 | interval: 10s 115 | timeout: 10s 116 | retries: 3 117 | networks: 118 | - kafka 119 | logging: 120 | driver: "json-file" 121 | options: 122 | max-size: "1m" 123 | 124 | broker-3: 125 | platform: linux/amd64 126 | image: confluentinc/cp-kafka:${CONFLUENT_PLATFORM_VERSION:-7.4.0} 127 | container_name: broker-3 128 | restart: unless-stopped 129 | ports: 130 | - '9093:9093' 131 | depends_on: 132 | - zookeeper 133 | environment: 134 | KAFKA_BROKER_ID: 3 135 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181 136 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT 137 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 138 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://broker-3:29093,EXTERNAL://localhost:9093 139 | KAFKA_DEFAULT_REPLICATION_FACTOR: 3 140 | KAFKA_NUM_PARTITIONS: 3 141 | KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 142 | KAFKA_JMX_PORT: 19103 143 | KAFKA_JMX_HOSTNAME: localhost 144 | healthcheck: 145 | test: nc -vz localhost 9093 146 | interval: 10s 147 | timeout: 10s 148 | retries: 3 149 | networks: 150 | - kafka 151 | logging: 152 | driver: "json-file" 153 | options: 154 | max-size: "1m" 155 | 156 | networks: 157 | kafka: 158 | name: kafka 159 | -------------------------------------------------------------------------------- /docker/kafka/README.md: -------------------------------------------------------------------------------- 1 | # 
kafka in docker 2 | 3 | ## Quick Start 4 | 5 | Boot the stack: 6 | 7 | ```bash 8 | docker-compose up --build -d 9 | ``` 10 | 11 | Tail the producer logs: 12 | 13 | ```bash 14 | docker logs -f producer 15 | ``` 16 | 17 | Tail the consumer logs: 18 | 19 | ```bash 20 | docker logs -f consumer 21 | ``` 22 | 23 | ## Other Examples 24 | 25 | List: 26 | 27 | ```bash 28 | docker run --network host -it --rm edenhill/kcat:1.7.1 -b 127.0.0.1:9091 -L 29 | ``` 30 | 31 | Produce: 32 | 33 | ```bash 34 | docker run --network host -i --rm edenhill/kcat:1.7.1 -b 127.0.0.1:9091 -t test -K: -P < { 15 | console.log(error) 16 | }) 17 | 18 | database.once('connected', () => { 19 | console.log('Database Connected'); 20 | }) 21 | 22 | const app = express(); 23 | app.use(express.json()); 24 | app.use(morgan('combined')); 25 | 26 | const routes = require('./routes/routes'); 27 | 28 | app.use('/api', routes) 29 | 30 | app.listen(appPort, () => { 31 | console.log(`Server Started at ${appPort}`) 32 | }) 33 | 34 | -------------------------------------------------------------------------------- /docker/nodejs-express-mongodb/models/model.js: -------------------------------------------------------------------------------- 1 | const mongoose = require('mongoose'); 2 | 3 | // https://mongoosejs.com/docs/guide.html 4 | const dataSchema = new mongoose.Schema({ 5 | name: { 6 | required: true, 7 | type: String 8 | }, 9 | age: { 10 | required: true, 11 | type: Number 12 | }, 13 | hobbies: { 14 | required: true, 15 | type: Array 16 | } 17 | }) 18 | 19 | module.exports = mongoose.model('Data', dataSchema) 20 | -------------------------------------------------------------------------------- /docker/nodejs-express-mongodb/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "src", 3 | "version": "1.0.0", 4 | "description": "express api with mongodb", 5 | "main": "index.js", 6 | "scripts": { 7 | "start": "node index.js", 8 | "test": "echo \"Error: no test specified\" && exit 1" 9 | }, 10 | "repository": { 11 | "type": "git", 12 | "url": "git+https://github.com/ruanbekker/quick-starts.git" 13 | }, 14 | "keywords": [ 15 | "express" 16 | ], 17 | "author": "Ruan Bekker", 18 | "license": "ISC", 19 | "bugs": { 20 | "url": "https://github.com/ruanbekker/quick-starts/issues" 21 | }, 22 | "homepage": "https://github.com/ruanbekker/quick-starts#readme", 23 | "dependencies": { 24 | "express": "^4.18.2", 25 | "mongoose": "^7.0.3", 26 | "morgan": "^1.10.0" 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /docker/nodejs-express-mongodb/routes/routes.js: -------------------------------------------------------------------------------- 1 | const express = require('express'); 2 | const Model = require('../models/model'); 3 | const router = express.Router(); 4 | 5 | // Post Method 6 | router.post('/', async (req, res) => { 7 | const data = new Model({ 8 | name: req.body.name, 9 | age: req.body.age, 10 | hobbies: req.body.hobbies 11 | }) 12 | 13 | try { 14 | const dataToSave = await data.save(); 15 | res.status(200).json(dataToSave) 16 | } 17 | catch (error) { 18 | res.status(400).json({ message: error.message }) 19 | } 20 | }) 21 | 22 | // Get all Method 23 | router.get('/', async (req, res) => { 24 | try { 25 | const data = await Model.find(); 26 | res.json(data) 27 | } 28 | catch (error) { 29 | res.status(500).json({ message: error.message }) 30 | } 31 | }) 32 | 33 | // Get by ID Method 34 | router.get('/:id', async (req, res) => { 35 | try 
{ 36 | const data = await Model.findById(req.params.id); 37 | res.json(data) 38 | } 39 | catch (error) { 40 | res.status(500).json({ message: error.message }) 41 | } 42 | }) 43 | 44 | // Update by ID Method 45 | router.patch('/:id', async (req, res) => { 46 | try { 47 | const id = req.params.id; 48 | const updatedData = req.body; 49 | const options = { new: true }; 50 | 51 | const result = await Model.findByIdAndUpdate( 52 | id, updatedData, options 53 | ) 54 | 55 | res.send(result) 56 | } 57 | catch (error) { 58 | res.status(500).json({ message: error.message }) 59 | } 60 | }) 61 | 62 | // Delete by ID Method 63 | router.delete('/:id', async (req, res) => { 64 | try { 65 | const id = req.params.id; 66 | const data = await Model.findByIdAndDelete(id) 67 | res.send(`Document with ${data.name} has been deleted..`) 68 | } 69 | catch (error) { 70 | res.status(400).json({ message: error.message }) 71 | } 72 | }) 73 | 74 | module.exports = router; 75 | -------------------------------------------------------------------------------- /docker/postgresql/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3" 2 | 3 | services: 4 | postgres: 5 | image: postgres:14 6 | container_name: postgres 7 | restart: unless-stopped 8 | environment: 9 | - POSTGRES_USER=postgres 10 | - POSTGRES_PASSWORD=quickstart 11 | - POSTGRES_DB=quickstart 12 | networks: 13 | - quickstart 14 | volumes: 15 | - postgres-volume:/var/lib/postgresql/data 16 | ports: 17 | - 5432:5432 18 | healthcheck: 19 | test: ["CMD-SHELL", "pg_isready -U postgres"] 20 | interval: 10s 21 | timeout: 5s 22 | retries: 5 23 | logging: 24 | driver: "json-file" 25 | options: 26 | max-size: "1m" 27 | max-file: "1" 28 | 29 | volumes: 30 | postgres-volume: {} 31 | 32 | networks: 33 | quickstart: 34 | name: quickstart 35 | -------------------------------------------------------------------------------- /docker/rabbitmq-python/Makefile: -------------------------------------------------------------------------------- 1 | # Thanks: https://gist.github.com/mpneuried/0594963ad38e68917ef189b4e6a269db 2 | .PHONY: help 3 | 4 | help: ## This help. 5 | @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) 6 | 7 | .DEFAULT_GOAL := help 8 | 9 | # DOCKER TASKS 10 | up: ## Runs the containers in detached mode 11 | docker-compose up -d --build 12 | 13 | clean: ## Stops and removes all containers 14 | docker-compose down 15 | 16 | logs: ## View the logs from the containers 17 | docker-compose logs -f 18 | 19 | open: ## Opens tabs in container 20 | open http://localhost:15672/ 21 | -------------------------------------------------------------------------------- /docker/rabbitmq-python/consumer/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Python runtime as a parent image 2 | FROM python:3.9-slim-buster 3 | 4 | # Set the working directory in the container to /app 5 | WORKDIR /app 6 | 7 | # Set the environment variable PYTHONUNBUFFERED to disable Python's buffering 8 | ENV PYTHONUNBUFFERED=1 9 | 10 | # Copy the current directory contents into the container at /app 11 | COPY . 
/app 12 | 13 | # Install the pika client library 14 | RUN pip install --no-cache-dir pika 15 | 16 | # Run consumer.py when the container launches 17 | CMD ["python", "consumer.py"] 18 | 19 | -------------------------------------------------------------------------------- /docker/rabbitmq-python/consumer/consumer.py: -------------------------------------------------------------------------------- 1 | import pika 2 | import os 3 | import time 4 | import sys 5 | 6 | RABBITMQ_HOST = os.getenv('RABBITMQ_HOST', 'localhost') 7 | 8 | try: 9 | connection = pika.BlockingConnection(pika.ConnectionParameters(host=RABBITMQ_HOST)) 10 | channel = connection.channel() 11 | except pika.exceptions.AMQPConnectionError: 12 | print(f"Could not connect to RabbitMQ at {RABBITMQ_HOST}") 13 | sys.exit(1) 14 | 15 | channel.queue_declare(queue='hello') 16 | 17 | def callback(ch, method, properties, body): 18 | print(" [x] Received %r" % body) 19 | 20 | channel.basic_consume(queue='hello', on_message_callback=callback, auto_ack=True) 21 | 22 | print(' [*] Waiting for messages. To exit press CTRL+C') 23 | channel.start_consuming() 24 | 25 | -------------------------------------------------------------------------------- /docker/rabbitmq-python/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.8' 2 | 3 | services: 4 | rabbitmq: 5 | image: rabbitmq:3-management 6 | container_name: rabbitmq 7 | restart: unless-stopped 8 | ports: 9 | - 5672:5672 10 | - 15672:15672 11 | networks: 12 | - quickstarts 13 | logging: 14 | driver: "json-file" 15 | options: 16 | max-size: "1m" 17 | max-file: "1" 18 | 19 | publisher: 20 | build: ./publisher 21 | container_name: publisher 22 | restart: unless-stopped 23 | volumes: 24 | - ./publisher:/app 25 | environment: 26 | - RABBITMQ_HOST=rabbitmq 27 | depends_on: 28 | - rabbitmq 29 | networks: 30 | - quickstarts 31 | logging: 32 | driver: "json-file" 33 | options: 34 | max-size: "1m" 35 | max-file: "1" 36 | 37 | consumer: 38 | build: ./consumer 39 | container_name: consumer 40 | restart: unless-stopped 41 | volumes: 42 | - ./consumer:/app 43 | environment: 44 | - RABBITMQ_HOST=rabbitmq 45 | depends_on: 46 | - rabbitmq 47 | networks: 48 | - quickstarts 49 | logging: 50 | driver: "json-file" 51 | options: 52 | max-size: "1m" 53 | max-file: "1" 54 | 55 | networks: 56 | quickstarts: 57 | name: quickstarts 58 | -------------------------------------------------------------------------------- /docker/rabbitmq-python/publisher/Dockerfile: -------------------------------------------------------------------------------- 1 | # Use an official Python runtime as a parent image 2 | FROM python:3.9-slim-buster 3 | 4 | # Set the working directory in the container to /app 5 | WORKDIR /app 6 | 7 | # Set the environment variable PYTHONUNBUFFERED to disable Python's buffering 8 | ENV PYTHONUNBUFFERED=1 9 | 10 | # Copy the current directory contents into the container at /app 11 | COPY .
/app 12 | 13 | # Install the pika client library 14 | RUN pip install --no-cache-dir pika 15 | 16 | # Run publisher.py when the container launches 17 | CMD ["python", "publisher.py"] 18 | 19 | -------------------------------------------------------------------------------- /docker/rabbitmq-python/publisher/publisher.py: -------------------------------------------------------------------------------- 1 | import pika 2 | import os 3 | import time 4 | import random 5 | import sys 6 | 7 | RABBITMQ_HOST = os.getenv('RABBITMQ_HOST', 'localhost') 8 | COUNT = 0 9 | 10 | try: 11 | connection = pika.BlockingConnection(pika.ConnectionParameters(host=RABBITMQ_HOST)) 12 | channel = connection.channel() 13 | except pika.exceptions.AMQPConnectionError: 14 | print(f"Could not connect to RabbitMQ at {RABBITMQ_HOST}") 15 | sys.exit(1) 16 | 17 | channel.queue_declare(queue='hello') 18 | 19 | print("starting") 20 | RANDOM_ID = random.randint(1, 10000) 21 | while COUNT <= 20: 22 | COUNT = COUNT + 1 23 | channel.basic_publish(exchange='', routing_key='hello', body=f'[id: {RANDOM_ID} count: {COUNT}] Hello World!') 24 | print(f" [x] Sent 'Hello World! {COUNT} from {RANDOM_ID}'") 25 | time.sleep(1) 26 | 27 | print("ending") 28 | connection.close() 29 | 30 | -------------------------------------------------------------------------------- /docker/redis/README.md: -------------------------------------------------------------------------------- 1 | # redis 2 | 3 | Quick Start with Redis. 4 | 5 | ## Boot 6 | 7 | Run the redis container with docker compose using: 8 | 9 | ```bash 10 | docker-compose up -d 11 | ``` 12 | 13 | ## Access Redis 14 | 15 | You can get the output from the `INFO` command in `redis-cli` using: 16 | 17 | ```bash 18 | docker exec -it redis redis-cli INFO 19 | ``` 20 | 21 | To write to redis, we can set a key `foo` with the value of `bar` using `set`: 22 | 23 | ```bash 24 | docker exec -it redis redis-cli set foo bar 25 | ``` 26 | 27 | We can then retrieve the value of `foo` using `get`: 28 | 29 | ```bash 30 | docker exec -it redis redis-cli get foo 31 | ``` 32 | 33 | Which returns: 34 | 35 | ``` 36 | "bar" 37 | ``` 38 | 39 | ## Tear Down 40 | 41 | We can stop the container using: 42 | 43 | ```bash 44 | docker-compose down 45 | ``` -------------------------------------------------------------------------------- /docker/redis/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | services: 4 | redis: 5 | image: redis:7.0-bullseye 6 | container_name: redis 7 | ports: 8 | - 6379:6379 9 | networks: 10 | - public 11 | healthcheck: 12 | test: [ "CMD", "redis-cli", "--raw", "incr", "ping" ] 13 | interval: 5s 14 | timeout: 3s 15 | retries: 5 16 | logging: 17 | driver: "json-file" 18 | options: 19 | max-size: "1m" 20 | 21 | networks: 22 | public: 23 | name: public 24 | 25 | -------------------------------------------------------------------------------- /docker/spark-cluster/.env: -------------------------------------------------------------------------------- 1 | SPARK_VERSION=3.4.2 2 | -------------------------------------------------------------------------------- /docker/spark-cluster/README.md: -------------------------------------------------------------------------------- 1 | # docker-spark-cluster 2 | 3 | This is a quick start with Apache Spark and an example word count.
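After booting the cluster in the Usage section below, it is worth confirming that both workers registered with the master before submitting the job; a quick sanity check (a sketch, assuming the service names from this directory's `docker-compose.yaml`):

```bash
# the master logs one 'Registering worker ...' line per worker that joins
docker compose logs spark-master | grep -i 'registering worker'

# the master UI on http://localhost:8080 should also list two workers as ALIVE
docker compose ps
```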
4 | 5 | ## Usage 6 | 7 | Boot the cluster: 8 | 9 | ```bash 10 | docker compose up -d 11 | ``` 12 | 13 | Run a word count: 14 | 15 | ```bash 16 | docker-compose exec spark-master spark-submit --master spark://spark-master:7077 /src/application.py 17 | ``` 18 | 19 | The output: 20 | 21 | ```bash 22 | 23/12/13 11:59:30 INFO TaskSchedulerImpl: Adding task set 3.0 with 2 tasks resource profile 0 23 | 23/12/13 11:59:30 INFO TaskSetManager: Starting task 0.0 in stage 3.0 (TID 4) (172.20.0.3, executor 1, partition 0, NODE_LOCAL, 7185 bytes) 24 | 23/12/13 11:59:30 INFO TaskSetManager: Starting task 1.0 in stage 3.0 (TID 5) (172.20.0.3, executor 1, partition 1, NODE_LOCAL, 7185 bytes) 25 | 23/12/13 11:59:30 INFO BlockManagerInfo: Added broadcast_3_piece0 in memory on 172.20.0.3:42395 (size: 5.9 KiB, free: 434.3 MiB) 26 | 23/12/13 11:59:30 INFO TaskSetManager: Finished task 0.0 in stage 3.0 (TID 4) in 78 ms on 172.20.0.3 (executor 1) (1/2) 27 | 23/12/13 11:59:30 INFO TaskSetManager: Finished task 1.0 in stage 3.0 (TID 5) in 78 ms on 172.20.0.3 (executor 1) (2/2) 28 | 23/12/13 11:59:30 INFO TaskSchedulerImpl: Removed TaskSet 3.0, whose tasks have all completed, from pool 29 | 23/12/13 11:59:30 INFO DAGScheduler: ResultStage 3 (collect at /src/application.py:19) finished in 0.088 s 30 | 23/12/13 11:59:30 INFO DAGScheduler: Job 1 is finished. Cancelling potential speculative or zombie tasks for this job 31 | 23/12/13 11:59:30 INFO TaskSchedulerImpl: Killing all running tasks in stage 3: Stage finished 32 | 23/12/13 11:59:30 INFO DAGScheduler: Job 1 finished: collect at /src/application.py:19, took 0.090312 s 33 | ... 34 | over: 2 35 | Fiction: 1 36 | special: 1 37 | permission: 1 38 | Total number of words: 810 39 | ``` 40 | 41 | ## Screenshots 42 | 43 | The spark ui on http://localhost:8080 44 | 45 | ![image](https://github.com/ruanbekker/quick-starts/assets/567298/973cf7fb-15af-4866-a35b-e6d71fbbfac0) 46 | 47 | 48 | ## Resources 49 | 50 | - https://github.com/bitnami/containers/tree/main/bitnami/spark 51 | -------------------------------------------------------------------------------- /docker/spark-cluster/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3.7' 2 | 3 | services: 4 | spark-master: 5 | image: bitnami/spark:${SPARK_VERSION:-latest} 6 | container_name: spark-master 7 | command: bin/spark-class org.apache.spark.deploy.master.Master 8 | environment: 9 | - SPARK_MODE=master 10 | - SPARK_RPC_AUTHENTICATION_ENABLED=no 11 | - SPARK_RPC_ENCRYPTION_ENABLED=no 12 | - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no 13 | - SPARK_SSL_ENABLED=no 14 | - SPARK_USER=spark 15 | ports: 16 | - 8080:8080 17 | - 7077:7077 18 | volumes: 19 | - ./workspace/data.txt:/src/data.txt 20 | - ./workspace/application.py:/src/application.py 21 | networks: 22 | - quick-starts 23 | logging: 24 | driver: "json-file" 25 | options: 26 | max-size: "1m" 27 | 28 | spark-worker-1: 29 | image: bitnami/spark:${SPARK_VERSION:-latest} 30 | container_name: spark-worker-1 31 | command: bin/spark-class org.apache.spark.deploy.worker.Worker spark://spark-master:7077 32 | depends_on: 33 | spark-master: 34 | condition: service_started 35 | environment: 36 | - SPARK_MODE=worker 37 | - SPARK_MASTER_URL=spark://spark-master:7077 38 | - SPARK_WORKER_MEMORY=2G 39 | - SPARK_WORKER_CORES=1 40 | - SPARK_RPC_AUTHENTICATION_ENABLED=no 41 | - SPARK_RPC_ENCRYPTION_ENABLED=no 42 | - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no 43 | - SPARK_SSL_ENABLED=no 44 | - SPARK_USER=spark 45 | ports: 46 | 
- 8081:8081 47 | networks: 48 | - quick-starts 49 | volumes: 50 | - ./workspace/data.txt:/src/data.txt 51 | logging: 52 | driver: "json-file" 53 | options: 54 | max-size: "1m" 55 | 56 | spark-worker-2: 57 | image: bitnami/spark:${SPARK_VERSION:-latest} 58 | container_name: spark-worker-2 59 | command: bin/spark-class org.apache.spark.deploy.worker.Worker spark://spark-master:7077 60 | depends_on: 61 | spark-master: 62 | condition: service_started 63 | environment: 64 | - SPARK_MODE=worker 65 | - SPARK_MASTER_URL=spark://spark-master:7077 66 | - SPARK_WORKER_MEMORY=2G 67 | - SPARK_WORKER_CORES=1 68 | - SPARK_RPC_AUTHENTICATION_ENABLED=no 69 | - SPARK_RPC_ENCRYPTION_ENABLED=no 70 | - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no 71 | - SPARK_SSL_ENABLED=no 72 | - SPARK_USER=spark 73 | ports: 74 | - 8082:8081 75 | networks: 76 | - quick-starts 77 | volumes: 78 | - ./workspace/data.txt:/src/data.txt 79 | logging: 80 | driver: "json-file" 81 | options: 82 | max-size: "1m" 83 | 84 | networks: 85 | quick-starts: 86 | name: quick-starts 87 | -------------------------------------------------------------------------------- /docker/spark-cluster/workspace/application.py: -------------------------------------------------------------------------------- 1 | from pyspark.sql import SparkSession 2 | 3 | # Step 1: Initialize Spark Session 4 | spark = SparkSession.builder.appName("WordCount").getOrCreate() 5 | 6 | # Step 2: Read Data 7 | text_file = spark.sparkContext.textFile("/src/data.txt") 8 | 9 | # Step 3: Data Processing for Word Count 10 | word_counts = text_file.flatMap(lambda line: line.split(" ")) \ 11 | .map(lambda word: (word, 1)) \ 12 | .reduceByKey(lambda a, b: a + b) 13 | 14 | # Additional Step: Counting All Words 15 | total_words = word_counts.map(lambda x: x[1]).reduce(lambda a, b: a + b) 16 | 17 | # Step 4: Output the Results 18 | # Print individual word counts 19 | for word, count in word_counts.collect(): 20 | print(f"{word}: {count}") 21 | 22 | # Print total word count 23 | print(f"Total number of words: {total_words}") 24 | 25 | # Stop the SparkSession 26 | spark.stop() 27 | 28 | -------------------------------------------------------------------------------- /docker/spark-cluster/workspace/data.txt: -------------------------------------------------------------------------------- 1 | this is a sample word count from wikipedia 2 | the word count is the number of words in a document or passage of text Word counting may be needed when a text 3 | is required to stay within certain numbers of words This may particularly be the case in academia legal 4 | proceedings journalism and advertising Word count is commonly used by translators to determine the price for 5 | the translation job Word counts may also be used to calculate measures of readability and to measure typing 6 | and reading speeds usually in words per minute When converting character counts to words a measure of five or 7 | six characters to a word is generally used Contents Details and variations of definition Software In fiction 8 | In non fiction See also References Sources External links Details and variations of definition 9 | This section does not cite any references or sources Please help improve this section by adding citations to 10 | reliable sources Unsourced material may be challenged and removed 11 | Variations in the operational definitions of how to count the words can occur namely what counts as a word and 12 | which words don't count toward the total However especially since the advent of widespread word processing 
there 13 | is a broad consensus on these operational definitions and hence the bottom line integer result 14 | The consensus is to accept the text segmentation rules generally found in most word processing software including how 15 | word boundaries are determined which depends on how word dividers are defined The first trait of that definition is that a space any of various whitespace 16 | characters such as a regular word space an em space or a tab character is a word divider Usually a hyphen or a slash is too 17 | Different word counting programs may give varying results depending on the text segmentation rule 18 | details and on whether words outside the main text such as footnotes endnotes or hidden text) are counted But the behavior 19 | of most major word processing applications is broadly similar However during the era when school assignments were done in 20 | handwriting or with typewriters the rules for these definitions often differed from todays consensus 21 | Most importantly many students were drilled on the rule that certain words don't count usually articles namely a an the but 22 | sometimes also others such as conjunctions for example and or but and some prepositions usually to of Hyphenated permanent 23 | compounds such as follow up noun or long term adjective were counted as one word To save the time and effort of counting 24 | word by word often a rule of thumb for the average number of words per line was used such as 10 words per line These rules 25 | have fallen by the wayside in the word processing era the word count feature of such software which follows the text 26 | segmentation rules mentioned earlier is now the standard arbiter because it is largely consistent across documents and 27 | applications and because it is fast effortless and costless already included with the application As for which sections of 28 | a document count toward the total such as footnotes endnotes abstracts reference lists and bibliographies tables figure 29 | captions hidden text the person in charge teacher client can define their choice and users students workers can simply 30 | select or exclude the elements accordingly and watch the word count automatically update Software Modern web browsers 31 | support word counting via extensions via a JavaScript bookmarklet or a script that is hosted in a website Most word 32 | processors can also count words Unix like systems include a program wc specifically for word counting 33 | As explained earlier different word counting programs may give varying results depending on the text segmentation rule 34 | details The exact number of words often is not a strict requirement thus the variation is acceptable 35 | In fiction Novelist Jane Smiley suggests that length is an important quality of the novel However novels can vary 36 | tremendously in length Smiley lists novels as typically being between and words while National Novel Writing Month 37 | requires its novels to be at least words There are no firm rules for example the boundary between a novella and a novel 38 | is arbitrary and a literary work may be difficult to categorise But while the length of a novel is to a large extent up 39 | to its writer lengths may also vary by subgenre many chapter books for children start at a length of about words and a 40 | typical mystery novel might be in the to word range while a thriller could be over words 41 | The Science Fiction and Fantasy Writers of America specifies word lengths for each category of its Nebula award categories 42 | Classification 
Word count Novel over words Novella to words Novelette to words Short story under words 43 | In non fiction The acceptable length of an academic dissertation varies greatly dependent predominantly on the subject 44 | Numerous American universities limit Ph.D. dissertations to at most words barring special permission for exceeding this limit 45 | -------------------------------------------------------------------------------- /docker/traefik-http/README.md: -------------------------------------------------------------------------------- 1 | # Docker Traefik HTTP Quick Start 2 | 3 | This quick-start shows how to run [traefik proxy](https://traefik.io/traefik/) in http mode with a sample web application behind the proxy. 4 | 5 | ## About Traefik 6 | 7 | Traefik is a Modern HTTP Reverse Proxy and Load Balancer that makes deploying microservices easy. 8 | 9 | ## Usage 10 | 11 | Run the containers using: 12 | 13 | ```bash 14 | docker-compose up -d 15 | ``` 16 | 17 | The `traefik-proxy` will route traffic to the `whoami-app` container when the host header is `whoami.127.0.0.1.nip.io`, which is configured with labels on the `whoami-app` container's configuration. You can inspect `docker-compose.yml`. 18 | 19 | We can test that with: 20 | 21 | ```bash 22 | curl http://whoami.127.0.0.1.nip.io 23 | ``` 24 | 25 | And the response should be: 26 | 27 | ``` 28 | Hostname: d4b9fcfd4744 29 | IP: 127.0.0.1 30 | IP: 172.23.0.4 31 | RemoteAddr: 172.23.0.2:49012 32 | GET / HTTP/1.1 33 | Host: whoami.127.0.0.1.nip.io 34 | User-Agent: curl/7.64.1 35 | Accept: */* 36 | Accept-Encoding: gzip 37 | X-Forwarded-For: 172.23.0.1 38 | X-Forwarded-Host: whoami.127.0.0.1.nip.io 39 | X-Forwarded-Port: 80 40 | X-Forwarded-Proto: http 41 | X-Forwarded-Server: b8df98295fec 42 | X-Real-Ip: 172.23.0.1 43 | ``` 44 | 45 | ## Traefik with other apps 46 | 47 | If you would like to use traefik proxy with other containers, you just need to wire it up with similar config on the application container as below: 48 | 49 | ```yaml 50 | labels: 51 | - "traefik.enable=true" 52 | - "traefik.http.routers.whoami.rule=Host(`whoami.127.0.0.1.nip.io`)" 53 | - "traefik.http.routers.whoami.entrypoints=web" 54 | - "traefik.http.services.whoami.loadbalancer.server.port=80" 55 | ``` 56 | 57 | In summary: 58 | 59 | 1. The host header 60 | 2. The entrypoint name, defined on traefik-proxy i.e. `web` 61 | 3. The port that traefik-proxy needs to pass the connection to.
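Since the routing decision is made purely on the `Host` header, you can exercise it without nip.io DNS by setting the header yourself; a minimal sketch against this quickstart's whoami router:

```bash
# traefik matches the Host header, so this reaches the whoami service
curl -H "Host: whoami.127.0.0.1.nip.io" http://localhost/

# an unmatched host falls through to traefik's default 404
curl -s -o /dev/null -w "%{http_code}\n" -H "Host: unknown.example" http://localhost/
```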
62 | 63 | ## Resources 64 | 65 | - https://traefik.io/traefik/ 66 | -------------------------------------------------------------------------------- /docker/traefik-http/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | services: 4 | traefik-proxy: 5 | image: traefik:v2.7 6 | container_name: traefik-proxy 7 | command: 8 | - "--log.level=INFO" 9 | - "--api.insecure=true" 10 | - "--providers.docker=true" 11 | - "--providers.docker.exposedbydefault=false" 12 | - "--providers.docker.network=public" 13 | - "--entrypoints.web.address=:80" 14 | ports: 15 | - 80:80 16 | - 8080:8080 17 | networks: 18 | - public 19 | volumes: 20 | - /var/run/docker.sock:/var/run/docker.sock:ro 21 | logging: 22 | driver: "json-file" 23 | options: 24 | max-size: "1m" 25 | 26 | whoami-app: 27 | image: traefik/whoami 28 | container_name: whoami-app 29 | labels: 30 | - "traefik.enable=true" 31 | - "traefik.http.routers.whoami.rule=Host(`whoami.127.0.0.1.nip.io`)" 32 | - "traefik.http.routers.whoami.entrypoints=web" 33 | - "traefik.http.services.whoami.loadbalancer.server.port=80" 34 | networks: 35 | - public 36 | logging: 37 | driver: "json-file" 38 | options: 39 | max-size: "1m" 40 | 41 | networks: 42 | public: 43 | name: public 44 | 45 | -------------------------------------------------------------------------------- /docker/traefik-https/README.md: -------------------------------------------------------------------------------- 1 | # Docker Traefik HTTPS Quick Start 2 | 3 | This quick-start shows how to run [traefik proxy](https://traefik.io/traefik/) in https mode with letsencrypt and a sample web application behind the proxy. 4 | 5 | ## About Traefik 6 | 7 | Traefik is a Modern HTTP Reverse Proxy and Load Balancer that makes deploying microservices easy. 8 | 9 | ## Note 10 | 11 | This quick-start uses the http challenge for letsencrypt, which means we need a direct / public http route to our service in order to complete the http challenge. 12 | 13 | ## Usage 14 | 15 | Run the containers using: 16 | 17 | ```bash 18 | docker-compose up -d 19 | ``` 20 | 21 | The `traefik-proxy` will route traffic to the `whoami-app` container when the host header is `whoami.example.com`, which is configured with labels on the `whoami-app` container's configuration. You can inspect `docker-compose.yml`.
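Note that the compose file mounts `./config/acme.json` into the container as letsencrypt's certificate store. The file is not committed to the repo, so create it with strict permissions before the first boot; if it is missing, docker will create a directory in its place, and traefik refuses to use an acme store that is world-readable:

```bash
# the acme certificate store must exist and be private (0600)
touch config/acme.json
chmod 600 config/acme.json
```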
22 | 23 | We can test that with: 24 | 25 | ```bash 26 | curl https://whoami.example.com 27 | ``` 28 | 29 | And the response should be: 30 | 31 | ``` 32 | Hostname: d4b9fcfd4744 33 | IP: 127.0.0.1 34 | IP: 172.23.0.4 35 | RemoteAddr: 172.23.0.2:49012 36 | GET / HTTP/1.1 37 | Host: whoami.example.com 38 | User-Agent: curl/7.64.1 39 | Accept: */* 40 | Accept-Encoding: gzip 41 | X-Forwarded-For: 172.23.0.1 42 | X-Forwarded-Host: whoami.example.com 43 | X-Forwarded-Port: 443 44 | X-Forwarded-Proto: https 45 | X-Forwarded-Server: b8df98295fec 46 | X-Real-Ip: 172.23.0.1 47 | ``` 48 | 49 | ## Traefik with other apps 50 | 51 | If you would like to use traefik proxy with other containers, you just need to wire it up with similar config on the application container as below: 52 | 53 | ```yaml 54 | labels: 55 | - "traefik.enable=true" 56 | - "traefik.http.routers.whoami.rule=Host(`whoami.example.com`)" 57 | - "traefik.http.routers.whoami.service=whoami" 58 | - "traefik.http.services.whoami.loadbalancer.server.port=80" 59 | - "traefik.http.routers.whoami.entrypoints=websecure" 60 | - "traefik.http.routers.whoami.tls=true" 61 | - "traefik.http.routers.whoami.tls.certresolver=letsencrypt" 62 | ``` 63 | 64 | In summary: 65 | 66 | 1. The host header 67 | 2. The entrypoint name, defined on traefik-proxy i.e. `websecure` 68 | 3. The port that traefik-proxy needs to pass the connection to. 69 | 70 | ## Resources 71 | 72 | - https://traefik.io/traefik/ 73 | -------------------------------------------------------------------------------- /docker/traefik-https/config/traefik.toml: -------------------------------------------------------------------------------- 1 | [entryPoints] 2 | [entryPoints.web] 3 | address = ":80" 4 | [entryPoints.web.http] 5 | [entryPoints.web.http.redirections] 6 | [entryPoints.web.http.redirections.entryPoint] 7 | to = "websecure" 8 | scheme = "https" 9 | [entryPoints.websecure] 10 | address = ":443" 11 | 12 | [api] 13 | dashboard = true 14 | insecure = true 15 | 16 | [providers.docker] 17 | watch = true 18 | network = "public" 19 | exposedByDefault = false 20 | 21 | [certificatesResolvers.letsencrypt.acme] 22 | email = "me@must-be-a-legit-domain.com" 23 | storage = "acme.json" 24 | [certificatesResolvers.letsencrypt.acme.httpChallenge] 25 | entryPoint = "web" 26 | -------------------------------------------------------------------------------- /docker/traefik-https/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.8" 2 | 3 | services: 4 | traefik-proxy: 5 | image: traefik:v2.7 6 | container_name: traefik-proxy 7 | ports: 8 | - target: 80 9 | published: 80 10 | protocol: tcp 11 | mode: host 12 | - target: 443 13 | published: 443 14 | protocol: tcp 15 | mode: host 16 | networks: 17 | - public 18 | volumes: 19 | - /var/run/docker.sock:/var/run/docker.sock:ro 20 | - ./config/traefik.toml:/traefik.toml 21 | - ./config/acme.json:/acme.json 22 | logging: 23 | driver: "json-file" 24 | options: 25 | max-size: "1m" 26 | 27 | whoami-app: 28 | image: traefik/whoami 29 | container_name: whoami-app 30 | labels: 31 | - "traefik.enable=true" 32 | - "traefik.http.routers.whoami.rule=Host(`whoami.example.com`)" 33 | - "traefik.http.routers.whoami.service=whoami" 34 | - "traefik.http.services.whoami.loadbalancer.server.port=80" 35 | - "traefik.http.routers.whoami.entrypoints=websecure" 36 | - "traefik.http.routers.whoami.tls=true" 37 | - "traefik.http.routers.whoami.tls.certresolver=letsencrypt" 38 | networks: 39 | - public 40 |
logging: 41 | driver: "json-file" 42 | options: 43 | max-size: "1m" 44 | 45 | networks: 46 | public: 47 | name: public 48 | 49 | -------------------------------------------------------------------------------- /helm/README.md: -------------------------------------------------------------------------------- 1 | # Helm Quick-Start 2 | 3 | This is a quick-start for using helm on kubernetes. 4 | 5 | ## Installing Helm 6 | 7 | - [Docs](https://helm.sh/docs/intro/install/) 8 | 9 | From a script: 10 | 11 | ```bash 12 | curl -fsSL https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash 13 | ``` 14 | 15 | On Mac: 16 | 17 | ```bash 18 | brew install helm 19 | ``` 20 | 21 | Manually from their [releases](https://github.com/helm/helm/releases): 22 | 23 | ```bash 24 | # mac 25 | wget https://get.helm.sh/helm-v3.9.4-darwin-amd64.tar.gz 26 | tar -xzf helm-v3.9.4-darwin-amd64.tar.gz 27 | mv darwin-amd64/helm /usr/local/bin 28 | 29 | # linux 30 | wget https://get.helm.sh/helm-v3.9.4-linux-amd64.tar.gz 31 | tar -xzf helm-v3.9.4-linux-amd64.tar.gz 32 | mv linux-amd64/helm /usr/local/bin/helm 33 | ``` 34 | -------------------------------------------------------------------------------- /index.md_bak: -------------------------------------------------------------------------------- 1 | --- 2 | title: Welcome 3 | layout: default 4 | --- 5 | 6 | Hello, Minima! 7 | -------------------------------------------------------------------------------- /kind/3-node-cluster/README.md: -------------------------------------------------------------------------------- 1 | # kind-3-node-cluster 2 | 3 | 3 Node Kubernetes Local Cluster with KinD 4 | 5 | ## Create 6 | 7 | Create a cluster: 8 | 9 | ```bash 10 | kind create cluster --name sektor --config kind.yaml 11 | ``` 12 | 13 | ## View 14 | 15 | View the nodes: 16 | 17 | ```bash 18 | kubectl get nodes -o wide 19 | ``` 20 | 21 | ## Delete 22 | 23 | List the clusters: 24 | 25 | ```bash 26 | kind get clusters 27 | ``` 28 | 29 | Delete the cluster: 30 | 31 | ```bash 32 | kind delete cluster --name sektor 33 | ``` 34 | -------------------------------------------------------------------------------- /kind/3-node-cluster/kind.yaml: -------------------------------------------------------------------------------- 1 | # https://kind.sigs.k8s.io/docs/user/quick-start/ 2 | # https://kind.sigs.k8s.io/docs/user/configuration/ 3 | kind: Cluster 4 | apiVersion: kind.x-k8s.io/v1alpha4 5 | nodes: 6 | - role: control-plane 7 | image: kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e 8 | - role: worker 9 | image: kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e 10 | - role: worker 11 | image: kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e 12 | -------------------------------------------------------------------------------- /kind/Makefile: -------------------------------------------------------------------------------- 1 | # Thanks: https://gist.github.com/mpneuried/0594963ad38e68917ef189b4e6a269db 2 | .PHONY: help 3 | 4 | help: ## This help section 5 | @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) 6 | 7 | .DEFAULT_GOAL := help 8 | 9 | create: ## Creates a kind cluster 10 | kind create cluster --name local --image 'kindest/node:v1.24.0@sha256:0866296e693efe1fed79d5e6c7af8df71fc73ae45e3679af05342239cdc5bc8e' 11 | 12 | list: ## Lists kind clusters 13 | kind get clusters 14 | 15
| delete: ## Deletes a kind cluster 16 | kind delete cluster --name local 17 | -------------------------------------------------------------------------------- /kind/README.md: -------------------------------------------------------------------------------- 1 | # kind 2 | 3 | Kubernetes in Docker (kind) is a project for running local Kubernetes clusters 4 | 5 | ## Installation 6 | 7 | Follow the docs: 8 | - https://kind.sigs.k8s.io/docs/user/quick-start/#installation 9 | 10 | ### Linux 11 | 12 | On Linux, install Docker as it's a dependency: 13 | 14 | ```bash 15 | curl https://get.docker.com | bash && sudo chmod 666 /var/run/docker.sock 16 | ``` 17 | 18 | Install KinD: 19 | 20 | ```bash 21 | curl -Lo ./kind https://kind.sigs.k8s.io/dl/v0.20.0/kind-linux-amd64 22 | sudo install -o root -g root -m 0755 kind /usr/local/bin/kind 23 | rm kind 24 | ``` 25 | 26 | Install kubectl: 27 | 28 | ```bash 29 | curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" 30 | sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl 31 | rm kubectl 32 | ``` 33 | 34 | ## Configuration 35 | 36 | Configuration docs: 37 | - https://kind.sigs.k8s.io/docs/user/quick-start/ 38 | - https://kind.sigs.k8s.io/docs/user/configuration/ 39 | -------------------------------------------------------------------------------- /kubernetes/argocd/README.md: -------------------------------------------------------------------------------- 1 | # argocd quickstart 2 | 3 | Argo CD is an open source, declarative, GitOps Continuous Delivery tool for Kubernetes applications. 4 | 5 | ## Getting started 6 | 7 | You will need the following: 8 | 9 | - [docker](https://docs.docker.com/get-docker/) 10 | - [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) 11 | - [kubectl](https://kubernetes.io/docs/tasks/tools/) 12 | - [helm](https://helm.sh/docs/intro/install/) 13 | 14 | To run a kubernetes cluster locally, we will need to install kind: 15 | 16 | ```bash 17 | brew install kind 18 | ``` 19 | 20 | ## Deploy a Cluster 21 | 22 | Then deploy a 1-node cluster: 23 | 24 | ```bash 25 | kind create cluster --name quickstart --config kind-config.yaml 26 | ``` 27 | 28 | Verify connectivity by viewing the nodes: 29 | 30 | ```bash 31 | kubectl get nodes --context kind-quickstart 32 | ``` 33 | 34 | Switch to the `kind-quickstart` kubernetes context: 35 | 36 | ```bash 37 | kubectl config use-context kind-quickstart 38 | ``` 39 | 40 | ## Deploy Argo CD 41 | 42 | Create the namespace: 43 | 44 | ```bash 45 | kubectl create namespace argocd 46 | ``` 47 | 48 | Deploy Argo CD: 49 | 50 | ```bash 51 | kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml 52 | ``` 53 | 54 | Ensure all pods are running: 55 | 56 | ```bash 57 | kubectl get pods -n argocd 58 | ``` 59 | 60 | Get the initial admin password: 61 | 62 | ```bash 63 | kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d 64 | ``` 65 | 66 | Port forward to the server: 67 | 68 | ```bash 69 | kubectl -n argocd port-forward svc/argocd-server 8080:80 70 | ``` 71 | 72 | Access the UI on http://localhost:8080 73 | 74 | ## Clean Up 75 | 76 | Delete the cluster: 77 | 78 | ```bash 79 | kind delete cluster --name quickstart 80 | ``` 81 | 82 | ## Where to go from here 83 | 84 | - [ArgoCD](https://argo-cd.readthedocs.io/en/stable/getting_started/) 85 | - [Redhat Tutorial](https://redhat-scholars.github.io/argocd-tutorial/argocd-tutorial/01-setup.html) 86 |
-------------------------------------------------------------------------------- /kubernetes/argocd/kind-config.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | nodes: 4 | - role: control-plane 5 | image: kindest/node:v1.25.9@sha256:c08d6c52820aa42e533b70bce0c2901183326d86dcdcbedecc9343681db45161 6 | # - role: worker 7 | # image: kindest/node:v1.25.9@sha256:c08d6c52820aa42e533b70bce0c2901183326d86dcdcbedecc9343681db45161 8 | -------------------------------------------------------------------------------- /kubernetes/grafana-prometheus/README.md: -------------------------------------------------------------------------------- 1 | # grafana-prometheus quickstart 2 | 3 | This quick-start will show you how to deploy prometheus and grafana as two separate helm releases. 4 | 5 | 6 | ## Getting started 7 | 8 | You will need the following: 9 | 10 | - [docker](https://docs.docker.com/get-docker/) 11 | - [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) 12 | - [kubectl](https://kubernetes.io/docs/tasks/tools/) 13 | - [helm](https://helm.sh/docs/intro/install/) 14 | 15 | To run a kubernetes cluster locally, we will need to install kind: 16 | 17 | ```bash 18 | brew install kind 19 | ``` 20 | 21 | ## Create a Cluster 22 | 23 | Then deploy a 1-node cluster: 24 | 25 | ```bash 26 | kind create cluster --name quickstart --config kind-config.yaml 27 | ``` 28 | 29 | Verify connectivity by viewing the nodes: 30 | 31 | ```bash 32 | kubectl get nodes --context kind-quickstart 33 | ``` 34 | 35 | Switch to the `kind-quickstart` kubernetes context: 36 | 37 | ```bash 38 | kubectl config use-context kind-quickstart 39 | ``` 40 | 41 | ## Helm Repositories 42 | 43 | Add the prometheus repository: 44 | 45 | ```bash 46 | helm repo add prometheus-community https://prometheus-community.github.io/helm-charts 47 | helm repo update 48 | ``` 49 | 50 | Add the grafana repository: 51 | 52 | ```bash 53 | helm repo add grafana https://grafana.github.io/helm-charts 54 | helm repo update 55 | ``` 56 | 57 | ## Deploy Prometheus 58 | 59 | Create the namespace: 60 | 61 | ```bash 62 | kubectl create namespace monitoring 63 | ``` 64 | 65 | Deploy Prometheus: 66 | 67 | ```bash 68 | helm upgrade --install prometheus prometheus-community/prometheus \ 69 | --namespace monitoring \ 70 | --set server.persistentVolume.size=10Gi,server.retention=14d 71 | ``` 72 | 73 | View the deployed resources: 74 | 75 | ```bash 76 | kubectl -n monitoring get pods,pv,pvc 77 | ``` 78 | 79 | Once the deployment has finished, you can port forward to the prometheus service: 80 | 81 | ```bash 82 | kubectl -n monitoring port-forward svc/prometheus-server 9090:80 83 | ``` 84 | 85 | ## Deploy Grafana 86 | 87 | Deploy Grafana into the monitoring namespace: 88 | 89 | ```bash 90 | helm upgrade --install grafana grafana/grafana \ 91 | --namespace monitoring \ 92 | --set persistence.enabled=true,persistence.type=pvc,persistence.size=10Gi 93 | ``` 94 | 95 | Get the admin password for grafana: 96 | 97 | ```bash 98 | kubectl -n monitoring get secret grafana -o jsonpath="{.data.admin-password}" | base64 --decode ; echo 99 | ``` 100 | 101 | Port forward to the grafana service: 102 | 103 | ```bash 104 | kubectl -n monitoring port-forward service/grafana 3000:80 105 | ``` 106 | 107 | Dashboards that are nice to have: 108 | 109 | - [1860](https://grafana.com/grafana/dashboards/1860-node-exporter-full/) 110 | 111 | ## Destroy 112 | 113 | Tear down the applications: 114
| 115 | ```bash 116 | helm -n monitoring uninstall prometheus 117 | helm -n monitoring uninstall grafana 118 | ``` 119 | 120 | Tear down the cluster: 121 | 122 | ```bash 123 | kind delete cluster --name quickstart 124 | ``` 125 | 126 | ## Resources 127 | 128 | - [Prometheus Helm Chart](https://github.com/prometheus-community/helm-charts) 129 | - [Grafana Helm Chart](https://github.com/grafana/helm-charts) 130 | - [Kind: Ingress Nginx](https://kind.sigs.k8s.io/docs/user/ingress/) 131 | -------------------------------------------------------------------------------- /kubernetes/grafana-prometheus/kind-config.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | nodes: 4 | - role: control-plane 5 | image: kindest/node:v1.25.9@sha256:c08d6c52820aa42e533b70bce0c2901183326d86dcdcbedecc9343681db45161 6 | kubeadmConfigPatches: 7 | - | 8 | kind: InitConfiguration 9 | nodeRegistration: 10 | kubeletExtraArgs: 11 | node-labels: "ingress-ready=true" 12 | extraPortMappings: 13 | - containerPort: 80 14 | hostPort: 80 15 | protocol: TCP 16 | - containerPort: 443 17 | hostPort: 443 18 | protocol: TCP 19 | -------------------------------------------------------------------------------- /kubernetes/ingress-nginx/README.md: -------------------------------------------------------------------------------- 1 | # ingress-nginx quickstart 2 | 3 | 4 | ## Getting started 5 | 6 | You will need the following: 7 | 8 | - [docker](https://docs.docker.com/get-docker/) 9 | - [kind](https://kind.sigs.k8s.io/docs/user/quick-start/) 10 | - [kubectl](https://kubernetes.io/docs/tasks/tools/) 11 | - [helm](https://helm.sh/docs/intro/install/) 12 | 13 | To run a kubernetes cluster locally, we first need to install kind: 14 | 15 | ```bash 16 | brew install kind 17 | ``` 18 | 19 | ## Create a Cluster 20 | 21 | Then deploy a 1 node cluster: 22 | 23 | ```bash 24 | kind create cluster --name quickstart --config kind-config.yaml 25 | ``` 26 | 27 | Verify connectivity by viewing the nodes: 28 | 29 | ```bash 30 | kubectl get nodes --context kind-quickstart 31 | ``` 32 | 33 | Switch to the `kind-quickstart` kubernetes context: 34 | 35 | ```bash 36 | kubectl config use-context kind-quickstart 37 | ``` 38 | 39 | ## Deploy Ingress Nginx 40 | 41 | Create the namespace: 42 | 43 | ```bash 44 | kubectl create namespace ingress-nginx 45 | ``` 46 | 47 | Deploy Ingress Nginx: 48 | 49 | ```bash 50 | kubectl apply -n ingress-nginx -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.7.1/deploy/static/provider/kind/deploy.yaml 51 | ``` 52 | 53 | Ensure all pods are running: 54 | 55 | ```bash 56 | kubectl wait -n ingress-nginx --for=condition=ready pod --selector=app.kubernetes.io/component=controller --timeout=90s 57 | kubectl get pods -n ingress-nginx 58 | ``` 59 | 60 | ## Deploy a Web App 61 | 62 | Deploy the application: 63 | 64 | ```bash 65 | kubectl apply -f deployment/ 66 | ``` 67 | 68 | View the ingress: 69 | 70 | ```bash 71 | kubectl get ingress -n default 72 | ``` 73 | 74 | ## Access the Web App 75 | 76 | Access the application on http://example.127.0.0.1.nip.io 77 | 78 | ## Destroy 79 | 80 | Tear down the application: 81 | 82 | ```bash 83 | kubectl delete -f deployment/ 84 | ``` 85 | 86 | Tear down the cluster: 87 | 88 | ```bash 89 | kind delete cluster --name quickstart 90 | ``` 91 | 92 | ## Resources 93 | 94 | - [Kind: Ingress Nginx](https://kind.sigs.k8s.io/docs/user/ingress/) 95 |
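As a final command-line check, a request against the nip.io hostname should return the web app once the controller and pods are ready (a quick sketch, using the host rule from `deployment/ingress.yaml`):

```bash
# Expect HTTP 200 from the webapp via the ingress controller on port 80
curl -s -o /dev/null -w "%{http_code}\n" http://example.127.0.0.1.nip.io/
```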
-------------------------------------------------------------------------------- /kubernetes/ingress-nginx/deployment/deployment.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: apps/v1 3 | kind: Deployment 4 | metadata: 5 | labels: 6 | app: webapp 7 | name: webapp 8 | namespace: default 9 | spec: 10 | replicas: 2 11 | selector: 12 | matchLabels: 13 | app: webapp 14 | template: 15 | metadata: 16 | labels: 17 | app: webapp 18 | spec: 19 | containers: 20 | - image: ruanbekker/web-center-name-v2 21 | name: webapp 22 | ports: 23 | - name: http 24 | containerPort: 5000 25 | env: 26 | - name: APP_TITLE 27 | value: "Runs on Kind" 28 | resources: 29 | requests: 30 | memory: "64Mi" 31 | cpu: "250m" 32 | limits: 33 | memory: "256Mi" 34 | cpu: "1000m" 35 | -------------------------------------------------------------------------------- /kubernetes/ingress-nginx/deployment/ingress.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: networking.k8s.io/v1 2 | kind: Ingress 3 | metadata: 4 | name: webapp 5 | namespace: default 6 | spec: 7 | ingressClassName: nginx 8 | rules: 9 | - host: example.127.0.0.1.nip.io 10 | http: 11 | paths: 12 | - pathType: Prefix 13 | backend: 14 | service: 15 | name: webapp 16 | port: 17 | number: 80 18 | path: / 19 | -------------------------------------------------------------------------------- /kubernetes/ingress-nginx/deployment/service.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: v1 3 | kind: Service 4 | metadata: 5 | name: webapp 6 | namespace: default 7 | spec: 8 | type: ClusterIP 9 | selector: 10 | app: webapp 11 | ports: 12 | - name: http 13 | protocol: TCP 14 | port: 80 15 | targetPort: 5000 16 | -------------------------------------------------------------------------------- /kubernetes/ingress-nginx/kind-config.yaml: -------------------------------------------------------------------------------- 1 | kind: Cluster 2 | apiVersion: kind.x-k8s.io/v1alpha4 3 | nodes: 4 | - role: control-plane 5 | image: kindest/node:v1.25.9@sha256:c08d6c52820aa42e533b70bce0c2901183326d86dcdcbedecc9343681db45161 6 | kubeadmConfigPatches: 7 | - | 8 | kind: InitConfiguration 9 | nodeRegistration: 10 | kubeletExtraArgs: 11 | node-labels: "ingress-ready=true" 12 | extraPortMappings: 13 | - containerPort: 80 14 | hostPort: 80 15 | protocol: TCP 16 | - containerPort: 443 17 | hostPort: 443 18 | protocol: TCP 19 | -------------------------------------------------------------------------------- /terraform/README.md: -------------------------------------------------------------------------------- 1 | # terraform quick-start 2 | 3 | ## Installation 4 | 5 | Install [Terraform](https://developer.hashicorp.com/terraform/downloads) on Linux: 6 | 7 | ```bash 8 | wget https://releases.hashicorp.com/terraform/1.4.6/terraform_1.4.6_linux_amd64.zip 9 | unzip terraform_1.4.6_linux_amd64.zip 10 | rm -f terraform_1.4.6_linux_amd64.zip 11 | sudo mv terraform /usr/bin/terraform 12 | ``` 13 | -------------------------------------------------------------------------------- /terraform/aws-localstack/dynamodb/README.md: -------------------------------------------------------------------------------- 1 | # localstack-dynamodb quick-start 2 | 3 | This will provision a dynamodb table with terraform using localstack to mock the aws infrastructure. 
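The steps below assume a localstack container named `localstack` is already listening on `http://localhost:4566` (the endpoint configured in `provider.tf`). A minimal sketch to start one if it isn't running (image tag and flags are illustrative, adjust as needed):

```bash
# Start localstack and expose the edge port that provider.tf points at
docker run -d --name localstack -p 4566:4566 localstack/localstack
```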
4 | 5 | ## Usage 6 | 7 | Initialize: 8 | 9 | ```bash 10 | terraform init 11 | ``` 12 | 13 | Plan: 14 | 15 | ```bash 16 | terraform plan 17 | ``` 18 | 19 | Apply: 20 | 21 | ```bash 22 | terraform apply 23 | ``` 24 | 25 |
26 | Response: 27 | 28 | ```terraform 29 | Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: 30 | + create 31 | 32 | Terraform will perform the following actions: 33 | 34 | # aws_dynamodb_table.orders will be created 35 | + resource "aws_dynamodb_table" "orders" { 36 | + arn = (known after apply) 37 | + billing_mode = "PROVISIONED" 38 | + hash_key = "OrderID" 39 | + id = (known after apply) 40 | + name = "orders" 41 | + read_capacity = 2 42 | + stream_arn = (known after apply) 43 | + stream_label = (known after apply) 44 | + stream_view_type = (known after apply) 45 | + tags_all = (known after apply) 46 | + write_capacity = 5 47 | 48 | + attribute { 49 | + name = "OrderID" 50 | + type = "S" 51 | } 52 | } 53 | 54 | Plan: 1 to add, 0 to change, 0 to destroy. 55 | ``` 56 | 57 |
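After the apply completes, a quick optional check that terraform is tracking the table in its local state:

```bash
# List the resources recorded in the local state file
terraform state list
```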
58 | 59 | List tables using the aws cli on the localstack container: 60 | 61 | ```bash 62 | docker exec -it localstack awslocal dynamodb list-tables 63 | ``` 64 | 65 |
66 | Response: 67 | 68 | ```json 69 | { 70 | "TableNames": [ 71 | "orders" 72 | ] 73 | } 74 | ``` 75 | 76 |
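You can also inspect the table definition to confirm that the key schema and provisioned throughput match `main.tf` (an optional check, using the same `awslocal` cli):

```bash
# Describe the orders table created by terraform
docker exec -it localstack awslocal dynamodb describe-table --table-name orders
```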
77 | 78 | Create an item in the table: 79 | 80 | ```bash 81 | docker exec -it localstack awslocal dynamodb put-item --table-name orders --item '{"OrderID": {"S": "order-123"}}' 82 | ``` 83 | 84 | Scan the table to verify that the item was written: 85 | 86 | ```bash 87 | docker exec -it localstack awslocal dynamodb scan --table-name orders 88 | ``` 89 | 90 |
91 | Response: 92 | 93 | ```json 94 | { 95 | "Items": [ 96 | { 97 | "OrderID": { 98 | "S": "order-123" 99 | } 100 | } 101 | ], 102 | "Count": 1, 103 | "ScannedCount": 1, 104 | "ConsumedCapacity": null 105 | } 106 | ``` 107 | 108 |
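Instead of scanning, a single item can also be fetched directly by its hash key (a sketch, reusing the key written above):

```bash
# Fetch one item by its hash key rather than scanning the whole table
docker exec -it localstack awslocal dynamodb get-item \
  --table-name orders \
  --key '{"OrderID": {"S": "order-123"}}'
```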
109 | 110 | Destroy the infrastructure: 111 | 112 | ```bash 113 | terraform destroy 114 | ``` 115 | -------------------------------------------------------------------------------- /terraform/aws-localstack/dynamodb/main.tf: -------------------------------------------------------------------------------- 1 | resource "aws_dynamodb_table" "orders" { 2 | name = "orders" 3 | read_capacity = "2" 4 | write_capacity = "5" 5 | hash_key = "OrderID" 6 | 7 | attribute { 8 | name = "OrderID" 9 | type = "S" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /terraform/aws-localstack/dynamodb/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "4.23.0" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | region = "us-east-1" 12 | access_key = "quickstart" 13 | secret_key = "quickstart" 14 | skip_credentials_validation = true 15 | skip_metadata_api_check = true 16 | skip_requesting_account_id = true 17 | 18 | endpoints { 19 | dynamodb = "http://localhost:4566" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /terraform/aws-localstack/kinesis/main.tf: -------------------------------------------------------------------------------- 1 | resource "aws_kinesis_stream" "orders_processor" { 2 | name = "orders_processor" 3 | shard_count = 1 4 | retention_period = 30 5 | 6 | shard_level_metrics = [ 7 | "IncomingBytes", 8 | "OutgoingBytes", 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /terraform/aws-localstack/kinesis/provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | region = "us-east-1" 3 | access_key = "quickstart" 4 | secret_key = "quickstart" 5 | skip_credentials_validation = true 6 | skip_metadata_api_check = true 7 | skip_requesting_account_id = true 8 | 9 | endpoints { 10 | kinesis = "http://localhost:4566" 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /terraform/aws/ec2-instance/README.md: -------------------------------------------------------------------------------- 1 | # Terraform AWS EC2 Instance 2 | 3 | This quick-start provisions an EC2 instance with an IAM instance role as well as a Security Group using the [aws](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) provider. 4 | 5 | ## Requirements 6 | 7 | You will need to provide the following variables either via the cli `-var` (see the example after this list) or via the `terraform.tfvars` file: 8 | 9 | - `vpc`: the vpc id where you want your instance to be deployed. 10 | - `keyname`: the ssh keypair that you would like to use to authenticate with ssh. 11 | - `subnetid`: the id of the subnet in which you want to place your instance.
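For example, supplying them on the command line instead of a tfvars file (the ids below are placeholders, substitute your own):

```bash
# Pass the required variables as -var flags (placeholder values)
terraform plan \
  -var 'vpc=vpc-0123456789abcdef0' \
  -var 'keyname=default' \
  -var 'subnetid=subnet-0123456789abcdef0'
```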
12 | 13 | ## Usage 14 | 15 | Launching an EC2 instance with the required variables placed in the `./terraform.tfvars` file: 16 | 17 | ```bash 18 | $ terraform init 19 | $ terraform plan -var-file="terraform.tfvars" 20 | $ terraform apply -var-file="terraform.tfvars" -auto-approve 21 | ``` 22 | 23 | This provides the outputs of your EC2 instance: 24 | 25 | ``` 26 | Outputs: 27 | id = "i-0a34cf350f0125781" 28 | ip = "34.241.125.17" 29 | subnet = "subnet-01e5141ce5c72d38a" 30 | ``` 31 | 32 | And testing SSH: 33 | 34 | ```bash 35 | $ ssh -i ~/.ssh/rbkr.pem ubuntu@$(terraform output -raw ip) 36 | ubuntu@ip-10-0-171-62:~$ uname -m 37 | x86_64 38 | ubuntu@ip-10-0-171-62:~$ logout 39 | ``` 40 | 41 | Destroying the infrastructure that we've deployed: 42 | 43 | ```bash 44 | $ terraform destroy -auto-approve 45 | ``` 46 | 47 | ## Resources 48 | 49 | - https://registry.terraform.io/providers/hashicorp/aws/latest/docs -------------------------------------------------------------------------------- /terraform/aws/ec2-instance/iam.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "assume_role_policy" { 2 | statement { 3 | actions = ["sts:AssumeRole"] 4 | principals { 5 | type = "Service" 6 | identifiers = ["ec2.amazonaws.com"] 7 | } 8 | } 9 | } 10 | 11 | data "aws_iam_policy" "ec2_read_only_access" { 12 | arn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess" 13 | } 14 | 15 | resource "aws_iam_role" "ec2_access_role" { 16 | name = "${local.project_name}-ec2-role" 17 | assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json 18 | } 19 | 20 | resource "aws_iam_policy_attachment" "readonly_role_policy_attach" { 21 | name = "${local.project_name}-ec2-role-attachment" 22 | roles = [aws_iam_role.ec2_access_role.name] 23 | policy_arn = data.aws_iam_policy.ec2_read_only_access.arn 24 | } 25 | 26 | resource "aws_iam_instance_profile" "instance_profile" { 27 | name = "${local.project_name}-ec2-instance-profile" 28 | role = aws_iam_role.ec2_access_role.name 29 | } -------------------------------------------------------------------------------- /terraform/aws/ec2-instance/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | project_name = "${var.project_id}-${var.environment_name}" 3 | } -------------------------------------------------------------------------------- /terraform/aws/ec2-instance/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "latest_ubuntu" { 2 | most_recent = true 3 | owners = ["099720109477"] 4 | 5 | filter { 6 | name = "name" 7 | values = ["ubuntu/images/hvm-ssd/ubuntu-focal-20.04-*-server-*"] 8 | } 9 | 10 | filter { 11 | name = "virtualization-type" 12 | values = ["hvm"] 13 | } 14 | 15 | filter { 16 | name = "root-device-type" 17 | values = ["ebs"] 18 | } 19 | 20 | filter { 21 | name = "architecture" 22 | values = [var.arch] 23 | } 24 | 25 | } 26 | 27 | resource "aws_instance" "ec2" { 28 | ami = data.aws_ami.latest_ubuntu.id 29 | instance_type = var.instance_type 30 | subnet_id = var.subnetid 31 | key_name = var.keyname 32 | vpc_security_group_ids = [aws_security_group.ec2.id] 33 | associate_public_ip_address = true 34 | monitoring = true 35 | iam_instance_profile = aws_iam_instance_profile.instance_profile.name 36 | 37 | lifecycle { 38 | ignore_changes = [subnet_id, ami] 39 | } 40 | 41 | root_block_device { 42 | volume_type = "gp2" 43 | volume_size = var.ebs_root_size_in_gb 44 | encrypted = false 45 |
delete_on_termination = true 46 | } 47 | 48 | tags = merge( 49 | var.default_tags, 50 | { 51 | Name = "${local.project_name}" 52 | }, 53 | ) 54 | 55 | } -------------------------------------------------------------------------------- /terraform/aws/ec2-instance/outputs.tf: -------------------------------------------------------------------------------- 1 | output "id" { 2 | description = "The ec2 instance id" 3 | value = aws_instance.ec2.id 4 | sensitive = false 5 | } 6 | 7 | output "ip" { 8 | description = "The ec2 instance public ip address" 9 | value = aws_instance.ec2.public_ip 10 | sensitive = false 11 | } 12 | 13 | output "subnet" { 14 | description = "the subnet id which will be used" 15 | value = var.subnetid 16 | sensitive = false 17 | } -------------------------------------------------------------------------------- /terraform/aws/ec2-instance/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "4.23.0" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | region = "eu-west-1" 12 | profile = "default" 13 | shared_credentials_files = ["~/.aws/credentials"] 14 | } -------------------------------------------------------------------------------- /terraform/aws/ec2-instance/security.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "ec2" { 2 | name = "${local.project_name}-ec2-sg" 3 | description = "${local.project_name}-ec2-sg" 4 | vpc_id = var.vpc 5 | 6 | tags = merge( 7 | var.default_tags, 8 | { 9 | Name = "${local.project_name}-ec2-sg" 10 | }, 11 | ) 12 | } 13 | 14 | resource "aws_security_group_rule" "ssh" { 15 | description = "allows public ssh access to ec2" 16 | security_group_id = aws_security_group.ec2.id 17 | type = "ingress" 18 | protocol = "tcp" 19 | from_port = 22 20 | to_port = 22 21 | cidr_blocks = ["0.0.0.0/0"] 22 | } 23 | 24 | resource "aws_security_group_rule" "egress" { 25 | description = "allows egress" 26 | security_group_id = aws_security_group.ec2.id 27 | type = "egress" 28 | protocol = "-1" 29 | from_port = 0 30 | to_port = 0 31 | cidr_blocks = ["0.0.0.0/0"] 32 | } -------------------------------------------------------------------------------- /terraform/aws/ec2-instance/terraform.tfvars: -------------------------------------------------------------------------------- 1 | # required 2 | vpc = "vpc-xxxxxxxxxxxxx" 3 | keyname = "default" 4 | subnetid = "subnet-xxxxxxxxxxxxx" -------------------------------------------------------------------------------- /terraform/aws/ec2-instance/variables.tf: -------------------------------------------------------------------------------- 1 | variable "default_tags" { 2 | default = { 3 | Environment = "test" 4 | Owner = "ruan" 5 | Project = "terraform-quick-start" 6 | CostCenter = "engineering" 7 | ManagedBy = "terraform" 8 | } 9 | } 10 | 11 | variable "aws_region" { 12 | type = string 13 | default = "eu-west-1" 14 | description = "the region to use in aws" 15 | } 16 | 17 | variable "vpc" { 18 | type = string 19 | description = "the vpc to use" 20 | } 21 | 22 | variable "keyname" { 23 | type = string 24 | description = "ssh key to use" 25 | } 26 | 27 | variable "subnetid" { 28 | type = string 29 | description = "the subnet id where the ec2 instance needs to be placed in" 30 | } 31 | 32 | variable "instance_type" { 33 | type = string 34 | default = "t3.nano" 35 | description = "the instance type to use" 36 | } 37 | 38 | 
variable "project_id" { 39 | type = string 40 | default = "terraform-quick-start" 41 | description = "the project name" 42 | } 43 | 44 | variable "ebs_root_size_in_gb" { 45 | type = number 46 | default = 20 47 | description = "the size in GB for the root disk" 48 | } 49 | 50 | variable "environment_name" { 51 | type = string 52 | default = "test" 53 | description = "the environment this resource will go to (assumption being made theres one account)" 54 | } 55 | 56 | variable "arch" { 57 | type = string 58 | default = "x86_64" 59 | description = "architecture" 60 | } 61 | -------------------------------------------------------------------------------- /terraform/aws/modules/ec2/README.md: -------------------------------------------------------------------------------- 1 | # Terraform AWS EC2 Module 2 | 3 | This quick-start shows you how to implement your own AWS EC2 module. 4 | 5 | ## Info 6 | 7 | In the `./example/` directory is a example `main.tf`: 8 | 9 | ``` 10 | module "example" { 11 | source = "../../../modules/ec2/" 12 | 13 | aws_region = "us-west-1" 14 | aws_profile = "default" 15 | project_identifier = "quickstart" 16 | keyname = "ssh-default" 17 | selected_ami_type = "amazon" 18 | } 19 | 20 | output "instance_id" { 21 | value = module.example.id 22 | } 23 | 24 | output "instance_ip" { 25 | value = module.example.ip 26 | } 27 | ``` 28 | 29 | This module accepts parameters thats been referenced as variables from the module to provision a ec2 instance, which is useful for repeatability. 30 | 31 | ## Variables 32 | 33 | The following variables can be passed as arguments to override the defaults: 34 | 35 | Variable Name | Description | Required | Default Value 36 | --------------------- | -------------------------------------- | --------- | ------------- 37 | `aws_profile` | AWS Profile | True | N/A 38 | `keyname` | SSH Key Pair | True | N/A 39 | `aws_region` | AWS Region | False | eu-west-1 40 | `instance_type` | EC2 Instance Type | False | t3a.nano 41 | `project_identifier` | Your Project Name | False | terraform-quick-start 42 | `ebs_root_size_in_gb` | EBS Root Disk Size | False | 20 43 | `environment_name` | Your Environment Name | False | test 44 | `team_name` | Your Team Name | False | engineering 45 | `owner_name` | Owner of this resource | False | james.dean 46 | `arch` | The architecture type (aarch/x86_64) | False | x86_64 47 | `selected_ami_type` | The linux distribution (amazon/ubuntu) | True | N/A 48 | 49 | ## Usage 50 | 51 | This demonstrates the usage of this module inside the `example` directory: 52 | 53 | ```bash 54 | cd example 55 | terraform init 56 | terraform plan 57 | terraform apply 58 | 59 | Apply complete! Resources: 7 added, 0 changed, 0 destroyed. 
60 | 61 | Outputs: 62 | 63 | instance_id = "i-0b89d740f9c8ea8cb" 64 | instance_ip = "54.176.68.234" 65 | ``` 66 | 67 | In the AWS Console: 68 | 69 | ![](screenshots/ec2-module-aws-console-screenshot.png) 70 | -------------------------------------------------------------------------------- /terraform/aws/modules/ec2/example/main.tf: -------------------------------------------------------------------------------- 1 | module "example" { 2 | source = "../../../modules/ec2/" 3 | 4 | aws_region = "us-west-1" 5 | aws_profile = "test" 6 | project_identifier = "quickstart" 7 | keyname = "ssh-default" 8 | selected_ami_type = "amazon" 9 | } 10 | 11 | output "instance_id" { 12 | value = module.example.id 13 | } 14 | 15 | output "instance_ip" { 16 | value = module.example.ip 17 | } 18 | -------------------------------------------------------------------------------- /terraform/aws/modules/ec2/example/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "4.23.0" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | region = var.aws_region 12 | profile = var.aws_profile 13 | shared_credentials_files = ["~/.aws/credentials"] 14 | } 15 | -------------------------------------------------------------------------------- /terraform/aws/modules/ec2/example/terraform.tfvars: -------------------------------------------------------------------------------- 1 | aws_region = "eu-west-1" 2 | aws_profile = "personal" 3 | selected_ami_type = "ubuntu" 4 | -------------------------------------------------------------------------------- /terraform/aws/modules/ec2/example/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_profile" {} 2 | variable "aws_region" {} 3 | variable "selected_ami_type" {} 4 | -------------------------------------------------------------------------------- /terraform/aws/modules/ec2/iam.tf: -------------------------------------------------------------------------------- 1 | data "aws_iam_policy_document" "assume_role_policy" { 2 | statement { 3 | actions = ["sts:AssumeRole"] 4 | principals { 5 | type = "Service" 6 | identifiers = ["ec2.amazonaws.com"] 7 | } 8 | } 9 | } 10 | 11 | data "aws_iam_policy" "ec2_read_only_access" { 12 | arn = "arn:aws:iam::aws:policy/AmazonEC2ReadOnlyAccess" 13 | } 14 | 15 | resource "aws_iam_role" "ec2_access_role" { 16 | name = "${local.project_name}-ec2-role" 17 | assume_role_policy = data.aws_iam_policy_document.assume_role_policy.json 18 | } 19 | 20 | resource "aws_iam_policy_attachment" "readonly_role_policy_attach" { 21 | name = "${local.project_name}-ec2-role-attachment" 22 | roles = [aws_iam_role.ec2_access_role.name] 23 | policy_arn = data.aws_iam_policy.ec2_read_only_access.arn 24 | } 25 | 26 | resource "aws_iam_instance_profile" "instance_profile" { 27 | name = "${local.project_name}-ec2-instance-profile" 28 | role = aws_iam_role.ec2_access_role.name 29 | } -------------------------------------------------------------------------------- /terraform/aws/modules/ec2/locals.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | project_name = "${var.project_identifier}-${var.environment_name}" 3 | ami_architecture = var.selected_ami_type == "ubuntu" ? 
var.arch : var.ami_configs[var.selected_ami_type].architecture 4 | default_tags = { 5 | "Owner" = var.owner_name 6 | "ManagedBy" = "terraform" 7 | "Environment" = var.environment_name 8 | "Project" = var.project_identifier 9 | "CostCenter" = var.team_name 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /terraform/aws/modules/ec2/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_ami" "latest_selected" { 2 | most_recent = true 3 | owners = var.ami_configs[var.selected_ami_type].owners 4 | 5 | filter { 6 | name = "name" 7 | values = [var.ami_configs[var.selected_ami_type].name_pattern] 8 | } 9 | 10 | filter { 11 | name = "virtualization-type" 12 | values = ["hvm"] 13 | } 14 | 15 | filter { 16 | name = "root-device-type" 17 | values = ["ebs"] 18 | } 19 | 20 | filter { 21 | name = "architecture" 22 | values = [local.ami_architecture] 23 | } 24 | 25 | } 26 | 27 | resource "aws_key_pair" "this" { 28 | key_name = "${local.project_name}-key" 29 | public_key = file("~/.ssh/id_rsa.pub") 30 | } 31 | 32 | resource "aws_instance" "ec2" { 33 | ami = data.aws_ami.latest_selected.id 34 | instance_type = var.instance_type 35 | key_name = aws_key_pair.this.key_name 36 | vpc_security_group_ids = [aws_security_group.ec2.id] 37 | associate_public_ip_address = true 38 | monitoring = true 39 | iam_instance_profile = aws_iam_instance_profile.instance_profile.name 40 | 41 | lifecycle { 42 | ignore_changes = [ami] 43 | } 44 | 45 | root_block_device { 46 | volume_type = "gp3" 47 | volume_size = var.ebs_root_size_in_gb 48 | encrypted = false 49 | delete_on_termination = true 50 | } 51 | 52 | tags = merge( 53 | local.default_tags, 54 | { 55 | Name = "${local.project_name}" 56 | }, 57 | ) 58 | 59 | } 60 | -------------------------------------------------------------------------------- /terraform/aws/modules/ec2/outputs.tf: -------------------------------------------------------------------------------- 1 | output "id" { 2 | description = "The ec2 instance id" 3 | value = aws_instance.ec2.id 4 | sensitive = false 5 | } 6 | 7 | output "ip" { 8 | description = "The ec2 instance public ip address" 9 | value = aws_instance.ec2.public_ip 10 | sensitive = false 11 | } 12 | 13 | -------------------------------------------------------------------------------- /terraform/aws/modules/ec2/screenshots/ec2-module-aws-console-screenshot.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ruanbekker/quick-starts/4b9352df3d5ff15dd3eb7c5492c67d38e8f7204f/terraform/aws/modules/ec2/screenshots/ec2-module-aws-console-screenshot.png -------------------------------------------------------------------------------- /terraform/aws/modules/ec2/security.tf: -------------------------------------------------------------------------------- 1 | resource "aws_security_group" "ec2" { 2 | name = "${local.project_name}-ec2-sg" 3 | description = "${local.project_name}-ec2-sg" 4 | 5 | tags = merge( 6 | local.default_tags, 7 | { 8 | Name = "${local.project_name}-ec2-sg" 9 | }, 10 | ) 11 | } 12 | 13 | resource "aws_security_group_rule" "ssh" { 14 | description = "allows public ssh access to ec2" 15 | security_group_id = aws_security_group.ec2.id 16 | type = "ingress" 17 | protocol = "tcp" 18 | from_port = 22 19 | to_port = 22 20 | cidr_blocks = ["0.0.0.0/0"] 21 | } 22 | 23 | resource "aws_security_group_rule" "egress" { 24 | description = "allows egress" 25 | security_group_id = 
aws_security_group.ec2.id 26 | type = "egress" 27 | protocol = "-1" 28 | from_port = 0 29 | to_port = 0 30 | cidr_blocks = ["0.0.0.0/0"] 31 | } 32 | -------------------------------------------------------------------------------- /terraform/aws/modules/ec2/variables.tf: -------------------------------------------------------------------------------- 1 | variable "aws_region" { 2 | type = string 3 | default = "eu-west-1" 4 | description = "the region to use in aws" 5 | } 6 | 7 | variable "aws_profile" { 8 | type = string 9 | description = "the aws profile to use to authenticate" 10 | } 11 | 12 | variable "keyname" { 13 | type = string 14 | description = "ssh key to use" 15 | } 16 | 17 | variable "instance_type" { 18 | type = string 19 | default = "t3.nano" 20 | description = "the instance type to use" 21 | } 22 | 23 | variable "project_identifier" { 24 | type = string 25 | default = "terraform-quick-start" 26 | description = "the project name" 27 | } 28 | 29 | variable "ebs_root_size_in_gb" { 30 | type = number 31 | default = 20 32 | description = "the size in GB for the root disk" 33 | } 34 | 35 | variable "environment_name" { 36 | type = string 37 | default = "test" 38 | description = "the environment this resource will go to (assumption being made there's one account)" 39 | } 40 | 41 | variable "owner_name" { 42 | type = string 43 | default = "james.dean" 44 | description = "the owner of this resource - mostly used for tagging" 45 | } 46 | 47 | variable "team_name" { 48 | type = string 49 | default = "engineering" 50 | description = "the team that will be responsible for this resource - mostly for naming conventions and tagging" 51 | } 52 | 53 | variable "arch" { 54 | type = string 55 | default = "x86_64" 56 | description = "architecture" 57 | } 58 | 59 | variable "ami_configs" { 60 | type = map(object({ 61 | owners : list(string) 62 | name_pattern : string 63 | architecture : string 64 | })) 65 | 66 | default = { 67 | ubuntu = { 68 | owners = ["099720109477"] 69 | name_pattern = "ubuntu/images/hvm-ssd/ubuntu-jammy-22.04-*-server-*" 70 | architecture = "x86_64" 71 | }, 72 | amazon = { 73 | owners = ["amazon"] 74 | name_pattern = "amzn2-ami-hvm-*" 75 | architecture = "x86_64" 76 | } 77 | } 78 | } 79 | 80 | variable "selected_ami_type" { 81 | description = "The selected AMI type (e.g., 'ubuntu', 'amazon')" 82 | type = string 83 | } 84 | -------------------------------------------------------------------------------- /terraform/aws/modules/vpc/example/main.tf: -------------------------------------------------------------------------------- 1 | module "vpc" { 2 | source = "../../vpc" 3 | 4 | region = var.region 5 | environment = var.environment 6 | vpc_cidr = var.vpc_cidr 7 | public_subnets_cidr = var.public_subnets_cidr 8 | private_subnets_cidr = var.private_subnets_cidr 9 | 10 | } 11 | -------------------------------------------------------------------------------- /terraform/aws/modules/vpc/example/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc_id" { 2 | value = module.vpc.vpc_id 3 | } 4 | 5 | output "private_subnets_id" { 6 | value = module.vpc.private_subnets_id[0] 7 | } 8 | 9 | output "public_subnets_id" { 10 | value = module.vpc.public_subnets_id[0] 11 | } 12 | -------------------------------------------------------------------------------- /terraform/aws/modules/vpc/example/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 |
source = "hashicorp/aws" 5 | version = "4.31.0" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | shared_config_files = ["~/.aws/config"] 12 | shared_credentials_files = ["~/.aws/credentials"] 13 | profile = "personal" 14 | region = var.region 15 | } 16 | -------------------------------------------------------------------------------- /terraform/aws/modules/vpc/example/terraform.tfvars: -------------------------------------------------------------------------------- 1 | region = "us-west-2" 2 | environment = "dev" 3 | vpc_cidr = "172.18.0.0/16" 4 | private_subnets_cidr = ["172.18.10.0/24", "172.18.11.0/24", "172.18.12.0/24", "172.18.13.0/24"] 5 | public_subnets_cidr = ["172.18.100.0/24", "172.18.101.0/24", "172.18.102.0/24", "172.18.103.0/24"] 6 | -------------------------------------------------------------------------------- /terraform/aws/modules/vpc/example/variables.tf: -------------------------------------------------------------------------------- 1 | variable "region" { 2 | description = "The target AWS Region to use" 3 | } 4 | 5 | variable "environment" { 6 | description = "The Target Environment" 7 | } 8 | 9 | variable "vpc_cidr" { 10 | description = "The CIDR of the VPC" 11 | } 12 | 13 | variable "public_subnets_cidr" { 14 | type = list 15 | description = "The CIDR for the Public Subnets" 16 | } 17 | 18 | variable "private_subnets_cidr" { 19 | type = list 20 | description = "The CIDR for the Private Subnets" 21 | } 22 | -------------------------------------------------------------------------------- /terraform/aws/modules/vpc/main.tf: -------------------------------------------------------------------------------- 1 | data "aws_availability_zones" "available" {} 2 | 3 | resource "aws_vpc" "vpc" { 4 | cidr_block = var.vpc_cidr 5 | enable_dns_hostnames = true 6 | enable_dns_support = true 7 | 8 | tags = { 9 | Name = "${var.environment}-vpc" 10 | Environment = var.environment 11 | } 12 | } 13 | 14 | resource "aws_internet_gateway" "ig" { 15 | vpc_id = aws_vpc.vpc.id 16 | 17 | tags = { 18 | Name = "${var.environment}-igw" 19 | Environment = var.environment 20 | } 21 | } 22 | 23 | resource "aws_eip" "nat_eip" { 24 | vpc = true 25 | depends_on = [aws_internet_gateway.ig] 26 | } 27 | 28 | resource "aws_nat_gateway" "nat" { 29 | allocation_id = aws_eip.nat_eip.id 30 | subnet_id = element(aws_subnet.public_subnet.*.id, 0) 31 | depends_on = [aws_internet_gateway.ig] 32 | 33 | tags = { 34 | Name = "nat" 35 | Environment = var.environment 36 | } 37 | } 38 | 39 | resource "aws_subnet" "public_subnet" { 40 | vpc_id = aws_vpc.vpc.id 41 | count = length(var.public_subnets_cidr) 42 | cidr_block = element(var.public_subnets_cidr, count.index) 43 | availability_zone = element(data.aws_availability_zones.available.names, count.index) 44 | map_public_ip_on_launch = true 45 | 46 | tags = { 47 | Name = "${var.environment}-${element(data.aws_availability_zones.available.names, count.index)}-public-subnet" 48 | Environment = var.environment 49 | Tier = "public" 50 | } 51 | } 52 | 53 | resource "aws_subnet" "private_subnet" { 54 | vpc_id = aws_vpc.vpc.id 55 | count = length(var.private_subnets_cidr) 56 | cidr_block = element(var.private_subnets_cidr, count.index) 57 | availability_zone = element(data.aws_availability_zones.available.names, count.index) 58 | map_public_ip_on_launch = false 59 | 60 | tags = { 61 | Name = "${var.environment}-${element(data.aws_availability_zones.available.names, count.index)}-private-subnet" 62 | Environment = var.environment 63 | Tier = "private" 64 | } 65 | } 66 | 67 | resource
"aws_route_table" "private" { 68 | vpc_id = aws_vpc.vpc.id 69 | 70 | tags = { 71 | Name = "${var.environment}-private-route-table" 72 | Environment = var.environment 73 | } 74 | } 75 | 76 | resource "aws_route_table" "public" { 77 | vpc_id = aws_vpc.vpc.id 78 | 79 | tags = { 80 | Name = "${var.environment}-public-route-table" 81 | Environment = var.environment 82 | } 83 | } 84 | 85 | resource "aws_route" "public_internet_gateway" { 86 | route_table_id = aws_route_table.public.id 87 | destination_cidr_block = "0.0.0.0/0" 88 | gateway_id = aws_internet_gateway.ig.id 89 | } 90 | 91 | resource "aws_route" "private_nat_gateway" { 92 | route_table_id = aws_route_table.private.id 93 | destination_cidr_block = "0.0.0.0/0" 94 | nat_gateway_id = aws_nat_gateway.nat.id 95 | } 96 | 97 | resource "aws_route_table_association" "public" { 98 | count = length(var.public_subnets_cidr) 99 | subnet_id = element(aws_subnet.public_subnet.*.id, count.index) 100 | route_table_id = aws_route_table.public.id 101 | } 102 | 103 | resource "aws_route_table_association" "private" { 104 | count = length(var.private_subnets_cidr) 105 | subnet_id = element(aws_subnet.private_subnet.*.id, count.index) 106 | route_table_id = aws_route_table.private.id 107 | } 108 | 109 | resource "aws_security_group" "default" { 110 | name = "${var.environment}-default-sg" 111 | description = "Security Group to Allow Inbound and Outbound Traffic from and to the VPC" 112 | vpc_id = aws_vpc.vpc.id 113 | depends_on = [aws_vpc.vpc] 114 | 115 | ingress { 116 | from_port = "0" 117 | to_port = "0" 118 | protocol = "-1" 119 | self = true 120 | } 121 | 122 | egress { 123 | from_port = "0" 124 | to_port = "0" 125 | protocol = "-1" 126 | self = "true" 127 | } 128 | 129 | tags = { 130 | Environment = var.environment 131 | } 132 | } 133 | -------------------------------------------------------------------------------- /terraform/aws/modules/vpc/outputs.tf: -------------------------------------------------------------------------------- 1 | output "vpc_id" { 2 | value = aws_vpc.vpc.id 3 | } 4 | 5 | output "public_subnets_id" { 6 | value = [aws_subnet.public_subnet.*.id] 7 | } 8 | 9 | output "private_subnets_id" { 10 | value = [aws_subnet.private_subnet.*.id] 11 | } 12 | 13 | output "default_sg_id" { 14 | value = aws_security_group.default.id 15 | } 16 | 17 | output "security_groups_ids" { 18 | value = [aws_security_group.default.id] 19 | } 20 | 21 | output "public_route_table" { 22 | value = aws_route_table.public.id 23 | } 24 | -------------------------------------------------------------------------------- /terraform/aws/modules/vpc/variables.tf: -------------------------------------------------------------------------------- 1 | variable "environment" { 2 | description = "The Target Environment" 3 | } 4 | 5 | variable "vpc_cidr" { 6 | description = "The CIDR for the VPC" 7 | } 8 | 9 | variable "public_subnets_cidr" { 10 | type = list 11 | description = "The CIDR for the Public Subnets" 12 | } 13 | 14 | variable "private_subnets_cidr" { 15 | type = list 16 | description = "The CIDR block for the Private Subnets" 17 | } 18 | 19 | variable "region" { 20 | description = "The target AWS Region to use" 21 | } 22 | -------------------------------------------------------------------------------- /terraform/aws/vpc/README.md: -------------------------------------------------------------------------------- 1 | # Terraform - AWS VPC 2 | 3 | This quick-start creates a AWS VPC with the terraform 
[aws](https://registry.terraform.io/providers/hashicorp/aws/latest/docs) provider. 4 | 5 | ## Info 6 | 7 | This will create the following: 8 | 9 | - VPC with a CIDR of `172.18.0.0/16` (cidr has been split up into 8 subnets - 2 spare in case new az's come online) 10 | - 3 subnets in a private range 11 | - 3 subnets in a public range 12 | - internet gateway for public routes 13 | - nat gateway with an eip for private routes 14 | - subnets tagged with Tier=public/private 15 | - defaults to the `us-west-1` region 16 | 17 | ## Subnets 18 | 19 | CIDR: 172.18.0.0/16 20 | 21 | Left two ranges open for future availability zones. 22 | 23 | | Subnet Address | Netmask | Range of Addresses | Useable IPs | Hosts | AWS AZ | Tier | 24 | | --------------- | ------------- | ----------------------------- | ----------------------------- | ----- | ------------ | ------- | 25 | | 172.18.0.0/19 | 255.255.224.0 | 172.18.0.0 - 172.18.31.255 | 172.18.0.1 - 172.18.31.254 | 8190 | us-west-1a | Private | 26 | | 172.18.32.0/19 | 255.255.224.0 | 172.18.32.0 - 172.18.63.255 | 172.18.32.1 - 172.18.63.254 | 8190 | us-west-1b | Private | 27 | | 172.18.64.0/19 | 255.255.224.0 | 172.18.64.0 - 172.18.95.255 | 172.18.64.1 - 172.18.95.254 | 8190 | us-west-1c | Private | 28 | | 172.18.96.0/19 | 255.255.224.0 | 172.18.96.0 - 172.18.127.255 | 172.18.96.1 - 172.18.127.254 | 8190 | n/a | Private | 29 | | 172.18.128.0/19 | 255.255.224.0 | 172.18.128.0 - 172.18.159.255 | 172.18.128.1 - 172.18.159.254 | 8190 | us-west-1a | Public | 30 | | 172.18.160.0/19 | 255.255.224.0 | 172.18.160.0 - 172.18.191.255 | 172.18.160.1 - 172.18.191.254 | 8190 | us-west-1b | Public | 31 | | 172.18.192.0/19 | 255.255.224.0 | 172.18.192.0 - 172.18.223.255 | 172.18.192.1 - 172.18.223.254 | 8190 | us-west-1c | Public | 32 | | 172.18.224.0/19 | 255.255.224.0 | 172.18.224.0 - 172.18.255.255 | 172.18.224.1 - 172.18.255.254 | 8190 | n/a | Public | 33 | 34 | ## Usage 35 | 36 | Define the aws region that you would like to use in `variables.tf` as it defaults to `us-west-1`, then provision: 37 | 38 | ```bash 39 | $ terraform init 40 | $ terraform plan 41 | $ terraform apply 42 | ``` 43 | 44 | ## Resources 45 | 46 | - https://www.davidc.net/sites/default/subnets/subnets.html 47 | - https://registry.terraform.io/providers/hashicorp/aws/latest/docs 48 | 49 | -------------------------------------------------------------------------------- /terraform/aws/vpc/main.tf: -------------------------------------------------------------------------------- 1 | # vpc 2 | resource "aws_vpc" "main" { 3 | cidr_block = "172.18.0.0/16" 4 | 5 | enable_dns_support = true 6 | enable_dns_hostnames = true 7 | 8 | tags = { 9 | Name = "main" 10 | } 11 | } 12 | 13 | # internet gateway 14 | resource "aws_internet_gateway" "igw" { 15 | vpc_id = aws_vpc.main.id 16 | 17 | tags = { 18 | Name = "main" 19 | } 20 | } 21 | 22 | # subnets 23 | resource "aws_subnet" "private_1a" { 24 | vpc_id = aws_vpc.main.id 25 | cidr_block = "172.18.0.0/19" 26 | availability_zone = "${var.region}a" 27 | 28 | tags = { 29 | "Name" = "private-${var.region}a" 30 | "Tier" = "private" 31 | } 32 | } 33 | 34 | resource "aws_subnet" "private_1b" { 35 | vpc_id = aws_vpc.main.id 36 | cidr_block = "172.18.32.0/19" 37 | availability_zone = "${var.region}b" 38 | 39 | tags = { 40 | "Name" = "private-${var.region}b" 41 | "Tier" = "private" 42 | } 43 | } 44 | 45 | resource "aws_subnet" "private_1c" { 46 | vpc_id = aws_vpc.main.id 47 | cidr_block = "172.18.64.0/19" 48 | availability_zone = "${var.region}c" 49 | 50 | tags = { 51 | "Name" =
"private-${var.region}c" 52 | "Tier" = "private" 53 | } 54 | } 55 | 56 | resource "aws_subnet" "public_1a" { 57 | vpc_id = aws_vpc.main.id 58 | cidr_block = "172.18.128.0/19" 59 | availability_zone = "${var.region}a" 60 | map_public_ip_on_launch = true 61 | 62 | tags = { 63 | "Name" = "public-${var.region}a" 64 | "Tier" = "public" 65 | } 66 | } 67 | 68 | resource "aws_subnet" "public_1b" { 69 | vpc_id = aws_vpc.main.id 70 | cidr_block = "172.18.160.0/19" 71 | availability_zone = "${var.region}b" 72 | map_public_ip_on_launch = true 73 | 74 | tags = { 75 | "Name" = "public-${var.region}b" 76 | "Tier" = "public" 77 | } 78 | } 79 | 80 | resource "aws_subnet" "public_1c" { 81 | vpc_id = aws_vpc.main.id 82 | cidr_block = "172.18.192.0/19" 83 | availability_zone = "${var.region}c" 84 | map_public_ip_on_launch = true 85 | 86 | tags = { 87 | "Name" = "public-${var.region}c" 88 | "Tier" = "public" 89 | } 90 | } 91 | 92 | # nat gateway 93 | resource "aws_eip" "nat" { 94 | vpc = true 95 | 96 | tags = { 97 | Name = "main" 98 | } 99 | } 100 | 101 | resource "aws_nat_gateway" "nat" { 102 | allocation_id = aws_eip.nat.id 103 | subnet_id = aws_subnet.public_1a.id 104 | 105 | tags = { 106 | Name = "main" 107 | } 108 | 109 | depends_on = [aws_internet_gateway.igw] 110 | } 111 | 112 | # routing tables 113 | resource "aws_route_table" "private" { 114 | vpc_id = aws_vpc.main.id 115 | 116 | route { 117 | cidr_block = "0.0.0.0/0" 118 | nat_gateway_id = aws_nat_gateway.nat.id 119 | } 120 | 121 | tags = { 122 | Name = "private" 123 | Tier = "private" 124 | } 125 | } 126 | 127 | resource "aws_route_table" "public" { 128 | vpc_id = aws_vpc.main.id 129 | 130 | route { 131 | cidr_block = "0.0.0.0/0" 132 | gateway_id = aws_internet_gateway.igw.id 133 | } 134 | 135 | tags = { 136 | Name = "public" 137 | Tier = "public" 138 | } 139 | } 140 | 141 | # route table associations 142 | resource "aws_route_table_association" "private_1a" { 143 | subnet_id = aws_subnet.private_1a.id 144 | route_table_id = aws_route_table.private.id 145 | } 146 | 147 | resource "aws_route_table_association" "private_1b" { 148 | subnet_id = aws_subnet.private_1b.id 149 | route_table_id = aws_route_table.private.id 150 | } 151 | 152 | resource "aws_route_table_association" "private_1c" { 153 | subnet_id = aws_subnet.private_1c.id 154 | route_table_id = aws_route_table.private.id 155 | } 156 | 157 | resource "aws_route_table_association" "public_1a" { 158 | subnet_id = aws_subnet.public_1a.id 159 | route_table_id = aws_route_table.public.id 160 | } 161 | 162 | resource "aws_route_table_association" "public_1b" { 163 | subnet_id = aws_subnet.public_1b.id 164 | route_table_id = aws_route_table.public.id 165 | } 166 | 167 | resource "aws_route_table_association" "public_1c" { 168 | subnet_id = aws_subnet.public_1c.id 169 | route_table_id = aws_route_table.public.id 170 | } 171 | -------------------------------------------------------------------------------- /terraform/aws/vpc/outputs.tf: -------------------------------------------------------------------------------- 1 | output vpcid { 2 | value = aws_vpc.main.id 3 | sensitive = false 4 | description = "the aws vpc id" 5 | } 6 | 7 | output private_subnet_1a { 8 | value = aws_subnet.private_1a.id 9 | sensitive = false 10 | description = "the private subnet-1a id" 11 | } 12 | 13 | output private_subnet_1b { 14 | value = aws_subnet.private_1b.id 15 | sensitive = false 16 | description = "the private subnet-1b id" 17 | } 18 | 19 | output private_subnet_1c { 20 | value = aws_subnet.private_1c.id 21 | 
sensitive = false 22 | description = "the private subnet-1c id" 23 | } 24 | 25 | output public_subnet_1a { 26 | value = aws_subnet.public_1a.id 27 | sensitive = false 28 | description = "the public subnet-1a id" 29 | } 30 | 31 | output public_subnet_1b { 32 | value = aws_subnet.public_1b.id 33 | sensitive = false 34 | description = "the public subnet-1b id" 35 | } 36 | 37 | output public_subnet_1c { 38 | value = aws_subnet.public_1c.id 39 | sensitive = false 40 | description = "the public subnet-1c id" 41 | } 42 | 43 | output natgw_ip { 44 | value = aws_eip.nat.public_ip 45 | sensitive = false 46 | description = "nat gateway elastic ip" 47 | } 48 | -------------------------------------------------------------------------------- /terraform/aws/vpc/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "4.23.0" 6 | } 7 | } 8 | } 9 | 10 | provider "aws" { 11 | region = "eu-west-1" 12 | profile = "default" 13 | shared_credentials_files = ["~/.aws/credentials"] 14 | } -------------------------------------------------------------------------------- /terraform/aws/vpc/variables.tf: -------------------------------------------------------------------------------- 1 | variable region { 2 | type = string 3 | default = "us-west-1" 4 | description = "aws region" 5 | } 6 | -------------------------------------------------------------------------------- /terraform/docker/nginx/README.md: -------------------------------------------------------------------------------- 1 | # terraform docker nginx quick-start 2 | 3 | This runs a nginx container with terraform 4 | 5 | ## Deploy 6 | 7 | Deploy the docker container with Terraform: 8 | 9 | ``` 10 | terraform init 11 | terraform plan 12 | terraform apply 13 | ``` 14 | 15 | ## Test 16 | 17 | View containers: 18 | 19 | ```bash 20 | docker ps 21 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 22 | 6fa52d3be18b eb4a57159180 "/docker-entrypoint.…" 44 seconds ago Up 43 seconds 0.0.0.0:8000->80/tcp nginx 23 | ``` 24 | 25 | Make a GET request against the nginx web server: 26 | 27 | ```bash 28 | curl -I http://localhost:8000 29 | HTTP/1.1 200 OK 30 | Server: nginx/1.25.1 31 | Date: Wed, 14 Jun 2023 21:26:53 GMT 32 | Content-Type: text/html 33 | Content-Length: 615 34 | Last-Modified: Tue, 13 Jun 2023 15:08:10 GMT 35 | Connection: keep-alive 36 | ETag: "6488865a-267" 37 | Accept-Ranges: bytes 38 | ``` 39 | 40 | ## Cleanup 41 | 42 | Delete the container with Terraform 43 | 44 | ```bash 45 | terraform destroy 46 | ``` 47 | -------------------------------------------------------------------------------- /terraform/docker/nginx/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | docker = { 4 | source = "kreuzwerker/docker" 5 | version = "~> 3.0.1" 6 | } 7 | } 8 | } 9 | 10 | provider "docker" {} 11 | 12 | resource "docker_image" "nginx" { 13 | name = "nginx" 14 | keep_locally = false 15 | } 16 | 17 | resource "docker_container" "nginx" { 18 | image = docker_image.nginx.image_id 19 | name = "nginx" 20 | 21 | ports { 22 | internal = 80 23 | external = 8000 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /terraform/kafka-topics/README.md: -------------------------------------------------------------------------------- 1 | # kafka-with-terraform 2 | 3 | ## Quick-Start 4 | 5 | Boot the kafka cluster: 6 | 7 | 
```bash 8 | docker-compose up -d 9 | ``` 10 | 11 | List the current topics: 12 | 13 | ```bash 14 | docker exec -it broker-3 kafka-topics --list --bootstrap-server "broker-1:9092,broker-2:9093,broker-3:9094" 15 | ``` 16 | 17 | Change into terraform's example directory: 18 | 19 | ```bash 20 | cd example 21 | ``` 22 | 23 | Initialize, plan and apply to create the topics: 24 | 25 | ```bash 26 | terraform init 27 | terraform plan 28 | terraform apply 29 | ``` 30 | 31 | List topics again: 32 | 33 | ```bash 34 | docker exec -it broker-3 kafka-topics --list --bootstrap-server "broker-1:9092,broker-2:9093,broker-3:9094" 35 | ``` 36 | 37 | Destroy created topics: 38 | 39 | ```bash 40 | terraform destroy 41 | ``` 42 | 43 | Destroy kafka cluster: 44 | 45 | ```bash 46 | cd .. 47 | docker-compose down 48 | ``` 49 | -------------------------------------------------------------------------------- /terraform/kafka-topics/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | version: "3.9" 3 | 4 | services: 5 | zookeeper: 6 | image: confluentinc/cp-zookeeper:5.5.1 7 | container_name: zookeeper 8 | ports: 9 | - '32181:32181' 10 | environment: 11 | ZOOKEEPER_CLIENT_PORT: 32181 12 | ZOOKEEPER_TICK_TIME: 2000 13 | networks: 14 | - kafka 15 | logging: 16 | driver: "json-file" 17 | options: 18 | max-size: "1m" 19 | 20 | broker-1: 21 | image: confluentinc/cp-kafka:5.5.1 22 | container_name: broker-1 23 | ports: 24 | - '9092:9092' 25 | depends_on: 26 | - zookeeper 27 | environment: 28 | KAFKA_BROKER_ID: 1 29 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181 30 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT 31 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 32 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://broker-1:29092,EXTERNAL://localhost:9092 33 | KAFKA_DEFAULT_REPLICATION_FACTOR: 3 34 | KAFKA_NUM_PARTITIONS: 3 35 | networks: 36 | - kafka 37 | logging: 38 | driver: "json-file" 39 | options: 40 | max-size: "1m" 41 | 42 | broker-2: 43 | image: confluentinc/cp-kafka:5.5.1 44 | container_name: broker-2 45 | ports: 46 | - '9093:9093' 47 | depends_on: 48 | - zookeeper 49 | environment: 50 | KAFKA_BROKER_ID: 2 51 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181 52 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT 53 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 54 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://broker-2:29093,EXTERNAL://localhost:9093 55 | KAFKA_DEFAULT_REPLICATION_FACTOR: 3 56 | KAFKA_NUM_PARTITIONS: 3 57 | networks: 58 | - kafka 59 | logging: 60 | driver: "json-file" 61 | options: 62 | max-size: "1m" 63 | 64 | broker-3: 65 | image: confluentinc/cp-kafka:5.5.1 66 | container_name: broker-3 67 | ports: 68 | - '9094:9094' 69 | depends_on: 70 | - zookeeper 71 | environment: 72 | KAFKA_BROKER_ID: 3 73 | KAFKA_ZOOKEEPER_CONNECT: zookeeper:32181 74 | KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT 75 | KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL 76 | KAFKA_ADVERTISED_LISTENERS: INTERNAL://broker-3:29094,EXTERNAL://localhost:9094 77 | KAFKA_DEFAULT_REPLICATION_FACTOR: 3 78 | KAFKA_NUM_PARTITIONS: 3 79 | networks: 80 | - kafka 81 | logging: 82 | driver: "json-file" 83 | options: 84 | max-size: "1m" 85 | 86 | networks: 87 | kafka: 88 | name: kafka 89 | -------------------------------------------------------------------------------- /terraform/kafka-topics/example/main.tf: -------------------------------------------------------------------------------- 1 | module "kafka_topics" { 2 | source = 
"../../kafka" 3 | bootstrap_servers = var.bootstrap_servers 4 | topic_name = var.topic_name 5 | } 6 | 7 | -------------------------------------------------------------------------------- /terraform/kafka-topics/example/variables.tf: -------------------------------------------------------------------------------- 1 | variable "bootstrap_servers" { 2 | type = list(string) 3 | default = ["127.0.0.1:9092", "127.0.0.1:9093", "127.0.0.1:9094"] 4 | } 5 | 6 | variable "topic_name" { 7 | type = string 8 | default = "test123" 9 | } 10 | -------------------------------------------------------------------------------- /terraform/kafka-topics/main.tf: -------------------------------------------------------------------------------- 1 | resource "kafka_topic" "this" { 2 | name = var.topic_name 3 | replication_factor = 3 4 | partitions = 9 5 | } 6 | 7 | -------------------------------------------------------------------------------- /terraform/kafka-topics/outputs.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ruanbekker/quick-starts/4b9352df3d5ff15dd3eb7c5492c67d38e8f7204f/terraform/kafka-topics/outputs.tf -------------------------------------------------------------------------------- /terraform/kafka-topics/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | kafka = { 4 | source = "Mongey/kafka" 5 | } 6 | } 7 | } 8 | 9 | provider "kafka" { 10 | bootstrap_servers = var.bootstrap_servers 11 | tls_enabled = false 12 | } 13 | 14 | -------------------------------------------------------------------------------- /terraform/kafka-topics/variables.tf: -------------------------------------------------------------------------------- 1 | variable "bootstrap_servers" { 2 | type = list(string) 3 | } 4 | 5 | variable "topic_name" { 6 | type = string 7 | } 8 | -------------------------------------------------------------------------------- /terraform/kind-kubernetes/README.md: -------------------------------------------------------------------------------- 1 | # terraform-kind-kubernetes 2 | 3 | Deploy KinD Kubernetes Clusters with Terraform using [tehcyx/kind](https://registry.terraform.io/providers/tehcyx/kind/latest/docs) 4 | 5 | ## Example 6 | 7 | Deploy the Cluster: 8 | 9 | ```bash 10 | terraform init 11 | terraform apply -auto-approve 12 | ``` 13 | 14 | View the Nodes: 15 | 16 | ```bash 17 | KUBECONFIG=/tmp/config kubectl get nodes 18 | ``` 19 | 20 | The output: 21 | 22 | ```bash 23 | NAME STATUS ROLES AGE VERSION 24 | test-cluster-control-plane Ready control-plane 110s v1.27.1 25 | test-cluster-worker Ready 85s v1.27.1 26 | ``` 27 | 28 | Destroy the Cluster: 29 | 30 | ```bash 31 | terraform destroy -auto-approve 32 | ``` 33 | 34 | ## Resources 35 | 36 | ### Documentation 37 | - https://registry.terraform.io/providers/tehcyx/kind/latest/docs/resources/cluster 38 | 39 | ### Releases 40 | - https://github.com/kubernetes-sigs/kind/releases 41 | 42 | -------------------------------------------------------------------------------- /terraform/kind-kubernetes/helm.tf: -------------------------------------------------------------------------------- 1 | resource "helm_release" "nginx" { 2 | count = var.install_nginx ? 
1 : 0 3 | name = "nginx" 4 | repository = "https://charts.bitnami.com/bitnami" 5 | chart = "nginx" 6 | namespace = "default" 7 | version = "16.0.1" 8 | 9 | set { 10 | name = "replicaCount" 11 | value = "1" 12 | } 13 | 14 | } 15 | -------------------------------------------------------------------------------- /terraform/kind-kubernetes/main.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | k8s_config_path = pathexpand("/tmp/config") 3 | } 4 | 5 | resource "kind_cluster" "default" { 6 | name = var.cluster_name 7 | node_image = "kindest/node:${var.kind_version}" 8 | kubeconfig_path = local.k8s_config_path 9 | wait_for_ready = true 10 | 11 | kind_config { 12 | kind = "Cluster" 13 | api_version = "kind.x-k8s.io/v1alpha4" 14 | 15 | node { 16 | role = "control-plane" 17 | 18 | kubeadm_config_patches = [ 19 | "kind: InitConfiguration\nnodeRegistration:\n kubeletExtraArgs:\n node-labels: \"ingress-ready=true\"\n" 20 | ] 21 | 22 | extra_port_mappings { 23 | container_port = 80 24 | host_port = 80 25 | } 26 | extra_port_mappings { 27 | container_port = 443 28 | host_port = 443 29 | } 30 | } 31 | 32 | node { 33 | role = "worker" 34 | } 35 | } 36 | } 37 | 38 | -------------------------------------------------------------------------------- /terraform/kind-kubernetes/outputs.tf: -------------------------------------------------------------------------------- 1 | output "kubeconfig" { 2 | value = kind_cluster.default.kubeconfig 3 | } 4 | 5 | output "endpoint" { 6 | value = kind_cluster.default.endpoint 7 | } 8 | 9 | output "client_certificate" { 10 | value = kind_cluster.default.client_certificate 11 | } 12 | 13 | output "client_key" { 14 | value = kind_cluster.default.client_key 15 | } 16 | 17 | output "cluster_ca_certificate" { 18 | value = kind_cluster.default.cluster_ca_certificate 19 | } 20 | -------------------------------------------------------------------------------- /terraform/kind-kubernetes/providers.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | kind = { 4 | source = "tehcyx/kind" 5 | version = "0.4.0" 6 | } 7 | helm = { 8 | source = "hashicorp/helm" 9 | version = "2.13.0" 10 | } 11 | } 12 | } 13 | 14 | provider "helm" { 15 | kubernetes { 16 | host = kind_cluster.default.endpoint 17 | 18 | client_certificate = kind_cluster.default.client_certificate 19 | client_key = kind_cluster.default.client_key 20 | cluster_ca_certificate = kind_cluster.default.cluster_ca_certificate 21 | } 22 | } 23 | 24 | provider "kind" {} 25 | 26 | -------------------------------------------------------------------------------- /terraform/kind-kubernetes/terraform.tfvars: -------------------------------------------------------------------------------- 1 | install_nginx = false 2 | -------------------------------------------------------------------------------- /terraform/kind-kubernetes/variables.tf: -------------------------------------------------------------------------------- 1 | variable "cluster_name" { 2 | description = "The kind cluster name." 3 | default = "test-cluster2" 4 | type = string 5 | } 6 | 7 | variable "kind_version" { 8 | description = "The kind version of kubernetes." 
--------------------------------------------------------------------------------
/terraform/kind-kubernetes/variables.tf:
--------------------------------------------------------------------------------
variable "cluster_name" {
  description = "The kind cluster name."
  default     = "test-cluster2"
  type        = string
}

variable "kind_version" {
  description = "The Kubernetes version of the kind node image."
  default     = "v1.27.1"
  type        = string
}

variable "install_nginx" {
  description = "Whether to install the nginx Helm chart."
  type        = bool
  default     = false
}
--------------------------------------------------------------------------------
/terraform/local-exec/for-loops/README.md:
--------------------------------------------------------------------------------
# Terraform For Loops

For loops using the local-exec provisioner inside a [null_resource](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource)

## Usage

```bash
$ terraform init
$ terraform apply -auto-approve
$ cat results.yml
hostname: 'main.example.com'
username: 'james'
```

## Resources

- https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource
--------------------------------------------------------------------------------
/terraform/local-exec/for-loops/main.tf:
--------------------------------------------------------------------------------
resource "null_resource" "this" {
  for_each = {
    hostname = "main.example.com"
    username = "james"
  }

  provisioner "local-exec" {
    command = <<EOT
echo "${each.key}: '${each.value}'" >> results.yml
EOT
  }
}
--------------------------------------------------------------------------------
/terraform/local-exec/for-loops/provider.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    null = {
      source  = "hashicorp/null"
      version = "3.1.1"
    }
  }
}

provider "null" {}
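Because `local-exec` runs shell commands that Terraform does not track, repeated applies keep appending to `results.yml`. A sketch of an alternative (an assumption, not part of this quickstart) that renders the same file declaratively with the `hashicorp/local` provider, so re-applies converge instead of appending:

```hcl
# Hypothetical alternative: manage results.yml as a tracked resource
# instead of appending to it with shell commands.
resource "local_file" "results" {
  filename = "results.yml"
  content = join("\n", [
    for k, v in { hostname = "main.example.com", username = "james" } :
    "${k}: '${v}'"
  ])
}
```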
--------------------------------------------------------------------------------
/terraform/local-exec/time-sleep/README.md:
--------------------------------------------------------------------------------
# Terraform with Delay

Using the null resource, we can demonstrate a delay after resource creation and deletion using [sleep](https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep).

## Usage

When creating the resources, it will sleep for 10s:

```bash
$ terraform init
$ terraform apply -auto-approve

null_resource.previous: Creating...
null_resource.previous: Provisioning with 'local-exec'...
null_resource.previous (local-exec): Executing: ["/bin/sh" "-c" "echo 'foo' > file.txt"]
null_resource.previous: Creation complete after 0s [id=4572588651629938240]
time_sleep.wait_10_seconds: Creating...
time_sleep.wait_10_seconds: Still creating... [10s elapsed]
time_sleep.wait_10_seconds: Creation complete after 10s [id=2022-07-25T15:09:57Z]
null_resource.next: Creating...
null_resource.next: Creation complete after 0s [id=3616636602518223470]
```

When we destroy the resources, it will sleep for 10s:

```bash
$ terraform destroy -auto-approve

Plan: 0 to add, 0 to change, 3 to destroy.
null_resource.next: Destroying... [id=3616636602518223470]
null_resource.next: Destruction complete after 0s
time_sleep.wait_10_seconds: Destroying... [id=2022-07-25T15:09:57Z]
time_sleep.wait_10_seconds: Still destroying... [id=2022-07-25T15:09:57Z, 10s elapsed]
time_sleep.wait_10_seconds: Destruction complete after 10s
null_resource.previous: Destroying... [id=4572588651629938240]
null_resource.previous: Destruction complete after 0s
```

## Resources

- https://registry.terraform.io/providers/hashicorp/time/latest/docs/resources/sleep
- https://www.terraform.io/language/resources/provisioners/local-exec
--------------------------------------------------------------------------------
/terraform/local-exec/time-sleep/main.tf:
--------------------------------------------------------------------------------
resource "null_resource" "previous" {
  provisioner "local-exec" {
    command = "echo 'foo' > file.txt"
  }
}

resource "time_sleep" "wait_10_seconds" {
  depends_on       = [null_resource.previous]
  create_duration  = "10s"
  destroy_duration = "10s"
}

resource "null_resource" "next" {
  depends_on = [time_sleep.wait_10_seconds]
}
--------------------------------------------------------------------------------
/terraform/local-exec/time-sleep/provider.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    time = {
      source  = "hashicorp/time"
      version = "0.7.2"
    }
  }
}

provider "time" {}
--------------------------------------------------------------------------------
/terraform/mysql/paynetworx-provider/README.md:
--------------------------------------------------------------------------------
# terraform-mysql-quickstart

Creates a database, a user, and grants, and outputs the user's password as a string encrypted against your `keybase_username`'s PGP key, using the [paynetworx](https://registry.terraform.io/providers/Paynetworx/mysql/latest/docs) provider.

## MySQL Container

The `docker-compose.yml` defines the mysql container.

## Docs

- https://registry.terraform.io/providers/Paynetworx/mysql/latest/docs
--------------------------------------------------------------------------------
/terraform/mysql/paynetworx-provider/docker-compose.yml:
--------------------------------------------------------------------------------
version: "3.8"

services:
  mysql:
    image: mysql:8.0
    ports:
      - 3306:3306
    environment:
      - MYSQL_DATABASE=sample
      - MYSQL_ROOT_PASSWORD=rootpassword
--------------------------------------------------------------------------------
/terraform/mysql/paynetworx-provider/example/main.tf:
--------------------------------------------------------------------------------
module "test_db" {
  source = "../"

  database_host     = var.database_host
  database_port     = var.database_port
  database_username = var.database_username
  database_password = var.database_password
  database_name     = "foobar"
  keybase_username  = var.keybase_username
}
--------------------------------------------------------------------------------
/terraform/mysql/paynetworx-provider/example/outputs.tf:
--------------------------------------------------------------------------------
output "database_name" {
  value = module.test_db.db_name
}

output "database_password" {
  value = module.test_db.password
}
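The password surfaced by this output is already PGP-encrypted against the keybase key, so printing it is relatively harmless. If you adapt the example to emit a plaintext secret instead (as the petoju quickstart further down does), a hedged sketch of marking the output sensitive so Terraform redacts it in plan/apply display:

```hcl
# Hypothetical variation: redact the output value in the CLI display.
output "database_password" {
  value     = module.test_db.password
  sensitive = true
}
```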
--------------------------------------------------------------------------------
/terraform/mysql/paynetworx-provider/example/terraform.tfvars:
--------------------------------------------------------------------------------
database_host     = "127.0.0.1"
database_port     = "3306"
database_username = "root"
database_password = "rootpassword"
keybase_username  = "rbekker87"
--------------------------------------------------------------------------------
/terraform/mysql/paynetworx-provider/example/variables.tf:
--------------------------------------------------------------------------------
variable "database_host" {}
variable "database_port" {}
variable "database_username" {}
variable "database_password" {}
variable "keybase_username" {}
--------------------------------------------------------------------------------
/terraform/mysql/paynetworx-provider/main.tf:
--------------------------------------------------------------------------------
resource "mysql_database" "user_db" {
  name = var.database_name
}

resource "mysql_user" "user_id" {
  user = var.database_username
  host = "%"
}

resource "mysql_grant" "user_id" {
  user       = mysql_user.user_id.user
  host       = "%"
  database   = var.database_name
  privileges = ["SELECT", "UPDATE"]
}

resource "mysql_user_password" "user_id" {
  user    = mysql_user.user_id.user
  host    = "%"
  pgp_key = "keybase:${var.keybase_username}"
}
--------------------------------------------------------------------------------
/terraform/mysql/paynetworx-provider/outputs.tf:
--------------------------------------------------------------------------------
output "db_name" {
  value = mysql_database.user_db.name
}

output "password" {
  value = mysql_user_password.user_id.encrypted_password
}
--------------------------------------------------------------------------------
/terraform/mysql/paynetworx-provider/provider.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    mysql = {
      source  = "Paynetworx/mysql"
      version = "1.12.7"
    }
  }
}

provider "mysql" {
  endpoint = "${var.database_host}:${var.database_port}"
  username = var.database_username
  password = var.database_password
}
--------------------------------------------------------------------------------
/terraform/mysql/paynetworx-provider/variables.tf:
--------------------------------------------------------------------------------
variable "database_name" {
  default = ""
  type    = string
}

variable "database_host" {}
variable "database_port" {}
variable "database_username" {}
variable "database_password" {}
variable "keybase_username" {}
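The grant in `main.tf` above hardcodes `SELECT` and `UPDATE`. A possible variation (hypothetical, not part of this quickstart) exposes the privilege list as a module variable so callers can choose:

```hcl
# Hypothetical variation: let the module caller pick the granted privileges.
variable "privileges" {
  description = "Privileges to grant to the created user."
  type        = list(string)
  default     = ["SELECT", "UPDATE"]
}

resource "mysql_grant" "user_id" {
  user       = mysql_user.user_id.user
  host       = "%"
  database   = var.database_name
  privileges = var.privileges
}
```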
--------------------------------------------------------------------------------
/terraform/mysql/petoju-provider/README.md:
--------------------------------------------------------------------------------
# terraform-mysql-provider

Creates a database, a user, and grants, using the [petoju](https://registry.terraform.io/providers/petoju/mysql/latest/docs) provider.

## MySQL Container

The `docker-compose.yml` defines the mysql container.

## Docs

- https://registry.terraform.io/providers/petoju/mysql/latest/docs
--------------------------------------------------------------------------------
/terraform/mysql/petoju-provider/docker-compose.yml:
--------------------------------------------------------------------------------
version: "3.8"

services:
  mysql:
    image: mysql:8.0
    ports:
      - 3306:3306
    environment:
      - MYSQL_DATABASE=sample
      - MYSQL_ROOT_PASSWORD=rootpassword
--------------------------------------------------------------------------------
/terraform/mysql/petoju-provider/main.tf:
--------------------------------------------------------------------------------
resource "random_password" "user_password" {
  length           = 24
  special          = true
  min_special      = 2
  override_special = "!#$%^&*()-_=+[]{}<>:?"
  keepers = {
    password_version = var.password_version
  }
}

resource "mysql_database" "user_db" {
  provider = mysql.local
  name     = var.database_name
}

resource "mysql_user" "user_id" {
  provider           = mysql.local
  user               = var.database_username
  plaintext_password = random_password.user_password.result
  host               = "%"
  tls_option         = "NONE"
}

resource "mysql_grant" "user_id" {
  provider   = mysql.local
  user       = var.database_username
  host       = "%"
  database   = var.database_name
  privileges = ["SELECT", "UPDATE"]
  depends_on = [
    mysql_user.user_id
  ]
}
--------------------------------------------------------------------------------
/terraform/mysql/petoju-provider/outputs.tf:
--------------------------------------------------------------------------------
output "user" {
  value = mysql_user.user_id.user
}

output "password" {
  sensitive = true
  value     = random_password.user_password.result
}
--------------------------------------------------------------------------------
/terraform/mysql/petoju-provider/providers.tf:
--------------------------------------------------------------------------------
terraform {
  required_providers {
    mysql = {
      source  = "petoju/mysql"
      version = "3.0.37"
    }
  }
}

provider "mysql" {
  alias    = "local"
  endpoint = "127.0.0.1:3306"
  username = "root"
  password = "rootpassword"
}
--------------------------------------------------------------------------------
/terraform/mysql/petoju-provider/terraform.tfvars:
--------------------------------------------------------------------------------
database_name     = "foobar"
database_username = "ruanb"
password_version  = 0
--------------------------------------------------------------------------------
/terraform/mysql/petoju-provider/variables.tf:
--------------------------------------------------------------------------------
variable "database_name" {
  description = "The name of the database that you want created."
  type        = string
  default     = null
}

variable "database_username" {
  description = "The name of the database username that you want created."
  type        = string
  default     = null
}

variable "password_version" {
  description = "The password rotates when this value gets updated."
  type        = number
  default     = 0
}
--------------------------------------------------------------------------------
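Because `password_version` is wired into the `keepers` of `random_password.user_password` in `main.tf`, bumping it forces a new password to be generated, and pushed to MySQL, on the next apply. A sketch of the rotation, reusing the tfvars this quickstart ships:

```hcl
# terraform.tfvars -- bump password_version to rotate the generated password
database_name     = "foobar"
database_username = "ruanb"
password_version  = 1
```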