├── .dockerignore ├── .github └── workflows │ └── docker-latest.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── Makefile ├── README.md ├── bargs.sh ├── bargs_vars ├── docker-compose-aws.yml ├── docker-compose.yml ├── entrypoint.sh ├── examples ├── basic │ └── tfcoding.tf ├── complex │ ├── nodegroup.tpl │ └── tfcoding.tf ├── complex2 │ ├── tfcoding.tf │ └── vmconfig.tpl ├── mock-aws-pulumi │ ├── provider.tf │ └── tfcoding.tf └── mock-aws │ ├── provider.tf │ └── tfcoding.tf ├── helm-app ├── .helmignore ├── Chart.lock ├── Chart.yaml └── values.yaml ├── kubernetes └── argocd-repositories.yml ├── localstack └── healthcheck-init.sh ├── pulumi ├── .gitignore ├── Pulumi.dev.yaml ├── Pulumi.yaml ├── README.md ├── __main__.py ├── constants.py ├── containers.py └── requirements.txt └── scripts └── tests.sh /.dockerignore: -------------------------------------------------------------------------------- 1 | ** 2 | !./entrypoint.sh 3 | !./bargs.sh 4 | !./bargs_vars 5 | -------------------------------------------------------------------------------- /.github/workflows/docker-latest.yml: -------------------------------------------------------------------------------- 1 | name: Push latest version to DockerHub 2 | 3 | on: 4 | push: 5 | paths-ignore: 6 | - "README.md" 7 | # TODO: Remove 8 | branches-ignore: 9 | - "feature/kubernetes" 10 | 11 | env: 12 | TARGET_APP: app 13 | DOCKERHUB_REPOSITORY: docker.io/unfor19/tfcoding 14 | TFCODING_VERSION: 0.0.12 15 | 16 | jobs: 17 | build-push: 18 | name: Docker Build Push 19 | if: ${{ github.event_name == 'push' || github.event_name == 'workflow_dispatch' }} 20 | runs-on: ubuntu-22.04 21 | strategy: 22 | matrix: 23 | include: 24 | - DOCKERFILE_PATH: Dockerfile 25 | TERRAFORM_VERSION: "1.4.6" 26 | - DOCKERFILE_PATH: Dockerfile 27 | TERRAFORM_VERSION: "1.5.5" 28 | - DOCKERFILE_PATH: Dockerfile 29 | TERRAFORM_VERSION: "1.6.6" 30 | - DOCKERFILE_PATH: Dockerfile 31 | TERRAFORM_VERSION: "1.7.0-rc1" 32 | env: 33 | DOCKERFILE_PATH: ${{ 
matrix.DOCKERFILE_PATH }} 34 | TERRAFORM_VERSION: ${{ matrix.TERRAFORM_VERSION }} 35 | steps: 36 | - uses: actions/checkout@v4.1.1 37 | - name: Inject slug/short variables 38 | uses: rlespinasse/github-slug-action@v4.4.1 39 | - name: Set up QEMU 40 | uses: docker/setup-qemu-action@v3.0.0 41 | - name: Set up Docker Buildx 42 | uses: docker/setup-buildx-action@v3.0.0 43 | - name: Login to Docker Hub 44 | uses: docker/login-action@v3.0.0 45 | with: 46 | username: ${{ secrets.DOCKER_USERNAME }} 47 | password: ${{ secrets.DOCKER_PASSWORD }} 48 | - name: Build - amd64 for running Test 49 | uses: docker/build-push-action@v5.1.0 50 | with: 51 | file: Dockerfile 52 | context: . 53 | push: false 54 | platforms: linux/amd64 55 | target: app 56 | build-args: | 57 | TERRAFORM_VERSION=${{ env.TERRAFORM_VERSION }} 58 | tags: | 59 | ${{ env.DOCKERHUB_REPOSITORY }}:${{ env.TFCODING_VERSION }}-${{ env.GITHUB_REF_SLUG }} 60 | ${{ env.DOCKERHUB_REPOSITORY }}:${{ env.TFCODING_VERSION }}-${{ env.GITHUB_SHA_SHORT }} 61 | load: true # For later to run tests on the built image 62 | cache-from: type=gha 63 | cache-to: type=gha,mode=max 64 | - name: Test 65 | run: make test 66 | - name: Side Branch - Build - arm64, Push - arm64,amd64 67 | if: github.ref != 'refs/heads/master' 68 | uses: docker/build-push-action@v5.1.0 69 | with: 70 | file: Dockerfile 71 | context: . 
72 | push: true 73 | platforms: linux/amd64,linux/arm64 74 | target: app 75 | build-args: | 76 | TERRAFORM_VERSION=${{ env.TERRAFORM_VERSION }} 77 | tags: | 78 | ${{ env.DOCKERHUB_REPOSITORY }}:${{ env.TFCODING_VERSION }}-${{ env.TERRAFORM_VERSION }}-${{ env.GITHUB_REF_SLUG }} 79 | ${{ env.DOCKERHUB_REPOSITORY }}:${{ env.TFCODING_VERSION }}-${{ env.TERRAFORM_VERSION }}-${{ env.GITHUB_SHA_SHORT }} 80 | load: false # Must be false to push linux/arm64 images 81 | cache-from: type=gha 82 | cache-to: type=gha,mode=max 83 | - name: Latest master - Build - arm64, Push - arm64,amd64 84 | if: github.ref == 'refs/heads/master' 85 | uses: docker/build-push-action@v5.1.0 86 | with: 87 | file: Dockerfile 88 | context: . 89 | push: true 90 | platforms: linux/amd64,linux/arm64 91 | target: app 92 | build-args: | 93 | TERRAFORM_VERSION=${{ env.TERRAFORM_VERSION }} 94 | tags: | 95 | ${{ env.DOCKERHUB_REPOSITORY }}:${{ env.TFCODING_VERSION }}-${{ env.TERRAFORM_VERSION }} 96 | ${{ env.DOCKERHUB_REPOSITORY }}:${{ env.TFCODING_VERSION }}-${{ env.TERRAFORM_VERSION }}-${{ env.GITHUB_SHA_SHORT }} 97 | load: false # Must be false to push linux/arm64 images 98 | cache-from: type=gha 99 | cache-to: type=gha,mode=max 100 | - name: Latest Tag - Build - arm64, Push - arm64,amd64 101 | if: github.ref == 'refs/heads/master' && env.TERRAFORM_VERSION == '1.6.6' 102 | uses: docker/build-push-action@v5.1.0 103 | with: 104 | file: Dockerfile 105 | context: . 
106 | push: true 107 | platforms: linux/amd64,linux/arm64 108 | target: app 109 | build-args: | 110 | TERRAFORM_VERSION=${{ env.TERRAFORM_VERSION }} 111 | tags: | 112 | ${{ env.DOCKERHUB_REPOSITORY }}:latest 113 | ${{ env.DOCKERHUB_REPOSITORY }}:${{ env.TFCODING_VERSION }} 114 | ${{ env.DOCKERHUB_REPOSITORY }}:${{ env.TFCODING_VERSION }}-${{ env.TERRAFORM_VERSION }} 115 | load: false # Must be false to push linux/arm64 images 116 | cache-from: type=gha 117 | cache-to: type=gha,mode=max 118 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .* 2 | !.github/ 3 | !.*ignore* 4 | *tfstate* 5 | *tfvars* 6 | !.tfcoding.tf 7 | .terraform 8 | *.tgz 9 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | ARG PYTHON_VERSION="3.11.6" 2 | ARG ALPINE_VERSION="3.18" 3 | ARG TERRAFORM_VERSION="1.6.6" 4 | ARG HCL2JSON_VERSION="v0.6.0" 5 | ARG FSWATCH_VERSION="1.17.1" 6 | 7 | FROM alpine:${ALPINE_VERSION} as download 8 | ARG TERRAFORM_VERSION 9 | ARG HCL2JSON_VERSION 10 | ARG FSWATCH_VERSION 11 | 12 | ENV OS_ARCH="amd64" 13 | WORKDIR /downloads/ 14 | RUN if [ "$(uname -m)" = "aarch64" ]; then export OS_ARCH=arm64; fi && \ 15 | apk add --no-cache unzip curl && \ 16 | curl -sL -o terraform.zip "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_${OS_ARCH}.zip" && \ 17 | unzip terraform.zip && rm terraform.zip && \ 18 | curl -sL -o hcl2json "https://github.com/tmccombs/hcl2json/releases/download/${HCL2JSON_VERSION}/hcl2json_linux_${OS_ARCH}" && chmod +x hcl2json && \ 19 | mkdir fswatch && cd fswatch && \ 20 | curl -L -o fswatch.tar.gz "https://github.com/unfor19/fswatch/releases/download/${FSWATCH_VERSION}/fswatch-${FSWATCH_VERSION}-linux-alpine-${OS_ARCH}.tar.gz" && \ 21 | tar -xzf 
fswatch.tar.gz && chmod +x fswatch && rm fswatch.tar.gz 22 | # Output: /downloads/ terraform, hcl2json, fswatch 23 | 24 | FROM python:${PYTHON_VERSION}-alpine${ALPINE_VERSION} as app 25 | ARG APP_USER_NAME="appuser" 26 | ARG APP_USER_ID="1000" 27 | ARG APP_GROUP_NAME="appgroup" 28 | ARG APP_GROUP_ID="1000" 29 | 30 | # fswatch: litstdc++, gettext 31 | # tfcoding: bash, jq, util-linux (bargs) 32 | RUN apk add --no-cache \ 33 | libstdc++ gettext \ 34 | bash jq util-linux \ 35 | git openssh-client curl aws-cli && \ 36 | python -m pip install -U pip setuptools wheel && \ 37 | python -m pip install awscli-local terraform-local 38 | COPY --from=download /downloads/terraform /usr/local/bin/terraform 39 | COPY --from=download /downloads/hcl2json /usr/local/bin/hcl2json 40 | COPY --from=download /downloads/fswatch/*.so* /usr/local/lib/ 41 | COPY --from=download /downloads/fswatch/fswatch /usr/local/bin/fswatch 42 | WORKDIR /src/ 43 | RUN \ 44 | addgroup -g "${APP_GROUP_ID}" "${APP_GROUP_NAME}" && \ 45 | adduser -H -D -u "$APP_USER_ID" -G "$APP_GROUP_NAME" "$APP_USER_NAME" && \ 46 | chown -R "$APP_USER_ID":"$APP_GROUP_ID" . 47 | USER "$APP_USER_NAME" 48 | SHELL [ "/usr/local/bin/bash" ] 49 | COPY . 
/usr/local/bin/ 50 | ENTRYPOINT ["/usr/local/bin/entrypoint.sh"] 51 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Meir Gabay 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
-------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .EXPORT_ALL_VARIABLES: 2 | TFCODING_VERSION ?= 0.0.12 3 | TERRAFORM_VERSION ?= 1.6.6 4 | DOCKER_TAG ?= unfor19/tfcoding:$(TERRAFORM_VERSION)-$(TFCODING_VERSION) 5 | DOCKER_TAG_LATEST:=unfor19/tfcoding:latest 6 | SRC_DIR_RELATIVE_PATH ?= examples/basic 7 | 8 | ifndef DOCKER_PLATFORM 9 | DOCKER_PLATFORM:=$(shell arch) 10 | endif 11 | 12 | help: ## Available make commands 13 | @fgrep -h "##" $(MAKEFILE_LIST) | fgrep -v fgrep | sed -e 's~:~~' | sed -e 's~##~~' 14 | 15 | usage: help 16 | 17 | 18 | build: ## Build tfcoding Docker image - default: terraform v0.13.6 19 | docker build --platform linux/${DOCKER_PLATFORM} \ 20 | --progress=plain \ 21 | -t $(DOCKER_TAG) -t ${DOCKER_TAG_LATEST} \ 22 | --build-arg TERRAFORM_VERSION=$(TERRAFORM_VERSION) \ 23 | --build-arg OS_ARCH=${OS_ARCH} . 24 | 25 | 26 | run: ## Run tfcoding in Docker 27 | docker run --rm -it \ 28 | -v ${PWD}/:/src/:ro \ 29 | $(DOCKER_TAG_LATEST) -r $(SRC_DIR_RELATIVE_PATH) --watching 30 | 31 | 32 | up: ## Run tfcoding in Docker Compose 33 | @docker-compose -p tfcoding up 34 | 35 | 36 | down: ## Stop tfcoding in Docker Compose 37 | @docker-compose -p tfcoding down 38 | 39 | 40 | clean: ## Clean tfcoding in Docker Compose 41 | @docker-compose -p tfcoding down -v --remove-orphans 42 | @docker rm -f tfcoding 2>/dev/null || true 43 | 44 | 45 | up-aws: ## Run tfcoding-aws in Docker Compose 46 | @export SRC_DIR_RELATIVE_PATH="examples/mock-aws" && \ 47 | docker-compose -p tfcoding_aws -f docker-compose-aws.yml up 48 | 49 | 50 | down-aws: ## Stop tfcoding-aws in Docker Compose 51 | @docker-compose -p tfcoding_aws -f docker-compose-aws.yml down 52 | 53 | 54 | test: ## Run tests 55 | @./scripts/tests.sh 56 | 57 | 58 | clean-aws: ## Clean tfcoding in Docker Compose 59 | @docker-compose -p tfcoding_aws -f docker-compose-aws.yml down -v 
--remove-orphans 60 | @docker rm -f tfcoding-aws 2>/dev/null || true 61 | 62 | clean-all: clean clean-aws 63 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # tfcoding 2 | 3 | [![Push latest version to DockerHub](https://github.com/unfor19/tfcoding/actions/workflows/docker-latest.yml/badge.svg)](https://github.com/unfor19/tfcoding/actions/workflows/docker-latest.yml) [![Dockerhub pulls](https://img.shields.io/docker/pulls/unfor19/tfcoding)](https://hub.docker.com/r/unfor19/tfcoding) 4 | 5 | ![tfcoding](https://d33vo9sj4p3nyc.cloudfront.net/tfcoding/tfcoding-localstack-aws.gif) 6 | 7 | Render Terraform's [Expressions](https://www.terraform.io/docs/language/expressions/index.html) and [Functions](https://www.terraform.io/docs/language/functions/index.html) locally without any hassle. 8 | 9 | This application runs in the background and watches for changes in the file `tfcoding.tf`, once this file is modified its [Local Values](https://www.terraform.io/docs/language/values/locals.html) are automatically rendered to the terminal's output (stdout). 10 | 11 | This is especially useful for learning about Expressions and Functions that you are not familiar with, while avoiding the whole shebang of terraform init, plan and apply. The goal here is to "compile Terraform" locally to speed up the development (and learning) process. 
12 | 13 | ## Requirements 14 | 15 | - [Docker](https://docs.docker.com/get-docker/) 16 | - (Optional) [Docker Compose](https://docs.docker.com/compose/install/) 17 | - **Windows** 18 | 19 | - [Windows Git Bash](https://gitforwindows.org/) 20 | - [Chocolatey Windows Package Manager](https://chocolatey.org/install) 21 | 22 | **IMPORTANT**: Open a PowerShell terminal as Administrator 23 | 24 | ```powershell 25 | Set-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; iex ((New-Object System.Net.WebClient).DownloadString('https://community.chocolatey.org/install.ps1')) 26 | ``` 27 | 28 | - Install requirements 29 | 30 | **IMPORTANT**: Open a PowerShell terminal as Administrator 31 | 32 | ```bash 33 | choco install -y make 34 | ``` 35 | 36 | - **macOS**: 37 | - [Homebrew macOS Package Manager](https://brew.sh/) 38 | ```bash 39 | /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" 40 | ``` 41 | - Install requirements 42 | ```bash 43 | brew install make 44 | ``` 45 | 46 | ## Quick Start 47 | 48 | 1. Clone this repo 49 | ```bash 50 | git clone https://github.com/unfor19/tfcoding.git 51 | ``` 52 | 1. From now on your working direcotry should be `tfcoding` 53 | ``` 54 | cd tfcoding 55 | ``` 56 | 1. Render [examples/basic/tfcoding.tf](./examples/basic/tfcoding.tf) - Make changes in that file, like checking new [Terraform Expressions](https://developer.hashicorp.com/terraform/language/expressions) 57 | ``` 58 | make run 59 | ``` 60 | 1. Clean resources - Removes `tfcoding` container 61 | ```bash 62 | make clean 63 | ``` 64 | 65 | ## Getting Started 66 | 67 | This project uses [localstack](https://github.com/localstack/localstack), which means you can provision the [AWS core resources](https://github.com/localstack/localstack#overview), see [examples/mock-aws/](./examples/mock-aws/). 68 | 69 | 1. 
Clone this repo 70 | ```bash 71 | git clone https://github.com/unfor19/tfcoding.git 72 | ``` 73 | 1. From now on your working direcotry should be `tfcoding` 74 | ``` 75 | cd tfcoding 76 | ``` 77 | 1. Render [examples/mock-aws/tfcoding.tf](./examples/mock-aws/tfcoding.tf) - Make changes in that file, like changing the CIDR of subnets 78 | ``` 79 | make up-aws 80 | ``` 81 | 1. Execute `terraform destroy` on changing `tfcoding.tf`, add the Local Value `terraform_destroy = true`. For example: 82 | 83 | ```go 84 | // After "destroying" the infra, comment out this variable to execute `terraform apply` 85 | locals { 86 | terraform_destroy = true 87 | } 88 | ``` 89 | 90 | 1. Clean resources - Removes `tfcoding` and `localstack` containers 91 | ```bash 92 | make clean-all 93 | ``` 94 | 95 | ## Help Menu 96 | 97 | ```bash 98 | make help 99 | ``` 100 | 101 | With Docker: 102 | 103 | ```bash 104 | docker run --rm -it unfor19/tfcoding --help 105 | ``` 106 | 107 | 108 | 109 | ```bash 110 | Usage: bash entrypoint.sh -r basic/exmaples --watching -o private_subnets 111 | 112 | --src_dir_relative_path | -r [REQUIRED] Relative path to the dir that contains tfcoding.tf 113 | --single_value_output | -o [all] Render a single local variable 114 | --src_dir_root | -s [/src] Source root dir in container 115 | --logging | -l [true] Show logging messages 116 | --debug | -d [false] Print verbose output 117 | --watching | -w [FLAG] Auto-render tfcoding.tf on change 118 | --mock_aws | -aws [FLAG] Use this flag for communicating with Localstack 119 | ``` 120 | 121 | 122 | 123 | ## Authors 124 | 125 | Created and maintained by [Meir Gabay](https://github.com/unfor19) 126 | 127 | ## License 128 | 129 | This project is licensed under the MIT License - see the [LICENSE](https://github.com/unfor19/tfcoding/blob/master/LICENSE) file for details 130 | -------------------------------------------------------------------------------- /bargs.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # trap ctrl-c and call ctrl_c() 4 | trap ctrl_c INT 5 | ctrl_c() { 6 | exit 0 7 | } 8 | 9 | ### Global variables 10 | _BARGS_VARS_PATH="" 11 | _ARGS="" 12 | _NUM_OF_ARGS=0 13 | declare -A _LIST_ARGS_DICTS 14 | _NUM_OF_DICTS=0 15 | 16 | 17 | ### Functions 18 | error_msg(){ 19 | local msg=$1 20 | local no_usage=$2 21 | echo -e "[ERROR] $msg" 22 | [[ -z $no_usage ]] && usage 23 | export DEBUG=1 24 | exit 1 25 | } 26 | 27 | 28 | hint_msg(){ 29 | local msg=$1 30 | echo -e "[HINT] $msg" 31 | } 32 | 33 | 34 | export_env_var(){ 35 | local var_name=$1 36 | local var_value=$2 37 | export "${var_name}=${var_value}" 38 | export "${var_name^^}=${var_value}" 39 | } 40 | 41 | 42 | check_options(){ 43 | local options=$1 44 | local var_name=$2 45 | local var_value=$3 46 | local allow_empty=$4 47 | local valid=false 48 | if [[ -n $options ]]; then 49 | for o in $options; do 50 | [[ $o = "$var_value" ]] && valid=true 51 | done 52 | elif [[ -z $var_value && -n $allow_empty ]]; then 53 | valid=true 54 | elif [[ -n $var_value ]]; then 55 | valid=true 56 | fi 57 | echo $valid 58 | } 59 | 60 | 61 | usage (){ 62 | local usage_msg= 63 | local i=0 64 | declare -A arg_dict 65 | while [[ $i -lt $_NUM_OF_DICTS ]]; do 66 | eval "arg_dict=(${_LIST_ARGS_DICTS[$i]})" 67 | if [[ ${arg_dict[name]} = "bargs" ]]; then 68 | echo -e "\nUsage: ${arg_dict[description]}\n" 69 | elif [[ ${arg_dict[type]} = "group" ]]; then 70 | : # group do nothing 71 | elif [[ -n ${arg_dict[name]} ]]; then 72 | usage_msg+="\n\t--${arg_dict[name]}~|~-${arg_dict[short]}" 73 | if [[ -n ${arg_dict[flag]} ]]; then 74 | usage_msg+="~[FLAG]" 75 | elif [[ -n ${arg_dict[allow_empty]} ]]; then 76 | usage_msg+="~[]" 77 | elif [[ -n ${arg_dict[default]} ]]; then 78 | usage_msg+="~[${arg_dict[default]}]" 79 | elif [[ ${arg_dict[allow_env_var]} ]]; then 80 | usage_msg+="~[ENV_VAR]" 81 | else 82 | usage_msg+="~[REQUIRED]" 83 | fi 84 | 
85 | if [[ -n ${arg_dict[description]} ]]; then 86 | usage_msg+="~${arg_dict[description]}" 87 | fi 88 | usage_msg="$usage_msg\n" 89 | fi 90 | i=$((i+1)) 91 | done 92 | 93 | echo -e "$usage_msg" | column -t -s "~" 94 | } 95 | 96 | 97 | clean_chars(){ 98 | local str=$1 99 | str=${str//\'/} 100 | str=${str//\"/} 101 | echo "$str" 102 | } 103 | 104 | 105 | check_bargs_vars_path(){ 106 | local bargs_vars_path 107 | if [[ -n "$BARGS_VARS_PATH" && -f "$BARGS_VARS_PATH" ]]; then 108 | _BARGS_VARS_PATH="$BARGS_VARS_PATH" 109 | elif [[ -z "$BARGS_VARS_PATH" || ! -f "$BARGS_VARS_PATH" ]]; then 110 | bargs_vars_path=$(dirname "${BASH_SOURCE[0]}")/bargs_vars 111 | [[ ! -f $bargs_vars_path ]] && error_msg "Make sure bargs_vars is in the same folder as bargs.sh\n\tAnother option - export BARGS_VARS_PATH=\"\${PWD}/path/to/my_bargs_vars\"" no_usage 112 | _BARGS_VARS_PATH="$bargs_vars_path" 113 | else 114 | error_msg "Invalid path to bargs_vars: $BARGS_VARS_PATH" 115 | fi 116 | } 117 | 118 | 119 | read_bargs_vars(){ 120 | # Reads the file, saving each arg as one string in the string ${args} 121 | # The arguments are separated with "~" 122 | check_bargs_vars_path 123 | local delimiter="---" 124 | local arg_name 125 | local arg_value 126 | local str 127 | local line 128 | while read -r line; do 129 | if [[ $line != "$delimiter" ]]; then 130 | line=$(clean_chars "$line") 131 | arg_name=$(echo "$line" | cut -f1 -d "=") 132 | arg_value=$(echo "$line" | cut -f2 -d "=") 133 | [[ -z $str ]] && \ 134 | str="[${arg_name}]=\"${arg_value}\"" || \ 135 | str="${str} [${arg_name}]=\"${arg_value}\"" 136 | 137 | elif [[ $line = "$delimiter" ]]; then 138 | _NUM_OF_ARGS=$((_NUM_OF_ARGS+1)) 139 | [[ -n $str ]] && _ARGS="$_ARGS~$str" 140 | unset str 141 | fi 142 | done < "$_BARGS_VARS_PATH" 143 | } 144 | 145 | 146 | args_to_list_dicts(){ 147 | # _ARGS to list of dictionaries (associative arrays) 148 | local cut_num=1 149 | local arg= 150 | while [[ $cut_num -le $((_NUM_OF_ARGS+1)) ]]; do 151 | 
arg=$(echo "${_ARGS[@]}" | cut -d "~" -f $cut_num) 152 | if [[ ${#arg} -gt 0 ]]; then 153 | _LIST_ARGS_DICTS[$_NUM_OF_DICTS]=$arg 154 | _NUM_OF_DICTS=$((_NUM_OF_DICTS+1)) 155 | fi 156 | cut_num=$((cut_num+1)) 157 | done 158 | } 159 | 160 | 161 | set_args_to_vars(){ 162 | # The good old 'while case shift' 163 | declare -A arg_dict 164 | local i 165 | local found 166 | local definition 167 | local contains_equal 168 | local value 169 | while [[ -n $1 ]]; do 170 | i=0 171 | found= 172 | while [[ $i -lt $_NUM_OF_DICTS ]]; do 173 | eval "arg_dict=(${_LIST_ARGS_DICTS[$i]})" 174 | contains_equal=$(echo "$1" | grep "^[\-|\-\-]*\w*=") 175 | if [[ -n $contains_equal ]]; then 176 | definition=${1%=*} # "--definition=value" 177 | else 178 | definition=$1 # "--definition value" 179 | fi 180 | 181 | case "$definition" in 182 | -h | --help ) 183 | usage 184 | export DEBUG=0 185 | exit 0 186 | ;; 187 | -"${arg_dict[short]}" | --"${arg_dict[name]}" ) 188 | if [[ -n $contains_equal ]]; then 189 | value=${1#*=} 190 | elif [[ -z ${arg_dict[flag]} ]]; then 191 | shift 192 | value=$1 193 | fi 194 | 195 | if [[ -z $value && -n ${arg_dict[allow_env_var]} ]]; then 196 | declare -n env_var_value=${arg_dict[name]^^} 197 | export_env_var "${arg_dict[name]}" "$env_var_value" 198 | elif [[ -z $value && -z ${arg_dict[default]} ]]; then 199 | # arg is empty and default is empty 200 | error_msg "Empty argument \"${arg_dict[name]}\"" 201 | elif [[ -z $value && -n ${arg_dict[default]} ]]; then 202 | # arg is empty and default is not empty 203 | export_env_var "${arg_dict[name]}" "${arg_dict[default]}" 204 | found=${arg_dict[name]} 205 | elif [[ -n $value ]]; then 206 | # arg is not empty 207 | if [[ -n ${arg_dict[flag]} ]]; then 208 | # it's a flag 209 | export_env_var "${arg_dict[name]}" true 210 | else 211 | # not a flag, regular argument 212 | export_env_var "${arg_dict[name]}" "$value" 213 | fi 214 | found=${arg_dict[name]} 215 | fi 216 | ;; 217 | esac 218 | i=$((i+1)) 219 | done 220 | [[ -z 
$found ]] && error_msg "Unknown argument \"$definition\"" 221 | shift 222 | done 223 | } 224 | 225 | 226 | export_args_validation(){ 227 | # Export variables only if passed validation test 228 | declare -A arg_dict 229 | local result 230 | local default 231 | local hidden 232 | local prompt_value 233 | local confirm_value 234 | local valid 235 | local i=0 236 | while [[ $i -lt $_NUM_OF_DICTS ]]; do 237 | eval "arg_dict=(${_LIST_ARGS_DICTS[$i]})" 238 | result=$(printenv | grep "^${arg_dict[name]}=" | cut -f2 -d "=") 239 | if [[ -z $result ]]; then 240 | default=${arg_dict[default]} 241 | if [[ -n ${arg_dict[allow_env_var]} ]]; then 242 | # set default to env var only if env var is UPPERCASED 243 | declare -n env_var_value=${arg_dict[name]^^} 244 | [[ -n $env_var_value ]] && default=$env_var_value 245 | fi 246 | 247 | if [[ -n $default ]]; then 248 | export_env_var "${arg_dict[name]}" "${default}" 249 | elif [[ -n ${arg_dict[allow_empty]} || -n ${arg_dict[flag]} ]]; then 250 | export_env_var "${arg_dict[name]}" "" 251 | elif [[ -n ${arg_dict[prompt]} ]]; then 252 | # will not prompt if default is not empty 253 | hidden= 254 | [[ -n ${arg_dict[hidden]} ]] && hidden=s 255 | prompt_value= 256 | trap 'trap - INT; kill -s HUP -- -$$' INT 257 | while :; do 258 | echo -n "${arg_dict[name]^^}: " 259 | read -re${hidden} prompt_value 260 | [[ -n $hidden ]] && echo "" 261 | if [[ -n ${arg_dict[confirmation]} ]]; then 262 | while :; do 263 | confirm_value= 264 | echo -n "${arg_dict[name]^^} Confirmation: " 265 | read -re${hidden} confirm_value 266 | [[ -n $hidden ]] && echo "" 267 | [[ $prompt_value = "$confirm_value" ]] && break 268 | done 269 | fi 270 | valid=$(check_options "${arg_dict[options]}" "${arg_dict[name]}" "$prompt_value" "${arg_dict[allow_empty]}") 271 | if [[ $valid = "true" ]]; then 272 | [[ -n ${arg_dict[hidden]} ]] && echo "" 273 | break 274 | else 275 | [[ -n ${arg_dict[options]} ]] && hint_msg "Valid options: ${arg_dict[options]}" 276 | fi 277 | done 278 | 
export_env_var "${arg_dict[name]}" "${prompt_value}" 279 | elif [[ -z $default && ${arg_dict[type]} != "group" ]]; then 280 | error_msg "Required argument: ${arg_dict[name]}" 281 | fi 282 | elif [[ -n $result ]]; then 283 | valid=$(check_options "${arg_dict[options]}" "${arg_dict[name]}" "$result" "${arg_dict[allow_empty]}") 284 | if [[ $valid != "true" ]]; then 285 | hint_msg "Valid options: ${arg_dict[options]// / OR }" 286 | error_msg "Invalid value \"${result}\" for the argument \"${arg_dict[name]}\"" 287 | fi 288 | : # argument is valid 289 | fi 290 | i=$((i+1)) 291 | done 292 | } 293 | 294 | ### Main 295 | read_bargs_vars 296 | args_to_list_dicts 297 | set_args_to_vars "$@" # <-- user input 298 | export_args_validation -------------------------------------------------------------------------------- /bargs_vars: -------------------------------------------------------------------------------- 1 | --- 2 | name=src_dir_relative_path 3 | short=r 4 | description=Relative path to the dir that contains tfcoding.tf 5 | --- 6 | name=single_value_output 7 | short=o 8 | description=Render a single local variable 9 | default=all 10 | --- 11 | name=src_dir_root 12 | short=s 13 | description=Source root dir in container 14 | default=/src 15 | --- 16 | name=logging 17 | short=l 18 | description=Show logging messages 19 | default=true 20 | --- 21 | name=debug 22 | short=d 23 | description=Print verbose output 24 | default=false 25 | --- 26 | name=watching 27 | short=w 28 | description=Auto-render tfcoding.tf on change 29 | flag=true 30 | default=false 31 | --- 32 | name=mock_aws 33 | short=aws 34 | description=Use this flag for communicating with Localstack 35 | flag=true 36 | default=false 37 | --- 38 | name=bargs 39 | short=bargs 40 | description=bash entrypoint.sh -r basic/exmaples --watching -o private_subnets 41 | default=irrelevant 42 | --- 43 | --- -------------------------------------------------------------------------------- /docker-compose-aws.yml: 
-------------------------------------------------------------------------------- 1 | version: "3.9" 2 | 3 | networks: 4 | shared: 5 | name: shared 6 | 7 | services: 8 | localstack: 9 | container_name: localstack 10 | image: localstack/localstack:${LOCALSTACK_VERSION:-latest} 11 | platform: linux/${DOCKER_PLATFORM:-amd64} 12 | ports: 13 | - "4566:4566" 14 | - "4571:4571" 15 | environment: 16 | DEBUG: ${DEBUG:-0} 17 | LS_LOG: ${LS_LOG:-error} 18 | LAMBDA_DOCKER_NETWORK: shared 19 | MAIN_DOCKER_NETWORK: shared 20 | DOCKER_HOST: unix:///var/run/docker.sock 21 | volumes: 22 | - "/var/run/docker.sock:/var/run/docker.sock" 23 | - "./localstack/healthcheck-init.sh:/etc/localstack/init/ready.d/healthcheck-init.sh" 24 | networks: 25 | - shared 26 | healthcheck: 27 | test: 28 | - CMD 29 | - bash 30 | - -c 31 | - curl -f http://localhost:4566/_localstack/init/ready | jq .completed==true || exit 1 32 | interval: 1s 33 | timeout: 5s 34 | start_period: 2s 35 | retries: 5 36 | 37 | tfcoding-aws: 38 | container_name: tfcoding-aws 39 | depends_on: 40 | localstack: 41 | condition: service_healthy 42 | restart: true 43 | restart: always 44 | platform: linux/${DOCKER_PLATFORM:-amd64} 45 | image: ${DOCKER_TAG_LATEST:-unfor19/tfcoding:latest} 46 | volumes: 47 | - ./:/src/:ro 48 | - ${HOME}/.aws:/home/appuser/.aws:ro 49 | environment: 50 | AWS_REGION: "us-east-1" 51 | AWS_DEFAULT_REGION: "us-east-1" 52 | command: 53 | - "--src_dir_relative_path" 54 | - "${SRC_DIR_RELATIVE_PATH:-examples/mock-aws}" 55 | - "--watching" 56 | - "--mock_aws" 57 | tty: true # colorful output 58 | networks: 59 | - shared 60 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "3.7" 2 | 3 | volumes: 4 | code_dir_tmp: 5 | 6 | services: 7 | tfcoding: 8 | container_name: tfcoding 9 | image: ${DOCKER_TAG_LATEST:-unfor19/tfcoding:latest} 10 | platform: 
${DOCKER_PLATFORM:-linux/amd64} 11 | volumes: 12 | - ./:/src/:ro 13 | - code_dir_tmp:/tmp/ 14 | command: 15 | - "--src_dir_relative_path" 16 | - "${SRC_DIR_RELATIVE_PATH:-examples/basic}" 17 | - "--watching" 18 | restart: always 19 | tty: true # colorful output 20 | -------------------------------------------------------------------------------- /entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # bargs 4 | source /usr/local/bin/bargs.sh "$@" 5 | 6 | set -e 7 | set -o pipefail 8 | 9 | 10 | # Global variables 11 | [[ "$SINGLE_VALUE_OUTPUT" = "all" ]] && SINGLE_VALUE_OUTPUT="" 12 | _SINGLE_VALUE_OUTPUT="${SINGLE_VALUE_OUTPUT:-""}" 13 | 14 | # Dirs and paths 15 | _SRC_DIR_ROOT="${SRC_DIR_ROOT:-"/src"}" 16 | _SRC_DIR_RELATIVE_PATH="$SRC_DIR_RELATIVE_PATH" 17 | [[ -z "$_SRC_DIR_RELATIVE_PATH" ]] && error_msg "Relative path is required, create a directory that contains tfcoding.tf" 18 | _SRC_DIR_ABSOLUTE_PATH="${_SRC_DIR_ROOT}/${_SRC_DIR_RELATIVE_PATH}" 19 | _CODE_FILE_NAME="tfcoding.tf" 20 | _CODE_DIR_TMP="/tmp" 21 | _TMP_DIR_TF_FILES="${_CODE_DIR_TMP}/code" 22 | _SRC_FILE_ABSOLUTE_PATH="${_SRC_DIR_ABSOLUTE_PATH}/${_CODE_FILE_NAME}" 23 | 24 | 25 | # Terraform Plugins (providers) cache dir 26 | export TF_PLUGIN_CACHE_DIR="${_CODE_DIR_TMP}/.terraform.d/plugin-cache" 27 | [[ ! -d "$TF_PLUGIN_CACHE_DIR" ]] && mkdir -p "$TF_PLUGIN_CACHE_DIR" 28 | 29 | 30 | # Terraform Modules cache dir 31 | export TF_DATA_DIR="${_CODE_DIR_TMP}/.terraform" 32 | [[ ! 
-d "$_CODE_DIR_TMP" ]] && mkdir -p "$_CODE_DIR_TMP" 33 | 34 | 35 | # Flags and boolean args 36 | _LOGGING="${LOGGING:-"true"}" 37 | _DEBUG="${DEBUG:-"false"}" 38 | _WATCHING="${WATCHING:-"false"}" 39 | _MOCK_AWS="${MOCK_AWS:-"false"}" 40 | 41 | 42 | # Functions 43 | error_msg(){ 44 | local msg="$1" 45 | echo -e "[ERROR] $(date) :: $msg" 46 | exit 1 47 | } 48 | 49 | 50 | log_msg(){ 51 | local msg="$1" 52 | echo -e "[LOG] $(date) :: $msg" 53 | } 54 | 55 | 56 | # trap ctrl-c and call ctrl_c() 57 | trap ctrl_c INT 58 | ctrl_c() { 59 | log_msg "Stopped with CTRL+C" 60 | exit 0 61 | } 62 | 63 | # trap container stopped and 64 | trap container_stopped SIGINT SIGTERM SIGHUP 65 | container_stopped() { 66 | log_msg "Continaer Stopped By Orchestrator" 67 | exit 2 68 | } 69 | 70 | 71 | validation(){ 72 | if [[ -d "$_SRC_DIR_ABSOLUTE_PATH" ]]; then 73 | : 74 | else 75 | error_msg "Directory does not exist - $_SRC_DIR_ABSOLUTE_PATH" 76 | fi 77 | } 78 | 79 | 80 | copy_files(){ 81 | [[ -f "${_TMP_DIR_TF_FILES}/terraform.tfstate" ]] && mv "${_TMP_DIR_TF_FILES}/terraform.tfstate" "${_CODE_DIR_TMP}/terraform.tfstate" 82 | rm -rf "${_TMP_DIR_TF_FILES}" 83 | mkdir -p "${_TMP_DIR_TF_FILES}" 84 | find "$_SRC_DIR_ABSOLUTE_PATH" -type f \( -name '*.tf' -o -name '*.tpl' -o -name '*.json' \) \ 85 | -and \( -not -path '.git/' -not -path '.terraform/' \) -exec cp {} "$_TMP_DIR_TF_FILES" \; 86 | [[ -f "${_CODE_DIR_TMP}/terraform.tfstate" ]] && cp "${_CODE_DIR_TMP}/terraform.tfstate" "${_TMP_DIR_TF_FILES}/terraform.tfstate" 87 | wait # prevents sudden exit 88 | } 89 | 90 | 91 | terraform_init(){ 92 | # Create a local empty tfstate file 93 | cd "$_TMP_DIR_TF_FILES" 94 | if [[ "$_MOCK_AWS" = "true" ]]; then 95 | terraform init 96 | else 97 | terraform init 1>/dev/null 98 | fi 99 | } 100 | 101 | 102 | inject_outputs(){ 103 | # Inject outputs to $_CODE_FILE_NAME according to locals{} 104 | local tf_json 105 | tf_json=$(hcl2json "${_TMP_DIR_TF_FILES}/${_CODE_FILE_NAME}" | jq -r '.locals[]') 106 | 
declare -a arr_json=($(echo "$tf_json" | jq -r 'keys[]')) 107 | for local_value in "${arr_json[@]}"; do 108 | if [[ "$local_value" = "terraform_destroy" ]] ; then 109 | export _TERRAFORM_DESTROY="true" 110 | continue 111 | fi 112 | cat <<EOF >> "${_TMP_DIR_TF_FILES}/${_CODE_FILE_NAME}" 113 | 114 | 115 | output "${local_value}" { 116 | value = local.${local_value} 117 | } 118 | 119 | 120 | EOF 121 | done 122 | 123 | } 124 | 125 | 126 | debug_mode(){ 127 | if [[ $_DEBUG = "true" ]]; then 128 | cat "$_TMP_DIR_TF_FILES/$_CODE_FILE_NAME" 129 | fi 130 | } 131 | 132 | 133 | render_tfcoding(){ 134 | # terraform apply renders the outputs 135 | cd "$_TMP_DIR_TF_FILES" 136 | terraform fmt 1>/dev/null 137 | 138 | if ! terraform validate 1>/dev/null ; then 139 | log_msg "Fix the above syntax error" 140 | return 141 | fi 142 | 143 | if [[ "$_TERRAFORM_DESTROY" = "true" ]]; then 144 | terraform destroy -auto-approve 145 | unset _TERRAFORM_DESTROY 146 | return 147 | fi 148 | 149 | if [[ "$_MOCK_AWS" = "true" ]]; then 150 | # Mock AWS 151 | if terraform plan -input=false -out=plan.tfout -compact-warnings ; then 152 | if ! terraform apply -lock=false -auto-approve -compact-warnings plan.tfout ; then 153 | log_msg "terraform apply - Fix the above error" 154 | return 155 | fi 156 | else 157 | log_msg "terraform plan - Fix the above error" 158 | return 159 | fi 160 | elif [[ "$_MOCK_AWS" != "true" ]]; then 161 | # Local Values only 162 | if ! 
terraform apply -lock=false -input=false -auto-approve -compact-warnings 1>/dev/null ; then 163 | log_msg "terraform apply - Fix the above error" 164 | return 165 | fi 166 | 167 | if [[ -n $_SINGLE_VALUE_OUTPUT ]]; then 168 | # Single Local Value Output 169 | local output_msg 170 | output_msg="$(terraform output -json "${_SINGLE_VALUE_OUTPUT}" 2>&1 || true)" 171 | if [[ "$output_msg" =~ .*output.*not.*found ]]; then 172 | error_msg "Local Value not defined: ${_SINGLE_VALUE_OUTPUT}" 173 | else 174 | echo "{\"${_SINGLE_VALUE_OUTPUT}\":${output_msg}}" | jq 175 | fi 176 | else 177 | # All Outputs (Local Values) 178 | terraform output -json | jq 'map_values(.value)' 179 | fi 180 | fi 181 | } 182 | 183 | 184 | # Main 185 | main(){ 186 | validation 187 | copy_files 188 | terraform_init 189 | inject_outputs 190 | debug_mode 191 | render_tfcoding 192 | [[ "$_LOGGING" = "true" && "$_WATCHING" = "true" ]] && log_msg "Watching for changes in ${_SRC_FILE_ABSOLUTE_PATH}" 193 | } 194 | 195 | 196 | [[ "$_LOGGING" = "true" ]] && log_msg "$(terraform version)" 197 | if [[ "$_WATCHING" = "true" ]]; then 198 | # Execute on file change in code file - tfcoding.tf 199 | log_msg "Rendered for the first time" 200 | main 201 | fswatch -0 -m poll_monitor --batch-marker --event-flags "${_SRC_DIR_ABSOLUTE_PATH}/${_CODE_FILE_NAME}" | while read -r -d "" event; do 202 | if [[ "$event" = "NoOp" ]]; then 203 | [[ "$_LOGGING" = "true" ]] && log_msg "Rendered" 204 | main 205 | fi 206 | done 207 | else 208 | # Run-once 209 | main 210 | fi -------------------------------------------------------------------------------- /examples/basic/tfcoding.tf: -------------------------------------------------------------------------------- 1 | variable "environment" { 2 | default = "stg" 3 | } 4 | 5 | variable "cidr_ab" { 6 | type = map(string) 7 | default = { 8 | "dev" : "10.10" 9 | "stg" : "10.11" 10 | "prd" : "10.12" 11 | } 12 | } 13 | 14 | locals { 15 | cidr_ab = lookup(var.cidr_ab, var.environment) 16 | 
private_subnets = [ 17 | "${local.cidr_ab}.0.0/24", 18 | "${local.cidr_ab}.1.0/24", 19 | ] 20 | } 21 | -------------------------------------------------------------------------------- /examples/complex/nodegroup.tpl: -------------------------------------------------------------------------------- 1 | ${jsonencode( 2 | { 3 | "override_instance_types": { 4 | "dev": ["t2.micro", "t3.micro"] 5 | "stg": ["m5.large", "m5a.large", "m5d.large", "m5ad.large"] 6 | "prd": ["m5.large", "m5a.large", "m5d.large", "m5ad.large"] 7 | } 8 | "asg_min_size": { 9 | "dev": 1 10 | "stg": 2 11 | "prd": 2 12 | } 13 | "asg_max_size": { 14 | "dev": 2 15 | "stg": 4 16 | "prd": 4 17 | } 18 | "asg_desired_capacity": { 19 | "dev": 1 20 | "stg": 2 21 | "prd": 2 22 | } 23 | "root_volume_size": { 24 | "dev": 30 25 | "stg": 50 26 | "prd": 50 27 | } 28 | } 29 | )} -------------------------------------------------------------------------------- /examples/complex/tfcoding.tf: -------------------------------------------------------------------------------- 1 | variable "environment" { 2 | default = "stg" 3 | } 4 | 5 | locals { 6 | node_group_template = jsondecode(templatefile("${path.module}/nodegroup.tpl", {})) 7 | node_group_template_rendered = { 8 | for ng_key in keys(local.node_group_template) : 9 | ng_key => local.node_group_template[ng_key][var.environment] 10 | } 11 | my_keys = keys(local.node_group_template_rendered) 12 | } 13 | -------------------------------------------------------------------------------- /examples/complex2/tfcoding.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | vm_configuration_template = jsondecode(templatefile("${path.module}/vmconfig.tpl", {})) 3 | vm_configuration_rendered = { 4 | for key in keys(local.vm_configuration_template) : 5 | key => local.vm_configuration_template[key] 6 | } 7 | } -------------------------------------------------------------------------------- /examples/complex2/vmconfig.tpl: 
-------------------------------------------------------------------------------- 1 | ${jsonencode( 2 | { 3 | "vm_resource_group": "mystring", 4 | "vm_rg_location": "mystring", 5 | "vm_as_name": "mystring", 6 | "vm_name": "mystring", 7 | "vm_size": "mystring", 8 | "vm_image_publisher": "mystring", 9 | "vm_image_offer": "mystring", 10 | "vm_image_sku": "mystring", 11 | "vm_image_version": "mystring", 12 | "vm_admin_username": "mystring", 13 | "vm_admin_password": "mystring", 14 | "vm_os_disk_caching": "mystring", 15 | "vm_storage_account_type": "mystring", 16 | "vm_tags": {}, 17 | "vm_nics": { 18 | "nic_name": "mystring", 19 | "ipconfig_name": "mystring", 20 | "subnet_block_id": "mystring" 21 | } 22 | } 23 | )} 24 | -------------------------------------------------------------------------------- /examples/mock-aws-pulumi/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 5.0" 6 | } 7 | } 8 | } 9 | 10 | variable "region" { 11 | default = "us-east-1" 12 | } 13 | 14 | provider "aws" { 15 | access_key = "mock_access_key" 16 | region = "us-east-1" 17 | secret_key = "mock_secret_key" 18 | s3_use_path_style = true 19 | skip_credentials_validation = true 20 | skip_metadata_api_check = true 21 | skip_requesting_account_id = true 22 | insecure = true 23 | 24 | endpoints { 25 | apigateway = "http://localhost:4566" 26 | apigatewayv2 = "http://localhost:4566" 27 | cloudformation = "http://localhost:4566" 28 | cloudwatch = "http://localhost:4566" 29 | cloudwatchlogs = "http://localhost:4566" 30 | dynamodb = "http://localhost:4566" 31 | ec2 = "http://localhost:4566" 32 | es = "http://localhost:4566" 33 | firehose = "http://localhost:4566" 34 | iam = "http://localhost:4566" 35 | kinesis = "http://localhost:4566" 36 | lambda = "http://localhost:4566" 37 | rds = "http://localhost:4566" 38 | route53 = "http://localhost:4566" 39 | redshift = 
"http://localhost:4566" 40 | s3 = "http://localhost:4566" 41 | secretsmanager = "http://localhost:4566" 42 | ses = "http://localhost:4566" 43 | sns = "http://localhost:4566" 44 | sqs = "http://localhost:4566" 45 | ssm = "http://localhost:4566" 46 | stepfunctions = "http://localhost:4566" 47 | sts = "http://localhost:4566" 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /examples/mock-aws-pulumi/tfcoding.tf: -------------------------------------------------------------------------------- 1 | variable "environment" { 2 | default = "stg" 3 | } 4 | 5 | variable "cidr_ab" { 6 | type = map(string) 7 | default = { 8 | "dev" : "10.11" 9 | "stg" : "10.12" 10 | "prd" : "10.13" 11 | } 12 | } 13 | 14 | module "vpc" { 15 | source = "terraform-aws-modules/vpc/aws" 16 | version = "~>5.4.0" 17 | 18 | name = local.base_name 19 | cidr = "${local.cidr_ab}.0.0/16" 20 | 21 | azs = ["${var.region}a", "${var.region}b"] 22 | private_subnets = local.private_subnets 23 | public_subnets = local.public_subnets 24 | 25 | tags = local.tags 26 | } 27 | 28 | 29 | locals { 30 | # terraform_destroy = true 31 | base_name = "myapp" 32 | cidr_ab = lookup(var.cidr_ab, var.environment) 33 | private_subnets = [ 34 | "${local.cidr_ab}.0.0/24", 35 | "${local.cidr_ab}.1.0/24", 36 | ] 37 | public_subnets = [ 38 | "${local.cidr_ab}.30.0/24", 39 | "${local.cidr_ab}.31.0/24", 40 | ] 41 | 42 | tags = { 43 | Terraform = "true" 44 | Environment = var.environment 45 | } 46 | 47 | my_value = module.vpc.vpc_id 48 | my_subnet = module.vpc.public_subnets 49 | my_private_subnets = module.vpc.private_subnets 50 | my_sg_id = module.vpc.default_security_group_id 51 | } 52 | -------------------------------------------------------------------------------- /examples/mock-aws/provider.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | aws = { 4 | source = "hashicorp/aws" 5 | version = "~> 5.0" 6 | 
} 7 | } 8 | } 9 | 10 | variable "region" { 11 | default = "us-east-1" 12 | } 13 | 14 | provider "aws" { 15 | access_key = "mock_access_key" 16 | region = "us-east-1" 17 | secret_key = "mock_secret_key" 18 | s3_use_path_style = true 19 | skip_credentials_validation = true 20 | skip_metadata_api_check = true 21 | skip_requesting_account_id = true 22 | insecure = true 23 | 24 | endpoints { 25 | apigateway = "http://localstack:4566" 26 | apigatewayv2 = "http://localstack:4566" 27 | cloudformation = "http://localstack:4566" 28 | cloudwatch = "http://localstack:4566" 29 | cloudwatchlogs = "http://localstack:4566" 30 | dynamodb = "http://localstack:4566" 31 | ec2 = "http://localstack:4566" 32 | es = "http://localstack:4566" 33 | firehose = "http://localstack:4566" 34 | iam = "http://localstack:4566" 35 | kinesis = "http://localstack:4566" 36 | lambda = "http://localstack:4566" 37 | rds = "http://localstack:4566" 38 | route53 = "http://localstack:4566" 39 | redshift = "http://localstack:4566" 40 | s3 = "http://localstack:4566" 41 | secretsmanager = "http://localstack:4566" 42 | ses = "http://localstack:4566" 43 | sns = "http://localstack:4566" 44 | sqs = "http://localstack:4566" 45 | ssm = "http://localstack:4566" 46 | stepfunctions = "http://localstack:4566" 47 | sts = "http://localstack:4566" 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /examples/mock-aws/tfcoding.tf: -------------------------------------------------------------------------------- 1 | variable "environment" { 2 | default = "stg" 3 | } 4 | 5 | variable "cidr_ab" { 6 | type = map(string) 7 | default = { 8 | "dev" : "10.11" 9 | "stg" : "10.12" 10 | "prd" : "10.13" 11 | } 12 | } 13 | 14 | module "vpc" { 15 | source = "terraform-aws-modules/vpc/aws" 16 | version = "~>5.4.0" 17 | 18 | name = local.base_name 19 | cidr = "${local.cidr_ab}.0.0/16" 20 | 21 | azs = ["${var.region}a", "${var.region}b"] 22 | private_subnets = local.private_subnets 23 | 
public_subnets = local.public_subnets 24 | 25 | tags = local.tags 26 | } 27 | 28 | 29 | locals { 30 | # terraform_destroy = true 31 | base_name = "myapp" 32 | cidr_ab = lookup(var.cidr_ab, var.environment) 33 | private_subnets = [ 34 | "${local.cidr_ab}.0.0/24", 35 | "${local.cidr_ab}.1.0/24", 36 | ] 37 | public_subnets = [ 38 | "${local.cidr_ab}.30.0/24", 39 | "${local.cidr_ab}.31.0/24", 40 | ] 41 | 42 | tags = { 43 | Terraform = "true" 44 | Environment = var.environment 45 | } 46 | 47 | my_value = module.vpc.vpc_id 48 | my_subnet = module.vpc.public_subnets 49 | my_private_subnets = module.vpc.private_subnets 50 | my_sg_id = module.vpc.default_security_group_id 51 | } 52 | -------------------------------------------------------------------------------- /helm-app/.helmignore: -------------------------------------------------------------------------------- 1 | # Patterns to ignore when building packages. 2 | # This supports shell glob matching, relative path matching, and 3 | # negation (prefixed with !). Only one pattern per line. 
4 | .DS_Store 5 | # Common VCS dirs 6 | .git/ 7 | .gitignore 8 | .bzr/ 9 | .bzrignore 10 | .hg/ 11 | .hgignore 12 | .svn/ 13 | # Common backup files 14 | *.swp 15 | *.bak 16 | *.tmp 17 | *.orig 18 | *~ 19 | # Various IDEs 20 | .project 21 | .idea/ 22 | *.tmproj 23 | .vscode/ 24 | -------------------------------------------------------------------------------- /helm-app/Chart.lock: -------------------------------------------------------------------------------- 1 | dependencies: 2 | - name: application 3 | repository: https://stakater.github.io/stakater-charts 4 | version: 2.3.2 5 | digest: sha256:0aec5e9344733891dc493468bcdcda4ffe057f16b90c5fd0335d6dc6c3ccc2b1 6 | generated: "2024-01-14T01:35:28.333802+02:00" 7 | -------------------------------------------------------------------------------- /helm-app/Chart.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v2 2 | name: helm-app 3 | description: A Helm chart for Kubernetes 4 | 5 | # A chart can be either an 'application' or a 'library' chart. 6 | # 7 | # Application charts are a collection of templates that can be packaged into versioned archives 8 | # to be deployed. 9 | # 10 | # Library charts provide useful utilities or functions for the chart developer. They're included as 11 | # a dependency of application charts to inject those utilities and functions into the rendering 12 | # pipeline. Library charts do not define any templates and therefore cannot be deployed. 13 | type: application 14 | 15 | # This is the chart version. This version number should be incremented each time you make changes 16 | # to the chart and its templates, including the app version. 17 | # Versions are expected to follow Semantic Versioning (https://semver.org/) 18 | version: 0.1.0 19 | 20 | # This is the version number of the application being deployed. This version number should be 21 | # incremented each time you make changes to the application. 
Versions are not expected to 22 | # follow Semantic Versioning. They should reflect the version the application is using. 23 | # It is recommended to use it with quotes. 24 | appVersion: "1.16.0" 25 | 26 | # Add Stakater chart as a dependency 27 | dependencies: 28 | - name: application 29 | version: "2.3.2" 30 | repository: "https://stakater.github.io/stakater-charts" 31 | -------------------------------------------------------------------------------- /helm-app/values.yaml: -------------------------------------------------------------------------------- 1 | # Source: https://github.com/stakater/application/blob/5088b3887088fe71eb0d57c498a87e4edadc7c09/application/values.yaml 2 | 3 | application: 4 | # -- Same as nameOverride but for the namespace. 5 | namespaceOverride: "" 6 | 7 | # -- Same as nameOverride but for the component. 8 | componentOverride: "" 9 | 10 | # -- Same as nameOverride but for the partOf. 11 | partOfOverride: "" 12 | 13 | ########################################################## 14 | # Name of the application. 
15 | ########################################################## 16 | applicationName: "application" 17 | 18 | ########################################################## 19 | # Global labels 20 | # These labels will be added on all resources, 21 | # and you can add additional labels from below 22 | # on individual resource 23 | ########################################################## 24 | 25 | cronJob: 26 | enabled: false 27 | jobs: 28 | # db-migration: 29 | # schedule: "* * * 8 *" 30 | # env: 31 | # KEY: 32 | # value: VALUE 33 | # image: 34 | # repository: docker.io/nginx 35 | # tag: v1.0.0 36 | # digest: '' # if set to a non empty value, digest takes precedence on the tag 37 | # imagePullPolicy: IfNotPresent 38 | # command: ["/bin/bash"] 39 | # args: ["-c","sleep 5000"] 40 | # resources: 41 | # requests: 42 | # memory: 5Gi 43 | # cpu: 1 44 | 45 | ########################################################## 46 | # Deployment 47 | ########################################################## 48 | deployment: 49 | enabled: true 50 | # By default deploymentStrategy is set to rollingUpdate with maxSurge of 25% and maxUnavailable of 25% 51 | # You can change type to `Recreate` or can uncomment `rollingUpdate` specification and adjust them to your usage. 
52 | strategy: 53 | type: RollingUpdate 54 | # rollingUpdate: 55 | # maxSurge: 25% 56 | # maxUnavailable: 25% 57 | 58 | # Reload deployment if configMap/secret updates 59 | reloadOnChange: true 60 | 61 | # Select nodes to deploy which matches the following labels 62 | nodeSelector: 63 | # cloud.google.com/gke-nodepool: default-pool 64 | 65 | # Init containers which runs before the app container 66 | hostAliases: 67 | # - ip: "127.0.0.1" 68 | # hostnames: 69 | # - "foo.local" 70 | # - "bar.local" 71 | # - ip: "10.1.2.3" 72 | # hostnames: 73 | # - "foo.remote" 74 | # - "bar.remote" 75 | 76 | # Init containers which runs before the app container 77 | initContainers: 78 | git: 79 | name: git 80 | image: unfor19/tfcoding:latest 81 | command: ["git", "clone"] 82 | args: ["https://github.com/unfor19/tfcoding.git"] 83 | workingDir: /home/appuser 84 | volumeMounts: 85 | - name: workdir-volume 86 | mountPath: /home/appuser 87 | readOnly: false 88 | prepare: 89 | image: unfor19/tfcoding:latest 90 | command: ["bash", "-c"] 91 | args: 92 | [ 93 | "git checkout feature/kubernetes && chmod +x localstack/healthcheck-init.sh", 94 | ] 95 | workingDir: /home/appuser 96 | volumeMounts: 97 | - name: workdir-volume 98 | mountPath: /home/appuser 99 | subPath: tfcoding 100 | readOnly: false 101 | 102 | # init-contaner: 103 | # image: busybox 104 | # imagePullPolicy: IfNotPresent 105 | # command: ['/bin/sh'] 106 | 107 | # Additional labels for Deployment 108 | additionalLabels: 109 | # key: value 110 | 111 | # Additional label added on pod which is used in Service's Label Selector 112 | podLabels: 113 | # env: prod 114 | 115 | # Annotations on deployments 116 | annotations: 117 | 118 | # Additional Pod Annotations added on pod created by this Deployment 119 | additionalPodAnnotations: 120 | # key: value 121 | 122 | # Annotations for fluentd Configurations 123 | fluentdConfigAnnotations: 124 | # fluentd: 125 | # regex: hello 126 | # timeFormat: world 127 | 128 | # Replicas to be created 129 
| replicas: 1 130 | 131 | # Secrets used to pull image 132 | imagePullSecrets: "" 133 | 134 | # If want to mount Envs from configmap or secret 135 | envFrom: 136 | # production-cm: 137 | # type: configmap 138 | # nameSuffix: my-configmap 139 | # logging-config: 140 | # type: configmap 141 | # nameSuffix: your-configmap 142 | # postgres-config: 143 | # type: secret 144 | # nameSuffix: postgres 145 | 146 | # Environment variables to be passed to the app container 147 | env: 148 | # ENVIRONMENT: 149 | # value: "dev" 150 | # FREQUENCY: 151 | # valueFrom: 152 | # configMapKeyRef: 153 | # name: config 154 | # key: frequency 155 | 156 | # Volumes to be added to the pod 157 | volumes: 158 | workdir-volume: 159 | emptyDir: 160 | name: workdir-volume 161 | sizeLimit: 200Mi 162 | tfcoding-terraform-cache-volume: 163 | emptyDir: 164 | name: tfcoding-terraform-cache-volume 165 | sizeLimit: 100Mi 166 | tfcoding-tmp-volume: 167 | emptyDir: 168 | name: tfcoding-tmp-volume 169 | sizeLimit: 300Mi 170 | # configmap-volume: 171 | # configMap: 172 | # name: '{{ template "application.name" . }}-configmap-nameSuffix' 173 | # secret-volume: 174 | # secret: 175 | # secretName: secret-name 176 | # persistent-volume-name: 177 | # persistentVolumeClaim: 178 | # claimName: claim-name 179 | 180 | # Mount path for Volumes 181 | volumeMounts: 182 | workdir-volume: 183 | mountPath: "/home/coder/project" 184 | name: workdir-volume 185 | readOnly: false 186 | subPath: "tfcoding/examples/mock-aws-pulumi" 187 | 188 | # volume-name-2: 189 | # mountPath: path-2 190 | 191 | # Taint tolerations for nodes 192 | tolerations: 193 | # - key: "dedicated" 194 | # operator: "Equal" 195 | # value: "app" 196 | # effect: "NoSchedule" 197 | 198 | # Pod affinity and pod anti-affinity allow you to specify rules about how pods should be placed relative to other pods. 
199 | affinity: 200 | # nodeAffinity: 201 | # requiredDuringSchedulingIgnoredDuringExecution: 202 | # nodeSelectorTerms: 203 | # - matchExpressions: 204 | # - key: disktype 205 | # operator: In 206 | # values: 207 | # - ssd 208 | 209 | # Topology spread constraints 210 | topologySpreadConstraints: 211 | # - maxSkew: 1 212 | # topologyKey: kubernetes.io/hostname 213 | # whenUnsatisfiable: ScheduleAnyway 214 | # labelSelector: 215 | # matchExpressions: 216 | # - key: disktype 217 | # operator: In 218 | # values: 219 | # - ssd 220 | # - maxSkew: 1 221 | # topologyKey: topology.kubernetes.io/zone 222 | # whenUnsatisfiable: ScheduleAnyway 223 | # labelSelector: 224 | # matchExpressions: 225 | # - key: disktype 226 | # operator: In 227 | # values: 228 | # - ssd 229 | 230 | # Number of ReplicaSet versions to retain 231 | revisionHistoryLimit: 3 232 | 233 | # Image of the app container 234 | name: code-server 235 | # Image of the app container 236 | image: 237 | repository: codercom/code-server 238 | tag: latest 239 | digest: "" # if set to a non empty value, digest takes precedence on the tag 240 | pullPolicy: IfNotPresent 241 | security_context: 242 | run_as_user: 1000 # appuser 243 | capabilities: 244 | drop: ["all"] 245 | 246 | dnsConfig: 247 | # options: 248 | # - name: ndots 249 | # value: '1' 250 | # Startup, Readiness and Liveness probes 251 | startupProbe: 252 | enabled: false 253 | failureThreshold: 30 254 | periodSeconds: 10 255 | # Must specify either one of the following field when enabled 256 | httpGet: {} 257 | exec: {} 258 | tcpSocket: {} 259 | 260 | readinessProbe: 261 | enabled: true 262 | failureThreshold: 3 263 | periodSeconds: 10 264 | successThreshold: 1 265 | timeoutSeconds: 1 266 | initialDelaySeconds: 10 267 | # Must specify either one of the following field when enabled 268 | httpGet: {} 269 | exec: 270 | command: ["curl", "-sL", "http://localhost:8080/healthz"] 271 | tcpSocket: {} 272 | 273 | livenessProbe: 274 | enabled: false 275 | 
failureThreshold: 3 276 | periodSeconds: 10 277 | successThreshold: 1 278 | timeoutSeconds: 1 279 | initialDelaySeconds: 10 280 | # Must specify either one of the following field when enabled 281 | httpGet: {} 282 | exec: {} 283 | tcpSocket: {} 284 | 285 | # Resources to be defined for pod 286 | resources: 287 | limits: 288 | memory: 256Mi 289 | cpu: 0.5 290 | requests: 291 | memory: 128Mi 292 | cpu: 0.1 293 | 294 | # Security Context at Container Level 295 | containerSecurityContext: 296 | readOnlyRootFilesystem: false 297 | runAsNonRoot: false 298 | runAsUser: 1000 299 | fsGroup: 1000 300 | 301 | openshiftOAuthProxy: 302 | enabled: false 303 | port: 8080 # Port on which application is running inside container 304 | secretName: "openshift-oauth-proxy-tls" 305 | image: openshift/oauth-proxy:latest # If you have a custom container for oauth-proxy that can be updated here 306 | disableTLSArg: false # If disabled --http-address=:8081 will be used instead of --https-address=:8443 , to be used when an ingress is used for application 307 | # Add additional containers besides init and app containers 308 | additionalContainers: 309 | - name: localstack 310 | image: localstack/localstack:latest 311 | env: 312 | - name: LS_LOG 313 | value: error 314 | ports: 315 | - containerPort: 4566 316 | - containerPort: 4571 317 | readinessProbe: 318 | exec: 319 | command: 320 | - /bin/bash 321 | - -c 322 | - curl -s http://localhost:4566/_localstack/init/ready | jq -r .completed==true || exit 1 323 | initialDelaySeconds: 5 324 | periodSeconds: 5 325 | volumeMounts: 326 | - mountPath: /etc/localstack/init/ready.d/healthcheck-init.sh 327 | name: workdir-volume 328 | readOnly: false 329 | subPath: tfcoding/localstack/healthcheck-init.sh 330 | - name: tfcoding 331 | image: unfor19/tfcoding:latest 332 | pullPolicy: Always 333 | volumeMounts: 334 | - name: workdir-volume 335 | mountPath: /src 336 | subPath: tfcoding 337 | readOnly: false 338 | - name: tfcoding-terraform-cache-volume 339 | 
mountPath: /home/appuser/.terraform.d/plugin-cache 340 | readOnly: false 341 | - name: tfcoding-tmp-volume 342 | mountPath: /tmp/.terraform 343 | readOnly: false 344 | args: 345 | - "--src_dir_relative_path" 346 | - "examples/mock-aws-pulumi" 347 | - "--watching" 348 | - "--mock_aws" 349 | 350 | # Security Context for the pod 351 | securityContext: 352 | # fsGroup: 1000 353 | # runAsUser: 1000 354 | # readOnlyRootFilesystem: true 355 | # runAsNonRoot: false 356 | 357 | # Command for primary container 358 | command: [] 359 | 360 | # Args for primary contaner 361 | args: 362 | - "--app-name" 363 | - "tfcoding-playground" 364 | - "--auth" 365 | - "none" 366 | 367 | # List of ports for the primary container 368 | ports: 369 | - containerPort: 8080 370 | name: http 371 | protocol: TCP 372 | #- containerPort: 8778 373 | # name: jolokia 374 | # protocol: TCP 375 | #- containerPort: 8443 376 | # name: https 377 | # protocol: TCP 378 | 379 | # Networking using the host network 380 | hostNetwork: 381 | 382 | # Graceful termination timeout 383 | terminationGracePeriodSeconds: 384 | 385 | ########################################################## 386 | # Add Storage volumes to the pods 387 | ########################################################## 388 | persistence: 389 | enabled: false 390 | mountPVC: false 391 | mountPath: "/" 392 | name: "" 393 | accessMode: ReadWriteOnce 394 | ## If defined, storageClass: 395 | ## If set to "-", storageClass: "", which disables dynamic provisioning 396 | ## If undefined (the default) or set to null, no storageClass spec is 397 | ## set, choosing the default provisioner. 
(gp2 on AWS, standard on 398 | ## GKE, AWS & OpenStack) 399 | ## 400 | storageClass: "-" 401 | additionalLabels: 402 | # key: "value" 403 | annotations: 404 | # "helm.sh/resource-policy": keep 405 | storageSize: 8Gi 406 | volumeMode: "" 407 | volumeName: "" 408 | 409 | ########################################################## 410 | # Service object for servicing pods 411 | ########################################################## 412 | service: 413 | enabled: true 414 | additionalLabels: 415 | # expose: "true" 416 | 417 | annotations: 418 | # config.xposer.stakater.com/Domain: stakater.com 419 | # config.xposer.stakater.com/IngressNameTemplate: '{{ "{{.Service}}-{{.Namespace}}" }}' 420 | # config.xposer.stakater.com/IngressURLPath: / 421 | # config.xposer.stakater.com/IngressURLTemplate: '{{ "{{.Service}}.{{.Namespace}}.{{.Domain}}" }}' 422 | # service.alpha.openshift.io/serving-cert-secret-name: | 423 | # '{{ template "application.name" . }}-tls' 424 | # xposer.stakater.com/annotations: |- 425 | # kubernetes.io/ingress.class: external-ingress 426 | # ingress.kubernetes.io/rewrite-target: / 427 | # ingress.kubernetes.io/force-ssl-redirect: true 428 | 429 | ports: 430 | - port: 8080 431 | name: http 432 | protocol: TCP 433 | targetPort: 8080 434 | type: ClusterIP 435 | 436 | # Set to 'None' will make this service headless 437 | clusterIP: 438 | 439 | ########################################################## 440 | # Ingress object for exposing services 441 | ########################################################## 442 | ingress: 443 | enabled: false 444 | 445 | # Name of the ingress class 446 | ingressClassName: "" 447 | 448 | # Port of the service that serves pods 449 | servicePort: http 450 | 451 | #Set pathType: default is ImplementationSpecific; Options: Exact, Prefix 452 | pathType: ImplementationSpecific 453 | 454 | # List of host addresses to be exposed by this Ingress 455 | hosts: 456 | - host: chart-example.local 457 | paths: 458 | - path: / 459 | # 
pathType: '' 460 | # serviceName: '' 461 | # servicePort: '' 462 | # Additional labels for this Ingress 463 | additionalLabels: 464 | 465 | # Add annotations to this Ingress 466 | annotations: 467 | # kubernetes.io/ingress.class: external-ingress 468 | # ingress.kubernetes.io/rewrite-target: / 469 | # ingress.kubernetes.io/force-ssl-redirect: true 470 | 471 | # TLS details for this Ingress 472 | tls: 473 | # Secrets must be manually created in the namespace. 474 | # - secretName: chart-example-tls 475 | # hosts: 476 | # - chart-example.local 477 | 478 | ########################################################## 479 | # Route object for exposing services (OpenShift) 480 | ########################################################## 481 | route: 482 | enabled: false 483 | 484 | # Add annotations to this Route 485 | annotations: 486 | # kubernetes.io/ingress.class: external-ingress 487 | # ingress.kubernetes.io/rewrite-target: / 488 | # ingress.kubernetes.io/force-ssl-redirect: true 489 | 490 | # Additional labels for this Route 491 | additionalLabels: 492 | 493 | # If no host is added then openshift inserts the default hostname. 
To Add host explicitly, use host attribute 494 | host: 495 | 496 | path: 497 | # Port of the service that serves pods 498 | port: 499 | targetPort: http 500 | 501 | to: 502 | weight: 100 503 | 504 | wildcardPolicy: None 505 | 506 | tls: 507 | # TLS Termination strategy 508 | termination: edge 509 | insecureEdgeTerminationPolicy: Redirect 510 | 511 | alternateBackends: 512 | # kind: Service 513 | # name: alternate-application 514 | # weight: 20 515 | 516 | ########################################################## 517 | # SecretProviderClass 518 | ########################################################## 519 | secretProviderClass: 520 | enabled: false 521 | name: "" 522 | # name: example 523 | provider: "" 524 | # provider: vault 525 | vaultAddress: "" 526 | # vaultAddress: http://vault:8200 527 | roleName: "" 528 | # roleName: example-role 529 | objects: 530 | #- objectName: MONGO_HOST 531 | # secretPath: testing/data/mongoDb 532 | # secretKey: MONGO_HOST 533 | secretObjects: 534 | #- data: 535 | # - key: MONGO_HOST 536 | # objectName: host 537 | # secretName: secret-mongo-host 538 | # type: Opaque 539 | 540 | ########################################################## 541 | # Expose Application on Forecastle Dashboard 542 | # https://github.com/stakater/Forecastle 543 | ########################################################## 544 | forecastle: 545 | enabled: false 546 | 547 | # Add additional labels on Forecastle Custom Resource 548 | additionalLabels: 549 | 550 | # URL of the icon for the custom app 551 | icon: https://raw.githubusercontent.com/stakater/ForecastleIcons/master/stakater-big.png 552 | 553 | # Name of the application to be displayed on the Forecastle Dashboard 554 | displayName: "application" 555 | 556 | # Group for the custom app (default: .Release.Namespace) 557 | group: "" 558 | 559 | # Add properties to Custom Resource 560 | properties: 561 | 562 | # Whether app is network restricted or not 563 | networkRestricted: false 564 | 565 | 
########################################################## 566 | # Role Based Access Control (RBAC) 567 | ########################################################## 568 | rbac: 569 | enabled: true 570 | 571 | # Service Account to use by pods 572 | serviceAccount: 573 | enabled: false 574 | name: "" 575 | 576 | # Additional Labels on service account 577 | additionalLabels: 578 | # key: value 579 | 580 | # Annotations on service account 581 | annotations: 582 | # key: value 583 | 584 | # Create Roles (Namespaced) 585 | roles: 586 | # - name: configmaps 587 | # rules: 588 | # - apiGroups: 589 | # - "" 590 | # resources: 591 | # - configmaps 592 | # verbs: 593 | # - get 594 | # - name: secrets 595 | # rules: 596 | # - apiGroups: 597 | # - "" 598 | # resources: 599 | # - secrets 600 | # verbs: 601 | # - get 602 | 603 | ########################################################## 604 | # Additional ConfigMaps 605 | ########################################################## 606 | configMap: 607 | enabled: false 608 | additionalLabels: 609 | # key: value 610 | annotations: 611 | # key: value 612 | files: 613 | # nameSuffix of configMap 614 | # code-config: 615 | # key1: value1 616 | # key2: value2 617 | # dev-config: 618 | # key1: value1 619 | # key2: value2 620 | 621 | ########################################################## 622 | # SealedSecrets 623 | ########################################################## 624 | sealedSecret: 625 | enabled: false 626 | additionalLabels: 627 | #key: value 628 | annotations: 629 | #key: value 630 | files: 631 | # #nameSuffix of sealedSecret 632 | # example: 633 | # encryptedData: 634 | # name: 
AgBghrdepGMKmp/rdtJrkBv/CWpJbtmoMsbKQ7QiZZ2kUoLeeTbrDnhmJY03kWKkNW4kN/sQRf6r1vvBEaR4nkHt5f/ayAeaH3NveI3bdb0xv/svvWjyjehwqwr/kNEAVWxRoUij0Y7MyIEAr4hnV2UnrhgvcjPJLNA8bK6spA+kuT328Vpyceyvnm6yArNn1aYlEckaFHrnculHWRpG73iRYxS5GWAY7EdkLXx7OLLWoopHtLcupklYyPfraJzPvBNZ5/PsyjlUBvoQbGV3cZlrdEj1WHj2S1RQ13ddf2WGtMHmY83t9B3LFZAZuA7BBt4rjludbwQm3/tJ5Kas1dDsSIRIIF7MTeum9YfRB8XUz8IxVKQ/JDskeynrWe3VzN/3HFVnv9GGFy+BCVXZKVU/roIRancz+nXkyoOHS722ZpBi53dfLItoS5dG+0EzArMTQzK/KXHz3b1rxp5oWWDNt3WggTiSg2zwy5ZR8VV2ToTDof6UrFmbCZv/kKriyxbVSxIo3KFnvuRiUZ5MwC0TNut4mW3LKyJfHqkUuLa1mYV6tKF58qBnoj/+JaibAIBEudT9hms5U52p7/jKmgHuop7XPEsz4OVwER//Vbv7X6ctoXtyPu6mZyOfOyJHM8Qj/H7/gwMBYhZHQ96DWrVmZOsWSRpZGJni4Xm7rgt2cFj6UtWv6lvl8aOi/HSZVC3TwWZ9mQrk 635 | # annotations: 636 | # key: value 637 | # labels: 638 | # key: value 639 | # clusterWide: true 640 | # example2: 641 | # encryptedData: 642 | # name: AgBghrdepGMKmp/rdtJrkBv/CWpJbtmoMsbKQ7QiZZ2kUoLeeTbrDnhmJY03kWKkNW4kN/sQRf6r1vvBEaR4nkHt5f/ayAeaH3NveI3bdb0xv/svvWjyjehwqwr/kNEAVWxRoUij0Y7MyIEAr4hnV2UnrhgvcjPJLNA8bK6spA+kuT328Vpyceyvnm6yArNn1aYlEckaFHrnculHWRpG73iRYxS5GWAY7EdkLXx7OLLWoopHtLcupklYyPfraJzPvBNZ5/PsyjlUBvoQbGV3cZlrdEj1WHj2S1RQ13ddf2WGtMHmY83t9B3LFZAZuA7BBt4rjludbwQm3/tJ5Kas1dDsSIRIIF7MTeum9YfRB8XUz8IxVKQ/JDskeynrWe3VzN/3HFVnv9GGFy+BCVXZKVU/roIRancz+nXkyoOHS722ZpBi53dfLItoS5dG+0EzArMTQzK/KXHz3b1rxp5oWWDNt3WggTiSg2zwy5ZR8VV2ToTDof6UrFmbCZv/kKriyxbVSxIo3KFnvuRiUZ5MwC0TNut4mW3LKyJfHqkUuLa1mYV6tKF58qBnoj/+JaibAIBEudT9hms5U52p7/jKmgHuop7XPEsz4OVwER//Vbv7X6ctoXtyPu6mZyOfOyJHM8Qj/H7/gwMBYhZHQ96DWrVmZOsWSRpZGJni4Xm7rgt2cFj6UtWv6lvl8aOi/HSZVC3TwWZ9mQrk 643 | 644 | ########################################################## 645 | # Additional Secrets 646 | ########################################################## 647 | secret: 648 | enabled: false 649 | additionalLabels: 650 | # key: value 651 | annotations: 652 | # key: value 653 | files: 654 | # nameSuffix of Secret 655 | # credentials: 656 | # data: 657 | # secretKey1: secretValue1 658 | # secretKey2: 
secretValue2 659 | # password: 660 | # data: 661 | # secretKey1: secretValue1 662 | # secretKey2: secretValue2 663 | # apiKey: 664 | # stringData: 665 | # secretKey1: secretValue1 666 | # secretKey2: secretValue2 667 | # secondApiKeu: 668 | # encodedData: 669 | # secretKey1: dGVzdFZhbHVl 670 | # secretKey2: dGVzdFZhbHVl 671 | 672 | ########################################################## 673 | # Service Monitor to collect Prometheus metrices 674 | ########################################################## 675 | serviceMonitor: 676 | enabled: false 677 | 678 | # Additional labels 679 | additionalLabels: 680 | # key: value 681 | 682 | # Additional annotations 683 | annotations: 684 | # key: value 685 | 686 | # List of the endpoints of service from which prometheus will scrape data 687 | endpoints: 688 | - interval: 5s 689 | path: /actuator/prometheus 690 | port: http 691 | 692 | ########################################################## 693 | # HPA - Horizontal Pod Autoscaling 694 | ########################################################## 695 | autoscaling: 696 | # enabled is a boolean flag for enabling or disabling autoscaling 697 | enabled: false 698 | # additionalLabels defines additional labels 699 | additionalLabels: 700 | # key: value 701 | # annotations defines annotations in key value pair 702 | annotations: 703 | # key: value 704 | # minReplicas sets the minimum number of replicas 705 | minReplicas: 1 706 | # maxReplicas sets the maximum number of replicas 707 | maxReplicas: 10 708 | # metrics is the list of metrics used for hpa 709 | metrics: 710 | - type: Resource 711 | resource: 712 | name: cpu 713 | target: 714 | type: Utilization 715 | averageUtilization: 60 716 | - type: Resource 717 | resource: 718 | name: memory 719 | target: 720 | type: Utilization 721 | averageUtilization: 60 722 | 723 | ########################################################## 724 | # VPA - Vertical Pod Autoscaling 725 | 
########################################################## 726 | vpa: 727 | # enabled is a boolean flag for enabling or disabling vpa 728 | enabled: false 729 | # additionalLabels defines additional labels 730 | additionalLabels: 731 | # key: value 732 | # annotations defines annotations in key value pair 733 | annotations: 734 | # key: value 735 | # container policies for individual containers. 736 | containerPolicies: [] 737 | updatePolicy: 738 | updateMode: Auto 739 | 740 | ########################################################## 741 | # EndpointMonitor for IMC 742 | # https://github.com/stakater/IngressMonitorController 743 | ########################################################## 744 | endpointMonitor: 745 | enabled: false 746 | 747 | # Additional labels 748 | additionalLabels: 749 | # key: value 750 | 751 | # Additional annotations 752 | annotations: 753 | # key: value 754 | 755 | ########################################################## 756 | # Certficate CRD to generate the certificate 757 | ########################################################## 758 | certificate: 759 | enabled: false 760 | 761 | # Additional labels 762 | additionalLabels: 763 | # key: value 764 | 765 | # Additional annotations 766 | annotations: 767 | # key: value 768 | 769 | secretName: tls-cert 770 | duration: 8760h0m0s # 1 year 771 | renewBefore: 720h0m0s # 30d 772 | subject: 773 | # organizations: 774 | # - stakater 775 | # countries: 776 | # - SE 777 | # organizationalUnits: 778 | # - '{{ template "application.namespace" . 
}}' 779 | # localities: 780 | # - Stockholm 781 | # provinces: 782 | # - Stockholm 783 | commonName: admin-app 784 | keyAlgorithm: rsa 785 | keyEncoding: pkcs1 786 | keySize: 2048 787 | isCA: false 788 | usages: 789 | # - digital signature 790 | # - client auth 791 | dnsNames: 792 | # - admin-app 793 | ipAddresses: 794 | # - 192.168.0.5 795 | uriSANs: 796 | # - spiffe://cluster.local/ns/sandbox/sa/example 797 | emailSANs: 798 | # - emailSubjectAltNames 799 | privateKey: 800 | enabled: false 801 | rotationPolicy: Always 802 | issuerRef: 803 | name: ca-issuer 804 | # We can reference ClusterIssuers by changing the kind here. 805 | kind: ClusterIssuer 806 | group: #cert-manager.io 807 | keystores: 808 | enabled: false 809 | pkcs12: 810 | create: true 811 | key: test_key 812 | name: test-creds 813 | jks: 814 | create: false 815 | key: test_key 816 | name: test-creds 817 | 818 | ########################################################## 819 | # AlertmanagerConfig object for defining application 820 | # specific alertmanager configurations 821 | ########################################################## 822 | alertmanagerConfig: 823 | enabled: false 824 | 825 | # AlertmanagerConfig selectionLabels to specify label to be picked up by Alertmanager to add it to base config. 
Read more about it at [https://docs.openshift.com/container-platform/4.7/rest_api/monitoring_apis/alertmanager-monitoring-coreos-com-v1.html] under .spec.alertmanagerConfigSelector 826 | selectionLabels: 827 | alertmanagerConfig: "workload" 828 | 829 | # AlertmanagerConfig spec, read details here [https://docs.openshift.com/container-platform/4.7/rest_api/monitoring_apis/alertmanagerconfig-monitoring-coreos-com-v1alpha1.html] 830 | spec: 831 | route: 832 | # receiver: "null" 833 | # groupBy: 834 | # - job 835 | # routes: 836 | # - receiver: "null" 837 | # groupBy: 838 | # - alertname 839 | # - severity 840 | # continue: true 841 | # groupWait: 30s 842 | # groupInterval: 5m 843 | # repeatInterval: 12h 844 | receivers: [] 845 | # - name: "null" 846 | inhibitRules: [] 847 | # - sourceMatch: 848 | # severity: 'critical' 849 | # targetMatch: 850 | # severity: 'warning' 851 | # equal: ['cluster', 'service'] 852 | 853 | ########################################################## 854 | # PrometheusRule object for defining application 855 | # alerting rules 856 | ########################################################## 857 | prometheusRule: 858 | enabled: false 859 | 860 | # PrometheusRule labels 861 | additionalLabels: 862 | # prometheus: stakater-workload-monitoring 863 | # role: alert-rules 864 | 865 | # Groups with alerting rules. 
Read more here [https://docs.openshift.com/container-platform/4.7/rest_api/monitoring_apis/prometheusrule-monitoring-coreos-com-v1.html] 866 | 867 | groups: 868 | [] 869 | # - name: example-app-uptime 870 | # rules: 871 | # - alert: ExampleAppDown 872 | # annotations: 873 | # message: >- 874 | # The Example App is Down (Test Alert) 875 | # expr: up{namespace="test-app"} == 0 876 | # for: 1m 877 | # labels: 878 | # severity: critical 879 | ########################################################## 880 | # External Secrets 881 | ########################################################## 882 | externalSecret: 883 | enabled: false 884 | 885 | # Default SecretStore for all externalsecrets defines which SecretStore to use when fetching the secret data 886 | secretStore: 887 | name: tenant-vault-secret-store 888 | #kind: ClusterSecretStore # Defaults to SecretStore if not specified 889 | 890 | # RefreshInterval is the amount of time before the values reading again from the SecretStore provider 891 | refreshInterval: "1m" 892 | files: 893 | # mongodb: 894 | # # Data defines the connection between the Kubernetes Secret keys and the Provider data 895 | # data: 896 | # mongo-password: 897 | # remoteRef: 898 | # key: monodb 899 | # property: passowrd 900 | # secretStore: 901 | # name: secret-store-name-2 # specify if value is other than default secretstore 902 | # labels: 903 | # stakater.com/app: mongodb 904 | # # 905 | # postgres: 906 | ## Used to fetch all properties from the Provider key 907 | # dataFrom: 908 | # key: postgres 909 | 910 | ########################################################## 911 | # Network Policy 912 | ########################################################## 913 | networkPolicy: 914 | enabled: false 915 | additionalLabels: 916 | # key: value 917 | annotations: 918 | # key: value 919 | ingress: 920 | # - from: 921 | # - ipBlock: 922 | # cidr: 172.17.0.0/16 923 | # except: 924 | # - 172.17.1.0/24 925 | # - namespaceSelector: 926 | # matchLabels: 927 
| # project: myproject 928 | # - podSelector: 929 | # matchLabels: 930 | # role: frontend 931 | # ports: 932 | # - protocol: TCP 933 | # port: 6379 934 | egress: 935 | # - to: 936 | # - ipBlock: 937 | # cidr: 10.0.0.0/24 938 | # ports: 939 | # - protocol: TCP 940 | # port: 5978 941 | 942 | ########################################################## 943 | # Pod disruption budget - PDB 944 | ########################################################## 945 | pdb: 946 | enabled: false 947 | minAvailable: 1 948 | # maxUnavailable: 1 949 | 950 | ########################################################## 951 | # grafanaDashboard object for defining application 952 | # Grafana Dashboard 953 | ########################################################## 954 | grafanaDashboard: 955 | enabled: false 956 | 957 | # GrafanaDashboard additonal labels 958 | additionalLabels: 959 | # grafanaDashboard: grafana-operator 960 | 961 | # GrafanaDashboard annotations 962 | annotations: 963 | # key: value 964 | 965 | # GrafanaDashboard contents 966 | # this includes pairs of dashboard name and associated json content 967 | # Accoroding to GrafanaDashboard behavior, if both url and json are specified then the GrafanaDashboard content will be updated with fetched content from url 968 | contents: 969 | # dashboard-name-1: 970 | # json: |- 971 | # { 972 | # "data" 973 | # } 974 | # url: http://hostname/path/to/file.json 975 | # dashboard-name-2: 976 | # json: |- 977 | # { 978 | # "data" 979 | # } 980 | # url: http://hostname/path/to/file.json 981 | -------------------------------------------------------------------------------- /kubernetes/argocd-repositories.yml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: Secret 3 | metadata: 4 | name: tfcoding-helm-repo-secret 5 | namespace: argocd 6 | labels: 7 | argocd.argoproj.io/secret-type: repository 8 | type: Opaque 9 | stringData: 10 | url: https://github.com/unfor19/tfcoding.git 11 | 
name: helm-app 12 | type: helm 13 | path: helm-app 14 | -------------------------------------------------------------------------------- /localstack/healthcheck-init.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -e 3 | if jq --version 2>/dev/null; then 4 | : 5 | else 6 | curl -sL -o /usr/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 7 | chmod +x /usr/bin/jq 8 | fi 9 | export AWS_REGION="eu-west-1" 10 | export AWS_DEFAULT_REGION="$AWS_REGION" 11 | 12 | # Tests for init 13 | awslocal s3 mb s3://test-bucket 1>/dev/null 2>/dev/null || true 14 | awslocal s3 ls test-bucket 1>/dev/null 2>/dev/null 15 | 16 | # Completed init :) 17 | echo "Localstack is Ready!" 18 | -------------------------------------------------------------------------------- /pulumi/.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | venv/ -------------------------------------------------------------------------------- /pulumi/Pulumi.dev.yaml: -------------------------------------------------------------------------------- 1 | config: 2 | tfcoding:namespace: apps 3 | tfcoding:replicas: "1" 4 | -------------------------------------------------------------------------------- /pulumi/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: tfcoding 2 | runtime: 3 | name: python 4 | options: 5 | virtualenv: venv 6 | description: A Python program to deploy a web application onto a Kubernetes cluster 7 | -------------------------------------------------------------------------------- /pulumi/README.md: -------------------------------------------------------------------------------- 1 | ## Requirements 2 | 3 | ```bash 4 | brew install minikube pulumi/tap/pulumi 5 | ``` 6 | 7 | ## Getting Started 8 | 9 | ```bash 10 | minikube start --driver=docker --kubernetes-version=v1.28.5 11 | ``` 12 | 13 | ```bash 14 | pulumi 
login 15 | ``` 16 | 17 | ```bash 18 | pulumi up 19 | ``` 20 | 21 | ```bash 22 | pulumi refresh 23 | ``` 24 | 25 | ```bash 26 | pulumi destroy 27 | ``` 28 | 29 | Forces cancel deployment - Resolves 409 errors 30 | 31 | ```bash 32 | pulumi cancel -y 33 | ``` 34 | -------------------------------------------------------------------------------- /pulumi/__main__.py: -------------------------------------------------------------------------------- 1 | import pulumi 2 | import pulumi_kubernetes as kubernetes 3 | 4 | from containers import Containers, InitContainers 5 | import constants 6 | 7 | # Get some values from the Pulumi stack configuration, or use defaults 8 | config = pulumi.Config() 9 | k8sNamespace = config.get("namespace", "apps") 10 | numReplicas = config.get_float("replicas", 1) 11 | app_labels = { 12 | "app": "tfcoding-playground", 13 | } 14 | 15 | # Create a namespace 16 | webserverns = kubernetes.core.v1.Namespace( 17 | "apps", 18 | metadata=kubernetes.meta.v1.ObjectMetaArgs( 19 | name=k8sNamespace, 20 | ) 21 | ) 22 | 23 | 24 | tfcoding_playground_deployment = kubernetes.apps.v1.Deployment( 25 | "tfcoding-playground", 26 | metadata=kubernetes.meta.v1.ObjectMetaArgs( 27 | namespace=webserverns.metadata.name, 28 | ), 29 | spec=kubernetes.apps.v1.DeploymentSpecArgs( 30 | selector=kubernetes.meta.v1.LabelSelectorArgs( 31 | match_labels=app_labels, 32 | ), 33 | replicas=numReplicas, 34 | template=kubernetes.core.v1.PodTemplateSpecArgs( 35 | metadata=kubernetes.meta.v1.ObjectMetaArgs( 36 | labels=app_labels, 37 | ), 38 | spec=kubernetes.core.v1.PodSpecArgs( 39 | init_containers=[ 40 | InitContainers["git"], 41 | InitContainers["prepare"] 42 | ], 43 | containers=[ 44 | Containers["tfcoding"], 45 | Containers["localstack"], 46 | Containers["code-server"], 47 | ], 48 | volumes=[ 49 | kubernetes.core.v1.VolumeArgs( 50 | name=constants.WORKDIR_VOLUME_NAME, 51 | empty_dir=kubernetes.core.v1.EmptyDirVolumeSourceArgs(), 52 | ), 53 | ], 54 | ), 55 | ), 56 | ) 57 | ) 58 | 
59 | 60 | codeServerService = kubernetes.core.v1.Service( 61 | "code-server-service", 62 | metadata=kubernetes.meta.v1.ObjectMetaArgs( 63 | namespace=webserverns.metadata.name, 64 | ), 65 | spec=kubernetes.core.v1.ServiceSpecArgs( 66 | type="LoadBalancer", 67 | ports=[ 68 | kubernetes.core.v1.ServicePortArgs( 69 | port=8080, 70 | target_port=8080 71 | ) 72 | ], 73 | selector=app_labels 74 | ) 75 | ) 76 | 77 | pulumi.export("namespace", webserverns.metadata.name) 78 | pulumi.export("code-server-service", codeServerService.metadata.name) 79 | pulumi.export("deploymentName", tfcoding_playground_deployment.metadata.name) 80 | -------------------------------------------------------------------------------- /pulumi/constants.py: -------------------------------------------------------------------------------- 1 | 2 | 3 | ROOT_DIR = "/home/appuser" 4 | WORKDIR_PATH = f"{ROOT_DIR}/tfcoding" 5 | WORKDIR_VOLUME_NAME = "workdir-volume" 6 | -------------------------------------------------------------------------------- /pulumi/containers.py: -------------------------------------------------------------------------------- 1 | import pulumi_kubernetes as kubernetes 2 | import constants 3 | 4 | 5 | InitContainers = { 6 | "git": kubernetes.core.v1.ContainerArgs( 7 | working_dir=constants.ROOT_DIR, 8 | image="unfor19/tfcoding:latest", 9 | name="git", 10 | command=["git", "clone"], 11 | args=["https://github.com/unfor19/tfcoding.git"], 12 | volume_mounts=[ 13 | kubernetes.core.v1.VolumeMountArgs( 14 | name=constants.WORKDIR_VOLUME_NAME, 15 | mount_path=constants.ROOT_DIR, 16 | read_only=False, 17 | ) 18 | ], 19 | ), 20 | "prepare": kubernetes.core.v1.ContainerArgs( 21 | image="unfor19/tfcoding:latest", 22 | working_dir=constants.WORKDIR_PATH, 23 | name="prepare", 24 | command=["bash", "-c"], 25 | args=[ 26 | "git checkout feature/kubernetes && \ 27 | chmod +x localstack/healthcheck-init.sh" 28 | ], 29 | volume_mounts=[ 30 | kubernetes.core.v1.VolumeMountArgs( 31 | 
name=constants.WORKDIR_VOLUME_NAME, 32 | mount_path=constants.WORKDIR_PATH, 33 | read_only=False, 34 | sub_path="tfcoding" 35 | ) 36 | ], 37 | restart_policy="Always", 38 | ) 39 | } 40 | 41 | Containers = { 42 | "localstack": kubernetes.core.v1.ContainerArgs( 43 | image="localstack/localstack:latest", 44 | name="localstack", 45 | env=[kubernetes.core.v1.EnvVarArgs( 46 | name="LS_LOG", 47 | value="error" 48 | )], 49 | ports=[ 50 | kubernetes.core.v1.ContainerPortArgs( 51 | container_port=4566, 52 | ), 53 | kubernetes.core.v1.ContainerPortArgs( 54 | container_port=4571, 55 | ) 56 | ], 57 | readiness_probe=kubernetes.core.v1.ProbeArgs( 58 | exec_=kubernetes.core.v1.ExecActionArgs( 59 | command=[ 60 | "/bin/bash", "-c", "curl -s http://localhost:4566/_localstack/init/ready | jq -r .completed==true || exit 1" 61 | ], 62 | ), 63 | initial_delay_seconds=5, 64 | period_seconds=5, 65 | ), 66 | volume_mounts=[ 67 | kubernetes.core.v1.VolumeMountArgs( 68 | mount_path="/etc/localstack/init/ready.d/healthcheck-init.sh", 69 | name=constants.WORKDIR_VOLUME_NAME, 70 | read_only=False, 71 | sub_path=f"tfcoding/localstack/healthcheck-init.sh", 72 | ) 73 | ], 74 | ), 75 | "tfcoding": kubernetes.core.v1.ContainerArgs( 76 | image="unfor19/tfcoding:latest", 77 | security_context=kubernetes.core.v1.SecurityContextArgs( 78 | run_as_user=1000, # appuser 79 | capabilities=kubernetes.core.v1.CapabilitiesArgs( 80 | drop=["all"] 81 | ) 82 | ), 83 | name="tfcoding", 84 | env=[ 85 | kubernetes.core.v1.EnvVarArgs( 86 | name="AWS_REGION", 87 | value="us-east-1" 88 | ), 89 | kubernetes.core.v1.EnvVarArgs( 90 | name="AWS_DEFAULT_REGION", 91 | value="us-east-1" 92 | ) 93 | ], 94 | volume_mounts=[ 95 | kubernetes.core.v1.VolumeMountArgs( 96 | mount_path="/src", 97 | name=constants.WORKDIR_VOLUME_NAME, 98 | read_only=False, 99 | sub_path="tfcoding", 100 | ) 101 | ], 102 | args=[ 103 | "--src_dir_relative_path", 104 | "examples/mock-aws-pulumi", 105 | "--watching", 106 | "--mock_aws"] 107 | ), 108 | 
"code-server": kubernetes.core.v1.ContainerArgs( 109 | name="code-server", 110 | working_dir="/home/coder/project", 111 | image="codercom/code-server:latest", 112 | security_context=kubernetes.core.v1.SecurityContextArgs( 113 | run_as_user=1000, # appuser 114 | capabilities=kubernetes.core.v1.CapabilitiesArgs( 115 | drop=["all"] 116 | ) 117 | ), 118 | ports=[ 119 | kubernetes.core.v1.ContainerPortArgs( 120 | container_port=8080, 121 | ) 122 | ], 123 | args=[ 124 | "--app-name", 125 | "tfcoding-playground", 126 | "--auth", 127 | "none", 128 | ], 129 | volume_mounts=[ 130 | kubernetes.core.v1.VolumeMountArgs( 131 | mount_path="/home/coder/project", 132 | name=constants.WORKDIR_VOLUME_NAME, 133 | read_only=False, 134 | sub_path="tfcoding/examples/mock-aws-pulumi", 135 | ) 136 | ], 137 | ), 138 | "loki": kubernetes.core.v1.ContainerArgs( 139 | name="loki", 140 | image="grafana/loki:latest", 141 | args=[ 142 | "-config.file=/etc/loki/local-config.yaml" 143 | ], 144 | ports=[ 145 | kubernetes.core.v1.ContainerPortArgs( 146 | container_port=3100, 147 | ) 148 | ], 149 | volume_mounts=[ 150 | kubernetes.core.v1.VolumeMountArgs( 151 | name="loki-volume", 152 | mount_path="/etc/loki", 153 | read_only=True 154 | ) 155 | ] 156 | ), 157 | "promtail": kubernetes.core.v1.ContainerArgs( 158 | name="promtail", 159 | image="grafana/promtail:latest", 160 | args=[ 161 | "-config.file=/etc/promtail/config.yaml" 162 | ], 163 | volume_mounts=[ 164 | kubernetes.core.v1.VolumeMountArgs( 165 | name="promtail-volume", 166 | mount_path="/etc/promtail", 167 | read_only=True 168 | ) 169 | ] 170 | ) 171 | } 172 | -------------------------------------------------------------------------------- /pulumi/requirements.txt: -------------------------------------------------------------------------------- 1 | pulumi>=3.0.0,<4.0.0 2 | pulumi-kubernetes>=4.0.0,<5.0.0 3 | -------------------------------------------------------------------------------- /scripts/tests.sh: 
#!/bin/bash
# Smoke tests for the tfcoding Docker image.
# Usage: DOCKERHUB_TAG=<image:tag> ./scripts/tests.sh
set -e
set -o pipefail

# Image under test; override with the DOCKERHUB_TAG environment variable.
_DOCKERHUB_TAG="${DOCKERHUB_TAG:-"unfor19/tfcoding:latest"}"


# Print an error message in red and terminate the whole test run (exit 1).
# $1 - message to print
error_msg(){
    local msg=$1
    echo -e "\e[31m[ERROR]\e[0m $msg"
    # NOTE(review): exported immediately before exit, so no other process can
    # observe it; kept for backward compatibility — confirm nothing relies on it.
    export DEBUG=1
    exit 1
}

# Run one test case and check that it passes/fails as expected.
# $1 - expected result: "pass" or "fail"
# $2 - human-readable test name
# $3 - command line to execute (intentionally word-split)
should(){
    local expected=$1
    local test_name=$2
    local expr=$3
    local output_msg output_code
    echo "-------------------------------------------------------"
    echo "[LOG] $test_name - Should $expected"
    echo "[LOG] Executing: $expr"
    # Capture output and exit code without tripping 'set -e'.
    # (Replaces the previous "trap '$expr' EXIT" inside a command substitution,
    # whose captured exit status is shell-version dependent.)
    # shellcheck disable=SC2086  # $expr is intentionally word-split
    if output_msg=$($expr); then
        output_code=0
    else
        output_code=$?
    fi

    echo -e "[LOG] Output:\n\n$output_msg\n"

    if [[ $expected == "pass" && $output_code -eq 0 && ! $output_msg =~ (ERROR|Error|error) ]]; then
        echo -e "\e[92m[SUCCESS]\e[0m Test passed as expected"
    # Any non-zero exit code counts as a failure (was '-eq 1', which missed
    # commands failing with other codes), as does an error marker in the output.
    elif [[ $expected == "fail" ]] && [[ $output_code -ne 0 || $output_msg =~ (ERROR|Error|error) ]]; then
        echo -e "\e[92m[SUCCESS]\e[0m Test failed as expected"
    else
        error_msg "Test output is not expected, terminating"
    fi
}


# Run the image under test with the repository mounted read-only at /src.
tfcoding(){
    docker run --rm -t -v "${PWD}"/:/src/:ro \
        "${_DOCKERHUB_TAG}" "$@"
}


# Tests
should pass "Help Menu" "tfcoding --help"
should pass "Basic Example - All local values" "tfcoding -r examples/basic"
should pass "Basic Example - Single local value" "tfcoding -r examples/basic -o private_subnets"
should pass "Complex Example - All local values" "tfcoding -r examples/complex"
should pass "Complex Example - Single local values" "tfcoding -r examples/complex -o my_keys"
should fail "No arguments provided" "tfcoding"
should fail "Unknown single value" "tfcoding -r examples/complex -o unknown_local_value"
should fail "Non existing dir" "tfcoding -r examples/non-existing-dir"