├── .gitignore ├── LICENSE ├── README.md ├── aws ├── ecs.mk ├── localstack.mk ├── main.mk ├── rds.mk ├── secrets.mk └── variables.mk ├── examples └── simple │ ├── .envrc-example │ ├── .infra │ ├── .gitignore │ ├── env │ │ └── testnut │ │ │ ├── env.mk │ │ │ ├── main.tf │ │ │ ├── secrets │ │ │ └── example-service.json │ │ │ └── variables.tf │ └── icmk │ ├── Makefile │ └── README.md ├── init.mk ├── ize ├── ize.mk └── variables.mk ├── main.mk ├── serverless ├── main.mk └── variables.mk ├── terraform ├── main.mk ├── templates │ ├── backend.tf.gotmpl │ ├── terraform.tfplan.gotmpl │ └── terraform.tfvars.gotmpl └── variables.mk ├── tunnel ├── main.mk └── variables.mk ├── variables.mk └── waypoint ├── main.mk ├── templates └── waypoint.wpvars.gotmpl └── variables.mk /.gitignore: -------------------------------------------------------------------------------- 1 | report.xml 2 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020-present HazelOps OÜ https://hazelops.com 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ⚠️This repository is not maintained⚠️ 2 | # ⚠️This repository is outdated and deprecated⚠️ 3 | 4 | ### Please, use [ize](https://github.com/hazelops/ize) tool instead. 5 | 6 | # Infrastructure as Code Make Framework 7 | 8 | This framework is an attempt to create a convenient way to manage Infrastructure as Code with low barrier of entry for the Runner. 9 | 10 | The idea is to use [GNU Make](https://www.gnu.org/software/make/) as a vehicle for wrapping the complexity and presenting a nice Runner Experience. 11 | 12 | This way, a coherent set of commands can be used locally or on the CI, as simple as: 13 | ```shell script 14 | make deploy # One-stop command that deploys everything from scratch in a right order. Secrets, Infra, Applications, etc. 
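# For example (illustrative), the same command scoped to the sample environment shipped with this repo:
ENV=testnut make deploy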
15 | ``` 16 | Or 17 | ```shell script 18 | make secrets # Pushes secrets to SSM 19 | make infra # Deploys the whole infrastructure (Terraform) 20 | make api # Builds a Docker image, pushes it to the Docker registry and deploys the ECS service 21 | ``` 22 | Or 23 | ```shell script 24 | make tunnel # Creates an SSH tunnel via bastion host 25 | ``` 26 | 27 | # Quickstart 28 | This `init` one-liner will download and configure icmk in your directory (defaults to `.infra/icmk`, customizable). 29 | ```shell script 30 | make init -f $(curl -Ls https://hzl.xyz/icmk > $TMPDIR/icmk.mk && echo "$TMPDIR/icmk.mk") 31 | ``` 32 | 33 | ## Populate sample config 34 | ```shell script 35 | make examples.simple -f .infra/icmk/init.mk 36 | ``` 37 | 38 | This will create the following: 39 | - Sample `Makefile`, which you can (and should) customize 40 | - Sample `.envrc` (which you can use with [direnv](https://github.com/direnv/direnv)) 41 | - Sample Terraform environment structure under `.infra/env/testnut`, which includes a demo of a bastion host and an SSH tunnel. It forwards `localhost:222` to `bastion:22`. See `make tunnel.up` and `make tunnel.down`. To use `make tunnel.up`, the Terraform config must be applied at least once (locally or via CI/CD). 42 | - Sample secrets directory that is used to push secrets to SSM via `make secrets`. Make sure to keep your `secrets/*.json` files out of git. 43 | 44 | This won't create: 45 | - Anything else. 46 | 47 | 48 | # What's Wrong With Shell Scripts? 49 | Shell scripts do the job, but eventually they lose coherency and turn into bash spaghetti. Makefiles are declarative and support dependencies between targets. GNU Make can also be modular, which makes it possible to build a good Runner Experience through abstractions. There is more to it, but if this is not enough, feel free to submit a GitHub Issue with any questions or concerns. 50 | 51 | ## Ensure your Terraform has required outputs 52 | This framework relies heavily on Terraform outputs for various values. It stores them in `output.json` and then reads them back as needed (see the example at the end of this README). 53 | 54 | # Features 55 | Currently, the main features include: 56 | - Terraform 57 | - AWS 58 | - Docker 59 | - ECS 60 | - SSH Tunnel 61 | 62 | # Dependencies 63 | The only dependencies you need are: 64 | - GNU Make 65 | - Git 66 | - Docker 67 | 68 | # Disclaimer 69 | This framework is inspired by the principles of delivering a good [Runner Experience](https://automationd.com/runner-experience-design/). It is provided as-is. 70 | 71 | \*This is far from a complete framework: many features are still missing, and naming and structure can be improved. Even though it works, use it at your own risk. PRs are welcome!
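## Example: reading the stored Terraform outputs
As a rough illustration of the flow described in "Ensure your Terraform has required outputs": after `make infra`, the outputs are also pushed, base64-encoded, to SSM under `/<env>/terraform-output` (this is what the framework's internal macros read back). Assuming `aws`, `jq` and `base64` are available locally and your AWS profile is configured, the sample `testnut` environment's outputs could be inspected like this:
```shell script
aws ssm get-parameter --name "/testnut/terraform-output" --with-decryption \
  | jq -r '.Parameter.Value' | base64 -d | jq .
```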
72 | 73 | -------------------------------------------------------------------------------- /aws/ecs.mk: -------------------------------------------------------------------------------- 1 | # Macroses 2 | ######################################################################################################################## 3 | TAG ?= $(ENV) 4 | TAG_LATEST ?= $(ENV)-latest 5 | 6 | ECS_CLUSTER_NAME ?= $(ENV)-$(NAMESPACE) 7 | ECS_SERVICE_NAME ?= $(SERVICE_NAME) 8 | ECS_TASK_NAME ?= $(ENV)-$(SVC) 9 | DOCKER_REGISTRY ?= $(AWS_ACCOUNT).dkr.ecr.$(AWS_REGION).amazonaws.com 10 | DOCKER_IMAGE_NAME ?= $(NAMESPACE)-$(SVC) 11 | ENABLE_INLINE_CACHE ?= $(ENABLE_BUILDKIT) 12 | ECS_DEPLOY_TIMEOUT ?= 600 13 | 14 | ECS_SERVICE_TASK_NETWORK_CONFIG ?= $(shell $(AWS) ssm get-parameter --name "/$(ENV)/terraform-output" --with-decryption | $(JQ) -r '.Parameter.Value' | $(BASE64) -d | $(JQ) -rc '.$(shell echo $(SVC) | sed 's/-/_/g')_task_network_configuration.value') 15 | ECS_SERVICE_TASK_LAUNCH_TYPE ?= $(shell $(AWS) ssm get-parameter --name "/$(ENV)/terraform-output" --with-decryption | $(JQ) -r '.Parameter.Value' | $(BASE64) -d | $(JQ) -rc '.$(shell echo $(SVC) | sed 's/-/_/g')_task_launch_type.value') 16 | SSM_OUTPUT_JSON = $(shell $(AWS) ssm get-parameter --name "/$(ENV)/terraform-output" --with-decryption | $(JQ) -r '.Parameter.Value' | $(BASE64) -d) 17 | 18 | # This is required due to a bug in Docker Multistage + Cache configuration. 19 | ECS_SERVICE_DOCKER_BUILD_CACHE_PARAMETER ?= $(shell [ "$(ENABLE_BUILDKIT)" = "1" ] && echo "--cache-from $(DOCKER_REGISTRY)/$(DOCKER_IMAGE_NAME):$(TAG_LATEST)" || echo "" ) 20 | 21 | ECS_SERVICE_TASK_ID = $(eval ECS_SERVICE_TASK_ID := $(shell $(AWS) ecs run-task --cluster $(ECS_CLUSTER_NAME) --task-definition "$(ECS_SERVICE_TASK_DEFINITION_ARN)" --network-configuration '$(ECS_SERVICE_TASK_NETWORK_CONFIG)' --launch-type "$(ECS_SERVICE_TASK_LAUNCH_TYPE)" | $(JQ) -r '.tasks[].taskArn' | $(REV) | $(CUT) -d'/' -f1 | $(REV)))$(ECS_SERVICE_TASK_ID) 22 | ECS_SERVICE_TASK_DEFINITION_ARN = $(shell $(AWS) ecs describe-task-definition --task-definition $(ECS_TASK_NAME) | $(JQ) -r '.taskDefinition.taskDefinitionArn') 23 | 24 | ECS_SERVICE_RUNNING_TASK_ID = $(eval ECS_SERVICE_RUNNING_TASK_ID := $(shell $(AWS) ecs list-tasks --cluster $(ECS_CLUSTER_NAME) --service-name $(ECS_SERVICE_NAME) --desired-status "RUNNING" | $(JQ) -r '.taskArns[]' | $(CUT) -d'/' -f3 | head -n 1))$(ECS_SERVICE_RUNNING_TASK_ID) 25 | CMD_SSM_TO_FARGATE_TASK ?= aws ecs $(AWS_CLI_PROFILE) execute-command --cluster $(ECS_CLUSTER_NAME) --task $(ECS_SERVICE_RUNNING_TASK_ID) --container $(SVC) --command "/bin/sh" --interactive 26 | 27 | CMD_ECS_SERVICE_DEPLOY = @$(ECS) deploy $(AWS_CLI_PROFILE) $(ECS_CLUSTER_NAME) $(ECS_SERVICE_NAME) --task $(ECS_SERVICE_TASK_DEFINITION_ARN) --image $(SVC) $(DOCKER_REGISTRY)/$(DOCKER_IMAGE_NAME):$(TAG) --diff --timeout $(ECS_DEPLOY_TIMEOUT) --rollback -e $(SVC) DD_VERSION $(TAG) 28 | CMD_ECS_SERVICE_REDEPLOY = @$(ECS) deploy $(AWS_CLI_PROFILE) --region $(AWS_REGION) $(ECS_CLUSTER_NAME) $(ECS_SERVICE_NAME) --diff --rollback 29 | CMD_ECS_SERVICE_DOCKER_BUILD = DOCKER_BUILDKIT=$(ENABLE_BUILDKIT) $(DOCKER) build \ 30 | . 
\ 31 | -t $(DOCKER_IMAGE_NAME) \ 32 | -t $(DOCKER_REGISTRY)/$(DOCKER_IMAGE_NAME):$(TAG) \ 33 | -t $(DOCKER_REGISTRY)/$(DOCKER_IMAGE_NAME):$(TAG_LATEST) \ 34 | -f $(PROJECT_PATH)/$(DOCKERFILE) \ 35 | $(ECS_SERVICE_DOCKER_BUILD_CACHE_PARAMETER) \ 36 | --build-arg DOCKER_REGISTRY=$(DOCKER_REGISTRY) \ 37 | --build-arg DOCKER_IMAGE_NAME=$(DOCKER_IMAGE_NAME) \ 38 | --build-arg ENV=$(ENV) \ 39 | --build-arg BUILDKIT_INLINE_CACHE=$(ENABLE_INLINE_CACHE) \ 40 | --build-arg PROJECT_PATH=$(PROJECT_PATH) \ 41 | $(DOCKER_BUILD_ADDITIONAL_PARAMS) 42 | 43 | CMD_ECS_SERVICE_DOCKER_PUSH = \ 44 | $(DOCKER) push $(DOCKER_REGISTRY)/$(DOCKER_IMAGE_NAME):$(TAG) && \ 45 | $(DOCKER) push $(DOCKER_REGISTRY)/$(DOCKER_IMAGE_NAME):$(TAG_LATEST) 46 | 47 | # Checks every 5 seconds the task status and goes on once task status is not RUNNING 48 | CMD_ECS_SERVICE_TASK_STOP_WAIT = printf "%s" "Task $(ECS_SERVICE_TASK_ID) is running ."; until [ $$($(AWS) ecs describe-tasks --cluster $(ECS_CLUSTER_NAME) --tasks arn:aws:ecs:$(AWS_REGION):$(AWS_ACCOUNT):task/$(ECS_CLUSTER_NAME)/$(ECS_SERVICE_TASK_ID) | jq -r '.tasks[].lastStatus') != RUNNING ]; do printf "%s" "."; sleep 5; done 49 | # Composes the respective exit code. Event though one of task's containers has non zero exit code macros fails 50 | CMD_ECS_SERVICE_TASK_EXIT_CODE_GET = FLAG=0; $(AWS) ecs describe-tasks --cluster $(ECS_CLUSTER_NAME) --tasks arn:aws:ecs:$(AWS_REGION):$(AWS_ACCOUNT):task/$(ECS_CLUSTER_NAME)/$(ECS_SERVICE_TASK_ID) | jq -r '.tasks[].containers[].exitCode' | while read EXIT_CODE; do if [ "$${EXIT_CODE}" != "0" ]; then printf "%s\n" " stopped with issue"; exit $${EXIT_CODE}; fi; done 51 | 52 | CMD_ECS_SERVICE_TASK_LOG = $(ECS_CLI) logs --task-id "$(ECS_SERVICE_TASK_ID)" --cluster "$(ECS_CLUSTER_NAME)" --timestamps 53 | CMD_ECS_SERVICE_TASK_GET_LOG = until echo $$($(ECS_CLI) logs --task-id "$(ECS_SERVICE_TASK_ID)" --cluster "$(ECS_CLUSTER_NAME)" --timestamps) | grep -Fqe "Z "; do sleep 2; done 54 | CMD_ECS_SERVICE_TASK_RUN = @echo "Task $(ECS_SERVICE_TASK_ID) for definition $(ECS_SERVICE_TASK_DEFINITION_ARN) has been started.\nLogs: \n " && $(CMD_ECS_SERVICE_TASK_GET_LOG) && $(CMD_ECS_SERVICE_TASK_LOG) && $(CMD_ECS_SERVICE_TASK_STOP_WAIT) && $(CMD_ECS_SERVICE_TASK_EXIT_CODE_GET) 55 | 56 | CMD_ECR_DOCKER_PURGE_CACHE = @echo "Removing '$(TAG_LATEST)' tag from AWS ECR" && $(AWS) ecr batch-delete-image --repository-name $(DOCKER_IMAGE_NAME) --image-ids imageTag=$(TAG_LATEST) | $(JQ) -er 'select(.failures[].failureReason != null) | def yellow: "\u001b[33m"; def reset: "\u001b[0m"; yellow + "[WARNING]:", reset + "\( .failures[].failureReason)"' || echo "\033[32m[OK]\033[0m '$(TAG_LATEST)' tag was removed from AWS ECR" 57 | 58 | CMD_ECS_SERVICE_SCALE = @$(ECS) scale $(AWS_CLI_PROFILE) $(ECS_CLUSTER_NAME) $(ECS_TASK_NAME) $(SCALE) 59 | CMD_ECS_SERVICE_DESTROY = echo "Destroy $(SVC) is not implemented" 60 | 61 | CMD_ECS_SERVICE_LOCAL_UP = $(ECS_CLI) local up --task-def-remote $(ECS_SERVICE_TASK_DEFINITION_ARN) --force 62 | CMD_ECS_SERVICE_LOCAL_DOWN = $(ECS_CLI) local down --task-def-remote $(ECS_SERVICE_TASK_DEFINITION_ARN) 63 | 64 | CMD_ECS_SERVICE_DOCKER_RUN = $(DOCKER) run $(DOCKER_RUN_ADDITIONAL_PARAMS) --rm $(DOCKER_REGISTRY)/$(DOCKER_IMAGE_NAME):$(TAG) 65 | 66 | ECS ?= $(DOCKER) run -i --rm -v $(HOME)/.aws/:/root/.aws hazelops/ecs-deploy@$(ECS_DEPLOY_IMAGE_SHA) ecs 67 | ECS_CLI ?= $(DOCKER) run \ 68 | -i --rm -v /var/run/docker.sock:/var/run/docker.sock \ 69 | -v $(HOME)/.aws/:/root/.aws \ 70 | -e AWS_PROFILE=$(AWS_PROFILE) \ 71 | -e 
AWS_REGION=$(AWS_REGION) \ 72 | hazelops/ecs-cli 73 | 74 | # Tasks 75 | ######################################################################################################################## 76 | AWS_CLI_ECR_LOGIN ?= $$(echo $$(if echo "$(DOCKER_REGISTRY)" | grep -Fqe "public"; then echo "ecr-public"; else echo "ecr"; fi)) 77 | ecr.login: aws 78 | @echo $(shell $(AWS) $(AWS_CLI_ECR_LOGIN) get-login-password --region $(AWS_REGION) | docker login --username AWS --password-stdin $(DOCKER_REGISTRY)) 79 | 80 | # Dependencies 81 | ######################################################################################################################## 82 | # Ensures ecs-deploy is installed 83 | ecs: 84 | ifeq (, $(ECS)) 85 | $(error "ecs-deploy is not installed or incorrectly configured. Run \\n`pip install ecs-deploy`. More info: https://github.com/fabfuel/ecs-deploy") 86 | endif 87 | 88 | # Ensures ecs-cli is installed 89 | ecs-cli: 90 | ifeq (, $(ECS_CLI)) 91 | $(error "AWS ecs-cli is not installed or incorrectly configured." ) 92 | endif 93 | 94 | # Backwards Compatibility, should be removed in 2.0 95 | ######################################################################################################################## 96 | CMD_SERVICE_DEPLOY = $(CMD_ECS_SERVICE_DEPLOY) 97 | CMD_SERVICE_REDEPLOY = $(CMD_ECS_SERVICE_REDEPLOY) 98 | CMD_SERVICE_DOCKER_BUILD = $(CMD_ECS_SERVICE_DOCKER_BUILD) 99 | CMD_SERVICE_DOCKER_PUSH = $(CMD_ECS_SERVICE_DOCKER_PUSH) 100 | CMD_SERVICE_TASK_RUN = $(CMD_ECS_SERVICE_TASK_RUN) 101 | CMD_SERVICE_SCALE = $(CMD_ECS_SERVICE_SCALE) 102 | CMD_SERVICE_DESTROY = $(CMD_ECS_SERVICE_DESTROY) 103 | CMD_SERVICE_LOCAL_UP =$(CMD_ECS_SERVICE_LOCAL_UP) 104 | CMD_SERVICE_LOCAL_DOWN = $(CMD_ECS_SERVICE_LOCAL_DOWN) 105 | CMD_SERVICE_BIN = $(CMD_ECS_SERVICE_BIN) 106 | -------------------------------------------------------------------------------- /aws/localstack.mk: -------------------------------------------------------------------------------- 1 | # Macroses 2 | ######################################################################################################################## 3 | LOCALSTACK_HOST ?= $(LOCALSTACK_CONTAINER_IP) 4 | LOCALSTACK_ENDPOINT ?= http://$(LOCALSTACK_HOST):4566 5 | 6 | CMD_LOCALSTACK_UP ?= @ ( $(DOCKER) run -d --name localstack -p $(LOCALSTACK_WEB_UI_PORT):$(LOCALSTACK_WEB_UI_PORT) \ 7 | -p $(LOCALSTACK_PORTS):$(LOCALSTACK_PORTS) \ 8 | -p 53:53 \ 9 | -p 443:443 \ 10 | -e LOCALSTACK_API_KEY=$(LOCALSTACK_API_KEY) \ 11 | -e DEBUG=1 \ 12 | -e SERVICES=$(LOCALSTACK_SERVICE_LIST) \ 13 | -e DATA_DIR=/tmp/localstack/data \ 14 | -e PORT_WEB_UI=$(LOCALSTACK_WEB_UI_PORT) \ 15 | -e DOCKER_HOST=unix:///var/run/docker.sock \ 16 | -v /tmp/localstack:/tmp/localstack \ 17 | $(LOCALSTACK_IMAGE):$(LOCALSTACK_VERSION) > /dev/null) && \ 18 | sleep 10 && \ 19 | echo "\033[32m[OK]\033[0m Localstack is UP. \nUse locally: aws --endpoint-url=http://localhost:4566 [options] " || \ 20 | echo "\033[31m[ERROR]\033[0m Localstack start failed" 21 | 22 | CMD_LOCALSTACK_DOWN ?= @ ( $(DOCKER) rm $$($(DOCKER) stop $$($(DOCKER) ps -a -q --filter ancestor=$(LOCALSTACK_IMAGE):$(LOCALSTACK_VERSION) --format="{{.ID}}")) > /dev/null) && echo "\033[32m[OK]\033[0m Localstack is DOWN." 
|| echo "\033[31m[ERROR]\033[0m Localstack stopping failed" 23 | 24 | LOCALSTACK_CONTAINER_IP ?= $$($(DOCKER) ps | grep "localstack" > /dev/null && echo "$(LOCALSTACK_IP)" || echo "") 25 | LOCALSTACK_IP ?= $$($(DOCKER) inspect --format='{{range.NetworkSettings.Networks}}{{.IPAddress}}{{end}}' localstack) 26 | AWS_ARGS ?= $$(if [ "$(ENV)" = "localstack" ] && [ $(LOCALSTACK_CONTAINER_IP) ]; then echo "--endpoint-url=http://$(LOCALSTACK_CONTAINER_IP):4566"; else echo ""; fi) 27 | 28 | # Tasks 29 | ######################################################################################################################## 30 | localstack: localstack.up 31 | localstack.up: 32 | $(CMD_LOCALSTACK_UP) 33 | localstack.down: 34 | $(CMD_LOCALSTACK_DOWN) -------------------------------------------------------------------------------- /aws/main.mk: -------------------------------------------------------------------------------- 1 | # Environment Validation 2 | ######################################################################################################################## 3 | ifndef AWS_REGION 4 | $(error Please set AWS_REGION via `export AWS_REGION=` or use direnv. This is nessesary for additional tools that are not able to read a region from your AWS profile) 5 | endif 6 | 7 | ifeq ($(AWS_MFA_ENABLED),"true") 8 | export AWS_ACCESS_KEY_ID=$(shell echo $(MFA_AWS_ACCESS_KEY_VALUE)) 9 | export AWS_SECRET_ACCESS_KEY=$(shell echo $(MFA_AWS_SECRET_ACCESS_KEY_VALUE)) 10 | export AWS_SESSION_TOKEN=$(shell echo $(MFA_AWS_SESSION_TOKEN_VALUE)) 11 | endif 12 | # Macroses 13 | ######################################################################################################################## 14 | # We don't check for AWS_PROFILE, but instead we assume the profile name. 
15 | # You can override it, although it's recommended to have a profile per environment in your ~/.aws/credentials 16 | AWS_PROFILE ?= $(NAMESPACE)-$(ENV) 17 | AWS_CLI_PROFILE ?= $(shell echo $$(if [ "$(AWS_MFA_ENABLED)" = "true" ]; then echo ""; else echo "--profile $(AWS_PROFILE)"; fi)) 18 | AWS_USER ?= $(shell [ -f ~/.aws/credentials ] && $(AWS) iam get-user | $(JQ) -r ".User.UserName") 19 | AWS_ACCOUNT ?= $(shell [ -f ~/.aws/credentials ] && $(AWS) sts get-caller-identity | $(JQ) -r '.Account' || echo "nil" ) 20 | 21 | AWS_DEV_ENV_NAME ?= $(shell [ -f ~/.aws/credentials ] && $(AWS) iam list-user-tags --user-name $(AWS_USER) | ( $(JQ) -e -r '.Tags[] | select(.Key == "devEnvironmentName").Value') || echo "$(ENV) (User env is not configured)") 22 | 23 | # AWS MFA 24 | AWS_MFA_DEVICE_ARN ?= $(MFA_DEVICE_ARN) # Since some users have already used MFA_DEVICE_ARN 25 | AWS_MFA_TOKEN_CODE ?= $(eval AWS_MFA_TOKEN_CODE := $(shell bash -c 'read -p "Enter MFA token: " token; echo $$token'))$(AWS_MFA_TOKEN_CODE) 26 | MFA_GET_SESSION_TOKEN ?= $(eval MFA_GET_SESSION_TOKEN := $(shell echo $$(aws sts get-session-token --serial-number ${AWS_MFA_DEVICE_ARN} --token-code $(AWS_MFA_TOKEN_CODE) | $(JQ) 'map_values(tostring)' | $(JQ) .Credentials)))$(MFA_GET_SESSION_TOKEN) 27 | MFA_AWS_ACCESS_KEY_ID ?= $(shell echo $(MFA_GET_SESSION_TOKEN) | $(JQ) .AccessKeyId | xargs > ~/.aws/mfa_aws_access_key) 28 | MFA_AWS_SECRET_ACCESS_KEY ?= $(shell echo $(MFA_GET_SESSION_TOKEN) | $(JQ) .SecretAccessKey | xargs > ~/.aws/mfa_aws_secret_access_key) 29 | MFA_AWS_SESSION_TOKEN ?= $(shell echo $(MFA_GET_SESSION_TOKEN) | $(JQ) .SessionToken | xargs > ~/.aws/mfa_aws_session_token) 30 | MFA_AWS_EXPIRATION ?= $(shell echo $(MFA_GET_SESSION_TOKEN) | $(JQ) .Expiration | xargs) 31 | 32 | MFA_AWS_ACCESS_KEY_VALUE ?= $(shell echo $$(if [ "$(AWS_MFA_ENABLED)" = "true" ]; then cat ~/.aws/mfa_aws_access_key; else echo ""; fi)) #$$(cat ~/.aws/mfa_aws_access_key) 33 | MFA_AWS_SECRET_ACCESS_KEY_VALUE ?= $(shell echo $$(if [ "$(AWS_MFA_ENABLED)" = "true" ]; then cat ~/.aws/mfa_aws_secret_access_key; else echo ""; fi)) #$$(cat ~/.aws/mfa_aws_secret_access_key) 34 | MFA_AWS_SESSION_TOKEN_VALUE ?= $(shell echo $$(if [ "$(AWS_MFA_ENABLED)" = "true" ]; then cat ~/.aws/mfa_aws_session_token; else echo ""; fi)) #$$(cat ~/.aws/mfa_aws_session_token) 35 | 36 | 37 | # $(AWS_ARGS) definition see in .infra/icmk/aws/localstack.mk 38 | AWS_MFA_ENV_VARS ?= $$(if [ "$(AWS_MFA_ENABLED)" = "true" ]; then echo "-e AWS_ACCESS_KEY_ID="$(MFA_AWS_ACCESS_KEY_VALUE)" -e AWS_SECRET_ACCESS_KEY="$(MFA_AWS_SECRET_ACCESS_KEY_VALUE)" -e AWS_SESSION_TOKEN="$(MFA_AWS_SESSION_TOKEN_VALUE)""; else echo ""; fi) 39 | DOCKER_PLATFORM ?= $$(if [ "$(LINUX_ARCH)" = "arm64" ]; then echo "--platform "linux/amd64""; else echo ""; fi) 40 | 41 | AWS ?= $(DOCKER) run --user "$(CURRENT_USER_ID):$(CURRENT_USERGROUP_ID)" $(DOCKER_PLATFORM) \ 42 | -v $(HOME)/.aws/:/.aws \ 43 | -i \ 44 | -e AWS_PROFILE="$(AWS_PROFILE)" \ 45 | -e AWS_REGION="$(AWS_REGION)" $(AWS_MFA_ENV_VARS) \ 46 | -e AWS_CONFIG_FILE="/.aws/config" \ 47 | -e AWS_SHARED_CREDENTIALS_FILE="/.aws/credentials" \ 48 | amazon/aws-cli:$(AWS_CLI_VERSION) $(AWS_ARGS) 49 | 50 | 51 | CMD_AWS_LOGS_TAIL = @$(AWS) logs tail $(SERVICE_NAME) --follow --format "short" 52 | CMD_AWS_EC2_IMPORT_KEY_PAIR = @$(AWS) ec2 import-key-pair --key-name="$(EC2_KEY_PAIR_NAME)" --public-key-material="$(SSH_PUBLIC_KEY_BASE64)" 53 | 54 | # VPC settings 55 | VPC_PUBLIC_SUBNETS ?= $(shell $(AWS) ssm get-parameter --name "/$(ENV)/terraform-output" 
--with-decryption | $(JQ) -r '.Parameter.Value' | $(BASE64) -d | $(JQ) -rc '.vpc_public_subnets.value') 56 | VPC_PRIVATE_SUBNETS ?= $(shell $(AWS) ssm get-parameter --name "/$(ENV)/terraform-output" --with-decryption | $(JQ) -r '.Parameter.Value' | $(BASE64) -d | $(JQ) -rc '.vpc_private_subnets.value') 57 | 58 | 59 | aws.mfa: 60 | $(MFA_AWS_ACCESS_KEY_ID) 61 | $(MFA_AWS_SECRET_ACCESS_KEY) 62 | $(MFA_AWS_SESSION_TOKEN) 63 | @echo "\033[36mMFA Token will be expired at:\033[0m $(MFA_AWS_EXPIRATION)" 64 | 65 | 66 | # Getting OS|Linux info 67 | OS_NAME ?= $(shell uname -s) 68 | LINUX_OS_DISTRIB ?= $$(cat /etc/issue) 69 | LINUX_ARCH ?= $(shell uname -m) 70 | LINUX_BITS ?= $(shell uname -m | sed 's/x86_//;s/i[3-6]86/32/') 71 | ARCH ?= $$(echo $$(if echo "$(LINUX_ARCH)" | grep -Fqe "arm"; then echo "arm$(LINUX_BITS)"; else echo "$(LINUX_BITS)bit"; fi)) 72 | LINUX_DISTRIB_TEMP ?= $$(echo $$(if echo "$(LINUX_OS_DISTRIB)" | grep -Fqe "Ubuntu"; then echo "ubuntu"; elif echo "$(LINUX_OS_DISTRIB)" | grep -Fqe "Debian"; then echo "ubuntu"; else echo "linux"; fi)) 73 | LINUX_DISTRIB ?= $$(echo $(LINUX_DISTRIB_TEMP) | xargs) 74 | LINUX_PACKAGE_EXT ?= $$(echo $$(if echo "$(LINUX_OS_DISTRIB)" | grep -Fqe "Ubuntu"; then echo ".deb"; elif echo "$(LINUX_OS_DISTRIB)" | grep -Fqe "Debian"; then echo ".deb"; else echo ".rpm"; fi)) 75 | # Download Session Manager cmds 76 | SSM_DOWNLOAD_FOR_MAC_OS ?= curl -s "https://s3.amazonaws.com/session-manager-downloads/plugin/latest/mac/sessionmanager-bundle.zip" > "sessionmanager-bundle.zip" && unzip -qq sessionmanager-bundle.zip 77 | SSM_DOWNLOAD_FOR_LINUX_OS ?= curl -s "https://s3.amazonaws.com/session-manager-downloads/plugin/latest/$(LINUX_DISTRIB)_$(ARCH)/session-manager-plugin$(LINUX_PACKAGE_EXT)" > "session-manager-plugin$(LINUX_PACKAGE_EXT)" 78 | CMD_SSM_DOWNLOAD ?= $(shell echo $$(if [ "$(OS_NAME)" = "Linux" ]; then echo "$(SSM_DOWNLOAD_FOR_LINUX_OS)"; else echo "$(SSM_DOWNLOAD_FOR_MAC_OS)"; fi)) 79 | # Installation Session Manager cmds 80 | LINUX_INSTALLER ?= $$(echo $$(if echo "$(LINUX_OS_DISTRIB)" | grep -Fqe "Ubuntu"; then echo "sudo dpkg -i"; elif echo "$(LINUX_OS_DISTRIB)" | grep -Fqe "Debian"; then echo "sudo dpkg -i"; else echo "sudo yum install -y -q"; fi)) 81 | SSM_INSTALL_ON_MAC_OS ?= sudo ./sessionmanager-bundle/install -i /usr/local/sessionmanagerplugin -b /usr/local/bin/session-manager-plugin 82 | SSM_INSTALL_ON_LINUX_OS ?= $(LINUX_INSTALLER) session-manager-plugin$(LINUX_PACKAGE_EXT) 83 | CMD_SSM_INSTALL ?= $(shell echo $$(if [ "$(OS_NAME)" = "Linux" ]; then echo "$(SSM_INSTALL_ON_LINUX_OS)"; else echo "$(SSM_INSTALL_ON_MAC_OS)"; fi)) 84 | # Cleanup Session Manager installation package 85 | SSM_CLEANUP_ON_MAC_OS ?= rm -rf sessionmanager-bundle sessionmanager-bundle.zip 86 | SSM_CLEANUP_ON_LINUX_OS ?= rm -rf session-manager-plugin$(LINUX_PACKAGE_EXT) 87 | CMD_SSM_CLEANUP ?= $(shell echo $$(if [ "$(OS_NAME)" = "Linux" ]; then echo "$(SSM_CLEANUP_ON_LINUX_OS)"; else echo "$(SSM_CLEANUP_ON_MAC_OS)"; fi)) 88 | 89 | # SSM access to Fargate ECS 90 | SSM_MI_TARGET ?= $(shell $(AWS) ssm describe-instance-information | $(JQ) -er '.InstanceInformationList[] | select(.Name == "$(SVC)" and .PingStatus == "Online") | .InstanceId' > tmp && cat tmp | head -1 && rm -rf tmp || rm -rf tmp) 91 | # We use local aws-cli here due to interactive actions 92 | SSM_TO_FARGATE ?= aws $(AWS_CLI_PROFILE) ssm start-session --target $(SSM_MI_TARGET) 93 | CMD_SSM_TO_FARGATE ?= $(shell echo $$(if [ -z "$(SSM_MI_TARGET)" ]; then echo "echo '[ERROR] SSM mi target is not available now 
(please try in a minute) or not configured. Exit.'"; else echo "$(SSM_TO_FARGATE)"; fi)) 94 | # Tasks 95 | ######################################################################################################################## 96 | aws.debug: ## Show environment information for debug purposes 97 | @echo "\033[32m=== AWS Environment Info ===\033[0m" 98 | @echo "\033[36mAWS_DEV_ENV_NAME\033[0m: $(AWS_DEV_ENV_NAME)" 99 | @echo "\033[36mAWS_ACCOUNT\033[0m: $(AWS_ACCOUNT)" 100 | @echo "\033[36mAWS_PROFILE\033[0m: $(AWS_PROFILE)" 101 | @echo "\033[36mAWS_USER\033[0m: $(AWS_USER)" 102 | 103 | os.debug: 104 | @echo "\033[32m=== System Info ===\033[0m" 105 | @echo "\033[36mOS_NAME\033[0m: $(OS_NAME)" 106 | @echo "\033[36mLINUX_ARCH\033[0m: $(LINUX_ARCH)" 107 | @echo "\033[36mARCH\033[0m: $(ARCH)" 108 | 109 | aws.profile: 110 | $(shell mkdir -p ~/.aws && echo "[$(AWS_PROFILE)]\naws_access_key_id = $(AWS_ACCESS_KEY_ID)\naws_secret_access_key = $(AWS_SECRET_ACCESS_KEY)\nregion = $(AWS_REGION)" >> ~/.aws/credentials) 111 | 112 | aws.key-pair: aws.import-ssh-key 113 | aws.import-ssh-key: 114 | $(CMD_AWS_EC2_IMPORT_KEY_PAIR) 115 | 116 | # Install AWS SSM Session Manager plugin 117 | ssm-plugin: ssm-plugin.download ssm-plugin.install ssm-plugin.check 118 | ssm-plugin.download: 119 | @$(CMD_SSM_DOWNLOAD) 120 | ssm-plugin.install: 121 | @$(CMD_SSM_INSTALL) 122 | @$(CMD_SSM_CLEANUP) 123 | ssm-plugin.check: 124 | ifeq (, $(shell which session-manager-plugin)) 125 | @echo "\033[31m[FAILED]\033[0m Your SSM Session Manager Plugin is not installed or incorrectly configured.\n Use \033[33mmake ssm-plugin\033[0m to install it.\n Alternatively you can follow AWS Documentation \033[34mhttps://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html\033[0m \n and install it manually." 126 | else 127 | @echo "\n\033[32m[OK]\033[0m SSM Session Manager Plugin is installed." 
128 | endif 129 | 130 | # Dependencies 131 | ######################################################################################################################## 132 | # TODO: Add validation for ability to connect to AWS 133 | # Ensures aws toolchain is installed 134 | aws: 135 | ifeq (, $(AWS)) 136 | $(error "aws cli toolchain is not installed or incorrectly configured.") 137 | endif 138 | -------------------------------------------------------------------------------- /aws/rds.mk: -------------------------------------------------------------------------------- 1 | # This file contains AWS RDS related logic 2 | ################################################################## 3 | # Macroses 4 | ######################################################################################################################## 5 | # Default value for RDS CLUSTER Identifier 6 | RDS_DB_CLUSTER_IDENTIFIER ?= $(ENV)-$(RDS_DB_SUFFIX) 7 | 8 | # It gets a current DB Writer instance Identifier 9 | CMD_RDS_DB_CLUSTER_WR_INSTANCE_IDENTIFIER = $$($(AWS) rds describe-db-clusters --db-cluster-identifier $(RDS_DB_CLUSTER_IDENTIFIER) | $(JQ) -r --arg DB_ID $(RDS_DB_CLUSTER_IDENTIFIER) '.DBClusters[] | select(contains({DBClusterIdentifier: $$DB_ID})) | .DBClusterMembers[] | select(contains({IsClusterWriter: true})) | .DBInstanceIdentifier') 10 | 11 | # It enables Failover mechanism and stories instance identifier 12 | CMD_RDS_DB_CLUSTER_WR_INSTANCE_IDENTIFIER_PREVIOUS = $(eval CMD_RDS_DB_CLUSTER_WR_INSTANCE_IDENTIFIER_PREVIOUS := $(shell $(AWS) rds failover-db-cluster --db-cluster-identifier $(RDS_DB_CLUSTER_IDENTIFIER) | $(JQ) -r '.DBCluster.DBClusterMembers[] | select(contains({IsClusterWriter: true})) | .DBInstanceIdentifier'))$(CMD_RDS_DB_CLUSTER_WR_INSTANCE_IDENTIFIER_PREVIOUS) 13 | 14 | # Simple loop for looking for Writer instance switching 15 | CMD_RDS_DB_CLUSTER_FAILOVER = WAIT_TIME=0; SUCCESS_FLAG=0; printf "%s" "Getting new Primary DB instance."; while [ $$WAIT_TIME -lt $$(($(RDS_FAILOVER_TIMEOUT) * 60)) ]; do if [ $(CMD_RDS_DB_CLUSTER_WR_INSTANCE_IDENTIFIER_PREVIOUS) != $(CMD_RDS_DB_CLUSTER_WR_INSTANCE_IDENTIFIER) ]; then echo "\n\n\033[32m[OK]\033[0m The Primary DB instance has been changed to '$(CMD_RDS_DB_CLUSTER_WR_INSTANCE_IDENTIFIER)'.\n"; SUCCESS_FLAG=1; break; else printf "%s" "."; WAIT_TIME=$$(expr $$WAIT_TIME + $(RDS_FAILOVER_LOOP_TIMEOUT)); sleep $(RDS_FAILOVER_LOOP_TIMEOUT); fi; done; if [ $$SUCCESS_FLAG -ne 1 ]; then echo "\n\033[31m[ERROR]\033[0m Something went wrong during RDS Failover process."; exit 1; fi 16 | 17 | # Notification before main logic invocation 18 | CMD_RDS_DB_CLUSTER_FAILOVER_RUN = @ echo "\nFailover process within '$(RDS_DB_CLUSTER_IDENTIFIER)' RDS cluster has been started. 
\nThe Primary DB instance was '$(CMD_RDS_DB_CLUSTER_WR_INSTANCE_IDENTIFIER_PREVIOUS)'.\n" && $(CMD_RDS_DB_CLUSTER_FAILOVER) 19 | 20 | # RDS DB Writer instance reboot 21 | CMD_RDS_DB_CLUSTER_WR_INSTANCE_START_REBOOT = $$($(AWS) rds reboot-db-instance --db-instance-identifier $(CMD_RDS_DB_CLUSTER_WR_INSTANCE_IDENTIFIER)) 22 | # RDS DB wait instance available 23 | CMD_RDS_DB_CLUSTER_WR_INSTANCE_AVAILABLE = $$($(AWS) rds wait db-instance-available --db-instance-identifier $(CMD_RDS_DB_CLUSTER_WR_INSTANCE_IDENTIFIER)) 24 | -------------------------------------------------------------------------------- /aws/secrets.mk: -------------------------------------------------------------------------------- 1 | # Macroses 2 | ######################################################################################################################## 3 | SERVICE_SECRETS_BACKUP_FILE ?= $(INFRA_DIR)/env/$(ENV)/secrets/$(SVC)-backup.json 4 | SERVICE_SECRETS_FILE = $(INFRA_DIR)/env/$(ENV)/secrets/$(SVC).json 5 | SERVICE_SECRETS = $(shell cat $(SERVICE_SECRETS_FILE) | $(JQ) -e -r '. | to_entries[] | .key' ) 6 | GLOBAL_SECRETS_BACKUP_FILE ?= $(INFRA_DIR)/env/$(ENV)/secrets/global-backup.json 7 | GLOBAL_SECRETS_FILE = $(INFRA_DIR)/env/$(ENV)/secrets/global.json 8 | GLOBAL_SECRETS = $(shell cat $(GLOBAL_SECRETS_FILE) | $(JQ) -e -r '. | to_entries[] | .key' ) 9 | 10 | # TODO: Figure out whether to use shell's foreach or Make can build the list dynamically 11 | CMD_SERVICE_SECRETS_PUSH = @ (echo $(foreach item, $(SERVICE_SECRETS), \ 12 | $(shell $(AWS) --profile=$(AWS_PROFILE) ssm put-parameter --name="/$(ENV)/$(SVC)/$(item)" --value="$(shell \ 13 | cat $(SERVICE_SECRETS_FILE) | $(JQ) -r '.$(item)' \ 14 | )" --type SecureString --overwrite && \ 15 | $(AWS) --profile=$(AWS_PROFILE) ssm add-tags-to-resource --resource-type "Parameter" --resource-id "/$(ENV)/$(SVC)/$(item)" \ 16 | --tags "Key=Application,Value=$(SVC)" "Key=EnvVarName,Value=$(item)" || \ 17 | echo "\033[31m[ERROR]\033[0m /$(ENV)/$(SVC)/* secrets upload")) > /dev/null ) && echo "\033[32m[OK]\033[0m $(SVC) secrets upload" || echo "\033[31m[ERROR]\033[0m $(SVC) secrets upload" 18 | 19 | CMD_GLOBAL_SECRETS_PUSH = @ (echo $(foreach item, $(GLOBAL_SECRETS), \ 20 | $(shell $(AWS) --profile=$(AWS_PROFILE) ssm put-parameter --name="/$(ENV)/global/$(item)" --value="$(shell \ 21 | cat $(GLOBAL_SECRETS_FILE) | $(JQ) -r '.$(item)' \ 22 | )" --type SecureString --overwrite || \ 23 | echo "\033[31m[ERROR]\033[0m /$(ENV)/global/* secrets upload")) > /dev/null ) && echo "\033[32m[OK]\033[0m Global secrets upload" || echo "\033[31m[ERROR]\033[0m Global secrets upload" 24 | 25 | CMD_SERVICE_SECRETS_DELETE = @ (echo $(foreach item, $(shell $(AWS) --profile=$(AWS_PROFILE) ssm get-parameters-by-path \ 26 | --path "/$(ENV)/$(SVC)" --query "Parameters[*].Name" --recursive | $(JQ) -e -r '. | to_entries[] | .value' ), \ 27 | $(shell $(AWS) --profile=$(AWS_PROFILE) ssm delete-parameter --name $(item))) > /dev/null ) && \ 28 | echo "\033[32m[OK]\033[0m /$(ENV)/$(SVC)/* secrets deleted" || echo "\033[31m[ERROR]\033[0m /$(ENV)/$(SVC)/* secrets deletion" 29 | 30 | CMD_GLOBAL_SECRETS_DELETE = @ (echo $(foreach item, $(shell $(AWS) --profile=$(AWS_PROFILE) ssm get-parameters-by-path \ 31 | --path "/$(ENV)/global" --query "Parameters[*].Name" --recursive | $(JQ) -e -r '. 
| to_entries[] | .value' ), \ 32 | $(shell $(AWS) --profile=$(AWS_PROFILE) ssm delete-parameter --name $(item))) > /dev/null ) && \ 33 | echo "\033[32m[OK]\033[0m /$(ENV)/global/* secrets deleted" || echo "\033[31m[ERROR]\033[0m /$(ENV)/global/* secrets deletion" 34 | 35 | 36 | CMD_SERVICE_ALL_SECRET_KEYS = $(foreach item, $(shell $(AWS) --profile=$(AWS_PROFILE) ssm get-parameters-by-path \ 37 | --path "/$(ENV)/$(SVC)" --recursive | $(JQ) -e -r '.Parameters[] | select(.Type == "SecureString") | .Name' ), $(item)) 38 | CMD_SERVICE_SECRETS_PULL = @ (echo $(shell echo "{\"INFO\":\"EMPTY_JSON_CREATED\"}" > $(SERVICE_SECRETS_BACKUP_FILE)) && \ 39 | $(foreach item, $(CMD_SERVICE_ALL_SECRET_KEYS), \ 40 | $(shell $(JQ) --arg value "$(shell echo $(item) | sed 's|.*/||')" '.[$$value] = "'$(shell $(AWS) --profile=$(AWS_PROFILE) ssm get-parameter --with-decryption --name $(item) --query Parameter.Value)'"' \ 41 | $(SERVICE_SECRETS_BACKUP_FILE) > tmp.json && mv tmp.json $(SERVICE_SECRETS_BACKUP_FILE) || echo "\033[31m[ERROR]\033[0m /$(ENV)/$(SVC)/* secrets getting")) > /dev/null ) && \ 42 | $(JQ) 'del(."INFO")' $(SERVICE_SECRETS_BACKUP_FILE) > tmp.json && mv tmp.json $(SERVICE_SECRETS_BACKUP_FILE) && \ 43 | echo "\033[32m[OK]\033[0m /$(ENV)/$(SVC)/* secrets pulled" || echo "\033[31m[ERROR]\033[0m /$(ENV)/$(SVC)/* secrets getting" 44 | 45 | CMD_GLOBAL_ALL_SECRET_KEYS = $(foreach item, $(shell $(AWS) --profile=$(AWS_PROFILE) ssm get-parameters-by-path \ 46 | --path "/$(ENV)/global" --recursive | $(JQ) -e -r '.Parameters[] | select(.Type == "SecureString") | .Name' ), $(item)) 47 | CMD_GLOBAL_SECRETS_PULL = @ (echo $(shell echo "{\"INFO\":\"EMPTY_JSON_CREATED\"}" > $(GLOBAL_SECRETS_BACKUP_FILE)) && \ 48 | $(foreach item, $(CMD_GLOBAL_ALL_SECRET_KEYS), \ 49 | $(shell $(JQ) --arg value "$(shell echo $(item) | sed 's|.*/||')" '.[$$value] = "'$(shell $(AWS) --profile=$(AWS_PROFILE) ssm get-parameter --with-decryption --name $(item) --query Parameter.Value)'"' \ 50 | $(GLOBAL_SECRETS_BACKUP_FILE) > tmp.json && mv tmp.json $(GLOBAL_SECRETS_BACKUP_FILE) || echo "\033[31m[ERROR]\033[0m /$(ENV)/global/* secrets getting")) > /dev/null ) && \ 51 | $(JQ) 'del(."INFO")' $(GLOBAL_SECRETS_BACKUP_FILE) > tmp.json && mv tmp.json $(GLOBAL_SECRETS_BACKUP_FILE) && \ 52 | echo "\033[32m[OK]\033[0m /$(ENV)/global/* secrets pulled" || echo "\033[31m[ERROR]\033[0m /$(ENV)/global/* secrets getting" 53 | 54 | # Tasks 55 | ######################################################################################################################## 56 | secrets.push: 57 | @$(CMD_SERVICE_SECRETS_PUSH) 58 | @$(CMD_SERVICE_SECRETS_TAGS) 59 | secrets.pull: 60 | @$(CMD_SERVICE_SECRETS_PULL) 61 | secrets.delete: 62 | @$(CMD_SERVICE_SECRETS_DELETE) 63 | global-secrets.push: 64 | @$(CMD_GLOBAL_SECRETS_PUSH) 65 | global-secrets.pull: 66 | @$(CMD_GLOBAL_SECRETS_PULL) 67 | global-secrets.delete: 68 | @$(CMD_GLOBAL_SECRETS_DELETE) 69 | # Dependencies 70 | ######################################################################################################################## -------------------------------------------------------------------------------- /aws/variables.mk: -------------------------------------------------------------------------------- 1 | # This file should contain variables used in current module 2 | ################################################################## 3 | # main variables 4 | AWS_CLI_VERSION ?= 2.2.0 5 | AWS_MFA_ENABLED ?= false 6 | 7 | # ecs variables 8 | SCALE ?= 3 9 | DOCKERFILE ?= Dockerfile 10 | 
ECS_DEPLOY_VERSION ?= latest 11 | ENABLE_BUILDKIT ?= 1 12 | DOCKER_BUILD_ADDITIONAL_PARAMS ?= 13 | DOCKER_RUN_ADDITIONAL_PARAMS ?= 14 | ECS_DEPLOY_IMAGE_SHA ?= sha256:acca364f44b8cbc01401baf53a39324cd23c11257c3ab66ca52261f85e69f60d 15 | 16 | # localstack variables 17 | # Actual LOCALSTACK_API_KEY should be set in Makefile 18 | LOCALSTACK_API_KEY ?= 1234567Local 19 | # This can be overriden for different args, like setting an endpoint, like localstack 20 | LOCALSTACK_IMAGE ?= localstack/localstack 21 | LOCALSTACK_VERSION ?= latest 22 | LOCALSTACK_WEB_UI_PORT ?= 8088 23 | LOCALSTACK_PORTS ?= "4510-4620" 24 | LOCALSTACK_SERVICE_LIST ?= "dynamodb,s3,lambda,cloudformation,sts,iam,acm,ec2,route53,ssm,cloudwatch,apigateway,ecs,ecr,events,serverless" #etc. serverless? api-gateway? 25 | 26 | # Maximum time for RDS Failover execution, in minutes 27 | RDS_FAILOVER_TIMEOUT ?= 5 28 | # Timeout of RDS Failover check, in seconds 29 | RDS_FAILOVER_LOOP_TIMEOUT ?= 3 30 | # Default suffix for RDS DB Identifier 31 | RDS_DB_SUFFIX ?= APP-DB 32 | -------------------------------------------------------------------------------- /examples/simple/.envrc-example: -------------------------------------------------------------------------------- 1 | export AWS_PROFILE=nutcorp-dev 2 | export ENV=testnut 3 | export AWS_REGION=us-east-1 4 | -------------------------------------------------------------------------------- /examples/simple/.infra/.gitignore: -------------------------------------------------------------------------------- 1 | 2 | env/*/secrets/*.json 3 | !env/*/secrets/example-service.json 4 | 5 | 6 | # Infra 7 | .terraform 8 | .retry 9 | __pycache__ 10 | terraform*.tfstate 11 | backend.tf 12 | output.json 13 | tfplan 14 | *.tfvars 15 | 16 | env/* 17 | !env/testnut 18 | !env/prod 19 | ssh.config 20 | -------------------------------------------------------------------------------- /examples/simple/.infra/env/testnut/env.mk: -------------------------------------------------------------------------------- 1 | TERRAFORM_STATE_BUCKET_NAME = nutcorpnet-tf-state 2 | AWS_REGION = us-east-1 3 | -------------------------------------------------------------------------------- /examples/simple/.infra/env/testnut/main.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | profile = var.aws_profile 3 | region = var.aws_region 4 | } 5 | 6 | resource "aws_key_pair" "root" { 7 | key_name = var.ec2_key_pair_name 8 | public_key = var.ssh_public_key 9 | } 10 | 11 | module "vpc" { 12 | source = "terraform-aws-modules/vpc/aws" 13 | 14 | name = "${var.env}-vpc" 15 | cidr = "10.0.0.0/16" 16 | 17 | azs = ["us-east-1a"] 18 | private_subnets = ["10.0.1.0/24"] 19 | public_subnets = ["10.0.101.0/24"] 20 | 21 | enable_nat_gateway = true 22 | 23 | tags = { 24 | Terraform = "true" 25 | Environment = var.env 26 | } 27 | } 28 | 29 | data "aws_route53_zone" "root" { 30 | name = "${var.root_domain_name}." 
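# Illustrative note: with the default root_domain_name ("nutcorp.net"), this looks up the public "nutcorp.net." hosted zone; override the variable for your own domain.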
31 | private_zone = false 32 | } 33 | 34 | module "bastion" { 35 | source = "hazelops/ec2-bastion/aws" 36 | version = "~> 1.0" 37 | env = var.env 38 | vpc_id = module.vpc.vpc_id 39 | zone_id = data.aws_route53_zone.root.zone_id 40 | public_subnets = module.vpc.public_subnets 41 | ec2_key_pair_name = var.ec2_key_pair_name 42 | ssh_forward_rules = [ 43 | "LocalForward 222 127.0.0.1:22" 44 | ] 45 | } 46 | 47 | output "cmd" { 48 | description = "Map of useful commands" 49 | value = { 50 | tunnel = module.bastion.cmd 51 | } 52 | } 53 | 54 | output "ssh_forward_config" { 55 | value = module.bastion.ssh_config 56 | } 57 | -------------------------------------------------------------------------------- /examples/simple/.infra/env/testnut/secrets/example-service.json: -------------------------------------------------------------------------------- 1 | { 2 | "SVC_SECRET_ONE": "Value one of the secret", 3 | "SVC_SECRET_TWO": "Value two of the secret" 4 | } 5 | -------------------------------------------------------------------------------- /examples/simple/.infra/env/testnut/variables.tf: -------------------------------------------------------------------------------- 1 | variable "env" {} 2 | variable "aws_profile" {} 3 | variable "aws_region" {} 4 | variable "ssh_public_key" {} 5 | variable "ec2_key_pair_name" { 6 | default = "nutcorp" 7 | } 8 | variable "root_domain_name" { 9 | default = "nutcorp.net" 10 | } 11 | -------------------------------------------------------------------------------- /examples/simple/.infra/icmk: -------------------------------------------------------------------------------- 1 | ../../../ -------------------------------------------------------------------------------- /examples/simple/Makefile: -------------------------------------------------------------------------------- 1 | NAMESPACE = nutcorp 2 | ICMK_VERSION ?= master 3 | 4 | include .infra/icmk/init.mk 5 | 6 | deploy: infra.deploy app.deploy 7 | destroy: infra.destroy 8 | secrets: app.secrets ingest.secrets 9 | 10 | # Infrastructure 11 | infra: terraform.init terraform.apply 12 | terraform.test: terraform.checkov terraform.tflint 13 | 14 | ################################################################################################### 15 | # Services 16 | app: app.image app.push app.deploy 17 | app.image: docker 18 | $(CMD_SERVICE_DOCKER_BUILD) 19 | app.push: docker ecr.login 20 | $(CMD_SERVICE_DOCKER_PUSH) 21 | app.deploy: ecs jq ## Deploy service 22 | $(CMD_SERVICE_DEPLOY) 23 | app.scale: ecs ## Change scale (`make app.scale SCALE=n`) 24 | $(CMD_SERVICE_SCALE) 25 | app.destroy: confirm 26 | $(CMD_SERVICE_DESTROY) 27 | app.secrets: 28 | $(CMD_SERVICE_SECRETS_PUSH) 29 | app.up: 30 | $(CMD_SERVICE_LOCAL_UP) 31 | app.down: 32 | $(CMD_SERVICE_LOCAL_DOWN) 33 | app.purge-cache: 34 | $(CMD_ECR_DOCKER_PURGE_CACHE) 35 | -------------------------------------------------------------------------------- /examples/simple/README.md: -------------------------------------------------------------------------------- 1 | # ICMK Simple Example 2 | 3 | ```shell script 4 | make infra 5 | make tunnel 6 | ``` 7 | -------------------------------------------------------------------------------- /init.mk: -------------------------------------------------------------------------------- 1 | # Macroses 2 | ######################################################################################################################## 3 | ROOT_DIR ?= $(shell pwd) 4 | INFRA_DIR ?= $(ROOT_DIR)/.infra 5 | 6 | ICMK_VERSION ?= origin/master 7 | ICMK_REPO 
?= https://github.com/hazelops/icmk.git 8 | ICMK_GIT_REVISION = $(shell cd $(INFRA_DIR)/icmk && $(GIT) rev-parse HEAD) $(shell cd $(INFRA_DIR)/icmk && $(GIT) describe --tags) 9 | 10 | CURRENT_USER_ID = $(shell id -u) 11 | CURRENT_USERGROUP_ID = $(shell id -g) 12 | 13 | # Tasks 14 | ######################################################################################################################## 15 | .PHONY: update 16 | init: icmk.install 17 | init.bootstrap: icmk.install examples.simple 18 | 19 | icmk.install: $(INFRA_DIR)/icmk 20 | $(INFRA_DIR)/icmk: 21 | @echo "Installing icmk from $(ICMK_VERSION)" 22 | mkdir -p $(INFRA_DIR) && cd $(INFRA_DIR) && $(GIT) submodule add $(ICMK_REPO) icmk 23 | cd $(INFRA_DIR)/icmk && $(GIT) fetch --all && $(GIT) reset $(ICMK_VERSION) --hard 24 | @rm -f $(TMPDIR)/icmk.mk && rm -f $(TMPDIR)/icmk.mk 25 | @echo "Done!" 26 | 27 | icmk.clean: 28 | @rm -rf $(INFRA_DIR)/icmk && echo "Cleaning Done" 29 | 30 | icmk.update: ## Updates ICMK 31 | @[ -d "$(INFRA_DIR)/icmk" ] && (cd $(INFRA_DIR)/icmk && $(GIT) fetch --all --tags && $(GIT) reset $(ICMK_VERSION) --hard && $(GIT) checkout $(ICMK_VERSION)) || (echo "No ICMK installed. Please install it first." && exit 1) 32 | 33 | icmk.update-init: ## Updates ICMK with a remote init script 34 | @echo Updating via new init from https://hzl.xyz/icmk 35 | @make icmk.update -f $$(curl -Ls https://hzl.xyz/icmk > $$TMPDIR/icmk.mk && echo "$$TMPDIR/icmk.mk") 36 | 37 | examples.simple: confirm $(INFRA_DIR)/icmk 38 | @cp $(INFRA_DIR)/icmk/examples/simple/Makefile ./Makefile 39 | @cp $(INFRA_DIR)/icmk/examples/simple/.envrc-example .envrc-example 40 | @cp -R $(INFRA_DIR)/icmk/examples/simple/.infra/.gitignore $(INFRA_DIR)/ 41 | @cp -R $(INFRA_DIR)/icmk/examples/simple/.infra/env $(INFRA_DIR)/ 42 | 43 | confirm: 44 | @echo "\033[31mAre you sure? [y/N]\033[0m" && read ans && [ $${ans:-N} = y ] || (echo "\033[32mCancelled.\033[0m" && exit 1) 45 | 46 | # Dependencies 47 | ######################################################################################################################## 48 | # Core Dependencies 49 | GIT ?= $(shell which git) 50 | DOCKER ?= $(shell which docker) 51 | AWS_CLI ?= $(shell which aws) 52 | 53 | # Ensures that all dependencies are installed 54 | prereqs: git docker aws-cli ssh-pub-key 55 | 56 | docker: 57 | ifeq (, $(DOCKER)) 58 | @echo "\033[31mX Docker is not installed or incorrectly configured. https://www.docker.com/ \033[0m" 59 | else 60 | @echo "\033[32m✔ Docker\033[0m" 61 | endif 62 | 63 | git: 64 | ifeq (, $(GIT)) 65 | @echo "\033[31mX Git is not installed or incorrectly configured. https://git-scm.com/downloads/ \033[0m" 66 | else 67 | @echo "\033[32m✔ Git\033[0m" 68 | endif 69 | 70 | aws-cli: 71 | ifeq (, $(AWS_CLI)) 72 | @echo "\033[31mX AWS (CLI) is not installed or incorrectly configured. https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html/ \033[0m" 73 | else 74 | @echo "\033[32m✔ AWS (CLI)\033[0m" 75 | endif 76 | 77 | ssh-pub-key: 78 | ifeq (,$(wildcard ~/.ssh/id_rsa.pub)) 79 | @echo "\033[31m! SSH Public Key is not found here: ~/.ssh/id_rsa.pub . Please make sure that you have a key and pass it via '$(SSH_PUBLIC_KEY)' variable. \033[0m" 80 | else 81 | @echo "\033[32m✔ SSH Public Key\033[0m" 82 | endif 83 | 84 | # This ensures we include main.mk and variables.mk only if it's there. 
If not we don't error out (IE in case of bootstrap) 85 | -include $(INFRA_DIR)/icmk/variables.mk 86 | -include $(INFRA_DIR)/icmk/main.mk 87 | -------------------------------------------------------------------------------- /ize/ize.mk: -------------------------------------------------------------------------------- 1 | #### IZE Download script 2 | CMD_DOWNLOAD_IZE = curl -L $(IZE_DOWNLOAD_URL) -o $(TMPDIR)/$(IZE_ARCHIVE_NAME) && tar -xvzf $(TMPDIR)/$(IZE_ARCHIVE_NAME) -C $(IZE_DIR) && rm $(TMPDIR)/$(IZE_ARCHIVE_NAME) 3 | 4 | #### IZE Install script 5 | CMD_INSTALL_IZE = chmod +x $(IZE_DIR)/ize 6 | 7 | #### Create IZE folder 8 | CMD_CREATE_IZE_FOLDER = mkdir -p $(IZE_DIR) 9 | 10 | # Tasks 11 | ######################################################################################################################## 12 | ize.install: 13 | @$(CMD_CREATE_IZE_FOLDER) && \ 14 | echo "\n\033[33m[...]\033[0m IZE downloading" && \ 15 | $(CMD_DOWNLOAD_IZE) && \ 16 | echo "\n\033[32m[OK]\033[0m IZE downloaded successfully" && \ 17 | $(CMD_INSTALL_IZE) && \ 18 | echo "\n\033[32m[OK]\033[0m IZE successfully installed" 19 | -------------------------------------------------------------------------------- /ize/variables.mk: -------------------------------------------------------------------------------- 1 | # This file should contain variables used in current module 2 | ################################################################## 3 | #### Default settings for IZE 4 | MACOS_ARCH_NAME = $(shell uname -m) 5 | MACOS_IZE_ARCH ?= $(shell echo $$(if [ "$(MACOS_ARCH_NAME)" = "x86_64" ]; then echo "amd64"; else echo "arm64"; fi)) 6 | LINUX_IZE_ARCH ?= $$(echo $$(if echo "$(LINUX_ARCH)" | grep -Fqe "arm"; then echo "arm$(LINUX_BITS)"; else echo "amd$(LINUX_BITS)"; fi)) 7 | IZE_DIR ?= $(INFRA_DIR)/bin 8 | TMPDIR ?= /tmp 9 | IZE_VERSION ?= 0.1.0 10 | MACOS_ARCHIVE_NAME = ize_$(IZE_VERSION)_$(OS_NAME)_$(MACOS_IZE_ARCH).tar.gz 11 | LINUX_ARCHIVE_NAME = ize_$(IZE_VERSION)_$(OS_NAME)_$(LINUX_IZE_ARCH).tar.gz 12 | IZE_ARCHIVE_NAME ?= $(shell echo $$(if [ "$(OS_NAME)" = "Linux" ]; then echo "$(LINUX_ARCHIVE_NAME)"; else echo "$(MACOS_ARCHIVE_NAME)"; fi)) 13 | IZE_DOWNLOAD_URL = https://github.com/hazelops/ize/releases/download/$(IZE_VERSION)/$(IZE_ARCHIVE_NAME) 14 | -------------------------------------------------------------------------------- /main.mk: -------------------------------------------------------------------------------- 1 | # Environment Validation 2 | ######################################################################################################################## 3 | # Ensure ENV is set 4 | ifndef ENV 5 | $(error Please set ENV via `export ENV=` or use direnv) 6 | endif 7 | 8 | 9 | -include $(INFRA_DIR)/env/$(ENV)/*.mk 10 | -include $(INFRA_DIR)/projects/*.mk 11 | include $(INFRA_DIR)/icmk/*/*.mk 12 | 13 | # Macroses 14 | ######################################################################################################################## 15 | # Makefile Helpers 16 | # Get Service name. We're parsing Make task name and extracting SVC. So foo.bar or baz/foo.bar will result to SVC=foo 17 | SVC = $(shell echo $(@) | grep $(SLASHSIGN) > /dev/null && echo $$(echo $(@) | $(CUT) -d/ -f2 | $(CUT) -d. -f1) || echo $$(echo $(@) | $(CUT) -d. 
-f1)) 18 | SVC_TYPE = $(shell echo $(SVC) | $(CUT) -d- -f1 ) 19 | 20 | ICMK_TEMPLATE_TERRAFORM_BACKEND_CONFIG = $(INFRA_DIR)/icmk/terraform/templates/backend.tf.gotmpl 21 | ICMK_TEMPLATE_TERRAFORM_VARS = $(INFRA_DIR)/icmk/terraform/templates/terraform.tfvars.gotmpl 22 | ICMK_TEMPLATE_TERRAFORM_TFPLAN = $(INFRA_DIR)/icmk/terraform/templates/terraform.tfplan.gotmpl 23 | 24 | ICMK_TEMPLATE_WAYPOINT_VARS = $(INFRA_DIR)/icmk/waypoint/templates/waypoint.wpvars.gotmpl 25 | 26 | # We are using a tag from AWS User which would tell us which environment this user is using. You can always override it. 27 | ENV ?= $(AWS_DEV_ENV_NAME) 28 | ENV_DIR ?= $(INFRA_DIR)/env/$(ENV) 29 | 30 | GIT_REPO ?= $(shell basename `git rev-parse --show-toplevel`) 31 | 32 | # Support for stack/tier workspace paths 33 | ifneq (,$(TIER)) 34 | ifneq (,$(STACK)) 35 | ENV_DIR:=$(ENV_DIR)/$(STACK)/$(TIER) 36 | TERRAFORM_STATE_KEY=$(ENV)/$(STACK)/$(TIER)/terraform.tfstate 37 | -include $(INFRA_DIR)/env/$(ENV)/$(STACK)/$(TIER)/*.mk 38 | else 39 | ENV_DIR:=$(ENV_DIR)/$(TIER) 40 | TERRAFORM_STATE_KEY=$(ENV)/$(TIER)/terraform.tfstate 41 | -include $(INFRA_DIR)/env/$(ENV)/$(TIER)/*.mk 42 | endif 43 | endif 44 | 45 | # Get Service sub-directory name in "projects" folder. We're parsing Make task name and extracting PROJECT_SUB_DIR. So baz/foo.bar will result to PROJECT_SUB_DIR=baz 46 | PROJECT_SUB_DIR ?= $(shell echo $(@) | grep $(SLASHSIGN) > /dev/null && echo $$(echo $(@) | $(CUT) -d/ -f1)$(SLASHSIGN) || echo "") 47 | PROJECT_ROOT ?= projects/$(PROJECT_SUB_DIR) 48 | PROJECT_PATH_ABS ?= $(shell cd $(PROJECT_ROOT)$(SVC) && pwd -P) 49 | PROJECT_PATH ?= $(PROJECT_ROOT)$(shell basename $(PROJECT_PATH_ABS)) 50 | SERVICE_NAME ?= $(ENV)-$(SVC) 51 | # Tasks 52 | ######################################################################################################################## 53 | .PHONY: auth help 54 | all: help 55 | 56 | env.debug: prereqs icmk.debug os.debug aws.debug 57 | icmk.debug: 58 | @echo "\033[32m=== ICMK Info ===\033[0m" 59 | @echo "\033[36mENV\033[0m: $(ENV)" 60 | @echo "\033[36mTAG\033[0m: $(TAG)" 61 | @echo "\033[36mINFRA_DIR\033[0m: $(INFRA_DIR)" 62 | @echo "\033[36mPWD\033[0m: $(PWD)" 63 | @echo "\033[36mICMK_VERSION\033[0m: $(ICMK_VERSION)" 64 | @echo "\033[36mICMK_GIT_REVISION\033[0m: $(ICMK_GIT_REVISION)" 65 | @echo "\033[36mENV_DIR\033[0m: $(ENV_DIR)" 66 | 67 | 68 | up: docker 69 | # TODO: This should probably use individual apps "up" definitions 70 | echo "TODO: aws ecs local up" 71 | 72 | login: ecr.login ## Perform all required authentication (ECR) 73 | auth: ecr.login 74 | help: ## Display this help screen (default) 75 | @echo "\033[32m=== Available Tasks ===\033[0m" 76 | @grep -h -E '^([a-zA-Z_-]|\.)+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' 77 | 78 | env: env.use 79 | use: env.use 80 | plan: terraform.plan 81 | 82 | # Verification of README existing 83 | README_FILE ?= $(PROJECT_ROOT)$(SVC)/README.md 84 | README_FILE_1SYMBOL ?= $$(cat $(README_FILE) | head -n 1 | head -c 1) 85 | README ?= @$$([ -f $(README_FILE) ]) && $$([ "$(README_FILE_1SYMBOL)" = "$(HASHSIGN)" ]) && echo "\033[32m[OK]\033[0m README exists" || echo "\033[31m[FAILED]\033[0m README does not exist. Please describe your project in README.md." 
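# Example (illustrative): with SVC=app this resolves to projects/app/README.md, and the check passes only if the file exists and its first character is "#".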
86 | 87 | ## Tool Dependencies 88 | COMPOSE ?= $(shell which docker-compose) 89 | 90 | JQ_ARM ?= $(DOCKER) run --user "$(CURRENT_USER_ID):$(CURRENT_USERGROUP_ID)" --platform "linux/amd64" -v $(INFRA_DIR):$(INFRA_DIR) -i --rm colstrom/jq 91 | JQ_DEFAULT ?= $(DOCKER) run --user "$(CURRENT_USER_ID):$(CURRENT_USERGROUP_ID)" -v $(INFRA_DIR):$(INFRA_DIR) -i --rm colstrom/jq 92 | JQ ?= $(shell echo $$(if [ "$(LINUX_ARCH)" = "arm64" ]; then echo "$(JQ_ARM)"; else echo "$(JQ_DEFAULT)"; fi)) 93 | BASE64_ARM ?= $(DOCKER) run --user "$(CURRENT_USER_ID):$(CURRENT_USERGROUP_ID)" --platform "linux/amd64" -i --rm busybox:$(BUSYBOX_VERSION) base64 94 | BASE64_DEFAULT ?= $(DOCKER) run --user "$(CURRENT_USER_ID):$(CURRENT_USERGROUP_ID)" -i --rm busybox:$(BUSYBOX_VERSION) base64 95 | BASE64 ?= $(shell echo $$(if [ "$(LINUX_ARCH)" = "arm64" ]; then echo "$(BASE64_ARM)"; else echo "$(BASE64_DEFAULT)"; fi)) 96 | CUT ?= $(DOCKER) run --user "$(CURRENT_USER_ID):$(CURRENT_USERGROUP_ID)" -i --rm busybox:$(BUSYBOX_VERSION) cut 97 | REV ?= $(DOCKER) run --user "$(CURRENT_USER_ID):$(CURRENT_USERGROUP_ID)" -i --rm busybox:$(BUSYBOX_VERSION) rev 98 | AWK ?= $(DOCKER) run --user "$(CURRENT_USER_ID):$(CURRENT_USERGROUP_ID)" -i --rm busybox:$(BUSYBOX_VERSION) awk 99 | 100 | 101 | GOMPLATE ?= $(DOCKER) run --user "$(CURRENT_USER_ID):$(CURRENT_USERGROUP_ID)" \ 102 | -e ENV="$(ENV)" \ 103 | -e AWS_PROFILE="$(AWS_PROFILE)" \ 104 | -e AWS_REGION="$(AWS_REGION)" \ 105 | -e NAMESPACE="$(NAMESPACE)" \ 106 | -e GIT_REPO="$(GIT_REPO)" \ 107 | -e ROOT_DIR="$(ROOT_DIR)" \ 108 | -e EC2_KEY_PAIR_NAME="$(EC2_KEY_PAIR_NAME)" \ 109 | -e TAG="$(TAG)" \ 110 | -e SSH_PUBLIC_KEY="$(SSH_PUBLIC_KEY)" \ 111 | -e DOCKER_REGISTRY="$(DOCKER_REGISTRY)" \ 112 | -e LOCALSTACK_ENDPOINT=$(LOCALSTACK_ENDPOINT) \ 113 | -e TERRAFORM_AWS_PROVIDER_VERSION=$(TERRAFORM_AWS_PROVIDER_VERSION) \ 114 | -e TERRAFORM_STATE_BUCKET_NAME="$(TERRAFORM_STATE_BUCKET_NAME)" \ 115 | -e TERRAFORM_STATE_KEY="$(TERRAFORM_STATE_KEY)" \ 116 | -e TERRAFORM_STATE_REGION="$(TERRAFORM_STATE_REGION)" \ 117 | -e TERRAFORM_STATE_PROFILE="$(TERRAFORM_STATE_PROFILE)" \ 118 | -e TERRAFORM_STATE_DYNAMODB_TABLE="$(TERRAFORM_STATE_DYNAMODB_TABLE)" \ 119 | -e SHORT_SHA="$(SHORT_SHA)" \ 120 | -e COMMIT_MESSAGE="$(COMMIT_MESSAGE)" \ 121 | -e GITHUB_ACTOR="$(GITHUB_ACTOR)" \ 122 | -e TASK_ROLE_NAME="$(TASK_ROLE_NAME)" \ 123 | -v $(ENV_DIR):/temp \ 124 | --rm -i hairyhenderson/gomplate 125 | 126 | ECHO = @echo 127 | 128 | # Dependencies 129 | ######################################################################################################################## 130 | # Ensures docker-compose is installed - does not enforce. 131 | docker-compose: docker 132 | ifeq (, $(COMPOSE)) 133 | $(error "docker-compose is not installed or incorrectly configured.") 134 | #else 135 | # @$(COMPOSE) --version 136 | endif 137 | 138 | # Ensures gomplate is installed 139 | gomplate: 140 | ifeq (, $(GOMPLATE)) 141 | $(error "gomplate is not installed or incorrectly configured. 
https://github.com/hairyhenderson/gomplate") 142 | endif 143 | 144 | # Ensures jq is installed 145 | jq: 146 | ifeq (, $(JQ)) 147 | $(error "jq is not installed or incorrectly configured.") 148 | endif 149 | -------------------------------------------------------------------------------- /serverless/main.mk: -------------------------------------------------------------------------------- 1 | # Docker executors 2 | ######################################################################################################################## 3 | SLS ?= @$(DOCKER) run --rm \ 4 | --user root:root \ 5 | --workdir=/app \ 6 | --entrypoint="/usr/local/bin/npx" \ 7 | -e AWS_PROFILE=$(AWS_PROFILE) \ 8 | -e LOCALSTACK_HOST=$(LOCALSTACK_HOST) \ 9 | -e SLS_DEBUG='$(SLS_DEBUG)' \ 10 | -e SLS_DEPRECATION_DISABLE='$(SLS_DEPRECATION_DISABLE)' \ 11 | -e SLS_WARNING_DISABLE='$(SLS_WARNING_DISABLE)' \ 12 | -v ~/.aws/:/root/.aws:ro \ 13 | -v $(ROOT_DIR)/$(PROJECT_PATH):/app \ 14 | -v $(ROOT_DIR)/$(PROJECT_PATH)/.serverless/:/root/.serverless \ 15 | -v $(ROOT_DIR)/.npm/:/root/.npm \ 16 | -v $(SLS_NODE_MODULES_CACHE_MOUNT):/app/node_modules \ 17 | node:$(NODE_VERSION) serverless 18 | 19 | NPM ?= @$(DOCKER) run --rm \ 20 | --user root:root \ 21 | --workdir=/app \ 22 | -v $(ROOT_DIR)/$(PROJECT_PATH):/app \ 23 | -v $(ROOT_DIR)/$(PROJECT_PATH)/.config/:/root/.config \ 24 | -v $(ROOT_DIR)/.npm/:/root/.npm \ 25 | -v $(SLS_NODE_MODULES_CACHE_MOUNT):/app/node_modules \ 26 | node:$(NODE_VERSION) npm 27 | 28 | # Serverless CLI Reference 29 | ######################################################################################################################## 30 | CMD_SLS_SERVICE_INSTALL = $(NPM) install --save-dev 31 | CMD_SLS_SERVICE_DEPLOY = $(SLS) deploy --config $(SLS_FILE) --service $(SVC) --verbose --region $(AWS_REGION) --env $(ENV) $(AWS_CLI_PROFILE) 32 | CMD_SLS_SERVICE_INVOKE = $(SLS) invoke --function $(SVC) --path $(EVENT_FILE) --log --config $(SLS_FILE) --service $(SVC) --region $(AWS_REGION) --env $(ENV) $(AWS_CLI_PROFILE) 33 | CMD_SLS_SERVICE_DESTROY = $(SLS) remove --config $(SLS_FILE) --service $(SVC) --verbose --region $(AWS_REGION) --env $(ENV) $(AWS_CLI_PROFILE) || true 34 | CMD_SLS_SERVICE_BUILD = cd $(ROOT_DIR)/$(PROJECT_PATH) && make 35 | CMD_SLS_SERVICE_SECRETS_PUSH = $(CMD_SERVICE_SECRETS_PUSH) 36 | CMD_SLS_SERVICE_SECRETS_PULL = $(CMD_SERVICE_SECRETS_PULL) 37 | # This works with "serverless-domain-manager" plugin and provide domain creation and remove 38 | CMD_SLS_SERVICE_CREATE_DOMAIN = $(SLS) create_domain --verbose --region $(AWS_REGION) --env $(ENV) $(AWS_CLI_PROFILE) 39 | CMD_SLS_SERVICE_DELETE_DOMAIN = $(SLS) delete_domain --verbose --region $(AWS_REGION) --env $(ENV) $(AWS_CLI_PROFILE) 40 | 41 | # Tasks 42 | ######################################################################################################################## 43 | 44 | 45 | # Dependencies 46 | ######################################################################################################################## 47 | # Ensures aws toolchain is installed 48 | aws: 49 | ifeq (, $(SLS)) 50 | $(error "aws cli toolchain is not installed or incorrectly configured.") 51 | endif 52 | -------------------------------------------------------------------------------- /serverless/variables.mk: -------------------------------------------------------------------------------- 1 | # This file should contain variables used in current module 2 | ################################################################## 3 | # main variables 4 | 
SLS_DOCKER_IMAGE ?= amaysim/serverless 5 | SLS_VERSION ?= 1.82.0 6 | SLS_FILE ?= serverless.yml 7 | SLS_DEBUG ?= 8 | SLS_DEPRECATION_DISABLE ?= 9 | SLS_WARNING_DISABLE ?= 10 | SLS_NODE_MODULES_CACHE_MOUNT ?= $(SVC)-node-modules 11 | EVENT_FILE ?= event.json 12 | NODE_VERSION ?= 12-alpine3.10 13 | -------------------------------------------------------------------------------- /terraform/main.mk: -------------------------------------------------------------------------------- 1 | # Macroses 2 | ######################################################################################################################## 3 | SSH_PUBLIC_KEY ?= $(shell cat ~/.ssh/id_rsa.pub) 4 | SSH_PUBLIC_KEY_BASE64 = $(shell echo "$(SSH_PUBLIC_KEY)" | $(BASE64)) 5 | EC2_KEY_PAIR_NAME ?= $(ENV)-$(NAMESPACE) 6 | ENV_DIR ?= $(INFRA_DIR)/env/$(ENV) 7 | OUTPUT_JSON_FILE = $(ENV_DIR)/.terraform/output.json 8 | 9 | AWS_LIMITS ?= @ ( echo $(foreach item, $(shell echo $(AWS_LIMITS_LIST) | $(JQ) -e -r '. | to_entries[] | .key' ), \ 10 | "$$(if [ $(shell grep -c "+ resource \"$(item)\"" $(ENV_DIR)/.terraform/tfplan.txt) -lt $(shell echo $(AWS_LIMITS_LIST) | $(JQ) -r '.$(item)[].value') ]; \ 11 | then echo "\n\033[32m[OK]\033[0m $(item) limit"; \ 12 | else echo "\n\033[33m[WARNING]\033[0m $(item) limit (Value:$(shell echo $(AWS_LIMITS_LIST) | $(JQ) -r '.$(item)[].value')) exceeded! \ 13 | Current value:$(shell grep -c "+ resource \"$(item)\"" $(ENV_DIR)/.terraform/tfplan.txt) \ 14 | \033[33m To request a service quota increase:\033[0m \033[36m aws service-quotas request-service-quota-increase --service-code $(shell echo $(AWS_LIMITS_LIST) | $(JQ) -r '.$(item)[].service') --quota-code $(shell echo $(AWS_LIMITS_LIST) | $(JQ) -r '.$(item)[].quotacode') --desired-value \033[0m"; fi )") ) 15 | 16 | # Terraform Backend Config 17 | TERRAFORM_STATE_KEY ?= $(ENV)/terraform.tfstate 18 | TERRAFORM_STATE_PROFILE ?= $(AWS_PROFILE) 19 | TERRAFORM_STATE_REGION ?= $(AWS_REGION) 20 | TERRAFORM_STATE_BUCKET_NAME ?= $(NAMESPACE)-tf-state 21 | CHECKOV ?= $(DOCKER) run --user "$(CURRENT_USER_ID):$(CURRENT_USERGROUP_ID)" -v $(ENV_DIR):/tf -i bridgecrew/checkov -d /tf -s 22 | TFLINT ?= $(DOCKER) run --user "$(CURRENT_USER_ID):$(CURRENT_USERGROUP_ID)" --rm -v $(ENV_DIR):/data -t wata727/tflint 23 | TFLOCK ?= $(DOCKER) run --rm --hostname=$(USER)-icmk-terraform -v $(ENV_DIR):/$(ENV_DIR) -v "$(ENV_DIR)/.terraform":/"$(ENV_DIR)/.terraform" -v "$(INFRA_DIR)":"$(INFRA_DIR)" -v $(HOME)/.aws/:/root/.aws:ro -w $(ENV_DIR) -e AWS_PROFILE=$(AWS_PROFILE) -e ENV=$(ENV) hazelops/tflock 24 | TF_LOG_PATH ?= /$(ENV_DIR)/tflog.txt 25 | 26 | TF_VERSION_MAJOR ?= $$(echo $(TERRAFORM_VERSION) | tr "." "\n" | head -n 1) 27 | TERRAFORM_VERSION_VERIFICATION ?= $(shell echo $$(if [ "$(TF_VERSION_MAJOR)" -lt "1" ]; then echo "\033[33m[WARNING]\033[0m Your version of Terraform is out of date! 
The minimally compatible version: 1.0.0"; else echo "Terraform version: $(TERRAFORM_VERSION)"; fi)) 28 | 29 | terraform.compat: 30 | @echo $(TERRAFORM_VERSION_VERIFICATION) 31 | 32 | TERRAFORM ?= $(DOCKER) run \ 33 | --user "$(CURRENT_USER_ID)":"$(CURRENT_USERGROUP_ID)" \ 34 | --rm \ 35 | --hostname="$(USER)-icmk-terraform" \ 36 | -v "$(ENV_DIR)":"$(ENV_DIR)" \ 37 | -v "$(INFRA_DIR)":"$(INFRA_DIR)" \ 38 | -v "$(HOME)/.aws/":"/.aws:ro" \ 39 | -w "$(ENV_DIR)" \ 40 | -e AWS_PROFILE="$(AWS_PROFILE)" $(AWS_MFA_ENV_VARS) \ 41 | -e ENV="$(ENV)" \ 42 | -e TF_LOG="$(TF_LOG_LEVEL)" \ 43 | -e TF_LOG_PATH="$(TF_LOG_PATH)" \ 44 | hashicorp/terraform:$(TERRAFORM_VERSION) 45 | 46 | CMD_SAVE_OUTPUT_TO_SSM = $(AWS) ssm put-parameter --name "/$(ENV)/terraform-output" --type "SecureString" --tier "Intelligent-Tiering" --data-type "text" --overwrite --value "$$(cat $(OUTPUT_JSON_FILE) | $(BASE64))" > /dev/null && echo "\033[32m[OK]\033[0m Terraform output saved to ssm://$(ENV)/terraform-output" || (echo "\033[31m[ERROR]\033[0m Terraform output saving failed" && exit 1) 47 | 48 | # Optional cmd to be used, because the branch related to TF v0.13 upgrade already have updated versions.tf files 49 | CMD_TERRAFORM_MODULES_UPGRADE = $(shell find $(INFRA_DIR)/terraform -name '*.tf' | xargs -n1 dirname | uniq | xargs -n1 $(TERRAFORM) 0.13upgrade -yes) 50 | 51 | CMD_TERRAFORM_INIT ?= @ [ "$(IZE_ENABLED)" = "true" ] || cd $(ENV_DIR) && \ 52 | cat $(ICMK_TEMPLATE_TERRAFORM_BACKEND_CONFIG) | $(GOMPLATE) > backend.tf && \ 53 | cat $(ICMK_TEMPLATE_TERRAFORM_VARS) | $(GOMPLATE) > terraform.tfvars && \ 54 | $(TERRAFORM) init -input=true 55 | 56 | CMD_TERRAFORM_PLAN ?= @ [ "$(IZE_ENABLED)" = "true" ] || cd $(ENV_DIR) && \ 57 | $(TERRAFORM) plan -out=$(ENV_DIR)/.terraform/tfplan -input=false && \ 58 | $(TERRAFORM) show $(ENV_DIR)/.terraform/tfplan -input=false -no-color > $(ENV_DIR)/.terraform/tfplan.txt && \ 59 | cat $(ICMK_TEMPLATE_TERRAFORM_TFPLAN) | $(GOMPLATE) > $(ENV_DIR)/.terraform/tfplan.md 60 | 61 | CMD_TERRAFORM_APPLY ?= @ [ "$(IZE_ENABLED)" = "true" ] || cd $(ENV_DIR) && \ 62 | $(TERRAFORM) apply -input=false $(ENV_DIR)/.terraform/tfplan && \ 63 | $(TERRAFORM) output -json > $(ENV_DIR)/.terraform/output.json && \ 64 | $(CMD_SAVE_OUTPUT_TO_SSM) 65 | 66 | # Tasks 67 | ######################################################################################################################## 68 | infra.init: terraform.init 69 | infra.deploy: terraform.apply 70 | infra.destroy: terraform.destroy 71 | infra.checkov: terraform.checkov 72 | infra.tflint: terraform.tflint 73 | 74 | 75 | 76 | terraform.debug: 77 | @echo "\033[32m=== Terraform Environment Info ===\033[0m" 78 | @echo "\033[36mENV\033[0m: $(ENV)" 79 | @echo "\033[36mTF_VAR_ssh_public_key\033[0m: $(TF_VAR_ssh_public_key)" 80 | 81 | # TODO: Potentionally replace gomplate by terragrunt 82 | terraform.init: terraform.compat gomplate terraform 83 | $(CMD_TERRAFORM_INIT) 84 | 85 | # TODO: Potentionally replace gomplate by terragrunt 86 | # TODO:? 
Implement -target approach so we can deploy specific apps only 87 | # TODO: generate env vars into tfvars in only one task 88 | 89 | terraform.lock: terraform.init 90 | @ \ 91 | cd $(ENV_DIR) && \ 92 | $(TFLOCK) 93 | 94 | # Re-initialization of the backend to TF v0.13 version format 95 | terraform.reconfig: 96 | @ \ 97 | cd $(ENV_DIR) && \ 98 | $(TERRAFORM) init -input=true -reconfigure 99 | 100 | # TF Apply / Deploy infrastructure 101 | terraform.apply: terraform.plan 102 | $(CMD_TERRAFORM_APPLY) 103 | 104 | ## Test infrastructure with checkov 105 | terraform.checkov: 106 | @ echo "Testing with Checkov:" 107 | @ echo "--------------------" 108 | @ cd $(ENV_DIR) 109 | @ $(CHECKOV) 110 | 111 | ## Test infrastructure with tflint 112 | terraform.tflint: 113 | @ echo "Testing with TFLint:" 114 | @ echo "--------------------" 115 | @ cd $(ENV_DIR) 116 | @ $(TFLINT) && \ 117 | echo "Test passed (OK)" 118 | 119 | terraform.refresh: terraform.init ## Test infrastructure 120 | @ cd $(ENV_DIR) && \ 121 | $(TERRAFORM) refresh 122 | 123 | terraform.get: terraform.init ## Test infrastructure 124 | @ cd $(ENV_DIR) && \ 125 | $(TERRAFORM) get --update 126 | 127 | # TODO:? Potentionally replace gomplate by terragrunt 128 | terraform.destroy: ## Destroy infrastructure 129 | @ cd $(ENV_DIR) && \ 130 | $(TERRAFORM) destroy 131 | 132 | terraform.destroy-quiet: ## Destroy infrastructure without confirmation 133 | @ cd $(ENV_DIR) && \ 134 | $(TERRAFORM) destroy -auto-approve || $(TERRAFORM) destroy -auto-approve 135 | @ echo "\n\033[36m[INFO] Please run: make secrets.delete or app.delete_secrets now\033[0m" 136 | 137 | terraform.output-to-ssm: ## Manual upload output.json to AWS SSM. Output.json encoded in base64. 138 | @ cd $(ENV_DIR) && \ 139 | $(CMD_SAVE_OUTPUT_TO_SSM) 140 | 141 | ## Terraform plan output for Github Action 142 | terraform.plan: terraform.init 143 | $(CMD_TERRAFORM_PLAN) 144 | 145 | terraform.limits: terraform.plan 146 | @ $(AWS_LIMITS) 147 | 148 | # Upgrading TF from v0.12 to v0.13 149 | terraform.13upgrade: 150 | @ echo "Terraform upgrade to v0.13 :" 151 | @ echo "-----------------------------" 152 | @ $(CMD_TERRAFORM_MODULES_UPGRADE) 153 | 154 | env.use: terraform jq 155 | @ [ -e $(ENV_DIR) ] && \ 156 | ( \ 157 | echo "Found $(ENV)" && \ 158 | cd $(INFRA_DIR)/env/ && \ 159 | [ -f $(ENV)/.terraform/terraform.tfstate ] && ( \ 160 | mv $(ENV)/.terraform/terraform.tfstate $(ENV)/terraform.$(shell date +%s).bak.tfstate && \ 161 | echo "Local state file backed up as $(ENV)/terraform.$(shell date +%s).bak.tfstate. Using $(ENV)" \ 162 | ) \ 163 | || echo "Local state file not found. Using $(ENV). You can run 'make infra.init'" \ 164 | ) \ 165 | || (\ 166 | cd $(INFRA_DIR)/env/ && \ 167 | ln -s $(ENV_BASE) $(ENV) && \ 168 | echo "Created new $(ENV) from $(ENV_BASE)" \ 169 | ) 170 | 171 | env.rm: terraform jq 172 | @ [ -e $(ENV_DIR) ] && ( \ 173 | cd $(INFRA_DIR)/env/ && \ 174 | [ -f $(ENV)/.terraform/terraform.tfstate ] && ( \ 175 | mv $(ENV)/.terraform/terraform.tfstate $(ENV)/terraform.$(ENV).$(shell date +%s).bak.tfstate \ 176 | ) || echo "No local state file found." && \ 177 | unlink $(ENV) && \ 178 | echo "Deleted $(ENV)" \ 179 | ) || echo "No $(ENV) found. 
Can't de-init" 180 | 181 | 182 | # Dependencies 183 | ######################################################################################################################## 184 | # Ensures terraform is installed 185 | terraform: 186 | ifeq (, $(TERRAFORM)) 187 | $(error "terraform is not installed or incorrectly configured.") 188 | endif 189 | -------------------------------------------------------------------------------- /terraform/templates/backend.tf.gotmpl: -------------------------------------------------------------------------------- 1 | {{if or (.Env.ENV | strings.Contains "localstack") (.Env.ENV | strings.Contains "local") }} 2 | terraform { 3 | backend "local" {} 4 | } 5 | 6 | provider "aws" { 7 | profile = var.aws_profile 8 | region = var.aws_region 9 | s3_force_path_style = true 10 | secret_key = "mock_secret_key" 11 | skip_credentials_validation = true 12 | skip_metadata_api_check = true 13 | skip_requesting_account_id = true 14 | 15 | endpoints { 16 | apigateway = "{{ .Env.LOCALSTACK_ENDPOINT }}" 17 | acm = "{{ .Env.LOCALSTACK_ENDPOINT }}" 18 | cloudformation = "{{ .Env.LOCALSTACK_ENDPOINT }}" 19 | cloudwatch = "{{ .Env.LOCALSTACK_ENDPOINT }}" 20 | ec2 = "{{ .Env.LOCALSTACK_ENDPOINT }}" 21 | dynamodb = "{{ .Env.LOCALSTACK_ENDPOINT }}" 22 | es = "{{ .Env.LOCALSTACK_ENDPOINT }}" 23 | firehose = "{{ .Env.LOCALSTACK_ENDPOINT }}" 24 | iam = "{{ .Env.LOCALSTACK_ENDPOINT }}" 25 | kinesis = "{{ .Env.LOCALSTACK_ENDPOINT }}" 26 | lambda = "{{ .Env.LOCALSTACK_ENDPOINT }}" 27 | route53 = "{{ .Env.LOCALSTACK_ENDPOINT }}" 28 | redshift = "{{ .Env.LOCALSTACK_ENDPOINT }}" 29 | s3 = "{{ .Env.LOCALSTACK_ENDPOINT }}" 30 | secretsmanager = "{{ .Env.LOCALSTACK_ENDPOINT }}" 31 | ses = "{{ .Env.LOCALSTACK_ENDPOINT }}" 32 | sns = "{{ .Env.LOCALSTACK_ENDPOINT }}" 33 | sqs = "{{ .Env.LOCALSTACK_ENDPOINT }}" 34 | ssm = "{{ .Env.LOCALSTACK_ENDPOINT }}" 35 | stepfunctions = "{{ .Env.LOCALSTACK_ENDPOINT }}" 36 | sts = "{{ .Env.LOCALSTACK_ENDPOINT }}" 37 | ecs = "{{ .Env.LOCALSTACK_ENDPOINT }}" 38 | ecr = "{{ .Env.LOCALSTACK_ENDPOINT }}" 39 | } 40 | } 41 | {{else}} 42 | provider "aws" { 43 | profile = var.aws_profile 44 | region = var.aws_region 45 | default_tags { 46 | tags = { 47 | terraform = "true" 48 | namespace = "{{ .Env.NAMESPACE }}" 49 | env = "{{ .Env.ENV }}" 50 | terraform-git-repo = "{{ .Env.GIT_REPO }}" 51 | } 52 | } 53 | } 54 | 55 | terraform { 56 | backend "s3" { 57 | bucket = "{{if .Env.TERRAFORM_STATE_BUCKET_NAME}}{{ .Env.TERRAFORM_STATE_BUCKET_NAME }}{{else}}nutcorpnet-tf-state{{end}}" 58 | key = "{{if .Env.TERRAFORM_STATE_KEY}}{{ .Env.TERRAFORM_STATE_KEY }}{{else}}foo/terraform.tfstate{{end}}" 59 | region = "{{if .Env.TERRAFORM_STATE_REGION}}{{ .Env.TERRAFORM_STATE_REGION }}{{else}}us-east-1{{end}}" 60 | profile = "{{if .Env.TERRAFORM_STATE_PROFILE}}{{ .Env.TERRAFORM_STATE_PROFILE }}{{else}}nutcorp-dev{{end}}" 61 | dynamodb_table = "{{if .Env.TERRAFORM_STATE_DYNAMODB_TABLE}}{{ .Env.TERRAFORM_STATE_DYNAMODB_TABLE }}{{else}}tf-state-lock{{end}}" 62 | } 63 | } 64 | {{end}} 65 | -------------------------------------------------------------------------------- /terraform/templates/terraform.tfplan.gotmpl: -------------------------------------------------------------------------------- 1 | ### Terraform Changes {{if .Env.SHORT_SHA }}({{ .Env.SHORT_SHA }}) by {{end}}{{if .Env.GITHUB_ACTOR }}{{ .Env.GITHUB_ACTOR }}{{end}} 2 | 3 | {{if .Env.COMMIT_MESSAGE }}On commit: {{ .Env.COMMIT_MESSAGE }}{{end}} 4 | 5 |
<details><summary>Expand to view details</summary>
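{{/* Note: terraform/main.mk's CMD_TERRAFORM_PLAN renders this template with gomplate (ENV_DIR is mounted at /temp) and writes the result to .terraform/tfplan.md, intended as a plan summary, e.g. for a GitHub Actions comment. */}}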

6 | 7 | ``` 8 | {{file.Read "/temp/.terraform/tfplan.txt"}} 9 | ``` 10 | 11 |
</details>
12 | -------------------------------------------------------------------------------- /terraform/templates/terraform.tfvars.gotmpl: -------------------------------------------------------------------------------- 1 | # This file is generated via gomplate 2 | env="{{ .Env.ENV }}" 3 | aws_profile="{{ .Env.AWS_PROFILE }}" 4 | aws_region="{{ .Env.AWS_REGION }}" 5 | ec2_key_pair_name="{{ .Env.EC2_KEY_PAIR_NAME }}" 6 | docker_image_tag="{{ .Env.TAG }}" 7 | ssh_public_key="{{ .Env.SSH_PUBLIC_KEY }}" 8 | docker_registry="{{ .Env.DOCKER_REGISTRY }}" 9 | namespace="{{ .Env.NAMESPACE }}" 10 | -------------------------------------------------------------------------------- /terraform/variables.mk: -------------------------------------------------------------------------------- 1 | # This file should contain variables used in current module 2 | ################################################################## 3 | # main variables 4 | TERRAFORM_VERSION ?= "0.12.29" 5 | 6 | TERRAFORM_AWS_PROVIDER_VERSION ?= "~> 3.0" 7 | 8 | IZE_ENABLED ?= false 9 | # AWS_LIMITS_LIST contains name of aws resources like we see in terraform plan output (example: aws_s3_bucket) 10 | # AWS resources have the following properties: limit value, name of aws service and quota code for raising a request. 11 | # If you need to check one more service limit - please just add a new service info to this json list 12 | AWS_LIMITS_LIST ?= $$(echo "{ \ 13 | \"aws_s3_bucket\":[ \ 14 | {\"value\":\"100\", \"service\":\"s3\", \"quotacode\":\"L-DC2B2D3D\"}], \ 15 | \"aws_route53_health_check\":[ \ 16 | {\"value\":\"200\", \"service\":\"route53\", \"quotacode\":\"L-ACB674F3\"}], \ 17 | \"aws_dynamodb_table\":[ \ 18 | {\"value\":\"256\", \"service\":\"dynamodb\", \"quotacode\":\"L-F98FE922\"}], \ 19 | \"aws_eip\":[ \ 20 | {\"value\":\"5\", \"service\":\"vpc\", \"quotacode\":\"L-2AFB9258\"}] \ 21 | }") 22 | TERRAFORM_STATE_DYNAMODB_TABLE ?= tf-state-lock 23 | TF_LOG_LEVEL ?= 24 | -------------------------------------------------------------------------------- /tunnel/main.mk: -------------------------------------------------------------------------------- 1 | # Macroses 2 | ######################################################################################################################## 3 | SSH_CONFIG ?= $(INFRA_DIR)/env/$(ENV)/ssh.config 4 | BASTION_INSTANCE_ID = $(shell $(AWS) ssm get-parameter --name "/$(ENV)/terraform-output" --with-decryption | $(JQ) -r '.Parameter.Value' | $(BASE64) -d | $(JQ) -r '.bastion_instance_id.value') 5 | CMD_BASTION_SSH_TUNNEL_CONFIG_CREATE = echo $(shell $(AWS) ssm get-parameter --name "/$(ENV)/terraform-output" --with-decryption | $(JQ) -r '.Parameter.Value' | $(BASE64) -d | $(JQ) -r '.ssh_forward_config.value[]' > $(SSH_CONFIG)) && echo "$(HASHSIGN) SSH Tunnel Config \n$(HASHSIGN) Use the Forward ports to connect to remote instances (localhost:)\n-----" && cat $(SSH_CONFIG) 6 | 7 | # Bastion commands are stored in SSM now, so user without admin permissions won't be able to connect 8 | CMD_BASTION_SSH_TUNNEL_UP = $(shell $(AWS) ssm get-parameter --name "/$(ENV)/terraform-output" --with-decryption | $(JQ) -r '.Parameter.Value' | $(BASE64) -d | $(JQ) -r '.cmd.value.tunnel.up') -F $(SSH_CONFIG) && echo "\n\033[32m[OK]\033[0m SSH tunnel up" 9 | CMD_BASTION_SSH_TUNNEL_DOWN = $(shell $(AWS) ssm get-parameter --name "/$(ENV)/terraform-output" --with-decryption | $(JQ) -r '.Parameter.Value' | $(BASE64) -d | $(JQ) -r '.cmd.value.tunnel.down') -F $(SSH_CONFIG) && echo "\n\033[32m[OK]\033[0m SSH tunnel disabled" 10 
| CMD_BASTION_SSH_TUNNEL_STATUS = $(shell $(AWS) ssm get-parameter --name "/$(ENV)/terraform-output" --with-decryption | $(JQ) -r '.Parameter.Value' | $(BASE64) -d | $(JQ) -r '.cmd.value.tunnel.status') -F $(SSH_CONFIG) && echo "\n\033[32m[OK]\033[0m SSH tunnel up.\nConfig:\n-----" && cat $(SSH_CONFIG) 11 | CMD_BASTION_SSH_TUNNEL_SSH_KEY_ENSURE_PRESENT = ($(AWS) ssm send-command --instance-ids $(BASTION_INSTANCE_ID) --document-name AWS-RunShellScript --comment 'Add an SSH public key to authorized_keys' --parameters '{"commands": ["grep -qR \"$(SSH_PUBLIC_KEY)\" /home/ubuntu/.ssh/authorized_keys || echo \"$(SSH_PUBLIC_KEY)\" >> /home/ubuntu/.ssh/authorized_keys"]}' 1> /dev/null) && echo "\n\033[32m[OK]\033[0m SSH Key" 12 | 13 | # Tasks 14 | ######################################################################################################################## 15 | tunnel: tunnel.config tunnel.up 16 | tunnel.up: 17 | @$(CMD_BASTION_SSH_TUNNEL_UP) 18 | 19 | tunnel.down: 20 | @$(CMD_BASTION_SSH_TUNNEL_DOWN) 21 | 22 | tunnel.status: 23 | @$(CMD_BASTION_SSH_TUNNEL_STATUS) 24 | 25 | tunnel.ssh-key: 26 | @$(CMD_BASTION_SSH_TUNNEL_SSH_KEY_ENSURE_PRESENT) 27 | 28 | tunnel.config: tunnel.ssh-key 29 | $(CMD_BASTION_SSH_TUNNEL_CONFIG_CREATE) 30 | 31 | # Dependencies 32 | ######################################################################################################################## 33 | -------------------------------------------------------------------------------- /tunnel/variables.mk: -------------------------------------------------------------------------------- 1 | # This file should contain variables used in current module 2 | ################################################################## 3 | -------------------------------------------------------------------------------- /variables.mk: -------------------------------------------------------------------------------- 1 | # This file should contain variables used in current module 2 | ################################################################## 3 | # Main variables 4 | ENV_BASE = dev 5 | NPM_TOKEN ?= nil 6 | BUSYBOX_VERSION ?= 1.31.1 7 | # This is a workaround for syntax highlighters that break on a "Comment" symbol. 8 | HASHSIGN = \# 9 | SLASHSIGN = / 10 | 11 | # For unit tests 12 | BATS_BIN_PATH = tests/test/libs/bats/bin 13 | -------------------------------------------------------------------------------- /waypoint/main.mk: -------------------------------------------------------------------------------- 1 | # Macroses 2 | ######################################################################################################################## 3 | # TODO: Waypoint Config mount is MacOS-only for now. 
Needs to be platform-independent 4 | WAYPOINT ?= $(DOCKER) run \ 5 | --user "root":"$(CURRENT_USERGROUP_ID)" \ 6 | --rm \ 7 | --hostname="$(USER)-icmk-waypoint" \ 8 | -v "$(ENV_DIR)":"$(ENV_DIR)" \ 9 | -v "$(INFRA_DIR)":"$(INFRA_DIR)" \ 10 | -v "$(ROOT_DIR)":"$(ROOT_DIR)" \ 11 | -v "$(HOME)/.aws/":"/home/waypoint/.aws:ro" \ 12 | -v "$(HOME)/Library/Preferences/waypoint":"/home/waypoint/.config/waypoint" \ 13 | -v "$(HOME)/.waypoint/":"/home/waypoint/.waypoint" \ 14 | -v "$(HOME)/.aws/":"/root/.aws:ro" \ 15 | -v "/var/run/docker.sock":"/var/run/docker.sock" \ 16 | -w "$(ENV_DIR)" \ 17 | -e AWS_PROFILE="$(AWS_PROFILE)" \ 18 | -e ROOT_DIR="$(ROOT_DIR)" \ 19 | -e ENV="$(ENV)" \ 20 | $(WAYPOINT_DOCKER_IMAGE):$(WAYPOINT_VERSION) 21 | 22 | WAYPOINT_INTERPOLATE_VARS ?= \ 23 | sed -i 's/vpc_private_subnets=\[\]/vpc_private_subnets=$(VPC_PRIVATE_SUBNETS)/g' $(ENV_DIR)/waypoint.wpvars && \ 24 | sed -i 's/vpc_public_subnets=\[\]/vpc_public_subnets=$(VPC_PUBLIC_SUBNETS)/g' $(ENV_DIR)/waypoint.wpvars && \ 25 | sed -i 's/AWS_ZONE_ID/"$(ZONE_ID)"/g' $(ENV_DIR)/waypoint.wpvars 26 | 27 | CMD_WAYPOINT_SERVICE_BUILD ?= \ 28 | @\ 29 | cd $(ENV_DIR) && \ 30 | cat $(ICMK_TEMPLATE_WAYPOINT_VARS) | $(GOMPLATE) > waypoint.wpvars && \ 31 | $(WAYPOINT_INTERPOLATE_VARS) && \ 32 | $(WAYPOINT) build -var-file=waypoint.wpvars -app $(SVC) 33 | 34 | CMD_WAYPOINT_SERVICE_DEPLOY ?= \ 35 | @\ 36 | cd $(ENV_DIR) && \ 37 | cat $(ICMK_TEMPLATE_WAYPOINT_VARS) | $(GOMPLATE) > waypoint.wpvars && \ 38 | $(WAYPOINT_INTERPOLATE_VARS) && \ 39 | $(WAYPOINT) deploy -var-file=waypoint.wpvars -release=false -app $(SVC) 40 | 41 | CMD_WAYPOINT_SERVICE_RELEASE ?= \ 42 | @\ 43 | cd $(ENV_DIR) && \ 44 | cat $(ICMK_TEMPLATE_WAYPOINT_VARS) | $(GOMPLATE) > waypoint.wpvars && \ 45 | $(WAYPOINT_INTERPOLATE_VARS) && \ 46 | $(WAYPOINT) release -app $(SVC) 47 | 48 | CMD_WAYPOINT_INIT ?= \ 49 | @\ 50 | cd $(ENV_DIR) && \ 51 | cat $(ICMK_TEMPLATE_WAYPOINT_VARS) | $(GOMPLATE) > waypoint.wpvars && \ 52 | $(WAYPOINT_INTERPOLATE_VARS) && \ 53 | $(WAYPOINT) init 54 | 55 | CMD_WAYPOINT_INSTALL ?= \ 56 | @\ 57 | cd $(ENV_DIR) && \ 58 | cat $(ICMK_TEMPLATE_WAYPOINT_VARS) | $(GOMPLATE) > waypoint.wpvars && \ 59 | $(WAYPOINT_INTERPOLATE_VARS) && \ 60 | $(WAYPOINT) install -accept-tos -platform=ecs -ecs-cluster=$(WAYPOINT_ECS_CLUSTER_NAME) -ecs-region=$(AWS_REGION) -runner=$(WAYPOINT_RUNNER_ENABLED) -ecs-server-image=$(WAYPOINT_DOCKER_IMAGE):$(WAYPOINT_VERSION) 61 | 62 | CMD_WAYPOINT_UNINSTALL ?= \ 63 | @\ 64 | cd $(ENV_DIR) && \ 65 | cat $(ICMK_TEMPLATE_WAYPOINT_VARS) | $(GOMPLATE) > waypoint.wpvars && \ 66 | $(WAYPOINT_INTERPOLATE_VARS) && \ 67 | $(WAYPOINT) server uninstall -platform=ecs -ecs-cluster=$(WAYPOINT_ECS_CLUSTER_NAME) -ecs-region=$(AWS_REGION) -auto-approve -ignore-runner-error 68 | 69 | CMD_WAYPOINT_DESTROY ?= \ 70 | @\ 71 | cd $(ENV_DIR) && \ 72 | cat $(ICMK_TEMPLATE_WAYPOINT_VARS) | $(GOMPLATE) > waypoint.wpvars && \ 73 | $(WAYPOINT_INTERPOLATE_VARS) && \ 74 | $(WAYPOINT) destroy -auto-approve 75 | 76 | CMD_WAYPOINT_AUTH ?= \ 77 | @\ 78 | cd $(ENV_DIR) && \ 79 | cat $(ICMK_TEMPLATE_WAYPOINT_VARS) | $(GOMPLATE) > waypoint.wpvars && \ 80 | $(WAYPOINT_INTERPOLATE_VARS) && \ 81 | $(WAYPOINT) token new 82 | 83 | 84 | CMD_WAYPOINT_CONFIG_SET ?= @$(WAYPOINT) config source-set --type=aws-ssm --config region=$(AWS_REGION) 85 | # Tasks 86 | ######################################################################################################################## 87 | waypoint: waypoint.install waypoint.init 88 | waypoint.config: 89 | $(CMD_WAYPOINT_CONFIG_SET) 
90 | 91 | waypoint.init: gomplate waypoint-dependency 92 | $(CMD_WAYPOINT_CONTEXT_CLEAR) 93 | $(CMD_WAYPOINT_INIT) 94 | $(CMD_WAYPOINT_CONFIG_SET) 95 | 96 | waypoint.install: gomplate waypoint-dependency 97 | $(CMD_WAYPOINT_INSTALL) 98 | 99 | waypoint.auth: gomplate waypoint-dependency 100 | $(CMD_WAYPOINT_AUTH) 101 | 102 | waypoint.destroy: gomplate waypoint-dependency 103 | $(CMD_WAYPOINT_DESTROY) 104 | 105 | waypoint.uninstall: gomplate waypoint-dependency 106 | $(CMD_WAYPOINT_UNINSTALL) 107 | 108 | waypoint.context-create: 109 | $(CMD_WAYPOINT_CONTEXT_CREATE) 110 | waypoint.debug: waypoint-dependency 111 | @echo "\033[32m=== Waypoint Info ===\033[0m" 112 | @echo "\033[36mDocker Image\033[0m: $(WAYPOINT_DOCKER_IMAGE):$(WAYPOINT_VERSION)" 113 | @echo "\033[36mVersion\033[0m: $(shell $(WAYPOINT) version)" 114 | 115 | # Dependencies 116 | ######################################################################################################################## 117 | # Ensures terraform is installed 118 | waypoint-dependency: 119 | ifeq (, $(WAYPOINT)) 120 | $(error "waypoint is not installed or incorrectly configured.") 121 | endif 122 | -------------------------------------------------------------------------------- /waypoint/templates/waypoint.wpvars.gotmpl: -------------------------------------------------------------------------------- 1 | # This file is generated via gomplate 2 | env="{{ .Env.ENV }}" 3 | root_dir="{{ .Env.ROOT_DIR }}" 4 | task_role_name="{{ .Env.TASK_ROLE_NAME }}" 5 | aws_region="{{ .Env.AWS_REGION }}" 6 | ec2_key_pair_name="{{ .Env.EC2_KEY_PAIR_NAME }}" 7 | docker_image_tag="{{ .Env.TAG }}" 8 | ssh_public_key="{{ .Env.SSH_PUBLIC_KEY }}" 9 | namespace="{{ .Env.NAMESPACE }}" 10 | vpc_private_subnets=[] 11 | vpc_public_subnets=[] 12 | zone_id=AWS_ZONE_ID 13 | -------------------------------------------------------------------------------- /waypoint/variables.mk: -------------------------------------------------------------------------------- 1 | # This file should contain variables used in current module 2 | ################################################################## 3 | # main variables 4 | WAYPOINT_VERSION ?= latest 5 | WAYPOINT_DOCKER_IMAGE = hazelops/waypoint 6 | WAYPOINT_CONFIG_FILE ?= $(ENV_DIR)/waypoint.hcl 7 | WAYPOINT_ECS_CLUSTER_NAME ?= $(ENV)-$(NAMESPACE)-waypoint 8 | WAYPOINT_RUNNER_ENABLED ?= false 9 | --------------------------------------------------------------------------------
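
For orientation, here is a minimal sketch of a project-level Makefile that consumes the modules above. The include path, the variable values, and the assumption that icmk's root main.mk pulls in the module makefiles are illustrative only:

```make
# Hypothetical project Makefile -- every name and value below is illustrative.
export ENV         ?= dev
export NAMESPACE   ?= acme
export AWS_PROFILE ?= acme-dev
export AWS_REGION  ?= us-east-1

# Assumption: icmk was initialized into .infra/icmk and its root main.mk
# includes the module makefiles shown above (terraform, tunnel, waypoint, ...).
include .infra/icmk/main.mk

# Project-level aliases composed from tasks defined in this dump:
#   infra.deploy -> terraform.plan + terraform.apply (output saved to SSM)
#   tunnel       -> tunnel.config + tunnel.up (SSH tunnel via the bastion host)
infra: infra.deploy
connect: tunnel
```

With such a Makefile, `make infra ENV=staging` renders backend.tf and terraform.tfvars via gomplate before running the dockerized Terraform, while `make connect` expects the Terraform output to already be stored in SSM under /$(ENV)/terraform-output (see tunnel/main.mk).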