├── aws_reverse_proxy ├── .gitignore ├── providers.tf ├── .prettierrc ├── versions.tf ├── outputs.tf ├── data.tf ├── route53.tf ├── certificate.tf ├── lambda.tpl.js ├── lambda.tf ├── variables.tf ├── cloudfront.tf └── README.md ├── aws_ses_forwarder ├── .gitignore ├── versions.tf ├── outputs.tf ├── data.tf ├── s3.tf ├── permissions.tf ├── lambda.tf ├── variables.tf ├── main.tf ├── README.md └── index.js ├── aws_lambda_api ├── example-project │ ├── .gitignore │ ├── src │ │ ├── types.d.ts │ │ └── index.ts │ ├── dist │ │ └── lambda.zip │ ├── package.json │ ├── tsconfig.json │ ├── build.sh │ └── package-lock.json ├── providers.tf ├── versions.tf ├── data.tf ├── outputs.tf ├── route53.tf ├── main.tf ├── certificate.tf ├── permissions.tf ├── api_gateway_config.tf ├── api_gateway_resources.tf ├── variables.tf └── README.md ├── .gitignore ├── _test ├── variables.tf ├── aws_reverse_proxy.tf └── providers.tf ├── docker_compose_host ├── versions.tf ├── outputs.tf ├── main.tf ├── variables.tf └── README.md ├── aws_domain_redirect ├── providers.tf ├── versions.tf ├── outputs.tf ├── data.tf ├── main.tf ├── variables.tf └── README.md ├── aws_static_site ├── versions.tf ├── providers.tf ├── data.tf ├── outputs.tf ├── s3.tf ├── main.tf ├── variables.tf └── README.md ├── aws_ec2_ebs_docker_host ├── versions.tf ├── data.tf ├── provision-docker.sh ├── provision-ebs.sh ├── outputs.tf ├── provision-swap.sh ├── security.tf ├── main.tf ├── variables.tf └── README.md ├── aws_lambda_cronjob ├── versions.tf ├── outputs.tf ├── data.tf ├── main.tf ├── permissions.tf ├── variables.tf └── README.md ├── .editorconfig ├── release.sh └── README.md /aws_reverse_proxy/.gitignore: -------------------------------------------------------------------------------- 1 | /*.zip 2 | -------------------------------------------------------------------------------- /aws_ses_forwarder/.gitignore: -------------------------------------------------------------------------------- 1 | /lambda.zip 2 | -------------------------------------------------------------------------------- /aws_lambda_api/example-project/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /aws_lambda_api/example-project/src/types.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'one-liner-joke'; 2 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .history 3 | .vscode/ 4 | .env 5 | .ssh 6 | .terraform 7 | terraform.tfvars 8 | terraform.tfstate* 9 | -------------------------------------------------------------------------------- /_test/variables.tf: -------------------------------------------------------------------------------- 1 | variable "tld" { 2 | description = "Domain under which we assume we can create resources (e.g. 
`\"example.com\"`)" 3 | } 4 | -------------------------------------------------------------------------------- /aws_lambda_api/example-project/dist/lambda.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/jareware/terraform-utils/HEAD/aws_lambda_api/example-project/dist/lambda.zip -------------------------------------------------------------------------------- /docker_compose_host/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | 4 | required_providers { 5 | null = ">= 2.1.2" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /aws_lambda_api/providers.tf: -------------------------------------------------------------------------------- 1 | # This alias is needed because ACM is only available in the "us-east-1" region 2 | provider "aws" { 3 | alias = "us_east_1" 4 | } 5 | -------------------------------------------------------------------------------- /aws_domain_redirect/providers.tf: -------------------------------------------------------------------------------- 1 | # This alias is needed because ACM is only available in the "us-east-1" region 2 | provider "aws" { 3 | alias = "us_east_1" 4 | } 5 | -------------------------------------------------------------------------------- /aws_reverse_proxy/providers.tf: -------------------------------------------------------------------------------- 1 | # This alias is needed because ACM is only available in the "us-east-1" region 2 | provider "aws" { 3 | alias = "us_east_1" 4 | } 5 | -------------------------------------------------------------------------------- /aws_lambda_api/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | 4 | required_providers { 5 | aws = ">= 2.58.0" 6 | random = ">= 2.2.1" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /aws_ses_forwarder/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | 4 | required_providers { 5 | aws = ">= 2.58.0" 6 | random = ">= 2.2.1" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /aws_static_site/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | 4 | required_providers { 5 | aws = ">= 2.58.0" 6 | random = ">= 2.2.1" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /aws_domain_redirect/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | 4 | required_providers { 5 | aws = ">= 2.58.0" 6 | random = ">= 2.2.1" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /aws_ec2_ebs_docker_host/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | 4 | required_providers { 5 | aws = ">= 2.58.0" 6 | null = ">= 2.1.2" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /aws_lambda_cronjob/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | 
required_version = ">= 0.12" 3 | 4 | required_providers { 5 | aws = ">= 2.58.0" 6 | random = ">= 2.2.1" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /aws_reverse_proxy/.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "trailingComma": "all", 4 | "bracketSpacing": true, 5 | "jsxBracketSameLine": false, 6 | "printWidth": 120 7 | } 8 | -------------------------------------------------------------------------------- /aws_static_site/providers.tf: -------------------------------------------------------------------------------- 1 | # This alias is needed because ACM is only available in the "us-east-1" region 2 | provider "aws" { 3 | alias = "us_east_1" 4 | region = "us-east-1" 5 | } 6 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | # see: http://EditorConfig.org 2 | root = true 3 | 4 | [*] 5 | end_of_line = lf 6 | insert_final_newline = true 7 | charset = utf-8 8 | indent_style = space 9 | indent_size = 2 10 | -------------------------------------------------------------------------------- /aws_domain_redirect/outputs.tf: -------------------------------------------------------------------------------- 1 | output "reverse_proxy" { 2 | description = "CloudFront-based reverse-proxy that's used for implementing the redirect" 3 | value = module.aws_reverse_proxy 4 | } 5 | -------------------------------------------------------------------------------- /aws_reverse_proxy/versions.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_version = ">= 0.12" 3 | 4 | required_providers { 5 | aws = ">= 2.58.0" 6 | null = ">= 2.1.2" 7 | random = ">= 2.2.1" 8 | archive = ">= 1.3.0" 9 | template = ">= 2.1.2" 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /_test/aws_reverse_proxy.tf: -------------------------------------------------------------------------------- 1 | module "aws_reverse_proxy_1" { 2 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_reverse_proxy#inputs 3 | source = "../aws_reverse_proxy" 4 | 5 | site_domain = "proxy1.${var.tld}" 6 | origin_url = "https://www.futurice.com/" 7 | } 8 | -------------------------------------------------------------------------------- /aws_lambda_api/example-project/src/index.ts: -------------------------------------------------------------------------------- 1 | import { APIGatewayProxyHandler } from "aws-lambda"; 2 | import { getRandomJoke } from "one-liner-joke"; 3 | 4 | export const handler: APIGatewayProxyHandler = () => { 5 | return Promise.resolve({ 6 | statusCode: 200, 7 | body: JSON.stringify(getRandomJoke(), null, 2) 8 | }); 9 | }; 10 | -------------------------------------------------------------------------------- /aws_lambda_api/example-project/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "engines": { 3 | "node": ">=12 <13", 4 | "npm": ">=6 <7" 5 | }, 6 | "devDependencies": { 7 | "@types/aws-lambda": "^8.10.23", 8 | "@types/node": "^8.10.45", 9 | "check-node-version": "^3.3.0", 10 | "typescript": "^3.4.1" 11 | }, 12 | "dependencies": { 13 | "one-liner-joke": "^1.2.0" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- 
/aws_lambda_cronjob/outputs.tf: -------------------------------------------------------------------------------- 1 | output "function_name" { 2 | description = "This is the unique name of the Lambda function that was created" 3 | value = aws_lambda_function.this.id 4 | } 5 | 6 | output "function_role" { 7 | value = aws_iam_role.this.name 8 | description = "The IAM role for the function created; can be used to attach additional policies/permissions" 9 | } 10 | -------------------------------------------------------------------------------- /aws_lambda_api/example-project/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2017", 4 | "module": "commonjs", 5 | "strict": true, 6 | "esModuleInterop": true, 7 | "typeRoots": ["node_modules/@types"], 8 | "types": ["node", "aws-lambda"], 9 | "lib": ["es2017"], 10 | "outDir": "temp" 11 | }, 12 | "include": ["src/**/*.ts"], 13 | "exclude": ["node_modules"] 14 | } 15 | -------------------------------------------------------------------------------- /aws_domain_redirect/data.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "name_suffix" { 2 | length = 8 3 | special = false 4 | upper = false 5 | } 6 | 7 | locals { 8 | # If an external name_prefix wasn't provided, use the default one with a random suffix (to prevent clashes on resources that require globally unique names) 9 | name_prefix = var.name_prefix == "" ? "aws-domain-redirect-${random_string.name_suffix.result}" : var.name_prefix 10 | } 11 | -------------------------------------------------------------------------------- /aws_lambda_cronjob/data.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "name_suffix" { 2 | length = 8 3 | special = false 4 | upper = false 5 | } 6 | 7 | locals { 8 | # If an external name_prefix wasn't provided, use the default one with a random suffix (to prevent clashes on resources that require globally unique names) 9 | name_prefix = var.name_prefix == "" ? "aws-lambda-cronjob-${random_string.name_suffix.result}" : var.name_prefix 10 | } 11 | -------------------------------------------------------------------------------- /aws_ec2_ebs_docker_host/data.tf: -------------------------------------------------------------------------------- 1 | # Access data about available availability zones in the current region 2 | data "aws_availability_zones" "this" {} 3 | 4 | # Retrieve info about the VPC this host should join 5 | 6 | data "aws_vpc" "this" { 7 | default = var.vpc_id == "" ? true : false 8 | id = var.vpc_id == "" ? 
null : var.vpc_id 9 | } 10 | 11 | data "aws_subnet" "this" { 12 | vpc_id = data.aws_vpc.this.id 13 | availability_zone = local.availability_zone 14 | } 15 | -------------------------------------------------------------------------------- /aws_reverse_proxy/outputs.tf: -------------------------------------------------------------------------------- 1 | output "cloudfront_id" { 2 | description = "The ID of the CloudFront distribution that's used for hosting the content" 3 | value = aws_cloudfront_distribution.this.id 4 | } 5 | 6 | output "web_endpoint" { 7 | description = "URL on which the site will be made available" 8 | value = "https://${var.site_domain}/" 9 | } 10 | 11 | output "site_domain" { 12 | description = "Domain on which the site will be made available" 13 | value = var.site_domain 14 | } 15 | -------------------------------------------------------------------------------- /aws_static_site/data.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "name_suffix" { 2 | length = 8 3 | special = false 4 | upper = false 5 | } 6 | 7 | locals { 8 | # If an external name_prefix wasn't provided, use the default one with a random suffix (to prevent clashes on resources that require globally unique names) 9 | name_prefix = var.name_prefix == "" ? "aws-static-site-${random_string.name_suffix.result}" : var.name_prefix 10 | } 11 | 12 | resource "random_string" "s3_read_password" { 13 | length = 32 14 | special = false 15 | } 16 | -------------------------------------------------------------------------------- /aws_ses_forwarder/outputs.tf: -------------------------------------------------------------------------------- 1 | output "function_name" { 2 | description = "This is the unique name of the Lambda function that was created" 3 | value = aws_lambda_function.this.id 4 | } 5 | 6 | output "forward_mapping" { 7 | description = "Map defining receiving email addresses, and to which addesses they forward their incoming email" 8 | value = local.forward_mapping 9 | } 10 | 11 | output "distinct_recipients" { 12 | description = "Distinct recipient addresses mentioned in `forward_mapping`" 13 | value = local.distinct_recipients 14 | } 15 | -------------------------------------------------------------------------------- /aws_lambda_api/data.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "name_suffix" { 2 | length = 8 3 | special = false 4 | upper = false 5 | } 6 | 7 | locals { 8 | # If an external name_prefix wasn't provided, use the default one with a random suffix (to prevent clashes on resources that require globally unique names) 9 | name_prefix = var.name_prefix == "" ? "aws-lambda-api-${random_string.name_suffix.result}" : var.name_prefix 10 | } 11 | 12 | data "aws_route53_zone" "this" { 13 | name = replace(var.api_domain, "/.*?\\b([\\w-]+\\.[\\w-]+)\\.?$/", "$1") # e.g. "foo.example.com" => "example.com" 14 | } 15 | -------------------------------------------------------------------------------- /aws_reverse_proxy/data.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "name_suffix" { 2 | length = 8 3 | special = false 4 | upper = false 5 | } 6 | 7 | locals { 8 | # If an external name_prefix wasn't provided, use the default one with a random suffix (to prevent clashes on resources that require globally unique names) 9 | name_prefix = var.name_prefix == "" ? 
"aws-reverse-proxy-${random_string.name_suffix.result}" : var.name_prefix 10 | } 11 | 12 | data "aws_route53_zone" "this" { 13 | name = replace(var.site_domain, "/.*?\\b([\\w-]+\\.[\\w-]+)\\.?$/", "$1") # e.g. "foo.example.com" => "example.com" 14 | } 15 | -------------------------------------------------------------------------------- /_test/providers.tf: -------------------------------------------------------------------------------- 1 | # Beware: This assumes your "default" AWS profile. 2 | provider "aws" { 3 | version = "~> 2.4" 4 | region = "eu-central-1" 5 | } 6 | 7 | # Lambda@Edge and ACM, when used with CloudFront, need to be used in the US East region. 8 | # Thus, we need a separate AWS provider for that region, which can be used with an alias. 9 | # Make sure you customize this block to match your regular AWS provider configuration. 10 | # https://www.terraform.io/docs/configuration/providers.html#multiple-provider-instances 11 | provider "aws" { 12 | version = "~> 2.4" 13 | alias = "us_east_1" 14 | region = "us-east-1" 15 | } 16 | -------------------------------------------------------------------------------- /aws_static_site/outputs.tf: -------------------------------------------------------------------------------- 1 | output "bucket_name" { 2 | description = "The name of the S3 bucket that's used for hosting the content" 3 | value = aws_s3_bucket.this.id 4 | } 5 | 6 | output "reverse_proxy" { 7 | description = "CloudFront-based reverse-proxy that's used for performance, access control, etc" 8 | value = module.aws_reverse_proxy 9 | } 10 | 11 | output "bucket_domain_name" { 12 | description = "Full S3 domain name for the bucket used for hosting the content (e.g. `\"aws-static-site.s3-website.eu-central-1.amazonaws.com\"`)" 13 | value = "http://${aws_s3_bucket.this.website_endpoint}/" 14 | } 15 | -------------------------------------------------------------------------------- /aws_lambda_api/example-project/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Exit if any of the following commands exit with non-0, and echo our commands back 4 | set -ex 5 | 6 | # For running npm "binaries" 7 | PATH=$PATH:./node_modules/.bin 8 | 9 | # Check that we're running the correct version of node 10 | check-node-version --package 11 | 12 | # Compile TypeScript into "temp" (defined is tsconfig.json) 13 | tsc 14 | 15 | # Install production dependencies under "temp" 16 | cp package*.json temp 17 | (cd temp && npm install --production) 18 | 19 | # Create Lambda zipfile under "dist" 20 | (cd temp && zip -r ../dist/lambda.zip *) 21 | 22 | # Clean up 23 | rm -rf temp 24 | -------------------------------------------------------------------------------- /aws_ec2_ebs_docker_host/provision-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # https://docs.docker.com/engine/install/ubuntu/#install-using-the-convenience-script 4 | curl -fsSL https://get.docker.com -o get-docker.sh 5 | sudo sh get-docker.sh 6 | rm get-docker.sh 7 | 8 | # Allow using docker without sudo 9 | sudo usermod -aG docker $(whoami) 10 | 11 | # https://success.docker.com/article/how-to-setup-log-rotation-post-installation 12 | echo '{ 13 | "log-driver": "json-file", 14 | "log-opts": { 15 | "max-size": "10m", 16 | "max-file": "5" 17 | } 18 | } 19 | ' | sudo tee /etc/docker/daemon.json 20 | sudo service docker restart # restart the daemon so the settings take effect 21 | 
-------------------------------------------------------------------------------- /docker_compose_host/outputs.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | reprovision_trigger = jsonencode({ 3 | docker_compose_version = var.docker_compose_version 4 | docker_compose_env = var.docker_compose_env 5 | docker_compose_yml = var.docker_compose_yml 6 | docker_compose_override_yml = var.docker_compose_override_yml 7 | docker_compose_up_command = var.docker_compose_up_command 8 | }) 9 | } 10 | 11 | output "reprovision_trigger" { 12 | description = "Stringified version of all docker-compose configuration used for this host; can be used as the `reprovision_trigger` input to an `aws_ec2_ebs_docker_host` module" 13 | value = local.reprovision_trigger 14 | } 15 | -------------------------------------------------------------------------------- /aws_lambda_api/outputs.tf: -------------------------------------------------------------------------------- 1 | output "function_name" { 2 | description = "This is the unique name of the Lambda function that was created" 3 | value = aws_lambda_function.this.id 4 | } 5 | 6 | output "web_endpoint" { 7 | description = "This URL can be used to invoke the Lambda through the API Gateway" 8 | value = var.api_domain == "" ? aws_api_gateway_deployment.this.invoke_url : "https://${var.api_domain}/" 9 | } 10 | 11 | output "function_role" { 12 | value = aws_iam_role.this.name 13 | description = "The IAM role for the function created; can be used to attach additional policies/permissions" 14 | } 15 | 16 | output "rest_api_name" { 17 | description = "Name of the API Gateway API that was created" 18 | value = aws_api_gateway_rest_api.this.name 19 | } 20 | -------------------------------------------------------------------------------- /aws_ses_forwarder/data.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "name_suffix" { 2 | length = 8 3 | special = false 4 | upper = false 5 | } 6 | 7 | locals { 8 | # If an external name_prefix wasn't provided, use the default one with a random suffix (to prevent clashes on resources that require globally unique names) 9 | name_prefix = var.name_prefix == "" ? "aws-ses-forwarder-${random_string.name_suffix.result}" : var.name_prefix 10 | } 11 | 12 | # Provides details about the current AWS region 13 | data "aws_region" "this" {} 14 | 15 | # Use this data source to get the access to the effective Account ID, User ID, and ARN in which Terraform is authorized 16 | data "aws_caller_identity" "this" {} 17 | 18 | data "aws_route53_zone" "this" { 19 | name = replace(var.email_domain, "/.*?\\b([\\w-]+\\.[\\w-]+)\\.?$/", "$1") # e.g. 
"foo.example.com" => "example.com" 20 | } 21 | -------------------------------------------------------------------------------- /aws_reverse_proxy/route53.tf: -------------------------------------------------------------------------------- 1 | # Add an IPv4 DNS record pointing to the CloudFront distribution 2 | resource "aws_route53_record" "ipv4" { 3 | zone_id = data.aws_route53_zone.this.zone_id 4 | name = var.site_domain 5 | type = "A" 6 | 7 | alias { 8 | name = aws_cloudfront_distribution.this.domain_name 9 | zone_id = aws_cloudfront_distribution.this.hosted_zone_id 10 | evaluate_target_health = false 11 | } 12 | } 13 | 14 | # Add an IPv6 DNS record pointing to the CloudFront distribution 15 | resource "aws_route53_record" "ipv6" { 16 | zone_id = data.aws_route53_zone.this.zone_id 17 | name = var.site_domain 18 | type = "AAAA" 19 | 20 | alias { 21 | name = aws_cloudfront_distribution.this.domain_name 22 | zone_id = aws_cloudfront_distribution.this.hosted_zone_id 23 | evaluate_target_health = false 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /aws_ses_forwarder/s3.tf: -------------------------------------------------------------------------------- 1 | # Create the S3 bucket in which the emails are stored 2 | resource "aws_s3_bucket" "this" { 3 | bucket = local.name_prefix 4 | tags = var.tags 5 | 6 | lifecycle_rule { 7 | enabled = true 8 | 9 | expiration { 10 | days = 1 11 | } 12 | } 13 | } 14 | 15 | # Allow SES to write incoming email to the bucket for storage 16 | resource "aws_s3_bucket_policy" "this" { 17 | bucket = aws_s3_bucket.this.id 18 | policy = <&2 echo "usage: provision-swap.sh SWAP_FILE_SIZE SWAPPINESS" 7 | exit 1 8 | fi 9 | 10 | SWAP_FILE_SIZE=$1 11 | SWAPPINESS=$2 12 | 13 | echo "Setting up a swap file (size: $SWAP_FILE_SIZE, swappiness: $SWAPPINESS)..." 14 | 15 | # Create the swap file 16 | sudo fallocate -l ${SWAP_FILE_SIZE} /swapfile 17 | 18 | # Only root should be able to access to this file 19 | sudo chmod 600 /swapfile 20 | 21 | # Define the file as swap space 22 | sudo mkswap /swapfile 23 | 24 | # Enable the swap file, allowing the system to start using it 25 | sudo swapon /swapfile 26 | 27 | # Make the swap file permanent, otherwise, previous settings will be lost on reboot 28 | # Create a backup of the existing fstab, JustInCase(tm) 29 | sudo cp /etc/fstab /etc/fstab.bak 30 | # Add the swap file information at the end of the fstab 31 | echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab 32 | 33 | # Adjust the swappiness 34 | # With the default value of 10, the host will use swap if it has almost no other choice. Value is between 0 and 100. 35 | # 100 will make the host use the swap as much as possible, 0 will make it use only in case of emergency. 36 | # As swap access is slower than RAM access, having a low value here for a server is better. 
37 | sudo sysctl vm.swappiness=${SWAPPINESS} 38 | 39 | # Make this setting permanent, to not lose it on reboot 40 | sudo cp /etc/sysctl.conf /etc/sysctl.conf.bak 41 | echo "vm.swappiness=${SWAPPINESS}" | sudo tee -a /etc/sysctl.conf 42 | -------------------------------------------------------------------------------- /aws_ses_forwarder/permissions.tf: -------------------------------------------------------------------------------- 1 | # Allow our Lambda to be invoked by SES 2 | resource "aws_lambda_permission" "ses_forwarding_function_policy" { 3 | statement_id = "AllowExecutionFromSES" 4 | action = "lambda:InvokeFunction" 5 | function_name = aws_lambda_function.this.arn 6 | principal = "ses.amazonaws.com" 7 | source_account = data.aws_caller_identity.this.account_id 8 | } 9 | 10 | # Allow Lambda to invoke our functions 11 | resource "aws_iam_role" "this" { 12 | name = local.name_prefix 13 | tags = var.tags 14 | 15 | assume_role_policy = < 26 | 27 | 28 | 29 | Redirecting 30 | 31 | 32 |
Redirecting to: ${var.redirect_url}
33 | 34 | EOF 35 | } 36 | -------------------------------------------------------------------------------- /aws_lambda_api/permissions.tf: -------------------------------------------------------------------------------- 1 | # Allow Lambda to invoke our functions: 2 | resource "aws_iam_role" "this" { 3 | name = local.name_prefix 4 | tags = var.tags 5 | 6 | assume_role_policy = < this is 100% safe to override with your own env, should you need one 52 | aws_lambda_cronjob = "" 53 | } 54 | } 55 | 56 | variable "lambda_logging_enabled" { 57 | description = "When true, writes any console output to the Lambda function's CloudWatch group" 58 | default = false 59 | } 60 | 61 | variable "tags" { 62 | description = "AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/" 63 | type = map(string) 64 | default = {} 65 | } 66 | -------------------------------------------------------------------------------- /aws_ec2_ebs_docker_host/security.tf: -------------------------------------------------------------------------------- 1 | # Create an SSH key pair for accessing the EC2 instance 2 | resource "aws_key_pair" "this" { 3 | public_key = file(var.ssh_public_key_path) 4 | } 5 | 6 | # Create our default security group to access the instance, over specific protocols 7 | resource "aws_security_group" "this" { 8 | vpc_id = data.aws_vpc.this.id 9 | tags = merge(var.tags, { Name = var.hostname }) # make this resource easier to identify in the AWS Console (tag "Name" is effectively "display name" in some services) 10 | } 11 | 12 | # Incoming SSH & outgoing ANY needs to be allowed for provisioning to work 13 | 14 | resource "aws_security_group_rule" "outgoing_any" { 15 | security_group_id = aws_security_group.this.id 16 | type = "egress" 17 | from_port = 0 18 | to_port = 0 19 | protocol = "-1" 20 | cidr_blocks = ["0.0.0.0/0"] 21 | } 22 | 23 | resource "aws_security_group_rule" "incoming_ssh" { 24 | security_group_id = aws_security_group.this.id 25 | type = "ingress" 26 | from_port = 22 27 | to_port = 22 28 | protocol = "tcp" 29 | cidr_blocks = ["0.0.0.0/0"] 30 | } 31 | 32 | # The rest of the security rules are opt-in 33 | 34 | resource "aws_security_group_rule" "incoming_http" { 35 | count = var.allow_incoming_http ? 1 : 0 36 | security_group_id = aws_security_group.this.id 37 | type = "ingress" 38 | from_port = 80 39 | to_port = 80 40 | protocol = "tcp" 41 | cidr_blocks = ["0.0.0.0/0"] 42 | } 43 | 44 | resource "aws_security_group_rule" "incoming_https" { 45 | count = var.allow_incoming_https ? 1 : 0 46 | security_group_id = aws_security_group.this.id 47 | type = "ingress" 48 | from_port = 443 49 | to_port = 443 50 | protocol = "tcp" 51 | cidr_blocks = ["0.0.0.0/0"] 52 | } 53 | 54 | resource "aws_security_group_rule" "incoming_dns_tcp" { 55 | count = var.allow_incoming_dns ? 1 : 0 56 | security_group_id = aws_security_group.this.id 57 | type = "ingress" 58 | from_port = 53 59 | to_port = 53 60 | protocol = "tcp" 61 | cidr_blocks = ["0.0.0.0/0"] 62 | } 63 | 64 | resource "aws_security_group_rule" "incoming_dns_udp" { 65 | count = var.allow_incoming_dns ? 
1 : 0 66 | security_group_id = aws_security_group.this.id 67 | type = "ingress" 68 | from_port = 53 69 | to_port = 53 70 | protocol = "udp" 71 | cidr_blocks = ["0.0.0.0/0"] 72 | } 73 | -------------------------------------------------------------------------------- /aws_lambda_api/api_gateway_resources.tf: -------------------------------------------------------------------------------- 1 | # Add root resource to the API (it it needs to be included separately from the "proxy" resource defined below), which forwards to our Lambda: 2 | 3 | resource "aws_api_gateway_method" "proxy_root" { 4 | rest_api_id = aws_api_gateway_rest_api.this.id 5 | resource_id = aws_api_gateway_rest_api.this.root_resource_id 6 | http_method = "ANY" 7 | authorization = "NONE" 8 | } 9 | 10 | resource "aws_api_gateway_integration" "proxy_root" { 11 | rest_api_id = aws_api_gateway_rest_api.this.id 12 | resource_id = aws_api_gateway_method.proxy_root.resource_id 13 | http_method = aws_api_gateway_method.proxy_root.http_method 14 | integration_http_method = "POST" 15 | type = "AWS_PROXY" 16 | uri = aws_lambda_function.this.invoke_arn 17 | } 18 | 19 | # Add a "proxy" resource, that matches all paths (except the root, defined above) and forwards them to our Lambda: 20 | 21 | resource "aws_api_gateway_resource" "proxy_other" { 22 | rest_api_id = aws_api_gateway_rest_api.this.id 23 | parent_id = aws_api_gateway_rest_api.this.root_resource_id 24 | path_part = "{proxy+}" 25 | } 26 | 27 | resource "aws_api_gateway_method" "proxy_other" { 28 | rest_api_id = aws_api_gateway_rest_api.this.id 29 | resource_id = aws_api_gateway_resource.proxy_other.id 30 | http_method = "ANY" 31 | authorization = "NONE" 32 | } 33 | 34 | resource "aws_api_gateway_integration" "proxy_other" { 35 | rest_api_id = aws_api_gateway_rest_api.this.id 36 | resource_id = aws_api_gateway_method.proxy_other.resource_id 37 | http_method = aws_api_gateway_method.proxy_other.http_method 38 | integration_http_method = "POST" 39 | type = "AWS_PROXY" 40 | uri = aws_lambda_function.this.invoke_arn 41 | } 42 | 43 | resource "aws_api_gateway_method_response" "proxy_other" { 44 | rest_api_id = aws_api_gateway_rest_api.this.id 45 | resource_id = aws_api_gateway_resource.proxy_other.id 46 | http_method = aws_api_gateway_method.proxy_other.http_method 47 | status_code = "200" 48 | 49 | response_models = { 50 | "application/json" = "Empty" 51 | } 52 | } 53 | 54 | resource "aws_api_gateway_integration_response" "proxy_other" { 55 | depends_on = [aws_api_gateway_integration.proxy_other] 56 | rest_api_id = aws_api_gateway_rest_api.this.id 57 | resource_id = aws_api_gateway_resource.proxy_other.id 58 | http_method = aws_api_gateway_method.proxy_other.http_method 59 | status_code = aws_api_gateway_method_response.proxy_other.status_code 60 | 61 | response_templates = { 62 | "application/json" = "" 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /aws_ses_forwarder/main.tf: -------------------------------------------------------------------------------- 1 | # Add our domain to SES 2 | resource "aws_ses_domain_identity" "this" { 3 | domain = var.email_domain 4 | } 5 | 6 | # Perform verification via DNS record, proving to SES we have ownership of the domain 7 | resource "aws_route53_record" "ses_verification" { 8 | zone_id = data.aws_route53_zone.this.zone_id 9 | name = "_amazonses.${var.email_domain}" 10 | type = "TXT" 11 | ttl = "600" 12 | records = ["${aws_ses_domain_identity.this.verification_token}"] 13 | } 14 | 15 | # Create DNS 
records telling email servers SES handles the incoming email for our domain 16 | resource "aws_route53_record" "mx_record" { 17 | zone_id = data.aws_route53_zone.this.zone_id 18 | name = var.email_domain 19 | type = "MX" 20 | ttl = "600" 21 | records = ["10 inbound-smtp.${data.aws_region.this.name}.amazonaws.com"] 22 | } 23 | 24 | # Because we assume we're in the SES sandbox, generate a verification for each email address to which we want to forward mail 25 | resource "aws_ses_email_identity" "recipient" { 26 | for_each = var.skip_recipient_verification ? toset([]) : local.distinct_recipients 27 | email = each.value 28 | } 29 | 30 | # Create a new SES rule set (only one can be active at a time, though) 31 | resource "aws_ses_receipt_rule_set" "this" { 32 | count = var.rule_set_name == "" ? 1 : 0 33 | rule_set_name = local.name_prefix 34 | } 35 | 36 | # Ensure our rule set is the active one 37 | resource "aws_ses_active_receipt_rule_set" "this" { 38 | count = var.rule_set_name == "" ? 1 : 0 39 | rule_set_name = var.rule_set_name == "" ? aws_ses_receipt_rule_set.this[0].rule_set_name : var.rule_set_name 40 | } 41 | 42 | # Configure actions SES should take when email comes in 43 | # https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-receipt-rules.html#receiving-email-receipt-rules-set-up 44 | resource "aws_ses_receipt_rule" "store" { 45 | name = local.name_prefix 46 | rule_set_name = var.rule_set_name == "" ? aws_ses_receipt_rule_set.this[0].rule_set_name : var.rule_set_name 47 | recipients = [var.email_domain] # i.e. match all mailboxes on this domain 48 | enabled = true 49 | scan_enabled = true 50 | 51 | # First, store the email into S3 52 | s3_action { 53 | bucket_name = aws_s3_bucket.this.id 54 | object_key_prefix = "emails/" 55 | position = 1 56 | } 57 | 58 | # Then, invoke the Lambda to forward it 59 | lambda_action { 60 | function_arn = aws_lambda_function.this.arn 61 | invocation_type = "Event" 62 | position = 2 63 | } 64 | } 65 | 66 | # Set up DKIM for our domain, to improve deliverability 67 | resource "aws_ses_domain_dkim" "this" { 68 | domain = aws_ses_domain_identity.this.domain 69 | } 70 | 71 | # Publish the DKIM public key info on our DNS 72 | resource "aws_route53_record" "example_amazonses_dkim_record" { 73 | count = 3 74 | zone_id = data.aws_route53_zone.this.zone_id 75 | name = "${element(aws_ses_domain_dkim.this.dkim_tokens, count.index)}._domainkey.${var.email_domain}" 76 | type = "CNAME" 77 | ttl = "600" 78 | records = ["${element(aws_ses_domain_dkim.this.dkim_tokens, count.index)}.dkim.amazonses.com"] 79 | } 80 | -------------------------------------------------------------------------------- /aws_static_site/variables.tf: -------------------------------------------------------------------------------- 1 | variable "site_domain" { 2 | description = "Domain on which the static site will be made available (e.g. 
`\"www.example.com\"`)" 3 | } 4 | 5 | variable "name_prefix" { 6 | description = "Name prefix to use for objects that need to be created (only lowercase alphanumeric characters and hyphens allowed, for S3 bucket name compatibility)" 7 | default = "" 8 | } 9 | 10 | variable "comment_prefix" { 11 | description = "This will be included in comments for resources that are created" 12 | default = "Static site: " 13 | } 14 | 15 | variable "cloudfront_price_class" { 16 | description = "CloudFront price class to use (`100`, `200` or `\"All\"`, see https://aws.amazon.com/cloudfront/pricing/)" 17 | default = 100 18 | } 19 | 20 | variable "viewer_https_only" { 21 | description = "Set this to `false` if you need to support insecure HTTP access for clients, in addition to HTTPS" 22 | default = true 23 | } 24 | 25 | variable "cache_ttl_override" { 26 | description = "When >= 0, override the cache behaviour for ALL objects in S3, so that they stay in the CloudFront cache for this amount of seconds" 27 | default = -1 28 | } 29 | 30 | variable "default_root_object" { 31 | description = "The object to return when the root URL is requested" 32 | default = "index.html" 33 | } 34 | 35 | variable "default_error_object" { 36 | description = "The object to return when an unknown URL is requested" 37 | default = "error.html" 38 | } 39 | 40 | variable "client_side_routing" { 41 | description = "When enabled, every request that doesn't match a static file in the bucket will get rewritten to the index file; this allows you to handle routing fully in client-side JavaScript" 42 | default = false 43 | } 44 | 45 | variable "add_response_headers" { 46 | description = "Map of HTTP headers (if any) to add to outgoing responses before sending them to clients" 47 | type = map 48 | default = {} 49 | } 50 | 51 | variable "hsts_max_age" { 52 | description = "How long should `Strict-Transport-Security` remain in effect for the site; disabled automatically when `viewer_https_only = false`" 53 | default = 31557600 # i.e. 
one year in seconds 54 | } 55 | 56 | variable "basic_auth_username" { 57 | description = "When non-empty, require this username with HTTP Basic Auth" 58 | default = "" 59 | } 60 | 61 | variable "basic_auth_password" { 62 | description = "When non-empty, require this password with HTTP Basic Auth" 63 | default = "" 64 | } 65 | 66 | variable "basic_auth_realm" { 67 | description = "When using HTTP Basic Auth, this will be displayed by the browser in the auth prompt" 68 | default = "Authentication Required" 69 | } 70 | 71 | variable "basic_auth_body" { 72 | description = "When using HTTP Basic Auth, and authentication has failed, this will be displayed by the browser as the page content" 73 | default = "Unauthorized" 74 | } 75 | 76 | variable "lambda_logging_enabled" { 77 | description = "When true, writes information about incoming requests to the Lambda function's CloudWatch group" 78 | default = false 79 | } 80 | 81 | variable "tags" { 82 | description = "AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/" 83 | type = map(string) 84 | default = {} 85 | } 86 | -------------------------------------------------------------------------------- /aws_ec2_ebs_docker_host/main.tf: -------------------------------------------------------------------------------- 1 | # Create the main EC2 instance 2 | # https://www.terraform.io/docs/providers/aws/r/instance.html 3 | resource "aws_instance" "this" { 4 | instance_type = var.instance_type 5 | ami = var.instance_ami 6 | availability_zone = local.availability_zone 7 | key_name = aws_key_pair.this.id # the name of the SSH keypair to use for provisioning 8 | vpc_security_group_ids = [aws_security_group.this.id] 9 | subnet_id = data.aws_subnet.this.id 10 | user_data = sha1(local.reprovision_trigger) # this value isn't used by the EC2 instance, but its change will trigger re-creation of the resource 11 | tags = merge(var.tags, { Name = var.hostname }) # make this resource easier to identify in the AWS Console (tag "Name" is effectively "display name" in some services) 12 | volume_tags = merge(var.tags, { Name = var.hostname }) # ^ ditto 13 | 14 | root_block_device { 15 | volume_size = var.root_volume_size 16 | } 17 | 18 | connection { 19 | host = coalesce(self.public_ip, self.private_ip) 20 | type = "ssh" 21 | user = var.ssh_username 22 | private_key = file(var.ssh_private_key_path) 23 | agent = false # don't use SSH agent because we have the private key right here 24 | } 25 | 26 | provisioner "remote-exec" { 27 | inline = [ 28 | "sudo hostnamectl set-hostname ${var.hostname}", 29 | "echo 127.0.0.1 ${var.hostname} | sudo tee -a /etc/hosts", # https://askubuntu.com/a/59517 30 | ] 31 | } 32 | 33 | provisioner "remote-exec" { 34 | script = "${path.module}/provision-docker.sh" 35 | } 36 | 37 | provisioner "file" { 38 | source = "${path.module}/provision-swap.sh" 39 | destination = "/home/${var.ssh_username}/provision-swap.sh" 40 | } 41 | 42 | provisioner "remote-exec" { 43 | inline = [ 44 | "sh /home/${var.ssh_username}/provision-swap.sh ${var.swap_file_size} ${var.swap_swappiness}", 45 | "rm /home/${var.ssh_username}/provision-swap.sh", 46 | ] 47 | } 48 | } 49 | 50 | # Attach the separate data volume to the instance, if so configured 51 | 52 | resource "aws_volume_attachment" "this" { 53 | count = var.data_volume_id == "" ? 
0 : 1 # only create this resource if an external EBS data volume was provided 54 | device_name = "/dev/xvdh" # note: this depends on the AMI, and can't be arbitrarily changed 55 | instance_id = aws_instance.this.id 56 | volume_id = var.data_volume_id 57 | } 58 | 59 | resource "null_resource" "provisioners" { 60 | count = var.data_volume_id == "" ? 0 : 1 # only create this resource if an external EBS data volume was provided 61 | depends_on = [aws_volume_attachment.this] # because we depend on the EBS volume being available 62 | 63 | connection { 64 | host = aws_instance.this.public_ip # see https://github.com/hashicorp/terraform/issues/23679 for discussion on the deprecation warning for this line 65 | user = var.ssh_username 66 | private_key = file(var.ssh_private_key_path) 67 | agent = false # don't use SSH agent because we have the private key right here 68 | } 69 | 70 | # When creating the attachment 71 | provisioner "remote-exec" { 72 | script = "${path.module}/provision-ebs.sh" 73 | } 74 | 75 | # When tearing down the attachment 76 | provisioner "remote-exec" { 77 | when = destroy 78 | inline = ["sudo umount -v ${aws_volume_attachment.this[0].device_name}"] 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /aws_lambda_api/variables.tf: -------------------------------------------------------------------------------- 1 | variable "api_domain" { 2 | description = "Domain on which the Lambda will be made available (e.g. `\"api.example.com\"`)" 3 | } 4 | 5 | variable "name_prefix" { 6 | description = "Name prefix to use for objects that need to be created (only lowercase alphanumeric characters and hyphens allowed, for S3 bucket name compatibility); if omitted, a random, unique one will be used" 7 | default = "" 8 | } 9 | 10 | variable "comment_prefix" { 11 | description = "This will be included in comments for resources that are created" 12 | default = "Lambda API: " 13 | } 14 | 15 | variable "function_zipfile" { 16 | description = "Path to a ZIP file that will be installed as the Lambda function (e.g. 
`\"my-api.zip\"`)" 17 | } 18 | 19 | variable "function_s3_bucket" { 20 | description = "When provided, the zipfile is retrieved from an S3 bucket by this name instead (filename is still provided via `function_zipfile`)" 21 | default = "" 22 | } 23 | 24 | variable "function_handler" { 25 | description = "Instructs Lambda on which function to invoke within the ZIP file" 26 | default = "index.handler" 27 | } 28 | 29 | variable "function_timeout" { 30 | description = "The amount of time your Lambda Function has to run in seconds" 31 | default = 3 32 | } 33 | 34 | variable "memory_size" { 35 | description = "Amount of memory in MB your Lambda Function can use at runtime" 36 | default = 128 37 | } 38 | 39 | variable "function_runtime" { 40 | description = "Which node.js version should Lambda use for this function" 41 | default = "nodejs12.x" 42 | } 43 | 44 | variable "function_env_vars" { 45 | description = "Which env vars (if any) to invoke the Lambda with" 46 | type = map(string) 47 | 48 | default = { 49 | # This effectively useless, but an empty map can't be used in the "aws_lambda_function" resource 50 | # -> this is 100% safe to override with your own env, should you need one 51 | aws_lambda_api = "" 52 | } 53 | } 54 | 55 | variable "stage_name" { 56 | description = "Name of the single stage created for the API on API Gateway" # we're not using the deployment features of API Gateway, so a single static stage is fine 57 | default = "default" 58 | } 59 | 60 | variable "lambda_logging_enabled" { 61 | description = "When true, writes any console output to the Lambda function's CloudWatch group" 62 | default = false 63 | } 64 | 65 | variable "api_gateway_logging_level" { 66 | description = "Either `\"OFF\"`, `\"INFO\"` or `\"ERROR\"`; note that this requires having a CloudWatch log role ARN globally in API Gateway Settings" 67 | default = "OFF" 68 | } 69 | 70 | variable "api_gateway_cloudwatch_metrics" { 71 | description = "When true, sends metrics to CloudWatch" 72 | default = false 73 | } 74 | 75 | variable "api_gateway_endpoint_config" { 76 | description = "Either `\"EDGE\"`, `\"REGIONAL\"` or `\"PRIVATE\"`; see https://docs.aws.amazon.com/apigateway/latest/developerguide/create-regional-api.html" 77 | default = "EDGE" 78 | } 79 | 80 | variable "tags" { 81 | description = "AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/" 82 | type = map(string) 83 | default = {} 84 | } 85 | 86 | variable "throttling_rate_limit" { 87 | description = "How many sustained requests per second should the API process at most; see https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-request-throttling.html" 88 | default = 10000 89 | } 90 | 91 | variable "throttling_burst_limit" { 92 | description = "How many burst requests should the API process at most; see https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-request-throttling.html" 93 | default = 5000 94 | } 95 | -------------------------------------------------------------------------------- /aws_ec2_ebs_docker_host/variables.tf: -------------------------------------------------------------------------------- 1 | # Whenever the contents of this block changes, the host should be re-provisioned 2 | locals { 3 | reprovision_trigger = <<-EOF 4 | # Trigger reprovision on variable changes: 5 | ${var.hostname} 6 | ${var.ssh_username} 7 | ${var.ssh_private_key_path} 8 | ${var.ssh_public_key_path} 9 | ${var.swap_file_size} 10 | 
${var.swap_swappiness} 11 | ${var.reprovision_trigger} 12 | # Trigger reprovision on file changes: 13 | ${file("${path.module}/provision-docker.sh")} 14 | ${file("${path.module}/provision-ebs.sh")} 15 | ${file("${path.module}/provision-swap.sh")} 16 | EOF 17 | } 18 | 19 | locals { 20 | availability_zone = data.aws_availability_zones.this.names[0] # use the first available AZ in the region (AWS ensures this is constant per user) 21 | } 22 | 23 | variable "hostname" { 24 | description = "Hostname by which this service is identified in metrics, logs etc" 25 | default = "aws-ec2-ebs-docker-host" 26 | } 27 | 28 | variable "instance_type" { 29 | description = "See https://aws.amazon.com/ec2/instance-types/ for options; for example, typical values for small workloads are `\"t2.nano\"`, `\"t2.micro\"`, `\"t2.small\"`, `\"t2.medium\"`, and `\"t2.large\"`" 30 | default = "t2.micro" 31 | } 32 | 33 | variable "instance_ami" { 34 | description = "See https://cloud-images.ubuntu.com/locator/ec2/ for options" 35 | default = "ami-0c4c42893066a139e" # Ubuntu 20.04 LTS (eu-west-1, amd64, hvm:ebs-ssd, 2020-09-24) 36 | } 37 | 38 | variable "ssh_private_key_path" { 39 | description = "SSH private key file path, relative to Terraform project root" 40 | default = "ssh.private.key" 41 | } 42 | 43 | variable "ssh_public_key_path" { 44 | description = "SSH public key file path, relative to Terraform project root" 45 | default = "ssh.public.key" 46 | } 47 | 48 | variable "ssh_username" { 49 | description = "Default username built into the AMI (see 'instance_ami')" 50 | default = "ubuntu" 51 | } 52 | 53 | variable "vpc_id" { 54 | description = "ID of the VPC our host should join; if empty, joins your Default VPC" 55 | default = "" 56 | } 57 | 58 | variable "reprovision_trigger" { 59 | description = "An arbitrary string value; when this value changes, the host needs to be reprovisioned" 60 | default = "" 61 | } 62 | 63 | variable "root_volume_size" { 64 | description = "Size (in GiB) of the EBS volume that will be created and mounted as the root fs for the host" 65 | default = 8 # this matches the other defaults, including the selected AMI 66 | } 67 | 68 | variable "data_volume_id" { 69 | description = "The ID of the EBS volume to mount as `/data`" 70 | default = "" # empty string means no EBS volume will be attached 71 | } 72 | 73 | variable "swap_file_size" { 74 | description = "Size of the swap file allocated on the root volume" 75 | default = "512M" # a smallish default to match default 8 GiB EBS root volume 76 | } 77 | 78 | variable "swap_swappiness" { 79 | description = "Swappiness value provided when creating the swap file" 80 | default = "10" # 100 will make the host use the swap as much as possible, 0 will make it use only in case of emergency 81 | } 82 | 83 | variable "allow_incoming_http" { 84 | description = "Whether to allow incoming HTTP traffic on the host security group" 85 | default = false 86 | } 87 | 88 | variable "allow_incoming_https" { 89 | description = "Whether to allow incoming HTTPS traffic on the host security group" 90 | default = false 91 | } 92 | 93 | variable "allow_incoming_dns" { 94 | description = "Whether to allow incoming DNS traffic on the host security group" 95 | default = false 96 | } 97 | 98 | variable "tags" { 99 | description = "AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/" 100 | type = map(string) 101 | default = {} 102 | } 103 | 
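# For reference, a minimal instantiation of this module (mirroring the example in
# docker_compose_host/README.md); the source ref below is an assumption and should point
# at whichever release you have pinned:
#
#   module "my_host" {
#     source               = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_ec2_ebs_docker_host?ref=v13.1"
#     hostname             = "my-docker-host"
#     ssh_private_key_path = "~/.ssh/id_rsa"
#     ssh_public_key_path  = "~/.ssh/id_rsa.pub"
#     allow_incoming_http  = true
#   }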
-------------------------------------------------------------------------------- /aws_lambda_cronjob/README.md: -------------------------------------------------------------------------------- 1 | # aws_lambda_cronjob 2 | 3 | This module creates a Lambda function, and configures it to be invoked on a schedule. 4 | 5 | ## Example 1: Simple cronjob 6 | 7 | First, write down some simple code to deploy in a file called `index.js`: 8 | 9 | ```js 10 | exports.handler = function(event, context, callback) { 11 | console.log("Lambda function event:", event); 12 | console.log("Lambda function context:", context); 13 | callback(null); 14 | }; 15 | ``` 16 | 17 | Assuming you have the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html) set up: 18 | 19 | ```tf 20 | # Lambda functions can only be uploaded as ZIP files, so we need to package our JS file into one 21 | data "archive_file" "lambda_zip" { 22 | type = "zip" 23 | source_file = "${path.module}/index.js" 24 | output_path = "${path.module}/lambda.zip" 25 | } 26 | 27 | module "my_cronjob" { 28 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_lambda_cronjob#inputs 29 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 30 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_lambda_cronjob?ref=v13.1" 31 | 32 | schedule_expression = "rate(5 minutes)" # note: full cron expressions are also supported 33 | function_zipfile = data.archive_file.lambda_zip.output_path 34 | lambda_logging_enabled = true 35 | } 36 | ``` 37 | 38 | After `terraform apply`, because we included the `lambda_logging_enabled` option, you can log into CloudWatch and check out the properties Lambda makes available in the `event` and `context` properties. 39 | 40 | ## Example 2: Other options for deploying code 41 | 42 | As this module is a close relative of [`aws_lambda_api`](../aws_lambda_api), the other options for deploying code are equally applicable here. 43 | 44 | 45 | ## Inputs 46 | 47 | | Name | Description | Type | Default | Required | 48 | |------|-------------|------|---------|:--------:| 49 | | name_prefix | Name prefix to use for objects that need to be created (only lowercase alphanumeric characters and hyphens allowed, for S3 bucket name compatibility) | `string` | `""` | no | 50 | | comment_prefix | This will be included in comments for resources that are created | `string` | `"Lambda Cronjob: "` | no | 51 | | schedule_expression | How often to run the Lambda (see https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html); e.g. `"rate(15 minutes)"` or `"cron(0 12 * * ? *)"` | `string` | `"rate(60 minutes)"` | no | 52 | | function_zipfile | Path to a ZIP file that will be installed as the Lambda function (e.g. 
`"my-cronjob.zip"`) | `any` | n/a | yes | 53 | | function_s3_bucket | When provided, the zipfile is retrieved from an S3 bucket by this name instead (filename is still provided via `function_zipfile`) | `string` | `""` | no | 54 | | function_handler | Instructs Lambda on which function to invoke within the ZIP file | `string` | `"index.handler"` | no | 55 | | function_timeout | The amount of time your Lambda Function has to run in seconds | `number` | `3` | no | 56 | | memory_size | Amount of memory in MB your Lambda Function can use at runtime | `number` | `128` | no | 57 | | function_runtime | Which node.js version should Lambda use for this function | `string` | `"nodejs12.x"` | no | 58 | | function_env_vars | Which env vars (if any) to invoke the Lambda with | `map(string)` |
{ "aws_lambda_cronjob": "" }
| no | 59 | | lambda_logging_enabled | When true, writes any console output to the Lambda function's CloudWatch group | `bool` | `false` | no | 60 | | tags | AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/ | `map(string)` | `{}` | no | 61 | 62 | ## Outputs 63 | 64 | | Name | Description | 65 | |------|-------------| 66 | | function_name | This is the unique name of the Lambda function that was created | 67 | | function_role | The IAM role for the function created; can be used to attach additional policies/permissions | 68 | 69 | -------------------------------------------------------------------------------- /release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" # dir of this script 4 | 5 | set -e # exit on error 6 | 7 | echo -n "Checking dependencies... " 8 | deps="git curl node terraform terraform-docs" 9 | if [ "$(echo $deps | tr ' ' '\n' | wc -l)" != "$(command -v $deps | wc -l)" ]; then 10 | echo -e "ERROR\n\nRequired commands not available: $deps" 11 | exit 1 12 | fi 13 | echo OK 14 | 15 | echo -n "Running terraform fmt... " 16 | for dir in $(echo */); do 17 | terraform fmt $dir 18 | done 19 | echo OK 20 | 21 | echo -n "Checking for clean working copy... " 22 | if [ "$(git diff-index HEAD)" != "" ]; then 23 | echo -e "ERROR\n\nThere's uncommitted changes in the working copy" 24 | exit 1 25 | fi 26 | echo OK 27 | 28 | echo -n "Parsing git remote... " 29 | github_raw="$(git config --get remote.origin.url | sed 's/.*://' | sed 's/\..*//')" # e.g. "git@github.com:user/project.git" => "user/project" 30 | github_user="$(echo "$github_raw" | cut -d / -f 1)" 31 | github_project="$(echo "$github_raw" | cut -d / -f 2)" 32 | if [[ ! "$github_user" =~ ^[[:alnum:]-]+$ ]]; then 33 | echo -e "ERROR\n\nCan't seem to determine GitHub user name reliably: \"$github_user\"" 34 | exit 1 35 | fi 36 | if [[ ! "$github_project" =~ ^[[:alnum:]-]+$ ]]; then 37 | echo -e "ERROR\n\nCan't seem to determine GitHub project name reliably: \"$github_project\"" 38 | exit 1 39 | fi 40 | echo OK 41 | 42 | echo -n "Verifying GitHub API access... " 43 | github_test="$(curl -s -n -o /dev/null -w "%{http_code}" https://api.github.com/user)" 44 | if [ "$github_test" != "200" ]; then 45 | echo -e "ERROR\n\nPlease ensure that:" 46 | echo "* You've set up a Personal access token for the GitHub API (https://github.com/settings/tokens/new)" 47 | echo "* The resulting token is listed in your ~/.netrc file (under \"machine api.github.com\" and \"machine uploads.github.com\")" 48 | exit 1 49 | fi 50 | echo OK 51 | 52 | echo -n "Fetching previous tags from GitHub... " 53 | git fetch --tags --quiet 54 | tag_prev="$(git log --tags --simplify-by-decoration --pretty="%D" | grep tag: | cut -d ' ' -f 2 | head -n 1)" # figure out the latest tag (https://stackoverflow.com/a/6900369) 55 | echo OK 56 | 57 | echo 58 | echo "Previous release was: $tag_prev" 59 | echo -n "This release will be: " 60 | read tag_next 61 | echo 62 | 63 | echo -n "Tagging new release... " 64 | git tag "$tag_next" 65 | echo OK 66 | 67 | echo -n "Pushing release to GitHub... " 68 | git push --quiet origin master 69 | git push --quiet origin "$tag_next" 70 | echo OK 71 | 72 | echo -n "Creating release on GitHub... 
" # https://developer.github.com/v3/repos/releases/ 73 | curl -o curl-out -s -n -X POST "https://api.github.com/repos/$github_user/$github_project/releases" --data "{\"tag_name\":\"$tag_next\"}" 74 | release_html_url="$(cat curl-out | node -p 'JSON.parse(fs.readFileSync(0)).html_url')" 75 | if [[ ! "$release_html_url" =~ ^https:// ]]; then 76 | echo ERROR 77 | cat curl-out 78 | exit 1 79 | fi 80 | echo OK 81 | 82 | echo -n "Updating example code with new release... " 83 | find "$DIR"/* -name README.md | xargs sed -i'.sed-bak' -E "s/\?ref=v[0-9.]+\"/?ref=$tag_next\"/g" # update all "source" links in examples 84 | find "$DIR"/* -name README.md | xargs sed -i'.sed-bak' -E "s#/compare/v[0-9.]+...master#/compare/$tag_next...master#g" # update all "check for updates" links in examples 85 | echo OK 86 | 87 | echo -n "Updating Terraform module docs... " 88 | for file in $(find $DIR/* -name README.md); do 89 | perl -i -p0e "s/terraform-docs:begin.*?terraform-docs:end/terraform-docs:begin -->\n$( terraform-docs markdown --hide providers --hide requirements --sort=false $(dirname $file) | sed 's#/#\\/#g' | sed 's#u0026#\&#g' )\n<\!-- terraform-docs:end/s" "$file" 90 | done 91 | echo OK 92 | 93 | echo -n "Creating commit from docs updates... " 94 | git add */README.md 95 | git commit --quiet --message "Update docs & examples for $tag_next." 96 | echo OK 97 | 98 | echo -n "Pushing updated docs to GitHub... " 99 | git push --quiet origin 100 | echo OK 101 | 102 | echo -n "Cleaning up... " 103 | rm -f curl-out # remove curl temp file 104 | find . -name README.md.sed-bak | xargs rm -f # remove sed's backup files 105 | echo OK 106 | 107 | echo 108 | echo "New release is: $release_html_url" 109 | echo 110 | -------------------------------------------------------------------------------- /docker_compose_host/README.md: -------------------------------------------------------------------------------- 1 | # docker_compose_host 2 | 3 | Provisions an existing host to run services defined in a `docker-compose.yml` file. 4 | 5 | This is a convenient companion to [`aws_ec2_ebs_docker_host`](https://github.com/futurice/terraform-utils/tree/master/aws_ec2_ebs_docker_host), though any Debian-like host reachable over SSH should work. 6 | 7 | Changing the contents of your `docker-compose.yml` file (or any other variables defined for this module) will trigger re-creation of the containers on the next `terraform apply`. 
8 | 9 | ## Example 10 | 11 | Assuming you have the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html) set up: 12 | 13 | ```tf 14 | module "my_host" { 15 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/docker_compose_host#inputs 16 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 17 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_ec2_ebs_docker_host?ref=v13.1" 18 | 19 | hostname = "my-docker-host" 20 | ssh_private_key_path = "~/.ssh/id_rsa" 21 | ssh_public_key_path = "~/.ssh/id_rsa.pub" 22 | allow_incoming_http = true 23 | } 24 | 25 | module "my_docker_compose" { 26 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/docker_compose_host#inputs 27 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 28 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//docker_compose_host?ref=v13.1" 29 | 30 | public_ip = module.my_host.public_ip 31 | ssh_username = module.my_host.ssh_username 32 | ssh_private_key = module.my_host.ssh_private_key 33 | docker_compose_yml = file("./docker-compose.yml") 34 | } 35 | 36 | output "test_link" { 37 | value = "http://${module.my_host.public_ip}/" 38 | } 39 | ``` 40 | 41 | In your `docker-compose.yml` file, try: 42 | 43 | ```yml 44 | version: "3" 45 | services: 46 | nginx: 47 | image: nginx 48 | ports: 49 | - "80:80" 50 | ``` 51 | 52 | After a `terraform apply`, you should be able to visit the `test_link` and see nginx greeting you. 53 | 54 | Any changes to the compose file trigger re-provisioning of the services. For example, try changing your services to: 55 | 56 | ```yml 57 | version: "3" 58 | services: 59 | whoami: 60 | image: jwilder/whoami 61 | ports: 62 | - "80:8000" 63 | ``` 64 | 65 | When running `terraform apply`, the previous `nginx` service will be stopped and removed, and then the new `whoami` service will be started in its stead. Visiting the `test_link` URL again should give you a different result now. 
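You can also pass configuration into your compose file via the `docker_compose_env` input (standard `.env` file syntax, see the inputs below), which docker-compose substitutes into `docker-compose.yml` on the host. A minimal sketch, where the `NGINX_PORT` variable name is just an illustrative choice:

```tf
module "my_docker_compose" {
  # Available inputs: https://github.com/futurice/terraform-utils/tree/master/docker_compose_host#inputs
  # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master
  source = "git::ssh://git@github.com/futurice/terraform-utils.git//docker_compose_host?ref=v13.1"

  public_ip          = module.my_host.public_ip
  ssh_username       = module.my_host.ssh_username
  ssh_private_key    = module.my_host.ssh_private_key
  docker_compose_yml = file("./docker-compose.yml")

  # Standard .env syntax; docker-compose substitutes these into docker-compose.yml,
  # so a port mapping written as "${NGINX_PORT}:80" in the compose file becomes "8080:80"
  docker_compose_env = <<-EOF
    NGINX_PORT=8080
  EOF
}
```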
66 | 67 | 68 | ## Inputs 69 | 70 | | Name | Description | Type | Default | Required | 71 | |------|-------------|------|---------|:--------:| 72 | | public_ip | Public IP address of a host running docker | `any` | n/a | yes | 73 | | ssh_username | SSH username, which can be used for provisioning the host | `string` | `"ubuntu"` | no | 74 | | ssh_private_key | SSH private key, which can be used for provisioning the host | `any` | n/a | yes | 75 | | docker_compose_version | Version of docker-compose to install during provisioning (see https://github.com/docker/compose/releases) | `string` | `"1.23.2"` | no | 76 | | docker_compose_env | Env-vars (in `.env` file syntax) that will be substituted into docker-compose.yml (see https://docs.docker.com/compose/environment-variables/#the-env-file) | `string` | `"# No env-vars set"` | no | 77 | | docker_compose_yml | Contents for the `docker-compose.yml` file | `any` | n/a | yes | 78 | | docker_compose_override_yml | Contents for the `docker-compose.override.yml` file (see https://docs.docker.com/compose/extends/#multiple-compose-files) | `string` | `""` | no | 79 | | docker_compose_up_command | Command to start services with; you can customize this to do work before/after, or to disable this completely in favor of your own provisioning scripts | `string` | `"docker-compose pull --quiet && docker-compose up -d"` | no | 80 | | docker_compose_down_command | Command to remove services with; will be run during un- or re-provisioning | `string` | `"docker-compose stop && docker-compose rm -f"` | no | 81 | 82 | ## Outputs 83 | 84 | | Name | Description | 85 | |------|-------------| 86 | | reprovision_trigger | Stringified version of all docker-compose configuration used for this host; can be used as the `reprovision_trigger` input to an `aws_ec2_ebs_docker_host` module | 87 | 88 | -------------------------------------------------------------------------------- /aws_domain_redirect/README.md: -------------------------------------------------------------------------------- 1 | # aws_domain_redirect 2 | 3 | This module implements a domain that redirects clients to another URL. Useful for creating human-friendly shortcuts for deeper links into a site, or for dynamic links (e.g. `download.example.com` always pointing to your latest release). 4 | 5 | Main features: 6 | 7 | - DNS entries are created automatically 8 | - HTTPS enabled by default 9 | - HTTP Strict Transport Security supported 10 | 11 | Optional features: 12 | 13 | - Plain HTTP instead of HTTPS 14 | - Sending a permanent redirect (`301 Moved Permanently`) instead of default (`302 Found`) 15 | 16 | Resources used: 17 | 18 | - Route53 for DNS entries 19 | - ACM for SSL certificates 20 | - CloudFront for proxying requests 21 | - Lambda@Edge for transforming requests 22 | - IAM for permissions 23 | 24 | ## About CloudFront operations 25 | 26 | This module manages CloudFront distributions, and these operations are generally very slow. Your `terraform apply` may take anywhere from a few minutes **up to 45 minutes** (if you're really unlucky). Be patient: if they start successfully, they almost always finish successfully, it just takes a while. 27 | 28 | Additionally, this module uses Lambda@Edge functions with CloudFront. Because Lambda@Edge functions are replicated, [they can't be deleted immediately](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-edge-delete-replicas.html). This means a `terraform destroy` won't successfully remove all resources on its first run. 
It should complete successfully when running it again after a few hours, however. 29 | 30 | ## Example 31 | 32 | Assuming you have the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html) set up, and a DNS zone for `example.com` configured on Route 53: 33 | 34 | ```tf 35 | # Lambda@Edge and ACM, when used with CloudFront, need to be used in the US East region. 36 | # Thus, we need a separate AWS provider for that region, which can be used with an alias. 37 | # Make sure you customize this block to match your regular AWS provider configuration. 38 | # https://www.terraform.io/docs/configuration/providers.html#multiple-provider-instances 39 | provider "aws" { 40 | alias = "us_east_1" 41 | region = "us-east-1" 42 | } 43 | 44 | module "my_redirect" { 45 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_domain_redirect#inputs 46 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 47 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_domain_redirect?ref=v13.1" 48 | providers = { aws.us_east_1 = aws.us_east_1 } # this alias is needed because ACM is only available in the "us-east-1" region 49 | 50 | redirect_domain = "go.example.com" 51 | redirect_url = "https://www.futurice.com/careers/" 52 | } 53 | ``` 54 | 55 | Applying this will take a long time, because both ACM and especially CloudFront are quite slow to update. After that, both `http://go.example.com` and `https://go.example.com` should redirect clients to `https://www.futurice.com/careers/`. 56 | 57 | 58 | ## Inputs 59 | 60 | | Name | Description | Type | Default | Required | 61 | |------|-------------|------|---------|:--------:| 62 | | redirect_domain | Domain which will redirect to the given `redirect_url`; e.g. `"docs.example.com"` | `any` | n/a | yes | 63 | | redirect_url | The URL this domain redirect should send clients to; e.g. 
`"https://readthedocs.org/projects/example"` | `any` | n/a | yes | 64 | | name_prefix | Name prefix to use for objects that need to be created (only lowercase alphanumeric characters and hyphens allowed, for S3 bucket name compatibility) | `string` | `""` | no | 65 | | comment_prefix | This will be included in comments for resources that are created | `string` | `"Domain redirect: "` | no | 66 | | cloudfront_price_class | Price class to use (`100`, `200` or `"All"`, see https://aws.amazon.com/cloudfront/pricing/) | `number` | `100` | no | 67 | | viewer_https_only | Set this to `false` if you need to support insecure HTTP access for clients, in addition to HTTPS | `bool` | `true` | no | 68 | | redirect_permanently | Which HTTP status code to use for the redirect; if `true`, uses `301 Moved Permanently`, instead of `302 Found` | `bool` | `false` | no | 69 | | hsts_max_age | How long should `Strict-Transport-Security` remain in effect for the site; disabled automatically when `viewer_https_only = false` | `number` | `31557600` | no | 70 | | lambda_logging_enabled | When `true`, writes information about incoming requests to the Lambda function's CloudWatch group | `bool` | `false` | no | 71 | | tags | AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/ | `map(string)` | `{}` | no | 72 | 73 | ## Outputs 74 | 75 | | Name | Description | 76 | |------|-------------| 77 | | reverse_proxy | CloudFront-based reverse-proxy that's used for implementing the redirect | 78 | 79 | -------------------------------------------------------------------------------- /aws_reverse_proxy/lambda.tpl.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | // Lambda@Edge doesn't support environment variables, so this config will be expanded from a Terraform template 4 | const config = ${config}; 5 | const addResponseHeaders = ${add_response_headers}; 6 | const validCredentials = config.basic_auth_username + ':' + config.basic_auth_password; 7 | const validAuthHeader = 'Basic ' + Buffer.from(validCredentials).toString('base64'); 8 | const hstsHeaders = 9 | config.hsts_max_age && config.viewer_https_only 10 | ? { 'Strict-Transport-Security': 'max-age=' + config.hsts_max_age + '; preload' } 11 | : {}; // don't send HSTS headers when vanilla HTTP is allowed, as that'll make the vanilla HTTP site unavailable for anyone having visited the HTTPS one! 
12 | 13 | log('aws_reverse_proxy.config', { config, addResponseHeaders }); 14 | 15 | // Handle incoming request from the client 16 | exports.viewer_request = (event, context, callback) => { 17 | const request = event.Records[0].cf.request; 18 | const headers = request.headers; 19 | 20 | log('aws_reverse_proxy.viewer_request.before', request); 21 | 22 | if (config.override_response_code && config.override_response_status && config.override_response_body) { 23 | const response = { 24 | status: config.override_response_code, 25 | statusDescription: config.override_response_status, 26 | body: config.override_response_body, 27 | headers: { 28 | ...formatHeaders(hstsHeaders), 29 | ...formatHeaders(addResponseHeaders), 30 | }, 31 | }; 32 | callback(null, response); // reply to the client with the overridden content, and don't forward request to origin at all 33 | log('aws_reverse_proxy.viewer_request.after', response); 34 | } else if ( 35 | (config.basic_auth_username || config.basic_auth_password) && 36 | (typeof headers.authorization == 'undefined' || headers.authorization[0].value != validAuthHeader) && 37 | !(request.method === 'OPTIONS' && getHeader(headers, 'access-control-request-method')) // as per the CORS spec, pre-flight requests are sent without credentials (https://stackoverflow.com/a/15734032) -> always allow them 38 | ) { 39 | const response = { 40 | status: '401', 41 | statusDescription: 'Unauthorized', 42 | body: config.basic_auth_body, 43 | headers: { 44 | ...formatHeaders(hstsHeaders), 45 | ...formatHeaders(addResponseHeaders), 46 | ...formatHeaders({ 47 | 'WWW-Authenticate': 'Basic realm="' + config.basic_auth_realm + '", charset="UTF-8"', 48 | }), 49 | }, 50 | }; 51 | callback(null, response); // reply to the client with Unauthorized, and don't forward request to origin 52 | log('aws_reverse_proxy.viewer_request.after', response); 53 | } else { 54 | callback(null, request); // allow the request to be forwarded to origin normally 55 | log('aws_reverse_proxy.viewer_request.after', 'OK'); 56 | } 57 | }; 58 | 59 | // Handle outgoing response to the client 60 | exports.origin_response = (event, context, callback) => { 61 | const response = event.Records[0].cf.response; 62 | 63 | log('aws_reverse_proxy.origin_response.before', response); 64 | 65 | // Add any additional headers: 66 | response.headers = { 67 | ...formatHeaders(hstsHeaders), 68 | ...response.headers, 69 | ...formatHeaders(addResponseHeaders), 70 | }; 71 | 72 | // Remove headers that have an override value of "" completely: 73 | Object.keys(addResponseHeaders).forEach(header => { 74 | if (!addResponseHeaders[header]) delete response.headers[header.toLowerCase()]; 75 | }); 76 | 77 | // Override status code (if so configured): 78 | if (!config.override_only_on_code || new RegExp(config.override_only_on_code).test(response.status)) { 79 | response.status = config.override_response_code || response.status; 80 | response.statusDescription = config.override_response_status || response.statusDescription; 81 | } 82 | 83 | log('aws_reverse_proxy.origin_response.after', response); 84 | 85 | callback(null, response); 86 | }; 87 | 88 | // Outputs incoming/outgoing requests for debugging 89 | function log(label, meta) { 90 | console.log(label, require('util').inspect(meta, false, 10, false)); 91 | } 92 | 93 | // Converts a set of headers into the rather-verbose format CloudFront expects; headers with "" as the value are dropped 94 | function formatHeaders(headers) { 95 | return Object.keys(headers) 96 | .filter(next => 
headers[next] !== '') 97 | .reduce( 98 | (memo, next) => 99 | Object.assign(memo, { 100 | [next.toLowerCase()]: [{ key: next, value: headers[next] }], 101 | }), 102 | {}, 103 | ); 104 | } 105 | 106 | // Reads a header value (very safely) from the CloudFront-formatted headers object 107 | function getHeader(headers, key) { 108 | return headers[key] && headers[key][0] && headers[key][0].value || ''; 109 | } 110 | -------------------------------------------------------------------------------- /aws_reverse_proxy/lambda.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | config = { 3 | hsts_max_age = var.hsts_max_age 4 | basic_auth_username = var.basic_auth_username 5 | basic_auth_password = var.basic_auth_password 6 | basic_auth_realm = var.basic_auth_realm 7 | basic_auth_body = var.basic_auth_body 8 | override_response_code = var.override_response_code 9 | override_response_status = var.override_response_status 10 | override_response_body = var.override_response_body 11 | override_only_on_code = var.override_only_on_code 12 | } 13 | } 14 | 15 | # Lambda@Edge functions don't support environment variables, so let's inline the relevant parts of the config to the JS file. 16 | # (see: "error creating CloudFront Distribution: InvalidLambdaFunctionAssociation: The function cannot have environment variables") 17 | data "template_file" "lambda" { 18 | template = file("${path.module}/lambda.tpl.js") 19 | 20 | vars = { 21 | config = jsonencode(local.config) # single quotes need to be escaped, lest we end up with a parse error on the JS side 22 | add_response_headers = jsonencode(var.add_response_headers) # ^ ditto 23 | } 24 | } 25 | 26 | # This is to ensure there's a unique component to the filename, so that invoking the module multiple times doesn't interfere with other instances 27 | resource "random_string" "zipfile_name" { 28 | length = 32 29 | special = false 30 | } 31 | 32 | # Lambda functions can only be uploaded as ZIP files, so we need to package our JS file into one 33 | data "archive_file" "lambda_zip" { 34 | type = "zip" 35 | output_path = "${path.module}/lambda-${random_string.zipfile_name.result}.zip" 36 | 37 | source { 38 | filename = "lambda.js" 39 | content = data.template_file.lambda.rendered 40 | } 41 | } 42 | 43 | # This resource doesn't actually do anything (as is (kind of) the case with null_resource's anyway). 44 | # It merely exists to make Terraform plans more informative: because the Lambda@Edge config is baked 45 | # into the JS template, normally you would just see the opaque source_code_hash changing in the plan. 46 | # With this, you'll actually see which config/header is being changed. 
47 | resource "null_resource" "cloudfront_lambda_at_edge" { 48 | triggers = merge( 49 | local.config, 50 | { add_response_headers = jsonencode(var.add_response_headers) } 51 | ) 52 | } 53 | 54 | resource "aws_lambda_function" "viewer_request" { 55 | provider = aws.us_east_1 # This alias is needed because Lambda@Edge is only available in the "us-east-1" region 56 | 57 | filename = data.archive_file.lambda_zip.output_path 58 | source_code_hash = data.archive_file.lambda_zip.output_base64sha256 59 | function_name = "${local.name_prefix}-request-handler" 60 | role = aws_iam_role.this.arn 61 | description = "${var.comment_prefix}${var.site_domain} (request handler)" 62 | handler = "lambda.viewer_request" 63 | runtime = "nodejs12.x" 64 | publish = true # because: error creating CloudFront Distribution: InvalidLambdaFunctionAssociation: The function ARN must reference a specific function version. (The ARN must end with the version number.) 65 | tags = var.tags 66 | } 67 | 68 | resource "aws_lambda_function" "origin_response" { 69 | provider = aws.us_east_1 # This alias is needed because Lambda@Edge is only available in the "us-east-1" region 70 | 71 | filename = data.archive_file.lambda_zip.output_path 72 | source_code_hash = data.archive_file.lambda_zip.output_base64sha256 73 | function_name = "${local.name_prefix}-response-handler" 74 | role = aws_iam_role.this.arn 75 | description = "${var.comment_prefix}${var.site_domain} (response handler)" 76 | handler = "lambda.origin_response" 77 | runtime = "nodejs12.x" 78 | publish = true # because: error creating CloudFront Distribution: InvalidLambdaFunctionAssociation: The function ARN must reference a specific function version. (The ARN must end with the version number.) 79 | tags = var.tags 80 | } 81 | 82 | # Allow Lambda@Edge to invoke our functions 83 | resource "aws_iam_role" "this" { 84 | name = local.name_prefix 85 | tags = var.tags 86 | 87 | assume_role_policy = <= 0 ? var.cache_ttl_override : 0 122 | } 123 | 124 | # Because CloudFront origins expect the URL to be provided as components, we need to do a bit of URL "parsing" 125 | locals { 126 | url_protocol = replace(var.origin_url, "/^(?:(\\w+):\\/\\/).*/", "$1") 127 | url_hostname = replace(var.origin_url, "/^(?:\\w+:\\/\\/)?([^/]+).*/", "$1") 128 | url_path = replace( 129 | var.origin_url, 130 | "/^(?:\\w+:\\/\\/)?[^/]+(?:\\/(.*)|$)/", 131 | "$1", 132 | ) 133 | } 134 | -------------------------------------------------------------------------------- /aws_reverse_proxy/cloudfront.tf: -------------------------------------------------------------------------------- 1 | # Create the CloudFront distribution through which the site contents will be served 2 | # https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html 3 | resource "aws_cloudfront_distribution" "this" { 4 | enabled = true 5 | is_ipv6_enabled = true 6 | default_root_object = var.default_root_object 7 | aliases = [var.site_domain] 8 | price_class = "PriceClass_${var.cloudfront_price_class}" 9 | comment = "${var.comment_prefix}${var.site_domain}" 10 | tags = var.tags 11 | 12 | # Define the "upstream" for the CloudFront distribution 13 | origin { 14 | domain_name = local.url_hostname 15 | origin_id = "default" 16 | origin_path = local.url_path == "" ? "" : "/${local.url_path}" 17 | 18 | custom_origin_config { 19 | http_port = var.origin_custom_port > 0 ? var.origin_custom_port : 80 20 | https_port = var.origin_custom_port > 0 ? 
var.origin_custom_port : 443 21 | origin_protocol_policy = "${local.url_protocol}-only" 22 | origin_ssl_protocols = ["TLSv1", "TLSv1.1", "TLSv1.2"] 23 | } 24 | 25 | custom_header { 26 | name = var.origin_custom_header_name 27 | value = var.origin_custom_header_value 28 | } 29 | } 30 | 31 | # Define how to serve the content to clients 32 | default_cache_behavior { 33 | allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"] 34 | cached_methods = ["GET", "HEAD"] 35 | target_origin_id = "default" 36 | viewer_protocol_policy = var.viewer_https_only ? "redirect-to-https" : "allow-all" 37 | compress = true 38 | 39 | min_ttl = var.cache_ttl_override >= 0 ? var.cache_ttl_override : 0 # for reference: AWS default is 0 40 | default_ttl = var.cache_ttl_override >= 0 ? var.cache_ttl_override : 0 # for reference: AWS default is 86400 (i.e. one day) 41 | max_ttl = var.cache_ttl_override >= 0 ? var.cache_ttl_override : 86400 # i.e. 1 day; for reference: AWS default is 31536000 (i.e. one year) 42 | 43 | forwarded_values { 44 | query_string = true 45 | 46 | cookies { 47 | forward = "all" 48 | } 49 | } 50 | 51 | # Note: This will make the Lambda undeletable, as long as this distribution/association exists 52 | # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-edge-delete-replicas.html 53 | lambda_function_association { 54 | event_type = "viewer-request" # one of [ viewer-request, origin-request, viewer-response, origin-response ] 55 | lambda_arn = "${aws_lambda_function.viewer_request.arn}:${aws_lambda_function.viewer_request.version}" 56 | } 57 | 58 | # Note: This will make the Lambda undeletable, as long as this distribution/association exists 59 | # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-edge-delete-replicas.html 60 | lambda_function_association { 61 | event_type = "origin-response" # one of [ viewer-request, origin-request, viewer-response, origin-response ] 62 | lambda_arn = "${aws_lambda_function.origin_response.arn}:${aws_lambda_function.origin_response.version}" 63 | } 64 | } 65 | 66 | # This (and other custom_error_response's below) are important, because otherwise CloudFront defaults to caching errors for 5 minutes. 67 | # This means that if you accidentally deploy broken code, your users will be stuck seeing the error regardless of how quickly you roll back. 68 | # Unless a "cache_ttl_override" is provided, we never cache errors. 
69 | # https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/custom-error-pages-expiration.html 70 | custom_error_response { 71 | error_code = 400 # == "Bad Request" 72 | error_caching_min_ttl = local.error_ttl 73 | } 74 | 75 | custom_error_response { 76 | error_code = 403 # == "Forbidden" 77 | error_caching_min_ttl = local.error_ttl 78 | } 79 | 80 | custom_error_response { 81 | error_code = 404 # == "Not Found" 82 | error_caching_min_ttl = local.error_ttl 83 | } 84 | 85 | custom_error_response { 86 | error_code = 405 # == "Method Not Allowed" 87 | error_caching_min_ttl = local.error_ttl 88 | } 89 | 90 | custom_error_response { 91 | error_code = 414 # == "Request-URI Too Long" 92 | error_caching_min_ttl = local.error_ttl 93 | } 94 | 95 | custom_error_response { 96 | error_code = 416 # == "Requested Range Not Satisfiable" 97 | error_caching_min_ttl = local.error_ttl 98 | } 99 | 100 | custom_error_response { 101 | error_code = 500 # == "Internal Server Error" 102 | error_caching_min_ttl = local.error_ttl 103 | } 104 | 105 | custom_error_response { 106 | error_code = 501 # == "Not Implemented" 107 | error_caching_min_ttl = local.error_ttl 108 | } 109 | 110 | custom_error_response { 111 | error_code = 502 # == "Bad Gateway" 112 | error_caching_min_ttl = local.error_ttl 113 | } 114 | 115 | custom_error_response { 116 | error_code = 503 # == "Service Unavailable" 117 | error_caching_min_ttl = local.error_ttl 118 | } 119 | 120 | custom_error_response { 121 | error_code = 504 # == "Gateway Timeout" 122 | error_caching_min_ttl = local.error_ttl 123 | } 124 | 125 | # This is mandatory in Terraform :shrug: 126 | restrictions { 127 | geo_restriction { 128 | restriction_type = "none" 129 | } 130 | } 131 | 132 | # Attach our auto-generated ACM certificate to the distribution 133 | # https://www.terraform.io/docs/providers/aws/r/cloudfront_distribution.html#viewer-certificate-arguments 134 | viewer_certificate { 135 | acm_certificate_arn = aws_acm_certificate_validation.this.certificate_arn 136 | ssl_support_method = "sni-only" 137 | minimum_protocol_version = "TLSv1.1_2016" 138 | } 139 | } 140 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Terraform Utils 2 | 3 | This repository contains reusable [Terraform](https://www.terraform.io/) utility modules, which are liberally licensed, and can be shared between projects. 4 | 5 | ## Design decisions 6 | 7 | 1. Each module should be thoroughly documented with a README - no source code dumps 8 | 1. Each module should have easy to use examples - for delicious copy-pasta 9 | 1. Modules need not offer infinite flexibility, but do one thing well - users can always make their own module using ours as a baseline 10 | 11 | ## Naming conventions 12 | 13 | ### Short version 14 | 15 | For each module in this repository, you can either: 16 | 17 | - Provide a `name_prefix` as input, and all resources created by that module will have names starting with that prefix, or 18 | - Not provide `name_prefix`, and all resource names will have a unique, generated prefix, guaranteed to not conflict with other resource names 19 | 20 | ### Longer version 21 | 22 | When creating resources on AWS, it's common to follow a hierarchical naming convention such as the following: 23 | 24 | 1. All resources related to your app have names starting with `my-app` 25 | 1. Then the environment (e.g. `dev` or `prod`) 26 | 1. Then the component/tier (e.g. 
`frontend` or `backend`) 27 | 1. Then a possible sub-component (e.g. `api` or `worker` for backend) 28 | 1. Then a possible name for the individual resource (e.g. `logs` or `content`) 29 | 30 | If you model your Terraform module structure in the same fashion, you might end up with something like this: 31 | 32 | ``` 33 | my-app 34 | ├── dev 35 | │   ├── backend 36 | │   │   ├── api 37 | │   │   └── worker 38 | │   │   └── logs 39 | │   └── frontend 40 | │   └── content 41 | └── prod 42 | ├── backend 43 | │   ├── api 44 | │   └── worker 45 | │   └── logs 46 | └── frontend 47 | └── content 48 | ``` 49 | 50 | And thus resource names like: 51 | 52 | ``` 53 | my-app-dev-backend 54 | my-app-dev-backend-api 55 | my-app-dev-backend-worker 56 | my-app-dev-backend-worker-logs 57 | my-app-dev-frontend 58 | my-app-dev-frontend-content 59 | my-app-prod-backend 60 | my-app-prod-backend-api 61 | my-app-prod-backend-worker 62 | my-app-prod-backend-worker-logs 63 | my-app-prod-frontend 64 | my-app-prod-frontend-content 65 | ``` 66 | 67 | An elegant way to implement this is to have each module take an input called `name_prefix`, and pass it along to its child modules. That is: 68 | 69 | 1. In your root module, set `name_prefix` to a default value: 70 | ``` 71 | variable "name_prefix" { 72 | default = "my-app" 73 | } 74 | ``` 75 | 1. When you instantiate your main module for different environments, you pass along `name_prefix` with the appropriate suffix: 76 | 77 | ``` 78 | module "dev" { 79 | name_prefix = "${var.name_prefix}-dev" 80 | } 81 | 82 | module "prod" { 83 | name_prefix = "${var.name_prefix}-prod" 84 | } 85 | ``` 86 | 87 | 1. Within that module, when you instantiate modules for backend & frontend, you again pass along `name_prefix`: 88 | ``` 89 | module "backend" { 90 | name_prefix = "${var.name_prefix}-backend" 91 | } 92 | ``` 93 | 1. And so on, for each level of the module hierarchy 94 | 1. On any level, when creating resources, you do so using the same prefix, for example creating an S3 bucket: 95 | ``` 96 | resource "aws_s3_bucket" "content" { 97 | bucket = "${var.name_prefix}-content" 98 | } 99 | ``` 100 | 101 | Thus, each module gets a dedicated namespace that's: 102 | 103 | - guaranteed to not conflict with resources from other modules 104 | - not tied to the top level namespace, facilitating reuse 105 | - easy to identify on the AWS web console as belonging to a specific env/component/etc 106 | - convenient for use with IAM permissions (e.g. granting dev env backend access to `my-app-dev-backend-*`, thus excluding the frontend component, and the production environment entirely) 107 | 108 | **All modules within this repository follow this convention**, taking a `name_prefix`, and passing it along to their child modules (if any). 109 | 110 | If you don't want to follow this convention, you can simply omit the `name_prefix` input. In that case, a unique name prefix is generated automatically (`"aws-static-site-2rdc7iqm"` for the `aws_static_site` module, for example), thus ensuring your resource names won't clash with those of others. 111 | 112 | ## Caveats 113 | 114 | - At the time of writing, [support for the `profile` property of the AWS provider is still... wonky](https://github.com/terraform-providers/terraform-provider-aws/issues/233), especially in cases where the provider needs to be aliased. Configuring your AWS provider via the standard `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables is recommended wholeheartedly. 115 | 116 | ## Versioning policy 117 | 118 | 1. 
New versions are released often, so users can pin their modules (using `master` as a `source` for Terraform modules is a terrible idea) 119 | 1. Bump major version when either new modules are released, or existing modules get backwards-incompatible changes 120 | 1. Bump minor version otherwise 121 | 122 | ## Additional resources 123 | 124 | In addition to the modules here, there's a lot of useful ones in the wild. For example: 125 | 126 | - https://registry.terraform.io/ - lots of solutions to common problems, some verified by Hashicorp themselves 127 | - https://github.com/cloudposse - look for repos starting with `terraform-` for lots of good building blocks 128 | 129 | ## Release process 130 | 131 | Please use the included release script. For example: 132 | 133 | ``` 134 | $ ./release.sh 135 | Checking dependencies... OK 136 | Running terraform fmt... OK 137 | Checking for clean working copy... OK 138 | Parsing git remote... OK 139 | Verifying GitHub API access... OK 140 | Fetching previous tags from GitHub... OK 141 | 142 | Previous release was: v9.1 143 | This release will be: v9.2 144 | 145 | Tagging new release... OK 146 | Pushing release to GitHub... OK 147 | Creating release on GitHub... OK 148 | Updating example code with new release... OK 149 | Updating Terraform module docs... OK 150 | Creating commit from docs updates... OK 151 | Pushing updated docs to GitHub... OK 152 | Cleaning up... OK 153 | 154 | New release is: https://github.com/futurice/terraform-utils/releases/tag/v9.2 155 | 156 | ``` 157 | 158 | ## License 159 | 160 | MIT 161 | -------------------------------------------------------------------------------- /aws_lambda_api/example-project/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "requires": true, 3 | "lockfileVersion": 1, 4 | "dependencies": { 5 | "@types/aws-lambda": { 6 | "version": "8.10.51", 7 | "resolved": "https://registry.npmjs.org/@types/aws-lambda/-/aws-lambda-8.10.51.tgz", 8 | "integrity": "sha512-XK7RerpXj4r+IO0r7qIeNqUSU6L4qhPMwNhISxozJJiUX/jdXj9WYzTShRVisEcUQHXgJ4TTBqTArM8f9Mjb8g==", 9 | "dev": true 10 | }, 11 | "@types/node": { 12 | "version": "8.10.60", 13 | "resolved": "https://registry.npmjs.org/@types/node/-/node-8.10.60.tgz", 14 | "integrity": "sha512-YjPbypHFuiOV0bTgeF07HpEEqhmHaZqYNSdCKeBJa+yFoQ/7BC+FpJcwmi34xUIIRVFktnUyP1dPU8U0612GOg==", 15 | "dev": true 16 | }, 17 | "ansi-styles": { 18 | "version": "3.2.1", 19 | "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", 20 | "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", 21 | "dev": true, 22 | "requires": { 23 | "color-convert": "^1.9.0" 24 | } 25 | }, 26 | "chalk": { 27 | "version": "2.4.2", 28 | "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", 29 | "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", 30 | "dev": true, 31 | "requires": { 32 | "ansi-styles": "^3.2.1", 33 | "escape-string-regexp": "^1.0.5", 34 | "supports-color": "^5.3.0" 35 | } 36 | }, 37 | "check-node-version": { 38 | "version": "3.3.0", 39 | "resolved": "https://registry.npmjs.org/check-node-version/-/check-node-version-3.3.0.tgz", 40 | "integrity": "sha512-OAtp7prQf+8YYKn2UB/fK1Ppb9OT+apW56atoKYUvucYLPq69VozOY0B295okBwCKymk2cictrS3qsdcZwyfzw==", 41 | "dev": true, 42 | "requires": { 43 | "chalk": "^2.3.0", 44 | "map-values": "^1.0.1", 45 | "minimist": "^1.2.0", 46 | "object-filter": "^1.0.2", 47 | 
"object.assign": "^4.0.4", 48 | "run-parallel": "^1.1.4", 49 | "semver": "^5.0.3" 50 | } 51 | }, 52 | "color-convert": { 53 | "version": "1.9.3", 54 | "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", 55 | "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", 56 | "dev": true, 57 | "requires": { 58 | "color-name": "1.1.3" 59 | } 60 | }, 61 | "color-name": { 62 | "version": "1.1.3", 63 | "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", 64 | "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", 65 | "dev": true 66 | }, 67 | "define-properties": { 68 | "version": "1.1.3", 69 | "resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.1.3.tgz", 70 | "integrity": "sha512-3MqfYKj2lLzdMSf8ZIZE/V+Zuy+BgD6f164e8K2w7dgnpKArBDerGYpM46IYYcjnkdPNMjPk9A6VFB8+3SKlXQ==", 71 | "dev": true, 72 | "requires": { 73 | "object-keys": "^1.0.12" 74 | } 75 | }, 76 | "escape-string-regexp": { 77 | "version": "1.0.5", 78 | "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", 79 | "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", 80 | "dev": true 81 | }, 82 | "function-bind": { 83 | "version": "1.1.1", 84 | "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.1.tgz", 85 | "integrity": "sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==", 86 | "dev": true 87 | }, 88 | "has-flag": { 89 | "version": "3.0.0", 90 | "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", 91 | "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", 92 | "dev": true 93 | }, 94 | "has-symbols": { 95 | "version": "1.0.1", 96 | "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.1.tgz", 97 | "integrity": "sha512-PLcsoqu++dmEIZB+6totNFKq/7Do+Z0u4oT0zKOJNl3lYK6vGwwu2hjHs+68OEZbTjiUE9bgOABXbP/GvrS0Kg==", 98 | "dev": true 99 | }, 100 | "map-values": { 101 | "version": "1.0.1", 102 | "resolved": "https://registry.npmjs.org/map-values/-/map-values-1.0.1.tgz", 103 | "integrity": "sha1-douOecAJvytk/ugG4ip7HEGQyZA=", 104 | "dev": true 105 | }, 106 | "minimist": { 107 | "version": "1.2.5", 108 | "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", 109 | "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==", 110 | "dev": true 111 | }, 112 | "object-filter": { 113 | "version": "1.0.2", 114 | "resolved": "https://registry.npmjs.org/object-filter/-/object-filter-1.0.2.tgz", 115 | "integrity": "sha1-rwt5f/6+r4pSxmN87b6IFs/sG8g=", 116 | "dev": true 117 | }, 118 | "object-keys": { 119 | "version": "1.1.1", 120 | "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", 121 | "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", 122 | "dev": true 123 | }, 124 | "object.assign": { 125 | "version": "4.1.0", 126 | "resolved": "https://registry.npmjs.org/object.assign/-/object.assign-4.1.0.tgz", 127 | "integrity": "sha512-exHJeq6kBKj58mqGyTQ9DFvrZC/eR6OwxzoM9YRoGBqrXYonaFyGiFMuc9VZrXf7DarreEwMpurG3dd+CNyW5w==", 128 | "dev": true, 129 | "requires": { 130 | "define-properties": "^1.1.2", 131 | "function-bind": "^1.1.1", 132 | "has-symbols": "^1.0.0", 133 | "object-keys": "^1.0.11" 134 | } 135 | }, 136 | "one-liner-joke": { 137 | "version": "1.2.0", 138 | "resolved": "https://registry.npmjs.org/one-liner-joke/-/one-liner-joke-1.2.0.tgz", 139 | 
"integrity": "sha512-cyqGnIRKCe25ZERjlyKtpQp4BfN+iV22nC6rLtTMkJh+w300DK+9UitFzJZfilMdFKImKQO+w/G/E1sUSYBsrw==" 140 | }, 141 | "run-parallel": { 142 | "version": "1.1.9", 143 | "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.1.9.tgz", 144 | "integrity": "sha512-DEqnSRTDw/Tc3FXf49zedI638Z9onwUotBMiUFKmrO2sdFKIbXamXGQ3Axd4qgphxKB4kw/qP1w5kTxnfU1B9Q==", 145 | "dev": true 146 | }, 147 | "semver": { 148 | "version": "5.7.1", 149 | "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", 150 | "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==", 151 | "dev": true 152 | }, 153 | "supports-color": { 154 | "version": "5.5.0", 155 | "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", 156 | "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", 157 | "dev": true, 158 | "requires": { 159 | "has-flag": "^3.0.0" 160 | } 161 | }, 162 | "typescript": { 163 | "version": "3.8.3", 164 | "resolved": "https://registry.npmjs.org/typescript/-/typescript-3.8.3.tgz", 165 | "integrity": "sha512-MYlEfn5VrLNsgudQTVJeNaQFUAI7DkhnOjdpAp4T+ku1TfQClewlbSuTVHiA+8skNBgaf02TL/kLOvig4y3G8w==", 166 | "dev": true 167 | } 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /aws_ses_forwarder/README.md: -------------------------------------------------------------------------------- 1 | # aws_ses_forwarder 2 | 3 | This module implements a simple, serverless email forwarding service for your custom domain. 4 | 5 | Main features: 6 | 7 | - MX records for email routing are created automatically 8 | - DKIM records are set up to improve deliverability 9 | - Automatic verification of recipient emails 10 | 11 | Optional features: 12 | 13 | - Custom "From" address for forwarded emails 14 | - Custom prefix added to "Subject" fields of forwarded emails 15 | - Skipping recipient verification when [out of the SES Sandbox](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/request-production-access.html) 16 | 17 | Resources used: 18 | 19 | - Route53 for DNS entries 20 | - S3 for temporary email storage 21 | - Lambda for performing email routing 22 | - SES for email ingress and egress 23 | - IAM for permissions 24 | 25 | The JavaScript code used on the Lambda is based on the excellent [`aws-lambda-ses-forwarder`](https://github.com/arithmetric/aws-lambda-ses-forwarder) library. 26 | 27 | ## SES Sandbox limits 28 | 29 | By default, to discourage spammers, SES will limit you to forwarding **at most 200 emails per a 24 hour period** (or 1 email per second). 30 | 31 | To go beyond these limits, you need to [request a service limit increase from AWS](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/request-production-access.html). 
32 | 33 | ## Example 1: Simple forwarding 34 | 35 | Assuming you have the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html) set up, and a DNS zone for `example.com` configured on Route 53: 36 | 37 | ```tf 38 | module "my_email_forwarder" { 39 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_ses_forwarder#inputs 40 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 41 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_ses_forwarder?ref=v13.1" 42 | 43 | email_domain = "example.com" 44 | forward_all_to = ["john.doe@futurice.com"] 45 | } 46 | ``` 47 | 48 | After `terraform apply`, SES will send a verification email to all recipient emails you included. Each recipient must click on the verification link in that email before they start receiving forwarded emails. This is a feature of [the SES Sandbox](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/request-production-access.html), and you can get rid of it by contacting AWS support (and then setting `skip_recipient_verification` to `true`). 49 | 50 | Once the emails are verified, drop an email to `whatever@example.com`, and it should pop into the inbox of `john.doe@futurice.com`. 51 | 52 | ## Example 2: Forwarding specific mailboxes 53 | 54 | You can also have specific mailboxes forward email to specific addresses: 55 | 56 | ```tf 57 | module "my_email_forwarder" { 58 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_ses_forwarder#inputs 59 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 60 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_ses_forwarder?ref=v13.1" 61 | 62 | email_domain = "example.com" 63 | 64 | forward_mapping = { 65 | sales = ["alice@futurice.com"] 66 | admin = ["bob@futurice.com"] 67 | } 68 | } 69 | ``` 70 | 71 | Once applied, and recipients verified: 72 | 73 | - Emails sent to `sales@example.com` are forwarded to `alice@futurice.com` 74 | - Emails sent to `admin@example.com` are forwarded to `bob@futurice.com` 75 | 76 | This can be combined with `forward_all_to`, so that instead of getting a bounce for sending email to a non-existent mailbox, those emails also get forwarded somewhere. 77 | 78 | ## Example 3: Multiple instances 79 | 80 | Due to the way AWS SES works, [there can be only one active receipt rule set at a time](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html#receiving-email-managing-receipt-rule-sets-enable-disable). Normally this module manages the rule set for you, and you don't need to care. 
But if you need to use the module multiple times (say, for several domains), they can't both have their rule sets be the active one, and you need to manage the rule set yourself: 81 | 82 | ```tf 83 | resource "aws_ses_receipt_rule_set" "forwarding" { 84 | rule_set_name = "my-forwarding-rules" 85 | } 86 | 87 | resource "aws_ses_active_receipt_rule_set" "forwarding" { 88 | rule_set_name = aws_ses_receipt_rule_set.forwarding.rule_set_name 89 | } 90 | 91 | module "my_email_forwarder" { 92 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_ses_forwarder#inputs 93 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 94 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_ses_forwarder?ref=v13.1" 95 | 96 | rule_set_name = aws_ses_receipt_rule_set.forwarding.rule_set_name 97 | email_domain = "example.com" 98 | forward_all_to = ["john.doe@futurice.com"] 99 | } 100 | 101 | module "other_email_forwarder" { 102 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_ses_forwarder#inputs 103 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 104 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_ses_forwarder?ref=v13.1" 105 | 106 | rule_set_name = aws_ses_receipt_rule_set.forwarding.rule_set_name 107 | email_domain = "example.org" 108 | forward_all_to = ["john.doe@futurice.com"] 109 | } 110 | ``` 111 | 112 | 113 | ## Inputs 114 | 115 | | Name | Description | Type | Default | Required | 116 | |------|-------------|------|---------|:--------:| 117 | | email_domain | Domain on which the email forwarding should be set up (e.g. `"example.com"`) | `any` | n/a | yes | 118 | | name_prefix | Name prefix to use for objects that need to be created (only lowercase alphanumeric characters and hyphens allowed, for S3 bucket name compatibility) | `string` | `""` | no | 119 | | comment_prefix | This will be included in comments for resources that are created | `string` | `"SES Forwarder: "` | no | 120 | | from_name | Mailbox name from which forwarded emails are sent | `string` | `"noreply"` | no | 121 | | subject_prefix | Text to prepend to the subject of each email before forwarding it (e.g. 
`"Forwarded: "`) | `string` | `""` | no | 122 | | forward_all_to | List of addesses to which ALL incoming email should be forwarded | `list(string)` | `[]` | no | 123 | | forward_mapping | Map defining receiving mailboxes, and to which addesses they forward their incoming email; takes precedence over `forward_all_to` | `map(list(string))` | `{}` | no | 124 | | rule_set_name | Name of the externally provided SES Rule Set, if you want to manage it yourself | `string` | `""` | no | 125 | | skip_recipient_verification | If you're not in the SES sandbox, you don't need to verify individual recipients; see https://docs.aws.amazon.com/ses/latest/DeveloperGuide/request-production-access.html | `bool` | `false` | no | 126 | | function_timeout | The amount of time our Lambda Function has to run in seconds | `number` | `10` | no | 127 | | memory_size | Amount of memory in MB our Lambda Function can use at runtime | `number` | `128` | no | 128 | | function_runtime | Which node.js version should Lambda use for our function | `string` | `"nodejs12.x"` | no | 129 | | tags | AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/ | `map(string)` | `{}` | no | 130 | 131 | ## Outputs 132 | 133 | | Name | Description | 134 | |------|-------------| 135 | | function_name | This is the unique name of the Lambda function that was created | 136 | | forward_mapping | Map defining receiving email addresses, and to which addesses they forward their incoming email | 137 | | distinct_recipients | Distinct recipient addresses mentioned in `forward_mapping` | 138 | 139 | -------------------------------------------------------------------------------- /aws_reverse_proxy/README.md: -------------------------------------------------------------------------------- 1 | # aws_reverse_proxy 2 | 3 | This module implements a website that proxies content from another server. 4 | 5 | Main features: 6 | 7 | - DNS entries are created automatically 8 | - HTTPS enabled by default 9 | - HTTP Strict Transport Security supported 10 | 11 | Optional features: 12 | 13 | - HTTP Basic Auth 14 | - Plain HTTP instead of HTTPS 15 | - Cache TTL overrides 16 | - Custom response headers sent to clients 17 | - Custom request headers sent to origin server 18 | - Static response status/body override 19 | 20 | Resources used: 21 | 22 | - Route53 for DNS entries 23 | - ACM for SSL certificates 24 | - CloudFront for proxying requests 25 | - Lambda@Edge for transforming requests 26 | - IAM for permissions 27 | 28 | ## About CloudFront operations 29 | 30 | This module manages CloudFront distributions, and these operations are generally very slow. Your `terraform apply` may take anywhere from a few minutes **up to 45 minutes** (if you're really unlucky). Be patient: if they start successfully, they almost always finish successfully, it just takes a while. 31 | 32 | Additionally, this module uses Lambda@Edge functions with CloudFront. Because Lambda@Edge functions are replicated, [they can't be deleted immediately](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-edge-delete-replicas.html). This means a `terraform destroy` won't successfully remove all resources on its first run. It should complete successfully when running it again after a few hours, however. 
33 | 34 | ## Examples 35 | 36 | Some common use cases for this module are: 37 | 38 | - [Static website hosting with S3](../aws_static_site) 39 | - [Redirecting clients from a domain to another URL](../aws_domain_redirect) 40 | - SSL termination in front of a server/load balancer elsewhere on AWS 41 | 42 | ## How CloudFront caching works 43 | 44 | It's important to understand that CloudFront, by default, **respects cache headers given by the origin**, that is, the server it's proxying requests to. 45 | 46 | ### Default cache behaviour 47 | 48 | Consider an origin server that doesn't give any `Cache-Control` headers. Any changes you make to its responses **will be reflected immediately** on the CloudFront distribution. That's is because this module will **by default** not cache such objects at all. This is a sensible default, because the AWS default TTL for CloudFront is 24 hours, and for an origin that doesn't explicitly send `Cache-Control` headers, it's rarely the desired behaviour: your site will be serving stale content for up to 24 hours. Users will be sad, and engineers will be yelled at. 49 | 50 | Having immediate updates on CloudFront is convenient, but the downside is that every request for every file will be forwarded to your origin, to make sure the CloudFront cache still has the latest version. This can increase request latency for users, and infrastructure costs for you. 51 | 52 | ### Specifying cache lifetimes on the origin 53 | 54 | Let's say we're serving static files from an S3 bucket. Using the official [AWS CLI](https://aws.amazon.com/cli/), you can specify cache lifetimes as your objects are uploaded: 55 | 56 | ```bash 57 | aws s3 cp --cache-control=no-store,must-revalidate index.html "s3://my-bucket/" 58 | aws s3 cp --cache-control=max-age=31536000 static/image-v123.jpg "s3://my-bucket/" 59 | ``` 60 | 61 | This will upload `index.html` so that CloudFront will **never** serve its content to a user, without first checking that it's not been updated on S3. However, `image-v123.jpg` will be uploaded with cache headers that allow CloudFront to keep its copy for that object **forever** (well, technically 1 year, which is the maximum recommended value for `max-age`; in practice CloudFront will probably evict it before that for other reasons). 62 | 63 | The above is a good middle ground caching strategy, for when you want immediate updates for your HTML documents (e.g. `index.html`), but static assets (e.g. `image-v123.jpg`) can be cached for much longer. This means that for the HTML document itself, you won't get any boost from CloudFront, but as the browser starts downloading the various linked static assets, they can be served directly from the CloudFront edge location, which should be much closer to the user, geographically. When you need to update the linked image, instead of updating `image-v123.jpg`, you should instead upload `image-v124.jpg`, and update any links in `index.html` to point to the new version. This ensures that: 64 | 65 | 1. Users will see the new document (including its updated images) immediately 66 | 1. Users won't see an inconsistent version of the document, where the document content is updated, but it's still showing the old images 67 | 68 | ### Overriding cache lifetimes on CloudFront 69 | 70 | If your origin server doesn't give out sensible cache control headers, or you're just feeling lazy, this module supports overriding cache behaviour on CloudFront, effectively ignoring anything your origin says about caching objects. 
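The override is just another input on the module; a minimal sketch of direct usage (the domain, origin URL and provider wiring here are illustrative, following the same pattern as the other modules' examples):

```tf
module "my_proxy" {
  # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_reverse_proxy#inputs
  # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master
  source    = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_reverse_proxy?ref=v13.1"
  providers = { aws.us_east_1 = aws.us_east_1 } # ACM and Lambda@Edge are only available in the "us-east-1" region

  site_domain        = "www.example.com"
  origin_url         = "https://origin.example.com/"
  cache_ttl_override = 300 # cache every object for 5 minutes, regardless of origin cache headers
}
```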
71 | 72 | That is, if you specify `cache_ttl_override = 0` for your site, every object will always be fetched from the origin, for every request. Importantly, though, this won't invalidate objects that _are already_ in the CloudFront cache with a longer TTL. If you have an object that's "stuck" in your cache and you can't shake it, the CloudFront feature you're looking for is [file invalidation](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Invalidation.html). 73 | 74 | Conversely, if you specify `cache_ttl_override = 300`, every object will stay in CloudFront for 5 minutes, regardless of its cache headers. This can be a good performance boost for your site, since only 1 request per file per 5 minutes will need to go all the way to the origin, and all the others can be served immediately from the CloudFront edge location. Keep in mind the aforementioned warning about "inconsistent versions", however: each object has their own TTL counter, so `index.html` and `image.jpg` may update at different times in the cache, even if you update content at your origin at the same time. 75 | 76 | 77 | ## Inputs 78 | 79 | | Name | Description | Type | Default | Required | 80 | |------|-------------|------|---------|:--------:| 81 | | site_domain | Domain on which the reverse proxy will be made available (e.g. `"www.example.com"`) | `any` | n/a | yes | 82 | | name_prefix | Name prefix to use for objects that need to be created (only lowercase alphanumeric characters and hyphens allowed, for S3 bucket name compatibility); if omitted, a random, unique one will be used | `string` | `""` | no | 83 | | comment_prefix | This will be included in comments for resources that are created | `string` | `"Reverse proxy: "` | no | 84 | | origin_url | Base URL for proxy upstream site (e.g. `"https://example.com/"`) | `any` | n/a | yes | 85 | | cloudfront_price_class | CloudFront price class to use (`100`, `200` or `"All"`, see https://aws.amazon.com/cloudfront/pricing/) | `number` | `100` | no | 86 | | viewer_https_only | Set this to `false` if you need to support insecure HTTP access for clients, in addition to HTTPS | `bool` | `true` | no | 87 | | cache_ttl_override | When `-1`, cache based on origin cache headers; when `0`, disable caching completely; when `>0`, cache ALL objects for this many seconds, regardless of their cache headers | `number` | `-1` | no | 88 | | default_root_object | The object to return when the root URL is requested | `string` | `""` | no | 89 | | add_response_headers | Map of HTTP headers (if any) to add to outgoing responses before sending them to clients | `map(string)` | `{}` | no | 90 | | hsts_max_age | How long should `Strict-Transport-Security` remain in effect for the site; disabled automatically when `viewer_https_only = false` | `number` | `31557600` | no | 91 | | origin_custom_header_name | Name of a custom header to send to the origin; this can be used to convey an authentication header to the origin, for example | `string` | `"X-Custom-Origin-Header"` | no | 92 | | origin_custom_header_value | Value of a custom header to send to the origin; see `origin_custom_header_name` | `string` | `""` | no | 93 | | origin_custom_port | When > 0, use this port for communication with the origin server, instead of relevant standard port | `number` | `0` | no | 94 | | override_response_code | When non-empty, replace the HTTP status code received from the origin with this; e.g. 
override a `404` into a `200` | `string` | `""` | no | 95 | | override_response_status | When non-empty, replace the HTTP status description received from the origin with this; e.g. override a `"Not Found"` into a `"OK"` | `string` | `""` | no | 96 | | override_response_body | When this and ALL other `override_response_*` variables are non-empty, skip sending the request to the origin altogether, and instead respond as instructed here | `string` | `""` | no | 97 | | override_only_on_code | When non-empty, limits when `override_response_*` variables take effect; for example, setting this to `"404"` allows you to turn origin 404's into 200's, while still passing a 302 redirect through to the client (JS-style regex allowed) | `string` | `""` | no | 98 | | basic_auth_username | When non-empty, require this username with HTTP Basic Auth | `string` | `""` | no | 99 | | basic_auth_password | When non-empty, require this password with HTTP Basic Auth | `string` | `""` | no | 100 | | basic_auth_realm | When using HTTP Basic Auth, this will be displayed by the browser in the auth prompt | `string` | `"Authentication Required"` | no | 101 | | basic_auth_body | When using HTTP Basic Auth, and authentication has failed, this will be displayed by the browser as the page content | `string` | `"Unauthorized"` | no | 102 | | lambda_logging_enabled | When true, writes information about incoming requests to the Lambda function's CloudWatch group; IMPORTANT: Lambda will log to CloudWatch on the nearest region of the POP processing the request, NOT necessarily your own region. | `bool` | `false` | no | 103 | | tags | AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/ | `map(string)` | `{}` | no | 104 | 105 | ## Outputs 106 | 107 | | Name | Description | 108 | |------|-------------| 109 | | cloudfront_id | The ID of the CloudFront distribution that's used for hosting the content | 110 | | web_endpoint | URL on which the site will be made available | 111 | | site_domain | Domain on which the site will be made available | 112 | 113 | -------------------------------------------------------------------------------- /aws_ec2_ebs_docker_host/README.md: -------------------------------------------------------------------------------- 1 | # aws_ec2_ebs_docker_host 2 | 3 | Creates a standalone Docker host on EC2, optionally attaching an external EBS volume for persistent data. 4 | 5 | This is convenient for quickly setting up non-production-critical Docker workloads. If you need something fancier, consider e.g. ECS, EKS or Fargate. 
6 | 7 | ## Example 1: Running a docker container 8 | 9 | Assuming you have the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html) set up: 10 | 11 | ```tf 12 | module "my_host" { 13 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_ec2_ebs_docker_host#inputs 14 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 15 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_ec2_ebs_docker_host?ref=v13.1" 16 | 17 | hostname = "my-docker-host" 18 | ssh_private_key_path = "~/.ssh/id_rsa" # if you use shared Terraform state, consider changing this to something that doesn't depend on "~" 19 | ssh_public_key_path = "~/.ssh/id_rsa.pub" 20 | allow_incoming_http = true # by default, only incoming SSH is allowed; other protocols for the security group are opt-in 21 | } 22 | 23 | output "host_ssh_command" { 24 | description = "Run this command to create a port-forward to the remote docker daemon" 25 | value = "ssh -i ${module.my_host.ssh_private_key_path} -o StrictHostKeyChecking=no -L localhost:2377:/var/run/docker.sock ${module.my_host.ssh_username}@${module.my_host.public_ip}" 26 | } 27 | ``` 28 | 29 | After `terraform apply`, and running the `host_ssh_command`, you should be able to connect from your local Docker CLI to the remote daemon, e.g.: 30 | 31 | ```bash 32 | $ DOCKER_HOST=localhost:2377 docker run -d -p 80:80 nginx 33 | ``` 34 | 35 | Visit the IP address of your host in a browser to make sure it works. 36 | 37 | ## Example 2: Using a persistent data volume 38 | 39 | Assuming you have the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html) set up: 40 | 41 | ```tf 42 | resource "aws_ebs_volume" "my_data" { 43 | availability_zone = module.my_host.availability_zone # ensure the volume is created in the same AZ as the docker host 44 | type = "gp2" # i.e. "Amazon EBS General Purpose SSD" 45 | size = 25 # in GiB; if you change this in-place, you need to SSH over and run e.g. $ sudo resize2fs /dev/xvdh 46 | tags = { Name = "my-host-data" } # make this resource easier to identify in the AWS Console (tag "Name" is effectively "display name" in some services) 47 | } 48 | 49 | module "my_host" { 50 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_ec2_ebs_docker_host#inputs 51 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 52 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_ec2_ebs_docker_host?ref=v13.1" 53 | 54 | hostname = "my-host" 55 | ssh_private_key_path = "~/.ssh/id_rsa" # note that with a shared Terraform state, paths with "~" will become problematic 56 | ssh_public_key_path = "~/.ssh/id_rsa.pub" 57 | data_volume_id = aws_ebs_volume.my_data.id # attach our EBS data volume 58 | } 59 | 60 | output "host_ssh_command" { 61 | description = "Run this command to check that the data volume got mounted" 62 | value = "ssh -i ${module.my_host.ssh_private_key_path} -o StrictHostKeyChecking=no ${module.my_host.ssh_username}@${module.my_host.public_ip} df -h" 63 | } 64 | ``` 65 | 66 | Note that due to [a bug in Terraform](https://github.com/hashicorp/terraform/issues/12570), at the time of writing, you need to apply in two parts: 67 | 68 | ```bash 69 | $ terraform apply -target aws_ebs_volume.my_data 70 | ... 71 | $ terraform apply 72 | ...
73 | ``` 74 | 75 | Afterwards, running the `host_ssh_command` should give you something like: 76 | 77 | ``` 78 | Filesystem Size Used Avail Use% Mounted on 79 | udev 481M 0 481M 0% /dev 80 | tmpfs 99M 752K 98M 1% /run 81 | /dev/xvda1 7.7G 2.1G 5.7G 27% / 82 | tmpfs 492M 0 492M 0% /dev/shm 83 | tmpfs 5.0M 0 5.0M 0% /run/lock 84 | tmpfs 492M 0 492M 0% /sys/fs/cgroup 85 | /dev/loop0 88M 88M 0 100% /snap/core/5328 86 | /dev/loop1 13M 13M 0 100% /snap/amazon-ssm-agent/495 87 | /dev/xvdh 25G 45M 24G 1% /data 88 | tmpfs 99M 0 99M 0% /run/user/1000 89 | ``` 90 | 91 | That is, you can see the 25 GB data volume mounted at `/data`. 92 | 93 | ## Example 3: Running additional provisioners 94 | 95 | Assuming you have the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html) set up: 96 | 97 | ```tf 98 | module "my_host" { 99 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_ec2_ebs_docker_host#inputs 100 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 101 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_ec2_ebs_docker_host?ref=v13.1" 102 | 103 | hostname = "my-docker-host" 104 | ssh_private_key_path = "~/.ssh/id_rsa" 105 | ssh_public_key_path = "~/.ssh/id_rsa.pub" 106 | } 107 | 108 | resource "null_resource" "provisioners" { 109 | depends_on = [module.my_host] # wait until other provisioners within the module have finished 110 | 111 | connection { 112 | host = module.my_host.public_ip 113 | user = module.my_host.ssh_username 114 | private_key = module.my_host.ssh_private_key 115 | agent = false 116 | } 117 | 118 | provisioner "remote-exec" { 119 | inline = ["echo HELLO WORLD"] 120 | } 121 | } 122 | ``` 123 | 124 | ## Example 4: Using the `docker` provider 125 | 126 | Note that until [`depends_on` support for providers](https://github.com/hashicorp/terraform/issues/2430) lands in Terraform, this is a bit cumbersome, but it's documented here in case it's useful. 
127 | 128 | Assuming you have the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html) set up: 129 | 130 | ```tf 131 | module "my_host" { 132 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_ec2_ebs_docker_host#inputs 133 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 134 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_ec2_ebs_docker_host?ref=v13.1" 135 | 136 | hostname = "my-docker-host" 137 | ssh_private_key_path = "~/.ssh/id_rsa" 138 | ssh_public_key_path = "~/.ssh/id_rsa.pub" 139 | allow_incoming_http = true 140 | } 141 | 142 | output "ssh_config_entry" { 143 | description = "Add this entry to your `~/.ssh/config` to be able to connect to the docker daemon over SSH" 144 | value = <<-EOF 145 | # This ~/.ssh/config entry was generated by Terraform 146 | Host ${module.my_host.public_ip} 147 | User ${module.my_host.ssh_username} 148 | IdentityFile ${path.root}/${module.my_host.ssh_private_key_path} 149 | StrictHostKeyChecking no 150 | EOF 151 | } 152 | 153 | provider "docker" { 154 | host = "ssh://${module.my_host.ssh_username}@${module.my_host.public_ip}" 155 | } 156 | 157 | resource "docker_image" "nginx" { 158 | name = "nginx" 159 | } 160 | 161 | resource "docker_container" "nginx" { 162 | image = docker_image.nginx.latest 163 | name = "nginx" 164 | must_run = true 165 | 166 | ports { 167 | internal = 80 168 | external = 80 169 | } 170 | } 171 | 172 | output "test_link" { 173 | value = "http://${module.my_host.public_ip}/" 174 | } 175 | ``` 176 | 177 | Due to the limitation mentioned above, applying this for the first time is somewhat involved: 178 | 179 | 1. Create the host on EC2 first, with `terraform apply -target module.my_host` 180 | 1. Add its key config to your SSH client, with `terraform output ssh_config_entry >> ~/.ssh/config` 181 | 1. Create the Docker resources, with `terraform apply` 182 | 1. You should now be able to visit the `test_link` and see nginx greeting you 183 | 184 | After the first apply, however, you don't need to take these special steps again. 
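If you ever need to troubleshoot the SSH-based connection itself, a recent Docker CLI (18.09 or newer) can talk to the remote daemon directly over SSH. The address below is a placeholder; use the host's actual `public_ip` output, i.e. the same address the generated `~/.ssh/config` entry is for:

```bash
# Should print details about the remote Docker daemon running on the EC2 host
# (replace <public_ip> with the module's public_ip output)
docker -H ssh://<public_ip> info
```

If this works, the `docker` provider configured above should be able to connect the same way.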
185 | 186 | 187 | ## Inputs 188 | 189 | | Name | Description | Type | Default | Required | 190 | |------|-------------|------|---------|:--------:| 191 | | hostname | Hostname by which this service is identified in metrics, logs etc | `string` | `"aws-ec2-ebs-docker-host"` | no | 192 | | instance_type | See https://aws.amazon.com/ec2/instance-types/ for options; for example, typical values for small workloads are `"t2.nano"`, `"t2.micro"`, `"t2.small"`, `"t2.medium"`, and `"t2.large"` | `string` | `"t2.micro"` | no | 193 | | instance_ami | See https://cloud-images.ubuntu.com/locator/ec2/ for options | `string` | `"ami-0c4c42893066a139e"` | no | 194 | | ssh_private_key_path | SSH private key file path, relative to Terraform project root | `string` | `"ssh.private.key"` | no | 195 | | ssh_public_key_path | SSH public key file path, relative to Terraform project root | `string` | `"ssh.public.key"` | no | 196 | | ssh_username | Default username built into the AMI (see 'instance_ami') | `string` | `"ubuntu"` | no | 197 | | vpc_id | ID of the VPC our host should join; if empty, joins your Default VPC | `string` | `""` | no | 198 | | reprovision_trigger | An arbitrary string value; when this value changes, the host needs to be reprovisioned | `string` | `""` | no | 199 | | root_volume_size | Size (in GiB) of the EBS volume that will be created and mounted as the root fs for the host | `number` | `8` | no | 200 | | data_volume_id | The ID of the EBS volume to mount as `/data` | `string` | `""` | no | 201 | | swap_file_size | Size of the swap file allocated on the root volume | `string` | `"512M"` | no | 202 | | swap_swappiness | Swappiness value provided when creating the swap file | `string` | `"10"` | no | 203 | | allow_incoming_http | Whether to allow incoming HTTP traffic on the host security group | `bool` | `false` | no | 204 | | allow_incoming_https | Whether to allow incoming HTTPS traffic on the host security group | `bool` | `false` | no | 205 | | allow_incoming_dns | Whether to allow incoming DNS traffic on the host security group | `bool` | `false` | no | 206 | | tags | AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/ | `map(string)` | `{}` | no | 207 | 208 | ## Outputs 209 | 210 | | Name | Description | 211 | |------|-------------| 212 | | hostname | Hostname by which this service is identified in metrics, logs etc | 213 | | public_ip | Public IP address assigned to the host by EC2 | 214 | | instance_id | AWS ID for the EC2 instance used | 215 | | availability_zone | AWS Availability Zone in which the EC2 instance was created | 216 | | ssh_username | Username that can be used to access the EC2 instance over SSH | 217 | | ssh_private_key_path | Path to SSH private key that can be used to access the EC2 instance | 218 | | ssh_private_key | SSH private key that can be used to access the EC2 instance | 219 | | security_group_id | Security Group ID, for attaching additional security rules externally | 220 | 221 | -------------------------------------------------------------------------------- /aws_ses_forwarder/index.js: -------------------------------------------------------------------------------- 1 | "use strict"; 2 | 3 | var AWS = require('aws-sdk'); 4 | 5 | console.log("AWS Lambda SES Forwarder // @arithmetric // Version 4.2.0"); 6 | 7 | // Configure the S3 bucket and key prefix for stored raw emails, and the 8 | // mapping of email addresses to forward from and to. 
9 | // 10 | // Expected keys/values: 11 | // 12 | // - fromEmail: Forwarded emails will come from this verified address 13 | // 14 | // - subjectPrefix: Forwarded emails subject will contain this prefix 15 | // 16 | // - emailBucket: S3 bucket name where SES stores emails. 17 | // 18 | // - emailKeyPrefix: S3 key name prefix where SES stores email. Include the 19 | // trailing slash. 20 | // 21 | // - forwardMapping: Object where the key is the lowercase email address from 22 | // which to forward and the value is an array of email addresses to which to 23 | // send the message. 24 | // 25 | // To match all email addresses on a domain, use a key without the name part 26 | // of an email address before the "at" symbol (i.e. `@example.com`). 27 | // 28 | // To match a mailbox name on all domains, use a key without the "at" symbol 29 | // and domain part of an email address (i.e. `info`). 30 | var defaultConfig = JSON.parse(process.env.LAMBDA_CONFIG); 31 | 32 | /** 33 | * Parses the SES event record provided for the `mail` and `receipients` data. 34 | * 35 | * @param {object} data - Data bundle with context, email, etc. 36 | * 37 | * @return {object} - Promise resolved with data. 38 | */ 39 | exports.parseEvent = function(data) { 40 | // Validate characteristics of a SES event record. 41 | if (!data.event || 42 | !data.event.hasOwnProperty('Records') || 43 | data.event.Records.length !== 1 || 44 | !data.event.Records[0].hasOwnProperty('eventSource') || 45 | data.event.Records[0].eventSource !== 'aws:ses' || 46 | data.event.Records[0].eventVersion !== '1.0') { 47 | data.log({message: "parseEvent() received invalid SES message:", 48 | level: "error", event: JSON.stringify(data.event)}); 49 | return Promise.reject(new Error('Error: Received invalid SES message.')); 50 | } 51 | 52 | data.email = data.event.Records[0].ses.mail; 53 | data.recipients = data.event.Records[0].ses.receipt.recipients; 54 | return Promise.resolve(data); 55 | }; 56 | 57 | /** 58 | * Transforms the original recipients to the desired forwarded destinations. 59 | * 60 | * @param {object} data - Data bundle with context, email, etc. 61 | * 62 | * @return {object} - Promise resolved with data. 63 | */ 64 | exports.transformRecipients = function(data) { 65 | var newRecipients = []; 66 | data.originalRecipients = data.recipients; 67 | data.recipients.forEach(function(origEmail) { 68 | var origEmailKey = origEmail.toLowerCase(); 69 | if (data.config.forwardMapping.hasOwnProperty(origEmailKey)) { 70 | newRecipients = newRecipients.concat( 71 | data.config.forwardMapping[origEmailKey]); 72 | data.originalRecipient = origEmail; 73 | } else { 74 | var origEmailDomain; 75 | var origEmailUser; 76 | var pos = origEmailKey.lastIndexOf("@"); 77 | if (pos === -1) { 78 | origEmailUser = origEmailKey; 79 | } else { 80 | origEmailDomain = origEmailKey.slice(pos); 81 | origEmailUser = origEmailKey.slice(0, pos); 82 | } 83 | if (origEmailDomain && 84 | data.config.forwardMapping.hasOwnProperty(origEmailDomain)) { 85 | newRecipients = newRecipients.concat( 86 | data.config.forwardMapping[origEmailDomain]); 87 | data.originalRecipient = origEmail; 88 | } else if (origEmailUser && 89 | data.config.forwardMapping.hasOwnProperty(origEmailUser)) { 90 | newRecipients = newRecipients.concat( 91 | data.config.forwardMapping[origEmailUser]); 92 | data.originalRecipient = origEmail; 93 | } 94 | } 95 | }); 96 | 97 | if (!newRecipients.length) { 98 | data.log({message: "Finishing process. 
No new recipients found for " + 99 | "original destinations: " + data.originalRecipients.join(", "), 100 | level: "info"}); 101 | return data.callback(); 102 | } 103 | 104 | data.recipients = newRecipients; 105 | return Promise.resolve(data); 106 | }; 107 | 108 | /** 109 | * Fetches the message data from S3. 110 | * 111 | * @param {object} data - Data bundle with context, email, etc. 112 | * 113 | * @return {object} - Promise resolved with data. 114 | */ 115 | exports.fetchMessage = function(data) { 116 | // Copying email object to ensure read permission 117 | data.log({level: "info", message: "Fetching email at s3://" + 118 | data.config.emailBucket + '/' + data.config.emailKeyPrefix + 119 | data.email.messageId}); 120 | return new Promise(function(resolve, reject) { 121 | data.s3.copyObject({ 122 | Bucket: data.config.emailBucket, 123 | CopySource: data.config.emailBucket + '/' + data.config.emailKeyPrefix + 124 | data.email.messageId, 125 | Key: data.config.emailKeyPrefix + data.email.messageId, 126 | ACL: 'private', 127 | ContentType: 'text/plain', 128 | StorageClass: 'STANDARD' 129 | }, function(err) { 130 | if (err) { 131 | data.log({level: "error", message: "copyObject() returned error:", 132 | error: err, stack: err.stack}); 133 | return reject( 134 | new Error("Error: Could not make readable copy of email.")); 135 | } 136 | 137 | // Load the raw email from S3 138 | data.s3.getObject({ 139 | Bucket: data.config.emailBucket, 140 | Key: data.config.emailKeyPrefix + data.email.messageId 141 | }, function(err, result) { 142 | if (err) { 143 | data.log({level: "error", message: "getObject() returned error:", 144 | error: err, stack: err.stack}); 145 | return reject( 146 | new Error("Error: Failed to load message body from S3.")); 147 | } 148 | data.emailData = result.Body.toString(); 149 | return resolve(data); 150 | }); 151 | }); 152 | }); 153 | }; 154 | 155 | /** 156 | * Processes the message data, making updates to recipients and other headers 157 | * before forwarding message. 158 | * 159 | * @param {object} data - Data bundle with context, email, etc. 160 | * 161 | * @return {object} - Promise resolved with data. 162 | */ 163 | exports.processMessage = function(data) { 164 | var match = data.emailData.match(/^((?:.+\r?\n)*)(\r?\n(?:.*\s+)*)/m); 165 | var header = match && match[1] ? match[1] : data.emailData; 166 | var body = match && match[2] ? match[2] : ''; 167 | 168 | // Add "Reply-To:" with the "From" address if it doesn't already exists 169 | if (!/^Reply-To: /mi.test(header)) { 170 | match = header.match(/^From: (.*(?:\r?\n\s+.*)*\r?\n)/m); 171 | var from = match && match[1] ? 
match[1] : ''; 172 | if (from) { 173 | header = header + 'Reply-To: ' + from; 174 | data.log({level: "info", message: "Added Reply-To address of: " + from}); 175 | } else { 176 | data.log({level: "info", message: "Reply-To address not added because " + 177 | "From address was not properly extracted."}); 178 | } 179 | } 180 | 181 | // SES does not allow sending messages from an unverified address, 182 | // so replace the message's "From:" header with the original 183 | // recipient (which is a verified domain) 184 | header = header.replace( 185 | /^From: (.*(?:\r?\n\s+.*)*)/mg, 186 | function(match, from) { 187 | var fromText; 188 | if (data.config.fromEmail) { 189 | fromText = 'From: ' + from.replace(/<(.*)>/, '').trim() + 190 | ' <' + data.config.fromEmail + '>'; 191 | } else { 192 | fromText = 'From: ' + from.replace('<', 'at ').replace('>', '') + 193 | ' <' + data.originalRecipient + '>'; 194 | } 195 | return fromText; 196 | }); 197 | 198 | // Add a prefix to the Subject 199 | if (data.config.subjectPrefix) { 200 | header = header.replace( 201 | /^Subject: (.*)/mg, 202 | function(match, subject) { 203 | return 'Subject: ' + data.config.subjectPrefix + subject; 204 | }); 205 | } 206 | 207 | // Replace original 'To' header with a manually defined one 208 | if (data.config.toEmail) { 209 | header = header.replace(/^To: (.*)/mg, () => 'To: ' + data.config.toEmail); 210 | } 211 | 212 | // Remove the Return-Path header. 213 | header = header.replace(/^Return-Path: (.*)\r?\n/mg, ''); 214 | 215 | // Remove Sender header. 216 | header = header.replace(/^Sender: (.*)\r?\n/mg, ''); 217 | 218 | // Remove Message-ID header. 219 | header = header.replace(/^Message-ID: (.*)\r?\n/mig, ''); 220 | 221 | // Remove all DKIM-Signature headers to prevent triggering an 222 | // "InvalidParameterValue: Duplicate header 'DKIM-Signature'" error. 223 | // These signatures will likely be invalid anyways, since the From 224 | // header was modified. 225 | header = header.replace(/^DKIM-Signature: .*\r?\n(\s+.*\r?\n)*/mg, ''); 226 | 227 | data.emailData = header + body; 228 | return Promise.resolve(data); 229 | }; 230 | 231 | /** 232 | * Send email using the SES sendRawEmail command. 233 | * 234 | * @param {object} data - Data bundle with context, email, etc. 235 | * 236 | * @return {object} - Promise resolved with data. 237 | */ 238 | exports.sendMessage = function(data) { 239 | var params = { 240 | Destinations: data.recipients, 241 | Source: data.originalRecipient, 242 | RawMessage: { 243 | Data: data.emailData 244 | } 245 | }; 246 | data.log({level: "info", message: "sendMessage: Sending email via SES. " + 247 | "Original recipients: " + data.originalRecipients.join(", ") + 248 | ". Transformed recipients: " + data.recipients.join(", ") + "."}); 249 | return new Promise(function(resolve, reject) { 250 | data.ses.sendRawEmail(params, function(err, result) { 251 | if (err) { 252 | data.log({level: "error", message: "sendRawEmail() returned error.", 253 | error: err, stack: err.stack}); 254 | return reject(new Error('Error: Email sending failed.')); 255 | } 256 | data.log({level: "info", message: "sendRawEmail() successful.", 257 | result: result}); 258 | resolve(data); 259 | }); 260 | }); 261 | }; 262 | 263 | /** 264 | * Handler function to be invoked by AWS Lambda with an inbound SES email as 265 | * the event. 266 | * 267 | * @param {object} event - Lambda event from inbound email received by AWS SES. 268 | * @param {object} context - Lambda context object. 
269 | * @param {object} callback - Lambda callback object. 270 | * @param {object} overrides - Overrides for the default data, including the 271 | * configuration, SES object, and S3 object. 272 | */ 273 | exports.handler = function(event, context, callback, overrides) { 274 | var steps = overrides && overrides.steps ? overrides.steps : 275 | [ 276 | exports.parseEvent, 277 | exports.transformRecipients, 278 | exports.fetchMessage, 279 | exports.processMessage, 280 | exports.sendMessage 281 | ]; 282 | var data = { 283 | event: event, 284 | callback: callback, 285 | context: context, 286 | config: overrides && overrides.config ? overrides.config : defaultConfig, 287 | log: overrides && overrides.log ? overrides.log : console.log, 288 | ses: overrides && overrides.ses ? overrides.ses : new AWS.SES(), 289 | s3: overrides && overrides.s3 ? 290 | overrides.s3 : new AWS.S3({signatureVersion: 'v4'}) 291 | }; 292 | Promise.series(steps, data) 293 | .then(function(data) { 294 | data.log({level: "info", message: "Process finished successfully."}); 295 | return data.callback(); 296 | }) 297 | .catch(function(err) { 298 | data.log({level: "error", message: "Step returned error: " + err.message, 299 | error: err, stack: err.stack}); 300 | return data.callback(new Error("Error: Step returned error.")); 301 | }); 302 | }; 303 | 304 | Promise.series = function(promises, initValue) { 305 | return promises.reduce(function(chain, promise) { 306 | if (typeof promise !== 'function') { 307 | return Promise.reject(new Error("Error: Invalid promise item: " + 308 | promise)); 309 | } 310 | return chain.then(promise); 311 | }, Promise.resolve(initValue)); 312 | }; 313 | -------------------------------------------------------------------------------- /aws_lambda_api/README.md: -------------------------------------------------------------------------------- 1 | # aws_lambda_api 2 | 3 | This module creates a Lambda function, and makes it available via a custom domain, complete with SSL termination: e.g. `https://api.example.com/`. This includes: 4 | 5 | - DNS records on [Route 53](https://aws.amazon.com/route53/) 6 | - An SSL certificate for the domain from [ACM](https://aws.amazon.com/certificate-manager/) 7 | - [API Gateway](https://aws.amazon.com/api-gateway/) configuration for invoking the function over HTTP 8 | - A [Lambda](https://aws.amazon.com/lambda/) function built from your JavaScript code 9 | 10 | ## Example 1: Simple API 11 | 12 | First, write down some simple code to deploy in a file called `index.js`: 13 | 14 | ```js 15 | exports.handler = function(event, context, callback) { 16 | console.log("Lambda function event:", event); 17 | console.log("Lambda function context:", context); 18 | callback(null, { 19 | // See here for docs on this response object: 20 | // https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format 21 | statusCode: 200, 22 | headers: { "Content-Type": "text/plain; charset=utf-8" }, 23 | body: "Hello World!" 
24 | }); 25 | }; 26 | ``` 27 | 28 | Assuming you have the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html) set up, and a DNS zone for `example.com` configured on Route 53: 29 | 30 | ```tf 31 | # Lambda functions can only be uploaded as ZIP files, so we need to package our JS file into one 32 | data "archive_file" "lambda_zip" { 33 | type = "zip" 34 | source_file = "${path.module}/index.js" 35 | output_path = "${path.module}/lambda.zip" 36 | } 37 | 38 | module "my_api" { 39 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_lambda_api#inputs 40 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 41 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_lambda_api?ref=v13.1" 42 | providers = { aws.us_east_1 = aws.us_east_1 } # this alias is needed because ACM is only available in the "us-east-1" region 43 | 44 | api_domain = "api.example.com" 45 | function_zipfile = data.archive_file.lambda_zip.output_path 46 | lambda_logging_enabled = true 47 | } 48 | ``` 49 | 50 | After `terraform apply`, you should be able to visit `https://api.example.com/`, and be greeted by the above `Hello World!` message. 51 | 52 | Because we included the `lambda_logging_enabled` option, you can also log into CloudWatch and check out the properties Lambda makes available in the `event` and `context` properties. 53 | 54 | The associated API Gateway has been configured to route **all requests** to our Lambda function. Try visiting `https://api.example.com/foo/bar?baz=123` for instance, and you should get the same message, but with different parameters in the `event` object. This allows you to implement arbitrary routing rules in JavaScript, without having to define them in API Gateway also. 55 | 56 | ## Example 2: Adding a build step 57 | 58 | Say you want to do something non-trivial in your Lambda. This probably means installing some libraries from [npm](https://www.npmjs.com/), and possibly writing the Lambda in [TypeScript](https://www.typescriptlang.org/). 59 | 60 | An [example project](./example-project) is included with these docs. It demonstrates a simple workflow for: 61 | 62 | 1. Compiling your Lambda function from TypeScript 63 | 1. Including external dependencies from npm (the [`one-liner-joke`](https://www.npmjs.com/package/one-liner-joke) package serves as an example) 64 | 1. Releasing code changes via Terraform 65 | 66 | Importantly, the most recent compiled version of the Lambda function should always exist in `example-project/dist/lambda.zip`, **and be committed to version control**. This seems counter to best practices, but otherwise developers who have just cloned your Terraform repo will be unable to e.g. `terraform apply`, before installing the full `node` toolchain locally, to be able to compile the Lambda function. The same applies to your CI server, for example. This may not be the correct workflow for larger projects, however; see below for suggestions in that regard. 
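In practice, the day-to-day loop with this workflow boils down to something like the following (a rough sketch only; `build.sh` in the example project is the authoritative build step, and the paths shown are illustrative):

```bash
# Rebuild the Lambda bundle and commit it alongside any infra changes
./example-project/build.sh                 # recompiles the TypeScript and refreshes dist/lambda.zip
git add example-project/dist/lambda.zip
git commit -m "Update Lambda bundle"
terraform apply                            # run from your Terraform project root
```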
67 | 68 | Assuming you have the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html) set up, and a DNS zone for `example.com` configured on Route 53: 69 | 70 | ```tf 71 | module "my_api" { 72 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_lambda_api#inputs 73 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 74 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_lambda_api?ref=v13.1" 75 | providers = { aws.us_east_1 = aws.us_east_1 } # this alias is needed because ACM is only available in the "us-east-1" region 76 | 77 | api_domain = "api.example.com" 78 | function_zipfile = "./path/to/example-project/dist/lambda.zip" 79 | } 80 | ``` 81 | 82 | After `terraform apply`, you should be able to receive a random joke with: 83 | 84 | ```bash 85 | $ curl https://api.example.com 86 | { 87 | "body": "You look like a before picture.", 88 | "tags": [ 89 | "insults" 90 | ] 91 | } 92 | ``` 93 | 94 | Whenever you make changes to the function code, make sure you run `build.sh` again, commit the result, and then `terraform apply` to deploy your changes. 95 | 96 | ## Example 3: Separating Lambda code from infra code 97 | 98 | Bundling the code and build artifacts for your Lambda function is all well and good when you just want to get things done. However, for a larger or more active project, you're probably better off separating the JavaScript project for the Lambda function into a separate repository. In that case, the process usually looks something like this: 99 | 100 | 1. Changes to the Lambda code are pushed to version control 101 | 1. A CI process picks up the changes, builds the code into a zipfile 102 | 1. The zipfile gets named with some versioning scheme, e.g. `lambda-v123.zip` 103 | 1. The CI process uploads the zipfile into an S3 bucket 104 | 1. The release is made by updating the Terraform config accordingly 105 | 106 | This also makes it easy to support multiple environments, and release promotions between them. 
For example: 107 | 108 | ```tf 109 | resource "aws_s3_bucket" "my_builds" { 110 | bucket = "my-builds" 111 | } 112 | 113 | module "my_api_stage" { 114 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_lambda_api#inputs 115 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 116 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_lambda_api?ref=v13.1" 117 | providers = { aws.us_east_1 = aws.us_east_1 } # this alias is needed because ACM is only available in the "us-east-1" region 118 | 119 | api_domain = "api-stage.example.com" 120 | function_s3_bucket = aws_s3_bucket.my_builds.id 121 | function_zipfile = "lambda-v123.zip" 122 | 123 | function_env_vars = { 124 | ENV_NAME = "stage" 125 | } 126 | } 127 | 128 | module "my_api_prod" { 129 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_lambda_api#inputs 130 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 131 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_lambda_api?ref=v13.1" 132 | providers = { aws.us_east_1 = aws.us_east_1 } # this alias is needed because ACM is only available in the "us-east-1" region 133 | 134 | api_domain = "api-prod.example.com" 135 | function_s3_bucket = aws_s3_bucket.my_builds.id 136 | function_zipfile = "lambda-v122.zip" 137 | 138 | function_env_vars = { 139 | ENV_NAME = "prod" 140 | } 141 | } 142 | ``` 143 | 144 | You'll note how the `stage` environment is running the latest `v123` release, while `prod` is still on the previous `v122` release. Once the `v123` release has been thoroughly tested on the `stage` environment, it can be promoted to `prod` by changing the `function_zipfile` variable, and issuing a `terraform apply`. This process supports immutable releases, easy rollbacks, and an audit trail of past releases. 145 | 146 | ## Example 4: Releasing without Terraform 147 | 148 | Sometimes it's convenient to let your CI perform the release unattended. One way to accomplish this is to use just `function_zipfile = "lambda-stage.zip"` and `function_zipfile = "lambda-prod.zip"` in your Terraform configuration, but then do something like this for releases to `stage`: 149 | 150 | ```bash 151 | ./build.sh 152 | aws s3 cp ./dist/lambda.zip s3://my-builds/lambda-stage.zip 153 | aws lambda update-function-code --function-name my-stage-function-name --s3-bucket my-builds --s3-key lambda-stage.zip 154 | ``` 155 | 156 | And then to promote the current `stage` to `prod`: 157 | 158 | ```bash 159 | aws s3 cp s3://my-builds/lambda-stage.zip s3://my-builds/lambda-prod.zip 160 | aws lambda update-function-code --function-name my-prod-function-name --s3-bucket my-builds --s3-key lambda-prod.zip 161 | ``` 162 | 163 | ...or some variation thereof. You get the idea. 164 | 165 | ## Debugging API Gateway 166 | 167 | If something isn't working right with your API Gateway, set `api_gateway_logging_level = "INFO"`. 
Additionally, you need to add the following **global configuration** for your API Gateway: 168 | 169 | ```tf 170 | resource "aws_api_gateway_account" "this" { 171 | cloudwatch_role_arn = aws_iam_role.apigateway_cloudwatch_logging.arn 172 | } 173 | 174 | resource "aws_iam_role" "apigateway_cloudwatch_logging" { 175 | name = "apigateway-cloudwatch-logging" 176 | 177 | assume_role_policy = < 257 | ## Inputs 258 | 259 | | Name | Description | Type | Default | Required | 260 | |------|-------------|------|---------|:--------:| 261 | | api_domain | Domain on which the Lambda will be made available (e.g. `"api.example.com"`) | `any` | n/a | yes | 262 | | name_prefix | Name prefix to use for objects that need to be created (only lowercase alphanumeric characters and hyphens allowed, for S3 bucket name compatibility); if omitted, a random, unique one will be used | `string` | `""` | no | 263 | | comment_prefix | This will be included in comments for resources that are created | `string` | `"Lambda API: "` | no | 264 | | function_zipfile | Path to a ZIP file that will be installed as the Lambda function (e.g. `"my-api.zip"`) | `any` | n/a | yes | 265 | | function_s3_bucket | When provided, the zipfile is retrieved from an S3 bucket by this name instead (filename is still provided via `function_zipfile`) | `string` | `""` | no | 266 | | function_handler | Instructs Lambda on which function to invoke within the ZIP file | `string` | `"index.handler"` | no | 267 | | function_timeout | The amount of time your Lambda Function has to run in seconds | `number` | `3` | no | 268 | | memory_size | Amount of memory in MB your Lambda Function can use at runtime | `number` | `128` | no | 269 | | function_runtime | Which node.js version should Lambda use for this function | `string` | `"nodejs12.x"` | no | 270 | | function_env_vars | Which env vars (if any) to invoke the Lambda with | `map(string)` |
`{ "aws_lambda_api": "" }`
| no | 271 | | stage_name | Name of the single stage created for the API on API Gateway | `string` | `"default"` | no | 272 | | lambda_logging_enabled | When true, writes any console output to the Lambda function's CloudWatch group | `bool` | `false` | no | 273 | | api_gateway_logging_level | Either `"OFF"`, `"INFO"` or `"ERROR"`; note that this requires having a CloudWatch log role ARN globally in API Gateway Settings | `string` | `"OFF"` | no | 274 | | api_gateway_cloudwatch_metrics | When true, sends metrics to CloudWatch | `bool` | `false` | no | 275 | | api_gateway_endpoint_config | Either `"EDGE"`, `"REGIONAL"` or `"PRIVATE"`; see https://docs.aws.amazon.com/apigateway/latest/developerguide/create-regional-api.html | `string` | `"EDGE"` | no | 276 | | tags | AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/ | `map(string)` | `{}` | no | 277 | | throttling_rate_limit | How many sustained requests per second should the API process at most; see https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-request-throttling.html | `number` | `10000` | no | 278 | | throttling_burst_limit | How many burst requests should the API process at most; see https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-request-throttling.html | `number` | `5000` | no | 279 | 280 | ## Outputs 281 | 282 | | Name | Description | 283 | |------|-------------| 284 | | function_name | This is the unique name of the Lambda function that was created | 285 | | web_endpoint | This URL can be used to invoke the Lambda through the API Gateway | 286 | | function_role | The IAM role for the function created; can be used to attach additional policies/permissions | 287 | | rest_api_name | Name of the API Gateway API that was created | 288 | 289 | -------------------------------------------------------------------------------- /aws_static_site/README.md: -------------------------------------------------------------------------------- 1 | # aws_static_site 2 | 3 | This module implements a website for hosting static content. 4 | 5 | Main features: 6 | 7 | - DNS entries are created automatically 8 | - S3 bucket is created automatically 9 | - HTTPS enabled by default 10 | - HTTP Strict Transport Security supported 11 | - Direct access to the S3 bucket is prevented 12 | 13 | Optional features: 14 | 15 | - HTTP Basic Auth 16 | - Plain HTTP instead of HTTPS 17 | - Cache TTL overrides 18 | - Custom response headers sent to clients 19 | 20 | Resources used: 21 | 22 | - Route53 for DNS entries 23 | - ACM for SSL certificates 24 | - CloudFront for proxying requests 25 | - Lambda@Edge for transforming requests 26 | - IAM for permissions 27 | 28 | ## About CloudFront operations 29 | 30 | This module manages CloudFront distributions, and these operations are generally very slow. Your `terraform apply` may take anywhere from a few minutes **up to 45 minutes** (if you're really unlucky). Be patient: if they start successfully, they almost always finish successfully, it just takes a while. 31 | 32 | Additionally, this module uses Lambda@Edge functions with CloudFront. Because Lambda@Edge functions are replicated, [they can't be deleted immediately](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-edge-delete-replicas.html). This means a `terraform destroy` won't successfully remove all resources on its first run. It should complete successfully when running it again after a few hours, however. 
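If you want to keep an eye on a slow distribution update outside of Terraform, the AWS CLI can report its status. The distribution ID below is a placeholder; look yours up in the CloudFront console (or from a `cloudfront_id` output, if your configuration exposes one, as the related `aws_reverse_proxy` module does):

```bash
# Prints "InProgress" while changes are still propagating, and "Deployed" once done
aws cloudfront get-distribution --id E1ABCDEF2GHIJK --query 'Distribution.Status' --output text
```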
33 | 34 | ## Example 1: Simple static site 35 | 36 | Assuming you have the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html) set up, and a DNS zone for `example.com` configured on Route 53: 37 | 38 | ```tf 39 | # Lambda@Edge and ACM, when used with CloudFront, need to be used in the US East region. 40 | # Thus, we need a separate AWS provider for that region, which can be used with an alias. 41 | # Make sure you customize this block to match your regular AWS provider configuration. 42 | # https://www.terraform.io/docs/configuration/providers.html#multiple-provider-instances 43 | provider "aws" { 44 | alias = "us_east_1" 45 | region = "us-east-1" 46 | } 47 | 48 | module "my_site" { 49 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_static_site#inputs 50 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 51 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_static_site?ref=v13.1" 52 | providers = { aws.us_east_1 = aws.us_east_1 } # this alias is needed because ACM is only available in the "us-east-1" region 53 | 54 | site_domain = "hello.example.com" 55 | } 56 | 57 | resource "aws_s3_bucket_object" "my_index" { 58 | bucket = module.my_site.bucket_name 59 | key = "index.html" 60 | content = "
Hello World!
" 61 | content_type = "text/html; charset=utf-8" 62 | } 63 | 64 | output "bucket_name" { 65 | description = "The name of the S3 bucket that's used for hosting the content" 66 | value = module.my_site.bucket_name 67 | } 68 | ``` 69 | 70 | After `terraform apply` (which may take a long time), you should be able to visit `hello.example.com`, be redirected to HTTPS, and be greeted by the above `Hello World!` message. 71 | 72 | You may (and probably will) want to upload more files into the bucket outside of Terraform. Using the official [AWS CLI](https://aws.amazon.com/cli/) this could look like: 73 | 74 | ```bash 75 | aws s3 cp --cache-control=no-store,must-revalidate image.jpg "s3://$(terraform output bucket_name)/" 76 | ``` 77 | 78 | After this, `image.jpg` will be available at `https://hello.example.com/image.jpg`. 79 | 80 | ## Example 2: Basic Authentication 81 | 82 | This module supports password-protecting your site with HTTP Basic Authentication, via a Lambda@Edge function. 83 | 84 | Update the `my_site` module in Example 1 as follows: 85 | 86 | ```tf 87 | module "my_site" { 88 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_static_site#inputs 89 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 90 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_static_site?ref=v13.1" 91 | 92 | site_domain = "hello.example.com" 93 | 94 | basic_auth_username = "admin" 95 | basic_auth_password = "secret" 96 | } 97 | ``` 98 | 99 | After `terraform apply` (which may take a long time), visiting `hello.example.com` should pop out the browser's authentication dialog, and not let you proceed without the above credentials. 100 | 101 | ## Example 3: Custom response headers 102 | 103 | This module supports injecting custom headers into CloudFront responses, via a Lambda@Edge function. 104 | 105 | By default, the function only adds `Strict-Transport-Security` headers (as it [significantly improves security](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Strict-Transport-Security#An_example_scenario) with HTTPS), but you may need other customization. 
106 | 107 | For [additional security hardening of your static site](https://aws.amazon.com/blogs/networking-and-content-delivery/adding-http-security-headers-using-lambdaedge-and-amazon-cloudfront/), including a fairly-draconian (and thoroughly-documented) [Content Security Policy](https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP), update the `my_site` module in Example 1 as follows: 108 | 109 | ```tf 110 | module "my_site" { 111 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_static_site#inputs 112 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v13.1...master 113 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_static_site?ref=v13.1" 114 | 115 | site_domain = "hello.example.com" 116 | 117 | add_response_headers = { 118 | 119 | # Add basic security headers: 120 | Strict-Transport-Security = "max-age=31536000" # the page should ONLY be accessed using HTTPS, instead of using HTTP (max-age == one year) 121 | X-Content-Type-Options = "nosniff" # the MIME types advertised in the Content-Type headers should ALWAYS be followed; this allows opting out of MIME type sniffing 122 | X-Frame-Options = "DENY" # disallow rendering the page inside a frame; besides legacy browsers, superseded by CSP 123 | X-XSS-Protection = "1; mode=block" # stops pages from loading when they detect reflected cross-site scripting (XSS) attacks; besides legacy browsers, superseded by CSP 124 | Referrer-Policy = "same-origin" # a referrer will be sent for same-site origins, but cross-origin requests will send no referrer information 125 | 126 | # Remove some headers which could disclose details about our upstream server 127 | # Note that not all headers can be altered by Lambda@Edge: https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/lambda-requirements-limits.html#lambda-header-restrictions 128 | Server = "" # "Server" header can't be removed, but this will reset it to "CloudFront" 129 | X-Amz-Error-Code = "" 130 | X-Amz-Error-Message = "" 131 | X-Amz-Error-Detail-Key = "" 132 | X-Amz-Request-Id = "" 133 | X-Amz-Id-2 = "" 134 | 135 | # Add CSP header: 136 | # https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Security-Policy 137 | Content-Security-Policy = replace(replace(replace(<<-EOT 138 | 139 | default-src # serves as a fallback for the other CSP fetch directives; for many of the following directives, if they are absent, the user agent will look for the default-src directive and will use this value for it 140 | 'none' # by default, don't allow anything; we'll specifically white-list things below 141 | ; 142 | block-all-mixed-content # prevents loading any assets using HTTP when the page is loaded using HTTPS 143 | ; 144 | connect-src # restricts the URLs which can be loaded using script interfaces (e.g. XHR, WebSocket) 145 | api.example.com # allow connecting to this specific API (but not others!) 146 | ; 147 | form-action # restricts the URLs which can be used as the target of a form submission 148 | 'none' # for better or worse, most forms today are JavaScript-only -> we can prohibit all normal form submission 149 | ; 150 | img-src # specifies valid sources of images and favicons 151 | 'self' # allow regular images that ship with the UI 152 | data: # allow small assets which have been inlined by webpack 153 | ; 154 | font-src # specifies valid sources of webfonts 155 | 'self' # allow loading self-hosted fonts; add e.g. fonts.googleapis.com here (without any quotes!)
to allow loading Google Fonts (https://fonts.google.com/) 156 | ; 157 | manifest-src # specifies which manifest can be applied to the resource 158 | 'self' # our manifest is always on our own domain 159 | ; 160 | navigate-to # restricts the URLs to which a document can initiate navigations by any means including
forms (if form-action is not specified), links, window.location, window.open, etc 161 | 'self' # allow navigating within our own site, but not anywhere else 162 | ; 163 | prefetch-src # specifies valid resources that may be prefetched or prerendered 164 | 'none' # we don't currently prefetch or prerender anything -> prohibit until we do 165 | ; 166 | script-src # specifies valid sources for JavaScript; this includes not only URLs loaded directly into