├── .editorconfig ├── .gitignore ├── LICENSE ├── README.md ├── _header_ ├── README.md ├── design.sketch └── hero.png ├── aws ├── _header_ │ └── README.md ├── aws_domain_redirect │ ├── README.md │ ├── main.tf │ └── variables.tf ├── aws_ec2_ebs_docker_host │ ├── README.md │ ├── data.tf │ ├── main.tf │ ├── outputs.tf │ ├── provision-docker.sh │ ├── provision-ebs.sh │ ├── provision-swap.sh │ ├── security.tf │ └── variables.tf ├── aws_lambda_api │ ├── README.md │ ├── api_gateway_config.tf │ ├── api_gateway_resources.tf │ ├── certificate.tf │ ├── data.tf │ ├── example-project │ │ ├── .gitignore │ │ ├── build.sh │ │ ├── dist │ │ │ └── lambda.zip │ │ ├── package.json │ │ ├── src │ │ │ ├── index.ts │ │ │ └── types.d.ts │ │ └── tsconfig.json │ ├── main.tf │ ├── outputs.tf │ ├── permissions.tf │ ├── route53.tf │ └── variables.tf ├── aws_lambda_cronjob │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ ├── permissions.tf │ └── variables.tf ├── aws_mailgun_domain │ ├── README.md │ ├── data.tf │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── aws_reverse_proxy │ ├── .gitignore │ ├── .prettierrc │ ├── README.md │ ├── certificate.tf │ ├── cloudfront.tf │ ├── data.tf │ ├── lambda.tf │ ├── lambda.tpl.js │ ├── outputs.tf │ ├── route53.tf │ └── variables.tf ├── aws_static_site │ ├── README.md │ ├── data.tf │ ├── main.tf │ ├── outputs.tf │ ├── s3.tf │ └── variables.tf ├── aws_vpc_msk │ ├── LICENSE │ ├── README.md │ ├── acmpca.tf │ ├── common-tags-data.tf │ ├── kafka-client-msk.sh │ ├── keypair.tf │ ├── kms.tf │ ├── msk-client.tf │ ├── msk-cluster.tf │ ├── network-routing.tf │ ├── provider.tf │ ├── security_group.tf │ ├── subnets.tf │ ├── terraform.tfvars │ ├── variables.tf │ └── vpc.tf ├── static_website_ssl_cloudfront_private_s3 │ ├── README.md │ ├── images │ │ └── s3-static-website.png │ ├── init.tf │ ├── main.tf │ └── variables.tf └── wordpress_fargate │ ├── README.md │ ├── alb.tf │ ├── cloudfront.tf │ ├── data.tf │ ├── db.tf │ ├── efs.tf │ ├── fargate.tf │ ├── output.tf │ ├── provider.tf │ ├── route53.tf │ ├── variables.tf │ └── vpc.tf ├── azure ├── _header_ │ └── README.md ├── azure_linux_docker_app_service │ ├── README.md │ ├── access_policies.tf │ ├── app_service.tf │ ├── data.tf │ ├── example-app │ │ ├── Dockerfile │ │ ├── build-and-push.sh │ │ ├── deploy.sh │ │ ├── index.js │ │ ├── package-lock.json │ │ └── package.json │ ├── images │ │ └── architecture.png │ ├── monitoring.tf │ ├── outputs.tf │ ├── provider.tf │ ├── secrets.tf │ ├── shared.tf │ └── variables.tf └── layers │ ├── README.md │ ├── create.sh │ ├── destroy.sh │ ├── layers.tf │ ├── main.tf │ └── variables.tf ├── generic ├── _header_ │ └── README.md └── docker_compose_host │ ├── README.md │ ├── main.tf │ ├── outputs.tf │ └── variables.tf ├── google_cloud ├── .gitignore ├── CQRS_bigquery_memorystore │ ├── README.md │ ├── bigquery │ │ ├── controls.tf │ │ ├── ingress.tf │ │ ├── outputs.tf │ │ ├── reports.tf │ │ ├── schemas │ │ │ ├── control.template.schema.json │ │ │ ├── prober.schema.json │ │ │ ├── report.schema.json │ │ │ └── vendor1.schema.json │ │ ├── sql │ │ │ ├── control_range_view.sql │ │ │ ├── daily_adjusted_totals.sql │ │ │ ├── last_n_days_totals.sql │ │ │ ├── unified_values.sql │ │ │ └── vendor1_cleanup.sql │ │ ├── udf │ │ │ ├── CUSTOM_JSON_EXTRACT_ARRAY_FLOAT.sql │ │ │ └── jsonpath-0.8.0.js │ │ ├── urdf.tf │ │ ├── variables.tf │ │ └── views.tf │ ├── functions │ │ ├── function_memorystoreloader.tf │ │ ├── function_prober.tf │ │ ├── function_test.tf │ │ ├── function_update_current.tf │ │ ├── function_update_historical.tf │ │ ├── 
gcs.tf │ │ ├── pubsub.tf │ │ ├── scheduler.tf │ │ ├── src │ │ │ ├── materialize │ │ │ │ ├── index.js │ │ │ │ └── package.json │ │ │ ├── memorystoreload │ │ │ │ ├── index.js │ │ │ │ └── package.json │ │ │ ├── probe │ │ │ │ ├── index.js │ │ │ │ └── package.json │ │ │ └── test │ │ │ │ ├── index.js │ │ │ │ └── package.json │ │ ├── variables.tf │ │ └── vpc.tf │ ├── main.tf │ └── memorystore │ │ ├── memorystore.tf │ │ ├── outputs.tf │ │ └── variables.tf ├── _header_ │ └── README.md ├── camunda-secure │ ├── .dockerignore │ ├── .gcloudignore │ ├── .gitignore │ ├── Dockerfile.template │ ├── build.tf │ ├── camunda.tf │ ├── cloudsql.tf │ ├── config │ │ └── bpm-platform.xml.template │ ├── main.tf │ └── readme.md ├── camunda │ ├── .dockerignore │ ├── .gcloudignore │ ├── .gitignore │ ├── Dockerfile.template │ ├── build.tf │ ├── camunda.tf │ ├── cloudsql.tf │ ├── config │ │ └── bpm-platform.xml.template │ ├── main.tf │ └── readme.md ├── minecraft │ ├── README.md │ └── main.tf ├── oathkeeper │ ├── .dockerignore │ ├── .gitignore │ ├── Dockerfile.template │ ├── README.md │ ├── build.tf │ ├── config.template.yml │ ├── main.tf │ └── rules.template.yml └── openresty-beyondcorp │ ├── .dockerignore │ ├── .gitignore │ ├── Dockerfile.template │ ├── README.md │ ├── build.tf │ ├── docker-compose.yaml │ ├── files │ ├── default.template.conf │ ├── login.template │ └── swiss │ │ ├── secretmanager.lua │ │ └── slack.lua │ ├── main.tf │ └── test │ └── dev.sh ├── package-lock.json ├── package.json └── repotools └── generate_readme.js /.editorconfig: -------------------------------------------------------------------------------- 1 | # see: http://EditorConfig.org 2 | root = true 3 | 4 | [*] 5 | end_of_line = lf 6 | insert_final_newline = true 7 | charset = utf-8 8 | indent_style = space 9 | indent_size = 2 10 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | **/.terraform 2 | # .tfstate files 3 | *.tfstate 4 | *.tfstate.* 5 | *.plan 6 | .DS_Store 7 | .history 8 | .vscode/ 9 | .env 10 | .ssh 11 | **/node_modules/ 12 | .terraform.lock.hcl 13 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Futurice 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /_header_/README.md: -------------------------------------------------------------------------------- 1 | ![Hero](hero.png) 2 | 3 | # Repository containing various Terraform code 4 | 5 | Lots of Terraform recipes for doing things, meant for copying and pasting into projects. 6 | 7 | - [AWS Examples](#aws-examples) 8 | - [Azure Examples](#azure-examples) 9 | - [Google Cloud Platform Examples](#google-cloud-platform-examples) 10 | 11 | # Knowledge-as-code 12 | 13 | Terraform is an ideal knowledge-transfer tool that can communicate the minutiae of using certain technology combinations. We use this at [Futurice](https://futurice.com?source=terraform-examples) to disseminate hard-won learnings across projects and industries, increasing the development velocity for all of our clients. 14 | 15 | # Read more about specific projects 16 | 17 | A few of the recipes have associated blog posts. 18 | 19 | - [Terraform Recipe for WordPress on Fargate](https://futurice.com/blog/terraform-recipe-wordpress-fargate) 20 | - [OpenResty: a Swiss Army Proxy for Serverless; WAL, Slack, Zapier and Auth](https://futurice.com/blog/openresty-a-swiss-army-proxy-for-serverless) 21 | - [Low cost Friends and Family Minecraft server](https://www.futurice.com/blog/friends-and-family-minecraft-server-terraform-recipe) 22 | - [Minimalist BeyondCorp style Identity Aware Proxy for Cloud Run](https://futurice.com/blog/identity-aware-proxy-for-google-cloud-run) 23 | - [Serverless Camunda Business Workflow Engine on Cloud Run](https://www.futurice.com/blog/serverless-camunda-terraform-recipe-using-cloud-run-and-cloud-sql) 24 | - [A Detailed Look at Camunda BPMN Application Development](https://futurice.com/blog/a-detailed-look-at-camunda-bpmn-application-development) 25 | - [Exporting BigQuery to Cloud Memorystore](https://www.futurice.com/blog/bigquery-to-memorystore) 26 | 27 | # Contribution 28 | 29 | External contributions are welcome! All that we ask is that the recipe is interesting, and that it worked at some point. There is no expectation of maintenance (maintained projects should probably have their own repository). No two projects are alike, so we expect most uses of this repository to require customization.
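As an illustration of what that customization typically looks like, here is a minimal, hypothetical sketch of consuming one of the recipes (`aws_domain_redirect`) from your own project. The module name, local path, region and domain values below are placeholders, not part of this repository:

```tf
# Minimal sketch: copy aws/aws_domain_redirect into your project (assumed here
# to live under ./modules/aws_domain_redirect) and override only the inputs you need.
provider "aws" {
  region = "us-east-1" # placeholder region
}

module "docs_redirect" {
  source = "./modules/aws_domain_redirect"

  # Required inputs (example values only)
  redirect_domain = "docs.example.com"
  redirect_url    = "https://readthedocs.org/projects/example"

  # Optional inputs keep their defaults unless overridden
  redirect_permanently = true
}
```

Run `terraform init` and `terraform apply` in that project as usual; see each recipe's own README for its full list of inputs and outputs.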
30 | 31 | To regenerate the readme, run `npm run readme` 32 | -------------------------------------------------------------------------------- /_header_/design.sketch: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/futurice/terraform-examples/c191f7bcb0fa786b68c7b3904b6f3bbb90881fc6/_header_/design.sketch -------------------------------------------------------------------------------- /_header_/hero.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/futurice/terraform-examples/c191f7bcb0fa786b68c7b3904b6f3bbb90881fc6/_header_/hero.png -------------------------------------------------------------------------------- /aws/_header_/README.md: -------------------------------------------------------------------------------- 1 | # AWS Examples -------------------------------------------------------------------------------- /aws/aws_domain_redirect/main.tf: -------------------------------------------------------------------------------- 1 | module "aws_reverse_proxy" { 2 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_reverse_proxy#inputs 3 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v11.0...master 4 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_reverse_proxy?ref=v11.0" 5 | 6 | origin_url = "http://example.com/" # note that this is just a dummy value to satisfy CloudFront, it won't ever be used with the override_* variables in place 7 | site_domain = "${var.redirect_domain}" 8 | name_prefix = "${var.name_prefix}" 9 | comment_prefix = "${var.comment_prefix}" 10 | cloudfront_price_class = "${var.cloudfront_price_class}" 11 | viewer_https_only = "${var.viewer_https_only}" 12 | lambda_logging_enabled = "${var.lambda_logging_enabled}" 13 | tags = "${var.tags}" 14 | 15 | add_response_headers = { 16 | "Strict-Transport-Security" = "${var.redirect_with_hsts ? "max-age=31557600; preload" : ""}" 17 | "Location" = "${var.redirect_url}" 18 | } 19 | 20 | override_response_status = "${var.redirect_permanently ? "301" : "302"}" 21 | override_response_status_description = "${var.redirect_permanently ? "Moved Permanently" : "Found"}" 22 | 23 | override_response_body = < 25 | 26 | 27 | 28 | Redirecting 29 | 30 | 31 |
Redirecting to: ${var.redirect_url}
32 | 33 | EOF 34 | } 35 | -------------------------------------------------------------------------------- /aws/aws_domain_redirect/variables.tf: -------------------------------------------------------------------------------- 1 | variable "redirect_domain" { 2 | description = "Domain which will redirect to the given `redirect_url`; e.g. `\"docs.example.com\"`" 3 | } 4 | 5 | variable "redirect_url" { 6 | description = "The URL this domain redirect should send clients to; e.g. `\"https://readthedocs.org/projects/example\"`" 7 | } 8 | 9 | variable "name_prefix" { 10 | description = "Name prefix to use for objects that need to be created (only lowercase alphanumeric characters and hyphens allowed, for S3 bucket name compatibility)" 11 | default = "aws-domain-redirect---" 12 | } 13 | 14 | variable "comment_prefix" { 15 | description = "This will be included in comments for resources that are created" 16 | default = "Domain redirect: " 17 | } 18 | 19 | variable "cloudfront_price_class" { 20 | description = "Price class to use (`100`, `200` or `\"All\"`, see https://aws.amazon.com/cloudfront/pricing/)" 21 | default = 100 22 | } 23 | 24 | variable "viewer_https_only" { 25 | description = "Set this to `false` if you need to support insecure HTTP access for clients, in addition to HTTPS" 26 | default = true 27 | } 28 | 29 | variable "redirect_permanently" { 30 | description = "Which HTTP status code to use for the redirect; if `true`, uses `301 Moved Permanently`, instead of `302 Found`" 31 | default = false 32 | } 33 | 34 | variable "redirect_with_hsts" { 35 | description = "Whether to send the `Strict-Transport-Security` header with the redirect (recommended for security)" 36 | default = true 37 | } 38 | 39 | variable "lambda_logging_enabled" { 40 | description = "When `true`, writes information about incoming requests to the Lambda function's CloudWatch group" 41 | default = false 42 | } 43 | 44 | variable "tags" { 45 | description = "AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/" 46 | type = "map" 47 | default = {} 48 | } 49 | -------------------------------------------------------------------------------- /aws/aws_ec2_ebs_docker_host/data.tf: -------------------------------------------------------------------------------- 1 | # Access data about available availability zones in the current region 2 | data "aws_availability_zones" "this" {} 3 | 4 | # Retrieve info about the VPC this host should join 5 | 6 | data "aws_vpc" "this" { 7 | default = "${var.vpc_id == "" ? 
true : false}" 8 | id = "${var.vpc_id}" 9 | } 10 | 11 | data "aws_subnet" "this" { 12 | vpc_id = "${data.aws_vpc.this.id}" 13 | availability_zone = "${local.availability_zone}" 14 | } 15 | -------------------------------------------------------------------------------- /aws/aws_ec2_ebs_docker_host/main.tf: -------------------------------------------------------------------------------- 1 | # Create the main EC2 instance 2 | # https://www.terraform.io/docs/providers/aws/r/instance.html 3 | resource "aws_instance" "this" { 4 | instance_type = "${var.instance_type}" 5 | ami = "${var.instance_ami}" 6 | availability_zone = "${local.availability_zone}" 7 | key_name = "${aws_key_pair.this.id}" # the name of the SSH keypair to use for provisioning 8 | vpc_security_group_ids = ["${aws_security_group.this.id}"] 9 | subnet_id = "${data.aws_subnet.this.id}" 10 | user_data = "${sha1(local.reprovision_trigger)}" # this value isn't used by the EC2 instance, but its change will trigger re-creation of the resource 11 | tags = "${merge(var.tags, map("Name", "${var.hostname}"))}" 12 | volume_tags = "${merge(var.tags, map("Name", "${var.hostname}"))}" # give the root EBS volume a name (+ other possible tags) that makes it easier to identify as belonging to this host 13 | 14 | root_block_device { 15 | volume_size = "${var.root_volume_size}" 16 | } 17 | 18 | connection { 19 | user = "${var.ssh_username}" 20 | private_key = "${file("${var.ssh_private_key_path}")}" 21 | agent = false # don't use SSH agent because we have the private key right here 22 | } 23 | 24 | provisioner "remote-exec" { 25 | inline = [ 26 | "sudo hostnamectl set-hostname ${var.hostname}", 27 | "echo 127.0.0.1 ${var.hostname} | sudo tee -a /etc/hosts", # https://askubuntu.com/a/59517 28 | ] 29 | } 30 | 31 | provisioner "remote-exec" { 32 | script = "${path.module}/provision-docker.sh" 33 | } 34 | 35 | provisioner "file" { 36 | source = "${path.module}/provision-swap.sh" 37 | destination = "/home/${var.ssh_username}/provision-swap.sh" 38 | } 39 | 40 | provisioner "remote-exec" { 41 | inline = [ 42 | "sh /home/${var.ssh_username}/provision-swap.sh ${var.swap_file_size} ${var.swap_swappiness}", 43 | "rm /home/${var.ssh_username}/provision-swap.sh", 44 | ] 45 | } 46 | } 47 | 48 | # Attach the separate data volume to the instance, if so configured 49 | 50 | resource "aws_volume_attachment" "this" { 51 | count = "${var.data_volume_id == "" ? 0 : 1}" # only create this resource if an external EBS data volume was provided 52 | device_name = "/dev/xvdh" # note: this depends on the AMI, and can't be arbitrarily changed 53 | instance_id = "${aws_instance.this.id}" 54 | volume_id = "${var.data_volume_id}" 55 | } 56 | 57 | resource "null_resource" "provisioners" { 58 | count = "${var.data_volume_id == "" ? 
0 : 1}" # only create this resource if an external EBS data volume was provided 59 | depends_on = ["aws_volume_attachment.this"] # because we depend on the EBS volume being available 60 | 61 | connection { 62 | host = "${aws_instance.this.public_ip}" 63 | user = "${var.ssh_username}" 64 | private_key = "${file("${var.ssh_private_key_path}")}" 65 | agent = false # don't use SSH agent because we have the private key right here 66 | } 67 | 68 | # When creating the attachment 69 | provisioner "remote-exec" { 70 | script = "${path.module}/provision-ebs.sh" 71 | } 72 | 73 | # When tearing down the attachment 74 | provisioner "remote-exec" { 75 | when = "destroy" 76 | inline = ["sudo umount -v ${aws_volume_attachment.this.device_name}"] 77 | } 78 | } 79 | -------------------------------------------------------------------------------- /aws/aws_ec2_ebs_docker_host/outputs.tf: -------------------------------------------------------------------------------- 1 | output "hostname" { 2 | description = "Hostname by which this service is identified in metrics, logs etc" 3 | value = "${var.hostname}" 4 | } 5 | 6 | output "public_ip" { 7 | description = "Public IP address assigned to the host by EC2" 8 | value = "${aws_instance.this.public_ip}" 9 | } 10 | 11 | output "instance_id" { 12 | description = "AWS ID for the EC2 instance used" 13 | value = "${aws_instance.this.id}" 14 | } 15 | 16 | output "availability_zone" { 17 | description = "AWS Availability Zone in which the EC2 instance was created" 18 | value = "${local.availability_zone}" 19 | } 20 | 21 | output "ssh_username" { 22 | description = "Username that can be used to access the EC2 instance over SSH" 23 | value = "${var.ssh_username}" 24 | } 25 | 26 | output "ssh_private_key_path" { 27 | description = "Path to SSH private key that can be used to access the EC2 instance" 28 | value = "${var.ssh_private_key_path}" 29 | } 30 | 31 | output "ssh_private_key" { 32 | description = "SSH private key that can be used to access the EC2 instance" 33 | value = "${file("${var.ssh_private_key_path}")}" 34 | } 35 | 36 | output "security_group_id" { 37 | description = "Security Group ID, for attaching additional security rules externally" 38 | value = "${aws_security_group.this.id}" 39 | } 40 | -------------------------------------------------------------------------------- /aws/aws_ec2_ebs_docker_host/provision-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # https://docs.docker.com/engine/installation/linux/docker-ce/ubuntu/#install-using-the-convenience-script 4 | curl -fsSL get.docker.com -o get-docker.sh 5 | sudo sh get-docker.sh 6 | rm get-docker.sh 7 | 8 | # Allow using docker without sudo 9 | sudo usermod -aG docker $(whoami) 10 | 11 | # https://success.docker.com/article/how-to-setup-log-rotation-post-installation 12 | echo '{ 13 | "log-driver": "json-file", 14 | "log-opts": { 15 | "max-size": "10m", 16 | "max-file": "5" 17 | } 18 | } 19 | ' | sudo tee /etc/docker/daemon.json 20 | sudo service docker restart # restart the daemon so the settings take effect 21 | -------------------------------------------------------------------------------- /aws/aws_ec2_ebs_docker_host/provision-ebs.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Perform setup 4 | set -x 5 | DEV_NAME="$(lsblk --output NAME --list | tail -n 1)" # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html 6 | DEV_FS_TYPE="ext4" 7 | 
MOUNT_POINT="/data" 8 | set +x 9 | 10 | # Format EBS volume, if not already formatted 11 | sudo file -s "/dev/$DEV_NAME" | grep "$DEV_FS_TYPE" 12 | if [ $? -eq 0 ]; then 13 | echo "File system already exists on /dev/$DEV_NAME, not going to format" 14 | else 15 | echo "No file system on /dev/$DEV_NAME, formatting" 16 | sudo mkfs -t "$DEV_FS_TYPE" "/dev/$DEV_NAME" 17 | fi 18 | 19 | # Wait until we can determine the UUID of the EBS device that was attached 20 | while true; do 21 | uuid="$(ls -la /dev/disk/by-uuid/ | grep $DEV_NAME | sed -e 's/.*\([0-9a-f-]\{36\}\).*/\1/')" # seems more reliable than using blkid :shrug: 22 | if [ ! -z "$uuid" ]; then 23 | echo "EBS device \"$uuid\" found" 24 | break 25 | fi 26 | echo "Waiting for EBS device..." 27 | sleep 1 28 | done 29 | 30 | # Mount EBS volume, and set it to auto-mount after reboots 31 | sudo mkdir "$MOUNT_POINT" 32 | echo "UUID=$uuid $MOUNT_POINT $DEV_FS_TYPE defaults,nofail 0 2" | sudo tee -a /etc/fstab 33 | sudo mount -a 34 | 35 | # List the filesystems, for debugging convenience 36 | df -h 37 | -------------------------------------------------------------------------------- /aws/aws_ec2_ebs_docker_host/provision-swap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | if [ $# -ne 2 ]; then 6 | >&2 echo "usage: provision-swap.sh SWAP_FILE_SIZE SWAPPINESS" 7 | exit 1 8 | fi 9 | 10 | SWAP_FILE_SIZE=$1 11 | SWAPPINESS=$2 12 | 13 | echo "Setting up a swap file (size: $SWAP_FILE_SIZE, swappiness: $SWAPPINESS)..." 14 | 15 | # Create the swap file 16 | sudo fallocate -l ${SWAP_FILE_SIZE} /swapfile 17 | 18 | # Only root should be able to access to this file 19 | sudo chmod 600 /swapfile 20 | 21 | # Define the file as swap space 22 | sudo mkswap /swapfile 23 | 24 | # Enable the swap file, allowing the system to start using it 25 | sudo swapon /swapfile 26 | 27 | # Make the swap file permanent, otherwise, previous settings will be lost on reboot 28 | # Create a backup of the existing fstab, JustInCase(tm) 29 | sudo cp /etc/fstab /etc/fstab.bak 30 | # Add the swap file information at the end of the fstab 31 | echo '/swapfile none swap sw 0 0' | sudo tee -a /etc/fstab 32 | 33 | # Adjust the swappiness 34 | # With the default value of 10, the host will use swap if it has almost no other choice. Value is between 0 and 100. 35 | # 100 will make the host use the swap as much as possible, 0 will make it use only in case of emergency. 36 | # As swap access is slower than RAM access, having a low value here for a server is better. 
37 | sudo sysctl vm.swappiness=${SWAPPINESS} 38 | 39 | # Make this setting permanent, to not lose it on reboot 40 | sudo cp /etc/sysctl.conf /etc/sysctl.conf.bak 41 | echo "vm.swappiness=${SWAPPINESS}" | sudo tee -a /etc/sysctl.conf 42 | -------------------------------------------------------------------------------- /aws/aws_ec2_ebs_docker_host/security.tf: -------------------------------------------------------------------------------- 1 | # Create an SSH key pair for accessing the EC2 instance 2 | resource "aws_key_pair" "this" { 3 | public_key = "${file("${var.ssh_public_key_path}")}" 4 | } 5 | 6 | # Create our default security group to access the instance, over specific protocols 7 | resource "aws_security_group" "this" { 8 | vpc_id = "${data.aws_vpc.this.id}" 9 | tags = "${merge(var.tags, map("Name", "${var.hostname}"))}" 10 | } 11 | 12 | # Incoming SSH & outgoing ANY needs to be allowed for provisioning to work 13 | 14 | resource "aws_security_group_rule" "outgoing_any" { 15 | security_group_id = "${aws_security_group.this.id}" 16 | type = "egress" 17 | from_port = 0 18 | to_port = 0 19 | protocol = "-1" 20 | cidr_blocks = ["0.0.0.0/0"] 21 | } 22 | 23 | resource "aws_security_group_rule" "incoming_ssh" { 24 | security_group_id = "${aws_security_group.this.id}" 25 | type = "ingress" 26 | from_port = 22 27 | to_port = 22 28 | protocol = "tcp" 29 | cidr_blocks = ["0.0.0.0/0"] 30 | } 31 | 32 | # The rest of the security rules are opt-in 33 | 34 | resource "aws_security_group_rule" "incoming_http" { 35 | count = "${var.allow_incoming_http ? 1 : 0}" 36 | security_group_id = "${aws_security_group.this.id}" 37 | type = "ingress" 38 | from_port = 80 39 | to_port = 80 40 | protocol = "tcp" 41 | cidr_blocks = ["0.0.0.0/0"] 42 | } 43 | 44 | resource "aws_security_group_rule" "incoming_https" { 45 | count = "${var.allow_incoming_https ? 1 : 0}" 46 | security_group_id = "${aws_security_group.this.id}" 47 | type = "ingress" 48 | from_port = 443 49 | to_port = 443 50 | protocol = "tcp" 51 | cidr_blocks = ["0.0.0.0/0"] 52 | } 53 | 54 | resource "aws_security_group_rule" "incoming_dns_tcp" { 55 | count = "${var.allow_incoming_dns ? 1 : 0}" 56 | security_group_id = "${aws_security_group.this.id}" 57 | type = "ingress" 58 | from_port = 53 59 | to_port = 53 60 | protocol = "tcp" 61 | cidr_blocks = ["0.0.0.0/0"] 62 | } 63 | 64 | resource "aws_security_group_rule" "incoming_dns_udp" { 65 | count = "${var.allow_incoming_dns ? 
1 : 0}" 66 | security_group_id = "${aws_security_group.this.id}" 67 | type = "ingress" 68 | from_port = 53 69 | to_port = 53 70 | protocol = "udp" 71 | cidr_blocks = ["0.0.0.0/0"] 72 | } 73 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/api_gateway_config.tf: -------------------------------------------------------------------------------- 1 | resource "aws_api_gateway_rest_api" "this" { 2 | name = "${local.prefix_with_domain}" 3 | description = "${var.comment_prefix}${var.api_domain}" 4 | } 5 | 6 | resource "aws_api_gateway_deployment" "this" { 7 | rest_api_id = "${aws_api_gateway_rest_api.this.id}" 8 | 9 | depends_on = [ 10 | "aws_api_gateway_integration.proxy_root", 11 | "aws_api_gateway_integration.proxy_other", 12 | ] 13 | } 14 | 15 | resource "aws_api_gateway_stage" "this" { 16 | stage_name = "${var.stage_name}" 17 | description = "${var.comment_prefix}${var.api_domain}" 18 | rest_api_id = "${aws_api_gateway_rest_api.this.id}" 19 | deployment_id = "${aws_api_gateway_deployment.this.id}" 20 | tags = "${var.tags}" 21 | } 22 | 23 | resource "aws_api_gateway_method_settings" "this" { 24 | rest_api_id = "${aws_api_gateway_rest_api.this.id}" 25 | stage_name = "${aws_api_gateway_stage.this.stage_name}" 26 | method_path = "*/*" 27 | 28 | settings { 29 | metrics_enabled = "${var.api_gateway_cloudwatch_metrics}" 30 | logging_level = "${var.api_gateway_logging_level}" 31 | data_trace_enabled = "${var.api_gateway_logging_level == "OFF" ? false : true}" 32 | throttling_rate_limit = "${var.throttling_rate_limit}" 33 | throttling_burst_limit = "${var.throttling_burst_limit}" 34 | } 35 | } 36 | 37 | resource "aws_api_gateway_domain_name" "this" { 38 | domain_name = "${var.api_domain}" 39 | regional_certificate_arn = "${aws_acm_certificate_validation.this.certificate_arn}" 40 | 41 | endpoint_configuration { 42 | types = ["REGIONAL"] 43 | } 44 | } 45 | 46 | resource "aws_api_gateway_base_path_mapping" "this" { 47 | api_id = "${aws_api_gateway_rest_api.this.id}" 48 | stage_name = "${aws_api_gateway_stage.this.stage_name}" 49 | domain_name = "${aws_api_gateway_domain_name.this.domain_name}" 50 | } 51 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/api_gateway_resources.tf: -------------------------------------------------------------------------------- 1 | # Add root resource to the API (it it needs to be included separately from the "proxy" resource defined below), which forwards to our Lambda: 2 | 3 | resource "aws_api_gateway_method" "proxy_root" { 4 | rest_api_id = "${aws_api_gateway_rest_api.this.id}" 5 | resource_id = "${aws_api_gateway_rest_api.this.root_resource_id}" 6 | http_method = "ANY" 7 | authorization = "NONE" 8 | } 9 | 10 | resource "aws_api_gateway_integration" "proxy_root" { 11 | rest_api_id = "${aws_api_gateway_rest_api.this.id}" 12 | resource_id = "${aws_api_gateway_method.proxy_root.resource_id}" 13 | http_method = "${aws_api_gateway_method.proxy_root.http_method}" 14 | integration_http_method = "POST" 15 | type = "AWS_PROXY" 16 | uri = "${local.function_invoke_arn}" 17 | } 18 | 19 | # Add a "proxy" resource, that matches all paths (except the root, defined above) and forwards them to our Lambda: 20 | 21 | resource "aws_api_gateway_resource" "proxy_other" { 22 | rest_api_id = "${aws_api_gateway_rest_api.this.id}" 23 | parent_id = "${aws_api_gateway_rest_api.this.root_resource_id}" 24 | path_part = "{proxy+}" 25 | } 26 | 27 | resource "aws_api_gateway_method" "proxy_other" { 
28 | rest_api_id = "${aws_api_gateway_rest_api.this.id}" 29 | resource_id = "${aws_api_gateway_resource.proxy_other.id}" 30 | http_method = "ANY" 31 | authorization = "NONE" 32 | } 33 | 34 | resource "aws_api_gateway_integration" "proxy_other" { 35 | rest_api_id = "${aws_api_gateway_rest_api.this.id}" 36 | resource_id = "${aws_api_gateway_method.proxy_other.resource_id}" 37 | http_method = "${aws_api_gateway_method.proxy_other.http_method}" 38 | integration_http_method = "POST" 39 | type = "AWS_PROXY" 40 | uri = "${local.function_invoke_arn}" 41 | } 42 | 43 | resource "aws_api_gateway_method_response" "proxy_other" { 44 | rest_api_id = "${aws_api_gateway_rest_api.this.id}" 45 | resource_id = "${aws_api_gateway_resource.proxy_other.id}" 46 | http_method = "${aws_api_gateway_method.proxy_other.http_method}" 47 | status_code = "200" 48 | 49 | response_models = { 50 | "application/json" = "Empty" 51 | } 52 | } 53 | 54 | resource "aws_api_gateway_integration_response" "proxy_other" { 55 | depends_on = ["aws_api_gateway_integration.proxy_other"] 56 | rest_api_id = "${aws_api_gateway_rest_api.this.id}" 57 | resource_id = "${aws_api_gateway_resource.proxy_other.id}" 58 | http_method = "${aws_api_gateway_method.proxy_other.http_method}" 59 | status_code = "${aws_api_gateway_method_response.proxy_other.status_code}" 60 | 61 | response_templates = { 62 | "application/json" = "" 63 | } 64 | } 65 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/certificate.tf: -------------------------------------------------------------------------------- 1 | # Generate a certificate for the domain automatically using ACM 2 | # https://www.terraform.io/docs/providers/aws/r/acm_certificate.html 3 | resource "aws_acm_certificate" "this" { 4 | domain_name = "${var.api_domain}" 5 | validation_method = "DNS" # the required records are created below 6 | tags = "${merge(var.tags, map("Name", "${var.comment_prefix}${var.api_domain}"))}" 7 | } 8 | 9 | # Add the DNS records needed by the ACM validation process 10 | resource "aws_route53_record" "cert_validation" { 11 | name = "${aws_acm_certificate.this.domain_validation_options.0.resource_record_name}" 12 | type = "${aws_acm_certificate.this.domain_validation_options.0.resource_record_type}" 13 | zone_id = "${data.aws_route53_zone.this.zone_id}" 14 | records = ["${aws_acm_certificate.this.domain_validation_options.0.resource_record_value}"] 15 | ttl = 60 16 | } 17 | 18 | # Request a validation for the cert with ACM 19 | resource "aws_acm_certificate_validation" "this" { 20 | certificate_arn = "${aws_acm_certificate.this.arn}" 21 | validation_record_fqdns = ["${aws_route53_record.cert_validation.fqdn}"] 22 | } 23 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "this" { 2 | name = "${replace("${var.api_domain}", "/.*\\b(\\w+\\.\\w+)\\.?$/", "$1")}" # e.g. 
"foo.example.com" => "example.com" 3 | } 4 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/example-project/.gitignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/example-project/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Exit if any of the following commands exit with non-0, and echo our commands back 4 | set -ex 5 | 6 | # For running npm "binaries" 7 | PATH=$PATH:./node_modules/.bin 8 | 9 | # Check that we're running the correct version of node 10 | check-node-version --package 11 | 12 | # Compile TypeScript into "temp" (defined is tsconfig.json) 13 | tsc 14 | 15 | # Install production dependencies under "temp" 16 | cp package*.json temp 17 | (cd temp && npm install --production) 18 | 19 | # Create Lambda zipfile under "dist" 20 | (cd temp && zip -r ../dist/lambda.zip *) 21 | 22 | # Clean up 23 | rm -rf temp 24 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/example-project/dist/lambda.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/futurice/terraform-examples/c191f7bcb0fa786b68c7b3904b6f3bbb90881fc6/aws/aws_lambda_api/example-project/dist/lambda.zip -------------------------------------------------------------------------------- /aws/aws_lambda_api/example-project/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "engines": { 3 | "npm": "5.6.0", 4 | "node": "8.10.0" 5 | }, 6 | "devDependencies": { 7 | "@types/aws-lambda": "^8.10.23", 8 | "@types/node": "^8.10.45", 9 | "check-node-version": "^3.3.0", 10 | "typescript": "^3.4.1" 11 | }, 12 | "dependencies": { 13 | "one-liner-joke": "^1.2.0" 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/example-project/src/index.ts: -------------------------------------------------------------------------------- 1 | import { APIGatewayProxyHandler } from "aws-lambda"; 2 | import { getRandomJoke } from "one-liner-joke"; 3 | 4 | export const handler: APIGatewayProxyHandler = () => { 5 | return Promise.resolve({ 6 | statusCode: 200, 7 | body: JSON.stringify(getRandomJoke(), null, 2) 8 | }); 9 | }; 10 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/example-project/src/types.d.ts: -------------------------------------------------------------------------------- 1 | declare module 'one-liner-joke'; 2 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/example-project/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es2017", 4 | "module": "commonjs", 5 | "strict": true, 6 | "esModuleInterop": true, 7 | "typeRoots": ["node_modules/@types"], 8 | "types": ["node", "aws-lambda"], 9 | "lib": ["es2017"], 10 | "outDir": "temp" 11 | }, 12 | "include": ["src/**/*.ts"], 13 | "exclude": ["node_modules"] 14 | } 15 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/main.tf: -------------------------------------------------------------------------------- 1 | # Based on: 
https://www.terraform.io/docs/providers/aws/guides/serverless-with-aws-lambda-and-api-gateway.html 2 | # See also: https://github.com/hashicorp/terraform/issues/10157 3 | # See also: https://github.com/carrot/terraform-api-gateway-cors-module/ 4 | 5 | # This aws_lambda_function is used when invoked with a local zipfile 6 | resource "aws_lambda_function" "local_zipfile" { 7 | count = "${var.function_s3_bucket == "" ? 1 : 0}" 8 | 9 | # These are SPECIFIC to the deployment method: 10 | filename = "${var.function_zipfile}" 11 | source_code_hash = "${var.function_s3_bucket == "" ? "${base64sha256(file("${var.function_zipfile}"))}" : ""}" 12 | 13 | # These are the SAME for both: 14 | description = "${var.comment_prefix}${var.api_domain}" 15 | function_name = "${local.prefix_with_domain}" 16 | handler = "${var.function_handler}" 17 | runtime = "${var.function_runtime}" 18 | timeout = "${var.function_timeout}" 19 | memory_size = "${var.memory_size}" 20 | role = "${aws_iam_role.this.arn}" 21 | tags = "${var.tags}" 22 | 23 | environment { 24 | variables = "${var.function_env_vars}" 25 | } 26 | } 27 | 28 | # This aws_lambda_function is used when invoked with a zipfile in S3 29 | resource "aws_lambda_function" "s3_zipfile" { 30 | count = "${var.function_s3_bucket == "" ? 0 : 1}" 31 | 32 | # These are SPECIFIC to the deployment method: 33 | s3_bucket = "${var.function_s3_bucket}" 34 | s3_key = "${var.function_zipfile}" 35 | 36 | # These are the SAME for both: 37 | description = "${var.comment_prefix}${var.api_domain}" 38 | function_name = "${local.prefix_with_domain}" 39 | handler = "${var.function_handler}" 40 | runtime = "${var.function_runtime}" 41 | timeout = "${var.function_timeout}" 42 | memory_size = "${var.memory_size}" 43 | role = "${aws_iam_role.this.arn}" 44 | tags = "${var.tags}" 45 | 46 | environment { 47 | variables = "${var.function_env_vars}" 48 | } 49 | } 50 | 51 | # Terraform isn't particularly helpful when you want to depend on the existence of a resource which may have count 0 or 1, like our functions. 52 | # This is a hacky way of referring to the properties of the function, regardless of which one got created. 
53 | # https://github.com/hashicorp/terraform/issues/16580#issuecomment-342573652 54 | locals { 55 | function_id = "${element(concat(aws_lambda_function.local_zipfile.*.id, list("")), 0)}${element(concat(aws_lambda_function.s3_zipfile.*.id, list("")), 0)}" 56 | function_arn = "${element(concat(aws_lambda_function.local_zipfile.*.arn, list("")), 0)}${element(concat(aws_lambda_function.s3_zipfile.*.arn, list("")), 0)}" 57 | function_invoke_arn = "${element(concat(aws_lambda_function.local_zipfile.*.invoke_arn, list("")), 0)}${element(concat(aws_lambda_function.s3_zipfile.*.invoke_arn, list("")), 0)}" 58 | } 59 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/outputs.tf: -------------------------------------------------------------------------------- 1 | output "function_name" { 2 | description = "This is the unique name of the Lambda function that was created" 3 | value = "${local.function_id}" 4 | } 5 | 6 | output "api_gw_invoke_url" { 7 | description = "This URL can be used to invoke the Lambda through the API Gateway" 8 | value = "${aws_api_gateway_deployment.this.invoke_url}" 9 | } 10 | -------------------------------------------------------------------------------- /aws/aws_lambda_api/permissions.tf: -------------------------------------------------------------------------------- 1 | # Allow Lambda to invoke our functions: 2 | resource "aws_iam_role" "this" { 3 | name = "${local.prefix_with_domain}" 4 | tags = "${var.tags}" 5 | 6 | assume_role_policy = < this is 100% safe to override with your own env, should you need one 51 | aws_lambda_api = "" 52 | } 53 | } 54 | 55 | variable "stage_name" { 56 | description = "Name of the single stage created for the API on API Gateway" # we're not using the deployment features of API Gateway, so a single static stage is fine 57 | default = "default" 58 | } 59 | 60 | variable "lambda_logging_enabled" { 61 | description = "When true, writes any console output to the Lambda function's CloudWatch group" 62 | default = false 63 | } 64 | 65 | variable "api_gateway_logging_level" { 66 | description = "Either `\"OFF\"`, `\"INFO\"` or `\"ERROR\"`; note that this requires having a CloudWatch log role ARN globally in API Gateway Settings" 67 | default = "OFF" 68 | } 69 | 70 | variable "api_gateway_cloudwatch_metrics" { 71 | description = "When true, sends metrics to CloudWatch" 72 | default = false 73 | } 74 | 75 | variable "tags" { 76 | description = "AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/" 77 | type = "map" 78 | default = {} 79 | } 80 | 81 | variable "throttling_rate_limit" { 82 | description = "How many sustained requests per second should the API process at most; see https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-request-throttling.html" 83 | default = 10000 84 | } 85 | 86 | variable "throttling_burst_limit" { 87 | description = "How many burst requests should the API process at most; see https://docs.aws.amazon.com/apigateway/latest/developerguide/api-gateway-request-throttling.html" 88 | default = 5000 89 | } 90 | 91 | locals { 92 | prefix_with_domain = "${var.name_prefix}${replace("${var.api_domain}", "/[^a-z0-9-]+/", "-")}" # only lowercase alphanumeric characters and hyphens are allowed in e.g. 
S3 bucket names 93 | } 94 | -------------------------------------------------------------------------------- /aws/aws_lambda_cronjob/main.tf: -------------------------------------------------------------------------------- 1 | # This aws_lambda_function is used when invoked with a local zipfile 2 | resource "aws_lambda_function" "local_zipfile" { 3 | count = "${var.function_s3_bucket == "" ? 1 : 0}" 4 | 5 | # These are SPECIFIC to the deployment method: 6 | filename = "${var.function_zipfile}" 7 | source_code_hash = "${var.function_s3_bucket == "" ? "${base64sha256(file("${var.function_zipfile}"))}" : ""}" 8 | 9 | # These are the SAME for both: 10 | description = "${var.comment_prefix}${var.cronjob_name}" 11 | function_name = "${local.prefix_with_name}" 12 | handler = "${var.function_handler}" 13 | runtime = "${var.function_runtime}" 14 | timeout = "${var.function_timeout}" 15 | memory_size = "${var.memory_size}" 16 | role = "${aws_iam_role.this.arn}" 17 | tags = "${var.tags}" 18 | 19 | environment { 20 | variables = "${var.function_env_vars}" 21 | } 22 | } 23 | 24 | # This aws_lambda_function is used when invoked with a zipfile in S3 25 | resource "aws_lambda_function" "s3_zipfile" { 26 | count = "${var.function_s3_bucket == "" ? 0 : 1}" 27 | 28 | # These are SPECIFIC to the deployment method: 29 | s3_bucket = "${var.function_s3_bucket}" 30 | s3_key = "${var.function_zipfile}" 31 | 32 | # These are the SAME for both: 33 | description = "${var.comment_prefix}${var.cronjob_name}" 34 | function_name = "${local.prefix_with_name}" 35 | handler = "${var.function_handler}" 36 | runtime = "${var.function_runtime}" 37 | timeout = "${var.function_timeout}" 38 | memory_size = "${var.memory_size}" 39 | role = "${aws_iam_role.this.arn}" 40 | tags = "${var.tags}" 41 | 42 | environment { 43 | variables = "${var.function_env_vars}" 44 | } 45 | } 46 | 47 | # Terraform isn't particularly helpful when you want to depend on the existence of a resource which may have count 0 or 1, like our functions. 48 | # This is a hacky way of referring to the properties of the function, regardless of which one got created. 
49 | # https://github.com/hashicorp/terraform/issues/16580#issuecomment-342573652 50 | locals { 51 | function_id = "${element(concat(aws_lambda_function.local_zipfile.*.id, list("")), 0)}${element(concat(aws_lambda_function.s3_zipfile.*.id, list("")), 0)}" 52 | function_arn = "${element(concat(aws_lambda_function.local_zipfile.*.arn, list("")), 0)}${element(concat(aws_lambda_function.s3_zipfile.*.arn, list("")), 0)}" 53 | function_invoke_arn = "${element(concat(aws_lambda_function.local_zipfile.*.invoke_arn, list("")), 0)}${element(concat(aws_lambda_function.s3_zipfile.*.invoke_arn, list("")), 0)}" 54 | } 55 | -------------------------------------------------------------------------------- /aws/aws_lambda_cronjob/outputs.tf: -------------------------------------------------------------------------------- 1 | output "function_name" { 2 | description = "This is the unique name of the Lambda function that was created" 3 | value = "${local.function_id}" 4 | } 5 | -------------------------------------------------------------------------------- /aws/aws_lambda_cronjob/permissions.tf: -------------------------------------------------------------------------------- 1 | # Allow Lambda to invoke our functions: 2 | 3 | resource "aws_iam_role" "this" { 4 | name = "${local.prefix_with_name}" 5 | tags = "${var.tags}" 6 | 7 | assume_role_policy = < this is 100% safe to override with your own env, should you need one 56 | aws_lambda_cronjob = "" 57 | } 58 | } 59 | 60 | variable "lambda_logging_enabled" { 61 | description = "When true, writes any console output to the Lambda function's CloudWatch group" 62 | default = false 63 | } 64 | 65 | variable "tags" { 66 | description = "AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/" 67 | type = "map" 68 | default = {} 69 | } 70 | 71 | locals { 72 | prefix_with_name = "${var.name_prefix}${replace("${var.cronjob_name}", "/[^a-z0-9-]+/", "-")}" # only lowercase alphanumeric characters and hyphens are allowed in e.g. S3 bucket names 73 | } 74 | -------------------------------------------------------------------------------- /aws/aws_mailgun_domain/README.md: -------------------------------------------------------------------------------- 1 | # aws_mailgun_domain 2 | 3 | Uses the [Terraform Mailgun provider](https://www.terraform.io/docs/providers/mailgun/index.html) to set up and verify a domain, so you can use [Mailgun](https://www.mailgun.com/) for sending email from it. 4 | 5 | ## Example 6 | 7 | Assuming you have the [AWS provider](https://www.terraform.io/docs/providers/aws/index.html) set up, and a DNS zone for `example.com` configured on Route 53: 8 | 9 | ```tf 10 | variable "mailgun_api_key" { 11 | description = "Your Mailgun API key" 12 | } 13 | 14 | variable "demo_email_address" { 15 | description = "Enter your email (e.g. 
me@gmail.com), so you'll get a copy-pasteable curl command for testing the API immediately" 16 | } 17 | 18 | # Configure the Mailgun provider 19 | # https://www.terraform.io/docs/providers/mailgun/index.html 20 | provider "mailgun" { 21 | version = "~> 0.1" 22 | api_key = "${var.mailgun_api_key}" 23 | } 24 | 25 | module "my_mailgun_domain" { 26 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_mailgun_domain#inputs 27 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v11.0...master 28 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_mailgun_domain?ref=v11.0" 29 | 30 | mail_domain = "example.com" 31 | smtp_password = "SECRET SECRET SECRET" 32 | } 33 | 34 | output "demo_curl_command" { 35 | value = "curl -s --user 'api:${var.mailgun_api_key}' ${module.my_mailgun_domain.api_base_url}messages -F from='Demo ' -F to='${var.demo_email_address}' -F subject='Hello' -F text='Testing, testing...'" 36 | } 37 | ``` 38 | 39 | Note that due to [a bug in Terraform](https://github.com/hashicorp/terraform/issues/12570), at the time of writing, you need to apply in two parts: 40 | 41 | ```bash 42 | $ terraform apply -target module.my_mailgun_domain.mailgun_domain.this 43 | ... 44 | $ terraform apply 45 | ... 46 | ``` 47 | 48 | After the `terraform apply`, you either need to wait a bit, or if you're impatient, log into your Mailgun control panel and manually trigger the DNS verification. If you're too quick, running the command given by `demo_curl_command` will give you something like: 49 | 50 | ```json 51 | { 52 | "message": "The domain is unverified and requires DNS configuration. Log in to your control panel to view required DNS records." 53 | } 54 | ``` 55 | 56 | After Mailgun is happy with your DNS records, however, you should get something like: 57 | 58 | ```json 59 | { 60 | "id": "<20190401125249.1.XXXYYYZZZ@example.com>", 61 | "message": "Queued. Thank you." 62 | } 63 | ``` 64 | 65 | ...and you should receive the test email shortly. 66 | 67 | 68 | ## Inputs 69 | 70 | | Name | Description | Type | Default | Required | 71 | |------|-------------|:----:|:-----:|:-----:| 72 | | mail_domain | Domain which you want to use for sending/receiving email (e.g. `"example.com"`) | string | n/a | yes | 73 | | smtp_password | Password that Mailgun will require for sending out SMPT mail via this domain | string | n/a | yes | 74 | | spam_action | See https://www.terraform.io/docs/providers/mailgun/r/domain.html#spam_action | string | `"disabled"` | no | 75 | | tags | AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/ | map | `` | no | 76 | | wildcard | See https://www.terraform.io/docs/providers/mailgun/r/domain.html#wildcard | string | `"false"` | no | 77 | 78 | ## Outputs 79 | 80 | | Name | Description | 81 | |------|-------------| 82 | | api_base_url | Base URL of the Mailgun API for your domain | 83 | | mail_domain | Domain which you want to use for sending/receiving email (e.g. `"example.com"`) | 84 | 85 | -------------------------------------------------------------------------------- /aws/aws_mailgun_domain/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "this" { 2 | name = "${replace("${var.mail_domain}", "/.*\\b(\\w+\\.\\w+)\\.?$/", "$1")}" # e.g. 
"foo.example.com" => "example.com" 3 | } 4 | -------------------------------------------------------------------------------- /aws/aws_mailgun_domain/main.tf: -------------------------------------------------------------------------------- 1 | # Create a new Mailgun domain 2 | resource "mailgun_domain" "this" { 3 | name = "${var.mail_domain}" 4 | spam_action = "${var.spam_action}" 5 | wildcard = "${var.wildcard}" 6 | smtp_password = "${var.smtp_password}" 7 | } 8 | 9 | # DNS records for domain setup & verification are below 10 | # See https://app.mailgun.com/app/domains//verify for these instructions 11 | 12 | resource "aws_route53_record" "sending" { 13 | count = "${length(mailgun_domain.this.sending_records)}" 14 | 15 | zone_id = "${data.aws_route53_zone.this.zone_id}" 16 | name = "${lookup(mailgun_domain.this.sending_records[count.index], "name")}" 17 | type = "${lookup(mailgun_domain.this.sending_records[count.index], "record_type")}" 18 | ttl = 300 19 | 20 | records = [ 21 | "${lookup(mailgun_domain.this.sending_records[count.index], "value")}", 22 | ] 23 | } 24 | 25 | resource "aws_route53_record" "receiving" { 26 | zone_id = "${data.aws_route53_zone.this.zone_id}" 27 | name = "${var.mail_domain}" 28 | type = "${lookup(mailgun_domain.this.receiving_records[0], "record_type")}" 29 | ttl = 300 30 | 31 | records = [ 32 | "${lookup(mailgun_domain.this.receiving_records[0], "priority")} ${lookup(mailgun_domain.this.receiving_records[0], "value")}", 33 | "${lookup(mailgun_domain.this.receiving_records[1], "priority")} ${lookup(mailgun_domain.this.receiving_records[1], "value")}", 34 | ] 35 | } 36 | -------------------------------------------------------------------------------- /aws/aws_mailgun_domain/outputs.tf: -------------------------------------------------------------------------------- 1 | output "mail_domain" { 2 | value = "${var.mail_domain}" 3 | description = "Domain which you want to use for sending/receiving email (e.g. `\"example.com\"`)" 4 | } 5 | 6 | output "api_base_url" { 7 | value = "https://api.mailgun.net/v3/${var.mail_domain}/" 8 | description = "Base URL of the Mailgun API for your domain" 9 | } 10 | -------------------------------------------------------------------------------- /aws/aws_mailgun_domain/variables.tf: -------------------------------------------------------------------------------- 1 | variable "mail_domain" { 2 | description = "Domain which you want to use for sending/receiving email (e.g. 
`\"example.com\"`)" 3 | } 4 | 5 | variable "smtp_password" { 6 | description = "Password that Mailgun will require for sending out SMPT mail via this domain" 7 | } 8 | 9 | variable "spam_action" { 10 | description = "See https://www.terraform.io/docs/providers/mailgun/r/domain.html#spam_action" 11 | default = "disabled" 12 | } 13 | 14 | variable "wildcard" { 15 | description = "See https://www.terraform.io/docs/providers/mailgun/r/domain.html#wildcard" 16 | default = false 17 | } 18 | 19 | variable "tags" { 20 | description = "AWS Tags to add to all resources created (where possible); see https://aws.amazon.com/answers/account-management/aws-tagging-strategies/" 21 | type = "map" 22 | default = {} 23 | } 24 | -------------------------------------------------------------------------------- /aws/aws_reverse_proxy/.gitignore: -------------------------------------------------------------------------------- 1 | /lambda.zip 2 | -------------------------------------------------------------------------------- /aws/aws_reverse_proxy/.prettierrc: -------------------------------------------------------------------------------- 1 | { 2 | "singleQuote": true, 3 | "trailingComma": "all", 4 | "bracketSpacing": true, 5 | "jsxBracketSameLine": false, 6 | "printWidth": 120 7 | } 8 | -------------------------------------------------------------------------------- /aws/aws_reverse_proxy/certificate.tf: -------------------------------------------------------------------------------- 1 | # Generate a certificate for the domain automatically using ACM 2 | # https://www.terraform.io/docs/providers/aws/r/acm_certificate.html 3 | resource "aws_acm_certificate" "this" { 4 | provider = "aws.us_east_1" # because ACM is only available in the "us-east-1" region 5 | domain_name = "${var.site_domain}" 6 | validation_method = "DNS" # the required records are created below 7 | tags = "${merge(var.tags, map("Name", "${var.comment_prefix}${var.site_domain}"))}" 8 | } 9 | 10 | # Add the DNS records needed by the ACM validation process 11 | resource "aws_route53_record" "cert_validation" { 12 | name = "${aws_acm_certificate.this.domain_validation_options.0.resource_record_name}" 13 | type = "${aws_acm_certificate.this.domain_validation_options.0.resource_record_type}" 14 | zone_id = "${data.aws_route53_zone.this.zone_id}" 15 | records = ["${aws_acm_certificate.this.domain_validation_options.0.resource_record_value}"] 16 | ttl = 60 17 | } 18 | 19 | # Request a validation for the cert with ACM 20 | resource "aws_acm_certificate_validation" "this" { 21 | provider = "aws.us_east_1" # because ACM is only available in the "us-east-1" region 22 | certificate_arn = "${aws_acm_certificate.this.arn}" 23 | validation_record_fqdns = ["${aws_route53_record.cert_validation.fqdn}"] 24 | } 25 | -------------------------------------------------------------------------------- /aws/aws_reverse_proxy/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "this" { 2 | name = "${replace("${var.site_domain}", "/.*\\b(\\w+\\.\\w+)\\.?$/", "$1")}" # e.g. 
"foo.example.com" => "example.com" 3 | } 4 | -------------------------------------------------------------------------------- /aws/aws_reverse_proxy/lambda.tpl.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | 3 | // Lambda@Edge doesn't support environment variables, so this config will be expanded from a Terraform template 4 | const config = ${config}; 5 | const addResponseHeaders = ${add_response_headers}; 6 | const validCredentials = config.basic_auth_username + ':' + config.basic_auth_password; 7 | const validAuthHeader = 'Basic ' + new Buffer(validCredentials).toString('base64'); 8 | 9 | log('aws_static_site.config', { config, addResponseHeaders }); 10 | 11 | // Handle incoming request from the client 12 | exports.viewer_request = (event, context, callback) => { 13 | const request = event.Records[0].cf.request; 14 | const headers = request.headers; 15 | 16 | log('aws_static_site.viewer_request.before', request); 17 | 18 | if (config.override_response_status && config.override_response_status_description && config.override_response_body) { 19 | const response = { 20 | status: config.override_response_status, 21 | statusDescription: config.override_response_status_description, 22 | body: config.override_response_body, 23 | headers: formatHeaders(addResponseHeaders), 24 | }; 25 | callback(null, response); // reply to the client with the overridden content, and don't forward request to origin 26 | log('aws_static_site.viewer_request.after', response); 27 | } else if ( 28 | (config.basic_auth_username || config.basic_auth_password) && 29 | (typeof headers.authorization == 'undefined' || headers.authorization[0].value != validAuthHeader) 30 | ) { 31 | const response = { 32 | status: '401', 33 | statusDescription: 'Unauthorized', 34 | body: config.basic_auth_body, 35 | headers: { 36 | ...formatHeaders(addResponseHeaders), 37 | ...formatHeaders({ 38 | 'WWW-Authenticate': 'Basic realm="' + config.basic_auth_realm + '", charset="UTF-8"', 39 | }), 40 | }, 41 | }; 42 | callback(null, response); // reply to the client with Unauthorized, and don't forward request to origin 43 | log('aws_static_site.viewer_request.after', response); 44 | } else { 45 | callback(null, request); // allow the request to be forwarded to origin normally 46 | log('aws_static_site.viewer_request.after', 'OK'); 47 | } 48 | }; 49 | 50 | // Handle outgoing response to the client 51 | exports.viewer_response = (event, context, callback) => { 52 | const response = event.Records[0].cf.response; 53 | 54 | log('aws_static_site.viewer_response.before', response); 55 | 56 | response.headers = { 57 | ...response.headers, 58 | ...formatHeaders(addResponseHeaders), 59 | }; 60 | 61 | log('aws_static_site.viewer_response.after', response); 62 | 63 | callback(null, response); 64 | }; 65 | 66 | // Outputs incoming/outgoing requests for debugging 67 | function log(label, meta) { 68 | console.log(label, require('util').inspect(meta, false, 10, false)); 69 | } 70 | 71 | // Converts a set of headers into the rather-verbose format CloudFront expects; headers with "" as the value are dropped 72 | function formatHeaders(headers) { 73 | return Object.keys(headers) 74 | .filter(next => headers[next] !== '') 75 | .reduce( 76 | (memo, next) => 77 | Object.assign(memo, { 78 | [next.toLowerCase()]: [{ key: next, value: headers[next] }], 79 | }), 80 | {}, 81 | ); 82 | } 83 | -------------------------------------------------------------------------------- /aws/aws_reverse_proxy/outputs.tf: 
-------------------------------------------------------------------------------- 1 | output "cloudfront_id" { 2 | description = "The ID of the CloudFront distribution that's used for hosting the content" 3 | value = "${aws_cloudfront_distribution.this.id}" 4 | } 5 | 6 | output "site_domain" { 7 | description = "Domain on which the site will be made available" 8 | value = "${var.site_domain}" 9 | } 10 | -------------------------------------------------------------------------------- /aws/aws_reverse_proxy/route53.tf: -------------------------------------------------------------------------------- 1 | # Add an IPv4 DNS record pointing to the CloudFront distribution 2 | resource "aws_route53_record" "ipv4" { 3 | zone_id = "${data.aws_route53_zone.this.zone_id}" 4 | name = "${var.site_domain}" 5 | type = "A" 6 | 7 | alias { 8 | name = "${aws_cloudfront_distribution.this.domain_name}" 9 | zone_id = "${aws_cloudfront_distribution.this.hosted_zone_id}" 10 | evaluate_target_health = false 11 | } 12 | } 13 | 14 | # Add an IPv6 DNS record pointing to the CloudFront distribution 15 | resource "aws_route53_record" "ipv6" { 16 | zone_id = "${data.aws_route53_zone.this.zone_id}" 17 | name = "${var.site_domain}" 18 | type = "AAAA" 19 | 20 | alias { 21 | name = "${aws_cloudfront_distribution.this.domain_name}" 22 | zone_id = "${aws_cloudfront_distribution.this.hosted_zone_id}" 23 | evaluate_target_health = false 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /aws/aws_static_site/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_route53_zone" "this" { 2 | name = "${replace("${var.site_domain}", "/.*\\b(\\w+\\.\\w+)\\.?$/", "$1")}" # e.g. "foo.example.com" => "example.com" 3 | } 4 | 5 | resource "random_string" "s3_read_password" { 6 | length = 32 7 | special = false 8 | } 9 | -------------------------------------------------------------------------------- /aws/aws_static_site/main.tf: -------------------------------------------------------------------------------- 1 | module "aws_reverse_proxy" { 2 | # Available inputs: https://github.com/futurice/terraform-utils/tree/master/aws_reverse_proxy#inputs 3 | # Check for updates: https://github.com/futurice/terraform-utils/compare/v11.0...master 4 | source = "git::ssh://git@github.com/futurice/terraform-utils.git//aws_reverse_proxy?ref=v11.0" 5 | 6 | # S3 website endpoints are only available over plain HTTP 7 | origin_url = "http://${local.bucket_domain_name}/" 8 | 9 | # Our S3 bucket will only allow requests containing this custom header 10 | origin_custom_header_name = "User-Agent" 11 | 12 | # Somewhat perplexingly, this is the "correct" way to ensure users can't bypass CloudFront on their way to S3 resources 13 | # https://abridge2devnull.com/posts/2018/01/restricting-access-to-a-cloudfront-s3-website-origin/ 14 | origin_custom_header_value = "${random_string.s3_read_password.result}" 15 | 16 | site_domain = "${var.site_domain}" 17 | name_prefix = "${var.name_prefix}" 18 | comment_prefix = "${var.comment_prefix}" 19 | cloudfront_price_class = "${var.cloudfront_price_class}" 20 | viewer_https_only = "${var.viewer_https_only}" 21 | cache_ttl_override = "${var.cache_ttl_override}" 22 | default_root_object = "${var.default_root_object}" 23 | add_response_headers = "${var.add_response_headers}" 24 | basic_auth_username = "${var.basic_auth_username}" 25 | basic_auth_password = "${var.basic_auth_password}" 26 | basic_auth_realm = "${var.basic_auth_realm}" 27 | 
basic_auth_body = "${var.basic_auth_body}" 28 | lambda_logging_enabled = "${var.lambda_logging_enabled}" 29 | tags = "${var.tags}" 30 | } 31 | -------------------------------------------------------------------------------- /aws/aws_static_site/outputs.tf: -------------------------------------------------------------------------------- 1 | output "bucket_name" { 2 | description = "The name of the S3 bucket that's used for hosting the content (either auto-generated or externally provided)" 3 | 4 | # Terraform isn't particularly helpful when you want to depend on the existence of a resource which may have count 0 or 1, like our bucket. 5 | # This is a hacky way of only resolving the bucket_name output once the bucket exists (if created by us). 6 | # https://github.com/hashicorp/terraform/issues/16580#issuecomment-342573652 7 | value = "${local.bucket_name}${replace("${element(concat(aws_s3_bucket.this.*.bucket, list("")), 0)}", "/.*/", "")}" 8 | } 9 | 10 | output "cloudfront_id" { 11 | description = "The ID of the CloudFront distribution that's used for hosting the content" 12 | value = "${module.aws_reverse_proxy.cloudfront_id}" 13 | } 14 | 15 | output "site_domain" { 16 | description = "Domain on which the static site will be made available" 17 | value = "${var.site_domain}" 18 | } 19 | 20 | output "bucket_domain_name" { 21 | description = "Full S3 domain name for the bucket used for hosting the content (e.g. `\"aws-static-site---hello-example-com.s3-website.eu-central-1.amazonaws.com\"`)" 22 | value = "${local.bucket_domain_name}" 23 | } 24 | -------------------------------------------------------------------------------- /aws/aws_static_site/s3.tf: -------------------------------------------------------------------------------- 1 | # Query the current AWS region so we know its S3 endpoint 2 | data "aws_region" "current" {} 3 | 4 | # Create the S3 bucket in which the static content for the site should be hosted 5 | resource "aws_s3_bucket" "this" { 6 | count = "${var.bucket_override_name == "" ? 1 : 0}" 7 | bucket = "${local.bucket_name}" 8 | tags = "${var.tags}" 9 | 10 | # Add a CORS configuration, so that we don't have issues with webfont loading 11 | # http://www.holovaty.com/writing/cors-ie-cloudfront/ 12 | cors_rule { 13 | allowed_headers = ["*"] 14 | allowed_methods = ["GET"] 15 | allowed_origins = ["*"] 16 | max_age_seconds = 3000 17 | } 18 | 19 | # Enable website hosting 20 | # Note, though, that when accessing the bucket over its SSL endpoint, the index_document will not be used 21 | website { 22 | index_document = "index.html" 23 | error_document = "error.html" 24 | } 25 | } 26 | 27 | # Use a bucket policy (instead of the simpler acl = "public-read") so we don't need to always remember to upload objects with: 28 | # $ aws s3 cp --acl public-read ... 29 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl 30 | resource "aws_s3_bucket_policy" "this" { 31 | depends_on = ["aws_s3_bucket.this"] # because we refer to the bucket indirectly, we need to explicitly define the dependency 32 | count = "${var.bucket_override_name == "" ? 1 : 0}" 33 | bucket = "${local.bucket_name}" 34 | 35 | # https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-2 36 | policy = < 6 | [![Infrastructure Tests](https://www.bridgecrew.cloud/badges/github/troydieter/tf-msk/cis_aws)](https://www.bridgecrew.cloud/link/badge?vcs=github&fullRepo=troydieter%2Ftf-msk&benchmark=CIS+AWS+V1.2) 7 |
8 | [![Infrastructure Tests](https://www.bridgecrew.cloud/badges/github/troydieter/tf-msk/hipaa)](https://www.bridgecrew.cloud/link/badge?vcs=github&fullRepo=troydieter%2Ftf-msk&benchmark=HIPAA) -------------------------------------------------------------------------------- /aws/aws_vpc_msk/acmpca.tf: -------------------------------------------------------------------------------- 1 | /* 2 | resource "aws_acmpca_certificate_authority" "pca" { 3 | certificate_authority_configuration { 4 | key_algorithm = "RSA_4096" 5 | signing_algorithm = "SHA512WITHRSA" 6 | 7 | subject { 8 | common_name = "pca.${lower(var.environment)}.example.com" 9 | } 10 | } 11 | } 12 | */ 13 | 14 | /* 15 | resource "aws_iam_role_policy_attachment" "Kafka-Client-IAM-role-att3" { 16 | policy_arn = "arn:aws:iam::aws:policy/AWSCertificateManagerPrivateCAPrivilegedUser" 17 | role = "${aws_iam_role.KafkaClientIAM_Role.name}" 18 | } 19 | */ 20 | -------------------------------------------------------------------------------- /aws/aws_vpc_msk/common-tags-data.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | common-tags = { 3 | "project" = "${upper("${substr("${var.aws-profile}", 0, 3)}")}" 4 | "platform" = "${upper("${substr("${var.aws-profile}", 0, 3)}")}" 5 | "environment-type" = var.environment 6 | "business-domain" = "na" 7 | "cost-center" = "na" 8 | "tier" = "private" 9 | "application" = var.application 10 | } 11 | } 12 | 13 | data "aws_caller_identity" "current" {} 14 | resource "random_uuid" "randuuid" {} -------------------------------------------------------------------------------- /aws/aws_vpc_msk/kafka-client-msk.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | yum update -y 4 | 5 | yum install python3.7 -y 6 | 7 | yum install java-1.8.0-openjdk-devel -y 8 | 9 | yum erase awscli -y 10 | 11 | cd /home/ec2-user 12 | 13 | echo "export PATH=.local/bin:$PATH" >> .bash_profile 14 | 15 | mkdir kafka 16 | 17 | mkdir mm 18 | 19 | cd kafka 20 | 21 | wget https://archive.apache.org/dist/kafka/2.2.1/kafka_2.12-2.2.1.tgz 22 | 23 | tar -xzf kafka_2.12-2.2.1.tgz 24 | 25 | cd /home/ec2-user 26 | 27 | wget https://bootstrap.pypa.io/get-pip.py 28 | 29 | su -c "python3.7 get-pip.py --user" -s /bin/sh ec2-user 30 | 31 | su -c "/home/ec2-user/.local/bin/pip3 install boto3 --user" -s /bin/sh 32 | ec2-user 33 | 34 | su -c "/home/ec2-user/.local/bin/pip3 install awscli --user" -s /bin/sh 35 | ec2-user 36 | 37 | chown -R ec2-user ./kafka 38 | 39 | chgrp -R ec2-user ./kafka 40 | 41 | chown -R ec2-user ./mm 42 | 43 | chgrp -R ec2-user ./mm -------------------------------------------------------------------------------- /aws/aws_vpc_msk/keypair.tf: -------------------------------------------------------------------------------- 1 | data "local_file" "msk-keypair-public" { 2 | filename = "${path.module}/keys/MSK-Keypair.pub" 3 | } 4 | 5 | resource "aws_key_pair" "MSK-Keypair-ec2-keypair" { 6 | key_name = var.key_name 7 | public_key = data.local_file.msk-keypair-public.content 8 | } 9 | 10 | output "msk-user-keypair" { 11 | value = aws_key_pair.MSK-Keypair-ec2-keypair.key_name 12 | } -------------------------------------------------------------------------------- /aws/aws_vpc_msk/kms.tf: -------------------------------------------------------------------------------- 1 | resource "aws_kms_key" "pca-kms" { 2 | description = "KMS Key for encrypting the PCA s3 bucket" 3 | enable_key_rotation = "true" 4 | policy = < \ 29 | 
TF_VAR_public_alb_domain= \ 30 | TF_VAR_db_master_username= \ 31 | TF_VAR_db_master_password="" \ 32 | AWS_PROFILE= \ 33 | AWS_DEFAULT_REGION= \ 34 | terraform apply 35 | ``` 36 | 37 | ### Tear down 38 | 39 | ``` 40 | AWS_SDK_LOAD_CONFIG=1 \ 41 | TF_VAR_site_domain= \ 42 | TF_VAR_public_alb_domain= \ 43 | TF_VAR_db_master_username= \ 44 | TF_VAR_db_master_password="" \ 45 | AWS_PROFILE= \ 46 | AWS_DEFAULT_REGION= \ 47 | terraform destroy 48 | ``` 49 | 50 | p.s. Instead of environment variables, you can obviously use .tfvar files for assigning values to terraform variables. 51 | -------------------------------------------------------------------------------- /aws/wordpress_fargate/alb.tf: -------------------------------------------------------------------------------- 1 | module "acm_alb" { 2 | source = "terraform-aws-modules/acm/aws" 3 | version = "~> v2.0" 4 | domain_name = var.public_alb_domain 5 | zone_id = data.aws_route53_zone.this.zone_id 6 | tags = var.tags 7 | } 8 | 9 | resource "aws_security_group" "alb" { 10 | name = "${var.prefix}-alb-${var.environment}" 11 | description = "Allow HTTPS inbound traffc" 12 | vpc_id = module.vpc.vpc_id 13 | 14 | egress { 15 | from_port = 0 16 | to_port = 0 17 | protocol = "-1" 18 | cidr_blocks = ["0.0.0.0/0"] 19 | } 20 | 21 | ingress { 22 | from_port = 443 23 | to_port = 443 24 | protocol = "tcp" 25 | cidr_blocks = ["0.0.0.0/0"] 26 | } 27 | 28 | ingress { 29 | from_port = 0 30 | to_port = 0 31 | protocol = "-1" 32 | self = true 33 | } 34 | 35 | tags = var.tags 36 | } 37 | 38 | 39 | module "alb" { 40 | source = "terraform-aws-modules/alb/aws" 41 | version = "~> 5.0" 42 | name = "${var.prefix}-${var.environment}" 43 | load_balancer_type = "application" 44 | vpc_id = module.vpc.vpc_id 45 | subnets = module.vpc.public_subnets 46 | security_groups = [aws_security_group.alb.id] 47 | 48 | https_listeners = [ 49 | { 50 | "certificate_arn" = module.acm_alb.this_acm_certificate_arn 51 | "port" = 443 52 | }, 53 | ] 54 | 55 | target_groups = [ 56 | { 57 | name = "${var.prefix}-default-${var.environment}" 58 | backend_protocol = "HTTP" 59 | backend_port = 80 60 | } 61 | ] 62 | tags = var.tags 63 | } 64 | -------------------------------------------------------------------------------- /aws/wordpress_fargate/data.tf: -------------------------------------------------------------------------------- 1 | data "aws_region" "current" {} 2 | data "aws_caller_identity" "current" {} 3 | data "aws_availability_zones" "this" {} 4 | -------------------------------------------------------------------------------- /aws/wordpress_fargate/db.tf: -------------------------------------------------------------------------------- 1 | resource "random_string" "snapshot_suffix" { 2 | length = 8 3 | special = false 4 | } 5 | 6 | resource "aws_rds_cluster" "this" { 7 | cluster_identifier = "${var.prefix}-${var.environment}" 8 | engine = "aurora" 9 | engine_mode = "serverless" 10 | vpc_security_group_ids = [aws_security_group.db.id] 11 | db_subnet_group_name = aws_db_subnet_group.this.name 12 | engine_version = var.db_engine_version 13 | availability_zones = data.aws_availability_zones.this.names 14 | database_name = "wordpress" 15 | master_username = var.db_master_username 16 | master_password = var.db_master_password 17 | backup_retention_period = var.db_backup_retention_days 18 | preferred_backup_window = var.db_backup_window 19 | scaling_configuration { 20 | auto_pause = var.db_auto_pause 21 | seconds_until_auto_pause = var.db_seconds_until_auto_pause 22 | max_capacity = 
var.db_max_capacity 23 | min_capacity = var.db_min_capacity 24 | } 25 | final_snapshot_identifier = "${var.prefix}-${var.environment}-${random_string.snapshot_suffix.result}" 26 | tags = var.tags 27 | } 28 | 29 | resource "aws_db_subnet_group" "this" { 30 | name = "${var.prefix}-${var.environment}" 31 | subnet_ids = module.vpc.private_subnets 32 | tags = var.tags 33 | } 34 | 35 | resource "aws_security_group" "db" { 36 | vpc_id = module.vpc.vpc_id 37 | name = "${var.prefix}-db-${var.environment}" 38 | ingress { 39 | protocol = "tcp" 40 | from_port = 3306 41 | to_port = 3306 42 | self = true 43 | } 44 | 45 | egress { 46 | protocol = "-1" 47 | from_port = 0 48 | to_port = 0 49 | cidr_blocks = ["0.0.0.0/0"] 50 | } 51 | 52 | lifecycle { 53 | create_before_destroy = true 54 | } 55 | tags = var.tags 56 | } 57 | 58 | resource "aws_ssm_parameter" "db_master_user" { 59 | name = "/${var.prefix}/${var.environment}/db_master_user" 60 | type = "SecureString" 61 | value = var.db_master_username 62 | tags = var.tags 63 | } 64 | 65 | resource "aws_ssm_parameter" "db_master_password" { 66 | name = "/${var.prefix}/${var.environment}/db_master_password" 67 | type = "SecureString" 68 | value = var.db_master_password 69 | tags = var.tags 70 | } 71 | -------------------------------------------------------------------------------- /aws/wordpress_fargate/efs.tf: -------------------------------------------------------------------------------- 1 | resource "aws_efs_file_system" "this" { 2 | creation_token = "${var.prefix}-${var.environment}" 3 | tags = var.tags 4 | } 5 | 6 | resource "aws_efs_mount_target" "this" { 7 | count = length(module.vpc.private_subnets) 8 | file_system_id = aws_efs_file_system.this.id 9 | subnet_id = module.vpc.private_subnets[count.index] 10 | security_groups = [ 11 | aws_security_group.efs.id 12 | ] 13 | } 14 | 15 | resource "aws_security_group" "efs" { 16 | name = "${var.prefix}-efs-${var.environment}" 17 | description = "Allow traffic ffrom self" 18 | vpc_id = module.vpc.vpc_id 19 | 20 | egress { 21 | from_port = 0 22 | to_port = 0 23 | protocol = "-1" 24 | cidr_blocks = ["0.0.0.0/0"] 25 | } 26 | 27 | ingress { 28 | from_port = 2049 29 | to_port = 2049 30 | protocol = "tcp" 31 | self = true 32 | } 33 | tags = var.tags 34 | } 35 | -------------------------------------------------------------------------------- /aws/wordpress_fargate/output.tf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/futurice/terraform-examples/c191f7bcb0fa786b68c7b3904b6f3bbb90881fc6/aws/wordpress_fargate/output.tf -------------------------------------------------------------------------------- /aws/wordpress_fargate/provider.tf: -------------------------------------------------------------------------------- 1 | provider "aws" { 2 | } 3 | 4 | terraform { 5 | backend "s3" { 6 | bucket = "mybucket" 7 | key = "wordpress" 8 | } 9 | } 10 | 11 | provider "aws" { 12 | alias = "us_east_1" 13 | region = "us-east-1" 14 | } 15 | 16 | provider "random" { 17 | } 18 | -------------------------------------------------------------------------------- /aws/wordpress_fargate/route53.tf: -------------------------------------------------------------------------------- 1 | # ACM 2 | data "aws_route53_zone" "this" { 3 | name = replace(var.site_domain, "/.*\\b(\\w+\\.\\w+)\\.?$/", "$1") # gets domain from subdomain e.g. 
"foo.example.com" => "example.com" 4 | } 5 | 6 | 7 | ## Route53 8 | # Add an IPv4 DNS record pointing to the CloudFront distribution 9 | resource "aws_route53_record" "ipv4" { 10 | zone_id = data.aws_route53_zone.this.zone_id 11 | name = var.site_domain 12 | type = "A" 13 | 14 | alias { 15 | name = aws_cloudfront_distribution.this.domain_name 16 | zone_id = aws_cloudfront_distribution.this.hosted_zone_id 17 | evaluate_target_health = false 18 | } 19 | } 20 | 21 | # Add an IPv6 DNS record pointing to the CloudFront distribution 22 | resource "aws_route53_record" "ipv6" { 23 | zone_id = data.aws_route53_zone.this.zone_id 24 | name = var.site_domain 25 | type = "AAAA" 26 | 27 | alias { 28 | name = aws_cloudfront_distribution.this.domain_name 29 | zone_id = aws_cloudfront_distribution.this.hosted_zone_id 30 | evaluate_target_health = false 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /aws/wordpress_fargate/vpc.tf: -------------------------------------------------------------------------------- 1 | module "vpc" { 2 | source = "terraform-aws-modules/vpc/aws" 3 | name = "${var.prefix}-${var.environment}" 4 | cidr = var.vpc_cidr 5 | azs = data.aws_availability_zones.this.names 6 | private_subnets = var.private_subnet_cidrs 7 | public_subnets = var.public_subnet_cidrs 8 | enable_nat_gateway = true 9 | single_nat_gateway = false 10 | one_nat_gateway_per_az = false 11 | tags = var.tags 12 | version = "~>2.0" 13 | enable_dns_hostnames = true 14 | } 15 | -------------------------------------------------------------------------------- /azure/_header_/README.md: -------------------------------------------------------------------------------- 1 | # Azure Examples -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/access_policies.tf: -------------------------------------------------------------------------------- 1 | # Key vault access for the current client principal 2 | resource "azurerm_key_vault_access_policy" "principal" { 3 | key_vault_id = azurerm_key_vault.current.id 4 | 5 | tenant_id = data.azurerm_client_config.current.tenant_id 6 | object_id = data.azurerm_client_config.current.object_id 7 | 8 | secret_permissions = [ 9 | "get", 10 | "set", 11 | "delete" 12 | ] 13 | } 14 | 15 | # Key vault access for the App Service 16 | resource "azurerm_key_vault_access_policy" "app_service" { 17 | key_vault_id = azurerm_key_vault.current.id 18 | 19 | tenant_id = data.azurerm_client_config.current.tenant_id 20 | object_id = azurerm_app_service.current.identity.0.principal_id 21 | 22 | secret_permissions = [ 23 | "get", 24 | ] 25 | } 26 | 27 | # Key vault access for the App Service's next slot 28 | resource "azurerm_key_vault_access_policy" "app_service_next_slot" { 29 | key_vault_id = azurerm_key_vault.current.id 30 | 31 | tenant_id = data.azurerm_client_config.current.tenant_id 32 | object_id = azurerm_app_service_slot.next.identity.0.principal_id 33 | 34 | secret_permissions = [ 35 | "get", 36 | ] 37 | } 38 | 39 | # Pull access for the app service 40 | resource "azurerm_role_assignment" "app_service_acr_pull" { 41 | scope = azurerm_container_registry.current.id 42 | role_definition_name = "AcrPull" 43 | principal_id = azurerm_app_service.current.identity.0.principal_id 44 | } 45 | 46 | # Pull access for the app service's next slot 47 | resource "azurerm_role_assignment" "app_service_next_slot_acr_pull" { 48 | scope = azurerm_container_registry.current.id 49 | role_definition_name = 
"AcrPull" 50 | principal_id = azurerm_app_service_slot.next.identity.0.principal_id 51 | } 52 | -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/data.tf: -------------------------------------------------------------------------------- 1 | data "azurerm_client_config" "current" {} 2 | 3 | data "azurerm_resource_group" "current" { 4 | name = var.resource_group_name 5 | } 6 | -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/example-app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:14-slim 2 | 3 | WORKDIR /app 4 | COPY package.json package-lock.json ./ 5 | RUN npm ci 6 | COPY index.js ./ 7 | 8 | EXPOSE 4000 9 | CMD ["node", "index.js"] 10 | -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/example-app/build-and-push.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TAG=$(date +%s) 4 | ACR_URI=$1.azurecr.io 5 | az acr login -n "$1" 6 | 7 | docker build -t "$ACR_URI/nodeapp:$TAG" . 8 | 9 | docker push "$ACR_URI/nodeapp:$TAG" 10 | 11 | echo "TAGGED AS $TAG" 12 | -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/example-app/deploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | RG_NAME=$1 4 | APP_SERVICE_NAME=$2 5 | ACR_URI=$3.azurecr.io 6 | TAG=$4 7 | 8 | FX_Version="Docker|"$ACR_URI"/"nodeapp:$TAG 9 | WEBAPP_ID=$(az webapp show -g "$RG_NAME" -n "$APP_SERVICE_NAME" --query id --output tsv)"/config/web" 10 | az resource update --ids "$WEBAPP_ID" --set "properties.linuxFxVersion=$FX_Version" -o none --force-string 11 | -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/example-app/index.js: -------------------------------------------------------------------------------- 1 | const http = require("http"); 2 | const appInsights = require("applicationinsights"); 3 | 4 | if (!process.env.APPINSIGHTS_INSTRUMENTATIONKEY) { 5 | console.error("Missing APPINSIGHTS_INSTRUMENTATIONKEY config"); 6 | process.exit(2); 7 | } 8 | 9 | // Instrumentation key is loaded from APPINSIGHTS_INSTRUMENTATIONKEY env var 10 | appInsights.setup().setAutoCollectConsole(true, true); 11 | // The cloud role can be used to identify the app in Application Insights 12 | appInsights.defaultClient.context.tags[ 13 | appInsights.defaultClient.context.keys.cloudRole 14 | ] = "MyApplication"; 15 | appInsights.start(); 16 | 17 | const port = 4000; 18 | 19 | async function connectToDatabase() { 20 | try { 21 | const connectionString = process.env.DB_URL; 22 | const parsedConnectionString = parseConnectionString(connectionString); 23 | await sql.connect({ 24 | authentication: { 25 | type: "azure-active-directory-msi-app-service", 26 | options: { 27 | // These are available when we have enabled system managed identity 28 | msiEndpoint: process.env.MSI_ENDPOINT, 29 | msiSecret: process.env.MSI_SECRET, 30 | }, 31 | }, 32 | server: parsedConnectionString.host, 33 | database: parsedConnectionString.database, 34 | options: { 35 | trustServerCertificate: false, 36 | encrypt: true, 37 | port: 1433, 38 | }, 39 | }); 40 | 41 | console.log("Connected successfully to database"); 42 | } catch (err) { 43 | console.error(err); 44 | 
process.exit(2); 45 | } 46 | } 47 | 48 | async function startServer() { 49 | const requestListener = async function (req, res) { 50 | try { 51 | console.log("Request to", req.url); 52 | 53 | res.writeHead(200); 54 | res.end("My first server!"); 55 | } catch (error) { 56 | console.error(err); 57 | res.writeHead(500); 58 | res.end("2"); 59 | } 60 | }; 61 | 62 | const server = http.createServer(requestListener); 63 | server.listen(port, () => { 64 | console.log(`Server is running on port ${port}`); 65 | }); 66 | } 67 | 68 | (async function start() { 69 | await startServer(); 70 | })(); 71 | -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/example-app/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "app", 3 | "version": "1.0.0", 4 | "lockfileVersion": 1, 5 | "requires": true, 6 | "dependencies": { 7 | "applicationinsights": { 8 | "version": "1.8.8", 9 | "resolved": "https://registry.npmjs.org/applicationinsights/-/applicationinsights-1.8.8.tgz", 10 | "integrity": "sha512-B43D4t/taGP5quGviVSdFWqarhIlzyGSi5mfngjbXpR2Ed3VrikJGIr1i5UtGzvwWqEbfIF6i298GvjFaB8RFA==", 11 | "requires": { 12 | "cls-hooked": "^4.2.2", 13 | "continuation-local-storage": "^3.2.1", 14 | "diagnostic-channel": "0.3.1", 15 | "diagnostic-channel-publishers": "0.4.2" 16 | } 17 | }, 18 | "async-hook-jl": { 19 | "version": "1.7.6", 20 | "resolved": "https://registry.npmjs.org/async-hook-jl/-/async-hook-jl-1.7.6.tgz", 21 | "integrity": "sha512-gFaHkFfSxTjvoxDMYqDuGHlcRyUuamF8s+ZTtJdDzqjws4mCt7v0vuV79/E2Wr2/riMQgtG4/yUtXWs1gZ7JMg==", 22 | "requires": { 23 | "stack-chain": "^1.3.7" 24 | } 25 | }, 26 | "async-listener": { 27 | "version": "0.6.10", 28 | "resolved": "https://registry.npmjs.org/async-listener/-/async-listener-0.6.10.tgz", 29 | "integrity": "sha512-gpuo6xOyF4D5DE5WvyqZdPA3NGhiT6Qf07l7DCB0wwDEsLvDIbCr6j9S5aj5Ch96dLace5tXVzWBZkxU/c5ohw==", 30 | "requires": { 31 | "semver": "^5.3.0", 32 | "shimmer": "^1.1.0" 33 | } 34 | }, 35 | "cls-hooked": { 36 | "version": "4.2.2", 37 | "resolved": "https://registry.npmjs.org/cls-hooked/-/cls-hooked-4.2.2.tgz", 38 | "integrity": "sha512-J4Xj5f5wq/4jAvcdgoGsL3G103BtWpZrMo8NEinRltN+xpTZdI+M38pyQqhuFU/P792xkMFvnKSf+Lm81U1bxw==", 39 | "requires": { 40 | "async-hook-jl": "^1.7.6", 41 | "emitter-listener": "^1.0.1", 42 | "semver": "^5.4.1" 43 | } 44 | }, 45 | "continuation-local-storage": { 46 | "version": "3.2.1", 47 | "resolved": "https://registry.npmjs.org/continuation-local-storage/-/continuation-local-storage-3.2.1.tgz", 48 | "integrity": "sha512-jx44cconVqkCEEyLSKWwkvUXwO561jXMa3LPjTPsm5QR22PA0/mhe33FT4Xb5y74JDvt/Cq+5lm8S8rskLv9ZA==", 49 | "requires": { 50 | "async-listener": "^0.6.0", 51 | "emitter-listener": "^1.1.1" 52 | } 53 | }, 54 | "diagnostic-channel": { 55 | "version": "0.3.1", 56 | "resolved": "https://registry.npmjs.org/diagnostic-channel/-/diagnostic-channel-0.3.1.tgz", 57 | "integrity": "sha512-6eb9YRrimz8oTr5+JDzGmSYnXy5V7YnK5y/hd8AUDK1MssHjQKm9LlD6NSrHx4vMDF3+e/spI2hmWTviElgWZA==", 58 | "requires": { 59 | "semver": "^5.3.0" 60 | } 61 | }, 62 | "diagnostic-channel-publishers": { 63 | "version": "0.4.2", 64 | "resolved": "https://registry.npmjs.org/diagnostic-channel-publishers/-/diagnostic-channel-publishers-0.4.2.tgz", 65 | "integrity": "sha512-gbt5BVjwTV1wnng0Xi766DVrRxSeGECAX8Qrig7tKCDfXW2SbK7bKY6A3tgGjk5BB50aXgVXIsbtQiYIkt57Mg==" 66 | }, 67 | "emitter-listener": { 68 | "version": "1.1.2", 69 | "resolved": 
"https://registry.npmjs.org/emitter-listener/-/emitter-listener-1.1.2.tgz", 70 | "integrity": "sha512-Bt1sBAGFHY9DKY+4/2cV6izcKJUf5T7/gkdmkxzX/qv9CcGH8xSwVRW5mtX03SWJtRTWSOpzCuWN9rBFYZepZQ==", 71 | "requires": { 72 | "shimmer": "^1.2.0" 73 | } 74 | }, 75 | "semver": { 76 | "version": "5.7.1", 77 | "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", 78 | "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" 79 | }, 80 | "shimmer": { 81 | "version": "1.2.1", 82 | "resolved": "https://registry.npmjs.org/shimmer/-/shimmer-1.2.1.tgz", 83 | "integrity": "sha512-sQTKC1Re/rM6XyFM6fIAGHRPVGvyXfgzIDvzoq608vM+jeyVD0Tu1E6Np0Kc2zAIFWIj963V2800iF/9LPieQw==" 84 | }, 85 | "stack-chain": { 86 | "version": "1.3.7", 87 | "resolved": "https://registry.npmjs.org/stack-chain/-/stack-chain-1.3.7.tgz", 88 | "integrity": "sha1-0ZLJ/06moiyUxN1FkXHj8AzqEoU=" 89 | } 90 | } 91 | } 92 | -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/example-app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "app", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "index.js", 6 | "scripts": { 7 | "test": "echo \"Error: no test specified\" && exit 1" 8 | }, 9 | "author": "", 10 | "license": "MIT", 11 | "dependencies": { 12 | "applicationinsights": "^1.8.8" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/images/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/futurice/terraform-examples/c191f7bcb0fa786b68c7b3904b6f3bbb90881fc6/azure/azure_linux_docker_app_service/images/architecture.png -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/outputs.tf: -------------------------------------------------------------------------------- 1 | output "app_service_name" { 2 | description = "This is the unique name of the App Service that was created" 3 | value = azurerm_app_service.current.name 4 | } 5 | 6 | output "app_service_url" { 7 | description = "This is the URL of the App Service that was created" 8 | value = azurerm_app_service.current.default_site_hostname 9 | } 10 | 11 | output "container_registry" { 12 | value = azurerm_container_registry.current.login_server 13 | } 14 | -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/provider.tf: -------------------------------------------------------------------------------- 1 | # Configure the Azure Provider 2 | provider "azurerm" { 3 | version = "= 2.37.0" 4 | skip_provider_registration = true 5 | features {} 6 | } 7 | 8 | provider "random" { 9 | version = "~> 2.3" 10 | } 11 | 12 | provider "template" { 13 | version = "~> 2.1" 14 | } 15 | -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/secrets.tf: -------------------------------------------------------------------------------- 1 | # Application insights instrumentation key 2 | resource "azurerm_key_vault_secret" "app_insights_instrumentation_key" { 3 | key_vault_id = azurerm_key_vault.current.id 4 | name = "app-insights-key" 5 | value = azurerm_application_insights.current.instrumentation_key 6 | 7 | depends_on = [azurerm_key_vault_access_policy.principal] 8 | 
} 9 | -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/shared.tf: -------------------------------------------------------------------------------- 1 | # Since many services require a globally unique name (such as keyvault), 2 | # generate random suffix for the resources 3 | resource "random_string" "suffix" { 4 | length = 8 5 | special = false 6 | upper = false 7 | } 8 | 9 | locals { 10 | # Key vault and ACR required globally unique names. Only alphanumeric characters allowed 11 | key_vault_name = "${local.cleansed_prefix}${random_string.suffix.result}" 12 | acr_name = "${local.cleansed_prefix}${random_string.suffix.result}" 13 | 14 | # App insights needs to be unique only within the resource group 15 | app_insights_name = "${var.name_prefix}app-insights" 16 | } 17 | 18 | resource "azurerm_key_vault" "current" { 19 | name = local.key_vault_name 20 | location = data.azurerm_resource_group.current.location 21 | resource_group_name = data.azurerm_resource_group.current.name 22 | tenant_id = data.azurerm_client_config.current.tenant_id 23 | 24 | soft_delete_enabled = true 25 | soft_delete_retention_days = 7 26 | purge_protection_enabled = false 27 | 28 | sku_name = "standard" 29 | } 30 | 31 | resource "azurerm_container_registry" "current" { 32 | name = local.acr_name 33 | resource_group_name = data.azurerm_resource_group.current.name 34 | location = data.azurerm_resource_group.current.location 35 | sku = "Standard" 36 | 37 | # We'll be using AD login 38 | admin_enabled = false 39 | } 40 | 41 | resource "azurerm_application_insights" "current" { 42 | name = local.app_insights_name 43 | resource_group_name = data.azurerm_resource_group.current.name 44 | location = data.azurerm_resource_group.current.location 45 | application_type = var.app_insights_app_type 46 | } 47 | -------------------------------------------------------------------------------- /azure/azure_linux_docker_app_service/variables.tf: -------------------------------------------------------------------------------- 1 | variable "resource_group_name" { 2 | type = string 3 | description = "Name of the resource group where resources are to be deployed" 4 | } 5 | 6 | variable "alert_email_address" { 7 | type = string 8 | description = "Email address where alert emails are sent" 9 | } 10 | 11 | variable "name_prefix" { 12 | type = string 13 | description = "Name prefix to use for resources that need to be created (only lowercase characters and hyphens allowed)" 14 | default = "azure-app-example--" 15 | } 16 | 17 | variable "app_service_name" { 18 | type = string 19 | description = "Name for the app service" 20 | default = "appservice" 21 | } 22 | 23 | # https://www.terraform.io/docs/providers/azurerm/r/application_insights.html#application_type 24 | variable "app_insights_app_type" { 25 | type = string 26 | description = "The type of Application Insights to create." 
27 | default = "other" 28 | } 29 | 30 | # https://azure.microsoft.com/en-gb/pricing/details/app-service/linux/ 31 | variable "app_service_plan_tier" { 32 | type = string 33 | description = "App service plan's tier" 34 | default = "PremiumV2" 35 | } 36 | 37 | variable "app_service_plan_size" { 38 | type = string 39 | description = "App service plan's size" 40 | default = "P1v2" 41 | } 42 | 43 | locals { 44 | cleansed_prefix = replace(var.name_prefix, "/[^a-zA-Z0-9]+/", "") 45 | } 46 | -------------------------------------------------------------------------------- /azure/layers/README.md: -------------------------------------------------------------------------------- 1 | # Terraform Azure Layers example 2 | 3 | Azure resources may take a long time to create. Sometimes Terraform fails to spot that some resource actually requires another resource that has not been fully created yet. Layers help to ensure that all prerequisite resources for later ones are created before them. 4 | 5 | ## Try it out 6 | 7 | ```sh 8 | az login 9 | terraform init 10 | sh create.sh -auto-approve -var resource_name_prefix=${USER}trylayers 11 | ``` 12 | 13 | ## Clean up 14 | 15 | ```sh 16 | sh destroy.sh ${USER}trylayers 17 | ``` 18 | 19 | ## Files 20 | 21 | - `create.sh` presents a simple hard-coded deployment run that ensures each layer is completed separately. 22 | - `destroy.sh` takes a quick, resource-group based approach to wiping out the whole deployment. 23 | - `layers.tf` lists each layer with associated dependencies. 24 | - `main.tf` contains sample resources used on different layers. 25 | - `variables.sh` declares associated variables with sane defaults. 26 | -------------------------------------------------------------------------------- /azure/layers/create.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Usage: sh create.sh [options_for_terraform_apply] 3 | # options eg. -auto-approve -var resource_name_prefix=${USER} 4 | 5 | set -e 6 | 7 | #terraform apply "$@" -target null_resource.service_principal_layer 8 | terraform apply "$@" -target null_resource.resource_group_layer 9 | terraform apply "$@" -target null_resource.network_layer 10 | terraform apply "$@" -target null_resource.subnet_layer 11 | terraform apply "$@" -target null_resource.monitoring_layer 12 | terraform apply "$@" -target null_resource.storage_layer 13 | terraform apply "$@" 14 | -------------------------------------------------------------------------------- /azure/layers/destroy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # Usage: [DRY_RUN=1] sh destroy.sh [resource_name_prefix] 3 | 4 | PREFIX=${1:-trylayers} 5 | RUN=${DRY_RUN:+echo} 6 | 7 | # Remove each resource group, in reverse order of layers during create. 8 | 9 | $RUN az group delete --yes -n ${PREFIX}-storage-rgroup 10 | $RUN az group delete --yes -n ${PREFIX}-network-rgroup 11 | 12 | # Delete here any resources that do not belong to above resource groups, 13 | # ie. Service Principals. 14 | 15 | # Finally tell Terraform that nothing exists anymore. 16 | $RUN rm terraform.tfstate # please use shared statefile in real world 17 | -------------------------------------------------------------------------------- /azure/layers/layers.tf: -------------------------------------------------------------------------------- 1 | /* 2 | You may need service principals for eg. managing access to Key Vault. 
3 | However, that requires privileges to manage AD, which is outside of focus of this example. 4 | 5 | resource "null_resource" "service_principal_layer" { 6 | provisioner "local-exec" { 7 | command = "echo === Created all Service Principals" 8 | } 9 | 10 | depends_on = [ 11 | azuread_service_principal.keyvault_sp, 12 | azuread_service_principal_password.keyvault_sp_password 13 | ] 14 | } 15 | */ 16 | 17 | resource "null_resource" "resource_group_layer" { 18 | provisioner "local-exec" { 19 | command = "echo === Created all resource groups" 20 | } 21 | 22 | depends_on = [ 23 | # null_resource.service_principal_layer, 24 | azurerm_resource_group.network, 25 | azurerm_resource_group.storage 26 | ] 27 | } 28 | 29 | resource "null_resource" "network_layer" { 30 | provisioner "local-exec" { 31 | command = "echo === Created all virtual networks" 32 | } 33 | 34 | depends_on = [ 35 | null_resource.resource_group_layer, 36 | azurerm_virtual_network.network 37 | ] 38 | } 39 | 40 | resource "null_resource" "subnet_layer" { 41 | provisioner "local-exec" { 42 | command = "echo === Created all subnets" 43 | } 44 | 45 | depends_on = [ 46 | null_resource.network_layer, 47 | azurerm_subnet.subnet 48 | ] 49 | } 50 | 51 | resource "null_resource" "monitoring_layer" { 52 | provisioner "local-exec" { 53 | command = "echo === Created monitoring components" 54 | } 55 | 56 | depends_on = [ 57 | null_resource.subnet_layer, 58 | # monitoring is a bit out of scope, but it would go here 59 | ] 60 | } 61 | 62 | resource "null_resource" "storage_layer" { 63 | provisioner "local-exec" { 64 | command = "echo === Created storages" 65 | } 66 | depends_on = [ 67 | null_resource.monitoring_layer, 68 | azurerm_storage_account.storage 69 | ] 70 | } 71 | 72 | /* 73 | Resources outside layers can be created by not targeting anything 74 | once the layers have been created. 75 | 76 | Resources can use depends_on to ensure associated layers have been created for them. 
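For illustration, a hypothetical resource defined outside the layers could anchor
itself to the final layer like this (it reuses only resources that already exist
in this example):

  resource "azurerm_storage_blob" "outside_layers" {
    name                   = "after-layers.txt"
    storage_account_name   = azurerm_storage_account.storage.name
    storage_container_name = azurerm_storage_container.storage.name
    type                   = "Block"
    source_content         = "Created only after every layer is complete"

    depends_on = [null_resource.storage_layer]
  }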
77 | */ 78 | -------------------------------------------------------------------------------- /azure/layers/main.tf: -------------------------------------------------------------------------------- 1 | provider "azuread" { 2 | version = "=0.7.0" 3 | } 4 | 5 | provider "random" { 6 | version = "=2.2.1" 7 | } 8 | 9 | provider "null" { 10 | version = "=2.1.2" 11 | } 12 | 13 | resource "azurerm_resource_group" "network" { 14 | name = "${var.resource_name_prefix}-network-rgroup" 15 | location = var.location 16 | } 17 | 18 | resource "azurerm_virtual_network" "network" { 19 | name = "${var.resource_name_prefix}-network" 20 | location = var.location 21 | resource_group_name = azurerm_resource_group.network.name 22 | address_space = ["10.137.0.0/16"] 23 | } 24 | 25 | resource "azurerm_subnet" "subnet" { 26 | name = "${var.resource_name_prefix}-subnet" 27 | virtual_network_name = azurerm_virtual_network.network.name 28 | resource_group_name = azurerm_resource_group.network.name 29 | address_prefix = "10.137.1.0/24" 30 | service_endpoints = ["Microsoft.KeyVault"] 31 | 32 | lifecycle { 33 | ignore_changes = [ 34 | network_security_group_id, 35 | route_table_id 36 | ] 37 | } 38 | } 39 | 40 | resource "azurerm_resource_group" "storage" { 41 | name = "${var.resource_name_prefix}-storage-rgroup" 42 | location = var.location 43 | } 44 | 45 | resource "azurerm_storage_account" "storage" { 46 | name = "${var.resource_name_prefix}storage" 47 | resource_group_name = azurerm_resource_group.storage.name 48 | location = var.location 49 | account_tier = "Standard" 50 | account_replication_type = "LRS" 51 | enable_https_traffic_only = true 52 | } 53 | 54 | resource "azurerm_storage_container" "storage" { 55 | name = "${var.resource_name_prefix}container" 56 | storage_account_name = azurerm_storage_account.storage.name 57 | container_access_type = "private" 58 | } 59 | 60 | resource "azurerm_storage_blob" "a_file" { 61 | name = "hello.txt" 62 | storage_account_name = azurerm_storage_account.storage.name 63 | storage_container_name = azurerm_storage_container.storage.name 64 | type = "Block" 65 | source_content = "Hello, Blob!" 
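  # source_content stores this literal string as the blob's contents; the
  # provider's "source" argument (a local file path) could be used instead.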
66 | } 67 | -------------------------------------------------------------------------------- /azure/layers/variables.tf: -------------------------------------------------------------------------------- 1 | variable "resource_name_prefix" { 2 | type = string 3 | default = "trylayers" 4 | } 5 | 6 | variable "location" { 7 | type = string 8 | default = "westeurope" 9 | } 10 | -------------------------------------------------------------------------------- /generic/_header_/README.md: -------------------------------------------------------------------------------- 1 | # Generic Examles -------------------------------------------------------------------------------- /generic/docker_compose_host/main.tf: -------------------------------------------------------------------------------- 1 | resource "null_resource" "provisioners" { 2 | triggers = { 3 | docker_host_ip = "${var.public_ip}" # whenever the docker host on which docker-compose runs changes, re-run the provisioners 4 | reprovision_trigger = "${sha1("${local.reprovision_trigger}")}" # whenever the docker-compose config, environment etc changes, re-run the provisioners 5 | } 6 | 7 | connection { 8 | host = "${var.public_ip}" 9 | user = "${var.ssh_username}" 10 | private_key = "${var.ssh_private_key}" 11 | agent = false 12 | } 13 | 14 | provisioner "remote-exec" { 15 | inline = [< 10 | gcloud auth activate-service-account --key-file $GOOGLE_CREDENTIALS 11 | terraform init 12 | 13 | Note you need to switch on the App Engine API (dependancy of Cloud Scheduler), choose wisely, this is irreversable. The region CANNOT be changed. 14 | 15 | 16 | ### Tips 17 | 18 | Shut down memorystore 19 | 20 | terraform destroy -target module.memorystore.google_redis_instance.cache 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/controls.tf: -------------------------------------------------------------------------------- 1 | 2 | locals { 3 | control_fields = ["multiplier"] 4 | control_types = ["FLOAT"] 5 | default_value = ["1.0"] 6 | } 7 | 8 | resource "google_bigquery_table" "control_operations" { 9 | count = length(local.control_fields) 10 | dataset_id = google_bigquery_dataset.ingress.dataset_id 11 | table_id = "control_${element(local.control_fields, count.index)}" 12 | schema = templatefile( 13 | "${path.module}/schemas/control.template.schema.json", { 14 | FIELD = element(local.control_fields, count.index) 15 | TYPE = element(local.control_types, count.index) 16 | }) 17 | time_partitioning { 18 | field = "timestamp" 19 | type = "DAY" 20 | require_partition_filter = false 21 | } 22 | lifecycle { 23 | prevent_destroy = true 24 | } 25 | } 26 | 27 | resource "google_bigquery_table" "control_range_view" { 28 | count = length(local.control_fields) 29 | dataset_id = google_bigquery_dataset.views.dataset_id 30 | table_id = "control_value_range_${element(local.control_fields, count.index)}" 31 | view { 32 | query = templatefile("${path.module}/sql/control_range_view.sql", { 33 | NAME = element(local.control_fields, count.index), 34 | DEFAULT = element(local.default_value, count.index) 35 | OPERATIONS = "${var.config.project}.${google_bigquery_table.control_operations[count.index].dataset_id}.${google_bigquery_table.control_operations[count.index].table_id}" 36 | }) 37 | use_legacy_sql = false 38 | } 39 | } -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/ingress.tf: 
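The `control_fields`, `control_types` and `default_value` locals in `controls.tf` above are parallel, index-aligned lists: each entry yields its own partitioned `control_*` ingress table plus a `control_value_range_*` view. As a purely hypothetical illustration ("discount" is a made-up field, and both `daily_adjusted_totals.sql` and the `control_fields` list passed in `views.tf` would also need to reference it), a second control column would be added by extending every list in step:

```hcl
locals {
  control_fields = ["multiplier", "discount"] # "discount" is only an example
  control_types  = ["FLOAT", "FLOAT"]
  default_value  = ["1.0", "0.0"] # neutral default for each control
}
```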
-------------------------------------------------------------------------------- 1 | resource "google_bigquery_dataset" "ingress" { 2 | dataset_id = "ingress" 3 | description = "Raw event data" 4 | location = "EU" 5 | } 6 | 7 | resource "google_bigquery_table" "vendor1_ingress" { 8 | dataset_id = google_bigquery_dataset.ingress.dataset_id 9 | table_id = "vendor1_ingress" 10 | schema = file("${path.module}/schemas/vendor1.schema.json") 11 | time_partitioning { 12 | field = "timestamp" 13 | type = "DAY" 14 | require_partition_filter = true 15 | } 16 | lifecycle { 17 | prevent_destroy = true 18 | } 19 | } 20 | 21 | resource "google_bigquery_table" "prober_ingress" { 22 | dataset_id = google_bigquery_dataset.ingress.dataset_id 23 | table_id = "prober_ingress" 24 | schema = "${file("${path.module}/schemas/prober.schema.json")}" 25 | time_partitioning { 26 | field = "timestamp" 27 | type = "DAY" 28 | require_partition_filter = true 29 | } 30 | lifecycle { 31 | prevent_destroy = true 32 | } 33 | } 34 | 35 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/outputs.tf: -------------------------------------------------------------------------------- 1 | output "prober_ingress_table" { 2 | value = google_bigquery_table.prober_ingress 3 | } 4 | 5 | output "control_dataset" { 6 | value = google_bigquery_dataset.ingress 7 | } 8 | output "unified_values_table" { 9 | value = google_bigquery_table.unified_values 10 | } 11 | output "current_totals_latest_table" { 12 | value = google_bigquery_table.current_totals_latest 13 | } 14 | 15 | output "historical_totals_latest_table" { 16 | value = google_bigquery_table.historical_totals_latest 17 | } 18 | 19 | output "current_totals_table" { 20 | value = google_bigquery_table.current_totals 21 | } 22 | 23 | output "historical_totals_table" { 24 | value = google_bigquery_table.historical_totals 25 | } 26 | 27 | output "ingress_dataset" { 28 | value = google_bigquery_dataset.ingress 29 | } 30 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/reports.tf: -------------------------------------------------------------------------------- 1 | 2 | resource "google_bigquery_dataset" "reports" { 3 | dataset_id = "reports" 4 | description = "Materialized reports" 5 | location = "EU" 6 | } 7 | 8 | resource "google_bigquery_table" "current_totals" { 9 | dataset_id = google_bigquery_dataset.reports.dataset_id 10 | table_id = "current_totals" 11 | schema = "${file("${path.module}/schemas/report.schema.json")}" 12 | } 13 | 14 | resource "google_bigquery_table" "historical_totals" { 15 | dataset_id = google_bigquery_dataset.reports.dataset_id 16 | table_id = "historical_totals" 17 | schema = "${file("${path.module}/schemas/report.schema.json")}" 18 | time_partitioning { 19 | field = "day" 20 | type = "DAY" 21 | require_partition_filter = true 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/schemas/control.template.schema.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "mode": "NULLABLE", 4 | "name": "user", 5 | "type": "STRING" 6 | }, 7 | { 8 | "mode": "NULLABLE", 9 | "name": "timestamp", 10 | "type": "TIMESTAMP" 11 | }, 12 | { 13 | "mode": "NULLABLE", 14 | "name": "${FIELD}", 15 | "type": "${TYPE}" 16 | } 17 | ] 18 | 
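The schema above is a template rather than a literal schema file: `controls.tf` expands the `${FIELD}` and `${TYPE}` placeholders once per control column, producing a plain three-column BigQuery schema (`user`, `timestamp`, plus the control value). A standalone sketch of the same `templatefile()` call, written out for the one control column defined today, would be:

```hcl
# Sketch only: mirrors the templatefile() call made in controls.tf, with the
# list lookups replaced by the literal values used for the "multiplier" control.
output "rendered_multiplier_schema" {
  value = templatefile("${path.module}/schemas/control.template.schema.json", {
    FIELD = "multiplier" # becomes the name of the third column
    TYPE  = "FLOAT"      # becomes that column's BigQuery type
  })
}
```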
-------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/schemas/prober.schema.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "mode": "NULLABLE", 4 | "name": "user", 5 | "type": "STRING" 6 | }, 7 | { 8 | "mode": "NULLABLE", 9 | "name": "timestamp", 10 | "type": "TIMESTAMP" 11 | }, 12 | { 13 | "mode": "NULLABLE", 14 | "name": "value", 15 | "type": "FLOAT" 16 | } 17 | ] 18 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/schemas/report.schema.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "mode": "NULLABLE", 4 | "name": "user", 5 | "type": "STRING" 6 | }, 7 | { 8 | "mode": "NULLABLE", 9 | "name": "day", 10 | "type": "DATE" 11 | }, 12 | { 13 | "mode": "NULLABLE", 14 | "name": "adjusted_total", 15 | "type": "FLOAT" 16 | }, 17 | { 18 | "mode": "NULLABLE", 19 | "name": "KEY", 20 | "type": "STRING" 21 | } 22 | ] 23 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/schemas/vendor1.schema.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "mode": "NULLABLE", 4 | "name": "timestamp", 5 | "type": "TIMESTAMP" 6 | }, 7 | { 8 | "mode": "NULLABLE", 9 | "name": "payload", 10 | "type": "STRING" 11 | } 12 | ] 13 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/sql/control_range_view.sql: -------------------------------------------------------------------------------- 1 | SELECT 2 | user, 3 | ${NAME}, 4 | timestamp AS timestamp_start, 5 | LEAD(timestamp, 1, CURRENT_TIMESTAMP()) OVER (PARTITION BY user ORDER BY timestamp ASC) 6 | AS timestamp_end 7 | FROM 8 | `${OPERATIONS}` 9 | WHERE 10 | timestamp > TIMESTAMP_SUB(CURRENT_TIMESTAMP(), INTERVAL 90 DAY) -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/sql/daily_adjusted_totals.sql: -------------------------------------------------------------------------------- 1 | WITH values_controls AS ( 2 | SELECT * 3 | FROM `${values}` 4 | %{ for field in control_fields ~} 5 | JOIN `${control_prefix}${field}` ${field} USING (user) 6 | %{ endfor ~} 7 | WHERE TRUE 8 | %{ for field in control_fields ~} 9 | AND (timestamp >= ${field}.timestamp_start AND timestamp < ${field}.timestamp_end) 10 | %{ endfor ~} 11 | ) 12 | 13 | SELECT 14 | user, 15 | day, 16 | SUM(value * multiplier) as adjusted_total 17 | FROM values_controls 18 | GROUP BY user, day 19 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/sql/last_n_days_totals.sql: -------------------------------------------------------------------------------- 1 | WITH 2 | historical_window AS ( 3 | SELECT 4 | user, 5 | day, 6 | adjusted_total 7 | FROM 8 | `${daily_totals}` 9 | WHERE 10 | day >= DATE_SUB(CURRENT_DATE, INTERVAL ${n_days} DAY) AND 11 | day <= CURRENT_DATE 12 | ), 13 | withKeyPrefix AS ( 14 | SELECT 15 | *, CONCAT('${PREFIX}', user) AS KEY 16 | FROM 17 | historical_window 18 | ) 19 | SELECT 20 | * 21 | FROM 22 | withKeyPrefix 23 | 24 | -------------------------------------------------------------------------------- 
/google_cloud/CQRS_bigquery_memorystore/bigquery/sql/unified_values.sql: -------------------------------------------------------------------------------- 1 | WITH 2 | vendor1 AS ( 3 | SELECT 4 | user as user, 5 | value as value, 6 | timestamp as timestamp, 7 | "vendor1" as source 8 | FROM 9 | `${vendor1}` 10 | ), 11 | prober AS ( 12 | SELECT 13 | user as user, 14 | value as value, 15 | timestamp as timestamp, 16 | "prober" as source 17 | FROM 18 | `${prober}` 19 | ), 20 | combined AS ( 21 | SELECT * FROM vendor1 22 | UNION ALL 23 | SELECT * FROM prober 24 | ) 25 | SELECT *, DATE(timestamp) as DAY FROM combined; 26 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/sql/vendor1_cleanup.sql: -------------------------------------------------------------------------------- 1 | WITH 2 | ingress AS ( 3 | SELECT 4 | timestamp, 5 | /* Assumed payload structure: 6 | {"user": "29837", "values": [5, 4, 3, 5, 2, 1]} 7 | */ 8 | payload 9 | FROM 10 | `${ingress}` 11 | ), 12 | extraction AS ( 13 | SELECT *, 14 | JSON_EXTRACT_SCALAR(payload, '$.user') as user, 15 | `${urdfs}`.CUSTOM_JSON_EXTRACT_ARRAY_FLOAT(payload, '$.values[*]') as values 16 | FROM ingress 17 | ) 18 | 19 | SELECT user, timestamp, value 20 | FROM extraction 21 | JOIN UNNEST(values) as value 22 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/udf/CUSTOM_JSON_EXTRACT_ARRAY_FLOAT.sql: -------------------------------------------------------------------------------- 1 | /* 2 | Adapted from 3 | https://stackoverflow.com/questions/46742020/what-jsonpath-expressions-are-supported-in-bigquery 4 | */ 5 | CREATE OR REPLACE FUNCTION 6 | ${dataset}.CUSTOM_JSON_EXTRACT_ARRAY_FLOAT(json STRING, path STRING) 7 | RETURNS ARRAY 8 | LANGUAGE js AS """ 9 | try { 10 | return jsonPath(JSON.parse(json), path); 11 | } catch (e) { return null } 12 | """ 13 | OPTIONS ( 14 | library="${library}" 15 | ); -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/udf/jsonpath-0.8.0.js: -------------------------------------------------------------------------------- 1 | /* JSONPath 0.8.0 - XPath for JSON 2 | * 3 | * Copyright (c) 2007 Stefan Goessner (goessner.net) 4 | * Licensed under the MIT (MIT-LICENSE.txt) licence. 5 | */ 6 | function jsonPath(obj, expr, arg) { 7 | var P = { 8 | resultType: arg && arg.resultType || "VALUE", 9 | result: [], 10 | normalize: function (expr) { 11 | var subx = []; 12 | return expr.replace(/[\['](\??\(.*?\))[\]']/g, function ($0, $1) { return "[#" + (subx.push($1) - 1) + "]"; }) 13 | .replace(/'?\.'?|\['?/g, ";") 14 | .replace(/;;;|;;/g, ";..;") 15 | .replace(/;$|'?\]|'$/g, "") 16 | .replace(/#([0-9]+)/g, function ($0, $1) { return subx[$1]; }); 17 | }, 18 | asPath: function (path) { 19 | var x = path.split(";"), p = "$"; 20 | for (var i = 1, n = x.length; i < n; i++) 21 | p += /^[0-9*]+$/.test(x[i]) ? ("[" + x[i] + "]") : ("['" + x[i] + "']"); 22 | return p; 23 | }, 24 | store: function (p, v) { 25 | if (p) P.result[P.result.length] = P.resultType == "PATH" ? 
P.asPath(p) : v; 26 | return !!p; 27 | }, 28 | trace: function (expr, val, path) { 29 | if (expr) { 30 | var x = expr.split(";"), loc = x.shift(); 31 | x = x.join(";"); 32 | if (val && val.hasOwnProperty(loc)) 33 | P.trace(x, val[loc], path + ";" + loc); 34 | else if (loc === "*") 35 | P.walk(loc, x, val, path, function (m, l, x, v, p) { P.trace(m + ";" + x, v, p); }); 36 | else if (loc === "..") { 37 | P.trace(x, val, path); 38 | P.walk(loc, x, val, path, function (m, l, x, v, p) { typeof v[m] === "object" && P.trace("..;" + x, v[m], p + ";" + m); }); 39 | } 40 | else if (/,/.test(loc)) { // [name1,name2,...] 41 | for (var s = loc.split(/'?,'?/), i = 0, n = s.length; i < n; i++) 42 | P.trace(s[i] + ";" + x, val, path); 43 | } 44 | else if (/^\(.*?\)$/.test(loc)) // [(expr)] 45 | P.trace(P.eval(loc, val, path.substr(path.lastIndexOf(";") + 1)) + ";" + x, val, path); 46 | else if (/^\?\(.*?\)$/.test(loc)) // [?(expr)] 47 | P.walk(loc, x, val, path, function (m, l, x, v, p) { if (P.eval(l.replace(/^\?\((.*?)\)$/, "$1"), v[m], m)) P.trace(m + ";" + x, v, p); }); 48 | else if (/^(-?[0-9]*):(-?[0-9]*):?([0-9]*)$/.test(loc)) // [start:end:step] phyton slice syntax 49 | P.slice(loc, x, val, path); 50 | } 51 | else 52 | P.store(path, val); 53 | }, 54 | walk: function (loc, expr, val, path, f) { 55 | if (val instanceof Array) { 56 | for (var i = 0, n = val.length; i < n; i++) 57 | if (i in val) 58 | f(i, loc, expr, val, path); 59 | } 60 | else if (typeof val === "object") { 61 | for (var m in val) 62 | if (val.hasOwnProperty(m)) 63 | f(m, loc, expr, val, path); 64 | } 65 | }, 66 | slice: function (loc, expr, val, path) { 67 | if (val instanceof Array) { 68 | var len = val.length, start = 0, end = len, step = 1; 69 | loc.replace(/^(-?[0-9]*):(-?[0-9]*):?(-?[0-9]*)$/g, function ($0, $1, $2, $3) { start = parseInt($1 || start); end = parseInt($2 || end); step = parseInt($3 || step); }); 70 | start = (start < 0) ? Math.max(0, start + len) : Math.min(len, start); 71 | end = (end < 0) ? Math.max(0, end + len) : Math.min(len, end); 72 | for (var i = start; i < end; i += step) 73 | P.trace(i + ";" + expr, val, path); 74 | } 75 | }, 76 | eval: function (x, _v, _vname) { 77 | try { return $ && _v && eval(x.replace(/@/g, "_v")); } 78 | catch (e) { throw new SyntaxError("jsonPath: " + e.message + ": " + x.replace(/@/g, "_v").replace(/\^/g, "_a")); } 79 | } 80 | }; 81 | 82 | var $ = obj; 83 | if (expr && obj && (P.resultType == "VALUE" || P.resultType == "PATH")) { 84 | P.trace(P.normalize(expr).replace(/^\$;/, ""), obj, "$"); 85 | return P.result.length ? 
P.result : false; 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/urdf.tf: -------------------------------------------------------------------------------- 1 | resource "google_bigquery_dataset" "urdfs" { 2 | dataset_id = "urdfs" 3 | description = "Data processing" 4 | location = "EU" 5 | } 6 | resource "google_storage_bucket_object" "jsonpath" { 7 | name = "udf/jsonpath-0.8.0.js" 8 | source = "${path.module}/udf/jsonpath-0.8.0.js" 9 | bucket = "${var.config.code_bucket.name}" 10 | } 11 | 12 | resource "null_resource" "CUSTOM_JSON_EXTRACT_ARRAY_FLOAT" { 13 | triggers = { 14 | version = "0.0.4" // Bump to force apply to this resource 15 | } 16 | provisioner "local-exec" { 17 | command = "bq query --project=${var.config.project} --use_legacy_sql=false '${templatefile("${path.module}/udf/CUSTOM_JSON_EXTRACT_ARRAY_FLOAT.sql", { 18 | dataset = google_bigquery_dataset.urdfs.dataset_id 19 | library = "gs://${var.config.code_bucket.name}/${google_storage_bucket_object.jsonpath.output_name}" 20 | })}'" 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/variables.tf: -------------------------------------------------------------------------------- 1 | variable "config" { 2 | type = any 3 | } 4 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/bigquery/views.tf: -------------------------------------------------------------------------------- 1 | resource "google_bigquery_dataset" "views" { 2 | dataset_id = "views" 3 | description = "Data processing" 4 | location = "EU" 5 | } 6 | 7 | resource "google_bigquery_table" "vendor1" { 8 | dataset_id = google_bigquery_dataset.views.dataset_id 9 | table_id = "vendor1" 10 | view { 11 | query = templatefile("${path.module}/sql/vendor1_cleanup.sql", { 12 | urdfs = "${var.config.project}.${google_bigquery_dataset.urdfs.dataset_id}" 13 | ingress = "${var.config.project}.${google_bigquery_table.vendor1_ingress.dataset_id}.${google_bigquery_table.vendor1_ingress.table_id}" 14 | }) 15 | use_legacy_sql = false 16 | } 17 | depends_on = [null_resource.CUSTOM_JSON_EXTRACT_ARRAY_FLOAT] 18 | } 19 | 20 | resource "google_bigquery_table" "unified_values" { 21 | dataset_id = google_bigquery_dataset.views.dataset_id 22 | table_id = "unified_values" 23 | view { 24 | query = templatefile("${path.module}/sql/unified_values.sql", { 25 | prober = "${var.config.project}.${google_bigquery_table.prober_ingress.dataset_id}.${google_bigquery_table.prober_ingress.table_id}", 26 | vendor1 = "${var.config.project}.${google_bigquery_table.vendor1.dataset_id}.${google_bigquery_table.vendor1.table_id}" 27 | }) 28 | use_legacy_sql = false 29 | } 30 | } 31 | 32 | resource "google_bigquery_table" "daily_adjusted_totals" { 33 | dataset_id = google_bigquery_dataset.views.dataset_id 34 | table_id = "daily_adjusted_totals" 35 | view { 36 | query = templatefile("${path.module}/sql/daily_adjusted_totals.sql", { 37 | values = "${var.config.project}.${google_bigquery_table.unified_values.dataset_id}.${google_bigquery_table.unified_values.table_id}", 38 | control_prefix = "${var.config.project}.${google_bigquery_table.control_range_view[0].dataset_id}.control_value_range_", 39 | control_fields = ["multiplier"] 40 | }) 41 | use_legacy_sql = false 42 | } 43 | } 44 | 45 | resource "google_bigquery_table" "current_totals_latest" { 46 | 
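  # Rolling view over the most recent day of daily_adjusted_totals (n_days = 1 below).
  # The update_current function materializes this view into the current_totals table
  # and exports it to the memorystore_uploads bucket as current_totals.json.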
dataset_id = google_bigquery_dataset.views.dataset_id 47 | table_id = "current_totals" 48 | view { 49 | query = templatefile("${path.module}/sql/last_n_days_totals.sql", { 50 | n_days = 1 51 | PREFIX = "current_totals/" 52 | daily_totals = "${var.config.project}.${google_bigquery_table.daily_adjusted_totals.dataset_id}.${google_bigquery_table.daily_adjusted_totals.table_id}" 53 | }) 54 | use_legacy_sql = false 55 | } 56 | } 57 | 58 | resource "google_bigquery_table" "historical_totals_latest" { 59 | dataset_id = google_bigquery_dataset.views.dataset_id 60 | table_id = "historical_totals" 61 | view { 62 | query = templatefile("${path.module}/sql/last_n_days_totals.sql", { 63 | n_days = "${var.config.retention_days}" 64 | PREFIX = "historic_totals/" 65 | daily_totals = "${var.config.project}.${google_bigquery_table.daily_adjusted_totals.dataset_id}.${google_bigquery_table.daily_adjusted_totals.table_id}" 66 | }) 67 | use_legacy_sql = false 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/function_memorystoreloader.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | memorystoreloader_function_name = "memorystoreload" 3 | } 4 | 5 | resource "google_cloudfunctions_function" "memorystoreloader" { 6 | name = "memorystoreloader" 7 | runtime = "nodejs10" 8 | /* Testing has minimal resource requirements */ 9 | max_instances = 2 10 | available_memory_mb = 2048 // Cache loading speed is improved with better instance type, linearly 11 | timeout = 60 12 | entry_point = "memorystoreload" 13 | region = var.config.region 14 | 15 | source_archive_bucket = var.config.code_bucket.name 16 | source_archive_object = google_storage_bucket_object.memorystoreload_code.name 17 | 18 | // Function triggered by mutations in the upload bucket 19 | event_trigger { 20 | event_type = "providers/cloud.storage/eventTypes/object.change" 21 | resource = google_storage_bucket.memorystore_uploads.name 22 | failure_policy { 23 | retry = false 24 | } 25 | } 26 | 27 | provider = "google-beta" 28 | vpc_connector = google_vpc_access_connector.serverless_vpc_connector.name 29 | 30 | environment_variables = { 31 | REDIS_HOST = var.memorystore_host 32 | REDIS_PORT = 6379 33 | EXPIRY = 60 * 60 * 24 * 30 // 30d expiry for keys 34 | } 35 | } 36 | 37 | data "archive_file" "memorystoreload_zip" { 38 | type = "zip" 39 | source_dir = "${path.module}/src/memorystoreload" 40 | output_path = ".tmp/${local.memorystoreloader_function_name}.zip" 41 | } 42 | 43 | resource "google_storage_bucket_object" "memorystoreload_code" { 44 | /* Name needs to be mangled to enable functions to be updated */ 45 | name = "${local.memorystoreloader_function_name}.${data.archive_file.memorystoreload_zip.output_md5}.zip" 46 | bucket = var.config.code_bucket.name 47 | source = data.archive_file.memorystoreload_zip.output_path 48 | } 49 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/function_prober.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | probe_function_name = "probe" 3 | } 4 | 5 | resource "google_cloudfunctions_function" "prober" { 6 | name = "prober" 7 | runtime = "nodejs10" 8 | /* Probing has minimal resource requirements */ 9 | max_instances = 1 10 | available_memory_mb = 128 11 | timeout = 30 12 | entry_point = "probe" 13 | region = var.config.region 14 | 15 | 
source_archive_bucket = var.config.code_bucket.name 16 | source_archive_object = google_storage_bucket_object.probe_code.name 17 | 18 | event_trigger { 19 | event_type = "providers/cloud.pubsub/eventTypes/topic.publish" 20 | resource = google_pubsub_topic.version_every_minute.name 21 | failure_policy { 22 | retry = false 23 | } 24 | } 25 | 26 | environment_variables = { 27 | PROBE_DATASET = var.prober_ingress_table.dataset_id 28 | PROBE_TABLE = var.prober_ingress_table.table_id 29 | CONTROLS_DATASET = var.control_dataset.dataset_id 30 | } 31 | } 32 | 33 | data "archive_file" "probe_zip" { 34 | type = "zip" 35 | source_dir = "${path.module}/src/probe" 36 | output_path = ".tmp/${local.probe_function_name}.zip" 37 | } 38 | 39 | resource "google_storage_bucket_object" "probe_code" { 40 | /* Name needs to be mangled to enable functions to be updated */ 41 | name = "${local.probe_function_name}.${data.archive_file.probe_zip.output_md5}.zip" 42 | bucket = var.config.code_bucket.name 43 | source = data.archive_file.probe_zip.output_path 44 | } 45 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/function_test.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | test_function_name = "test" 3 | } 4 | 5 | resource "google_cloudfunctions_function" "test" { 6 | name = "test" 7 | runtime = "nodejs10" 8 | /* Testing has minimal resource requirements */ 9 | max_instances = 1 10 | available_memory_mb = 128 11 | timeout = 30 12 | entry_point = "test" 13 | region = var.config.region 14 | 15 | source_archive_bucket = var.config.code_bucket.name 16 | source_archive_object = google_storage_bucket_object.test_code.name 17 | 18 | trigger_http = true 19 | 20 | provider = "google-beta" 21 | vpc_connector = google_vpc_access_connector.serverless_vpc_connector.name 22 | 23 | environment_variables = { 24 | CONFIG_BUCKET = var.config.code_bucket.name 25 | PROBER_DATASET = var.prober_ingress_table.dataset_id 26 | PROBER_TABLE = var.prober_ingress_table.table_id 27 | UNIFIED_VALUES_DATASET = var.unified_values_table.dataset_id 28 | UNIFIED_VALUES_TABLE = var.unified_values_table.table_id 29 | /* 30 | UNIFIED_METABOLICS_DATASET = var.unified_metabolics_table.dataset_id 31 | UNIFIED_METABOLICS_TABLE = var.unified_metabolics_table.table_id 32 | */ 33 | CURRENT_TOTALS_DATASET = var.current_totals_table.dataset_id 34 | CURRENT_TOTALS_TABLE = var.current_totals_table.table_id 35 | /* 36 | DAILY_METABOLICS_PRECOMPUTE_DATASET = var.daily_metabolics_precompute_table.dataset_id 37 | DAILY_METABOLICS_PRECOMPUTE_TABLE = var.daily_metabolics_precompute_table.table_id 38 | */ 39 | MEMORYSTORE_UPLOADS_BUCKET = google_storage_bucket.memorystore_uploads.name 40 | REDIS_HOST = var.memorystore_host 41 | REDIS_PORT = 6379 42 | } 43 | } 44 | 45 | data "archive_file" "test_zip" { 46 | type = "zip" 47 | source_dir = "${path.module}/src/test" 48 | output_path = ".tmp/${local.test_function_name}.zip" 49 | } 50 | 51 | resource "google_storage_bucket_object" "test_code" { 52 | /* Name needs to be mangled to enable functions to be updated */ 53 | name = "${local.test_function_name}.${data.archive_file.test_zip.output_md5}.zip" 54 | bucket = var.config.code_bucket.name 55 | source = data.archive_file.test_zip.output_path 56 | } 57 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/function_update_current.tf: 
-------------------------------------------------------------------------------- 1 | locals { 2 | materializer_function_name = "materialize" 3 | } 4 | 5 | resource "google_cloudfunctions_function" "update_current" { 6 | name = "update_current" 7 | runtime = "nodejs10" 8 | /* Running BQ client has minimal resource requirements */ 9 | max_instances = 1 10 | available_memory_mb = 128 11 | timeout = 30 12 | entry_point = "materialize" 13 | region = var.config.region 14 | 15 | source_archive_bucket = var.config.code_bucket.name 16 | source_archive_object = google_storage_bucket_object.materialize_code.name 17 | 18 | // Function triggered by mutations in the upload bucket 19 | event_trigger { 20 | event_type = "providers/cloud.pubsub/eventTypes/topic.publish" 21 | resource = google_pubsub_topic.version_every_two_minutes.name 22 | failure_policy { 23 | retry = false 24 | } 25 | } 26 | 27 | environment_variables = { 28 | PROJECT = var.config.project 29 | DATASET = var.current_totals_table.dataset_id 30 | TABLE = var.current_totals_table.table_id 31 | SOURCE_DATASET = var.current_totals_latest_table.dataset_id 32 | SOURCE_TABLE = var.current_totals_latest_table.table_id 33 | BUCKET = google_storage_bucket.memorystore_uploads.name 34 | FILE = "current_totals.json" 35 | } 36 | } 37 | 38 | data "archive_file" "materialize_zip" { 39 | type = "zip" 40 | source_dir = "${path.module}/src/materialize" 41 | output_path = ".tmp/${local.materializer_function_name}.zip" 42 | } 43 | 44 | resource "google_storage_bucket_object" "materialize_code" { 45 | /* Name needs to be mangled to enable functions to be updated */ 46 | name = "${local.materializer_function_name}.${data.archive_file.materialize_zip.output_md5}.zip" 47 | bucket = var.config.code_bucket.name 48 | source = data.archive_file.materialize_zip.output_path 49 | } 50 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/function_update_historical.tf: -------------------------------------------------------------------------------- 1 | resource "google_cloudfunctions_function" "update_historical" { 2 | name = "update_historical" 3 | runtime = "nodejs10" 4 | /* Running BQ client has minimal resource requirements */ 5 | max_instances = 1 6 | available_memory_mb = 128 7 | timeout = 30 8 | entry_point = "materialize" 9 | region = var.config.region 10 | 11 | source_archive_bucket = var.config.code_bucket.name 12 | // Note we reuse source code setup in function_update_current.tf 13 | source_archive_object = google_storage_bucket_object.materialize_code.name 14 | 15 | // Function triggered by mutations in the upload bucket 16 | event_trigger { 17 | event_type = "providers/cloud.pubsub/eventTypes/topic.publish" 18 | resource = google_pubsub_topic.version_every_hour.name 19 | failure_policy { 20 | retry = false 21 | } 22 | } 23 | 24 | environment_variables = { 25 | PROJECT = var.config.project 26 | DATASET = var.historical_totals_table.dataset_id 27 | TABLE = var.historical_totals_table.table_id 28 | SOURCE_DATASET = var.historical_totals_latest_table.dataset_id 29 | SOURCE_TABLE = var.historical_totals_latest_table.table_id 30 | N_DAYS = var.config.retention_days 31 | BUCKET = google_storage_bucket.memorystore_uploads.name 32 | FILE = "historical_totals.json" 33 | } 34 | } 35 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/gcs.tf: 
-------------------------------------------------------------------------------- 1 | resource "google_storage_bucket" "memorystore_uploads" { 2 | name = "${var.config.project}_memorystore_uploads" 3 | location = "${var.config.region}" 4 | } 5 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/pubsub.tf: -------------------------------------------------------------------------------- 1 | resource "google_pubsub_topic" "version_every_minute" { 2 | name = "version_every_minute" 3 | } 4 | 5 | resource "google_pubsub_topic" "version_every_two_minutes" { 6 | name = "version_every_two_minutes" 7 | } 8 | 9 | resource "google_pubsub_topic" "version_every_hour" { 10 | name = "version_every_hour" 11 | } 12 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/scheduler.tf: -------------------------------------------------------------------------------- 1 | resource "google_cloud_scheduler_job" "version_every_minute" { 2 | name = "version_every_minute" 3 | description = "Pings topic with version once a min" 4 | schedule = "* * * * *" 5 | project = var.config.project 6 | region = var.config.region 7 | 8 | pubsub_target { 9 | topic_name = "${google_pubsub_topic.version_every_minute.id}" 10 | data = "${base64encode(jsonencode({ 11 | version = "${var.config.version}" 12 | }))}" 13 | } 14 | } 15 | 16 | resource "google_cloud_scheduler_job" "version_every_two_minutes" { 17 | name = "version_every_two_minutes" 18 | description = "Pings topic with version once every 2 mins" 19 | schedule = "*/2 * * * *" 20 | project = "${var.config.project}" 21 | region = "${var.config.region}" 22 | 23 | pubsub_target { 24 | topic_name = "${google_pubsub_topic.version_every_two_minutes.id}" 25 | data = "${base64encode(jsonencode({ 26 | version = "${var.config.version}" 27 | }))}" 28 | } 29 | } 30 | 31 | resource "google_cloud_scheduler_job" "version_every_hour" { 32 | name = "version_every_hour" 33 | description = "Pings topic with version once every hour" 34 | schedule = "0 * * * *" 35 | region = "${var.config.region}" 36 | project = "${var.config.project}" 37 | 38 | pubsub_target { 39 | topic_name = "${google_pubsub_topic.version_every_hour.id}" 40 | data = "${base64encode(jsonencode({ 41 | version = "${var.config.version}" 42 | }))}" 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/src/materialize/index.js: -------------------------------------------------------------------------------- 1 | const { BigQuery } = require('@google-cloud/bigquery'); 2 | const { Storage } = require('@google-cloud/storage'); 3 | const bigquery = new BigQuery(); 4 | const gcs = new Storage(); 5 | 6 | exports.materialize = async (event, ctx) => { 7 | if (process.env.SOURCE_DATASET && process.env.SOURCE_TABLE && process.env.N_DAYS) { 8 | const query = ` 9 | MERGE 10 | \`${process.env.PROJECT}.${process.env.DATASET}.${process.env.TABLE}\` 11 | USING (SELECT * FROM 12 | \`${process.env.PROJECT}.${process.env.SOURCE_DATASET}.${process.env.SOURCE_TABLE}\`) 13 | ON FALSE 14 | WHEN NOT MATCHED BY SOURCE AND day > DATE_SUB(CURRENT_DATE, INTERVAL ${process.env.N_DAYS} DAY) THEN DELETE 15 | WHEN NOT MATCHED BY TARGET AND dat > DATE_SUB(CURRENT_DATE, INTERVAL ${process.env.N_DAYS} DAY) THEN INSERT ROW`; 16 | console.log(`Executing: ${query}`) 17 | const [job] = await bigquery.createQueryJob({ 18 | 
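      // The job location below must match where the datasets live; in this example the
      // BigQuery datasets are created with location = "EU" (see bigquery/urdf.tf and views.tf).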
query: query, 19 | location: 'EU', 20 | }); 21 | await job.getQueryResults(); 22 | } else if (process.env.SOURCE_DATASET && process.env.SOURCE_TABLE) { 23 | const query = ` 24 | CREATE OR REPLACE TABLE 25 | \`${process.env.PROJECT}.${process.env.DATASET}.${process.env.TABLE}\` 26 | AS SELECT * FROM 27 | \`${process.env.PROJECT}.${process.env.SOURCE_DATASET}.${process.env.SOURCE_TABLE}\``; 28 | console.log(`Executing: ${query}`) 29 | const [job] = await bigquery.createQueryJob({ 30 | query: query, 31 | location: 'EU', 32 | }); 33 | await job.getQueryResults(); 34 | } 35 | 36 | if (process.env.BUCKET && process.env.FILE) { 37 | console.log(`Exporting to ${process.env.BUCKET} ${process.env.FILE}`); 38 | const destfile = gcs 39 | .bucket(process.env.BUCKET) 40 | .file(process.env.FILE); 41 | 42 | // Now export this to a gcs location 43 | const dataset = bigquery.dataset(process.env.DATASET); 44 | const table = dataset.table(process.env.TABLE); 45 | await table.createExtractJob(destfile, { 46 | format: "JSON" 47 | }); 48 | } 49 | }; 50 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/src/materialize/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "materialize", 3 | "version": "0.0.1", 4 | "dependencies": { 5 | "@google-cloud/bigquery": "^3.0.0", 6 | "@google-cloud/storage": "^3.0.2" 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/src/memorystoreload/index.js: -------------------------------------------------------------------------------- 1 | const Redis = require('redis'); 2 | const split = require('split'); 3 | const { Storage } = require('@google-cloud/storage'); 4 | 5 | const redis = Redis.createClient( 6 | process.env.REDIS_PORT, 7 | process.env.REDIS_HOST); 8 | const gcs = new Storage(); 9 | 10 | redis.on("error", function (err) { 11 | console.error("Redis error " + err); 12 | }); 13 | 14 | /* 15 | * Read a bucket and path, assume the file is line deliminated JSON, extract field "key" 16 | * and push into redis with the whole JSON record as the value. 
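 * Example input line (hypothetical record shape; only the uppercase "KEY" field is read):
 *   {"KEY": "user_29837", "total": 42.0}
 * which is written roughly as SET user_29837 '<whole line>' EX $EXPIRY.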
17 | * Triggered by writes to a bucket 18 | */ 19 | exports.memorystoreload = (info, context) => new Promise((resolve, reject) => { 20 | const bucket = gcs.bucket(info.bucket); 21 | const file = bucket.file(info.name); 22 | let keysWritten = 0; 23 | file.createReadStream() 24 | .on('error', reject) 25 | .pipe(split()) // convert to lines 26 | .on('data', function (line) { 27 | if (!line || line === "") { 28 | return; 29 | } 30 | keysWritten++; 31 | const data = JSON.parse(line); 32 | redis.set(data.KEY, line, 'EX', process.env.EXPIRY, redis.print); 33 | }) 34 | .on('end', () => { 35 | console.log(`Keys written: ${keysWritten}`); 36 | redis.wait(1, resolve); 37 | }) 38 | .on('error', reject); 39 | }); 40 | 41 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/src/memorystoreload/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "memorystoreloader", 3 | "version": "0.0.1", 4 | "dependencies": { 5 | "redis": "^2.8.0", 6 | "split": "^1.0.1", 7 | "@google-cloud/storage": "^3.0.2" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/src/probe/index.js: -------------------------------------------------------------------------------- 1 | const { BigQuery } = require('@google-cloud/bigquery'); 2 | const bigquery = new BigQuery(); 3 | 4 | 5 | let lastVersionUpdated = null; 6 | 7 | const controls = [{ 8 | field: "multiplier", 9 | value: 1.0 10 | }]; 11 | 12 | exports.probe = async (event, ctx) => { 13 | const payload = event.data ? JSON.parse(Buffer.from(event.data, 'base64')) 14 | : event.query; // If running as http 15 | 16 | // Unique user for each deployment version 17 | const user = `probe_${payload.version}`; 18 | const timestamp = new Date().toISOString(); 19 | const work = [] 20 | 21 | // Regular prober work is to insert activity reguarly 22 | work.push( 23 | bigquery 24 | .dataset(process.env.PROBE_DATASET) 25 | .table(process.env.PROBE_TABLE) 26 | .insert([{ 27 | user: user, 28 | timestamp: timestamp, 29 | value: 1.0 30 | }]) 31 | ); 32 | 33 | // But also with new deploys we need to update user control values 34 | if (payload.version != lastVersionUpdated) { 35 | console.log(`New version detected ${payload.version} writing control`); 36 | work.concat(controls.map( 37 | control => bigquery 38 | .dataset(process.env.CONTROLS_DATASET) 39 | .table(`control_${control.field}`) 40 | .insert([{ 41 | user: user, 42 | timestamp: timestamp, 43 | [control.field]: control.value 44 | }]).catch(err => { 45 | console.error(JSON.stringify(err)); 46 | }) 47 | )); 48 | } 49 | 50 | await Promise.all(work) 51 | lastVersionUpdated = payload.version; 52 | if (event.query) ctx.sendStatus(200); // If running as HTTP funciton 53 | }; 54 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/src/probe/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "probe", 3 | "version": "0.0.1", 4 | "dependencies": { 5 | "@google-cloud/bigquery": "^3.0.0" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/src/test/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "test", 3 | "version": "0.0.1", 4 | 
"dependencies": { 5 | "@google-cloud/bigquery": "^3.0.0", 6 | "@google-cloud/storage": "^3.0.2", 7 | "moment-timezone": "^0.5.26", 8 | "redis": "^2.8.0" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/variables.tf: -------------------------------------------------------------------------------- 1 | variable "config" { 2 | type = any 3 | } 4 | 5 | variable "prober_ingress_table" { 6 | type = any 7 | } 8 | 9 | variable "control_dataset" { 10 | type = any 11 | } 12 | 13 | variable "unified_values_table" { 14 | type = any 15 | } 16 | 17 | variable "current_totals_latest_table" { 18 | type = any 19 | } 20 | 21 | variable "historical_totals_latest_table" { 22 | type = any 23 | } 24 | 25 | variable "current_totals_table" { 26 | type = any 27 | } 28 | 29 | variable "historical_totals_table" { 30 | type = any 31 | } 32 | 33 | variable "memorystore_host" { 34 | type = any 35 | } 36 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/functions/vpc.tf: -------------------------------------------------------------------------------- 1 | resource "google_vpc_access_connector" "serverless_vpc_connector" { 2 | name = "${var.config.network}-connector" 3 | provider = "google-beta" 4 | region = var.config.region 5 | ip_cidr_range = var.config.ip_cidr_range 6 | network = var.config.network 7 | } 8 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "gcs" { 3 | prefix = "terraform/state" 4 | bucket = "terraform-larkworthy-tester" // Must be pre-provisioned 5 | } 6 | } 7 | 8 | provider "google" { 9 | project = "larkworthy-tester" 10 | region = "europe-west1" 11 | } 12 | 13 | provider "google-beta" { 14 | project = "larkworthy-tester" 15 | region = "europe-west1" 16 | } 17 | 18 | provider "archive" { 19 | version = "~> 1.2.0" 20 | } 21 | 22 | locals { 23 | project = "larkworthy-tester" 24 | config = { 25 | project = local.project 26 | region = "europe-west1" 27 | version = module.version.result 28 | retention_days = 30 29 | network = "default" 30 | ip_cidr_range = "10.9.0.0/28" 31 | memorystore_tier = "BASIC" 32 | code_bucket = google_storage_bucket.code 33 | } 34 | } 35 | 36 | // We generate a version of the backend by hashing the contents of this directory 37 | module "version" { 38 | source = "github.com/claranet/terraform-path-hash?ref=v0.1.0" 39 | path = "." 
40 | } 41 | 42 | resource "google_storage_bucket" "code" { 43 | name = "${local.project}_code" 44 | location = "EU" 45 | } 46 | 47 | resource "google_storage_bucket_object" "config_file" { 48 | name = "config.json" 49 | content = jsonencode(local.config) 50 | bucket = google_storage_bucket.code.name 51 | } 52 | 53 | module "bigquery" { 54 | source = "./bigquery" 55 | config = local.config 56 | } 57 | 58 | module "memorystore" { 59 | source = "./memorystore" 60 | config = "${local.config}" 61 | } 62 | 63 | module "functions" { 64 | source = "./functions" 65 | memorystore_host = module.memorystore.memorystore_host 66 | 67 | prober_ingress_table = module.bigquery.prober_ingress_table 68 | control_dataset = module.bigquery.control_dataset 69 | unified_values_table = module.bigquery.unified_values_table 70 | current_totals_latest_table = module.bigquery.current_totals_latest_table 71 | historical_totals_latest_table = module.bigquery.historical_totals_latest_table 72 | current_totals_table = module.bigquery.current_totals_table 73 | historical_totals_table = module.bigquery.historical_totals_table 74 | 75 | config = local.config 76 | } 77 | 78 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/memorystore/memorystore.tf: -------------------------------------------------------------------------------- 1 | resource "google_redis_instance" "cache" { 2 | name = "redis" 3 | memory_size_gb = 1 4 | project = var.config.project 5 | location_id = "${var.config.region}-c" 6 | tier = "BASIC" 7 | authorized_network = var.config.network 8 | } 9 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/memorystore/outputs.tf: -------------------------------------------------------------------------------- 1 | output "memorystore_host" { 2 | value = google_redis_instance.cache.host 3 | } 4 | -------------------------------------------------------------------------------- /google_cloud/CQRS_bigquery_memorystore/memorystore/variables.tf: -------------------------------------------------------------------------------- 1 | variable "config" { 2 | type = any 3 | } 4 | -------------------------------------------------------------------------------- /google_cloud/_header_/README.md: -------------------------------------------------------------------------------- 1 | # Google Cloud Platform Examples -------------------------------------------------------------------------------- /google_cloud/camunda-secure/.dockerignore: -------------------------------------------------------------------------------- 1 | .terraform/ 2 | 3 | -------------------------------------------------------------------------------- /google_cloud/camunda-secure/.gcloudignore: -------------------------------------------------------------------------------- 1 | .terraform/ 2 | -------------------------------------------------------------------------------- /google_cloud/camunda-secure/.gitignore: -------------------------------------------------------------------------------- 1 | .build 2 | -------------------------------------------------------------------------------- /google_cloud/camunda-secure/Dockerfile.template: -------------------------------------------------------------------------------- 1 | FROM eu.gcr.io/${project}/${image}:${tag} 2 | # https://forum.camunda.org/t/apt-get-is-missing-on-camunda-bpm-platform-7-9-0-image/7789 3 | USER root 4 | RUN apk add --no-cache wget 5 | USER camunda 6 | RUN rm 
/camunda/lib/postgresql-*.jar 7 | RUN wget --directory-prefix=/camunda/lib https://github.com/GoogleCloudPlatform/cloud-sql-jdbc-socket-factory/releases/download/v1.0.15/postgres-socket-factory-1.0.15-jar-with-driver-and-dependencies.jar 8 | 9 | # json-path and deps 10 | RUN wget --directory-prefix=/camunda/lib https://repo1.maven.org/maven2/com/jayway/jsonpath/json-path/2.4.0/json-path-2.4.0.jar 11 | RUN wget --directory-prefix=/camunda/lib https://repo1.maven.org/maven2/net/minidev/json-smart/2.3/json-smart-2.3.jar 12 | RUN wget --directory-prefix=/camunda/lib https://repo1.maven.org/maven2/net/minidev/accessors-smart/1.2/accessors-smart-1.2.jar 13 | 14 | COPY bpm-platform.xml /camunda/conf/ 15 | # COPY server.xml /camunda/conf/ 16 | -------------------------------------------------------------------------------- /google_cloud/camunda-secure/build.tf: -------------------------------------------------------------------------------- 1 | # Copy Camunda base image from Dockerhub image into Google Container Registry 2 | module "docker-mirror-camunda-bpm-platform" { 3 | source = "github.com/neomantra/terraform-docker-mirror" 4 | image_name = local.config.base_image_name 5 | image_tag = local.config.base_image_tag 6 | dest_prefix = "eu.gcr.io/${local.project}" 7 | } 8 | 9 | # Hydrate docker template file into .build directory 10 | resource "local_file" "dockerfile" { 11 | content = templatefile("${path.module}/Dockerfile.template", { 12 | project = local.project 13 | image = local.config.base_image_name 14 | tag = local.config.base_image_tag 15 | }) 16 | filename = "${path.module}/.build/Dockerfile" 17 | } 18 | 19 | # Hydrate bpm-platform config into .build directory 20 | resource "local_file" "bpm-platform" { 21 | content = templatefile("${path.module}/config/bpm-platform.xml.template", { 22 | maxJobsPerAcquisition = null 23 | lockTimeInMillis = null 24 | waitTimeInMillis = 1 25 | maxWait = null 26 | history = "none" 27 | databaseSchemaUpdate = null # default 28 | authorizationEnabled = null # default 29 | jobExecutorDeploymentAware = "false" 30 | historyCleanupBatchWindowStartTime = null # default 31 | }) 32 | filename = "${path.module}/.build/bpm-platform.xml" 33 | } 34 | 35 | # Build a customized image of Camunda to include the cloud sql postgres socket factory library 36 | # Required to connect to Cloud SQL 37 | # Built using Cloud Build, image stored in GCR 38 | resource "null_resource" "camunda_cloudsql_image" { 39 | depends_on = [module.docker-mirror-camunda-bpm-platform] 40 | triggers = { 41 | # Rebuild if we change the base image, dockerfile, or bpm-platform config 42 | image = "eu.gcr.io/${local.project}/camunda_secure:${local.config.base_image_tag}_${ 43 | sha1( 44 | "${sha1(local_file.dockerfile.content)}${sha1(local_file.bpm-platform.content)}" 45 | ) 46 | }" 47 | } 48 | provisioner "local-exec" { 49 | command = <<-EOT 50 | gcloud builds submit \ 51 | --project ${local.project} \ 52 | --tag ${self.triggers.image} \ 53 | ${path.module}/.build 54 | EOT 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /google_cloud/camunda-secure/camunda.tf: -------------------------------------------------------------------------------- 1 | # Create service account to run service 2 | resource "google_service_account" "camunda" { 3 | account_id = "camunda-secure-worker" 4 | display_name = "Camunda Secure Worker" 5 | } 6 | 7 | # Give the service account access to Cloud SQL 8 | resource "google_project_iam_member" "project" { 9 | role = 
"roles/cloudsql.client" 10 | member = "serviceAccount:${google_service_account.camunda.email}" 11 | } 12 | 13 | # Cloud Run Camunda service 14 | resource "google_cloud_run_service" "camunda" { 15 | name = "camunda-secure" 16 | location = local.config.region 17 | template { 18 | spec { 19 | # Use locked down Service Account 20 | service_account_name = google_service_account.camunda.email 21 | containers { 22 | image = null_resource.camunda_cloudsql_image.triggers.image 23 | resources { 24 | limits = { 25 | # Default of 256Mb is not enough to start Camunda 26 | memory = "2Gi" 27 | cpu = "2000m" 28 | } 29 | } 30 | env { 31 | name = "DB_URL" 32 | # Complicated DB URL to Cloud SQL 33 | # See https://github.com/GoogleCloudPlatform/cloud-sql-jdbc-socket-factory 34 | value = "jdbc:postgresql:///${google_sql_database.database.name}?cloudSqlInstance=${google_sql_database_instance.camunda-db.connection_name}&socketFactory=com.google.cloud.sql.postgres.SocketFactory" 35 | } 36 | env { 37 | name = "DB_DRIVER" 38 | value = "org.postgresql.Driver" 39 | } 40 | env { 41 | name = "DB_USERNAME" 42 | value = google_sql_user.user.name 43 | } 44 | env { 45 | name = "nonce" 46 | value = "ddd" 47 | } 48 | env { 49 | name = "DB_PASSWORD" 50 | value = google_sql_user.user.password 51 | } 52 | # Test instance of Cloud SQL has low connection limit 53 | # So we turn down the connection pool size 54 | env { 55 | name = "DB_CONN_MAXACTIVE" 56 | value = "5" 57 | } 58 | env { 59 | name = "DB_CONN_MAXIDLE" 60 | value = "0" 61 | } 62 | env { 63 | name = "DB_CONN_MINIDLE" 64 | value = "0" 65 | } 66 | env { 67 | name = "DB_VALIDATE_ON_BORROW" 68 | value = "true" 69 | } 70 | } 71 | } 72 | 73 | metadata { 74 | annotations = { 75 | "autoscaling.knative.dev/maxScale" = "1" # no clusting 76 | "run.googleapis.com/cloudsql-instances" = google_sql_database_instance.camunda-db.connection_name 77 | } 78 | } 79 | } 80 | 81 | traffic { 82 | percent = 100 83 | latest_revision = true 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /google_cloud/camunda-secure/cloudsql.tf: -------------------------------------------------------------------------------- 1 | resource "google_sql_database_instance" "camunda-db" { 2 | name = "camunda-db-postgres" 3 | database_version = "POSTGRES_11" 4 | region = local.config.region 5 | 6 | settings { 7 | # Very small instance for testing. 
8 | tier = "db-f1-micro" 9 | ip_configuration { 10 | ipv4_enabled = true 11 | } 12 | } 13 | } 14 | 15 | resource "google_sql_user" "user" { 16 | name = "camundasecure" 17 | instance = google_sql_database_instance.camunda-db.name 18 | password = "futurice" 19 | } 20 | 21 | resource "google_sql_database" "database" { 22 | name = "camundasecure" 23 | instance = google_sql_database_instance.camunda-db.name 24 | } 25 | -------------------------------------------------------------------------------- /google_cloud/camunda-secure/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "gcs" { 3 | prefix = "camunda-secure/state" 4 | bucket = "terraform-larkworthy-camunda" // Must be pre-provisioned 5 | } 6 | } 7 | 8 | provider "google" { 9 | project = "larkworthy-tester" 10 | region = "europe-west1" 11 | } 12 | 13 | locals { 14 | project = "larkworthy-tester" 15 | config = { 16 | project = local.project 17 | base_image_name = "camunda/camunda-bpm-platform" 18 | base_image_tag = "7.12.0" 19 | region = "europe-west1" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /google_cloud/camunda-secure/readme.md: -------------------------------------------------------------------------------- 1 | ## Provisioning Camunda on Cloud Run + Cloud SQL, using Terraform and Cloud Build 2 | 3 | Terraform receipe for running Camunda BPMN workflow engine serverlessly on Cloud Run, using Cloud SQL as the backing store. Custom image building offloaded to Cloud Build. Private container image hosting in Google Container Engine. 4 | 5 | Customize the base image in the main.tf locals. 6 | 7 | Read more on the blog 8 | - [Provisioning Serverless Camunda on Cloud Run](https://www.futurice.com/blog/serverless-camunda-terraform-recipe-using-cloud-run-and-cloud-sql) 9 | - [Call external services with at-least-once delevery](https://www.futurice.com/blog/at-least-once-delivery-for-serverless-camunda-workflow-automation) 10 | 11 | 12 | #Camunda # Cloud Run #Cloud SQL #Cloud Build #Container Registry #Docker 13 | 14 | ### Terraform setup 15 | 16 | Create service account credentials for running terraform locally. 
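For example (assuming a dedicated `terraform` service account already exists in the project):

    gcloud iam service-accounts keys create terraform-sa.json --iam-account=terraform@larkworthy-tester.iam.gserviceaccount.com
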
Then 17 | 18 | export GOOGLE_CREDENTIALS= 19 | gcloud auth activate-service-account --key-file $GOOGLE_CREDENTIALS 20 | terraform init 21 | 22 | 23 | Terraform service account, Editor role was not enough 24 | - to set cloud run service to noauth, had to add Security Admin on camunda cloud run resource (NOT PROJECT level) 25 | 26 | ### Docker / gcloud Setup 27 | 28 | For mac I needed to expose the docker deamon on a tcp port:- 29 | 30 | docker run -d -v /var/run/docker.sock:/var/run/docker.sock -p 127.0.0.1:1234:1234 bobrik/socat TCP-LISTEN:1234,fork UNIX-CONNECT:/var/run/docker.sock 31 | 32 | Then in bash_profile: 33 | 34 | export DOCKER_HOST=tcp://localhost:1234 35 | 36 | Also needed to setup GCR creds in docker 37 | 38 | gcloud auth configure-docker 39 | -------------------------------------------------------------------------------- /google_cloud/camunda/.dockerignore: -------------------------------------------------------------------------------- 1 | .terraform/ 2 | 3 | -------------------------------------------------------------------------------- /google_cloud/camunda/.gcloudignore: -------------------------------------------------------------------------------- 1 | .terraform/ 2 | -------------------------------------------------------------------------------- /google_cloud/camunda/.gitignore: -------------------------------------------------------------------------------- 1 | .build 2 | -------------------------------------------------------------------------------- /google_cloud/camunda/Dockerfile.template: -------------------------------------------------------------------------------- 1 | FROM eu.gcr.io/${project}/${image}:${tag} 2 | # https://forum.camunda.org/t/apt-get-is-missing-on-camunda-bpm-platform-7-9-0-image/7789 3 | USER root 4 | RUN apk add --no-cache wget 5 | USER camunda 6 | RUN rm /camunda/lib/postgresql-*.jar 7 | RUN wget --directory-prefix=/camunda/lib https://github.com/GoogleCloudPlatform/cloud-sql-jdbc-socket-factory/releases/download/v1.0.15/postgres-socket-factory-1.0.15-jar-with-driver-and-dependencies.jar 8 | 9 | # json-path and deps 10 | RUN wget --directory-prefix=/camunda/lib https://repo1.maven.org/maven2/com/jayway/jsonpath/json-path/2.4.0/json-path-2.4.0.jar 11 | RUN wget --directory-prefix=/camunda/lib https://repo1.maven.org/maven2/net/minidev/json-smart/2.3/json-smart-2.3.jar 12 | RUN wget --directory-prefix=/camunda/lib https://repo1.maven.org/maven2/net/minidev/accessors-smart/1.2/accessors-smart-1.2.jar 13 | 14 | COPY bpm-platform.xml /camunda/conf/ 15 | -------------------------------------------------------------------------------- /google_cloud/camunda/build.tf: -------------------------------------------------------------------------------- 1 | # Copy Camunda base image from Dockerhub image into Google Container Registry 2 | module "docker-mirror-camunda-bpm-platform" { 3 | source = "github.com/neomantra/terraform-docker-mirror" 4 | image_name = local.config.base_image_name 5 | image_tag = local.config.base_image_tag 6 | dest_prefix = "eu.gcr.io/${local.project}" 7 | } 8 | 9 | # Hydrate docker template file into .build directory 10 | resource "local_file" "dockerfile" { 11 | content = templatefile("${path.module}/Dockerfile.template", { 12 | project = local.project 13 | image = local.config.base_image_name 14 | tag = local.config.base_image_tag 15 | }) 16 | filename = "${path.module}/.build/Dockerfile" 17 | } 18 | 19 | # Hydrate bpm-platform config into .build directory 20 | resource "local_file" "bpm-platform" { 21 | content = 
templatefile("${path.module}/config/bpm-platform.xml.template", { 22 | maxJobsPerAcquisition = null 23 | lockTimeInMillis = null 24 | waitTimeInMillis = 1 25 | maxWait = null 26 | history = "none" 27 | databaseSchemaUpdate = null # default 28 | authorizationEnabled = null # default 29 | jobExecutorDeploymentAware = "false" 30 | historyCleanupBatchWindowStartTime = null # default 31 | }) 32 | filename = "${path.module}/.build/bpm-platform.xml" 33 | } 34 | 35 | # Build a customized image of Camunda to include the cloud sql postgres socket factory library 36 | # Required to connect to Cloud SQL 37 | # Built using Cloud Build, image stored in GCR 38 | resource "null_resource" "camunda_cloudsql_image" { 39 | depends_on = [module.docker-mirror-camunda-bpm-platform] 40 | triggers = { 41 | # Rebuild if we change the base image, dockerfile, or bpm-platform config 42 | image = "eu.gcr.io/${local.project}/camunda_cloudsql:${local.config.base_image_tag}_${ 43 | sha1( 44 | "${sha1(local_file.dockerfile.content)}${sha1(local_file.bpm-platform.content)}" 45 | ) 46 | }" 47 | } 48 | provisioner "local-exec" { 49 | command = <<-EOT 50 | gcloud builds submit \ 51 | --project ${local.project} \ 52 | --tag ${self.triggers.image} \ 53 | ${path.module}/.build 54 | EOT 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /google_cloud/camunda/camunda.tf: -------------------------------------------------------------------------------- 1 | 2 | # Policy to allow public access to Cloud Run endpoint 3 | data "google_iam_policy" "noauth" { 4 | binding { 5 | role = "roles/run.invoker" 6 | members = ["allUsers"] 7 | } 8 | } 9 | 10 | # Bind public policy to our Camunda Cloud Run service 11 | resource "google_cloud_run_service_iam_policy" "noauth" { 12 | location = google_cloud_run_service.camunda.location 13 | project = google_cloud_run_service.camunda.project 14 | service = google_cloud_run_service.camunda.name 15 | policy_data = data.google_iam_policy.noauth.policy_data 16 | } 17 | 18 | # Create service account to run service 19 | resource "google_service_account" "camunda" { 20 | account_id = "camunda-worker" 21 | display_name = "Camunda Worker" 22 | } 23 | 24 | # Give the service account access to Cloud SQL 25 | resource "google_project_iam_member" "project" { 26 | role = "roles/cloudsql.client" 27 | member = "serviceAccount:${google_service_account.camunda.email}" 28 | } 29 | 30 | # Cloud Run Camunda service 31 | resource "google_cloud_run_service" "camunda" { 32 | name = "camunda" 33 | location = local.config.region 34 | template { 35 | spec { 36 | # Use locked down Service Account 37 | service_account_name = google_service_account.camunda.email 38 | containers { 39 | image = null_resource.camunda_cloudsql_image.triggers.image 40 | resources { 41 | limits = { 42 | # Default of 256Mb is not enough to start Camunda 43 | memory = "2Gi" 44 | cpu = "1000m" 45 | } 46 | } 47 | env { 48 | name = "DB_URL" 49 | # Complicated DB URL to Cloud SQL 50 | # See https://github.com/GoogleCloudPlatform/cloud-sql-jdbc-socket-factory 51 | value = "jdbc:postgresql:///${google_sql_database.database.name}?cloudSqlInstance=${google_sql_database_instance.camunda-db.connection_name}&socketFactory=com.google.cloud.sql.postgres.SocketFactory" 52 | } 53 | 54 | env { 55 | name = "DB_DRIVER" 56 | value = "org.postgresql.Driver" 57 | } 58 | env { 59 | name = "DB_USERNAME" 60 | value = google_sql_user.user.name 61 | } 62 | env { 63 | name = "DB_PASSWORD" 64 | value = google_sql_user.user.password 65 | } 
66 | # Test instance of Cloud SQL has low connection limit 67 | # So we turn down the connection pool size 68 | env { 69 | name = "DB_CONN_MAXACTIVE" 70 | value = "5" 71 | } 72 | env { 73 | name = "DB_CONN_MAXIDLE" 74 | value = "5" 75 | } 76 | env { 77 | name = "DB_CONN_MINIDLE" 78 | value = "0" 79 | } 80 | env { 81 | name = "DB_VALIDATE_ON_BORROW" 82 | value = "true" 83 | } 84 | } 85 | } 86 | 87 | metadata { 88 | annotations = { 89 | "autoscaling.knative.dev/maxScale" = "1" # no clusting 90 | "run.googleapis.com/cloudsql-instances" = google_sql_database_instance.camunda-db.connection_name 91 | } 92 | } 93 | } 94 | 95 | traffic { 96 | percent = 100 97 | latest_revision = true 98 | } 99 | } 100 | -------------------------------------------------------------------------------- /google_cloud/camunda/cloudsql.tf: -------------------------------------------------------------------------------- 1 | resource "google_sql_database_instance" "camunda-db" { 2 | name = "camunda-db-postgres" 3 | database_version = "POSTGRES_11" 4 | region = local.config.region 5 | 6 | settings { 7 | # Very small instance for testing. 8 | tier = "db-f1-micro" 9 | ip_configuration { 10 | ipv4_enabled = true 11 | } 12 | } 13 | } 14 | 15 | resource "google_sql_user" "user" { 16 | name = "camunda" 17 | instance = google_sql_database_instance.camunda-db.name 18 | password = "futurice" 19 | } 20 | 21 | resource "google_sql_database" "database" { 22 | name = "camunda" 23 | instance = google_sql_database_instance.camunda-db.name 24 | } 25 | -------------------------------------------------------------------------------- /google_cloud/camunda/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | backend "gcs" { 3 | prefix = "terraform/state" 4 | bucket = "terraform-larkworthy-camunda" // Must be pre-provisioned 5 | } 6 | } 7 | 8 | provider "google" { 9 | project = "larkworthy-tester" 10 | region = "europe-west1" 11 | } 12 | 13 | locals { 14 | project = "larkworthy-tester" 15 | config = { 16 | project = local.project 17 | base_image_name = "camunda/camunda-bpm-platform" 18 | base_image_tag = "7.12.0" 19 | region = "europe-west1" 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /google_cloud/camunda/readme.md: -------------------------------------------------------------------------------- 1 | ## Provisioning Camunda on Cloud Run + Cloud SQL, using Terraform and Cloud Build 2 | 3 | Terraform receipe for running Camunda BPMN workflow engine serverlessly on Cloud Run, using Cloud SQL as the backing store. Custom image building offloaded to Cloud Build. Private container image hosting in Google Container Engine. 4 | 5 | Customize the base image in the main.tf locals. 6 | 7 | Read more on the blog 8 | - [Provisioning Serverless Camunda on Cloud Run](https://www.futurice.com/blog/serverless-camunda-terraform-recipe-using-cloud-run-and-cloud-sql) 9 | - [Call external services with at-least-once delevery](https://www.futurice.com/blog/at-least-once-delivery-for-serverless-camunda-workflow-automation) 10 | 11 | 12 | #Camunda # Cloud Run #Cloud SQL #Cloud Build #Container Registry #Docker 13 | 14 | ### Terraform setup 15 | 16 | Create service account credentials for running terraform locally. 
Then 17 | 18 | export GOOGLE_CREDENTIALS= 19 | gcloud auth activate-service-account --key-file $GOOGLE_CREDENTIALS 20 | terraform init 21 | 22 | 23 | Terraform service account, Editor role was not enough 24 | - to set cloud run service to noauth, had to add Security Admin on camunda cloud run resource (NOT PROJECT level) 25 | 26 | ### Docker / gcloud Setup 27 | 28 | For mac I needed to expose the docker deamon on a tcp port:- 29 | 30 | docker run -d -v /var/run/docker.sock:/var/run/docker.sock -p 127.0.0.1:1234:1234 bobrik/socat TCP-LISTEN:1234,fork UNIX-CONNECT:/var/run/docker.sock 31 | 32 | Then in bash_profile: 33 | 34 | export DOCKER_HOST=tcp://localhost:1234 35 | 36 | Also needed to setup GCR creds in docker 37 | 38 | gcloud auth configure-docker 39 | -------------------------------------------------------------------------------- /google_cloud/minecraft/README.md: -------------------------------------------------------------------------------- 1 | # Economical Minecraft server 2 | 3 | A safe Minecraft server that won't break the bank. Game data is preserved across sessions. Server is hosted on a permenant IP address. You need to start the VM each session, but it will shutdown within 24 hours if you forget to turn it off. Process is run in a sandboxed VM, so any server exploits cannot do any serious damage. 4 | 5 | We are experimenting with providing support through a [google doc](https://docs.google.com/document/d/1TXyzHKqoKMS-jY9FSMrYNLEGathqSG8YuHdj0Z9GP34). 6 | 7 | Help us make this simple for others to use by asking for help. 8 | 9 | Launch blog can be found [here](https://www.futurice.com/blog/friends-and-family-minecraft-server-terraform-recipe) 10 | 11 | Features 12 | - Runs [itzg/minecraft-server](https://hub.docker.com/r/itzg/minecraft-server/) Docker image 13 | - Preemtible VM (cheapest), shuts down automatically within 24h if you forget to stop the VM 14 | - Reserves a stable public IP, so the minecraft clients do not need to be reconfigured 15 | - Reserves the disk, so game data is remembered across sessions 16 | - Restricted service account, VM has no ability to consume GCP resources beyond its instance and disk 17 | - 2$ per month 18 | - Reserved IP address costs: $1.46 per month 19 | - Reserved 10Gb disk costs: $0.40 20 | - VM cost: $0.01 per hour, max session cost $0.24 21 | 22 | -------------------------------------------------------------------------------- /google_cloud/oathkeeper/.dockerignore: -------------------------------------------------------------------------------- 1 | .terraform/ 2 | 3 | -------------------------------------------------------------------------------- /google_cloud/oathkeeper/.gitignore: -------------------------------------------------------------------------------- 1 | .build 2 | -------------------------------------------------------------------------------- /google_cloud/oathkeeper/Dockerfile.template: -------------------------------------------------------------------------------- 1 | FROM eu.gcr.io/${project}/${image}:${tag} 2 | ADD config.yml /config.yaml 3 | -------------------------------------------------------------------------------- /google_cloud/oathkeeper/README.md: -------------------------------------------------------------------------------- 1 | I was hoping to add an identity aware proxy to a Google Cloud Run endpoint using oathkeeper. 
2 | However, as of 2020/05/02 there is not easy way to fetch a token from the metadata server 3 | and add it to an upstream header, required to make an authenticated call to a protected Cloud Run endpoint -------------------------------------------------------------------------------- /google_cloud/oathkeeper/build.tf: -------------------------------------------------------------------------------- 1 | # Mirror base image from Dockerhub image into Google Container Registry 2 | module "docker-mirror" { 3 | source = "github.com/neomantra/terraform-docker-mirror" 4 | image_name = local.base_image_name 5 | image_tag = local.base_image_tag 6 | dest_prefix = "eu.gcr.io/${local.project}" 7 | } 8 | 9 | # Hydrate docker template file into .build directory 10 | resource "local_file" "dockerfile" { 11 | content = templatefile("${path.module}/Dockerfile.template", { 12 | project = local.project 13 | image = local.base_image_name 14 | tag = local.base_image_tag 15 | }) 16 | filename = "${path.module}/.build/Dockerfile" 17 | } 18 | 19 | # Hydrate bpm-platform config into .build directory 20 | resource "local_file" "config" { 21 | content = templatefile("${path.module}/config.template.yml", { 22 | }) 23 | filename = "${path.module}/.build/config.yml" 24 | } 25 | 26 | # Build a customized image of Camunda to include the cloud sql postgres socket factory library 27 | # Required to connect to Cloud SQL 28 | # Built using Cloud Build, image stored in GCR 29 | resource "null_resource" "oathkeeper_image" { 30 | depends_on = [module.docker-mirror] 31 | triggers = { 32 | # Rebuild if we change the base image, dockerfile, or bpm-platform config 33 | image = "eu.gcr.io/${local.project}/oathkeeper:${local.base_image_tag}_${ 34 | sha1( 35 | "${sha1(local_file.dockerfile.content)}${sha1(local_file.config.content)}" 36 | ) 37 | }" 38 | } 39 | provisioner "local-exec" { 40 | command = <<-EOT 41 | gcloud builds submit \ 42 | --project ${local.project} \ 43 | --tag ${self.triggers.image} \ 44 | ${path.module}/.build 45 | EOT 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /google_cloud/oathkeeper/config.template.yml: -------------------------------------------------------------------------------- 1 | serve: 2 | proxy: 3 | host: 0.0.0.0 # Cloud Run requirements 4 | port: 8080 5 | api: 6 | port: 4456 # run the api at port 4456 7 | 8 | access_rules: 9 | repositories: # overwritten elsewhere 10 | 11 | errors: 12 | fallback: 13 | - json 14 | handlers: 15 | json: 16 | enabled: true 17 | config: 18 | verbose: true 19 | redirect: 20 | enabled: true 21 | config: 22 | to: https://www.ory.sh/docs 23 | 24 | mutators: 25 | header: 26 | enabled: true 27 | config: 28 | headers: 29 | X-User: "{{ print .Subject }}" 30 | # You could add some other headers, for example with data from the 31 | # session. 
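        # With the anonymous authenticator configured below (subject: guest), a rule that
        # pairs it with this header mutator would hit the upstream with: X-User: guest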
32 | # X-Some-Arbitrary-Data: "{{ print .Extra.some.arbitrary.data }}" 33 | noop: 34 | enabled: true 35 | id_token: 36 | enabled: true 37 | config: 38 | issuer_url: http://localhost:4455/ 39 | jwks_url: file:///jwks.json 40 | 41 | authorizers: 42 | allow: 43 | enabled: true 44 | deny: 45 | enabled: true 46 | 47 | authenticators: 48 | anonymous: 49 | enabled: true 50 | config: 51 | subject: guest 52 | noop: 53 | enabled: true 54 | 55 | oauth2_introspection: 56 | enabled: false # Does not work with google as they do not have an RFC 7662 endpoint 57 | 58 | jwt: 59 | enabled: true 60 | config: 61 | jwks_urls: 62 | - https://www.googleapis.com/oauth2/v3/certs 63 | scope_strategy: hierarchic 64 | required_scope: 65 | - email 66 | trusted_issuers: 67 | - https://accounts.google.com 68 | allowed_algorithms: 69 | - RS256 -------------------------------------------------------------------------------- /google_cloud/oathkeeper/main.tf: -------------------------------------------------------------------------------- 1 | 2 | locals { 3 | project = "larkworthy-tester" 4 | location = "EU" 5 | region = "europe-west1" 6 | base_image_name = "oryd/oathkeeper" 7 | base_image_tag = "v0.37.1" 8 | #base_image_tag = "v0.36.0-beta.4" 9 | } 10 | 11 | terraform { 12 | backend "gcs" { 13 | prefix = "ORY_Oathkeeper/state" 14 | bucket = "terraform-larkworthy" 15 | } 16 | } 17 | 18 | provider "google" { 19 | project = local.project 20 | region = local.region 21 | } 22 | 23 | # Create service account to run service with no permissions 24 | resource "google_service_account" "oathkeeper" { 25 | account_id = "oathkeeper" 26 | display_name = "oathkeeper" 27 | } 28 | 29 | # Policy to allow public access to Cloud Run endpoint 30 | data "google_iam_policy" "noauth" { 31 | binding { 32 | role = "roles/run.invoker" 33 | members = ["allUsers"] 34 | } 35 | } 36 | 37 | # Allow public access to ORY Oathkeeper 38 | resource "google_cloud_run_service_iam_policy" "noauth" { 39 | location = google_cloud_run_service.oathkeeper.location 40 | project = google_cloud_run_service.oathkeeper.project 41 | service = google_cloud_run_service.oathkeeper.name 42 | policy_data = data.google_iam_policy.noauth.policy_data 43 | } 44 | 45 | # config bucket for service 46 | resource "google_storage_bucket" "config" { 47 | name = "${local.project}_${local.region}_oathkeeper" 48 | location = local.location 49 | bucket_policy_only = true 50 | } 51 | 52 | # rules for service 53 | resource "google_storage_bucket_object" "rules" { 54 | name = "rules_${filesha256("${path.module}/rules.template.yml")}.yml" 55 | content = templatefile( 56 | "${path.module}/rules.template.yml", { 57 | // camunda_url = "https://camunda-flxotk3pnq-ew.a.run.app" 58 | camunda_url = "https://camunda-secure-flxotk3pnq-ew.a.run.app" 59 | # Note Cloud run terminates https so container exposed only to http 60 | oathkeeper_url = "http://oathkeeper-flxotk3pnq-ew.a.run.app" 61 | }) 62 | bucket = google_storage_bucket.config.name 63 | } 64 | 65 | # Let oathkeeper read objects from it 66 | resource "google_storage_bucket_iam_member" "oathkeeper-viewer" { 67 | bucket = google_storage_bucket.config.name 68 | role = "roles/storage.objectViewer" 69 | # member = "serviceAccount:${google_service_account.oathkeeper.email}" 70 | member = "allUsers" # work around until we can use the cloud API https://github.com/ory/oathkeeper/issues/425 71 | } 72 | 73 | # Cloud Run ORY Oathkeeper 74 | resource "google_cloud_run_service" "oathkeeper" { 75 | name = "oathkeeper" 76 | location = local.region 77 | depends_on 
= [google_storage_bucket_object.rules] 78 | template { 79 | spec { 80 | # Use locked down Service Account 81 | service_account_name = google_service_account.oathkeeper.email 82 | containers { 83 | image = null_resource.oathkeeper_image.triggers.image 84 | args = ["--config", "/config.yaml"] 85 | env { 86 | name = "nonce" 87 | value = filesha256("${path.module}/rules.template.yml") # Force refresh on rule change 88 | } 89 | env { 90 | name = "ACCESS_RULES_REPOSITORIES" 91 | # storage.cloud.google.com domain serves content via redirects which is does not work ATM https://github.com/ory/oathkeeper/issues/425 92 | value = "https://storage.googleapis.com/${google_storage_bucket.config.name}/${google_storage_bucket_object.rules.name}" 93 | } 94 | env { 95 | name = "LOG_LEVEL" 96 | value = "debug" 97 | } 98 | } 99 | } 100 | } 101 | 102 | traffic { 103 | percent = 100 104 | latest_revision = true 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /google_cloud/oathkeeper/rules.template.yml: -------------------------------------------------------------------------------- 1 | - id: camunda-apps 2 | upstream: 3 | url: ${camunda_url} 4 | preserve_host: false 5 | match: 6 | url: ${oathkeeper_url}/camunda/<.*> 7 | methods: ["GET", "POST", "DELETE", "PUT"] 8 | authorizer: 9 | handler: allow 10 | authenticators: 11 | - handler: anonymous 12 | mutators: [{handler: noop}] 13 | errors: [{handler: json}] 14 | 15 | - id: camunda-welcome 16 | upstream: 17 | url: ${camunda_url} 18 | preserve_host: false 19 | match: 20 | url: ${oathkeeper_url}/camunda-welcome/<.*> 21 | methods: ["GET", "POST", "DELETE", "PUT"] 22 | authorizer: 23 | handler: allow 24 | authenticators: 25 | - handler: anonymous 26 | mutators: [{handler: noop}] 27 | errors: [{handler: json}] 28 | 29 | 30 | - id: camunda-invoice 31 | upstream: 32 | url: ${camunda_url} 33 | preserve_host: false 34 | match: 35 | url: ${oathkeeper_url}/camunda-invoice/<.*> 36 | methods: ["GET", "POST", "DELETE", "PUT"] 37 | authorizer: 38 | handler: allow 39 | authenticators: 40 | - handler: anonymous 41 | mutators: [{handler: noop}] 42 | errors: [{handler: json}] 43 | 44 | # Protected 45 | - id: camunda-rest 46 | upstream: 47 | url: ${camunda_url} 48 | preserve_host: false 49 | match: 50 | url: ${oathkeeper_url}/engine-rest/<.*> 51 | methods: ["GET", "POST", "DELETE", "PUT"] 52 | authorizer: 53 | handler: allow 54 | authenticators: 55 | - handler: jwt 56 | mutators: [{handler: noop}] 57 | errors: [{handler: json}] -------------------------------------------------------------------------------- /google_cloud/openresty-beyondcorp/.dockerignore: -------------------------------------------------------------------------------- 1 | .terraform/ 2 | 3 | -------------------------------------------------------------------------------- /google_cloud/openresty-beyondcorp/.gitignore: -------------------------------------------------------------------------------- 1 | .secret 2 | .build 3 | -------------------------------------------------------------------------------- /google_cloud/openresty-beyondcorp/Dockerfile.template: -------------------------------------------------------------------------------- 1 | FROM eu.gcr.io/${project}/${image}:${tag} 2 | RUN apk add --no-cache curl perl 3 | RUN opm get ledgetech/lua-resty-http 4 | RUN opm get bungle/lua-resty-reqargs 5 | RUN opm get jkeys089/lua-resty-hmac 6 | 7 | ADD default.conf /etc/nginx/conf.d/default.conf 8 | ADD login /assets/login 9 | ADD swiss/ 
/usr/local/openresty/lualib/swiss/ 10 | 11 | 12 | -------------------------------------------------------------------------------- /google_cloud/openresty-beyondcorp/README.md: -------------------------------------------------------------------------------- 1 | # Swiss Army Identity Aware Proxy 2 | 3 | A very fast serverless OpenResty-based proxy that can wrap upstream services with a login. Furthermore, it includes examples of: 4 | - A local development environment 5 | - Slack/Zapier integration 6 | - A Write Ahead Log 7 | - Google Secret Manager integration 8 | 9 | Read more in the [OpenResty: a Swiss Army Proxy for Serverless; WAL, Slack, Zapier and Auth](https://futurice.com/blog/openresty-a-swiss-army-proxy-for-serverless) blog post. 10 | 11 | An earlier version, covering just the login part, is described in the [Minimalist BeyondCorp style Identity Aware Proxy for Cloud Run](https://futurice.com/blog/identity-aware-proxy-for-google-cloud-run) blog post. 12 | 13 | ## OpenResty and Cloud Run 14 | 15 | Built on top of OpenResty, hosted on Cloud Run (an excellent match). 16 | 17 | ## Extension: Fast Response using a Write Ahead Log 18 | 19 | If the upstream is slow (e.g. while scaling up), you can redirect requests to a Write Ahead Log (WAL); latency is then just the time needed to store the message. 20 | A different location plays back the WAL with retries, so you can be sure the request is eventually handled. 21 | 22 | ## Extension: Securing a Slack Integration 23 | 24 | The Slack integration reads a signing secret from Google Secret Manager 25 | and verifies the HMAC signature of incoming requests. 26 | 27 | ## Extension: Securing a Zapier Integration 28 | 29 | Zapier can be protected with an OAuth account. 30 | 31 | ## Local testing with docker-compose 32 | 33 | Generate a local service account key in .secret: 34 | 35 | `gcloud iam service-accounts keys create .secret/sa.json --iam-account=openresty@larkworthy-tester.iam.gserviceaccount.com` 36 | 37 | Run this script to get a setup that reloads on CTRL + C: 38 | 39 | `/bin/bash test/dev.sh` 40 | 41 | Starting the script via bash gives it an easy name to find when you want to kill it: 42 | 43 | killall "bash" 44 | 45 | ## Get prod tokens 46 | 47 | https://openresty-flxotk3pnq-ew.a.run.app/login?token=true 48 | 49 | ## Test WAL verification 50 | 51 | curl -X POST -d "{}" http://localhost:8080/wal-playback/ 52 | 53 | ## Test token validation 54 | 55 | curl http://localhost:8080/httptokeninfo?id_token=foo 56 | curl http://localhost:8080/httptokeninfo?access_token=foo 57 | 58 | 59 | ## Test Slack 60 | 61 | curl http://localhost:8080/slack/command 62 | -------------------------------------------------------------------------------- /google_cloud/openresty-beyondcorp/build.tf: -------------------------------------------------------------------------------- 1 | # Mirror the base image from Docker Hub into Google Container Registry 2 | module "docker-mirror" { 3 | source = "github.com/neomantra/terraform-docker-mirror" 4 | image_name = local.base_image_name 5 | image_tag = local.base_image_tag 6 | dest_prefix = "eu.gcr.io/${local.project}" 7 | } 8 | 9 | # Hydrate the Dockerfile template into the .build directory 10 | resource "local_file" "dockerfile" { 11 | depends_on = [template_dir.swiss] 12 | content = templatefile("${path.module}/Dockerfile.template", { 13 | project = local.project 14 | image = local.base_image_name 15 | tag = local.base_image_tag 16 | }) 17 | filename = "${path.module}/.build/Dockerfile" 18 | } 19 | 20 | # Build a customized image 21 | resource "null_resource" "openresty_image" { 22 | depends_on = [module.docker-mirror] 23 | triggers = {
24 | # Rebuild if we change the base image, Dockerfile, nginx config, login page, or swiss libs 25 | image = "eu.gcr.io/${local.project}/openresty:${local.base_image_tag}_${ 26 | sha1( 27 | "${sha1(local_file.dockerfile.content)}${sha1(local_file.config.content)}${sha1(local_file.login.content)}${data.archive_file.swiss.output_sha}" 28 | ) 29 | }" 30 | } 31 | provisioner "local-exec" { 32 | command = <<-EOT 33 | gcloud builds submit \ 34 | --project ${local.project} \ 35 | --tag ${self.triggers.image} \ 36 | ${path.module}/.build 37 | EOT 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /google_cloud/openresty-beyondcorp/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | version: '3' 2 | services: 3 | openresty: 4 | build: .build/ 5 | ports: 6 | - "8080:8080" 7 | metadata.google.internal: 8 | image: salrashid123/gcemetadataserver 9 | # Fix (tokenScopes): https://github.com/salrashid123/gce_metadata_server/issues/5 10 | command: "-port :80 11 | --serviceAccountFile /conf/sa.json 12 | --tokenScopes https://www.googleapis.com/auth/cloud-platform 13 | " 14 | volumes: 15 | - .secret/:/conf/ -------------------------------------------------------------------------------- /google_cloud/openresty-beyondcorp/files/login.template: -------------------------------------------------------------------------------- 1 | 2 | Futurice IAP 3 | 4 | 5 | 6 | 7 | 8 |
9 | [lines 9-45: HTML body and inline script of the login page, including the ACCESS_TOKEN display, were not preserved in this listing]
46 | 47 | -------------------------------------------------------------------------------- /google_cloud/openresty-beyondcorp/files/swiss/secretmanager.lua: -------------------------------------------------------------------------------- 1 | local _M = { 2 | _VERSION = '0.01', 3 | } 4 | local cjson = require "cjson" 5 | local http = require "resty.http" 6 | local b64 = require("ngx.base64") 7 | 8 | function _M.fetch(location, access_token) 9 | local httpc = http.new() 10 | local secretlocation = location 11 | ngx.log(ngx.WARN, "fetching secret from: " .. secretlocation) 12 | local res, err = httpc:request_uri( 13 | secretlocation, 14 | { 15 | headers = { 16 | ["Content-Type"] = "application/json", 17 | ["Authorization"] = "Bearer " .. access_token 18 | }, 19 | ssl_verify = false 20 | } 21 | ) 22 | if err ~= nil then 23 | return nil, err 24 | elseif res.status == 200 then 25 | local content = cjson.decode(res.body) 26 | return b64.decode_base64url(content.payload.data), nil 27 | else 28 | return nil, res.body 29 | end 30 | end 31 | 32 | return _M -------------------------------------------------------------------------------- /google_cloud/openresty-beyondcorp/files/swiss/slack.lua: -------------------------------------------------------------------------------- 1 | local hmac = require "resty.hmac" 2 | local hmacs = {} 3 | local version = "v0" 4 | local err 5 | 6 | local _M = { 7 | _VERSION = '0.01', 8 | } 9 | 10 | function _M.isAuthentic(request, signingsecret) 11 | if hmacs[signingsecret] == nil then 12 | hmacs[signingsecret] = hmac:new(signingsecret, hmac.ALGOS.SHA256) 13 | end 14 | 15 | local hmac_sha256 = hmacs[signingsecret] 16 | local timestamp = request.get_headers()["X-Slack-Request-Timestamp"] 17 | local signature = request.get_headers()["X-Slack-Signature"] 18 | local body = request.get_body_data() 19 | if body == nil or timestamp == nil or signature == nil then 20 | return false 21 | end 22 | 23 | local basestring = version .. ":" .. timestamp .. ":" .. body 24 | local mac = version .. "=" .. 
hmac_sha256:final(basestring, true) 25 | hmac_sha256:reset() 26 | return mac == signature 27 | end 28 | 29 | return _M -------------------------------------------------------------------------------- /google_cloud/openresty-beyondcorp/test/dev.sh: -------------------------------------------------------------------------------- 1 | while : 2 | do 3 | # Generate local artifacts 4 | terraform apply \ 5 | -target=template_dir.swiss \ 6 | -target=local_file.dockerfile \ 7 | -target=local_file.login \ 8 | --auto-approve 9 | docker-compose build # Rebuild local image 10 | docker-compose up # Run local container 11 | done 12 | -------------------------------------------------------------------------------- /package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "requires": true, 3 | "lockfileVersion": 1, 4 | "dependencies": { 5 | "balanced-match": { 6 | "version": "1.0.0", 7 | "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", 8 | "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", 9 | "dev": true 10 | }, 11 | "brace-expansion": { 12 | "version": "1.1.11", 13 | "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", 14 | "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", 15 | "dev": true, 16 | "requires": { 17 | "balanced-match": "^1.0.0", 18 | "concat-map": "0.0.1" 19 | } 20 | }, 21 | "concat-map": { 22 | "version": "0.0.1", 23 | "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", 24 | "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", 25 | "dev": true 26 | }, 27 | "fs.realpath": { 28 | "version": "1.0.0", 29 | "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", 30 | "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", 31 | "dev": true 32 | }, 33 | "glob": { 34 | "version": "7.1.6", 35 | "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", 36 | "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", 37 | "dev": true, 38 | "requires": { 39 | "fs.realpath": "^1.0.0", 40 | "inflight": "^1.0.4", 41 | "inherits": "2", 42 | "minimatch": "^3.0.4", 43 | "once": "^1.3.0", 44 | "path-is-absolute": "^1.0.0" 45 | } 46 | }, 47 | "inflight": { 48 | "version": "1.0.6", 49 | "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", 50 | "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", 51 | "dev": true, 52 | "requires": { 53 | "once": "^1.3.0", 54 | "wrappy": "1" 55 | } 56 | }, 57 | "inherits": { 58 | "version": "2.0.4", 59 | "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", 60 | "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", 61 | "dev": true 62 | }, 63 | "minimatch": { 64 | "version": "3.0.4", 65 | "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", 66 | "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", 67 | "dev": true, 68 | "requires": { 69 | "brace-expansion": "^1.1.7" 70 | } 71 | }, 72 | "once": { 73 | "version": "1.4.0", 74 | "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", 75 | "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", 76 | "dev": true, 77 | "requires": { 78 | "wrappy": "1" 79 | } 80 | }, 81 | "path-is-absolute": { 82 | "version": "1.0.1", 83 | "resolved": 
"https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", 84 | "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", 85 | "dev": true 86 | }, 87 | "wrappy": { 88 | "version": "1.0.2", 89 | "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", 90 | "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", 91 | "dev": true 92 | } 93 | } 94 | } 95 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "scripts": { 3 | "readme": "node repotools/generate_readme.js" 4 | }, 5 | "devDependencies": { 6 | "glob": "^7.1.6" 7 | } 8 | } 9 | --------------------------------------------------------------------------------