├── .github
│   └── PULL_REQUEST_TEMPLATE.md
├── .gitignore
├── LICENSE
├── README.md
├── main.tf
├── outputs.tf
├── variables.tf
└── versions.tf

/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
## Related Issues

- _[none]_

## Public Changelog

_[none]_

## Security Implications

_[none]_

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
# Local .terraform directories
**/.terraform/*

# .tfstate files
*.tfstate
*.tfstate.*

# Crash log files
crash.log

# Ignore any .tfvars files that are generated automatically for each Terraform run. Most
# .tfvars files are managed as part of configuration and so should be included in
# version control.
#
# example.tfvars

# Ignore override files, as they are usually used to override resources locally and so
# are not checked in.
override.tf
override.tf.json
*_override.tf
*_override.tf.json

# Include override files you do wish to add to version control using a negated pattern:
#
# !example_override.tf

# Include tfplan files here to ignore the plan output of the command: terraform plan -out=tfplan
# example: *tfplan*

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2020 Transcend

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Lambda@Edge Module

A Terraform module for creating Lambda@Edge functions.

This module supports any runtime supported by Lambda@Edge, including Node.js and Python.

You point it at a set of local file globs, and it handles bundling your code and deploying it.
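
## Usage

A minimal invocation might look like the following sketch. The module label, `source`, bucket name, code path, and parameter values are placeholders; substitute your own. Note that Lambda@Edge functions must be created in the us-east-1 region, so the AWS provider used with this module should point there.

```hcl
module "edge_function" {
  source = "path/to/this/module" # placeholder: use this module's path or registry source

  name        = "my-edge-function"
  description = "Adds security headers to viewer responses"

  # Must be a versioned S3 bucket (see Requirements below)
  s3_artifact_bucket = "my-versioned-artifact-bucket"

  # Bundle everything matching these globs from the code directory
  lambda_code_source_dir = abspath("${path.module}/src")
  file_globs             = ["index.js", "package.json"]

  runtime = "nodejs14.x"
  handler = "index.handler"

  # Optional: written into the bundle as config.json
  plaintext_params = {
    SOME_SETTING = "value"
  }

  tags = {
    service = "my-service"
  }
}
```
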
## Requirements

You must use a versioned S3 bucket for your deployment artifacts.
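
## Attaching to CloudFront

The module's `arn` output is a qualified ARN that includes the most recently published version, which is the form CloudFront's `lambda_function_association` expects. The fragment below is a sketch of the relevant cache behavior configuration; the distribution's other required arguments are omitted, and `module.edge_function` refers to the example above:

```hcl
resource "aws_cloudfront_distribution" "this" {
  # ... origins, viewer certificate, restrictions, and other required arguments ...

  default_cache_behavior {
    # ... allowed_methods, cached_methods, target_origin_id, viewer_protocol_policy ...

    lambda_function_association {
      event_type   = "origin-request" # or viewer-request, origin-response, viewer-response
      lambda_arn   = module.edge_function.arn
      include_body = false
    }
  }
}
```
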
--------------------------------------------------------------------------------
/main.tf:
--------------------------------------------------------------------------------
/**
 * Creates a Lambda@Edge function to integrate with CloudFront distributions.
 */

/**
 * Lambda code is uploaded as a zip file, so we create a zip out of the given directory.
 * In the future, we may want to source our code from an S3 bucket instead of a local zip.
 */
data "archive_file" "zip_file_for_lambda" {
  type        = "zip"
  output_path = "${var.local_file_dir}/${var.name}.zip"

  dynamic "source" {
    for_each = distinct(flatten([
      for blob in var.file_globs :
      fileset(var.lambda_code_source_dir, blob)
    ]))
    content {
      content = try(
        file("${var.lambda_code_source_dir}/${source.value}"),
        filebase64("${var.lambda_code_source_dir}/${source.value}"),
      )
      filename = source.value
    }
  }

  # Optionally write a `config.json` file if any plaintext params were given
  dynamic "source" {
    for_each = length(keys(var.plaintext_params)) > 0 ? ["true"] : []
    content {
      content  = jsonencode(var.plaintext_params)
      filename = var.config_file_name
    }
  }
}

/**
 * Upload the build artifact zip file to S3.
 *
 * Doing this makes plans more resilient, so the function won't always
 * appear to need an update.
 */
resource "aws_s3_bucket_object" "artifact" {
  bucket = var.s3_artifact_bucket
  key    = "${var.name}.zip"
  source = data.archive_file.zip_file_for_lambda.output_path
  etag   = data.archive_file.zip_file_for_lambda.output_md5
  tags   = var.tags
}

/**
 * Create the Lambda function. Each new apply publishes a new version.
 */
resource "aws_lambda_function" "lambda" {
  function_name = var.name
  description   = var.description

  # Find the file in S3
  s3_bucket         = var.s3_artifact_bucket
  s3_key            = aws_s3_bucket_object.artifact.id
  s3_object_version = aws_s3_bucket_object.artifact.version_id
  source_code_hash  = filebase64sha256(data.archive_file.zip_file_for_lambda.output_path)

  publish = true
  handler = var.handler
  runtime = var.runtime
  role    = aws_iam_role.lambda_at_edge.arn
  tags    = var.tags

  lifecycle {
    ignore_changes = [
      last_modified,
    ]
  }
}

/**
 * Trust policy that allows the Lambda and Lambda@Edge services to assume the execution role.
 */
data "aws_iam_policy_document" "assume_role_policy_doc" {
  statement {
    sid    = "AllowAwsToAssumeRole"
    effect = "Allow"

    actions = ["sts:AssumeRole"]

    principals {
      type = "Service"

      identifiers = [
        "edgelambda.amazonaws.com",
        "lambda.amazonaws.com",
      ]
    }
  }
}

/**
 * The execution role that the Lambda and Lambda@Edge services assume when running the function.
 * The policies attached below give it permission to write logs to CloudWatch.
 */
resource "aws_iam_role" "lambda_at_edge" {
  name               = "${var.name}-role"
  assume_role_policy = data.aws_iam_policy_document.assume_role_policy_doc.json
  tags               = var.tags
}

/**
 * Allow the Lambda function to write logs.
 */
data "aws_iam_policy_document" "lambda_logs_policy_doc" {
  statement {
    effect    = "Allow"
    resources = ["*"]
    actions = [
      "logs:CreateLogStream",
      "logs:PutLogEvents",

      # Lambda@Edge logs are written to Log Groups in the region of the edge location
      # that executes the code. Because of this, we need to allow the lambda role to create
      # Log Groups in other regions.
      "logs:CreateLogGroup",
    ]
  }
}

/**
 * Attach the policy giving log write access to the IAM role.
 */
resource "aws_iam_role_policy" "logs_role_policy" {
  name   = "${var.name}-at-edge"
  role   = aws_iam_role.lambda_at_edge.id
  policy = data.aws_iam_policy_document.lambda_logs_policy_doc.json
}

/**
 * Creates a CloudWatch log group for this function to log to.
 * With Lambda@Edge, only test runs will log to this group. All
 * logs in production will go to a log group in the region
 * of the CloudFront edge location handling the request.
 */
resource "aws_cloudwatch_log_group" "log_group" {
  name       = "/aws/lambda/${var.name}"
  tags       = var.tags
  kms_key_id = var.cloudwatch_log_groups_kms_arn
}

/**
 * Create the secret SSM parameters that can be fetched and decrypted by the lambda function.
 */
resource "aws_ssm_parameter" "params" {
  for_each = var.ssm_params

  description = "param ${each.key} for the lambda function ${var.name}"

  name  = each.key
  value = each.value

  type = "SecureString"
  tier = length(each.value) > 4096 ? "Advanced" : "Standard"

  tags = var.tags
}

/**
 * Create an IAM policy document giving access to read and fetch the SSM params.
 */
data "aws_iam_policy_document" "secret_access_policy_doc" {
  count = length(var.ssm_params) > 0 ? 1 : 0

  statement {
    sid    = "AccessParams"
    effect = "Allow"
    actions = [
      "ssm:GetParameter",
      "secretsmanager:GetSecretValue",
    ]
    resources = [
      for name, outputs in aws_ssm_parameter.params :
      outputs.arn
    ]
  }
}

/**
 * Create a policy from the SSM policy document.
 */
resource "aws_iam_policy" "ssm_policy" {
  count = length(var.ssm_params) > 0 ? 1 : 0

  name        = "${var.name}-ssm-policy"
  description = "Gives the lambda ${var.name} access to params from SSM"
  policy      = data.aws_iam_policy_document.secret_access_policy_doc[0].json
}

/**
 * Attach the policy giving SSM param access to the Lambda IAM role.
 */
resource "aws_iam_role_policy_attachment" "ssm_policy_attachment" {
  count = length(var.ssm_params) > 0 ? 1 : 0

  role       = aws_iam_role.lambda_at_edge.id
  policy_arn = aws_iam_policy.ssm_policy[0].arn
}

--------------------------------------------------------------------------------
/outputs.tf:
--------------------------------------------------------------------------------
// Qualified ARN of the lambda function, with the most recently published version appended.
output "arn" {
  value = "${aws_lambda_function.lambda.arn}:${aws_lambda_function.lambda.version}"
}

// Unqualified ARN of the lambda function.
output "function_arn" {
  value = aws_lambda_function.lambda.arn
}

// Name of the lambda function.
output "function_name" {
  value = var.name
}

// Name of the IAM execution role.
output "execution_role_name" {
  value = aws_iam_role.lambda_at_edge.name
}

// ARN of the IAM execution role.
output "execution_role_arn" {
  value = aws_iam_role.lambda_at_edge.arn
}

--------------------------------------------------------------------------------
/variables.tf:
--------------------------------------------------------------------------------
variable "name" {
  description = "Name of the Lambda@Edge function"
}

variable "description" {
  description = "Description of what the Lambda@Edge function does"
}

variable "s3_artifact_bucket" {
  description = "Name of the S3 bucket to upload versioned artifacts to"
}

variable "tags" {
  type        = map(string)
  description = "Tags to apply to all resources that support them"
  default     = {}
}

variable "lambda_code_source_dir" {
  description = "An absolute path to the directory containing the code to upload to lambda"
}

variable "file_globs" {
  type        = list(string)
  default     = ["index.js", "node_modules/**", "yarn.lock", "package.json"]
  description = "List of files or globs that you want included from the lambda_code_source_dir"
}

variable "local_file_dir" {
  description = "A path to the directory to store plan-time generated local files"
  default     = "."
}

variable "runtime" {
  description = "The runtime of the lambda function"
  default     = "nodejs14.x"
}

variable "handler" {
  description = "The path to the main method that should handle the incoming requests"
  default     = "index.handler"
}

variable "config_file_name" {
  description = "The name of the file var.plaintext_params will be written to as JSON"
  default     = "config.json"
}

variable "plaintext_params" {
  type        = map(string)
  default     = {}
  description = <