├── .dockerignore ├── .github ├── ISSUE_TEMPLATE │ └── generic-issue.md ├── actions │ ├── build-asset │ │ └── action.yml │ ├── change-shell │ │ └── action.yml │ └── setup-node-deps │ │ └── action.yml └── workflows │ ├── ci.yml │ ├── pre-release.yml │ └── release.yml ├── .gitignore ├── .node-version ├── .scalafmt.conf ├── .vscode └── settings.json ├── Dockerfile ├── LICENSE ├── README.md ├── benchmark ├── benchmark-payload.json ├── benchmark.ts ├── response-times-average.svg ├── response-times-extremes.svg └── response-times.md ├── build.sbt ├── cdk.json ├── deployment ├── bin │ └── stack.ts └── lib │ └── lambda-stack.ts ├── docker-compose.yml ├── package-lock.json ├── package.json ├── project ├── assembly.sbt └── build.properties ├── src └── main │ └── scala │ ├── Bootstrap.scala │ ├── Bootstrap │ └── Types.scala │ ├── Handler.scala │ └── internal │ └── substitutes │ ├── HasReleaseFenceMethod.java │ ├── Target_scala_collection_immutable_VM.java │ ├── Target_scala_runtime_Statistics.java │ └── UnsafeUtils.java └── tsconfig.json /.dockerignore: -------------------------------------------------------------------------------- 1 | * 2 | !src 3 | !target 4 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/generic-issue.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Generic Issue 3 | about: Generic issue template 4 | title: '' 5 | labels: untriaged 6 | assignees: '' 7 | 8 | --- 9 | 10 | ## Description 11 | -------------------------------------------------------------------------------- /.github/actions/build-asset/action.yml: -------------------------------------------------------------------------------- 1 | name: "Build asset" 2 | description: "Build the asset required for deployment" 3 | 4 | runs: 5 | using: "composite" 6 | steps: 7 | - name: Install musl-tools 8 | run: | 9 | sudo apt-get install musl-tools 10 | shell: bash 11 | - run: | 12 | npm run build 
13 | shell: bash 14 | -------------------------------------------------------------------------------- /.github/actions/change-shell/action.yml: -------------------------------------------------------------------------------- 1 | name: "Change shell" 2 | description: "Convert /bin/sh to /bin/bash" 3 | 4 | runs: 5 | using: "composite" 6 | steps: 7 | # Convert /bin/sh to /bin/bash. 8 | - run: | 9 | sudo mv /bin/sh /bin/sh-dash 10 | sudo ln -s /bin/bash /bin/sh 11 | shell: bash 12 | -------------------------------------------------------------------------------- /.github/actions/setup-node-deps/action.yml: -------------------------------------------------------------------------------- 1 | name: "Setup node dependencies" 2 | description: "Setup all node dependencies necessary for deployment" 3 | 4 | runs: 5 | using: "composite" 6 | steps: 7 | - name: Upgrade NPM to 6.14.8 8 | run: | 9 | npm i -g npm@6.14.8 10 | shell: bash 11 | 12 | - name: "Install dependencies" 13 | run: | 14 | npm ci 15 | shell: bash 16 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | # Check out https://help.github.com/en/articles/workflow-syntax-for-github-actions for documentation on Actions. 4 | on: push 5 | 6 | jobs: 7 | test: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - uses: actions/checkout@v2 11 | - uses: olafurpg/setup-scala@v10 12 | with: 13 | java-version: graalvm@19.3.1 14 | - uses: actions/cache@v2 15 | with: 16 | path: | 17 | ~/.ivy2/cache 18 | ~/.sbt 19 | key: ${{ runner.os }}-sbt-${{ hashFiles('**/build.sbt') }} 20 | - run: sbt test 21 | 22 | # Generate a static build artifact for usage in deployments. 
23 | assets: 24 | runs-on: ubuntu-latest 25 | steps: 26 | - uses: actions/checkout@v2 27 | - uses: olafurpg/setup-scala@v10 28 | with: 29 | java-version: graalvm@19.3.1 30 | - uses: actions/cache@v2 31 | with: 32 | path: | 33 | ~/.ivy2/cache 34 | ~/.sbt 35 | key: ${{ runner.os }}-sbt-${{ hashFiles('**/build.sbt') }} 36 | 37 | # Cache docker layers by pulling down the base image first, and then activating 38 | # caching (keeps the cache small). 39 | - run: docker pull oracle/graalvm-ce:20.3.0-java11 40 | - uses: satackey/action-docker-layer-caching@v0.0.8 41 | - name: "Build asset" 42 | uses: ./.github/actions/build-asset 43 | 44 | # NOTE: Artifacts are not available across workflows. 45 | - uses: actions/upload-artifact@v2 46 | with: 47 | name: bootstrap 48 | path: ./dist/bootstrap 49 | if-no-files-found: error 50 | 51 | # Validate our deployment setup by deploying the stack to LocalStack. 52 | localstack: 53 | runs-on: ubuntu-latest 54 | needs: [assets] 55 | environment: 56 | name: local 57 | services: 58 | localstack: 59 | image: localstack/localstack:latest 60 | env: 61 | SERVICES: serverless,cloudformation,iam,sts,sqs,ssm,s3,cloudwatch,cloudwatch-logs,lambda,dynamodb,apigateway 62 | DEFAULT_REGION: eu-west-1 63 | AWS_ACCESS_KEY_ID: localkey 64 | AWS_SECRET_ACCESS_KEY: localsecret 65 | ports: 66 | - 4566:4566 67 | - 4571:4571 68 | steps: 69 | - uses: actions/checkout@v2 70 | - name: Setup Node.js 14.12.0 71 | uses: actions/setup-node@v1 72 | with: 73 | node-version: "14.12.0" 74 | 75 | - uses: actions/cache@v2 76 | with: 77 | path: ~/.npm 78 | key: node-${{ hashFiles('**/package-lock.json') }} 79 | restore-keys: | 80 | node- 81 | - name: "Setup node dependencies" 82 | uses: ./.github/actions/setup-node-deps 83 | 84 | - uses: actions/download-artifact@v2 85 | with: 86 | name: bootstrap 87 | path: ./dist 88 | 89 | - name: Bootstrapping LocalStack 90 | run: | 91 | npm run cdklocal:bootstrap 92 | env: 93 | DEFAULT_REGION: eu-west-1 94 | AWS_ACCOUNT_ID: 
"000000000000" 95 | AWS_ACCESS_KEY_ID: localkey 96 | AWS_SECRET_ACCESS_KEY: localsecret 97 | 98 | - name: Deploying to LocalStack 99 | run: | 100 | npm run cdklocal:deploy 101 | env: 102 | DEFAULT_REGION: eu-west-1 103 | AWS_ACCOUNT_ID: "000000000000" 104 | AWS_ACCESS_KEY_ID: localkey 105 | AWS_SECRET_ACCESS_KEY: localsecret 106 | -------------------------------------------------------------------------------- /.github/workflows/pre-release.yml: -------------------------------------------------------------------------------- 1 | name: pre-release 2 | 3 | # Refer to https://help.github.com/en/articles/workflow-syntax-for-github-actions for documentation on Actions. 4 | on: 5 | release: 6 | types: 7 | - prereleased 8 | 9 | jobs: 10 | # Generate a static build artifact for usage in deployments. 11 | assets: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v2 15 | - uses: olafurpg/setup-scala@v10 16 | with: 17 | java-version: graalvm@19.3.1 18 | - uses: actions/cache@v2 19 | with: 20 | path: | 21 | ~/.ivy2/cache 22 | ~/.sbt 23 | key: ${{ runner.os }}-sbt-${{ hashFiles('**/build.sbt') }} 24 | 25 | # Cache docker layers by pulling down the base image first, and then activating 26 | # caching (keeps the cache small). 27 | - run: docker pull oracle/graalvm-ce:20.3.0-java11 28 | - uses: satackey/action-docker-layer-caching@v0.0.8 29 | - name: "Build asset" 30 | uses: ./.github/actions/build-asset 31 | 32 | # NOTE: Artifacts are not available across workflows. 
33 | - uses: actions/upload-artifact@v2 34 | with: 35 | name: bootstrap 36 | path: ./dist/bootstrap 37 | if-no-files-found: error 38 | 39 | benchmark: 40 | runs-on: ubuntu-latest 41 | needs: [assets] 42 | environment: 43 | name: benchmark 44 | 45 | steps: 46 | - uses: actions/checkout@v2 47 | - name: Setup Node.js 14.12.0 48 | uses: actions/setup-node@v1 49 | with: 50 | node-version: "14.12.0" 51 | 52 | - uses: actions/cache@v2 53 | with: 54 | path: ~/.npm 55 | key: node-${{ hashFiles('**/package-lock.json') }} 56 | restore-keys: | 57 | node- 58 | - name: "Setup node dependencies" 59 | uses: ./.github/actions/setup-node-deps 60 | - uses: ./.github/actions/change-shell 61 | 62 | - uses: actions/download-artifact@v2 63 | with: 64 | name: bootstrap 65 | path: ./dist 66 | 67 | - name: Benchmark 68 | id: benchmark 69 | if: env.AWS_ACCESS_KEY_ID && env.AWS_SECRET_ACCESS_KEY 70 | env: 71 | AWS_ACCESS_KEY_ID: ${{ secrets.BENCHMARK_AWS_ACCESS_KEY_ID }} 72 | AWS_SECRET_ACCESS_KEY: ${{ secrets.BENCHMARK_AWS_SECRET_ACCESS_KEY }} 73 | AWS_REGION: ${{ secrets.BENCHMARK_AWS_REGION }} 74 | CI: "true" 75 | run: | 76 | export BENCHMARK_SUFFIX=$RANDOM 77 | echo "::set-output name=BENCHMARK_SUFFIX::$BENCHMARK_SUFFIX" 78 | npm run benchmark 79 | 80 | # Make sure we always clean up even after failing. 81 | - name: Ensure clean up 82 | if: failure() 83 | env: 84 | AWS_ACCESS_KEY_ID: ${{ secrets.BENCHMARK_AWS_ACCESS_KEY_ID }} 85 | AWS_SECRET_ACCESS_KEY: ${{ secrets.BENCHMARK_AWS_SECRET_ACCESS_KEY }} 86 | AWS_REGION: ${{ secrets.BENCHMARK_AWS_REGION }} 87 | CI: "true" 88 | BENCHMARK_SUFFIX: ${{ steps.benchmark.outputs.BENCHMARK_SUFFIX }} 89 | run: | 90 | npm run benchmark:destroy 91 | 92 | # Only commit the results upon success. 
93 | - name: Commit benchmark results 94 | if: success() 95 | uses: codetalkio/add-and-commit@v6 96 | with: 97 | author_name: GitHub Actions 98 | author_email: no-reply@codetalk.io 99 | message: "Bot: Updating benchmark results" 100 | add: "./benchmark/*" 101 | branch: master 102 | 103 | deploy: 104 | runs-on: ubuntu-latest 105 | name: "deploy pre-release" 106 | needs: [assets] 107 | environment: 108 | name: staging 109 | 110 | steps: 111 | - uses: actions/checkout@v2 112 | - name: Setup Node.js 14.12.0 113 | uses: actions/setup-node@v1 114 | with: 115 | node-version: "14.12.0" 116 | 117 | - uses: actions/cache@v2 118 | with: 119 | path: ~/.npm 120 | key: node-${{ hashFiles('**/package-lock.json') }} 121 | restore-keys: | 122 | node- 123 | - name: "Setup node dependencies" 124 | uses: ./.github/actions/setup-node-deps 125 | - uses: ./.github/actions/change-shell 126 | 127 | - uses: actions/download-artifact@v2 128 | with: 129 | name: bootstrap 130 | path: ./dist 131 | 132 | - name: Deploy 133 | if: env.AWS_ACCESS_KEY_ID && env.AWS_SECRET_ACCESS_KEY 134 | env: 135 | AWS_ACCESS_KEY_ID: ${{ secrets.PRE_RELEASE_AWS_ACCESS_KEY_ID }} 136 | AWS_SECRET_ACCESS_KEY: ${{ secrets.PRE_RELEASE_AWS_SECRET_ACCESS_KEY }} 137 | AWS_REGION: ${{ secrets.PRE_RELEASE_AWS_REGION }} 138 | CI: "true" 139 | run: | 140 | npm run cdk:deploy 141 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: release 2 | 3 | # Refer to https://help.github.com/en/articles/workflow-syntax-for-github-actions for documentation on Actions. 4 | on: 5 | release: 6 | types: 7 | - released 8 | 9 | jobs: 10 | # Generate a static build artifact for usage in deployments. 
11 | assets: 12 | runs-on: ubuntu-latest 13 | steps: 14 | - uses: actions/checkout@v2 15 | - uses: olafurpg/setup-scala@v10 16 | with: 17 | java-version: graalvm@19.3.1 18 | - uses: actions/cache@v2 19 | with: 20 | path: | 21 | ~/.ivy2/cache 22 | ~/.sbt 23 | key: ${{ runner.os }}-sbt-${{ hashFiles('**/build.sbt') }} 24 | 25 | # Cache docker layers by pulling down the base image first, and then activating 26 | # caching (keeps the cache small). 27 | - run: docker pull oracle/graalvm-ce:20.3.0-java11 28 | - uses: satackey/action-docker-layer-caching@v0.0.8 29 | - name: "Build asset" 30 | uses: ./.github/actions/build-asset 31 | 32 | # NOTE: Artifacts are not available across workflows. 33 | - uses: actions/upload-artifact@v2 34 | with: 35 | name: bootstrap 36 | path: ./dist/bootstrap 37 | if-no-files-found: error 38 | 39 | deploy: 40 | runs-on: ubuntu-latest 41 | name: "deploy release" 42 | needs: [assets] 43 | environment: 44 | name: production 45 | 46 | steps: 47 | - uses: actions/checkout@v2 48 | - name: Setup Node.js 14.12.0 49 | uses: actions/setup-node@v1 50 | with: 51 | node-version: "14.12.0" 52 | 53 | - uses: actions/cache@v2 54 | with: 55 | path: ~/.npm 56 | key: node-${{ hashFiles('**/package-lock.json') }} 57 | restore-keys: | 58 | node- 59 | - name: "Setup node dependencies" 60 | uses: ./.github/actions/setup-node-deps 61 | - uses: ./.github/actions/change-shell 62 | 63 | - uses: actions/download-artifact@v2 64 | with: 65 | name: bootstrap 66 | path: ./dist 67 | 68 | - name: Deploy 69 | if: env.AWS_ACCESS_KEY_ID && env.AWS_SECRET_ACCESS_KEY 70 | env: 71 | AWS_ACCESS_KEY_ID: ${{ secrets.RELEASE_AWS_ACCESS_KEY_ID }} 72 | AWS_SECRET_ACCESS_KEY: ${{ secrets.RELEASE_AWS_SECRET_ACCESS_KEY }} 73 | AWS_REGION: ${{ secrets.RELEASE_AWS_REGION }} 74 | CI: "true" 75 | run: | 76 | npm run cdk:deploy 77 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | # Generated by Cargo 2 | # will have compiled files and executables 3 | /target/ 4 | 5 | # These are backup files generated by rustfmt 6 | **/*.rs.bk 7 | 8 | .DS_Store 9 | lambda.zip 10 | cdk.out 11 | 12 | node_modules 13 | benchmark/traces.json 14 | 15 | 16 | # Scala files 17 | dist 18 | project/* 19 | !project/assembly.sbt 20 | !project/build.properties 21 | .metals 22 | .bloop 23 | -------------------------------------------------------------------------------- /.node-version: -------------------------------------------------------------------------------- 1 | 14.12.0 2 | -------------------------------------------------------------------------------- /.scalafmt.conf: -------------------------------------------------------------------------------- 1 | version = "2.6.4" 2 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "editor.formatOnSave": true 3 | } 4 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ghcr.io/graalvm/graalvm-ce:java11-21.0.0 2 | # NOTE: Because of https://github.com/scala/bug/issues/11634 we use 20.3.0-java11. 
3 | 4 | RUN gu install native-image 5 | 6 | ENV AWS_LAMBDA_RUNTIME_API="0.0.0" 7 | ENV _HANDLER="handle" 8 | 9 | WORKDIR /tmp/sls-graalvm-dist 10 | CMD native-image \ 11 | --no-fallback \ 12 | --static \ 13 | --enable-url-protocols=http \ 14 | --initialize-at-build-time \ 15 | --initialize-at-build-time=scala.runtime.Statics$VM \ 16 | -Djava.net.preferIPv4Stack=true \ 17 | -H:+ReportExceptionStackTraces \ 18 | -H:+StackTrace \ 19 | -H:ConfigurationFileDirectories=/tmp/sls-graalvm-dist/META-INF \ 20 | -jar /tmp/sls-graalvm-target/bootstrap.jar \ 21 | bootstrap 22 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2020 Christian Kjær 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Patterns: Serverless Scala (minimal) ![ci badge](https://github.com/codetalkio/patterns-serverless-scala-minimal/workflows/ci/badge.svg?branch=master) 2 | The following is an minimal template for deploying a Scala AWS Lambda function. All deployment is managed by the AWS CDK tool. 3 | 4 | **✨ Features ✨** 5 | 6 | - 🦀 Ready-to-use serverless setup using Scala, GraalVM, and [AWS CDK](https://github.com/aws/aws-cdk). 7 | - 🚗 CI using [GitHub Actions](https://github.com/features/actions) testing the deployment using [LocalStack](https://github.com/localstack/localstack). 8 | - 👩‍💻 Local development using [LocalStack](https://github.com/localstack/localstack). 9 | - 🚀 Deployments via [GitHub Releases](https://docs.github.com/en/free-pro-team@latest/github/administering-a-repository/about-releases). 10 | 11 | **⚡️ Quick start ⚡️** 12 | 13 | Assuming you have set up npm and cargo/rustup, the following will get you going: 14 | 15 | - `npm ci`: install all our deployment dependencies. 16 | - `npm run build`: build the Scala standalone executable, using GraalVM, and package it as an asset for CDK. 17 | - `npm run cdk:deploy`: deploy the packaged asset. 18 | 19 | The stack name is controlled by the `name` field in `package.json`. Other than that, just use your regular Rust development setup. 20 | 21 | Screenshot 2020-10-06 at 22 56 27 22 | 23 | Use this repo as a template to get quickly started! 
24 | 25 | 26 | ### Overview 27 | 28 | - [Building](#-building) 29 | - [Deployment using CDK](#-deployment-using-cdk) 30 | - [Development using LocalStack](#-development-using-localstack) 31 | - [GitHub Actions (CI/CD)](#--github-actions-cicd) 32 | - [Benchmarks using AWS XRay](#️️-benchmarks-using-aws-xray) 33 | - [Libraries](#-libraries) 34 | - [Contributing](#️-contributing) 35 | 36 | An overview of commands (all prefixed with `npm run`): 37 | 38 | | Command | Description | Purpose | 39 | |---------|-------------|---------| 40 | | `build` | Build the Scala standalone executable, using GraalVM, for release | 📦 | 41 | | `build:archive` | Creates a `./lambda.zip` for deployment using the AWS CLI | 📦 | 42 | | `build:clean` | Cleans build artifcats from `dist` | 📦 | 43 | | `deploy` | Cleans and builds a new executable, and deploys it via CDK | 📦 + 🚢 | 44 | | `cdk:bootstrap` | Bootstrap necessary resources on first usage of CDK in a region | 🚢 | 45 | | `cdk:deploy` | deploy this stack to your default AWS account/region | 🚢 | 46 | | `cdklocal:start` | Starts the LocalStack docker image | 👩‍💻 | 47 | | `cdklocal:bootstrap` | Bootstrap necessary resources for CDK against LocalStack | 👩‍💻 | 48 | | `cdklocal:deploy` | Deploy this stack to LocalStack | 👩‍💻 | 49 | 50 | 51 | 52 | ## 📦 Building 53 | We build our executable by running `npm run build`. 54 | 55 | Behind the scenes, the `build` NPM script does the following: 56 | 57 | - Builds our Scala project to a .jar file. 58 | - Runs GraalVM on our .jar to output a standalone executable. 59 | 60 | 61 | ## 🚢 Deployment using CDK 62 | We build and deploy by running `npm run deploy`, or just `npm run cdk:deploy` if you have already run `npm run build` previouslt. 63 | 64 | A couple of notes: 65 | 66 | - If this is the first CDK deployment ever on your AWS account/region, run `npm run cdk:bootstrap` first. This creates the necessary CDK stack resources on the cloud. 67 | - The CDK deployment bundles the `dist` folder as its assets. 
This is where the `bootstrap` file needs to be located (handled by `npm run build`). 68 | 69 | 70 | **Generate our build assets** 71 | 72 | ```bash 73 | $ npm run build 74 | ``` 75 | 76 | **Deploy the Scala asset** 77 | 78 | To deploy your function, call `npm run cdk:deploy`, 79 | 80 | ```bash 81 | $ npm run cdk:deploy 82 | ... 83 | sls-scala: deploying... 84 | [0%] start: Publishing bdbf8354358bc096823baac946ba64130b6397ff8e7eda2f18d782810e158c39:current 85 | [100%] success: Published bdbf8354358bc096823baac946ba64130b6397ff8e7eda2f18d782810e158c39:current 86 | sls-scala: creating CloudFormation changeset... 87 | [██████████████████████████████████████████████████████████] (5/5) 88 | 89 | ✅ sls-scala 90 | 91 | Outputs: 92 | sls-scala.entryArn = arn:aws:lambda:eu-west-1:xxxxxxxxxxxxxx:function:sls-scala-main 93 | 94 | Stack ARN: 95 | arn:aws:cloudformation:eu-west-1:xxxxxxxxxxxxxx:stack/sls-scala/xxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxx 96 | ``` 97 | 98 | > 💡 The security prompt is automatically disabled on CIs that set `CI=true`. You can remove this check by setting `--require-approval never` in the `cdk:deploy` npm command. 99 | 100 | **Validate you CDK CloudFormation** 101 | 102 | If you want to check if you CDK generated CloudFormation is valid, you can do that via, 103 | 104 | ```bash 105 | $ npm run cdk:synth 106 | ``` 107 | 108 | **Compare local against deployed** 109 | 110 | And finally, if you want to see a diff between your deployed stack and your local stack, 111 | 112 | ```bash 113 | $ npm run cdk:diff 114 | ``` 115 | 116 | 117 |
118 | 👈 Expand here for deployment using AWS CLI 119 | 120 | For real-usage we will deploy using AWS CDK, but you can dip your feet by deploying the Scala function via the AWS CLI. 121 | 122 | We'll do a couple of steps additional steps for the first time setup. Only step 5. is necessary after having done this once: 123 | 124 | 1. Set up a role to use with our Lambda function. 125 | 2. Attach policies to that role to be able to actually do something. 126 | 3. Deploy the Lambda function using the `lambda.zip` we've built. 127 | 4. Invoke the function with a test payload. 128 | 5. (Optional) Update the Lambda function with a new `lambda.zip`. 129 | 130 | **Generate our build assets** 131 | 132 | ```bash 133 | $ npm run build && npm run build:archive 134 | ``` 135 | 136 | **Set up the IAM Role** 137 | ```bash 138 | $ aws iam create-role \ 139 | --role-name sls-scala-test-execution \ 140 | --assume-role-policy-document \ 141 | '{"Version": "2012-10-17","Statement": [{ "Effect": "Allow", "Principal": {"Service": "lambda.amazonaws.com"}, "Action": "sts:AssumeRole"}]}' 142 | ``` 143 | 144 | We also need to set some basic policies on the IAM Role for it to be invokeable and for XRay traces to work, 145 | ```bash 146 | $ aws iam attach-role-policy \ 147 | --role-name sls-scala-test-execution \ 148 | --policy-arn arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole 149 | $ aws iam attach-role-policy \ 150 | --role-name sls-scala-test-execution \ 151 | --policy-arn arn:aws:iam::aws:policy/AWSXRayDaemonWriteAccess 152 | ``` 153 | 154 | **Deploy our function** 155 | ```bash 156 | $ aws lambda create-function \ 157 | --function-name sls-scala-test \ 158 | --handler doesnt.matter \ 159 | --cli-binary-format raw-in-base64-out \ 160 | --zip-file fileb://./lambda.zip \ 161 | --runtime provided.al2 \ 162 | --role arn:aws:iam::$(aws sts get-caller-identity | jq -r .Account):role/sls-scala-test-execution \ 163 | --tracing-config Mode=Active 164 | ``` 165 | 166 | > 💡 You can 
replace the `$(aws sts get-caller-identity | jq -r .Account)` call with your AWS account ID, if you do not have [jq](https://stedolan.github.io/jq/) installed. 167 | 168 | **Invoke our function** 169 | ```bash 170 | $ aws lambda invoke \ 171 | --function-name sls-scala-test \ 172 | --cli-binary-format raw-in-base64-out \ 173 | --payload '{"firstName": "world"}' \ 174 | tmp-output.json > /dev/null && cat tmp-output.json && rm tmp-output.json 175 | {"message":"Hello, world!"} 176 | ``` 177 | 178 | **(Optional) Update the function** 179 | We can also update the function code again, after creating a new asset `lambda.zip`, 180 | 181 | ```bash 182 | $ aws lambda update-function-code \ 183 | --cli-binary-format raw-in-base64-out \ 184 | --function-name sls-scala-test \ 185 | --zip-file fileb://lambda.zip 186 | ``` 187 | 188 | **Clean up the function** 189 | 190 | ```bash 191 | $ aws lambda delete-function --function-name sls-scala-test 192 | $ aws iam detach-role-policy --role-name sls-scala-test-execution --policy-arn arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole 193 | $ aws iam detach-role-policy --role-name sls-scala-test-execution --policy-arn arn:aws:iam::aws:policy/AWSXRayDaemonWriteAccess 194 | $ aws iam delete-role --role-name sls-scala-test-execution 195 | ``` 196 | 197 |
198 | 199 | 200 | ## 👩‍💻 Development using LocalStack 201 | 202 | LocalStack allows us to deploy our CDK services directly to our local environment: 203 | 204 | 1. `npm run cdklocal:start` to start the LocalStack services. 205 | 2. `npm run cdklocal:boostrap` to create the necessary CDK stack resources on the cloud. 206 | 3. `npm run cdklocal:deploy` to deploy our stack. 207 | 4. Target the local services from our application, with `cdklocal`, or by setting the `endpoint` option on the AWS CLI, e.g. `aws --endpoint-url=http://localhost:4566`. 208 | 209 | 🚧 Adapt the Rust setup below to Scala 🚧 210 | 211 | ```bash 212 | $ cargo watch -s 'npm run build:debug' 213 | ``` 214 | 215 | If you want to test the application through the AWS CLI, the following should do the trick, 216 | 217 | ```bash 218 | $ aws --endpoint-url=http://localhost:4566 lambda invoke \ 219 | --function-name sls-scala-minimal-main \ 220 | --cli-binary-format raw-in-base64-out \ 221 | --payload '{"firstName": "world"}' \ 222 | tmp-output.json > /dev/null && cat tmp-output.json && rm tmp-output.json 223 | {"message":"Hello, world!"} 224 | ``` 225 | 226 | #### How does this work? 227 | 228 | LocalStack supports [using local code for lambdas](https://github.com/localstack/localstack#using-local-code-with-lambda), which is what we take advantage of here. This works because step 3. mounts the `./target/cdk/release` directory. Whenever we update the `bootstrap` executable in here (still targeting `x86_64-unknown-linux-musl`) , it will be reflected in the Lambda function. 229 | 230 | You can see this in the `./deployment/lib/lambda-stack.ts` file where we conditionally switch out how we bundle the Lambda code based on the presence of a `CDK_LOCAL` environment variable. 231 | 232 | 233 | ## 🚗 🚀 GitHub Actions (CI/CD) 234 | Using [GitHub actions](/actions) allows us to have an efficient CI/CD setup with minimal work. 
235 | 236 | | Workflow | Trigger | Purpose | Environment Variables | 237 | |----------|---------|---------|-----------------------| 238 | | **ci** | push | Continously test the build along with linting, formatting, best-practices (clippy), and validate deployment against LocalStack | | 239 | | **pre-release** | Pre-release using GitHub Releases | Run benchmark suite | **BENCHMARK_AWS_ACCESS_KEY_ID**
**BENCHMARK_AWS_SECRET_ACCESS_KEY**
**BENCHMARK_AWS_REGION** | 240 | | **pre-release** | Pre-release using GitHub Releases | Deploy to a QA or staging environment | **PRE_RELEASE_AWS_ACCESS_KEY_ID**<br/>
**PRE_RELEASE_AWS_SECRET_ACCESS_KEY**
**PRE_RELEASE_AWS_REGION** | 241 | | **release** | Release using GitHub Releases | Deploy to production environment | **RELEASE_AWS_ACCESS_KEY_ID**<br/>
**RELEASE_AWS_SECRET_ACCESS_KEY**
**RELEASE_AWS_REGION** | 242 | 243 | The CI will work seamlessly without any manual steps, but for deployments via [GitHub Releases](/releases) to work, you will need to set up your GitHub secrets for the repository for the variables in the table above. 244 | 245 | These are used in the `.github/workflows/release.yml` and `.github/workflows/pre-release.yml` workflows for deploying the CDK stack whenever a GitHub pre-release/release is made. 246 | 247 | 248 | ## 🕵️‍♀️ Benchmarks using AWS XRay 249 | 250 | Since we have enabled `tracing: lambda.Tracing.ACTIVE` in CDK and `tracing-config Mode=Active` in the CLI, we will get XRay traces for our AWS Lambda invocations. 251 | 252 | You can checkout each trace in the AWS Console inside the XRay service, which is extremely valuable for figuring out timings between services, slow AWS SDK calls, annotating cost centers in your code, and much more. 253 | 254 | We can benchmark our performance using `npm run benchmark`, which will deploy the AWS Lambda to your AWS account, invoke it a bunch of times and trigger cold starts, along with gathering up all the AWS XRay traces into a neat table. 255 | 256 | Below are two charts generated by the benchmark, you can see the raw data in [the response-times table](./benchmark/response-times.md). 257 | 258 | 259 | ![Average Cold/Warm Response Times](./benchmark/response-times-average.svg) 260 | 261 | - 🔵: Average cold startup times 262 | - 🔴: Average warm startup times 263 | 264 | ![Fastest and Slowest Response Times](./benchmark/response-times-extremes.svg) 265 | 266 | - 🔵: Fastest warm response time 267 | - 🔴: Slowest warm response time 268 | - 🟡: Fastest cold response time 269 | - 🟠: Slowest cold response time 270 | 271 | Benchmarks can be triggered in the CI by setting up its environment variables and creating a pre-release via GitHub Releases. 
272 | 273 | 274 | ## 📚 Libraries 275 | We are using a couple of libraries, in various state of maturity/release: 276 | 277 | - [sbt](https://www.scala-sbt.org) is used to build our Scala project. 278 | - We will use [GraalVM](https://www.graalvm.org) to achieve a low Cold Start latency, minimizing memory, speed up our code, and create a standalone executable. 279 | 280 | 281 | ## 🙋‍♀️ Contributing 282 | Have any improvements our ideas? Don't be afraid to create an issue to discuss what's on your mind! 283 | -------------------------------------------------------------------------------- /benchmark/benchmark-payload.json: -------------------------------------------------------------------------------- 1 | { 2 | "firstName": "world" 3 | } 4 | -------------------------------------------------------------------------------- /benchmark/benchmark.ts: -------------------------------------------------------------------------------- 1 | import { 2 | LambdaClient, 3 | GetFunctionConfigurationCommand, 4 | UpdateFunctionConfigurationCommand, 5 | UpdateFunctionConfigurationCommandInput, 6 | InvokeCommand, 7 | InvokeCommandInput, 8 | } from "@aws-sdk/client-lambda"; 9 | import { 10 | XRayClient, 11 | GetTraceSummariesCommand, 12 | GetTraceSummariesCommandInput, 13 | BatchGetTracesCommand, 14 | BatchGetTracesCommandInput, 15 | TraceSummary, 16 | Trace, 17 | } from "@aws-sdk/client-xray"; 18 | import * as fs from "fs"; 19 | 20 | import * as pkg from "../package.json"; 21 | import * as benchmarkPayload from "./benchmark-payload.json"; 22 | 23 | const chartistSvg = require("svg-chartist"); 24 | 25 | /** 26 | * Benchmark configuration values. 27 | */ 28 | const COLD_STARTS = 50; 29 | const WARM_STARTS = 50; 30 | const MEMORY_SIZES = [128, 256, 512, 1024, 2048, 3072, 4096]; 31 | 32 | /** 33 | * Dynamic benchmark variables. The stack name is generated by taking the package name 34 | * and appending the random number generated in the benchmark suffix. 
35 | */ 36 | const { BENCHMARK_SUFFIX, DRY_RUN } = process.env; 37 | const STACK_NAME = BENCHMARK_SUFFIX ? `${pkg.name}-${BENCHMARK_SUFFIX}` : pkg.name; 38 | 39 | /** 40 | * Map of memory configuration and their benchmark results. 41 | */ 42 | interface MemoryTimes { 43 | memorySize: number; 44 | times: BenchmarkResults; 45 | } 46 | 47 | /** 48 | * The benchmark numbers. 49 | */ 50 | interface BenchmarkResults { 51 | overallTimes: BenchmarkAggregateMetrics; 52 | traceTimes: BenchmarkSingleInvocationMetric[]; 53 | } 54 | 55 | /** 56 | * All aggregate benchmark metrics. 57 | */ 58 | interface BenchmarkAggregateMetrics { 59 | avgWarmMs: number | undefined; 60 | avgColdMs: number | undefined; 61 | fastestWarmMs: number | undefined; 62 | fastestColdMs: number | undefined; 63 | slowestWarmMs: number | undefined; 64 | slowestColdMs: number | undefined; 65 | } 66 | 67 | /** 68 | * The metrics for a single invocation, extract from XRay. 69 | */ 70 | interface BenchmarkSingleInvocationMetric { 71 | id: string | undefined; 72 | totalTime: number | undefined; 73 | initTime: number | undefined; 74 | invocTime: number | undefined; 75 | overheadTime: number | undefined; 76 | } 77 | 78 | /** 79 | * Map of memory configuration and the traces extracted. 80 | */ 81 | interface MemoryTraces { 82 | memorySize: number; 83 | traces: MinimalTrace[]; 84 | } 85 | 86 | /** 87 | * The minimal trace information we require for processing. 88 | */ 89 | type MinimalTrace = Pick & { Segments: SegmentDocument[] }; 90 | 91 | /** 92 | * The layout of the XRay segments. 93 | */ 94 | interface SegmentDocument { 95 | origin?: string; 96 | end_time: number; 97 | start_time: number; 98 | subsegments?: SubSegment[]; 99 | } 100 | 101 | /** 102 | * The layout of the XRay sub-segments. 103 | */ 104 | interface SubSegment { 105 | name?: string; 106 | end_time: number; 107 | start_time: number; 108 | } 109 | 110 | /** 111 | * Run the benchmark by: 112 | * - Iterating through memory configurations. 
113 | * - Updating the Lambda with each memory configuration. 114 | * - Invoking the Lambda for n cold starts and m warm starts. 115 | * - Extract all XRay traces. 116 | * - Process the traces to get the benchmark results. 117 | * - Output a markdown table and two charts. 118 | * 119 | * If you just want to reprocess the results, run the benchmark with `DRY_RUN`, 120 | * e.g. `DRY_RUN=true npm run benchmark`. This will skip deployment, teardown, invocations, and 121 | * fetching of XRay traces, and instead load the existing traces from `traces.json` and generate 122 | * the output again. 123 | */ 124 | const main = async (functionName: string) => { 125 | const memoryTimes: MemoryTimes[] = []; 126 | if (DRY_RUN === "true") { 127 | // If we are running a dry run, we only need to load in the existing traces and process them. 128 | const memoryTraces = JSON.parse(fs.readFileSync("./benchmark/traces.json").toString()); 129 | memoryTraces.forEach(({ memorySize, traces: traceBatches }: MemoryTraces) => { 130 | const times = processXRayTraces(traceBatches); 131 | memoryTimes.push({ 132 | memorySize, 133 | times, 134 | }); 135 | }); 136 | } else { 137 | // For each memory configuration, run through all invocations first. 138 | const benchmarkStartTimes: Date[] = []; 139 | for (let i = 0; i < MEMORY_SIZES.length; i++) { 140 | const benchmarkStartTime = new Date(); 141 | benchmarkStartTimes.push(benchmarkStartTime); 142 | const memorySize = MEMORY_SIZES[i]; 143 | await invokeFunctions(functionName, memorySize); 144 | await sleep(1000); 145 | } 146 | // The XRay traces should now be ready for us to fetch. 
147 | const memoryTraces: MemoryTraces[] = []; 148 | for (let i = 0; i < MEMORY_SIZES.length; i++) { 149 | const benchmarkStartTime = benchmarkStartTimes[i]; 150 | const memorySize = MEMORY_SIZES[i]; 151 | const traceSummaries = await fetchXRayTraceSummaries(functionName, benchmarkStartTime); 152 | const traceBatches = await fetchXRayTraceBatches(traceSummaries); 153 | memoryTraces.push({ 154 | memorySize, 155 | traces: traceBatches, 156 | }); 157 | const times = processXRayTraces(traceBatches); 158 | memoryTimes.push({ 159 | memorySize, 160 | times, 161 | }); 162 | } 163 | fs.writeFileSync("./benchmark/traces.json", JSON.stringify(memoryTraces)); 164 | } 165 | 166 | outputBenchmarkMarkdown(memoryTimes); 167 | outputBenchmarkChart(memoryTimes); 168 | }; 169 | 170 | /** 171 | * Sleep for the specified time. 172 | */ 173 | const sleep = (ms: number) => { 174 | return new Promise((resolve) => { 175 | setTimeout(resolve, ms); 176 | }); 177 | }; 178 | 179 | /** 180 | * Run the actual benchmark, performing a series of invocations to the Lambda. To ensure cold 181 | * starts, we trigger an update on the functions environment variables before invoking it. We 182 | * then invoke it a number of times afterwards to gather warm startup data as well. 183 | * 184 | * The `memorySize` configuration sets the Lambda memory size, allowing easy scale up and down. 185 | */ 186 | const invokeFunctions = async (functionName: string, memorySize: number) => { 187 | const lambdaClient = new LambdaClient({}); 188 | const baseConfiguration = await lambdaClient.send( 189 | new GetFunctionConfigurationCommand({ 190 | FunctionName: functionName, 191 | }) 192 | ); 193 | 194 | // We generate the update configuration on the fly to always provide a unique 195 | // benchmark run time. 
196 | const updateConfiguration: () => UpdateFunctionConfigurationCommandInput = () => ({ 197 | FunctionName: functionName, 198 | MemorySize: memorySize, 199 | Environment: { 200 | Variables: { 201 | ...baseConfiguration.Environment?.Variables, 202 | BENCHMARK_RUN_TIME: `${new Date().toISOString()}-${Math.random()}`, 203 | }, 204 | }, 205 | }); 206 | const testPayload: () => InvokeCommandInput = () => { 207 | // Dynamically replace placeholders in the payload, so we can generate unique 208 | // test data per invocation. 209 | const payload = JSON.stringify(benchmarkPayload) 210 | .replace("##DATE##", new Date().toISOString()) 211 | .replace("##NUM##", `${Math.floor(Math.random() * 10000)}`); 212 | return { 213 | FunctionName: functionName, 214 | Payload: Buffer.from(payload), 215 | }; 216 | }; 217 | 218 | for (let cI = 0; cI < COLD_STARTS; cI++) { 219 | console.log("[BENCHMARK] Updating the function to ensure a cold start."); 220 | await lambdaClient.send(new UpdateFunctionConfigurationCommand(updateConfiguration())); 221 | const s = Date.now(); 222 | await lambdaClient.send(new InvokeCommand(testPayload())); 223 | console.log(`[BENCHMARK] Invoked cold-start function: ${Date.now() - s}ms.`); 224 | await sleep(500); 225 | } 226 | for (let wI = 0; wI < WARM_STARTS; wI++) { 227 | const s = Date.now(); 228 | await lambdaClient.send(new InvokeCommand(testPayload())); 229 | console.log(`[BENCHMARK] Invoked warm-start function: ${Date.now() - s}ms.`); 230 | await sleep(500); 231 | } 232 | }; 233 | 234 | /** 235 | * Fetch all XRay trace summaries, which contain the trace IDs we will need to get the detailed information. We 236 | * limit the information we search for by filtering on the `functionName` and on service type of AWS:Lambda. Additionally, 237 | * we only look in the period between the `benchmarkStartTime` time and the time that this function is called. 
238 | * 239 | * Since XRay trace can take some time to appear, we also gracefully handle waiting if we don't see at least 90% of 240 | * the traces in the results. 241 | */ 242 | const fetchXRayTraceSummaries = async (functionName: string, benchmarkStartTime: Date): Promise => { 243 | const benchmarkEndTime = new Date(); 244 | const xRayClient = new XRayClient({}); 245 | 246 | const traceSummaries: TraceSummary[] = []; 247 | let nextTokenSummary: string | undefined; 248 | let retries = 0; 249 | let retry = true; 250 | while (retry) { 251 | const traceInput: GetTraceSummariesCommandInput = { 252 | StartTime: benchmarkStartTime, 253 | EndTime: benchmarkEndTime, 254 | FilterExpression: `service(id(name: "${functionName}", type: "AWS::Lambda"))`, 255 | NextToken: nextTokenSummary, 256 | }; 257 | const traceSummariesRes = await xRayClient.send(new GetTraceSummariesCommand(traceInput)); 258 | nextTokenSummary = traceSummariesRes.NextToken; 259 | traceSummaries.push(...(traceSummariesRes.TraceSummaries ?? [])); 260 | 261 | // Make sure we've fetched all our traces. We only require 90% to have been gathered, since 262 | // XRay is sampling our requests. 263 | if ((traceSummariesRes.TraceSummaries?.length ?? 0) + traceSummaries.length < (COLD_STARTS + WARM_STARTS) * 0.8) { 264 | if (retries >= 40) { 265 | throw new Error( 266 | `[TEARDOWN] Failed to get all traces for the invocations, was only able to find '${traceSummariesRes.TraceSummaries?.length}' traces.` 267 | ); 268 | } 269 | console.log("[TRACES] Traces has still not appeared, waiting 1 seconds and trying again..."); 270 | await sleep(1000); 271 | retries++; 272 | } else { 273 | retry = false; 274 | } 275 | 276 | if (!retry && nextTokenSummary === undefined) { 277 | break; 278 | } 279 | } 280 | console.log("[TRACES] Fetched trace summaries, fetching detailed trace information."); 281 | return traceSummaries; 282 | }; 283 | 284 | /** 285 | * Split an array into chunks based on the `size`. 
286 | */ 287 | const chunkArray = (arr: any[], size: number) => { 288 | var results = []; 289 | while (arr.length) { 290 | results.push(arr.splice(0, size)); 291 | } 292 | return results; 293 | }; 294 | 295 | /** 296 | * Once we have a list of trace IDs, we can get the detailed trace information which contain the breakdown 297 | * of the different stages the Lambda function went through. 298 | * 299 | * Because the XRay API is limited to fetching 5 traces at a time, we divide all the trace IDs into chunks of 300 | * 5. Additionally, we re-request traces if we detect any that are unprocessed. 301 | * 302 | * NOTE: To avoid leaking information, all traces are stripped of information and only a whitelist is saved. 303 | */ 304 | const fetchXRayTraceBatches = async (traceSummaries: Pick[]): Promise => { 305 | const xRayClient = new XRayClient({}); 306 | const batchTraces: MinimalTrace[] = []; 307 | 308 | // We can only request 5 traces at a time, so we split the summary IDs into chunks. 309 | const batchSummaryChunks = chunkArray(traceSummaries, 5); 310 | 311 | let nextTokenBatch: string | undefined; 312 | for (let i = 0; i < batchSummaryChunks.length; i++) { 313 | const batchSummaryChunk = batchSummaryChunks[i]; 314 | while (true) { 315 | const batchInput: BatchGetTracesCommandInput = { 316 | TraceIds: [...new Set(batchSummaryChunk.filter((t) => t.Id).map((t) => t.Id!))], 317 | NextToken: nextTokenBatch, 318 | }; 319 | const batchTracesRes = await xRayClient.send(new BatchGetTracesCommand(batchInput)); 320 | 321 | // Check if there are any unprocessed traces. If there are, we wait 1 second and retry the loop. 322 | if ((batchTracesRes.UnprocessedTraceIds?.length ?? 0) > 0) { 323 | console.log("[TRACES] Detailed traces are still being processed, waiting 1 second and trying again..."); 324 | await sleep(1000); 325 | continue; 326 | } 327 | 328 | // Only store the relevant parts of the traces, so we can't accidentally leak information. 
329 | const minimalTraces: MinimalTrace[] = (batchTracesRes.Traces ?? []).map((t) => ({ 330 | Id: t.Id, 331 | Segments: (t.Segments ?? []) 332 | .filter((s) => s.Document !== undefined) 333 | .map((s: any) => { 334 | const seg: SegmentDocument = JSON.parse(s.Document!); 335 | const cleanedSeg: SegmentDocument = { 336 | origin: seg.origin, 337 | end_time: seg.end_time, 338 | start_time: seg.start_time, 339 | subsegments: seg.subsegments?.map((subSeg) => ({ 340 | name: subSeg.name, 341 | end_time: subSeg.end_time, 342 | start_time: subSeg.start_time, 343 | })), 344 | }; 345 | return cleanedSeg; 346 | }), 347 | })); 348 | batchTraces.push(...minimalTraces); 349 | 350 | nextTokenBatch = batchTracesRes.NextToken; 351 | if (nextTokenBatch === undefined) { 352 | break; 353 | } 354 | } 355 | } 356 | 357 | return batchTraces; 358 | }; 359 | 360 | /** 361 | * Process a list of XRay detailed traces, extracting the timings for the various 362 | * segments, along with overall metrics. 363 | */ 364 | const processXRayTraces = (traces: MinimalTrace[]): BenchmarkResults => { 365 | console.log("[TRACES] Processing trace information."); 366 | // Gather overall metrics. 367 | let avgWarmMs: number | undefined; 368 | let avgColdMs: number | undefined; 369 | let fastestWarmMs: number | undefined; 370 | let fastestColdMs: number | undefined; 371 | let slowestWarmMs: number | undefined; 372 | let slowestColdMs: number | undefined; 373 | 374 | // Gather per-trace metrics. 375 | const traceTimes: BenchmarkSingleInvocationMetric[] = []; 376 | traces.map((trace) => { 377 | let totalTime: number | undefined; 378 | let initTime: number | undefined; 379 | let invocTime: number | undefined; 380 | let overheadTime: number | undefined; 381 | 382 | // Piece together the segment timings into one measurement. 
383 | trace.Segments?.map((seg) => { 384 | if (seg.origin === "AWS::Lambda") { 385 | totalTime = seg.end_time - seg.start_time; 386 | } else if (seg.origin === "AWS::Lambda::Function") { 387 | seg.subsegments?.map((subSeg) => { 388 | if (subSeg.name === "Initialization") { 389 | initTime = subSeg.end_time - subSeg.start_time; 390 | } else if (subSeg.name === "Invocation") { 391 | invocTime = subSeg.end_time - subSeg.start_time; 392 | } else if (subSeg.name === "Overhead") { 393 | overheadTime = subSeg.end_time - subSeg.start_time; 394 | } 395 | }); 396 | } 397 | }); 398 | 399 | const isColdStart = initTime ? true : false; 400 | 401 | // XRay validation (see https://github.com/codetalkio/patterns-serverless-rust-minimal/issues/5 for context): 402 | // 1. XRay can sometimes hand us back invalid traces where the total time is less than the 403 | // sum of its elements. We discard these traces. 404 | const otherTime = (initTime || 0) + (invocTime || 0) + (overheadTime || 0); 405 | if (totalTime! < otherTime) { 406 | console.error( 407 | `[TRACES] Invalid trace with total time '${totalTime}' less than sum of other times '${otherTime}'. ID = ${trace.Id}.` 408 | ); 409 | return; 410 | } 411 | // 2. Similarly, XRay sometimes only catches the Lambda service part, but not the function metrics 412 | // themselves. We are then unable to tell if it was a cold start or not. 413 | if (!invocTime) { 414 | console.error(`[TRACES] Invalid trace with missing invocation time. ID = ${trace.Id}.`); 415 | return; 416 | } 417 | 418 | traceTimes.push({ 419 | id: trace.Id, 420 | totalTime, 421 | initTime, 422 | invocTime, 423 | overheadTime, 424 | }); 425 | 426 | // Keep track of overall metrics. 427 | if (!isColdStart) { 428 | avgWarmMs = !avgWarmMs ? totalTime : (avgWarmMs + totalTime!) / 2; 429 | fastestWarmMs = !fastestWarmMs || totalTime! < fastestWarmMs ? totalTime : fastestWarmMs; 430 | slowestWarmMs = !slowestWarmMs || totalTime! > slowestWarmMs ? 
totalTime : slowestWarmMs; 431 | } else if (isColdStart) { 432 | avgColdMs = !avgColdMs ? totalTime : (avgColdMs + totalTime!) / 2; 433 | fastestColdMs = !fastestColdMs || totalTime! < fastestColdMs ? totalTime : fastestColdMs; 434 | slowestColdMs = !slowestColdMs || totalTime! > slowestColdMs ? totalTime : slowestColdMs; 435 | } 436 | }); 437 | 438 | return { 439 | overallTimes: { 440 | avgWarmMs, 441 | avgColdMs, 442 | fastestWarmMs, 443 | slowestWarmMs, 444 | fastestColdMs, 445 | slowestColdMs, 446 | }, 447 | traceTimes, 448 | }; 449 | }; 450 | 451 | /** 452 | * Output the results to the `response-times.md` markdown file by manually piecing together the 453 | * markdown content. 454 | */ 455 | const outputBenchmarkMarkdown = async (memoryTimes: MemoryTimes[]) => { 456 | console.log("[OUTPUT] Saving benchmark times to 'response-times.md'."); 457 | 458 | // Generate the measurement tables for each memory configuration section. 459 | let benchmarkData = ""; 460 | memoryTimes.map(({ memorySize, times }) => { 461 | benchmarkData += ` 462 | 463 | ## Results for ${memorySize} MB`; 464 | benchmarkData += ` 465 | 466 | | Measurement (${memorySize} MB) | Time (ms) | 467 | |-------------|------| 468 | | Average warm start response time | ${Math.floor(times.overallTimes.avgWarmMs! * 10000) / 10} ms | 469 | | Average cold start response time | ${Math.floor(times.overallTimes.avgColdMs! * 10000) / 10} ms | 470 | | Fastest warm response time | ${Math.floor(times.overallTimes.fastestWarmMs! * 10000) / 10} ms | 471 | | Slowest warm response time | ${Math.floor(times.overallTimes.slowestWarmMs! * 10000) / 10} ms | 472 | | Fastest cold response time | ${Math.floor(times.overallTimes.fastestColdMs! * 10000) / 10} ms | 473 | | Slowest cold response time | ${Math.floor(times.overallTimes.slowestColdMs! 
* 10000) / 10} ms | 474 | `; 475 | 476 | benchmarkData += ` 477 | 478 | | Response time | Initialization | Invocation | Overhead | Cold/ Warm Start | Memory Size | Trace ID | 479 | |---------------|----------------|------------|----------|------------------|-------------|----------|`; 480 | times.traceTimes.map((time) => { 481 | const isColdStart = !!time.initTime; 482 | const totalTimeMs = time.totalTime ? `${Math.floor(time.totalTime * 10000) / 10} ms` : ""; 483 | const initTimeMs = time.initTime ? `${Math.floor(time.initTime * 10000) / 10} ms` : ""; 484 | const invocTimeMs = time.invocTime ? `${Math.floor(time.invocTime * 10000) / 10} ms` : ""; 485 | const overheadTimeMs = time.overheadTime ? `${Math.floor(time.overheadTime * 10000) / 10} ms` : ""; 486 | const coldOrWarmStart = isColdStart ? "🥶" : "🥵"; 487 | benchmarkData += ` 488 | | ${totalTimeMs} | ${initTimeMs} | ${invocTimeMs} | ${overheadTimeMs} | ${coldOrWarmStart} | ${memorySize} MB | ${time.id} |`; 489 | }); 490 | }); 491 | 492 | // Set up the page, including the generated charts. 493 | const header = ` 494 | # Benchmark: Response Times 495 | 496 | The following are the response time results from AWS XRay, generated after running \`npm run benchmark\`. 497 | 498 | ![Average Cold/Warm Response Times](./response-times-average.svg) 499 | 500 | - 🔵: Average cold startup times 501 | - 🔴: Average warm startup times 502 | 503 | ![Fastest and Slowest Response Times](./response-times-extremes.svg) 504 | 505 | - 🔵: Fastest warm response time 506 | - 🔴: Slowest warm response time 507 | - 🟡: Fastest cold response time 508 | - 🟠: Slowest cold response time 509 | 510 | `; 511 | 512 | // Provide a table of contents for quick access to the different measurements. 
513 | let tableOfContents = ` 514 | ## Overview 515 | 516 | `; 517 | memoryTimes.map(({ memorySize }) => { 518 | tableOfContents += ` 519 | - [Results for ${memorySize} MB](#results-for-${memorySize}-mb)`; 520 | }); 521 | 522 | // Include generic XRay trace examples in the bottom of the page. 523 | const footer = ` 524 | 525 | ## XRay Example of a Cold Start 526 | 527 | Screenshot 2020-10-07 at 23 01 40 528 | 529 | ## XRay Example of a Warm Start 530 | 531 | Screenshot 2020-10-07 at 23 01 23 532 | `; 533 | const markdown = [header, tableOfContents, benchmarkData, footer].join("\n"); 534 | fs.writeFileSync("./benchmark/response-times.md", markdown); 535 | }; 536 | 537 | /** 538 | * Output two charts based on the data, showing the behaviour and performance of the AWS Lambda: 539 | * - response-times-average.svg: Shows average cold start and warm starts, for each memory configuration. 540 | * - response-times-extremes.svg: Shows the fastest and slowests response times for both cold and warm 541 | * starts, for each memory configuration. 542 | */ 543 | const outputBenchmarkChart = async (memoryTimes: MemoryTimes[]) => { 544 | console.log("[OUTPUT] Charting benchmark times to 'response-times.svg'."); 545 | 546 | const opts = { 547 | options: { 548 | width: 700, 549 | height: 300, 550 | axisX: { 551 | showLabel: true, 552 | showGrid: false, 553 | }, 554 | axisY: { 555 | labelInterpolationFnc: function (value: any) { 556 | return value + "ms"; 557 | }, 558 | scaleMinSpace: 15, 559 | }, 560 | }, 561 | title: { 562 | height: 50, 563 | fill: "#4A5572", 564 | }, 565 | css: `.ct-series-a .ct-bar, .ct-series-a .ct-line, .ct-series-a .ct-point, .ct-series-a .ct-slice-donut{ 566 | stroke: #4A5572 567 | }`, 568 | }; 569 | 570 | const labels: string[] = []; 571 | const avgSeries: number[][] = [ 572 | [], // Avg. Cold 573 | [], // Avg. 
Warm 574 | ]; 575 | const extremesSeries: number[][] = [ 576 | [], // Fastest Warm 577 | [], // Slowest Warm 578 | [], // Fastest Cold 579 | [], // Slowest Cold 580 | ]; 581 | memoryTimes.map(({ memorySize, times }) => { 582 | labels.push(`${memorySize}MB`); 583 | // Add the average data to the first chart series. 584 | avgSeries[0].push(Math.floor(times.overallTimes.avgColdMs! * 10000) / 10); 585 | avgSeries[1].push(Math.floor(times.overallTimes.avgWarmMs! * 10000) / 10); 586 | // Add the extremes data to the second chart series. 587 | extremesSeries[0].push(Math.floor(times.overallTimes.fastestWarmMs! * 10000) / 10); 588 | extremesSeries[1].push(Math.floor(times.overallTimes.slowestWarmMs! * 10000) / 10); 589 | extremesSeries[2].push(Math.floor(times.overallTimes.fastestColdMs! * 10000) / 10); 590 | extremesSeries[3].push(Math.floor(times.overallTimes.slowestColdMs! * 10000) / 10); 591 | }); 592 | 593 | const avgSeriesData = { 594 | title: "Average Cold/Warm Response Times Across Memory Configurations", 595 | labels, 596 | series: avgSeries, 597 | }; 598 | const extremesSeriesData = { 599 | title: "Fastest and Slowest Response Times Across Memory Configurations", 600 | labels, 601 | series: extremesSeries, 602 | }; 603 | 604 | chartistSvg("bar", avgSeriesData, opts).then((html: any) => { 605 | fs.writeFileSync("./benchmark/response-times-average.svg", html); 606 | }); 607 | chartistSvg("bar", extremesSeriesData, opts).then((html: any) => { 608 | fs.writeFileSync("./benchmark/response-times-extremes.svg", html); 609 | }); 610 | }; 611 | 612 | (async () => { 613 | const functionName = `${STACK_NAME}-main`; 614 | console.log(`[SETUP] BENCHMARK_SUFFIX = ${BENCHMARK_SUFFIX}`); 615 | console.log(`[SETUP] Stack Name = ${functionName}`); 616 | if (!BENCHMARK_SUFFIX) { 617 | console.error("No 'BENCHMARK_SUFFIX' was set!"); 618 | process.exit(1); 619 | } 620 | 621 | try { 622 | await main(functionName); 623 | } catch (err) { 624 | console.error("[ERROR] Benchmark failed 
unexpectedly:", err); 625 | process.exit(1); 626 | } 627 | })(); 628 | -------------------------------------------------------------------------------- /benchmark/response-times-average.svg: -------------------------------------------------------------------------------- 1 | 128MB256MB512MB1024MB2048MB3072MB4096MB0ms25ms50ms75ms100ms125ms150ms175ms200ms225ms250msAverage Cold/Warm Response Times Across Memory Configurations -------------------------------------------------------------------------------- /benchmark/response-times-extremes.svg: -------------------------------------------------------------------------------- 1 | 128MB256MB512MB1024MB2048MB3072MB4096MB0ms25ms50ms75ms100ms125ms150ms175ms200ms225ms250ms275ms300ms325ms350msFastest and Slowest Response Times Across Memory Configurations -------------------------------------------------------------------------------- /build.sbt: -------------------------------------------------------------------------------- 1 | name := "sls-scala-minimal" 2 | scalaVersion := "2.13.3" 3 | assemblyJarName in assembly := "bootstrap.jar" 4 | mainClass in assembly := Some("Bootstrap") 5 | 6 | libraryDependencies ++= Seq( 7 | "com.lihaoyi" %% "requests" % "0.6.5", 8 | "com.github.plokhotnyuk.jsoniter-scala" %% "jsoniter-scala-core" % "2.6.2", 9 | "com.github.plokhotnyuk.jsoniter-scala" %% "jsoniter-scala-macros" % "2.6.2" % Provided, // only required in compile-time 10 | // Check out https://github.com/scala/bug/issues/11634 for why these are necessary. 11 | "org.graalvm.nativeimage" % "svm" % "20.3.0" % "compile-internal" 12 | // "org.scalameta" %% "svm-subs" % "20.2.0" % "compile-internal" 13 | ) 14 | 15 | scalacOptions ++= Seq( 16 | "-deprecation", // Emit warning and location for usages of deprecated APIs. 17 | "-encoding", 18 | "utf-8", // Specify character encoding used by source files. 19 | "-explaintypes", // Explain type errors in more detail. 
20 |   "-feature", // Emit warning and location for usages of features that should be imported explicitly. 21 |   "-unchecked", // Enable additional warnings where generated code depends on assumptions. 22 |   "-Xcheckinit", // Wrap field accessors to throw an exception on uninitialized access. 23 |   "-Xfatal-warnings" // Fail the compilation if there are any warnings. 24 | ) 25 | -------------------------------------------------------------------------------- /cdk.json: -------------------------------------------------------------------------------- 1 | { 2 |   "app": "ts-node -r tsconfig-paths/register deployment/bin/stack.ts" 3 | } 4 | -------------------------------------------------------------------------------- /deployment/bin/stack.ts: -------------------------------------------------------------------------------- 1 | import * as cdk from "@aws-cdk/core"; 2 | 3 | import { LambdaStack } from "../lib/lambda-stack"; 4 | import * as pkg from "../../package.json"; 5 | 6 | // Allow appending a randomized benchmark number. 7 | const { BENCHMARK_SUFFIX } = process.env; 8 | const STACK_NAME = BENCHMARK_SUFFIX ? `${pkg.name}-${BENCHMARK_SUFFIX}` : pkg.name; 9 | 10 | /** 11 | * Construct for the Serverless Application. 12 | * 13 | * NOTE: We export our construct so that it's possible to stitch it into a larger deployment. 14 | */ 15 | export default class Stack { 16 | public lambdaStack: LambdaStack; 17 | 18 | constructor(app: cdk.App) { 19 | // Set up our Lambda Stack. 
20 | this.lambdaStack = new LambdaStack(app, `${STACK_NAME}`, {}); 21 | } 22 | } 23 | 24 | const app = new cdk.App(); 25 | new Stack(app); 26 | -------------------------------------------------------------------------------- /deployment/lib/lambda-stack.ts: -------------------------------------------------------------------------------- 1 | import * as core from "@aws-cdk/core"; 2 | import * as lambda from "@aws-cdk/aws-lambda"; 3 | import * as s3 from "@aws-cdk/aws-s3"; 4 | import * as cdk from "@aws-cdk/core"; 5 | 6 | const { CDK_LOCAL } = process.env; 7 | 8 | interface Props {} 9 | 10 | export class LambdaStack extends core.Stack { 11 | constructor(scope: cdk.App, id: string, props: Props) { 12 | super(scope, id); 13 | 14 | const bootstrapLocation = `${__dirname}/../../dist`; 15 | 16 | // Our Lambda function details. 17 | const entryId = "main"; 18 | const entryFnName = `${id}-${entryId}`; 19 | const entry = new lambda.Function(this, entryId, { 20 | functionName: entryFnName, 21 | description: "Scala serverless minimal microservice", 22 | runtime: lambda.Runtime.PROVIDED_AL2, 23 | handler: `${id}`, // The handler value syntax is `{cargo-package-name}.{bin-name}`. 24 | code: 25 | CDK_LOCAL !== "true" 26 | ? lambda.Code.fromAsset(bootstrapLocation) 27 | : lambda.Code.fromBucket(s3.Bucket.fromBucketName(this, `LocalBucket`, "__local__"), bootstrapLocation), 28 | memorySize: 256, 29 | timeout: cdk.Duration.seconds(10), 30 | tracing: lambda.Tracing.ACTIVE, 31 | }); 32 | 33 | // Our Lambda function environment variables. 34 | entry.addEnvironment("AWS_NODEJS_CONNECTION_REUSE_ENABLED", "1"); 35 | 36 | // Tag our resource. 
37 | core.Aspects.of(entry).add(new cdk.Tag("service-type", "API")); 38 | core.Aspects.of(entry).add(new cdk.Tag("billing", `lambda-${entryFnName}`)); 39 | } 40 | } 41 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2.1" 2 | 3 | services: 4 | localstack: 5 | container_name: "${LOCALSTACK_DOCKER_NAME-localstack_main}" 6 | # image: localstack/localstack:0.12.4 7 | image: localstack/localstack:latest 8 | network_mode: bridge 9 | ports: 10 | - "4566:4566" 11 | - "4571:4571" 12 | - "${PORT_WEB_UI-9888}:${PORT_WEB_UI-9888}" 13 | environment: 14 | - SERVICES=${SERVICES-serverless,cloudfront,cloudformation,iam,sts,sqs,ssm,s3,route53,acm,cloudwatch,cloudwatch-logs,lambda,dynamodb,apigateway} 15 | - DEFAULT_REGION=${DEFAULT_REGION-eu-west-1} 16 | - DEBUG=${DEBUG- } 17 | - DATA_DIR=${DATA_DIR- } 18 | - PORT_WEB_UI=${PORT_WEB_UI-9888} 19 | - LAMBDA_EXECUTOR=${LAMBDA_EXECUTOR- } 20 | - LAMBDA_REMOTE_DOCKER=${LAMBDA_REMOTE_DOCKER-false} 21 | - KINESIS_ERROR_PROBABILITY=${KINESIS_ERROR_PROBABILITY- } 22 | - DOCKER_HOST=unix:///var/run/docker.sock 23 | - HOST_TMP_FOLDER=${TMPDIR:-/tmp/localstack} 24 | volumes: 25 | - "${TMPDIR:-/tmp/localstack}:/tmp/localstack" 26 | - "/var/run/docker.sock:/var/run/docker.sock" 27 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "sls-scala-minimal", 3 | "description": "Deployment tools for the minimal Serverless Scala application.", 4 | "version": "0.1.0", 5 | "author": "Christian Kjaer ", 6 | "license": "MIT", 7 | "keywords": [ 8 | "serverless", 9 | "scala", 10 | "aws", 11 | "cdk" 12 | ], 13 | "scripts": { 14 | "build": "npm run build:scala && npm run build:docker && npm run build:native", 15 | "build:scala": "sbt clean assembly", 16 | "build:docker": 
"docker build -t sls-graalvm-build .", 17 | "build:native": "mkdir -p dist/META-INF && docker run -v \"$(pwd -P)/target/scala-2.13\":/tmp/sls-graalvm-target -v \"$(pwd -P)/dist\":/tmp/sls-graalvm-dist sls-graalvm-build", 18 | "build:meta-inf": "npm run build:native -- java -agentlib:native-image-agent=config-output-dir=/tmp/sls-graalvm-dist/META-INF/native-image -jar /tmp/sls-graalvm-target/bootstrap.jar", 19 | "build:archive": "(cd ./dist/ && zip ./lambda.zip ./bootstrap) && mv ./dist/lambda.zip ./lambda.zip", 20 | "build:clean": "(rm target/scala-*/bootstrap.jar && rm -f dist/bootstrap) || echo '[build:clean] No existing release found.'", 21 | "deploy": "npm run build:clean && npm run build && npm run cdk:deploy", 22 | "benchmark": "export BENCHMARK_SUFFIX=${BENCHMARK_SUFFIX-$RANDOM}; npm run benchmark:setup && ts-node -- ./benchmark/benchmark.ts && npm run benchmark:destroy", 23 | "benchmark:setup": "[[ $DRY_RUN != 'true' ]] && CI=true npm run cdk:deploy || echo 'Dry run, skipping deploy.'", 24 | "benchmark:destroy": "[[ $BENCHMARK_SUFFIX != '' && $DRY_RUN != 'true' ]] && cdk destroy --force '*' || echo 'Dry run, skipping teardown.'", 25 | "cdk:synth": "cdk synth", 26 | "cdk:diff": "cdk diff", 27 | "cdk:deploy": "[[ $CI == 'true' ]] && export CDK_APPROVAL='never' || export CDK_APPROVAL='broadening'; cdk deploy --require-approval $CDK_APPROVAL '*'", 28 | "cdk:bootstrap": "cdk bootstrap aws://$(aws sts get-caller-identity | jq -r .Account)/$AWS_REGION", 29 | "cdklocal:start": "docker-compose up", 30 | "cdklocal:clear-cache": "(rm ~/.cdk/cache/accounts.json || true) && (rm ~/.cdk/cache/accounts_partitions.json || true)", 31 | "cdklocal:deploy": "npm run --silent cdklocal:clear-cache && CDK_LOCAL=true AWS_REGION=eu-west-1 cdklocal deploy --require-approval never '*'", 32 | "cdklocal:bootstrap": "npm run --silent cdklocal:clear-cache && CDK_LOCAL=true AWS_REGION=eu-west-1 cdklocal bootstrap aws://000000000000/eu-west-1", 33 | "ts-node": "ts-node" 34 | }, 35 | 
/** Our custom runtime object. The AWS "provided" runtime runs this as an executable
  * and expects us to drive the runtime-interface loop ourselves: poll for the next
  * invocation, dispatch it to the handler, and post the result (or error) back.
  *
  * Interested in knowing more about how it all works? Check out the walkthrough of
  * creating your own runtime:
  * https://docs.aws.amazon.com/lambda/latest/dg/runtimes-walkthrough.html
  */
object Bootstrap {

  /** The entrypoint of the AWS provided runtime: loops forever, long-polling the
    * runtime API for the next invocation and posting back a response.
    */
  def main(args: Array[String]): Unit = {
    // Host:port of the runtime-interface endpoint, injected by the Lambda service.
    val awsLambdaRuntimeAPI = sys.env("AWS_LAMBDA_RUNTIME_API")
    // Currently unused, but read eagerly so a misconfigured environment fails fast.
    val awsLambdaHandler = sys.env("_HANDLER")
    val nextEventUrl =
      s"http://$awsLambdaRuntimeAPI/2018-06-01/runtime/invocation/next"

    Console.err.println(s"Requesting $awsLambdaRuntimeAPI...")

    while (true) {
      // Long-poll: this GET blocks until an invocation event is available.
      val runtimeResponse = requests.get(nextEventUrl)

      // TODO: Handle the response body in a more robust way, e.g. via
      // RequestEvent.fromJsonSafe(runtimeResponse.text()).

      // Epoch millis by which the function must respond (runtime API contract).
      val deadlineMs =
        runtimeResponse.headers("lambda-runtime-deadline-ms").head.toLong
      val requestId =
        runtimeResponse.headers("lambda-runtime-aws-request-id").head

      val errorResponseUrl =
        s"http://$awsLambdaRuntimeAPI/2018-06-01/runtime/invocation/$requestId/error"
      val successResponseUrl =
        s"http://$awsLambdaRuntimeAPI/2018-06-01/runtime/invocation/$requestId/response"

      try {
        val lambdaResponse = Handler.handle(runtimeResponse.text(), deadlineMs)
        requests.post(successResponseUrl, data = lambdaResponse.toJson)
      } catch {
        case e: Throwable =>
          Console.err.println(s"Could not handle event: $runtimeResponse")
          // e.getMessage can be null (e.g. for NullPointerException); fall back to
          // the exception's string form so the error body is never the text "null".
          val lambdaResponse = LambdaResponse(
            500,
            Map("Content-Type" -> "application/json"),
            Option(e.getMessage).getOrElse(e.toString)
          )
          requests.post(errorResponseUrl, data = lambdaResponse.toJson)
      }
    }
  }

}

import scala.util.Try

import com.github.plokhotnyuk.jsoniter_scala.core._
import com.github.plokhotnyuk.jsoniter_scala.macros._

// The response written to the response url by the function.
case class LambdaResponse(
    statusCode: Int,
    headers: Map[String, String],
    body: String,
    isBase64Encoded: Boolean = false
) {
  // NOTE(review): the codec is derived per instance; consider hoisting it to a
  // companion object so it is created once.
  private implicit val codec: JsonValueCodec[LambdaResponse] = {
    JsonCodecMaker.make[LambdaResponse](CodecMakerConfig)
  }

  // Serialize this response to the JSON body expected by the runtime API.
  def toJson: String = {
    writeToString(this)
  }
}

// Caller identity attached to an API Gateway proxy request.
case class RequestIdentity(
    apiKey: Option[String],
    userArn: Option[String],
    cognitoAuthenticationType: Option[String],
    caller: Option[String],
    userAgent: Option[String],
    user: Option[String],
    cognitoIdentityPoolId: Option[String],
    cognitoAuthenticationProvider: Option[String],
    sourceIp: Option[String],
    accountId: Option[String]
)

// API Gateway request-context metadata for a proxy invocation.
case class RequestContext(
    resourceId: String,
    apiId: String,
    resourcePath: String,
    httpMethod: String,
    accountId: String,
    stage: String,
    identity: RequestIdentity,
    extendedRequestId: Option[String],
    path: String
)
// The request returned from the next-event url (API Gateway proxy format).
case class RequestEvent(
    httpMethod: String,
    body: Option[String],
    resource: String,
    requestContext: RequestContext,
    queryStringParameters: Option[Map[String, String]],
    headers: Option[Map[String, String]],
    pathParameters: Option[Map[String, String]],
    stageVariables: Option[Map[String, String]],
    path: String,
    isBase64Encoded: Boolean
)

object RequestEvent {
  // jsoniter codec used to decode the runtime API payload.
  private implicit val codec: JsonValueCodec[RequestEvent] = {
    JsonCodecMaker.make[RequestEvent](CodecMakerConfig)
  }

  /** Parse the raw event body, returning None (after logging to stderr) on
    * malformed input instead of throwing.
    */
  def fromJsonSafe(s: String): Option[RequestEvent] =
    try Some(readFromString[RequestEvent](s))
    catch {
      case scala.util.control.NonFatal(ex) =>
        Console.err.println(
          s"Failed to parse body into RequestEvent: $ex \nbody: $s"
        )
        None
    }
}

object Handler {
  /** Handle a valid request to this function. Currently a stub that ignores the
    * raw event and the deadline and always returns a static greeting.
    */
  def handle(requestEvent: String, deadlineMs: Long): LambdaResponse = {
    // TODO: once the raw event is parsed (RequestEvent.fromJsonSafe), greet by
    // the "name" path parameter instead of this static fallback.
    val greeting = "Hello, World!\n"
    LambdaResponse(200, Map("Content-Type" -> "application/json"), greeting)
  }
}
java.util.function.Predicate; 4 | 5 | final class HasReleaseFenceMethod implements Predicate { 6 | @Override 7 | public boolean test(String className) { 8 | try { 9 | final Class classForName = Class.forName(className); 10 | classForName.getMethod("releaseFence"); 11 | return true; 12 | } catch (Exception cnfe) { 13 | return false; 14 | } 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /src/main/scala/internal/substitutes/Target_scala_collection_immutable_VM.java: -------------------------------------------------------------------------------- 1 | package scala.internal.substitutes; 2 | 3 | import com.oracle.svm.core.annotate.Substitute; 4 | import com.oracle.svm.core.annotate.TargetClass; 5 | 6 | @TargetClass(className = "scala.collection.immutable.VM", onlyWith = HasReleaseFenceMethod.class) 7 | final class Target_scala_collection_immutable_VM { 8 | 9 | @Substitute 10 | public static void releaseFence() { 11 | UnsafeUtils.UNSAFE.storeFence(); 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/main/scala/internal/substitutes/Target_scala_runtime_Statistics.java: -------------------------------------------------------------------------------- 1 | package scala.internal.substitutes; 2 | 3 | import com.oracle.svm.core.annotate.Substitute; 4 | import com.oracle.svm.core.annotate.TargetClass; 5 | 6 | @TargetClass(className = "scala.runtime.Statics", onlyWith = HasReleaseFenceMethod.class) 7 | final class Target_scala_runtime_Statics { 8 | 9 | @Substitute 10 | public static void releaseFence() { 11 | UnsafeUtils.UNSAFE.storeFence(); 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/main/scala/internal/substitutes/UnsafeUtils.java: -------------------------------------------------------------------------------- 1 | package scala.internal.substitutes; 2 | 3 | import java.lang.reflect.Field; 4 | 5 | class 
/**
 * Holder for the shared {@link sun.misc.Unsafe} instance, obtained reflectively
 * via the private {@code theUnsafe} field (Unsafe.getUnsafe() rejects
 * non-bootstrap callers).
 */
class UnsafeUtils {
    static final sun.misc.Unsafe UNSAFE = obtainUnsafe();

    // Reflectively read sun.misc.Unsafe.theUnsafe; any failure is wrapped in
    // ExceptionInInitializerError, matching static-initializer semantics.
    private static sun.misc.Unsafe obtainUnsafe() {
        try {
            Field theUnsafe = sun.misc.Unsafe.class.getDeclaredField("theUnsafe");
            theUnsafe.setAccessible(true);
            return (sun.misc.Unsafe) theUnsafe.get(null);
        } catch (Throwable cause) {
            throw new ExceptionInInitializerError(cause);
        }
    }
}