├── .gitattributes ├── .gitignore ├── .npmignore ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── Dockerfile ├── LICENSE ├── README.md ├── bin └── clickhouse-lambda.ts ├── cdk.json ├── clickhouse-lambda.png ├── jest.config.js ├── lib └── clickhouse-lambda-stack.ts ├── package-lock.json ├── package.json ├── postman.gif ├── src ├── app.ts └── clickhouse_runner.ts ├── test ├── integration │ └── app.test.ts ├── testdata.zip └── unit │ ├── app.test.ts │ └── clickhouse_runner.test.ts └── tsconfig.json /.gitattributes: -------------------------------------------------------------------------------- 1 | clickhouse_bin/clickhouse filter=lfs diff=lfs merge=lfs -text 2 | postman.gif filter=lfs diff=lfs merge=lfs -text 3 | clickhouse-lambda.png filter=lfs diff=lfs merge=lfs -text 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.js 2 | !jest.config.js 3 | *.d.ts 4 | node_modules 5 | dist 6 | # CDK asset staging directory 7 | .cdk.staging 8 | cdk.out 9 | coverage 10 | 11 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | *.ts 2 | !*.d.ts 3 | 4 | # CDK asset staging directory 5 | .cdk.staging 6 | cdk.out 7 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. 
You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project, we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM public.ecr.aws/lambda/nodejs:20 as builder 2 | WORKDIR /usr/app 3 | COPY package.json ./ 4 | COPY src ./src 5 | RUN npm install 6 | RUN npm run build:app 7 | 8 | FROM public.ecr.aws/amazonlinux/amazonlinux:2023 as downloader 9 | WORKDIR /tmp 10 | RUN curl -LO https://github.com/ClickHouse/ClickHouse/releases/download/v23.8.9.54-lts/clickhouse-common-static-23.8.9.54-arm64.tgz \ 11 | && mkdir clickhouse_bin \ 12 | && dnf install tar gzip -y \ 13 | && tar xzf clickhouse-common-static-23.8.9.54-arm64.tgz -C clickhouse_bin --strip-components=1 14 | 15 | FROM public.ecr.aws/lambda/nodejs:20 16 | WORKDIR ${LAMBDA_TASK_ROOT} 17 | COPY --from=builder /usr/app/dist/* ./ 18 | COPY --from=downloader /tmp/clickhouse_bin/usr/bin/clickhouse ./ 19 | CMD ["app.handler"] -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT No Attribution 2 | 3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of 6 | this software and associated documentation files (the "Software"), to deal in 7 | the Software without restriction, including without limitation the rights to 8 | use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of 9 | the Software, and to permit persons to whom the Software is furnished to do so. 10 | 11 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 12 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS 13 | FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR 14 | COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER 15 | IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 16 | CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 17 | 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ClickHouse in AWS Lambda 2 | 3 | This sample shows how to run the open-source online analytics database [ClickHouse](https://github.com/ClickHouse/ClickHouse) in an AWS Lambda function. 4 | 5 | It enables ad-hoc querying of existing data in Amazon S3 buckets with ClickHouse SQL using a simple HTTP client, without the need to run clickhouse-local on your computer or to deploy a ClickHouse cluster. 6 | 7 | The sample can also help you build other serverless solutions around the ClickHouse query engine. 8 | 9 | ![postman-example](./postman.gif) 10 | 11 | # Quick start 12 | 1. Have Docker and the [AWS Command Line Interface (AWS CLI)](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) installed and configured. 13 | 1. Clone the repo. 14 | 1. Install and bootstrap the AWS Cloud Development Kit (AWS CDK) - more details [here](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html#getting_started_install). 15 | ``` 16 | npm install -g aws-cdk 17 | cdk bootstrap aws://<account-id>/<region> 18 | ``` 19 | 1. Install dependencies. 20 | ``` 21 | npm install 22 | ``` 23 | 1. Set the USER_ARN environment variable to the Amazon Resource Name (ARN) of the AWS IAM user that will be granted permission to run queries. To set it to your default AWS CLI user: 24 | ``` 25 | export USER_ARN=`aws sts get-caller-identity | jq -r .Arn` 26 | ``` 27 | 1. Deploy the stack to your AWS account. 28 | ``` 29 | cdk deploy 30 | ``` 31 | 32 | 1. See the TestUrl output in the AWS CDK console output. It references a test file in the S3 bucket created during deployment. E.g. 33 | ``` 34 | https://<...>.lambda-url.eu-central-1.on.aws/clickhouselambdastack-clickhousebucket<...>/test.csv 35 | ``` 36 | 1. Issue an HTTP POST request signed with [AWS Signature Version 4](#q-what-is-aws-signature-version-4-and-how-to-sign-requests-with-it) to this URL. 37 | Pass your SQL statement as plain text in the request body. Use the pre-defined name `table` in all SQL statements. For example, with curl: 38 | ``` 39 | curl 'https://<...>.lambda-url.eu-central-1.on.aws/clickhouselambdastack-clickhousebucket<...>/test.csv' \ 40 | --aws-sigv4 "aws:amz:eu-central-1:lambda" \ 41 | -u '<access-key-id>:<secret-access-key>' \ 42 | -d 'SELECT * FROM table LIMIT 5;' 43 | ``` 44 | Or use Postman for a graphical UI. 45 | 1. Get your query execution results in the HTTP response body. 46 | 47 | Check the [guide](#issue-queries) for more querying options.
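If you prefer to sign requests programmatically instead of using curl or Postman, the integration tests in this repository sign axios requests with the [aws4-axios](https://www.npmjs.com/package/aws4-axios) interceptor. Below is a minimal TypeScript sketch along the same lines; the function URL, bucket name, and object key are placeholders, and credentials are assumed to be available in the standard AWS environment variables.
```
import axios from "axios";
import { aws4Interceptor } from "aws4-axios";

// Sign every request from this client with AWS Signature Version 4,
// targeting the Lambda service in the function's region.
const client = axios.create();
client.interceptors.request.use(
  aws4Interceptor({
    options: { region: "eu-central-1", service: "lambda" },
    credentials: {
      accessKeyId: process.env.AWS_ACCESS_KEY_ID ?? "",
      secretAccessKey: process.env.AWS_SECRET_ACCESS_KEY ?? "",
    },
  })
);

// POST the SQL statement as plain text; `table` is the pre-defined table name.
async function runQuery(): Promise<void> {
  const response = await client.post(
    "https://<...>.lambda-url.eu-central-1.on.aws/<bucket-name>/test.csv",
    "SELECT * FROM table LIMIT 5;",
    { headers: { "Content-Type": "text/plain" } }
  );
  console.log(response.data);
}

runQuery().catch(console.error);
```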
48 | 49 | # Architecture 50 | 51 | ![clickhouse-lambda architecture](./clickhouse-lambda.png) 52 | 53 | The sample includes a Lambda function with the Lambda function URL enabled, an IAM user identity policy, a Lambda function execution role, and an S3 bucket. 54 | 55 | **The Lambda function** processes HTTP requests and runs the ClickHouse binary. It has 2048 MB of memory by default; the memory size can be increased up to 10240 MB. More memory can improve query performance, but also increases cost. The Lambda code is deployed as a container image stored in Amazon Elastic Container Registry (Amazon ECR) because of the ClickHouse binary size. We use a standard ClickHouse binary; see [the FAQ](#q-how-to-make-clickhouse-binary-run-better-in-aws-lambda) for ideas on how to optimize it for the AWS Lambda environment. 56 | 57 | Data resides in the **S3 bucket**. You can upload additional data in any format supported by ClickHouse (Parquet, JSON, CSV, and [many others](https://clickhouse.com/docs/en/interfaces/formats)). 58 | 59 | **The Lambda function URL** provides an easy way to invoke the Lambda function without Amazon API Gateway. It uses the AWS_IAM auth type, so all requests need to be signed with AWS Signature Version 4. You could change it to NONE to disable authentication on the Lambda function URL side completely (this poses a security risk!) or implement authentication of your choice in the Lambda code, through Amazon API Gateway, or through Amazon CloudFront. 60 | 61 | Permissions to invoke the Lambda function URL are granted through the **IAM user identity policy**. The sample grants access to only one user, specified during deployment through the USER_ARN environment variable. Another way to grant access is through the Lambda function's resource policy. 62 | 63 | The **Lambda execution role** provides the Lambda function with access to S3 data. This sample grants access to one bucket only (specified during deployment). You can modify the role's policies to allow access to more buckets. Note that access to S3 is controlled entirely by this role: any IAM user that is allowed to make requests to the Lambda function URL will be able to query data regardless of their own S3 permissions. You can build a solution around [Amazon S3 Object Lambda](https://aws.amazon.com/s3/features/object-lambda/) instead of a regular Lambda function if you need to control access based on users' existing S3 permissions. 64 | 65 | # User guide 66 | 67 | ## Query existing S3 bucket 68 | To query your real data, you can either upload it to the sample S3 bucket or connect your existing S3 bucket during deployment. 69 | 70 | To connect an existing S3 bucket instead of creating a new one, set the `CREATE_BUCKET` environment variable to `false` and `BUCKET_NAME` to your bucket name (just the name, without s3:// or https://) during the CDK stack deployment. The bucket must be in the same AWS Region where the sample is deployed. 71 | 72 | Alternatively, you can simply upload your own data in JSON, Parquet, CSV, or [another supported format](https://clickhouse.com/docs/en/interfaces/formats) to the sample bucket. Get the bucket name from the `ClickhouseBucketName` AWS CDK output. 73 | 74 | ## Change authentication method 75 | You may want your function URL to be public, for example to make queries directly from a web browser without signing requests with AWS Signature Version 4. For that, you can use the `NONE` auth type. See the guide [here](https://docs.aws.amazon.com/lambda/latest/dg/urls-auth.html).
This poses a security risk, as it allows any user to access the data in the bucket through ClickHouse queries. 76 | 77 | ## Issue queries 78 | The URL format is: 79 | ``` 80 | https://<lambda-function-url>/<bucket-name>/<object-key> 81 | ``` 82 | You can also omit the bucket name; then the bucket created or specified during deployment will be used: 83 | ``` 84 | https://<lambda-function-url>/<object-key> 85 | ``` 86 | Requests need to be signed with AWS [Signature Version 4](#q-what-is-aws-signature-version-4-and-how-to-sign-requests-with-it) for security. 87 | 88 | You can pass a SQL statement either as the `statement` query parameter of an HTTP GET request (URL-encoded, e.g. spaces replaced with `%20`, and so on) or as raw text in the body of an HTTP POST request. 89 | 90 | The pre-defined table name `table` should be used in all queries. Under the hood, a temporary table named `table` is created from the requested S3 object before each query runs. This way you can issue complex queries with predicates. 91 | 92 | ## Logging 93 | ClickHouse stdout and stderr are passed to Amazon CloudWatch Logs. You can find the log group in the AWS Console on the Monitoring tab of your Lambda function's properties page. 94 | 95 | ## Limitations 96 | This version has the following limitations that we plan to address in the future: 97 | - There is no plain CloudFormation template; only CDK is supported for deployment. You will need the AWS CDK, the AWS CLI, and Docker installed to deploy the sample. 98 | - Only IAM users are supported for querying, not IAM roles. 99 | - Permissions to invoke the Lambda function URL are granted through IAM user identity policies. This means the user or role you use to deploy the CDK stack requires permission to grant `lambda:InvokeFunctionUrl` to the IAM user that will be used for querying. 100 | 101 | 102 | # FAQ 103 | 104 | ## Q: Why use this instead of [clickhouse-local](https://clickhouse.com/docs/en/operations/utilities/clickhouse-local)? 105 | The main difference from clickhouse-local is that the sample does not run the ClickHouse query engine on the client device and does not transfer raw data from the S3 bucket to the client. That's why some interesting use cases for the sample are: 106 | - Slow client devices. AWS Lambda [supports](https://aws.amazon.com/about-aws/whats-new/2020/12/aws-lambda-supports-10gb-memory-6-vcpu-cores-lambda-functions/) up to 10 GB of memory and 6 vCPUs, which is more than an entry-level laptop might have. 107 | - Mobile devices. You can query massive data in S3 from mobile devices using HTTPS client applications or web clients like Postman. 108 | - Slow network connections. AWS Lambda runs close to your data stored in S3, and only query results are transferred to the client. That's why it can run queries faster than clickhouse-local and save on traffic. 109 | - Advanced authentication scenarios, where users shouldn't have AWS credentials and IAM permissions to access S3 data directly. 110 | - No-effort data lake :-) 111 | 112 | ## Q: What is AWS Signature Version 4 and how to sign requests with it? 113 | AWS Signature Version 4 is a protocol for authenticating incoming API requests to AWS services. 114 | 115 | AWS SDKs and tools such as curl, Postman, and [AWS SigV4 Proxy](https://github.com/awslabs/aws-sigv4-proxy) offer built-in ways to sign your requests with AWS Signature Version 4. 116 | 117 | Find more details [here](https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html). 118 | 119 | ## Q: How much does it cost to run the sample? 120 | You pay for: 121 | 1. Lambda [invocation and execution time](https://aws.amazon.com/lambda/pricing/) 122 | 1.
[S3 GET requests](https://aws.amazon.com/s3/pricing/). Data transfer between a Lambda function and Amazon Simple Storage Service (S3) within the same AWS Region is free. 123 | 1. [Data Transfer Out](https://aws.amazon.com/ec2/pricing/on-demand/#Data_Transfer) from AWS Lambda to the client. This includes only query execution results, not the source data. 124 | 125 | The overall price depends on the volume of data and the number and type of queries. Importantly, there are no costs other than S3 storage when no queries are run. 126 | 127 | The cost to deploy the sample and run several queries on the provided test data should be under $1 per month. 128 | 129 | ## Q: How to make clickhouse binary run (better) in AWS Lambda? 130 | To reduce the Lambda container image size and Lambda function cold start times, you can build a custom ClickHouse binary with only the necessary components (basically, `-DENABLE_CLICKHOUSE_LOCAL=ON`). 131 | 132 | # Security 133 | 134 | See [CONTRIBUTING](CONTRIBUTING.md#security-issue-notifications) for more information. 135 | 136 | # License 137 | 138 | This sample is licensed under the MIT-0 License. See [LICENSE](LICENSE). 139 | -------------------------------------------------------------------------------- /bin/clickhouse-lambda.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | // SPDX-License-Identifier: MIT-0 4 | import "source-map-support/register"; 5 | import * as cdk from "aws-cdk-lib"; 6 | import { ClickhouseLambdaStack } from "../lib/clickhouse-lambda-stack"; 7 | 8 | const app = new cdk.App(); 9 | new ClickhouseLambdaStack(app, "ClickhouseLambdaStack", { 10 | /* If you don't specify 'env', this stack will be environment-agnostic. 11 | * Account/Region-dependent features and context lookups will not work, 12 | * but a single synthesized template can be deployed anywhere. */ 13 | /* Uncomment the next line to specialize this stack for the AWS Account 14 | * and Region that are implied by the current CLI configuration. */ 15 | // env: { account: process.env.CDK_DEFAULT_ACCOUNT, region: process.env.CDK_DEFAULT_REGION }, 16 | /* Uncomment the next line if you know exactly what Account and Region you 17 | * want to deploy the stack to.
*/ 18 | // env: { account: '123456789012', region: 'us-east-1' }, 19 | /* For more information, see https://docs.aws.amazon.com/cdk/latest/guide/environments.html */ 20 | }); 21 | -------------------------------------------------------------------------------- /cdk.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": "npx ts-node --prefer-ts-exts bin/clickhouse-lambda.ts", 3 | "watch": { 4 | "include": [ 5 | "**" 6 | ], 7 | "exclude": [ 8 | "README.md", 9 | "cdk*.json", 10 | "**/*.d.ts", 11 | "**/*.js", 12 | "tsconfig.json", 13 | "package*.json", 14 | "yarn.lock", 15 | "node_modules", 16 | "test" 17 | ] 18 | }, 19 | "context": { 20 | "@aws-cdk/aws-lambda:recognizeLayerVersion": true, 21 | "@aws-cdk/core:checkSecretUsage": true, 22 | "@aws-cdk/core:target-partitions": [ 23 | "aws", 24 | "aws-cn" 25 | ], 26 | "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, 27 | "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, 28 | "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, 29 | "@aws-cdk/aws-iam:minimizePolicies": true, 30 | "@aws-cdk/core:validateSnapshotRemovalPolicy": true, 31 | "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, 32 | "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, 33 | "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, 34 | "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, 35 | "@aws-cdk/core:enablePartitionLiterals": true, 36 | "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true, 37 | "@aws-cdk/aws-iam:standardizedServicePrincipals": true, 38 | "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, 39 | "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true, 40 | "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true, 41 | "@aws-cdk/aws-route53-patters:useCertificate": true, 42 | "@aws-cdk/customresources:installLatestAwsSdkDefault": false, 43 | "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true, 44 | "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true, 45 | "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true, 46 | "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true, 47 | "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true, 48 | "@aws-cdk/aws-redshift:columnId": true, 49 | "@aws-cdk/aws-stepfunctions-tasks:enableEmrServicePolicyV2": true, 50 | "@aws-cdk/aws-ec2:restrictDefaultSecurityGroup": true, 51 | "@aws-cdk/aws-apigateway:requestValidatorUniqueId": true, 52 | "@aws-cdk/aws-kms:aliasNameRef": true 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /clickhouse-lambda.png: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:e5e8c92532dc4ddb383765f1482f5e3d13eab97db50a390974bc79b8a9ba7c79 3 | size 48861 4 | -------------------------------------------------------------------------------- /jest.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | projects: [ 3 | "/test/jest.unit.config.js", 4 | "/test/jest.integration.config.js", 5 | ], 6 | testEnvironment: "node", 7 | clearMocks: true, 8 | collectCoverage: true, 9 | coverageDirectory: "coverage", 10 | coverageProvider: "v8", 11 | roots: ["/test"], 12 | testMatch: ["**/*.test.ts"], 13 | transform: { 14 | "^.+\\.tsx?$": "ts-jest", 15 | }, 16 | }; 17 | 
-------------------------------------------------------------------------------- /lib/clickhouse-lambda-stack.ts: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | import * as cdk from "aws-cdk-lib"; 4 | import { Construct } from "constructs"; 5 | import * as lambda from "aws-cdk-lib/aws-lambda"; 6 | import * as s3 from "aws-cdk-lib/aws-s3"; 7 | import * as iam from "aws-cdk-lib/aws-iam"; 8 | import * as s3deploy from "aws-cdk-lib/aws-s3-deployment"; 9 | import path = require("path"); 10 | import { RetentionDays } from "aws-cdk-lib/aws-logs"; 11 | 12 | export class ClickhouseLambdaStack extends cdk.Stack { 13 | constructor(scope: Construct, id: string, props?: cdk.StackProps) { 14 | super(scope, id, props); 15 | 16 | const userArn = process.env.USER_ARN ?? ""; 17 | const createBucket = process.env.CREATE_BUCKET ?? "true"; 18 | const bucketName = process.env.BUCKET_NAME ?? "clickhouse-bucket"; 19 | let clickhouseBucket; 20 | if (userArn === "") { 21 | console.error( 22 | "USER_ARN is required.\nPlease set USER_ARN env variable to your IAM user ARN" 23 | ); 24 | process.exit(1); 25 | } 26 | 27 | const user = iam.User.fromUserArn(this, "User", userArn); 28 | 29 | if (createBucket === "true") { 30 | clickhouseBucket = new s3.Bucket(this, "ClickhouseBucket"); 31 | } else { 32 | clickhouseBucket = s3.Bucket.fromBucketName( 33 | this, 34 | "ClickhouseBucket", 35 | bucketName 36 | ); 37 | } 38 | 39 | new s3deploy.BucketDeployment(this, "DeploySampleData", { 40 | sources: [ 41 | s3deploy.Source.asset(path.join(__dirname, "../test", "testdata.zip")), 42 | ], 43 | destinationBucket: clickhouseBucket, 44 | }); 45 | 46 | const clickhouseLambda = new lambda.DockerImageFunction( 47 | this, 48 | "ClickhouseLambda", 49 | { 50 | code: lambda.DockerImageCode.fromImageAsset(path.join(__dirname, "..")), 51 | environment: { 52 | BUCKET_NAME: clickhouseBucket.bucketName, 53 | REGION: this.region, 54 | BINARY_PATH: "./clickhouse", 55 | LOG_LEVEL: "INFO", 56 | TZ: "UTC", 57 | }, 58 | timeout: cdk.Duration.seconds(300), 59 | memorySize: 2048, 60 | architecture: lambda.Architecture.ARM_64, 61 | logRetention: RetentionDays.ONE_WEEK, 62 | } 63 | ); 64 | const clickhouseLambdaUrl = clickhouseLambda.addFunctionUrl({ 65 | authType: lambda.FunctionUrlAuthType.AWS_IAM, 66 | }); 67 | clickhouseLambdaUrl.grantInvokeUrl(user); 68 | clickhouseBucket.grantReadWrite(clickhouseLambda); 69 | 70 | new cdk.CfnOutput(this, "ClickhouseLambdaUrl", { 71 | value: clickhouseLambdaUrl.url, 72 | }); 73 | 74 | new cdk.CfnOutput(this, "ClickhouseBucketName", { 75 | value: clickhouseBucket.bucketName, 76 | }); 77 | 78 | new cdk.CfnOutput(this, "TestUrl", { 79 | value: `${clickhouseLambdaUrl.url}/${clickhouseBucket.bucketName}/test.csv`, 80 | }); 81 | } 82 | } 83 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "clickhouse-lambda", 3 | "version": "0.1.0", 4 | "bin": { 5 | "clickhouse-lambda": "bin/clickhouse-lambda.js" 6 | }, 7 | "scripts": { 8 | "build": "tsc", 9 | "watch": "tsc -w", 10 | "test": "jest", 11 | "test:unit": "jest --selectProjects unit", 12 | "test:integration": "jest --selectProjects integration", 13 | "cdk": "cdk", 14 | "build:app": "esbuild src/app.ts --bundle --minify --sourcemap --platform=node --target=es2020 --outfile=dist/app.js" 15 
| }, 16 | "devDependencies": { 17 | "@aws-sdk/client-s3": "^3.490.0", 18 | "@types/aws-lambda": "^8.10.131", 19 | "@types/jest": "^29.5.11", 20 | "@types/node": "20.1.7", 21 | "aws-cdk": "2.83.1", 22 | "aws4-axios": "^3.3.0", 23 | "axios": "^1.6.5", 24 | "esbuild": "^0.18.20", 25 | "jest": "^29.7.0", 26 | "ts-jest": "^29.1.1", 27 | "ts-node": "^10.9.2", 28 | "typescript": "~5.0.4" 29 | }, 30 | "dependencies": { 31 | "aws-cdk-lib": "^2.121.1", 32 | "constructs": "^10.3.0", 33 | "source-map-support": "^0.5.21" 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /postman.gif: -------------------------------------------------------------------------------- 1 | version https://git-lfs.github.com/spec/v1 2 | oid sha256:fd23c6ff1421cd9ce6e1f119c0746b1128dde01ace73b6237f64d4752e2e0653 3 | size 1394085 4 | -------------------------------------------------------------------------------- /src/app.ts: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | import { 4 | Context, 5 | APIGatewayProxyResultV2, 6 | APIGatewayProxyEventV2, 7 | } from "aws-lambda"; 8 | import { ClickHouseRunner, LogLevel } from "./clickhouse_runner"; 9 | 10 | const region = process.env.REGION ?? "us-east-1"; 11 | const bucketName = process.env.BUCKET_NAME ?? "clickhouse-bucket"; 12 | const binaryPath = process.env.BINARY_PATH ?? "./clickhouse"; 13 | const logLevel: LogLevel = (process.env.LOG_LEVEL ?? 14 | ("INFO" as LogLevel)) as LogLevel; 15 | 16 | export const handler = async ( 17 | event: APIGatewayProxyEventV2, 18 | context: Context 19 | ): Promise => { 20 | const path = event.requestContext.http.path ?? "/test.csv"; 21 | // if bucketname is already part of the URL, remove it 22 | let objectPath = ""; 23 | if (path.startsWith("/" + bucketName)) { 24 | objectPath = path.replace("/" + bucketName, ""); 25 | } else { 26 | objectPath = path; 27 | } 28 | const method = event.requestContext.http.method ?? "GET"; 29 | let statement = ""; 30 | if (method === "GET") { 31 | statement = 32 | event.queryStringParameters?.statement ?? "SELECT * FROM table LIMIT 5"; 33 | } 34 | if (method === "POST") { 35 | statement = event.body ?? "SELECT * FROM table LIMIT 5"; 36 | } 37 | 38 | const clickhouseRunnerParams = { 39 | bucketName, 40 | bucketRegion: region, 41 | objectKey: objectPath, 42 | queryStatement: statement, 43 | logLevel: logLevel, 44 | binaryPath, 45 | }; 46 | const clickhouseRunner = new ClickHouseRunner(clickhouseRunnerParams); 47 | console.log(event.requestContext); 48 | try { 49 | const result = await clickhouseRunner.run(); 50 | return { 51 | statusCode: 200, 52 | body: String(result), 53 | }; 54 | } catch (error) { 55 | return { 56 | statusCode: 500, 57 | body: String(error), 58 | }; 59 | } 60 | }; 61 | -------------------------------------------------------------------------------- /src/clickhouse_runner.ts: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: MIT-0 3 | import { exec } from "child_process"; 4 | 5 | // Log Levels for ClickHouseRunner 6 | export type LogLevel = "ERROR" | "WARN" | "INFO" | "DEBUG"; 7 | 8 | // Type with parameters for ClickHouseRunner 9 | export interface ClickHouseRunnerParams { 10 | bucketName: string; 11 | bucketRegion: string; 12 | objectKey: string; 13 | queryStatement: string; 14 | logLevel: LogLevel; 15 | binaryPath: string; 16 | } 17 | 18 | // Runner Class for ClickHouse loacal 19 | export class ClickHouseRunner { 20 | private readonly bucketName: string; 21 | private readonly objectKey: string; 22 | private readonly bucketRegion: string; 23 | private readonly queryStatement: string; 24 | private readonly logLevel: LogLevel; 25 | private readonly binaryPath: string; 26 | 27 | constructor(params: ClickHouseRunnerParams) { 28 | this.bucketName = params.bucketName; 29 | this.bucketRegion = params.bucketRegion; 30 | this.objectKey = params.objectKey; 31 | this.queryStatement = params.queryStatement; 32 | this.logLevel = params.logLevel; 33 | this.binaryPath = params.binaryPath; 34 | } 35 | 36 | // Run ClickHouse 37 | // TODO: implement with spawn to work with large data and stream 38 | public async run(): Promise { 39 | const command = this.buildCommand(); 40 | return new Promise((resolve, reject) => { 41 | exec(command, (error, stdout, stderr) => { 42 | if (error) { 43 | console.error(`exec error: ${error}`); 44 | reject(error); 45 | } 46 | // do not reject on stderr need to configure ClickHouse to log to stdout 47 | if (stderr) { 48 | console.log(`stderr: ${stderr}`); 49 | //reject(stderr); 50 | } 51 | console.log(`stdout: ${stdout}`); 52 | resolve(stdout); 53 | }); 54 | }); 55 | } 56 | 57 | // Build ClickHouse command 58 | private buildCommand(): string { 59 | const query = this.buildQuery(); 60 | const command = `${this.binaryPath} local --query="${query}" --logger.level="${this.logLevel}" --logger.console`; 61 | //const command = `${this.binaryPath} local --query="${query}"`; 62 | return command; 63 | } 64 | 65 | // Build ClickHouse query 66 | private buildQuery(): string { 67 | const s3Uri = this.buildS3Uri(); 68 | const query = `CREATE TABLE table AS s3('${s3Uri}'); ${this.queryStatement}`; 69 | return query; 70 | } 71 | 72 | // Build S3 URI 73 | private buildS3Uri(): string { 74 | const s3Uri = `https://${this.bucketName}.s3.${this.bucketRegion}.amazonaws.com/${this.objectKey}`; 75 | return s3Uri; 76 | } 77 | } 78 | -------------------------------------------------------------------------------- /test/integration/app.test.ts: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | import axios from "axios"; 4 | import { aws4Interceptor } from "aws4-axios"; 5 | import * as fs from "fs"; 6 | import { PutObjectCommand, S3Client } from "@aws-sdk/client-s3"; 7 | 8 | const uploadFile = async ( 9 | fileName: string, 10 | bucketName: string, 11 | region: string 12 | ) => { 13 | const file = fs.createReadStream(`${__dirname}/${fileName}`); 14 | const s3 = new S3Client({ region: region }); 15 | const command = new PutObjectCommand({ 16 | Bucket: bucketName, 17 | Key: fileName, 18 | Body: file, 19 | }); 20 | try { 21 | const data = await s3.send(command); 22 | console.log("Success. 
File uploaded to S3."); 23 | } catch (err) { 24 | console.log("Error", err); 25 | } 26 | }; 27 | 28 | describe("App integration test", () => { 29 | let baseApiUrl: string; 30 | let s3BucketName: string; 31 | let region: string; 32 | let accessKeyId: string; 33 | let secretAccessKey: string; 34 | const fileName = "test.csv"; 35 | const client = axios.create(); 36 | beforeAll(async () => { 37 | baseApiUrl = process.env.BASE_API_URL ?? ""; 38 | s3BucketName = process.env.BUCKET_NAME ?? ""; 39 | region = process.env.AWS_REGION ?? "us-east-1"; 40 | accessKeyId = process.env.AWS_ACCESS_KEY_ID ?? ""; 41 | secretAccessKey = process.env.AWS_SECRET_ACCESS_KEY ?? ""; 42 | if (baseApiUrl === "") { 43 | throw new Error("BASE_API_URL is not defined"); 44 | } 45 | if (s3BucketName === "") { 46 | throw new Error("BUCKET_NAME is not defined"); 47 | } 48 | if (accessKeyId === "") { 49 | throw new Error("AWS_ACCESS_KEY_ID is not defined"); 50 | } 51 | if (secretAccessKey === "") { 52 | throw new Error("AWS_SECRET_ACCESS_KEY is not defined"); 53 | } 54 | const interceptor = aws4Interceptor({ 55 | options: { region: region, service: "lambda" }, 56 | credentials: { 57 | accessKeyId: accessKeyId, 58 | secretAccessKey: secretAccessKey, 59 | }, 60 | }); 61 | client.interceptors.request.use(interceptor); 62 | await uploadFile(fileName, s3BucketName, region); 63 | }); 64 | 65 | it("should return 200 and result for get request with query", async () => { 66 | const path = fileName; 67 | const statement = "select count(*) from table"; 68 | const url = `${baseApiUrl}/${path}?statement=${statement}`; 69 | const config = { 70 | method: "get", 71 | url: url, 72 | headers: {}, 73 | }; 74 | const response = await client(config); 75 | expect(response.status).toBe(200); 76 | expect(response.data.message).toContain("100"); 77 | }, 10000); 78 | 79 | it("should return 200 and result for post request with query", async () => { 80 | const path = fileName; 81 | const statement = "select count(*) from table"; 82 | const url = `${baseApiUrl}/${path}`; 83 | const config = { 84 | method: "post", 85 | url: url, 86 | headers: { 87 | "Content-Type": "text/plain", 88 | }, 89 | data: statement, 90 | }; 91 | const response = await client(config); 92 | expect(response.status).toBe(200); 93 | expect(response.data.message).toContain("100"); 94 | }, 10000); 95 | }); 96 | -------------------------------------------------------------------------------- /test/testdata.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-samples/aws-lambda-clickhouse/638a718a4437c9847ac4dc688f86f50afc0361bf/test/testdata.zip -------------------------------------------------------------------------------- /test/unit/app.test.ts: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | // SPDX-License-Identifier: MIT-0 3 | import { APIGatewayProxyEventV2 } from "aws-lambda"; 4 | import { handler } from "../../src/app"; 5 | import { ClickHouseRunner, LogLevel } from "../../src/clickhouse_runner"; 6 | 7 | // Mock the ClickHouseRunner class 8 | jest.mock("../../src/clickhouse_runner", () => ({ 9 | ClickHouseRunner: jest.fn().mockImplementation(() => ({ 10 | run: jest.fn(), 11 | })), 12 | })); 13 | 14 | describe("handler", () => { 15 | const mockClickHouseRunner = ClickHouseRunner as jest.Mock; 16 | const bucketName = process.env.BUCKET_NAME ?? 
"clickhouse-bucket"; 17 | beforeEach(() => { 18 | mockClickHouseRunner.mockClear(); 19 | }); 20 | 21 | it("should handle GET request and return a successful response", async () => { 22 | const runMock = jest.fn().mockResolvedValue("Result from ClickHouse"); 23 | mockClickHouseRunner.mockImplementation(() => ({ 24 | run: runMock, 25 | })); 26 | 27 | const event: APIGatewayProxyEventV2 = { 28 | requestContext: { 29 | http: { 30 | path: "/test", 31 | method: "GET", 32 | protocol: "http", 33 | userAgent: "test-user-agent", 34 | sourceIp: "0.0.0.0", 35 | }, 36 | accountId: "123456789012", 37 | requestId: "test-request-id", 38 | routeKey: "test-route-key", 39 | stage: "test-stage", 40 | time: "test-time", 41 | timeEpoch: 1234567890, 42 | apiId: "test-api-id", 43 | domainName: "test-domain-name", 44 | domainPrefix: "test-domain-prefix", 45 | }, 46 | headers: { 47 | "Content-Type": "application/json", 48 | }, 49 | isBase64Encoded: false, 50 | rawPath: "/test", 51 | rawQueryString: "statement=SELECT%20*%20FROM%20table", 52 | routeKey: "test-route-key", 53 | version: "2.0", 54 | body: "", 55 | cookies: ["test-cookie"], 56 | pathParameters: {}, 57 | queryStringParameters: { 58 | statement: "SELECT * FROM table", 59 | }, 60 | }; 61 | const context = {} as any; 62 | 63 | const result = await handler(event, context); 64 | 65 | expect(mockClickHouseRunner).toHaveBeenCalledWith({ 66 | bucketName: bucketName, 67 | bucketRegion: "us-east-1", 68 | objectKey: "/test", 69 | queryStatement: "SELECT * FROM table", 70 | logLevel: "INFO", 71 | binaryPath: "./clickhouse", 72 | }); 73 | expect(runMock).toHaveBeenCalled(); 74 | expect(result).toEqual({ 75 | statusCode: 200, 76 | body: "Result from ClickHouse", 77 | }); 78 | }); 79 | 80 | it("should handle POST request and return a successful response", async () => { 81 | const runMock = jest.fn().mockResolvedValue("Result from ClickHouse"); 82 | mockClickHouseRunner.mockImplementation(() => ({ 83 | run: runMock, 84 | })); 85 | 86 | const event: APIGatewayProxyEventV2 = { 87 | requestContext: { 88 | http: { 89 | path: "/test", 90 | method: "POST", 91 | protocol: "http", 92 | userAgent: "test-user-agent", 93 | sourceIp: "0.0.0.0", 94 | }, 95 | accountId: "123456789012", 96 | requestId: "test-request-id", 97 | routeKey: "test-route-key", 98 | stage: "test-stage", 99 | time: "test-time", 100 | timeEpoch: 1234567890, 101 | apiId: "test-api-id", 102 | domainName: "test-domain-name", 103 | domainPrefix: "test-domain-prefix", 104 | }, 105 | headers: { 106 | "Content-Type": "application/json", 107 | }, 108 | isBase64Encoded: false, 109 | rawPath: "/test", 110 | rawQueryString: "statement=SELECT%20*%20FROM%20table", 111 | routeKey: "test-route-key", 112 | version: "2.0", 113 | body: "SELECT * FROM table", 114 | cookies: ["test-cookie"], 115 | pathParameters: {}, 116 | queryStringParameters: {}, 117 | }; 118 | 119 | const context = {} as any; 120 | 121 | const result = await handler(event, context); 122 | 123 | expect(mockClickHouseRunner).toHaveBeenCalledWith({ 124 | bucketName: bucketName, 125 | bucketRegion: "us-east-1", 126 | objectKey: "/test", 127 | queryStatement: "SELECT * FROM table", 128 | logLevel: "INFO", 129 | binaryPath: "./clickhouse", 130 | }); 131 | expect(runMock).toHaveBeenCalled(); 132 | expect(result).toEqual({ 133 | statusCode: 200, 134 | body: "Result from ClickHouse", 135 | }); 136 | }); 137 | 138 | it("should handle request and return an error response when ClickHouse execution fails", async () => { 139 | const error = new Error("ClickHouse execution 
failed"); 140 | const runMock = jest.fn().mockRejectedValue(error); 141 | mockClickHouseRunner.mockImplementation(() => ({ 142 | run: runMock, 143 | })); 144 | 145 | const event: APIGatewayProxyEventV2 = { 146 | requestContext: { 147 | http: { 148 | path: "/test", 149 | method: "GET", 150 | protocol: "http", 151 | userAgent: "test-user-agent", 152 | sourceIp: "0.0.0.0", 153 | }, 154 | accountId: "123456789012", 155 | requestId: "test-request-id", 156 | routeKey: "test-route-key", 157 | stage: "test-stage", 158 | time: "test-time", 159 | timeEpoch: 1234567890, 160 | apiId: "test-api-id", 161 | domainName: "test-domain-name", 162 | domainPrefix: "test-domain-prefix", 163 | }, 164 | headers: { 165 | "Content-Type": "application/json", 166 | }, 167 | isBase64Encoded: false, 168 | rawPath: "/test", 169 | rawQueryString: "statement=SELECT%20*%20FROM%20table", 170 | routeKey: "test-route-key", 171 | version: "2.0", 172 | body: "", 173 | cookies: ["test-cookie"], 174 | pathParameters: {}, 175 | queryStringParameters: { 176 | statement: "SELECT * FROM table", 177 | }, 178 | }; 179 | const context = {} as any; 180 | 181 | const result = await handler(event, context); 182 | 183 | expect(mockClickHouseRunner).toHaveBeenCalledWith({ 184 | bucketName: bucketName, 185 | bucketRegion: "us-east-1", 186 | objectKey: "/test", 187 | queryStatement: "SELECT * FROM table", 188 | logLevel: "INFO", 189 | binaryPath: "./clickhouse", 190 | }); 191 | expect(runMock).toHaveBeenCalled(); 192 | expect(result).toEqual({ 193 | statusCode: 500, 194 | body: "Error: ClickHouse execution failed", 195 | }); 196 | }); 197 | }); 198 | -------------------------------------------------------------------------------- /test/unit/clickhouse_runner.test.ts: -------------------------------------------------------------------------------- 1 | // Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
2 | // SPDX-License-Identifier: MIT-0 3 | import { exec } from "child_process"; 4 | import { 5 | ClickHouseRunner, 6 | ClickHouseRunnerParams, 7 | LogLevel, 8 | } from "../../src/clickhouse_runner"; 9 | 10 | // Mock the child_process.exec function 11 | jest.mock("child_process", () => ({ 12 | exec: jest.fn(), 13 | })); 14 | 15 | describe("ClickHouseRunner", () => { 16 | const mockExec = exec as unknown as jest.Mock; 17 | 18 | afterEach(() => { 19 | mockExec.mockReset(); 20 | }); 21 | 22 | describe("run", () => { 23 | it("should execute the command and resolve with stdout", async () => { 24 | const stdout = "Output from ClickHouse"; 25 | mockExec.mockImplementation((command: string, callback: any) => { 26 | callback(null, stdout, ""); 27 | }); 28 | 29 | const params: ClickHouseRunnerParams = { 30 | bucketName: "test-bucket", 31 | bucketRegion: "us-west-1", 32 | objectKey: "test-object-key", 33 | queryStatement: "SELECT * FROM table", 34 | logLevel: "DEBUG", 35 | binaryPath: "/path/to/clickhouse", 36 | }; 37 | const runner = new ClickHouseRunner(params); 38 | 39 | const result = await runner.run(); 40 | 41 | expect(mockExec).toHaveBeenCalledWith( 42 | expect.stringContaining(params.binaryPath), 43 | expect.any(Function) 44 | ); 45 | expect(result).toBe(stdout); 46 | }); 47 | 48 | it("should reject with an error when execution fails", async () => { 49 | const error = new Error("Execution failed"); 50 | mockExec.mockImplementation((command: string, callback: any) => { 51 | callback(error, "", ""); 52 | }); 53 | 54 | const params: ClickHouseRunnerParams = { 55 | bucketName: "test-bucket", 56 | bucketRegion: "us-east-1", 57 | objectKey: "test-object-key", 58 | queryStatement: "SELECT * FROM table", 59 | logLevel: "DEBUG", 60 | binaryPath: "/path/to/clickhouse", 61 | }; 62 | const runner = new ClickHouseRunner(params); 63 | 64 | await expect(runner.run()).rejects.toThrow(error); 65 | expect(mockExec).toHaveBeenCalledWith( 66 | expect.stringContaining(params.binaryPath), 67 | expect.any(Function) 68 | ); 69 | }); 70 | }); 71 | 72 | describe("buildCommand", () => { 73 | it("should build the ClickHouse command with the provided parameters", () => { 74 | const params: ClickHouseRunnerParams = { 75 | bucketName: "test-bucket", 76 | bucketRegion: "us-east-1", 77 | objectKey: "test-object-key", 78 | queryStatement: "SELECT * FROM table", 79 | logLevel: "DEBUG", 80 | binaryPath: "/path/to/clickhouse", 81 | }; 82 | const runner = new ClickHouseRunner(params); 83 | 84 | const command = runner["buildCommand"](); 85 | 86 | expect(command).toContain(params.binaryPath); 87 | expect(command).toContain(`--query="${runner["buildQuery"]()}"`); 88 | expect(command).toContain(`--logger.level="${params.logLevel}"`); 89 | expect(command).toContain("--logger.console"); 90 | }); 91 | }); 92 | 93 | describe("buildQuery", () => { 94 | it("should build the ClickHouse query with the provided parameters", () => { 95 | const params: ClickHouseRunnerParams = { 96 | bucketName: "test-bucket", 97 | bucketRegion: "us-east-1", 98 | objectKey: "test-object-key", 99 | queryStatement: "SELECT * FROM table", 100 | logLevel: "DEBUG", 101 | binaryPath: "/path/to/clickhouse", 102 | }; 103 | const runner = new ClickHouseRunner(params); 104 | 105 | const query = runner["buildQuery"](); 106 | 107 | expect(query).toContain(runner["buildS3Uri"]()); 108 | expect(query).toContain(params.queryStatement); 109 | }); 110 | }); 111 | 112 | describe("buildS3Uri", () => { 113 | it("should build the S3 URI with the provided parameters", () => { 114 | 
const params: ClickHouseRunnerParams = { 115 | bucketName: "test-bucket", 116 | bucketRegion: "us-west-1", 117 | objectKey: "test-object-key", 118 | queryStatement: "SELECT * FROM table", 119 | logLevel: "DEBUG", 120 | binaryPath: "/path/to/clickhouse", 121 | }; 122 | const runner = new ClickHouseRunner(params); 123 | 124 | const s3Uri = runner["buildS3Uri"](); 125 | 126 | expect(s3Uri).toContain(params.bucketName); 127 | expect(s3Uri).toContain(params.bucketRegion); 128 | expect(s3Uri).toContain(params.objectKey); 129 | }); 130 | }); 131 | }); 132 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2020", 4 | "module": "commonjs", 5 | "lib": [ 6 | "es2020", 7 | "dom" 8 | ], 9 | "declaration": true, 10 | "strict": true, 11 | "noImplicitAny": true, 12 | "strictNullChecks": true, 13 | "noImplicitThis": true, 14 | "alwaysStrict": true, 15 | "noUnusedLocals": false, 16 | "noUnusedParameters": false, 17 | "noImplicitReturns": true, 18 | "noFallthroughCasesInSwitch": false, 19 | "inlineSourceMap": true, 20 | "inlineSources": true, 21 | "experimentalDecorators": true, 22 | "strictPropertyInitialization": false, 23 | "typeRoots": [ 24 | "./node_modules/@types" 25 | ] 26 | }, 27 | "exclude": [ 28 | "node_modules", 29 | "cdk.out" 30 | ] 31 | } 32 | --------------------------------------------------------------------------------