├── .circleci └── config.yml ├── .gitignore ├── LICENSE ├── README.md ├── assets └── images │ ├── rds-connection.png │ └── ssm-rds.png ├── examples ├── README.md ├── database-with-web-server-and-redis │ ├── .gitignore │ ├── Pulumi.yaml │ ├── app │ │ ├── .dockerignore │ │ ├── .env.example │ │ ├── Dockerfile │ │ ├── knexfile.ts │ │ ├── package-lock.json │ │ ├── package.json │ │ ├── src │ │ │ ├── index.ts │ │ │ └── migrations │ │ │ │ └── 20231129230735_create_posts.ts │ │ └── tsconfig.json │ ├── index.ts │ ├── package-lock.json │ ├── package.json │ └── tsconfig.json ├── mongo-with-web-server │ ├── .gitignore │ ├── Pulumi.yaml │ ├── app │ │ ├── .dockerignore │ │ ├── .env.example │ │ ├── Dockerfile │ │ ├── package-lock.json │ │ ├── package.json │ │ ├── src │ │ │ ├── index.ts │ │ │ └── postsSeed.ts │ │ └── tsconfig.json │ ├── index.ts │ ├── package-lock.json │ ├── package.json │ └── tsconfig.json └── static-site │ ├── .gitignore │ ├── Pulumi.yaml │ ├── index.ts │ ├── package-lock.json │ ├── package.json │ ├── src │ └── index.html │ └── tsconfig.json ├── package-lock.json ├── package.json ├── src ├── components │ ├── acm-certificate.ts │ ├── database-replica.ts │ ├── database.ts │ ├── ec2-ssm-connect.ts │ ├── ecs-service.ts │ ├── mongo.ts │ ├── nuxt-ssr.ts │ ├── password.ts │ ├── project.ts │ ├── redis.ts │ ├── static-site.ts │ └── web-server.ts ├── constants.ts ├── index.ts ├── tsconfig.json ├── types │ ├── pulumi.ts │ └── size.ts └── v2 │ ├── components │ ├── ecs-service │ │ ├── index.ts │ │ └── policies.ts │ ├── grafana │ │ ├── dashboards │ │ │ ├── index.ts │ │ │ ├── panels.ts │ │ │ ├── types.ts │ │ │ └── web-server-slo.ts │ │ └── index.ts │ ├── prometheus │ │ ├── index.ts │ │ ├── queries.test.ts │ │ └── queries.ts │ └── web-server │ │ ├── builder.ts │ │ ├── index.ts │ │ └── load-balancer.ts │ ├── index.ts │ └── otel │ ├── batch-processor.ts │ ├── builder.ts │ ├── config.ts │ ├── index.ts │ ├── memory-limiter-processor.ts │ ├── otlp-receiver.ts │ └── prometheus-remote-write-exporter.ts ├── tests ├── automation.ts ├── build │ ├── index.tst.ts │ └── tsconfig.json ├── ecs-service │ ├── autoscaling.test.ts │ ├── index.test.ts │ ├── infrastructure │ │ └── index.ts │ ├── load-balancer.test.ts │ ├── persistent-storage.test.ts │ ├── service-discovery.test.ts │ └── test-context.ts ├── otel │ ├── index.test.ts │ └── validation.test.ts ├── tsconfig.json └── web-server │ ├── index.test.ts │ ├── infrastructure │ ├── config.ts │ └── index.ts │ └── test-context.ts ├── tsconfig.json ├── tsconfig.options.json └── tstyche.config.json /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2.1 2 | 3 | orbs: 4 | core: studion/core@3.0.0 5 | security: studion/security@2.0.0 6 | aws-cli: circleci/aws-cli@5.2.0 7 | node: circleci/node@7.1.0 8 | 9 | change-filters: &change-filters 10 | branches: 11 | ignore: master 12 | 13 | trunk-filters: &trunk-filters 14 | branches: 15 | only: master 16 | 17 | commands: 18 | setup-pulumi: 19 | steps: 20 | - run: 21 | name: Install Pulumi 22 | command: | 23 | curl -sSL https://get.pulumi.com/ | bash -s 24 | echo 'export PATH=${HOME}/.pulumi/bin:$PATH' >> $BASH_ENV 25 | source $BASH_ENV 26 | - run: 27 | name: Login into Pulumi Cloud 28 | command: pulumi login 29 | - run: 30 | name: Set default Pulumi Organization 31 | command: pulumi org set-default extensionengine 32 | 33 | jobs: 34 | detect-leaks: 35 | executor: security/node 36 | steps: 37 | - checkout 38 | - security/detect_secrets 39 | - security/scan_dockerfile 40 | 
audit-dependencies:
41 |     executor: core/node
42 |     parameters:
43 |       pkg_json_dir:
44 |         type: string
45 |         default: '.'
46 |     steps:
47 |       - checkout
48 |       - security/scan_dependencies:
49 |           pkg_json_dir: << parameters.pkg_json_dir >>
50 |   test-build:
51 |     executor: core/node
52 |     steps:
53 |       - checkout
54 |       - core/run_script:
55 |           script: 'test:build'
56 |   test-components:
57 |     machine:
58 |       image: ubuntu-2204:current
59 |     steps:
60 |       - checkout
61 |       - aws-cli/setup:
62 |           role_arn: ${AWS_ROLE_ARN}
63 |       - setup-pulumi
64 |       - node/install
65 |       - core/run_script:
66 |           script: 'test'
67 | 
68 | workflows:
69 |   scan-and-test:
70 |     jobs:
71 |       - detect-leaks:
72 |           filters: *change-filters
73 |       - audit-dependencies:
74 |           filters: *trunk-filters
75 |       - security/detect_secrets_dir:
76 |           name: detect-secrets
77 |           filters: *trunk-filters
78 |       - test-build:
79 |           filters: *change-filters
80 |       - test-components:
81 |           filters: *trunk-filters
82 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 | dist
3 | bin
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 | 
3 | Copyright (c) 2023 Extension Engine, LLC
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/assets/images/rds-connection.png:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/ExtensionEngine/infra-code-blocks/83def8a2be44695c697e3577b9af2dc55cc230c9/assets/images/rds-connection.png
--------------------------------------------------------------------------------
/assets/images/ssm-rds.png:
--------------------------------------------------------------------------------
 https://raw.githubusercontent.com/ExtensionEngine/infra-code-blocks/83def8a2be44695c697e3577b9af2dc55cc230c9/assets/images/ssm-rds.png
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # Infra code blocks examples
2 | 
3 | ## Table of contents
4 | 
5 | 1. [Prerequisites](#prerequisites)
6 | 2. [Setup](#setup)
7 | 3. [Mongo with web server](#mongo-with-web-server)
8 | 4. 
[Database with web server and redis](#database-with-web-server-and-redis)
 9 | 5. [Static site](#static-site)
10 | 
11 | ### Prerequisites
12 | 
13 | - Working [Pulumi](https://www.pulumi.com/docs/clouds/aws/get-started/begin/#pulumi-aws-before-you-begin) project
14 | - AWS account with necessary permissions for each component used
15 | - aws-cli package
16 | 
17 | ### Setup
18 | 
19 | - Build the infra code blocks library:
20 | 
21 | ```bash
22 | $ npm run build
23 | ```
24 | 
25 | - Navigate to the example directory and install dependencies:
26 | 
27 | ```bash
28 | $ npm i
29 | ```
30 | 
31 | ### Mongo with web server
32 | 
33 | - Set environment variables using the provided `.env.example`
34 | 
35 | - Deploy the Pulumi project:
36 | 
37 | ```bash
38 | $ pulumi up
39 | ```
40 | 
41 | ### Database with web server and redis
42 | 
43 | - Set environment variables using the provided `.env.example`
44 | 
45 | - Deploy the Pulumi project:
46 | 
47 | ```bash
48 | $ pulumi up
49 | ```
50 | 
51 | ### Static site
52 | 
53 | - Deploy the Pulumi project:
54 | 
55 | ```bash
56 | $ pulumi up
57 | ```
58 | 
59 | The deploy command outputs the bucket and service names. Use the bucket name
60 | to upload the static site files.
61 | 
62 | ```
63 | bucket: [BUCKET_NAME]
64 | default: [SERVICE_NAME]
65 | ```
66 | 
67 | - Upload files to the bucket with the following command:
68 | 
69 | ```bash
70 | $ S3_SITE_BUCKET=[BUCKET_NAME] npm run deploy
71 | ```
72 | 
--------------------------------------------------------------------------------
/examples/database-with-web-server-and-redis/.gitignore:
--------------------------------------------------------------------------------
1 | /bin/
2 | /node_modules/
3 | .env
4 | 
5 | Pulumi.*.yaml
--------------------------------------------------------------------------------
/examples/database-with-web-server-and-redis/Pulumi.yaml:
--------------------------------------------------------------------------------
1 | name: database_with_web_server_and_redis
2 | runtime: nodejs
3 | description: Infra Code Blocks database with web server and redis example
--------------------------------------------------------------------------------
/examples/database-with-web-server-and-redis/app/.dockerignore:
--------------------------------------------------------------------------------
1 | node_modules
--------------------------------------------------------------------------------
/examples/database-with-web-server-and-redis/app/.env.example:
--------------------------------------------------------------------------------
1 | DATABASE_USERNAME=username
2 | DATABASE_PASSWORD=password
3 | DATABASE_HOST=host
4 | DATABASE_DBNAME=dbname
5 | REDIS_PORT=port
6 | REDIS_HOST=redis_host
7 | REDIS_PASSWORD=password
8 | NODE_ENV=development
--------------------------------------------------------------------------------
/examples/database-with-web-server-and-redis/app/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM node:20.9-bookworm-slim@sha256:c325fe5059c504933948ae6483f3402f136b96492dff640ced5dfa1f72a51716 AS base
2 | RUN apt update && apt install -y --no-install-recommends dumb-init
3 | ENTRYPOINT ["dumb-init", "--"]
4 | 
5 | FROM node:20.9-bookworm@sha256:3c48678afb1ae5ca5931bd154d8c1a92a4783555331b535bbd7e0822f9ca8603 AS install
6 | WORKDIR /usr/src/app
7 | COPY package*.json . 
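# Note: copying only the package manifests before `npm ci` lets Docker cache the installed
# dependency layer, so it is rebuilt only when package*.json changes rather than on every source edit.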
8 | RUN npm ci 9 | 10 | FROM base AS configure 11 | WORKDIR /usr/src/app 12 | COPY --chown=node:node --from=install /usr/src/app/node_modules ./node_modules 13 | COPY --chown=node:node ./package.json ./tsconfig.json ./knexfile.ts ./ 14 | COPY --chown=node:node ./src ./src 15 | RUN npm run build 16 | 17 | FROM configure AS run 18 | USER node 19 | CMD npm run migrate && npm run prod 20 | -------------------------------------------------------------------------------- /examples/database-with-web-server-and-redis/app/knexfile.ts: -------------------------------------------------------------------------------- 1 | import type { Knex } from 'knex'; 2 | 3 | require('dotenv').config(); 4 | 5 | const username = process.env.DATABASE_USERNAME; 6 | const password = process.env.DATABASE_PASSWORD; 7 | const host = process.env.DATABASE_HOST; 8 | const dbName = process.env.DATABASE_DBNAME; 9 | 10 | const connectionString = `postgres://${username}:${password}@${host}:5432/${dbName}`; 11 | 12 | const knexConfig: { [key: string]: Knex.Config } = { 13 | development: { 14 | client: 'postgresql', 15 | connection: { 16 | connectionString, 17 | }, 18 | migrations: { 19 | directory: 'src/migrations', 20 | }, 21 | }, 22 | production: { 23 | client: 'postgresql', 24 | connection: { 25 | connectionString, 26 | ssl: { rejectUnauthorized: false }, 27 | }, 28 | migrations: { 29 | directory: 'src/migrations', 30 | }, 31 | }, 32 | }; 33 | 34 | export default knexConfig; 35 | -------------------------------------------------------------------------------- /examples/database-with-web-server-and-redis/app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "app", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "dist/index.js", 6 | "scripts": { 7 | "build": "tsc", 8 | "dev": "nodemon", 9 | "migrate": "npx knex migrate:latest", 10 | "prod": "node dist/src/index.js" 11 | }, 12 | "nodemonConfig": { 13 | "watch": [ 14 | "src/index.ts" 15 | ], 16 | "ext": "ts", 17 | "exec": "ts-node src/index.ts" 18 | }, 19 | "author": "", 20 | "license": "ISC", 21 | "dependencies": { 22 | "dotenv": "^16.3.1", 23 | "express": "^4.18.2", 24 | "ioredis": "^5.3.2", 25 | "knex": "^3.0.1", 26 | "pg": "^8.11.3" 27 | }, 28 | "devDependencies": { 29 | "@types/express": "^4.17.21", 30 | "@types/node": "^20.9.3", 31 | "nodemon": "^3.0.1", 32 | "ts-node": "^10.9.1", 33 | "typescript": "^5.3.2" 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /examples/database-with-web-server-and-redis/app/src/index.ts: -------------------------------------------------------------------------------- 1 | import * as express from 'express'; 2 | import knexConfig from '../knexfile'; 3 | import { Redis } from 'ioredis'; 4 | import { knex } from 'knex'; 5 | 6 | const COUNTER_KEY = 'VISIT_COUNTER'; 7 | 8 | const app = express.default(); 9 | 10 | require('dotenv').config(); 11 | 12 | const redisPort = process.env.REDIS_PORT; 13 | const redisHost = process.env.REDIS_HOST; 14 | const redisPassword = process.env.REDIS_PASSWORD; 15 | 16 | if (!redisPort || !redisHost || !redisPassword) 17 | throw new Error('Invalid redis configuration'); 18 | 19 | const redisClient = new Redis({ 20 | port: parseInt(redisPort), 21 | host: redisHost, 22 | password: redisPassword, 23 | tls: {}, 24 | }); 25 | 26 | const isProd = process.env.NODE_ENV == 'production'; 27 | const config = isProd ? 
knexConfig.production : knexConfig.development; 28 | const knexClient = knex(config); 29 | 30 | app.use(express.json()); 31 | 32 | app.use('/posts', async (req: any, res: any) => { 33 | const posts = await knexClient('posts').select('*'); 34 | 35 | const counter = await getVisitCounter(); 36 | redisClient.set(COUNTER_KEY, counter + 1); 37 | 38 | return res.json({ posts }); 39 | }); 40 | 41 | app.get('/counters/visit', async (req: any, res: any) => { 42 | const counter = await getVisitCounter(); 43 | return res.json({ visitCounter: counter }); 44 | }); 45 | 46 | async function getVisitCounter() { 47 | const counterResult = await redisClient.get(COUNTER_KEY); 48 | return counterResult ? parseInt(counterResult) : 0; 49 | } 50 | 51 | app.listen(3000, () => { 52 | console.log('App is listening on port 3000'); 53 | }); 54 | -------------------------------------------------------------------------------- /examples/database-with-web-server-and-redis/app/src/migrations/20231129230735_create_posts.ts: -------------------------------------------------------------------------------- 1 | import { Knex } from 'knex'; 2 | 3 | const TABLE_NAME = 'posts'; 4 | export const postsSeed = [ 5 | { 6 | name: 'What is Lorem Ipsum?', 7 | content: 8 | "Lorem Ipsum is simply dummy text of the printing and typesetting industry. Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book.", 9 | }, 10 | { 11 | name: 'Why do we use it?', 12 | content: 13 | "It is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout. The point of using Lorem Ipsum is that it has a more-or-less normal distribution of letters, as opposed to using 'Content here, content here', making it look like readable English.", 14 | }, 15 | { 16 | name: 'Where does it come from?', 17 | content: 18 | 'Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. 
Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source.', 19 | }, 20 | ]; 21 | 22 | export async function up(knex: Knex): Promise { 23 | await knex.schema.createTable(TABLE_NAME, table => { 24 | table.increments('id').primary(); 25 | table.string('name').notNullable(); 26 | table.string('content', 9999).notNullable(); 27 | }); 28 | 29 | return knex.batchInsert(TABLE_NAME, postsSeed); 30 | } 31 | 32 | export async function down(knex: Knex): Promise { 33 | return knex.schema.dropTable(TABLE_NAME); 34 | } 35 | -------------------------------------------------------------------------------- /examples/database-with-web-server-and-redis/app/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es6", 4 | "module": "commonjs", 5 | "esModuleInterop": true, 6 | "allowSyntheticDefaultImports": true, 7 | "noImplicitAny": true, 8 | "moduleResolution": "node", 9 | "experimentalDecorators": true, 10 | "emitDecoratorMetadata": true, 11 | "sourceMap": true, 12 | "outDir": "dist", 13 | "strict": true, 14 | "baseUrl": ".", 15 | "paths": { 16 | "*": ["node_modules/*"] 17 | } 18 | }, 19 | "include": ["*", "./src/**/*.ts"] 20 | } 21 | -------------------------------------------------------------------------------- /examples/database-with-web-server-and-redis/index.ts: -------------------------------------------------------------------------------- 1 | import { Database, Project, Redis, Services } from '@studion/infra-code-blocks'; 2 | import * as pulumi from '@pulumi/pulumi'; 3 | import * as aws from '@pulumi/aws'; 4 | import * as awsx from '@pulumi/awsx'; 5 | 6 | const databaseConfig = new pulumi.Config('database'); 7 | const username = databaseConfig.require('username'); 8 | const password = databaseConfig.require('password'); 9 | const dbName = databaseConfig.require('dbname'); 10 | 11 | const webServerImage = createWebServerImage(); 12 | 13 | const project: Project = new Project('database-project', { 14 | services: [ 15 | { 16 | type: 'DATABASE', 17 | serviceName: 'database-example', 18 | dbName: dbName, 19 | username, 20 | password, 21 | applyImmediately: true, 22 | skipFinalSnapshot: true, 23 | }, 24 | { 25 | type: 'REDIS', 26 | serviceName: 'redis', 27 | dbName: 'test-db', 28 | region: 'us-east-1', 29 | }, 30 | { 31 | type: 'WEB_SERVER', 32 | serviceName: 'web-server-example', 33 | port: 3000, 34 | image: webServerImage.imageUri, 35 | desiredCount: 1, 36 | size: 'small', 37 | autoscaling: { enabled: false }, 38 | environment: (services: Services) => { 39 | const db = services['database-example'] as Database; 40 | const redis = services['redis'] as Redis; 41 | 42 | const redisPort = redis.instance.port.apply(port => port.toString()); 43 | 44 | return [ 45 | { 46 | name: 'DATABASE_USERNAME', 47 | value: username, 48 | }, 49 | { 50 | name: 'DATABASE_HOST', 51 | value: db.instance.address, 52 | }, 53 | { 54 | name: 'DATABASE_DBNAME', 55 | value: dbName, 56 | }, 57 | { 58 | name: 'REDIS_PORT', 59 | value: redisPort, 60 | }, 61 | { 62 | name: 'REDIS_HOST', 63 | value: redis.instance.endpoint, 64 | }, 65 | { 66 | name: 'NODE_ENV', 67 | value: 'production', 68 | }, 69 | ]; 70 | }, 71 | secrets: (services: Services) => { 72 | const db = services['database-example'] as Database; 73 | const redis = services['redis'] as 
Redis; 74 | 75 | return [ 76 | { 77 | name: 'DATABASE_PASSWORD', 78 | valueFrom: db.passwordSecret.arn, 79 | }, 80 | { 81 | name: 'REDIS_PASSWORD', 82 | valueFrom: redis.passwordSecret.arn, 83 | }, 84 | ]; 85 | }, 86 | }, 87 | ], 88 | }); 89 | 90 | function createWebServerImage() { 91 | const imageRepository = new aws.ecr.Repository('repository', { 92 | forceDelete: true, 93 | }); 94 | 95 | return new awsx.ecr.Image('app', { 96 | repositoryUrl: imageRepository.repositoryUrl, 97 | context: './app', 98 | extraOptions: ['--platform', 'linux/amd64', '--ssh', 'default'], 99 | }); 100 | } 101 | 102 | export default project.name; 103 | -------------------------------------------------------------------------------- /examples/database-with-web-server-and-redis/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "database-with-web-server-and-redis", 3 | "main": "index.ts", 4 | "devDependencies": { 5 | "@types/node": "^18" 6 | }, 7 | "dependencies": { 8 | "@pulumi/pulumi": "^3.0.0", 9 | "@pulumi/aws": "^6.0.0", 10 | "@pulumi/awsx": "^2.0.2", 11 | "@studion/infra-code-blocks": "file:../../", 12 | "dotenv": "^16.3.1" 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /examples/database-with-web-server-and-redis/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "strict": true, 4 | "outDir": "bin", 5 | "target": "es2016", 6 | "module": "commonjs", 7 | "moduleResolution": "node", 8 | "sourceMap": true, 9 | "experimentalDecorators": true, 10 | "pretty": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "noImplicitReturns": true, 13 | "forceConsistentCasingInFileNames": true 14 | }, 15 | "files": ["index.ts"] 16 | } 17 | -------------------------------------------------------------------------------- /examples/mongo-with-web-server/.gitignore: -------------------------------------------------------------------------------- 1 | /bin/ 2 | /node_modules/ 3 | .env 4 | 5 | Pulumi.*.yaml -------------------------------------------------------------------------------- /examples/mongo-with-web-server/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: mongo_with_web_server 2 | runtime: nodejs 3 | description: Infra Code Blocks mongo with web server example 4 | -------------------------------------------------------------------------------- /examples/mongo-with-web-server/app/.dockerignore: -------------------------------------------------------------------------------- 1 | node_modules 2 | -------------------------------------------------------------------------------- /examples/mongo-with-web-server/app/.env.example: -------------------------------------------------------------------------------- 1 | MONGO_USERNAME=username 2 | MONGO_PASSWORD=password 3 | MONGO_HOST=host 4 | MONGO_DATABASE=dbname 5 | MONGO_PORT=port 6 | -------------------------------------------------------------------------------- /examples/mongo-with-web-server/app/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:20.9-bookworm-slim@sha256:c325fe5059c504933948ae6483f3402f136b96492dff640ced5dfa1f72a51716 AS base 2 | RUN apt update && apt install -y --no-install-recommends dumb-init 3 | ENTRYPOINT ["dumb-init", "--"] 4 | 5 | FROM node:20.9-bookworm@sha256:3c48678afb1ae5ca5931bd154d8c1a92a4783555331b535bbd7e0822f9ca8603 AS install 6 | WORKDIR 
/usr/src/app 7 | COPY package*.json . 8 | RUN npm ci 9 | 10 | FROM base AS configure 11 | WORKDIR /usr/src/app 12 | COPY --chown=node:node --from=install /usr/src/app/node_modules ./node_modules 13 | COPY --chown=node:node ./package.json ./tsconfig.json ./ 14 | COPY --chown=node:node ./src ./src 15 | RUN npm run build 16 | 17 | FROM configure AS run 18 | USER node 19 | CMD npm run prod 20 | -------------------------------------------------------------------------------- /examples/mongo-with-web-server/app/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "app", 3 | "version": "1.0.0", 4 | "description": "", 5 | "main": "dist/index.js", 6 | "scripts": { 7 | "build": "tsc", 8 | "dev": "nodemon", 9 | "prod": "node dist/index.js" 10 | }, 11 | "nodemonConfig": { 12 | "watch": [ 13 | "src/index.ts" 14 | ], 15 | "ext": "ts", 16 | "exec": "ts-node src/index.ts" 17 | }, 18 | "author": "", 19 | "license": "ISC", 20 | "dependencies": { 21 | "dotenv": "^16.3.1", 22 | "express": "^4.18.2", 23 | "mongoose": "^8.0.2" 24 | }, 25 | "devDependencies": { 26 | "@types/express": "^4.17.21", 27 | "@types/node": "^20.9.3", 28 | "nodemon": "^3.0.1", 29 | "ts-node": "^10.9.1", 30 | "typescript": "^5.3.2" 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /examples/mongo-with-web-server/app/src/index.ts: -------------------------------------------------------------------------------- 1 | import * as express from 'express'; 2 | import { connect, Schema, model } from 'mongoose'; 3 | import { posts } from './postsSeed'; 4 | 5 | require('dotenv').config(); 6 | 7 | const app = express.default(); 8 | 9 | export const init = (async () => { 10 | const username = process.env.MONGO_USERNAME; 11 | const password = process.env.MONGO_PASSWORD; 12 | const host = process.env.MONGO_HOST; 13 | const dbname = process.env.MONGO_DATABASE; 14 | const port = process.env.MONGO_PORT; 15 | 16 | const mongoConnectionString = `mongodb://${username}:${password}@${host}:${port}/${dbname}`; 17 | 18 | await connect(mongoConnectionString, { 19 | authSource: 'admin', 20 | }); 21 | 22 | const Post = await createDatabaseWithPosts(); 23 | 24 | app.use(express.json()); 25 | 26 | app.use('/posts', async (req: any, res: any) => { 27 | const posts = await Post.find(); 28 | return res.json(posts); 29 | }); 30 | 31 | app.listen(3000, () => { 32 | console.log('App is listening on port 3000'); 33 | }); 34 | 35 | async function createDatabaseWithPosts() { 36 | const postSchema = new Schema({ 37 | name: String, 38 | content: String, 39 | }); 40 | const Post = model('Post', postSchema); 41 | 42 | const existingPosts = await Post.find(); 43 | if (!existingPosts.length) { 44 | const mappedPosts = posts.map(post => new Post(post)); 45 | await Post.bulkSave(mappedPosts); 46 | } 47 | 48 | return Post; 49 | } 50 | })(); 51 | -------------------------------------------------------------------------------- /examples/mongo-with-web-server/app/src/postsSeed.ts: -------------------------------------------------------------------------------- 1 | export const posts = [ 2 | { 3 | name: 'What is Lorem Ipsum?', 4 | content: 5 | "Lorem Ipsum is simply dummy text of the printing and typesetting industry. 
Lorem Ipsum has been the industry's standard dummy text ever since the 1500s, when an unknown printer took a galley of type and scrambled it to make a type specimen book.", 6 | }, 7 | { 8 | name: 'Why do we use it?', 9 | content: 10 | "It is a long established fact that a reader will be distracted by the readable content of a page when looking at its layout. The point of using Lorem Ipsum is that it has a more-or-less normal distribution of letters, as opposed to using 'Content here, content here', making it look like readable English.", 11 | }, 12 | { 13 | name: 'Where does it come from?', 14 | content: 15 | 'Contrary to popular belief, Lorem Ipsum is not simply random text. It has roots in a piece of classical Latin literature from 45 BC, making it over 2000 years old. Richard McClintock, a Latin professor at Hampden-Sydney College in Virginia, looked up one of the more obscure Latin words, consectetur, from a Lorem Ipsum passage, and going through the cites of the word in classical literature, discovered the undoubtable source.', 16 | }, 17 | ]; 18 | -------------------------------------------------------------------------------- /examples/mongo-with-web-server/app/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "es6", 4 | "module": "commonjs", 5 | "esModuleInterop": true, 6 | "allowSyntheticDefaultImports": true, 7 | "noImplicitAny": true, 8 | "moduleResolution": "node", 9 | "experimentalDecorators": true, 10 | "emitDecoratorMetadata": true, 11 | "sourceMap": true, 12 | "outDir": "dist", 13 | "strict": true, 14 | "baseUrl": ".", 15 | "paths": { 16 | "*": ["node_modules/*"] 17 | } 18 | }, 19 | "include": ["*", "./src/**/*.ts"] 20 | } 21 | -------------------------------------------------------------------------------- /examples/mongo-with-web-server/index.ts: -------------------------------------------------------------------------------- 1 | import { Project } from '@studion/infra-code-blocks'; 2 | import * as pulumi from '@pulumi/pulumi'; 3 | import * as aws from '@pulumi/aws'; 4 | import * as awsx from '@pulumi/awsx'; 5 | 6 | const config = new pulumi.Config('mongo'); 7 | 8 | const serviceName = 'mongo-service'; 9 | 10 | const host = `${serviceName}.${serviceName}`; 11 | const username = config.require('username'); 12 | const password = config.require('password'); 13 | const database = config.require('database'); 14 | const port = parseInt(config.require('port') || '27017'); 15 | 16 | const passwordSecret = createPasswordSecret(password); 17 | 18 | const webServerImage = createWebServerImage(); 19 | 20 | const project: Project = new Project('mongo-project', { 21 | services: [ 22 | { 23 | type: 'MONGO', 24 | serviceName: serviceName, 25 | port: port, 26 | username: username, 27 | password: password, 28 | size: 'small', 29 | }, 30 | { 31 | type: 'WEB_SERVER', 32 | serviceName: 'mongo-web-server', 33 | port: 3000, 34 | image: webServerImage.imageUri, 35 | desiredCount: 1, 36 | size: 'small', 37 | autoscaling: { enabled: false }, 38 | environment: () => { 39 | return [ 40 | { 41 | name: 'MONGO_USERNAME', 42 | value: username, 43 | }, 44 | { 45 | name: 'MONGO_HOST', 46 | value: host, 47 | }, 48 | { 49 | name: 'MONGO_DATABASE', 50 | value: database, 51 | }, 52 | { 53 | name: 'MONGO_PORT', 54 | value: port.toString(), 55 | }, 56 | ]; 57 | }, 58 | secrets: [{ name: 'MONGO_PASSWORD', valueFrom: passwordSecret.arn }], 59 | }, 60 | ], 61 | }); 62 | 63 | function createWebServerImage() { 64 | const 
imageRepository = new aws.ecr.Repository('repository', { 65 | forceDelete: true, 66 | }); 67 | 68 | return new awsx.ecr.Image('app', { 69 | repositoryUrl: imageRepository.repositoryUrl, 70 | context: './app', 71 | extraOptions: ['--platform', 'linux/amd64', '--ssh', 'default'], 72 | }); 73 | } 74 | 75 | function createPasswordSecret(password: string) { 76 | const project = pulumi.getProject(); 77 | const stack = pulumi.getStack(); 78 | 79 | const passwordSecret = new aws.secretsmanager.Secret( 80 | 'mongo-password-secret', 81 | { 82 | namePrefix: `${stack}/${project}/MongoPassword-`, 83 | }, 84 | ); 85 | 86 | const passwordSecretValue = new aws.secretsmanager.SecretVersion( 87 | 'mongo-password-secret-value', 88 | { 89 | secretId: passwordSecret.id, 90 | secretString: password, 91 | }, 92 | { dependsOn: [passwordSecret] }, 93 | ); 94 | 95 | return passwordSecret; 96 | } 97 | 98 | export default project.name; 99 | -------------------------------------------------------------------------------- /examples/mongo-with-web-server/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "mongo-with-web-server", 3 | "main": "index.ts", 4 | "devDependencies": { 5 | "@types/node": "^18" 6 | }, 7 | "dependencies": { 8 | "@pulumi/aws": "^6.9.0", 9 | "@pulumi/awsx": "^2.2.0", 10 | "@studion/infra-code-blocks": "file:../../", 11 | "dotenv": "^16.3.1" 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /examples/mongo-with-web-server/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "strict": true, 4 | "outDir": "bin", 5 | "target": "es2016", 6 | "module": "commonjs", 7 | "moduleResolution": "node", 8 | "sourceMap": true, 9 | "experimentalDecorators": true, 10 | "pretty": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "noImplicitReturns": true, 13 | "forceConsistentCasingInFileNames": true 14 | }, 15 | "files": ["index.ts"] 16 | } 17 | -------------------------------------------------------------------------------- /examples/static-site/.gitignore: -------------------------------------------------------------------------------- 1 | /bin/ 2 | /node_modules/ 3 | -------------------------------------------------------------------------------- /examples/static-site/Pulumi.yaml: -------------------------------------------------------------------------------- 1 | name: static_site 2 | runtime: nodejs 3 | description: Infra Code Blocks static site example 4 | -------------------------------------------------------------------------------- /examples/static-site/index.ts: -------------------------------------------------------------------------------- 1 | import { 2 | Project, 3 | StaticSite, 4 | StaticSiteServiceOptions, 5 | } from '@studion/infra-code-blocks'; 6 | 7 | const serviceName = 'static-site-example'; 8 | 9 | const project: Project = new Project('static-site', { 10 | services: [ 11 | { 12 | type: 'STATIC_SITE', 13 | serviceName, 14 | }, 15 | ], 16 | }); 17 | 18 | export default project.name; 19 | 20 | const staticSite = project.services[serviceName] as StaticSite; 21 | export const bucket = staticSite.bucket.id; 22 | -------------------------------------------------------------------------------- /examples/static-site/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "static_site", 3 | "main": "index.ts", 4 | "scripts": { 5 | "deploy": "aws s3 sync src 
s3://$S3_SITE_BUCKET --no-progress --delete" 6 | }, 7 | "devDependencies": { 8 | "@types/node": "^18" 9 | }, 10 | "dependencies": { 11 | "@pulumi/pulumi": "^3.0.0", 12 | "@pulumi/aws": "^6.0.0", 13 | "@pulumi/awsx": "^2.0.2", 14 | "@studion/infra-code-blocks": "file:../../" 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /examples/static-site/src/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Static site example 7 | 8 | 9 | This is infra code blocks static site example 10 | 11 | 12 | -------------------------------------------------------------------------------- /examples/static-site/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "strict": true, 4 | "outDir": "bin", 5 | "target": "es2016", 6 | "module": "commonjs", 7 | "moduleResolution": "node", 8 | "sourceMap": true, 9 | "experimentalDecorators": true, 10 | "pretty": true, 11 | "noFallthroughCasesInSwitch": true, 12 | "noImplicitReturns": true, 13 | "forceConsistentCasingInFileNames": true 14 | }, 15 | "files": ["index.ts"] 16 | } 17 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@studion/infra-code-blocks", 3 | "version": "0.8.0-next.0", 4 | "description": "Studion common infra components", 5 | "keywords": [ 6 | "infrastructure", 7 | "Pulumi", 8 | "components", 9 | "Studion" 10 | ], 11 | "homepage": "https://github.com/ExtensionEngine/infra-code-blocks#readme", 12 | "bugs": { 13 | "url": "https://github.com/ExtensionEngine/infra-code-blocks/issues" 14 | }, 15 | "repository": "git+https://github.com/ExtensionEngine/infra-code-blocks.git", 16 | "license": "MIT", 17 | "author": { 18 | "name": "Studion", 19 | "email": "info@gostudion.com", 20 | "url": "https://www.gostudion.com" 21 | }, 22 | "main": "dist/index.js", 23 | "files": [ 24 | "dist", 25 | "!dist/*.tsbuildinfo" 26 | ], 27 | "scripts": { 28 | "clean": "rm -rf dist", 29 | "build": "npm run clean && tsc -b src", 30 | "format": "prettier -w .", 31 | "release": "npm run build && release-it", 32 | "test": "TS_NODE_PROJECT=tests node --test --test-concurrency=none -r ts-node/register tests/**[!build]/index.test.ts", 33 | "test:build": "npm run build && tstyche build" 34 | }, 35 | "prettier": "@studion/prettier-config", 36 | "dependencies": { 37 | "@pulumi/aws": "^6.66.3", 38 | "@pulumi/awsx": "^2.21.0", 39 | "@pulumi/pulumi": "^3.146.0", 40 | "@pulumi/random": "^4.17.0", 41 | "@pulumiverse/grafana": "^0.16.3", 42 | "@upstash/pulumi": "^0.3.14", 43 | "yaml": "^2.7.1" 44 | }, 45 | "devDependencies": { 46 | "@aws-sdk/client-acm": "^3.782.0", 47 | "@aws-sdk/client-application-auto-scaling": "^3.758.0", 48 | "@aws-sdk/client-cloudwatch-logs": "^3.767.0", 49 | "@aws-sdk/client-ec2": "^3.767.0", 50 | "@aws-sdk/client-ecs": "^3.766.0", 51 | "@aws-sdk/client-efs": "^3.758.0", 52 | "@aws-sdk/client-elastic-load-balancing-v2": "^3.764.0", 53 | "@aws-sdk/client-route-53": "^3.782.0", 54 | "@aws-sdk/client-servicediscovery": "^3.758.0", 55 | "@studion/prettier-config": "^0.1.0", 56 | "@types/node": "^22", 57 | "exponential-backoff": "^3.1.2", 58 | "http-status": "^2.1.0", 59 | "nanospinner": "^1.2.2", 60 | "pathe": "^2.0.3", 61 | "prettier": "^3.4.2", 62 | "release-it": "^18.1.1", 63 | "ts-node": "^10.9.2", 64 | "tsconfig-paths": "^4.2.0", 65 | 
"tstyche": "^4.0.0-beta.9", 66 | "typescript": "^5.7.3", 67 | "undici": "^6.21.2" 68 | }, 69 | "publishConfig": { 70 | "access": "public" 71 | }, 72 | "release-it": { 73 | "git": { 74 | "commitMessage": "chore: release ${version}" 75 | }, 76 | "github": { 77 | "release": true 78 | } 79 | } 80 | } 81 | -------------------------------------------------------------------------------- /src/components/acm-certificate.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import * as aws from '@pulumi/aws'; 3 | import { commonTags } from '../constants'; 4 | 5 | export type AcmCertificateArgs = { 6 | domain: pulumi.Input; 7 | hostedZoneId: pulumi.Input; 8 | }; 9 | 10 | export class AcmCertificate extends pulumi.ComponentResource { 11 | certificate: aws.acm.Certificate; 12 | 13 | constructor( 14 | name: string, 15 | args: AcmCertificateArgs, 16 | opts: pulumi.ComponentResourceOptions = {}, 17 | ) { 18 | super('studion:acm:Certificate', name, {}, opts); 19 | 20 | this.certificate = new aws.acm.Certificate( 21 | `${args.domain}-certificate`, 22 | { domainName: args.domain, validationMethod: 'DNS', tags: commonTags }, 23 | { parent: this }, 24 | ); 25 | 26 | const certificateValidationDomain = new aws.route53.Record( 27 | `${args.domain}-cert-validation-domain`, 28 | { 29 | name: this.certificate.domainValidationOptions[0].resourceRecordName, 30 | type: this.certificate.domainValidationOptions[0].resourceRecordType, 31 | zoneId: args.hostedZoneId, 32 | records: [ 33 | this.certificate.domainValidationOptions[0].resourceRecordValue, 34 | ], 35 | ttl: 600, 36 | }, 37 | { 38 | parent: this, 39 | deleteBeforeReplace: true, 40 | }, 41 | ); 42 | 43 | const certificateValidation = new aws.acm.CertificateValidation( 44 | `${args.domain}-cert-validation`, 45 | { 46 | certificateArn: this.certificate.arn, 47 | validationRecordFqdns: [certificateValidationDomain.fqdn], 48 | }, 49 | { parent: this }, 50 | ); 51 | 52 | this.registerOutputs(); 53 | } 54 | } 55 | -------------------------------------------------------------------------------- /src/components/database-replica.ts: -------------------------------------------------------------------------------- 1 | import * as aws from '@pulumi/aws'; 2 | import * as pulumi from '@pulumi/pulumi'; 3 | import { commonTags } from '../constants'; 4 | 5 | export type DatabaseReplicaArgs = { 6 | /** 7 | * ARN of the primary DB that we want to replicate. 8 | */ 9 | replicateSourceDb: pulumi.Input; 10 | /** 11 | * DB subnet group name. Should be the same as primary instance. 12 | * * If primary DB is instance of studion:Database, it can be accessed as 13 | * `db.dbSubnetGroup.name`. 14 | */ 15 | dbSubnetGroupName?: pulumi.Input; 16 | /** 17 | * DB security group ID. Should be the same as primary instance. 18 | * If primary DB is instance of studion:Database, it can be accessed as 19 | * `db.dbSecurityGroup.id`. 20 | */ 21 | dbSecurityGroupId: pulumi.Input; 22 | /** 23 | * IAM Monitoring role. Should be the same as primary instance. 24 | */ 25 | monitoringRole?: aws.iam.Role; 26 | /** 27 | * Specifies if the RDS instance is multi-AZ. Defaults to false. 28 | */ 29 | multiAz?: pulumi.Input; 30 | /** 31 | * Specifies whether any database modifications are applied immediately, 32 | * or during the next maintenance window. Default is false. 33 | */ 34 | applyImmediately?: pulumi.Input; 35 | /** 36 | * The allocated storage in gibibytes. Defaults to 20GB. 
37 | */ 38 | allocatedStorage?: pulumi.Input; 39 | /** 40 | * The upper limit to which Amazon RDS can automatically scale 41 | * the storage of the DB instance. Defaults to 100GB. 42 | */ 43 | maxAllocatedStorage?: pulumi.Input; 44 | /** 45 | * The instance type of the RDS instance. Defaults to 'db.t4g.micro'. 46 | */ 47 | instanceClass?: pulumi.Input; 48 | /** 49 | * The name of custom aws.rds.ParameterGroup. Setting this param will apply custom 50 | * DB parameters to this instance. 51 | */ 52 | parameterGroupName?: pulumi.Input; 53 | /** 54 | * The DB engine version. Defaults to '17.2'. 55 | */ 56 | engineVersion?: pulumi.Input; 57 | /** 58 | * A map of tags to assign to the resource. 59 | */ 60 | tags?: pulumi.Input<{ 61 | [key: string]: pulumi.Input; 62 | }>; 63 | }; 64 | 65 | const defaults = { 66 | multiAz: false, 67 | applyImmediately: false, 68 | allocatedStorage: 20, 69 | maxAllocatedStorage: 100, 70 | instanceClass: 'db.t4g.micro', 71 | enableMonitoring: false, 72 | engineVersion: '17.2', 73 | }; 74 | 75 | export class DatabaseReplica extends pulumi.ComponentResource { 76 | name: string; 77 | instance: aws.rds.Instance; 78 | monitoringRole?: aws.iam.Role; 79 | 80 | constructor( 81 | name: string, 82 | args: DatabaseReplicaArgs, 83 | opts: pulumi.ComponentResourceOptions = {}, 84 | ) { 85 | super('studion:DatabaseReplica', name, {}, opts); 86 | 87 | this.name = name; 88 | 89 | const argsWithDefaults = Object.assign({}, defaults, args); 90 | this.monitoringRole = argsWithDefaults.monitoringRole; 91 | this.instance = this.createDatabaseInstance(args); 92 | 93 | this.registerOutputs(); 94 | } 95 | 96 | private createDatabaseInstance(args: DatabaseReplicaArgs) { 97 | const argsWithDefaults = Object.assign({}, defaults, args); 98 | const stack = pulumi.getStack(); 99 | 100 | const monitoringOptions = 101 | argsWithDefaults.enableMonitoring && this.monitoringRole 102 | ? 
{ 103 | monitoringInterval: 60, 104 | monitoringRoleArn: this.monitoringRole.arn, 105 | performanceInsightsEnabled: true, 106 | performanceInsightsRetentionPeriod: 7, 107 | } 108 | : {}; 109 | 110 | const instance = new aws.rds.Instance( 111 | `${this.name}-rds`, 112 | { 113 | identifierPrefix: `${this.name}-`, 114 | engine: 'postgres', 115 | engineVersion: argsWithDefaults.engineVersion, 116 | allocatedStorage: argsWithDefaults.allocatedStorage, 117 | maxAllocatedStorage: argsWithDefaults.maxAllocatedStorage, 118 | instanceClass: argsWithDefaults.instanceClass, 119 | dbSubnetGroupName: argsWithDefaults.dbSubnetGroupName, 120 | vpcSecurityGroupIds: [argsWithDefaults.dbSecurityGroupId], 121 | storageEncrypted: true, 122 | multiAz: argsWithDefaults.multiAz, 123 | publiclyAccessible: false, 124 | applyImmediately: argsWithDefaults.applyImmediately, 125 | autoMinorVersionUpgrade: true, 126 | maintenanceWindow: 'Mon:07:00-Mon:07:30', 127 | replicateSourceDb: argsWithDefaults.replicateSourceDb, 128 | parameterGroupName: argsWithDefaults.parameterGroupName, 129 | skipFinalSnapshot: true, 130 | ...monitoringOptions, 131 | tags: { ...commonTags, ...argsWithDefaults.tags }, 132 | }, 133 | { parent: this } 134 | ); 135 | return instance; 136 | } 137 | } 138 | -------------------------------------------------------------------------------- /src/components/database.ts: -------------------------------------------------------------------------------- 1 | import * as aws from '@pulumi/aws'; 2 | import * as pulumi from '@pulumi/pulumi'; 3 | import { Password } from './password'; 4 | import { commonTags } from '../constants'; 5 | 6 | export type DatabaseArgs = { 7 | /** 8 | * The name of the database to create when the DB instance is created. 9 | */ 10 | dbName: pulumi.Input; 11 | /** 12 | * Username for the master DB user. 13 | */ 14 | username: pulumi.Input; 15 | vpcId: pulumi.Input; 16 | isolatedSubnetIds: pulumi.Input[]>; 17 | /** 18 | * The IPv4 CIDR block for the VPC. 19 | */ 20 | vpcCidrBlock: pulumi.Input; 21 | /** 22 | * Specifies if the RDS instance is multi-AZ. Defaults to false. 23 | */ 24 | multiAz?: pulumi.Input; 25 | /** 26 | * Password for the master DB user. If not specified it will be autogenerated. 27 | * The value will be stored as a secret in AWS Secret Manager. 28 | */ 29 | password?: pulumi.Input; 30 | /** 31 | * Specifies whether any database modifications are applied immediately, 32 | * or during the next maintenance window. Default is false. 33 | */ 34 | applyImmediately?: pulumi.Input; 35 | /** 36 | * Determines whether a final DB snapshot is created before the DB 37 | * instance is deleted. Defaults to false. 38 | */ 39 | skipFinalSnapshot?: pulumi.Input; 40 | /** 41 | * The allocated storage in gibibytes. Defaults to 20GB. 42 | */ 43 | allocatedStorage?: pulumi.Input; 44 | /** 45 | * The upper limit to which Amazon RDS can automatically scale 46 | * the storage of the DB instance. Defaults to 100GB. 47 | */ 48 | maxAllocatedStorage?: pulumi.Input; 49 | /** 50 | * The instance type of the RDS instance. Defaults to 'db.t4g.micro'. 51 | */ 52 | instanceClass?: pulumi.Input; 53 | /** 54 | * Set this to true to enable database monitoring. Defaults to false. 55 | */ 56 | enableMonitoring?: pulumi.Input; 57 | /** 58 | * Set this to true to allow major version upgrades, for example when creating 59 | * db from the snapshot. Defaults to false. 60 | */ 61 | allowMajorVersionUpgrade?: pulumi.Input; 62 | /** 63 | * The name of custom aws.rds.ParameterGroup. 
Setting this param will apply custom 64 | * DB parameters to this instance. 65 | */ 66 | parameterGroupName?: pulumi.Input; 67 | /** 68 | * Specifies whether to create this database from a snapshot. 69 | * This correlates to the snapshot ID you'd find in the RDS console, 70 | * e.g: rds:production-2015-06-26-06-05. 71 | */ 72 | snapshotIdentifier?: pulumi.Input; 73 | /** 74 | * The DB engine version. Defaults to '17.2'. 75 | */ 76 | engineVersion?: pulumi.Input; 77 | /** 78 | * A map of tags to assign to the resource. 79 | */ 80 | tags?: pulumi.Input<{ 81 | [key: string]: pulumi.Input; 82 | }>; 83 | }; 84 | 85 | const defaults = { 86 | multiAz: false, 87 | applyImmediately: false, 88 | skipFinalSnapshot: false, 89 | allocatedStorage: 20, 90 | maxAllocatedStorage: 100, 91 | instanceClass: 'db.t4g.micro', 92 | enableMonitoring: false, 93 | allowMajorVersionUpgrade: false, 94 | engineVersion: '17.2', 95 | }; 96 | 97 | export class Database extends pulumi.ComponentResource { 98 | name: string; 99 | instance: aws.rds.Instance; 100 | kms: aws.kms.Key; 101 | dbSubnetGroup: aws.rds.SubnetGroup; 102 | dbSecurityGroup: aws.ec2.SecurityGroup; 103 | password: Password; 104 | encryptedSnapshotCopy?: aws.rds.SnapshotCopy; 105 | monitoringRole?: aws.iam.Role; 106 | 107 | constructor( 108 | name: string, 109 | args: DatabaseArgs, 110 | opts: pulumi.ComponentResourceOptions = {}, 111 | ) { 112 | super('studion:Database', name, {}, opts); 113 | 114 | this.name = name; 115 | 116 | const argsWithDefaults = Object.assign({}, defaults, args); 117 | const { 118 | vpcId, 119 | isolatedSubnetIds, 120 | vpcCidrBlock, 121 | enableMonitoring, 122 | snapshotIdentifier, 123 | } = argsWithDefaults; 124 | this.dbSubnetGroup = this.createSubnetGroup({ isolatedSubnetIds }); 125 | this.dbSecurityGroup = this.createSecurityGroup({ vpcId, vpcCidrBlock }); 126 | this.kms = this.createEncryptionKey(); 127 | this.password = new Password( 128 | `${this.name}-database-password`, 129 | { value: args.password }, 130 | { parent: this }, 131 | ); 132 | if (enableMonitoring) { 133 | this.monitoringRole = this.createMonitoringRole(); 134 | } 135 | if (snapshotIdentifier) { 136 | this.encryptedSnapshotCopy = 137 | this.createEncryptedSnapshotCopy(snapshotIdentifier); 138 | } 139 | this.instance = this.createDatabaseInstance(args); 140 | 141 | this.registerOutputs(); 142 | } 143 | 144 | private createSubnetGroup({ 145 | isolatedSubnetIds, 146 | }: Pick) { 147 | const dbSubnetGroup = new aws.rds.SubnetGroup( 148 | `${this.name}-subnet-group`, 149 | { 150 | subnetIds: isolatedSubnetIds, 151 | tags: commonTags, 152 | }, 153 | { parent: this }, 154 | ); 155 | return dbSubnetGroup; 156 | } 157 | 158 | private createSecurityGroup({ 159 | vpcId, 160 | vpcCidrBlock, 161 | }: Pick) { 162 | const dbSecurityGroup = new aws.ec2.SecurityGroup( 163 | `${this.name}-security-group`, 164 | { 165 | vpcId, 166 | ingress: [ 167 | { 168 | protocol: 'tcp', 169 | fromPort: 5432, 170 | toPort: 5432, 171 | cidrBlocks: [vpcCidrBlock], 172 | }, 173 | ], 174 | tags: commonTags, 175 | }, 176 | { parent: this }, 177 | ); 178 | return dbSecurityGroup; 179 | } 180 | 181 | private createEncryptionKey() { 182 | const kms = new aws.kms.Key( 183 | `${this.name}-rds-key`, 184 | { 185 | description: `${this.name} RDS encryption key`, 186 | customerMasterKeySpec: 'SYMMETRIC_DEFAULT', 187 | isEnabled: true, 188 | keyUsage: 'ENCRYPT_DECRYPT', 189 | multiRegion: false, 190 | enableKeyRotation: true, 191 | tags: commonTags, 192 | }, 193 | { parent: this }, 194 | ); 195 | return 
kms; 196 | } 197 | 198 | private createMonitoringRole() { 199 | const monitoringRole = new aws.iam.Role(`${this.name}-rds-monitoring`, { 200 | assumeRolePolicy: { 201 | Version: '2012-10-17', 202 | Statement: [ 203 | { 204 | Action: 'sts:AssumeRole', 205 | Effect: 'Allow', 206 | Principal: { 207 | Service: 'monitoring.rds.amazonaws.com', 208 | }, 209 | }, 210 | ], 211 | }, 212 | }); 213 | 214 | new aws.iam.RolePolicyAttachment( 215 | `${this.name}-rds-monitoring-role-attachment`, 216 | { 217 | role: monitoringRole.name, 218 | policyArn: 219 | 'arn:aws:iam::aws:policy/service-role/AmazonRDSEnhancedMonitoringRole', 220 | }, 221 | ); 222 | 223 | return monitoringRole; 224 | } 225 | 226 | private createEncryptedSnapshotCopy( 227 | snapshotIdentifier: NonNullable, 228 | ) { 229 | const encryptedSnapshotCopy = new aws.rds.SnapshotCopy( 230 | `${this.name}-encrypted-snapshot-copy`, 231 | { 232 | sourceDbSnapshotIdentifier: snapshotIdentifier, 233 | targetDbSnapshotIdentifier: `${snapshotIdentifier}-encrypted-copy`, 234 | kmsKeyId: this.kms.arn, 235 | }, 236 | { parent: this }, 237 | ); 238 | return encryptedSnapshotCopy; 239 | } 240 | 241 | private createDatabaseInstance(args: DatabaseArgs) { 242 | const argsWithDefaults = Object.assign({}, defaults, args); 243 | const stack = pulumi.getStack(); 244 | 245 | const monitoringOptions = 246 | argsWithDefaults.enableMonitoring && this.monitoringRole 247 | ? { 248 | monitoringInterval: 60, 249 | monitoringRoleArn: this.monitoringRole.arn, 250 | performanceInsightsEnabled: true, 251 | performanceInsightsRetentionPeriod: 7, 252 | } 253 | : {}; 254 | 255 | const instance = new aws.rds.Instance( 256 | `${this.name}-rds`, 257 | { 258 | identifierPrefix: `${this.name}-`, 259 | engine: 'postgres', 260 | engineVersion: argsWithDefaults.engineVersion, 261 | allocatedStorage: argsWithDefaults.allocatedStorage, 262 | maxAllocatedStorage: argsWithDefaults.maxAllocatedStorage, 263 | instanceClass: argsWithDefaults.instanceClass, 264 | dbName: argsWithDefaults.dbName, 265 | username: argsWithDefaults.username, 266 | password: this.password.value, 267 | dbSubnetGroupName: this.dbSubnetGroup.name, 268 | vpcSecurityGroupIds: [this.dbSecurityGroup.id], 269 | storageEncrypted: true, 270 | kmsKeyId: this.kms.arn, 271 | multiAz: argsWithDefaults.multiAz, 272 | publiclyAccessible: false, 273 | skipFinalSnapshot: argsWithDefaults.skipFinalSnapshot, 274 | applyImmediately: argsWithDefaults.applyImmediately, 275 | autoMinorVersionUpgrade: true, 276 | maintenanceWindow: 'Mon:07:00-Mon:07:30', 277 | finalSnapshotIdentifier: `${this.name}-final-snapshot-${stack}`, 278 | backupWindow: '06:00-06:30', 279 | backupRetentionPeriod: 14, 280 | caCertIdentifier: 'rds-ca-rsa2048-g1', 281 | parameterGroupName: argsWithDefaults.parameterGroupName, 282 | allowMajorVersionUpgrade: argsWithDefaults.allowMajorVersionUpgrade, 283 | snapshotIdentifier: 284 | this.encryptedSnapshotCopy?.targetDbSnapshotIdentifier, 285 | ...monitoringOptions, 286 | tags: { ...commonTags, ...argsWithDefaults.tags }, 287 | }, 288 | { parent: this, dependsOn: [this.password] }, 289 | ); 290 | return instance; 291 | } 292 | } 293 | -------------------------------------------------------------------------------- /src/components/ec2-ssm-connect.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import * as aws from '@pulumi/aws'; 3 | import { commonTags } from '../constants'; 4 | 5 | const config = new pulumi.Config('aws'); 6 | const 
awsRegion = config.require('region'); 7 | 8 | export type Ec2SSMConnectArgs = { 9 | vpcId: pulumi.Input; 10 | privateSubnetId: pulumi.Input; 11 | /** 12 | * The IPv4 CIDR block for the VPC. 13 | */ 14 | vpcCidrBlock: pulumi.Input; 15 | tags?: pulumi.Input<{ 16 | [key: string]: pulumi.Input; 17 | }>; 18 | }; 19 | 20 | export class Ec2SSMConnect extends pulumi.ComponentResource { 21 | ec2SecurityGroup: aws.ec2.SecurityGroup; 22 | ssmVpcEndpoint: aws.ec2.VpcEndpoint; 23 | ec2MessagesVpcEndpoint: aws.ec2.VpcEndpoint; 24 | ssmMessagesVpcEndpoint: aws.ec2.VpcEndpoint; 25 | ec2: aws.ec2.Instance; 26 | 27 | constructor( 28 | name: string, 29 | args: Ec2SSMConnectArgs, 30 | opts: pulumi.ComponentResourceOptions = {}, 31 | ) { 32 | super('studion:Ec2BastionSSMConnect', name, {}, opts); 33 | 34 | const subnetId = args.privateSubnetId; 35 | 36 | const AmazonLinux2023_ARM_EC2_AMI = aws.ec2.getAmiOutput({ 37 | filters: [ 38 | { name: 'architecture', values: ['arm64'] }, 39 | { name: 'root-device-type', values: ['ebs'] }, 40 | { name: 'virtualization-type', values: ['hvm'] }, 41 | { name: 'ena-support', values: ['true'] }, 42 | ], 43 | owners: ['amazon'], 44 | // TODO: Improve this nameRegex property. Use * for kernel version. 45 | // https://docs.aws.amazon.com/linux/al2023/ug/ec2.html 46 | nameRegex: 'al2023-ami-20[0-9]+.*-kernel-6.1-arm64', 47 | mostRecent: true, 48 | }); 49 | 50 | this.ec2SecurityGroup = new aws.ec2.SecurityGroup( 51 | `${name}-ec2-security-group`, 52 | { 53 | ingress: [ 54 | { 55 | protocol: 'tcp', 56 | fromPort: 22, 57 | toPort: 22, 58 | cidrBlocks: [args.vpcCidrBlock], 59 | }, 60 | { 61 | protocol: 'tcp', 62 | fromPort: 443, 63 | toPort: 443, 64 | cidrBlocks: [args.vpcCidrBlock], 65 | }, 66 | ], 67 | egress: [ 68 | { protocol: '-1', fromPort: 0, toPort: 0, cidrBlocks: ['0.0.0.0/0'] }, 69 | ], 70 | vpcId: args.vpcId, 71 | tags: commonTags, 72 | }, 73 | { parent: this }, 74 | ); 75 | 76 | const role = new aws.iam.Role( 77 | `${name}-ec2-role`, 78 | { 79 | assumeRolePolicy: { 80 | Version: '2012-10-17', 81 | Statement: [ 82 | { 83 | Effect: 'Allow', 84 | Principal: { 85 | Service: 'ec2.amazonaws.com', 86 | }, 87 | Action: 'sts:AssumeRole', 88 | }, 89 | ], 90 | }, 91 | tags: commonTags, 92 | }, 93 | { parent: this }, 94 | ); 95 | 96 | const ssmPolicyAttachment = new aws.iam.RolePolicyAttachment( 97 | `${name}-ssm-policy-attachment`, 98 | { 99 | role: role.name, 100 | policyArn: 'arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore', 101 | }, 102 | { parent: this }, 103 | ); 104 | 105 | const ssmProfile = new aws.iam.InstanceProfile( 106 | `${name}-ssm-profile`, 107 | { 108 | role: role.name, 109 | tags: commonTags, 110 | }, 111 | { parent: this, dependsOn: [ssmPolicyAttachment] }, 112 | ); 113 | 114 | this.ec2 = new aws.ec2.Instance( 115 | `${name}-ec2`, 116 | { 117 | ami: AmazonLinux2023_ARM_EC2_AMI.id, 118 | associatePublicIpAddress: false, 119 | instanceType: 't4g.nano', 120 | iamInstanceProfile: ssmProfile.name, 121 | subnetId, 122 | vpcSecurityGroupIds: [this.ec2SecurityGroup.id], 123 | tags: { 124 | ...commonTags, 125 | Name: `${name}-ec2`, 126 | ...args.tags, 127 | }, 128 | }, 129 | { parent: this }, 130 | ); 131 | 132 | this.ssmVpcEndpoint = new aws.ec2.VpcEndpoint( 133 | `${name}-ssm-vpc-endpoint`, 134 | { 135 | vpcId: args.vpcId, 136 | ipAddressType: 'ipv4', 137 | serviceName: `com.amazonaws.${awsRegion}.ssm`, 138 | vpcEndpointType: 'Interface', 139 | subnetIds: [subnetId], 140 | securityGroupIds: [this.ec2SecurityGroup.id], 141 | privateDnsEnabled: true, 142 | tags: 
commonTags, 143 | }, 144 | { parent: this, dependsOn: [this.ec2] }, 145 | ); 146 | 147 | this.ec2MessagesVpcEndpoint = new aws.ec2.VpcEndpoint( 148 | `${name}-ec2messages-vpc-endpoint`, 149 | { 150 | vpcId: args.vpcId, 151 | ipAddressType: 'ipv4', 152 | serviceName: `com.amazonaws.${awsRegion}.ec2messages`, 153 | vpcEndpointType: 'Interface', 154 | subnetIds: [subnetId], 155 | securityGroupIds: [this.ec2SecurityGroup.id], 156 | privateDnsEnabled: true, 157 | tags: commonTags, 158 | }, 159 | { parent: this, dependsOn: [this.ec2] }, 160 | ); 161 | 162 | this.ssmMessagesVpcEndpoint = new aws.ec2.VpcEndpoint( 163 | `${name}-ssmmessages-vpc-endpoint`, 164 | { 165 | vpcId: args.vpcId, 166 | ipAddressType: 'ipv4', 167 | serviceName: `com.amazonaws.${awsRegion}.ssmmessages`, 168 | vpcEndpointType: 'Interface', 169 | subnetIds: [subnetId], 170 | securityGroupIds: [this.ec2SecurityGroup.id], 171 | privateDnsEnabled: true, 172 | tags: commonTags, 173 | }, 174 | { parent: this, dependsOn: [this.ec2] }, 175 | ); 176 | 177 | this.registerOutputs(); 178 | } 179 | } 180 | -------------------------------------------------------------------------------- /src/components/mongo.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import { EcsService, EcsServiceArgs } from './ecs-service'; 3 | import { Password } from './password'; 4 | 5 | export type MongoArgs = Pick< 6 | EcsServiceArgs, 7 | 'size' | 'clusterId' | 'clusterName' | 'vpcId' | 'vpcCidrBlock' | 'tags' 8 | > & { 9 | privateSubnetIds: pulumi.Input[]>; 10 | /** 11 | * Username for the master DB user. 12 | */ 13 | username: pulumi.Input; 14 | /** 15 | * Password for the master DB user. If not specified it will be autogenerated. 16 | * The value will be stored as a secret in AWS Secret Manager. 17 | */ 18 | password?: pulumi.Input; 19 | /** 20 | * Mongo Docker image. Defaults to mongo:7.0.3. 21 | */ 22 | image?: pulumi.Input; 23 | /** 24 | * Exposed service port. Defaults to 27017. 25 | */ 26 | port?: pulumi.Input; 27 | /** 28 | * Configuration for persistent storage using EFS volumes. 29 | * By default, creates a volume named 'mongo' mounted at '/data/db'. 30 | * You can override this by providing your own volume and mount point configuration. 
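   * Example override (illustrative volume name):
   * `{ volumes: [{ name: 'mongo-data' }], mountPoints: [{ sourceVolume: 'mongo-data', containerPath: '/data/db' }] }`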
31 | */ 32 | persistentStorageConfig?: EcsServiceArgs['persistentStorageConfig']; 33 | }; 34 | 35 | export class Mongo extends pulumi.ComponentResource { 36 | readonly name: string; 37 | readonly username: pulumi.Output; 38 | readonly port: pulumi.Output; 39 | readonly host: pulumi.Output; 40 | readonly service: EcsService; 41 | readonly password: Password; 42 | 43 | constructor( 44 | name: string, 45 | args: MongoArgs, 46 | opts: pulumi.ComponentResourceOptions = {}, 47 | ) { 48 | super('studion:Mongo', name, args, opts); 49 | 50 | const image = 51 | args.image || 52 | 'mongo:7.0.3@sha256:238b1636bdd7820c752b91bec8a669f92568eb313ad89a1fc4a92903c1b40489'; 53 | const port = args.port || 27017; 54 | const persistentStorageConfig = args.persistentStorageConfig || { 55 | volumes: [{ name: 'mongo' }], 56 | mountPoints: [{ 57 | sourceVolume: 'mongo', 58 | containerPath: '/data/db' 59 | }] 60 | }; 61 | 62 | const { username, password, privateSubnetIds, ...ecsServiceArgs } = args; 63 | 64 | this.name = name; 65 | this.host = pulumi.output(`${name}.${name}`); 66 | this.username = pulumi.output(username); 67 | this.port = pulumi.output(port); 68 | 69 | this.password = new Password( 70 | `${this.name}-mongo-password`, 71 | { value: password }, 72 | { parent: this }, 73 | ); 74 | 75 | this.service = new EcsService( 76 | name, 77 | { 78 | ...ecsServiceArgs, 79 | port, 80 | image, 81 | desiredCount: 1, 82 | autoscaling: { enabled: false }, 83 | enableServiceAutoDiscovery: true, 84 | persistentStorageConfig, 85 | dockerCommand: ['mongod', '--port', port.toString()], 86 | assignPublicIp: false, 87 | subnetIds: privateSubnetIds, 88 | environment: [ 89 | { 90 | name: 'MONGO_INITDB_ROOT_USERNAME', 91 | value: username, 92 | }, 93 | ], 94 | secrets: [ 95 | { 96 | name: 'MONGO_INITDB_ROOT_PASSWORD', 97 | valueFrom: this.password.secret.arn, 98 | }, 99 | ], 100 | }, 101 | { ...opts, parent: this }, 102 | ); 103 | 104 | this.registerOutputs(); 105 | } 106 | } 107 | -------------------------------------------------------------------------------- /src/components/password.ts: -------------------------------------------------------------------------------- 1 | import * as aws from '@pulumi/aws'; 2 | import * as pulumi from '@pulumi/pulumi'; 3 | import * as random from '@pulumi/random'; 4 | import { commonTags } from '../constants'; 5 | 6 | export type PasswordArgs = { 7 | value?: pulumi.Input; 8 | }; 9 | 10 | export class Password extends pulumi.ComponentResource { 11 | name: string; 12 | value: pulumi.Output; 13 | secret: aws.secretsmanager.Secret; 14 | 15 | constructor( 16 | name: string, 17 | args: PasswordArgs, 18 | opts: pulumi.ComponentResourceOptions = {}, 19 | ) { 20 | const optsWithDefauls = pulumi.mergeOptions(opts, { 21 | additionalSecretOutputs: ['value'], 22 | }); 23 | super('studion:Password', name, {}, optsWithDefauls); 24 | 25 | this.name = name; 26 | if (args.value) { 27 | this.value = pulumi.output(args.value); 28 | } else { 29 | const password = new random.RandomPassword( 30 | `${this.name}-random-password`, 31 | { 32 | length: 16, 33 | overrideSpecial: '_$', 34 | special: true, 35 | }, 36 | { parent: this }, 37 | ); 38 | this.value = password.result; 39 | } 40 | 41 | this.secret = this.createPasswordSecret(this.value); 42 | this.registerOutputs(); 43 | } 44 | 45 | private createPasswordSecret(password: pulumi.Input) { 46 | const project = pulumi.getProject(); 47 | const stack = pulumi.getStack(); 48 | 49 | const passwordSecret = new aws.secretsmanager.Secret( 50 | `${this.name}-password-secret`, 
51 | { 52 | namePrefix: `${stack}/${project}/${this.name}-`, 53 | tags: commonTags, 54 | }, 55 | { parent: this }, 56 | ); 57 | 58 | const passwordSecretValue = new aws.secretsmanager.SecretVersion( 59 | `${this.name}-password-secret-value`, 60 | { 61 | secretId: passwordSecret.id, 62 | secretString: password, 63 | }, 64 | { parent: this, dependsOn: [passwordSecret] }, 65 | ); 66 | 67 | return passwordSecret; 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/components/project.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import * as aws from '@pulumi/aws'; 3 | import * as awsx from '@pulumi/awsx'; 4 | import * as upstash from '@upstash/pulumi'; 5 | import { Database, DatabaseArgs } from './database'; 6 | import { WebServer, WebServerArgs } from './web-server'; 7 | import { Mongo, MongoArgs } from './mongo'; 8 | import { Redis, RedisArgs } from './redis'; 9 | import { StaticSite, StaticSiteArgs } from './static-site'; 10 | import { Ec2SSMConnect } from './ec2-ssm-connect'; 11 | import { commonTags } from '../constants'; 12 | import { EcsService, EcsServiceArgs } from './ecs-service'; 13 | import { NuxtSSR, NuxtSSRArgs } from './nuxt-ssr'; 14 | 15 | export type Service = 16 | | Database 17 | | Redis 18 | | StaticSite 19 | | WebServer 20 | | NuxtSSR 21 | | Mongo 22 | | EcsService; 23 | export type Services = Record; 24 | 25 | type ServiceArgs = { 26 | /** 27 | * The unique name for the service. 28 | */ 29 | serviceName: string; 30 | }; 31 | 32 | export type DatabaseServiceOptions = { type: 'DATABASE' } & ServiceArgs & 33 | Omit; 34 | 35 | export type RedisServiceOptions = { type: 'REDIS' } & ServiceArgs & RedisArgs; 36 | 37 | export type StaticSiteServiceOptions = { type: 'STATIC_SITE' } & ServiceArgs & 38 | StaticSiteArgs; 39 | 40 | export type WebServerServiceOptions = { 41 | type: 'WEB_SERVER'; 42 | environment?: 43 | | aws.ecs.KeyValuePair[] 44 | | ((services: Services) => aws.ecs.KeyValuePair[]); 45 | secrets?: aws.ecs.Secret[] | ((services: Services) => aws.ecs.Secret[]); 46 | } & ServiceArgs & 47 | Omit< 48 | WebServerArgs, 49 | | 'clusterId' 50 | | 'clusterName' 51 | | 'vpcId' 52 | | 'vpcCidrBlock' 53 | | 'publicSubnetIds' 54 | | 'environment' 55 | | 'secrets' 56 | >; 57 | 58 | export type NuxtSSRServiceOptions = { 59 | type: 'NUXT_SSR'; 60 | environment?: 61 | | aws.ecs.KeyValuePair[] 62 | | ((services: Services) => aws.ecs.KeyValuePair[]); 63 | secrets?: aws.ecs.Secret[] | ((services: Services) => aws.ecs.Secret[]); 64 | } & ServiceArgs & 65 | Omit< 66 | NuxtSSRArgs, 67 | | 'clusterId' 68 | | 'clusterName' 69 | | 'vpcId' 70 | | 'vpcCidrBlock' 71 | | 'publicSubnetIds' 72 | | 'environment' 73 | | 'secrets' 74 | >; 75 | 76 | export type MongoServiceOptions = { 77 | type: 'MONGO'; 78 | } & ServiceArgs & 79 | Omit< 80 | MongoArgs, 81 | | 'clusterId' 82 | | 'clusterName' 83 | | 'vpcId' 84 | | 'vpcCidrBlock' 85 | | 'privateSubnetIds' 86 | | 'environment' 87 | | 'secrets' 88 | >; 89 | 90 | export type EcsServiceOptions = { 91 | type: 'ECS_SERVICE'; 92 | environment?: 93 | | aws.ecs.KeyValuePair[] 94 | | ((services: Services) => aws.ecs.KeyValuePair[]); 95 | secrets?: aws.ecs.Secret[] | ((services: Services) => aws.ecs.Secret[]); 96 | } & ServiceArgs & 97 | Omit< 98 | EcsServiceArgs, 99 | | 'clusterId' 100 | | 'clusterName' 101 | | 'vpcId' 102 | | 'vpcCidrBlock' 103 | | 'subnetIds' 104 | | 'environment' 105 | | 'secrets' 106 | >; 107 | 108 | export 
type ProjectArgs = { 109 | services: ( 110 | | DatabaseServiceOptions 111 | | RedisServiceOptions 112 | | StaticSiteServiceOptions 113 | | WebServerServiceOptions 114 | | NuxtSSRServiceOptions 115 | | MongoServiceOptions 116 | | EcsServiceOptions 117 | )[]; 118 | enableSSMConnect?: pulumi.Input; 119 | numberOfAvailabilityZones?: number; 120 | }; 121 | 122 | export class MissingEcsCluster extends Error { 123 | constructor() { 124 | super('Ecs Cluster does not exist'); 125 | this.name = this.constructor.name; 126 | } 127 | } 128 | 129 | export class Project extends pulumi.ComponentResource { 130 | name: string; 131 | vpc: awsx.ec2.Vpc; 132 | cluster?: aws.ecs.Cluster; 133 | upstashProvider?: upstash.Provider; 134 | ec2SSMConnect?: Ec2SSMConnect; 135 | services: Services = {}; 136 | 137 | constructor( 138 | name: string, 139 | args: ProjectArgs, 140 | opts: pulumi.ComponentResourceOptions = {}, 141 | ) { 142 | super('studion:Project', name, {}, opts); 143 | this.name = name; 144 | 145 | this.vpc = this.createVpc(args.numberOfAvailabilityZones); 146 | this.createServices(args.services); 147 | 148 | if (args.enableSSMConnect) { 149 | this.ec2SSMConnect = new Ec2SSMConnect(`${name}-ssm-connect`, { 150 | vpcId: this.vpc.vpcId, 151 | privateSubnetId: this.vpc.privateSubnetIds.apply(ids => ids[0]), 152 | vpcCidrBlock: this.vpc.vpc.cidrBlock, 153 | }); 154 | } 155 | 156 | this.registerOutputs(); 157 | } 158 | 159 | private createVpc( 160 | numberOfAvailabilityZones: ProjectArgs['numberOfAvailabilityZones'] = 2, 161 | ) { 162 | const vpc = new awsx.ec2.Vpc( 163 | `${this.name}-vpc`, 164 | { 165 | numberOfAvailabilityZones, 166 | enableDnsHostnames: true, 167 | enableDnsSupport: true, 168 | subnetSpecs: [ 169 | { type: awsx.ec2.SubnetType.Public, cidrMask: 24 }, 170 | { type: awsx.ec2.SubnetType.Private, cidrMask: 24 }, 171 | { type: awsx.ec2.SubnetType.Isolated, cidrMask: 24 }, 172 | ], 173 | tags: commonTags, 174 | }, 175 | { parent: this }, 176 | ); 177 | return vpc; 178 | } 179 | 180 | private createServices(services: ProjectArgs['services']) { 181 | const hasRedisService = services.some(it => it.type === 'REDIS'); 182 | const shouldCreateEcsCluster = 183 | services.some( 184 | it => 185 | it.type === 'WEB_SERVER' || 186 | it.type === 'NUXT_SSR' || 187 | it.type === 'MONGO' || 188 | it.type === 'ECS_SERVICE', 189 | ) && !this.cluster; 190 | if (hasRedisService) this.createRedisPrerequisites(); 191 | if (shouldCreateEcsCluster) this.createEcsCluster(); 192 | services.forEach(it => { 193 | if (it.type === 'DATABASE') this.createDatabaseService(it); 194 | if (it.type === 'REDIS') this.createRedisService(it); 195 | if (it.type === 'STATIC_SITE') this.createStaticSiteService(it); 196 | if (it.type === 'WEB_SERVER') this.createWebServerService(it); 197 | if (it.type === 'NUXT_SSR') this.createNuxtSSRService(it); 198 | if (it.type === 'MONGO') this.createMongoService(it); 199 | if (it.type === 'ECS_SERVICE') this.createEcsService(it); 200 | }); 201 | } 202 | 203 | private createRedisPrerequisites() { 204 | const upstashConfig = new pulumi.Config('upstash'); 205 | 206 | this.upstashProvider = new upstash.Provider('upstash', { 207 | email: upstashConfig.requireSecret('email'), 208 | apiKey: upstashConfig.requireSecret('apiKey'), 209 | }); 210 | } 211 | 212 | private createEcsCluster() { 213 | const stack = pulumi.getStack(); 214 | this.cluster = new aws.ecs.Cluster( 215 | `${this.name}-cluster`, 216 | { 217 | name: `${this.name}-${stack}`, 218 | tags: commonTags, 219 | }, 220 | { parent: this }, 221 | ); 
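    // Note: the cluster is created once and shared by every WEB_SERVER, NUXT_SSR,
    // MONGO, and ECS_SERVICE entry configured for this project (see createServices).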
222 | } 223 | 224 | private createDatabaseService(options: DatabaseServiceOptions) { 225 | const { serviceName, type, ...databaseOptions } = options; 226 | const service = new Database( 227 | serviceName, 228 | { 229 | ...databaseOptions, 230 | vpcId: this.vpc.vpcId, 231 | isolatedSubnetIds: this.vpc.isolatedSubnetIds, 232 | vpcCidrBlock: this.vpc.vpc.cidrBlock, 233 | }, 234 | { parent: this }, 235 | ); 236 | this.services[serviceName] = service; 237 | } 238 | 239 | private createRedisService(options: RedisServiceOptions) { 240 | if (!this.upstashProvider) return; 241 | const { serviceName, ...redisOptions } = options; 242 | const service = new Redis(serviceName, redisOptions, { 243 | parent: this, 244 | provider: this.upstashProvider, 245 | }); 246 | this.services[options.serviceName] = service; 247 | } 248 | 249 | private createStaticSiteService(options: StaticSiteServiceOptions) { 250 | const { serviceName, ...staticSiteOptions } = options; 251 | const service = new StaticSite(serviceName, staticSiteOptions, { 252 | parent: this, 253 | }); 254 | this.services[serviceName] = service; 255 | } 256 | 257 | private createWebServerService(options: WebServerServiceOptions) { 258 | if (!this.cluster) throw new MissingEcsCluster(); 259 | 260 | const { serviceName, environment, secrets, ...ecsOptions } = options; 261 | const parsedEnv = 262 | typeof environment === 'function' 263 | ? environment(this.services) 264 | : environment; 265 | 266 | const parsedSecrets = 267 | typeof secrets === 'function' ? secrets(this.services) : secrets; 268 | 269 | const service = new WebServer( 270 | serviceName, 271 | { 272 | ...ecsOptions, 273 | clusterId: this.cluster.id, 274 | clusterName: this.cluster.name, 275 | vpcId: this.vpc.vpcId, 276 | vpcCidrBlock: this.vpc.vpc.cidrBlock, 277 | publicSubnetIds: this.vpc.publicSubnetIds, 278 | environment: parsedEnv, 279 | secrets: parsedSecrets, 280 | }, 281 | { parent: this }, 282 | ); 283 | this.services[options.serviceName] = service; 284 | } 285 | 286 | private createNuxtSSRService(options: NuxtSSRServiceOptions) { 287 | if (!this.cluster) throw new MissingEcsCluster(); 288 | 289 | const { serviceName, environment, secrets, ...ecsOptions } = options; 290 | const parsedEnv = 291 | typeof environment === 'function' 292 | ? environment(this.services) 293 | : environment; 294 | 295 | const parsedSecrets = 296 | typeof secrets === 'function' ? 
secrets(this.services) : secrets; 297 | 298 | const service = new NuxtSSR( 299 | serviceName, 300 | { 301 | ...ecsOptions, 302 | clusterId: this.cluster.id, 303 | clusterName: this.cluster.name, 304 | vpcId: this.vpc.vpcId, 305 | vpcCidrBlock: this.vpc.vpc.cidrBlock, 306 | publicSubnetIds: this.vpc.publicSubnetIds, 307 | environment: parsedEnv, 308 | secrets: parsedSecrets, 309 | }, 310 | { parent: this }, 311 | ); 312 | this.services[options.serviceName] = service; 313 | } 314 | 315 | private createMongoService(options: MongoServiceOptions) { 316 | if (!this.cluster) throw new MissingEcsCluster(); 317 | 318 | const { serviceName, ...mongoOptions } = options; 319 | 320 | const service = new Mongo( 321 | serviceName, 322 | { 323 | ...mongoOptions, 324 | clusterId: this.cluster.id, 325 | clusterName: this.cluster.name, 326 | vpcId: this.vpc.vpcId, 327 | vpcCidrBlock: this.vpc.vpc.cidrBlock, 328 | privateSubnetIds: this.vpc.privateSubnetIds, 329 | }, 330 | { parent: this }, 331 | ); 332 | this.services[options.serviceName] = service; 333 | } 334 | 335 | private createEcsService(options: EcsServiceOptions) { 336 | if (!this.cluster) throw new MissingEcsCluster(); 337 | 338 | const { serviceName, environment, secrets, ...ecsOptions } = options; 339 | const parsedEnv = 340 | typeof environment === 'function' 341 | ? environment(this.services) 342 | : environment; 343 | 344 | const parsedSecrets = 345 | typeof secrets === 'function' ? secrets(this.services) : secrets; 346 | 347 | const service = new EcsService( 348 | serviceName, 349 | { 350 | ...ecsOptions, 351 | clusterId: this.cluster.id, 352 | clusterName: this.cluster.name, 353 | vpcId: this.vpc.vpcId, 354 | vpcCidrBlock: this.vpc.vpc.cidrBlock, 355 | subnetIds: ecsOptions.assignPublicIp 356 | ? this.vpc.publicSubnetIds 357 | : this.vpc.privateSubnetIds, 358 | environment: parsedEnv, 359 | secrets: parsedSecrets, 360 | }, 361 | { parent: this }, 362 | ); 363 | this.services[options.serviceName] = service; 364 | } 365 | } 366 | -------------------------------------------------------------------------------- /src/components/redis.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import * as upstash from '@upstash/pulumi'; 3 | import * as aws from '@pulumi/aws'; 4 | import { commonTags } from '../constants'; 5 | 6 | export type RedisArgs = { 7 | /** 8 | * Redis database name. 9 | */ 10 | dbName: pulumi.Input; 11 | /** 12 | * Region of the database. Possible values are: "global", "eu-west-1", "us-east-1", "us-west-1", "ap-northeast-1" , "eu-central1". 
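 * Defaults to "us-east-1" when omitted.
 *
 * A minimal illustrative sketch (pick whichever listed region is closest to your workloads):
 * @example
 * region: 'eu-west-1'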
13 | */ 14 | region?: pulumi.Input; 15 | }; 16 | 17 | const defaults = { 18 | region: 'us-east-1', 19 | }; 20 | 21 | export interface RedisOptions extends pulumi.ComponentResourceOptions { 22 | provider: upstash.Provider; 23 | } 24 | 25 | export class Redis extends pulumi.ComponentResource { 26 | instance: upstash.RedisDatabase; 27 | passwordSecret: aws.secretsmanager.Secret; 28 | username = 'default'; 29 | 30 | constructor(name: string, args: RedisArgs, opts: RedisOptions) { 31 | super('studion:Redis', name, {}, opts); 32 | 33 | const project = pulumi.getProject(); 34 | const stack = pulumi.getStack(); 35 | 36 | const argsWithDefaults = Object.assign({}, defaults, args); 37 | 38 | this.instance = new upstash.RedisDatabase( 39 | name, 40 | { 41 | databaseName: `${argsWithDefaults.dbName}-${stack}`, 42 | region: argsWithDefaults.region, 43 | eviction: true, 44 | tls: true, 45 | }, 46 | { provider: opts.provider, parent: this }, 47 | ); 48 | 49 | this.passwordSecret = new aws.secretsmanager.Secret( 50 | `${name}-password-secret`, 51 | { 52 | namePrefix: `${stack}/${project}/RedisPassword-`, 53 | tags: commonTags, 54 | }, 55 | { parent: this, dependsOn: [this.instance] }, 56 | ); 57 | 58 | const passwordSecretValue = new aws.secretsmanager.SecretVersion( 59 | `${name}-password-secret-value`, 60 | { 61 | secretId: this.passwordSecret.id, 62 | secretString: this.instance.password, 63 | }, 64 | { parent: this, dependsOn: [this.passwordSecret] }, 65 | ); 66 | 67 | this.registerOutputs(); 68 | } 69 | } 70 | -------------------------------------------------------------------------------- /src/components/static-site.ts: -------------------------------------------------------------------------------- 1 | import * as aws from '@pulumi/aws'; 2 | import * as pulumi from '@pulumi/pulumi'; 3 | import { AcmCertificate } from './acm-certificate'; 4 | import { commonTags } from '../constants'; 5 | 6 | export type StaticSiteArgs = { 7 | /** 8 | * The domain which will be used to access the static site. 9 | * The domain or subdomain must belong to the provided hostedZone. 10 | */ 11 | domain?: pulumi.Input; 12 | /** 13 | * The ID of the hosted zone. 14 | */ 15 | hostedZoneId?: pulumi.Input; 16 | /** 17 | * ARN of the CloudFront viewer-request function. 18 | */ 19 | viewerRequestFunctionArn?: pulumi.Input; 20 | /** 21 | * A map of tags to assign to the resource. 
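 *
 * A small illustrative sketch (keys and values are arbitrary examples):
 * @example
 * tags: { Team: 'frontend', CostCenter: 'marketing' }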
22 | */ 23 | tags?: pulumi.Input<{ 24 | [key: string]: pulumi.Input; 25 | }>; 26 | }; 27 | 28 | export class StaticSite extends pulumi.ComponentResource { 29 | name: string; 30 | certificate?: AcmCertificate; 31 | bucket: aws.s3.Bucket; 32 | cloudfront: aws.cloudfront.Distribution; 33 | 34 | constructor( 35 | name: string, 36 | args: StaticSiteArgs, 37 | opts: pulumi.ComponentResourceOptions = {}, 38 | ) { 39 | super('studion:StaticSite', name, {}, opts); 40 | 41 | this.name = name; 42 | const { domain, hostedZoneId, viewerRequestFunctionArn, tags } = args; 43 | const hasCustomDomain = domain && hostedZoneId; 44 | if (domain && !hostedZoneId) { 45 | throw new Error( 46 | 'StaticSite:hostedZoneId must be provided when the domain is specified', 47 | ); 48 | } 49 | if (hasCustomDomain) { 50 | this.certificate = this.createTlsCertificate({ domain, hostedZoneId }); 51 | } 52 | this.bucket = this.createPublicBucket({ tags }); 53 | this.cloudfront = this.createCloudfrontDistribution({ 54 | domain, 55 | viewerRequestFunctionArn, 56 | tags, 57 | }); 58 | if (hasCustomDomain) { 59 | this.createDnsRecord({ domain, hostedZoneId }); 60 | } 61 | 62 | this.registerOutputs(); 63 | } 64 | 65 | private createTlsCertificate({ 66 | domain, 67 | hostedZoneId, 68 | }: Pick, 'domain' | 'hostedZoneId'>) { 69 | const certificate = new AcmCertificate( 70 | `${domain}-acm-certificate`, 71 | { 72 | domain, 73 | hostedZoneId, 74 | }, 75 | { parent: this }, 76 | ); 77 | return certificate; 78 | } 79 | 80 | private createPublicBucket({ tags }: Pick) { 81 | const bucket = new aws.s3.Bucket( 82 | `${this.name}-bucket`, 83 | { 84 | bucketPrefix: `${this.name}-`, 85 | website: { 86 | indexDocument: 'index.html', 87 | errorDocument: 'index.html', 88 | }, 89 | tags: { ...commonTags, ...tags }, 90 | }, 91 | { parent: this }, 92 | ); 93 | 94 | const bucketPublicAccessBlock = new aws.s3.BucketPublicAccessBlock( 95 | `${this.name}-bucket-access-block`, 96 | { 97 | bucket: bucket.id, 98 | blockPublicAcls: false, 99 | blockPublicPolicy: false, 100 | ignorePublicAcls: false, 101 | restrictPublicBuckets: false, 102 | }, 103 | { parent: this }, 104 | ); 105 | 106 | const siteBucketPolicy = new aws.s3.BucketPolicy( 107 | `${this.name}-bucket-policy`, 108 | { 109 | bucket: bucket.bucket, 110 | policy: bucket.bucket.apply(publicReadPolicy), 111 | }, 112 | { parent: this, dependsOn: [bucketPublicAccessBlock] }, 113 | ); 114 | 115 | function publicReadPolicy(bucketName: string): aws.iam.PolicyDocument { 116 | return { 117 | Version: '2012-10-17', 118 | Statement: [ 119 | { 120 | Effect: 'Allow', 121 | Principal: '*', 122 | Action: ['s3:GetObject'], 123 | Resource: [`arn:aws:s3:::${bucketName}/*`], 124 | }, 125 | ], 126 | }; 127 | } 128 | 129 | return bucket; 130 | } 131 | 132 | private createCloudfrontDistribution({ 133 | domain, 134 | viewerRequestFunctionArn, 135 | tags, 136 | }: Pick) { 137 | const functionAssociations = viewerRequestFunctionArn 138 | ? [ 139 | { 140 | eventType: 'viewer-request', 141 | functionArn: viewerRequestFunctionArn, 142 | }, 143 | ] 144 | : []; 145 | 146 | const cloudfront = new aws.cloudfront.Distribution( 147 | `${this.name}-cloudfront`, 148 | { 149 | enabled: true, 150 | defaultRootObject: 'index.html', 151 | ...(domain && { aliases: [domain] }), 152 | isIpv6Enabled: true, 153 | waitForDeployment: true, 154 | httpVersion: 'http2and3', 155 | viewerCertificate: { 156 | ...(this.certificate 157 | ? 
{ 158 | acmCertificateArn: this.certificate.certificate.arn, 159 | sslSupportMethod: 'sni-only', 160 | minimumProtocolVersion: 'TLSv1.2_2021', 161 | } 162 | : { 163 | cloudfrontDefaultCertificate: true, 164 | }), 165 | }, 166 | origins: [ 167 | { 168 | originId: this.bucket.arn, 169 | domainName: this.bucket.websiteEndpoint, 170 | connectionAttempts: 3, 171 | connectionTimeout: 10, 172 | customOriginConfig: { 173 | originProtocolPolicy: 'http-only', 174 | httpPort: 80, 175 | httpsPort: 443, 176 | originSslProtocols: ['TLSv1.2'], 177 | }, 178 | }, 179 | ], 180 | defaultCacheBehavior: { 181 | targetOriginId: this.bucket.arn, 182 | viewerProtocolPolicy: 'redirect-to-https', 183 | allowedMethods: ['GET', 'HEAD', 'OPTIONS'], 184 | cachedMethods: ['GET', 'HEAD', 'OPTIONS'], 185 | compress: true, 186 | defaultTtl: 86400, 187 | minTtl: 1, 188 | maxTtl: 31536000, 189 | forwardedValues: { 190 | cookies: { forward: 'none' }, 191 | queryString: false, 192 | }, 193 | functionAssociations, 194 | }, 195 | priceClass: 'PriceClass_100', 196 | restrictions: { 197 | geoRestriction: { restrictionType: 'none' }, 198 | }, 199 | tags: { ...commonTags, ...tags }, 200 | }, 201 | { parent: this }, 202 | ); 203 | return cloudfront; 204 | } 205 | 206 | private createDnsRecord({ 207 | domain, 208 | hostedZoneId, 209 | }: Pick, 'domain' | 'hostedZoneId'>) { 210 | const cdnAliasRecord = new aws.route53.Record( 211 | `${this.name}-cdn-route53-record`, 212 | { 213 | type: 'A', 214 | name: domain, 215 | zoneId: hostedZoneId, 216 | aliases: [ 217 | { 218 | name: this.cloudfront.domainName, 219 | zoneId: this.cloudfront.hostedZoneId, 220 | evaluateTargetHealth: true, 221 | }, 222 | ], 223 | }, 224 | { parent: this }, 225 | ); 226 | return cdnAliasRecord; 227 | } 228 | } 229 | -------------------------------------------------------------------------------- /src/components/web-server.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import * as aws from '@pulumi/aws'; 3 | import { commonTags } from '../constants'; 4 | import { AcmCertificate } from './acm-certificate'; 5 | import { EcsService, EcsServiceArgs } from './ecs-service'; 6 | 7 | export type WebServerArgs = Pick< 8 | EcsServiceArgs, 9 | | 'image' 10 | | 'port' 11 | | 'clusterId' 12 | | 'clusterName' 13 | | 'vpcId' 14 | | 'vpcCidrBlock' 15 | | 'desiredCount' 16 | | 'autoscaling' 17 | | 'size' 18 | | 'environment' 19 | | 'secrets' 20 | | 'persistentStorageConfig' 21 | | 'taskExecutionRoleInlinePolicies' 22 | | 'taskRoleInlinePolicies' 23 | | 'tags' 24 | > & { 25 | publicSubnetIds: pulumi.Input[]>; 26 | /** 27 | * The domain which will be used to access the service. 28 | * The domain or subdomain must belong to the provided hostedZone. 29 | */ 30 | domain?: pulumi.Input; 31 | /** 32 | * The ID of the hosted zone. 33 | */ 34 | hostedZoneId?: pulumi.Input; 35 | /** 36 | * Path for the health check request. Defaults to "/healthcheck". 
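 *
 * A minimal illustrative sketch (assumes the app serves its probe under /api):
 * @example
 * healthCheckPath: '/api/healthcheck'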
37 | */ 38 | healthCheckPath?: pulumi.Input; 39 | }; 40 | 41 | const defaults = { 42 | healthCheckPath: '/healthcheck', 43 | }; 44 | 45 | export class WebServer extends pulumi.ComponentResource { 46 | name: string; 47 | service: EcsService; 48 | lbSecurityGroup: aws.ec2.SecurityGroup; 49 | serviceSecurityGroup: aws.ec2.SecurityGroup; 50 | lb: aws.lb.LoadBalancer; 51 | lbTargetGroup: aws.lb.TargetGroup; 52 | lbHttpListener: aws.lb.Listener; 53 | certificate?: AcmCertificate; 54 | lbTlsListener?: aws.lb.Listener; 55 | 56 | constructor( 57 | name: string, 58 | args: WebServerArgs, 59 | opts: pulumi.ComponentResourceOptions = {}, 60 | ) { 61 | const aliases = opts.aliases || []; 62 | super('studion:LegacyWebServer', name, args, { 63 | ...opts, 64 | aliases: [...aliases, { type: 'studion:WebServer' }] 65 | }); 66 | 67 | const { vpcId, domain, hostedZoneId } = args; 68 | 69 | const hasCustomDomain = !!domain && !!hostedZoneId; 70 | if (domain && !hostedZoneId) { 71 | throw new Error( 72 | 'WebServer:hostedZoneId must be provided when the domain is specified', 73 | ); 74 | } 75 | 76 | this.name = name; 77 | if (hasCustomDomain) { 78 | this.certificate = this.createTlsCertificate({ domain, hostedZoneId }); 79 | } 80 | const { 81 | lb, 82 | lbTargetGroup, 83 | lbHttpListener, 84 | lbTlsListener, 85 | lbSecurityGroup, 86 | } = this.createLoadBalancer(args); 87 | this.lb = lb; 88 | this.lbTargetGroup = lbTargetGroup; 89 | this.lbHttpListener = lbHttpListener; 90 | this.lbTlsListener = lbTlsListener; 91 | this.lbSecurityGroup = lbSecurityGroup; 92 | this.serviceSecurityGroup = this.createSecurityGroup(vpcId); 93 | this.service = this.createEcsService(args); 94 | 95 | if (hasCustomDomain) { 96 | this.createDnsRecord({ domain, hostedZoneId }); 97 | } 98 | 99 | this.registerOutputs(); 100 | } 101 | 102 | private createTlsCertificate({ 103 | domain, 104 | hostedZoneId, 105 | }: Pick, 'domain' | 'hostedZoneId'>) { 106 | const certificate = new AcmCertificate( 107 | `${domain}-acm-certificate`, 108 | { 109 | domain, 110 | hostedZoneId, 111 | }, 112 | { parent: this }, 113 | ); 114 | return certificate; 115 | } 116 | 117 | private createLoadBalancer({ 118 | vpcId, 119 | publicSubnetIds, 120 | port, 121 | healthCheckPath, 122 | }: Pick< 123 | WebServerArgs, 124 | 'vpcId' | 'publicSubnetIds' | 'port' | 'healthCheckPath' 125 | >) { 126 | const lbSecurityGroup = new aws.ec2.SecurityGroup( 127 | `${this.name}-lb-security-group`, 128 | { 129 | vpcId, 130 | ingress: [ 131 | { 132 | protocol: 'tcp', 133 | fromPort: 80, 134 | toPort: 80, 135 | cidrBlocks: ['0.0.0.0/0'], 136 | }, 137 | { 138 | protocol: 'tcp', 139 | fromPort: 443, 140 | toPort: 443, 141 | cidrBlocks: ['0.0.0.0/0'], 142 | }, 143 | ], 144 | egress: [ 145 | { 146 | fromPort: 0, 147 | toPort: 0, 148 | protocol: '-1', 149 | cidrBlocks: ['0.0.0.0/0'], 150 | }, 151 | ], 152 | tags: commonTags, 153 | }, 154 | { parent: this }, 155 | ); 156 | 157 | const lb = new aws.lb.LoadBalancer( 158 | `${this.name}-lb`, 159 | { 160 | namePrefix: 'lb-', 161 | loadBalancerType: 'application', 162 | subnets: publicSubnetIds, 163 | securityGroups: [lbSecurityGroup.id], 164 | internal: false, 165 | ipAddressType: 'ipv4', 166 | tags: { ...commonTags, Name: `${this.name}-lb` }, 167 | }, 168 | { parent: this }, 169 | ); 170 | 171 | const lbTargetGroup = new aws.lb.TargetGroup( 172 | `${this.name}-lb-tg`, 173 | { 174 | namePrefix: 'lb-tg-', 175 | port, 176 | protocol: 'HTTP', 177 | targetType: 'ip', 178 | vpcId, 179 | healthCheck: { 180 | healthyThreshold: 3, 181 | 
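          // Probe tuning (values below): two consecutive failures mark a target
          // unhealthy; probes run every 60 seconds with a 5-second timeout.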
unhealthyThreshold: 2, 182 | interval: 60, 183 | timeout: 5, 184 | path: healthCheckPath || defaults.healthCheckPath, 185 | }, 186 | tags: { ...commonTags, Name: `${this.name}-lb-target-group` }, 187 | }, 188 | { parent: this, dependsOn: [this.lb] }, 189 | ); 190 | 191 | const defaultAction = this.certificate 192 | ? { 193 | type: 'redirect', 194 | redirect: { 195 | port: '443', 196 | protocol: 'HTTPS', 197 | statusCode: 'HTTP_301', 198 | }, 199 | } 200 | : { 201 | type: 'forward', 202 | targetGroupArn: lbTargetGroup.arn, 203 | }; 204 | 205 | const lbHttpListener = new aws.lb.Listener( 206 | `${this.name}-lb-listener-80`, 207 | { 208 | loadBalancerArn: lb.arn, 209 | port: 80, 210 | defaultActions: [defaultAction], 211 | tags: commonTags, 212 | }, 213 | { parent: this }, 214 | ); 215 | 216 | let lbTlsListener = undefined; 217 | if (this.certificate) { 218 | lbTlsListener = new aws.lb.Listener( 219 | `${this.name}-lb-listener-443`, 220 | { 221 | loadBalancerArn: lb.arn, 222 | port: 443, 223 | protocol: 'HTTPS', 224 | sslPolicy: 'ELBSecurityPolicy-2016-08', 225 | certificateArn: this.certificate.certificate.arn, 226 | defaultActions: [ 227 | { 228 | type: 'forward', 229 | targetGroupArn: lbTargetGroup.arn, 230 | }, 231 | ], 232 | tags: commonTags, 233 | }, 234 | { parent: this, dependsOn: [this.certificate] }, 235 | ); 236 | } 237 | 238 | return { 239 | lb, 240 | lbTargetGroup, 241 | lbHttpListener, 242 | lbTlsListener, 243 | lbSecurityGroup, 244 | }; 245 | } 246 | 247 | private createSecurityGroup(vpcId: WebServerArgs['vpcId']) { 248 | const securityGroup = new aws.ec2.SecurityGroup( 249 | `${this.name}-security-group`, 250 | { 251 | vpcId, 252 | ingress: [ 253 | { 254 | fromPort: 0, 255 | toPort: 0, 256 | protocol: '-1', 257 | securityGroups: [this.lbSecurityGroup.id], 258 | }, 259 | ], 260 | egress: [ 261 | { 262 | fromPort: 0, 263 | toPort: 0, 264 | protocol: '-1', 265 | cidrBlocks: ['0.0.0.0/0'], 266 | }, 267 | ], 268 | tags: commonTags, 269 | }, 270 | { parent: this }, 271 | ); 272 | return securityGroup; 273 | } 274 | 275 | private createEcsService(args: WebServerArgs) { 276 | const service = new EcsService( 277 | this.name, 278 | { 279 | ...args, 280 | enableServiceAutoDiscovery: false, 281 | lbTargetGroupArn: this.lbTargetGroup.arn, 282 | assignPublicIp: true, 283 | subnetIds: args.publicSubnetIds, 284 | securityGroup: this.serviceSecurityGroup, 285 | }, 286 | { 287 | parent: this, 288 | dependsOn: [this.lb, this.lbTargetGroup], 289 | }, 290 | ); 291 | return service; 292 | } 293 | 294 | private createDnsRecord({ 295 | domain, 296 | hostedZoneId, 297 | }: Pick, 'domain' | 'hostedZoneId'>) { 298 | const albAliasRecord = new aws.route53.Record( 299 | `${this.name}-route53-record`, 300 | { 301 | type: 'A', 302 | name: domain, 303 | zoneId: hostedZoneId, 304 | aliases: [ 305 | { 306 | name: this.lb.dnsName, 307 | zoneId: this.lb.zoneId, 308 | evaluateTargetHealth: true, 309 | }, 310 | ], 311 | }, 312 | { parent: this }, 313 | ); 314 | } 315 | } 316 | -------------------------------------------------------------------------------- /src/constants.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | 3 | const CPU_1_VCPU = 1024; 4 | const MEMORY_1GB = 1024; 5 | 6 | export const PredefinedSize = { 7 | small: { 8 | cpu: CPU_1_VCPU / 4, // 0.25 vCPU 9 | memory: MEMORY_1GB / 2, // 0.5 GB memory 10 | }, 11 | medium: { 12 | cpu: CPU_1_VCPU / 2, // 0.5 vCPU 13 | memory: MEMORY_1GB, // 1 GB memory 14 | }, 15 | large: { 
16 | cpu: CPU_1_VCPU, // 1 vCPU 17 | memory: MEMORY_1GB * 2, // 2 GB memory 18 | }, 19 | xlarge: { 20 | cpu: CPU_1_VCPU * 2, // 2 vCPU 21 | memory: MEMORY_1GB * 4, // 4 GB memory 22 | }, 23 | } as const; 24 | 25 | export const commonTags = { 26 | Env: pulumi.getStack(), 27 | Project: pulumi.getProject(), 28 | }; 29 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | export * from './components/web-server'; 2 | export * from './components/mongo'; 3 | export * from './components/static-site'; 4 | export * from './components/database'; 5 | export * from './components/database-replica'; 6 | export * from './components/redis'; 7 | export * from './components/project'; 8 | export * from './components/ec2-ssm-connect'; 9 | export * from './components/ecs-service'; 10 | export * from './components/nuxt-ssr'; 11 | export * as next from './v2'; 12 | -------------------------------------------------------------------------------- /src/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig.options.json", 3 | "files": [], 4 | "include": ["**/*"], 5 | "references": [], 6 | "compilerOptions": { 7 | "rootDir": ".", 8 | "outDir": "../dist" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /src/types/pulumi.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | 3 | export type WithInput = { 4 | [K in keyof T]: pulumi.Input; 5 | }; 6 | -------------------------------------------------------------------------------- /src/types/size.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import { PredefinedSize } from '../constants'; 3 | 4 | export type CustomSize = { 5 | cpu: pulumi.Input; 6 | memory: pulumi.Input; 7 | }; 8 | export type Size = keyof typeof PredefinedSize | CustomSize; 9 | -------------------------------------------------------------------------------- /src/v2/components/ecs-service/policies.ts: -------------------------------------------------------------------------------- 1 | import * as aws from '@pulumi/aws'; 2 | 3 | export const assumeRolePolicy: aws.iam.PolicyDocument = { 4 | Version: '2012-10-17', 5 | Statement: [ 6 | { 7 | Action: 'sts:AssumeRole', 8 | Principal: { 9 | Service: 'ecs-tasks.amazonaws.com', 10 | }, 11 | Effect: 'Allow', 12 | Sid: '', 13 | }, 14 | ], 15 | }; 16 | 17 | -------------------------------------------------------------------------------- /src/v2/components/grafana/dashboards/index.ts: -------------------------------------------------------------------------------- 1 | export { default as WebServerSloDashboardBuilder } from "./web-server-slo"; 2 | export * as panel from './panels'; 3 | -------------------------------------------------------------------------------- /src/v2/components/grafana/dashboards/panels.ts: -------------------------------------------------------------------------------- 1 | import { Grafana } from './types'; 2 | 3 | const percentageFieldConfig = { 4 | unit: 'percent', 5 | min: 0, 6 | max: 100 7 | } 8 | 9 | export function createStatPercentagePanel( 10 | title: string, 11 | position: Grafana.Panel.Position, 12 | dataSource: string, 13 | metric: Grafana.Metric 14 | ): Grafana.Panel { 15 | return { 16 | title, 17 | gridPos: position, 18 
| type: 'stat', 19 | datasource: dataSource, 20 | targets: [{ 21 | expr: metric.query, 22 | legendFormat: metric.label 23 | }], 24 | fieldConfig: { 25 | defaults: { 26 | ...percentageFieldConfig, 27 | ...(metric.thresholds ? { 28 | thresholds: { 29 | mode: 'absolute', 30 | steps: metric.thresholds 31 | } 32 | } : {}) 33 | } 34 | } 35 | }; 36 | } 37 | 38 | export function createTimeSeriesPercentagePanel( 39 | title: string, 40 | position: Grafana.Panel.Position, 41 | dataSource: string, 42 | metric: Grafana.Metric 43 | ): Grafana.Panel { 44 | return createTimeSeriesPanel( 45 | title, 46 | position, 47 | dataSource, 48 | metric, 49 | percentageFieldConfig.unit, 50 | percentageFieldConfig.min, 51 | percentageFieldConfig.max 52 | ); 53 | } 54 | 55 | export function createTimeSeriesPanel( 56 | title: string, 57 | position: Grafana.Panel.Position, 58 | dataSource: string, 59 | metric: Grafana.Metric, 60 | unit?: string, 61 | min?: number, 62 | max?: number 63 | ): Grafana.Panel { 64 | return { 65 | title, 66 | type: 'timeseries', 67 | datasource: dataSource, 68 | gridPos: position, 69 | targets: [{ 70 | expr: metric.query, 71 | legendFormat: metric.label 72 | }], 73 | fieldConfig: { 74 | defaults: { 75 | unit, 76 | min, 77 | max, 78 | ...(metric.thresholds ? { 79 | thresholds: { 80 | mode: 'absolute', 81 | steps: metric.thresholds 82 | } 83 | } : {}), 84 | } 85 | } 86 | }; 87 | } 88 | 89 | export function createBurnRatePanel( 90 | title: string, 91 | position: Grafana.Panel.Position, 92 | dataSource: string, 93 | metric: Grafana.Metric 94 | ): Grafana.Panel { 95 | return { 96 | type: 'stat', 97 | title, 98 | gridPos: position, 99 | datasource: dataSource, 100 | targets: [{ 101 | expr: metric.query, 102 | legendFormat: metric.label 103 | }], 104 | options: { 105 | reduceOptions: { 106 | calcs: ['last'], 107 | fields: '', 108 | values: false 109 | }, 110 | colorMode: 'value', 111 | graphMode: 'none', 112 | textMode: 'value' 113 | }, 114 | fieldConfig: { 115 | defaults: { 116 | unit: 'none', 117 | thresholds: { 118 | mode: 'absolute', 119 | steps: [ 120 | { color: 'green', value: null }, 121 | { color: 'orange', value: 1 }, 122 | { color: 'red', value: 2 } 123 | ] 124 | } 125 | } 126 | }, 127 | } 128 | } 129 | -------------------------------------------------------------------------------- /src/v2/components/grafana/dashboards/types.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import * as grafana from '@pulumiverse/grafana'; 3 | 4 | // TODO: Should we prefix all namespaces with `Studion` 5 | export namespace Grafana { 6 | // TODO: Create SLO abstraction that enables configuring: 7 | // - panels (long-window SLI, long-window error budget) 8 | // - alerts (long-window burn, short-window burn) 9 | export type Threshold = { 10 | value: number | null; 11 | color: string; 12 | }; 13 | export type Metric = { 14 | label: string; 15 | query: string; 16 | thresholds: Threshold[]; 17 | }; 18 | 19 | export type Args = { 20 | title: pulumi.Input; 21 | provider: pulumi.Input; 22 | tags: pulumi.Input[]>; 23 | }; 24 | 25 | export type Panel = { 26 | title: string; 27 | gridPos: Panel.Position; 28 | type: string; 29 | datasource: string; 30 | targets: { 31 | expr: string; 32 | legendFormat: string; 33 | }[]; 34 | fieldConfig: { 35 | defaults: { 36 | unit?: string; 37 | min?: number; 38 | max?: number; 39 | color?: { 40 | mode: string; 41 | }; 42 | thresholds?: { 43 | mode: string; 44 | steps: Threshold[]; 45 | }; 46 | custom?: 
{ 47 | lineInterpolation?: string, 48 | spanNulls: boolean 49 | } 50 | }; 51 | }; 52 | options?: { 53 | colorMode?: string; 54 | graphMode?: string; 55 | justifyMode?: string; 56 | textMode?: string; 57 | reduceOptions?: { 58 | calcs?: string[]; 59 | fields?: string; 60 | values?: boolean; 61 | }; 62 | }; 63 | } 64 | 65 | export namespace Panel { 66 | export type Position = { 67 | x: number; 68 | y: number; 69 | w: number; 70 | h: number; 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/v2/components/grafana/dashboards/web-server-slo.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import * as grafana from '@pulumiverse/grafana'; 3 | import { queries as promQ } from '../../prometheus' 4 | import { Grafana } from './types'; 5 | import { 6 | createBurnRatePanel, 7 | createStatPercentagePanel, 8 | createTimeSeriesPanel, 9 | createTimeSeriesPercentagePanel 10 | } from './panels'; 11 | 12 | class WebServerSloDashboardBuilder { 13 | name: string; 14 | title: pulumi.Output; 15 | panels: Grafana.Panel[] = []; 16 | tags?: pulumi.Output; 17 | 18 | constructor( 19 | name: string, 20 | args: Grafana.Args, 21 | ) { 22 | this.name = name; 23 | this.title = pulumi.output(args.title); 24 | } 25 | 26 | withAvailability( 27 | target: number, 28 | window: promQ.TimeRange, 29 | dataSource: string, 30 | prometheusNamespace: string 31 | ): this { 32 | const availabilityPercentage = promQ.getAvailabilityPercentageQuery(prometheusNamespace, window); 33 | const availabilityBurnRate = promQ.getBurnRateQuery(promQ.getAvailabilityQuery(prometheusNamespace, '1h'), target); 34 | 35 | const availabilitySloPanel = createStatPercentagePanel( 36 | 'Availability', 37 | { x: 0, y: 0, w: 8, h: 8 }, 38 | dataSource, { 39 | label: 'Availability', 40 | query: availabilityPercentage, 41 | thresholds: [] 42 | }); 43 | const availabilityBurnRatePanel = createBurnRatePanel( 44 | 'Availability Burn Rate', 45 | { x: 0, y: 8, w: 8, h: 4 }, 46 | dataSource, { 47 | label: 'Burn Rate', 48 | query: availabilityBurnRate, 49 | thresholds: [] 50 | }); 51 | 52 | this.panels.push(availabilitySloPanel, availabilityBurnRatePanel); 53 | 54 | return this; 55 | } 56 | 57 | withSuccessRate( 58 | target: number, 59 | window: promQ.TimeRange, 60 | shortWindow: promQ.TimeRange, 61 | filter: string, 62 | dataSource: string, 63 | prometheusNamespace: string 64 | ): this { 65 | const successRateSlo = promQ.getSuccessPercentageQuery(prometheusNamespace, window, filter); 66 | const successRateBurnRate = promQ.getBurnRateQuery(promQ.getSuccessRateQuery(prometheusNamespace, '1h', filter), target); 67 | const successRate = promQ.getSuccessPercentageQuery(prometheusNamespace, shortWindow, filter); 68 | 69 | const successRateSloPanel = createStatPercentagePanel( 70 | 'Success Rate', 71 | { x: 8, y: 0, w: 8, h: 8 }, 72 | dataSource, 73 | { 74 | label: 'Success Rate', 75 | query: successRateSlo, 76 | thresholds: [] 77 | } 78 | ); 79 | const successRatePanel = createTimeSeriesPercentagePanel( 80 | 'HTTP Request Success Rate', 81 | { x: 0, y: 16, w: 12, h: 8 }, 82 | dataSource, 83 | { 84 | label: 'Success Rate', 85 | query: successRate, 86 | thresholds: [] 87 | } 88 | ); 89 | const successRateBurnRatePanel = createBurnRatePanel( 90 | 'Success Rate Burn Rate', 91 | { x: 8, y: 8, w: 8, h: 4 }, 92 | dataSource, 93 | { 94 | label: 'Burn Rate', 95 | query: successRateBurnRate, 96 | thresholds: [] 97 | } 98 | ); 99 | 100 | 
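    // Register the success-rate SLO stat panel, the short-window time-series panel,
    // and the burn-rate panel assembled above.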
this.panels.push(successRateSloPanel, successRatePanel, successRateBurnRatePanel); 101 | 102 | return this; 103 | } 104 | 105 | withLatency( 106 | target: number, 107 | targetLatency: number, 108 | window: promQ.TimeRange, 109 | shortWindow: promQ.TimeRange, 110 | filter: string, 111 | dataSource: string, 112 | prometheusNamespace: string 113 | ): this { 114 | const latencySlo = promQ.getLatencyPercentageQuery(prometheusNamespace, window, targetLatency, filter); 115 | const latencyBurnRate = promQ.getBurnRateQuery(promQ.getLatencyRateQuery(prometheusNamespace, '1h', targetLatency), target); 116 | const percentileLatency = promQ.getPercentileLatencyQuery(prometheusNamespace, shortWindow, target, filter); 117 | const latencyBelowThreshold = promQ.getLatencyPercentageQuery(prometheusNamespace, shortWindow, targetLatency, filter); 118 | 119 | const latencySloPanel = createStatPercentagePanel( 120 | 'Request % below 250ms', 121 | { x: 16, y: 0, w: 8, h: 8 }, 122 | dataSource, 123 | { 124 | label: 'Request % below 250ms', 125 | query: latencySlo, 126 | thresholds: [] 127 | } 128 | ); 129 | const percentileLatencyPanel = createTimeSeriesPanel( 130 | '99th Percentile Latency', 131 | { x: 12, y: 16, w: 12, h: 8 }, 132 | dataSource, 133 | { 134 | label: '99th Percentile Latency', 135 | query: percentileLatency, 136 | thresholds: [] 137 | }, 138 | 'ms' 139 | ); 140 | const latencyPercentagePanel = createTimeSeriesPercentagePanel( 141 | 'Request percentage below 250ms', 142 | { x: 0, y: 24, w: 12, h: 8 }, 143 | dataSource, 144 | { 145 | label: 'Request percentage below 250ms', 146 | query: latencyBelowThreshold, 147 | thresholds: [] 148 | } 149 | ); 150 | const latencyBurnRatePanel = createBurnRatePanel( 151 | 'Latency Burn Rate', 152 | { x: 16, y: 8, w: 8, h: 4 }, 153 | dataSource, 154 | { 155 | label: 'Burn Rate', 156 | query: latencyBurnRate, 157 | thresholds: [] 158 | } 159 | ); 160 | 161 | this.panels.push( 162 | latencySloPanel, 163 | percentileLatencyPanel, 164 | latencyPercentagePanel, 165 | latencyBurnRatePanel 166 | ); 167 | 168 | return this; 169 | } 170 | 171 | build(provider: pulumi.Output): pulumi.Output { 172 | return pulumi.all([ 173 | this.title, 174 | this.panels, 175 | provider, 176 | this.tags 177 | ]).apply(([ 178 | title, 179 | panels, 180 | provider, 181 | tags 182 | ]) => { 183 | return new grafana.oss.Dashboard(this.name, { 184 | configJson: JSON.stringify({ 185 | title, 186 | tags, 187 | timezone: 'browser', 188 | refresh: '10s', 189 | panels, 190 | }) 191 | }, { provider }); 192 | }); 193 | } 194 | } 195 | 196 | export default WebServerSloDashboardBuilder; 197 | -------------------------------------------------------------------------------- /src/v2/components/grafana/index.ts: -------------------------------------------------------------------------------- 1 | export * as dashboard from './dashboards'; 2 | -------------------------------------------------------------------------------- /src/v2/components/prometheus/index.ts: -------------------------------------------------------------------------------- 1 | export * as queries from './queries'; 2 | -------------------------------------------------------------------------------- /src/v2/components/prometheus/queries.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, it } from 'node:test'; 2 | import * as assert from 'node:assert/strict'; 3 | import { 4 | getAvailabilityQuery, 5 | getSuccessRateQuery, 6 | getPercentileLatencyQuery, 7 | getLatencyPercentageQuery, 8 
| TimeRange 9 | } from './queries'; 10 | 11 | describe('Prometheus Query Builders', async () => { 12 | const namespace = 'app'; 13 | const timeRange: TimeRange = '2m'; 14 | const apiRouteFilter = 'http_route=~"/api/.*"'; 15 | 16 | describe('getAvailabilityQuery', async () => { 17 | it('should build correct query', () => { 18 | const result = getAvailabilityQuery(namespace, timeRange); 19 | const expected = 20 | `(sum(rate(${namespace}_http_server_duration_milliseconds_count{http_status_code!~"5.."}[${timeRange}]))) / ` + 21 | `(sum(rate(${namespace}_http_server_duration_milliseconds_count[${timeRange}]))) * 100`; 22 | assert.equal(result, expected); 23 | }); 24 | }); 25 | 26 | describe('getSuccessRateQuery', async () => { 27 | it('should build correct query', () => { 28 | const result = getSuccessRateQuery(namespace, timeRange, apiRouteFilter); 29 | const expected = 30 | `(sum(rate(${namespace}_http_server_duration_milliseconds_count{http_status_code=~"[2-4]..",${apiRouteFilter}}[2m]))) / ` + 31 | `(sum(rate(${namespace}_http_server_duration_milliseconds_count{${apiRouteFilter}}[2m]))) * 100`; 32 | assert.equal(result, expected); 33 | }); 34 | }); 35 | 36 | describe('getPercentileLatencyQuery', async () => { 37 | it('should build correct query', () => { 38 | const percentile = 0.95; 39 | const result = getPercentileLatencyQuery(namespace, timeRange, percentile, apiRouteFilter); 40 | const expected = 41 | `histogram_quantile(${percentile}, sum by(le) (rate(${namespace}_http_server_duration_milliseconds_bucket{${apiRouteFilter}}[${timeRange}])))`; 42 | assert.equal(result, expected); 43 | }); 44 | }); 45 | 46 | describe('getLatencyPercentageQuery', async () => { 47 | it('should build correct query', () => { 48 | const threshold = 200; 49 | const result = getLatencyPercentageQuery(namespace, timeRange, threshold, apiRouteFilter); 50 | const expected = 51 | `(sum(rate(${namespace}_http_server_duration_milliseconds_bucket{le="200",${apiRouteFilter}}[2m]))) / ` + 52 | `(sum(rate(${namespace}_http_server_duration_milliseconds_count{${apiRouteFilter}}[2m]))) * 100`; 53 | assert.equal(result, expected); 54 | }); 55 | }); 56 | }); 57 | -------------------------------------------------------------------------------- /src/v2/components/prometheus/queries.ts: -------------------------------------------------------------------------------- 1 | export type TimeRange = '30s' | '2m' | '5m' | '1h' | '1d'; 2 | 3 | const metricName = 'http_server_duration_milliseconds'; 4 | const countPostfix = 'count'; 5 | const bucketPostfix = 'bucket'; 6 | const httpStatusCodeLabel = 'http_status_code'; 7 | 8 | export function getBurnRateQuery( 9 | metricQuery: string, 10 | target: number 11 | ): string { 12 | return `(1 - ${metricQuery}) / ${(1 - target).toFixed(5)}` 13 | } 14 | 15 | export function getAvailabilityQuery( 16 | namespace: string, 17 | timeRange: TimeRange 18 | ): string { 19 | const successFilter = `${httpStatusCodeLabel}!~"5.."`; 20 | const successfulRequestsQuery = getCountRate( 21 | namespace, 22 | timeRange, 23 | successFilter 24 | ); 25 | const totalRequestsQuery = getCountRate( 26 | namespace, 27 | timeRange 28 | ); 29 | 30 | return `${successfulRequestsQuery} / ${totalRequestsQuery}`; 31 | } 32 | 33 | export function getAvailabilityPercentageQuery( 34 | namespace: string, 35 | timeRange: TimeRange 36 | ): string { 37 | return `${getAvailabilityQuery(namespace, timeRange)} * 100`; 38 | } 39 | 40 | export function getSuccessRateQuery( 41 | namespace: string, 42 | timeRange: TimeRange, 43 | filter: 
string 44 | ): string { 45 | const successFilter = [`${httpStatusCodeLabel}=~"[2-4].."`, filter].join(','); 46 | const totalFilter = filter; 47 | 48 | const successfulRequestsQuery = getCountRate( 49 | namespace, 50 | timeRange, 51 | successFilter 52 | ); 53 | const totalRequestsQuery = getCountRate( 54 | namespace, 55 | timeRange, 56 | totalFilter 57 | ); 58 | 59 | return `${successfulRequestsQuery} / ${totalRequestsQuery}`; 60 | } 61 | 62 | export function getSuccessPercentageQuery( 63 | namespace: string, 64 | timeRange: TimeRange, 65 | filter: string 66 | ): string { 67 | return `${getSuccessRateQuery(namespace, timeRange, filter)} * 100`; 68 | } 69 | 70 | export function getPercentileLatencyQuery( 71 | namespace: string, 72 | timeRange: TimeRange, 73 | percentile: number, 74 | filter: string 75 | ): string { 76 | const bucketMetric = getMetric(namespace, bucketPostfix, filter); 77 | const bucketRate = `sum by(le) (rate(${bucketMetric}[${timeRange}]))`; 78 | 79 | return `histogram_quantile(${percentile}, ${bucketRate})`; 80 | } 81 | 82 | export function getLatencyRateQuery( 83 | namespace: string, 84 | timeRange: TimeRange, 85 | threshold: number, 86 | filter?: string 87 | ): string { 88 | const filterWithThreshold = [`le="${threshold}"`, filter].join(','); 89 | 90 | const requestsUnderThreshold = getBucketRate( 91 | namespace, 92 | timeRange, 93 | filterWithThreshold 94 | ); 95 | const totalRequests = getCountRate(namespace, timeRange, filter); 96 | 97 | return `${requestsUnderThreshold} / ${totalRequests}`; 98 | } 99 | 100 | export function getLatencyPercentageQuery( 101 | namespace: string, 102 | timeRange: TimeRange, 103 | threshold: number, 104 | filter?: string 105 | ): string { 106 | return `${getLatencyRateQuery(namespace, timeRange, threshold, filter)} * 100`; 107 | } 108 | 109 | function getCountRate( 110 | namespace: string, 111 | timeRange: TimeRange, 112 | filter?: string 113 | ): string { 114 | const countMetric = getMetric(namespace, countPostfix, filter); 115 | 116 | return `sum(rate(${countMetric}[${timeRange}]))`; 117 | } 118 | 119 | function getBucketRate( 120 | namespace: string, 121 | timeRange: TimeRange, 122 | filter?: string 123 | ): string { 124 | const bucketMetric = getMetric(namespace, bucketPostfix, filter); 125 | 126 | return `sum(rate(${bucketMetric}[${timeRange}]))`; 127 | } 128 | 129 | function getMetric( 130 | namespace: string, 131 | postfix: string, 132 | filter?: string 133 | ): string { 134 | return `${namespace}_${metricName}_${postfix}${filter ? 
`{${filter}}` : ''}`; 135 | } 136 | -------------------------------------------------------------------------------- /src/v2/components/web-server/builder.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import * as awsx from '@pulumi/awsx'; 3 | import { EcsService } from '../ecs-service'; 4 | import { WebServer } from '.'; 5 | import { OtelCollector } from '../../otel'; 6 | 7 | export namespace WebServerBuilder { 8 | export type EcsConfig = Omit; 9 | 10 | export type Args = Omit< 11 | WebServer.Args, 12 | | 'vpc' 13 | | 'publicSubnetIds' 14 | | 'cluster' 15 | | 'volumes' 16 | | 'domain' 17 | | 'hostedZoneId' 18 | | 'otelCollectorConfig' 19 | > 20 | } 21 | 22 | export class WebServerBuilder { 23 | private _name: string; 24 | private _container?: WebServer.Container; 25 | private _vpc?: pulumi.Output; 26 | private _ecsConfig?: WebServerBuilder.EcsConfig; 27 | private _domain?: pulumi.Input; 28 | private _hostedZoneId?: pulumi.Input; 29 | private _healthCheckPath?: pulumi.Input; 30 | private _otelCollector?: pulumi.Input; 31 | private _initContainers: pulumi.Input[] = []; 32 | private _sidecarContainers: pulumi.Input[] = []; 33 | private _volumes: EcsService.PersistentStorageVolume[] = []; 34 | 35 | constructor(name: string) { 36 | this._name = name; 37 | } 38 | 39 | public configureWebServer( 40 | image: WebServer.Container['image'], 41 | port: WebServer.Container['port'], 42 | config: Omit = {} 43 | ): this { 44 | this._container = { 45 | image, 46 | port, 47 | ...config 48 | }; 49 | 50 | return this; 51 | } 52 | 53 | public configureEcs(config: WebServerBuilder.EcsConfig): this { 54 | this._ecsConfig = { 55 | cluster: config.cluster, 56 | desiredCount: config.desiredCount, 57 | autoscaling: config.autoscaling, 58 | size: config.size, 59 | taskExecutionRoleInlinePolicies: config.taskExecutionRoleInlinePolicies, 60 | taskRoleInlinePolicies: config.taskRoleInlinePolicies, 61 | tags: config.tags, 62 | } 63 | 64 | return this; 65 | } 66 | 67 | public withVpc(vpc: pulumi.Input): this { 68 | this._vpc = pulumi.output(vpc); 69 | 70 | return this; 71 | } 72 | 73 | public withVolume(volume: EcsService.PersistentStorageVolume): this { 74 | this._volumes.push(volume); 75 | 76 | return this; 77 | } 78 | 79 | public withCustomDomain( 80 | domain: pulumi.Input, 81 | hostedZoneId: pulumi.Input 82 | ): this { 83 | this._domain = domain; 84 | this._hostedZoneId = hostedZoneId; 85 | 86 | return this; 87 | } 88 | 89 | public withInitContainer(container: WebServer.InitContainer): this { 90 | this._initContainers.push(container); 91 | 92 | return this; 93 | } 94 | 95 | public withSidecarContainer(container: WebServer.SidecarContainer): this { 96 | this._sidecarContainers.push(container); 97 | 98 | return this; 99 | } 100 | 101 | public withOtelCollector(collector: OtelCollector): this { 102 | this._otelCollector = collector; 103 | 104 | return this; 105 | } 106 | 107 | public withCustomHealthCheckPath( 108 | path: WebServer.Args['healthCheckPath'] 109 | ): this { 110 | this._healthCheckPath = path; 111 | 112 | return this; 113 | } 114 | 115 | public build(opts: pulumi.ComponentResourceOptions = {}): WebServer { 116 | if (!this._container) { 117 | throw new Error('Web server not configured. Make sure to call WebServerBuilder.configureWebServer().'); 118 | } 119 | if (!this._ecsConfig) { 120 | throw new Error('ECS not configured. 
Make sure to call WebServerBuilder.configureEcs().'); 121 | } 122 | if (!this._vpc) { 123 | throw new Error('VPC not provided. Make sure to call WebServerBuilder.withVpc().'); 124 | } 125 | 126 | return new WebServer(this._name, { 127 | ...this._ecsConfig, 128 | ...this._container, 129 | vpc: this._vpc, 130 | volumes: this._volumes, 131 | publicSubnetIds: this._vpc.publicSubnetIds, 132 | domain: this._domain, 133 | hostedZoneId: this._hostedZoneId, 134 | healthCheckPath: this._healthCheckPath, 135 | otelCollector: this._otelCollector, 136 | initContainers: this._initContainers, 137 | sidecarContainers: this._sidecarContainers 138 | }, opts) 139 | } 140 | } 141 | -------------------------------------------------------------------------------- /src/v2/components/web-server/index.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import * as aws from '@pulumi/aws'; 3 | import * as awsx from '@pulumi/awsx'; 4 | import { commonTags } from '../../../constants'; 5 | import { AcmCertificate } from '../../../components/acm-certificate'; 6 | import { EcsService } from '../ecs-service'; 7 | import { WebServerLoadBalancer } from './load-balancer'; 8 | import { OtelCollector } from '../../otel'; 9 | 10 | export namespace WebServer { 11 | export type Container = Pick< 12 | EcsService.Container, 13 | 'image' | 'environment' | 'secrets' | 'mountPoints' 14 | > & { 15 | port: pulumi.Input; 16 | }; 17 | 18 | export type EcsConfig = Pick< 19 | EcsService.Args, 20 | | 'cluster' 21 | | 'vpc' 22 | | 'volumes' 23 | | 'desiredCount' 24 | | 'autoscaling' 25 | | 'size' 26 | | 'taskExecutionRoleInlinePolicies' 27 | | 'taskRoleInlinePolicies' 28 | | 'tags' 29 | >; 30 | 31 | export type InitContainer = Omit; 32 | export type SidecarContainer = 33 | & Omit 34 | & Required>; 35 | 36 | export type Args = EcsConfig 37 | & Container 38 | & { 39 | // TODO: Automatically use subnet IDs from passed `vpc` 40 | publicSubnetIds: pulumi.Input[]>; 41 | /** 42 | * The domain which will be used to access the service. 43 | * The domain or subdomain must belong to the provided hostedZone. 44 | */ 45 | domain?: pulumi.Input; 46 | hostedZoneId?: pulumi.Input; 47 | /** 48 | * Path for the load balancer target group health check request. 
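 * A small usage sketch via the builder (the path value is an arbitrary example):
 * @example
 * new WebServerBuilder('api').withCustomHealthCheckPath('/status');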
49 | * 50 | * @default 51 | * "/healthcheck" 52 | */ 53 | healthCheckPath?: pulumi.Input; 54 | initContainers?: pulumi.Input[]>; 55 | sidecarContainers?: pulumi.Input[]>; 56 | otelCollector?: pulumi.Input; 57 | }; 58 | } 59 | 60 | export class WebServer extends pulumi.ComponentResource { 61 | name: string; 62 | container: WebServer.Container; 63 | ecsConfig: WebServer.EcsConfig; 64 | service: pulumi.Output; 65 | serviceSecurityGroup: aws.ec2.SecurityGroup; 66 | lb: WebServerLoadBalancer; 67 | initContainers?: pulumi.Output; 68 | sidecarContainers?: pulumi.Output; 69 | volumes?: pulumi.Output; 70 | certificate?: AcmCertificate; 71 | dnsRecord?: aws.route53.Record; 72 | 73 | constructor( 74 | name: string, 75 | args: WebServer.Args, 76 | opts: pulumi.ComponentResourceOptions = {}, 77 | ) { 78 | super('studion:WebServer', name, args, opts); 79 | 80 | const { vpc, domain, hostedZoneId } = args; 81 | 82 | if (domain && !hostedZoneId) { 83 | throw new Error( 84 | 'WebServer:hostedZoneId must be provided when the domain is specified', 85 | ); 86 | } 87 | const hasCustomDomain = !!domain && !!hostedZoneId; 88 | if (hasCustomDomain) { 89 | this.certificate = this.createTlsCertificate({ domain, hostedZoneId }); 90 | } 91 | 92 | this.name = name; 93 | this.lb = new WebServerLoadBalancer(`${this.name}-lb`, { 94 | vpc, 95 | port: args.port, 96 | certificate: this.certificate?.certificate, 97 | healthCheckPath: args.healthCheckPath 98 | }); 99 | this.serviceSecurityGroup = this.createSecurityGroup(vpc); 100 | 101 | this.initContainers = this.getInitContainers(args); 102 | this.sidecarContainers = this.getSidecarContainers(args); 103 | this.container = this.createWebServerContainer(args); 104 | this.ecsConfig = this.createEcsConfig(args); 105 | this.volumes = this.getVolumes(args); 106 | 107 | // TODO: Move output mapping to createEcsService 108 | this.service = pulumi.all([ 109 | this.initContainers, 110 | this.sidecarContainers, 111 | ]).apply(([ 112 | initContainers, 113 | sidecarContainers, 114 | ]) => { 115 | return this.createEcsService( 116 | this.container, 117 | this.lb, 118 | this.ecsConfig, 119 | this.volumes, 120 | [...initContainers, ...sidecarContainers] 121 | ) 122 | }); 123 | 124 | if (hasCustomDomain) { 125 | this.dnsRecord = this.createDnsRecord({ domain, hostedZoneId }); 126 | } 127 | 128 | this.registerOutputs(); 129 | } 130 | 131 | private getVolumes(args: WebServer.Args): pulumi.Output { 132 | return pulumi.all([ 133 | pulumi.output(args.volumes), 134 | args.otelCollector 135 | ]).apply(([passedVolumes, otelCollector]) => { 136 | const volumes = []; 137 | if (passedVolumes) volumes.push(...passedVolumes); 138 | if (otelCollector) volumes.push({ name: otelCollector.configVolume }); 139 | 140 | return volumes; 141 | }); 142 | } 143 | 144 | private getInitContainers(args: WebServer.Args): pulumi.Output { 145 | return pulumi.all([ 146 | pulumi.output(args.initContainers), 147 | args.otelCollector 148 | ]).apply(([passedInits, otelCollector]) => { 149 | const containers = []; 150 | if (passedInits) containers.push(...passedInits); 151 | if (otelCollector) containers.push(otelCollector.configContainer); 152 | 153 | return containers.map(container => ({ ...container, essential: false })); 154 | }); 155 | } 156 | 157 | private getSidecarContainers(args: WebServer.Args): pulumi.Output { 158 | return pulumi.all([ 159 | pulumi.output(args.sidecarContainers), 160 | args.otelCollector 161 | ]).apply(([passedSidecars, otelCollector]) => { 162 | const containers = []; 163 | if (passedSidecars) 
containers.push(...passedSidecars); 164 | if (otelCollector) containers.push(otelCollector.container); 165 | 166 | return containers.map(container => ({ ...container, essential: true })); 167 | }); 168 | } 169 | 170 | private getTaskRoleInlinePolicies( 171 | args: WebServer.Args 172 | ): pulumi.Output { 173 | return pulumi.all([ 174 | pulumi.output(args.taskRoleInlinePolicies), 175 | args.otelCollector 176 | ]).apply(([passedTaskRoleInlinePolicies, otelCollector]) => { 177 | const inlinePolicies = []; 178 | if (passedTaskRoleInlinePolicies) inlinePolicies.push(...passedTaskRoleInlinePolicies); 179 | if (otelCollector && otelCollector.taskRoleInlinePolicies) { 180 | inlinePolicies.push(...otelCollector.taskRoleInlinePolicies); 181 | } 182 | 183 | return inlinePolicies; 184 | }); 185 | } 186 | 187 | 188 | private createEcsConfig(args: WebServer.Args): WebServer.EcsConfig { 189 | return { 190 | vpc: args.vpc, 191 | cluster: args.cluster, 192 | desiredCount: args.desiredCount, 193 | autoscaling: args.autoscaling, 194 | size: args.size, 195 | taskExecutionRoleInlinePolicies: args.taskExecutionRoleInlinePolicies, 196 | taskRoleInlinePolicies: this.getTaskRoleInlinePolicies(args), 197 | tags: args.tags, 198 | }; 199 | } 200 | 201 | private createWebServerContainer(args: WebServer.Args): WebServer.Container { 202 | return { 203 | image: args.image, 204 | mountPoints: args.mountPoints, 205 | environment: args.environment, 206 | secrets: args.secrets, 207 | port: args.port 208 | }; 209 | } 210 | 211 | private createTlsCertificate({ 212 | domain, 213 | hostedZoneId, 214 | }: Pick, 'domain' | 'hostedZoneId'>): AcmCertificate { 215 | return new AcmCertificate(`${domain}-acm-certificate`, { 216 | domain, 217 | hostedZoneId, 218 | }, { parent: this }); 219 | } 220 | 221 | private createSecurityGroup( 222 | vpc: pulumi.Input 223 | ): aws.ec2.SecurityGroup { 224 | const vpcId = pulumi.output(vpc).vpcId; 225 | return new aws.ec2.SecurityGroup( 226 | `${this.name}-security-group`, { 227 | vpcId, 228 | ingress: [{ 229 | fromPort: 0, 230 | toPort: 0, 231 | protocol: '-1', 232 | securityGroups: [this.lb.securityGroup.id], 233 | }], 234 | egress: [{ 235 | fromPort: 0, 236 | toPort: 0, 237 | protocol: '-1', 238 | cidrBlocks: ['0.0.0.0/0'], 239 | }], 240 | tags: commonTags, 241 | }, { parent: this }); 242 | } 243 | 244 | private createEcsService( 245 | webServerContainer: WebServer.Container, 246 | lb: WebServerLoadBalancer, 247 | ecsConfig: WebServer.EcsConfig, 248 | volumes?: pulumi.Output, 249 | containers?: EcsService.Container[] 250 | ): EcsService { 251 | return new EcsService(`${this.name}-ecs`, { 252 | ...ecsConfig, 253 | volumes, 254 | containers: [{ 255 | ...webServerContainer, 256 | name: this.name, 257 | portMappings: [EcsService.createTcpPortMapping(webServerContainer.port)], 258 | essential: true 259 | }, ...(containers || [])], 260 | enableServiceAutoDiscovery: false, 261 | loadBalancers: [{ 262 | containerName: this.name, 263 | containerPort: webServerContainer.port, 264 | targetGroupArn: lb.targetGroup.arn, 265 | }], 266 | assignPublicIp: true, 267 | securityGroup: this.serviceSecurityGroup, 268 | }, { 269 | parent: this, 270 | dependsOn: [lb, lb.targetGroup], 271 | }); 272 | } 273 | 274 | private createDnsRecord({ 275 | domain, 276 | hostedZoneId, 277 | }: Pick< 278 | Required, 279 | 'domain' | 'hostedZoneId' 280 | >): aws.route53.Record { 281 | return new aws.route53.Record(`${this.name}-route53-record`, { 282 | type: 'A', 283 | name: domain, 284 | zoneId: hostedZoneId, 285 | aliases: 
[{ 286 | name: this.lb.lb.dnsName, 287 | zoneId: this.lb.lb.zoneId, 288 | evaluateTargetHealth: true, 289 | }], 290 | }, { parent: this }); 291 | } 292 | } 293 | -------------------------------------------------------------------------------- /src/v2/components/web-server/load-balancer.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import * as aws from '@pulumi/aws'; 3 | import * as awsx from '@pulumi/awsx'; 4 | import { commonTags } from '../../../constants'; 5 | 6 | export namespace WebServerLoadBalancer { 7 | export type Args = { 8 | vpc: pulumi.Input; 9 | port: pulumi.Input; 10 | certificate?: aws.acm.Certificate; 11 | healthCheckPath?: pulumi.Input; 12 | }; 13 | } 14 | 15 | const webServerLoadBalancerNetworkConfig = { 16 | ingress: [{ 17 | protocol: 'tcp', 18 | fromPort: 80, 19 | toPort: 80, 20 | cidrBlocks: ['0.0.0.0/0'], 21 | }, { 22 | protocol: 'tcp', 23 | fromPort: 443, 24 | toPort: 443, 25 | cidrBlocks: ['0.0.0.0/0'], 26 | }], 27 | egress: [{ 28 | fromPort: 0, 29 | toPort: 0, 30 | protocol: '-1', 31 | cidrBlocks: ['0.0.0.0/0'], 32 | }] 33 | }; 34 | 35 | const defaults = { 36 | healthCheckPath: '/healthcheck', 37 | }; 38 | 39 | export class WebServerLoadBalancer extends pulumi.ComponentResource { 40 | name: string; 41 | lb: aws.lb.LoadBalancer; 42 | targetGroup: aws.lb.TargetGroup; 43 | httpListener: aws.lb.Listener; 44 | tlsListener: aws.lb.Listener | undefined; 45 | securityGroup: aws.ec2.SecurityGroup; 46 | 47 | constructor( 48 | name: string, 49 | args: WebServerLoadBalancer.Args, 50 | opts: pulumi.ComponentResourceOptions = {} 51 | ) { 52 | super('studion:WebServerLoadBalancer', name, args, opts); 53 | 54 | this.name = name; 55 | const vpc = pulumi.output(args.vpc); 56 | const { port, certificate, healthCheckPath } = args; 57 | 58 | this.securityGroup = this.createLbSecurityGroup(vpc.vpcId); 59 | 60 | this.lb = new aws.lb.LoadBalancer(this.name, { 61 | namePrefix: 'lb-', 62 | loadBalancerType: 'application', 63 | subnets: vpc.publicSubnetIds, 64 | securityGroups: [this.securityGroup.id], 65 | internal: false, 66 | ipAddressType: 'ipv4', 67 | tags: { ...commonTags, Name: name }, 68 | }, { parent: this }); 69 | 70 | this.targetGroup = this.createLbTargetGroup( 71 | port, 72 | vpc.vpcId, 73 | healthCheckPath 74 | ); 75 | this.httpListener = this.createLbHttpListener( 76 | this.lb, 77 | this.targetGroup, 78 | !!certificate 79 | ); 80 | this.tlsListener = certificate && 81 | this.createLbTlsListener(this.lb, this.targetGroup, certificate); 82 | 83 | this.registerOutputs(); 84 | } 85 | 86 | private createLbTlsListener( 87 | lb: aws.lb.LoadBalancer, 88 | lbTargetGroup: aws.lb.TargetGroup, 89 | certificate: aws.acm.Certificate 90 | ): aws.lb.Listener { 91 | return new aws.lb.Listener(`${this.name}-listener-443`, { 92 | loadBalancerArn: lb.arn, 93 | port: 443, 94 | protocol: 'HTTPS', 95 | sslPolicy: 'ELBSecurityPolicy-2016-08', 96 | certificateArn: certificate.arn, 97 | defaultActions: [{ 98 | type: 'forward', 99 | targetGroupArn: lbTargetGroup.arn, 100 | }], 101 | tags: commonTags, 102 | }, { parent: this, dependsOn: [certificate] }); 103 | } 104 | 105 | private createLbHttpListener( 106 | lb: aws.lb.LoadBalancer, 107 | lbTargetGroup: aws.lb.TargetGroup, 108 | redirectToHttps: boolean 109 | ): aws.lb.Listener { 110 | const httpsRedirectAction = { 111 | type: 'redirect', 112 | redirect: { 113 | port: '443', 114 | protocol: 'HTTPS', 115 | statusCode: 'HTTP_301', 116 | }, 117 | }; 118 | const 
defaultAction = redirectToHttps ? httpsRedirectAction : { 119 | type: 'forward', 120 | targetGroupArn: lbTargetGroup.arn, 121 | }; 122 | 123 | return new aws.lb.Listener(`${this.name}-listener-80`, { 124 | loadBalancerArn: lb.arn, 125 | port: 80, 126 | defaultActions: [defaultAction], 127 | tags: commonTags, 128 | }, { parent: this }); 129 | } 130 | 131 | private createLbTargetGroup( 132 | port: pulumi.Input, 133 | vpcId: awsx.ec2.Vpc['vpcId'], 134 | healthCheckPath: pulumi.Input | undefined 135 | ): aws.lb.TargetGroup { 136 | return new aws.lb.TargetGroup(`${this.name}-tg`, { 137 | namePrefix: 'lb-tg-', 138 | port, 139 | protocol: 'HTTP', 140 | targetType: 'ip', 141 | vpcId, 142 | healthCheck: { 143 | healthyThreshold: 3, 144 | unhealthyThreshold: 2, 145 | interval: 60, 146 | timeout: 5, 147 | path: healthCheckPath || defaults.healthCheckPath, 148 | }, 149 | tags: { ...commonTags, Name: `${this.name}-target-group` }, 150 | }, { parent: this, dependsOn: [this.lb] }); 151 | } 152 | 153 | private createLbSecurityGroup( 154 | vpcId: awsx.ec2.Vpc['vpcId'] 155 | ): aws.ec2.SecurityGroup { 156 | return new aws.ec2.SecurityGroup(`${this.name}-security-group`, { 157 | ...webServerLoadBalancerNetworkConfig, 158 | vpcId, 159 | tags: commonTags, 160 | }, { parent: this }); 161 | } 162 | } 163 | -------------------------------------------------------------------------------- /src/v2/index.ts: -------------------------------------------------------------------------------- 1 | export { EcsService } from './components/ecs-service'; 2 | export { WebServer } from './components/web-server'; 3 | export { WebServerBuilder } from './components/web-server/builder'; 4 | export { WebServerLoadBalancer } from './components/web-server/load-balancer'; 5 | 6 | import { OtelCollectorBuilder } from './otel/builder'; 7 | import { OtelCollector } from './otel'; 8 | export const openTelemetry = { OtelCollector, OtelCollectorBuilder }; 9 | 10 | export * as grafana from './components/grafana'; 11 | export * as prometheus from './components/prometheus'; 12 | -------------------------------------------------------------------------------- /src/v2/otel/batch-processor.ts: -------------------------------------------------------------------------------- 1 | export namespace BatchProcessor { 2 | export type Config = { 3 | send_batch_size: number; 4 | send_batch_max_size: number; 5 | timeout: string; 6 | }; 7 | } 8 | 9 | export const defaults = { 10 | name: 'batch', 11 | size: 8192, 12 | maxSize: 10000, 13 | timeout: '5s' 14 | }; 15 | -------------------------------------------------------------------------------- /src/v2/otel/builder.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import * as aws from '@pulumi/aws'; 3 | import * as batchProcessor from './batch-processor'; 4 | import * as memoryLimiterProcessor from './memory-limiter-processor'; 5 | import { OtelCollector } from '.'; 6 | import { OtelCollectorConfigBuilder } from './config'; 7 | import { EcsService } from '../components/ecs-service'; 8 | import { OTLPReceiver } from './otlp-receiver'; 9 | 10 | export class OtelCollectorBuilder { 11 | private readonly _serviceName: pulumi.Output; 12 | private readonly _env: pulumi.Output; 13 | private readonly _configBuilder: OtelCollectorConfigBuilder; 14 | private _taskRoleInlinePolicies: pulumi.Output[] = []; 15 | 16 | constructor( 17 | serviceName: pulumi.Input, 18 | env: pulumi.Input 19 | ) { 20 | this._serviceName = 
pulumi.output(serviceName); 21 | this._env = pulumi.output(env); 22 | this._configBuilder = new OtelCollectorConfigBuilder(); 23 | } 24 | 25 | withOTLPReceiver( 26 | protocols: OTLPReceiver.Protocol[] = ['http'] 27 | ): this { 28 | this._configBuilder.withOTLPReceiver(protocols); 29 | 30 | return this; 31 | } 32 | 33 | withBatchProcessor( 34 | name = batchProcessor.defaults.name, 35 | size = batchProcessor.defaults.size, 36 | maxSize = batchProcessor.defaults.maxSize, 37 | timeout = batchProcessor.defaults.timeout 38 | ): this { 39 | this._configBuilder.withBatchProcessor(name, size, maxSize, timeout); 40 | 41 | return this; 42 | } 43 | 44 | withMemoryLimiterProcessor( 45 | checkInterval = memoryLimiterProcessor.defaults.checkInterval, 46 | limitPercentage = memoryLimiterProcessor.defaults.limitPercentage, 47 | spikeLimitPercentage = memoryLimiterProcessor.defaults.spikeLimitPercentage 48 | ): this { 49 | this._configBuilder.withMemoryLimiterProcessor( 50 | checkInterval, 51 | limitPercentage, 52 | spikeLimitPercentage 53 | ); 54 | 55 | return this; 56 | } 57 | 58 | withAWSXRayExporter(region: string): this { 59 | this._configBuilder.withAWSXRayExporter(region); 60 | this.createAWSXRayPolicy(); 61 | 62 | return this; 63 | } 64 | 65 | withHealthCheckExtension(endpoint = '0.0.0.0:13133'): this { 66 | this._configBuilder.withHealthCheckExtension(endpoint); 67 | 68 | return this; 69 | } 70 | 71 | withPprofExtension(endpoint = '0.0.0.0:1777'): this { 72 | this._configBuilder.withPprofExtension(endpoint); 73 | 74 | return this; 75 | } 76 | 77 | withAPS( 78 | namespace: pulumi.Input, 79 | workspace: aws.amp.Workspace, 80 | region: string 81 | ): this { 82 | this._configBuilder.withAPS( 83 | pulumi.output(namespace), 84 | pulumi.interpolate`${workspace.prometheusEndpoint}api/v1/remote_write`, 85 | region 86 | ); 87 | this.createAPSInlinePolicy(workspace); 88 | 89 | return this; 90 | } 91 | 92 | withDebug(verbosity: 'normal' | 'basic' | 'detailed' = 'detailed'): this { 93 | this._configBuilder.withDebug(verbosity); 94 | 95 | return this; 96 | } 97 | 98 | withTelemetry( 99 | logLevel: 'debug' | 'warn' | 'error' = 'error', 100 | metricsVerbosity: 'basic' | 'normal' | 'detailed' = 'basic' 101 | ): this { 102 | this._configBuilder.withTelemetry(logLevel, metricsVerbosity); 103 | 104 | return this; 105 | } 106 | 107 | withMetricsPipeline( 108 | receivers: OtelCollector.ReceiverType[], 109 | processors: OtelCollector.ProcessorType[], 110 | exporters: OtelCollector.ExporterType[], 111 | ): this { 112 | this._configBuilder.withMetricsPipeline( 113 | receivers, 114 | processors, 115 | exporters 116 | ); 117 | 118 | return this; 119 | } 120 | 121 | withTracesPipeline( 122 | receivers: OtelCollector.ReceiverType[], 123 | processors: OtelCollector.ProcessorType[], 124 | exporters: OtelCollector.ExporterType[], 125 | ): this { 126 | this._configBuilder.withTracesPipeline( 127 | receivers, 128 | processors, 129 | exporters 130 | ); 131 | 132 | return this; 133 | } 134 | 135 | withDefault( 136 | prometheusNamespace: pulumi.Input, 137 | prometheusWorkspace: aws.amp.Workspace, 138 | awsRegion: string 139 | ): this { 140 | this._configBuilder.withDefault( 141 | pulumi.output(prometheusNamespace), 142 | pulumi.interpolate`${prometheusWorkspace.prometheusEndpoint}api/v1/remote_write`, 143 | awsRegion 144 | ); 145 | this.createAPSInlinePolicy(prometheusWorkspace); 146 | this.createAWSXRayPolicy(); 147 | 148 | return this; 149 | } 150 | 151 | build(): OtelCollector { 152 | return new OtelCollector( 153 | 
this._serviceName, 154 | this._env, 155 | this._configBuilder.build(), 156 | { taskRoleInlinePolicies: this._taskRoleInlinePolicies } 157 | ); 158 | } 159 | 160 | private createAPSInlinePolicy(workspace: aws.amp.Workspace): void { 161 | const policy: pulumi.Output = pulumi.all(([ 162 | this._serviceName, 163 | workspace.arn 164 | ])).apply(([serviceName, workspaceArn]) => ({ 165 | name: `${serviceName}-task-role-aps-write`, 166 | policy: JSON.stringify({ 167 | Version: '2012-10-17', 168 | Statement: [{ 169 | Effect: 'Allow', 170 | Action: ['aps:RemoteWrite'], 171 | Resource: workspaceArn, 172 | }], 173 | }), 174 | })); 175 | 176 | this._taskRoleInlinePolicies.push(policy); 177 | } 178 | 179 | private createAWSXRayPolicy() { 180 | const policy: pulumi.Output = this._serviceName 181 | .apply(serviceName => ({ 182 | name: `${serviceName}-task-role-xray`, 183 | policy: JSON.stringify({ 184 | Version: '2012-10-17', 185 | Statement: [{ 186 | Effect: 'Allow', 187 | Action: [ 188 | 'xray:PutTraceSegments', 189 | 'xray:PutTelemetryRecords', 190 | 'xray:GetSamplingRules', 191 | 'xray:GetSamplingTargets', 192 | 'xray:GetSamplingStatisticSummaries', 193 | ], 194 | Resource: '*', 195 | }], 196 | }), 197 | })); 198 | 199 | this._taskRoleInlinePolicies.push(policy); 200 | } 201 | } 202 | -------------------------------------------------------------------------------- /src/v2/otel/config.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import { OTLPReceiver, Protocol } from './otlp-receiver'; 3 | import type { OtelCollector } from '.'; 4 | 5 | export class OtelCollectorConfigBuilder { 6 | private readonly _receivers: OtelCollector.Receiver = {}; 7 | private readonly _processors: OtelCollector.Processor = {}; 8 | private readonly _exporters: OtelCollector.Exporter = {}; 9 | private readonly _extensions: OtelCollector.Extension = {}; 10 | private readonly _service: OtelCollector.Service = { 11 | pipelines: {} 12 | }; 13 | 14 | withOTLPReceiver( 15 | protocols: OTLPReceiver.Protocol[] = ['http'] 16 | ): this { 17 | if (!protocols.length) { 18 | throw new Error('At least one OTLP receiver protocol should be provided'); 19 | } 20 | 21 | const protocolsConfig = protocols.reduce((all, current) => { 22 | const protocolConfig = Protocol[current]; 23 | if (!protocolConfig) { 24 | throw new Error(`OTLP receiver protocol ${current} is not supported`); 25 | } 26 | 27 | return { ...all, [current]: protocolConfig } 28 | }, {}); 29 | 30 | this._receivers.otlp = { protocols: protocolsConfig }; 31 | 32 | return this; 33 | } 34 | 35 | withBatchProcessor( 36 | name = 'batch', 37 | size = 8192, 38 | maxSize = 10000, 39 | timeout = '5s' 40 | ): this { 41 | this._processors[name] = { 42 | 'send_batch_size': size, 43 | 'send_batch_max_size': maxSize, 44 | timeout 45 | }; 46 | 47 | return this; 48 | } 49 | 50 | withMemoryLimiterProcessor( 51 | checkInterval = '1s', 52 | limitPercentage = 80, 53 | spikeLimitPercentage = 15 54 | ): this { 55 | this._processors.memory_limiter = { 56 | check_interval: checkInterval, 57 | limit_percentage: limitPercentage, 58 | spike_limit_percentage: spikeLimitPercentage 59 | }; 60 | 61 | return this; 62 | } 63 | 64 | withAWSXRayExporter(region: string): this { 65 | this._exporters.awsxray = { region }; 66 | 67 | return this; 68 | } 69 | 70 | withHealthCheckExtension(endpoint = '0.0.0.0:13133'): this { 71 | this._extensions.health_check = { endpoint }; 72 | 73 | return this; 74 | } 75 | 76 | 
withPprofExtension(endpoint = '0.0.0.0:1777'): this { 77 | this._extensions.pprof = { endpoint }; 78 | 79 | return this; 80 | } 81 | 82 | withAPS( 83 | namespace: pulumi.Input, 84 | endpoint: pulumi.Input, 85 | region: string 86 | ): this { 87 | this._exporters.prometheusremotewrite = { 88 | endpoint, 89 | namespace, 90 | auth: { 91 | authenticator: 'sigv4auth' 92 | } 93 | }; 94 | 95 | this._extensions.sigv4auth = { 96 | region, 97 | service: 'aps' 98 | }; 99 | 100 | return this; 101 | } 102 | 103 | withDebug(verbosity: 'normal' | 'basic' | 'detailed' = 'detailed'): this { 104 | this._exporters.debug = { verbosity }; 105 | 106 | return this; 107 | } 108 | 109 | withTelemetry( 110 | logLevel: 'debug' | 'warn' | 'error' = 'error', 111 | metricsVerbosity: 'basic' | 'normal' | 'detailed' = 'basic' 112 | ): this { 113 | this._service.telemetry = { 114 | logs: { level: logLevel }, 115 | metrics: { level: metricsVerbosity } 116 | }; 117 | 118 | return this; 119 | } 120 | 121 | withMetricsPipeline( 122 | receivers: OtelCollector.ReceiverType[], 123 | processors: OtelCollector.ProcessorType[], 124 | exporters: OtelCollector.ExporterType[], 125 | ): this { 126 | this._service.pipelines.metrics = { 127 | receivers, 128 | processors, 129 | exporters 130 | }; 131 | 132 | return this; 133 | } 134 | 135 | withTracesPipeline( 136 | receivers: OtelCollector.ReceiverType[], 137 | processors: OtelCollector.ProcessorType[], 138 | exporters: OtelCollector.ExporterType[], 139 | ): this { 140 | this._service.pipelines.traces = { 141 | receivers, 142 | processors, 143 | exporters 144 | }; 145 | 146 | return this; 147 | } 148 | 149 | withDefault( 150 | prometheusNamespace: pulumi.Input, 151 | prometheusWriteEndpoint: pulumi.Input, 152 | awsRegion: string 153 | ): this { 154 | return this.withOTLPReceiver(['http']) 155 | .withMemoryLimiterProcessor() 156 | .withBatchProcessor('batch/metrics') 157 | .withBatchProcessor('batch/traces', 2000, 5000, '2s') 158 | .withAPS(prometheusNamespace, prometheusWriteEndpoint, awsRegion) 159 | .withAWSXRayExporter(awsRegion) 160 | .withHealthCheckExtension() 161 | .withMetricsPipeline( 162 | ['otlp'], 163 | ['memory_limiter', 'batch/metrics'], 164 | ['prometheusremotewrite'] 165 | ) 166 | .withTracesPipeline( 167 | ['otlp'], 168 | ['memory_limiter', 'batch/traces'], 169 | ['awsxray'] 170 | ) 171 | .withTelemetry(); 172 | } 173 | 174 | build(): OtelCollector.Config { 175 | this.validatePipelineComponents('metrics'); 176 | this.validatePipelineComponents('traces'); 177 | this.validatePipelineProcessorOrder('metrics'); 178 | this.validatePipelineProcessorOrder('traces'); 179 | 180 | // FIX: Fix type inference 181 | const extensions = Object.keys( 182 | this._extensions 183 | ) as OtelCollector.ExtensionType[]; 184 | if (extensions.length) { 185 | this._service.extensions = extensions; 186 | } 187 | 188 | // TODO: Add schema validation (non-empty receivers, non-empty receiver protocols) 189 | return { 190 | receivers: this._receivers, 191 | processors: this._processors, 192 | exporters: this._exporters, 193 | extensions: this._extensions, 194 | service: this._service 195 | }; 196 | } 197 | 198 | private validatePipelineProcessorOrder(pipelineType: 'metrics' | 'traces'): void { 199 | const pipeline = this._service.pipelines[pipelineType]; 200 | if (!pipeline) return; 201 | 202 | const { processors } = pipeline; 203 | const memoryLimiterIndex = processors 204 | .findIndex(processor => processor === 'memory_limiter'); 205 | if (memoryLimiterIndex > 0) { 206 | throw new 
Error(`memory_limiter processor is not the first processor in the ${pipelineType} pipeline.`); 207 | } 208 | } 209 | 210 | private validatePipelineComponents(pipelineType: 'metrics' | 'traces'): void { 211 | this._service.pipelines[pipelineType]?.receivers.forEach(receiver => { 212 | if (!this._receivers[receiver]) { 213 | throw new Error(`Receiver '${receiver}' is used in ${pipelineType} pipeline but not defined`); 214 | } 215 | }); 216 | 217 | this._service.pipelines[pipelineType]?.processors.forEach(processor => { 218 | if (!this._processors[processor]) { 219 | throw new Error(`Processor '${processor}' is used in ${pipelineType} pipeline but not defined`); 220 | } 221 | }); 222 | 223 | this._service.pipelines[pipelineType]?.exporters.forEach(exporter => { 224 | if (!this._exporters[exporter]) { 225 | throw new Error(`Exporter '${exporter}' is used in ${pipelineType} pipeline but not defined`); 226 | } 227 | }); 228 | } 229 | } 230 | -------------------------------------------------------------------------------- /src/v2/otel/index.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | import * as aws from '@pulumi/aws'; 3 | import * as yaml from 'yaml'; 4 | import { EcsService } from '../components/ecs-service'; 5 | import { OTLPReceiver } from './otlp-receiver'; 6 | import { BatchProcessor } from './batch-processor'; 7 | import { MemoryLimiterProcessor } from './memory-limiter-processor'; 8 | import { PrometheusRemoteWriteExporter } from './prometheus-remote-write-exporter'; 9 | 10 | export namespace OtelCollector { 11 | export type Receiver = { 12 | otlp?: OTLPReceiver.Config; 13 | }; 14 | export type ReceiverType = keyof Receiver; 15 | 16 | export type Processor = { 17 | batch?: BatchProcessor.Config; 18 | memory_limiter?: MemoryLimiterProcessor.Config; 19 | } & { 20 | [name: string]: BatchProcessor.Config; 21 | }; 22 | export type ProcessorType = keyof Processor; 23 | 24 | export type AwsXRayExporterConfig = { 25 | region: string; 26 | endpoint?: string; 27 | }; 28 | 29 | export type DebugExportedConfig = { 30 | verbosity: string; 31 | }; 32 | 33 | export type Exporter = { 34 | prometheusremotewrite?: PrometheusRemoteWriteExporter.Config; 35 | awsxray?: AwsXRayExporterConfig; 36 | debug?: DebugExportedConfig; 37 | }; 38 | export type ExporterType = keyof Exporter; 39 | 40 | export type SigV4AuthExtensionConfig = { 41 | region: string; 42 | service: string; 43 | }; 44 | 45 | export type HealthCheckExtensionConfig = { 46 | endpoint: string; 47 | }; 48 | 49 | export type PprofExtensionConfig = { 50 | endpoint: string; 51 | }; 52 | 53 | export type Extension = { 54 | sigv4auth?: SigV4AuthExtensionConfig; 55 | health_check?: HealthCheckExtensionConfig; 56 | pprof?: PprofExtensionConfig; 57 | }; 58 | export type ExtensionType = keyof Extension; 59 | 60 | export type PipelineConfig = { 61 | receivers: ReceiverType[]; 62 | processors: ProcessorType[]; 63 | exporters: ExporterType[]; 64 | }; 65 | 66 | export type TelemetryConfig = { 67 | logs?: { 68 | level: string; 69 | }; 70 | metrics?: { 71 | level: string; 72 | }; 73 | }; 74 | 75 | export type Service = { 76 | pipelines: { 77 | metrics?: PipelineConfig; 78 | traces?: PipelineConfig; 79 | }; 80 | extensions?: ExtensionType[]; 81 | telemetry?: TelemetryConfig; 82 | }; 83 | 84 | export type Config = { 85 | receivers: Receiver; 86 | processors: Processor; 87 | exporters: Exporter; 88 | extensions: Extension; 89 | service: Service; 90 | }; 91 | 92 | export type 
Opts = { 93 | containerName?: pulumi.Input; 94 | configVolumeName?: pulumi.Input; 95 | taskRoleInlinePolicies?: pulumi.Input< 96 | pulumi.Input[] 97 | >; 98 | }; 99 | } 100 | 101 | export class OtelCollector { 102 | config: pulumi.Output; 103 | configVolume: pulumi.Output; 104 | container: pulumi.Output; 105 | configContainer: EcsService.Container; 106 | taskRoleInlinePolicies: OtelCollector.Opts['taskRoleInlinePolicies']; 107 | 108 | constructor( 109 | serviceName: pulumi.Input, 110 | env: pulumi.Input, 111 | config: pulumi.Input, 112 | opts: OtelCollector.Opts = {}, 113 | ) { 114 | const containerName = 115 | opts.containerName || pulumi.interpolate`${serviceName}-otel-collector`; 116 | const configVolumeName = 117 | opts.configVolumeName || 'otel-collector-config-volume'; 118 | this.configVolume = pulumi.output(configVolumeName); 119 | this.taskRoleInlinePolicies = opts.taskRoleInlinePolicies || []; 120 | 121 | this.config = pulumi.output(config); 122 | this.configContainer = this.createConfigContainer( 123 | this.config, 124 | configVolumeName, 125 | ); 126 | this.container = this.createContainer( 127 | containerName, 128 | this.config, 129 | configVolumeName, 130 | serviceName, 131 | env, 132 | ); 133 | } 134 | 135 | private createContainer( 136 | containerName: pulumi.Input, 137 | config: pulumi.Output, 138 | configVolumeName: pulumi.Input, 139 | serviceName: pulumi.Input, 140 | env: pulumi.Input, 141 | ): pulumi.Output { 142 | return pulumi 143 | .all([containerName, config, configVolumeName, serviceName, env]) 144 | .apply(([containerName, config, configVolumeName, serviceName, env]) => ({ 145 | name: containerName, 146 | image: 'otel/opentelemetry-collector-contrib:0.123.0', 147 | portMappings: this.getCollectorPortMappings(config), 148 | mountPoints: [ 149 | { 150 | sourceVolume: configVolumeName, 151 | containerPath: '/etc/otelcol-contrib', 152 | readOnly: true, 153 | }, 154 | ], 155 | dependsOn: [ 156 | { 157 | containerName: this.configContainer.name, 158 | condition: 'COMPLETE', 159 | }, 160 | ], 161 | environment: this.getCollectorEnvironment(serviceName, env), 162 | })); 163 | } 164 | 165 | private getCollectorEnvironment( 166 | serviceName: string, 167 | env: string, 168 | ): { name: string; value: string }[] { 169 | return [ 170 | { 171 | name: 'OTEL_RESOURCE_ATTRIBUTES', 172 | value: `service.name=${serviceName},env=${env}`, 173 | }, 174 | ]; 175 | } 176 | 177 | private getCollectorPortMappings( 178 | config: OtelCollector.Config, 179 | ): EcsService.Container['portMappings'] { 180 | const hasOTLPGRpcReceiver = !!config.receivers.otlp?.protocols.grpc; 181 | const hasOTLPHttpReceiver = !!config.receivers.otlp?.protocols.http; 182 | const protocol: aws.ecs.Protocol = 'tcp'; 183 | 184 | return [ 185 | ...(hasOTLPGRpcReceiver 186 | ? [{ containerPort: 4317, hostPort: 4317, protocol }] 187 | : []), 188 | ...(hasOTLPHttpReceiver 189 | ? 
[{ containerPort: 4318, hostPort: 4318, protocol }] 190 | : []), 191 | // TODO: Expose 8888 for collector telemetry 192 | { containerPort: 13133, hostPort: 13133, protocol }, 193 | ]; 194 | } 195 | 196 | private createConfigContainer( 197 | config: pulumi.Output, 198 | volume: pulumi.Input, 199 | ): EcsService.Container { 200 | return { 201 | name: 'otel-config-writer', 202 | image: 'amazonlinux:latest', 203 | essential: false, 204 | command: config.apply(config => [ 205 | 'sh', 206 | '-c', 207 | `echo '${yaml.stringify(config)}' > /etc/otelcol-contrib/config.yaml`, 208 | ]), 209 | mountPoints: [ 210 | { 211 | sourceVolume: volume, 212 | containerPath: '/etc/otelcol-contrib', 213 | }, 214 | ], 215 | }; 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /src/v2/otel/memory-limiter-processor.ts: -------------------------------------------------------------------------------- 1 | export namespace MemoryLimiterProcessor { 2 | export type Config = { 3 | check_interval: string; 4 | limit_percentage: number; 5 | spike_limit_percentage: number; 6 | }; 7 | } 8 | 9 | export const defaults = { 10 | checkInterval: '1s', 11 | limitPercentage: 80, 12 | spikeLimitPercentage: 15 13 | }; 14 | -------------------------------------------------------------------------------- /src/v2/otel/otlp-receiver.ts: -------------------------------------------------------------------------------- 1 | export namespace OTLPReceiver { 2 | export type Protocol = 'http' | 'grpc'; 3 | export type Config = { 4 | protocols: { 5 | [K in Protocol]?: { 6 | endpoint: string; 7 | }; 8 | }; 9 | }; 10 | } 11 | 12 | export const Protocol = { 13 | grpc: { 14 | endpoint: '0.0.0.0:4317' 15 | }, 16 | http: { 17 | endpoint: '0.0.0.0:4318' 18 | } 19 | }; 20 | 21 | -------------------------------------------------------------------------------- /src/v2/otel/prometheus-remote-write-exporter.ts: -------------------------------------------------------------------------------- 1 | import * as pulumi from '@pulumi/pulumi'; 2 | 3 | export namespace PrometheusRemoteWriteExporter { 4 | export type Config = { 5 | namespace: pulumi.Input; 6 | endpoint: pulumi.Input; 7 | auth?: { 8 | authenticator: pulumi.Input; 9 | }; 10 | }; 11 | } 12 | 13 | -------------------------------------------------------------------------------- /tests/automation.ts: -------------------------------------------------------------------------------- 1 | import { 2 | DestroyResult, 3 | InlineProgramArgs, 4 | LocalWorkspace, 5 | OutputMap 6 | } from '@pulumi/pulumi/automation'; 7 | import { createSpinner } from 'nanospinner'; 8 | 9 | export async function deploy(args: InlineProgramArgs): Promise { 10 | const spinner = createSpinner('Deploying stack...').start(); 11 | const stack = await LocalWorkspace.createOrSelectStack(args); 12 | await stack.setConfig('aws:region', { value: 'us-east-2' }); 13 | const up = await stack.up({ logToStdErr: true }); 14 | spinner.success({ text: 'Stack deployed' }); 15 | 16 | return up.outputs; 17 | } 18 | 19 | export async function destroy(args: InlineProgramArgs): Promise { 20 | const spinner = createSpinner('Destroying stack...').start(); 21 | const stack = await LocalWorkspace.createOrSelectStack(args); 22 | const result = await stack.destroy(); 23 | spinner.success({ text: 'Stack destroyed' }); 24 | 25 | return result; 26 | } 27 | 28 | export async function getOutputs(args: InlineProgramArgs): Promise { 29 | const stack = await LocalWorkspace.createOrSelectStack(args); 30 | 31 | return 
stack.outputs(); 32 | } 33 | -------------------------------------------------------------------------------- /tests/build/index.tst.ts: -------------------------------------------------------------------------------- 1 | import * as aws from '@pulumi/aws'; 2 | import * as awsx from '@pulumi/awsx'; 3 | import { describe, expect, it } from 'tstyche'; 4 | import { next as studion } from '@studion/infra-code-blocks'; 5 | import { OtelCollector } from '../../dist/v2/otel'; 6 | import { OtelCollectorBuilder } from '../../dist/v2/otel/builder'; 7 | 8 | describe('Build output', () => { 9 | describe('ECS Service', () => { 10 | it('should export EcsService', () => { 11 | expect(studion).type.toHaveProperty('EcsService'); 12 | }); 13 | 14 | describe('Instantiation', () => { 15 | const { EcsService } = studion; 16 | 17 | it('should construct EcsService', () => { 18 | expect(EcsService).type.toBeConstructableWith('ecsName', { 19 | vpc: new awsx.ec2.Vpc('vpcName'), 20 | cluster: new aws.ecs.Cluster('clusterName'), 21 | containers: [ 22 | { 23 | name: 'mainContainer', 24 | image: 'sample/image', 25 | }, 26 | ], 27 | }); 28 | }); 29 | }); 30 | }); 31 | 32 | describe('Web Server', () => { 33 | it('should export WebServer', () => { 34 | expect(studion).type.toHaveProperty('WebServer'); 35 | }); 36 | 37 | it('should export WebServerBuilder', () => { 38 | expect(studion).type.toHaveProperty('WebServerBuilder'); 39 | }); 40 | 41 | it('should export WebServerLoadBalancer', () => { 42 | expect(studion).type.toHaveProperty('WebServerLoadBalancer'); 43 | }); 44 | 45 | describe('Instantiation', () => { 46 | const { WebServer, WebServerBuilder, WebServerLoadBalancer } = studion; 47 | 48 | it('should construct WebServer', () => { 49 | expect(WebServer).type.toBeConstructableWith('wsName', { 50 | vpc: new awsx.ec2.Vpc('vpcName'), 51 | cluster: new aws.ecs.Cluster('clusterName'), 52 | image: 'sample/image', 53 | port: 8080, 54 | publicSubnetIds: ['sub-1', 'sub-2', 'sub-3'], 55 | }); 56 | }); 57 | 58 | it('should construct WebServerBuilder', () => { 59 | expect(WebServerBuilder).type.toBeConstructableWith('wsbName'); 60 | }); 61 | 62 | it('should construct WebServerLoadBalancer', () => { 63 | expect(WebServerLoadBalancer).type.toBeConstructableWith('wslbName', { 64 | vpc: new awsx.ec2.Vpc('vpcName'), 65 | port: 80, 66 | }); 67 | }); 68 | }); 69 | 70 | describe('Builder', () => { 71 | const builder = new studion.WebServerBuilder('wsbName'); 72 | 73 | it('should have build method', () => { 74 | expect(builder.build).type.toBeCallableWith(); 75 | }); 76 | 77 | it('should have configureEcs method', () => { 78 | expect(builder.configureEcs).type.toBeCallableWith({ 79 | cluster: new aws.ecs.Cluster('clusterName'), 80 | }); 81 | }); 82 | 83 | it('should have configureWebServer method', () => { 84 | expect(builder.configureWebServer).type.toBeCallableWith( 85 | 'sample/image', 86 | 8080, 87 | ); 88 | }); 89 | 90 | it('should have withCustomDomain method', () => { 91 | expect(builder.withCustomDomain).type.toBeCallableWith( 92 | 'domainName', 93 | 'hzId', 94 | ); 95 | }); 96 | 97 | it('should have withCustomHealthCheckPath method', () => { 98 | expect(builder.withCustomHealthCheckPath).type.toBeCallableWith( 99 | '/custom/healthCheck/path', 100 | ); 101 | }); 102 | 103 | it('should have withInitContainer method', () => { 104 | expect(builder.withInitContainer).type.toBeCallableWith({ 105 | name: 'containerName', 106 | image: 'sample/image', 107 | }); 108 | }); 109 | 110 | it('should have withOtelCollector method', () => { 111 | 
expect(builder.withOtelCollector).type.toBeCallableWith( 112 | new OtelCollector('serviceName', 'testEnv', { 113 | receivers: {}, 114 | processors: {}, 115 | exporters: {}, 116 | extensions: {}, 117 | service: { 118 | pipelines: {}, 119 | }, 120 | }), 121 | ); 122 | }); 123 | 124 | it('should have withSidecarContainer method', () => { 125 | expect(builder.withSidecarContainer).type.toBeCallableWith({ 126 | name: 'containerName', 127 | image: 'sample/image', 128 | healthCheck: {}, 129 | }); 130 | }); 131 | 132 | it('should have withVolume method', () => { 133 | expect(builder.withVolume).type.toBeCallableWith({ 134 | name: 'volumeName', 135 | }); 136 | }); 137 | 138 | it('should have withVpc method', () => { 139 | expect(builder.withVpc).type.toBeCallableWith( 140 | new awsx.ec2.Vpc('vpcName'), 141 | ); 142 | }); 143 | }); 144 | }); 145 | 146 | describe('Open Telemetry', () => { 147 | it('should export openTelemetry', () => { 148 | expect(studion).type.toHaveProperty('openTelemetry'); 149 | }); 150 | 151 | it('should contain OtelCollector', () => { 152 | expect(studion.openTelemetry).type.toHaveProperty('OtelCollector'); 153 | }); 154 | 155 | it('should contain OtelCollectorBuilder', () => { 156 | expect(studion.openTelemetry).type.toHaveProperty('OtelCollectorBuilder'); 157 | }); 158 | 159 | describe('Instantiation', () => { 160 | const { 161 | openTelemetry: { OtelCollector, OtelCollectorBuilder }, 162 | } = studion; 163 | 164 | it('should construct OtelCollector', () => { 165 | expect(OtelCollector).type.toBeConstructableWith( 166 | 'serviceName', 167 | 'testEnv', 168 | { 169 | receivers: {}, 170 | processors: {}, 171 | exporters: {}, 172 | extensions: {}, 173 | service: { 174 | pipelines: {}, 175 | }, 176 | }, 177 | ); 178 | }); 179 | 180 | it('should construct OtelCollectorBuilder', () => { 181 | expect(OtelCollectorBuilder).type.toBeConstructableWith( 182 | 'serviceName', 183 | 'testEnv', 184 | ); 185 | }); 186 | }); 187 | 188 | describe('Builder', () => { 189 | const builder = new OtelCollectorBuilder('serviceName', 'testEnv'); 190 | 191 | it('should have build method', () => { 192 | expect(builder.build).type.toBeCallableWith(); 193 | }); 194 | 195 | it('should have withAPS method', () => { 196 | expect(builder.withAPS).type.toBeCallableWith( 197 | 'namespace', 198 | new aws.amp.Workspace('name'), 199 | 'region', 200 | ); 201 | }); 202 | 203 | it('should have withAWSXRayExporter method', () => { 204 | expect(builder.withAWSXRayExporter).type.toBeCallableWith('region'); 205 | }); 206 | 207 | it('should have withBatchProcessor method', () => { 208 | expect(builder.withBatchProcessor).type.toBeCallableWith( 209 | 'batch', 210 | 3, 211 | 9, 212 | '7s', 213 | ); 214 | }); 215 | 216 | it('should have withDebug method', () => { 217 | expect(builder.withDebug).type.toBeCallableWith(); 218 | }); 219 | 220 | it('should have withDefault method', () => { 221 | expect(builder.withDefault).type.toBeCallableWith( 222 | 'namespace', 223 | new aws.amp.Workspace('name'), 224 | 'region', 225 | ); 226 | }); 227 | 228 | it('should have withHealthCheckExtension method', () => { 229 | expect(builder.withHealthCheckExtension).type.toBeCallableWith(); 230 | }); 231 | 232 | it('should have withMemoryLimiterProcessor method', () => { 233 | expect(builder.withMemoryLimiterProcessor).type.toBeCallableWith( 234 | '3s', 235 | 77, 236 | 7, 237 | ); 238 | }); 239 | 240 | it('should have withMetricsPipeline method', () => { 241 | expect(builder.withMetricsPipeline).type.toBeCallableWith([], [], []); 242 | }); 243 | 
244 | it('should have withOTLPReceiver method', () => { 245 | expect(builder.withOTLPReceiver).type.toBeCallableWith(); 246 | }); 247 | 248 | it('should have withPprofExtension method', () => { 249 | expect(builder.withPprofExtension).type.toBeCallableWith(); 250 | }); 251 | 252 | it('should have withTelemetry method', () => { 253 | expect(builder.withTelemetry).type.toBeCallableWith(); 254 | }); 255 | 256 | it('should have withTracesPipeline method', () => { 257 | expect(builder.withTracesPipeline).type.toBeCallableWith([], [], []); 258 | }); 259 | }); 260 | }); 261 | }); 262 | -------------------------------------------------------------------------------- /tests/build/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "include": ["**/*"], 3 | "compilerOptions": { 4 | "strict": true, 5 | "noEmit": true, 6 | "paths": { 7 | "@studion/infra-code-blocks": ["../../dist/index"] 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /tests/ecs-service/autoscaling.test.ts: -------------------------------------------------------------------------------- 1 | import { it } from 'node:test'; 2 | import * as assert from 'node:assert'; 3 | import { EcsTestContext } from './test-context'; 4 | import { 5 | DescribeScalableTargetsCommand, 6 | DescribeScalingPoliciesCommand 7 | } from '@aws-sdk/client-application-auto-scaling'; 8 | 9 | export function testEcsServiceWithAutoscaling(ctx: EcsTestContext) { 10 | it('should create autoscaling resources when autoscaling is enabled', async () => { 11 | const ecsService = ctx.outputs.ecsServiceWithAutoscaling.value; 12 | const clusterName = ctx.outputs.cluster.value.name; 13 | const serviceName = ecsService.name; 14 | 15 | const resourceId = `service/${clusterName}/${serviceName}`; 16 | 17 | const targetsCommand = new DescribeScalableTargetsCommand({ 18 | ServiceNamespace: 'ecs', 19 | ResourceIds: [resourceId], 20 | ScalableDimension: 'ecs:service:DesiredCount' 21 | }); 22 | 23 | const { ScalableTargets } = await ctx.clients.appAutoscaling.send(targetsCommand); 24 | 25 | assert.ok(ScalableTargets && ScalableTargets.length > 0, 'Autoscaling target should exist'); 26 | 27 | assert.strictEqual(ScalableTargets[0].MinCapacity, 2, 'Min capacity should match configuration'); 28 | assert.strictEqual(ScalableTargets[0].MaxCapacity, 5, 'Max capacity should match configuration'); 29 | }); 30 | 31 | it('should create CPU and memory scaling policies', async () => { 32 | const ecsService = ctx.outputs.ecsServiceWithAutoscaling.value; 33 | const clusterName = ctx.outputs.cluster.value.name; 34 | const serviceName = ecsService.name; 35 | 36 | const resourceId = `service/${clusterName}/${serviceName}`; 37 | 38 | const policiesCommand = new DescribeScalingPoliciesCommand({ 39 | ServiceNamespace: 'ecs', 40 | ResourceId: resourceId, 41 | ScalableDimension: 'ecs:service:DesiredCount' 42 | }); 43 | 44 | const { ScalingPolicies } = await ctx.clients.appAutoscaling.send(policiesCommand); 45 | 46 | assert.ok(ScalingPolicies && ScalingPolicies.length > 0, 'Autoscaling policies should exist'); 47 | assert.strictEqual(ScalingPolicies.length, 2, 'Should have 2 scaling policies (CPU and memory)'); 48 | 49 | const cpuPolicy = ScalingPolicies.find((policy: any) => 50 | policy.TargetTrackingScalingPolicyConfiguration?.PredefinedMetricSpecification?.PredefinedMetricType === 51 | 'ECSServiceAverageCPUUtilization' 52 | ); 53 | 54 | const memoryPolicy = ScalingPolicies.find((policy: any) => 55 | 
policy.TargetTrackingScalingPolicyConfiguration?.PredefinedMetricSpecification?.PredefinedMetricType === 56 | 'ECSServiceAverageMemoryUtilization' 57 | ); 58 | 59 | assert.ok(cpuPolicy, 'CPU autoscaling policy should exist'); 60 | assert.ok(memoryPolicy, 'Memory autoscaling policy should exist'); 61 | 62 | assert.strictEqual( 63 | cpuPolicy?.TargetTrackingScalingPolicyConfiguration?.TargetValue, 64 | 70, 65 | 'CPU policy target should be 70%' 66 | ); 67 | assert.strictEqual( 68 | memoryPolicy?.TargetTrackingScalingPolicyConfiguration?.TargetValue, 69 | 70, 70 | 'Memory policy target should be 70%' 71 | ); 72 | }); 73 | } 74 | -------------------------------------------------------------------------------- /tests/ecs-service/index.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, it, before, after } from 'node:test'; 2 | import * as assert from 'node:assert'; 3 | import { InlineProgramArgs } from '@pulumi/pulumi/automation'; 4 | import { 5 | ECSClient, 6 | ListTasksCommand, 7 | DescribeTasksCommand, 8 | DescribeServicesCommand 9 | } from '@aws-sdk/client-ecs'; 10 | import { EC2Client } from '@aws-sdk/client-ec2'; 11 | import { ElasticLoadBalancingV2Client } from '@aws-sdk/client-elastic-load-balancing-v2'; 12 | import { ServiceDiscoveryClient } from '@aws-sdk/client-servicediscovery'; 13 | import { ApplicationAutoScalingClient } from '@aws-sdk/client-application-auto-scaling'; 14 | import { EFSClient } from '@aws-sdk/client-efs'; 15 | import { backOff } from 'exponential-backoff'; 16 | import * as automation from '../automation'; 17 | import { EcsTestContext } from './test-context'; 18 | import { testEcsServiceWithLb } from './load-balancer.test'; 19 | import { testEcsServiceWithStorage } from './persistent-storage.test'; 20 | import { testEcsServiceWithServiceDiscovery } from './service-discovery.test'; 21 | import { testEcsServiceWithAutoscaling } from './autoscaling.test'; 22 | 23 | const programArgs: InlineProgramArgs = { 24 | stackName: 'dev', 25 | projectName: 'icb-test-ecs-service', 26 | program: () => import('./infrastructure') 27 | }; 28 | 29 | describe('EcsService component deployment', () => { 30 | const region = process.env.AWS_REGION || 'us-east-2'; 31 | const ctx: EcsTestContext = { 32 | outputs: {}, 33 | config: { 34 | minEcsName: 'ecs-test-min', 35 | exponentialBackOffConfig: { 36 | delayFirstAttempt: true, 37 | numOfAttempts: 5, 38 | startingDelay: 1000, 39 | timeMultiple: 2, 40 | jitter: 'full' 41 | } 42 | }, 43 | clients: { 44 | ecs: new ECSClient({ region }), 45 | ec2: new EC2Client({ region }), 46 | elb: new ElasticLoadBalancingV2Client({ region }), 47 | sd: new ServiceDiscoveryClient({ region }), 48 | appAutoscaling: new ApplicationAutoScalingClient({ region }), 49 | efs: new EFSClient({ region }) 50 | } 51 | }; 52 | 53 | before(async () => { 54 | ctx.outputs = await automation.deploy(programArgs); 55 | }); 56 | 57 | after(() => automation.destroy(programArgs)); 58 | 59 | it('should create an ECS service with the correct configuration', async () => { 60 | const ecsService = ctx.outputs.minimalEcsService.value; 61 | assert.ok(ecsService, 'ECS Service should be defined'); 62 | assert.strictEqual(ecsService.name, ctx.config.minEcsName, 'Service should have the correct name'); 63 | assert.strictEqual(ecsService.service.launchType, 'FARGATE', 'Service should use FARGATE launch type'); 64 | assert.strictEqual(ecsService.service.desiredCount, 1, 'Service should have 1 desired task'); 65 | }); 66 | 67 | it('should 
have a running ECS service with desired count of tasks', async () => { 68 | const ecsService = ctx.outputs.minimalEcsService.value; 69 | const clusterName = ctx.outputs.cluster.value.name; 70 | const serviceName = ecsService.name; 71 | 72 | return backOff(async () => { 73 | const command = new DescribeServicesCommand({ 74 | cluster: clusterName, 75 | services: [serviceName] 76 | }); 77 | const { services } = await ctx.clients.ecs.send(command); 78 | 79 | assert.ok(services && services.length > 0, 'Service should exist'); 80 | const [service] = services; 81 | 82 | assert.strictEqual(service.status, 'ACTIVE', 'Service should be active'); 83 | assert.strictEqual( 84 | service.desiredCount, 85 | service.runningCount, 86 | `Service should have ${service.desiredCount} running tasks` 87 | ); 88 | }, ctx.config.exponentialBackOffConfig); 89 | }); 90 | 91 | it('should have running tasks with the correct task definition', async () => { 92 | const ecsService = ctx.outputs.minimalEcsService.value; 93 | const clusterName = ctx.outputs.cluster.value.name; 94 | const taskDefArn = ecsService.taskDefinition.arn; 95 | 96 | const listCommand = new ListTasksCommand({ 97 | cluster: clusterName, 98 | family: ecsService.taskDefinition.family 99 | }); 100 | const { taskArns } = await ctx.clients.ecs.send(listCommand); 101 | 102 | assert.ok(taskArns && taskArns.length > 0, 'Tasks should be running'); 103 | 104 | const describeCommand = new DescribeTasksCommand({ 105 | cluster: clusterName, 106 | tasks: taskArns 107 | }); 108 | const { tasks } = await ctx.clients.ecs.send(describeCommand); 109 | 110 | assert.ok(tasks && tasks.length, 'Tasks should exist'); 111 | tasks.forEach(task => { 112 | assert.strictEqual(task.taskDefinitionArn, taskDefArn, 113 | 'Task should use the correct task definition'); 114 | assert.strictEqual(task.lastStatus, 'RUNNING', 'Task should be in RUNNING state'); 115 | }); 116 | }); 117 | 118 | it('should create a task definition with the correct container configuration', async () => { 119 | const ecsService = ctx.outputs.minimalEcsService.value; 120 | const taskDef = ecsService.taskDefinition; 121 | assert.ok(taskDef, 'Task definition should be defined'); 122 | 123 | const containerDefs = JSON.parse(taskDef.containerDefinitions); 124 | assert.strictEqual( 125 | containerDefs.length, 126 | 1, 127 | 'Should have 1 container definition' 128 | ); 129 | assert.strictEqual( 130 | containerDefs[0].name, 131 | 'sample-service', 132 | 'Container should have correct name' 133 | ); 134 | assert.strictEqual( 135 | containerDefs[0].image, 136 | 'amazon/amazon-ecs-sample', 137 | 'Container should use correct image' 138 | ); 139 | assert.strictEqual( 140 | containerDefs[0].portMappings[0].containerPort, 141 | 80, 142 | 'Container should map port 80' 143 | ); 144 | }); 145 | 146 | it('should set the correct CPU and memory values', async () => { 147 | const ecsService = ctx.outputs.minimalEcsService.value; 148 | const taskDef = ecsService.taskDefinition; 149 | 150 | // Default size is 'small' (0.25 vCPU, 0.5 GB) 151 | assert.strictEqual(taskDef.cpu, '256', 'CPU should be 256 (0.25 vCPU)'); 152 | assert.strictEqual(taskDef.memory, '512', 'Memory should be 512 MB'); 153 | }); 154 | 155 | it('should create a CloudWatch log group for the service', async () => { 156 | const ecsService = ctx.outputs.minimalEcsService.value; 157 | assert.ok(ecsService.logGroup, 'Log group should be defined'); 158 | assert.strictEqual( 159 | ecsService.logGroup.retentionInDays, 160 | 14, 161 | 'Log group should have 14-day 
retention' 162 | ); 163 | assert.ok( 164 | ecsService.logGroup.namePrefix.startsWith(`/ecs/${ctx.config.minEcsName}-`), 165 | 'Log group should have correct name prefix' 166 | ); 167 | }); 168 | 169 | it('should create IAM roles with proper permissions', async () => { 170 | const ecsService = ctx.outputs.minimalEcsService.value; 171 | const taskDef = ecsService.taskDefinition; 172 | 173 | assert.ok(taskDef.executionRoleArn, 'Task execution role should be defined'); 174 | assert.ok(taskDef.taskRoleArn, 'Task role should be defined'); 175 | 176 | assert.ok( 177 | taskDef.executionRoleArn.includes(`${ctx.config.minEcsName}-task-exec-role`), 178 | 'Execution role should have correct name' 179 | ); 180 | assert.ok( 181 | taskDef.taskRoleArn.includes(`${ctx.config.minEcsName}-task-role`), 182 | 'Task role should have correct name' 183 | ); 184 | }); 185 | 186 | it('should configure network settings correctly', async () => { 187 | const ecsService = ctx.outputs.minimalEcsService.value; 188 | const networkConfig = ecsService.service.networkConfiguration; 189 | 190 | assert.ok(networkConfig, 'Network configuration should be defined'); 191 | assert.strictEqual( 192 | networkConfig.assignPublicIp, 193 | false, 194 | 'Should not assign public IP by default' 195 | ); 196 | assert.ok( 197 | networkConfig.securityGroups.length > 0, 198 | 'Should have at least one security group' 199 | ); 200 | assert.ok( 201 | networkConfig.subnets.length > 0, 202 | 'Should have at least one subnet' 203 | ); 204 | }); 205 | 206 | it('should have security group with proper rules', async () => { 207 | const ecsService = ctx.outputs.minimalEcsService.value; 208 | const project = ctx.outputs.project.value; 209 | assert.ok(ecsService.securityGroups.length > 0, 'Should have security groups'); 210 | 211 | const sg = ecsService.securityGroups[0]; 212 | assert.ok(sg.ingress[0].cidrBlocks.includes(project.vpc.vpc.cidrBlock), 213 | 'Ingress rule should allow traffic from VPC CIDR'); 214 | assert.strictEqual( 215 | sg.egress[0].cidrBlocks[0], 216 | '0.0.0.0/0', 217 | 'Egress rule should allow all outbound traffic' 218 | ); 219 | }); 220 | 221 | it('should create security group in the correct VPC', async () => { 222 | const ecsService = ctx.outputs.minimalEcsService.value; 223 | const project = ctx.outputs.project.value; 224 | assert.ok(ecsService.securityGroups.length > 0, 'Should have security groups'); 225 | 226 | const sg = ecsService.securityGroups[0]; 227 | const expectedVpcId = project.vpc.vpcId; 228 | 229 | assert.strictEqual( 230 | sg.vpcId, 231 | expectedVpcId, 232 | `Security group should be created in the correct VPC (expected: ${expectedVpcId}, got: ${sg.vpcId})` 233 | ); 234 | }); 235 | 236 | describe('With autoscaling', () => testEcsServiceWithAutoscaling(ctx)); 237 | describe('With service discovery', () => testEcsServiceWithServiceDiscovery(ctx)); 238 | describe('With persistent storage', () => testEcsServiceWithStorage(ctx)); 239 | describe('With load balancer', () => testEcsServiceWithLb(ctx)); 240 | }); 241 | -------------------------------------------------------------------------------- /tests/ecs-service/infrastructure/index.ts: -------------------------------------------------------------------------------- 1 | import * as aws from '@pulumi/aws'; 2 | import * as pulumi from '@pulumi/pulumi'; 3 | import { Project, next as studion } from '@studion/infra-code-blocks'; 4 | 5 | const appName = 'ecs-test'; 6 | const stackName = pulumi.getStack(); 7 | const appPort = 80; 8 | const tags = { 9 | Project: appName, 10 | 
Environment: stackName 11 | } 12 | const sampleServiceContainer = { 13 | name: 'sample-service', 14 | image: 'amazon/amazon-ecs-sample', 15 | portMappings: [studion.EcsService.createTcpPortMapping(appPort)], 16 | }; 17 | 18 | const project = new Project(appName, { services: [] }); 19 | 20 | const cluster = new aws.ecs.Cluster(`${appName}-cluster`, { 21 | name: `${appName}-cluster-${stackName}`, 22 | tags, 23 | }, { parent: project }); 24 | 25 | const minimalEcsService = new studion.EcsService(`${appName}-min`, { 26 | cluster, 27 | vpc: project.vpc, 28 | containers: [sampleServiceContainer], 29 | tags 30 | }); 31 | 32 | const lbSecurityGroup = new aws.ec2.SecurityGroup(`${appName}-lb-sg`, { 33 | vpcId: project.vpc.vpcId, 34 | ingress: [{ 35 | protocol: "tcp", 36 | fromPort: appPort, 37 | toPort: appPort, 38 | cidrBlocks: ["0.0.0.0/0"] 39 | }], 40 | egress: [{ 41 | protocol: "-1", 42 | fromPort: 0, 43 | toPort: 0, 44 | cidrBlocks: ["0.0.0.0/0"] 45 | }], 46 | tags 47 | }); 48 | 49 | const lb = new aws.lb.LoadBalancer(`${appName}-lb`, { 50 | internal: false, 51 | loadBalancerType: "application", 52 | securityGroups: [lbSecurityGroup.id], 53 | subnets: project.vpc.publicSubnetIds, 54 | tags 55 | }); 56 | 57 | const targetGroup = new aws.lb.TargetGroup(`${appName}-tg`, { 58 | port: appPort, 59 | protocol: "HTTP", 60 | targetType: "ip", 61 | vpcId: project.vpc.vpcId, 62 | healthCheck: { 63 | path: "/", 64 | port: "traffic-port", 65 | }, 66 | tags 67 | }); 68 | 69 | const listener = new aws.lb.Listener(`${appName}-listener`, { 70 | loadBalancerArn: lb.arn, 71 | port: appPort, 72 | protocol: "HTTP", 73 | defaultActions: [{ 74 | type: "forward", 75 | targetGroupArn: targetGroup.arn, 76 | }], 77 | tags 78 | }); 79 | 80 | const ecsServiceWithLb = new studion.EcsService(`${appName}-lb`, { 81 | cluster, 82 | vpc: project.vpc, 83 | containers: [sampleServiceContainer], 84 | assignPublicIp: true, 85 | loadBalancers: [{ 86 | containerName: 'sample-service', 87 | containerPort: appPort, 88 | targetGroupArn: targetGroup.arn, 89 | }], 90 | tags 91 | }); 92 | 93 | const lbUrl = pulumi.interpolate`http://${lb.dnsName}`; 94 | 95 | const ecsWithDiscovery = new studion.EcsService(`${appName}-sd`, { 96 | cluster, 97 | vpc: project.vpc, 98 | containers: [sampleServiceContainer], 99 | enableServiceAutoDiscovery: true, 100 | tags 101 | }); 102 | 103 | const ecsServiceWithAutoscaling = new studion.EcsService(`${appName}-autoscale`, { 104 | cluster, 105 | vpc: project.vpc, 106 | containers: [sampleServiceContainer], 107 | autoscaling: { 108 | enabled: true, 109 | minCount: 2, 110 | maxCount: 5 111 | }, 112 | tags 113 | }); 114 | 115 | const ecsServiceWithStorage = new studion.EcsService(`${appName}-storage`, { 116 | cluster, 117 | vpc: project.vpc, 118 | volumes: [{ name: "data-volume" }], 119 | containers: [{ 120 | name: "test-container", 121 | image: "amazonlinux:2", 122 | portMappings: [studion.EcsService.createTcpPortMapping(appPort)], 123 | mountPoints: [{ 124 | sourceVolume: "data-volume", 125 | containerPath: "/data" 126 | }], 127 | environment: [ 128 | { name: "TEST_FILE", value: "/data/test.txt" } 129 | ], 130 | // Enables testing EFS functionality based on logs. 131 | // Command writes to EFS, then reads from it. 
132 | command: [ 133 | "sh", 134 | "-c", 135 | "echo 'EFS test content' > $TEST_FILE && " + 136 | "if [ -f $TEST_FILE ] && grep 'EFS test content' $TEST_FILE; then " + 137 | "echo 'Successfully wrote to and read from EFS volume'; " + 138 | "else " + 139 | "echo 'Failed to write to or read from EFS volume'; " + 140 | "exit 1; " + 141 | "fi && " + 142 | "while true; do sleep 30; done" 143 | ] 144 | }], 145 | tags 146 | }); 147 | 148 | module.exports = { 149 | project, 150 | cluster, 151 | minimalEcsService, 152 | ecsServiceWithLb, 153 | lbUrl, 154 | ecsWithDiscovery, 155 | ecsServiceWithAutoscaling, 156 | ecsServiceWithStorage 157 | }; 158 | -------------------------------------------------------------------------------- /tests/ecs-service/load-balancer.test.ts: -------------------------------------------------------------------------------- 1 | import { it } from 'node:test'; 2 | import * as assert from 'node:assert'; 3 | import { backOff } from 'exponential-backoff'; 4 | import { EcsTestContext } from './test-context'; 5 | import { DescribeTargetGroupsCommand, DescribeTargetHealthCommand } from '@aws-sdk/client-elastic-load-balancing-v2'; 6 | 7 | export function testEcsServiceWithLb(ctx: EcsTestContext) { 8 | it('should properly configure load balancer when provided', async () => { 9 | const ecsService = ctx.outputs.ecsServiceWithLb.value; 10 | 11 | assert.ok(ecsService.service.loadBalancers && 12 | ecsService.service.loadBalancers.length > 0, 13 | 'Service should have load balancer configuration'); 14 | 15 | const [lbConfig] = ecsService.service.loadBalancers; 16 | assert.strictEqual(lbConfig.containerName, 'sample-service', 'Load balancer should target correct container'); 17 | assert.strictEqual(lbConfig.containerPort, 80, 'Load balancer should target correct port'); 18 | 19 | const targetGroupArn = lbConfig.targetGroupArn; 20 | const describeTargetGroups = new DescribeTargetGroupsCommand({ 21 | TargetGroupArns: [targetGroupArn] 22 | }); 23 | const { TargetGroups } = await ctx.clients.elb.send(describeTargetGroups); 24 | 25 | assert.ok(TargetGroups && TargetGroups.length > 0, 'Target group should exist'); 26 | assert.strictEqual(TargetGroups[0].TargetType, 'ip', 'Target group should be IP-based for Fargate'); 27 | 28 | const describeHealth = new DescribeTargetHealthCommand({ 29 | TargetGroupArn: targetGroupArn 30 | }); 31 | 32 | return backOff(async () => { 33 | const { TargetHealthDescriptions } = await ctx.clients.elb.send(describeHealth); 34 | assert.ok(TargetHealthDescriptions && TargetHealthDescriptions.length > 0, 35 | 'Target group should have registered targets'); 36 | 37 | // At least one target should be healthy 38 | const healthyTargets = TargetHealthDescriptions.filter( 39 | (target: any) => target.TargetHealth?.State === 'healthy' 40 | ); 41 | assert.ok(healthyTargets.length > 0, 'At least one target should be healthy'); 42 | }, { 43 | ...ctx.config.exponentialBackOffConfig, 44 | numOfAttempts: 10, 45 | }); 46 | }); 47 | 48 | it('should be able to access the service via load balancer URL', async () => { 49 | const url = ctx.outputs.lbUrl.value; 50 | 51 | return backOff(async () => { 52 | const response = await fetch(url); 53 | assert.strictEqual(response.status, 200, 'HTTP request should return 200 OK'); 54 | 55 | const text = await response.text(); 56 | assert.ok(text.includes('Simple PHP App'), 57 | 'Response should contain expected content'); 58 | }, ctx.config.exponentialBackOffConfig); 59 | }); 60 | } 61 | 
-------------------------------------------------------------------------------- /tests/ecs-service/persistent-storage.test.ts: -------------------------------------------------------------------------------- 1 | import { it } from 'node:test'; 2 | import * as assert from 'node:assert'; 3 | import { 4 | DescribeAccessPointsCommand, 5 | DescribeFileSystemsCommand, 6 | DescribeMountTargetsCommand, 7 | DescribeMountTargetSecurityGroupsCommand 8 | } from '@aws-sdk/client-efs'; 9 | import { DescribeSecurityGroupsCommand } from '@aws-sdk/client-ec2'; 10 | import { 11 | CloudWatchLogsClient, 12 | DescribeLogStreamsCommand, 13 | GetLogEventsCommand 14 | } from '@aws-sdk/client-cloudwatch-logs'; 15 | import { DescribeTasksCommand, ListTasksCommand } from '@aws-sdk/client-ecs'; 16 | import { backOff } from 'exponential-backoff'; 17 | import { EcsTestContext } from './test-context'; 18 | 19 | export function testEcsServiceWithStorage(ctx: EcsTestContext) { 20 | it('should create EFS file system with correct configuration', async () => { 21 | const ecsServiceWithStorage = ctx.outputs.ecsServiceWithStorage.value; 22 | const efsFileSystem = ecsServiceWithStorage.persistentStorage.fileSystem; 23 | 24 | assert.ok(efsFileSystem, 'EFS file system should be created'); 25 | 26 | const command = new DescribeFileSystemsCommand({ 27 | FileSystemId: efsFileSystem.id 28 | }); 29 | const { FileSystems } = await ctx.clients.efs.send(command); 30 | 31 | assert.ok(FileSystems && FileSystems.length === 1, 'File system should exist'); 32 | assert.strictEqual(FileSystems[0].Encrypted, true, 'File system should be encrypted'); 33 | assert.strictEqual(FileSystems[0].PerformanceMode, 'generalPurpose', 'Should use general purpose performance mode'); 34 | assert.strictEqual(FileSystems[0].ThroughputMode, 'bursting', 'Should use bursting throughput mode'); 35 | }); 36 | 37 | it('should create security group for EFS with correct rules', async () => { 38 | const ecsServiceWithStorage = ctx.outputs.ecsServiceWithStorage.value; 39 | const vpc = ctx.outputs.project.value.vpc; 40 | 41 | const describeMountTargetsCommand = new DescribeMountTargetsCommand({ 42 | FileSystemId: ecsServiceWithStorage.persistentStorage.fileSystem.id 43 | }); 44 | const { MountTargets } = await ctx.clients.efs.send(describeMountTargetsCommand); 45 | 46 | assert.ok(MountTargets && MountTargets.length > 0, 'Mount targets should exist'); 47 | 48 | const describeSecurityGroupsCommand = new DescribeMountTargetSecurityGroupsCommand({ 49 | MountTargetId: MountTargets[0].MountTargetId 50 | }); 51 | const { SecurityGroups } = await ctx.clients.efs.send(describeSecurityGroupsCommand); 52 | 53 | assert.ok(SecurityGroups && SecurityGroups.length > 0, 'Security groups should be attached to mount target'); 54 | 55 | const ec2DescribeSecurityGroupsCommand = new DescribeSecurityGroupsCommand({ 56 | GroupIds: SecurityGroups 57 | }); 58 | const { SecurityGroups: securityGroupDetails } = await ctx.clients.ec2.send(ec2DescribeSecurityGroupsCommand); 59 | 60 | assert.ok(securityGroupDetails && securityGroupDetails.length > 0, 'Security group details should be available'); 61 | 62 | const efsSecurityGroup = securityGroupDetails.find(sg => 63 | sg.IpPermissions?.some(permission => 64 | permission.FromPort === 2049 && 65 | permission.ToPort === 2049 && 66 | permission.IpProtocol === 'tcp' 67 | ) 68 | ); 69 | 70 | assert.ok(efsSecurityGroup, 'EFS security group with port 2049 should exist'); 71 | assert.ok(efsSecurityGroup.GroupName?.includes(ecsServiceWithStorage.name), 72 | 
'Security group should have correct name'); 73 | 74 | const nfsRule = efsSecurityGroup.IpPermissions?.find(p => p.FromPort === 2049); 75 | assert.ok(nfsRule?.IpRanges?.some(range => range.CidrIp === vpc.vpc.cidrBlock), 76 | 'Security group should allow access from VPC CIDR'); 77 | }); 78 | 79 | it('should create mount targets in all private subnets', async () => { 80 | const ecsServiceWithStorage = ctx.outputs.ecsServiceWithStorage.value; 81 | const vpc = ctx.outputs.project.value.vpc; 82 | 83 | const command = new DescribeMountTargetsCommand({ 84 | FileSystemId: ecsServiceWithStorage.persistentStorage.fileSystem.id 85 | }); 86 | const { MountTargets } = await ctx.clients.efs.send(command); 87 | 88 | assert.ok(MountTargets, 'Mount targets should exist'); 89 | 90 | const privateSubnetIds = vpc.privateSubnetIds; 91 | assert.strictEqual(MountTargets.length, privateSubnetIds.length, 92 | 'Should have a mount target for each private subnet'); 93 | 94 | privateSubnetIds.forEach((subnetId: any) => { 95 | const hasTarget = MountTargets.some(target => target.SubnetId === subnetId); 96 | assert.ok(hasTarget, `Subnet ${subnetId} should have a mount target`); 97 | }); 98 | }); 99 | 100 | it('should create an EFS access point with correct configuration', async () => { 101 | const ecsServiceWithStorage = ctx.outputs.ecsServiceWithStorage.value; 102 | const accessPoint = ecsServiceWithStorage.persistentStorage.accessPoint; 103 | 104 | assert.ok(accessPoint, 'Access point should be created'); 105 | 106 | const command = new DescribeAccessPointsCommand({ 107 | AccessPointId: accessPoint.id 108 | }); 109 | const { AccessPoints } = await ctx.clients.efs.send(command); 110 | 111 | assert.ok(AccessPoints && AccessPoints.length === 1, 'Access point should exist'); 112 | const ap = AccessPoints[0]; 113 | 114 | assert.strictEqual(ap.PosixUser?.Uid, 1000, 'Should use UID 1000'); 115 | assert.strictEqual(ap.PosixUser?.Gid, 1000, 'Should use GID 1000'); 116 | 117 | assert.strictEqual(ap.RootDirectory?.Path, '/data', 'Root directory should be /data'); 118 | assert.strictEqual(ap.RootDirectory?.CreationInfo?.OwnerUid, 1000, 'Owner UID should be 1000'); 119 | assert.strictEqual(ap.RootDirectory?.CreationInfo?.OwnerGid, 1000, 'Owner GID should be 1000'); 120 | assert.strictEqual(ap.RootDirectory?.CreationInfo?.Permissions, '0755', 'Permissions should be 0755'); 121 | }); 122 | 123 | it('should configure task definition with EFS volumes', async () => { 124 | const ecsServiceWithStorage = ctx.outputs.ecsServiceWithStorage.value; 125 | 126 | const taskDef = ecsServiceWithStorage.taskDefinition; 127 | 128 | assert.ok(taskDef.volumes && taskDef.volumes.length > 0, 'Task definition should have volumes'); 129 | 130 | const efsVolume = taskDef.volumes[0]; 131 | assert.ok(efsVolume.efsVolumeConfiguration, 'Volume should have EFS configuration'); 132 | assert.strictEqual(efsVolume.efsVolumeConfiguration.fileSystemId, 133 | ecsServiceWithStorage.persistentStorage.fileSystem.id, 134 | 'Volume should reference correct EFS file system'); 135 | assert.strictEqual(efsVolume.efsVolumeConfiguration.transitEncryption, 'ENABLED', 136 | 'Transit encryption should be enabled'); 137 | assert.strictEqual(efsVolume.efsVolumeConfiguration.authorizationConfig.accessPointId, 138 | ecsServiceWithStorage.persistentStorage.accessPoint.id, 139 | 'Should use correct access point'); 140 | assert.strictEqual(efsVolume.efsVolumeConfiguration.authorizationConfig.iam, 'ENABLED', 141 | 'IAM authorization should be enabled'); 142 | }); 143 | 144 | it('should
configure container with mount points', async () => { 145 | const ecsServiceWithStorage = ctx.outputs.ecsServiceWithStorage.value; 146 | 147 | const containerDefs = JSON.parse(ecsServiceWithStorage.taskDefinition.containerDefinitions); 148 | assert.ok(containerDefs && containerDefs.length > 0, 'Container definitions should exist'); 149 | 150 | const container = containerDefs[0]; 151 | assert.ok(container.mountPoints && container.mountPoints.length > 0, 152 | 'Container should have mount points'); 153 | 154 | const mountPoint = container.mountPoints[0]; 155 | assert.strictEqual(mountPoint.sourceVolume, 'data-volume', 'Should reference correct volume'); 156 | assert.strictEqual(mountPoint.containerPath, '/data', 'Should mount at correct container path'); 157 | assert.strictEqual(mountPoint.readOnly, false, 'Should be writeable by default'); 158 | }); 159 | 160 | it('should successfully write to and read from EFS volume', async () => { 161 | const ecsServiceWithStorage = ctx.outputs.ecsServiceWithStorage.value; 162 | const clusterName = ctx.outputs.cluster.value.name; 163 | const region = process.env.AWS_REGION || 'us-east-2'; 164 | const logsClient = new CloudWatchLogsClient({ region }); 165 | 166 | const listCommand = new ListTasksCommand({ 167 | cluster: clusterName, 168 | family: ecsServiceWithStorage.taskDefinition.family 169 | }); 170 | const { taskArns } = await ctx.clients.ecs.send(listCommand); 171 | assert.ok(taskArns && taskArns.length > 0, 'Task should be running'); 172 | 173 | const describeTasksCommand = new DescribeTasksCommand({ 174 | cluster: clusterName, 175 | tasks: taskArns 176 | }); 177 | const { tasks = [] } = await ctx.clients.ecs.send(describeTasksCommand); 178 | const [task] = tasks; 179 | const container = task?.containers?.find(c => c.name === 'test-container'); 180 | assert.ok(container, 'Test container should exist in task'); 181 | 182 | // Determine log stream name - typically follows a pattern like: 183 | // ecs/container-name/task-id 184 | const taskId = task?.taskArn?.split('/').pop(); 185 | 186 | const describeStreamsCommand = new DescribeLogStreamsCommand({ 187 | logGroupName: ecsServiceWithStorage.logGroup.name, 188 | logStreamNamePrefix: `ecs/test-container/${taskId}` 189 | }); 190 | 191 | return backOff(async () => { 192 | const { logStreams = [] } = await logsClient.send(describeStreamsCommand); 193 | assert.ok(logStreams.length > 0, 'Log stream should exist'); 194 | 195 | const getLogsCommand = new GetLogEventsCommand({ 196 | logGroupName: ecsServiceWithStorage.logGroup.name, 197 | logStreamName: logStreams[0].logStreamName, 198 | startFromHead: true 199 | }); 200 | 201 | const { events } = await logsClient.send(getLogsCommand); 202 | assert.ok(events && events.length > 0, 'Log events should exist'); 203 | 204 | const logContent = events.map(e => e.message).join('\n'); 205 | assert.ok( 206 | logContent.includes('Successfully wrote to and read from EFS volume'), 207 | 'Logs should indicate successful EFS operation' 208 | ); 209 | assert.ok( 210 | !logContent.includes('Failed to write to or read from EFS volume'), 211 | 'Logs should not contain failure messages' 212 | ); 213 | }, { 214 | ...ctx.config.exponentialBackOffConfig, 215 | numOfAttempts: 8, 216 | }); 217 | }); 218 | } 219 | -------------------------------------------------------------------------------- /tests/ecs-service/service-discovery.test.ts: -------------------------------------------------------------------------------- 1 | import { it } from 'node:test'; 2 | import * as assert from 
'node:assert'; 3 | import { backOff } from 'exponential-backoff'; 4 | import { EcsTestContext } from './test-context'; 5 | import { GetNamespaceCommand, ListInstancesCommand } from '@aws-sdk/client-servicediscovery'; 6 | 7 | export function testEcsServiceWithServiceDiscovery(ctx: EcsTestContext) { 8 | it('should create a private DNS namespace for service discovery', async () => { 9 | const ecsWithDiscovery = ctx.outputs.ecsWithDiscovery.value; 10 | const namespace = ecsWithDiscovery.serviceDiscoveryService?.namespaceId; 11 | 12 | assert.ok(namespace, 'Service discovery namespace should be created'); 13 | 14 | const command = new GetNamespaceCommand({ Id: namespace }); 15 | const { Namespace } = await ctx.clients.sd.send(command); 16 | 17 | assert.ok(Namespace, 'Namespace should exist'); 18 | assert.strictEqual(Namespace.Type, 'DNS_PRIVATE', 'Should be a private DNS namespace'); 19 | assert.strictEqual(Namespace.Name, ecsWithDiscovery.name, 'Namespace name should match service name'); 20 | }); 21 | 22 | it('should register the service in service discovery', async () => { 23 | const ecsWithDiscovery = ctx.outputs.ecsWithDiscovery.value; 24 | const serviceId = ecsWithDiscovery.serviceDiscoveryService?.id; 25 | 26 | assert.ok(serviceId, 'Service discovery service should be created'); 27 | 28 | return backOff(async () => { 29 | const command = new ListInstancesCommand({ ServiceId: serviceId }); 30 | const { Instances } = await ctx.clients.sd.send(command); 31 | 32 | assert.ok(Instances && Instances.length > 0, 'Service should have registered instances'); 33 | }, ctx.config.exponentialBackOffConfig); 34 | }); 35 | 36 | } 37 | -------------------------------------------------------------------------------- /tests/ecs-service/test-context.ts: -------------------------------------------------------------------------------- 1 | import { OutputMap } from '@pulumi/pulumi/automation'; 2 | import { ECSClient } from '@aws-sdk/client-ecs'; 3 | import { EC2Client } from '@aws-sdk/client-ec2'; 4 | import { ElasticLoadBalancingV2Client } from '@aws-sdk/client-elastic-load-balancing-v2'; 5 | import { ServiceDiscoveryClient } from '@aws-sdk/client-servicediscovery'; 6 | import { ApplicationAutoScalingClient } from '@aws-sdk/client-application-auto-scaling'; 7 | import { EFSClient } from '@aws-sdk/client-efs'; 8 | 9 | interface ConfigContext { 10 | config: { 11 | [key: string]: any; 12 | }; 13 | } 14 | 15 | interface PulumiProgramContext { 16 | outputs: OutputMap; 17 | } 18 | 19 | interface AwsContext { 20 | clients: { 21 | ecs: ECSClient; 22 | ec2: EC2Client; 23 | elb: ElasticLoadBalancingV2Client; 24 | sd: ServiceDiscoveryClient; 25 | appAutoscaling: ApplicationAutoScalingClient; 26 | efs: EFSClient; 27 | }; 28 | } 29 | 30 | export interface EcsTestContext extends ConfigContext, PulumiProgramContext, AwsContext { }; 31 | -------------------------------------------------------------------------------- /tests/otel/validation.test.ts: -------------------------------------------------------------------------------- 1 | import { it } from 'node:test'; 2 | import * as assert from 'node:assert'; 3 | import { OtelCollectorConfigBuilder } from '../../src/v2/otel/config'; 4 | 5 | export function testOtelCollectorConfigBuilderValidation() { 6 | it('should throw error when no OTLP receiver protocols are provided', () => { 7 | const createInvalidConfig = () => new OtelCollectorConfigBuilder() 8 | .withOTLPReceiver([]) 9 | .build(); 10 | 11 | assert.throws(createInvalidConfig, { 12 | name: 'Error', 13 | message: 'At least 
one OTLP receiver protocol should be provided' 14 | }); 15 | }); 16 | 17 | it('should throw error when unsupported OTLP receiver protocol is provided', () => { 18 | const createInvalidConfig = () => new OtelCollectorConfigBuilder() 19 | // @ts-expect-error - Passing invalid protocol to test runtime error 20 | .withOTLPReceiver(['invalid']) 21 | .build(); 22 | 23 | assert.throws(createInvalidConfig, { 24 | name: 'Error', 25 | message: 'OTLP receiver protocol invalid is not supported' 26 | }); 27 | }); 28 | 29 | it('should throw error when metrics pipeline references undefined receiver', () => { 30 | const createInvalidConfig = () => new OtelCollectorConfigBuilder() 31 | .withMetricsPipeline(['otlp'], [], []) 32 | .build(); 33 | 34 | assert.throws(createInvalidConfig, { 35 | name: 'Error', 36 | message: "Receiver 'otlp' is used in metrics pipeline but not defined" 37 | }); 38 | }); 39 | 40 | it('should throw error when metrics pipeline references undefined processor', () => { 41 | const createInvalidConfig = () => new OtelCollectorConfigBuilder() 42 | .withOTLPReceiver(['http']) 43 | .withMetricsPipeline(['otlp'], ['batch'], []) 44 | .build(); 45 | 46 | assert.throws(createInvalidConfig, { 47 | name: 'Error', 48 | message: "Processor 'batch' is used in metrics pipeline but not defined" 49 | }); 50 | }); 51 | 52 | it('should throw error when metrics pipeline references undefined exporter', () => { 53 | const createInvalidConfig = () => new OtelCollectorConfigBuilder() 54 | .withOTLPReceiver(['http']) 55 | .withBatchProcessor() 56 | .withMetricsPipeline(['otlp'], ['batch'], ['debug']) 57 | .build(); 58 | 59 | assert.throws(createInvalidConfig, { 60 | name: 'Error', 61 | message: "Exporter 'debug' is used in metrics pipeline but not defined" 62 | }); 63 | }); 64 | 65 | it('should throw error when traces pipeline references undefined receiver', () => { 66 | const createInvalidConfig = () => new OtelCollectorConfigBuilder() 67 | .withTracesPipeline(['otlp'], [], []) 68 | .build(); 69 | 70 | assert.throws(createInvalidConfig, { 71 | name: 'Error', 72 | message: "Receiver 'otlp' is used in traces pipeline but not defined" 73 | }); 74 | }); 75 | 76 | it('should throw error when traces pipeline references undefined processor', () => { 77 | const createInvalidConfig = () => new OtelCollectorConfigBuilder() 78 | .withOTLPReceiver(['http']) 79 | .withTracesPipeline(['otlp'], ['memory_limiter'], []) 80 | .build(); 81 | 82 | assert.throws(createInvalidConfig, { 83 | name: 'Error', 84 | message: "Processor 'memory_limiter' is used in traces pipeline but not defined" 85 | }); 86 | }); 87 | 88 | it('should throw error when traces pipeline references undefined exporter', () => { 89 | const createInvalidConfig = () => new OtelCollectorConfigBuilder() 90 | .withOTLPReceiver(['http']) 91 | .withMemoryLimiterProcessor() 92 | .withTracesPipeline(['otlp'], ['memory_limiter'], ['awsxray']) 93 | .build(); 94 | 95 | assert.throws(createInvalidConfig, { 96 | name: 'Error', 97 | message: "Exporter 'awsxray' is used in traces pipeline but not defined" 98 | }); 99 | }); 100 | 101 | it('should throw error when memory_limiter is not the first processor in traces pipeline ', () => { 102 | const createInvalidConfig = () => new OtelCollectorConfigBuilder() 103 | .withOTLPReceiver(['http']) 104 | .withMemoryLimiterProcessor() 105 | .withBatchProcessor() 106 | .withDebug() 107 | .withTracesPipeline(['otlp'], ['batch', 'memory_limiter'], ['debug']) 108 | .build(); 109 | 110 | assert.throws(createInvalidConfig, { 111 | name: 
'Error', 112 | message: 'memory_limiter processor is not the first processor in the traces pipeline.' 113 | }); 114 | }); 115 | 116 | it('should throw error when memory_limiter is not the first processor in metrics pipeline ', () => { 117 | const createInvalidConfig = () => new OtelCollectorConfigBuilder() 118 | .withOTLPReceiver(['http']) 119 | .withMemoryLimiterProcessor() 120 | .withBatchProcessor() 121 | .withDebug() 122 | .withMetricsPipeline(['otlp'], ['batch', 'memory_limiter'], ['debug']) 123 | .build(); 124 | 125 | assert.throws(createInvalidConfig, { 126 | name: 'Error', 127 | message: 'memory_limiter processor is not the first processor in the metrics pipeline.' 128 | }); 129 | }); 130 | } 131 | -------------------------------------------------------------------------------- /tests/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "extends": "../tsconfig.options.json", 3 | "files": [], 4 | "include": ["**/*"], 5 | "exclude": ["**/app"], 6 | "references": [{ "path": "../src" }], 7 | "compilerOptions": { 8 | "noEmit": true, 9 | "paths": { 10 | "@studion/infra-code-blocks": ["../src/index.ts"] 11 | } 12 | }, 13 | "ts-node": { 14 | "transpileOnly": true, 15 | "require": ["tsconfig-paths/register"] 16 | } 17 | } 18 | -------------------------------------------------------------------------------- /tests/web-server/index.test.ts: -------------------------------------------------------------------------------- 1 | import { describe, it, before, after } from 'node:test'; 2 | import * as assert from 'node:assert'; 3 | import { InlineProgramArgs } from '@pulumi/pulumi/automation'; 4 | import { DescribeServicesCommand, DescribeTaskDefinitionCommand, ECSClient } from '@aws-sdk/client-ecs'; 5 | import { EC2Client, DescribeSecurityGroupsCommand } from '@aws-sdk/client-ec2'; 6 | import { 7 | ElasticLoadBalancingV2Client, 8 | DescribeLoadBalancersCommand, 9 | DescribeTargetGroupsCommand, 10 | DescribeListenersCommand 11 | } from '@aws-sdk/client-elastic-load-balancing-v2'; 12 | import { ACMClient } from '@aws-sdk/client-acm'; 13 | import { Route53Client } from '@aws-sdk/client-route-53'; 14 | import { backOff } from 'exponential-backoff'; 15 | import { request } from 'undici'; 16 | import status from 'http-status'; 17 | import * as automation from '../automation'; 18 | import { WebServerTestContext } from './test-context'; 19 | import * as config from './infrastructure/config'; 20 | 21 | const programArgs: InlineProgramArgs = { 22 | stackName: 'dev', 23 | projectName: 'icb-test-web-server', 24 | program: () => import('./infrastructure') 25 | }; 26 | 27 | class NonRetryableError extends Error { 28 | constructor(message: string) { 29 | super(message); 30 | this.name = 'NonRetryableError'; 31 | } 32 | } 33 | 34 | describe('Web server component deployment', () => { 35 | const region = process.env.AWS_REGION || 'us-east-2'; 36 | const ctx: WebServerTestContext = { 37 | outputs: {}, 38 | config, 39 | clients: { 40 | ecs: new ECSClient({ region }), 41 | ec2: new EC2Client({ region }), 42 | elb: new ElasticLoadBalancingV2Client({ region }), 43 | acm: new ACMClient({ region }), 44 | route53: new Route53Client({ region }) 45 | } 46 | } 47 | 48 | before(async () => { 49 | ctx.outputs = await automation.deploy(programArgs); 50 | }); 51 | 52 | after(() => automation.destroy(programArgs)); 53 | 54 | it('should create a WebServer with the correct configuration', () => { 55 | const webServer = ctx.outputs.webServer.value; 56 | assert.ok(webServer, 
'WebServer should be defined'); 57 | assert.strictEqual( 58 | webServer.name, 59 | ctx.config.webServerName, 60 | 'WebServer should have correct name' 61 | ); 62 | }); 63 | 64 | it('should create load balancer with correct configuration', async () => { 65 | const webServer = ctx.outputs.webServer.value; 66 | assert.ok(webServer.lb.lb, 'Load balancer should be defined'); 67 | 68 | const command = new DescribeLoadBalancersCommand({ 69 | LoadBalancerArns: [webServer.lb.lb.arn] 70 | }); 71 | const response = await ctx.clients.elb.send(command); 72 | const [lb] = response.LoadBalancers ?? []; 73 | 74 | assert.ok(lb, 'Load balancer should exist in AWS'); 75 | assert.strictEqual(lb.Scheme, 'internet-facing', 'Load balancer should be internet-facing'); 76 | assert.strictEqual(lb.Type, 'application', 'Load balancer should be an application load balancer'); 77 | }); 78 | 79 | it('should create target group with correct health check path', async () => { 80 | const webServer = ctx.outputs.webServer.value; 81 | 82 | const command = new DescribeTargetGroupsCommand({ 83 | TargetGroupArns: [webServer.lb.targetGroup.arn] 84 | }); 85 | 86 | const response = await ctx.clients.elb.send(command); 87 | const [tg] = response.TargetGroups ?? []; 88 | 89 | assert.ok(tg, 'Target group should exist in AWS'); 90 | assert.strictEqual( 91 | tg.HealthCheckPath, 92 | ctx.config.healthCheckPath, 93 | 'Target group should have correct health check path' 94 | ); 95 | }); 96 | 97 | it('should create HTTP listener on port 80', async () => { 98 | const webServer = ctx.outputs.webServer.value; 99 | 100 | const command = new DescribeListenersCommand({ 101 | ListenerArns: [webServer.lb.httpListener.arn] 102 | }); 103 | 104 | const response = await ctx.clients.elb.send(command); 105 | const [listener] = response.Listeners ?? []; 106 | 107 | assert.ok(listener, 'HTTP listener should exist in AWS'); 108 | assert.strictEqual(listener.Port, 80, 'HTTP listener should be on port 80'); 109 | }); 110 | 111 | it('should create appropriate security groups', async () => { 112 | const webServer = ctx.outputs.webServer.value; 113 | 114 | const lbSgCommand = new DescribeSecurityGroupsCommand({ 115 | GroupIds: [webServer.lb.securityGroup.id] 116 | }); 117 | 118 | const lbSgResponse = await ctx.clients.ec2.send(lbSgCommand); 119 | const [lbSg] = lbSgResponse.SecurityGroups ?? []; 120 | 121 | assert.ok(lbSg, 'Load balancer security group should exist'); 122 | const hasHttpTrafficPermission = lbSg.IpPermissions?.some(permission => { 123 | return permission.FromPort === 80 && permission.ToPort === 80; 124 | }); 125 | assert.ok( 126 | hasHttpTrafficPermission, 127 | 'LB security group should allow HTTP traffic' 128 | ); 129 | const hasTlsTrafficPermission = lbSg.IpPermissions?.some(permission => { 130 | return permission.FromPort === 443 && permission.ToPort === 443; 131 | }) 132 | assert.ok( 133 | hasTlsTrafficPermission, 134 | 'LB security group should allow HTTPS traffic' 135 | ); 136 | 137 | const serviceSgCommand = new DescribeSecurityGroupsCommand({ 138 | GroupIds: [webServer.serviceSecurityGroup.id] 139 | }); 140 | 141 | const serviceSgResponse = await ctx.clients.ec2.send(serviceSgCommand); 142 | const [serviceSg] = serviceSgResponse.SecurityGroups ?? 
[]; 143 | 144 | assert.ok(serviceSg, 'Service security group should exist'); 145 | const allowsIncomingLbTraffic = serviceSg.IpPermissions?.some(permission => { 146 | return permission.UserIdGroupPairs?.some(group => { 147 | return group.GroupId === webServer.lb.securityGroup.id; 148 | }); 149 | }); 150 | assert.ok( 151 | allowsIncomingLbTraffic, 152 | 'Service security group should allow traffic from load balancer' 153 | ); 154 | }); 155 | 156 | it('should include init container in task definition', async () => { 157 | const webServer = ctx.outputs.webServer.value; 158 | const { services } = await ctx.clients.ecs.send( 159 | new DescribeServicesCommand({ 160 | cluster: webServer.ecsConfig.cluster.name, 161 | services: [webServer.service.name] 162 | }) 163 | ); 164 | assert.ok(services && services.length > 0, 'Service should exist'); 165 | const [service] = services; 166 | 167 | const { taskDefinition } = await ctx.clients.ecs.send( 168 | new DescribeTaskDefinitionCommand({ taskDefinition: service.taskDefinition }) 169 | ); 170 | assert.ok(taskDefinition, 'Task definition should exist'); 171 | 172 | const containerDefs = taskDefinition.containerDefinitions; 173 | const initContainer = containerDefs?.find(({ name }) => name === 'init'); 174 | 175 | assert.ok(initContainer, 'Init container should be in task definition'); 176 | assert.strictEqual(initContainer.essential, false, 'Init container should not be essential'); 177 | }); 178 | 179 | it('should include sidecar container in the task definition', async () => { 180 | const webServer = ctx.outputs.webServer.value; 181 | const { services } = await ctx.clients.ecs.send( 182 | new DescribeServicesCommand({ 183 | cluster: webServer.ecsConfig.cluster.name, 184 | services: [webServer.service.name] 185 | }) 186 | ); 187 | assert.ok(services && services.length > 0, 'Service should exist'); 188 | const [service] = services; 189 | 190 | const { taskDefinition } = await ctx.clients.ecs.send( 191 | new DescribeTaskDefinitionCommand({ taskDefinition: service.taskDefinition }) 192 | ); 193 | assert.ok(taskDefinition, 'Task definition should exist'); 194 | 195 | const containerDefs = taskDefinition.containerDefinitions; 196 | const sidecarContainer = containerDefs?.find(({ name }) => name === 'sidecar'); 197 | 198 | assert.ok(sidecarContainer, 'Sidecar container should be in the task definition'); 199 | assert.strictEqual(sidecarContainer.essential, true, 'Sidecar should be marked as essential'); 200 | }); 201 | 202 | it('should include OpenTelemetry collector when configured', async () => { 203 | const webServer = ctx.outputs.webServer.value; 204 | const otelCollector = ctx.outputs.otelCollector.value; 205 | 206 | const { services } = await ctx.clients.ecs.send( 207 | new DescribeServicesCommand({ 208 | cluster: webServer.ecsConfig.cluster.name, 209 | services: [webServer.service.name] 210 | }) 211 | ); 212 | assert.ok(services && services.length > 0, 'Service should exist'); 213 | const [service] = services; 214 | 215 | const { taskDefinition } = await ctx.clients.ecs.send( 216 | new DescribeTaskDefinitionCommand({ taskDefinition: service.taskDefinition }) 217 | ); 218 | assert.ok(taskDefinition, 'Task definition should exist'); 219 | 220 | const containerDefs = taskDefinition.containerDefinitions; 221 | const collectorContainer = containerDefs?.find(containerDef => { 222 | return containerDef.name === otelCollector.container.name; 223 | }); 224 | 225 | assert.ok(collectorContainer, 'OTel collector container should be in task definition'); 226 | 227 | 
const hasConfigVolume = collectorContainer.mountPoints?.some(mountPoint => { 228 | return mountPoint.sourceVolume === otelCollector.configVolume; 229 | }); 230 | assert.ok(hasConfigVolume, 'OTel collector should have config volume mounted'); 231 | 232 | const configContainer = containerDefs?.find(containerDef => { 233 | return containerDef.name === otelCollector.configContainer.name 234 | }); 235 | 236 | assert.ok(configContainer, 'OTel config container should be in task definition'); 237 | assert.strictEqual(configContainer.essential, false, 'Config container should not be essential'); 238 | 239 | const hasOtelVolume = taskDefinition.volumes?.some( 240 | volume => volume.name === otelCollector.configVolume 241 | ); 242 | assert.ok(hasOtelVolume, 'Task definition should include OTel config volume'); 243 | }); 244 | 245 | it('should receive 200 status code from the healthcheck endpoint', () => { 246 | const webServer = ctx.outputs.webServer.value; 247 | const webServerLbDns = webServer.lb.lb.dnsName; 248 | 249 | if (!webServerLbDns || typeof webServerLbDns !== 'string') { 250 | throw new Error(`Invalid load balancer DNS name: ${webServerLbDns}`); 251 | } 252 | 253 | const webServerUrl = `http://${webServerLbDns}`; 254 | 255 | return backOff(async () => { 256 | const response = await request(`${webServerUrl}${ctx.config.healthCheckPath}`); 257 | if (response.statusCode === status.NOT_FOUND) { 258 | throw new NonRetryableError('Healthcheck endpoint not found'); 259 | } 260 | 261 | assert.strictEqual( 262 | response.statusCode, 263 | status.OK, 264 | `Expected status code 200 but got ${response.statusCode}` 265 | ); 266 | }, { 267 | retry: error => !(error instanceof NonRetryableError), 268 | delayFirstAttempt: true, 269 | numOfAttempts: 10, 270 | startingDelay: 1000, 271 | timeMultiple: 2, 272 | jitter: 'full' 273 | }); 274 | }); 275 | }); 276 | -------------------------------------------------------------------------------- /tests/web-server/infrastructure/config.ts: -------------------------------------------------------------------------------- 1 | export const webServerName = 'web-server-test'; 2 | export const healthCheckPath = '/healthcheck'; 3 | -------------------------------------------------------------------------------- /tests/web-server/infrastructure/index.ts: -------------------------------------------------------------------------------- 1 | import { Project, next as studion } from '@studion/infra-code-blocks'; 2 | import * as aws from '@pulumi/aws'; 3 | import * as pulumi from '@pulumi/pulumi'; 4 | import { webServerName, healthCheckPath } from './config'; 5 | 6 | const stackName = pulumi.getStack(); 7 | const project: Project = new Project(webServerName, { services: [] }); 8 | const tags = { Env: stackName, Project: webServerName }; 9 | const init = { 10 | name: 'init', 11 | image: 'busybox:latest', 12 | essential: false, 13 | command: ['sh', '-c', 'echo "Init container running" && exit 0'] 14 | }; 15 | const sidecar = { 16 | name: 'sidecar', 17 | image: 'busybox:latest', 18 | essential: true, 19 | command: ['sh', '-c', 'echo "Sidecar running" && sleep infinity'], 20 | healthCheck: { 21 | command: ["CMD-SHELL", "echo healthy || exit 1"], 22 | interval: 30, 23 | timeout: 5, 24 | retries: 3, 25 | startPeriod: 10 26 | } 27 | }; 28 | const otelCollector = new studion.openTelemetry.OtelCollectorBuilder(webServerName, stackName) 29 | .withOTLPReceiver() 30 | .withDebug() 31 | .withMetricsPipeline(['otlp'], [], ['debug']) 32 | .build(); 33 | 34 | const cluster = new 
aws.ecs.Cluster(`${webServerName}-cluster`, { 35 | name: `${webServerName}-cluster-${stackName}`, 36 | tags 37 | }); 38 | 39 | const webServer = new studion.WebServerBuilder(webServerName) 40 | .configureWebServer('nginxdemos/nginx-hello:plain-text', 8080) 41 | .configureEcs({ 42 | cluster, 43 | desiredCount: 1, 44 | size: 'small', 45 | autoscaling: { enabled: false } 46 | }) 47 | .withInitContainer(init) 48 | .withSidecarContainer(sidecar) 49 | .withVpc(project.vpc) 50 | .withOtelCollector(otelCollector) 51 | .withCustomHealthCheckPath(healthCheckPath) 52 | .build({ parent: cluster }); 53 | 54 | export { project, webServer, otelCollector }; 55 | -------------------------------------------------------------------------------- /tests/web-server/test-context.ts: -------------------------------------------------------------------------------- 1 | import { OutputMap } from '@pulumi/pulumi/automation'; 2 | import { ECSClient } from '@aws-sdk/client-ecs'; 3 | import { EC2Client } from '@aws-sdk/client-ec2'; 4 | import { ElasticLoadBalancingV2Client } from '@aws-sdk/client-elastic-load-balancing-v2'; 5 | import { ACMClient } from '@aws-sdk/client-acm'; 6 | import { Route53Client } from '@aws-sdk/client-route-53'; 7 | 8 | interface ConfigContext { 9 | config: { 10 | [key: string]: any; 11 | }; 12 | } 13 | 14 | interface PulumiProgramContext { 15 | outputs: OutputMap; 16 | } 17 | 18 | interface AwsContext { 19 | clients: { 20 | ecs: ECSClient; 21 | ec2: EC2Client; 22 | elb: ElasticLoadBalancingV2Client; 23 | acm: ACMClient; 24 | route53: Route53Client; 25 | }; 26 | } 27 | 28 | export interface WebServerTestContext extends ConfigContext, PulumiProgramContext, AwsContext { }; 29 | -------------------------------------------------------------------------------- /tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "files": [], 3 | "include": [], 4 | "references": [ 5 | { 6 | "path": "./src" 7 | }, 8 | { 9 | "path": "./tests" 10 | } 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /tsconfig.options.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "pretty": true, 4 | "target": "ES2016", 5 | "module": "commonjs", 6 | "moduleResolution": "node", 7 | 8 | "composite": true, 9 | "declaration": true, 10 | "declarationMap": true, 11 | "noEmitOnError": true, 12 | "sourceMap": false, 13 | 14 | "strict": true, 15 | "experimentalDecorators": true, 16 | "noFallthroughCasesInSwitch": true, 17 | "noImplicitReturns": true, 18 | "forceConsistentCasingInFileNames": true, 19 | 20 | "skipLibCheck": true 21 | } 22 | } 23 | -------------------------------------------------------------------------------- /tstyche.config.json: -------------------------------------------------------------------------------- 1 | { 2 | "$schema": "https://tstyche.org/schemas/config.json", 3 | "testFileMatch": ["tests/**/*.tst.ts"] 4 | } 5 | --------------------------------------------------------------------------------