├── .eslintrc.json ├── .gitattributes ├── .github ├── pull_request_template.md └── workflows │ ├── auto-approve.yml │ ├── build.yml │ ├── pull-request-lint.yml │ ├── release.yml │ └── upgrade-main.yml ├── .gitignore ├── .mergify.yml ├── .npmignore ├── .projen ├── deps.json ├── files.json └── tasks.json ├── .projenrc.ts ├── API.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── MIGRATING.md ├── NOTICE ├── README.md ├── lambda ├── assign-public-ip │ ├── .style.yapf │ ├── Pipfile │ ├── Pipfile.lock │ ├── asdf │ ├── index.py │ ├── lib │ │ ├── __init__.py │ │ ├── cleanup_resource_handler.py │ │ ├── events.py │ │ ├── queue_handler.py │ │ ├── records.py │ │ ├── records_table.py │ │ ├── route53.py │ │ └── running_task_collector.py │ ├── run_test.py │ └── test │ │ ├── __init__.py │ │ ├── fixtures │ │ ├── ddb-record.json │ │ ├── eni_description.json │ │ └── task_description.json │ │ ├── test_cleanup_resource_handler.py │ │ ├── test_events.py │ │ ├── test_queue_handler.py │ │ ├── test_records.py │ │ ├── test_records_table.py │ │ ├── test_route53.py │ │ └── test_tasks.py └── queue │ ├── index.py │ └── queue_backlog_calculator.py ├── package.json ├── src ├── environment.ts ├── extensions │ ├── aliased-port.ts │ ├── appmesh.ts │ ├── assign-public-ip │ │ ├── assign-public-ip.ts │ │ ├── index.ts │ │ └── task-record-manager.ts │ ├── cloudwatch-agent.ts │ ├── container.ts │ ├── extension-interfaces.ts │ ├── firelens.ts │ ├── http-load-balancer.ts │ ├── index.ts │ ├── injecter.ts │ ├── queue │ │ ├── index.ts │ │ └── queue.ts │ ├── scale-on-cpu-utilization.ts │ └── xray.ts ├── index.ts ├── service-description.ts └── service.ts ├── test ├── aliased-port.integ.snapshot │ ├── aws-ecs-integ.assets.json │ └── aws-ecs-integ.template.json ├── aliased-port.integ.ts ├── aliased-port.test.ts ├── all-service-addons.integ.snapshot │ ├── aws-ecs-integ.assets.json │ └── aws-ecs-integ.template.json ├── all-service-addons.integ.ts ├── appmesh.test.ts ├── assign-public-ip.integ.snapshot │ ├── aws-ecs-integ.assets.json │ └── aws-ecs-integ.template.json ├── assign-public-ip.integ.ts ├── assign-public-ip.test.ts ├── cloudwatch-agent.test.ts ├── container.test.ts ├── custom-service-extension.integ.snapshot │ ├── aws-ecs-integ.assets.json │ └── aws-ecs-integ.template.json ├── custom-service-extension.integ.ts ├── environment.test.ts ├── firelens.test.ts ├── http-load-balancer.test.ts ├── imported-environment.integ.snapshot │ ├── imported-environment-integ.assets.json │ ├── imported-environment-integ.template.json │ └── importedenvironmentintegResourcesAB23EBEF.nested.template.json ├── imported-environment.integ.ts ├── injecter.test.ts ├── multiple-environments.integ.snapshot │ ├── aws-ecs-integ.assets.json │ └── aws-ecs-integ.template.json ├── multiple-environments.integ.ts ├── publish-subscribe.integ.snapshot │ ├── aws-ecs-integ.assets.json │ └── aws-ecs-integ.template.json ├── publish-subscribe.integ.ts ├── queue-handler │ ├── Dockerfile │ ├── test.sh │ └── test_index.py ├── queue.lambda.test.ts ├── queue.test.ts ├── scale-on-cpu-utilization.test.ts ├── service.test.ts └── xray.test.ts ├── tsconfig.dev.json └── yarn.lock /.eslintrc.json: -------------------------------------------------------------------------------- 1 | // ~~ Generated by projen. To modify, edit .projenrc.ts and run "npx projen". 
2 | { 3 | "env": { 4 | "jest": true, 5 | "node": true 6 | }, 7 | "root": true, 8 | "plugins": [ 9 | "@typescript-eslint", 10 | "import", 11 | "@stylistic" 12 | ], 13 | "parser": "@typescript-eslint/parser", 14 | "parserOptions": { 15 | "ecmaVersion": 2018, 16 | "sourceType": "module", 17 | "project": "./tsconfig.dev.json" 18 | }, 19 | "extends": [ 20 | "plugin:import/typescript" 21 | ], 22 | "settings": { 23 | "import/parsers": { 24 | "@typescript-eslint/parser": [ 25 | ".ts", 26 | ".tsx" 27 | ] 28 | }, 29 | "import/resolver": { 30 | "node": {}, 31 | "typescript": { 32 | "project": "./tsconfig.dev.json", 33 | "alwaysTryTypes": true 34 | } 35 | } 36 | }, 37 | "ignorePatterns": [ 38 | "*.js", 39 | "*.d.ts", 40 | "node_modules/", 41 | "*.generated.ts", 42 | "coverage", 43 | "!.projenrc.ts", 44 | "!projenrc/**/*.ts" 45 | ], 46 | "rules": { 47 | "@stylistic/indent": [ 48 | "error", 49 | 2 50 | ], 51 | "@stylistic/quotes": [ 52 | "error", 53 | "single", 54 | { 55 | "avoidEscape": true 56 | } 57 | ], 58 | "@stylistic/comma-dangle": [ 59 | "error", 60 | "always-multiline" 61 | ], 62 | "@stylistic/comma-spacing": [ 63 | "error", 64 | { 65 | "before": false, 66 | "after": true 67 | } 68 | ], 69 | "@stylistic/no-multi-spaces": [ 70 | "error", 71 | { 72 | "ignoreEOLComments": false 73 | } 74 | ], 75 | "@stylistic/array-bracket-spacing": [ 76 | "error", 77 | "never" 78 | ], 79 | "@stylistic/array-bracket-newline": [ 80 | "error", 81 | "consistent" 82 | ], 83 | "@stylistic/object-curly-spacing": [ 84 | "error", 85 | "always" 86 | ], 87 | "@stylistic/object-curly-newline": [ 88 | "error", 89 | { 90 | "multiline": true, 91 | "consistent": true 92 | } 93 | ], 94 | "@stylistic/object-property-newline": [ 95 | "error", 96 | { 97 | "allowAllPropertiesOnSameLine": true 98 | } 99 | ], 100 | "@stylistic/keyword-spacing": [ 101 | "error" 102 | ], 103 | "@stylistic/brace-style": [ 104 | "error", 105 | "1tbs", 106 | { 107 | "allowSingleLine": true 108 | } 109 | ], 110 | "@stylistic/space-before-blocks": [ 111 | "error" 112 | ], 113 | "@stylistic/member-delimiter-style": [ 114 | "error" 115 | ], 116 | "@stylistic/semi": [ 117 | "error", 118 | "always" 119 | ], 120 | "@stylistic/max-len": [ 121 | "error", 122 | { 123 | "code": 150, 124 | "ignoreUrls": true, 125 | "ignoreStrings": true, 126 | "ignoreTemplateLiterals": true, 127 | "ignoreComments": true, 128 | "ignoreRegExpLiterals": true 129 | } 130 | ], 131 | "@stylistic/quote-props": [ 132 | "error", 133 | "consistent-as-needed" 134 | ], 135 | "@stylistic/key-spacing": [ 136 | "error" 137 | ], 138 | "@stylistic/no-multiple-empty-lines": [ 139 | "error" 140 | ], 141 | "@stylistic/no-trailing-spaces": [ 142 | "error" 143 | ], 144 | "curly": [ 145 | "error", 146 | "multi-line", 147 | "consistent" 148 | ], 149 | "@typescript-eslint/no-require-imports": "error", 150 | "import/no-extraneous-dependencies": [ 151 | "error", 152 | { 153 | "devDependencies": [ 154 | "**/test/**", 155 | "**/build-tools/**", 156 | ".projenrc.ts", 157 | "projenrc/**/*.ts" 158 | ], 159 | "optionalDependencies": false, 160 | "peerDependencies": true 161 | } 162 | ], 163 | "import/no-unresolved": [ 164 | "error" 165 | ], 166 | "import/order": [ 167 | "warn", 168 | { 169 | "groups": [ 170 | "builtin", 171 | "external" 172 | ], 173 | "alphabetize": { 174 | "order": "asc", 175 | "caseInsensitive": true 176 | } 177 | } 178 | ], 179 | "import/no-duplicates": [ 180 | "error" 181 | ], 182 | "no-shadow": [ 183 | "off" 184 | ], 185 | "@typescript-eslint/no-shadow": "error", 186 | 
"@typescript-eslint/no-floating-promises": "error", 187 | "no-return-await": [ 188 | "off" 189 | ], 190 | "@typescript-eslint/return-await": "error", 191 | "dot-notation": [ 192 | "error" 193 | ], 194 | "no-bitwise": [ 195 | "error" 196 | ], 197 | "@typescript-eslint/member-ordering": [ 198 | "error", 199 | { 200 | "default": [ 201 | "public-static-field", 202 | "public-static-method", 203 | "protected-static-field", 204 | "protected-static-method", 205 | "private-static-field", 206 | "private-static-method", 207 | "field", 208 | "constructor", 209 | "method" 210 | ] 211 | } 212 | ] 213 | }, 214 | "overrides": [ 215 | { 216 | "files": [ 217 | ".projenrc.ts" 218 | ], 219 | "rules": { 220 | "@typescript-eslint/no-require-imports": "off", 221 | "import/no-extraneous-dependencies": "off" 222 | } 223 | } 224 | ] 225 | } 226 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # ~~ Generated by projen. To modify, edit .projenrc.ts and run "npx projen". 2 | 3 | * text=auto eol=lf 4 | *.snap linguist-generated 5 | /.eslintrc.json linguist-generated 6 | /.gitattributes linguist-generated 7 | /.github/pull_request_template.md linguist-generated 8 | /.github/workflows/auto-approve.yml linguist-generated 9 | /.github/workflows/build.yml linguist-generated 10 | /.github/workflows/pull-request-lint.yml linguist-generated 11 | /.github/workflows/release.yml linguist-generated 12 | /.github/workflows/upgrade-main.yml linguist-generated 13 | /.gitignore linguist-generated 14 | /.mergify.yml linguist-generated 15 | /.npmignore linguist-generated 16 | /.projen/** linguist-generated 17 | /.projen/deps.json linguist-generated 18 | /.projen/files.json linguist-generated 19 | /.projen/tasks.json linguist-generated 20 | /API.md linguist-generated 21 | /LICENSE linguist-generated 22 | /package.json linguist-generated 23 | /tsconfig.dev.json linguist-generated 24 | /yarn.lock linguist-generated -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | Fixes # -------------------------------------------------------------------------------- /.github/workflows/auto-approve.yml: -------------------------------------------------------------------------------- 1 | # ~~ Generated by projen. To modify, edit .projenrc.ts and run "npx projen". 2 | 3 | name: auto-approve 4 | on: 5 | pull_request_target: 6 | types: 7 | - labeled 8 | - opened 9 | - synchronize 10 | - reopened 11 | - ready_for_review 12 | jobs: 13 | approve: 14 | runs-on: ubuntu-latest 15 | permissions: 16 | pull-requests: write 17 | if: contains(github.event.pull_request.labels.*.name, 'auto-approve') && (github.event.pull_request.user.login == 'cdklabs-automation') 18 | steps: 19 | - uses: hmarr/auto-approve-action@v2.2.1 20 | with: 21 | github-token: ${{ secrets.GITHUB_TOKEN }} 22 | -------------------------------------------------------------------------------- /.github/workflows/pull-request-lint.yml: -------------------------------------------------------------------------------- 1 | # ~~ Generated by projen. To modify, edit .projenrc.ts and run "npx projen". 
2 | 3 | name: pull-request-lint 4 | on: 5 | pull_request_target: 6 | types: 7 | - labeled 8 | - opened 9 | - synchronize 10 | - reopened 11 | - ready_for_review 12 | - edited 13 | merge_group: {} 14 | jobs: 15 | validate: 16 | name: Validate PR title 17 | runs-on: ubuntu-latest 18 | permissions: 19 | pull-requests: write 20 | if: (github.event_name == 'pull_request' || github.event_name == 'pull_request_target') 21 | steps: 22 | - uses: amannn/action-semantic-pull-request@v5.4.0 23 | env: 24 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} 25 | with: 26 | types: |- 27 | feat 28 | fix 29 | chore 30 | requireScope: false 31 | -------------------------------------------------------------------------------- /.github/workflows/upgrade-main.yml: -------------------------------------------------------------------------------- 1 | # ~~ Generated by projen. To modify, edit .projenrc.ts and run "npx projen". 2 | 3 | name: upgrade-main 4 | on: 5 | workflow_dispatch: {} 6 | schedule: 7 | - cron: 0 0 * * * 8 | jobs: 9 | upgrade: 10 | name: Upgrade 11 | runs-on: ubuntu-latest 12 | permissions: 13 | contents: read 14 | outputs: 15 | patch_created: ${{ steps.create_patch.outputs.patch_created }} 16 | steps: 17 | - name: Checkout 18 | uses: actions/checkout@v4 19 | with: 20 | ref: main 21 | - name: Setup Node.js 22 | uses: actions/setup-node@v4 23 | with: 24 | node-version: lts/* 25 | - name: Install dependencies 26 | run: yarn install --check-files --frozen-lockfile 27 | - name: Upgrade dependencies 28 | run: npx projen upgrade 29 | - name: Find mutations 30 | id: create_patch 31 | run: |- 32 | git add . 33 | git diff --staged --patch --exit-code > repo.patch || echo "patch_created=true" >> $GITHUB_OUTPUT 34 | working-directory: ./ 35 | - name: Upload patch 36 | if: steps.create_patch.outputs.patch_created 37 | uses: actions/upload-artifact@v4.4.0 38 | with: 39 | name: repo.patch 40 | path: repo.patch 41 | overwrite: true 42 | pr: 43 | name: Create Pull Request 44 | needs: upgrade 45 | runs-on: ubuntu-latest 46 | permissions: 47 | contents: read 48 | if: ${{ needs.upgrade.outputs.patch_created }} 49 | steps: 50 | - name: Checkout 51 | uses: actions/checkout@v4 52 | with: 53 | ref: main 54 | - name: Download patch 55 | uses: actions/download-artifact@v4 56 | with: 57 | name: repo.patch 58 | path: ${{ runner.temp }} 59 | - name: Apply patch 60 | run: '[ -s ${{ runner.temp }}/repo.patch ] && git apply ${{ runner.temp }}/repo.patch || echo "Empty patch. Skipping."' 61 | - name: Set git identity 62 | run: |- 63 | git config user.name "github-actions" 64 | git config user.email "github-actions@github.com" 65 | - name: Create Pull Request 66 | id: create-pr 67 | uses: peter-evans/create-pull-request@v6 68 | with: 69 | token: ${{ secrets.PROJEN_GITHUB_TOKEN }} 70 | commit-message: |- 71 | chore(deps): upgrade dependencies 72 | 73 | Upgrades project dependencies. See details in [workflow run]. 74 | 75 | [Workflow Run]: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} 76 | 77 | ------ 78 | 79 | *Automatically created by projen via the "upgrade-main" workflow* 80 | branch: github-actions/upgrade-main 81 | title: "chore(deps): upgrade dependencies" 82 | labels: auto-approve 83 | body: |- 84 | Upgrades project dependencies. See details in [workflow run]. 
85 | 86 | [Workflow Run]: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} 87 | 88 | ------ 89 | 90 | *Automatically created by projen via the "upgrade-main" workflow* 91 | author: github-actions 92 | committer: github-actions 93 | signoff: true 94 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # ~~ Generated by projen. To modify, edit .projenrc.ts and run "npx projen". 2 | !/.gitattributes 3 | !/.projen/tasks.json 4 | !/.projen/deps.json 5 | !/.projen/files.json 6 | !/.github/workflows/pull-request-lint.yml 7 | !/.github/workflows/auto-approve.yml 8 | !/package.json 9 | !/LICENSE 10 | !/.npmignore 11 | logs 12 | *.log 13 | npm-debug.log* 14 | yarn-debug.log* 15 | yarn-error.log* 16 | lerna-debug.log* 17 | report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json 18 | pids 19 | *.pid 20 | *.seed 21 | *.pid.lock 22 | lib-cov 23 | coverage 24 | *.lcov 25 | .nyc_output 26 | build/Release 27 | node_modules/ 28 | jspm_packages/ 29 | *.tsbuildinfo 30 | .eslintcache 31 | *.tgz 32 | .yarn-integrity 33 | .cache 34 | /test-reports/ 35 | junit.xml 36 | /coverage/ 37 | !/.github/workflows/build.yml 38 | /dist/changelog.md 39 | /dist/version.txt 40 | !/.github/workflows/release.yml 41 | !/.mergify.yml 42 | !/.github/workflows/upgrade-main.yml 43 | !/.github/pull_request_template.md 44 | !/test/ 45 | !/tsconfig.dev.json 46 | !/src/ 47 | /lib 48 | /dist/ 49 | !/.eslintrc.json 50 | .jsii 51 | tsconfig.json 52 | !/API.md 53 | test/aliased-port.integ.snapshot/asset.* 54 | test/aliased-port.integ.snapshot/**/asset.* 55 | test/aliased-port.integ.snapshot/cdk.out 56 | test/aliased-port.integ.snapshot/**/cdk.out 57 | test/aliased-port.integ.snapshot/manifest.json 58 | test/aliased-port.integ.snapshot/**/manifest.json 59 | test/aliased-port.integ.snapshot/tree.json 60 | test/aliased-port.integ.snapshot/**/tree.json 61 | test/all-service-addons.integ.snapshot/asset.* 62 | test/all-service-addons.integ.snapshot/**/asset.* 63 | test/all-service-addons.integ.snapshot/cdk.out 64 | test/all-service-addons.integ.snapshot/**/cdk.out 65 | test/all-service-addons.integ.snapshot/manifest.json 66 | test/all-service-addons.integ.snapshot/**/manifest.json 67 | test/all-service-addons.integ.snapshot/tree.json 68 | test/all-service-addons.integ.snapshot/**/tree.json 69 | test/assign-public-ip.integ.snapshot/asset.* 70 | test/assign-public-ip.integ.snapshot/**/asset.* 71 | test/assign-public-ip.integ.snapshot/cdk.out 72 | test/assign-public-ip.integ.snapshot/**/cdk.out 73 | test/assign-public-ip.integ.snapshot/manifest.json 74 | test/assign-public-ip.integ.snapshot/**/manifest.json 75 | test/assign-public-ip.integ.snapshot/tree.json 76 | test/assign-public-ip.integ.snapshot/**/tree.json 77 | test/custom-service-extension.integ.snapshot/asset.* 78 | test/custom-service-extension.integ.snapshot/**/asset.* 79 | test/custom-service-extension.integ.snapshot/cdk.out 80 | test/custom-service-extension.integ.snapshot/**/cdk.out 81 | test/custom-service-extension.integ.snapshot/manifest.json 82 | test/custom-service-extension.integ.snapshot/**/manifest.json 83 | test/custom-service-extension.integ.snapshot/tree.json 84 | test/custom-service-extension.integ.snapshot/**/tree.json 85 | test/imported-environment.integ.snapshot/asset.* 86 | test/imported-environment.integ.snapshot/**/asset.* 87 | test/imported-environment.integ.snapshot/cdk.out 88 | 
test/imported-environment.integ.snapshot/**/cdk.out 89 | test/imported-environment.integ.snapshot/manifest.json 90 | test/imported-environment.integ.snapshot/**/manifest.json 91 | test/imported-environment.integ.snapshot/tree.json 92 | test/imported-environment.integ.snapshot/**/tree.json 93 | test/multiple-environments.integ.snapshot/asset.* 94 | test/multiple-environments.integ.snapshot/**/asset.* 95 | test/multiple-environments.integ.snapshot/cdk.out 96 | test/multiple-environments.integ.snapshot/**/cdk.out 97 | test/multiple-environments.integ.snapshot/manifest.json 98 | test/multiple-environments.integ.snapshot/**/manifest.json 99 | test/multiple-environments.integ.snapshot/tree.json 100 | test/multiple-environments.integ.snapshot/**/tree.json 101 | test/.tmp 102 | test/publish-subscribe.integ.snapshot/asset.* 103 | test/publish-subscribe.integ.snapshot/**/asset.* 104 | test/publish-subscribe.integ.snapshot/cdk.out 105 | test/publish-subscribe.integ.snapshot/**/cdk.out 106 | test/publish-subscribe.integ.snapshot/manifest.json 107 | test/publish-subscribe.integ.snapshot/**/manifest.json 108 | test/publish-subscribe.integ.snapshot/tree.json 109 | test/publish-subscribe.integ.snapshot/**/tree.json 110 | !/.projenrc.ts 111 | -------------------------------------------------------------------------------- /.mergify.yml: -------------------------------------------------------------------------------- 1 | # ~~ Generated by projen. To modify, edit .projenrc.ts and run "npx projen". 2 | 3 | queue_rules: 4 | - name: default 5 | update_method: merge 6 | conditions: 7 | - "#approved-reviews-by>=1" 8 | - -label~=(do-not-merge) 9 | - status-success=build 10 | - status-success=package-js 11 | - status-success=package-java 12 | - status-success=package-python 13 | - status-success=package-dotnet 14 | - status-success=package-go 15 | merge_method: squash 16 | commit_message_template: |- 17 | {{ title }} (#{{ number }}) 18 | 19 | {{ body }} 20 | pull_request_rules: 21 | - name: Automatic merge on approval and successful build 22 | actions: 23 | delete_head_branch: {} 24 | queue: 25 | name: default 26 | conditions: 27 | - "#approved-reviews-by>=1" 28 | - -label~=(do-not-merge) 29 | - status-success=build 30 | - status-success=package-js 31 | - status-success=package-java 32 | - status-success=package-python 33 | - status-success=package-dotnet 34 | - status-success=package-go 35 | -------------------------------------------------------------------------------- /.npmignore: -------------------------------------------------------------------------------- 1 | # ~~ Generated by projen. To modify, edit .projenrc.ts and run "npx projen". 
2 | /.projen/ 3 | /test-reports/ 4 | junit.xml 5 | /coverage/ 6 | permissions-backup.acl 7 | /dist/changelog.md 8 | /dist/version.txt 9 | /.mergify.yml 10 | /test/ 11 | /tsconfig.dev.json 12 | /src/ 13 | !/lib/ 14 | !/lib/**/*.js 15 | !/lib/**/*.d.ts 16 | dist 17 | /tsconfig.json 18 | /.github/ 19 | /.vscode/ 20 | /.idea/ 21 | /.projenrc.js 22 | tsconfig.tsbuildinfo 23 | /.eslintrc.json 24 | !.jsii 25 | test/aliased-port.integ.snapshot 26 | test/all-service-addons.integ.snapshot 27 | test/assign-public-ip.integ.snapshot 28 | test/custom-service-extension.integ.snapshot 29 | test/imported-environment.integ.snapshot 30 | test/multiple-environments.integ.snapshot 31 | test/.tmp 32 | test/publish-subscribe.integ.snapshot 33 | /.gitattributes 34 | /.projenrc.ts 35 | /projenrc 36 | -------------------------------------------------------------------------------- /.projen/deps.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": [ 3 | { 4 | "name": "@stylistic/eslint-plugin", 5 | "version": "^2", 6 | "type": "build" 7 | }, 8 | { 9 | "name": "@types/jest", 10 | "type": "build" 11 | }, 12 | { 13 | "name": "@types/node", 14 | "type": "build" 15 | }, 16 | { 17 | "name": "@typescript-eslint/eslint-plugin", 18 | "version": "^8", 19 | "type": "build" 20 | }, 21 | { 22 | "name": "@typescript-eslint/parser", 23 | "version": "^8", 24 | "type": "build" 25 | }, 26 | { 27 | "name": "aws-cdk", 28 | "version": "^2", 29 | "type": "build" 30 | }, 31 | { 32 | "name": "commit-and-tag-version", 33 | "version": "^12", 34 | "type": "build" 35 | }, 36 | { 37 | "name": "eslint-import-resolver-typescript", 38 | "type": "build" 39 | }, 40 | { 41 | "name": "eslint-plugin-import", 42 | "type": "build" 43 | }, 44 | { 45 | "name": "eslint", 46 | "version": "^9", 47 | "type": "build" 48 | }, 49 | { 50 | "name": "jest", 51 | "type": "build" 52 | }, 53 | { 54 | "name": "jest-junit", 55 | "version": "^16", 56 | "type": "build" 57 | }, 58 | { 59 | "name": "jsii-diff", 60 | "type": "build" 61 | }, 62 | { 63 | "name": "jsii-docgen", 64 | "version": "^10.5.0", 65 | "type": "build" 66 | }, 67 | { 68 | "name": "jsii-pacmak", 69 | "type": "build" 70 | }, 71 | { 72 | "name": "jsii-rosetta", 73 | "version": "~5.6.0", 74 | "type": "build" 75 | }, 76 | { 77 | "name": "jsii", 78 | "version": "~5.6.0", 79 | "type": "build" 80 | }, 81 | { 82 | "name": "projen", 83 | "type": "build" 84 | }, 85 | { 86 | "name": "ts-jest", 87 | "type": "build" 88 | }, 89 | { 90 | "name": "ts-node", 91 | "type": "build" 92 | }, 93 | { 94 | "name": "typescript", 95 | "type": "build" 96 | }, 97 | { 98 | "name": "aws-cdk-lib", 99 | "version": "^2.52.0", 100 | "type": "peer" 101 | }, 102 | { 103 | "name": "constructs", 104 | "version": "^10.0.5", 105 | "type": "peer" 106 | } 107 | ], 108 | "//": "~~ Generated by projen. To modify, edit .projenrc.ts and run \"npx projen\"." 
109 | } 110 | -------------------------------------------------------------------------------- /.projen/files.json: -------------------------------------------------------------------------------- 1 | { 2 | "files": [ 3 | ".eslintrc.json", 4 | ".gitattributes", 5 | ".github/pull_request_template.md", 6 | ".github/workflows/auto-approve.yml", 7 | ".github/workflows/build.yml", 8 | ".github/workflows/pull-request-lint.yml", 9 | ".github/workflows/release.yml", 10 | ".github/workflows/upgrade-main.yml", 11 | ".gitignore", 12 | ".mergify.yml", 13 | ".projen/deps.json", 14 | ".projen/files.json", 15 | ".projen/tasks.json", 16 | "LICENSE", 17 | "tsconfig.dev.json" 18 | ], 19 | "//": "~~ Generated by projen. To modify, edit .projenrc.ts and run \"npx projen\"." 20 | } 21 | -------------------------------------------------------------------------------- /.projenrc.ts: -------------------------------------------------------------------------------- 1 | import { awscdk } from 'projen'; 2 | 3 | const project = new awscdk.AwsCdkConstructLibrary({ 4 | packageName: '@aws-cdk-containers/ecs-service-extensions', 5 | author: 'Amazon Web Services', 6 | authorAddress: 'https://aws.amazon.com', 7 | authorOrganization: true, 8 | cdkVersion: '2.52.0', 9 | defaultReleaseBranch: 'main', 10 | description: 'The CDK Construct Library that helps you build ECS services using simple extensions', 11 | name: 'cdklabs/cdk-ecs-service-extensions', 12 | projenrcTs: true, 13 | repositoryUrl: 'https://github.com/cdklabs/cdk-ecs-service-extensions.git', 14 | stability: 'experimental', 15 | majorVersion: 2, 16 | prerelease: 'alpha', 17 | 18 | peerDeps: [ 19 | 'aws-cdk-lib', 20 | 'constructs', 21 | ], 22 | devDeps: [ 23 | '@types/jest', 24 | 'jest', 25 | ], 26 | 27 | publishToPypi: { 28 | distName: 'cdk-ecs-service-extensions', 29 | module: 'cdk_ecs_service_extensions', 30 | }, 31 | 32 | publishToNuget: { 33 | packageId: 'Cdklabs.CdkEcsServiceExtensions', 34 | dotNetNamespace: 'Cdklabs.CdkEcsServiceExtensions', 35 | }, 36 | 37 | publishToMaven: { 38 | mavenGroupId: 'io.github.cdklabs', 39 | javaPackage: 'io.github.cdklabs.cdkecsserviceextensions', 40 | mavenArtifactId: 'cdk-ecs-service-extensions', 41 | mavenServerId: 'central-ossrh', 42 | }, 43 | 44 | publishToGo: { 45 | moduleName: 'github.com/cdklabs/cdk-ecs-service-extensions-go', 46 | }, 47 | 48 | autoApproveOptions: { 49 | allowedUsernames: ['cdklabs-automation'], 50 | secret: 'GITHUB_TOKEN', 51 | }, 52 | // releaseWorkflowSetupSteps: [{ 53 | // name: 'Set up Docker Buildx', 54 | // id: 'buildx', 55 | // uses: 'docker/setup-buildx-action@v1', 56 | // with: { 57 | // install: true, 58 | // }, 59 | // }], 60 | 61 | autoApproveUpgrades: true, 62 | }); 63 | 64 | project.synth(); 65 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. 
Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute to. Since our projects use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project, we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
60 | -------------------------------------------------------------------------------- /MIGRATING.md: -------------------------------------------------------------------------------- 1 | # Migrating to @aws-cdk-containers/ecs-service-extensions v2 2 | 3 | The v2 version of this module comes with two main changes. It is now 4 | compatible with CDK v2, and has multi-language support. It is recommended 5 | that you migrate your own application or construct library to v2 of 6 | `@aws-cdk-containers/ecs-service-extensions` to continue to receive 7 | features and bug fixes. 8 | 9 | There are no major API changes between v1 and v2. However, there may be 10 | some small modifications necessary to get your application ready for 11 | `@aws-cdk-containers/ecs-service-extensions` v2. 12 | 13 | ## Step 1: Migrate your CDK application or library to AWS CDK v2 14 | 15 | See the steps [here](https://docs.aws.amazon.com/cdk/v2/guide/migrating-v2.html). 16 | 17 | ## Step 2: Update dependencies in your package.json 18 | 19 | ```json 20 | { 21 | "dependencies": { 22 | "@aws-cdk-containers/ecs-service-extensions": "^2.0.0" 23 | } 24 | } 25 | ``` 26 | 27 | ## Step 3: Type Changes 28 | 29 | If you use any of the following properties, change them to use their v2 types. 30 | 31 | - `Environment.cluster` changes from type `ecs.Cluster` to `ecs.ICluster`. 32 | - `InjecterExtensionProps.injectables` changes from type `Injectable[]` to `IInjectable[]`. 33 | - `MeshProps.protocol` changes from type `appmesh.Protocol` to `Protocol`. 34 | 35 | ## Step 4: Name Changes 36 | 37 | If you use any of the following APIs, change them to use their v2 names. 38 | 39 | | v1 API | v2 API | 40 | |-------------------------------------|------------------------------------------------| 41 | | `ConnectToProps.local_bind_port` | `ConnectToProps.localBindPort` | 42 | | `Injectable` | `IInjectable` | 43 | | `GrantInjectable` | `IGrantInjectable` | 44 | -------------------------------------------------------------------------------- /NOTICE: -------------------------------------------------------------------------------- 1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/.style.yapf: -------------------------------------------------------------------------------- 1 | # Format using: yapf -ri .
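To make the type and name changes in the MIGRATING.md guide above concrete, here is a minimal, hypothetical TypeScript sketch of a v1-to-v2 update. Only the identifiers named in Steps 3 and 4 (`Environment.cluster` typed as `ecs.ICluster`, `IInjectable`, and `ConnectToProps.localBindPort`) come from the guide itself; the import layout, variable names, and the surrounding wiring are assumptions made for illustration.

```ts
import * as ecs from 'aws-cdk-lib/aws-ecs';
import { Environment, IInjectable } from '@aws-cdk-containers/ecs-service-extensions';

// Placeholders standing in for objects your application already creates.
declare const environment: Environment;
declare const injectables: IInjectable[]; // v1 name: Injectable[]

// Step 3: Environment.cluster is now typed as ecs.ICluster, so code that
// previously expected the concrete ecs.Cluster type should accept the interface.
const cluster: ecs.ICluster = environment.cluster;

// Step 4: snake_case property names become camelCase in v2, e.g. a
// ConnectToProps value changes from { local_bind_port: 8080 } to:
const connectProps = { localBindPort: 8080 };
```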
2 | # Since you're here, unit test using: python -m unittest discover 3 | [style] 4 | based_on_style = pep8 5 | column_limit = 120 6 | SPLIT_BEFORE_NAMED_ASSIGNS = False 7 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | url = "https://pypi.python.org/simple" 3 | verify_ssl = true 4 | name = "pypi" 5 | 6 | [packages] 7 | 8 | [dev-packages] 9 | yapf = "*" 10 | boto3 = "*" 11 | coverage = "*" 12 | 13 | [requires] 14 | python_version = "3.8" 15 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/asdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cdklabs/cdk-ecs-service-extensions/660e41b67563ff46fef9cde9ef0cb2421e24aa20/lambda/assign-public-ip/asdf -------------------------------------------------------------------------------- /lambda/assign-public-ip/index.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | import boto3 5 | 6 | from lib.cleanup_resource_handler import CleanupResourceHandler 7 | from lib.queue_handler import QueueHandler 8 | 9 | logging.getLogger().setLevel(logging.INFO) 10 | 11 | 12 | def queue_handler(event, context): 13 | """ 14 | Handler for the event queue lambda trigger 15 | """ 16 | 17 | ec2_client = boto3.client('ec2') 18 | dynamodb_resource = boto3.resource('dynamodb') 19 | route53_client = boto3.client('route53') 20 | 21 | handler = QueueHandler(ec2_client=ec2_client, dynamodb_resource=dynamodb_resource, route53_client=route53_client, 22 | environ=os.environ) 23 | 24 | return handler.handle(event, context) 25 | 26 | 27 | def cleanup_resource_handler(event, context): 28 | """ 29 | Event handler for the custom resource. 
30 | """ 31 | 32 | route53_client = boto3.client('route53') 33 | handler = CleanupResourceHandler(route53_client=route53_client) 34 | handler.handle_event(event, context) 35 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/lib/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cdklabs/cdk-ecs-service-extensions/660e41b67563ff46fef9cde9ef0cb2421e24aa20/lambda/assign-public-ip/lib/__init__.py -------------------------------------------------------------------------------- /lambda/assign-public-ip/lib/cleanup_resource_handler.py: -------------------------------------------------------------------------------- 1 | import time 2 | from dataclasses import dataclass 3 | import logging 4 | from typing import Any 5 | 6 | from lib.route53 import Route53RecordSetAccessor, Route53RecordSetLocator 7 | 8 | 9 | @dataclass 10 | class CleanupResourceProperties: 11 | HostedZoneId: str 12 | RecordName: str 13 | ServiceToken: str 14 | 15 | 16 | class CleanupResourceHandler: 17 | route53_client: Any 18 | monitor_interval: int 19 | 20 | def __init__(self, route53_client, monitor_interval=5): 21 | self.record_set_accessor = Route53RecordSetAccessor(route53_client=route53_client) 22 | self.monitor_interval = monitor_interval 23 | 24 | def handle_event(self, event, context): 25 | request_type = event['RequestType'] 26 | resource_properties = event['ResourceProperties'] 27 | logging.info(f'Handling a {request_type} event with properties {resource_properties}') 28 | 29 | # Decode resource properties right away so that mis-configured 30 | # properties will always fail quickly. 31 | resource_properties = CleanupResourceProperties(**resource_properties) 32 | 33 | if request_type == 'Delete': 34 | return self.on_delete(resource_properties) 35 | 36 | def on_delete(self, resource_properties: CleanupResourceProperties): 37 | locator = Route53RecordSetLocator(hosted_zone_id=resource_properties.HostedZoneId, 38 | record_name=resource_properties.RecordName) 39 | 40 | deleted = self.record_set_accessor.delete(locator=locator) 41 | 42 | if deleted: 43 | logging.info(f'Monitoring for the record deletion') 44 | for interval_number in range(1, 10): 45 | if not self.record_set_accessor.exists(locator): 46 | logging.info(f'The record has been deleted') 47 | return 48 | else: 49 | logging.info(f'The record still exists') 50 | if self.monitor_interval > 0: 51 | time.sleep(self.monitor_interval) 52 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/lib/events.py: -------------------------------------------------------------------------------- 1 | from lib.records import TaskInfo, EniInfo 2 | 3 | 4 | def extract_event_task_info(task_description) -> TaskInfo: 5 | arn = task_description['taskArn'] 6 | 7 | # Parse the eni info out of the attachments 8 | enis = [ 9 | EniInfo(eni_id=detail['value']) for network_interface in task_description['attachments'] 10 | if network_interface['type'] == 'eni' for detail in network_interface['details'] 11 | if detail['name'] == 'networkInterfaceId' 12 | ] 13 | 14 | # Create an object out of the extracted information 15 | return TaskInfo(task_arn=arn, enis=enis) -------------------------------------------------------------------------------- /lambda/assign-public-ip/lib/queue_handler.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | from
typing import Any 4 | 5 | from lib.events import extract_event_task_info 6 | from lib.records import DdbRecordKey, DdbRecord 7 | from lib.records_table import RecordsTableAccessor, RecordUpdate 8 | from lib.route53 import Route53RecordSetLocator, Route53RecordSetAccessor 9 | from lib.running_task_collector import RunningTaskCollector 10 | 11 | 12 | class QueueHandler: 13 | def __init__(self, ec2_client, route53_client, dynamodb_resource, environ): 14 | self.ec2_client = ec2_client 15 | self.route53_client = route53_client 16 | 17 | hosted_zone_id = environ['HOSTED_ZONE_ID'] 18 | record_name = environ['RECORD_NAME'] 19 | records_table = environ['RECORDS_TABLE'] 20 | 21 | cluster_arn = environ['CLUSTER_ARN'] 22 | self.service_name = environ['SERVICE_NAME'] 23 | 24 | self.records_table_key = DdbRecordKey(cluster_arn=cluster_arn, service_name=self.service_name) 25 | self.records_table_accessor = RecordsTableAccessor(table_client=dynamodb_resource.Table(records_table)) 26 | 27 | self.record_set_locator = Route53RecordSetLocator(hosted_zone_id=hosted_zone_id, record_name=record_name) 28 | self.record_set_accessor = Route53RecordSetAccessor(route53_client=self.route53_client) 29 | 30 | def handle(self, event, context): 31 | logging.info(f'event = {json.dumps(event)}') 32 | 33 | # Get a reference record from the records table to check for incoming 34 | # event inconsistencies. 35 | reference_record = self.records_table_accessor.get_record(self.records_table_key) 36 | 37 | # Collect running and stopped tasks from the status change events 38 | running_tasks, stopped_tasks = self.collect_event_task_info(event, reference_record) 39 | 40 | # Build up a set of updates for the record 41 | update = RecordUpdate(running_tasks=running_tasks, stopped_tasks=stopped_tasks) 42 | 43 | # Record the current record set locator 44 | update.current_record_set(self.record_set_locator) 45 | 46 | # Clean any extra record sets in case the recordset has moved. 
47 | for record_set_locator in reference_record.record_sets: 48 | if not record_set_locator.matches(self.record_set_locator): 49 | update.extra_record_set(record_set_locator) 50 | self.try_to_delete_record(record_set_locator) 51 | 52 | # Introduce some delay 53 | # records_table.optimistic_simulation_delay = 5 54 | 55 | # Update the record 56 | ddb_record = self.records_table_accessor.put_update(key=self.records_table_key, update=update) 57 | 58 | # Update DNS 59 | self.record_set_accessor.update(locator=self.record_set_locator, ipv4s=ddb_record.ipv4s) 60 | 61 | def collect_event_task_info(self, event, reference_record: DdbRecord): 62 | running_task_collector = RunningTaskCollector(ec2_client=self.ec2_client, reference_record=reference_record) 63 | stopped_tasks = [] 64 | for message in decode_records(event): 65 | if 'detail' not in message: 66 | logging.info(f'Received a non-task state message {message}') 67 | continue 68 | 69 | task_description = message['detail'] 70 | 71 | group = task_description['group'] 72 | if group != f'service:{self.service_name}': 73 | logging.info(f'Skipping irrelevant task description from group {group}') 74 | continue 75 | 76 | task_info = extract_event_task_info(task_description) 77 | logging.info(f'extracted task_info = {task_info}') 78 | 79 | last_status = task_description['lastStatus'] 80 | if last_status == 'RUNNING': 81 | logging.info(f'Collecting {task_info.task_arn} as running') 82 | running_task_collector.collect(task_info) 83 | 84 | elif last_status == 'STOPPED': 85 | logging.info(f'Collecting {task_info.task_arn} as stopped') 86 | stopped_tasks.append(task_info) 87 | 88 | else: 89 | logging.warning(f'{task_info.task_arn} had an unexpected status: {last_status}') 90 | 91 | # Query the ENIs store-back public IPs. 92 | running_task_collector.fill_eni_info_from_eni_query() 93 | 94 | running_tasks = running_task_collector.tasks 95 | 96 | return running_tasks, stopped_tasks 97 | 98 | def try_to_delete_record(self, record_set_locator: Route53RecordSetLocator): 99 | """ 100 | Try to delete the given record set. This may not be possible if the 101 | record is in a hosted zone we don't have access to. This may happen 102 | when the user changes dns zones at the service extension level. 103 | """ 104 | 105 | try: 106 | self.record_set_accessor.delete(record_set_locator) 107 | 108 | except: 109 | # We give up pretty easily if the record set accessor can't delete 110 | # the extraneous record for any reason that the accessor can't 111 | # handle. 112 | logging.warning(f'Could not delete the extraneous record set {record_set_locator}') 113 | 114 | 115 | def decode_records(sqs_event): 116 | logging.info(f'sqs_event = {json.dumps(sqs_event)}') 117 | return [json.loads(sqs_record['body']) for sqs_record in sqs_event['Records']] 118 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/lib/records.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass, field 2 | from datetime import datetime 3 | from typing import Optional, List, Dict, Set 4 | 5 | from boto3.dynamodb.conditions import Key 6 | 7 | from lib.route53 import Route53RecordSetLocator 8 | 9 | 10 | @dataclass 11 | class EniInfo: 12 | eni_id: str 13 | public_ipv4: Optional[str] = None 14 | 15 | 16 | @dataclass 17 | class TaskInfo: 18 | task_arn: str 19 | enis: List[EniInfo] 20 | stopped_datetime: Optional[datetime] = None 21 | 22 | # Tombstone information for the dynamodb record. 
23 | 24 | def set_stopped_marker(self): 25 | """ 26 | Mark this task as stopped. 27 | """ 28 | self.stopped_datetime = datetime.utcnow() 29 | 30 | def is_stopped(self): 31 | """ 32 | Check if this task is stopped. 33 | """ 34 | return True if self.stopped_datetime is not None else False 35 | 36 | 37 | @dataclass 38 | class DdbRecordKey: 39 | cluster_arn: str 40 | service_name: str 41 | 42 | def to_composite(self): 43 | return f'{self.cluster_arn}#{self.service_name}' 44 | 45 | @staticmethod 46 | def from_composite(composite: str): 47 | cluster_arn, service_name = composite.split('#') 48 | return DdbRecordKey(cluster_arn=cluster_arn, service_name=service_name) 49 | 50 | 51 | @dataclass 52 | class DdbRecord: 53 | key: DdbRecordKey 54 | ipv4s: Set[str] = field(default_factory=set) 55 | task_info: Dict[str, TaskInfo] = field(default_factory=dict) 56 | record_sets: Set[Route53RecordSetLocator] = field(default_factory=set) 57 | version: int = 0 58 | 59 | def task_is_stopped(self, task_info: TaskInfo): 60 | """ 61 | Check if a task has already stopped. 62 | """ 63 | 64 | return task_info.task_arn in self.task_info and self.task_info[task_info.task_arn].is_stopped() 65 | 66 | 67 | class DdbRecordEncoding: 68 | PK_NAME = 'cluster_service' 69 | ATTR_VERSION = 'version' 70 | ATTR_IPV4S = 'ipv4s' 71 | ATTR_TASK_INFO = 'task_info' 72 | ATTR_TASK_ARN = 'task_arn' 73 | ATTR_TASK_ENIS = 'enis' 74 | ATTR_TASK_STOPPED_DATETIME = 'stopped_datetime' 75 | ATTR_ENI_ID = 'eni_id' 76 | ATTR_ENI_PUBLIC_IPV4 = 'public_ipv4' 77 | ATTR_RECORD_SETS = 'record_sets' 78 | ATTR_RECORD_SET_ZONE = 'hosted_zone_id' 79 | ATTR_RECORD_SET_NAME = 'record_name' 80 | 81 | def get_identity(self, key: DdbRecordKey): 82 | return {self.PK_NAME: key.to_composite()} 83 | 84 | def get_identity_expression(self, key: DdbRecordKey): 85 | return Key(self.PK_NAME).eq(key.to_composite()) 86 | 87 | def encode(self, record: DdbRecord) -> dict: 88 | data = dict() 89 | data[self.PK_NAME] = record.key.to_composite() 90 | data[self.ATTR_VERSION] = record.version 91 | 92 | if len(record.ipv4s) > 0: 93 | # Sorting only matters here for repeatability in tests, as set ordering 94 | # isn't easily predictable. 
95 | data[self.ATTR_IPV4S] = [v for v in sorted(record.ipv4s)] 96 | 97 | if len(record.record_sets) > 0: 98 | data[self.ATTR_RECORD_SETS] = [self.encode_record_set(v) for v in sorted(record.record_sets)] 99 | 100 | if len(record.task_info) > 0: 101 | data[self.ATTR_TASK_INFO] = { 102 | task_info.task_arn: self.encode_task_info(task_info) 103 | for task_info in record.task_info.values() 104 | } 105 | 106 | return data 107 | 108 | def encode_record_set(self, record_set: Route53RecordSetLocator): 109 | return { 110 | self.ATTR_RECORD_SET_ZONE: record_set.hosted_zone_id, 111 | self.ATTR_RECORD_SET_NAME: record_set.record_name, 112 | } 113 | 114 | def encode_task_info(self, task_info: TaskInfo) -> dict: 115 | data = dict() 116 | data[self.ATTR_TASK_ARN] = task_info.task_arn 117 | 118 | if task_info.stopped_datetime is not None: 119 | data[self.ATTR_TASK_STOPPED_DATETIME] = task_info.stopped_datetime.isoformat() 120 | 121 | if len(task_info.enis) > 0: 122 | data[self.ATTR_TASK_ENIS] = [self.encode_eni_info(eni_info) for eni_info in task_info.enis] 123 | 124 | return data 125 | 126 | def encode_eni_info(self, eni_info: EniInfo) -> dict: 127 | data = dict() 128 | data[self.ATTR_ENI_ID] = eni_info.eni_id 129 | if eni_info.public_ipv4 is not None: 130 | data[self.ATTR_ENI_PUBLIC_IPV4] = eni_info.public_ipv4 131 | 132 | return data 133 | 134 | def decode(self, data: dict) -> DdbRecord: 135 | key = DdbRecordKey.from_composite(data[self.PK_NAME]) 136 | version = int(data[self.ATTR_VERSION]) 137 | 138 | ipv4s = set() 139 | if self.ATTR_IPV4S in data: 140 | ipv4s = {ip for ip in data[self.ATTR_IPV4S]} 141 | 142 | record_sets = set() 143 | if self.ATTR_RECORD_SETS in data: 144 | for record_set_data in data[self.ATTR_RECORD_SETS]: 145 | record_set = self.decode_record_set(record_set_data) 146 | record_sets.add(record_set) 147 | 148 | task_info = dict() 149 | if self.ATTR_TASK_INFO in data: 150 | task_info = { 151 | k: self.decode_task_info(task_info_data) 152 | for (k, task_info_data) in data[self.ATTR_TASK_INFO].items() 153 | } 154 | 155 | record = DdbRecord(key=key, version=version, ipv4s=ipv4s, task_info=task_info, record_sets=record_sets) 156 | 157 | return record 158 | 159 | def decode_record_set(self, data) -> Route53RecordSetLocator: 160 | hosted_zone_id = data[self.ATTR_RECORD_SET_ZONE] 161 | record_name = data[self.ATTR_RECORD_SET_NAME] 162 | 163 | return Route53RecordSetLocator(hosted_zone_id=hosted_zone_id, record_name=record_name) 164 | 165 | def decode_task_info(self, data) -> TaskInfo: 166 | task_arn = data[self.ATTR_TASK_ARN] 167 | 168 | stopped_datetime = None 169 | if self.ATTR_TASK_STOPPED_DATETIME in data: 170 | stopped_datetime = datetime.fromisoformat(data[self.ATTR_TASK_STOPPED_DATETIME]) 171 | 172 | enis = [] 173 | if self.ATTR_TASK_ENIS in data: 174 | enis = [self.decode_eni_info(eni_info_data) for eni_info_data in data[self.ATTR_TASK_ENIS]] 175 | 176 | return TaskInfo(task_arn=task_arn, stopped_datetime=stopped_datetime, enis=enis) 177 | 178 | def decode_eni_info(self, data) -> EniInfo: 179 | eni_id = data[self.ATTR_ENI_ID] 180 | 181 | public_ipv4 = None 182 | if self.ATTR_ENI_PUBLIC_IPV4 in data: 183 | public_ipv4 = data[self.ATTR_ENI_PUBLIC_IPV4] 184 | 185 | return EniInfo(eni_id=eni_id, public_ipv4=public_ipv4) 186 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/lib/route53.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | import time 3 | from 
typing import * 4 | import logging 5 | 6 | from botocore.exceptions import ClientError 7 | 8 | 9 | @dataclass 10 | class Route53RecordSetLocator: 11 | hosted_zone_id: str 12 | record_name: str 13 | 14 | def __str__(self): 15 | """String serialization for hashing and comparison""" 16 | return f'{self.hosted_zone_id}#{self.record_name}' 17 | 18 | def __hash__(self): 19 | """Unique hash for this object is based on its string serialization""" 20 | return int.from_bytes(self.__str__().encode(), 'little') 21 | 22 | def __lt__(self, other): 23 | """set() uses this""" 24 | return self.__str__() < other.__str__() 25 | 26 | def get_dot_suffixed_name(self): 27 | return self.record_name + '.' 28 | 29 | def matches_record_set(self, record_set): 30 | return record_set['Name'] == self.get_dot_suffixed_name() 31 | 32 | def matches(self, record_set_locator): 33 | return self.record_name == record_set_locator.record_name and self.hosted_zone_id == record_set_locator.hosted_zone_id 34 | 35 | 36 | class Route53RecordSetAccessor: 37 | route53_client: Any 38 | ttl = 60 39 | 40 | def __init__(self, route53_client: Any): 41 | self.route53_client = route53_client 42 | 43 | def update(self, locator: Route53RecordSetLocator, ipv4s: Set[str] = None): 44 | ipv4s = set() if ipv4s is None else ipv4s 45 | 46 | record_set, is_new = retry_with_backoff(lambda: self.get_record_set(locator)) 47 | if is_new: 48 | logging.info('Creating a new record set') 49 | else: 50 | logging.info(f'Found a pre-existing record set: {record_set}') 51 | 52 | if len(ipv4s) > 0: 53 | record_set['ResourceRecords'] = map_ips_to_resource_records(ipv4s) 54 | retry_with_backoff(lambda: self.request_upsert(locator, record_set)) 55 | elif not is_new: 56 | retry_with_backoff(lambda: self.request_delete(locator, record_set)) 57 | else: 58 | logging.info('Refusing to do anything with a new but empty recordset') 59 | 60 | def get_record_set(self, locator: Route53RecordSetLocator) -> Tuple[dict, bool]: 61 | record_type = 'A' 62 | result = self.route53_client.list_resource_record_sets(HostedZoneId=locator.hosted_zone_id, 63 | StartRecordName=locator.record_name, 64 | StartRecordType=record_type, MaxItems="1") 65 | 66 | logging.info(f'Query result: {result}') 67 | existing_record_set = find_locator_record_set(locator, record_type, result['ResourceRecordSets']) 68 | if existing_record_set: 69 | return existing_record_set, False 70 | else: 71 | return { 72 | 'Name': locator.get_dot_suffixed_name(), 73 | 'Type': record_type, 74 | 'ResourceRecords': [], 75 | 'TTL': self.ttl 76 | }, True 77 | 78 | def request_upsert(self, locator: Route53RecordSetLocator, record_set): 79 | logging.info(f'Upserting record set {record_set}') 80 | self.route53_client.change_resource_record_sets( 81 | HostedZoneId=locator.hosted_zone_id, ChangeBatch={ 82 | 'Comment': 'Automatic', 83 | 'Changes': [{ 84 | 'Action': 'UPSERT', 85 | 'ResourceRecordSet': record_set 86 | }] 87 | }) 88 | 89 | def delete(self, locator: Route53RecordSetLocator): 90 | """ 91 | Delete the record. Returns true if it found and deleted the record. 92 | Returns false if it didn't need to delete anything.
93 | """ 94 | 95 | logging.info(f'Querying for {locator}') 96 | record_set, is_new = retry_with_backoff(lambda: self.get_record_set(locator)) 97 | 98 | if not is_new: 99 | logging.info(f'Found a record set') 100 | retry_with_backoff(lambda: self.request_delete(locator, record_set)) 101 | logging.info(f'Deleted record set {record_set}') 102 | return True 103 | 104 | else: 105 | logging.info(f'Did not find a record set, so no deletion needed') 106 | return False 107 | 108 | def exists(self, locator: Route53RecordSetLocator): 109 | """ 110 | Returns true if the record exists. False otherwise. 111 | """ 112 | 113 | _, is_new = retry_with_backoff(lambda: self.get_record_set(locator)) 114 | 115 | return not is_new 116 | 117 | def request_delete(self, locator: Route53RecordSetLocator, record_set): 118 | logging.info(f'Deleting record set: {record_set}') 119 | self.route53_client.change_resource_record_sets( 120 | HostedZoneId=locator.hosted_zone_id, ChangeBatch={ 121 | 'Comment': 'Automatic', 122 | 'Changes': [{ 123 | 'Action': 'DELETE', 124 | 'ResourceRecordSet': record_set, 125 | }] 126 | }) 127 | 128 | 129 | def exponential_backoff(attempt: int): 130 | return 2**attempt 131 | 132 | 133 | def retry_with_backoff(call: Callable, attempts=5, backoff=exponential_backoff): 134 | for attempt in range(0, attempts): 135 | try: 136 | return call() 137 | 138 | except ClientError as e: 139 | if e.response['Error']['Code'] == 'Throttling': 140 | backoff_seconds = backoff(attempt) 141 | logging.info(f'Attempt {attempt+1} throttled. Backing off for {backoff_seconds}.') 142 | time.sleep(backoff_seconds) 143 | continue 144 | 145 | if e.response['Error']['Code'] == 'PriorRequestNotComplete': 146 | backoff_seconds = backoff(attempt) 147 | logging.info( 148 | f'Attempt {attempt+1} discovered the prior request is not yet complete. Backing off for {backoff_seconds}.' 149 | ) 150 | time.sleep(backoff_seconds) 151 | continue 152 | 153 | raise 154 | 155 | 156 | def map_ips_to_resource_records(ips: Set[str]): 157 | # Take up to the first 400 ips after sorting as the max recordset record quota is 400 158 | ips_sorted_limited = sorted(ips)[0:400] 159 | return [{'Value': ip} for ip in ips_sorted_limited] 160 | 161 | 162 | def find_locator_record_set(locator: Route53RecordSetLocator, record_type: str, record_sets: list): 163 | for record_set in record_sets: 164 | if locator.matches_record_set(record_set) and record_set['Type'] == record_type: 165 | return record_set 166 | 167 | return None 168 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/lib/running_task_collector.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import * 3 | 4 | from lib.records import DdbRecord, EniInfo, TaskInfo 5 | 6 | 7 | class RunningTaskCollector: 8 | """ 9 | Collects information about running tasks. After collecting all task info, 10 | when `fill_eni_info_from_eni_query()` is called, the collector queries 11 | for the ip addresses of the tasks and fills in the appropriate records. 12 | """ 13 | 14 | ec2_client: Any 15 | tasks: List[TaskInfo] 16 | enis_by_id: Dict[str, EniInfo] 17 | 18 | def __init__(self, ec2_client, reference_record: DdbRecord): 19 | self.ec2_client = ec2_client 20 | self.tasks = list() 21 | self.enis_by_id = dict() 22 | self.reference_record = reference_record 23 | 24 | def collect(self, task_info): 25 | # Check to see if the task we've received is already stopped. 
If so, 26 | # we refuse to collect it on the basis that we'll just get an eni 27 | # doesn't exist error anyway. 28 | if self.reference_record.task_is_stopped(task_info): 29 | logging.info(f'Refusing to collect {task_info.task_arn} as it has already been deleted') 30 | return 31 | 32 | # Append the task info to the master list 33 | self.tasks.append(task_info) 34 | 35 | # Collect enis indexed by their ids 36 | for eni in task_info.enis: 37 | self.enis_by_id[eni.eni_id] = eni 38 | 39 | def fill_eni_info_from_eni_query(self): 40 | for eni_description in self.describe_enis(): 41 | eni_id = eni_description['NetworkInterfaceId'] 42 | 43 | if 'Association' in eni_description: 44 | public_ipv4 = eni_description['Association']['PublicIp'] 45 | if public_ipv4 and eni_id in self.enis_by_id: 46 | self.enis_by_id[eni_id].public_ipv4 = public_ipv4 47 | 48 | def describe_enis(self): 49 | paginator = self.ec2_client.get_paginator('describe_network_interfaces') 50 | 51 | eni_ids = list(self.enis_by_id.keys()) 52 | for page in paginator.paginate(NetworkInterfaceIds=eni_ids): 53 | for eni in page['NetworkInterfaces']: 54 | yield eni 55 | 56 | def get_ips(self): 57 | return [eni.public_ipv4 for eni in self.enis_by_id.values()] 58 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/run_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | if __name__ == "__main__": 4 | test_suite = unittest.defaultTestLoader.discover('.') 5 | unittest.TextTestRunner().run(test_suite) 6 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/test/__init__.py: -------------------------------------------------------------------------------- 1 | # Keep this file so that python -m unittest discover can find these tests. 
2 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/test/fixtures/ddb-record.json: -------------------------------------------------------------------------------- 1 | { 2 | "cluster_service": "CLUSTER_ARN#SERVICE", 3 | "ipv4s": [ 4 | "1.1.2.1", 5 | "1.1.2.2" 6 | ], 7 | "task_info": { 8 | "TASK1_ARN": { 9 | "enis": [ 10 | { 11 | "eni_id": "TASK1_ENI1_ID", 12 | "public_ipv4": "1.1.1.1" 13 | } 14 | ], 15 | "stopped_datetime": "2020-10-04T23:47:36.322158", 16 | "task_arn": "TASK1_ARN" 17 | }, 18 | "TASK2_ARN": { 19 | "enis": [ 20 | { 21 | "eni_id": "TASK2_ENI1_ID", 22 | "public_ipv4": "1.1.2.1" 23 | }, 24 | { 25 | "eni_id": "TASK2_ENI2_ID", 26 | "public_ipv4": "1.1.2.2" 27 | } 28 | ], 29 | "task_arn": "TASK2_ARN" 30 | } 31 | }, 32 | "record_sets": [ 33 | { 34 | "hosted_zone_id": "ABCD", 35 | "record_name": "test-record.myexample.com" 36 | }, 37 | { 38 | "hosted_zone_id": "ABCD", 39 | "record_name": "test-record2.myexample.com" 40 | } 41 | ], 42 | "version": 12 43 | } 44 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/test/fixtures/eni_description.json: -------------------------------------------------------------------------------- 1 | { 2 | "Association": { 3 | "IpOwnerId": "amazon", 4 | "PublicDnsName": "example.com", 5 | "PublicIp": "1.2.3.4" 6 | }, 7 | "Attachment": { 8 | "AttachTime": "2020-10-03T23:42:51+00:00", 9 | "AttachmentId": "eni-attach-0704671692ecf366b", 10 | "DeleteOnTermination": false, 11 | "DeviceIndex": 1, 12 | "InstanceOwnerId": "000000000", 13 | "Status": "attached" 14 | }, 15 | "AvailabilityZone": "test-region-1a", 16 | "Description": "arn:aws:ecs:test-region-1:0000000000:attachment/20d24cce-3d50-493d-b890-32d4f11859f4", 17 | "Groups": [ 18 | { 19 | "GroupName": "aws-ecs-integ-nameserviceSecurityGroup33F4662C-16PM465FOR8L1", 20 | "GroupId": "sg-0b83d6ad2edd8e940" 21 | } 22 | ], 23 | "InterfaceType": "interface", 24 | "Ipv6Addresses": [], 25 | "MacAddress": "02:a4:cb:74:0f:a8", 26 | "NetworkInterfaceId": "eni-abcd", 27 | "OwnerId": "0000000000", 28 | "PrivateDnsName": "ip-10-0-0-19.test-region-1.compute.internal", 29 | "PrivateIpAddress": "10.0.0.19", 30 | "PrivateIpAddresses": [ 31 | { 32 | "Association": { 33 | "IpOwnerId": "amazon", 34 | "PublicDnsName": "example.com", 35 | "PublicIp": "1.2.3.4" 36 | }, 37 | "Primary": true, 38 | "PrivateDnsName": "ip-10-0-0-19.test-region-1.compute.internal", 39 | "PrivateIpAddress": "10.0.0.19" 40 | } 41 | ], 42 | "RequesterId": "0000000000", 43 | "RequesterManaged": true, 44 | "SourceDestCheck": true, 45 | "Status": "in-use", 46 | "SubnetId": "subnet-036b0d1413bb6bd2c", 47 | "TagSet": [], 48 | "VpcId": "vpc-0e63014e689c4b14f" 49 | } 50 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/test/fixtures/task_description.json: -------------------------------------------------------------------------------- 1 | { 2 | "attachments": [ 3 | { 4 | "id": "", 5 | "type": "eni", 6 | "status": "DELETED", 7 | "details": [ 8 | { 9 | "name": "subnetId", 10 | "value": "subnet-" 11 | }, 12 | { 13 | "name": "networkInterfaceId", 14 | "value": "eni-abcd" 15 | }, 16 | { 17 | "name": "macAddress", 18 | "value": "" 19 | }, 20 | { 21 | "name": "privateIPv4Address", 22 | "value": "10.0.0.52" 23 | } 24 | ] 25 | } 26 | ], 27 | "availabilityZone": "test-region-1a", 28 | "clusterArn": "arn:aws:ecs:test-region-1::cluster/aws-ecs-integ-productionenvironmentclusterC6599D2D-U7W8a2P2HPhC", 29 | 
"containers": [ 30 | { 31 | "containerArn": "arn:aws:ecs:test-region-1::container/ff3b49f4-5eea-46cd-99c6-069584b3fb8e", 32 | "exitCode": 1, 33 | "lastStatus": "STOPPED", 34 | "name": "app", 35 | "image": "nathanpeck/name", 36 | "runtimeId": "", 37 | "taskArn": "arn:aws:ecs:test-region-1::task/12345678-1234-1234-1234-1234567890AB", 38 | "networkInterfaces": [ 39 | { 40 | "attachmentId": "323eb03f-dedf-44b6-aa5f-d9d7f7b37714", 41 | "privateIpv4Address": "10.0.0.52" 42 | } 43 | ], 44 | "cpu": "256", 45 | "memory": "512" 46 | } 47 | ], 48 | "createdAt": "2020-10-03T22:31:35.117Z", 49 | "launchType": "FARGATE", 50 | "cpu": "256", 51 | "memory": "512", 52 | "desiredStatus": "STOPPED", 53 | "group": "service:aws-ecs-integ-nameserviceService8015C8D6-I4TwUFv4xk2o", 54 | "lastStatus": "STOPPED", 55 | "overrides": { 56 | "containerOverrides": [ 57 | { 58 | "name": "app" 59 | } 60 | ] 61 | }, 62 | "connectivity": "CONNECTED", 63 | "connectivityAt": "2020-10-03T22:31:43.32Z", 64 | "pullStartedAt": "2020-10-03T22:31:46.764Z", 65 | "startedAt": "2020-10-03T22:31:54.764Z", 66 | "startedBy": "ecs-svc/7073659324082574009", 67 | "stoppingAt": "2020-10-03T22:43:06.753Z", 68 | "stoppedAt": "2020-10-03T22:43:31.542Z", 69 | "pullStoppedAt": "2020-10-03T22:31:53.764Z", 70 | "executionStoppedAt": "2020-10-03T22:43:08Z", 71 | "stoppedReason": "Scaling activity initiated by (deployment ecs-svc/7073659324082574009)", 72 | "updatedAt": "2020-10-03T22:43:31.542Z", 73 | "taskArn": "arn:aws:ecs:test-region-1::task/12345678-1234-1234-1234-1234567890AB", 74 | "taskDefinitionArn": "arn:aws:ecs:test-region-1::task-definition/awsecsintegnametaskdefinition0EA6A1A0:3", 75 | "version": 7, 76 | "platformVersion": "1.3.0" 77 | } 78 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/test/test_cleanup_resource_handler.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import unittest.mock as mock 3 | 4 | from lib.cleanup_resource_handler import CleanupResourceHandler 5 | from lib.route53 import Route53RecordSetLocator 6 | 7 | 8 | class TestCleanupResourceHandler(unittest.TestCase): 9 | def test_handler_rejects_invalid_properties(self): 10 | handler = CleanupResourceHandler(route53_client=mock.Mock()) 11 | with self.assertRaises(Exception): 12 | handler.handle_event({'RequestType': 'Delete', 'ResourceProperties': {'Invalid': 'Invalid'}}, {}) 13 | 14 | def test_handling_delete(self): 15 | handler = CleanupResourceHandler(route53_client=mock.Mock(), monitor_interval=0) 16 | record_set_accessor = mock.Mock() 17 | record_set_accessor.delete = mock.Mock(return_value=True) # True = Deleted 18 | 19 | exists_count = 0 20 | 21 | def exists_side_effect(*args): 22 | nonlocal exists_count 23 | exists_count += 1 24 | return True if exists_count < 3 else False 25 | 26 | record_set_accessor.exists = mock.Mock(side_effect=exists_side_effect) 27 | 28 | handler.record_set_accessor = record_set_accessor 29 | 30 | event = { 31 | 'RequestType': 'Delete', 32 | 'ResourceProperties': { 33 | 'ServiceToken': 'Something', 34 | 'HostedZoneId': 'ZONE', 35 | 'RecordName': 'something.mydomain.com' 36 | } 37 | } 38 | 39 | # WHEN 40 | handler.handle_event(event, {}) 41 | 42 | # THEN 43 | expected_locator = Route53RecordSetLocator(hosted_zone_id='ZONE', record_name='something.mydomain.com') 44 | record_set_accessor.delete.assert_called_with(locator=expected_locator) 45 | record_set_accessor.exists.assert_called() 46 | 
self.assertEqual(record_set_accessor.exists.call_count, 3) 47 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/test/test_events.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import unittest 4 | 5 | from lib.events import extract_event_task_info 6 | 7 | THIS_DIR = os.path.abspath(os.path.dirname(__file__)) 8 | with open(os.path.join(THIS_DIR, 'fixtures', 'task_description.json')) as f: 9 | TASK_DESCRIPTION = json.loads(f.read()) 10 | 11 | 12 | class TestEvents(unittest.TestCase): 13 | def test_extract_event_task_info(self): 14 | task_info = extract_event_task_info(TASK_DESCRIPTION) 15 | 16 | self.assertEqual(task_info.task_arn, 'arn:aws:ecs:test-region-1::task/12345678-1234-1234-1234-1234567890AB') 17 | self.assertTrue(not task_info.is_stopped()) 18 | 19 | self.assertEqual(len(task_info.enis), 1) 20 | self.assertEqual(task_info.enis[0].eni_id, 'eni-abcd') 21 | self.assertEqual(task_info.enis[0].public_ipv4, None) 22 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/test/test_queue_handler.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | import unittest.mock as mock 3 | 4 | from lib.queue_handler import QueueHandler 5 | 6 | 7 | class TestQueueHandler(unittest.TestCase): 8 | def test_queue_handler_sets_up(self): 9 | environ = { 10 | 'HOSTED_ZONE_ID': 'HOSTED_ZONE_ID', 11 | 'RECORD_NAME': 'RECORD_NAME', 12 | 'RECORDS_TABLE': 'RECORDS_TABLE', 13 | 'CLUSTER_ARN': 'CLUSTER_ARN', 14 | 'SERVICE_NAME': 'SERVICE_NAME', 15 | } 16 | 17 | ec2_client = mock.Mock() 18 | route53_client = mock.Mock() 19 | dynamodb_resource = mock.Mock() 20 | 21 | # WHEN 22 | 23 | handler = QueueHandler(ec2_client=ec2_client, route53_client=route53_client, 24 | dynamodb_resource=dynamodb_resource, environ=environ) 25 | 26 | # THEN 27 | dynamodb_resource.Table.called_width('RECORDS_TABLE') 28 | 29 | pass 30 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/test/test_records.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import unittest 4 | from datetime import datetime 5 | 6 | from lib.records import DdbRecordEncoding, TaskInfo, EniInfo 7 | 8 | THIS_DIR = os.path.abspath(os.path.dirname(__file__)) 9 | with open(os.path.join(THIS_DIR, 'fixtures', 'ddb-record.json')) as f: 10 | DDB_RECORD_ENCODED = json.loads(f.read()) 11 | 12 | 13 | class TestRecords(unittest.TestCase): 14 | def test_task_info_stopped_marker(self): 15 | task_info = TaskInfo(task_arn='a', enis=[]) 16 | task_info.set_stopped_marker() 17 | self.assertTrue(task_info.is_stopped()) 18 | 19 | def test_ddb_record_encoding(self): 20 | # GIVEN 21 | ddb_record_encoding = DdbRecordEncoding() 22 | 23 | # WHEN 24 | ddb_record = ddb_record_encoding.decode(DDB_RECORD_ENCODED) 25 | ddb_record_reencoded = ddb_record_encoding.encode(ddb_record) 26 | 27 | # THEN 28 | self.assertEqual(ddb_record.key.cluster_arn, 'CLUSTER_ARN') 29 | self.assertEqual(ddb_record.key.service_name, 'SERVICE') 30 | self.assertEqual(sorted(ddb_record.ipv4s), ['1.1.2.1', '1.1.2.2']) 31 | self.assertEqual( 32 | ddb_record.task_info['TASK1_ARN'], 33 | TaskInfo(task_arn='TASK1_ARN', stopped_datetime=datetime(2020, 10, 4, 23, 47, 36, 322158), enis=[ 34 | EniInfo(eni_id='TASK1_ENI1_ID', public_ipv4='1.1.1.1'), 35 | ])) 36 | 
self.assertEqual( 37 | ddb_record.task_info['TASK2_ARN'], 38 | TaskInfo( 39 | task_arn='TASK2_ARN', enis=[ 40 | EniInfo(eni_id='TASK2_ENI1_ID', public_ipv4='1.1.2.1'), 41 | EniInfo(eni_id='TASK2_ENI2_ID', public_ipv4='1.1.2.2'), 42 | ])) 43 | self.assertEqual(len(ddb_record.record_sets), 2) 44 | 45 | self.maxDiff = 9999999 46 | self.assertEqual(ddb_record_reencoded, DDB_RECORD_ENCODED) 47 | -------------------------------------------------------------------------------- /lambda/assign-public-ip/test/test_records_table.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import unittest 4 | import unittest.mock as mock 5 | 6 | from boto3.dynamodb.conditions import ConditionExpressionBuilder 7 | from botocore.exceptions import ClientError 8 | 9 | from lib.records import DdbRecordKey, TaskInfo, EniInfo, DdbRecord 10 | from lib.records_table import RecordsTableAccessor, update_ddb_record, RecordUpdate 11 | from lib.route53 import Route53RecordSetLocator 12 | 13 | THIS_DIR = os.path.abspath(os.path.dirname(__file__)) 14 | with open(os.path.join(THIS_DIR, 'fixtures', 'ddb-record.json')) as f: 15 | DDB_RECORD_ENCODED = json.loads(f.read()) 16 | 17 | 18 | class TestRecordsTable(unittest.TestCase): 19 | def test_put_tasks_creates_with_optimistic_lock(self): 20 | # GIVEN 21 | table_client = mock.Mock() 22 | table_client.query = mock.Mock(return_value={'Items': []}) 23 | 24 | key = DdbRecordKey(cluster_arn='a', service_name='b') 25 | records_table = RecordsTableAccessor(table_client=table_client) 26 | 27 | running = [TaskInfo(task_arn='TASK1_ARN', enis=[ 28 | EniInfo(eni_id='TASK1_ENI1_ID', public_ipv4='1.1.1.1'), 29 | ])] 30 | 31 | # WHEN 32 | records_table.put_update_optimistically(key=key, update=RecordUpdate(running_tasks=running)) 33 | 34 | # THEN 35 | table_client.put_item.assert_called() 36 | item = table_client.put_item.call_args.kwargs['Item'] 37 | self.assertEqual(item['version'], 1) 38 | 39 | condition_expression = table_client.put_item.call_args.kwargs['ConditionExpression'] 40 | expr, atts, vals = ConditionExpressionBuilder().build_expression(condition_expression) 41 | self.assertEqual(expr, '(attribute_not_exists(#n0) OR #n1 = :v0)') 42 | self.assertEqual(atts, {'#n0': 'version', '#n1': 'version'}) 43 | self.assertEqual(vals, {':v0': 0}) 44 | 45 | def test_put_tasks_updates_with_optimistic_lock(self): 46 | # GIVEN 47 | table_client = mock.Mock() 48 | table_client.query = mock.Mock(return_value={'Items': [dict(DDB_RECORD_ENCODED)]}) 49 | 50 | key = DdbRecordKey(cluster_arn='FOO', service_name='test.myexample.com') 51 | records_table = RecordsTableAccessor(table_client=table_client) 52 | 53 | running = [TaskInfo(task_arn='TASK1_ARN', enis=[ 54 | EniInfo(eni_id='TASK1_ENI1_ID', public_ipv4='1.1.1.1'), 55 | ])] 56 | 57 | # WHEN 58 | records_table.put_update_optimistically(key=key, update=RecordUpdate(running_tasks=running)) 59 | 60 | # THEN 61 | condition_expression = table_client.put_item.call_args.kwargs['ConditionExpression'] 62 | expr, atts, vals = ConditionExpressionBuilder().build_expression(condition_expression) 63 | self.assertEqual(vals, {':v0': 12}) 64 | 65 | def test_put_tasks_retries_optimistically(self): 66 | # GIVEN 67 | table_client = mock.Mock() 68 | table_client.query = mock.Mock(return_value={'Items': []}) 69 | table_client.put_item = mock.Mock( 70 | side_effect=ClientError({'Error': { 71 | 'Code': 'ConditionalCheckFailedException' 72 | }}, 'PutItem')) 73 | 74 | records_table = 
RecordsTableAccessor(table_client=table_client) 75 | key = DdbRecordKey(cluster_arn='a', service_name='b') 76 | 77 | # WHEN 78 | with self.assertRaisesRegex(Exception, r'Exceeded maximum retries'): 79 | records_table.put_update(key=key, update=RecordUpdate()) 80 | 81 | # THEN 82 | self.assertEqual(table_client.query.call_count, records_table.max_attempts) 83 | self.assertEqual(table_client.put_item.call_count, records_table.max_attempts) 84 | 85 | def test_put_tasks_raises_other_errors(self): 86 | # GIVEN 87 | table_client = mock.Mock() 88 | table_client.query = mock.Mock(return_value={'Items': []}) 89 | table_client.put_item = mock.Mock(side_effect=ClientError({'Error': {'Code': 'SomethingElse'}}, 'PutItem')) 90 | 91 | records_table = RecordsTableAccessor(table_client=table_client) 92 | key = DdbRecordKey(cluster_arn='a', service_name='b') 93 | 94 | # WHEN 95 | with self.assertRaisesRegex(Exception, r'SomethingElse'): 96 | records_table.put_update(key=key, update=RecordUpdate()) 97 | 98 | # THEN 99 | self.assertEqual(table_client.query.call_count, 1) 100 | self.assertEqual(table_client.put_item.call_count, 1) 101 | 102 | def test_delete(self): 103 | # GIVEN 104 | table_client = mock.Mock() 105 | key = DdbRecordKey(cluster_arn='a', service_name='b') 106 | records_table = RecordsTableAccessor(table_client=table_client) 107 | 108 | # WHEN 109 | records_table.delete(key) 110 | 111 | # THEN 112 | table_client.delete_item.called_with(Key='a#b') 113 | 114 | def test_update_ddb_record(self): 115 | # GIVEN 116 | ddb_record = DdbRecord(key=DdbRecordKey(cluster_arn='a', service_name='b')) 117 | 118 | # TASK1->RUNNING, TASK2->RUNNING 119 | ord1_running = [ 120 | TaskInfo(task_arn='TASK1_ARN', enis=[ 121 | EniInfo(eni_id='TASK1_ENI1_ID', public_ipv4='1.1.1.1'), 122 | ]), 123 | TaskInfo(task_arn='TASK2_ARN', enis=[ 124 | EniInfo(eni_id='TASK2_ENI1_ID', public_ipv4='1.1.2.1'), 125 | ]), 126 | ] 127 | # TASK3->STOPPED (out of order) 128 | ord1_stopped = [ 129 | TaskInfo(task_arn='TASK3_ARN', enis=[ 130 | EniInfo(eni_id='TASK3_ENI1_ID'), 131 | ]), 132 | ] 133 | 134 | # TASK1->STOPPED, TASK3->STOPPED (duplicate) 135 | ord2_stopped = [ 136 | # Expected TASK1 transition to STOPPED 137 | TaskInfo(task_arn='TASK1_ARN', enis=[ 138 | EniInfo(eni_id='TASK1_ENI1_ID'), 139 | ]), 140 | # Duplicate TASK3 transition to STOPPED 141 | TaskInfo(task_arn='TASK3_ARN', enis=[ 142 | EniInfo(eni_id='TASK3_ENI1_ID'), 143 | ]), 144 | ] 145 | 146 | # TASK1->RUNNING (out of order), TASK3->RUNNING (out of order) 147 | ord3_running = [ 148 | TaskInfo(task_arn='TASK1_ARN', enis=[ 149 | EniInfo(eni_id='TASK1_ENI1_ID', public_ipv4='1.1.1.1'), 150 | ]), 151 | TaskInfo(task_arn='TASK3_ARN', enis=[ 152 | EniInfo(eni_id='TASK3_ENI1_ID', public_ipv4='1.1.3.1'), 153 | ]), 154 | ] 155 | 156 | # WHEN 157 | update_ddb_record(ddb_record, RecordUpdate(running_tasks=ord1_running, stopped_tasks=ord1_stopped)) 158 | update_ddb_record(ddb_record, RecordUpdate(stopped_tasks=ord2_stopped)) 159 | update_ddb_record(ddb_record, RecordUpdate(running_tasks=ord3_running)) 160 | 161 | # THEN 162 | self.assertEqual(len(ddb_record.task_info), 3, msg='expected 3 task infos') 163 | self.assertTrue(ddb_record.task_info['TASK1_ARN'].is_stopped()) 164 | self.assertTrue(not ddb_record.task_info['TASK2_ARN'].is_stopped()) 165 | self.assertTrue(ddb_record.task_info['TASK3_ARN'].is_stopped()) 166 | 167 | self.assertFalse('1.1.1.1' in ddb_record.ipv4s, 168 | msg='ord3_running should have been ignored because the task previously stopped') 169 | 
self.assertEqual(sorted(ddb_record.ipv4s), ['1.1.2.1']) 170 | 171 | def test_update_record_sets(self): 172 | # GIVEN 173 | ddb_record = DdbRecord(key=DdbRecordKey(cluster_arn='a', service_name='b')) 174 | ord1 = [ 175 | Route53RecordSetLocator('a', 'b'), 176 | Route53RecordSetLocator('a', 'c'), 177 | ] 178 | ord2 = [ 179 | Route53RecordSetLocator('a', 'b'), 180 | ] 181 | 182 | # WHEN 183 | update_ddb_record(ddb_record, RecordUpdate(record_sets_added=ord1)) 184 | update_ddb_record(ddb_record, RecordUpdate(record_sets_removed=ord2)) 185 | 186 | # THEN 187 | self.assertEqual(len(ddb_record.record_sets), 1) 188 | self.assertTrue(Route53RecordSetLocator('a', 'c') in ddb_record.record_sets) -------------------------------------------------------------------------------- /lambda/assign-public-ip/test/test_tasks.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import unittest 4 | import unittest.mock as mock 5 | from datetime import datetime 6 | 7 | from lib.events import extract_event_task_info 8 | from lib.records import TaskInfo, DdbRecord, DdbRecordKey 9 | from lib.running_task_collector import RunningTaskCollector 10 | 11 | THIS_DIR = os.path.abspath(os.path.dirname(__file__)) 12 | with open(os.path.join(THIS_DIR, 'fixtures', 'task_description.json')) as f: 13 | TASK_DESCRIPTION = json.loads(f.read()) 14 | with open(os.path.join(THIS_DIR, 'fixtures', 'eni_description.json')) as f: 15 | ENI_DESCRIPTION = json.loads(f.read()) 16 | 17 | 18 | class TestRunningTasksCollector(unittest.TestCase): 19 | def test_task_collector(self): 20 | # GIVEN 21 | ec2_client = mock.Mock() 22 | paginator = mock.Mock() 23 | paginator.paginate = mock.Mock(return_value=[{'NetworkInterfaces': [ENI_DESCRIPTION]}]) 24 | ec2_client.get_paginator = mock.Mock(return_value=paginator) 25 | 26 | reference_record = DdbRecord(key=DdbRecordKey(cluster_arn="A", service_name="B")) 27 | collector = RunningTaskCollector(ec2_client=ec2_client, reference_record=reference_record) 28 | 29 | # WHEN 30 | task_info = extract_event_task_info(TASK_DESCRIPTION) 31 | collector.collect(task_info) 32 | collector.fill_eni_info_from_eni_query() 33 | 34 | # THEN 35 | paginator.paginate.assert_called_with(NetworkInterfaceIds=['eni-abcd']) 36 | self.assertTrue('1.2.3.4' in collector.get_ips()) 37 | 38 | def test_task_collector_doesnt_collect_stopped_tasks(self): 39 | # GIVEN 40 | ec2_client = mock.Mock() 41 | paginator = mock.Mock() 42 | paginator.paginate = mock.Mock(return_value=[{'NetworkInterfaces': [ENI_DESCRIPTION]}]) 43 | ec2_client.get_paginator = mock.Mock(return_value=paginator) 44 | 45 | task_arn = TASK_DESCRIPTION['taskArn'] 46 | task_info = {task_arn: TaskInfo(task_arn=task_arn, enis=[], stopped_datetime=datetime.utcnow())} 47 | reference_record = DdbRecord(key=DdbRecordKey(cluster_arn="A", service_name="B"), task_info=task_info) 48 | collector = RunningTaskCollector(ec2_client=ec2_client, reference_record=reference_record) 49 | 50 | # WHEN 51 | task_info = extract_event_task_info(TASK_DESCRIPTION) 52 | collector.collect(task_info) 53 | 54 | # THEN 55 | self.assertEqual(len(collector.tasks), 0) 56 | -------------------------------------------------------------------------------- /lambda/queue/index.py: -------------------------------------------------------------------------------- 1 | import os 2 | import boto3 3 | from queue_backlog_calculator import QueueHandler 4 | 5 | def queue_handler(event, context): 6 | """ 7 | Handler for the lambda trigger 8 | """ 9 | 10 | ecs 
= boto3.client('ecs') 11 | sqs = boto3.client('sqs') 12 | 13 | queue_handler = QueueHandler(ecs_client=ecs, sqs_client=sqs, environ=os.environ) 14 | 15 | return queue_handler.emit() 16 | -------------------------------------------------------------------------------- /lambda/queue/queue_backlog_calculator.py: -------------------------------------------------------------------------------- 1 | from math import ceil 2 | import time 3 | import json 4 | 5 | class QueueHandler: 6 | def __init__(self, ecs_client, sqs_client, environ): 7 | self.ecs = ecs_client 8 | self.sqs = sqs_client 9 | self.cluster_name = environ['CLUSTER_NAME'] 10 | self.service_name = environ['SERVICE_NAME'] 11 | self.namespace = environ['NAMESPACE'] 12 | self.queue_names = environ['QUEUE_NAMES'].split(',') 13 | 14 | def emit(self): 15 | try: 16 | running_count = self.get_running_task_count() 17 | backlogs = [self.get_queue_backlog(queue_name, running_count) for queue_name in self.queue_names] 18 | self.timestamp = int(time.time() * 1000) 19 | for backlog in backlogs: 20 | self.emit_backlog_per_task_metric(backlog['queueName'], backlog['backlogPerTask']) 21 | except Exception as e: 22 | raise Exception('Exception: {}'.format(e)) 23 | 24 | """ 25 | Write the backlogPerTask metric to stdout in the CloudWatch Embedded Metric Format. 26 | """ 27 | def emit_backlog_per_task_metric(self, queue_name, backlog_per_task): 28 | print(json.dumps({ 29 | "_aws": { 30 | "Timestamp": self.timestamp, 31 | "CloudWatchMetrics": [{ 32 | "Namespace": self.namespace, 33 | "Dimensions": [["QueueName"]], 34 | "Metrics": [{"Name":"BacklogPerTask", "Unit": "Count"}] 35 | }], 36 | }, 37 | "QueueName": queue_name, 38 | "BacklogPerTask": backlog_per_task, 39 | })) 40 | 41 | """ 42 | Get the number of tasks in the 'RUNNING' state for the service 'service_name'. 43 | """ 44 | def get_running_task_count(self): 45 | service_desc = self.ecs.describe_services( 46 | cluster=self.cluster_name, 47 | services=[self.service_name], 48 | ) 49 | if len(service_desc['services']) == 0: 50 | raise Exception('There are no services with name {} in cluster: {}'.format(self.service_name, self.cluster_name)) 51 | return service_desc['services'][0].get('runningCount', 0) 52 | 53 | """ 54 | This method calculates and returns the backlogPerTask metric for the given queue.
55 | """ 56 | def get_queue_backlog(self, queue_name, count): 57 | queue_url = self.sqs.get_queue_url(QueueName=queue_name) 58 | running_count = 1 if count == 0 else count 59 | 60 | def get_backlog_per_task(): 61 | queue_attributes = self.sqs.get_queue_attributes( 62 | QueueUrl=queue_url['QueueUrl'], 63 | AttributeNames=['ApproximateNumberOfMessages'] 64 | ) 65 | num_of_msgs = int(queue_attributes['Attributes'].get('ApproximateNumberOfMessages', 0)) 66 | return ceil(num_of_msgs/running_count) 67 | 68 | return { 69 | 'queueName': queue_name, 70 | 'backlogPerTask': get_backlog_per_task() 71 | } -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "@aws-cdk-containers/ecs-service-extensions", 3 | "description": "The CDK Construct Library that helps you build ECS services using simple extensions", 4 | "repository": { 5 | "type": "git", 6 | "url": "https://github.com/cdklabs/cdk-ecs-service-extensions.git" 7 | }, 8 | "scripts": { 9 | "build": "npx projen build", 10 | "bump": "npx projen bump", 11 | "clobber": "npx projen clobber", 12 | "compat": "npx projen compat", 13 | "compile": "npx projen compile", 14 | "default": "npx projen default", 15 | "docgen": "npx projen docgen", 16 | "eject": "npx projen eject", 17 | "eslint": "npx projen eslint", 18 | "integ:aliased-port:assert": "npx projen integ:aliased-port:assert", 19 | "integ:aliased-port:deploy": "npx projen integ:aliased-port:deploy", 20 | "integ:aliased-port:destroy": "npx projen integ:aliased-port:destroy", 21 | "integ:aliased-port:snapshot": "npx projen integ:aliased-port:snapshot", 22 | "integ:aliased-port:watch": "npx projen integ:aliased-port:watch", 23 | "integ:all-service-addons:assert": "npx projen integ:all-service-addons:assert", 24 | "integ:all-service-addons:deploy": "npx projen integ:all-service-addons:deploy", 25 | "integ:all-service-addons:destroy": "npx projen integ:all-service-addons:destroy", 26 | "integ:all-service-addons:snapshot": "npx projen integ:all-service-addons:snapshot", 27 | "integ:all-service-addons:watch": "npx projen integ:all-service-addons:watch", 28 | "integ:assign-public-ip:assert": "npx projen integ:assign-public-ip:assert", 29 | "integ:assign-public-ip:deploy": "npx projen integ:assign-public-ip:deploy", 30 | "integ:assign-public-ip:destroy": "npx projen integ:assign-public-ip:destroy", 31 | "integ:assign-public-ip:snapshot": "npx projen integ:assign-public-ip:snapshot", 32 | "integ:assign-public-ip:watch": "npx projen integ:assign-public-ip:watch", 33 | "integ:custom-service-extension:assert": "npx projen integ:custom-service-extension:assert", 34 | "integ:custom-service-extension:deploy": "npx projen integ:custom-service-extension:deploy", 35 | "integ:custom-service-extension:destroy": "npx projen integ:custom-service-extension:destroy", 36 | "integ:custom-service-extension:snapshot": "npx projen integ:custom-service-extension:snapshot", 37 | "integ:custom-service-extension:watch": "npx projen integ:custom-service-extension:watch", 38 | "integ:imported-environment:assert": "npx projen integ:imported-environment:assert", 39 | "integ:imported-environment:deploy": "npx projen integ:imported-environment:deploy", 40 | "integ:imported-environment:destroy": "npx projen integ:imported-environment:destroy", 41 | "integ:imported-environment:snapshot": "npx projen integ:imported-environment:snapshot", 42 | "integ:imported-environment:watch": "npx projen 
integ:imported-environment:watch", 43 | "integ:multiple-environments:assert": "npx projen integ:multiple-environments:assert", 44 | "integ:multiple-environments:deploy": "npx projen integ:multiple-environments:deploy", 45 | "integ:multiple-environments:destroy": "npx projen integ:multiple-environments:destroy", 46 | "integ:multiple-environments:snapshot": "npx projen integ:multiple-environments:snapshot", 47 | "integ:multiple-environments:watch": "npx projen integ:multiple-environments:watch", 48 | "integ:publish-subscribe:assert": "npx projen integ:publish-subscribe:assert", 49 | "integ:publish-subscribe:deploy": "npx projen integ:publish-subscribe:deploy", 50 | "integ:publish-subscribe:destroy": "npx projen integ:publish-subscribe:destroy", 51 | "integ:publish-subscribe:snapshot": "npx projen integ:publish-subscribe:snapshot", 52 | "integ:publish-subscribe:watch": "npx projen integ:publish-subscribe:watch", 53 | "integ:snapshot-all": "npx projen integ:snapshot-all", 54 | "package": "npx projen package", 55 | "package-all": "npx projen package-all", 56 | "package:dotnet": "npx projen package:dotnet", 57 | "package:go": "npx projen package:go", 58 | "package:java": "npx projen package:java", 59 | "package:js": "npx projen package:js", 60 | "package:python": "npx projen package:python", 61 | "post-compile": "npx projen post-compile", 62 | "post-upgrade": "npx projen post-upgrade", 63 | "pre-compile": "npx projen pre-compile", 64 | "release": "npx projen release", 65 | "test": "npx projen test", 66 | "test:watch": "npx projen test:watch", 67 | "unbump": "npx projen unbump", 68 | "upgrade": "npx projen upgrade", 69 | "watch": "npx projen watch", 70 | "projen": "npx projen" 71 | }, 72 | "author": { 73 | "name": "Amazon Web Services", 74 | "url": "https://aws.amazon.com", 75 | "organization": true 76 | }, 77 | "devDependencies": { 78 | "@stylistic/eslint-plugin": "^2", 79 | "@types/jest": "^27", 80 | "@types/node": "^16 <= 16.18.78", 81 | "@typescript-eslint/eslint-plugin": "^8", 82 | "@typescript-eslint/parser": "^8", 83 | "aws-cdk": "^2", 84 | "aws-cdk-lib": "2.52.0", 85 | "commit-and-tag-version": "^12", 86 | "constructs": "10.0.5", 87 | "eslint": "^9", 88 | "eslint-import-resolver-typescript": "^2.7.1", 89 | "eslint-plugin-import": "^2.31.0", 90 | "jest": "^27", 91 | "jest-junit": "^16", 92 | "jsii": "~5.6.0", 93 | "jsii-diff": "^1.112.0", 94 | "jsii-docgen": "^10.5.0", 95 | "jsii-pacmak": "^1.112.0", 96 | "jsii-rosetta": "~5.6.0", 97 | "projen": "0.92.9", 98 | "ts-jest": "^27", 99 | "ts-node": "^10.9.2", 100 | "typescript": "^4.9.5" 101 | }, 102 | "peerDependencies": { 103 | "aws-cdk-lib": "^2.52.0", 104 | "constructs": "^10.0.5" 105 | }, 106 | "keywords": [ 107 | "cdk" 108 | ], 109 | "main": "lib/index.js", 110 | "license": "Apache-2.0", 111 | "version": "0.0.0", 112 | "jest": { 113 | "coverageProvider": "v8", 114 | "testMatch": [ 115 | "/@(src|test)/**/*(*.)@(spec|test).ts?(x)", 116 | "/@(src|test)/**/__tests__/**/*.ts?(x)", 117 | "/@(projenrc)/**/*(*.)@(spec|test).ts?(x)", 118 | "/@(projenrc)/**/__tests__/**/*.ts?(x)" 119 | ], 120 | "clearMocks": true, 121 | "collectCoverage": true, 122 | "coverageReporters": [ 123 | "json", 124 | "lcov", 125 | "clover", 126 | "cobertura", 127 | "text" 128 | ], 129 | "coverageDirectory": "coverage", 130 | "coveragePathIgnorePatterns": [ 131 | "/node_modules/" 132 | ], 133 | "testPathIgnorePatterns": [ 134 | "/node_modules/" 135 | ], 136 | "watchPathIgnorePatterns": [ 137 | "/node_modules/" 138 | ], 139 | "reporters": [ 140 | "default", 141 | [ 142 | 
"jest-junit", 143 | { 144 | "outputDirectory": "test-reports" 145 | } 146 | ] 147 | ], 148 | "preset": "ts-jest", 149 | "globals": { 150 | "ts-jest": { 151 | "tsconfig": "tsconfig.dev.json" 152 | } 153 | } 154 | }, 155 | "types": "lib/index.d.ts", 156 | "stability": "experimental", 157 | "jsii": { 158 | "outdir": "dist", 159 | "targets": { 160 | "java": { 161 | "package": "io.github.cdklabs.cdkecsserviceextensions", 162 | "maven": { 163 | "groupId": "io.github.cdklabs", 164 | "artifactId": "cdk-ecs-service-extensions" 165 | } 166 | }, 167 | "python": { 168 | "distName": "cdk-ecs-service-extensions", 169 | "module": "cdk_ecs_service_extensions" 170 | }, 171 | "dotnet": { 172 | "namespace": "Cdklabs.CdkEcsServiceExtensions", 173 | "packageId": "Cdklabs.CdkEcsServiceExtensions" 174 | }, 175 | "go": { 176 | "moduleName": "github.com/cdklabs/cdk-ecs-service-extensions-go" 177 | } 178 | }, 179 | "tsc": { 180 | "outDir": "lib", 181 | "rootDir": "src" 182 | } 183 | }, 184 | "//": "~~ Generated by projen. To modify, edit .projenrc.ts and run \"npx projen\"." 185 | } 186 | -------------------------------------------------------------------------------- /src/environment.ts: -------------------------------------------------------------------------------- 1 | import * as ec2 from 'aws-cdk-lib/aws-ec2'; 2 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 3 | import { Construct } from 'constructs'; 4 | import { EnvironmentCapacityType } from './extensions/extension-interfaces'; 5 | 6 | /** 7 | * Settings for the environment where you want to deploy your services. 8 | */ 9 | export interface EnvironmentProps { 10 | /** 11 | * The VPC used by the service for networking. 12 | * 13 | * @default - Create a new VPC 14 | */ 15 | readonly vpc?: ec2.IVpc; 16 | 17 | /** 18 | * The ECS cluster which provides compute capacity to this service. 19 | * 20 | * [disable-awslint:ref-via-interface] 21 | * @default - Create a new cluster 22 | */ 23 | readonly cluster?: ecs.Cluster; 24 | 25 | /** 26 | * The type of capacity to use for this environment. 27 | * 28 | * @default - EnvironmentCapacityType.FARGATE 29 | */ 30 | readonly capacityType?: EnvironmentCapacityType; 31 | } 32 | 33 | /** 34 | * An environment into which to deploy a service. 35 | */ 36 | export interface IEnvironment { 37 | /** 38 | * The name of this environment. 39 | */ 40 | readonly id: string; 41 | 42 | /** 43 | * The VPC into which environment services should be placed. 44 | */ 45 | readonly vpc: ec2.IVpc; 46 | 47 | /** 48 | * The cluster that is providing capacity for this service. 49 | */ 50 | readonly cluster: ecs.ICluster; 51 | 52 | /** 53 | * The capacity type used by the service's cluster. 54 | */ 55 | readonly capacityType: EnvironmentCapacityType; 56 | 57 | /** 58 | * Add a default cloudmap namespace to the environment's cluster. 59 | */ 60 | addDefaultCloudMapNamespace(options: ecs.CloudMapNamespaceOptions): void; 61 | } 62 | 63 | /** 64 | * An environment into which to deploy a service. This environment 65 | * can either be instantiated with a pre-existing AWS VPC and ECS cluster, 66 | * or it can create its own VPC and cluster. By default, it will create 67 | * a cluster with Fargate capacity. 68 | */ 69 | export class Environment extends Construct implements IEnvironment { 70 | /** 71 | * Import an existing environment from its attributes. 
72 | */ 73 | public static fromEnvironmentAttributes(scope: Construct, id: string, attrs: EnvironmentAttributes): IEnvironment { 74 | return new ImportedEnvironment(scope, id, attrs); 75 | } 76 | 77 | /** 78 | * The name of this environment. 79 | */ 80 | public readonly id: string; 81 | 82 | /** 83 | * The VPC where environment services should be placed. 84 | */ 85 | public readonly vpc: ec2.IVpc; 86 | 87 | /** 88 | * The cluster that is providing capacity for this service. 89 | */ 90 | public get cluster(): ecs.ICluster { 91 | return this._cluster; 92 | }; 93 | 94 | /** 95 | * The capacity type used by the service's cluster. 96 | */ 97 | public readonly capacityType: EnvironmentCapacityType; 98 | 99 | private readonly scope: Construct; 100 | private readonly _cluster: ecs.Cluster; 101 | 102 | constructor(scope: Construct, id: string, props?: EnvironmentProps) { 103 | super(scope, id); 104 | 105 | this.scope = scope; 106 | this.id = id; 107 | 108 | if (props && props.vpc) { 109 | this.vpc = props.vpc; 110 | } else { 111 | this.vpc = new ec2.Vpc(this.scope, `${this.id}-environment-vpc`); 112 | } 113 | 114 | if (props && props.cluster) { 115 | this._cluster = props.cluster; 116 | } else { 117 | this._cluster = new ecs.Cluster(this.scope, `${this.id}-environment-cluster`, { vpc: this.vpc }); 118 | } 119 | 120 | if (props && props.capacityType) { 121 | this.capacityType = props.capacityType; 122 | } else { 123 | this.capacityType = EnvironmentCapacityType.FARGATE; 124 | } 125 | } 126 | 127 | /** 128 | * Add a default cloudmap namespace to the environment's cluster. 129 | * The environment's cluster must not be imported. 130 | */ 131 | public addDefaultCloudMapNamespace(options: ecs.CloudMapNamespaceOptions) { 132 | this._cluster.addDefaultCloudMapNamespace(options); 133 | } 134 | } 135 | 136 | export interface EnvironmentAttributes { 137 | /** 138 | * The capacity type used by the service's cluster. 139 | */ 140 | readonly capacityType: EnvironmentCapacityType; 141 | 142 | /** 143 | * The cluster that is providing capacity for this service. 144 | */ 145 | readonly cluster: ecs.ICluster; 146 | } 147 | 148 | export class ImportedEnvironment extends Construct implements IEnvironment { 149 | public readonly capacityType: EnvironmentCapacityType; 150 | public readonly cluster: ecs.ICluster; 151 | public readonly id: string; 152 | public readonly vpc: ec2.IVpc; 153 | 154 | constructor(scope: Construct, id: string, props: EnvironmentAttributes) { 155 | super(scope, id); 156 | 157 | this.id = id; 158 | this.capacityType = props.capacityType; 159 | this.cluster = props.cluster; 160 | this.vpc = props.cluster.vpc; 161 | } 162 | 163 | /** 164 | * Adding a default cloudmap namespace to the cluster will throw an error, as we don't 165 | * own it. 
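 * If you need Cloud Map service discovery, create the Environment (and its cluster) within this app and call `addDefaultCloudMapNamespace` on that environment instead.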
166 | */ 167 | addDefaultCloudMapNamespace(_options: ecs.CloudMapNamespaceOptions) { 168 | throw new Error('the cluster environment is immutable when imported'); 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /src/extensions/aliased-port.ts: -------------------------------------------------------------------------------- 1 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 2 | import { Construct } from 'constructs'; 3 | import { Container } from './container'; 4 | import { ContainerMutatingHook, ServiceBuild, ServiceExtension } from './extension-interfaces'; 5 | import { Service } from '../service'; 6 | 7 | 8 | /** 9 | * AliasedPortProps defines the properties of an aliased port extension 10 | */ 11 | export interface AliasedPortProps { 12 | /** 13 | * The DNS alias to advertise for downstream clients. 14 | */ 15 | readonly alias: string; 16 | /** 17 | * The protocol to use over the specified port. 18 | * 19 | * May be one of HTTP, HTTP2, or GRPC. 20 | * 21 | * @default - none 22 | */ 23 | readonly appProtocol?: ecs.AppProtocol; 24 | 25 | /** 26 | * The traffic port for clients to use to connect to the DNS alias. 27 | * 28 | * @default - same as containerPort. 29 | */ 30 | readonly aliasPort?: number; 31 | } 32 | 33 | export class AliasedPortExtension extends ServiceExtension { 34 | protected alias: string; 35 | protected aliasPort?: number; 36 | protected appProtocol?: ecs.AppProtocol; 37 | protected namespace?: string; 38 | 39 | constructor(props: AliasedPortProps) { 40 | super('aliasedPort'); 41 | 42 | this.aliasPort = props.aliasPort; 43 | this.alias = props.alias; 44 | this.appProtocol = props.appProtocol; 45 | } 46 | 47 | public prehook(service: Service, scope: Construct) { 48 | this.parentService = service; 49 | this.scope = scope; 50 | 51 | // If there isn't a default cloudmap namespace on the cluster, create a private HTTP namespace for SC. 
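 // The namespace is named after the environment id, so a service deployed into an environment created as 'production' ends up with a Cloud Map namespace named 'production'.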
52 | if (!this.parentService.cluster.defaultCloudMapNamespace) { 53 | this.parentService.environment.addDefaultCloudMapNamespace({ 54 | name: this.parentService.environment.id, 55 | }); 56 | } 57 | this.namespace = this.parentService.environment.cluster.defaultCloudMapNamespace?.namespaceName; 58 | } 59 | 60 | public addHooks(): void { 61 | const containerextension = this.parentService.serviceDescription.get('service-container') as Container; 62 | if (!containerextension) { 63 | throw new Error('Aliased Port extension requires a Container extension to already exist.'); 64 | } 65 | 66 | containerextension.addContainerMutatingHook(new AliasedPortMutatingHook({ 67 | portMappingName: this.alias, 68 | aliasPort: containerextension.trafficPort, 69 | protocol: this.appProtocol, 70 | })); 71 | } 72 | 73 | public modifyServiceProps(props: ServiceBuild): ServiceBuild { 74 | if (props.serviceConnectConfiguration && props.serviceConnectConfiguration.namespace !== this.namespace) { 75 | throw new Error('Service connect cannot be enabled with two different namespaces.'); 76 | } 77 | 78 | const containerextension = this.parentService.serviceDescription.get('service-container') as Container; 79 | 80 | if (!containerextension.container) { 81 | throw new Error('Parent service must have a container to enable an Aliased Port Extension.'); 82 | } 83 | if (!containerextension.trafficPort && !this.aliasPort) { 84 | throw new Error('Cannot infer port: container has no traffic port and aliasPort was not specified.'); 85 | } 86 | 87 | // If there is already a service connect config, we need to modify the existing properties instead of creating new ones. 88 | // Push a new service to the list of services. 89 | let services: ecs.ServiceConnectService[] = []; 90 | if (props.serviceConnectConfiguration) { 91 | services = props.serviceConnectConfiguration.services ? props.serviceConnectConfiguration.services : []; 92 | } 93 | services.push({ 94 | portMappingName: this.alias, 95 | port: this.aliasPort || containerextension.trafficPort, 96 | dnsName: this.alias, 97 | }); 98 | if (!this.parentService.cluster.defaultCloudMapNamespace) { 99 | throw new Error('Cluster must have a default CloudMap namespace.'); 100 | } 101 | if (!props.serviceConnectConfiguration) { 102 | return { 103 | ...props, 104 | 105 | serviceConnectConfiguration: { 106 | namespace: this.parentService.cluster.defaultCloudMapNamespace.namespaceName, 107 | services, 108 | }, 109 | }; 110 | } 111 | 112 | return { 113 | ...props, 114 | 115 | serviceConnectConfiguration: { 116 | ...props.serviceConnectConfiguration, 117 | services, 118 | }, 119 | }; 120 | } 121 | } 122 | 123 | export interface AliasedPortMutatingHookProps { 124 | /** 125 | * The name by which to refer to this port mapping. 126 | */ 127 | readonly portMappingName: string; 128 | /** 129 | * The port on the container which receives traffic. This is the same as the `containerPort` property of port mapping. 130 | */ 131 | readonly aliasPort: number; 132 | /** 133 | * The protocol which this port mapping expects to receive. 134 | * 135 | * @default - none 136 | */ 137 | readonly protocol?: ecs.AppProtocol; 138 | } 139 | 140 | /** 141 | * This hook modifies the application container's settings so that 142 | * its primary port mapping has a name. 
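 * For example (assuming an alias of 'myservice' on traffic port 80 with an HTTP app protocol), the container definition ends up with a port mapping like `{ containerPort: 80, name: 'myservice', appProtocol: ecs.AppProtocol.http }`.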
143 | */ 144 | export class AliasedPortMutatingHook extends ContainerMutatingHook { 145 | private portMappingName: string; 146 | private portMappingProtocol?: ecs.AppProtocol; 147 | private aliasPort: number; 148 | 149 | constructor(props: AliasedPortMutatingHookProps) { 150 | super(); 151 | this.portMappingName = props.portMappingName; 152 | this.aliasPort = props.aliasPort; 153 | this.portMappingProtocol = props.protocol; 154 | } 155 | 156 | public mutateContainerDefinition(props: ecs.ContainerDefinitionOptions): ecs.ContainerDefinitionOptions { 157 | return { 158 | ...props, 159 | 160 | portMappings: [ 161 | { 162 | containerPort: this.aliasPort, 163 | name: this.portMappingName, 164 | appProtocol: this.portMappingProtocol, 165 | }, 166 | ], 167 | } as ecs.ContainerDefinitionOptions; 168 | } 169 | } 170 | -------------------------------------------------------------------------------- /src/extensions/assign-public-ip/assign-public-ip.ts: -------------------------------------------------------------------------------- 1 | import * as ec2 from 'aws-cdk-lib/aws-ec2'; 2 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 3 | import * as route53 from 'aws-cdk-lib/aws-route53'; 4 | import { Construct } from 'constructs'; 5 | import { TaskRecordManager } from './task-record-manager'; 6 | import { Service } from '../../service'; 7 | import { Container } from '../container'; 8 | import { ServiceExtension, ServiceBuild, EnvironmentCapacityType } from '../extension-interfaces'; 9 | 10 | export interface AssignPublicIpExtensionOptions { 11 | /** 12 | * Enable publishing task public IPs to a recordset in a Route 53 hosted zone. 13 | * 14 | * Note: If you want to change the DNS zone or record name, you will need to 15 | * remove this extension completely and then re-add it. 16 | */ 17 | readonly dns?: AssignPublicIpDnsOptions; 18 | } 19 | 20 | export interface AssignPublicIpDnsOptions { 21 | /** 22 | * A DNS Zone to expose task IPs in. 23 | */ 24 | readonly zone: route53.IHostedZone; 25 | 26 | /** 27 | * Name of the record to add to the zone and in which to add the task IP 28 | * addresses to. 29 | * 30 | * @example 'myservice' 31 | */ 32 | readonly recordName: string; 33 | } 34 | 35 | /** 36 | * Modifies the service to assign a public ip to each task and optionally 37 | * exposes public IPs in a Route 53 record set. 38 | * 39 | * Note: If you want to change the DNS zone or record name, you will need to 40 | * remove this extension completely and then re-add it. 
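 * (A usage sketch; `description` is assumed to be this service's ServiceDescription, `myZone` an existing Route 53 hosted zone, and extensions are assumed to be registered through the library's `add()` method.)
 * @example
 * description.add(new AssignPublicIpExtension({
 *   dns: {
 *     zone: myZone,
 *     recordName: 'myservice',
 *   },
 * }));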
41 | */ 42 | export class AssignPublicIpExtension extends ServiceExtension { 43 | dns?: AssignPublicIpDnsOptions; 44 | 45 | constructor(options?: AssignPublicIpExtensionOptions) { 46 | super('public-ip'); 47 | 48 | this.dns = options?.dns; 49 | } 50 | 51 | private hasDns() { 52 | return Boolean(this.dns); 53 | } 54 | 55 | public prehook(service: Service, _scope: Construct) { 56 | super.prehook(service, _scope); 57 | 58 | if (service.capacityType != EnvironmentCapacityType.FARGATE) { 59 | throw new Error('AssignPublicIp only supports Fargate tasks'); 60 | } 61 | } 62 | 63 | public modifyServiceProps(props: ServiceBuild): ServiceBuild { 64 | return { 65 | ...props, 66 | assignPublicIp: true, 67 | } as ServiceBuild; 68 | } 69 | 70 | public useService(service: ecs.Ec2Service | ecs.FargateService) { 71 | if (this.hasDns()) { 72 | new TaskRecordManager(service, 'TaskRecordManager', { 73 | service: service, 74 | dnsZone: this.dns!.zone, 75 | dnsRecordName: this.dns!.recordName, 76 | }); 77 | 78 | const container = this.parentService.serviceDescription.get('service-container') as Container; 79 | service.connections.allowFromAnyIpv4( 80 | ec2.Port.tcp(container.trafficPort), 81 | 'Accept inbound traffic on traffic port from anywhere', 82 | ); 83 | } 84 | } 85 | } 86 | -------------------------------------------------------------------------------- /src/extensions/assign-public-ip/index.ts: -------------------------------------------------------------------------------- 1 | export * from './assign-public-ip'; 2 | -------------------------------------------------------------------------------- /src/extensions/cloudwatch-agent.ts: -------------------------------------------------------------------------------- 1 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 2 | import * as iam from 'aws-cdk-lib/aws-iam'; 3 | import { Construct } from 'constructs'; 4 | import { ServiceExtension } from './extension-interfaces'; 5 | import { Service } from '../service'; 6 | 7 | const CLOUDWATCH_AGENT_IMAGE = 'amazon/cloudwatch-agent:latest'; 8 | 9 | /** 10 | * This extension adds a CloudWatch agent to the task definition and 11 | * configures the task to be able to publish metrics to CloudWatch. 
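 * The agent sidecar is configured (via `CW_CONFIG_CONTENT` below) to collect CloudWatch EMF logs and StatsD metrics, and the task role is granted `cloudwatch:PutMetricData`.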
12 | */ 13 | export class CloudwatchAgentExtension extends ServiceExtension { 14 | private CW_CONFIG_CONTENT = { 15 | logs: { 16 | metrics_collected: { 17 | emf: {}, 18 | }, 19 | }, 20 | metrics: { 21 | metrics_collected: { 22 | statsd: {}, 23 | }, 24 | }, 25 | }; 26 | 27 | constructor() { 28 | super('cloudwatchAgent'); 29 | } 30 | 31 | public prehook(service: Service, scope: Construct) { 32 | this.parentService = service; 33 | this.scope = scope; 34 | } 35 | 36 | public useTaskDefinition(taskDefinition: ecs.TaskDefinition) { 37 | // Add the CloudWatch Agent to this task 38 | this.container = taskDefinition.addContainer('cloudwatch-agent', { 39 | image: ecs.ContainerImage.fromRegistry(CLOUDWATCH_AGENT_IMAGE), 40 | environment: { 41 | CW_CONFIG_CONTENT: JSON.stringify(this.CW_CONFIG_CONTENT), 42 | }, 43 | logging: new ecs.AwsLogDriver({ streamPrefix: 'cloudwatch-agent' }), 44 | user: '0:1338', // Ensure that CloudWatch agent outbound traffic doesn't go through proxy 45 | memoryReservationMiB: 50, 46 | }); 47 | 48 | // Add permissions that allow the cloudwatch agent to publish metrics 49 | new iam.Policy(this.scope, `${this.parentService.id}-publish-metrics`, { 50 | roles: [taskDefinition.taskRole], 51 | statements: [ 52 | new iam.PolicyStatement({ 53 | resources: ['*'], 54 | actions: ['cloudwatch:PutMetricData'], 55 | }), 56 | ], 57 | }); 58 | } 59 | 60 | public resolveContainerDependencies() { 61 | if (!this.container) { 62 | throw new Error('The container dependency hook was called before the container was created'); 63 | } 64 | 65 | const appmeshextension = this.parentService.serviceDescription.get('appmesh'); 66 | if (appmeshextension && appmeshextension.container) { 67 | this.container.addContainerDependencies({ 68 | container: appmeshextension.container, 69 | condition: ecs.ContainerDependencyCondition.HEALTHY, 70 | }); 71 | } 72 | } 73 | } 74 | -------------------------------------------------------------------------------- /src/extensions/container.ts: -------------------------------------------------------------------------------- 1 | import { RemovalPolicy } from 'aws-cdk-lib'; 2 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 3 | import * as awslogs from 'aws-cdk-lib/aws-logs'; 4 | import * as cxapi from 'aws-cdk-lib/cx-api'; 5 | import { ServiceExtension } from './extension-interfaces'; 6 | import { Service } from '../service'; 7 | 8 | // keep this import separate from other imports to reduce chance for merge conflicts with v2-main 9 | // eslint-disable-next-line no-duplicate-imports, import/order 10 | import { Construct } from 'constructs'; 11 | 12 | /** 13 | * Setting for the main application container of a service. 14 | */ 15 | export interface ContainerExtensionProps { 16 | /** 17 | * How much CPU the container requires. 18 | */ 19 | readonly cpu: number; 20 | 21 | /** 22 | * How much memory in megabytes the container requires. 23 | */ 24 | readonly memoryMiB: number; 25 | 26 | /** 27 | * The image to run. 28 | */ 29 | readonly image: ecs.ContainerImage; 30 | 31 | /** 32 | * What port the image listen for traffic on. 33 | */ 34 | readonly trafficPort: number; 35 | 36 | /** 37 | * Environment variables to pass into the container. 38 | * 39 | * @default - No environment variables. 40 | */ 41 | readonly environment?: { 42 | [key: string]: string; 43 | }; 44 | 45 | /** 46 | * The environment files to pass to the container. 47 | * 48 | * @see https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html 49 | * 50 | * @default - No environment files. 
51 | */ 52 | readonly environmentFiles?: ecs.EnvironmentFile[]; 53 | 54 | /** 55 | * The secret environment variables to pass to the container. 56 | * 57 | * @default - No secret environment variables. 58 | */ 59 | readonly secrets?: { [key: string]: ecs.Secret }; 60 | 61 | /** 62 | * The log group into which application container logs should be routed. 63 | * 64 | * @default - A log group is automatically created for you if the `ECS_SERVICE_EXTENSIONS_ENABLE_DEFAULT_LOG_DRIVER` feature flag is set. 65 | */ 66 | readonly logGroup?: awslogs.ILogGroup; 67 | } 68 | 69 | /** 70 | * The main container of a service. This is generally the container 71 | * which runs your application business logic. Other extensions will attach 72 | * sidecars alongside this main container. 73 | */ 74 | export class Container extends ServiceExtension { 75 | /** 76 | * The port on which the container expects to receive network traffic 77 | */ 78 | public readonly trafficPort: number; 79 | 80 | /** 81 | * The log group into which application container logs should be routed. 82 | */ 83 | public logGroup?: awslogs.ILogGroup; 84 | 85 | /** 86 | * The settings for the container. 87 | */ 88 | private props: ContainerExtensionProps; 89 | 90 | constructor(props: ContainerExtensionProps) { 91 | super('service-container'); 92 | this.props = props; 93 | this.trafficPort = props.trafficPort; 94 | this.logGroup = props.logGroup; 95 | } 96 | 97 | public prehook(service: Service, scope: Construct) { 98 | this.parentService = service; 99 | this.scope = scope; 100 | } 101 | 102 | // This hook sets the overall task resource requirements to the 103 | // resource requirements of the application itself. 104 | public modifyTaskDefinitionProps(props: ecs.TaskDefinitionProps): ecs.TaskDefinitionProps { 105 | return { 106 | ...props, 107 | cpu: this.props.cpu.toString(), 108 | memoryMiB: this.props.memoryMiB.toString(), 109 | } as ecs.TaskDefinitionProps; 110 | } 111 | 112 | // This hook adds the application container to the task definition. 113 | public useTaskDefinition(taskDefinition: ecs.TaskDefinition) { 114 | let containerProps = { 115 | image: this.props.image, 116 | cpu: Number(this.props.cpu), 117 | memoryLimitMiB: Number(this.props.memoryMiB), 118 | environment: this.props.environment, 119 | environmentFiles: this.props.environmentFiles, 120 | secrets: this.props.secrets, 121 | } as ecs.ContainerDefinitionOptions; 122 | 123 | // Let other extensions mutate the container definition. This is 124 | // used by extensions which want to add environment variables, modify 125 | // logging parameters, etc. 
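 // For example, the FirelensMutatingHook in firelens.ts swaps in a FireLens log driver, and the AliasedPortMutatingHook in aliased-port.ts names the primary port mapping.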
126 | this.containerMutatingHooks.forEach((hookProvider) => { 127 | containerProps = hookProvider.mutateContainerDefinition(containerProps); 128 | }); 129 | 130 | // If no observability extensions have been added to the service description then we can configure the `awslogs` log driver 131 | if (!containerProps.logging) { 132 | // Create a log group for the service if one is not provided by the user (only if feature flag is set) 133 | if (!this.logGroup && this.parentService.node.tryGetContext(cxapi.ECS_SERVICE_EXTENSIONS_ENABLE_DEFAULT_LOG_DRIVER)) { 134 | this.logGroup = new awslogs.LogGroup(this.scope, `${this.parentService.id}-logs`, { 135 | logGroupName: `${this.parentService.id}-logs`, 136 | removalPolicy: RemovalPolicy.DESTROY, 137 | retention: awslogs.RetentionDays.ONE_MONTH, 138 | }); 139 | } 140 | 141 | if (this.logGroup) { 142 | containerProps = { 143 | ...containerProps, 144 | logging: new ecs.AwsLogDriver({ 145 | streamPrefix: this.parentService.id, 146 | logGroup: this.logGroup, 147 | }), 148 | }; 149 | } 150 | } else { 151 | if (this.logGroup) { 152 | throw Error(`Log configuration already specified. You cannot provide a log group for the application container of service '${this.parentService.id}' while also adding log configuration separately using service extensions.`); 153 | } 154 | } 155 | this.container = taskDefinition.addContainer('app', containerProps); 156 | 157 | // Create a port mapping for the container if not already created by another extension. 158 | if (!this.container.findPortMapping(this.trafficPort, ecs.Protocol.TCP)) { 159 | this.container.addPortMappings({ 160 | containerPort: this.trafficPort, 161 | }); 162 | } 163 | 164 | // Raise the ulimits for this main application container 165 | // so that it can handle more concurrent requests 166 | this.container.addUlimits({ 167 | softLimit: 1024000, 168 | hardLimit: 1024000, 169 | name: ecs.UlimitName.NOFILE, 170 | }); 171 | } 172 | 173 | public resolveContainerDependencies() { 174 | if (!this.container) { 175 | throw new Error('The container dependency hook was called before the container was created'); 176 | } 177 | 178 | const firelens = this.parentService.serviceDescription.get('firelens'); 179 | if (firelens && firelens.container) { 180 | this.container.addContainerDependencies({ 181 | container: firelens.container, 182 | condition: ecs.ContainerDependencyCondition.START, 183 | }); 184 | } 185 | 186 | const appmeshextension = this.parentService.serviceDescription.get('appmesh'); 187 | if (appmeshextension && appmeshextension.container) { 188 | this.container.addContainerDependencies({ 189 | container: appmeshextension.container, 190 | condition: ecs.ContainerDependencyCondition.HEALTHY, 191 | }); 192 | } 193 | 194 | const cloudwatchextension = this.parentService.serviceDescription.get('cloudwatchAgent'); 195 | if (cloudwatchextension && cloudwatchextension.container) { 196 | this.container.addContainerDependencies({ 197 | container: cloudwatchextension.container, 198 | condition: ecs.ContainerDependencyCondition.START, 199 | }); 200 | } 201 | 202 | const xrayextension = this.parentService.serviceDescription.get('xray'); 203 | if (xrayextension && xrayextension.container) { 204 | this.container.addContainerDependencies({ 205 | container: xrayextension.container, 206 | condition: ecs.ContainerDependencyCondition.HEALTHY, 207 | }); 208 | } 209 | } 210 | } 211 | -------------------------------------------------------------------------------- /src/extensions/firelens.ts: 
-------------------------------------------------------------------------------- 1 | import { Stack, RemovalPolicy } from 'aws-cdk-lib'; 2 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 3 | import * as awslogs from 'aws-cdk-lib/aws-logs'; 4 | import { Construct } from 'constructs'; 5 | import { Container } from './container'; 6 | import { ContainerMutatingHook, ServiceExtension } from './extension-interfaces'; 7 | import { Service } from '../service'; 8 | 9 | /** 10 | * Settings for the hook which mutates the application container 11 | * to route logs through FireLens. 12 | */ 13 | export interface FirelensProps { 14 | /** 15 | * The parent service that is being mutated. 16 | */ 17 | readonly parentService: Service; 18 | 19 | /** 20 | * The log group into which logs should be routed. 21 | */ 22 | readonly logGroup: awslogs.LogGroup; 23 | } 24 | 25 | /** 26 | * This hook modifies the application container's settings so that 27 | * it routes logs using FireLens. 28 | */ 29 | export class FirelensMutatingHook extends ContainerMutatingHook { 30 | private parentService: Service; 31 | private logGroup: awslogs.LogGroup; 32 | 33 | constructor(props: FirelensProps) { 34 | super(); 35 | this.parentService = props.parentService; 36 | this.logGroup = props.logGroup; 37 | } 38 | 39 | public mutateContainerDefinition(props: ecs.ContainerDefinitionOptions): ecs.ContainerDefinitionOptions { 40 | return { 41 | ...props, 42 | 43 | logging: ecs.LogDrivers.firelens({ 44 | options: { 45 | Name: 'cloudwatch', 46 | region: Stack.of(this.parentService).region, 47 | log_group_name: this.logGroup.logGroupName, 48 | log_stream_prefix: `${this.parentService.id}/`, 49 | }, 50 | }), 51 | } as ecs.ContainerDefinitionOptions; 52 | } 53 | } 54 | 55 | /** 56 | * This extension adds a FluentBit log router to the task definition 57 | * and does all the configuration necessarily to enable log routing 58 | * for the task using FireLens. 
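 * Concretely, it creates a one-week-retention log group for the service, adds a FluentBit log router container to the task, and registers a FirelensMutatingHook so the application container's logs are routed to that log group through the FireLens `cloudwatch` output.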
59 | */ 60 | export class FireLensExtension extends ServiceExtension { 61 | private logGroup!: awslogs.LogGroup; 62 | 63 | constructor() { 64 | super('firelens'); 65 | } 66 | 67 | public prehook(service: Service, scope: Construct) { 68 | this.parentService = service; 69 | 70 | // Create a log group for the service, into which FireLens 71 | // will route the service's logs 72 | this.logGroup = new awslogs.LogGroup(scope, `${service.id}-logs`, { 73 | logGroupName: `${service.id}-logs`, 74 | removalPolicy: RemovalPolicy.DESTROY, 75 | retention: awslogs.RetentionDays.ONE_WEEK, 76 | }); 77 | } 78 | 79 | // Add hooks to the main application extension so that it is modified to 80 | // have logging properties that enable sending logs via the 81 | // Firelens log router container 82 | public addHooks() { 83 | const container = this.parentService.serviceDescription.get('service-container') as Container; 84 | 85 | if (!container) { 86 | throw new Error('Firelens extension requires an application extension'); 87 | } 88 | 89 | container.addContainerMutatingHook(new FirelensMutatingHook({ 90 | parentService: this.parentService, 91 | logGroup: this.logGroup, 92 | })); 93 | } 94 | 95 | public useTaskDefinition(taskDefinition: ecs.TaskDefinition) { 96 | // Manually add a firelens log router, so that we can manually manage the dependencies 97 | // to ensure that the Firelens log router depends on the Envoy proxy 98 | this.container = taskDefinition.addFirelensLogRouter('firelens', { 99 | image: ecs.obtainDefaultFluentBitECRImage(taskDefinition, { 100 | logDriver: 'awsfirelens', 101 | options: { 102 | Name: 'cloudwatch', 103 | }, 104 | }), 105 | firelensConfig: { 106 | type: ecs.FirelensLogRouterType.FLUENTBIT, 107 | }, 108 | logging: new ecs.AwsLogDriver({ streamPrefix: 'firelens' }), 109 | memoryReservationMiB: 50, 110 | user: '0:1338', // Give Firelens a group ID that allows its outbound logs to bypass Envoy 111 | }); 112 | } 113 | 114 | public resolveContainerDependencies() { 115 | if (!this.container) { 116 | throw new Error('The container dependency hook was called before the container was created'); 117 | } 118 | 119 | const appmeshextension = this.parentService.serviceDescription.get('appmesh'); 120 | if (appmeshextension && appmeshextension.container) { 121 | this.container.addContainerDependencies({ 122 | container: appmeshextension.container, 123 | condition: ecs.ContainerDependencyCondition.HEALTHY, 124 | }); 125 | } 126 | } 127 | } 128 | -------------------------------------------------------------------------------- /src/extensions/http-load-balancer.ts: -------------------------------------------------------------------------------- 1 | import { CfnOutput, Duration } from 'aws-cdk-lib'; 2 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 3 | import * as alb from 'aws-cdk-lib/aws-elasticloadbalancingv2'; 4 | import { Construct } from 'constructs'; 5 | import { ServiceExtension, ServiceBuild } from './extension-interfaces'; 6 | import { Service } from '../service'; 7 | 8 | export interface HttpLoadBalancerProps { 9 | /** 10 | * The number of ALB requests per target. 11 | */ 12 | readonly requestsPerTarget?: number; 13 | } 14 | /** 15 | * This extension adds a public-facing load balancer for sending traffic 16 | * to one or more replicas of the application container.
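 *
 * A minimal usage sketch; the values are illustrative and mirror the tests in
 * this package, and `stack`, `environment` and `serviceDescription` are assumed
 * to exist. When `requestsPerTarget` is set, the service must also configure
 * `autoScaleTaskCount` so that an auto scaling target is available:
 *
 *   serviceDescription.add(new HttpLoadBalancerExtension({ requestsPerTarget: 100 }));
 *   new Service(stack, 'my-service', {
 *     environment,
 *     serviceDescription,
 *     autoScaleTaskCount: { maxTaskCount: 5 },
 *   });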
17 | */ 18 | export class HttpLoadBalancerExtension extends ServiceExtension { 19 | private loadBalancer!: alb.IApplicationLoadBalancer; 20 | private listener!: alb.IApplicationListener; 21 | private requestsPerTarget?: number; 22 | 23 | constructor(props: HttpLoadBalancerProps = {}) { 24 | super('load-balancer'); 25 | this.requestsPerTarget = props.requestsPerTarget; 26 | } 27 | 28 | // Before the service is created, go ahead and create the load balancer itself. 29 | public prehook(service: Service, scope: Construct) { 30 | this.parentService = service; 31 | 32 | this.loadBalancer = new alb.ApplicationLoadBalancer(scope, `${this.parentService.id}-load-balancer`, { 33 | vpc: this.parentService.vpc, 34 | internetFacing: true, 35 | }); 36 | 37 | this.listener = this.loadBalancer.addListener(`${this.parentService.id}-listener`, { 38 | port: 80, 39 | open: true, 40 | }); 41 | 42 | // Automatically create an output 43 | new CfnOutput(scope, `${this.parentService.id}-load-balancer-dns-output`, { 44 | value: this.loadBalancer.loadBalancerDnsName, 45 | }); 46 | } 47 | 48 | // Minor service configuration tweaks to work better with a load balancer 49 | public modifyServiceProps(props: ServiceBuild): ServiceBuild { 50 | return { 51 | ...props, 52 | 53 | // Give the task a little bit of grace time to start passing 54 | // healthchecks. Without this it is possible for a slow starting task 55 | // to cause the ALB to consider the task unhealthy, causing ECS to stop 56 | // the task before it actually has a chance to finish starting up 57 | healthCheckGracePeriod: Duration.minutes(1), 58 | } as ServiceBuild; 59 | } 60 | 61 | // After the service is created add the service to the load balancer's listener 62 | public useService(service: ecs.Ec2Service | ecs.FargateService) { 63 | const targetGroup = this.listener.addTargets(this.parentService.id, { 64 | deregistrationDelay: Duration.seconds(10), 65 | port: 80, 66 | targets: [service], 67 | }); 68 | this.parentService.targetGroup = targetGroup; 69 | 70 | if (this.requestsPerTarget) { 71 | if (!this.parentService.scalableTaskCount) { 72 | throw Error(`Auto scaling target for the service '${this.parentService.id}' hasn't been configured. 
Please use Service construct to configure 'minTaskCount' and 'maxTaskCount'.`); 73 | } 74 | this.parentService.scalableTaskCount.scaleOnRequestCount(`${this.parentService.id}-target-request-count-${this.requestsPerTarget}`, { 75 | requestsPerTarget: this.requestsPerTarget, 76 | targetGroup: this.parentService.targetGroup, 77 | }); 78 | this.parentService.enableAutoScalingPolicy(); 79 | } 80 | } 81 | } 82 | -------------------------------------------------------------------------------- /src/extensions/index.ts: -------------------------------------------------------------------------------- 1 | export * from './container'; 2 | export * from './firelens'; 3 | export * from './appmesh'; 4 | export * from './http-load-balancer'; 5 | export * from './cloudwatch-agent'; 6 | export * from './scale-on-cpu-utilization'; 7 | export * from './xray'; 8 | export * from './assign-public-ip'; 9 | export * from './queue/queue'; 10 | export * from './injecter'; 11 | export * from './aliased-port'; 12 | -------------------------------------------------------------------------------- /src/extensions/injecter.ts: -------------------------------------------------------------------------------- 1 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 2 | import * as sns from 'aws-cdk-lib/aws-sns'; 3 | import { Construct } from 'constructs'; 4 | import { Container } from './container'; 5 | import { ContainerMutatingHook, ServiceExtension } from './extension-interfaces'; 6 | import { Service } from '../service'; 7 | 8 | /** 9 | * An interface that will be implemented by all the resources that can have events published or data written to them. 10 | */ 11 | export interface IInjectable { 12 | environmentVariables(): { [key: string]: string }; 13 | } 14 | 15 | /** 16 | * An interface that will be implemented by all the injectable resources that need to grant permissions to the task role. 17 | */ 18 | export interface IGrantInjectable extends IInjectable { 19 | grant(taskDefinition: ecs.TaskDefinition): void; 20 | } 21 | 22 | /** 23 | * The settings for the `InjectableTopic` class. 24 | */ 25 | export interface InjectableTopicProps { 26 | /** 27 | * The SNS Topic to publish events to. 28 | */ 29 | readonly topic: sns.ITopic; 30 | } 31 | 32 | /** 33 | * The `InjectableTopic` class represents an SNS Topic resource that the parent service can publish events to. 34 | */ 35 | 36 | export class InjectableTopic implements IGrantInjectable { 37 | public readonly topic: sns.ITopic; 38 | 39 | constructor(props: InjectableTopicProps) { 40 | this.topic = props.topic; 41 | } 42 | 43 | public grant(taskDefinition: ecs.TaskDefinition) { 44 | this.topic.grantPublish(taskDefinition.taskRole); 45 | } 46 | 47 | public environmentVariables(): { [key: string]: string } { 48 | let environment: { [key: string]: string } = {}; 49 | environment[`${this.topic.node.id.toUpperCase()}_TOPIC_ARN`] = this.topic.topicArn; 50 | return environment; 51 | } 52 | } 53 | 54 | /** 55 | * The settings for the Injecter extension. 56 | */ 57 | export interface InjecterExtensionProps { 58 | /** 59 | * The list of injectable resources for this service. 60 | */ 61 | readonly injectables: IInjectable[]; 62 | } 63 | 64 | /** 65 | * Settings for the hook which mutates the application container 66 | * to add the injectable resource environment variables. 67 | */ 68 | interface ContainerMutatingProps { 69 | /** 70 | * The resource environment variables to be added to the container environment.
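 *
 * For example, an `InjectableTopic` built from a topic whose construct id is
 * 'orders' (a hypothetical name) contributes ORDERS_TOPIC_ARN, mapped to the
 * topic's ARN, to this environment.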
71 | */ 72 | readonly environment: { [key: string]: string }; 73 | } 74 | 75 | /** 76 | * This hook modifies the application container's environment to 77 | * add the injectable resource environment variables. 78 | */ 79 | class InjecterExtensionMutatingHook extends ContainerMutatingHook { 80 | private environment: { [key: string]: string }; 81 | 82 | constructor(props: ContainerMutatingProps) { 83 | super(); 84 | this.environment = props.environment; 85 | } 86 | 87 | public mutateContainerDefinition(props: ecs.ContainerDefinitionOptions): ecs.ContainerDefinitionOptions { 88 | return { 89 | ...props, 90 | 91 | environment: { ...(props.environment || {}), ...this.environment }, 92 | } as ecs.ContainerDefinitionOptions; 93 | } 94 | } 95 | 96 | /** 97 | * This extension accepts a list of `Injectable` resources that the parent service can publish events or write data to. 98 | * It sets up the corresponding permissions for the task role of the parent service. 99 | */ 100 | export class InjecterExtension extends ServiceExtension { 101 | private props: InjecterExtensionProps; 102 | 103 | private environment: { [key: string]: string } = {}; 104 | 105 | constructor(props: InjecterExtensionProps) { 106 | super('injecter'); 107 | 108 | this.props = props; 109 | } 110 | 111 | // @ts-ignore - Ignore unused params that are required for abstract class extend 112 | public prehook(service: Service, scope: Construct) { 113 | this.parentService = service; 114 | 115 | for (const injectable of this.props.injectables) { 116 | for (const [key, val] of Object.entries(injectable.environmentVariables())) { 117 | this.environment[key] = val; 118 | } 119 | } 120 | } 121 | 122 | /** 123 | * Add hooks to the main application extension so that it is modified to 124 | * add the injectable resource environment variables to the container environment. 125 | */ 126 | public addHooks() { 127 | const container = this.parentService.serviceDescription.get('service-container') as Container; 128 | 129 | if (!container) { 130 | throw new Error('Injecter Extension requires an application extension'); 131 | } 132 | 133 | container.addContainerMutatingHook(new InjecterExtensionMutatingHook({ 134 | environment: this.environment, 135 | })); 136 | } 137 | 138 | /** 139 | * After the task definition has been created, this hook grants the required permissions to the task role for the 140 | * parent service. 141 | * 142 | * @param taskDefinition The created task definition 143 | */ 144 | public useTaskDefinition(taskDefinition: ecs.TaskDefinition) { 145 | for (const injectable of this.props.injectables) { 146 | if ((injectable as IGrantInjectable).grant !== undefined) { 147 | (injectable as IGrantInjectable).grant(taskDefinition); 148 | } 149 | } 150 | } 151 | } -------------------------------------------------------------------------------- /src/extensions/queue/index.ts: -------------------------------------------------------------------------------- 1 | export * from './queue'; -------------------------------------------------------------------------------- /src/extensions/scale-on-cpu-utilization.ts: -------------------------------------------------------------------------------- 1 | import { Duration } from 'aws-cdk-lib'; 2 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 3 | import { ServiceExtension, ServiceBuild } from './extension-interfaces'; 4 | 5 | 6 | /** 7 | * The autoscaling settings. 
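 *
 * This extension is deprecated (see the note below). A minimal sketch of the
 * recommended replacement, with values mirroring the integration tests in this
 * package and assuming an existing `environment` and `serviceDescription`:
 *
 *   new Service(stack, 'name', {
 *     environment,
 *     serviceDescription,
 *     autoScaleTaskCount: {
 *       minTaskCount: 2,
 *       maxTaskCount: 10,
 *       targetCpuUtilization: 75,
 *     },
 *   });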
8 | * 9 | * @deprecated use the `minTaskCount` and `maxTaskCount` properties of `autoScaleTaskCount` in the `Service` construct 10 | * to configure the auto scaling target for the service. For more information, please refer to 11 | * https://github.com/aws/aws-cdk/blob/master/packages/%40aws-cdk-containers/ecs-service-extensions/README.md#task-auto-scaling . 12 | */ 13 | export interface CpuScalingProps { 14 | /** 15 | * How many tasks to launch initially. 16 | * 17 | * @default - 2 18 | */ 19 | readonly initialTaskCount?: number; 20 | 21 | /** 22 | * The minimum number of tasks when scaling in. 23 | * 24 | * @default - 2 25 | */ 26 | readonly minTaskCount?: number; 27 | 28 | /** 29 | * The maximum number of tasks when scaling out. 30 | * 31 | * @default - 8 32 | */ 33 | readonly maxTaskCount?: number; 34 | 35 | /** 36 | * The CPU utilization to try to maintain. 37 | * 38 | * @default - 50% 39 | */ 40 | readonly targetCpuUtilization?: number; 41 | 42 | /** 43 | * How long to wait between scale out actions. 44 | * 45 | * @default - 60 seconds 46 | */ 47 | readonly scaleOutCooldown?: Duration; 48 | 49 | /** 50 | * How long to wait between scale in actions. 51 | * 52 | * @default - 60 seconds 53 | */ 54 | readonly scaleInCooldown?: Duration; 55 | } 56 | 57 | // The default autoscaling settings 58 | const cpuScalingPropsDefault = { 59 | initialTaskCount: 2, 60 | minTaskCount: 2, 61 | maxTaskCount: 8, 62 | targetCpuUtilization: 50, 63 | scaleOutCooldown: Duration.seconds(60), 64 | scaleInCooldown: Duration.seconds(60), 65 | }; 66 | 67 | /** 68 | * This extension helps you scale your service according to CPU utilization. 69 | * 70 | * @deprecated To enable target tracking based on CPU utilization, use the `targetCpuUtilization` property of `autoScaleTaskCount` in the `Service` construct. 71 | * For more information, please refer to https://github.com/aws/aws-cdk/blob/master/packages/%40aws-cdk-containers/ecs-service-extensions/README.md#task-auto-scaling . 72 | */ 73 | export class ScaleOnCpuUtilization extends ServiceExtension { 74 | /** 75 | * How many tasks to launch initially. 76 | */ 77 | public readonly initialTaskCount: number; 78 | 79 | /** 80 | * The minimum number of tasks when scaling in. 81 | */ 82 | public readonly minTaskCount: number; 83 | 84 | /** 85 | * The maximum number of tasks when scaling out. 86 | */ 87 | public readonly maxTaskCount: number; 88 | 89 | /** 90 | * The CPU utilization to try to maintain. 91 | */ 92 | public readonly targetCpuUtilization: number; 93 | 94 | /** 95 | * How long to wait between scale out actions. 96 | */ 97 | public readonly scaleOutCooldown: Duration; 98 | 99 | /** 100 | * How long to wait between scale in actions. 101 | */ 102 | public readonly scaleInCooldown: Duration; 103 | 104 | constructor(props?: CpuScalingProps) { 105 | super('scale-on-cpu-utilization'); 106 | 107 | let combinedProps = { 108 | ...cpuScalingPropsDefault, 109 | ...props, 110 | }; 111 | 112 | this.initialTaskCount = combinedProps.initialTaskCount; 113 | this.minTaskCount = combinedProps.minTaskCount; 114 | this.maxTaskCount = combinedProps.maxTaskCount; 115 | this.targetCpuUtilization = combinedProps.targetCpuUtilization; 116 | this.scaleOutCooldown = combinedProps.scaleOutCooldown; 117 | this.scaleInCooldown = combinedProps.scaleInCooldown; 118 | } 119 | 120 | // This extension modifies properties of the service prior 121 | // to construct creation.
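  // For example, with the default settings above, whatever ServiceBuild the
  // other hooks have produced is returned unchanged except that desiredCount
  // is set to 2 (this.initialTaskCount).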
122 | public modifyServiceProps(props: ServiceBuild): ServiceBuild { 123 | return { 124 | ...props, 125 | 126 | // Launch an initial number of tasks 127 | // In the future we should change this to use a custom resource 128 | // to read the current task count set by autoscaling, so that the task 129 | // count doesn't rollback to the initial level on each deploy. 130 | desiredCount: this.initialTaskCount, 131 | } as ServiceBuild; 132 | } 133 | 134 | // This hook utilizes the resulting service construct 135 | // once it is created. 136 | public useService(service: ecs.Ec2Service | ecs.FargateService) { 137 | if (this.parentService.scalableTaskCount) { 138 | throw Error('Cannot specify \'autoScaleTaskCount\' in the Service construct and also provide a \'ScaleOnCpuUtilization\' extension. \'ScaleOnCpuUtilization\' is deprecated. Please only provide \'autoScaleTaskCount\'.'); 139 | } 140 | const scalingTarget = service.autoScaleTaskCount({ 141 | minCapacity: this.minTaskCount, 142 | maxCapacity: this.maxTaskCount, 143 | }); 144 | 145 | scalingTarget.scaleOnCpuUtilization(`${this.parentService.id}-target-cpu-utilization-${this.targetCpuUtilization}`, { 146 | targetUtilizationPercent: this.targetCpuUtilization, 147 | scaleInCooldown: this.scaleInCooldown, 148 | scaleOutCooldown: this.scaleOutCooldown, 149 | }); 150 | this.parentService.enableAutoScalingPolicy(); 151 | } 152 | } 153 | -------------------------------------------------------------------------------- /src/extensions/xray.ts: -------------------------------------------------------------------------------- 1 | import { Duration, Stack } from 'aws-cdk-lib'; 2 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 3 | import * as iam from 'aws-cdk-lib/aws-iam'; 4 | import { Construct } from 'constructs'; 5 | import { ServiceExtension } from './extension-interfaces'; 6 | import { Service } from '../service'; 7 | 8 | const XRAY_DAEMON_IMAGE = 'amazon/aws-xray-daemon:latest'; 9 | 10 | /** 11 | * This extension adds an X-Ray daemon inside the task definition for 12 | * capturing application trace spans and submitting them to the AWS 13 | * X-Ray service. 
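 *
 * A minimal usage sketch, assuming a `serviceDescription` that already has a
 * Container extension and an existing `stack` and `environment` (the pattern
 * mirrors the tests in this package):
 *
 *   serviceDescription.add(new XRayExtension());
 *   new Service(stack, 'my-service', { environment, serviceDescription });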
14 | */ 15 | export class XRayExtension extends ServiceExtension { 16 | constructor() { 17 | super('xray'); 18 | } 19 | 20 | // @ts-ignore - Ignore unused params that are required for abstract class extend 21 | public prehook(service: Service, scope: Construct) { 22 | this.parentService = service; 23 | } 24 | 25 | public useTaskDefinition(taskDefinition: ecs.TaskDefinition) { 26 | // Add the XRay Daemon to the task 27 | this.container = taskDefinition.addContainer('xray', { 28 | image: ecs.ContainerImage.fromRegistry(XRAY_DAEMON_IMAGE), 29 | essential: true, 30 | memoryReservationMiB: 256, 31 | environment: { 32 | AWS_REGION: Stack.of(this.parentService).region, 33 | }, 34 | healthCheck: { 35 | command: [ 36 | 'CMD-SHELL', 37 | 'curl -s http://localhost:2000', 38 | ], 39 | startPeriod: Duration.seconds(10), 40 | interval: Duration.seconds(5), 41 | timeout: Duration.seconds(2), 42 | retries: 3, 43 | }, 44 | logging: new ecs.AwsLogDriver({ streamPrefix: 'xray' }), 45 | user: '1337', // X-Ray traffic should not go through Envoy proxy 46 | }); 47 | 48 | // Add permissions to this task to allow it to talk to X-Ray 49 | taskDefinition.taskRole.addManagedPolicy( 50 | iam.ManagedPolicy.fromAwsManagedPolicyName('AWSXRayDaemonWriteAccess'), 51 | ); 52 | } 53 | 54 | public resolveContainerDependencies() { 55 | if (!this.container) { 56 | throw new Error('The container dependency hook was called before the container was created'); 57 | } 58 | 59 | const appmeshextension = this.parentService.serviceDescription.get('appmesh'); 60 | if (appmeshextension && appmeshextension.container) { 61 | this.container.addContainerDependencies({ 62 | container: appmeshextension.container, 63 | condition: ecs.ContainerDependencyCondition.HEALTHY, 64 | }); 65 | } 66 | } 67 | } 68 | -------------------------------------------------------------------------------- /src/index.ts: -------------------------------------------------------------------------------- 1 | export * from './service'; 2 | export * from './service-description'; 3 | export * from './environment'; 4 | export * from './extensions'; 5 | export * from './extensions/extension-interfaces'; -------------------------------------------------------------------------------- /src/service-description.ts: -------------------------------------------------------------------------------- 1 | import { ServiceExtension } from './extensions/extension-interfaces'; 2 | 3 | /** 4 | * A ServiceDescription is a wrapper for all of the extensions that a user wants 5 | * to add to an ECS Service. It collects all of the extensions that are added 6 | * to a service, allowing each extension to query the full list of extensions 7 | * added to a service to determine information about how to self-configure. 8 | */ 9 | export class ServiceDescription { 10 | /** 11 | * The list of extensions that have been registered to run when 12 | * preparing this service. 13 | */ 14 | public extensions: Record<string, ServiceExtension> = {}; 15 | 16 | /** 17 | * Adds a new extension to the service. The extensions mutate a service 18 | * to add resources to or configure properties for the service. 19 | * 20 | * @param extension - The extension that you wish to add 21 | */ 22 | public add(extension: ServiceExtension) { 23 | if (this.extensions[extension.name]) { 24 | throw new Error(`An extension called ${extension.name} has already been added`); 25 | } 26 | 27 | this.extensions[extension.name] = extension; 28 | 29 | return this; 30 | } 31 | 32 | /** 33 | * Get the extension with a specific name.
This is generally used by 34 | * extensions in order to discover each other. 35 | * 36 | * @param name 37 | */ 38 | public get(name: string) { 39 | return this.extensions[name]; 40 | } 41 | }; 42 | -------------------------------------------------------------------------------- /test/aliased-port.integ.snapshot/aws-ecs-integ.assets.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "21.0.0", 3 | "files": { 4 | "1904e1ff90c4f297a8a8c9f4f5573101aa4cb632abb96486f141dc588c222614": { 5 | "source": { 6 | "path": "aws-ecs-integ.template.json", 7 | "packaging": "file" 8 | }, 9 | "destinations": { 10 | "current_account-current_region": { 11 | "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}", 12 | "objectKey": "1904e1ff90c4f297a8a8c9f4f5573101aa4cb632abb96486f141dc588c222614.json", 13 | "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}" 14 | } 15 | } 16 | } 17 | }, 18 | "dockerImages": {} 19 | } -------------------------------------------------------------------------------- /test/aliased-port.integ.ts: -------------------------------------------------------------------------------- 1 | import { App, Stack } from 'aws-cdk-lib'; 2 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 3 | import { AliasedPortExtension, Container, Environment, EnvironmentCapacityType, Service, ServiceDescription } from '../lib'; 4 | 5 | const app = new App(); 6 | const stack = new Stack(app, 'aws-ecs-integ'); 7 | 8 | const environment = new Environment(stack, 'production', { 9 | capacityType: EnvironmentCapacityType.FARGATE, 10 | }); 11 | 12 | const aliasedPortServiceDescription = new ServiceDescription(); 13 | 14 | aliasedPortServiceDescription.add(new Container({ 15 | cpu: 256, 16 | memoryMiB: 512, 17 | trafficPort: 80, 18 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 19 | environment: { 20 | PORT: '80', 21 | }, 22 | })); 23 | 24 | aliasedPortServiceDescription.add(new AliasedPortExtension({ 25 | alias: 'name', 26 | aliasPort: 1000, 27 | appProtocol: ecs.AppProtocol.grpc, 28 | })); 29 | 30 | new Service(stack, 'ServiceConnect', { 31 | environment: environment, 32 | serviceDescription: aliasedPortServiceDescription, 33 | desiredCount: 1, 34 | }); 35 | -------------------------------------------------------------------------------- /test/aliased-port.test.ts: -------------------------------------------------------------------------------- 1 | import { Stack } from 'aws-cdk-lib'; 2 | import { Template } from 'aws-cdk-lib/assertions'; 3 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 4 | import { AliasedPortExtension, Container, Environment, ServiceDescription, Service } from '../lib'; 5 | 6 | describe('aliased port', () => { 7 | let stack: Stack; 8 | let environment: Environment; 9 | let serviceDescription: ServiceDescription; 10 | beforeEach(() => { 11 | stack = new Stack(); 12 | environment = new Environment(stack, 'production'); 13 | serviceDescription = new ServiceDescription(); 14 | }); 15 | 16 | test('aliased port extension throws error when no container extension exists', () => { 17 | 18 | serviceDescription.add(new AliasedPortExtension({ 19 | alias: 'name', 20 | })); 21 | 22 | expect(() => { 23 | new Service(stack, 'my-service', { 24 | environment, 25 | serviceDescription, 26 | }); 27 | }).toThrow('Service \'my-service\' must have a Container extension'); 28 | }); 29 | 30 | test('when adding all options', () => { 31 | serviceDescription.add(new 
Container({ 32 | cpu: 256, 33 | memoryMiB: 512, 34 | trafficPort: 80, 35 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 36 | })); 37 | 38 | serviceDescription.add(new AliasedPortExtension({ 39 | alias: 'name', 40 | aliasPort: 1000, 41 | appProtocol: ecs.AppProtocol.grpc, 42 | })); 43 | 44 | new Service(stack, 'my-service', { 45 | environment, 46 | serviceDescription, 47 | }); 48 | 49 | // THEN 50 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::TaskDefinition', { 51 | ContainerDefinitions: [ 52 | { 53 | PortMappings: [ 54 | { 55 | Name: 'name', 56 | Protocol: 'tcp', 57 | AppProtocol: 'grpc', 58 | ContainerPort: 80, 59 | }, 60 | ], 61 | }, 62 | ], 63 | }); 64 | 65 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::Service', { 66 | ServiceConnectConfiguration: { 67 | Enabled: true, 68 | Namespace: 'production', 69 | Services: [ 70 | { 71 | PortName: 'name', 72 | ClientAliases: [ 73 | { 74 | Port: 1000, 75 | DnsName: 'name', 76 | }, 77 | ], 78 | }, 79 | ], 80 | }, 81 | }); 82 | }); 83 | 84 | test('when adding an aliased port with minimal config', () => { 85 | serviceDescription.add(new Container({ 86 | cpu: 256, 87 | memoryMiB: 512, 88 | trafficPort: 80, 89 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 90 | })); 91 | 92 | serviceDescription.add(new AliasedPortExtension({ 93 | alias: 'name', 94 | })); 95 | 96 | new Service(stack, 'my-service', { 97 | environment, 98 | serviceDescription, 99 | }); 100 | 101 | // THEN 102 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::Service', { 103 | ServiceConnectConfiguration: { 104 | Enabled: true, 105 | Namespace: 'production', 106 | Services: [ 107 | { 108 | PortName: 'name', 109 | ClientAliases: [ 110 | { 111 | Port: 80, 112 | DnsName: 'name', 113 | }, 114 | ], 115 | }, 116 | ], 117 | }, 118 | }); 119 | }); 120 | }); -------------------------------------------------------------------------------- /test/all-service-addons.integ.snapshot/aws-ecs-integ.assets.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "21.0.0", 3 | "files": { 4 | "b6f7e728bbb59a44cd36e3c267201c18a62a55d3ebf1181a439da200585eb7df": { 5 | "source": { 6 | "path": "aws-ecs-integ.template.json", 7 | "packaging": "file" 8 | }, 9 | "destinations": { 10 | "current_account-current_region": { 11 | "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}", 12 | "objectKey": "b6f7e728bbb59a44cd36e3c267201c18a62a55d3ebf1181a439da200585eb7df.json", 13 | "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}" 14 | } 15 | } 16 | } 17 | }, 18 | "dockerImages": {} 19 | } -------------------------------------------------------------------------------- /test/all-service-addons.integ.ts: -------------------------------------------------------------------------------- 1 | import { App, Stack } from 'aws-cdk-lib'; 2 | import { Mesh } from 'aws-cdk-lib/aws-appmesh'; 3 | import { ContainerImage } from 'aws-cdk-lib/aws-ecs'; 4 | import { AppMeshExtension, CloudwatchAgentExtension, Container, Environment, FireLensExtension, HttpLoadBalancerExtension, Service, ServiceDescription, XRayExtension } from '../lib'; 5 | 6 | const app = new App(); 7 | const stack = new Stack(app, 'aws-ecs-integ'); 8 | 9 | const mesh = new Mesh(stack, 'my-mesh'); 10 | const environment = new Environment(stack, 'production'); 11 | 12 | /** Name service */ 13 | const nameDescription = new ServiceDescription(); 14 | 
nameDescription.add(new Container({ 15 | cpu: 1024, 16 | memoryMiB: 2048, 17 | trafficPort: 80, 18 | image: ContainerImage.fromRegistry('nathanpeck/name'), 19 | environment: { 20 | PORT: '80', 21 | }, 22 | })); 23 | nameDescription.add(new AppMeshExtension({ mesh })); 24 | nameDescription.add(new FireLensExtension()); 25 | nameDescription.add(new XRayExtension()); 26 | nameDescription.add(new CloudwatchAgentExtension()); 27 | 28 | const nameService = new Service(stack, 'name', { 29 | environment: environment, 30 | serviceDescription: nameDescription, 31 | autoScaleTaskCount: { 32 | maxTaskCount: 10, 33 | minTaskCount: 2, 34 | targetCpuUtilization: 75, 35 | }, 36 | desiredCount: 2, 37 | }); 38 | 39 | /** Greeting service */ 40 | const greetingDescription = new ServiceDescription(); 41 | greetingDescription.add(new Container({ 42 | cpu: 1024, 43 | memoryMiB: 2048, 44 | trafficPort: 80, 45 | image: ContainerImage.fromRegistry('nathanpeck/greeting'), 46 | environment: { 47 | PORT: '80', 48 | }, 49 | })); 50 | greetingDescription.add(new AppMeshExtension({ mesh })); 51 | greetingDescription.add(new FireLensExtension()); 52 | greetingDescription.add(new XRayExtension()); 53 | greetingDescription.add(new CloudwatchAgentExtension()); 54 | 55 | const greetingService = new Service(stack, 'greeting', { 56 | environment: environment, 57 | serviceDescription: greetingDescription, 58 | desiredCount: 2, 59 | autoScaleTaskCount: { 60 | minTaskCount: 2, 61 | maxTaskCount: 10, 62 | targetCpuUtilization: 75, 63 | }, 64 | }); 65 | 66 | /** Greeter service */ 67 | const greeterDescription = new ServiceDescription(); 68 | greeterDescription.add(new Container({ 69 | cpu: 1024, 70 | memoryMiB: 2048, 71 | trafficPort: 80, 72 | image: ContainerImage.fromRegistry('nathanpeck/greeter'), 73 | environment: { 74 | PORT: '80', 75 | GREETING_URL: 'http://greeting.production', 76 | NAME_URL: 'http://name.production', 77 | }, 78 | })); 79 | greeterDescription.add(new AppMeshExtension({ mesh })); 80 | greeterDescription.add(new FireLensExtension()); 81 | greeterDescription.add(new XRayExtension()); 82 | greeterDescription.add(new CloudwatchAgentExtension()); 83 | greeterDescription.add(new HttpLoadBalancerExtension()); 84 | 85 | const greeterService = new Service(stack, 'greeter', { 86 | environment: environment, 87 | serviceDescription: greeterDescription, 88 | desiredCount: 2, 89 | autoScaleTaskCount: { 90 | minTaskCount: 2, 91 | maxTaskCount: 10, 92 | targetCpuUtilization: 75, 93 | }, 94 | }); 95 | 96 | greeterService.connectTo(nameService); 97 | greeterService.connectTo(greetingService); 98 | 99 | /** 100 | * Expectations are that you should see an output 101 | * of the load balancer URL for the greeter service, make 102 | * a request to it and see a greeting phrase constructed out 103 | * of a random greeting and a random name from the two underlying 104 | * services. The other addons enable tracing and logging which must 105 | * be verified separately. 
106 | */ -------------------------------------------------------------------------------- /test/assign-public-ip.integ.snapshot/aws-ecs-integ.assets.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "21.0.0", 3 | "files": { 4 | "63997c98a36563198603dd944433ccada81fcdb39b2d5456dfc0eafd27880258": { 5 | "source": { 6 | "path": "asset.63997c98a36563198603dd944433ccada81fcdb39b2d5456dfc0eafd27880258", 7 | "packaging": "zip" 8 | }, 9 | "destinations": { 10 | "current_account-current_region": { 11 | "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}", 12 | "objectKey": "63997c98a36563198603dd944433ccada81fcdb39b2d5456dfc0eafd27880258.zip", 13 | "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}" 14 | } 15 | } 16 | }, 17 | "6ff23d2800aac27308e31c227529dc13854507c3b2598d2433fcf82604fa054d": { 18 | "source": { 19 | "path": "asset.6ff23d2800aac27308e31c227529dc13854507c3b2598d2433fcf82604fa054d", 20 | "packaging": "zip" 21 | }, 22 | "destinations": { 23 | "current_account-current_region": { 24 | "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}", 25 | "objectKey": "6ff23d2800aac27308e31c227529dc13854507c3b2598d2433fcf82604fa054d.zip", 26 | "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}" 27 | } 28 | } 29 | }, 30 | "e845402ce43b66fc6f20df4a239f20f8662eb6c7f920b94cf6542dd0e64ce0f7": { 31 | "source": { 32 | "path": "asset.e845402ce43b66fc6f20df4a239f20f8662eb6c7f920b94cf6542dd0e64ce0f7", 33 | "packaging": "zip" 34 | }, 35 | "destinations": { 36 | "current_account-current_region": { 37 | "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}", 38 | "objectKey": "e845402ce43b66fc6f20df4a239f20f8662eb6c7f920b94cf6542dd0e64ce0f7.zip", 39 | "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}" 40 | } 41 | } 42 | }, 43 | "06af30c77f9df1b1314c7e42e674b5b75cbe2841251ba21438e7eb6f0e18abf3": { 44 | "source": { 45 | "path": "aws-ecs-integ.template.json", 46 | "packaging": "file" 47 | }, 48 | "destinations": { 49 | "current_account-current_region": { 50 | "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}", 51 | "objectKey": "06af30c77f9df1b1314c7e42e674b5b75cbe2841251ba21438e7eb6f0e18abf3.json", 52 | "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}" 53 | } 54 | } 55 | } 56 | }, 57 | "dockerImages": {} 58 | } -------------------------------------------------------------------------------- /test/assign-public-ip.integ.ts: -------------------------------------------------------------------------------- 1 | /// !cdk-integ pragma:ignore-assets 2 | import { App, CfnOutput, Fn, Stack } from 'aws-cdk-lib'; 3 | import { SubnetType, Vpc } from 'aws-cdk-lib/aws-ec2'; 4 | import { ContainerImage } from 'aws-cdk-lib/aws-ecs'; 5 | import { CnameRecord, PublicHostedZone } from 'aws-cdk-lib/aws-route53'; 6 | import { AssignPublicIpExtension, Container, Environment, Service, ServiceDescription } from '../lib'; 7 | 8 | // Record name. You can change this and redeploy this integration test to see 9 | // what happens when the record name changes. 
10 | const RECORD_NAME = 'test-record'; 11 | 12 | const app = new App(); 13 | const stack = new Stack(app, 'aws-ecs-integ'); 14 | 15 | const vpc = new Vpc(stack, 'vpc', { 16 | subnetConfiguration: [ 17 | { 18 | cidrMask: 24, 19 | name: 'public', 20 | subnetType: SubnetType.PUBLIC, 21 | }, 22 | ], 23 | }); 24 | 25 | const dnsZone = new PublicHostedZone(stack, 'zone', { 26 | zoneName: 'myexample.com', 27 | }); 28 | 29 | // A record in the zone that is lexicographically later than 'test-record' 30 | // to try to trip up the record set locator. 31 | new CnameRecord(stack, 'laterRecord', { 32 | recordName: 'u-record', 33 | zone: dnsZone, 34 | domainName: 'console.aws.amazon.com', 35 | }); 36 | 37 | const environment = new Environment(stack, 'production', { vpc }); 38 | 39 | const nameDescription = new ServiceDescription(); 40 | 41 | nameDescription.add(new Container({ 42 | cpu: 256, 43 | memoryMiB: 512, 44 | trafficPort: 80, 45 | image: ContainerImage.fromRegistry('nathanpeck/name'), 46 | environment: { 47 | PORT: '80', 48 | }, 49 | })); 50 | 51 | nameDescription.add(new AssignPublicIpExtension({ 52 | dns: { 53 | zone: dnsZone, 54 | recordName: RECORD_NAME, 55 | }, 56 | })); 57 | 58 | new Service(stack, 'name', { 59 | environment: environment, 60 | serviceDescription: nameDescription, 61 | }); 62 | 63 | new CfnOutput(stack, 'DnsName', { 64 | value: Fn.join('.', [RECORD_NAME, dnsZone.zoneName]), 65 | }); 66 | 67 | new CfnOutput(stack, 'DnsServer', { 68 | value: Fn.select(0, dnsZone.hostedZoneNameServers!), 69 | }); 70 | 71 | /** 72 | * Expect this stack to deploy. The stack outputs include a DNS name and a 73 | * nameserver. A short time after the services have settled, you may query the 74 | * nameserver for the record. If an IP address is shown, then this test has 75 | * succeeded. 76 | * 77 | * Example: 78 | * 79 | * ``` 80 | * $ cdk --app 'node ./integ.assign-public-ip.js' deploy 81 | * ... 82 | * Outputs: 83 | * aws-ecs-integ.DnsName = test-record.myexample.com 84 | * aws-ecs-integ.DnsServer = ns-1836.awsdns-37.co.uk 85 | * ... 
86 | * 87 | * $ host test-record.myexample.com ns-1836.awsdns-37.co.uk 88 | * Using domain server: 89 | * Name: ns-1836.awsdns-37.co.uk 90 | * Address: 2600:9000:5307:2c00::1#53 91 | * Aliases: 92 | * 93 | * test-record.myexample.com has address 52.60.53.62 94 | * ``` 95 | */ 96 | -------------------------------------------------------------------------------- /test/assign-public-ip.test.ts: -------------------------------------------------------------------------------- 1 | import { Stack } from 'aws-cdk-lib'; 2 | import { Template } from 'aws-cdk-lib/assertions'; 3 | import * as autoscaling from 'aws-cdk-lib/aws-autoscaling'; 4 | import * as ec2 from 'aws-cdk-lib/aws-ec2'; 5 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 6 | import * as route53 from 'aws-cdk-lib/aws-route53'; 7 | import { AssignPublicIpExtension, Container, Environment, EnvironmentCapacityType, Service, ServiceDescription } from '../lib'; 8 | import { TaskRecordManager } from '../lib/extensions/assign-public-ip/task-record-manager'; 9 | 10 | describe('assign public ip', () => { 11 | test('should assign a public ip to fargate tasks', () => { 12 | // GIVEN 13 | const stack = new Stack(); 14 | 15 | // WHEN 16 | const environment = new Environment(stack, 'production'); 17 | const serviceDescription = new ServiceDescription(); 18 | serviceDescription.add(new Container({ 19 | cpu: 256, 20 | memoryMiB: 512, 21 | trafficPort: 80, 22 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 23 | })); 24 | serviceDescription.add(new AssignPublicIpExtension()); 25 | 26 | new Service(stack, 'my-service', { 27 | environment, 28 | serviceDescription, 29 | }); 30 | 31 | // THEN 32 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::Service', { 33 | NetworkConfiguration: { 34 | AwsvpcConfiguration: { 35 | AssignPublicIp: 'ENABLED', 36 | }, 37 | }, 38 | }); 39 | }); 40 | 41 | test('errors when adding a public ip to ec2-backed service', () => { 42 | // GIVEN 43 | const stack = new Stack(); 44 | 45 | const vpc = new ec2.Vpc(stack, 'VPC'); 46 | const cluster = new ecs.Cluster(stack, 'Cluster', { vpc }); 47 | cluster.addAsgCapacityProvider(new ecs.AsgCapacityProvider(stack, 'Provider', { 48 | autoScalingGroup: new autoscaling.AutoScalingGroup(stack, 'DefaultAutoScalingGroup', { 49 | vpc, 50 | machineImage: ec2.MachineImage.latestAmazonLinux(), 51 | instanceType: new ec2.InstanceType('t2.micro'), 52 | }), 53 | })); 54 | 55 | const environment = new Environment(stack, 'production', { 56 | vpc, 57 | cluster, 58 | capacityType: EnvironmentCapacityType.EC2, 59 | }); 60 | 61 | const serviceDescription = new ServiceDescription(); 62 | serviceDescription.add(new Container({ 63 | cpu: 256, 64 | memoryMiB: 512, 65 | trafficPort: 80, 66 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 67 | })); 68 | serviceDescription.add(new AssignPublicIpExtension()); 69 | 70 | // WHEN / THEN 71 | expect(() => { 72 | new Service(stack, 'my-service', { 73 | environment, 74 | serviceDescription, 75 | }); 76 | }).toThrow(/Fargate/i); 77 | }); 78 | 79 | test('should not add a task record manager by default', () => { 80 | // GIVEN 81 | const stack = new Stack(); 82 | 83 | const environment = new Environment(stack, 'production'); 84 | const serviceDescription = new ServiceDescription(); 85 | 86 | // WHEN 87 | serviceDescription.add(new Container({ 88 | cpu: 256, 89 | memoryMiB: 512, 90 | trafficPort: 80, 91 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 92 | })); 93 | serviceDescription.add(new AssignPublicIpExtension()); 94 | 95 | 
const service = new Service(stack, 'my-service', { 96 | environment, 97 | serviceDescription, 98 | }); 99 | 100 | // THEN 101 | expect(service.ecsService.node.tryFindChild('TaskRecordManager')).toBeUndefined(); 102 | }); 103 | 104 | test('should add a task record manager when dns is requested', () => { 105 | // GIVEN 106 | const stack = new Stack(); 107 | const dnsZone = new route53.PublicHostedZone(stack, 'zone', { 108 | zoneName: 'myexample.com', 109 | }); 110 | 111 | const environment = new Environment(stack, 'production'); 112 | const serviceDescription = new ServiceDescription(); 113 | 114 | // WHEN 115 | serviceDescription.add(new Container({ 116 | cpu: 256, 117 | memoryMiB: 512, 118 | trafficPort: 80, 119 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 120 | })); 121 | serviceDescription.add(new AssignPublicIpExtension({ 122 | dns: { 123 | zone: dnsZone, 124 | recordName: 'test-record', 125 | }, 126 | })); 127 | 128 | const service = new Service(stack, 'my-service', { 129 | environment, 130 | serviceDescription, 131 | }); 132 | 133 | // THEN 134 | expect(service.ecsService.node.tryFindChild('TaskRecordManager')).toBeDefined(); 135 | }); 136 | 137 | test('task record manager listens for ecs events', () => { 138 | // GIVEN 139 | const stack = new Stack(); 140 | const dnsZone = new route53.PublicHostedZone(stack, 'zone', { 141 | zoneName: 'myexample.com', 142 | }); 143 | 144 | const environment = new Environment(stack, 'production'); 145 | const serviceDescription = new ServiceDescription(); 146 | serviceDescription.add(new Container({ 147 | cpu: 256, 148 | memoryMiB: 512, 149 | trafficPort: 80, 150 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 151 | })); 152 | serviceDescription.add(new AssignPublicIpExtension()); 153 | 154 | const service = new Service(stack, 'my-service', { 155 | environment, 156 | serviceDescription, 157 | }); 158 | 159 | // WHEN 160 | new TaskRecordManager(stack, 'manager', { 161 | dnsRecordName: 'test-record', 162 | dnsZone: dnsZone, 163 | service: service.ecsService, 164 | }); 165 | 166 | // THEN 167 | Template.fromStack(stack).hasResourceProperties('AWS::Events::Rule', { 168 | EventPattern: { 169 | 'source': ['aws.ecs'], 170 | 'detail-type': [ 171 | 'ECS Task State Change', 172 | ], 173 | 'detail': { 174 | lastStatus: ['RUNNING'], 175 | desiredStatus: ['RUNNING'], 176 | }, 177 | }, 178 | }); 179 | 180 | Template.fromStack(stack).hasResourceProperties('AWS::Events::Rule', { 181 | EventPattern: { 182 | 'source': ['aws.ecs'], 183 | 'detail-type': [ 184 | 'ECS Task State Change', 185 | ], 186 | 'detail': { 187 | lastStatus: ['STOPPED'], 188 | desiredStatus: ['STOPPED'], 189 | }, 190 | }, 191 | }); 192 | }); 193 | }); 194 | -------------------------------------------------------------------------------- /test/cloudwatch-agent.test.ts: -------------------------------------------------------------------------------- 1 | import { Stack } from 'aws-cdk-lib'; 2 | import { Template } from 'aws-cdk-lib/assertions'; 3 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 4 | import { CloudwatchAgentExtension, Container, Environment, Service, ServiceDescription } from '../lib'; 5 | 6 | describe('cloudwatch agent', () => { 7 | test('should be able to add AWS X-Ray to a service', () => { 8 | // GIVEN 9 | const stack = new Stack(); 10 | 11 | // WHEN 12 | const environment = new Environment(stack, 'production'); 13 | const serviceDescription = new ServiceDescription(); 14 | 15 | serviceDescription.add(new Container({ 16 | cpu: 256, 17 | memoryMiB: 512, 18 | 
trafficPort: 80, 19 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 20 | })); 21 | 22 | serviceDescription.add(new CloudwatchAgentExtension()); 23 | 24 | new Service(stack, 'my-service', { 25 | environment, 26 | serviceDescription, 27 | }); 28 | 29 | // THEN 30 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::TaskDefinition', { 31 | ContainerDefinitions: [ 32 | { 33 | Cpu: 256, 34 | DependsOn: [ 35 | { 36 | Condition: 'START', 37 | ContainerName: 'cloudwatch-agent', 38 | }, 39 | ], 40 | Essential: true, 41 | Image: 'nathanpeck/name', 42 | Memory: 512, 43 | Name: 'app', 44 | PortMappings: [ 45 | { 46 | ContainerPort: 80, 47 | Protocol: 'tcp', 48 | }, 49 | ], 50 | Ulimits: [ 51 | { 52 | HardLimit: 1024000, 53 | Name: 'nofile', 54 | SoftLimit: 1024000, 55 | }, 56 | ], 57 | }, 58 | { 59 | Environment: [ 60 | { 61 | Name: 'CW_CONFIG_CONTENT', 62 | Value: '{"logs":{"metrics_collected":{"emf":{}}},"metrics":{"metrics_collected":{"statsd":{}}}}', 63 | }, 64 | ], 65 | Essential: true, 66 | Image: 'amazon/cloudwatch-agent:latest', 67 | LogConfiguration: { 68 | LogDriver: 'awslogs', 69 | Options: { 70 | 'awslogs-group': { 71 | Ref: 'myservicetaskdefinitioncloudwatchagentLogGroupDF0CD679', 72 | }, 73 | 'awslogs-stream-prefix': 'cloudwatch-agent', 74 | 'awslogs-region': { 75 | Ref: 'AWS::Region', 76 | }, 77 | }, 78 | }, 79 | MemoryReservation: 50, 80 | Name: 'cloudwatch-agent', 81 | User: '0:1338', 82 | }, 83 | ], 84 | Cpu: '256', 85 | ExecutionRoleArn: { 86 | 'Fn::GetAtt': [ 87 | 'myservicetaskdefinitionExecutionRole0CE74AD0', 88 | 'Arn', 89 | ], 90 | }, 91 | Family: 'myservicetaskdefinition', 92 | Memory: '512', 93 | NetworkMode: 'awsvpc', 94 | RequiresCompatibilities: [ 95 | 'EC2', 96 | 'FARGATE', 97 | ], 98 | TaskRoleArn: { 99 | 'Fn::GetAtt': [ 100 | 'myservicetaskdefinitionTaskRole92ACD903', 101 | 'Arn', 102 | ], 103 | }, 104 | }); 105 | }); 106 | }); -------------------------------------------------------------------------------- /test/custom-service-extension.integ.snapshot/aws-ecs-integ.assets.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "21.0.0", 3 | "files": { 4 | "0c4bd04678299111d98eaa888bf6d98b39f6762a55b9ff2f659c9ef35371f748": { 5 | "source": { 6 | "path": "aws-ecs-integ.template.json", 7 | "packaging": "file" 8 | }, 9 | "destinations": { 10 | "current_account-current_region": { 11 | "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}", 12 | "objectKey": "0c4bd04678299111d98eaa888bf6d98b39f6762a55b9ff2f659c9ef35371f748.json", 13 | "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}" 14 | } 15 | } 16 | } 17 | }, 18 | "dockerImages": {} 19 | } -------------------------------------------------------------------------------- /test/custom-service-extension.integ.ts: -------------------------------------------------------------------------------- 1 | import { App, Duration, Stack } from 'aws-cdk-lib'; 2 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 3 | import { Container, Environment, Service, ServiceBuild, ServiceDescription, ServiceExtension } from '../lib'; 4 | 5 | class MyCustomAutoscaling extends ServiceExtension { 6 | constructor() { 7 | super('my-custom-autoscaling'); 8 | } 9 | 10 | // This service modifies properties of the service prior 11 | // to construct creation. 
12 | public modifyServiceProps(props: ServiceBuild) { 13 | return { 14 | ...props, 15 | 16 | // Initially launch 10 copies of the service 17 | desiredCount: 10, 18 | } as ServiceBuild; 19 | } 20 | 21 | // This hook utilizes the resulting service construct 22 | // once it is created 23 | public useService(service: ecs.Ec2Service | ecs.FargateService) { 24 | const scalingTarget = service.autoScaleTaskCount({ 25 | minCapacity: 5, // Min 5 tasks 26 | maxCapacity: 20, // Max 20 tasks 27 | }); 28 | 29 | scalingTarget.scaleOnCpuUtilization('TargetCpuUtilization50', { 30 | targetUtilizationPercent: 50, 31 | scaleInCooldown: Duration.seconds(60), 32 | scaleOutCooldown: Duration.seconds(60), 33 | }); 34 | } 35 | } 36 | 37 | const app = new App(); 38 | const stack = new Stack(app, 'aws-ecs-integ'); 39 | 40 | const environment = new Environment(stack, 'production'); 41 | 42 | /** Name service */ 43 | const nameDescription = new ServiceDescription(); 44 | nameDescription.add(new Container({ 45 | cpu: 1024, 46 | memoryMiB: 2048, 47 | trafficPort: 80, 48 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 49 | environment: { 50 | PORT: '80', 51 | }, 52 | })); 53 | nameDescription.add(new MyCustomAutoscaling()); 54 | 55 | new Service(stack, 'name', { 56 | environment: environment, 57 | serviceDescription: nameDescription, 58 | }); 59 | 60 | /** 61 | * Expectation is that the user is able to implement their own extension 62 | * using the abstract class, and that it will function. This will help 63 | * catch breaking changes to extensions. (Might need to make this example 64 | * custom extension more complex eventually) 65 | */ -------------------------------------------------------------------------------- /test/environment.test.ts: -------------------------------------------------------------------------------- 1 | import { Stack } from 'aws-cdk-lib'; 2 | import { Template } from 'aws-cdk-lib/assertions'; 3 | import * as autoscaling from 'aws-cdk-lib/aws-autoscaling'; 4 | import * as ec2 from 'aws-cdk-lib/aws-ec2'; 5 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 6 | import { Container, Environment, EnvironmentCapacityType, Service, ServiceDescription } from '../lib'; 7 | 8 | describe('environment', () => { 9 | test('should be able to add a service to an environment', () => { 10 | // GIVEN 11 | const stack = new Stack(); 12 | 13 | // WHEN 14 | const environment = new Environment(stack, 'production'); 15 | const serviceDescription = new ServiceDescription(); 16 | 17 | serviceDescription.add(new Container({ 18 | cpu: 256, 19 | memoryMiB: 512, 20 | trafficPort: 80, 21 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 22 | })); 23 | 24 | new Service(stack, 'my-service', { 25 | environment, 26 | serviceDescription, 27 | }); 28 | 29 | // THEN 30 | Template.fromStack(stack).resourceCountIs('AWS::ECS::Service', 1); 31 | 32 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::TaskDefinition', { 33 | ContainerDefinitions: [ 34 | { 35 | Cpu: 256, 36 | Essential: true, 37 | Image: 'nathanpeck/name', 38 | Memory: 512, 39 | Name: 'app', 40 | PortMappings: [ 41 | { 42 | ContainerPort: 80, 43 | Protocol: 'tcp', 44 | }, 45 | ], 46 | Ulimits: [ 47 | { 48 | HardLimit: 1024000, 49 | Name: 'nofile', 50 | SoftLimit: 1024000, 51 | }, 52 | ], 53 | }, 54 | ], 55 | Cpu: '256', 56 | Family: 'myservicetaskdefinition', 57 | Memory: '512', 58 | NetworkMode: 'awsvpc', 59 | RequiresCompatibilities: [ 60 | 'EC2', 61 | 'FARGATE', 62 | ], 63 | TaskRoleArn: { 64 | 'Fn::GetAtt': [ 65 | 
'myservicetaskdefinitionTaskRole92ACD903', 66 | 'Arn', 67 | ], 68 | }, 69 | }); 70 | }); 71 | 72 | test('should be able to create a Fargate environment with a given VPC and cluster', () => { 73 | // GIVEN 74 | const stack = new Stack(); 75 | 76 | // WHEN 77 | const vpc = new ec2.Vpc(stack, 'VPC'); 78 | const cluster = new ecs.Cluster(stack, 'Cluster', { vpc }); 79 | 80 | const environment = new Environment(stack, 'production', { 81 | vpc, 82 | cluster, 83 | }); 84 | const serviceDescription = new ServiceDescription(); 85 | 86 | serviceDescription.add(new Container({ 87 | cpu: 256, 88 | memoryMiB: 512, 89 | trafficPort: 80, 90 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 91 | })); 92 | 93 | new Service(stack, 'my-service', { 94 | environment, 95 | serviceDescription, 96 | }); 97 | 98 | // THEN 99 | Template.fromStack(stack).resourceCountIs('AWS::ECS::Service', 1); 100 | 101 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::TaskDefinition', { 102 | ContainerDefinitions: [ 103 | { 104 | Cpu: 256, 105 | Essential: true, 106 | Image: 'nathanpeck/name', 107 | Memory: 512, 108 | Name: 'app', 109 | PortMappings: [ 110 | { 111 | ContainerPort: 80, 112 | Protocol: 'tcp', 113 | }, 114 | ], 115 | Ulimits: [ 116 | { 117 | HardLimit: 1024000, 118 | Name: 'nofile', 119 | SoftLimit: 1024000, 120 | }, 121 | ], 122 | }, 123 | ], 124 | Cpu: '256', 125 | Family: 'myservicetaskdefinition', 126 | Memory: '512', 127 | NetworkMode: 'awsvpc', 128 | RequiresCompatibilities: [ 129 | 'EC2', 130 | 'FARGATE', 131 | ], 132 | TaskRoleArn: { 133 | 'Fn::GetAtt': [ 134 | 'myservicetaskdefinitionTaskRole92ACD903', 135 | 'Arn', 136 | ], 137 | }, 138 | }); 139 | }); 140 | 141 | test('should be able to create an environment for EC2', () => { 142 | // GIVEN 143 | const stack = new Stack(); 144 | 145 | // WHEN 146 | const vpc = new ec2.Vpc(stack, 'VPC'); 147 | const cluster = new ecs.Cluster(stack, 'Cluster', { vpc }); 148 | cluster.addAsgCapacityProvider(new ecs.AsgCapacityProvider(stack, 'Provider', { 149 | autoScalingGroup: new autoscaling.AutoScalingGroup(stack, 'DefaultAutoScalingGroup', { 150 | vpc, 151 | machineImage: ec2.MachineImage.latestAmazonLinux(), 152 | instanceType: new ec2.InstanceType('t2.micro'), 153 | }), 154 | })); 155 | 156 | const environment = new Environment(stack, 'production', { 157 | vpc, 158 | cluster, 159 | capacityType: EnvironmentCapacityType.EC2, 160 | }); 161 | const serviceDescription = new ServiceDescription(); 162 | 163 | serviceDescription.add(new Container({ 164 | cpu: 256, 165 | memoryMiB: 512, 166 | trafficPort: 80, 167 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 168 | })); 169 | 170 | new Service(stack, 'my-service', { 171 | environment, 172 | serviceDescription, 173 | }); 174 | 175 | // THEN 176 | Template.fromStack(stack).resourceCountIs('AWS::ECS::Service', 1); 177 | 178 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::TaskDefinition', { 179 | ContainerDefinitions: [ 180 | { 181 | Cpu: 256, 182 | Essential: true, 183 | Image: 'nathanpeck/name', 184 | Memory: 512, 185 | Name: 'app', 186 | PortMappings: [ 187 | { 188 | ContainerPort: 80, 189 | Protocol: 'tcp', 190 | }, 191 | ], 192 | Ulimits: [ 193 | { 194 | HardLimit: 1024000, 195 | Name: 'nofile', 196 | SoftLimit: 1024000, 197 | }, 198 | ], 199 | }, 200 | ], 201 | Cpu: '256', 202 | Family: 'myservicetaskdefinition', 203 | Memory: '512', 204 | NetworkMode: 'awsvpc', 205 | RequiresCompatibilities: [ 206 | 'EC2', 207 | 'FARGATE', 208 | ], 209 | TaskRoleArn: { 210 | 'Fn::GetAtt': [ 
211 | 'myservicetaskdefinitionTaskRole92ACD903', 212 | 'Arn', 213 | ], 214 | }, 215 | }); 216 | }); 217 | 218 | test('should be able to create an environment from attributes', () => { 219 | // GIVEN 220 | const stack = new Stack(); 221 | 222 | const vpc = new ec2.Vpc(stack, 'VPC'); 223 | const cluster = new ecs.Cluster(stack, 'Cluster', { vpc }); 224 | cluster.addAsgCapacityProvider(new ecs.AsgCapacityProvider(stack, 'Provider', { 225 | autoScalingGroup: new autoscaling.AutoScalingGroup(stack, 'DefaultAutoScalingGroup', { 226 | vpc, 227 | machineImage: ec2.MachineImage.latestAmazonLinux(), 228 | instanceType: new ec2.InstanceType('t2.micro'), 229 | }), 230 | })); 231 | 232 | // WHEN 233 | const environment = Environment.fromEnvironmentAttributes(stack, 'Environment', { 234 | capacityType: EnvironmentCapacityType.EC2, 235 | cluster: cluster, 236 | }); 237 | 238 | // THEN 239 | expect(environment.capacityType).toEqual(EnvironmentCapacityType.EC2); 240 | expect(environment.cluster).toEqual(cluster); 241 | expect(environment.vpc).toEqual(vpc); 242 | expect(environment.id).toEqual('Environment'); 243 | }); 244 | }); 245 | -------------------------------------------------------------------------------- /test/firelens.test.ts: -------------------------------------------------------------------------------- 1 | import { Stack } from 'aws-cdk-lib'; 2 | import { Match, Template } from 'aws-cdk-lib/assertions'; 3 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 4 | import { Container, Environment, FireLensExtension, Service, ServiceDescription } from '../lib'; 5 | 6 | describe('firelens', () => { 7 | test('should be able to add Firelens to a service', () => { 8 | // GIVEN 9 | const stack = new Stack(); 10 | 11 | // WHEN 12 | const environment = new Environment(stack, 'production'); 13 | const serviceDescription = new ServiceDescription(); 14 | 15 | serviceDescription.add(new Container({ 16 | cpu: 256, 17 | memoryMiB: 512, 18 | trafficPort: 80, 19 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 20 | })); 21 | 22 | serviceDescription.add(new FireLensExtension()); 23 | 24 | new Service(stack, 'my-service', { 25 | environment, 26 | serviceDescription, 27 | }); 28 | 29 | // THEN 30 | 31 | // Ensure that the log group was created 32 | Template.fromStack(stack).hasResource('AWS::Logs::LogGroup', Match.anyValue()); 33 | 34 | // Ensure that task has a Firelens sidecar and a log configuration 35 | // pointing at the sidecar 36 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::TaskDefinition', { 37 | ContainerDefinitions: [ 38 | { 39 | Cpu: 256, 40 | DependsOn: [ 41 | { 42 | Condition: 'START', 43 | ContainerName: 'firelens', 44 | }, 45 | ], 46 | Essential: true, 47 | Image: 'nathanpeck/name', 48 | LogConfiguration: { 49 | LogDriver: 'awsfirelens', 50 | Options: { 51 | Name: 'cloudwatch', 52 | region: { 53 | Ref: 'AWS::Region', 54 | }, 55 | log_group_name: { 56 | Ref: 'myservicelogs176EE19F', 57 | }, 58 | log_stream_prefix: 'my-service/', 59 | }, 60 | }, 61 | Memory: 512, 62 | Name: 'app', 63 | PortMappings: [ 64 | { 65 | ContainerPort: 80, 66 | Protocol: 'tcp', 67 | }, 68 | ], 69 | Ulimits: [ 70 | { 71 | HardLimit: 1024000, 72 | Name: 'nofile', 73 | SoftLimit: 1024000, 74 | }, 75 | ], 76 | }, 77 | { 78 | Essential: true, 79 | FirelensConfiguration: { 80 | Type: 'fluentbit', 81 | }, 82 | Image: { 83 | Ref: 'SsmParameterValueawsserviceawsforfluentbitlatestC96584B6F00A464EAD1953AFF4B05118Parameter', 84 | }, 85 | LogConfiguration: { 86 | LogDriver: 'awslogs', 87 | Options: { 88 | 
'awslogs-group': { 89 | Ref: 'myservicetaskdefinitionfirelensLogGroup0D59B0EB', 90 | }, 91 | 'awslogs-stream-prefix': 'firelens', 92 | 'awslogs-region': { 93 | Ref: 'AWS::Region', 94 | }, 95 | }, 96 | }, 97 | MemoryReservation: 50, 98 | Name: 'firelens', 99 | User: '0:1338', 100 | }, 101 | ], 102 | Cpu: '256', 103 | Family: 'myservicetaskdefinition', 104 | Memory: '512', 105 | NetworkMode: 'awsvpc', 106 | RequiresCompatibilities: [ 107 | 'EC2', 108 | 'FARGATE', 109 | ], 110 | TaskRoleArn: { 111 | 'Fn::GetAtt': [ 112 | 'myservicetaskdefinitionTaskRole92ACD903', 113 | 'Arn', 114 | ], 115 | }, 116 | }); 117 | }); 118 | }); -------------------------------------------------------------------------------- /test/http-load-balancer.test.ts: -------------------------------------------------------------------------------- 1 | import { Stack } from 'aws-cdk-lib'; 2 | import { Template } from 'aws-cdk-lib/assertions'; 3 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 4 | import { Container, Environment, HttpLoadBalancerExtension, Service, ServiceDescription } from '../lib'; 5 | 6 | describe('http load balancer', () => { 7 | test('should be able to add an HTTP load balancer to a service', () => { 8 | // GIVEN 9 | const stack = new Stack(); 10 | 11 | // WHEN 12 | const environment = new Environment(stack, 'production'); 13 | const serviceDescription = new ServiceDescription(); 14 | 15 | serviceDescription.add(new Container({ 16 | cpu: 256, 17 | memoryMiB: 512, 18 | trafficPort: 80, 19 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 20 | })); 21 | 22 | serviceDescription.add(new HttpLoadBalancerExtension()); 23 | 24 | new Service(stack, 'my-service', { 25 | environment, 26 | serviceDescription, 27 | }); 28 | 29 | // THEN 30 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::TaskDefinition', { 31 | ContainerDefinitions: [ 32 | { 33 | Cpu: 256, 34 | Essential: true, 35 | Image: 'nathanpeck/name', 36 | Memory: 512, 37 | Name: 'app', 38 | PortMappings: [ 39 | { 40 | ContainerPort: 80, 41 | Protocol: 'tcp', 42 | }, 43 | ], 44 | Ulimits: [ 45 | { 46 | HardLimit: 1024000, 47 | Name: 'nofile', 48 | SoftLimit: 1024000, 49 | }, 50 | ], 51 | }, 52 | ], 53 | Cpu: '256', 54 | Family: 'myservicetaskdefinition', 55 | Memory: '512', 56 | NetworkMode: 'awsvpc', 57 | RequiresCompatibilities: [ 58 | 'EC2', 59 | 'FARGATE', 60 | ], 61 | TaskRoleArn: { 62 | 'Fn::GetAtt': [ 63 | 'myservicetaskdefinitionTaskRole92ACD903', 64 | 'Arn', 65 | ], 66 | }, 67 | }); 68 | 69 | Template.fromStack(stack).resourceCountIs('AWS::ElasticLoadBalancingV2::LoadBalancer', 1); 70 | Template.fromStack(stack).resourceCountIs('AWS::ElasticLoadBalancingV2::Listener', 1); 71 | }); 72 | 73 | test('allows scaling on request count for the HTTP load balancer', () => { 74 | // GIVEN 75 | const stack = new Stack(); 76 | 77 | // WHEN 78 | const environment = new Environment(stack, 'production'); 79 | const serviceDescription = new ServiceDescription(); 80 | 81 | serviceDescription.add(new Container({ 82 | cpu: 256, 83 | memoryMiB: 512, 84 | trafficPort: 80, 85 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 86 | })); 87 | 88 | serviceDescription.add(new HttpLoadBalancerExtension({ requestsPerTarget: 100 })); 89 | 90 | const service = new Service(stack, 'my-service', { 91 | environment, 92 | serviceDescription, 93 | autoScaleTaskCount: { 94 | maxTaskCount: 5, 95 | }, 96 | }); 97 | 98 | // THEN 99 | expect(service.targetGroup).not.toBeUndefined(); 100 | 
Template.fromStack(stack).hasResourceProperties('AWS::ApplicationAutoScaling::ScalableTarget', { 101 | MaxCapacity: 5, 102 | MinCapacity: 1, 103 | }); 104 | 105 | Template.fromStack(stack).hasResourceProperties('AWS::ApplicationAutoScaling::ScalingPolicy', { 106 | PolicyType: 'TargetTrackingScaling', 107 | TargetTrackingScalingPolicyConfiguration: { 108 | PredefinedMetricSpecification: { 109 | PredefinedMetricType: 'ALBRequestCountPerTarget', 110 | }, 111 | TargetValue: 100, 112 | }, 113 | }); 114 | }); 115 | 116 | test('should error when adding scaling policy if scaling target has not been configured', () => { 117 | // GIVEN 118 | const stack = new Stack(); 119 | 120 | // WHEN 121 | const environment = new Environment(stack, 'production'); 122 | const serviceDescription = new ServiceDescription(); 123 | 124 | serviceDescription.add(new Container({ 125 | cpu: 256, 126 | memoryMiB: 512, 127 | trafficPort: 80, 128 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 129 | })); 130 | 131 | serviceDescription.add(new HttpLoadBalancerExtension({ requestsPerTarget: 100 })); 132 | 133 | // THEN 134 | expect(() => { 135 | new Service(stack, 'my-service', { 136 | environment, 137 | serviceDescription, 138 | }); 139 | }).toThrow(/Auto scaling target for the service 'my-service' hasn't been configured. Please use Service construct to configure 'minTaskCount' and 'maxTaskCount'./); 140 | }); 141 | }); -------------------------------------------------------------------------------- /test/imported-environment.integ.snapshot/imported-environment-integ.assets.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "21.0.0", 3 | "files": { 4 | "68e34c759c6a6e13a84b4608c7aba93b4d15c81ff7de6cf4ee427d2b9339790e": { 5 | "source": { 6 | "path": "importedenvironmentintegResourcesAB23EBEF.nested.template.json", 7 | "packaging": "file" 8 | }, 9 | "destinations": { 10 | "current_account-current_region": { 11 | "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}", 12 | "objectKey": "68e34c759c6a6e13a84b4608c7aba93b4d15c81ff7de6cf4ee427d2b9339790e.json", 13 | "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}" 14 | } 15 | } 16 | }, 17 | "af2e3b9034b602ea14e274d349175f203fcfc7f59edba6f9eabed8e972a28e86": { 18 | "source": { 19 | "path": "imported-environment-integ.template.json", 20 | "packaging": "file" 21 | }, 22 | "destinations": { 23 | "current_account-current_region": { 24 | "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}", 25 | "objectKey": "af2e3b9034b602ea14e274d349175f203fcfc7f59edba6f9eabed8e972a28e86.json", 26 | "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}" 27 | } 28 | } 29 | } 30 | }, 31 | "dockerImages": {} 32 | } -------------------------------------------------------------------------------- /test/imported-environment.integ.ts: -------------------------------------------------------------------------------- 1 | /// !cdk-integ pragma:ignore-assets 2 | import { App, NestedStack, Stack } from 'aws-cdk-lib'; 3 | import { Vpc } from 'aws-cdk-lib/aws-ec2'; 4 | import { Cluster, ContainerImage } from 'aws-cdk-lib/aws-ecs'; 5 | import { Construct } from 'constructs'; 6 | import { 7 | Container, 8 | Environment, 9 | EnvironmentCapacityType, 10 | HttpLoadBalancerExtension, 11 | Service, 12 | ServiceDescription, 13 | } from '../lib'; 14 | 15 | 
class ResourceStack extends NestedStack { 16 | public readonly clusterName: string; 17 | public readonly vpcId: string; 18 | public readonly publicSubnetIds: string[]; 19 | public readonly privateSubnetIds: string[]; 20 | 21 | constructor(scope: Construct, id: string) { 22 | super(scope, id); 23 | 24 | const environment = new Environment(this, 'Environment'); 25 | 26 | this.clusterName = environment.cluster.clusterName; 27 | this.vpcId = environment.vpc.vpcId; 28 | this.privateSubnetIds = environment.vpc.privateSubnets.map(m => m.subnetId); 29 | this.publicSubnetIds = environment.vpc.publicSubnets.map(m => m.subnetId); 30 | } 31 | } 32 | 33 | class TestStack extends Stack { 34 | constructor(scope: Construct, id: string) { 35 | super(scope, id); 36 | // Create a nested stack with the shared resources 37 | const resourceStack = new ResourceStack(this, 'Resources'); 38 | 39 | // Import the vpc from the nested stack 40 | const vpc = Vpc.fromVpcAttributes(this, 'Vpc', { 41 | availabilityZones: resourceStack.availabilityZones, 42 | vpcId: resourceStack.vpcId, 43 | privateSubnetIds: resourceStack.privateSubnetIds, 44 | publicSubnetIds: resourceStack.publicSubnetIds, 45 | }); 46 | 47 | // Import the cluster from the nested stack 48 | const cluster = Cluster.fromClusterAttributes(this, 'Cluster', { 49 | clusterName: resourceStack.clusterName, 50 | securityGroups: [], 51 | vpc: vpc, 52 | }); 53 | 54 | // Create the environment from attributes. 55 | const environment = Environment.fromEnvironmentAttributes(this, 'Environment', { 56 | cluster, 57 | capacityType: EnvironmentCapacityType.FARGATE, 58 | }); 59 | 60 | // Add a workload. 61 | const serviceDescription = new ServiceDescription(); 62 | serviceDescription.add(new Container({ 63 | cpu: 256, 64 | memoryMiB: 512, 65 | trafficPort: 80, 66 | image: ContainerImage.fromRegistry('nathanpeck/name'), 67 | environment: { 68 | PORT: '80', 69 | }, 70 | })); 71 | serviceDescription.add(new HttpLoadBalancerExtension()); 72 | 73 | new Service(this, 'Service', { 74 | environment, 75 | serviceDescription, 76 | }); 77 | } 78 | } 79 | 80 | const app = new App(); 81 | new TestStack(app, 'imported-environment-integ'); 82 | 83 | /** 84 | * Expect this stack to deploy and show a load balancer DNS address. When you 85 | * request the address with curl, you should see the name container's output. 86 | * The load balancer may response 503 Service Temporarily Unavailable for a 87 | * short while, before you can see the container output. 88 | * 89 | * Example: 90 | * ``` 91 | * $ cdk --app 'node integ.imported-environment.js' deploy 92 | * ... 93 | * Outputs: 94 | * shared-cluster-integ.Serviceloadbalancerdnsoutput = share-Servi-6JALU1FDE36L-2093347098.us-east-1.elb.amazonaws.com 95 | * ... 
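 * # The 'nathanpeck/name' container simply replies with a name and the task's private hostname,
 * # so output like the line after the curl below confirms the imported environment is serving traffic.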
96 | * 97 | * $ curl share-Servi-6JALU1FDE36L-2093347098.us-east-1.elb.amazonaws.com 98 | * Keira (ip-10-0-153-44.ec2.internal) 99 | * ``` 100 | */ 101 | -------------------------------------------------------------------------------- /test/injecter.test.ts: -------------------------------------------------------------------------------- 1 | import { Stack } from 'aws-cdk-lib'; 2 | import { Template } from 'aws-cdk-lib/assertions'; 3 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 4 | import * as sns from 'aws-cdk-lib/aws-sns'; 5 | import { Container, Environment, InjecterExtension, InjectableTopic, Service, ServiceDescription } from '../lib'; 6 | 7 | describe('injecter', () => { 8 | test('correctly sets publish permissions for given topics', () => { 9 | // GIVEN 10 | const stack = new Stack(); 11 | 12 | const environment = new Environment(stack, 'production'); 13 | const serviceDescription = new ServiceDescription(); 14 | 15 | serviceDescription.add(new Container({ 16 | cpu: 256, 17 | memoryMiB: 512, 18 | trafficPort: 80, 19 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 20 | environment: { 21 | PORT: '80', 22 | }, 23 | })); 24 | 25 | // WHEN 26 | const topic1 = new InjectableTopic({ 27 | topic: new sns.Topic(stack, 'topic1'), 28 | }); 29 | 30 | const topic2 = new InjectableTopic({ 31 | topic: new sns.Topic(stack, 'topic2'), 32 | }); 33 | 34 | serviceDescription.add(new InjecterExtension({ 35 | injectables: [topic1, topic2], 36 | })); 37 | 38 | new Service(stack, 'my-service', { 39 | environment, 40 | serviceDescription, 41 | }); 42 | 43 | // THEN 44 | // Ensure creation of provided topics 45 | Template.fromStack(stack).resourceCountIs('AWS::SNS::Topic', 2); 46 | 47 | // Ensure the task role is given permissions to publish events to topics 48 | Template.fromStack(stack).hasResourceProperties('AWS::IAM::Policy', { 49 | PolicyDocument: { 50 | Statement: [ 51 | { 52 | Action: 'sns:Publish', 53 | Effect: 'Allow', 54 | Resource: { 55 | Ref: 'topic152D84A37', 56 | }, 57 | }, 58 | { 59 | Action: 'sns:Publish', 60 | Effect: 'Allow', 61 | Resource: { 62 | Ref: 'topic2A4FB547F', 63 | }, 64 | }, 65 | ], 66 | Version: '2012-10-17', 67 | }, 68 | }); 69 | 70 | // Ensure that the topic ARNs have been correctly appended to the environment variables 71 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::TaskDefinition', { 72 | ContainerDefinitions: [ 73 | { 74 | Cpu: 256, 75 | Environment: [ 76 | { 77 | Name: 'PORT', 78 | Value: '80', 79 | }, 80 | { 81 | Name: 'TOPIC1_TOPIC_ARN', 82 | Value: { 83 | Ref: 'topic152D84A37', 84 | }, 85 | }, 86 | { 87 | Name: 'TOPIC2_TOPIC_ARN', 88 | Value: { 89 | Ref: 'topic2A4FB547F', 90 | }, 91 | }, 92 | ], 93 | Image: 'nathanpeck/name', 94 | Essential: true, 95 | Memory: 512, 96 | Name: 'app', 97 | PortMappings: [ 98 | { 99 | ContainerPort: 80, 100 | Protocol: 'tcp', 101 | }, 102 | ], 103 | Ulimits: [ 104 | { 105 | HardLimit: 1024000, 106 | Name: 'nofile', 107 | SoftLimit: 1024000, 108 | }, 109 | ], 110 | }, 111 | ], 112 | }); 113 | }); 114 | }); -------------------------------------------------------------------------------- /test/multiple-environments.integ.snapshot/aws-ecs-integ.assets.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "21.0.0", 3 | "files": { 4 | "1c4ad1ddd84ec62cd00e1c01c2971172dcc3acca3cf10509e3f7dca20433d2e3": { 5 | "source": { 6 | "path": "aws-ecs-integ.template.json", 7 | "packaging": "file" 8 | }, 9 | "destinations": { 10 | "current_account-current_region": { 11 | 
"bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}", 12 | "objectKey": "1c4ad1ddd84ec62cd00e1c01c2971172dcc3acca3cf10509e3f7dca20433d2e3.json", 13 | "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}" 14 | } 15 | } 16 | } 17 | }, 18 | "dockerImages": {} 19 | } -------------------------------------------------------------------------------- /test/multiple-environments.integ.ts: -------------------------------------------------------------------------------- 1 | import { App, Stack } from 'aws-cdk-lib'; 2 | import { Mesh } from 'aws-cdk-lib/aws-appmesh'; 3 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 4 | import { Container, Environment, Service, ServiceDescription, AppMeshExtension } from '../lib'; 5 | 6 | const app = new App(); 7 | const stack = new Stack(app, 'aws-ecs-integ'); 8 | 9 | const production = new Environment(stack, 'production'); 10 | const development = new Environment(stack, 'development'); 11 | 12 | const productionMesh = new Mesh(stack, 'production-mesh'); 13 | const developmentMesh = new Mesh(stack, 'development-mesh'); 14 | 15 | /** Production name service */ 16 | const productionNameDescription = new ServiceDescription(); 17 | productionNameDescription.add(new Container({ 18 | cpu: 1024, 19 | memoryMiB: 2048, 20 | trafficPort: 80, 21 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 22 | environment: { 23 | PORT: '80', 24 | }, 25 | })); 26 | productionNameDescription.add(new AppMeshExtension({ mesh: productionMesh })); 27 | 28 | new Service(stack, 'name-production', { 29 | environment: production, 30 | serviceDescription: productionNameDescription, 31 | }); 32 | 33 | /** Development name service */ 34 | const developmentNameDescription = new ServiceDescription(); 35 | developmentNameDescription.add(new Container({ 36 | cpu: 1024, 37 | memoryMiB: 2048, 38 | trafficPort: 80, 39 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 40 | environment: { 41 | PORT: '80', 42 | }, 43 | })); 44 | developmentNameDescription.add(new AppMeshExtension({ mesh: developmentMesh })); 45 | 46 | new Service(stack, 'name-development', { 47 | environment: development, 48 | serviceDescription: developmentNameDescription, 49 | }); 50 | 51 | /** 52 | * This test verifies the edge case of creating multiple environments 53 | * on the same account to ensure that there are no conflicts. 
54 | */ -------------------------------------------------------------------------------- /test/publish-subscribe.integ.snapshot/aws-ecs-integ.assets.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "21.0.0", 3 | "files": { 4 | "a820140ad8525b8ed56ad2a7bcd9da99d6afc2490e8c91e34620886c011bdc91": { 5 | "source": { 6 | "path": "asset.a820140ad8525b8ed56ad2a7bcd9da99d6afc2490e8c91e34620886c011bdc91", 7 | "packaging": "zip" 8 | }, 9 | "destinations": { 10 | "current_account-current_region": { 11 | "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}", 12 | "objectKey": "a820140ad8525b8ed56ad2a7bcd9da99d6afc2490e8c91e34620886c011bdc91.zip", 13 | "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}" 14 | } 15 | } 16 | }, 17 | "d4054e0cd1b9f54f3db86bd878213678c88b9e40e1bd27b8064eb1ae9419fcb1": { 18 | "source": { 19 | "path": "aws-ecs-integ.template.json", 20 | "packaging": "file" 21 | }, 22 | "destinations": { 23 | "current_account-current_region": { 24 | "bucketName": "cdk-hnb659fds-assets-${AWS::AccountId}-${AWS::Region}", 25 | "objectKey": "d4054e0cd1b9f54f3db86bd878213678c88b9e40e1bd27b8064eb1ae9419fcb1.json", 26 | "assumeRoleArn": "arn:${AWS::Partition}:iam::${AWS::AccountId}:role/cdk-hnb659fds-file-publishing-role-${AWS::AccountId}-${AWS::Region}" 27 | } 28 | } 29 | } 30 | }, 31 | "dockerImages": {} 32 | } -------------------------------------------------------------------------------- /test/publish-subscribe.integ.ts: -------------------------------------------------------------------------------- 1 | import { App, Duration, Stack } from 'aws-cdk-lib'; 2 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 3 | import * as sns from 'aws-cdk-lib/aws-sns'; 4 | import * as sqs from 'aws-cdk-lib/aws-sqs'; 5 | import { Container, Environment, InjecterExtension, InjectableTopic, QueueExtension, Service, ServiceDescription, TopicSubscription } from '../lib'; 6 | 7 | 8 | const app = new App(); 9 | const stack = new Stack(app, 'aws-ecs-integ'); 10 | 11 | const environment = new Environment(stack, 'production'); 12 | 13 | const pubServiceDescription = new ServiceDescription(); 14 | 15 | pubServiceDescription.add(new Container({ 16 | cpu: 256, 17 | memoryMiB: 512, 18 | trafficPort: 80, 19 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 20 | environment: { 21 | PORT: '80', 22 | }, 23 | })); 24 | 25 | const topic1 = new InjectableTopic({ 26 | topic: new sns.Topic(stack, 'sign-up'), 27 | }); 28 | 29 | const topic2 = new InjectableTopic({ 30 | topic: new sns.Topic(stack, 'delete'), 31 | }); 32 | 33 | pubServiceDescription.add(new InjecterExtension({ 34 | injectables: [topic1, topic2], 35 | })); 36 | 37 | new Service(stack, 'Publisher', { 38 | environment: environment, 39 | serviceDescription: pubServiceDescription, 40 | }); 41 | 42 | const subServiceDescription = new ServiceDescription(); 43 | 44 | subServiceDescription.add(new Container({ 45 | cpu: 256, 46 | memoryMiB: 512, 47 | trafficPort: 80, 48 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 49 | environment: { 50 | PORT: '80', 51 | }, 52 | })); 53 | 54 | const topicSubscription1 = new TopicSubscription({ 55 | topic: topic1.topic, 56 | topicSubscriptionQueue: { 57 | queue: new sqs.Queue(stack, 'sign-up-queue'), 58 | scaleOnLatency: { 59 | acceptableLatency: Duration.minutes(10), 60 | messageProcessingTime: Duration.seconds(20), 61 | }, 62 | }, 63 | }); 64 | const 
topicSubscription2 = new TopicSubscription({ 65 | topic: topic2.topic, 66 | }); 67 | 68 | subServiceDescription.add(new QueueExtension({ 69 | subscriptions: [topicSubscription1, topicSubscription2], 70 | scaleOnLatency: { 71 | acceptableLatency: Duration.minutes(5), 72 | messageProcessingTime: Duration.seconds(20), 73 | }, 74 | })); 75 | 76 | new Service(stack, 'Worker', { 77 | environment: environment, 78 | serviceDescription: subServiceDescription, 79 | autoScaleTaskCount: { 80 | maxTaskCount: 10, 81 | }, 82 | }); -------------------------------------------------------------------------------- /test/queue-handler/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM public.ecr.aws/lambda/python:latest 2 | 3 | ADD . /opt/lambda 4 | WORKDIR /opt/lambda 5 | 6 | RUN pip3 install boto3 7 | RUN python3 test_index.py 8 | 9 | ENTRYPOINT [ "/bin/bash" ] -------------------------------------------------------------------------------- /test/queue-handler/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | #--------------------------------------------------------------------------------------------------- 3 | # executes unit tests 4 | # 5 | # prepares a staging directory with the requirements 6 | set -e 7 | script_dir=$(cd $(dirname $0) && pwd) 8 | 9 | # prepare staging directory 10 | staging=$(mktemp -d) 11 | mkdir -p ${staging} 12 | cd ${staging} 13 | 14 | # copy src and overlay with test 15 | cp ${script_dir}/../../lambda/queue/queue_backlog_calculator.py $PWD 16 | cp ${script_dir}/test_index.py $PWD 17 | cp ${script_dir}/Dockerfile $PWD 18 | 19 | pip3 install boto3 20 | python3 test_index.py 21 | #docker build . -------------------------------------------------------------------------------- /test/queue-handler/test_index.py: -------------------------------------------------------------------------------- 1 | import json 2 | import io 3 | import unittest 4 | import unittest.mock as mock 5 | import time 6 | from botocore.exceptions import ClientError 7 | from queue_backlog_calculator import QueueHandler 8 | 9 | mock_time = time.time() 10 | 11 | class TestQueueAutoscaling(unittest.TestCase): 12 | maxDiff = None 13 | ''' 14 | Test for unexpected error. 15 | ''' 16 | def test_unexpected_error(self): 17 | ecs_client = mock.Mock() 18 | ecs_client.describe_services.side_effect = ClientError({'Error': {'Code': 'UnexpectedError'}}, 'DescribeServices') 19 | 20 | sqs_client = mock.Mock() 21 | environ = { 22 | 'CLUSTER_NAME': 'TEST_CLUSTER', 23 | 'SERVICE_NAME': 'TEST_SERVICE', 24 | 'NAMESPACE': 'TEST', 25 | 'QUEUE_NAMES': 'queue1' 26 | } 27 | 28 | queue_handler = QueueHandler(ecs_client, sqs_client, environ) 29 | queue_handler.emit() 30 | 31 | self.assertRaisesRegex(Exception, r'UnexpectedError') 32 | 33 | ''' 34 | Test for Exception when the service doesn't exist in cluster. 
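    Here describe_services is mocked to return an empty service list, so the handler is expected
    to raise rather than emit any backlog metric.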
35 | ''' 36 | @mock.patch('sys.stdout', new_callable=io.StringIO) 37 | def test_no_services_in_cluster(self, _): 38 | ecs_client = mock.Mock() 39 | ecs_client.describe_services.return_value = {'services': []} 40 | 41 | sqs_client = mock.Mock() 42 | environ = { 43 | 'CLUSTER_NAME': 'TEST_CLUSTER', 44 | 'SERVICE_NAME': 'TEST_SERVICE', 45 | 'NAMESPACE': 'TEST', 46 | 'QUEUE_NAMES': 'queue1' 47 | } 48 | 49 | queue_handler = QueueHandler(ecs_client, sqs_client, environ) 50 | queue_handler.emit() 51 | 52 | self.assertRaisesRegex(Exception, r'There are no services with name {} in cluster: {}'.format(environ['SERVICE_NAME'], environ['CLUSTER_NAME'])) 53 | 54 | ''' 55 | Test 'backPerTask' value is equal to 'ApproximateNumberOfMessages' in the queue when no tasks are running. 56 | ''' 57 | @mock.patch('time.time', mock.MagicMock(return_value=mock_time)) 58 | @mock.patch('sys.stdout', new_callable=io.StringIO) 59 | def test_backlog_with_no_running_tasks(self, mock_stdout): 60 | ecs_client = mock.Mock() 61 | ecs_client.describe_services.return_value = {'services': [{'runningCount': 0}]} 62 | 63 | sqs_client = mock.Mock() 64 | sqs_client.get_queue_url.return_value = {'QueueUrl': 'queue1_url'} 65 | sqs_client.get_queue_attributes.return_value = {'Attributes': {'ApproximateNumberOfMessages':100}} 66 | environ = { 67 | 'CLUSTER_NAME': 'TEST_CLUSTER', 68 | 'SERVICE_NAME': 'TEST_SERVICE', 69 | 'NAMESPACE': 'TEST', 70 | 'QUEUE_NAMES': 'queue1' 71 | } 72 | 73 | queue_handler = QueueHandler(ecs_client, sqs_client, environ) 74 | queue_handler.emit() 75 | 76 | metric = json.dumps({ 77 | "_aws": { 78 | "Timestamp": int(mock_time*1000), 79 | "CloudWatchMetrics": [{ 80 | "Namespace": "TEST", 81 | "Dimensions": [["QueueName"]], 82 | "Metrics": [{"Name":"BacklogPerTask", "Unit": "Count"}] 83 | }], 84 | }, 85 | "QueueName": "queue1", 86 | "BacklogPerTask": 100, 87 | }) 88 | self.assertEqual(mock_stdout.getvalue(), metric+'\n') 89 | 90 | ''' 91 | Test 'backPerTask' metric is generated correctly for each queue. 
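    (The 'BacklogPerTask' value is apparently the queue's ApproximateNumberOfMessages divided by
    the service's running task count, rounded up: with 2 running tasks, queue1's 101 messages
    should yield 51 and queue2's 200 messages should yield 100.)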
92 | ''' 93 | @mock.patch('time.time', mock.MagicMock(return_value=mock_time)) 94 | @mock.patch('sys.stdout', new_callable=io.StringIO) 95 | def test_metric_generation_per_queue(self, mock_stdout): 96 | ecs_client = mock.Mock() 97 | ecs_client.describe_services.return_value = {'services': [{'runningCount': 2}]} 98 | 99 | val1 = { 100 | 'queue1': {'QueueUrl': 'queue1_url'}, 101 | 'queue2': {'QueueUrl': 'queue2_url'} 102 | } 103 | val2 = { 104 | 'queue1_url': {'Attributes': {'ApproximateNumberOfMessages':101}}, 105 | 'queue2_url': {'Attributes': {'ApproximateNumberOfMessages':200}} 106 | } 107 | 108 | sqs_client = mock.Mock() 109 | sqs_client.get_queue_url.side_effect = [val1['queue1'], val1['queue2']] 110 | sqs_client.get_queue_attributes.side_effect = [val2['queue1_url'], val2['queue2_url']] 111 | environ = { 112 | 'CLUSTER_NAME': 'TEST_CLUSTER', 113 | 'SERVICE_NAME': 'TEST_SERVICE', 114 | 'NAMESPACE': 'TEST', 115 | 'QUEUE_NAMES': 'queue1,queue2' 116 | } 117 | 118 | queue_handler = QueueHandler(ecs_client, sqs_client, environ) 119 | queue_handler.emit() 120 | 121 | metric1 = json.dumps({ 122 | "_aws": { 123 | "Timestamp": int(mock_time*1000), 124 | "CloudWatchMetrics": [{ 125 | "Namespace": "TEST", 126 | "Dimensions": [["QueueName"]], 127 | "Metrics": [{"Name":"BacklogPerTask", "Unit": "Count"}] 128 | }], 129 | }, 130 | "QueueName": "queue1", 131 | "BacklogPerTask": 51, 132 | }) 133 | metric2 = json.dumps({ 134 | "_aws": { 135 | "Timestamp": int(mock_time*1000), 136 | "CloudWatchMetrics": [{ 137 | "Namespace": "TEST", 138 | "Dimensions": [["QueueName"]], 139 | "Metrics": [{"Name":"BacklogPerTask", "Unit": "Count"}] 140 | }], 141 | }, 142 | "QueueName": "queue2", 143 | "BacklogPerTask": 100, 144 | }) 145 | 146 | self.assertEqual(mock_stdout.getvalue(), metric1+'\n'+metric2+'\n') 147 | 148 | if __name__ == "__main__": 149 | unittest.main() -------------------------------------------------------------------------------- /test/queue.lambda.test.ts: -------------------------------------------------------------------------------- 1 | import { spawnSync } from 'child_process'; 2 | import * as path from 'path'; 3 | 4 | test('queue handler', () => { 5 | const testScript = path.join(__dirname, 'queue-handler', 'test.sh'); 6 | const result = spawnSync(testScript, { stdio: 'inherit' }); 7 | expect(result.status).toBe(0); 8 | }); 9 | -------------------------------------------------------------------------------- /test/scale-on-cpu-utilization.test.ts: -------------------------------------------------------------------------------- 1 | import { Duration, Stack } from 'aws-cdk-lib'; 2 | import { Template } from 'aws-cdk-lib/assertions'; 3 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 4 | import { Container, Environment, ScaleOnCpuUtilization, Service, ServiceDescription } from '../lib'; 5 | 6 | describe('scale on cpu utilization', () => { 7 | test('scale on cpu utilization extension with no parameters should create a default autoscaling setup', () => { 8 | // GIVEN 9 | const stack = new Stack(); 10 | 11 | // WHEN 12 | const environment = new Environment(stack, 'production'); 13 | const serviceDescription = new ServiceDescription(); 14 | 15 | serviceDescription.add(new Container({ 16 | cpu: 256, 17 | memoryMiB: 512, 18 | trafficPort: 80, 19 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 20 | })); 21 | 22 | serviceDescription.add(new ScaleOnCpuUtilization()); 23 | 24 | new Service(stack, 'my-service', { 25 | environment, 26 | serviceDescription, 27 | }); 28 | 29 | // THEN 30 | 
Template.fromStack(stack).hasResourceProperties('AWS::ECS::Service', { 31 | DeploymentConfiguration: { 32 | MaximumPercent: 200, 33 | MinimumHealthyPercent: 100, 34 | }, 35 | DesiredCount: 2, 36 | }); 37 | 38 | Template.fromStack(stack).hasResourceProperties('AWS::ApplicationAutoScaling::ScalableTarget', { 39 | MaxCapacity: 8, 40 | MinCapacity: 2, 41 | ResourceId: { 42 | 'Fn::Join': [ 43 | '', 44 | [ 45 | 'service/', 46 | { 47 | Ref: 'productionenvironmentclusterC6599D2D', 48 | }, 49 | '/', 50 | { 51 | 'Fn::GetAtt': [ 52 | 'myserviceserviceServiceE9A5732D', 53 | 'Name', 54 | ], 55 | }, 56 | ], 57 | ], 58 | }, 59 | RoleARN: { 60 | 'Fn::Join': [ 61 | '', 62 | [ 63 | 'arn:', 64 | { 65 | Ref: 'AWS::Partition', 66 | }, 67 | ':iam::', 68 | { 69 | Ref: 'AWS::AccountId', 70 | }, 71 | ':role/aws-service-role/ecs.application-autoscaling.amazonaws.com/AWSServiceRoleForApplicationAutoScaling_ECSService', 72 | ], 73 | ], 74 | }, 75 | ScalableDimension: 'ecs:service:DesiredCount', 76 | ServiceNamespace: 'ecs', 77 | }); 78 | 79 | Template.fromStack(stack).hasResourceProperties('AWS::ApplicationAutoScaling::ScalingPolicy', { 80 | PolicyName: 'myserviceserviceTaskCountTargetmyservicetargetcpuutilization50E6628660', 81 | PolicyType: 'TargetTrackingScaling', 82 | ScalingTargetId: { 83 | Ref: 'myserviceserviceTaskCountTarget4268918D', 84 | }, 85 | TargetTrackingScalingPolicyConfiguration: { 86 | PredefinedMetricSpecification: { 87 | PredefinedMetricType: 'ECSServiceAverageCPUUtilization', 88 | }, 89 | ScaleInCooldown: 60, 90 | ScaleOutCooldown: 60, 91 | TargetValue: 50, 92 | }, 93 | }); 94 | }); 95 | 96 | test('should be able to set a custom scaling policy as well', () => { 97 | // GIVEN 98 | const stack = new Stack(); 99 | 100 | // WHEN 101 | const environment = new Environment(stack, 'production'); 102 | const serviceDescription = new ServiceDescription(); 103 | 104 | serviceDescription.add(new Container({ 105 | cpu: 256, 106 | memoryMiB: 512, 107 | trafficPort: 80, 108 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 109 | })); 110 | 111 | serviceDescription.add(new ScaleOnCpuUtilization({ 112 | initialTaskCount: 25, 113 | minTaskCount: 15, 114 | maxTaskCount: 30, 115 | targetCpuUtilization: 75, 116 | scaleInCooldown: Duration.minutes(3), 117 | scaleOutCooldown: Duration.minutes(3), 118 | })); 119 | 120 | new Service(stack, 'my-service', { 121 | environment, 122 | serviceDescription, 123 | }); 124 | 125 | // THEN 126 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::Service', { 127 | DeploymentConfiguration: { 128 | MaximumPercent: 200, 129 | MinimumHealthyPercent: 100, 130 | }, 131 | DesiredCount: 25, 132 | }); 133 | 134 | Template.fromStack(stack).hasResourceProperties('AWS::ApplicationAutoScaling::ScalableTarget', { 135 | MaxCapacity: 30, 136 | MinCapacity: 15, 137 | }); 138 | 139 | Template.fromStack(stack).hasResourceProperties('AWS::ApplicationAutoScaling::ScalingPolicy', { 140 | TargetTrackingScalingPolicyConfiguration: { 141 | ScaleInCooldown: 180, 142 | ScaleOutCooldown: 180, 143 | TargetValue: 75, 144 | }, 145 | }); 146 | }); 147 | 148 | test('should error if configuring autoscaling target both in the extension and the Service', () => { 149 | // GIVEN 150 | const stack = new Stack(); 151 | 152 | // WHEN 153 | const environment = new Environment(stack, 'production'); 154 | const serviceDescription = new ServiceDescription(); 155 | 156 | serviceDescription.add(new Container({ 157 | cpu: 256, 158 | memoryMiB: 512, 159 | trafficPort: 80, 160 | image: 
ecs.ContainerImage.fromRegistry('nathanpeck/name'), 161 | })); 162 | 163 | serviceDescription.add(new ScaleOnCpuUtilization()); 164 | // THEN 165 | expect(() => { 166 | new Service(stack, 'my-service', { 167 | environment, 168 | serviceDescription, 169 | autoScaleTaskCount: { 170 | maxTaskCount: 5, 171 | }, 172 | }); 173 | }).toThrow('Cannot specify \'autoScaleTaskCount\' in the Service construct and also provide a \'ScaleOnCpuUtilization\' extension. \'ScaleOnCpuUtilization\' is deprecated. Please only provide \'autoScaleTaskCount\'.'); 174 | }); 175 | }); -------------------------------------------------------------------------------- /test/service.test.ts: -------------------------------------------------------------------------------- 1 | import { Stack } from 'aws-cdk-lib'; 2 | import { Match, Template } from 'aws-cdk-lib/assertions'; 3 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 4 | import { Container, Environment, Service, ServiceDescription } from '../lib'; 5 | 6 | describe('service', () => { 7 | test('should error if a service is prepared with no addons', () => { 8 | // GIVEN 9 | const stack = new Stack(); 10 | 11 | // WHEN 12 | const environment = new Environment(stack, 'production'); 13 | const serviceDescription = new ServiceDescription(); 14 | 15 | // THEN 16 | expect(() => { 17 | new Service(stack, 'my-service', { 18 | environment, 19 | serviceDescription, 20 | }); 21 | }).toThrow(/Service 'my-service' must have a Container extension/); 22 | }); 23 | 24 | test('allows scaling on a target CPU utilization', () => { 25 | // GIVEN 26 | const stack = new Stack(); 27 | 28 | // WHEN 29 | const environment = new Environment(stack, 'production'); 30 | const serviceDescription = new ServiceDescription(); 31 | serviceDescription.add(new Container({ 32 | cpu: 256, 33 | memoryMiB: 512, 34 | trafficPort: 80, 35 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 36 | })); 37 | 38 | new Service(stack, 'my-service', { 39 | environment, 40 | serviceDescription, 41 | desiredCount: 3, 42 | autoScaleTaskCount: { 43 | maxTaskCount: 5, 44 | targetCpuUtilization: 70, 45 | }, 46 | }); 47 | 48 | // THEN 49 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::Service', { 50 | DesiredCount: Match.absent(), 51 | }); 52 | 53 | Template.fromStack(stack).hasResourceProperties('AWS::ApplicationAutoScaling::ScalableTarget', { 54 | MaxCapacity: 5, 55 | MinCapacity: 1, 56 | }); 57 | 58 | Template.fromStack(stack).hasResourceProperties('AWS::ApplicationAutoScaling::ScalingPolicy', { 59 | PolicyType: 'TargetTrackingScaling', 60 | TargetTrackingScalingPolicyConfiguration: { 61 | PredefinedMetricSpecification: { PredefinedMetricType: 'ECSServiceAverageCPUUtilization' }, 62 | TargetValue: 70, 63 | }, 64 | }); 65 | }); 66 | 67 | test('allows scaling on a target memory utilization', () => { 68 | // GIVEN 69 | const stack = new Stack(); 70 | 71 | // WHEN 72 | const environment = new Environment(stack, 'production'); 73 | const serviceDescription = new ServiceDescription(); 74 | serviceDescription.add(new Container({ 75 | cpu: 256, 76 | memoryMiB: 512, 77 | trafficPort: 80, 78 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 79 | })); 80 | 81 | new Service(stack, 'my-service', { 82 | environment, 83 | serviceDescription, 84 | desiredCount: 3, 85 | autoScaleTaskCount: { 86 | maxTaskCount: 5, 87 | targetMemoryUtilization: 70, 88 | }, 89 | }); 90 | 91 | // THEN 92 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::Service', { 93 | DesiredCount: Match.absent(), 94 | }); 95 | 96 | 
Template.fromStack(stack).hasResourceProperties('AWS::ApplicationAutoScaling::ScalableTarget', { 97 | MaxCapacity: 5, 98 | MinCapacity: 1, 99 | }); 100 | 101 | Template.fromStack(stack).hasResourceProperties('AWS::ApplicationAutoScaling::ScalingPolicy', { 102 | PolicyType: 'TargetTrackingScaling', 103 | TargetTrackingScalingPolicyConfiguration: { 104 | PredefinedMetricSpecification: { PredefinedMetricType: 'ECSServiceAverageMemoryUtilization' }, 105 | TargetValue: 70, 106 | }, 107 | }); 108 | }); 109 | 110 | test('should error when no auto scaling policies have been configured after creating the auto scaling target', () => { 111 | // GIVEN 112 | const stack = new Stack(); 113 | 114 | // WHEN 115 | const environment = new Environment(stack, 'production'); 116 | const serviceDescription = new ServiceDescription(); 117 | 118 | serviceDescription.add(new Container({ 119 | cpu: 256, 120 | memoryMiB: 512, 121 | trafficPort: 80, 122 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 123 | })); 124 | 125 | // THEN 126 | expect(() => { 127 | new Service(stack, 'my-service', { 128 | environment, 129 | serviceDescription, 130 | autoScaleTaskCount: { 131 | maxTaskCount: 5, 132 | }, 133 | }); 134 | }).toThrow(/The auto scaling target for the service 'my-service' has been created but no auto scaling policies have been configured./); 135 | }); 136 | }); -------------------------------------------------------------------------------- /test/xray.test.ts: -------------------------------------------------------------------------------- 1 | import { Stack } from 'aws-cdk-lib'; 2 | import { Template } from 'aws-cdk-lib/assertions'; 3 | import * as ecs from 'aws-cdk-lib/aws-ecs'; 4 | import { Container, Environment, XRayExtension, Service, ServiceDescription } from '../lib'; 5 | 6 | describe('xray', () => { 7 | test('should be able to add AWS X-Ray to a service', () => { 8 | // GIVEN 9 | const stack = new Stack(); 10 | 11 | // WHEN 12 | const environment = new Environment(stack, 'production'); 13 | const serviceDescription = new ServiceDescription(); 14 | 15 | serviceDescription.add(new Container({ 16 | cpu: 256, 17 | memoryMiB: 512, 18 | trafficPort: 80, 19 | image: ecs.ContainerImage.fromRegistry('nathanpeck/name'), 20 | })); 21 | 22 | serviceDescription.add(new XRayExtension()); 23 | 24 | new Service(stack, 'my-service', { 25 | environment, 26 | serviceDescription, 27 | }); 28 | 29 | // THEN 30 | Template.fromStack(stack).hasResourceProperties('AWS::ECS::TaskDefinition', { 31 | ContainerDefinitions: [ 32 | { 33 | Cpu: 256, 34 | DependsOn: [ 35 | { 36 | Condition: 'HEALTHY', 37 | ContainerName: 'xray', 38 | }, 39 | ], 40 | Essential: true, 41 | Image: 'nathanpeck/name', 42 | Memory: 512, 43 | Name: 'app', 44 | PortMappings: [ 45 | { 46 | ContainerPort: 80, 47 | Protocol: 'tcp', 48 | }, 49 | ], 50 | Ulimits: [ 51 | { 52 | HardLimit: 1024000, 53 | Name: 'nofile', 54 | SoftLimit: 1024000, 55 | }, 56 | ], 57 | }, 58 | { 59 | Environment: [ 60 | { 61 | Name: 'AWS_REGION', 62 | Value: { 63 | Ref: 'AWS::Region', 64 | }, 65 | }, 66 | ], 67 | Essential: true, 68 | HealthCheck: { 69 | Command: [ 70 | 'CMD-SHELL', 71 | 'curl -s http://localhost:2000', 72 | ], 73 | Interval: 5, 74 | Retries: 3, 75 | StartPeriod: 10, 76 | Timeout: 2, 77 | }, 78 | Image: 'amazon/aws-xray-daemon:latest', 79 | LogConfiguration: { 80 | LogDriver: 'awslogs', 81 | Options: { 82 | 'awslogs-group': { 83 | Ref: 'myservicetaskdefinitionxrayLogGroupC0252525', 84 | }, 85 | 'awslogs-stream-prefix': 'xray', 86 | 'awslogs-region': { 87 | Ref: 
'AWS::Region', 88 | }, 89 | }, 90 | }, 91 | MemoryReservation: 256, 92 | Name: 'xray', 93 | User: '1337', 94 | }, 95 | ], 96 | Cpu: '256', 97 | ExecutionRoleArn: { 98 | 'Fn::GetAtt': [ 99 | 'myservicetaskdefinitionExecutionRole0CE74AD0', 100 | 'Arn', 101 | ], 102 | }, 103 | Family: 'myservicetaskdefinition', 104 | Memory: '512', 105 | NetworkMode: 'awsvpc', 106 | RequiresCompatibilities: [ 107 | 'EC2', 108 | 'FARGATE', 109 | ], 110 | TaskRoleArn: { 111 | 'Fn::GetAtt': [ 112 | 'myservicetaskdefinitionTaskRole92ACD903', 113 | 'Arn', 114 | ], 115 | }, 116 | }); 117 | }); 118 | }); -------------------------------------------------------------------------------- /tsconfig.dev.json: -------------------------------------------------------------------------------- 1 | // ~~ Generated by projen. To modify, edit .projenrc.ts and run "npx projen". 2 | { 3 | "compilerOptions": { 4 | "alwaysStrict": true, 5 | "declaration": true, 6 | "esModuleInterop": true, 7 | "experimentalDecorators": true, 8 | "inlineSourceMap": true, 9 | "inlineSources": true, 10 | "lib": [ 11 | "es2020" 12 | ], 13 | "module": "CommonJS", 14 | "noEmitOnError": false, 15 | "noFallthroughCasesInSwitch": true, 16 | "noImplicitAny": true, 17 | "noImplicitReturns": true, 18 | "noImplicitThis": true, 19 | "noUnusedLocals": true, 20 | "noUnusedParameters": true, 21 | "resolveJsonModule": true, 22 | "strict": true, 23 | "strictNullChecks": true, 24 | "strictPropertyInitialization": true, 25 | "stripInternal": true, 26 | "target": "ES2020" 27 | }, 28 | "include": [ 29 | "src/**/*.ts", 30 | "test/**/*.ts", 31 | ".projenrc.ts", 32 | "projenrc/**/*.ts" 33 | ], 34 | "exclude": [ 35 | "node_modules" 36 | ] 37 | } 38 | --------------------------------------------------------------------------------