├── .DS_Store
├── .github
├── ISSUE_TEMPLATE
│ ├── committee-suggestion.yml
│ ├── other.md
│ └── tracking-issue.md
├── PULL_REQUEST_TEMPLATE.md
└── workflows
│ ├── pr-lint.yml
│ └── render-table.yml
├── .gitignore
├── .markdownlint.json
├── .mergify.yml
├── .prettierrc.yaml
├── 0000-template.md
├── ACCEPTED.md
├── CLOSED.md
├── CODE_OF_CONDUCT.md
├── FULL_INDEX.md
├── LICENSE
├── NOTICE
├── PROPOSED.md
├── README.md
├── images
├── 0300
│ ├── cli-lib-alpha-usage.png
│ ├── iohost.png
│ ├── today.png
│ └── tomorrow.png
├── 0693
│ └── tree_traversal.drawio.png
├── AspectsDiagram1.png
├── EventBridge-Scheduler-2023-03-05-1723.excalidraw.png
├── GitHubArtifacts.png
├── MaintenancePolicy.png
├── PipelineRollback.png
├── ProposedAspectInvocationOrder.png
├── bedrockKnowledgebase.png
├── cdk-construct-hub
│ ├── architecture-diagram.drawio
│ ├── architecture-diagram.png
│ ├── detail-page.png
│ └── landing-page.png
├── garbagecollection.png
└── lifecycle.png
├── lint.sh
├── render-table.sh
├── text
├── 0001-cdk-update.md
├── 0006-monolothic-packaging.md
├── 00110-cli-framework-compatibility-strategy.md
├── 0039-reduce-module-size.md
├── 0049-continuous-delivery.md
├── 0052-resource-importing-support.md
├── 0055-feature-flags.md
├── 0063-precreated-roles.md
├── 0064-asset-garbage-collection.md
├── 0077-import-external-resources.md
├── 0079-cdk-2.0.md
├── 0092-asset-publishing.md
├── 0095-cognito-construct-library.md
├── 0095-cognito-construct-library
│ └── working-backwards-readme.md
├── 0107-construct-library-module-lifecycle.md
├── 0162-refactoring-support.md
├── 0171-cloudfront-redesign.md
├── 0192-remove-constructs-compat.md
├── 0249-v2-experiments.expired.md
├── 0249-v2-experiments.md
├── 0253-cdk-metadata-v2.md
├── 0264-registry-schema-codegen.md
├── 0300-programmatic-toolkit.md
├── 0308-cli-advisories.md
├── 0322-cdk-pipelines-updated-api.md
├── 0324-cdk-construct-hub.md
├── 0328-polyglot-assert.md
├── 0340-firehose-l2.md
├── 0374-jsii-ts-version.md
├── 0431-sagemaker-l2-endpoint.md
├── 0436-gamelift-l2.md
├── 0456-elasticache-l2.md
├── 0473-eventbridge-pipes.md
├── 0474-event-bridge-scheduler-l2.md
├── 0477-policy-validation.md
├── 0485-aws-batch.md
├── 0497-glue-l2-construct.md
├── 0499-appconfig-constructs.md
├── 0502_aws-vpclattice.md
├── 0507-subnets.md
├── 0507-subnets
│ └── alternative-working-backwards.md
├── 0510-dynamodb-global-table.md
├── 0513-app-specific-staging.md
├── 0605-eks-rewrite.md
├── 0617-cloudfront-oac-l2.md
├── 0648-aspects-priority-ordering.md
├── 0670-aws-applicationsignals-enablement-l2.md
├── 0673-aws-applicationsignals-slo-l2.md
├── 0686-bedrock-l2-construct.md
├── 0693-property-injection.md
├── 0710-node-deprecation-strategy.md
├── 204-golang-bindings.md
├── 287-cli-deprecation-warnings.md
├── 353-cfn-registry-constructs.md
└── 359-construct-hub-deny-list.md
└── tools
├── linters
├── .gitignore
├── package-lock.json
└── package.json
└── rfc-render
├── .gitignore
├── fetch-issues.js
├── inject-table.js
├── package-lock.json
├── package.json
├── render-rfc-table.js
└── status.js
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/.DS_Store
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/committee-suggestion.yml:
--------------------------------------------------------------------------------
1 | name: Working Groups Suggestion
2 | description: Suggest a new sub working group for the CDK Contributor Council
3 | title: "[Working Group Suggestion]: "
4 | labels: ["working-group-suggestion", "needs-review"]
5 | body:
6 | - type: markdown
7 | attributes:
8 | value: |
9 | Thanks for taking the time to suggest a new working group! Please fill out this form with as much detail as possible.
10 |
11 | - type: input
12 | id: working-group-name
13 | attributes:
14 | label: Working Group Name
15 | description: Provide a clear and concise name for the proposed working group
16 | placeholder: "e.g., Documentation Standards working group"
17 | validations:
18 | required: true
19 |
20 | - type: textarea
21 | id: purpose-mission
22 | attributes:
23 | label: Purpose and Mission
24 | description: Describe the main purpose and mission of this working group. What problems will it solve?
25 | placeholder: |
26 | Please include:
27 | - Main objectives
28 | - Key responsibilities
29 | - How it aligns with the project's goals
30 | validations:
31 | required: true
32 |
33 | - type: textarea
34 | id: expected-outcomes
35 | attributes:
36 | label: Expected Outcomes
37 | description: What are the specific outcomes and deliverables expected from this working group?
38 | placeholder: |
39 | Example outcomes:
40 | - Standardized documentation templates
41 | - Monthly progress reports
42 | - Specific improvements or changes
43 | validations:
44 | required: true
45 |
46 | - type: textarea
47 | id: resource-requirements
48 | attributes:
49 | label: Resource Requirements
50 | description: What resources would this working group need to function effectively?
51 | placeholder: |
52 | Consider:
53 | - Number of working group members
54 | - Time commitments
55 | - Tools or infrastructure
56 | - Other resources
57 | validations:
58 | required: true
59 |
60 | - type: textarea
61 | id: additional-information
62 | attributes:
63 | label: Additional Information
64 | description: Any other relevant information about the proposed working group
65 | placeholder: "Add any other context or details about your suggestion here"
66 | validations:
67 | required: false
68 |
69 | - type: checkboxes
70 | id: terms
71 | attributes:
72 | label: Code of Conduct
73 | description: By submitting this suggestion, you agree to follow our project's Code of Conduct
74 | options:
75 | - label: I agree to follow this project's Code of Conduct
76 | required: true
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/other.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "Other Issues"
3 | about: "Anything else regarding this repository"
4 | title: issue title
5 | ---
6 |
7 | ## Description
8 |
9 | A description of the issue, and a proposed resolution.
10 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/tracking-issue.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "RFC Tracking Issue"
3 | about: "Tracking issue for an RFC"
4 | title: proposal title
5 | labels: status/proposed
6 | ---
7 |
8 | ## Description
9 |
10 | Short description of the proposed feature.
11 |
12 | ## Roles
13 |
14 | | Role | User
15 | |---------------------|------------------------------
16 | | Proposed by | @alias
17 | | Author(s) | @alias, @alias, @alias
18 | | API Bar Raiser | @alias
19 | | Stakeholders | @alias, @alias, @alias
20 |
21 | > See [RFC Process](https://github.com/aws/aws-cdk-rfcs#rfc-process) for details
22 |
23 | ## Workflow
24 |
25 | - [x] Tracking issue created (label: `status/proposed`)
26 | - [ ] API bar raiser assigned (ping us at [#aws-cdk-rfcs](https://cdk-dev.slack.com/archives/C025ZFGMUCD) if needed)
27 | - [ ] Kick off meeting
28 | - [ ] RFC pull request submitted (label: `status/review`)
29 | - [ ] Community reach out (via Slack and/or Twitter)
30 | - [ ] API signed-off (label `status/api-approved` applied to pull request)
31 | - [ ] Final comments period (label: `status/final-comments-period`)
32 | - [ ] Approved and merged (label: `status/approved`)
33 | - [ ] Execution plan submitted (label: `status/planning`)
34 | - [ ] Plan approved and merged (label: `status/implementing`)
35 | - [ ] Implementation complete (label: `status/done`)
36 |
37 | ---
38 |
39 | > The author is responsible for progressing the RFC according to this checklist, and
40 | apply the relevant labels to this issue so that the RFC table in README gets
41 | updated.
42 |
--------------------------------------------------------------------------------
/.github/PULL_REQUEST_TEMPLATE.md:
--------------------------------------------------------------------------------
1 | This is a request for comments about {RFC_DESCRIPTION}. See #{TRACKING_ISSUE} for
2 | additional details.
3 |
4 | APIs are signed off by @{BAR_RAISER}.
5 |
6 | ---
7 |
8 | _By submitting this pull request, I confirm that my contribution is made under
9 | the terms of the Apache-2.0 license_
10 |
11 |
--------------------------------------------------------------------------------
/.github/workflows/pr-lint.yml:
--------------------------------------------------------------------------------
1 | name: PR Lint
2 |
3 | on:
4 | pull_request_target: {}
5 | workflow_dispatch: {}
6 |
7 | permissions:
8 | contents: read
9 |
10 | jobs:
11 | markdownlint:
12 | runs-on: ubuntu-latest
13 | steps:
14 | # Checkout of the source to be validated
15 | - uses: actions/checkout@v4
16 | with:
17 | ref: "${{ github.event.pull_request.merge_commit_sha }}"
18 | path: checkout_pr
19 |
20 | # Checkout of tools from 'main'; necessary to avoid compromise of GitHub token
21 | # (due to pull_request_target)
22 | - uses: actions/checkout@v4
23 | with:
24 | ref: main
25 | path: checkout_main
26 |
27 | - uses: actions/setup-node@v4
28 | with:
29 | node-version: lts/*
30 |
31 | # Run linter from 'main' on the working copy
32 | - name: run linter
33 | run: |
34 | cd checkout_pr
35 | ../checkout_main/lint.sh
36 |
--------------------------------------------------------------------------------
/.github/workflows/render-table.yml:
--------------------------------------------------------------------------------
1 | name: Render RFC Table
2 | on:
3 | issues:
4 | types:
5 | [
6 | opened,
7 | edited,
8 | deleted,
9 | transferred,
10 | assigned,
11 | unassigned,
12 | labeled,
13 | unlabeled,
14 | ]
15 | workflow_dispatch: {}
16 |
17 | jobs:
18 | render:
19 | runs-on: ubuntu-latest
20 | concurrency:
21 | group: update-table
22 | cancel-in-progress: true
23 | permissions:
24 | pull-requests: write
25 | steps:
26 | - uses: actions/checkout@v4
27 | - uses: actions/setup-node@v4
28 | with:
29 | node-version: lts/*
30 | - name: install dependencies
31 | run: npm --prefix tools/rfc-render ci
32 | - name: render tables
33 | env:
34 | PROJEN_GITHUB_TOKEN: ${{ secrets.PROJEN_GITHUB_TOKEN }}
35 | run: npm --prefix tools/rfc-render run render:all
36 | - name: Create Pull Request
37 | id: pr
38 | uses: peter-evans/create-pull-request@v5
39 | with:
40 | token: ${{ secrets.PROJEN_GITHUB_TOKEN }}
41 | commit-message: Update RFC table in README
42 | title: Update RFC table in README
43 | branch: auto/update-rfc-table
44 | labels: auto-approve
45 | - name: Auto approve PR
46 | if: ${{ steps.pr.outputs.pull-request-number }}
47 | uses: hmarr/auto-approve-action@v3
48 | with:
49 | pull-request-number: ${{ steps.pr.outputs.pull-request-number }}
50 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # VSCode Local history extension
2 | .history/
3 | *.swp
--------------------------------------------------------------------------------
/.markdownlint.json:
--------------------------------------------------------------------------------
1 | {
2 | "blank_line": false,
3 | "commands-show-output": false,
4 | "fenced-code-language": false,
5 | "first-line-h1": false,
6 | "MD013": {
7 | "line_length": 150,
8 | "tables": false
9 | },
10 | "MD034": false,
11 | "MD046": { "style": "fenced" },
12 | "MD049": false,
13 | "MD050": false,
14 | "no-duplicate-heading": {
15 | "siblings_only": true
16 | },
17 | "no-inline-html": {
18 | "allowed_elements": [
19 | "br",
20 | "details",
21 | "span",
22 | "summary"
23 | ]
24 | },
25 | "no-trailing-punctuation": {
26 | "punctuation": ".,;:!。,;:!"
27 | },
28 | "ol-prefix": false,
29 | "single-h1": false,
30 | "ul-style": false
31 | }
32 |
--------------------------------------------------------------------------------
/.mergify.yml:
--------------------------------------------------------------------------------
1 |
2 | # See https://doc.mergify.io
3 |
4 | queue_rules:
5 | - name: default
6 | conditions:
7 | - -title~=(WIP|wip)
8 | - -label~=(blocked|do-not-merge)
9 | - -merged
10 | - -closed
11 | - "#approved-reviews-by>=1"
12 | - -approved-reviews-by~=author
13 | - "#changes-requested-reviews-by=0"
14 | - "#commented-reviews-by=0"
15 | merge_method: squash
16 | commit_message_template: |-
17 | {{ title }} (#{{ number }})
18 | {{ body }}
19 |
20 | pull_request_rules:
21 | - name: automatic merge
22 | actions:
23 | comment:
24 | message: Thank you for contributing! Your pull request is now being automatically merged.
25 | queue:
26 | name: default
27 | delete_head_branch: {}
28 | dismiss_reviews: {}
29 | conditions:
30 | - -title~=(WIP|wip)
31 | - -label~=(blocked|do-not-merge)
32 | - -merged
33 | - -closed
34 | - "#approved-reviews-by>=1"
35 | - -approved-reviews-by~=author
36 | - "#changes-requested-reviews-by=0"
37 | - "#commented-reviews-by=0"
38 |
--------------------------------------------------------------------------------
/.prettierrc.yaml:
--------------------------------------------------------------------------------
1 | printWidth: 80
2 | proseWrap: always
3 | semi: true
4 | singleQuote: true
5 | quoteProps: as-needed
6 | trailingComma: all
7 |
--------------------------------------------------------------------------------
/0000-template.md:
--------------------------------------------------------------------------------
1 | # {RFC_TITLE}
2 |
3 | * **Original Author(s):**: @{AUTHOR}
4 | * **Tracking Issue**: #{TRACKING_ISSUE}
5 | * **API Bar Raiser**: @{BAR_RAISER_USER}
6 |
7 | > Write one sentence which is a brief description of the feature. It should describe:
8 | >
9 | > * What is the user pain we are solving?
10 | > * How does it impact users?
11 |
12 | ## Working Backwards
13 |
14 | > This section should contain one or more "artifacts from the future", as if the
15 | > feature was already released and we are publishing its CHANGELOG, README,
16 | > CONTRIBUTING.md and optionally a PRESS RELEASE. This is the most important
17 | > section of your RFC. It's a powerful thought exercise which will challenge you
18 | > to truly think about this feature from a user's point of view.
19 | >
20 | > Choose *one or more* of the options below:
21 | >
22 | > * **CHANGELOG**: Write the changelog entry for this feature in conventional
23 | > form (e.g. `feat(eks): cluster tags`). If this change includes a breaking
24 | > change, include a `BREAKING CHANGE` clause with information on how to
25 | > migrate. If migration is complicated, refer to a fictional GitHub issue and
26 | > add its contents here.
27 | >
28 | > * **README**: If this is a new feature, write the README section which
29 | > describes this new feature. It should describe the feature and walk users
30 | > through usage examples and description of the various options and behavior.
31 | >
32 | > * **PRESS RELEASE**: If this is a major feature (~6 months of work), write the
33 | > press release which announces this feature. The press release is a single
34 | > page that includes 7 paragraphs: (1) summary, (2) problem, (3) solution, (4)
35 | > leader quote, (5) user experience, (6) customer testimonial and (7) one
36 | > sentence call to action.
37 |
38 | ---
39 |
40 | Ticking the box below indicates that the public API of this RFC has been
41 | signed-off by the API bar raiser (the `status/api-approved` label was applied to the
42 | RFC pull request):
43 |
44 | ```
45 | [ ] Signed-off by API Bar Raiser @xxxxx
46 | ```
47 |
48 | ## Public FAQ
49 |
50 | > This section should include answers to questions readers will likely ask about
51 | > this release. Similar to the "working backwards", this section should be
52 | > written in a language as if the feature is now released.
53 | >
54 | > The template includes some common questions; feel free to add any questions
55 | > that might be relevant to this feature or omit questions that you feel are not
56 | > applicable.
57 |
58 | ### What are we launching today?
59 |
60 | > What exactly are we launching? Is this a new feature in an existing module? A
61 | > new module? A whole framework? A change in the CLI?
62 |
63 | ### Why should I use this feature?
64 |
65 | > Describe use cases that are addressed by this feature.
66 |
67 | ## Internal FAQ
68 |
69 | > The goal of this section is to help decide if this RFC should be implemented.
70 | > It should include answers to questions that the team is likely to ask. Contrary
71 | > to the rest of the RFC, answers should be written "from the present" and
72 | > likely discuss design approach, implementation plans, alternatives considered
73 | > and other considerations that will help decide if this RFC should be
74 | > implemented.
75 |
76 | ### Why are we doing this?
77 |
78 | > What is the motivation for this change?
79 |
80 | ### Why should we _not_ do this?
81 |
82 | > Is there a way to address this use case with the current product? What are the
83 | > downsides of implementing this feature?
84 |
85 | ### What is the technical solution (design) of this feature?
86 |
87 | > Briefly describe the high-level design approach for implementing this feature.
88 | >
89 | > As appropriate, you can add an appendix with a more detailed design document.
90 | >
91 | > This is a good place to reference a prototype or proof of concept, which is
92 | > highly recommended for most RFCs.
93 |
94 | ### Is this a breaking change?
95 |
96 | > If the answer is no, state so briefly. Otherwise:
97 | >
98 | > Describe what ways did you consider to deliver this without breaking users?
99 | >
100 | > Make sure to include a `BREAKING CHANGE` clause under the CHANGELOG section with a description of the breaking
101 | > changes and the migration path.
102 |
103 | ### What alternative solutions did you consider?
104 |
105 | > Briefly describe alternative approaches that you considered. If there are
106 | > hairy details, include them in an appendix.
107 |
108 | ### What are the drawbacks of this solution?
109 |
110 | > Describe any problems/risks that can be introduced if we implement this RFC.
111 |
112 | ### What is the high-level project plan?
113 |
114 | > Describe your plan on how to deliver this feature from prototyping to GA.
115 | > Especially think about how to "bake" it in the open and get constant feedback
116 | > from users before you stabilize the APIs.
117 | >
118 | > If you have a project board with your implementation plan, this is a good
119 | > place to link to it.
120 |
121 | ### Are there any open issues that need to be addressed later?
122 |
123 | > Describe any major open issues that this RFC did not take into account. Once
124 | > the RFC is approved, create GitHub issues for these issues and update this RFC
125 | > or the project board with these issue IDs.
126 |
127 | ## Appendix
128 |
129 | Feel free to add any number of appendices as you see fit. Appendices are
130 | expected to allow readers to dive deeper to certain sections if they like. For
131 | example, you can include an appendix which describes the detailed design of an
132 | algorithm and reference it from the FAQ.
133 |
--------------------------------------------------------------------------------
/ACCEPTED.md:
--------------------------------------------------------------------------------
1 | # Accepted RFCs
2 |
3 | **Jump to**:
4 | [Full list](./FULL_INDEX.md) |
5 | [Accepted](./ACCEPTED.md) |
6 | [Proposed](./PROPOSED.md) |
7 | [Closed](./CLOSED.md)
8 |
9 |
10 | \#|Title|Owner|Status
11 | ---|-----|-----|------
12 | [502](https://github.com/aws/aws-cdk-rfcs/issues/502)|[Amazon VPC Lattice L2 Construct](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0502_aws-vpclattice.md)|[@TheRealAmazonKendra](https://github.com/TheRealAmazonKendra)|👍 approved
13 | [162](https://github.com/aws/aws-cdk-rfcs/issues/162)|[CDK Refactoring Tools](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0162-refactoring-support.md)|[@otaviomacedo](https://github.com/otaviomacedo)|👷 implementing
14 | [670](https://github.com/aws/aws-cdk-rfcs/issues/670)|[AWS CloudWatch Application Signals L2 Constructs](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0670-aws-applicationsignals-enablement-l2.md)||👷 implementing
15 | [673](https://github.com/aws/aws-cdk-rfcs/issues/673)|[AWS CloudWatch Application Signals L2 Constructs for SLO](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0673-aws-applicationsignals-slo-l2.md)||👷 implementing
16 | [686](https://github.com/aws/aws-cdk-rfcs/issues/686)|[L2 Constructs for Bedrock](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0686-bedrock-l2-construct.md)|[@dineshSajwan](https://github.com/dineshSajwan)|👷 implementing
17 | [1](https://github.com/aws/aws-cdk-rfcs/issues/1)|[CDK Watch](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0001-cdk-update.md)||✅ done
18 | [4](https://github.com/aws/aws-cdk-rfcs/issues/4)|[CDK Testing Toolkit](https://github.com/aws/aws-cdk-rfcs/issues/4)|[@nija-at](https://github.com/nija-at)|✅ done
19 | [5](https://github.com/aws/aws-cdk-rfcs/issues/5)|[Security-restricted environments](https://github.com/aws/aws-cdk-rfcs/issues/5)||✅ done
20 | [6](https://github.com/aws/aws-cdk-rfcs/issues/6)|[Monolithic Packaging](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0006-monolothic-packaging.md)||✅ done
21 | [7](https://github.com/aws/aws-cdk-rfcs/issues/7)|[Lambda Bundles](https://github.com/aws/aws-cdk-rfcs/issues/7)||✅ done
22 | [8](https://github.com/aws/aws-cdk-rfcs/issues/8)|[Project Structure Guidelines](https://github.com/aws/aws-cdk-rfcs/issues/8)|[@rix0rrr](https://github.com/rix0rrr)|✅ done
23 | [15](https://github.com/aws/aws-cdk-rfcs/issues/15)|[Scaffolding](https://github.com/aws/aws-cdk-rfcs/issues/15)||✅ done
24 | [16](https://github.com/aws/aws-cdk-rfcs/issues/16)|[RFC Process](https://github.com/aws/aws-cdk-rfcs/pull/53)|[@MrArnoldPalmer](https://github.com/MrArnoldPalmer)|✅ done
25 | [31](https://github.com/aws/aws-cdk-rfcs/issues/31)|[Integration tests](https://github.com/aws/aws-cdk-rfcs/issues/31)||✅ done
26 | [34](https://github.com/aws/aws-cdk-rfcs/issues/34)|[Third-party construct ecosystem](https://github.com/aws/aws-cdk-rfcs/issues/34)||✅ done
27 | [35](https://github.com/aws/aws-cdk-rfcs/issues/35)|[Publish construct library guidelines](https://github.com/aws/aws-cdk-rfcs/issues/35)||✅ done
28 | [36](https://github.com/aws/aws-cdk-rfcs/issues/36)|[Constructs Programming Model](https://github.com/aws/aws-cdk-rfcs/issues/36)||✅ done
29 | [37](https://github.com/aws/aws-cdk-rfcs/issues/37)|[Release from a "release" branch](https://github.com/aws/aws-cdk-rfcs/issues/37)|[@MrArnoldPalmer](https://github.com/MrArnoldPalmer)|✅ done
30 | [39](https://github.com/aws/aws-cdk-rfcs/issues/39)|[Release public artifacts (lambda layers for custom resources, docker images)](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0039-reduce-module-size.md)||✅ done
31 | [49](https://github.com/aws/aws-cdk-rfcs/issues/49)|[CI/CD for CDK apps](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0049-continuous-delivery.md)|[@rix0rrr](https://github.com/rix0rrr)|✅ done
32 | [52](https://github.com/aws/aws-cdk-rfcs/issues/52)|[Support resource import](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0052-resource-importing-support.md)||✅ done
33 | [55](https://github.com/aws/aws-cdk-rfcs/issues/55)|[Feature Flags](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0055-feature-flags.md)||✅ done
34 | [63](https://github.com/aws/aws-cdk-rfcs/issues/63)|[CDK in Secure Environments](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0063-precreated-roles.md)||✅ done
35 | [64](https://github.com/aws/aws-cdk-rfcs/issues/64)|[Garbage Collection for Assets](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0064-asset-garbage-collection.md)|[@kaizencc](https://github.com/kaizencc)|✅ done
36 | [66](https://github.com/aws/aws-cdk-rfcs/issues/66)|[StackSets Support](https://github.com/aws/aws-cdk-rfcs/issues/66)||✅ done
37 | [71](https://github.com/aws/aws-cdk-rfcs/issues/71)|[Deployment Triggers](https://github.com/aws/aws-cdk-rfcs/issues/71)||✅ done
38 | [73](https://github.com/aws/aws-cdk-rfcs/issues/73)|[AWS Resource Model](https://github.com/aws/aws-cdk-rfcs/issues/73)||✅ done
39 | [77](https://github.com/aws/aws-cdk-rfcs/issues/77)|[CloudFormation Registry Support](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0077-import-external-resources.md)||✅ done
40 | [79](https://github.com/aws/aws-cdk-rfcs/issues/79)|[CDK v2.0](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0079-cdk-2.0.md)||✅ done
41 | [92](https://github.com/aws/aws-cdk-rfcs/issues/92)|[CI/CD Asset Publishing](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0092-asset-publishing.md)|[@rix0rrr](https://github.com/rix0rrr)|✅ done
42 | [95](https://github.com/aws/aws-cdk-rfcs/issues/95)|[Cognito Construct Library](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0095-cognito-construct-library)|[@nija-at](https://github.com/nija-at)|✅ done
43 | [107](https://github.com/aws/aws-cdk-rfcs/issues/107)|[Publish a Construct Library Module Lifecycle document](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0107-construct-library-module-lifecycle.md)|[@ccfife](https://github.com/ccfife)|✅ done
44 | [110](https://github.com/aws/aws-cdk-rfcs/issues/110)|[CLI Compatibility Strategy](https://github.com/aws/aws-cdk-rfcs/blob/main/text/00110-cli-framework-compatibility-strategy.md)|[@iliapolo](https://github.com/iliapolo)|✅ done
45 | [116](https://github.com/aws/aws-cdk-rfcs/issues/116)|[Easier identification of experimental modules](https://github.com/aws/aws-cdk-rfcs/issues/116)||✅ done
46 | [171](https://github.com/aws/aws-cdk-rfcs/issues/171)|[CloudFront Module Redesign](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0171-cloudfront-redesign.md)||✅ done
47 | [192](https://github.com/aws/aws-cdk-rfcs/issues/192)|[Removal of the "constructs" compatibility layer (v2.0)](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0192-remove-constructs-compat.md)|[@eladb](https://github.com/eladb)|✅ done
48 | [204](https://github.com/aws/aws-cdk-rfcs/issues/204)|[JSII Go Support](https://github.com/aws/aws-cdk-rfcs/blob/main/text/204-golang-bindings.md)|[@MrArnoldPalmer](https://github.com/MrArnoldPalmer)|✅ done
49 | [228](https://github.com/aws/aws-cdk-rfcs/issues/228)|[CDK CLI Triggers](https://github.com/aws/aws-cdk-rfcs/issues/228)||✅ done
50 | [249](https://github.com/aws/aws-cdk-rfcs/issues/249)|[Experimental Code in CDK v2](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0249-v2-experiments.expired.md)|[@ericzbeard](https://github.com/ericzbeard)|✅ done
51 | [253](https://github.com/aws/aws-cdk-rfcs/issues/253)|[CDK Metadata v2](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0253-cdk-metadata-v2.md)||✅ done
52 | [282](https://github.com/aws/aws-cdk-rfcs/issues/282)|[CDK Pipelines security posture change approvals](https://github.com/aws/aws-cdk-rfcs/issues/282)||✅ done
53 | [294](https://github.com/aws/aws-cdk-rfcs/issues/294)|[Policy Definition and Enforcement](https://github.com/aws/aws-cdk-rfcs/issues/294)||✅ done
54 | [300](https://github.com/aws/aws-cdk-rfcs/issues/300)|[Programmatic access of AWS CDK CLI](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0300-programmatic-toolkit.md)|[@mrgrain](https://github.com/mrgrain)|✅ done
55 | [322](https://github.com/aws/aws-cdk-rfcs/issues/322)|[CDK Pipelines Updated API](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0322-cdk-pipelines-updated-api.md)||✅ done
56 | [324](https://github.com/aws/aws-cdk-rfcs/issues/324)|[Construct Hub](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0324-cdk-construct-hub.md)|[@RomainMuller](https://github.com/RomainMuller)|✅ done
57 | [328](https://github.com/aws/aws-cdk-rfcs/issues/328)|[polyglot assert library](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0328-polyglot-assert.md)|[@nija-at](https://github.com/nija-at)|✅ done
58 | [340](https://github.com/aws/aws-cdk-rfcs/issues/340)|[Kinesis Data Firehose Delivery Stream L2](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0340-firehose-l2.md)|[@BenChaimberg](https://github.com/BenChaimberg)|✅ done
59 | [353](https://github.com/aws/aws-cdk-rfcs/issues/353)|[Constructs for all public CloudFormation resources and modules](https://github.com/aws/aws-cdk-rfcs/blob/main/text/353-cfn-registry-constructs.md)||✅ done
60 | [359](https://github.com/aws/aws-cdk-rfcs/issues/359)|[Construct Hub Deny List](https://github.com/aws/aws-cdk-rfcs/blob/main/text/359-construct-hub-deny-list.md)||✅ done
61 | [374](https://github.com/aws/aws-cdk-rfcs/issues/374)|[The jsii compiler to follow TypeScript versioning](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0374-jsii-ts-version.md)||✅ done
62 | [388](https://github.com/aws/aws-cdk-rfcs/issues/388)|[CLI Banners](https://github.com/aws/aws-cdk-rfcs/issues/388)||✅ done
63 | [436](https://github.com/aws/aws-cdk-rfcs/issues/436)|[Amazon GameLift L2 Constructs](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0436-gamelift-l2.md)||✅ done
64 | [460](https://github.com/aws/aws-cdk-rfcs/issues/460)|[Reduce aws-cdk-lib package size](https://github.com/aws/aws-cdk-rfcs/issues/460)||✅ done
65 | [473](https://github.com/aws/aws-cdk-rfcs/issues/473)|[EventBridge Pipes L2 Construct](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0473-eventbridge-pipes.md)|[@mrgrain](https://github.com/mrgrain)|✅ done
66 | [474](https://github.com/aws/aws-cdk-rfcs/issues/474)|[EventBridge Scheduler L2 Construct](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0474-event-bridge-scheduler-l2.md)||✅ done
67 | [485](https://github.com/aws/aws-cdk-rfcs/issues/485)|[AWS Batch L2](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0485-aws-batch.md)||✅ done
68 | [491](https://github.com/aws/aws-cdk-rfcs/issues/491)|[CloudFront Origin Access Control L2](https://github.com/aws/aws-cdk-rfcs/issues/491)||✅ done
69 | [497](https://github.com/aws/aws-cdk-rfcs/issues/497)|[AWS Glue L2 CDK Construct](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0497-glue-l2-construct.md)|[@TheRealAmazonKendra](https://github.com/TheRealAmazonKendra)|✅ done
70 | [507](https://github.com/aws/aws-cdk-rfcs/issues/507)|[Full control over VPC and subnet configuration](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0507-subnets)|[@shikha372](https://github.com/shikha372)|✅ done
71 | [510](https://github.com/aws/aws-cdk-rfcs/issues/510)|[DynamoDB Global Table L2 Construct](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0510-dynamodb-global-table.md)|[@vinayak-kukreja](https://github.com/vinayak-kukreja)|✅ done
72 | [513](https://github.com/aws/aws-cdk-rfcs/issues/513)|[Application Specific Staging Resources](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0513-app-specific-staging.md)||✅ done
73 | [605](https://github.com/aws/aws-cdk-rfcs/issues/605)|[Rewrite EKS L2 construct (EKSv2)](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0605-eks-rewrite.md)||✅ done
74 | [617](https://github.com/aws/aws-cdk-rfcs/issues/617)|[Amazon CloudFront Origin Access Control L2 Construct](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0617-cloudfront-oac-l2.md)||✅ done
75 | [648](https://github.com/aws/aws-cdk-rfcs/issues/648)|[Priority-Ordered Aspect Invocation](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0648-aspects-priority-ordering.md)|[@sumupitchayan](https://github.com/sumupitchayan)|✅ done
76 | [710](https://github.com/aws/aws-cdk-rfcs/issues/710)|[Node.js Version Support Policy for AWS CDK](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0710-node-deprecation-strategy.md)||✅ done
77 |
78 |
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | ## Code of Conduct
2 |
3 | This project has adopted the
4 | [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). For
5 | more information see the
6 | [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
7 | opensource-codeofconduct@amazon.com with any additional questions or comments.
8 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Apache License
2 | Version 2.0, January 2004
3 | http://www.apache.org/licenses/
4 |
5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6 |
7 | 1. Definitions.
8 |
9 | "License" shall mean the terms and conditions for use, reproduction,
10 | and distribution as defined by Sections 1 through 9 of this document.
11 |
12 | "Licensor" shall mean the copyright owner or entity authorized by
13 | the copyright owner that is granting the License.
14 |
15 | "Legal Entity" shall mean the union of the acting entity and all
16 | other entities that control, are controlled by, or are under common
17 | control with that entity. For the purposes of this definition,
18 | "control" means (i) the power, direct or indirect, to cause the
19 | direction or management of such entity, whether by contract or
20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
21 | outstanding shares, or (iii) beneficial ownership of such entity.
22 |
23 | "You" (or "Your") shall mean an individual or Legal Entity
24 | exercising permissions granted by this License.
25 |
26 | "Source" form shall mean the preferred form for making modifications,
27 | including but not limited to software source code, documentation
28 | source, and configuration files.
29 |
30 | "Object" form shall mean any form resulting from mechanical
31 | transformation or translation of a Source form, including but
32 | not limited to compiled object code, generated documentation,
33 | and conversions to other media types.
34 |
35 | "Work" shall mean the work of authorship, whether in Source or
36 | Object form, made available under the License, as indicated by a
37 | copyright notice that is included in or attached to the work
38 | (an example is provided in the Appendix below).
39 |
40 | "Derivative Works" shall mean any work, whether in Source or Object
41 | form, that is based on (or derived from) the Work and for which the
42 | editorial revisions, annotations, elaborations, or other modifications
43 | represent, as a whole, an original work of authorship. For the purposes
44 | of this License, Derivative Works shall not include works that remain
45 | separable from, or merely link (or bind by name) to the interfaces of,
46 | the Work and Derivative Works thereof.
47 |
48 | "Contribution" shall mean any work of authorship, including
49 | the original version of the Work and any modifications or additions
50 | to that Work or Derivative Works thereof, that is intentionally
51 | submitted to Licensor for inclusion in the Work by the copyright owner
52 | or by an individual or Legal Entity authorized to submit on behalf of
53 | the copyright owner. For the purposes of this definition, "submitted"
54 | means any form of electronic, verbal, or written communication sent
55 | to the Licensor or its representatives, including but not limited to
56 | communication on electronic mailing lists, source code control systems,
57 | and issue tracking systems that are managed by, or on behalf of, the
58 | Licensor for the purpose of discussing and improving the Work, but
59 | excluding communication that is conspicuously marked or otherwise
60 | designated in writing by the copyright owner as "Not a Contribution."
61 |
62 | "Contributor" shall mean Licensor and any individual or Legal Entity
63 | on behalf of whom a Contribution has been received by Licensor and
64 | subsequently incorporated within the Work.
65 |
66 | 2. Grant of Copyright License. Subject to the terms and conditions of
67 | this License, each Contributor hereby grants to You a perpetual,
68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69 | copyright license to reproduce, prepare Derivative Works of,
70 | publicly display, publicly perform, sublicense, and distribute the
71 | Work and such Derivative Works in Source or Object form.
72 |
73 | 3. Grant of Patent License. Subject to the terms and conditions of
74 | this License, each Contributor hereby grants to You a perpetual,
75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76 | (except as stated in this section) patent license to make, have made,
77 | use, offer to sell, sell, import, and otherwise transfer the Work,
78 | where such license applies only to those patent claims licensable
79 | by such Contributor that are necessarily infringed by their
80 | Contribution(s) alone or by combination of their Contribution(s)
81 | with the Work to which such Contribution(s) was submitted. If You
82 | institute patent litigation against any entity (including a
83 | cross-claim or counterclaim in a lawsuit) alleging that the Work
84 | or a Contribution incorporated within the Work constitutes direct
85 | or contributory patent infringement, then any patent licenses
86 | granted to You under this License for that Work shall terminate
87 | as of the date such litigation is filed.
88 |
89 | 4. Redistribution. You may reproduce and distribute copies of the
90 | Work or Derivative Works thereof in any medium, with or without
91 | modifications, and in Source or Object form, provided that You
92 | meet the following conditions:
93 |
94 | (a) You must give any other recipients of the Work or
95 | Derivative Works a copy of this License; and
96 |
97 | (b) You must cause any modified files to carry prominent notices
98 | stating that You changed the files; and
99 |
100 | (c) You must retain, in the Source form of any Derivative Works
101 | that You distribute, all copyright, patent, trademark, and
102 | attribution notices from the Source form of the Work,
103 | excluding those notices that do not pertain to any part of
104 | the Derivative Works; and
105 |
106 | (d) If the Work includes a "NOTICE" text file as part of its
107 | distribution, then any Derivative Works that You distribute must
108 | include a readable copy of the attribution notices contained
109 | within such NOTICE file, excluding those notices that do not
110 | pertain to any part of the Derivative Works, in at least one
111 | of the following places: within a NOTICE text file distributed
112 | as part of the Derivative Works; within the Source form or
113 | documentation, if provided along with the Derivative Works; or,
114 | within a display generated by the Derivative Works, if and
115 | wherever such third-party notices normally appear. The contents
116 | of the NOTICE file are for informational purposes only and
117 | do not modify the License. You may add Your own attribution
118 | notices within Derivative Works that You distribute, alongside
119 | or as an addendum to the NOTICE text from the Work, provided
120 | that such additional attribution notices cannot be construed
121 | as modifying the License.
122 |
123 | You may add Your own copyright statement to Your modifications and
124 | may provide additional or different license terms and conditions
125 | for use, reproduction, or distribution of Your modifications, or
126 | for any such Derivative Works as a whole, provided Your use,
127 | reproduction, and distribution of the Work otherwise complies with
128 | the conditions stated in this License.
129 |
130 | 5. Submission of Contributions. Unless You explicitly state otherwise,
131 | any Contribution intentionally submitted for inclusion in the Work
132 | by You to the Licensor shall be under the terms and conditions of
133 | this License, without any additional terms or conditions.
134 | Notwithstanding the above, nothing herein shall supersede or modify
135 | the terms of any separate license agreement you may have executed
136 | with Licensor regarding such Contributions.
137 |
138 | 6. Trademarks. This License does not grant permission to use the trade
139 | names, trademarks, service marks, or product names of the Licensor,
140 | except as required for reasonable and customary use in describing the
141 | origin of the Work and reproducing the content of the NOTICE file.
142 |
143 | 7. Disclaimer of Warranty. Unless required by applicable law or
144 | agreed to in writing, Licensor provides the Work (and each
145 | Contributor provides its Contributions) on an "AS IS" BASIS,
146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147 | implied, including, without limitation, any warranties or conditions
148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149 | PARTICULAR PURPOSE. You are solely responsible for determining the
150 | appropriateness of using or redistributing the Work and assume any
151 | risks associated with Your exercise of permissions under this License.
152 |
153 | 8. Limitation of Liability. In no event and under no legal theory,
154 | whether in tort (including negligence), contract, or otherwise,
155 | unless required by applicable law (such as deliberate and grossly
156 | negligent acts) or agreed to in writing, shall any Contributor be
157 | liable to You for damages, including any direct, indirect, special,
158 | incidental, or consequential damages of any character arising as a
159 | result of this License or out of the use or inability to use the
160 | Work (including but not limited to damages for loss of goodwill,
161 | work stoppage, computer failure or malfunction, or any and all
162 | other commercial damages or losses), even if such Contributor
163 | has been advised of the possibility of such damages.
164 |
165 | 9. Accepting Warranty or Additional Liability. While redistributing
166 | the Work or Derivative Works thereof, You may choose to offer,
167 | and charge a fee for, acceptance of support, warranty, indemnity,
168 | or other liability obligations and/or rights consistent with this
169 | License. However, in accepting such obligations, You may act only
170 | on Your own behalf and on Your sole responsibility, not on behalf
171 | of any other Contributor, and only if You agree to indemnify,
172 | defend, and hold each Contributor harmless for any liability
173 | incurred by, or claims asserted against, such Contributor by reason
174 | of your accepting any such warranty or additional liability.
175 |
176 | END OF TERMS AND CONDITIONS
177 |
178 | APPENDIX: How to apply the Apache License to your work.
179 |
180 | To apply the Apache License to your work, attach the following
181 | boilerplate notice, with the fields enclosed by brackets "[]"
182 | replaced with your own identifying information. (Don't include
183 | the brackets!) The text should be enclosed in the appropriate
184 | comment syntax for the file format. We also recommend that a
185 | file or class name and description of purpose be included on the
186 | same "printed page" as the copyright notice for easier
187 | identification within third-party archives.
188 |
189 | Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
190 |
191 | Licensed under the Apache License, Version 2.0 (the "License");
192 | you may not use this file except in compliance with the License.
193 | You may obtain a copy of the License at
194 |
195 | http://www.apache.org/licenses/LICENSE-2.0
196 |
197 | Unless required by applicable law or agreed to in writing, software
198 | distributed under the License is distributed on an "AS IS" BASIS,
199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200 | See the License for the specific language governing permissions and
201 | limitations under the License.
202 |
--------------------------------------------------------------------------------
/NOTICE:
--------------------------------------------------------------------------------
1 | AWS Cloud Development Kit (AWS CDK)
2 | Copyright 2018-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
3 |
--------------------------------------------------------------------------------
/PROPOSED.md:
--------------------------------------------------------------------------------
1 | # Proposed RFCs
2 |
3 | **Jump to**:
4 | [Full list](./FULL_INDEX.md) |
5 | [Accepted](./ACCEPTED.md) |
6 | [Proposed](./PROPOSED.md) |
7 | [Closed](./CLOSED.md)
8 |
9 |
10 | \#|Title|Owner|Status
11 | ---|-----|-----|------
12 | [72](https://github.com/aws/aws-cdk-rfcs/issues/72)|[Stack Policy](https://github.com/aws/aws-cdk-rfcs/issues/72)||💡 proposed
13 | [437](https://github.com/aws/aws-cdk-rfcs/issues/437)|[CDK post-deployment experience](https://github.com/aws/aws-cdk-rfcs/issues/437)||💡 proposed
14 | [469](https://github.com/aws/aws-cdk-rfcs/issues/469)|[AWS Lambda for .NET Support](https://github.com/aws/aws-cdk-rfcs/issues/469)||💡 proposed
15 | [489](https://github.com/aws/aws-cdk-rfcs/issues/489)|[Add API to register and execute code before or after CDK App lifecycle events](https://github.com/aws/aws-cdk-rfcs/issues/489)||💡 proposed
16 | [583](https://github.com/aws/aws-cdk-rfcs/issues/583)|[Deployment Debugging](https://github.com/aws/aws-cdk-rfcs/issues/583)||💡 proposed
17 | [585](https://github.com/aws/aws-cdk-rfcs/issues/585)|[Local Application Testing](https://github.com/aws/aws-cdk-rfcs/issues/585)||💡 proposed
18 | [609](https://github.com/aws/aws-cdk-rfcs/issues/609)|[PythonFunction additional build options](https://github.com/aws/aws-cdk-rfcs/issues/609)||💡 proposed
19 | [611](https://github.com/aws/aws-cdk-rfcs/issues/611)|[Complete Construct Model](https://github.com/aws/aws-cdk-rfcs/issues/611)||💡 proposed
20 | [613](https://github.com/aws/aws-cdk-rfcs/issues/613)|[Add L2 construct for aws_controltower.CfnEnabledControl](https://github.com/aws/aws-cdk-rfcs/issues/613)||💡 proposed
21 | [627](https://github.com/aws/aws-cdk-rfcs/issues/627)|[L2 constructs for CodeArtifact](https://github.com/aws/aws-cdk-rfcs/issues/627)||💡 proposed
22 | [629](https://github.com/aws/aws-cdk-rfcs/issues/629)|[Cost Estimation Tools - Reopened](https://github.com/aws/aws-cdk-rfcs/issues/629)||💡 proposed
23 | [631](https://github.com/aws/aws-cdk-rfcs/issues/631)|[Amazon VPC Lattice L2 Construct](https://github.com/aws/aws-cdk-rfcs/issues/631)||💡 proposed
24 | [635](https://github.com/aws/aws-cdk-rfcs/issues/635)|[L2 Constructs for AWS MediaConvert](https://github.com/aws/aws-cdk-rfcs/issues/635)||💡 proposed
25 | [655](https://github.com/aws/aws-cdk-rfcs/issues/655)|[Enhancements to L1s in CDK](https://github.com/aws/aws-cdk-rfcs/issues/655)||💡 proposed
26 | [676](https://github.com/aws/aws-cdk-rfcs/issues/676)|[Proposing a Contributor Council for AWS CDK](https://github.com/aws/aws-cdk-rfcs/issues/676)||💡 proposed
27 | [683](https://github.com/aws/aws-cdk-rfcs/issues/683)|[L2 Constructs for AWS Event Schemas](https://github.com/aws/aws-cdk-rfcs/issues/683)||💡 proposed
28 | [693](https://github.com/aws/aws-cdk-rfcs/issues/693)|[Set Default Values to Construct Properties using PropertyInjector](https://github.com/aws/aws-cdk-rfcs/blob/main/text/0693-property-injection.md)||💡 proposed
29 | [695](https://github.com/aws/aws-cdk-rfcs/issues/695)|[AWS S3 directory bucket L2 constructs](https://github.com/aws/aws-cdk-rfcs/issues/695)||💡 proposed
30 | [703](https://github.com/aws/aws-cdk-rfcs/issues/703)|[apigateway: private domain name L2 construct](https://github.com/aws/aws-cdk-rfcs/issues/703)||💡 proposed
31 | [707](https://github.com/aws/aws-cdk-rfcs/issues/707)|[CDK Pipelines: Use pipeline ServiceRole as default ActionRole](https://github.com/aws/aws-cdk-rfcs/issues/707)||💡 proposed
32 | [715](https://github.com/aws/aws-cdk-rfcs/issues/715)|[CDK Construct Update Support](https://github.com/aws/aws-cdk-rfcs/issues/715)||💡 proposed
33 | [719](https://github.com/aws/aws-cdk-rfcs/issues/719)|[Native Container Image Tarball Asset Handling](https://github.com/aws/aws-cdk-rfcs/issues/719)||💡 proposed
34 | [728](https://github.com/aws/aws-cdk-rfcs/issues/728)|[L2 Constructs for AWS Elemental MediaPackageV2](https://github.com/aws/aws-cdk-rfcs/issues/728)||💡 proposed
35 | [732](https://github.com/aws/aws-cdk-rfcs/issues/732)|[CDK CLI Telemetry](https://github.com/aws/aws-cdk-rfcs/issues/732)|[@kaizencc](https://github.com/kaizencc)|✍️ review
36 |
37 |
--------------------------------------------------------------------------------
/images/0300/cli-lib-alpha-usage.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/0300/cli-lib-alpha-usage.png
--------------------------------------------------------------------------------
/images/0300/iohost.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/0300/iohost.png
--------------------------------------------------------------------------------
/images/0300/today.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/0300/today.png
--------------------------------------------------------------------------------
/images/0300/tomorrow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/0300/tomorrow.png
--------------------------------------------------------------------------------
/images/0693/tree_traversal.drawio.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/0693/tree_traversal.drawio.png
--------------------------------------------------------------------------------
/images/AspectsDiagram1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/AspectsDiagram1.png
--------------------------------------------------------------------------------
/images/EventBridge-Scheduler-2023-03-05-1723.excalidraw.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/EventBridge-Scheduler-2023-03-05-1723.excalidraw.png
--------------------------------------------------------------------------------
/images/GitHubArtifacts.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/GitHubArtifacts.png
--------------------------------------------------------------------------------
/images/MaintenancePolicy.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/MaintenancePolicy.png
--------------------------------------------------------------------------------
/images/PipelineRollback.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/PipelineRollback.png
--------------------------------------------------------------------------------
/images/ProposedAspectInvocationOrder.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/ProposedAspectInvocationOrder.png
--------------------------------------------------------------------------------
/images/bedrockKnowledgebase.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/bedrockKnowledgebase.png
--------------------------------------------------------------------------------
/images/cdk-construct-hub/architecture-diagram.drawio:
--------------------------------------------------------------------------------
1 | 7V3bcts4Ev0aV80+yEXwKj5akpVkxok98WxN5kkFkZDEmCK0JGhL8/UL8CKJBCzRNi+IzSRVEZrgDd3noNFogBfaeL39FMLN6it2kX+hKu72QptcqCqwNY3+xyS7VKIC00wly9Bzs1oHwb33L8qESiaNPRdFhYoEY594m6LQwUGAHFKQwTDET8VqC+wX77qBS8QJ7h3o89K/PZesUulQtQ7yz8hbrvI7A9NOj6xhXjl7k2gFXfx0JNKuL7RxiDFJf623Y+Sz1svbJT1v+szR/YOFKCBVTrj9Prl7+H1q3/0Fb+F0MNN/fP02yK7yCP04e+Grv++pYOzj2M2em+zyxthgLyBJgxoj+o/eb6xcGPTImJUuVaMkKJetogDwJXaNoqBctooCUL48KN0flB/wSMCVCpdXSvdXjh6Q/tNGOCa+F6Dx3vQUKlyG0PWoSsbYxyGVBTigrTdakbVPS4D+fFp5BN1voMNa9YnihsoWOCCZ8QM1L2cNz65KjWfDfq+3Swa0S/gU6ZfLEMeb5JZfqPkLj87oz5mTKJNehIT4AeUPdqFq9O+UGcto4fl+6YEfUUg8ioMr31uyaxPMbgWzko8WhF2RvoUXLG+S0kRTsicX3cKF0Qq52evwtpsbIr0r2h6JMlv+hPAakXBHq2RHB3qGq4xZBnugPR1wChQlE66OQGrnNWFGDsv91Q/4oT8yCL0ATobN4eke0deEBFHplePgmL6wqvx2vSUoDKCf0BnDGw6odmKHHfwcz//DQe9YQ7RZr6dTZapxOs10V2hrsSLLBiZS2+SNmso1A4a8ZvY6OFaM2phigM21KHIp0WdFHJIVXmKqkOuDdEQBFLj7djzUucGsCRMs/0SE7DLgwpiqsoB0tPXID3Y6ZZG09M/Rkck2u3JS2B0V7lDo0fdG4Tm4RDgOHXTqvfWsz4ThEpFTFdW0ImuVk0oNkQ+J91jsHuvXl8EBqQsFvr7htYrtrsvV7OYHaXYApGp3w+qimSPaTOSKOcmHriORTT32+CkdBW5ew/FhFHlOKsyqtKMtQy6UaBxI7qDzwAYSqjLxIgfTvnHHKVToLT7nLT3jRZa8gIlijIH1Ii/gBs6Rf4cjj3iJyzjHhOD1WX/Poc/CuqNj8znns8Jok77owtuy5xglwwcUXj+idBQBnnNsfbieu3C2iAMnfcw6HMZh0V9UTd4psYa8T5LLarei/bjwF6FWgScRfPtxs4rB5PftbfRn/MfGdUMyqJ1Zk1MpCcHdUYVsKHq48h0THLSt555Ppm6QeYAHjaVXPOhv/2hvIAbjQ9J4VR9T64zGf2C0gHfqv19mZD2f3a79a/A5N9Qu8FdqgWYBaXbZV+pcX/klIPAB9d2j3N2jYMjeVO8o7kU66R2rUOnrAWtUBKwllbfLj8Qn2Bl8QgGLY93AYDm4p4bsLTyHj1r1oO4W1KZWRLWud+z06tLCulEPyarqIXUGfTEJd+IiNUvCAlUI392QioQtjoTHkEAfL6lwFHu+S4mqJ1+5ybfziAPoJMr4eqgOK7ImkCs8OOSwehQepKhtA6iqZQFgvj+gzmPnAZHZk0dWMzz/Sa8S1QNWzTILYNWAAKxGm2AddgFW2oDh7mjKkhX/2XtItHCYtExKu+NSfdOWdmfIryXYqFstBBv5VIPvCCYJBFebjU8Bm+C45xr5uEatMiprlWv4NLBgs/4ZXTpUKW1YUF79nVlQUi1gDFZLxhPQh0WHMjeSMw6l3pjdqJzd/DdCYdTbzOttJk4asCafxijYi2V0PAAxpI0nNBr9qZyEletHkoEM4CdvkrzgaYiZ91PSpDA5WJggLEoSFiYK88nChWpJ+q7gDmWhSGbxQsBXyzN+eaFIJkpvLp8NBGeD0tnPJxdXpUx6zNaNyVQ9OjbxQpRFNygKQkYAZU41rjRlZHCcSo8skj8yEquQREOUgjLNix7RoihDOsmLXiTG3AjfGsNq/XNzKSYSp5Xl48zCKPMw6GxnnKma
FZlZHUrFzCqfp5mt25j80ROz3MSsTyw1Sd1/ATHTP/rI/ljEPHPRI/LxZk0fZvbg1TaIMiSbElU7WZrQ7CSbWjV0b8tFq3zoPnV4cbgWh9N6bq2VW/vmlKqrmmq6PnzhGGI0BprBR4PfdVe12PNDE6l4nfdQmrQ9VLOrU5SKnZjW2eBAmLkirbbqVYXw3eUapuWPfWo5XZ+l03GWji0b2/I20ubE/77whoDMC6k7q3Hg7TeSBajI25LlDeXPfTyjT9kBGVo/7JDbT+5j7Wf95JBZ8szQ6iFtveQidx5p13mMSuJ0NesiV53Z1FW5qJaf2Zwg6FLJDW1wlkut/BmjuJVVas8MmH95b+1/SQPWE7RVi3C3jcuKmS9WU3jvJDuze7xXXciiS+Za8esnerzLi3ervHLC6h7wEk+lNwn4qhM5uikX4PmZnB7wEgO+HIQxuge8ylnQlQ/DVvKh36vFwKQBa9lUENh8mE4T2IfW2IiPH0L09iGBfeRzaGUXQrChRbv2IkjZ6u1FHnsp90CCxbqt2ovBzx19xYFHcOgFy95oXm8060Mr1rOaq5RQZtjVPBejMcv5mLFIo/IqC7m2djD6WGTjmK9xqAJs6WITuUH3roWUroVatpiuXVGTn2g+7FCRNc1H7EHMqglfVYPb+fSFcmmCnOZfthPBYauBvApeLCJEOBuoYUMBk/c4x9hFVxSUi3Rjge9ow2CLBbsb9xkJUmUk9ItMzmfuUtuGuW3XQ/RmcX3+AGg807ealmDyQcx7emkqORD+d7T0IiKAdL9ov7ONHkpDygEAFV3MxgxJ/5geQdUvFORAk2RMaX7Mjcgrq0uObaNMq4Ty0gfcStXzLeyfqd/MLlMm/7GLflj5hmElBU2dS4VU6TwOfpqDPHksjtVvJiWTj2GXMmj3QO/Mw5B22VKzXVbVjDpTrgQbk8+ou//GdqD4C29oA6nKb7cbhibot7Ir/HvtL0jSmo1sRzyw1IqBa7OGwLVwtR7vSkgC+Up4PvX1o/Nwrn2n99e5oGbRpwS2+rxPef7s8pxm2gzZWW+KWAobW9qJzubtR64PBUgbHWheEbVPJr8Kx3ppjqmE47P1696xWNykPF5bMYKSr+AaaOjqojj3UJ1rpnnsMICL8ytV08vkX4dXW7E6uXYc4OcTqZuM1nN/d/kz6jehfosDmG48XY8HqJVJwuKDBXlaa2HMpzdlN6fDlJny3swRBTTXsTadR/zLKeb1HCH4jNgvwBF8joqTfsGmpwiJKMIwigFFNfMLCrkqNk8Ruax+iuBdSzkZoMhSbfJB1T1M5eIDPk5MrTSiMIt6QpCIEDSluKcxsIAgaqQDgdfQmLdp9l7Dy1miapxZLpbgo8z5yGJwoZpwzUDmM1P3IVsVkUmWTNLTiEQ0YpYXSeTTGS3kwIpJ5PR67p5ETm2vJ+8XjE89dcspDD0tnKcFC7RGC7QYYkyOI5rsnb5iF7Ea/wc=
--------------------------------------------------------------------------------
/images/cdk-construct-hub/architecture-diagram.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/cdk-construct-hub/architecture-diagram.png
--------------------------------------------------------------------------------
/images/cdk-construct-hub/detail-page.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/cdk-construct-hub/detail-page.png
--------------------------------------------------------------------------------
/images/cdk-construct-hub/landing-page.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/cdk-construct-hub/landing-page.png
--------------------------------------------------------------------------------
/images/garbagecollection.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/garbagecollection.png
--------------------------------------------------------------------------------
/images/lifecycle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws/aws-cdk-rfcs/386690e1d1cea13e9fc436bd80660ab899a192ee/images/lifecycle.png
--------------------------------------------------------------------------------
/lint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # executes markdownlint on all RFCs
3 | # --fix to fix errors
4 | set -euo pipefail
5 | scriptdir=$(cd $(dirname $0) && pwd)
6 | linters=$PWD/tools/linters
7 |
8 | (cd $linters && npm ci)
9 | cliargs="--ignore node_modules --ignore tools ${@:1}"
10 | $linters/node_modules/.bin/markdownlint . $cliargs
11 |
--------------------------------------------------------------------------------
/render-table.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | set -euo pipefail
3 | (cd tools/rfc-render && npm ci)
4 | node tools/rfc-render/inject-table.js README.md
5 |
--------------------------------------------------------------------------------
/text/0001-cdk-update.md:
--------------------------------------------------------------------------------
1 | ---
2 | rfc pr: [#335](https://github.com/aws/aws-cdk-rfcs/pull/335)
3 | tracking issue: https://github.com/aws/aws-cdk-rfcs/issues/1
4 | ---
5 |
6 | # CDK Watch and Hotswap
7 |
8 | Shorten the development iteration speed -- the "edit-compile-test" loop -- for
9 | CDK applications.
10 |
11 | ## Working Backwards
12 |
13 | ### CHANGELOG
14 |
15 | - feat(cli): implement `cdk deploy --hotswap` and `cdk deploy --watch`
16 |
17 | ### Help
18 |
19 | ```
20 | cdk deploy --help
21 |
22 | -w --watch Watch for file changes and deploy any updates. Implies
23 | --hotswap=auto if --hotswap is not specified.
24 |
25 | --hotswap[=only|auto|no|ask] Perform a hotswap deployment of the stack.
26 | (`--hotswap=only` will fail if there are non-hotswap updates). The
27 | default value is "only".
28 |
29 | Examples:
30 | cdk deploy -w
31 | cdk deploy --hotswap
32 | cdk deploy --hotswap=only --watch
33 | ```
34 |
35 | ### README
36 |
37 | The `cdk deploy --hotswap` command improves the speed of the
38 | edit-compile-test loop for your CDK application during development by inspecting
39 | the assets and stack resources in your application, identifying those that can
40 | be updated in-place without a full CloudFormation stack update, and doing so
41 | using AWS service APIs directly. It can either do this as an interactive command
42 | or by running continuously and monitoring the input files.
43 |
44 | For supported construct types (see list, below) the update command will identify
45 | assets or other stack resources that have changed since the last time the CDK
46 | application was deployed, and update them in-place according to their types.
47 | Assets will be uploaded to their storage medium. The CDK will then use AWS SDK
48 | commands to directly modify the stack resources in-place to synchronize them
49 | with your local code. The hotswap option can act on one or more stacks in
50 | the application.
51 |
52 | #### Usage
53 |
54 | The simplest use case for CDK hotswap is `cdk deploy --watch`. This mode of
55 | operation will identify the files that can affect the resources being watched,
56 | monitor them for changes, and perform the fastest supported form of deployment
57 | on the stacks that change, as they change.
58 |
59 | ```
60 | $ cdk deploy --hotswap --watch ApplicationStack DataStack
61 | Checking stack ApplicationStack for possible hotswap update
62 | * LambdaFunction[Appfunction]: No Changes
63 | ApplicationStack can *not* be updated
64 | Checking stack DataStack for possible hotswap update
65 | * LambdaFunction[StreamFunction]: No Changes
66 |
67 | Watching stack inputs for changes:
68 | * LambdaFunction[Appfunction]:
69 | * LambdaFunction[StreamFunction]:
70 | ```
71 |
72 | The "watch" functionality can be customized by setting a few new options in your `cdk.json` file:
73 |
74 | 1. If either your CDK code, or your runtime code
75 | (like the code of your Lambda functions)
76 | needs to go through a build step before invoking `cdk deploy`,
77 | you can specify that command in the new `"build"` key. Example:
78 |
79 | ```json
80 | {
81 | "app": "mvn exec:java",
82 | "build": "mvn package"
83 | }
84 | ```
85 |
86 | If the `"build"` key is present in the `cdk.json` file,
87 | `cdk synth` (which the "watch" process invokes before deployment)
88 | will execute the specified command before performing synthesis.
89 |
90 | 2. The "watch" process needs to know which files and directories to observe for changes,
91 | and which ones to ignore. You can customize these using the `"include"` and `"exclude"`
92 | sub-keys of the new `"watch"` top-level key.
93 | Values are glob patterns that are matched _relative to the directory containing the `cdk.json` file_:
94 |
95 | ```json
96 | {
97 | "app": "mvn exec:java",
98 | "build": "mvn package",
99 | "watch": {
100 | "include": ["./**"],
101 | "exclude": ["target"]
102 | }
103 | }
104 | ```
105 |
106 | The `cdk init` command fills these out for you,
107 | so if your project has a standard layout,
108 | you shouldn't need to modify these from the generated defaults.
109 | Also note that the output directory (`cdk.out` by default)
110 | is always automatically excluded, so doesn't need to be specified in the `cdk.json` file.
111 |
112 | In addition to the monitoring mode, it is possible to perform one-shot
113 | hotswap deployments on some or all of the stacks in the CDK application:
114 |
115 | ```
116 | $ cdk deploy --hotswap ApplicationStack
117 | Checking stack ApplicationStack for possible hotswap update
118 | * LambdaFunction[AppFunction]: File asset changed
119 | ApplicationStack can be updated
120 | 1/1 stacks can be updated:
121 | Updating LambdaFunction in ApplicationStack
122 | ...
123 | Update complete!
124 | ```
125 |
126 | If the update is to an attribute of the stack that cannot be updated, the
127 | command can offer to perform a full deployment if `--hotswap=ask`:
128 |
129 | ```
130 | $ cdk deploy --hotswap=ask ApplicationStack
131 | Checking stack ApplicationStack for possible hotswap update
132 | * LambdaFunction[Appfunction]: Function data changed
133 | ApplicationStack has changes that can not be rapidly updated.
134 | Perform a full deployment of ApplicationStack? [Y/n]: Y
135 | Deploying ApplicationStack
136 | ...
137 | Done!
138 | ```
139 |
140 | When multiple stacks are provided, update only occurs if all pending changes are
141 | suitable for update. Otherwise, a full deployment is done:
142 |
143 | ```
144 | $ cdk deploy --hotswap=auto ApplicationStack DataStack
145 | Checking stack ApplicationStack for possible hotswap update
146 | * LambdaFunction[AppFunction]: Function data changed
147 | ApplicationStack has changes that cannot be rapidly updated.
148 | Checking stack DataStack for possible hotswap update
149 | * LambdaFunction[StreamFunction]: File asset changed
150 | DataStack can be updated
151 | 1/2 stacks can be hotswapped, automatically performing a full deployment.
152 | Deploying ApplicationStack
153 | ...
154 | Deploying DataStack
155 | ...
156 | Done!
157 | ```
158 |
159 | #### Resource Support
160 |
161 | - AWS Lambda `Function`
162 | - file and directory assets
163 | - StepFunction
164 | - State Machine definitions
165 | - ECS
166 | - image assets
167 |
168 | #### Future Support Plans
169 |
170 | - API Gateway models
171 |
172 | ## FAQ
173 |
174 | ### What are we launching today?
175 |
176 | The `cdk deploy --watch` and `cdk deploy --hotswap` command features, with
177 | support for rapid update of Lambda function code, images for ECS and Fargate
178 | task definitions, and AWS StepFunction workflows.
179 |
180 | ### Why should I use this feature?
181 |
182 | If you are developing a CDK application and want to publish your code changes
183 | without waiting for CloudFormation to perform a full, safe stack update, you
184 | should use this feature.
185 |
186 | ### Does it work when my Lambda is in Go or Java?
187 |
188 | To an extent, yes, but there are caveats. In order to update the lambda code you
189 | will need to run your build process to create the jar or executable that you are
190 | uploading as lambda code.
191 |
192 | ### Does this feature work with `aws-lambda-go` for compiled Lambdas?
193 |
194 | Yes, if your lambda is defined by an `aws-lambda-go`, `aws-lambda-python`, or
195 | `aws-lambda-nodejs` construct, the hotswap and watch capabilities will work
196 | within those source trees to automatically deploy changes to your code as you
197 | make them.
198 |
199 | ### What about "other resource type"?
200 |
201 | Based on customer response, we will consider expanding the set of AWS resources
202 | and asset types implemented in the CDK directly. The plugin mechanism for
203 | resource refreshing will allow other resource types to be supported by either
204 | community or vendor addition.
205 |
206 | ## Internal FAQ
207 |
208 | ### Why are we doing this?
209 |
210 | The overhead of developing a Lambda application using the CDK is significant,
211 | since each code change currently requires a CloudFormation stack update to apply
212 | new code, _or_ manually introducing drift to the application by inspecting the
213 | stack and manipulating the resources directly.
214 |
215 | The CDK hotswap option will allow the CDK to handle the resource changes rather
216 | than this manual process, introducing some implicit safety and reducing the
217 | manual labor of the update.
218 |
219 | ### Why should we _not_ do this?
220 |
221 | This solution has a risk of introducing a deployment tool that users might use
222 | to short-circuit the safe CloudFormation deployment process. If a user runs CDK
223 | update on their production stack, it can perform updates without adequate stack
224 | update safety checks. Releasing a public tool with instant update capability
225 | into the CDK may not be the right way to make this functionality public. To
226 | mitigate this, `--hotswap` only runs on explicitly selected stacks, and does
227 | not support the `--all` flag.
228 |
229 | ### What changes are required to enable this change?
230 |
231 | 1. An implementation of the `cdk deploy --hotswap` command in the CDK CLI
232 | that can examine the Application stacks for updates that can be applied and
233 | apply them.
234 | 2. The CDK CLI must be able to query a CDK resource for the set of filesystem
235 | resources it must monitor for the `--watch` operation, and run a filesystem
236 | monitor for that.
237 |
238 | ### Is this a breaking change?
239 |
240 | No, it is not.
241 |
242 | ### What are the drawbacks of this solution?
243 |
244 | - Updating like this still entails a fair amount of preprocessing time, since for
245 | complex projects (Golang and Java lambdas, for example) there remains a
246 | compilation step.
247 | - The update model requires the AWS account used to publish the stack to also
248 | have sufficient permissions to update the underlying resources, rather than
249 | simply requiring the account to have CloudFormation access.
250 | - Runtimes that require compilation or assembly -- Java lambdas, docker images
251 | -- do not benefit from the `--watch` support as naturally, and require some
252 | manual steps.
253 | - updating Lambda functions that are using aliases and provisioned concurrency
254 | can take several minutes to switch over between the two versions.
255 |
256 | ### What alternative solutions did you consider?
257 |
258 | We considered "SAM Accelerate" for a similar purpose, but SAM covers a relatively
259 | small set of possible application structures in AWS, while CDK is intended to
260 | address the whole set of them.
261 |
262 | We also considered introspecting the CDK model ourselves, but concluded that
263 | there was little value in reinventing the wheel, so to speak, when CDK already
264 | had all of the information we'd need to deliver this.
265 |
266 | For identifying stacks that are subject to hotswap deployment, we are
267 | considering defining a full "Personal Development Stack" model, possibly based
268 | off of information in the CDK `App` context.
269 |
270 | ### What is the high level implementation plan?
271 |
272 | We will start from the prototype CDK update command that identifies the Lambda
273 | resources and then publishes them using the CDK CLI, and extend that to
274 | implement support for ECR images associated with ECS and Fargate tasks, API
275 | Gateway definitions, and Step Function workflows. Those will be implemented
276 | directly in the CLI code as part of the launch for the feature. The CLI
277 | implementation will be designed to conform to an interface that provides:
278 |
279 | - Watchable filesystem resources
280 | - A way of updating the watchable resource list
281 | - A method for determining whether the construct can be updated in place
282 | - A method for updating the construct in place
283 |
284 | If changes to the CDK constructs are necessary to implement the hotswap
285 | development process, we will make those changes as well. In the longer term we
286 | must lay the groundwork for moving the logic defining the update process into
287 | the Construct library, which implies a design for passing these values by way of
288 | the Cloud Assembly.
289 |
290 | We will implement a filesystem watcher for the CDK CLI that works on one or more
291 | directory trees, watching for changes. It will base its watch list on the set of
292 | files indicated by the CLI, and update them when those responses change.
293 |
294 | Additionally, a `--watch` flag and a file watcher will be added to support
295 | monitoring the inputs of stack resources for changes.
296 |
297 | ### Are there any open issues that need to be addressed later?
298 |
299 | - This RFC can be extended to add support for further pluggable asset and update
300 | targets. The hotswap capabilities are attached to the CDK constructs, not
301 | the CDK CLI, so any CDK construct that can perform hotswap deployment can
302 | implement that capability in whatever manner is appropriate.
303 | - This RFC will be enhanced significantly when the CDK asset model is enriched
304 | to support asset construction directly.
305 | - Direct support for monitoring container repositories for changes (possibly via
306 | polling) instead of only supporting local rebuild.
307 |
308 | # How do we keep production from being affected by CDK hotswap?
309 |
310 | CDK hotswap uses the AWS IAM access controls already in place in your account
311 | to maintain the safety of your production deployments. The interface to CDK
312 | hotswap requires the developer to specify the stacks that they will be
313 | deploying explicitly, so by default it is affecting only the developer's stacks,
314 | and when a production stack is defined it is up to the AWS account administrator
315 | to ensure that the interactive developer's roles do not have modification access
316 | to the hotswap resources.
317 |
--------------------------------------------------------------------------------
/text/0055-feature-flags.md:
--------------------------------------------------------------------------------
1 | ---
2 | feature name: feature-flags
3 | start date: (unknown)
4 | rfc pr: https://github.com/aws/aws-cdk/pull/5017
5 | related issue: https://github.com/awslabs/aws-cdk-rfcs/issues/55
6 | ---
7 |
8 | # Summary
9 |
10 | Feature flags will allow us to introduce new breaking behavior which is disabled
11 | by default (so existing projects will not be affected) but enabled automatically
12 | for new projects created through `cdk init`.
13 |
14 | # Motivation
15 |
16 | Sometimes (hopefully rarely) we want to introduce new breaking behavior because
17 | we believe this is the correct default behavior for the CDK. The problem, of
18 | course, is that breaking changes are only allowed in major versions and those
19 | are rare.
20 |
21 | # Basic Example
22 |
23 | For example, the AWS CDK had a bug
24 | ([#4925](https://github.com/aws/aws-cdk/pull/4925)) where it was impossible to
25 | use the same physical stack name for multiple stacks, even if they were
26 | targeting different regions despite the fact that this is technically possible
27 | (and sometimes desired).
28 |
29 | If multiple stacks can have the same physical name, we need another way to
30 | identify stacks uniquely (e.g. when selecting stacks in various CLI commands
31 | like `cdk deploy X`). To that end, we introduced the concept of an **artifact
32 | ID**. In most cases, artifact IDs will be the same as stack names, so the common
33 | behavior will stay the same. However, in some cases, the current behavior would
34 | break.
35 |
36 | If we introduced this fix without a feature flag, it means that current users
37 | may break. Therefore, we introduced this feature under a feature flag named
38 | `@aws-cdk/core:enableStackNameDuplicates`.
39 |
40 | This feature is disabled by default, which means that the behavior for existing
41 | projects would remain the same (along with the limitation of course), but new
42 | projects would have the flag automatically enabled in their `cdk.json` file.
43 |
44 | # Design Summary
45 |
46 | The basic idea is that new breaking behavior will always be disabled by default
47 | and only enabled when a certain CDK context parameter is set. If not enabled,
48 | the system will continue to behave exactly like it used to without breaking any
49 | existing projects.
50 |
51 | When we release a new major version of the AWS CDK, we will flip this behavior
52 | or completely remove the legacy behavior.
53 |
54 | In order for new projects to pick up this new behavior automatically, we will
55 | modify `cdk init` to inject the set of feature flags into the generated
56 | `cdk.json` file. This means that the new project will have the latest behavior,
57 | but projects that were created prior to the introduction of this feature will
58 | have the same legacy behavior based on the set of capabilities that were
59 | available at the time of the project's creation. This list will be cleaned up
60 | every time we release a major version of course.
61 |
62 | Using fine-grained flags will allow users of old projects to pick up specific
63 | new behaviors by manually adding the specific keys to their `cdk.json` file,
64 | without risking breakage in other unexpected areas.
65 |
66 | # Detailed Design
67 |
68 | Context keys for feature flags will be listed in `cx-api/lib/features.ts` and
69 | will take the form: `<module-name>:<feature-name>`.
70 |
71 | For example:
72 |
73 | - `@aws-cdk/core:enableStackNameDuplicates`
74 | - `@aws-cdk/aws-cloudformation:doNotCapitalizeCustomResourcePropertyNames`.
75 |
76 | Using the module name will allow easy tracing of the code that consumes this
77 | flag.
78 |
79 | The configuration for which feature flags should be enabled for new projects
80 | will be under `cx-api/lib/future.ts` and will be encoded as a simple context
81 | hash that will be injected by `cdk init` to all `cdk.json` files generated for
82 | new projects.
83 |
84 | We will mandate that when a feature or bug fix is introduced under a feature
85 | flag, the CHANGELOG will include:
86 |
87 | - The suffix `(under feature flag)` in the title.
88 | - A `BREAKING CHANGES` paragraph will be added which describes the _new_
89 | behavior but disclaims that it will only apply to new projects created through
90 | `cdk init`. It will also indicate the context key this flag uses for users who
91 | wish to enable it manually in their project.
92 |
93 | Since feature flags can have implications on framework behavior, we need to ask
94 | users to include the list of enabled features in bug reports. At a minimum, we
95 | can request that they paste a copy of their `cdk.json` and `cdk.context.json`,
96 | but a better experience would be to include this information in the output of
97 | `cdk doctor` and request users to include this output in bug reports.
98 |
99 | # Drawbacks
100 |
101 | There are a few concerns this capability raises. These concerns are mostly
102 | emphasized in the situation where we have a proliferation of feature flags. If
103 | we end up with less than 5 feature flags before we bump a major version (and
104 | eradicate all existing flags), then I believe these drawbacks are not
105 | substantial. If we end up with dozens of flags, all of these will become an
106 | issue.
107 |
108 | Therefore, the main mitigation is to make sure we don't abuse this capability
109 | and only introduce feature flags when all creative and genuine attempts to avoid
110 | a breaking change were exhausted.
111 |
112 | ## Discoverability
113 |
114 | If users wish to enable a flag in an existing project, they need a way to find
115 | out which flag to enable in their `cdk.json` and how.
116 |
117 | This drawback will be mitigated by:
118 |
119 | - [ ] Adding a documentation section about feature flags in the developer guide,
120 | pointing to the `cx-api/lib/features.ts` file as an index of feature
121 | flags.
122 | - [x] Announce the feature flag ID in our release notes under
123 | `BREAKING CHANGE: (under feature flag)`.
124 |
125 | ## Testing
126 |
127 | A feature flag is a system-level degree of freedom. Theoretically, every flag
128 | introduces another dimension in our entire test matrix. Without extensive
129 | tooling, it will be impossible to actually run our entire test suite against all
130 | permutations of feature flags.
131 |
132 | Unit tests across the framework will normally continue to define apps with all
133 | feature flags disabled (this is the default). In the case where a test depends
134 | on a feature flag being enabled, it will explicitly enable it when the `App`
135 | construct is defined through its `context` option.
136 |
137 | The feature itself will be tested in both enabled and disabled mode.
138 |
139 | In the meantime, we will not introduce tooling for matrix coverage due to its
140 | complexity and impact on build times and contributor experience. As long as we
141 | don't have a proliferation of flags, I believe this is a reasonable trade-off.
142 |
143 | ## Support
144 |
145 | When an issue is raised, we need to be able to reproduce it. Since feature flags
146 | can implicitly change how the CDK behaves, we need to know which features are
147 | enabled.
148 |
149 | To mitigate this risk we will:
150 |
151 | - [ ] Add feature flags to `cdk doctor` and update bug report template
152 | accordingly to request users to run `cdk doctor`.
153 |
154 | # Rationale and Alternatives
155 |
156 | We considered an alternative of "bundling" new capabilities under a single flag
157 | that specifies the CDK version which created the project, but this means that
158 | users won't have the ability to pick and choose which capabilities they want to
159 | enable in case they need them but don't want to take the risk of unexpected
160 | changes.
161 |
162 | The downside of the fine-grained approach is that it could result in a "blowing
163 | up" new `cdk.json` files in case there will be many new breaking capabilities
164 | between major releases. But this is hypothetical and even if this list ends up
165 | with 20 features before we release the next major version, I still think the
166 | benefits outweigh the risks of the alternative approach.
167 |
168 | # Adoption Strategy
169 |
170 | Most CDK users will likely not need to know about feature flags. Projects
171 | created before a feature flag was introduced will continue to behave in the same
172 | way and new projects created through `cdk init` will automatically get all new
173 | behaviors.
174 |
175 | The contributor experience for using feature flags will be documented in the
176 | contribution guide and will involve the following steps:
177 |
178 | 1. Seek the approval of a core team member that a feature flag can be used.
179 | - If the feature in question is being planned via an RFC, and the feature
180 | flag is contained in the proposal, core team member approval should include
181 | the feature flag.
182 | - If the feature is being tracked in a single issue without an RFC, approval
183 | should be indicated in this issue.
184 | 2. Define a new const under
185 | [cx-api/lib/features.ts](https://github.com/aws/aws-cdk/blob/main/packages/%40aws-cdk/cx-api/lib/features.ts)
186 | with the name of the context key that **enables** this new feature (for
187 | example, `ENABLE_STACK_NAME_DUPLICATES`). The context key should be in the
188 | form `module.Type:feature` (e.g. `@aws-cdk/core:enableStackNameDuplicates`).
189 | 3. Use `node.tryGetContext(cxapi.ENABLE_XXX)` to check if this feature is
190 | enabled in your code. If it is not defined, revert to the legacy behavior.
191 | 4. Add your feature flag to
192 | [cx-api/lib/future.ts](https://github.com/aws/aws-cdk/blob/main/packages/%40aws-cdk/cx-api/lib/future.ts).
193 | This map is inserted to generated `cdk.json` files for new projects created
194 | through `cdk init`.
195 | 5. In your PR title (which goes into CHANGELOG), add a `(behind feature flag)`
196 | suffix. e.g:
197 |
198 | ```
199 | fix(core): impossible to use the same physical stack name for two stacks (under feature flag)
200 | ```
201 |
202 | 6. Under `BREAKING CHANGES`, add a prefix `(under feature flag)` and the name of
203 | the flag in the postfix. For example:
204 |
205 | ```
206 | BREAKING CHANGE: (under feature flag) template file names for new projects created
207 | through "cdk init" will use the template artifact ID instead of the physical stack
208 | name to enable multiple stacks to use the same name (feature flag: @aws-cdk/core:enableStackNameDuplicates)
209 | ```
210 |
211 | # Unresolved questions
212 |
213 | I believe the biggest unresolved question is how many feature flags we will end
214 | up with until the next major version bump. We introduced a bit of process to
215 | require that feature flags will be approved by two core team members, and we
216 | will closely monitor this and reevaluate if we see a proliferation of flags.
217 |
218 | # Future Possibilities
219 |
220 | As a general rule, using a feature flag should be last resort in the case where
221 | it is impossible to implement backwards compatibility. A feature flag is likely
222 | to get less usage and therefore mature slower, so it's important to make sure we
223 | don't abuse this pattern.
224 |
225 | Still, a valid concern is that we end up with too many feature flags between
226 | major releases (I would say >20 is too many), in which case it might be required
227 | to offer additional tools to manage and discover them.
228 |
229 | Here are a few ideas that came up as we designed this. All of these can be
230 | implemented on top of the proposed mechanism, and should be considered if needed
231 | in the future (as well as any other idea of course):
232 |
233 | - Introduce a CLI command to list all flags and enable/disable them in your
234 | `cdk.json`.
235 | - Aggregate all flags in groups so it will be easier to enable many of them.
236 | - Define a flag that will allow users to say "I want all features up until a
237 | certain CDK version" (basically enables all features that were available when
238 | the version was released).
239 |
--------------------------------------------------------------------------------
/text/0063-precreated-roles.md:
--------------------------------------------------------------------------------
1 | # Supporting precreated roles in CDK
2 |
3 | * **Original Author(s):**: @rix0rrr
4 | * **Tracking Issue**: #63
5 | * **API Bar Raiser**: @corymhall
6 |
7 | Make CDK work for environments where developers are not allowed to create IAM roles.
8 |
9 | ## Abstract
10 |
11 | One of the beloved features of CDK is that it automatically creates execution roles and instance roles for all infrastructure that requires them, and
12 | assigns least-privilege permissions to these roles based on declared intent of the related infrastructure. However, some developers are working in
13 | environments where central IT operators do not allow the application developers to create any roles, to avoid potential risky situations where these
14 | roles might be misconfigured with overly broad permissions.
15 |
16 | In those environments, using CDK is a chore: every construct and integration needs to be inspected for an optional `role?: IRole` parameter, which
17 | needs to be passed an instance of `Role.fromRoleName()`, passing the roles that have been precreated for the application developers by the IT
18 | operators. These referenced Roles need to be threaded through the entire construct tree to get to the right location, and their policies need to be
19 | determined using trial-and-error.
20 |
21 | This RFC proposes a mechanism by which roles creation is replaced with an offline reporting mechanism. Application developers write their application
22 | as usual, using standard constructs and standard `grant` calls. Then, when this feature is switched on, `Role` resources are no longer synthesized.
23 | Instead, a report is generated containing all Roles and policies that *would* have been created, and synthesis fails. This report can be given to the
24 | central IT organization, who can create and configure the roles as reported. Afterwards, the app developer plugs in the names of the roles that the IT
25 | organization created, and synthesis succeeds (assuming all would-be Roles have a name assigned).
26 |
27 | ## README: prevent creation of IAM Roles
28 |
29 | In normal operation, L2 constructs in the AWS Construct Library will automatically create IAM Roles for resources that require them (like Execution
30 | Roles for AWS Lambda Functions and Instance Roles for EC2 instances), and assign least-privilege permissions based on the `grant`s and integrations
31 | you define.
32 |
33 | If you work in an environment that does not allow definition of IAM Roles by application developers, you can disable this behavior by calling
34 | `iam.Role.customizeRoles()` with `preventSynthesis: true` on the scope at which you want to prevent roles from being created, before defining the
35 | infrastructure of your application. Example:
36 |
37 | ```ts
38 | import { App, aws_iam as iam } from 'aws-cdk-lib';
39 | import { MyStack } from './my-stack';
40 |
41 | const app = new App();
42 |
43 | // Disable synthesis of IAM Roles in the entire app
44 | iam.Role.customizeRoles(app, {
45 | preventSynthesis: true,
46 | });
47 |
48 | new MyStack(app, 'MyStack', {
49 | // ...
50 | });
51 | app.synth();
52 | ```
53 |
54 | The next time you run `cdk synth`, a file named `cdk.out/iam-roles.txt` will be created, containing a report of all roles that the CDK app would have
55 | created, and the permissions that would be added to them (based on `grant` methods).
56 |
57 | For example, `iam-roles.txt` might look like this:
58 |
59 | ```text
60 | <missing role> (/MyStack/MyLambda/Role)
61 | AssumeRole Policy:
62 | {
63 | "Effect": "Allow",
64 | "Action": ["sts:AssumeRole"],
65 | "Principal": { "Service": "lambda.amazonaws.com" }
66 | }
67 |
68 | Managed Policies:
69 | arn:(PARTITION):iam:::aws/policy/AWSLambdaBasicExecutionRole
70 |
71 | Identity Policy:
72 | {
73 | "Effect": "Allow",
74 | "Action": ["s3:GetObject"],
75 | "Resource": ["arn:(PARTITION):s3:(REGION):(ACCOUNT):(/MyStack/MyBucket.Arn)/*"]
76 | }
77 | ```
78 |
79 | If there are any roles marked as `<missing role>` (which means there is no known role name associated with them yet), synthesis will fail.
80 |
81 | Give this report to your IT operators, and ask them to create roles with the required permissions. The policies may refer to resources which have yet
82 | to be created, and therefore have no resource names to refer to in the policies. Your IT operators will need to use wildcards or set up some form of
83 | tag-based access control when they build the permissions for these roles.
84 |
85 | > Note: if you don't use this workflow and let CDK generate IAM Roles, it will generate least-privilege permissions Roles that can only access the
86 | > resources they need to. This avoids the need to use wildcard-based pre-created Role permissions. If you can, let CDK generate Roles and use
87 | > *Permissions Boundaries* to address concerns of privilege escalation. The feature described in this section is intended only to allow usage of CDK
88 | > in environments where the IAM Role creation process cannot be changed.
89 |
90 | When your IT department comes back to you, they will have created a role with a known name. For example, they might have created a role named `LambdaRole`.
91 | Plug that name into your `customizeRoles`:
92 |
93 | ```ts
94 | iam.Role.customizeRoles(app, {
95 | preventSynthesis: true,
96 | usePrecreatedRoles: {
97 | 'MyStack/MyLambda/Role': 'LambdaRole',
98 | },
99 | });
100 | ```
101 |
102 | On the next synthesis, the given existing Role is automatically referenced in places where originally an IAM Role would be created. When all Roles
103 | that would be created have a precreated name assigned (and there are no precreated names specified that do not correspond to actual Role constructs),
104 | synthesis will succeed.
105 |
106 | You do not need to create a separate Role for each construct: it is possible to supply the same role name for multiple constructs, as long as that
107 | single role has all the required permissions.
108 |
109 | ---
110 |
111 | Ticking the box below indicates that the public API of this RFC has been
112 | signed-off by the API bar raiser (the `status/api-approved` label was applied to the
113 | RFC pull request):
114 |
115 | ```
116 | [ ] Signed-off by API Bar Raiser @xxxxx
117 | ```
118 |
119 | ## Internal FAQ
120 |
121 | ### Why are we doing this?
122 |
123 | This change is intended to help developers that want to use CDK in environments where they do not have permissions to manipulate IAM resources. This change
124 | makes it feasible to build a workflow where app developers can use CDK to build their application as normal, while delegating the IAM role creation to
125 | a different team, with minimal impact on source code and development workflow.
126 |
127 | ### Why should we _not_ do this?
128 |
129 | It might be that the envisioned workflow doesn't match the actual workflow at the companies we're targeting, or that
130 | the limitation around identifying individual resources makes the workflow ineffective or unacceptable (see next section).
131 |
132 | ### What are the limitations of this feature?
133 |
134 | - If resources are created as part of the deployment, they will typically have generated identifiers. The names are not predictable, and hence IT
135 | operators will need to grant `*` permissions, which they will probably feel uncomfortable with.
136 | - We can potentially replicate the CloudFormation physical ID generation logic to come up with a wildcard like
137 | `"arn:PARTITION:s3:REGION:ACCOUNT:mystack-mybucket13437-*"`, but it's hard to say how reliable that will be. This strategy can not and will not
138 | work for resources that have unique IDs instead of names.
139 | - IT operators may impose naming scheme requirements to get around this limitation, preventing future resource replacement. This does not help the
140 | CDK as even if we know the resource name we don't currently track the known resource name to downstream resources (see below).
141 | - Some construct libraries go and create policies directly (without going through `role.addToPrincipalPolicy()`); we will not be able to capture these
142 | permissions, and we will not be able to prevent their creation.
143 | - Developers will need to call `grant` methods and `addToPrincipalPolicy()` in their code, to get the most out of the automatic policy report.
144 | However, the permissions will never actually be applied so it's hard to know if they are correct.
145 |
146 | #### What do you mean we don't track known resource names?
147 |
148 | Given the following:
149 |
150 | ```ts
151 | const bucket = new Bucket(this, 'MyBucket', { bucketName: 'my-bucket' });
152 | const lambda = new Lambda(this, 'MyLambda', {
153 | environment: {
154 | BUCKET_NAME: bucket.bucketName,
155 | },
156 | });
157 | ```
158 |
159 | We have the choice of rendering one of the following:
160 |
161 | ```
162 | (1)
163 | MyLambda:
164 | Properties:
165 | Environment:
166 | - Name: BUCKET_NAME
167 | Value: { Ref: 'MyBucket' }
168 |
169 | ----------------------------------------------
170 |
171 | (2)
172 | MyLambda:
173 | Properties:
174 | Environment:
175 | - Name: BUCKET_NAME
176 | Value: 'my-bucket'
177 | ```
178 |
179 | We currently render variant `(1)`, because: even though we *know* that `bucketName` has the value `my-bucket`, the `{ Ref }` has the additional effect
180 | of implying a dependency from `MyLambda -> MyBucket`.
181 |
182 | If we were to render the value `my-bucket` directly, we would need to recreate that dependency by adding a `DependsOn` field to `MyLambda`. Because of
183 | the way the CDK token system works internally, that is currently not available, and not easy to add.
184 |
185 | The upshot of this is that even if IT operators force hard-naming resources (disregarding all the operational downsides of doing so), we have no way
186 | of easily tracking that resource name to the policies.
187 |
188 | ### What is the technical solution (design) of this feature?
189 |
190 | This section is mostly interesting for implementors:
191 |
192 | * `customizeRoles` will set a **context key** at the given scope. The exact name and value of this context key are an implementation detail and will
193 | not be made public.
194 | * `new Role()` will check for this context key. If found, `new CfnRole()` will *not* be called; instead, if a name is available for the current
195 | construct, `iam.Role.fromRoleName()` will be used instead. A validation is added to the Role which will fail if no precreated name is assigned for
196 | it (meaning errors are reported as construct tree errors).
197 | * When operating in "no role creation" mode, roles will synthesize their policy documents to a report file.
198 | * Some of the logic will have to be reimplemented for the Custom Resource framework in `@aws-cdk/core`, which creates Roles but doesn't use `iam.Role`
199 | (but rather `CfnResource`).
200 | * `customizeRoles` takes either *absolute* or *relative* construct paths to the scope it's invoked on. This makes it possible to set it on production
201 | stacks but not development stacks (for example).
202 | * `customizeRoles` will throw if any of the paths it is invoked on already exist, or if no `iam.Role` creation was prevented. This should help find
203 | instances of people calling it *after* application construction, instead of before.
204 | * Tokens are not supported.
205 | * We should be able to detect `new iam.Policy()` as well, as I believe it calls `role.attachPolicy()`. We record the policy and prevent its synthesis
206 | of `CfnPolicy`.
207 | * I'm not sure we will be able to detect `new iam.ManagedPolicy()`.
208 |
209 | ### What alternative solutions did you consider?
210 |
211 | This solution has the advantage of being relatively easy to implement, because it just automates some work that developers would otherwise have to
212 | do manually (communicate about required permissions, reference them in their code). However, it has the disadvantage of not being able to generate
213 | least-privilege permissions very well.
214 |
215 | Potential alternatives might be better, but would require buy-in from the IT organization to change processes:
216 |
217 | - Permissions Boundaries.
218 | - CloudFormation Hooks which compare newly requested IAM Roles and policies to a pre-approved set (this could be treated as an extension of the
219 | feature proposed in this RFC).
220 |
221 | ### What is the high-level project plan?
222 |
223 | MVP
224 |
225 | - We should be able to implement the limited version of this feature (without support for tracking resource names) pretty easily. We will let
226 | developers use that feature for a while and see how it goes.
227 | - `customizeRoles` takes an options object on purpose so that if we need to change behavior, we can add more optional fields and flags.
228 | - Initial version will only support role names but importing by ARN should be trivial enough to add by testing for the presence of a `:` in the
229 | given role name.
230 |
231 | POTENTIAL EXTENSIONS
232 |
233 | - Do the same for Security Groups.
234 | - We may extend by generating a machine-readable report in addition to a text file so organizations can build their
235 | own automation around it.
236 | - We may implement resource name tracking later (although this will be a lot of work) to generate more targeted policies, if enough people ask for it.
237 | - Potentially we may add a callback interface so that organizations will be able to control their own Role handlers, generate whatever formalism they
238 | desire.
239 | - In a distant post-Launchpads future, we may extend this feature with an approval workflow that gets validated in CloudFormation hooks.
240 |
--------------------------------------------------------------------------------
/text/0064-asset-garbage-collection.md:
--------------------------------------------------------------------------------
1 | # Garbage Collection for Assets
2 |
3 | * **Original Author(s):**: @eladb, @kaizencc
4 | * **Tracking Issue**: #64
5 | * **API Bar Raiser**: @rix0rrr
6 |
7 | The asset garbage collection CLI will identify and/or delete unused CDK assets,
8 | resulting in smaller bucket/repo size and less cost for customers.
9 |
10 | ## Working Backwards
11 |
12 | **CHANGELOG**:
13 |
14 | - feat(cli): garbage collect s3 assets (under --unstable flag)
15 | - feat(cli): garbage collect ecr assets (under --unstable flag)
16 |
17 | **Help**:
18 |
19 | ```shell
20 | ➜ cdk gc --help
21 | cdk gc [ENVIRONMENT...]
22 |
23 | Finds and deletes all unused S3 and ECR assets in the ENVIRONMENT
24 |
25 | Options:
26 | --type=[s3|ecr|all] filters for type of asset
27 | --action=[print|tag|delete-tagged|full] type of action to perform on unused assets
28 | --rollback-buffer-days=number number of days an asset should be isolated before deletion
29 |    --created-buffer-days=number    number of days old an asset must be before it's eligible for deletion
30 | --bootstrap-stack-name=string name of a custom bootstrap stack if not CDK Toolkit
31 | --confirm=boolean confirm with user before deleting assets
32 |
33 | Examples:
34 | cdk gc
35 | cdk gc aws://ACCOUNT/REGION
36 | cdk gc --type=s3 --action=delete-tagged
37 | ```
38 |
39 | **README:**
40 |
41 | > [!CAUTION]
42 | > CDK Garbage Collection is under development and therefore must be opted in via the `--unstable` flag: `cdk gc --unstable=gc`.
43 |
44 | `cdk gc` garbage collects unused assets from your bootstrap bucket via the following mechanism:
45 |
46 | - for each object in the bootstrap S3 Bucket, check to see if it is referenced in any existing CloudFormation templates
47 | - if not, it is treated as unused and gc will either tag it or delete it, depending on your configuration.
48 |
49 | The high-level mechanism works identically for unused assets in bootstrapped ECR Repositories.
50 |
51 | The most basic usage looks like this:
52 |
53 | ```console
54 | cdk gc --unstable=gc
55 | ```
56 |
57 | This will garbage collect all unused assets in all environments of the existing CDK App.
58 |
59 | To specify one type of asset, use the `type` option (options are `all`, `s3`, `ecr`):
60 |
61 | ```console
62 | cdk gc --unstable=gc --type=s3
63 | ```
64 |
65 | Otherwise `cdk gc` defaults to collecting assets in both the bootstrapped S3 Bucket and ECR Repository.
66 |
67 | `cdk gc` will garbage collect S3 and ECR assets from the current bootstrapped environment(s) and immediately
68 | delete them. Note that, since the default bootstrap S3 Bucket is versioned, object deletion will be handled by
69 | the lifecycle policy on the bucket.
70 |
71 | Before we begin to delete your assets, you will be prompted:
72 |
73 | ```console
74 | cdk gc --unstable=gc
75 |
76 | Found X objects to delete based off of the following criteria:
77 | - objects have been isolated for > 0 days
78 | - objects were created > 1 days ago
79 |
80 | Delete this batch (yes/no/delete-all)?
81 | ```
82 |
83 | Since it's quite possible that the bootstrap bucket has many objects, we work in batches of 1000 objects or 100 images.
84 | To skip the prompt either reply with `delete-all`, or use the `--confirm=false` option.
85 |
86 | ```console
87 | cdk gc --unstable=gc --confirm=false
88 | ```
89 |
90 | If you are concerned about deleting assets too aggressively, there are multiple levers you can configure:
91 |
92 | - rollback-buffer-days: this is the number of days an asset has to be marked as isolated before it is eligible for deletion.
93 | - created-buffer-days: this is the number of days an asset must live before it is eligible for deletion.
94 |
95 | When using `rollback-buffer-days`, instead of deleting unused objects, `cdk gc` will tag them with
96 | today's date instead. It will also check if any objects have been tagged by previous runs of `cdk gc`
97 | and delete them if they have been tagged for longer than the buffer days.
98 |
99 | When using `created-buffer-days`, we simply filter out any assets that have not persisted that number
100 | of days.
101 |
102 | ```console
103 | cdk gc --unstable=gc --rollback-buffer-days=30 --created-buffer-days=1
104 | ```
105 |
106 | You can also configure the scope that `cdk gc` performs via the `--action` option. By default, all actions
107 | are performed, but you can specify `print`, `tag`, or `delete-tagged`.
108 |
109 | - `print` performs no changes to your AWS account, but finds and prints the number of unused assets.
110 | - `tag` tags any newly unused assets, but does not delete any unused assets.
111 | - `delete-tagged` deletes assets that have been tagged for longer than the buffer days, but does not tag newly unused assets.
112 |
113 | ```console
114 | cdk gc --unstable=gc --action=delete-tagged --rollback-buffer-days=30
115 | ```
116 |
117 | This will delete assets that have been unused for >30 days, but will not tag additional assets.
118 |
119 | ### Theoretical Race Condition with `REVIEW_IN_PROGRESS` stacks
120 |
121 | When gathering stack templates, we are currently ignoring `REVIEW_IN_PROGRESS` stacks as no template
122 | is available during the time the stack is in that state. However, stacks in `REVIEW_IN_PROGRESS` have already
123 | passed through the asset uploading step, where it either uploads new assets or ensures that the asset exists.
124 | Therefore it is possible the assets it references are marked as isolated and garbage collected before the stack
125 | template is available.
126 |
127 | Our recommendation is to not deploy stacks and run garbage collection at the same time. If that is unavoidable,
128 | setting `--created-buffer-days` will help as garbage collection will avoid deleting assets that are recently
129 | created. Finally, if you do result in a failed deployment, the mitigation is to redeploy, as the asset upload step
130 | will be able to reupload the missing asset.
131 |
132 | In practice, this race condition is only for a specific edge case and unlikely to happen but please open an
133 | issue if you think that this has happened to your stack.
134 |
135 | ---
136 |
137 | #
138 |
139 | Ticking the box below indicates that the public API of this RFC has been
140 | signed-off by the API bar raiser (the `api-approved` label was applied to the
141 | RFC pull request):
142 |
143 | ```
144 | [x] Signed-off by API Bar Raiser @rix0rrr
145 | ```
146 |
147 | ## Public FAQ
148 |
149 | ### What are we launching today?
150 |
151 | The `cdk gc` command features, with support for garbage collection of unused S3 and ECR
152 | assets.
153 |
154 | ### Why should I use this feature?
155 |
156 | Currently unused assets are left in the S3 bucket or ECR repository and contribute
157 | additional cost for customers. This feature provides a swift way to identify and delete
158 | unutilized assets.
159 |
160 | ### How does the command identify unused assets?
161 |
162 | `cdk gc` will look at all the deployed stacks in the environment and store the
163 | assets that are being referenced by these stacks. All assets that are not reached via
164 | tracing are determined to be unused.
165 |
166 | #### A note on pipeline rollbacks and the `--rollback-buffer-days` option
167 |
168 | In some pipeline rollback scenarios, the default `cdk gc` options may be overzealous in
169 | deleting assets. A CI/CD system that offers indeterminate rollbacks without redeploying
170 | is expecting that previously deployed assets still exist. If `cdk gc` is run between
171 | the failed deployment and the rollback, the asset will be garbage collected. To mitigate
172 | this, we recommend the following setting: `--rollback-buffer-days=30`. This will ensure
173 | that all assets spend 30 days tagged as "unused" _before_ they are deleted, and should
174 | guard against even the most pessimistic of rollback scenarios.
175 |
176 | 
177 |
178 | ## Internal FAQ
179 |
180 | > The goal of this section is to help decide if this RFC should be implemented.
181 | > It should include answers to questions that the team is likely to ask. Contrary
182 | > to the rest of the RFC, answers should be written "from the present" and
183 | > likely discuss design approach, implementation plans, alternative considered
184 | > and other considerations that will help decide if this RFC should be
185 | > implemented.
186 |
187 | ### Why are we doing this?
188 |
189 | As customers continue to adopt the CDK and grow their CDK applications over time, their
190 | asset buckets/repositories grow as well. At least one customer has
191 | [reported]() 0.5TB of
192 | assets in their staging bucket. Most of these assets are unused and can be safely removed.
193 |
194 | ### Why should we _not_ do this?
195 |
196 | There is risk of removing assets that are in use, providing additional pain to the
197 | customer. See [this]()
198 | github comment.
199 |
200 | ### What is the technical solution (design) of this feature?
201 |
202 | 
203 |
204 | At a high level, garbage collection consists of two parallel processes - refreshing CFN stack templates
205 | in the background and garbage collecting objects/images in the foreground. CFN stack templates are queried
206 | every ~5 minutes and stored in memory. Then we go through the bootstrapped bucket/repository and check if
207 | the hash in the object's key exists in _any_ template.
208 |
209 | If `--rollback-buffer-days` is set, we tag the object as isolated, otherwise we delete it immediately.
210 | Also depending on if `--rollback-buffer-days` is set, we check if any isolated objects have previously
211 | been marked as isolated and are ready to be deleted, and if any in-use assets are erroneously marked
212 | as isolated that should be unmarked.
213 |
214 | > Why are we storing the entire template in memory and not just the asset hashes?
215 |
216 | We don't expect that the bottleneck for `cdk gc` is going to be memory storage but rather
217 | the (potentially) large number of AWS API calls. Storing hashes alone opens up the possibility
218 | of missing an asset hash and inadvertently deleting something in-use.
219 |
220 | > What happens if we run `cdk deploy` (or `cdk destroy`) while `cdk gc` is in progress?
221 |
222 | We mitigate this issue with the following redundancies:
223 |
224 | - we refresh the in-memory state of CloudFormation Stacks periodically to catch any new or updated stacks
225 | - as a baseline, we do not delete any assets that are created after `cdk gc` is started (and this can
226 | be increased via the `--created-buffer-days` option)
227 |
228 | > Are there any race conditions between the refresher and garbage collection?
229 | Yes, a small one. Stacks in `REVIEW_IN_PROGRESS` do not yet have a template to query, but these stacks
230 | have already gone through asset uploading. There is a theoretical situation where a previously isolated
231 | asset is referenced by a `REVIEW_IN_PROGRESS` stack, and since we are unaware that that is happening,
232 | we may delete the asset in the meantime. In practice though, I am not expecting this to be a frequent
233 | scenario.
234 |
235 | ### Is this a breaking change?
236 |
237 | No.
238 |
239 | ### What alternative solutions did you consider?
240 |
241 | Eventually, a zero-touch solution where garbage collection makes scheduled runs in the
242 | background is what users would want. However, `cdk gc` would be the building block for the
243 | automated garbage collection, so it makes sense to start with a CLI experience and iterate
244 | from there. After `cdk gc` stabilizes, we can vend a construct that runs periodically and
245 | at some point add this to the bootstrapping stack.
246 |
247 | ### What are the drawbacks of this solution?
248 |
249 | The main drawback is that we will own a CLI command capable of deleting assets in customer
250 | accounts. They will rely on the correctness of the command to ensure that we are not deleting
251 | in-use assets and crashing live applications.
252 |
253 | ### What is the high-level project plan?
254 |
255 | `cdk gc` will trace all assets referenced by deployed stacks in the environment and delete
256 | the assets that were not reached. As for how to implement this trace, I have not yet
257 | settled on a plan. The command will focus on garbage collecting v2 assets, where there is a
258 | separate S3 bucket and ECR repository in each bootstrapped account. Preliminary thoughts are
259 | that we can either search for a string pattern that represents an asset location or utilize
260 | stack metadata that indicates which assets are being used.
261 |
262 | ### Are there any open issues that need to be addressed later?
263 |
264 | No
265 |
--------------------------------------------------------------------------------
/text/0077-import-external-resources.md:
--------------------------------------------------------------------------------
1 | ---
2 | rfc pr: [#266](https://github.com/aws/aws-cdk-rfcs/pull/266)
3 | tracking issue: https://github.com/aws/aws-cdk-rfcs/issues/77
4 | ---
5 |
6 | # On-Demand import of Resources
7 |
8 | Not all L1 resources need to be vended via the AWS Construct Library. The
9 | user can generate constructs on demand based on imported type definitions
10 | from a variety of sources.
11 |
12 | ## Working Backwards
13 |
14 | ### README
15 |
16 | The AWS Construct Library ships with CloudFormation resources and
17 | higher-level resources for all of AWS' services. Using the built-in
18 | classes gives you the advantage of IDE AutoComplete and type checking
19 | when defining them.
20 |
21 | There are more kinds of resources and even complete applications you can
22 | deploy via CloudFormation. These include:
23 |
24 | - Applications from the Serverless Application Repository
25 | - Applications from the AWS Service Catalog
26 | - Non-AWS resources, or your own custom resources registered to your account
27 | in the AWS CloudFormation Registry.
28 |
29 | The CloudFormation resources available from these locations are not in the
30 | AWS Construct Library because they're (1) not managed by AWS and (2) the
31 | list that's available to you depends on the configuration of your account.
32 |
33 | However, you can get the same benefits of AutoComplete and type checking on
34 | these resources as well by **importing** them into your CDK project.
35 |
36 | Importing is done by running the `cdk import` command. The exact syntax of
37 | the command depends on the type of resource you're trying to import.
38 |
39 | ```shell
40 | # To import from the CloudFormation Registry, pass the type name:
41 | $ cdk import cloudformation Datadog::Monitors::Monitor
42 | # ...or pass an entire scope:
43 | $ cdk import cloudformation Datadog::Monitors
44 |
45 | # To import applications from the Serverless Application Repository, pass the ARN:
46 | $ cdk import sar arn:aws:serverlessrepo:us-east-1:464622532012:applications/Datadog-Log-Forwarder
47 |
48 | # To import applications from the Service Catalog, pass the product ID
49 | $ cdk import servicecatalog prod-wxkgnkcrznu5i
50 | # ...or don't pass anything to import all available products
51 | $ cdk import servicecatalog
52 | ```
53 |
54 | `cdk import` will create a set of source files for you in the `imports/` directory,
55 | which you should commit to version control. After that, you can use them in your
56 | application as usual.
57 |
58 | If you want to update to the latest version of the definition, you can run the same
59 | `cdk import` command again. It will show you the changes to the resource, before
60 | asking you to confirm to overwrite the existing source files.
61 |
62 | If you happen to be working in Java, C#, Python or Go, `cdk import` will build a
63 | jsii bundle from the generated sources and automatically generate the corresponding
64 | binding sources as well.
65 |
66 | The same workflow applies if you are the vendor of 3rd party constructs that
67 | are available via the CloudFormation registry. You can use `cdk import` to
68 | generate L1s, complement them with L2s and vend them both together in a
69 | construct library to your customers.
70 |
71 | ## FAQ
72 |
73 | ### What are we launching today?
74 |
75 | A new subcommand to the CLI that will perform the necessary AWS calls to
76 | import resource schemas on demand from 3 additional resource sources, and
77 | generate constructs from those definitions.
78 |
79 | ### Why should I use this feature?
80 |
81 | You can use this feature to gain the same IDE support for CloudFormation
82 | resources that are specific to your account as exists for those that are
83 | available to all AWS accounts.
84 |
85 | ## Internal FAQ
86 |
87 | ### Why are we doing this?
88 |
89 | To power up the CDK and enable its use in more scenarios.
90 |
91 | To smoothly interoperate with other CloudFormation abstraction mechanisms that exist
92 | so customers don't have to choose.
93 |
94 | ### Why should we _not_ do this?
95 |
96 | Customers can achieve the end result already, albeit in an untyped way, by
97 | instantiating the `CfnResource` construct directly with a big bag of strings.
98 |
99 | The target market might not be large enough to justify the effort.
100 |
101 | It might confuse our users even more about what CloudFormation abstraction mechanisms
102 | they are *supposed* to use.
103 |
104 | ### What changes are required to enable this change?
105 |
106 | We need to be able to parse and diff and generate code based on JSON Schema
107 | sources (see #264). The tool that we use to generate this code (a la `cfn2ts`)
108 | needs to be public.
109 |
110 | To support SAR and Service Catalog, we convert the API responses into JSON
111 | schema so that we can codegen using the same tool; the class generation
112 | needs some customization as well.
113 |
114 | A new CLI subcommand gets added, which performs two major functions:
115 |
116 | - Import a schema from somewhere, convert to JSON schema if necessary.
117 | - Use the schema to perform code generation for the CloudFormation resource sources
118 | in the project directory.
119 | - It needs to be able to detect (in some way) what programming language the
120 | current project is in (TypeScript, Java, ...), and if a non-TypeScript
121 | language is detected `jsii-srcmak` can be used to build a local jsii
122 | package for the generated resources.
123 |
124 | `jsii-srcmak` needs to be brought into maintenance of the CDK project.
125 |
126 | ### Is this a breaking change?
127 |
128 | No.
129 |
130 | ### What are the drawbacks of this solution?
131 |
132 | Nothing dramatic. We may have to figure out how to deal with upgrades in an
133 | automatic way. Would be good if we could auto-refresh all previously imported
134 | constructs (would probably look like writing a config file containing the
135 | ids/ARNs of all imported resources).
136 |
137 | ### What alternative solutions did you consider?
138 |
139 | Public SAR applications could be vended on our end via a library.
140 |
141 | For privately published applications there is nothing else we can really do other
142 | than generate code on-demand.
143 |
144 | ### What is the high level implementation plan?
145 |
146 | 1. Add the CLI feature for CloudFormation registry resources and TypeScript users only.
147 | This should be fairly trivial as it's mostly just exposing the code we already have
148 | in place for CDK L1 generation via a CLI interface.
149 | 2. This can be released and previewed already.
150 | 3. During developer preview, we add in support for other languages.
151 | 4. During developer preview, add in support for additional schema sources.
152 | First SAR, then Service Catalog.
153 |
154 | ### Are there any open issues that need to be addressed later?
155 |
156 | Do we want to support `fromCloudFormation()` (used by `@aws-cdk/cfn-include`)
157 | for these resources? The implementation of `cfn-include` will become quite a
158 | lot trickier if we do.
159 |
160 | ## Appendix
161 |
162 | ### CloudFormation Registry
163 |
164 | All available types are available via API:
165 |
166 | ```shell
167 | $ aws cloudformation list-types --visibility PUBLIC
168 | $ aws cloudformation describe-type --arn arn:aws:cloudformation:eu-west-1::type/resource/AWS-CloudFront-RealtimeLogConfig
169 | ...Output contains a CloudFormation Registry schema, which is a superset of JSON Schema...
170 | ```
171 |
172 | Types discovered this way map directly to an L1.
173 |
174 | ### Serverless Application Repository
175 |
176 | The API is not documented on the AWS Docs website, but available via the AWS CLI:
177 |
178 | ```shell
179 | # As far as I am aware there is no way to search except by using the console
180 |
181 | # Get access to a single SAR template, given its ARN
182 | $ aws serverlessrepo get-application --application-id arn:aws:serverlessrepo:us-east-1:464622532012:applications/Datadog-Log-Forwarder
183 | ```
184 |
185 | We obtain the schema by parsing the template.
186 |
187 | Applications discovered this way map to the following CloudFormation template:
188 |
189 | ```yaml
190 | Resources:
191 | DatadogLogForwarder:
192 | Type: AWS::Serverless::Application
193 | Properties:
194 | Location:
195 | ApplicationId: arn:aws:serverlessrepo:us-east-1:464622532012:applications/Datadog-Log-Forwarder
196 | SemanticVersion: 2.4.1
197 | ```
198 |
199 | ### Service Catalog
200 |
201 | Service Catalog allows system administrators to define applications using CloudFormation
202 | and control what IAM identities in what accounts are allowed to deploy them.
203 |
204 | There are standard "portfolios" with built-in templates (AWS QuickStarts, AWS Solutions solutions, reference
205 | architectures etc)
206 |
207 | ```shell
208 | # List all products made accessible to us by an admin
209 | $ aws servicecatalog search-products
210 |
211 | # Get the schema
212 | $ aws servicecatalog describe-product --id prod-wxkgnkcrznu5i
213 | $ aws servicecatalog describe-provisioning-artifacts --product-id prod-wxkgnkcrznu5i --provisioning-artifact-id pa-sa7esaptaxllk
214 | $ aws servicecatalog describe-provisioning-parameters --product-id prod-wxkgnkcrznu5i --provisioning-artifact-id pa-sa7esaptaxllk --path-id XXXX
215 | ```
216 |
217 | We obtain the schema by parsing the output of `DescribeProvisioningParameters`.
218 |
219 | Applications discovered this way map to the following CloudFormation template:
220 |
221 | ```yaml
222 | Resources:
223 | MongoDB:
224 | Type: AWS::ServiceCatalog::CloudFormationProvisionedProduct
225 | Properties:
226 | ProductId: prod-wxkgnkcrznu5i
227 | ProvisioningArtifactId: pa-sa7esaptaxllk
228 | ProvisioningParameters:
229 | - ....
230 | ```
231 |
--------------------------------------------------------------------------------
/text/0092-asset-publishing.md:
--------------------------------------------------------------------------------
1 | # cdk-assets
2 |
3 | `cdk-assets` is a tool in the AWS CDK toolchain responsible to package and
4 | publish assets as part of the deployment process of CDK applications.
5 |
6 | This document specifies the requirements for this tool derived from the
7 | [continuous delivery design document](./continuous-delivery.md).
8 |
9 | ## Assumptions
10 |
11 | - Similarly to any resource defined through code and managed through
12 | CloudFormation, we are not attempting to protect assets from manual tampering
13 | by users.
14 |
15 | ## Asset Manifest
16 |
17 | The main input to `cdk-assets` is a JSON file called `assets.json` which
18 | includes a manifest of assets.
19 |
20 | The manifest lists all assets and for each asset it describes the asset
21 | **source** (with instructions on how to package the asset if needed) and a list
22 | of **destinations**, which are locations into which this asset needs to be
23 | published.
24 |
25 | The main components of the assets manifest are:
26 |
27 | - **Types:** there are currently two supported types: files and docker images.
28 | Files are uploaded to an Amazon S3 bucket and docker images are pushed to an
29 | Amazon ECR repository.
30 |
31 | - **Identifiers:** assets are identified throughout the system via a unique
32 | identifier (the key in the `files` and `images` map). This identifier is based
33 | on the sha256 of the source (the contents of the file or directory) and will
34 | change only if the source changes. It can be used for local caching and
35 | optimization purposes. For example, a zip of a directory can be stored in a
36 | local cache by this identifier to avoid duplicate work.
37 |
38 | - **Sources:** the `source` information for each asset defines the file or
39 | directory (relative to the directory of `assets.json`), and additional
40 | **packaging** instructions, such as whether to create a zip file from a
41 | directory (for file assets) or which options to pass to `docker build`.
42 |
43 | - **Destinations:** describe where the asset should be published. At a minimum,
44 | for file assets, it includes the S3 bucket and object key and for docker
45 | images it includes the repository and image names. A destination may also
46 | indicate that an IAM role must be assumed in order to support cross
47 | environment publishing.
48 |
49 | NOTES:
50 |
51 | - **Denormalization:** destinations are intentionally denormalized in order to
52 | keep the logic of where assets are published at the application or framework
53 | level and not in this tool. For example, consider a deployment system which
54 | requires that all assets are always published to the same location, and then
55 | replicated through some other means to their actual consumption point.
56 | Alternatively, a user may have unique security requirements that will require
57 | certain assets to be stored in dedicated locations (e.g. with a specific key)
58 | and others in a different location, even if they all go to the same
59 | environment. Therefore, this tool should not take any assumptions on where
60 | assets should be published besides the exact instructions in this file.
61 | - **Environment-agnostic:** In order to allow assets to be used in
62 | environment-agnostic stacks, `assets.json` will support two simple
63 | substitutions `${AWS::AccountId}` and `${AWS::Region}` which will be replaced
64 | with the currently configured account/region (alternatively, we can also
65 | decide to support CloudFormation intrinsic functions and pseudo references).
66 | The "current" account and region will always refer to the one derived from the
67 | CLI configuration even if `assets.json` instructs to assume a role from a
68 | different account.
69 |
70 | Here is the complete manifest file schema in typescript:
71 |
72 | ```ts
73 | interface AssetManifest {
74 | readonly version: 'assets-1.0';
75 | readonly files?: { [id: string]: FileAsset };
76 | readonly images?: { [id: string]: ImageAsset };
77 | }
78 |
79 | interface FileAsset {
80 | readonly source: FileAssetSource;
81 | readonly destinations: FileAssetDestination[];
82 | }
83 |
84 | interface ImageAsset {
85 | readonly source: ImageAssetSource;
86 | readonly destinations: ImageAssetDestination[];
87 | }
88 |
89 | interface FileAssetSource {
90 | readonly file: string; // file or directory name, relative to basedir
91 | readonly packaging?: FileAssetPackaging; // packaging (default "FILE")
92 | }
93 |
94 | enum FileAssetPackaging {
95 | FILE = 'file', // just upload "file" as-is
96 | ZIP_DIRECTORY = 'zip', // zip the directory and then upload
97 | }
98 |
99 | interface FileAssetDestination {
100 | readonly assumeRoleArn?: string; // iam role to assume
101 | readonly assumeRoleExternalId?: string; // external id to pass to assume-role
102 | readonly region: string;
103 | readonly bucketName: string;
104 | readonly objectKey: string;
105 | }
106 |
107 | interface ImageAssetSource {
108 | readonly directory: string; // docker build context directory
109 | readonly dockerBuildArgs?: { [arg: string]: string }; // optional args to "docker build"
110 | readonly dockerBuildTarget?: string; // docker build --target to use
111 | readonly dockerFile?: string; // custom name for Dockerfile
112 | }
113 |
114 | interface ImageAssetDestination {
115 | readonly region: string;
116 | readonly assumeRoleArn: string; // iam role to assume
117 | readonly assumeRoleExternalId?: string; // external id to pass to assume-role
118 | readonly repositoryName: string; // ECR repository name
119 | readonly imageName: string; // image tag to use
120 | }
121 | ```
122 |
123 | Example of `assets.json` with two assets: a docker image and a file asset. Both
124 | assets are published to two destinations (S3/ECR).
125 |
126 | ```json
127 | {
128 | "version": "assets-1.0",
129 | "images": {
130 | "d31ca1aef8d1b68217852e7aea70b1e857d107b47637d5160f9f9a1b24882d2a": {
131 | "source": {
132 | "packaging": "docker",
133 | "directory": "my-image",
134 | "dockerFile": "CustomDockerFile",
135 | "dockerBuildArgs": { "label": "prod" },
136 | "dockerBuildTarget": "my-target"
137 | },
138 | "destinations": [
139 | {
140 | "repositoryName": "aws-cdk-images-2222222222US-us-east-1",
141 | "imageName": "d31ca1aef8d1b68217852e7aea70b1e857d107b47637d5160f9f9a1b24882d2a",
142 | "assumeRoleArn": "arn:aws:iam::2222222222US:role/aws-cdk-publish-2222222222US-us-east-1"
143 | },
144 | {
145 | "repositoryName": "aws-cdk-images-3333333333EU-eu-west-2",
146 | "imageName": "d31ca1aef8d1b68217852e7aea70b1e857d107b47637d5160f9f9a1b24882d2a",
147 | "assumeRoleArn": "arn:aws:iam::3333333333EU:role/aws-cdk-publish-3333333333EU-eu-west-2"
148 | }
149 | ]
150 | }
151 | },
152 | "files": {
153 | "a0bae29e7b47044a66819606c65d26a92b1e844f4b3124a5539efc0167a09e57": {
154 | "source": {
155 | "packaging": "zip",
156 | "file": "myzipdirectory"
157 | },
158 | "destinations": [
159 | {
160 | "bucketName": "aws-cdk-files-2222222222US-us-east-1",
161 | "objectKey": "a0bae29e7b47044a66819606c65d26a92b1e844f4b3124a5539efc0167a09e57.zip"
162 | },
163 | {
164 | "bucketName": "aws-cdk-files-3333333333EU-us-west-2",
165 | "objectKey": "a0bae29e7b47044a66819606c65d26a92b1e844f4b3124a5539efc0167a09e57.zip",
166 | "assumeRoleArn": "arn:aws:iam::3333333333EU:role/aws-cdk-publish-3333333333EU-eu-west-2"
167 | }
168 | ]
169 | }
170 | }
171 | }
172 | ```
173 |
174 | ## API
175 |
176 | `cdk-assets` is designed as a stand-alone command line program and a library, so
177 | it can be integrated into other tools such as the CDK CLI or executed
178 | individually as a step in a CI/CD pipeline.
179 |
180 | ### `publish`
181 |
182 | ```shell
183 | cdk-assets publish DIR [ASSET-ID,ASSET-ID...]
184 | ```
185 |
186 | Packages and publishes assets to all destinations.
187 |
188 | - `DIR` is the directory from where to read `assets.json`, and which is used as
189 | the base directory for all file/directory references.
190 | - `ASSET-ID,...`: additional arguments represent asset identifiers to publish
191 | (default is to publish all assets). This can be used to implement concurrent
192 | publishing of assets (e.g. through CodePipeline).
193 |
194 | The `publish` command will do the following:
195 |
196 | For each asset and for each destination (pseudo code):
197 |
198 | ```
199 | for each asset in manifest:
200 | for each destination:
201 | assume destination iam role (if defined)
202 | if asset already published to destination: continue
203 | if asset requires packaging and not cached locally (by id):
204 | package asset (docker build/zip directory)
205 | cache asset locally (to avoid duplicate builds)
206 | publish to destination (upload/push)
207 | ```
208 |
209 | > When we execute this tool in a CodeBuild project, we should enable
210 | > [local caching](https://docs.aws.amazon.com/codebuild/latest/userguide/build-caching.html#caching-local)
211 | > of docker images as an optimization.
212 |
213 | Example (`cdk.out/assets.json` as above):
214 |
215 | ```shell
216 | $ cdk-assets publish cdk.out
217 | asset d31ca1aef8d1b68217852e7aea70b1e857d107b47637d5160f9f9a1b24882d2a
218 | assume arn:aws:iam::2222222222US:role/aws-cdk-publish-2222222222US-us-east-1
219 | notfound aws-cdk-images-2222222222US-us-east-1:d31ca1aef8d1b68217852e7aea70b1e857d107b47637d5160f9f9a1b24882d2a
220 | nocache d31ca1aef8d1b68217852e7aea70b1e857d107b47637d5160f9f9a1b24882d2a
221 | package docker build --target=my-target --label=prod -f CustomDockerFile ./my-image
222 | push aws-cdk-images-2222222222US-us-east-1:d31ca1aef8d1b68217852e7aea70b1e857d107b47637d5160f9f9a1b24882d2a
223 | assume arn:aws:iam::3333333333EU:role/aws-cdk-publish-3333333333EU-eu-west-2
224 | found aws-cdk-images-3333333333EU-eu-west-2:d31ca1aef8d1b68217852e7aea70b1e857d107b47637d5160f9f9a1b24882d2a
225 | done d31ca1aef8d1b68217852e7aea70b1e857d107b47637d5160f9f9a1b24882d2a
226 | --------------------------------------------------------------------------
227 | asset a0bae29e7b47044a66819606c65d26a92b1e844f4b3124a5539efc0167a09e57
228 | found s3://aws-cdk-files-2222222222US-us-east-1/a0bae29e7b47044a66819606c65d26a92b1e844f4b3124a5539efc0167a09e57.zip
229 | assume arn:aws:iam::3333333333EU:role/aws-cdk-publish-3333333333EU-eu-west-2
230 | notfound s3://aws-cdk-files-3333333333EU-us-west-2/a0bae29e7b47044a66819606c65d26a92b1e844f4b3124a5539efc0167a09e57.zip
231 | cached zip ./myzipdirectory
232 | upload s3://aws-cdk-files-3333333333EU-us-west-2/a0bae29e7b47044a66819606c65d26a92b1e844f4b3124a5539efc0167a09e57.zip
233 | done a0bae29e7b47044a66819606c65d26a92b1e844f4b3124a5539efc0167a09e57
234 | --------------------------------------------------------------------------
235 | ```
236 |
237 | The log above describes the following:
238 |
239 | The first asset to process is the docker image with id `d31ca1a...`. We first
240 | assume the role in the 2222US environment and check if the image exists in this
241 | ECR repository. Since it doesn't exist (`notfound`), we check if it exists in
242 | the local cache under the tag
243 | `d31ca1aef8d1b68217852e7aea70b1e857d107b47637d5160f9f9a1b24882d2a`. It doesn't
244 | so we build the docker image (`package`) and then push it. Then we assume the
245 | role from the 33333EU environment and find that the image is already in that ECR
246 | repository, so we just finish.
247 |
248 | The second asset is the file asset with id `a0bae...`. The first environment
249 | doesn't specify a role, so we just check if the file exists in the S3 bucket. It
250 | does, so we move to the next destination. We assume the role and check if the
251 | asset is in the s3 location. It is not (`notfound`), so we need to package and
252 | upload it. Before we package we check if it's cached locally and find that it is
253 | (`cached`), so no need to zip again, just `upload`.
254 |
255 | ### `ls`
256 |
257 | ```shell
258 | cdk-assets ls DIR
259 | ```
260 |
261 | Prints a list of asset identifiers and their type.
262 |
263 | Example:
264 |
265 | ```shell
266 | $ cdk-assets ls cdk.out
267 | d31ca1aef8d1b68217852e7aea70b1e857d107b47637d5160f9f9a1b24882d2a image
268 | a0bae29e7b47044a66819606c65d26a92b1e844f4b3124a5539efc0167a09e57 file
269 | ```
270 |
271 | This information is purely based on the contents of `assets.json`.
272 |
273 | ### Programmatic API
274 |
275 | The tool should expose a programmatic (library) API, so it can be integrated
276 | with other tools such as the AWS CLI and IDEs. The library should be
277 | jsii-compliant and released to all languages supported by the AWS CDK.
278 |
279 | Since the publishing process is highly asynchronous, the API should include the
280 | ability to subscribe to progress events in order to allow implementation of a
281 | rich user interface.
282 |
283 | Proposed API:
284 |
285 | ```ts
286 | class Assets {
287 | constructor(dir: string);
288 | readonly manifest: AssetManifest;
289 |
290 | // starts publishing a single asset
291 | publish(assetid: string, progress?: Progress): Publish;
292 | }
293 |
294 | interface ProgressEvent {
295 | readonly assetid: string;
296 | readonly progress: number; // percentage
297 | readonly type: string;
298 | readonly info: string;
299 | }
300 |
301 | class Progress {
302 | onStart(assetid: string): void;
303 | onEvent(evt: ProgressEvent): void;
304 | onComplete(assetid: string): void;
305 | }
306 |
307 | class Publish {
308 | abort(): void;
309 | readonly events: ProgressEvent[];
310 | readonly progress: number; // percentage
311 | readonly complete: boolean;
312 | }
313 | ```
314 |
315 | ## Non-Functional Requirements
316 |
317 | - **test coverage**: codebase must have full unit test coverage and a set of
318 | integration tests that can be executed within a pipeline environment.
319 | - **minimal dependencies**: the tool should take the minimum amount of 3rd party
320 | dependencies in order to reduce attack surface and to simplify bundling for
321 | jsii.
322 | - **jsii**: the API must be jsii-compliant, so it can be used programmatically
323 | from all supported languages. This means that all non-jsii dependencies must
324 | be bundled into the module.
325 |
--------------------------------------------------------------------------------
/text/0095-cognito-construct-library.md:
--------------------------------------------------------------------------------
1 | ---
2 | feature name: cognito-construct-library
3 | start date: 27/01/2020
4 | rfc pr: https://github.com/aws/aws-cdk-rfcs/pull/91
5 | related issue: https://github.com/aws/aws-cdk-rfcs/issues/95
6 | ---
7 |
8 | # Summary
9 |
10 | This RFC covers the design of Cognito's CDK construct library coverage.
11 |
12 | # Motivation
13 |
14 | The CDK constructs that are currently available in the CDK have a decent level
15 | of usage among customers. However, the current usage is via the 'Cfn' constructs
16 | and some basic features available via the `UserPool` construct. The current
17 | implementation of the `UserPool` construct only covers sign in type, user pool
18 | attributes and triggers.
19 |
20 | The goal of this RFC is to review the current `UserPool` construct for
21 | ergonomics in usability and extensibility, and to extend the features covered by
22 | the Cognito module.
23 |
24 | # Design Summary
25 |
26 | This RFC is structured as a working backwards document. Since the focus of this
27 | RFC is to propose the API design, the best way to propose this is to write up
28 | the future user guide of the Cognito module, as if it was complete.
29 |
30 | The bulk of the RFC is in the supporting document -
31 | [working-backwards-readme.md](./0095-cognito-construct-library/working-backwards-readme.md)
32 |
33 | # Detailed Design
34 |
35 | The design creates a couple of entry points for getting into the Cognito APIs.
36 |
37 | The first is the `UserPool` construct. The UserPool resource type itself can be
38 | fully configured via its constructor. Further resource types that are based on
39 | user pool, such as user pool user, client, etc., can be configured from this.
40 |
41 | The other entry point is the `IdentityPool` construct. Similar to the user pool,
42 | all subsequent resource types that are based on the identity pool, can be
43 | created and configured from this.
44 |
45 | # Adoption Strategy
46 |
47 | The proposal looks to adhere as close to the existing set of `UserPool` APIs as
48 | possible, so any breaking change to the current APIs is easy and quick to
49 | resolve.
50 |
51 | The Cognito module's API stability is 'experimental'. This is a signal to the
52 | users of this module that breaking changes to the API are to be expected.
53 |
54 | # Unresolved questions
55 |
56 | None.
57 |
58 | # Future Changes / Currently out of scope
59 |
60 | The following are currently out of scope for this document. Separate issues will
61 | be created for these, that will then be prioritized based on customer requests
62 | and +1s.
63 |
64 | - User Pool
65 | - Import using user pool name
66 | - Configure SES email actions.
67 | - ClientMetadata and ValidationData properties in User
68 | - Support for
69 | [Clients and HostedUI](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-app-integration.html)
70 | - [Identity providers](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pools-identity-federation.html)
71 | - [Advanced security features](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-user-pool-settings-advanced-security.html)
72 | - Identity Pool
73 | - External provider - Cognito. This requires CDK coverage of user pool clients
74 | (above). While it's available via the APIs and CloudFormation resources, it
75 | is
76 | [not listed in the documentation](https://docs.aws.amazon.com/cognito/latest/developerguide/external-identity-providers.html)
77 | as one of the identity providers, so it might not be a sought after feature.
78 | - External provider - OpenId connect. This requires coverage for OIDC identity
79 | providers in the IAM module.
80 | - External provider - SAML. This requires coverage for SAML identity providers
81 | in the IAM module.
82 | - Token based access control - Requires Cognito user pool clients (above).
83 |
84 | The following are out of scope of this document and are unlikely to be
85 | implemented.
86 |
87 | - Identity pool cognito sync. Reason:
88 | [Documentation](https://docs.aws.amazon.com/cognito/latest/developerguide/cognito-sync.html)
89 | suggests the use of AWS AppSync instead.
90 |
--------------------------------------------------------------------------------
/text/0308-cli-advisories.md:
--------------------------------------------------------------------------------
1 | # CLI notices
2 |
3 | * **Original Author(s)**: [@otaviomacedo](https://github.com/otaviomacedo)
4 | * **Tracking Issue**: [#308](https://github.com/aws/aws-cdk-rfcs/issues/308)
5 | * **API Bar Raiser**: @eladb
6 |
7 | A new CLI feature to notify customers about urgent and important issues that
8 | require their attention.
9 |
10 | ## Working backwards
11 |
12 | ### README
13 |
14 | Starting on version x.y.z of the CDK CLI, customers will be notified, on every
15 | command, about security vulnerabilities, regressions and usage of unsupported
16 | versions:
17 |
18 | ```
19 | $ cdk deploy
20 |
21 | ... # Normal output of the command
22 |
23 | NOTICES
24 |
25 | 16603 Toggling off auto_delete_objects for Bucket empties the bucket
26 |
27 | Overview: If a stack is deployed with an S3 bucket with
28 | auto_delete_objects=True, and then re-deployed with
29 | auto_delete_objects=False, all the objects in the bucket
30 | will be deleted.
31 |
32 | Affected versions: <1.126.0.
33 |
34 | More information at: https://github.com/aws/aws-cdk/issues/16603
35 |
36 |
37 | 17061 Error when building EKS cluster with monocdk import
38 |
39 | Overview: When using monocdk/aws-eks to build a stack containing
40 | an EKS cluster, error is thrown about missing
41 | lambda-layer-node-proxy-agent/layer/package.json.
42 |
43 | Affected versions: >=1.126.0 <=1.130.0.
44 |
45 | More information at: https://github.com/aws/aws-cdk/issues/17061
46 |
47 | If you don’t want to see a notice anymore, use "cdk acknowledge ID". For example, "cdk acknowledge 16603".
48 | ```
49 |
50 | By acknowledging a particular notice, it won’t show anymore in subsequent calls:
51 |
52 | ```
53 | $ cdk acknowledge 16603
54 |
55 | NOTICES
56 |
57 | 17061 Error when building EKS cluster with monocdk import
58 |
59 | Overview: When using monocdk/aws-eks to build a stack containing
60 | an EKS cluster, error is thrown about missing
61 | lambda-layer-node-proxy-agent/layer/package.json.
62 |
63 | Affected versions: >=1.126.0 <=1.130.0.
64 |
65 | More information at: https://github.com/aws/aws-cdk/issues/17061
66 |
67 | If you don’t want to see a notice anymore, use "cdk acknowledge ID". For example, "cdk acknowledge 17061".
68 | ```
69 |
70 | You can suppress all warnings per individual execution:
71 |
72 | ```
73 | $ cdk deploy --no-notices
74 | ```
75 |
76 | And you can disable all notices indefinitely by adding this entry to
77 | `~/.cdk.json`:
78 |
79 | ```
80 | "notices": false
81 | ```
82 |
83 | Regardless of the state of this flag and the notices you have acknowledged,
84 | you can always show the currently active notices:
85 |
86 | ```
87 | $ cdk notices
88 | ```
89 |
90 | This command returns zero if there is no notice and non-zero otherwise. Users
91 | can then plug this into a pipeline approval workflow and expect manual review if
92 | there are any notices.
93 |
94 | > Please note that the acknowledgements are made project by project. If you
95 | acknowledge a notice in one CDK project, it will still appear on other
96 | projects when you run any CDK commands, unless you have suppressed or disabled
97 | notices.
98 |
99 | ### Runbook section (internal to the CDK team)
100 |
101 | In case of a high-impact issue, follow these steps:
102 |
103 | 1. Create or update an issue for this incident on the aws-cdk GitHub repository.
104 | 2. Update the file `notices.json` on repository `cdklabs/aws-cdk-notices`, adding an entry for
105 | the incident. Example:
106 |
107 | ```json
108 | {
109 | "title": "Toggling off auto_delete_objects for Bucket empties the bucket",
110 | "issueUrl": "https://github.com/aws/aws-cdk/issues/16603",
111 | "overview": "If a stack is deployed with an S3 bucket with auto_delete_objects=True, and then re-deployed with auto_delete_objects=False, all the objects in the bucket will be deleted.",
112 | "component": ["framework"],
113 | "version": "<1.126.0"
114 | }
115 | ```
116 |
117 |
118 | 3. Create a PR with this change and wait for an approval. Only SDMs have
119 | permission to approve PRs in this repository.
120 | 4. A PR check will validate the `notices.json` file and verify that the
121 | following conditions hold:
122 | * The file is compliant with the schema.
123 | * The issue exists.
124 | * Title and overview are within the length constraints.
125 | * The semantic version is valid.
126 | * The component is valid.
127 | 5. When the PR gets merged, the notice will be visible to all CLI
128 | installations. The GitHub issue will also be automatically updated with the
129 | information contained in the file. All the necessary tags will also be added
130 | automatically.
131 | 6. You can keep updating the issue normally, as new information comes in, but
132 | you're not allowed to touch the sections auto-generated from the notices
133 | file.
134 |
135 | [ ] Signed-off by API Bar Raiser @xxxxx
136 |
137 | ## Public FAQ
138 |
139 | ### What are we launching today?
140 |
141 | A new communication channel between AWS and users of the CDK. Starting on
142 | version x.y.z of the CDK CLI, customers will be notified, on every command,
143 | about security vulnerabilities, regressions and usage of unsupported versions.
144 |
145 | ### Why should I use this feature?
146 |
147 | These notices shown by the CLI contain very important and actionable
148 | information about problems that directly affect you. They give you an
149 | opportunity to upgrade the CLI or the construct library when necessary or to
150 | work around high-impacting issues.
151 |
152 | ## Internal FAQ
153 |
154 | ### Why are we doing this?
155 |
156 | In case of emergency announcements, such as security vulnerabilities or
157 | regressions, the only mechanisms we have right now are email campaigns and
158 | pinned GitHub issues. These are not always the best means to reach out to
159 | customers. Many email addresses are not monitored by anyone and customers don’t
160 | necessarily check the GitHub repository for updates. The output of the CLI, on
161 | the other hand, is seen by many customers every day.
162 |
163 | ### Why should we *not* do this?
164 |
165 | This is a powerful feature to convey urgent and important messages to customers.
166 | However, to keep its relevance, it should be used sparingly and the messages
167 | must meet a high bar. Otherwise it will just become a tool for spamming
168 | customers. We will introduce filters to reduce this risk, by only showing
169 | content that applies to each particular environment and also require PR approval
170 | for any changes in notices. But ultimately, it hinges on responsible use. In
171 | particular, this feature should not be used for things like marketing campaigns,
172 | blog post announcements and things like that. If the mechanisms proposed in this
173 | RFC are not considered strong enough by the CDK team, we should not implement
174 | this feature.
175 |
176 | ### What is the technical solution (design) of this feature?
177 |
178 | Notice information will be available as a static file on a GitHub repository.
179 | This file will be automatically synced to an S3 bucket and distributed via
180 | CloudFront. The CLI will consume this file and apply a set of filters to
181 | narrow down the list of notices to the context in which it is being run. We
182 | will also implement some GitHub actions to validate and copy the contents of the
183 | file over to the issue. For a more detailed explanation, see the Appendix.
184 |
185 | ### Is this a breaking change?
186 |
187 | No.
188 |
189 | ### What alternative solutions did you consider?
190 |
191 | On the publishing side:
192 |
193 | * Implementing a new internal REST service to manage the notices. Overly
194 | complex for this use case.
195 | * Authoring the content directly on the GitHub issue. Hard to enforce
196 | constraints on the content.
197 |
198 | On the distribution side:
199 |
200 | * Serving the file directly from GitHub.
201 | * Using the GitHub API to query for special issues that are considered
202 | notices (in case of using GitHub issues as the source of truth).
203 |
204 | ### What is the high-level project plan?
205 |
206 | 1. Implement and deploy the infrastructure necessary for distribution (S3 +
207 | CloudFront).
208 | 1. Implement the GitHub actions of validation and issue sync-up and issue
209 | protection.
210 | 2. Add the construct library version to the cloud assembly metadata.
211 | 2. Implement and release the CLI changes.
212 |
213 | ### What is the expected lifetime of notices?
214 |
215 | We should expect notices to be available for months. This is usually the case
216 | when we want users to upgrade the construct library version.
217 |
218 | ### Why do we need to give users the option to suppress notices?
219 |
220 | This is useful when the CDK is running as part of a larger script and users want
221 | to hide the output.
222 |
223 | ### Why do we need to give users the option to acknowledge notices?
224 |
225 | Given the expected lifetime of notices, they will eventually become just
226 | noise and users will try to silence it. If the only option we give users is to
227 | suppress them (per execution or indefinitely), they will use that and miss new
228 | notices.
229 |
230 | ### What future improvements can we make?
231 |
232 | **Resource filter**: Ideally, notices should be as targeted as possible. The
233 | version filter addresses part of that requirement, but we can make it better by
234 | also adding a resource filter: if an issue only affects `FargateTaskDefinition`
235 | resources, for example, but this resource is not present in any of the stacks,
236 | the customer doesn’t need to see that particular warning.
237 |
238 | **Pipeline awareness**: Initially, we won’t treat the pipeline as a special
239 | case. By default, the notices will be fetched and displayed and we expect
240 | users to never look at them. This behavior can always be overridden by
241 | suppressing the notices. In future releases, the CLI will use the information
242 | about whether it’s running on a pipeline and block the deployment in case there
243 | is any notice.
244 |
245 | ## Appendix
246 |
247 | ### Detailed design
248 |
249 | #### Publishing notices
250 |
251 | We will create a new repository, `cdklabs/aws-cdk-notices`, dedicated to host
252 | the notices file. As usual, in order to be merged, any change to this file
253 | will have to be published as a PR and approved. Using a CODEOWNERS file, we will
254 | restrict the permission to approve PRs only to SDMs. The file will contain a
255 | list of notices, each having the following fields:
256 |
257 | | Field | Description | Format | Mandatory? |
258 | |:------------:|:--------------------------------------------------------------:|---------------------------------|:----------:|
259 | | `title` | The title of the incident (max length: 100) | Free form text | Yes |
260 | | `issueUrl` | A link to the GitHub issue where the incident is being tracked | URL | Yes |
261 | | `overview` | A paragraph with more information about the incident | Free form text | Yes |
262 | | `component` | The CLI or the Framework | Either `"cli"` or `"framework"` | Yes |
263 | | `version` | Version range using the semver format | Semantic Versioning | No |
264 |
265 | We will also implement three GitHub actions on this repository:
266 |
267 | 1. File validation on PR. It will block merging if the structure of the file is
268 | not compliant with the specification above.
269 | 2. Issue sync-up. When the PR is merged, this action will copy the content of
270 | the file over to the GitHub issue it's linked to.
271 | 3. Issue protection. Every change to issues that are linked to some notice
272 | will be checked by this action, to avoid corruption.
273 |
274 | For the distribution, we will implement a mechanism to sync the contents of the
275 | GitHub repository with the S3 bucket (e.g., an EventBridge event). The S3
276 | content will be served by a CloudFront distribution.
277 |
278 | #### CLI logic
279 |
280 | On every command, the CLI will fetch the file from the backend, parse the
281 | content and check whether the version range contained in the notice matches
282 | the CLI version or the framework version (depending on the affected component).
283 |
284 | Since the CLI knows its own version, checking against the version range of the
285 | notice is trivial. The version of the framework, however, is not readily
286 | available anywhere. To address this, we will start writing the framework version
287 | to the Cloud Assembly, in a place where the CLI can read it.
288 |
289 | Issues that pass this filter will be displayed on the standard output. If an
290 | error or timeout occurs when retrieving the issues, the CLI will simply skip
291 | the display of notices and try again at the next command execution. Results
292 | will be cached for a period of one hour.
293 |
294 | The CLI will store the IDs of the acknowledged issues in the project specific
295 | `./cdk.json` file.
296 |
--------------------------------------------------------------------------------
/text/0507-subnets/alternative-working-backwards.md:
--------------------------------------------------------------------------------
1 | ## Working Backwards
2 |
3 | The `VpcV2` is a new construct, that creates an AWS VPC. Compared to `Vpc`,
4 | it makes fewer assumptions and allows for more control over how you
5 | structure your VPC, subnets and related resources, such as NAT Gateways and
6 | VPC Endpoints.
7 |
8 | > **Note**
9 | > In all the code snippets in this document, unless otherwise specified, the
10 | > variable `vpc` always refers to an instance of `VpcV2`.
11 |
12 | ### IP addressing
13 |
14 | With `VpcV2`, in addition to the mandatory primary IP block, you can have one or
15 | more secondary IP blocks, by providing them in the CIDR format:
16 |
17 | ```ts
18 | const vpc = new VpcV2(this, 'vpc', {
19 | primaryAddressBlock: IpAddresses.ipv4('10.0.0.0/16'),
20 | secondaryAddressBlocks: [
21 | // The secondary address blocks must be in the same RFC 1918 range as
22 | // the primary address block
23 | IpAddresses.ipv4('10.1.0.0/16'),
24 | IpAddresses.ipv4('10.2.0.0/16'),
25 | ],
26 | });
27 | ```
28 |
29 | Or by providing an [IPAM] pool with a netmask length:
30 |
31 | ```ts
32 | const ipam = new Ipam(stack, 'ipam');
33 | const pool = ipam.publicScope.addPool({
34 | addressFamily: AddressFamily.IP_V4,
35 | provisionedCidrs: ['10.2.0.0/16'],
36 | });
37 |
38 | const vpc = new VpcV2(this, 'vpc', {
39 | primaryAddressBlock: IpAddresses.ipv4('10.0.0.0/16'),
40 | secondaryAddressBlocks: [
41 | IpAddresses.ipv4Ipam({
42 | ipamPool: pool,
43 | netmaskLength: 20,
44 | }),
45 | ],
46 | });
47 | ```
48 |
49 | You can also add secondary IPv6 address blocks, in three different ways:
50 |
51 | ```ts
52 | // 1. Using an Ipv6 address block. Because IPv6 addresses are all publicly
53 | // addressable, they must come from an address pool that you own and brought to
54 | // AWS (BYOIP). So you must also provide the pool ID:
55 | IpAddresses.ipv6({
56 | cidr: '2001:db8:1234:1a00::/56',
57 |
58 | // The pool of IPs you own. Not to be confused with an IPAM pool ID
59 | poolId: 'my-ipv6-pool-id',
60 | });
61 |
62 | // 2. Using an IPAM pool:
63 | IpAddresses.ipv6Ipam({
64 | ipamPool: pool,
65 | netmaskLength: 64
66 | });
67 |
68 | // 3. Using an Amazon-provided IPv6 CIDR block:
69 | IpAddresses.amazonProvidedIpv6();
70 | ```
71 |
72 | ### Defining your own subnets
73 |
74 | `VpcV2` also allows you to define your own subnets:
75 |
76 | ```ts
77 | const subnet = vpc.addSubnet('subnet', {
78 | cidrBlock: '10.2.0.0/20',
79 | availabilityZone: 'us-west-2a'
80 | });
81 | ```
82 |
83 | If you add more than one subnet to the VPC, the framework validates that there
84 | is no intersection between their address blocks. In addition, if all VPC IP
85 | address blocks (both primary and secondary) are provided as CIDR strings, the
86 | framework validates that each address block of all subnets is within one of the
87 | address blocks of the VPC.
88 |
89 | If you have added a secondary IPv6 block to your VPC, you can then add
90 | subnets with IPv6 ranges as well:
91 |
92 | ```ts
93 | const subnet = vpc.addSubnet('subnet', {
94 | cidrBlock: '2001:db8:1234:1a00::/60',
95 | availabilityZone: 'us-west-2a'
96 | });
97 | ```
98 |
99 | ### Routing
100 |
101 | By default, `addSubnet()` creates isolated subnets, that only route traffic
102 | to other hosts inside the VPC. To define different routing policies for a
103 | subnet, provide a route table when creating it. For example, to create a
104 | public subnet:
105 |
106 | ```ts
107 | const publicRouteTable = vpc.addRouteTable('routeTable', {
108 | routes: [
109 | // By adding this route, all subnets that use this table become public
110 | Route.toInternetGateway('0.0.0.0/0'),
111 | ],
112 | });
113 |
114 | const subnet = vpc.addSubnet('publicSubnet', {
115 | cidrBlock: '10.2.0.0/20',
116 | availabilityZone: 'us-west-2a',
117 | routeTable: publicRouteTable,
118 | mapPublicIpOnLaunch: false, // default: true for public subnets
119 | });
120 |
121 | // The following is true
122 | vpc.publicSubnets.includes(subnet);
123 |
124 | // As is
125 | vpc.selectSubnets({subnetType: ec2.SubnetType.PUBLIC}).includes(subnet);
126 | ```
127 |
128 | If you don't provide a route table when adding a subnet, a new route table
129 | will be automatically created and assigned to it. To add routes to a route
130 | table after it has been created, use the `addRoute()` method:
131 |
132 | ```ts
133 | subnet.routeTable.addRoute(
134 | Route.toInternetGateway('0.0.0.0/0'),
135 | );
136 | ```
137 |
138 | To route traffic through gateway VPC endpoints, use the `Route.
139 | toGatewayEndpoint()` method:
140 |
141 | ```ts
142 | vpc.addRouteTable('routeTable', {
143 | routes: [
144 | // The endpoint will be created if it doesn't exist
145 | Route.toGatewayEndpoint(GatewayVpcEndpointAwsService.DYNAMODB),
146 | ],
147 | });
148 | ```
149 |
150 | To create a route table that sends traffic through interface VPC endpoints:
151 |
152 | ```ts
153 | const subnet1 = vpc.addSubnet(/*...*/);
154 | const subnet2 = vpc.addSubnet(/*...*/);
155 |
156 | vpc.addRouteTable('routeTable', {
157 | routes: [
158 | Route.toInterfaceEndpoint(InterfaceVpcEndpointAwsService.ECR_DOCKER, {
159 | // The endpoint will be created if it doesn't exist,
160 | // in each of these subnets
161 | subnets: [subnet1, subnet2],
162 | }),
163 | ],
164 | });
165 | ```
166 |
167 | You can use a public NAT gateway to enable instances in a private subnet to
168 | send outbound traffic to the internet, while preventing the internet from
169 | establishing connections to the instances:
170 |
171 | ```ts
172 | const elasticIp = new ElasticIp({
173 | domain: Domain.VPC
174 |
175 | // Other properties, such as networkBorderGroup and publicIpv4Pool,
176 | // are also available. Omitted here for brevity.
177 | });
178 |
179 | // Ideally, we would have an addNatGateway() to ISubnet or just on Subnet,
180 | // that would return IRouter, but this method already exists, with a
181 | // different signature, and only in PublicSubnet. So we have to export
182 | // NatGateway and create it outside.
183 | const natGateway = new NatGateway(vpc, 'NatGateway', {
184 | subnet: subnet,
185 | eip: elasticIp,
186 | });
187 |
188 | const routeTable = vpc.addRouteTable('routeTable', {
189 | routes: [
190 | Route.to({
191 | destination: '0.0.0.0/0',
192 |
193 | // targets must implement the IRouter interface
194 | target: natGateway,
195 | }),
196 | ],
197 | });
198 |
199 | const privateSubnet = vpc.addSubnet('privateSubnet', {
200 | cidrBlock: '10.2.0.0/20',
201 | availabilityZone: 'us-west-2a',
202 | routeTable,
203 | });
204 | ```
205 |
206 | You can also produce the same kind of routing pattern with a NAT instance:
207 |
208 | ```ts
209 | // Same thing with a NAT instance: we have to create it outside of the subnet.
210 | // NatInstance extends Instance and implements IRouter.
211 | const natInstance = new NatInstance(vpc, 'natinst', {
212 | instanceType: new ec2.InstanceType('t3.micro'),
213 | machineImage: new ec2.GenericLinuxImage({
214 | 'us-east-2': 'ami-0f9c61b5a562a16af'
215 | }),
216 |
217 | // Other properties omitted
218 | });
219 |
220 | const routeTable = vpc.addRouteTable('routeTable', {
221 | routes: [
222 | Route.to({
223 | destination: '0.0.0.0/0',
224 | target: natInstance,
225 | }),
226 | ],
227 | });
228 |
229 | const privateSubnet = vpc.addSubnet('privateSubnet', {
230 | cidrBlock: '10.2.0.0/20',
231 | availabilityZone: 'us-west-2a',
232 | routeTable,
233 | });
234 | ```
235 |
236 | For IPv6 traffic, to produce this pattern, you have to use an egress-only
237 | internet gateway:
238 |
239 | ```ts
240 | const routeTable = vpc.addRouteTable('routeTable', {
241 | routes: [
242 | // The CIDR provided here must be for IPv6
243 | Route.toEgressOnlyInternetGateway('::/0'),
244 | ],
245 | });
246 | ```
247 |
248 | To route traffic to a VPN Gateway, you can explicitly add a route to the
249 | route table, or you can enable route propagation (or both):
250 |
251 | ```ts
252 | const routeTable = vpc.addRouteTable('routeTable', {
253 | routes: [
254 | // Static route to a VPN Gateway. Takes priority over propagated ones
255 | // Causes VPN gateway to be enabled in the VPC
256 | Route.toVpnGateway('172.31.0.0/24'),
257 | ],
258 |
259 | // To make VPN Gateway routes propagate to this route table
260 | enableVpnGatewayRoutePropagation: true, // default: false
261 | });
262 | ```
263 |
264 | If you have another VPC that you want to use in a peering connection:
265 |
266 | ```ts
267 | const routeTable = vpc.addRouteTable('routeTable', {
268 | routes: [
269 | Route.toPeerVpc({
270 | vpc: anotherVpc,
271 | destination: '192.168.0.0/24', // The peer VPC CIDR
272 | }),
273 | ],
274 | });
275 | ```
276 |
277 | Other targets include carrier gateways, transit gateways and network interfaces.
278 | The API to create routes to them follows the same pattern as above.
279 |
280 | ### Using subnets with other components
281 |
282 | When you create a component that needs to be placed in a subnet, you can
283 | provide a subnet selection, which informs the `Vpc` construct which
284 | actual subnet to pick. For example, to create an `ApplicationLoadBalancer`:
285 |
286 | ```ts
287 | const subnet1 = vpc.addSubnet(/*...*/);
288 | const subnet2 = vpc.addSubnet(/*...*/);
289 |
290 | const lb = new elbv2.ApplicationLoadBalancer(this, 'LB', {
291 | vpc,
292 | vpcSubnets: {
293 | subnets: [subnet1, subnet2],
294 | },
295 | });
296 | ```
297 |
--------------------------------------------------------------------------------
/text/0673-aws-applicationsignals-slo-l2.md:
--------------------------------------------------------------------------------
1 | # AWS CloudWatch Application Signals L2 Construct for Simplifying SLO
2 |
3 | * **Original Author(s):**: @liunia-amazon
4 | * **Tracking Issue**: [#673](https://github.com/aws/aws-cdk-rfcs/issues/673)
5 | * **API Bar Raiser**: @moelasmar
6 |
7 | The `ServiceLevelObjective` module is a collection of L2 constructs which leverages native L1 CFN resources, simplifying the
8 | Application Signals Service Level Objectives (SLOs) creation process to monitor the reliability of a service against customer expectations.
9 | The current process of creating SLOs using AWS CDK is complex and requires significant input from users, leading to potential errors and
10 | a time-consuming setup process. This design document addresses these challenges by proposing predefined constants and a robust set of L2 CDK
11 | constructs that simplify the creation of SLOs while providing flexibility.
12 |
13 | ## Working Backwards
14 |
15 | ### CHANGELOG
16 |
17 | `feat(applicationSignals): introduce new Application Signals L2 constructs for simplifying SLO creation and management`
18 |
19 | ### README
20 |
21 | Amazon CloudWatch Application Signals Service Level Objectives (SLOs) L2 construct
22 | enables customers to create and manage Service Level Objectives (SLOs) for their applications
23 | using Amazon CloudWatch Application Signals. SLOs help ensure critical business
24 | operations meet customer expectations by setting and tracking specific reliability and availability targets.
25 |
26 | The ServiceLevelObjective construct provides two types of SLOs:
27 | Period-based SLOs: Evaluate performance against goals using defined time periods.
28 | Request-based SLOs: Measure performance based on request success ratios
29 |
30 | #### Key Features
31 |
32 | 1. Easy creation of both period-based and request-based SLOs.
33 | 2. Support for custom CloudWatch metrics and math expressions.
34 | 3. Automatic error budget calculation and tracking.
35 |
36 | ### Usage
37 |
38 | #### Use Case 1 - Create a Period-based SLO with custom metrics, default attainmentGoal: 99.9 and warningThreshold: 30
39 |
40 | ```
41 | const periodSlo = ServiceLevelObjective.periodBased(this, 'PeriodSLO', {
42 | name: 'my-period-slo',
43 | goal: {
44 | interval: Interval.rolling({
45 | duration: 7,
46 | unit: DurationUnit.DAY,
47 | }),
48 | },
49 | metric: {
50 | metricThreshold: 100,
51 | periodSeconds: 300,
52 | statistic: 'Average',
53 | metricDataQueries: [/* ... */],
54 | },
55 | });
56 | ```
57 |
58 | #### Use Case 2 - Create a Period-based SLO with service/operation, attainmentGoal is 99.99 and warningThreshold is 50
59 |
60 | ```
61 | const availabilitySlo = ServiceLevelObjective.periodBased(this, 'ApiAvailabilitySlo', {
62 | name: 'api-availability-slo',
63 | description: 'API endpoint availability SLO',
64 | goal: {
65 | attainmentGoal: 99.99,
66 | warningThreshold: 50,
67 | interval: Interval.calendar({
68 | duration: 1,
69 | unit: DurationUnit.MONTH,
70 | // default startTime is now,
71 | }),
72 | },
73 | metric: {
74 | metricThreshold: 99,
75 | metricType: MetricType.AVAILABILITY,
76 | operationName: 'OrderProcessing',
77 | keyAttributes: KeyAttributes.service({
78 | name: 'MyService',
79 | environment: 'Development',
80 | }),
81 | periodSeconds: 300,
82 | statistic: 'Average',
83 | },
84 | });
85 | ```
86 |
87 | #### Use Case 3 - Create request based SLO with custom metrics
88 |
89 | ```
90 | const requestSlo = ServiceLevelObjective.requestBased(this, 'RequestSLO', {
91 | name: 'my-request-slo',
92 | goal: {
93 | interval: Interval.calendar({
94 | duration: 30,
95 | unit: DurationUnit.DAY,
96 | startTime: 1,
97 | }),
98 | },
99 | metric: {
100 | metricThreshold: 200,
101 | goodCountMetrics: [/* ... */],
102 | totalCountMetrics: [/* ... */],
103 | },
104 | });
105 | ```
106 |
107 | ### Important Considerations
108 |
109 | 1. SLO type (period-based or request-based) cannot be changed after creation
110 | 2. Application Signals service operations must report standard metrics before SLO creation
111 |
112 | ### Best Practices
113 |
114 | 1. Use period-based SLOs for time-based measurements (e.g., latency)
115 | 2. Use request-based SLOs for counting-based measurements (e.g., availability)
116 | 3. Set appropriate warning thresholds below attainment goals
117 | 4. Configure burn rate windows for early detection of issues
118 |
119 | ### API Design
120 |
121 | This L2 construct simplifies SLO creation while maintaining the
122 | flexibility needed for various use cases. It handles the complexity of
123 | creating and managing SLOs, allowing focus on defining the service reliability targets.
124 |
125 | #### IntervalProps
126 |
127 | `IntervalProps` implements IInterval
128 |
129 | |Name |Type |Description |
130 | |--- |--- |--- |
131 | |duration |number |Duration value |
132 | |unit |DurationUnit |Unit of duration
One of the following enum values:
`HOUR`
`DAY`
`MINUTE`|
133 |
134 | #### CalendarIntervalProps
135 |
136 | `CalendarIntervalProps` implements IInterval
137 |
138 | |Name |Type |Description |
139 | |--- |--- |--- |
140 | |duration |number |Duration value |
141 | |startTime? |number |Start time for the calendar interval.
default is now |
142 | |unit |DurationUnit (enum) |Unit of duration. the following enum values:
`MONTH` |
143 |
144 | #### GoalConfig
145 |
146 | |Name |Type |Description |
147 | |--- |--- |--- |
148 | |attainmentGoal? |number |Optional. The target goal percentage.
default is 99.9 |
149 | |warningThreshold? |number |Optional warning threshold percentage.
default is 30 |
150 | |interval |IInterval |Interval configuration |
151 |
152 | #### KeyAttributes
153 |
154 | `KeyAttributes` is a construct that helps configure and validate Application Signals key attributes for services.
155 | If this SLO is related to a metric collected by Application Signals, you must use this field to specify which service
156 | the SLO metric is related to. To do so, you must specify at least the Type, Name, and Environment attributes.
157 |
158 | |Name |Type | Description |
159 | |--- |--- |---------------------------------------------------------------------------------------------------------------------------------------------|
160 | |type |KeyAttributeType (enum) | The type of service.
One of the following enum values:
`SERVICE`
`AWS_SERVICE`
`REMOTE_SERVICE`
`RESOURCE`
`AWS::RESOURCE` |
161 | |name |string | The name of the service. This is used only if the value of the Type field is Service, RemoteService, or AWS::Service. |
162 | |environment |string | The environment of the service |
163 | |resourceType? |string | Optional specifies the type of the resource. Used only when type is Resource or AWS::Resource. |
164 | |identifier? |string | Optional additional identifier for the service. identifies the resource objects of this resource. This is used only if the value of the Type field is Resource or AWS::Resource. |
165 |
166 | #### MetricDimension
167 |
168 | |Name |Type |Description |
169 | |--- |--- |--- |
170 | |name |string |Dimension name |
171 | |value |string |Dimension value |
172 |
173 | #### MetricDefinition
174 |
175 | |Name |Type |Description |
176 | |--- |--- |--- |
177 | |metricName |string |Name of the metric |
178 | |namespace |string |Metric namespace |
179 | |dimensions? |MetricDimension[] |Optional metric dimensions |
180 |
181 | #### SliMetricBaseProps
182 |
183 | |Name | Type |Description |
184 | |--- |---|---|
185 | |metricThreshold | number | Threshold for the metric|
186 | |metricType? | MetricType(enum) | Optional metric type.
One of the following enum values:
`LATENCY`
`AVAILABILITY` |
187 | |keyAttributes? | KeyAttributes | Optional key attributes|
188 | |operationName? | string | Optional operation name |
189 | |comparisonOperator? | ComparisonOperator(enum) | Optional comparison operator.
One of the following enum values:
`GREATER_THAN`
`LESS_THAN`
`GREATER_THAN_OR_EQUAL`
`LESS_THAN_OR_EQUAL` |
190 |
191 | #### PeriodBasedMetricProps
192 |
193 | `PeriodBasedMetricProps` implements `SliMetricBaseProps`
194 |
195 | |Name |Type |Description |
196 | |--- |--- |--- |
197 | |periodSeconds? |number |Period in seconds, default is 60s |
198 | |statistic |MetricStatistic |Statistic |
199 | |metricDataQueries |MetricDataQuery[] |Array of metric queries |
200 |
201 | #### RequestBasedMetricProps
202 |
203 | `RequestBasedMetricProps` implements `SliMetricBaseProps`
204 |
205 | |Name |Type |Description |
206 | |--- |--- |--- |
207 | |goodCountMetrics? |MetricDataQuery[] |Optional good count metrics |
208 | |totalCountMetrics |MetricDataQuery[] |Total count metrics |
209 | |badCountMetrics? |MetricDataQuery[] |Optional bad count metrics |
210 |
211 | #### PeriodBasedSloProps
212 |
213 | |Name |Type |Description |
214 | |--- |--- |--- |
215 | |name |string |The name of the SLO |
216 | |keyAttributes | KeyAttributes |The key attributes for the SLO |
217 | |operationName? |string |The operation name for the SLO (optional) |
218 | |goal |GoalConfig |The goal configuration for the SLO |
219 | |sliMetric? |PeriodBasedMetricProps |Period-based metric configuration |
220 | |description? |string |A description for the SLO (optional) |
221 | |burnRateWindows? |number[] |Burn rate windows (Optional) |
222 |
223 | #### RequestBasedSloProps
224 |
225 | |Name |Type |Description |
226 | |--- |-- |--- |
227 | |name |string |The name of the SLO |
228 | |keyAttributes | KeyAttributes |The key attributes for the SLO |
229 | |operationName? |string |The operation name for the SLO (optional) |
230 | |goal |GoalConfig |The goal configuration for the SLO |
231 | |sliMetric? |RequestBasedMetricProps |Request-based metric configuration |
232 | |description? |string |A description for the SLO (optional) |
233 | |burnRateWindows? |number[] |Burn rate windows (Optional) |
234 |
235 | Ticking the box below indicates that the public API of this RFC has been
236 | signed-off by the API bar raiser (the `status/api-approved` label was applied to the
237 | RFC pull request):
238 |
239 | ```text
240 | [] Signed-off by API Bar Raiser @moelasmar
241 | ```
242 |
243 | ## Public FAQ
244 |
245 | ### What are we launching today?
246 |
247 | We are launching a new L2 construct `ServiceLevelObjective` to simplify the Application Signals SLO creation process. These constructs
248 | provide a simplified and more intuitive way to create and manage SLOs using AWS CDK.
249 |
250 | ### Why should I use this feature?
251 |
252 | You should use this feature if you want to:
253 |
254 | 1. Simplify the process of creating SLOs in your CDK applications
255 | 2. Reduce the amount of configuration required for each SLO
256 | 3. Benefit from enhanced type safety and autocompletion support
257 |
258 | ## Internal FAQ
259 |
260 | ### Why are we doing this?
261 |
262 | We are implementing these L2 constructs to address the current complexity in creating SLOs using CDK. The existing
263 | process requires extensive configuration and lacks standardization, leading to potential errors and a time-consuming setup process.
264 |
265 | ### Why should we *not* do this?
266 |
267 | While the benefits are significant, potential reasons not to proceed include:
268 |
269 | 1. Maintenance overhead of supporting both L1 and L2 constructs
270 | 2. Possible limitations in flexibility for highly customized SLO configurations
271 |
272 | ### What is the technical solution (design) of this feature?
273 |
274 | #### 1. Simplified API Structure
275 |
276 | Added pre-defined defaults and builder patterns for cleaner code organization
277 |
278 | #### 2. Enhanced Type Safety
279 |
280 | Replaced string literals with enums (DurationUnit, MetricType, ComparisonOperator)
281 |
282 | #### 3. New Features
283 |
284 | 1. Introduced separate Period-based and Request-based SLO patterns.
285 | 2. Added validation logic for configuration values.
286 | 3. Implemented reusable interval configurations.
287 |
288 | ### Is this a breaking change?
289 |
290 | No.
291 |
292 | ### What alternative solutions did you consider?
293 |
294 | N/A
295 |
296 | ### What are the drawbacks of this solution?
297 |
298 | N/A
299 |
300 | ### What is the high-level project plan?
301 |
302 | * [ ] Gather feedback on the RFC
303 | * [ ] Get bar raiser to sign off on RFC
304 | * [ ] Make pull request to aws-cdk repository
305 | * [ ] Iterate and respond to PR feedback
306 | * [ ] Merge new construct and related changes
307 |
308 | ### Are there any open issues that need to be addressed later?
309 |
--------------------------------------------------------------------------------
/text/0710-node-deprecation-strategy.md:
--------------------------------------------------------------------------------
1 | ## Description
2 |
3 | This RFC proposes the introduction of a formal policy in the AWS Cloud Development Kit (CDK) for discontinuing support of Node.js versions
4 | six months after the Node.js community [officially](https://github.com/nodejs/Release#release-schedule) designates them as End-of-Life (EOL).
5 | This ensures users have ample transition time and that the AWS CDK remains aligned with actively supported, secure Node.js releases.
6 |
7 | ## What is covered under this RFC
8 |
9 | ### The following falls under this RFC
10 |
11 | * AWS CDK CLI
12 | * AWS CDK Construct Library
13 | * JSII
14 | * Projen
15 | * CDK8s
16 |
17 | ### The following does not fall under this RFC
18 |
19 | * AWS Lambda Runtimes
20 | * Custom Resource Lambda Functions managed by the CDK team
21 |
22 | ## Motivation
23 |
24 | * Security & Stability: When Node.js versions reach EOL, they no longer receive critical security patches or bug fixes from the Node.js community.
25 | Continuing to support them imposes security risks and technical overhead.
26 | * Focus on Innovation: By trimming support for EOL Node.js versions, the AWS CDK can rely on modern Node.js features and improvements that benefit
27 | the developer community.
28 | * Consistency & Predictability: Providing a clear, consistent policy for when Node.js versions lose AWS CDK support allows customers to plan
29 | upgrades confidently and reduces confusion around "how long will CDK support version X?"
30 |
31 | ## Background
32 |
33 | The AWS CDK is dropping support for Node.js 14.x and 16.x on May 30, 2025. These versions went EOL on:
34 |
35 | * Node.js 14.x EOL: 2023-04-30
36 | * Node.js 16.x EOL: 2023-09-11
37 |
38 | Separately:
39 |
40 | * Node.js 18.x EOL: 2025-05-30 (per the Node.js working group's schedule), and the AWS CDK support will end 2025-11-30.
41 |
42 | In this proposal, we formalize a policy for all Node.js versions going forward.
43 |
44 | ## Policy Proposal
45 |
46 | 1. Policy Statement
47 | The AWS CDK will discontinue official support for any Node.js version six months after that version has reached End-of-Life according
48 | to [the Node.js Release working group schedule](https://github.com/nodejs/Release#release-schedule).
49 | 2. Effective Date
50 | 1. This policy is effective immediately upon approval and applies to all future Node.js LTS and non-LTS releases.
51 | 2. Practically, support for Node.js 14.x and 16.x has already ended (announced for May 30, 2025, but both versions are beyond
52 | Node's official EOL as of this writing).
53 | 3. For Node.js 18.x (which has an EOL date of 2025-05-30), the AWS CDK support will end 2025-11-30.
54 | 3. What Does "No Longer Supported" Mean
55 | 1. Bug Reports: Issues will not be triaged or resolved unless they can be reproduced on a supported version of Node.js.
56 | 2. Security Fixes: The AWS CDK will not implement or backport security patches for versions of Node.js beyond their 6-month grace period.
57 | 3. Compatibility Testing: The AWS CDK team will discontinue testing with those versions in CI/CD pipelines, meaning we cannot guarantee
58 | the functionality of the AWS CDK on those runtimes.
59 | 4. Transition Period
60 | 1. The six months after Node's official EOL date is considered a "transition period". During that time:
61 | 1. We will accept bug reports and work on fixes for the soon-to-be-deprecated version.
62 | 1. Depending on scope of work however, we may ask that you update to an LTS version.
63 | 2. We will produce announcements when possible to give users notice.
64 | 5. Announcement & Documentation
65 | 1. Official announcements of upcoming deprecations will be published at least 1 month before the EOL + six month cutoff.
66 | 2. The AWS CDK documentation will contain a "Node.js Version Support" section, listing currently supported versions and their end-of-support dates.
67 |
68 | ## Rationale and Alternatives
69 |
70 | * Alternative: Immediate Deprecation on Node.js EOL
71 | * Some communities drop support the day a Node.js version hits EOL. However, this may leave customers with insufficient lead time to migrate.
72 | Hence, we prefer a six month grace period.
73 | * Alternative: Longer Grace Period
74 | * A year or more might be more generous but burdens the AWS CDK team with supporting potentially insecure or outdated runtimes. Six months
75 | is a balanced, industry-friendly period (see [AWS SDK support for Node.JS](https://aws.amazon.com/blogs/developer/announcing-the-end-of-support-for-node-js-16-x-in-the-aws-sdk-for-javascript-v3/))
76 | * Alternative: Stick to "Best-Effort"
77 | * The AWS CDK could rely on "best-effort" support without a formal policy, but that ambiguity complicates planning for customers who need
78 | a known end date.
79 |
80 | ## Implementation Plan
81 |
82 | 1. Documentation Updates
83 | 1. Update the AWS CDK Developer Guide to reflect the new support policy and current Node.js version status.
84 | 2. Publish a schedule table showing upcoming Node.js EOL dates and the corresponding AWS CDK "end-of-support" dates.
85 | 2. Tooling
86 | 1. In the AWS CDK CLI, we will print a warning if the user's Node.js runtime is in the 6-month transition period or already past AWS CDK support.
87 | 3. Communication
88 | 1. Post announcements in the AWS CDK GitHub repository, AWS forums, and relevant community channels.
89 | 2. Encourage early adopters to test newer Node.js releases while still under transition.
90 | 4. Maintenance
91 | 1. Track Node.js EOL dates in an internal schedule.
92 | 2. Every six months, or as new EOL dates come up, update the official support matrix.
93 |
94 | ## Security Implications
95 |
96 | * Dropping support for EOL Node.js versions reduces the attack surface inherent in using outdated runtimes that do not receive security patches.
97 | * Encouraging customers to adopt newer Node.js versions increases overall security posture for AWS CDK-based solutions.
98 |
99 | ## Drawbacks
100 |
101 | * Some users on older Node.js versions may need to perform an unplanned or expedited upgrade if they were unaware of the Node.js EOL schedule.
102 | * There is a small maintenance overhead in tracking Node.js releases and updating documentation,
103 | though we deem it acceptable for security and predictability benefits.
104 |
105 | ## Adoption Strategy
106 |
107 | * Most AWS CDK users who stay on current LTS releases will remain unaffected by this policy.
108 | * Users on older Node.js versions are encouraged to upgrade as soon as possible to avoid issues and missed security patches.
109 |
110 | ## Conclusion
111 |
112 | This RFC seeks to introduce a formal Node.js version support policy for AWS CDK, which is for the AWS CDK to stop supporting a Node.js version
113 | six months after that version's official EOL date. This provides a predictable, security-forward posture that balances end-user needs for
114 | stability with the AWS CDK's continued modernization.
115 |
116 | Feedback is welcome! If you have any questions or concerns, please comment on this issue and if needed we can refine the proposal.
117 |
--------------------------------------------------------------------------------
/text/287-cli-deprecation-warnings.md:
--------------------------------------------------------------------------------
1 | ---
2 | rfc pr: [#290](https://github.com/aws/aws-cdk-rfcs/pull/290)
3 | tracking issue: https://github.com/aws/aws-cdk-rfcs/issues/287
4 | ---
5 |
6 | # CDK CLI deprecation warnings
7 |
8 | If an element in the CDK Construct Library (class, interface, property, function) is deprecated,
9 | using it inside a CDK application should produce a warning in the CDK CLI for all commands that perform synthesis
10 | (`cdk synth`, `cdk diff`, `cdk deploy`, etc.).
11 |
12 | This is done in order to aid the migration from `V1` of the CDK to `V2`
13 | (where all deprecated elements from `V1` will be removed).
14 |
15 | ## Working Backwards
16 |
17 | ### CHANGELOG
18 |
19 | feat(cli): warn about usages of deprecated Construct Library elements
20 |
21 | ### README
22 |
23 | When using any element (class, interface, property, function) that is deprecated in the Construct Library,
24 | you will receive a warning when performing any CLI operation that performs synthesis
25 | (`cdk synth`, `cdk diff`, `cdk deploy`, etc.) that looks similar to:
26 |
27 | ```
28 | [WARNING] The API @aws-cdk/core.CfnInclude is deprecated:
29 | please use the CfnInclude class from the cloudformation-include module instead.
30 | This API will be removed in the next major release
31 | ```
32 |
33 | There is also an environment variable `JSII_DEPRECATED` that you can use to change the behavior of this feature:
34 |
35 | * Setting that environment variable to the value `quiet` will silence these warnings
36 | (they will no longer be printed to the stderr stream).
37 | * Setting that environment variable to the value `fail` will instead fail with an exception when any deprecated element is used.
38 | * Setting that environment variable to the value `warn` is the same as the default behavior
39 | (the warnings will be printed to the stderr stream,
40 | but will not cause errors).
41 |
42 | #### When exactly will this warning be shown?
43 |
44 | In general, this warning will be shown when executing any member
45 | (by which I mean function (static or standalone), constructor, method,
46 | or field (static or instance))
47 | from the AWS CDK Construct Library that is either:
48 |
49 | * Deprecated itself;
50 | * Belongs to a class or interface that is deprecated;
51 | * Has an argument that is of an interface type with a deprecated property,
52 | and that property has been passed when calling the member.
53 |
54 | In particular, this means the warnings will not be printed for:
55 |
56 | * Any elements in your own code which are either explicitly deprecated,
57 | or override any deprecated members from the Construct Library.
58 | * Any code that invokes deprecated members,
59 | but that is itself not actually invoked when running CDK synthesis.
60 | * Using any deprecated members without actually invoking them
61 | (for example, using a deprecated type only in a type declaration).
62 |
63 | ### Contributing guide
64 |
65 | To deprecate an element (class, interface, property, function, etc.) in the CDK,
66 | you need to add the `@deprecated` JSDoc tag to the documentation block of that element.
67 | This will change the generated JavaScript code to make it emit a warning to the standard error stream whenever that element is invoked.
68 |
69 | ## FAQ
70 |
71 | ### What are we launching today?
72 |
73 | In the newest release of the AWS CDK Construct Library,
74 | any usage of a deprecated element (class, interface, property, function)
75 | in your CDK code will result in a warning being printed in the console output.
76 |
77 | ### Does this mean my code will stop working after this change?
78 |
79 | No! This feature only adds warnings to the CLI,
80 | it does not result in any code that was previously working to now fail.
81 |
82 | Note that if you want to migrate to `V2` of the CDK,
83 | which we strongly recommend,
84 | you will have to handle all the warnings that this feature emits,
85 | as all deprecated elements in `V1` will be removed from `V2`,
86 | and thus no longer available to be used by your code.
87 |
88 | If you want to make sure your code does not use any deprecated APIs,
89 | and thus is ready for migrating to CDK `V2`,
90 | you can set the `JSII_DEPRECATED` environment variable to the value `fail`,
91 | which will make any CDK command that invokes synthesis
92 | (`cdk synth`, `cdk deploy`, `cdk diff`, etc.)
93 | fail with an exception if any deprecated element is used.
94 |
95 | ## Internal FAQ
96 |
97 | ### Why are we doing this?
98 |
99 | We are doing this to help customers have a smoother migration from `V1` to `V2` of the CDK.
100 |
101 | ### Why should we _not_ do this?
102 |
103 | I see four reasons why we might not want to implement this feature:
104 |
105 | 1. Deprecated elements are already highlighted by virtually any editor/IDE.
106 | In my experience, customers are diligent in moving away from deprecated APIs,
107 | so this feature might not provide much value.
108 | 2. We risk overloading customers with warnings,
109 | training them to ignore our output if there's too much of it.
110 | A warning must be read in order to be effective, after all.
111 | 3. Depending on the exact solution chosen
112 | (see discussion below),
113 | we might not be able to distinguish from the user's code using a deprecated API,
114 | and from our own libraries using deprecated APIs.
115 | Which means a user might get warnings that they will be unable to get rid of
116 | (because they're coming from code that they don't control).
117 | 4. Depending on the exact solution chosen
118 | (see discussion below),
119 | there might be edge cases where something is deprecated,
120 | but we won't be able to report a particular pattern of its usage.
121 | Some examples include:
122 | * Using deprecated types only in type declarations.
123 | * Static constants (which includes enums).
124 | * Deprecated elements declared in the code, but not actually executed during synthesis.
125 |
126 | ### What changes are required to enable this change?
127 |
128 | There are two high-level components of this change:
129 |
130 | #### 1. Changes to JSII
131 |
132 | We will add a feature to JSII that adds the warning code to the emitted JavaScript for deprecated elements.
133 | It will be off by default, and will have to be activated explicitly to affect the emitted JavaScript code.
134 |
135 | #### 2. Using the changed JSII
136 |
137 | Once we have modified JSII and released a new version of it,
138 | we will need to use it in the CDK,
139 | and start compiling CDK with the new option turned on.
140 |
141 | We should also modify our CDK test infrastructure to run with the `JSII_DEPRECATED`
142 | environment variable set to `fail` by default,
143 | to prevent our own code from using deprecated APIs which would cause warnings to be shown to users.
144 | We should come up with a nice API that allows individual tests that are explicitly checking deprecated API(s)
145 | as regression tests to make the value of that environment variable `quiet` easily.
146 |
147 | ### Is this a breaking change?
148 |
149 | No.
150 |
151 | ### What are the drawbacks of this solution?
152 |
153 | 1. Requires changes to JSII.
154 | 2. Does not allow distinguishing between the customer using deprecated APIs,
155 | and the library code using deprecated APIs.
156 | We can alleviate this problem by making the value of the `JSII_DEPRECATED` environment variable `fail` by default in our tests,
157 | which will be a forcing function for us to stop using deprecated APIs in the Construct Library.
158 | 3. We only report APIs actually hit during a particular execution of the code
159 | (while missing statically declared but not actually invoked deprecated elements -
160 | note though that IDEs/editors still warn about them).
161 |
162 | ### What alternative solutions did you consider?
163 |
164 | There are many alternatives that were considered,
165 | but were, for various reasons, discarded:
166 |
167 | #### 1. Manual code for emitting the warnings
168 |
169 | Instead of modifying JSII,
170 | we could simply write the code inside the TypeScript APIs "manually",
171 | emitting warnings for any deprecated elements being used.
172 |
173 | This has the advantage of being the simplest solution,
174 | but has been discarded because of the large effort,
175 | and the fact that there's no way for us to verify that code has been added
176 | (and added correctly).
177 |
178 | #### 2. TypeScript decorators
179 |
180 | We can use [TypeScript decorators](https://www.typescriptlang.org/docs/handbook/decorators.html)
181 | to enhance the runtime JavaScript code with our custom logic.
182 | We can add an `awslint` rule that enforces that every element with the `@deprecated`
183 | JSDoc tag also needs to have the `@deprecated` decorator present.
184 | While decorators are still considered an experimental feature,
185 | our init templates and JSII enable them in `tsconfig.json`,
186 | which means the vast majority of our customers will have them enabled as well.
187 |
188 | The main advantage of this solution is that changes are needed only in the CDK project.
189 |
190 | Disadvantages of this solution:
191 |
192 | 1. It won't be possible to reliably get the module the deprecated element belongs to (only the name of the element) --
193 | the decorator is simply a JavaScript function defined in `@aws-cdk/core`,
194 | and doesn't have (easy) access to the information on what module a given object it's called on belongs to.
195 | 2. TypeScript does not allow decorators on interfaces (neither on the interface directly, nor on any of its properties).
196 | This means we will not be able to register warnings for structs, or struct properties.
197 | 3. We won't be able to register warnings for accessing deprecated static constants
198 | (this includes enums).
199 | 4. We only report APIs actually hit during a particular execution of the code
200 | (while missing statically declared but not actually invoked deprecated elements).
201 |
202 | #### 3. Rely on language-specific deprecation warning mechanisms
203 |
204 | JSII emits deprecated elements with the correct language-specific mechanisms in place
205 | (for example, the `@Deprecated` annotation for Java).
206 | We could simply piggyback on those,
207 | instead of writing our own.
208 |
209 | Advantages of this solution:
210 |
211 | 1. Minimal development effort on our side.
212 | 2. Compared to runtime analysis, allows listing of all deprecated API usages in the code base,
213 | not just the ones that happen to be executed during a particular run.
214 |
215 | Disadvantages of this solution:
216 |
217 | 1. TypeScript in VSCode (our most popular language/IDE combination)
218 | does not by default show a complete list of deprecated API usages:
219 | it will only show strikethroughs on identifiers you happen to be looking at.
220 | Getting a complete list of deprecated APIs requires installing and configuring `eslint`.
221 | 2. TypeScript currently does not properly detect deprecation of properties in object literals,
222 | and deprecation of properties (props) is very important for our project.
223 | Even though it will properly detect them while autocompleting,
224 | and there is an issue about it on the TypeScript bug tracker,
225 | we cannot rely on them fixing this in a reasonable time frame.
226 | 3. There is nothing out of the box for this in Python
227 | (we would have to change JSII and write our own decorator,
228 | which would have the same disadvantages as the TypeScript decorators solution).
229 |
230 | #### 4. Parse the customer's code
231 |
232 | We could create a parser and type-checker for each language the CDK supports,
233 | and use that to discover any cases of using deprecated elements.
234 |
235 | Advantages of this solution:
236 |
237 | 1. Allows us to discover some tricky deprecated elements,
238 | like enum values, or deprecated types used only in type declarations,
239 | that will be difficult to do with only runtime reporting.
240 | 2. Would prevent reporting warnings for deprecated usages outside the customer's code
241 | (for example, in our own libraries).
242 | 3. Compared to runtime analysis, allows listing of all deprecated API usages in the code base,
243 | not just the ones that happen to be executed during a particular run.
244 |
245 | Disadvantages of this solution:
246 |
247 | 1. We would have to write a separate parser and type-checker for each language supported by the CDK.
248 | 2. This additional parsing could have an adverse impact on the performance of CDK commands.
249 | 3. It's not obvious this analysis can even be performed at all in the case of dynamically-typed languages like JavaScript or Python.
250 |
251 | ### What is the high level implementation plan?
252 |
253 | The high-level implementation plan is:
254 |
255 | 1. Make the changes in JSII
256 | (will probably be a single PR),
257 | and release a new version once those are merged in.
258 | Effort estimate (development + code review): 2 weeks.
259 |
260 | 2. Set the new option during building CDK,
261 | make sure the tests use the `JSII_DEPRECATED` environment variable set to `fail`,
262 | and provide an API for tests to opt-out of that on a case-by-case basis.
263 | Effort estimate (development + code review): 1 week.
264 |
265 | ### Are there any open issues that need to be addressed later?
266 |
267 | No.
268 |
269 | ### Are there any things that need to be paid attention to during implementation?
270 |
271 | 1. We need to check all arguments of all functions that have struct types,
272 | and add code checking whether any of the deprecated struct properties have been passed.
273 | This must include doing this recursively for nested structs as well.
274 | 2. It's very likely internal (starting with `_`)
275 | API elements should be excluded from emitting these warnings.
276 | 3. It would be ideal if each API element only emitted the warning once,
277 | when it was first invoked - as opposed to doing it every time it was invoked.
278 | 4. The message displayed in the console should include the deprecation message specified in the code.
279 | 5. If a class is deprecated, the warnings should most likely be added to every API element of that class,
280 | whether that element is explicitly deprecated or not.
281 |
--------------------------------------------------------------------------------
/text/353-cfn-registry-constructs.md:
--------------------------------------------------------------------------------
1 | # Constructs for Public CloudFormation Extensions
2 |
3 | * **Original Author(s):**: @eladb
4 | * **Tracking Issue**: #353
5 | * **API Bar Raiser**: @rix0rrr
6 |
7 | A set of construct libraries which includes generated constructs for all CloudFormation
8 | resources and modules published to the public CloudFormation Registry.
9 |
10 | ## README
11 |
12 | The `@cdk-cloudformation/xxx` scope includes construct libraries with generated strongly-typed "L1" constructs
13 | for all the public extensions (resources and modules) in the AWS CloudFormation public registry. This library
14 | makes it easier to use public CloudFormation extensions in your CDK apps.
15 |
16 | For example, let's say I want to define a GitHub repository using the `TF::GitHub::Repository` resource:
17 |
18 | ```ts
19 | import { CfnRepository } from '@cdk-cloudformation/tf-github-repository';
20 |
21 | new CfnRepository(this, 'MyRepo', {
22 | name: 'my-repo',
23 | description: 'My awesome project',
24 | licenseTemplate: 'apache-2.0',
25 | });
26 | ```
27 |
28 | For each type (e.g. `TF::GitHub::Repository` in the above example) in
29 | the public CloudFormation Registry, a module is available under
30 | the scope `@cdk-cloudformation/`. This library
31 | includes a construct class and all the relevant data types for this
32 | type.
33 |
34 | The module version corresponds to the version of the type schema
35 | in the public registry.
36 |
37 | ---
38 |
39 | Ticking the box below indicates that the public API of this RFC has been
40 | signed-off by the API bar raiser (the `status/api-approved` label was applied to the
41 | RFC pull request):
42 |
43 | ```
44 | [ ] Signed-off by API Bar Raiser @xxxxx
45 | ```
46 |
47 | ## Public FAQ
48 |
49 | ### What are we launching today?
50 |
51 | We are publishing a set of construct libraries (in all JSII languages) which include constructs (and auxiliary types) for
52 | all the resources and modules in the public CloudFormation registry.
53 |
54 | ### Why should I use this feature?
55 |
56 | This library makes it easier to discover and use CloudFormation extensions in CDK apps.
57 |
58 | Previously, in order to use a resource from the CloudFormation registry, you would need to look up the resource documentation
59 | and use the low-level, weakly-typed `CfnResource` class to define it in your CDK app:
60 |
61 | ```ts
62 | new CfnResource(this, 'MyEksCluster', {
63 | type: 'AWSQS::EKS::Cluster',
64 |
65 | // weakly-typed!
66 | properties: {
67 | name: 'my-new-cluster',
68 | tags: [ { key: 'foo', value: 'bar' } ],
69 | },
70 | });
71 | ```
72 |
73 | With `@cdk-cloudformation` all public resources and modules will be listed in the **Construct Hub** like any
74 | other construct and by importing the relevant `@cdk-cloudformation` module into their projects, they will be able to
75 | use them via strongly-typed classes. IDEs will show type information and inline help derived from the
76 | extension schema.
77 |
78 | ```ts
79 | import { CfnCluster } from '@cdk-cloudformation/awsqs-eks-cluster';
80 |
81 | new CfnCluster(this, 'MyEksCluster', {
82 | name: 'my-new-cluster',
83 | tags: [ { key: 'foo', value: 'bar' } ],
84 | });
85 | ```
86 |
87 | ## Internal FAQ
88 |
89 | ### Why are we doing this?
90 |
91 | We are doing this because CDK users would like to be able to use CloudFormation extensions
92 | (modules, resources) as first-class citizens in CDK code. Extensions and native resource types
93 | should look and feel the same way when defined in CDK apps.
94 |
95 | Additionally, we would like to surface CloudFormation extensions in the upcoming Construct Hub
96 | and to that end, if we simply publish a construct library that includes constructs for all the public,
97 | extensions, this library will naturally be discoverable through the Hub.
98 |
99 | ### Why should we _not_ do this?
100 |
101 | We have a longer term plan ([RFC#77](https://github.com/aws/aws-cdk-rfcs/issues/77)) to add
102 | support for a CLI command called `cdk import` which will allow users to generate L1s from any
103 | registry type just-in-time and add them to their project (this is similar to CDK8s and CDKtf).
104 |
105 | ### What is the technical solution (design) of this feature?
106 |
107 | The idea is to do something similar to what we do with the CloudFormation L1s
108 | in the AWS CDK and simply publish JSII construct libraries which includes
109 | statically-generated L1s for all the public registry extensions.
110 |
111 | #### Package per type
112 |
113 | CloudFormation extensions are semantically versioned. This means that new versions of an extension
114 | may include breaking changes. We also want to allow users to pick up a specific version of an extension.
115 | In light of these constraints, we determined that the best path forward is to publish each extension (type)
116 | as a separate module, with a version that corresponds to the extension version in the CloudFormation
117 | registry. This way, we will have 1:1 alignment and users will have the most freedom.
118 |
119 | There are currently 44 public types (resources & modules), but we expect this
120 | list to substantially grow. If this dramatically scales (to thousands of
121 | packages) and we hit any limitations of package managers (e.g. account/scope
122 | limits), we can shard the solution (multiple accounts, multiple scopes, etc).
123 |
124 | #### Naming Scheme
125 |
126 | We will use the following naming scheme. Names are all derived from the base name of `cdk-cloudformation` which represents the fact
127 | that these modules include L1s which we also refer to as "CFN resources" in the CDK. Since these are also modules, we decided against
128 | `cdk-cloudformation-resources`. We also decided not to use the short name `cfn` in the scope. See below for some alternatives considered.
129 |
130 | * **npm**:
131 | * Package name: `@cdk-cloudformation/<module>` (e.g. `@cdk-cloudformation/mongodb-atlas-project`)
132 | * **Maven Central**:
133 | * Group ID: `io.github.cdklabs.cdk_cloudformation`
134 | * Artifact ID: `<module>` (e.g. `mongodb-atlas-project`)
135 | * Java Package: `io.github.cdklabs.cdk_cloudformation.<module>` (e.g. `io.github.cdklabs.cdk_cloudformation.mongodb_atlas_project`)
136 | * **PyPI**:
137 | * Distribution name: `cdk-cloudformation-<module>` (e.g. `cdk-cloudformation-mongodb-atlas-project`)
138 | * Module: `cdk_cloudformation_<module>` (e.g. `cdk_cloudformation_mongodb_atlas_project`)
139 | * **NuGet**:
140 | * Package ID: `CdkCloudFormation.<Module>` (e.g. `CdkCloudFormation.MongodbAtlasProject`)
141 | * .NET Namespace: `CdkCloudFormation.<Module>` (e.g. `CdkCloudFormation.MongodbAtlasProject`)
142 |
143 | Alternatives considered:
144 |
145 | 1. `@cdk-cloudformation-types/mongodb-atlas-project`
146 | 1. `@cdk-types/mongodb-atlas-project`
147 | 2. `@cdk-cfn/mongodb-atlas-project`
148 | 3. `@cdkcfn/mongodb-atlas-project`
149 | 4. `@awscdk/mongodb-atlas-project`
150 | 5. `@cfn/mongodb-atlas-project`
151 | 6. `@cfn/cdk-mongodb-atlas-project`
152 | 7. `@cloudformation/cdk-mongodb-atlas-project`
153 | 8. `@cfntypes/mongodb-atlas-project`
154 | 9. `@cloudformation-registry/mongodb-atlas-project`
155 | 10. `@cfn-registry/cdk-mongodb-atlas-project`
156 | 11. `@cfn-types/cdk-mongodb-atlas-project`
157 | 12. `@cfn-registry/cdk-mongodb-atlas-project`
158 | 13. `@cloudformation-registry/cdk-mongodb-atlas-project`
159 | 13. `@cdk-cloudformation-registry/cdk-mongodb-atlas-project`
160 | 13. `@cdk-resources/cdk-mongodb-atlas-project`
161 | 13. `@awscdk-resources/mongodb-atlas-project`
162 | 13. `@cloudformation-resources/mongodb-atlas-project`
163 | 13. `@cloudformation-types/mongodb-atlas-project`
164 |
165 | #### Versioning
166 |
167 | To allow users to select which extension schema version to use, the version of
168 | each package will be based on the schema version of the extension.
169 |
170 | We will prefer to use the full schema version (MAJOR.MINOR.PATCH), but if we
171 | will need to publish a security patch, we should be able to bump the PATCH
172 | number as needed.
173 |
174 | #### Code Generator
175 |
176 | The code generator will be executed daily (e.g. through a GitHub workflow),
177 | query the CloudFormation Registry, and generate L1s for all the types based
178 | on their metadata and schema.
179 |
180 | The code generator is implemented as a separate tool called
181 | [`cdk-import`](https://github.com/cdklabs/cdk-import).
182 |
183 | We can use the `ListTypes` API to list all the types in the registry (excluding native AWS resources):
184 |
185 | ```shell
186 | aws cloudformation list-types --visibility=PUBLIC | grep -v "AWS::"
187 | ```
188 |
189 | And then for each type, we can use `DescribeType` to retrieve the type information and its schema.
190 |
191 | For example, this command will return the description of the `AWSQS::EKS::Cluster` resource:
192 |
193 | ```shell
194 | aws cloudformation describe-type --arn arn:aws:cloudformation:us-east-1::type/resource/408988dff9e863704bcc72e7e13f8d645cee8311/AWSQS-EKS-Cluster
195 | ```
196 |
197 | For reference: the output of this command can be found [here](https://gist.github.com/eladb/bf417c07444027d6954360295df4ee37#file-awsqs-vpc-vpcqs-module-json).
198 | The parsed `Schema` field can be found [here](https://gist.github.com/eladb/bf417c07444027d6954360295df4ee37#file-awsqs-vpc-vpcqs-module-schema-json).
199 |
200 | Now, we need to filter all non-`AWS::` types (the `AWS::` types are actually L1s), and then generate
201 | an L1 construct for each one. This includes:
202 |
203 | 1. Generating the construct class that extends `CfnResource`. I think we can use submodules to
204 | represent the type namespace (`awsqs.eks` in this case), and the name of the resource as the construct name `Cluster`.
205 | 2. Generate an `XxxProps` JSII struct for this construct based on the `Schema` field.
206 | This can be done using [`json2jsii`](https://github.com/cdklabs/json2jsii), which is the same
207 | tool we use to implement the `import` command in CDK8s and CDKtf.
208 |
209 | ### Is this a breaking change?
210 |
211 | Nope.
212 |
213 | ### What are the drawbacks of this solution?
214 |
215 | > Describe any problems/risks that can be introduced if we implement this RFC.
216 |
217 | ### What alternative solutions did you consider?
218 |
219 | #### `cdk import` versus library
220 |
221 | As mentioned above, an alternative approach would be to add support for `import`
222 | in the CDK CLI, which is inline with how other CDKs implement this functionality
223 | (`import` in CDK8s and `get` in CDKtf). The `import` experience offers a "just
224 | in time" option that the pre-published library option does not, but also
225 | increases the cognitive load for users since they will need to understand how
226 | `import` works.
227 |
228 | The downsides of the `import` approach (at this point) are that (a) it is a
229 | bigger investment as it involves a change to the CDK CLI; and (b) the Construct
230 | Hub won't be able to surface these types automatically (we will need some custom
231 | support for "imported" types, which is something that we would like to add to
232 | the Construct Hub in the future for all CDK domains).
233 |
234 | To make sure this path is possible in the future, the code generator will be
235 | implemented as a separate tool called `cdk-import`, which we can later integrate
236 | into the CDK CLI.
237 |
238 | #### Monolithic versus module per resource
239 |
240 | As mentioned above, due to the fact that resource schemas are versioned and we want to allow users to select which
241 | version of the resource they use, we determined that the best approach is to publish a generated module for each resource.
242 |
243 | ### What is the high level implementation plan?
244 |
245 | * [ ] Implement [`cdk-import`](https://github.com/cdklabs/cdk-import) as a command-line tool that generates L1 constructs for registry extensions.
246 | * [ ] Create a mono-repo-style project using projen that utilizes `cdk-import` to generate a module for each registry resource.
247 | * [ ] Create a scheduled job which generates the modules periodically (should probably run against `us-east-1`)
248 |
249 | ### Are there any open issues that need to be addressed later?
250 |
251 | * ~__Property name conversion__~ (resolved by [cdklabs/json2jsii#480]): `json2jsii` converts field names to camelCase to adhere with
252 | TypeScript naming conventions.
253 | This means that we need to map field names back when we define the `CfnResource`. I think we might want to add a
254 | feature to `json2jsii` that will generate conversion functions that can be used to convert back data types to the original schema.
255 |
256 | [cdklabs/json2jsii#480]: https://github.com/cdklabs/json2jsii/pull/480
257 |
--------------------------------------------------------------------------------
/text/359-construct-hub-deny-list.md:
--------------------------------------------------------------------------------
1 | # Construct Hub Deny List
2 |
3 | * **Original Author(s):**: @eladb
4 | * **Tracking Issue**: #359
5 | * **API Bar Raiser**: @RomainMuller
6 |
7 | To ensure the integrity of the website and prevent recurring abuse we need to have the ability to block specific packages from being ingested.
8 |
9 | ## Working Backwards - README
10 |
11 | The Construct Hub has a capability to block specific package versions (or all versions of a specific package) by
12 | adding it to a "deny list". When a package is in a deny list, its processing will be skipped by the ingestion
13 | pipeline (to protect the pipeline from abuse) and will no longer be available in the construct hub index or package pages.
14 |
15 | To add a package to the deny list, add it to the `denyList` option when defining the `ConstructHub` construct in your deployment:
16 |
17 | ```ts
18 | new ConstructHub(this, 'ConstructHub', {
19 | denyList: [
20 | { package: '@aws-cdk/aws-eks', version: '1.44.0', reason: 'Security issue.' },
21 | { package: 'boombam', reason: 'Copyright violation' },
22 | ]
23 | });
24 | ```
25 |
26 | Each entry in `denyList` is a rule that packages are matched against. Packages can match against
27 | name + version or just name (and all versions). `reason` is currently just emitted to logs
28 | if we run into a denied package.
29 |
30 | ---
31 |
32 | Ticking the box below indicates that the public API of this RFC has been
33 | signed-off by the API bar raiser (the `status/api-approved` label was applied to the
34 | RFC pull request):
35 |
36 | ```
37 | [x] Signed-off by API Bar Raiser @RomainMuller
38 | ```
39 |
40 | ## Public FAQ
41 |
42 | ### What are we launching today?
43 |
44 | A feature of the Construct Hub to allow operators to block packages (or package versions) from
45 | appearing in search results and package pages.
46 |
47 | ### Why should I use this feature?
48 |
49 | Operators (incl. Amazon, as the operator of the public construct hub) can use this to block
50 | packages for any reason (e.g. security, compliance, copyright, trademark, etc).
51 |
52 | ## Internal FAQ
53 |
54 | ### Why are we doing this?
55 |
56 | This is a security and legal requirement for the Construct Hub.
57 |
58 | ### What is the technical solution (design) of this feature?
59 |
60 | The deny list will be modeled through strongly-typed API of the `ConstructHub`.
61 |
62 | 1. During deployment, we will use `BucketDeployment` in order to upload this list into a file in an S3 bucket dedicated
63 | for the deny list.
64 | 2. We will trigger a lambda function every time the deny list file is created/updated. This lambda function will
65 | iterate over the packages in the deny list and will delete any objects from the packages s3 bucket that match a
66 | denied package. The object key prefix will be based on the deny entry key (name + version or just name).
67 | We currently assume this process will fit into a single lambda execution given we will issue an S3 ListObjects
68 | with a prefix filter. Timeout alarms will let us know if this does not scale and requires an additional indirection.
69 | 3. The "discovery" and "ingestion" lambda functions will retrieve this file for each request, and will consult the
70 | deny list for every package. If an incoming package is in the deny list, it will be skipped and a log entry will
71 | be emitted with the deny information.
72 | 4. The "inventory" process will retrieve the deny list and will emit a metric that includes the number of deny
73 | list entries in the list for monitoring purposes.
74 | 5. We will add a trigger to the "catalog-builder" for object deletion so when a denied package is deleted from
75 | the packages bucket, the catalog will be rebuilt without that package.
76 |
77 | ### Is this a breaking change?
78 |
79 | No.
80 |
81 | ### What alternative solutions did you consider?
82 |
83 | #### Manually manage the deny list via a file on S3
84 |
85 | We considered the option of letting operators manage their deny list by directly editing a file on S3. There are multiple downsides to this approach:
86 |
87 | 1. If this file gets corrupted due to a human error (e.g. invalid JSON), denied package may be ingested.
88 | To address this we would need some kind of fallback to previously good versions and this adds quite a
89 | lot of unneeded complexity. By defining the deny list in strongly-typed code, the compiler (and synthesizer)
90 | takes care of validating the input which simplifies downstream consumption of this information.
91 | 2. Each deployment will have its own copy of the deny list and replicating the deny list across multiple
92 | environments (e.g. gamma/prod) will require additional mechanism. By allowing users to specify deny list
93 | in code, they have freedom to decide what parts of the list is replicated across environments (all/none/partial)
94 | by using simple programming techniques (e.g. constants).
95 |
96 | ### What are the drawbacks of this solution?
97 |
98 | One benefit of the direct S3 approach is reduced SLA for blocking a package. In the proposed design, customers need to
99 | commit the change to their source repository and have that change propagate through their release pipeline in order for
100 | the package to be removed. But since the SLA we declared for removing a package is 48 hours, this seems like a reasonable
101 | tradeoff. If in the future we will want to add support for "quick removal" we can always add an additional mechanism that
102 | will temporarily block a package.
103 |
104 | Another potential drawback (albeit it could be perceived as a feature) is that packages denied in the public hub (or any
105 | hub instance for that matter) will still be allowed in other hubs until they are explicitly
106 | added to the deny list there. That might actually be the desired behavior (e.g. local deployment may
107 | want to show a package that is blocked in the public hub), but it should be communicated that if a
108 | package is reported and denied from the public hub, it will still appear in private hubs unless it is
109 | explicitly denied there.
110 |
111 | ### What is the high level implementation plan?
112 |
113 | * [ ] Add `denyList` option to `ConstructHub` and upload to an S3 bucket.
114 | * [ ] Filter denied packages in "discovery"
115 | * [ ] Filter denied packages in "ingestion"
116 | * [ ] Add deny list count to "inventory"
117 | * [ ] Create deny-list handler - triggered by updates to the deny list and deletes existing packages
118 | * [ ] Update catalog builder when objects are deleted
119 | * [ ] Move "report abuse" email address to `ConstructHub` API so it's configurable in private instances
120 | * [ ] Update documentation to indicate that deny list (and report abuse) is instance-specific
121 |
122 | ### Are there any open issues that need to be addressed later?
123 |
124 | * [ ] @NetaNir can you elaborate on this please? "(Security-requirement) "Alert when a specific package owner
125 | is hitting the deny list protection far more then normal."
126 |
--------------------------------------------------------------------------------
/tools/linters/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules/
2 |
--------------------------------------------------------------------------------
/tools/linters/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@cdklabs/linters",
3 | "version": "1.0.0",
4 | "license": "Apache-2.0",
5 | "private": true,
6 | "dependencies": {
7 | "markdownlint-cli": "^0.35.0"
8 | }
9 | }
10 |
--------------------------------------------------------------------------------
/tools/rfc-render/.gitignore:
--------------------------------------------------------------------------------
1 | node_modules
2 |
3 |
--------------------------------------------------------------------------------
/tools/rfc-render/fetch-issues.js:
--------------------------------------------------------------------------------
1 | import { Octokit } from "@octokit/rest";
2 | import { STATUS_LIST, UNKNOWN_STATUS } from './status.js';
3 | import { promises as fs } from 'fs';
4 | import path from 'path';
5 | import { fileURLToPath } from 'url';
6 |
7 | export { issuesGroupedByStatus };
8 |
9 | const STATUS_LABELS = Object.keys(STATUS_LIST);
10 |
/**
 * Fetch RFC issues from the aws/aws-cdk-rfcs repository and group them by
 * their status label.
 *
 * @param {string[]} [filterStatus] - optional list of status labels; when
 *   provided, only issues carrying one of these statuses are included.
 * @returns {Promise<Record<string, object[]>>} map from status label to an
 *   array of issue rows ({ number, title, link, assignee, champion, status, doc }).
 */
async function issuesGroupedByStatus(filterStatus = undefined) {
  const __dirname = path.dirname(fileURLToPath(import.meta.url));
  const files = await fs.readdir(path.join(__dirname, '..', '..', 'text'));

  const github = new Octokit({
    auth: process.env.PROJEN_GITHUB_TOKEN || process.env.GITHUB_TOKEN
  });

  const labelQuery = filterStatus ? `label:${filterStatus.join(',')}` : '';
  const fullQuery = `repo:aws/aws-cdk-rfcs is:issue ${labelQuery}`;
  console.log(fullQuery);
  const request = github.search.issuesAndPullRequests.endpoint.merge({
    q: fullQuery,
    advanced_search: true,
  });

  const searchResults = await github.paginate(request);

  const issueByStatus = {};

  for (const issue of searchResults) {
    // the search API also returns pull requests; only issues matter here
    if (issue.pull_request) {
      continue;
    }

    const status = determineStatus(issue.labels);

    // skip statuses that were not requested by the caller
    if (filterStatus && !filterStatus.includes(status)) {
      continue;
    }

    // skip closed issues of unknown status
    if (issue.state === 'closed' && status === UNKNOWN_STATUS) {
      continue;
    }

    const { champion, pr_number } = findMetadata(issue);
    const doc = findDocFile(files, issue.number);

    // prefer linking to the RFC document, then the RFC PR, then the issue
    let link;
    if (doc) {
      link = `https://github.com/aws/aws-cdk-rfcs/blob/main/text/${doc}`;
    } else if (pr_number) {
      link = `https://github.com/aws/aws-cdk-rfcs/pull/${pr_number}`;
    } else {
      link = `https://github.com/aws/aws-cdk-rfcs/issues/${issue.number}`;
    }

    (issueByStatus[status] ??= []).push({
      number: issue.number,
      title: issue.title,
      link,
      assignee: issue.assignee && issue.assignee.login,
      champion,
      status,
      doc,
    });
  }

  return issueByStatus;
}
74 |
75 |
/**
 * Find the RFC document whose numeric file-name prefix matches an issue number.
 *
 * RFC documents are named `NNNN-slug.md` with a zero-padded numeric prefix
 * (e.g. `0006-monolothic-packaging.md`), so the text before the first `-`
 * is parsed as a decimal number and compared against the issue number.
 *
 * @param {string[]} files - file names from the `text/` directory.
 * @param {number} number - the RFC tracking issue number.
 * @returns {string|undefined} the matching file name, or undefined when none matches.
 */
function findDocFile(files, number) {
  // always pass a radix: zero-padded prefixes like "0006" must parse as decimal
  return files.find(file => Number.parseInt(file.split('-')[0], 10) === number);
}
79 |
/**
 * Extract the champion and RFC pull-request number from an issue body.
 *
 * Expects a markdown table of the form:
 *   |PR|Champion|
 *   |--|--|
 *   |#123|@someone|
 *
 * @param {{body?: string}} issue - the GitHub issue object.
 * @returns {{champion: string, pr_number?: string}} champion handle (may be
 *   empty) and, when the table was found, the PR number without the leading `#`
 *   (empty string when absent).
 */
function findMetadata(issue) {
  const body = issue.body || '';
  const lines = body.split('\n');
  const titleIndex = lines.findIndex(line => line.startsWith('|PR|Champion|'));
  if (titleIndex === -1) {
    return { champion: '' };
  }

  // the data row sits two lines below the header (after the |--|--| separator);
  // guard against truncated bodies where that row is missing entirely
  const row = lines[titleIndex + 2] ?? '';
  // a row like "|#123|@someone|" splits into ['', '#123', '@someone', '']
  let [, pr = '', champion = ''] = row.split('|');
  champion = champion.trim();

  const pr_number = (pr.startsWith('#') ? pr.substring(1) : '').trim();
  return { champion, pr_number };
}
94 |
/**
 * Derive an issue's status from its labels.
 *
 * The status is well-defined only when exactly one of the labels is a known
 * status label; zero or multiple matches yield UNKNOWN_STATUS.
 *
 * @param {{name: string}[]} item - the issue's label objects.
 * @returns {string} the single matching status label name, or UNKNOWN_STATUS.
 */
function determineStatus(item) {
  const statuses = item
    .map(label => label.name)
    .filter(name => STATUS_LABELS.includes(name));

  return statuses.length === 1 ? statuses[0] : UNKNOWN_STATUS;
}
109 |
--------------------------------------------------------------------------------
/tools/rfc-render/inject-table.js:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env node
2 |
3 | import { promises as fs } from 'fs';
4 | import path from 'path';
5 | import { fileURLToPath } from 'url';
6 | import { render } from './render-rfc-table.js';
7 | import { parseArgs } from 'util';
8 |
/**
 * CLI entry point: re-render the RFC table between the begin/end marker lines
 * of the given README file and write the result back in place.
 *
 * Usage: inject-table.js [README.md] [--status <status> ...]
 */
async function main() {
  // --status/-s may be repeated; the first positional is the README path
  const { values: { status = undefined }, positionals: [readme = 'README.md'] } = parseArgs({
    options: {
      status: {
        type: 'string',
        short: 's',
        multiple: true,
      }
    },
    strict: true,
    allowPositionals: true,
  })

  const readmeFile = path.resolve(readme);
  // normalize bare status names ("approved") to full label form ("status/approved")
  const statusList = status?.map(s => s.startsWith('status/') ? s : `status/${s}`);

  console.info(`Injecting '${readmeFile}' with status: ${statusList ? statusList.join(', ') : ''}`);
  const text = (await fs.readFile(readmeFile, 'utf-8'));
  const lines = text.split('\n');


  // NOTE(review): both marker strings are empty, so begin and end resolve to
  // the same first empty line — the real markers (presumably HTML comments
  // like "<!--BEGIN_TABLE-->") appear to have been stripped from this copy;
  // verify against the repository before relying on this behavior.
  const begin = lines.indexOf('');
  const end = lines.indexOf('');

  if (begin === -1 || end === -1) {
    throw new Error(`unable to find begin/end markers in file ${readmeFile}`);
  }

  // keep everything up to and including the begin marker, splice in the
  // rendered table, then keep from the end marker onward
  const final = [...lines.slice(0, begin + 1), ...(await render(statusList, Boolean(statusList))), ...lines.slice(end)];

  console.error(`Writing ${readmeFile}`);
  await fs.writeFile(readmeFile, final.join('\n'));
  console.error(`Done`);
}
43 |
// Run the CLI; on failure, print the error and a usage hint, then exit non-zero.
main().catch(e => {
  console.error();
  console.error(e);
  console.error();
  const __filename = fileURLToPath(import.meta.url);
  // NOTE(review): "[--status ]" looks like a stripped placeholder (likely
  // "[--status <status>]" originally) — verify against the repository.
  console.error(`Usage:\n\t${path.relative(process.cwd(), __filename)} README.md [--status ] [--status ] [...]`)
  process.exitCode = 1;
});
52 |
--------------------------------------------------------------------------------
/tools/rfc-render/package-lock.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@cdklabs/rfc-render",
3 | "version": "1.0.0",
4 | "lockfileVersion": 3,
5 | "requires": true,
6 | "packages": {
7 | "": {
8 | "name": "@cdklabs/rfc-render",
9 | "version": "1.0.0",
10 | "license": "Apache-2.0",
11 | "dependencies": {
12 | "@octokit/rest": "^21.1.1"
13 | }
14 | },
15 | "node_modules/@octokit/auth-token": {
16 | "version": "5.1.2",
17 | "resolved": "https://registry.npmjs.org/@octokit/auth-token/-/auth-token-5.1.2.tgz",
18 | "integrity": "sha512-JcQDsBdg49Yky2w2ld20IHAlwr8d/d8N6NiOXbtuoPCqzbsiJgF633mVUw3x4mo0H5ypataQIX7SFu3yy44Mpw==",
19 | "license": "MIT",
20 | "engines": {
21 | "node": ">= 18"
22 | }
23 | },
24 | "node_modules/@octokit/core": {
25 | "version": "6.1.4",
26 | "resolved": "https://registry.npmjs.org/@octokit/core/-/core-6.1.4.tgz",
27 | "integrity": "sha512-lAS9k7d6I0MPN+gb9bKDt7X8SdxknYqAMh44S5L+lNqIN2NuV8nvv3g8rPp7MuRxcOpxpUIATWprO0C34a8Qmg==",
28 | "license": "MIT",
29 | "dependencies": {
30 | "@octokit/auth-token": "^5.0.0",
31 | "@octokit/graphql": "^8.1.2",
32 | "@octokit/request": "^9.2.1",
33 | "@octokit/request-error": "^6.1.7",
34 | "@octokit/types": "^13.6.2",
35 | "before-after-hook": "^3.0.2",
36 | "universal-user-agent": "^7.0.0"
37 | },
38 | "engines": {
39 | "node": ">= 18"
40 | }
41 | },
42 | "node_modules/@octokit/endpoint": {
43 | "version": "10.1.3",
44 | "resolved": "https://registry.npmjs.org/@octokit/endpoint/-/endpoint-10.1.3.tgz",
45 | "integrity": "sha512-nBRBMpKPhQUxCsQQeW+rCJ/OPSMcj3g0nfHn01zGYZXuNDvvXudF/TYY6APj5THlurerpFN4a/dQAIAaM6BYhA==",
46 | "license": "MIT",
47 | "dependencies": {
48 | "@octokit/types": "^13.6.2",
49 | "universal-user-agent": "^7.0.2"
50 | },
51 | "engines": {
52 | "node": ">= 18"
53 | }
54 | },
55 | "node_modules/@octokit/graphql": {
56 | "version": "8.2.1",
57 | "resolved": "https://registry.npmjs.org/@octokit/graphql/-/graphql-8.2.1.tgz",
58 | "integrity": "sha512-n57hXtOoHrhwTWdvhVkdJHdhTv0JstjDbDRhJfwIRNfFqmSo1DaK/mD2syoNUoLCyqSjBpGAKOG0BuwF392slw==",
59 | "license": "MIT",
60 | "dependencies": {
61 | "@octokit/request": "^9.2.2",
62 | "@octokit/types": "^13.8.0",
63 | "universal-user-agent": "^7.0.0"
64 | },
65 | "engines": {
66 | "node": ">= 18"
67 | }
68 | },
69 | "node_modules/@octokit/openapi-types": {
70 | "version": "23.0.1",
71 | "resolved": "https://registry.npmjs.org/@octokit/openapi-types/-/openapi-types-23.0.1.tgz",
72 | "integrity": "sha512-izFjMJ1sir0jn0ldEKhZ7xegCTj/ObmEDlEfpFrx4k/JyZSMRHbO3/rBwgE7f3m2DHt+RrNGIVw4wSmwnm3t/g==",
73 | "license": "MIT"
74 | },
75 | "node_modules/@octokit/plugin-paginate-rest": {
76 | "version": "11.4.2",
77 | "resolved": "https://registry.npmjs.org/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-11.4.2.tgz",
78 | "integrity": "sha512-BXJ7XPCTDXFF+wxcg/zscfgw2O/iDPtNSkwwR1W1W5c4Mb3zav/M2XvxQ23nVmKj7jpweB4g8viMeCQdm7LMVA==",
79 | "license": "MIT",
80 | "dependencies": {
81 | "@octokit/types": "^13.7.0"
82 | },
83 | "engines": {
84 | "node": ">= 18"
85 | },
86 | "peerDependencies": {
87 | "@octokit/core": ">=6"
88 | }
89 | },
90 | "node_modules/@octokit/plugin-request-log": {
91 | "version": "5.3.1",
92 | "resolved": "https://registry.npmjs.org/@octokit/plugin-request-log/-/plugin-request-log-5.3.1.tgz",
93 | "integrity": "sha512-n/lNeCtq+9ofhC15xzmJCNKP2BWTv8Ih2TTy+jatNCCq/gQP/V7rK3fjIfuz0pDWDALO/o/4QY4hyOF6TQQFUw==",
94 | "license": "MIT",
95 | "engines": {
96 | "node": ">= 18"
97 | },
98 | "peerDependencies": {
99 | "@octokit/core": ">=6"
100 | }
101 | },
102 | "node_modules/@octokit/plugin-rest-endpoint-methods": {
103 | "version": "13.3.1",
104 | "resolved": "https://registry.npmjs.org/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-13.3.1.tgz",
105 | "integrity": "sha512-o8uOBdsyR+WR8MK9Cco8dCgvG13H1RlM1nWnK/W7TEACQBFux/vPREgKucxUfuDQ5yi1T3hGf4C5ZmZXAERgwQ==",
106 | "license": "MIT",
107 | "dependencies": {
108 | "@octokit/types": "^13.8.0"
109 | },
110 | "engines": {
111 | "node": ">= 18"
112 | },
113 | "peerDependencies": {
114 | "@octokit/core": ">=6"
115 | }
116 | },
117 | "node_modules/@octokit/request": {
118 | "version": "9.2.2",
119 | "resolved": "https://registry.npmjs.org/@octokit/request/-/request-9.2.2.tgz",
120 | "integrity": "sha512-dZl0ZHx6gOQGcffgm1/Sf6JfEpmh34v3Af2Uci02vzUYz6qEN6zepoRtmybWXIGXFIK8K9ylE3b+duCWqhArtg==",
121 | "license": "MIT",
122 | "dependencies": {
123 | "@octokit/endpoint": "^10.1.3",
124 | "@octokit/request-error": "^6.1.7",
125 | "@octokit/types": "^13.6.2",
126 | "fast-content-type-parse": "^2.0.0",
127 | "universal-user-agent": "^7.0.2"
128 | },
129 | "engines": {
130 | "node": ">= 18"
131 | }
132 | },
133 | "node_modules/@octokit/request-error": {
134 | "version": "6.1.7",
135 | "resolved": "https://registry.npmjs.org/@octokit/request-error/-/request-error-6.1.7.tgz",
136 | "integrity": "sha512-69NIppAwaauwZv6aOzb+VVLwt+0havz9GT5YplkeJv7fG7a40qpLt/yZKyiDxAhgz0EtgNdNcb96Z0u+Zyuy2g==",
137 | "license": "MIT",
138 | "dependencies": {
139 | "@octokit/types": "^13.6.2"
140 | },
141 | "engines": {
142 | "node": ">= 18"
143 | }
144 | },
145 | "node_modules/@octokit/rest": {
146 | "version": "21.1.1",
147 | "resolved": "https://registry.npmjs.org/@octokit/rest/-/rest-21.1.1.tgz",
148 | "integrity": "sha512-sTQV7va0IUVZcntzy1q3QqPm/r8rWtDCqpRAmb8eXXnKkjoQEtFe3Nt5GTVsHft+R6jJoHeSiVLcgcvhtue/rg==",
149 | "license": "MIT",
150 | "dependencies": {
151 | "@octokit/core": "^6.1.4",
152 | "@octokit/plugin-paginate-rest": "^11.4.2",
153 | "@octokit/plugin-request-log": "^5.3.1",
154 | "@octokit/plugin-rest-endpoint-methods": "^13.3.0"
155 | },
156 | "engines": {
157 | "node": ">= 18"
158 | }
159 | },
160 | "node_modules/@octokit/types": {
161 | "version": "13.8.0",
162 | "resolved": "https://registry.npmjs.org/@octokit/types/-/types-13.8.0.tgz",
163 | "integrity": "sha512-x7DjTIbEpEWXK99DMd01QfWy0hd5h4EN+Q7shkdKds3otGQP+oWE/y0A76i1OvH9fygo4ddvNf7ZvF0t78P98A==",
164 | "license": "MIT",
165 | "dependencies": {
166 | "@octokit/openapi-types": "^23.0.1"
167 | }
168 | },
169 | "node_modules/before-after-hook": {
170 | "version": "3.0.2",
171 | "resolved": "https://registry.npmjs.org/before-after-hook/-/before-after-hook-3.0.2.tgz",
172 | "integrity": "sha512-Nik3Sc0ncrMK4UUdXQmAnRtzmNQTAAXmXIopizwZ1W1t8QmfJj+zL4OA2I7XPTPW5z5TDqv4hRo/JzouDJnX3A==",
173 | "license": "Apache-2.0"
174 | },
175 | "node_modules/fast-content-type-parse": {
176 | "version": "2.0.1",
177 | "resolved": "https://registry.npmjs.org/fast-content-type-parse/-/fast-content-type-parse-2.0.1.tgz",
178 | "integrity": "sha512-nGqtvLrj5w0naR6tDPfB4cUmYCqouzyQiz6C5y/LtcDllJdrcc6WaWW6iXyIIOErTa/XRybj28aasdn4LkVk6Q==",
179 | "funding": [
180 | {
181 | "type": "github",
182 | "url": "https://github.com/sponsors/fastify"
183 | },
184 | {
185 | "type": "opencollective",
186 | "url": "https://opencollective.com/fastify"
187 | }
188 | ],
189 | "license": "MIT"
190 | },
191 | "node_modules/universal-user-agent": {
192 | "version": "7.0.2",
193 | "resolved": "https://registry.npmjs.org/universal-user-agent/-/universal-user-agent-7.0.2.tgz",
194 | "integrity": "sha512-0JCqzSKnStlRRQfCdowvqy3cy0Dvtlb8xecj/H8JFZuCze4rwjPZQOgvFvn0Ws/usCHQFGpyr+pB9adaGwXn4Q==",
195 | "license": "ISC"
196 | }
197 | }
198 | }
199 |
--------------------------------------------------------------------------------
/tools/rfc-render/package.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "@cdklabs/rfc-render",
3 | "version": "1.0.0",
4 | "description": "",
5 | "main": "index.js",
6 | "type": "module",
7 | "scripts": {
8 | "render:all": "npm run render:readme && npm run render:full && npm run render:proposed && npm run render:accepted && npm run render:closed",
9 | "render:readme": "node inject-table.js ../../README.md -s unknown -s implementing -s planning -s approved -s final-comment-period -s api-approved -s review",
10 | "render:full": "node inject-table.js ../../FULL_INDEX.md",
11 | "render:proposed": "node inject-table.js ../../PROPOSED.md -s proposed -s review -s api-approved -s final-comment-period",
12 | "render:accepted": "node inject-table.js ../../ACCEPTED.md -s approved -s planning -s implementing -s done",
13 | "render:closed": "node inject-table.js ../../CLOSED.md -s rejected -s stale"
14 | },
15 | "keywords": [],
16 | "author": "AWS",
17 | "private": true,
18 | "license": "Apache-2.0",
19 | "dependencies": {
20 | "@octokit/rest": "^21.1.1"
21 | }
22 | }
23 |
--------------------------------------------------------------------------------
/tools/rfc-render/render-rfc-table.js:
--------------------------------------------------------------------------------
1 | import { issuesGroupedByStatus } from './fetch-issues.js';
2 | import { STATUS_LIST } from './status.js';
3 |
4 | const labels = Object.keys(STATUS_LIST);
5 | export { render };
6 |
/**
 * Render the RFC issue table as an array of markdown lines.
 *
 * @param {string[]} [renderStatus] - Ordered list of `status/...` labels to
 *   include; when omitted, every status group returned by the fetcher is used.
 * @param {boolean} [groupByStatus=true] - When true, rows are emitted group by
 *   group in `renderStatus` order; otherwise all rows are merged into a single
 *   list sorted by issue number.
 * @returns {Promise<string[]>} Markdown table lines (header plus one per issue).
 */
async function render(renderStatus = undefined, groupByStatus = true) {
  const issuesByStatus = await issuesGroupedByStatus(renderStatus);

  const lines = [];

  // `\#` stops markdown from treating the leading hash as a heading.
  lines.push('\\#|Title|Owner|Status');
  lines.push('---|-----|-----|------');

  if (groupByStatus) {
    for (const statusGroup of (renderStatus || Object.keys(issuesByStatus))) {
      // Copy before sorting: Array#sort mutates in place, and these arrays
      // belong to the shared structure returned by issuesGroupedByStatus().
      const rows = [...(issuesByStatus[statusGroup] ?? [])].sort(byNumber);
      for (const row of rows) {
        lines.push(renderRow(row));
      }
    }
  } else {
    // flat() already produces a fresh array, so sorting it in place is safe.
    for (const row of Object.values(issuesByStatus).flat().sort(byNumber)) {
      lines.push(renderRow(row));
    }
  }

  return lines;
}
29 |
/**
 * Format one issue as a pipe-delimited markdown table row:
 * number link | title link | owner | status text.
 *
 * @param {{number: number, title: string, link: string, assignee: (string|undefined), status: string}} row
 * @returns {string} A single markdown table line.
 */
function renderRow(row) {
  const numberCell = `[${row.number}](https://github.com/aws/aws-cdk-rfcs/issues/${row.number})`;
  const titleCell = `[${row.title.trim()}](${row.link})`;
  const ownerCell = renderUser(row.assignee);
  const statusCell = STATUS_LIST[row.status];
  return [numberCell, titleCell, ownerCell, statusCell].join('|');
}
38 |
/**
 * Ascending Array#sort comparator over the issue `number` field.
 *
 * @param {{number: number}} a
 * @param {{number: number}} b
 * @returns {number} Negative when `a` sorts before `b`, zero when equal,
 *   positive otherwise.
 */
function byNumber(a, b) {
  const difference = a.number - b.number;
  return difference;
}
42 |
/**
 * Render a GitHub handle as a markdown profile link.
 *
 * Accepts handles with or without a leading `@` and tolerates surrounding
 * whitespace (e.g. ` @alice `).
 *
 * @param {string|null|undefined} user - Raw assignee value from the issue row.
 * @returns {string} `[@user](https://github.com/user)`, or '' when no usable
 *   handle is present.
 */
function renderUser(user) {
  if (!user) {
    return '';
  }

  // Trim BEFORE stripping the '@': the previous code checked startsWith('@')
  // on the untrimmed value, so inputs like ' @alice' kept their '@' and
  // produced a broken https://github.com/@alice link.
  let handle = user.trim();
  if (handle.startsWith('@')) {
    handle = handle.substring(1).trim();
  }

  if (!handle) {
    return '';
  }

  return `[@${handle}](https://github.com/${handle})`;
}
59 |
--------------------------------------------------------------------------------
/tools/rfc-render/status.js:
--------------------------------------------------------------------------------
// Fallback label used when an issue carries no recognized `status/*` label.
export const UNKNOWN_STATUS = 'status/unknown';

// Maps each GitHub `status/*` label to its display text (emoji + name).
// Order does not matter here: the CLI input is an ordered list of statuses,
// and rendering follows that list rather than this object's key order.
// Frozen so this shared constant cannot be mutated by consumers.
export const STATUS_LIST = Object.freeze({
  [UNKNOWN_STATUS]: '❓unknown',
  'status/implementing': '👷 implementing',
  'status/planning': '📆 planning',
  'status/approved': '👍 approved',
  'status/final-comment-period': '⏰ final comments',
  'status/api-approved': '📐 API approved',
  'status/review': '✍️ review',
  'status/proposed': '💡 proposed',
  'status/done': '✅ done',
  'status/stale': '🤷 stale',
  'status/rejected': '👎 rejected',
});
18 |
--------------------------------------------------------------------------------