├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── CHANGELOG.md ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── NOTICE.txt ├── README.md ├── README_CN.md ├── THIRD-PARTY-LICENSES.txt ├── deployment ├── build-s3-dist.sh └── cdk-solution-helper │ ├── README.md │ ├── index.js │ └── package.json ├── docs ├── CUSTOM_BUILD.md ├── DEPLOYMENT_CN.md ├── DEPLOYMENT_EN.md ├── cluster_cn.png ├── cluster_en.png ├── launch-stack.svg ├── secret_cn.png └── secret_en.png ├── ecr-plugin-architect.png └── source ├── README.md ├── bin └── aws-data-replication-component-ecr.ts ├── cdk.json ├── ecr ├── Dockerfile └── copy.sh ├── jest.config.js ├── lambda ├── ecr_helper │ ├── .coveragerc │ ├── lambda_function.py │ ├── test │ │ ├── __init__.py │ │ ├── conftest.py │ │ ├── requirements-test.txt │ │ └── test_lambda_function.py │ └── util │ │ └── ecr_helper.py └── step-func.ts ├── lib └── aws-data-replication-component-ecr-stack.ts ├── package.json ├── run-all-tests.sh ├── test └── aws-data-replication-component-ecr.test.ts └── tsconfig.json /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior. 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 18 | 19 | **Please complete the following information about the solution:** 20 | - [ ] Version: [e.g. v1.0.0] 21 | 22 | To get the version of the solution, you can look at the description of the created CloudFormation stack. For example, "_(SO8003) - Data Transfer Hub - ECR Plugin - Template version **v1.0.0**_". 23 | 24 | - [ ] Region: [e.g. 
us-east-1] 25 | - [ ] Was the solution modified from the version published on this repository? 26 | - [ ] If the answer to the previous question was yes, are the changes available on GitHub? 27 | - [ ] Have you checked your [service quotas](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) for the services this solution uses? 28 | - [ ] Were there any errors in the CloudWatch Logs? 29 | 30 | **Screenshots** 31 | If applicable, add screenshots to help explain your problem (please **DO NOT include sensitive information**). 32 | 33 | **Additional context** 34 | Add any other context about the problem here. 35 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this solution 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the feature you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Additional context** 17 | Add any other context or screenshots about the feature request here. 18 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | *Issue #, if available:* 2 | 3 | *Description of changes:* 4 | 5 | By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. 
6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | source/*.js 2 | source/*/*.js 3 | !jest.config.js 4 | *.d.ts 5 | node_modules 6 | package-lock.json 7 | 8 | # CDK asset staging directory 9 | .cdk.staging 10 | cdk.out 11 | 12 | # Parcel build directories 13 | .cache 14 | .build 15 | 16 | # Python build 17 | build 18 | dist 19 | *.egg-info 20 | __pycache__ 21 | .venv 22 | 23 | # General 24 | .DS_Store 25 | .vscode 26 | 27 | # Deployment 28 | staging 29 | global-s3-assets 30 | regional-s3-assets 31 | viperlight 32 | 33 | 34 | # temp file 35 | tmp* 36 | 37 | # test file 38 | /source/lambda/test* 39 | 40 | config -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | All notable changes to this project will be documented in this file. 3 | 4 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 5 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 6 | 7 | ## [2.3.0] - 2023-03-30 8 | - Upgrade the Lambda Runtime 9 | 10 | ## [1.0.4] - 2023-01-03 11 | ### Added 12 | - Support ALL_TAGS 13 | 14 | ## [1.0.0] - 2021-12-22 15 | ### Added 16 | - All files, initial version -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 
5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check [existing open](https://github.com/awslabs/amazon-ecr-data-replication-hub-plugin/issues), or [recently closed](https://github.com/awslabs/amazon-ecr-data-replication-hub-plugin/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *develop* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. 
Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure all build processes execute successfully (see README.md for additional guidance). 35 | 4. Ensure all unit, integration, and/or snapshot tests pass, as applicable. 36 | 5. Commit to your fork using clear commit messages. 37 | 6. Send us a pull request, answering any default questions in the pull request interface. 38 | 7. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 39 | 40 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 41 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 42 | 43 | 44 | ## Finding contributions to work on 45 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels ((enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/amazon-ecr-data-replication-hub-plugin/labels/help%20wanted) issues is a great place to start. 46 | 47 | 48 | ## Code of Conduct 49 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 50 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 51 | opensource-codeofconduct@amazon.com with any additional questions or comments. 52 | 53 | 54 | ## Security issue notifications 55 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. 
56 | 57 | 58 | ## Licensing 59 | 60 | See the [LICENSE](https://github.com/awslabs/amazon-ecr-data-replication-hub-plugin/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 61 | 62 | We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 63 | -------------------------------------------------------------------------------- /NOTICE.txt: -------------------------------------------------------------------------------- 1 | Data Transfer Hub 2 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | 4 | ********************** 5 | THIRD PARTY COMPONENTS 6 | ********************** 7 | This software includes third party software subject to the following copyrights: 8 | 9 | 10 | aws-cdk under the Apache License 2.0 11 | aws-cdk-lib under the Apache License 2.0 12 | cdk-nag under the Apache License 2.0 13 | constructs under the Apache License 2.0 14 | source-map-support under the Massachusetts Institute of Technology (MIT) license 15 | @types/jest under the Massachusetts Institute of Technology (MIT) license 16 | @types/node under the Massachusetts Institute of Technology (MIT) license 17 | @types/aws-lambda under the Massachusetts Institute of Technology (MIT) license 18 | jest under the Massachusetts Institute of Technology (MIT) license 19 | ts-jest under the Massachusetts Institute of Technology (MIT) license 20 | aws-sdk under the Apache License 2.0 21 | ts-node under the Massachusetts Institute of Technology (MIT) license 22 | typescript under the Apache License 2.0 -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | [中文](./README_CN.md) 3 | 4 | # Repository Has Been Migrated 5 | 6 | This repository has been merged into [Data Transfer 
Hub](https://github.com/awslabs/data-transfer-hub/blob/main/docs/ECR_PLUGIN.md). Subsequent maintenance and updates for this repository will be conducted within repository [Data Transfer Hub](https://github.com/awslabs/data-transfer-hub). 7 | 8 | The document of this repository has been migrated to: 9 | - MD: https://github.com/awslabs/data-transfer-hub/blob/main/docs/ECR_PLUGIN.md 10 | - Mkdoc: https://awslabs.github.io/data-transfer-hub/en/user-guide/tutorial-ecr/ 11 | 12 | The code of this repository has been migrated to: 13 | - Infra CDK: https://github.com/awslabs/data-transfer-hub/tree/main/source/constructs/lib/ecr-plugin 14 | - Lambda: https://github.com/awslabs/data-transfer-hub/tree/main/source/constructs/lambda/plugin/ecr 15 | - Dockerfile: https://github.com/awslabs/data-transfer-hub/tree/main/source/constructs/ecr 16 | 17 | This migration will not impact any existing functionalities of the ECR plugin, and the ECR plugin can still be deployed independently. 18 | 19 | # Data Transfer Hub - ECR Plugin 20 | 21 | ## Table of contents 22 | * [Introduction](#introduction) 23 | * [Architecture](#architecture) 24 | * [Deployment](#deployment) 25 | * [FAQ](#faq) 26 | * [How to debug](#how-to-debug) 27 | * [How to customize](#how-to-customize) 28 | ## Introduction 29 | 30 | [Data Transfer Hub](https://github.com/awslabs/data-transfer-hub), a.k.a Data Replication Hub, is a solution for transferring data from different sources into AWS. This project is for ECR transfer plugin. You can deploy and run this plugin independently without the UI. 31 | 32 | The following are the planned features of this plugin. 
33 | 34 | - Transfer Amazon ECR between AWS accounts or regions 35 | - Transfer Amazon ECR between AWS Standard partition and AWS CN partition 36 | - Transfer Public container image registry to AWS ECR 37 | - Transfer all images or only selected Images 38 | - Support One-time transfer 39 | - Support Incremental transfer 40 | 41 | This plugin uses [**skopeo**](https://github.com/containers/skopeo) as the tool to copy images to Amazon ECR. If the same layer already exists in target ECR, it will not be copied again. 42 | 43 | 44 | ## Architecture 45 | 46 | ![ECR Plugin Architecture](ecr-plugin-architect.png) 47 | 48 | EventBridge Rule to trigger Step functions to execute on a regular basis. (By default, daily) 49 | 50 | Step functions will invoke Lambda to get the list of images from source. 51 | 52 | Lambda will either list all the repositories in the source ECR or get the stored selected image list from System Manager Parameter Store. 53 | 54 | The transfer task will be run within Fargate in a max concurrency of 10. If a transfer task failed for some reason, it will automatically retry for 3 times. 55 | 56 | Each task uses `skopeo copy` to copy the images into target ECR 57 | 58 | Once copy is completed, the status (either success or failed) will be logged into DynamoDB for tracking purpose 59 | 60 | 61 | ## Deployment 62 | 63 | Things to know about the deployment of this plugin: 64 | 65 | - The deployment will automatically provision resources like lambda, dynamoDB table, ECS Task Definition in your AWS account, etc. 66 | - The deployment will take approximately 3-5 minutes. 67 | - Once the deployment is completed, the data transfer task will start right away. 68 | 69 | Please follow the steps in the [Deployment Guide](./docs/DEPLOYMENT_EN.md) to start the deployment. 70 | 71 | > Note: You can simply delete the stack from CloudFormation console if the data transfer job is no longer required. 
72 | 73 | ## FAQ 74 | ### How to debug 75 | 76 | **Q**: There seems to be something wrong, how to debug? 77 | 78 | **A**: When you deploy the stack, you will be asked to input the stack name (default is DTHECRStack), most of the resources will be created with name prefix as the stack name. For example, Step Function name will be in a format of `-ECRReplicationSM`. 79 | 80 | There will be two main log groups created by this plugin. 81 | 82 | - /aws/lambda/<StackName>-ListImagesFunction<random suffix> 83 | 84 | This is the log group for listing Image Lambda Function. If there is no data transferred, you should check if something is wrong in the Lambda log. This is the first step. 85 | 86 | - <StackName>-DTHECRContainerLogGroup<random suffix> 87 | 88 | This is the log group for all ECS containers, detailed transfer log can be found here. 89 | 90 | If you can't find anything helpful in the log group, please raise an issue in Github. 91 | 92 | ### How to customize 93 | 94 | **Q**: I want to make some custom changes, how do I do? 95 | 96 | If you want to make custom changes to this plugin, you can follow [custom build](./docs/CUSTOM_BUILD.md) guide. 
-------------------------------------------------------------------------------- /README_CN.md: -------------------------------------------------------------------------------- 1 | [English](./README.md) 2 | 3 | # 仓库已迁移 4 | 5 | 该仓库已合并到 [Data Transfer Hub](https://github.com/awslabs/data-transfer-hub/blob/main/docs/ECR_PLUGIN.md) 中。此后,该仓库的维护和更新将在 [Data Transfer Hub](https://github.com/awslabs/data-transfer-hub) 仓库中进行。 6 | 7 | 该仓库的文档已迁移到以下位置: 8 | - MD格式:https://github.com/awslabs/data-transfer-hub/blob/main/docs/ECR_PLUGIN.md 9 | - Mkdoc格式:https://awslabs.github.io/data-transfer-hub/en/user-guide/tutorial-ecr/ 10 | 11 | 该仓库的代码已迁移到以下位置: 12 | - Infra CDK:https://github.com/awslabs/data-transfer-hub/tree/main/source/constructs/lib/ecr-plugin 13 | - Lambda:https://github.com/awslabs/data-transfer-hub/tree/main/source/constructs/lambda/plugin/ecr 14 | - Dockerfile:https://github.com/awslabs/data-transfer-hub/tree/main/source/constructs/ecr 15 | 16 | 此迁移不会影响 ECR 插件的任何现有功能,并且 ECR 插件仍然可以独立部署。 17 | 18 | # Data Transfer Hub - ECR 插件 19 | 20 | ## 目录 21 | * [简介](#简介) 22 | * [架构](#架构) 23 | * [部署](#部署) 24 | * [FAQ](#faq) 25 | * [如何调试](#如何调试) 26 | * [如何客制化](#如何客制化) 27 | 28 | ## 简介 29 | 30 | [Data Transfer Hub](https://github.com/awslabs/data-transfer-hub) ,前称是Data Replication Hub,是一个用于从不同的源传输数据到AWS的解决方案。本项目是该方案的其中一款插件(ECR插件)。你可以独立部署和运行此插件而无需使用UI。 31 | 32 | 以下是此插件的功能。 33 | 34 | - AWS账户或区域之间的Amazon ECR的传输 35 | - AWS Global区和AWS 中国区之间的Amazon ECR的传输 36 | - 公共容器镜像仓库到AWS ECR的传输 37 | - 传输所有镜像,或仅传输选定的镜像 38 | - 支持一次性传输 39 | - 支持增量传输 40 | 41 | 该插件使用 [**skopeo**](https://github.com/containers/skopeo) 作为将镜像传输到Aamazon ECR的工具。 如果目标ECR中已经存在相同的层,则不会被再次传输。 42 | 43 | 44 | ## 架构 45 | 46 | ![ECR Plugin Architect](ecr-plugin-architect.png) 47 | 48 | EventBridge 规则用于触发Step Function以定期执行任务。 (默认情况下,每天触发) 49 | 50 | 将调用Lambda以从源获取镜像列表 51 | 52 | Lambda将列出源ECR中的所有存储库,或者从 AWS System Manager Parameter Store 中获取已存储的选定镜像列表 53 | 54 | 传输任务将在Fargate中以最大10个并发运行。如果传输任务由于某种原因失败,它将自动重试3次 55 | 56 | 每个任务都使用`skopeo 
copy`将图像传输到目标ECR中 57 | 58 | 传输完成后,状态(成功或失败)将记录到DynamoDB中以进行跟踪 59 | ## 部署 60 | 61 | 有关此插件的部署的注意事项:: 62 | 63 | - 部署本插件会自动在您的AWS账号里创建包括Lambda, DyanomoDB表,ECS任务等 64 | - 部署预计用时3-5分钟 65 | - 一旦部署完成,复制任务就会马上开始 66 | 67 | 请参考[部署指南](./docs/DEPLOYMENT_CN.md)里的步骤进行部署。 68 | 69 | > 注意:如果不再需要数据传输任务,则可以从CloudFormation控制台中删除堆栈。 70 | ## FAQ 71 | ### 如何调试 72 | 73 | **问题**:部署完后似乎没有正常运行,该如何调试? 74 | 75 | **回答**:部署堆栈时,将要求您输入堆栈名称(默认为 DTHECRStack),大多数资源将使用该堆栈名称作为前缀进行创建。 例如,Step Function名称将采用`<堆栈名>-ECRReplicationSM`的格式。 76 | 77 | 此插件将创建两个主要的CloudWatch日志组。 78 | 79 | - /aws/lambda/<堆栈名>-ListImagesFunction<随机后缀> 80 | 81 | 这是获取镜像列表的日志组。如果未传输任何数据,则应首先检查Lambda运行日志中是否出了问题。 这是第一步。 82 | 83 | - <堆栈名>-DTHECRContainerLogGroup<随机后缀> 84 | 85 | 这是所有ECS容器的日志组,可以在此处找到详细的传输日志。 86 | 87 | 如果您在日志组中找不到任何有帮组的内容,请在Github中提出问题。 88 | 89 | ### 如何客制化 90 | 91 | **问题**:我想要更改此方案,需要做什么? 92 | 93 | **回答**:如果要更改解决方案,可以参考[定制](./docs/CUSTOM_BUILD.md) 指南. 94 | -------------------------------------------------------------------------------- /THIRD-PARTY-LICENSES.txt: -------------------------------------------------------------------------------- 1 | ** skopeo; version v1.3.0 -- https://github.com/containers/skopeo 2 | 3 | Apache License 4 | 5 | Version 2.0, January 2004 6 | 7 | http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND 8 | DISTRIBUTION 9 | 10 | 1. Definitions. 11 | 12 | "License" shall mean the terms and conditions for use, reproduction, and 13 | distribution as defined by Sections 1 through 9 of this document. 14 | 15 | "Licensor" shall mean the copyright owner or entity authorized by the 16 | copyright owner that is granting the License. 17 | 18 | "Legal Entity" shall mean the union of the acting entity and all other 19 | entities that control, are controlled by, or are under common control 20 | with that entity. 
For the purposes of this definition, "control" means 21 | (i) the power, direct or indirect, to cause the direction or management 22 | of such entity, whether by contract or otherwise, or (ii) ownership of 23 | fifty percent (50%) or more of the outstanding shares, or (iii) 24 | beneficial ownership of such entity. 25 | 26 | "You" (or "Your") shall mean an individual or Legal Entity exercising 27 | permissions granted by this License. 28 | 29 | "Source" form shall mean the preferred form for making modifications, 30 | including but not limited to software source code, documentation source, 31 | and configuration files. 32 | 33 | "Object" form shall mean any form resulting from mechanical 34 | transformation or translation of a Source form, including but not limited 35 | to compiled object code, generated documentation, and conversions to 36 | other media types. 37 | 38 | "Work" shall mean the work of authorship, whether in Source or Object 39 | form, made available under the License, as indicated by a copyright 40 | notice that is included in or attached to the work (an example is 41 | provided in the Appendix below). 42 | 43 | "Derivative Works" shall mean any work, whether in Source or Object form, 44 | that is based on (or derived from) the Work and for which the editorial 45 | revisions, annotations, elaborations, or other modifications represent, 46 | as a whole, an original work of authorship. For the purposes of this 47 | License, Derivative Works shall not include works that remain separable 48 | from, or merely link (or bind by name) to the interfaces of, the Work and 49 | Derivative Works thereof. 
50 | 51 | "Contribution" shall mean any work of authorship, including the original 52 | version of the Work and any modifications or additions to that Work or 53 | Derivative Works thereof, that is intentionally submitted to Licensor for 54 | inclusion in the Work by the copyright owner or by an individual or Legal 55 | Entity authorized to submit on behalf of the copyright owner. For the 56 | purposes of this definition, "submitted" means any form of electronic, 57 | verbal, or written communication sent to the Licensor or its 58 | representatives, including but not limited to communication on electronic 59 | mailing lists, source code control systems, and issue tracking systems 60 | that are managed by, or on behalf of, the Licensor for the purpose of 61 | discussing and improving the Work, but excluding communication that is 62 | conspicuously marked or otherwise designated in writing by the copyright 63 | owner as "Not a Contribution." 64 | 65 | "Contributor" shall mean Licensor and any individual or Legal Entity on 66 | behalf of whom a Contribution has been received by Licensor and 67 | subsequently incorporated within the Work. 68 | 69 | 2. Grant of Copyright License. Subject to the terms and conditions of this 70 | License, each Contributor hereby grants to You a perpetual, worldwide, 71 | non-exclusive, no-charge, royalty-free, irrevocable copyright license to 72 | reproduce, prepare Derivative Works of, publicly display, publicly perform, 73 | sublicense, and distribute the Work and such Derivative Works in Source or 74 | Object form. 75 | 76 | 3. Grant of Patent License. 
Subject to the terms and conditions of this 77 | License, each Contributor hereby grants to You a perpetual, worldwide, 78 | non-exclusive, no-charge, royalty-free, irrevocable (except as stated in 79 | this section) patent license to make, have made, use, offer to sell, sell, 80 | import, and otherwise transfer the Work, where such license applies only to 81 | those patent claims licensable by such Contributor that are necessarily 82 | infringed by their Contribution(s) alone or by combination of their 83 | Contribution(s) with the Work to which such Contribution(s) was submitted. 84 | If You institute patent litigation against any entity (including a 85 | cross-claim or counterclaim in a lawsuit) alleging that the Work or a 86 | Contribution incorporated within the Work constitutes direct or contributory 87 | patent infringement, then any patent licenses granted to You under this 88 | License for that Work shall terminate as of the date such litigation is 89 | filed. 90 | 91 | 4. Redistribution. 
You may reproduce and distribute copies of the Work or 92 | Derivative Works thereof in any medium, with or without modifications, and 93 | in Source or Object form, provided that You meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or Derivative Works a 96 | copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices stating 99 | that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works that You 102 | distribute, all copyright, patent, trademark, and attribution notices 103 | from the Source form of the Work, excluding those notices that do not 104 | pertain to any part of the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must include 108 | a readable copy of the attribution notices contained within such NOTICE 109 | file, excluding those notices that do not pertain to any part of the 110 | Derivative Works, in at least one of the following places: within a 111 | NOTICE text file distributed as part of the Derivative Works; within the 112 | Source form or documentation, if provided along with the Derivative 113 | Works; or, within a display generated by the Derivative Works, if and 114 | wherever such third-party notices normally appear. The contents of the 115 | NOTICE file are for informational purposes only and do not modify the 116 | License. You may add Your own attribution notices within Derivative Works 117 | that You distribute, alongside or as an addendum to the NOTICE text from 118 | the Work, provided that such additional attribution notices cannot be 119 | construed as modifying the License. 
120 | 121 | You may add Your own copyright statement to Your modifications and may 122 | provide additional or different license terms and conditions for use, 123 | reproduction, or distribution of Your modifications, or for any such 124 | Derivative Works as a whole, provided Your use, reproduction, and 125 | distribution of the Work otherwise complies with the conditions stated in 126 | this License. 127 | 128 | 5. Submission of Contributions. Unless You explicitly state otherwise, any 129 | Contribution intentionally submitted for inclusion in the Work by You to the 130 | Licensor shall be under the terms and conditions of this License, without 131 | any additional terms or conditions. Notwithstanding the above, nothing 132 | herein shall supersede or modify the terms of any separate license agreement 133 | you may have executed with Licensor regarding such Contributions. 134 | 135 | 6. Trademarks. This License does not grant permission to use the trade 136 | names, trademarks, service marks, or product names of the Licensor, except 137 | as required for reasonable and customary use in describing the origin of the 138 | Work and reproducing the content of the NOTICE file. 139 | 140 | 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in 141 | writing, Licensor provides the Work (and each Contributor provides its 142 | Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY 143 | KIND, either express or implied, including, without limitation, any 144 | warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or 145 | FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining 146 | the appropriateness of using or redistributing the Work and assume any risks 147 | associated with Your exercise of permissions under this License. 148 | 149 | 8. Limitation of Liability. 
In no event and under no legal theory, whether 150 | in tort (including negligence), contract, or otherwise, unless required by 151 | applicable law (such as deliberate and grossly negligent acts) or agreed to 152 | in writing, shall any Contributor be liable to You for damages, including 153 | any direct, indirect, special, incidental, or consequential damages of any 154 | character arising as a result of this License or out of the use or inability 155 | to use the Work (including but not limited to damages for loss of goodwill, 156 | work stoppage, computer failure or malfunction, or any and all other 157 | commercial damages or losses), even if such Contributor has been advised of 158 | the possibility of such damages. 159 | 160 | 9. Accepting Warranty or Additional Liability. While redistributing the Work 161 | or Derivative Works thereof, You may choose to offer, and charge a fee for, 162 | acceptance of support, warranty, indemnity, or other liability obligations 163 | and/or rights consistent with this License. However, in accepting such 164 | obligations, You may act only on Your own behalf and on Your sole 165 | responsibility, not on behalf of any other Contributor, and only if You 166 | agree to indemnify, defend, and hold each Contributor harmless for any 167 | liability incurred by, or claims asserted against, such Contributor by 168 | reason of your accepting any such warranty or additional liability. END OF 169 | TERMS AND CONDITIONS 170 | 171 | APPENDIX: How to apply the Apache License to your work. 172 | 173 | To apply the Apache License to your work, attach the following boilerplate 174 | notice, with the fields enclosed by brackets "[]" replaced with your own 175 | identifying information. (Don't include the brackets!) The text should be 176 | enclosed in the appropriate comment syntax for the file format. 
We also 177 | recommend that a file or class name and description of purpose be included on 178 | the same "printed page" as the copyright notice for easier identification 179 | within third-party archives. 180 | 181 | Copyright [yyyy] [name of copyright owner] 182 | 183 | Licensed under the Apache License, Version 2.0 (the "License"); 184 | 185 | you may not use this file except in compliance with the License. 186 | 187 | You may obtain a copy of the License at 188 | 189 | http://www.apache.org/licenses/LICENSE-2.0 190 | 191 | Unless required by applicable law or agreed to in writing, software 192 | 193 | distributed under the License is distributed on an "AS IS" BASIS, 194 | 195 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 196 | 197 | See the License for the specific language governing permissions and 198 | 199 | limitations under the License. 200 | 201 | * For skopeo see also this required NOTICE: 202 | No copyright notice was found for this project. -------------------------------------------------------------------------------- /deployment/build-s3-dist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script packages your project into a solution distributable that can be 4 | # used as an input to the solution builder validation pipeline. 5 | # 6 | # Important notes and prereq's: 7 | # 1. The initialize-repo.sh script must have been run in order for this script to 8 | # function properly. 9 | # 2. This script should be run from the repo's /deployment folder. 10 | # 11 | # This script will perform the following tasks: 12 | # 1. Remove any old dist files from previous runs. 13 | # 2. Install dependencies for the cdk-solution-helper; responsible for 14 | # converting standard 'cdk synth' output into solution assets. 15 | # 3. Build and synthesize your CDK project. 16 | # 4. 
Run the cdk-solution-helper on template outputs and organize 17 | # those outputs into the /global-s3-assets folder. 18 | # 5. Organize source code artifacts into the /regional-s3-assets folder. 19 | # 6. Remove any temporary files used for staging. 20 | # 21 | # Parameters: 22 | # - source-bucket-base-name: Name for the S3 bucket location where the template will source the Lambda 23 | # code from. The template will append '-[region_name]' to this bucket name. 24 | # For example: ./build-s3-dist.sh solutions v1.0.0 25 | # The template will then expect the source code to be located in the solutions-[region_name] bucket 26 | # - solution-name: name of the solution for consistency 27 | # - version-code: version of the package 28 | 29 | do_cmd() 30 | { 31 | echo "------ EXEC $*" 32 | $* 33 | rc=$? 34 | if [ $rc -gt 0 ] 35 | then 36 | echo "Aborted - rc=$rc" 37 | exit $rc 38 | fi 39 | } 40 | 41 | # Important: CDK global version number 42 | # cdk_version=1.74.0 43 | cleanup_temporary_generted_files() 44 | { 45 | echo "------------------------------------------------------------------------------" 46 | echo "${bold}[Cleanup] Remove temporary files${normal}" 47 | echo "------------------------------------------------------------------------------" 48 | 49 | # Delete generated files: CDK Consctruct typescript transcompiled generted files 50 | do_cmd cd $source_dir 51 | do_cmd npm run cleanup:tsc 52 | 53 | # Delete the temporary /staging folder 54 | do_cmd rm -rf $staging_dist_dir 55 | } 56 | 57 | # Check to see if the required parameters have been provided: 58 | if [ -z "$1" ] || [ -z "$2" ]; then 59 | echo "Please provide the base source bucket name, trademark approved solution name and version where the lambda code will eventually reside." 60 | echo "For example: ./build-s3-dist.sh solutions trademarked-solution-name v1.0.0" 61 | exit 1 62 | fi 63 | if [ ! 
-z $3 ]; then 64 | export VERSION="$3" 65 | else 66 | export VERSION=$(git describe --tags --exact-match || { [ -n "$BRANCH_NAME" ] && echo "$BRANCH_NAME"; } || echo v0.0.0) 67 | fi 68 | 69 | # Get reference for all important folders 70 | template_dir="$PWD" 71 | staging_dist_dir="$template_dir/staging" 72 | template_dist_dir="$template_dir/global-s3-assets" 73 | build_dist_dir="$template_dir/regional-s3-assets" 74 | source_dir="$template_dir/../source" 75 | 76 | echo "------------------------------------------------------------------------------" 77 | echo "[Init] Remove any old dist files from previous runs" 78 | echo "------------------------------------------------------------------------------" 79 | 80 | echo "rm -rf $template_dist_dir" 81 | rm -rf $template_dist_dir 82 | echo "mkdir -p $template_dist_dir" 83 | mkdir -p $template_dist_dir 84 | echo "rm -rf $build_dist_dir" 85 | rm -rf $build_dist_dir 86 | echo "mkdir -p $build_dist_dir" 87 | mkdir -p $build_dist_dir 88 | echo "rm -rf $staging_dist_dir" 89 | rm -rf $staging_dist_dir 90 | echo "mkdir -p $staging_dist_dir" 91 | mkdir -p $staging_dist_dir 92 | 93 | echo "VERSION=${VERSION}" 94 | echo "${VERSION}" > $template_dist_dir/version 95 | 96 | echo "------------------------------------------------------------------------------" 97 | echo "[Init] Install dependencies for the cdk-solution-helper" 98 | echo "------------------------------------------------------------------------------" 99 | 100 | echo "cd $template_dir/cdk-solution-helper" 101 | cd $template_dir/cdk-solution-helper 102 | echo "npm install" 103 | npm install 104 | 105 | echo "------------------------------------------------------------------------------" 106 | echo "[Synth] CDK Project" 107 | echo "------------------------------------------------------------------------------" 108 | 109 | # Install the global aws-cdk package 110 | echo "cd $source_dir" 111 | cd $source_dir 112 | # echo "npm install -g aws-cdk@$cdk_version" 113 | # npm install 
-g aws-cdk@$cdk_version 114 | 115 | 116 | # Install and build 117 | echo "npm install && npm run build" 118 | npm install 119 | npm run build 120 | 121 | # Run 'cdk synth' to generate raw solution outputs 122 | echo "cdk synth --output=$staging_dist_dir" 123 | npx cdk synth --output=$staging_dist_dir 124 | 125 | # Remove unnecessary output files 126 | echo "cd $staging_dist_dir" 127 | cd $staging_dist_dir 128 | echo "rm tree.json manifest.json cdk.out" 129 | rm tree.json manifest.json cdk.out 130 | 131 | echo "------------------------------------------------------------------------------" 132 | echo "[Packing] Template artifacts" 133 | echo "------------------------------------------------------------------------------" 134 | 135 | # Move images from staging to template_dist_dir 136 | IMAGE_NAME=data-transfer-hub-ecr 137 | IMAGE_PATH=$template_dir/ecr/$IMAGE_NAME 138 | mkdir -p $IMAGE_PATH 139 | cp -r $source_dir/ecr/* $IMAGE_PATH/ 140 | 141 | # Move outputs from staging to template_dist_dir 142 | echo "Move outputs from staging to template_dist_dir" 143 | echo "cp $template_dir/*.template $template_dist_dir/" 144 | cp $staging_dist_dir/*.template.json $template_dist_dir/ 145 | rm *.template.json 146 | 147 | # Rename all *.template.json files to *.template 148 | echo "Rename all *.template.json to *.template" 149 | echo "copy templates and rename" 150 | for f in $template_dist_dir/*.template.json; do 151 | mv -- "$f" "${f%.template.json}.template" 152 | done 153 | 154 | # Run the helper to clean-up the templates and remove unnecessary CDK elements 155 | echo "Run the helper to clean-up the templates and remove unnecessary CDK elements" 156 | echo "node $template_dir/cdk-solution-helper/index" 157 | node $template_dir/cdk-solution-helper/index 158 | if [ "$?" = "1" ]; then 159 | echo "(cdk-solution-helper) ERROR: there is likely output above." 
1>&2 160 | exit 1 161 | fi 162 | 163 | # Find and replace bucket_name, solution_name, and version 164 | echo "Find and replace bucket_name, solution_name, and version" 165 | cd $template_dist_dir 166 | echo "Updating code source bucket in template with $1" 167 | replace="s/%%BUCKET_NAME%%/$1/g" 168 | echo "sed -i -e $replace $template_dist_dir/*.template" 169 | sed -i -e $replace $template_dist_dir/*.template 170 | replace="s/%%SOLUTION_NAME%%/$2/g" 171 | echo "sed -i -e $replace $template_dist_dir/*.template" 172 | sed -i -e $replace $template_dist_dir/*.template 173 | replace="s/%%VERSION%%/$VERSION/g" 174 | echo "sed -i -e $replace $template_dist_dir/*.template" 175 | sed -i -e $replace $template_dist_dir/*.template 176 | 177 | echo "------------------------------------------------------------------------------" 178 | echo "[Packing] Source code artifacts" 179 | echo "------------------------------------------------------------------------------" 180 | 181 | # General cleanup of node_modules and package-lock.json files 182 | echo "find $staging_dist_dir -iname "node_modules" -type d -exec rm -rf "{}" \; 2> /dev/null" 183 | find $staging_dist_dir -iname "node_modules" -type d -exec rm -rf "{}" \; 2> /dev/null 184 | echo "find $staging_dist_dir -iname "package-lock.json" -type f -exec rm -f "{}" \; 2> /dev/null" 185 | find $staging_dist_dir -iname "package-lock.json" -type f -exec rm -f "{}" \; 2> /dev/null 186 | 187 | # ... For each asset.* source code artifact in the temporary /staging folder... 188 | cd $staging_dist_dir 189 | for d in `find . -mindepth 1 -maxdepth 1 -type d`; do 190 | # cd $d 191 | # Rename the artifact, removing the period for handler compatibility 192 | pfname="$(basename -- $d)" 193 | # zip folder 194 | echo "zip -rq $pfname.zip $pfname" 195 | cd $pfname 196 | zip -rq $pfname.zip * 197 | mv $pfname.zip ../ 198 | cd .. 
199 | 200 | # Remove the old, unzipped artifact from /staging 201 | echo "rm -rf $pfname" 202 | rm -rf $pfname 203 | 204 | # ... repeat until all source code artifacts are zipped and placed in the /staging 205 | done 206 | 207 | 208 | # ... For each asset.*.zip code artifact in the temporary /staging folder... 209 | cd $staging_dist_dir 210 | for f in `find . -iname \*.zip`; do 211 | # Rename the artifact, removing the period for handler compatibility 212 | # pfname = asset..zip 213 | pfname="$(basename -- $f)" 214 | echo $pfname 215 | # fname = .zip 216 | fname="$(echo $pfname | sed -e 's/asset\.//g')" 217 | mv $pfname $fname 218 | 219 | # Copy the zipped artifact from /staging to /regional-s3-assets 220 | echo "cp $fname $build_dist_dir" 221 | cp $fname $build_dist_dir 222 | 223 | # Remove the old, zipped artifact from /staging 224 | echo "rm $fname" 225 | rm $fname 226 | done 227 | 228 | 229 | echo "------------------------------------------------------------------------------" 230 | echo "[Cleanup] Remove temporary files" 231 | echo "------------------------------------------------------------------------------" 232 | 233 | # Delete the temporary /staging folder 234 | echo "rm -rf $staging_dist_dir" 235 | rm -rf $staging_dist_dir 236 | 237 | # cleanup temporary generated files that are not needed for later stages of the build pipeline 238 | cleanup_temporary_generted_files -------------------------------------------------------------------------------- /deployment/cdk-solution-helper/README.md: -------------------------------------------------------------------------------- 1 | # cdk-solution-helper 2 | 3 | A lightweight helper function that cleans-up synthesized templates from the AWS Cloud Development Kit (CDK) and prepares 4 | them for use with the AWS Solutions publishing pipeline. 
This function performs the following tasks: 5 | 6 | #### Lambda function preparation 7 | 8 | Replaces the AssetParameter-style properties that identify source code for Lambda functions with the common variables 9 | used by the AWS Solutions publishing pipeline. 10 | 11 | - `Code.S3Bucket` is assigned the `%%BUCKET_NAME%%` placeholder value. 12 | - `Code.S3Key` is assigned the `%%SOLUTION_NAME%%`/`%%VERSION%%` placeholder value. 13 | - `Handler` is given a prefix identical to the artifact hash, enabling the Lambda function to properly find the handler in the extracted source code package. 14 | 15 | These placeholders are then replaced with the appropriate values using the default find/replace operation run by the pipeline. 16 | 17 | Before: 18 | ``` 19 | "examplefunction67F55935": { 20 | "Type": "AWS::Lambda::Function", 21 | "Properties": { 22 | "Code": { 23 | "S3Bucket": { 24 | "Ref": "AssetParametersd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7S3Bucket54E71A95" 25 | }, 26 | "S3Key": { 27 | "Fn::Join": [ 28 | "", 29 | [ 30 | { 31 | "Fn::Select": [ 32 | 0, 33 | { 34 | "Fn::Split": [ 35 | "||", 36 | { 37 | "Ref": "AssetParametersd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7S3VersionKeyC789D8B1" 38 | } 39 | ] 40 | } 41 | ] 42 | }, 43 | { 44 | "Fn::Select": [ 45 | 1, 46 | { 47 | "Fn::Split": [ 48 | "||", 49 | { 50 | "Ref": "AssetParametersd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7S3VersionKeyC789D8B1" 51 | } 52 | ] 53 | } 54 | ] 55 | } 56 | ] 57 | ] 58 | } 59 | }, ... 60 | Handler: "index.handler", ... 61 | ``` 62 | 63 | After helper function run: 64 | ``` 65 | "examplefunction67F55935": { 66 | "Type": "AWS::Lambda::Function", 67 | "Properties": { 68 | "Code": { 69 | "S3Bucket": "%%BUCKET_NAME%%", 70 | "S3Key": "%%SOLUTION_NAME%%/%%VERSION%%/assetd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7.zip" 71 | }, ... 
72 | "Handler": "assetd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7/index.handler" 73 | ``` 74 | 75 | After build script run: 76 | ``` 77 | "examplefunction67F55935": { 78 | "Type": "AWS::Lambda::Function", 79 | "Properties": { 80 | "Code": { 81 | "S3Bucket": "solutions", 82 | "S3Key": "trademarked-solution-name/v1.0.0/asset.d513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7.zip" 83 | }, ... 84 | "Handler": "assetd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7/index.handler" 85 | ``` 86 | 87 | After CloudFormation deployment: 88 | ``` 89 | "examplefunction67F55935": { 90 | "Type": "AWS::Lambda::Function", 91 | "Properties": { 92 | "Code": { 93 | "S3Bucket": "solutions-us-east-1", 94 | "S3Key": "trademarked-solution-name/v1.0.0/asset.d513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7.zip" 95 | }, ... 96 | "Handler": "assetd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7/index.handler" 97 | ``` 98 | 99 | #### Template cleanup 100 | 101 | Cleans-up the parameters section and improves readability by removing the AssetParameter-style fields that would have 102 | been used to specify Lambda source code properties. This allows solution-specific parameters to be highlighted and 103 | removes unnecessary clutter. 
104 | 105 | Before: 106 | ``` 107 | "Parameters": { 108 | "AssetParametersd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7S3Bucket54E71A95": { 109 | "Type": "String", 110 | "Description": "S3 bucket for asset \"d513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7\"" 111 | }, 112 | "AssetParametersd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7S3VersionKeyC789D8B1": { 113 | "Type": "String", 114 | "Description": "S3 key for asset version \"d513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7\"" 115 | }, 116 | "AssetParametersd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7ArtifactHash7AA751FE": { 117 | "Type": "String", 118 | "Description": "Artifact hash for asset \"d513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7\"" 119 | }, 120 | "CorsEnabled" : { 121 | "Description" : "Would you like to enable Cross-Origin Resource Sharing (CORS) for the image handler API? Select 'Yes' if so.", 122 | "Default" : "No", 123 | "Type" : "String", 124 | "AllowedValues" : [ "Yes", "No" ] 125 | }, 126 | "CorsOrigin" : { 127 | "Description" : "If you selected 'Yes' above, please specify an origin value here. A wildcard (*) value will support any origin.", 128 | "Default" : "*", 129 | "Type" : "String" 130 | } 131 | } 132 | ``` 133 | 134 | After: 135 | ``` 136 | "Parameters": { 137 | "CorsEnabled" : { 138 | "Description" : "Would you like to enable Cross-Origin Resource Sharing (CORS) for the image handler API? Select 'Yes' if so.", 139 | "Default" : "No", 140 | "Type" : "String", 141 | "AllowedValues" : [ "Yes", "No" ] 142 | }, 143 | "CorsOrigin" : { 144 | "Description" : "If you selected 'Yes' above, please specify an origin value here. A wildcard (*) value will support any origin.", 145 | "Default" : "*", 146 | "Type" : "String" 147 | } 148 | } 149 | ``` 150 | 151 | *** 152 | © Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
-------------------------------------------------------------------------------- /deployment/cdk-solution-helper/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance 5 | * with the License. A copy of the License is located at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES 10 | * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions 11 | * and limitations under the License. 12 | */ 13 | 14 | // Imports 15 | const fs = require('fs'); 16 | 17 | // Paths 18 | const global_s3_assets = '../global-s3-assets'; 19 | 20 | // For each template in global_s3_assets ... 21 | fs.readdirSync(global_s3_assets).forEach(file => { 22 | const isTemplate = file.endsWith('.template') || file.endsWith('.template.json'); 23 | if (!isTemplate) { 24 | return 25 | } 26 | 27 | // Import and parse template file 28 | const raw_template = fs.readFileSync(`${global_s3_assets}/${file}`); 29 | let template = JSON.parse(raw_template); 30 | 31 | // Clean-up Lambda function code dependencies 32 | const resources = (template.Resources) ? 
template.Resources : {}; 33 | const lambdaFunctions = Object.keys(resources).filter(function (key) { 34 | return resources[key].Type === "AWS::Lambda::Function"; 35 | }); 36 | lambdaFunctions.forEach(function (f) { 37 | const fn = template.Resources[f]; 38 | if (fn.Properties.Code.hasOwnProperty('S3Bucket')) { 39 | // Set the S3 key reference 40 | let s3Key = Object.assign(fn.Properties.Code.S3Key); 41 | // https://github.com/aws/aws-cdk/issues/10608 42 | if (!s3Key.endsWith('.zip')) { 43 | fn.Properties.Code.S3Key = `%%SOLUTION_NAME%%/%%VERSION%%/${s3Key}.zip`; 44 | } else { 45 | fn.Properties.Code.S3Key = `%%SOLUTION_NAME%%/%%VERSION%%/${s3Key}`; 46 | } 47 | // Set the S3 bucket reference 48 | fn.Properties.Code.S3Bucket = { 49 | 'Fn::Sub': '%%BUCKET_NAME%%-${AWS::Region}' 50 | }; 51 | // Set the handler 52 | // const handler = fn.Properties.Handler; 53 | // fn.Properties.Handler = `${assetPath}/${handler}`; 54 | let metadata = Object.assign(fn.Metadata); 55 | fn.Metadata = { 56 | ...metadata, 57 | 'cfn_nag': { 58 | 'rules_to_suppress': [ 59 | { 60 | id: 'W58', 61 | reason: 'False alarm: The Lambda function does have the permission to write CloudWatch Logs.' 62 | }, 63 | { 64 | id: 'W89', 65 | reason: 'This is a fully serverless solution - no VPC is required' 66 | }, 67 | { 68 | id: 'W92' 69 | }, 70 | ] 71 | } 72 | }; 73 | } 74 | }); 75 | 76 | 77 | // Clean-up parameters section 78 | const parameters = (template.Parameters) ? template.Parameters : {}; 79 | const assetParameters = Object.keys(parameters).filter(function (key) { 80 | return key.includes('AssetParameters'); 81 | }); 82 | assetParameters.forEach(function (a) { 83 | template.Parameters[a] = undefined; 84 | }); 85 | 86 | // Clean-up BootstrapVersion parameter 87 | if (parameters.hasOwnProperty('BootstrapVersion')) { 88 | parameters.BootstrapVersion = undefined 89 | } 90 | 91 | // Clean-up CheckBootstrapVersion Rule 92 | const rules = (template.Rules) ? 
template.Rules : {}; 93 | if (rules.hasOwnProperty('CheckBootstrapVersion')) { 94 | rules.CheckBootstrapVersion = undefined 95 | } 96 | 97 | // Output modified template file 98 | const output_template = JSON.stringify(template, null, 2); 99 | fs.writeFileSync(`${global_s3_assets}/${file}`, output_template); 100 | }); -------------------------------------------------------------------------------- /deployment/cdk-solution-helper/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "cdk-solution-helper", 3 | "version": "0.1.0", 4 | "devDependencies": { 5 | "fs": "0.0.1-security" 6 | }, 7 | "dependencies": { 8 | "fs": "0.0.1-security" 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /docs/CUSTOM_BUILD.md: -------------------------------------------------------------------------------- 1 | # Create custom build 2 | 3 | The solution can be deployed through the CloudFormation template available on the solution home page. 4 | To make changes to the solution, download or clone this repo, update the source code and then run the deployment/build-s3-dist.sh script to deploy the updated code to an Amazon S3 bucket in your account. 5 | 6 | ## Prerequisites: 7 | * [AWS Command Line Interface](https://aws.amazon.com/cli/) 8 | * Node.js 12.x or later 9 | 10 | ## 1. Clone the repository 11 | 12 | ## 2. Run unit tests for customization 13 | Run unit tests to make sure added customization passes the tests: 14 | 15 | ```bash 16 | chmod +x ./run-unit-tests.sh 17 | ./run-unit-tests.sh 18 | ``` 19 | 20 | ## 3. Declare environment variables 21 | ```bash 22 | export REGION=aws-region-code # the AWS region to launch the solution (e.g. 
us-east-1) 23 | export DIST_OUTPUT_BUCKET=my-bucket-name # bucket where customized code will reside 24 | export SOLUTION_NAME=my-solution-name # the solution name 25 | export VERSION=my-version # version number for the customized code 26 | export AWS_ACCOUNT_ID=my-account-id # AWS Account ID, (e.g. 123456789012) 27 | ``` 28 | 29 | ## 4. Create an Amazon S3 Bucket 30 | The CloudFormation template is configured to pull the Lambda deployment packages from Amazon S3 bucket in the region the template is being launched in. Use below command to create the buckets. 31 | 32 | ```bash 33 | aws s3 mb s3://$DIST_OUTPUT_BUCKET --region $REGION 34 | aws s3 mb s3://$DIST_OUTPUT_BUCKET-$REGION --region $REGION 35 | ``` 36 | 37 | ## 5. Create the deployment packages 38 | Build the distributable: 39 | ```bash 40 | chmod +x ./build-s3-dist.sh 41 | ./build-s3-dist.sh $DIST_OUTPUT_BUCKET $SOLUTION_NAME $VERSION 42 | ``` 43 | 44 | ## 6. Build custom ECR image 45 | Build and push to ECR repository: 46 | ```bash 47 | chmod +x ./build-ecr.sh 48 | ./build-ecr.sh $REGION $AWS_ACCOUNT_ID $VERSION 49 | ``` 50 | 51 | ## 7. Deploy the packages 52 | Deploy the distributable to the Amazon S3 bucket in your account: 53 | ```bash 54 | aws s3 cp ./global-s3-assets/ s3://$DIST_OUTPUT_BUCKET/$SOLUTION_NAME/$VERSION/ --recursive --acl bucket-owner-full-control 55 | aws s3 cp ./regional-s3-assets/ s3://$DIST_OUTPUT_BUCKET-$REGION/$SOLUTION_NAME/$VERSION/ --recursive --acl bucket-owner-full-control 56 | ``` -------------------------------------------------------------------------------- /docs/DEPLOYMENT_CN.md: -------------------------------------------------------------------------------- 1 | 2 | [English](./DEPLOYMENT_EN.md) 3 | 4 | # 部署指南 5 | 6 | ## 1. 准备VPC (可选) 7 | 8 | 此解决方案可以部署在公共和私有子网中。 建议使用公共子网。 9 | 10 | - 如果您想使用现有的 VPC,请确保 VPC 至少有 2 个子网,并且两个子网都必须具有公网访问权限(带有 Internet 网关的公有子网或带有 NAT 网关的私有子网) 11 | 12 | - 如果您想为此解决方案创建新的默认 VPC,请转到步骤2,并确保您在创建集群时选择了*为此集群创建一个新的 VPC*。 13 | 14 | 15 | ## 2. 
配置ECS集群 16 | 17 | 此方案需要ECS 集群才能运行Fargate任务。 18 | 19 | 打开AWS 管理控制台 > Elastic Container Service (ECS)。 在 ECS集群首页上,单击 **创建集群** 20 | 21 | 步骤1:选择集群模版,确保选择 **仅限联网** 类型。 22 | 23 | 步骤2:配置集群,指定集群名称,点击创建即可。 如果您还想创建一个新的 VCP(仅限公有子网),还请选中**为此集群创建新的 VPC** 选项。 24 | 25 | ![创建集群](cluster_cn.png) 26 | 27 | 28 | 29 | ## 3. 配置凭据 30 | 31 | 如果源(或目标)不在当前的AWS账户中,则您需要提供`AccessKeyID`和`SecretAccessKey`(即`AK` / `SK`)以从Amazon ECR中拉取或推送镜像。 Amazon Secrets Manager 用于以安全方式存储访问凭证。 32 | 33 | > 注意:如果源类型为“公共(Public)”,则无需提供源的访问凭证。 34 | 35 | 打开AWS 管理控制台 > Secrets Manager。 在 Secrets Manager 主页上,单击 **存储新的密钥**。 对于密钥类型,请使用**其他类型的秘密**。 对于键/值对,请将下面的 JSON 文本复制并粘贴到明文部分,并相应地将值更改为您的 AK/SK。 36 | 37 | ``` 38 | { 39 | "access_key_id": "", 40 | "secret_access_key": "" 41 | } 42 | ``` 43 | 44 | ![密钥](secret_cn.png) 45 | 46 | 然后下一步指定密钥名称,最后一步点击创建。 47 | 48 | ## 4. 启动AWS Cloudformation部署 49 | 50 | 请按照以下步骤通过AWS Cloudformation部署此插件。 51 | 52 | 1.登录到AWS管理控制台,切换到将CloudFormation Stack部署到的区域。 53 | 54 | 1.单击以下按钮在该区域中启动CloudFormation堆栈。 55 | 56 | - 部署到AWS中国北京和宁夏区 57 | 58 | [![Launch Stack](launch-stack.svg)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DTHECRStack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub-ecr-plugin/latest/DataTransferECRStack.template) 59 | 60 | - 部署到AWS海外区 61 | 62 | [![Launch Stack](launch-stack.svg)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DTHECRStack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub-ecr-plugin/latest/DataTransferECRStack.template) 63 | 64 | 65 | 1.单击**下一步**。 相应地为参数指定值。 如果需要,请更改堆栈名称。 66 | 67 | 1.单击**下一步**。 配置其他堆栈选项,例如标签(可选)。 68 | 69 | 1.单击**下一步**。 查看并勾选确认,然后单击“创建堆栈”开始部署。 70 | 71 | 部署预计用时3-5分钟 -------------------------------------------------------------------------------- /docs/DEPLOYMENT_EN.md: -------------------------------------------------------------------------------- 1 | 2 | [中文](./DEPLOYMENT_CN.md) 3 | 4 | # Deployment Guide 5 | 6 | ## 1. 
Prepare VPC (optional) 7 | 8 | This solution can be deployed in both public and private subnets. Using public subnets is recommended. 9 | 10 | - If you want to use existing VPC, please make sure the VPC has at least 2 subnets, and both subnets must have public internet access (Either public subnets with internet gateway or private subnets with NAT gateway) 11 | 12 | - If you want to create new default VPC for this solution, please go to Step 2 and make sure you have *Create a new VPC for this cluster* selected when you create the cluster. 13 | 14 | 15 | ## 2. Set up ECS Cluster 16 | 17 | A ECS Cluster is required for this solution to run Fargate task. 18 | 19 | Go to AWS Management Console > Elastic Container Service (ECS). From ECS Cluster home page, click **Create Cluster** 20 | 21 | Step 1: Select Cluster Template, make sure you choose **Network Only** type. 22 | 23 | Step 2: Configure cluster, just specify a cluster name and click Create. If you want to also create a new VCP (public subnets only), please also check the **Create a new VPC for this cluster** option. 24 | 25 | ![Create Cluster](cluster_en.png) 26 | 27 | 28 | 29 | ## 3. Configure credentials 30 | 31 | If source (or destination) is NOT in current AWS account, you will need to provide `AccessKeyID` and `SecretAccessKey` (namely `AK/SK`) to pull from or push to Amazon ECR. And Secrets Manager is used to store the credentials in a secure manner. 32 | 33 | >Note: If source type is Public, there is no need to provide the source credentials. 34 | 35 | Go to AWS Management Console > Secrets Manager. From Secrets Manager home page, click **Store a new secret**. For secret type, please use **Other type of secrets**. For key/value paris, please copy and paste below JSON text into the Plaintext section, and change value to your AK/SK accordingly. 
36 | 37 | ``` 38 | { 39 | "access_key_id": "", 40 | "secret_access_key": "" 41 | } 42 | ``` 43 | 44 | ![Secret](secret_en.png) 45 | 46 | Click Next to specify a secret name, and click Create in teh last step. 47 | 48 | ## 4. Launch AWS Cloudformation Stack 49 | 50 | Please follow below steps to deploy this plugin via AWS Cloudformation. 51 | 52 | 1. Sign in to AWS Management Console, switch to the region to deploy the CloudFormation Stack to. 53 | 54 | 1. Click the following button to launch the CloudFormation Stack in that region. 55 | 56 | - For AWS China Regions 57 | 58 | [![Launch Stack](launch-stack.svg)](https://console.amazonaws.cn/cloudformation/home#/stacks/create/template?stackName=DTHECRStack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub-ecr-plugin/latest/DataTransferECRStack.template) 59 | 60 | 61 | - For AWS Global regions 62 | 63 | [![Launch Stack](launch-stack.svg)](https://console.aws.amazon.com/cloudformation/home#/stacks/create/template?stackName=DTHECRStack&templateURL=https://solutions-reference.s3.amazonaws.com/data-transfer-hub-ecr-plugin/latest/DataTransferECRStack.template) 64 | 65 | 1. Click **Next**. Specify values to parameters accordingly. Change the stack name if required. 66 | 67 | 1. Click **Next**. Configure additional stack options such as tags (Optional). 68 | 69 | 1. Click **Next**. Review and confirm acknowledgement, then click **Create Stack** to start the deployment. 70 | 71 | The deployment will take approximately 3-5 minutes. 
-------------------------------------------------------------------------------- /docs/cluster_cn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awslabs/amazon-ecr-data-replication-hub-plugin/cbc83ae7cb29f25dd4e486c6c4d17fb6a7d7b8bd/docs/cluster_cn.png -------------------------------------------------------------------------------- /docs/cluster_en.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awslabs/amazon-ecr-data-replication-hub-plugin/cbc83ae7cb29f25dd4e486c6c4d17fb6a7d7b8bd/docs/cluster_en.png -------------------------------------------------------------------------------- /docs/launch-stack.svg: -------------------------------------------------------------------------------- 1 | Launch Stack -------------------------------------------------------------------------------- /docs/secret_cn.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awslabs/amazon-ecr-data-replication-hub-plugin/cbc83ae7cb29f25dd4e486c6c4d17fb6a7d7b8bd/docs/secret_cn.png -------------------------------------------------------------------------------- /docs/secret_en.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awslabs/amazon-ecr-data-replication-hub-plugin/cbc83ae7cb29f25dd4e486c6c4d17fb6a7d7b8bd/docs/secret_en.png -------------------------------------------------------------------------------- /ecr-plugin-architect.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awslabs/amazon-ecr-data-replication-hub-plugin/cbc83ae7cb29f25dd4e486c6c4d17fb6a7d7b8bd/ecr-plugin-architect.png -------------------------------------------------------------------------------- /source/README.md: 
-------------------------------------------------------------------------------- 1 | # Welcome to your CDK TypeScript project! 2 | 3 | This is a blank project for TypeScript development with CDK. 4 | 5 | The `cdk.json` file tells the CDK Toolkit how to execute your app. 6 | 7 | ## Useful commands 8 | 9 | * `npm run build` compile typescript to js 10 | * `npm run watch` watch for changes and compile 11 | * `npm run test` perform the jest unit tests 12 | * `cdk deploy` deploy this stack to your default AWS account/region 13 | * `cdk diff` compare deployed stack with current state 14 | * `cdk synth` emits the synthesized CloudFormation template 15 | -------------------------------------------------------------------------------- /source/bin/aws-data-replication-component-ecr.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | /* 4 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 5 | 6 | Licensed under the Apache License, Version 2.0 (the "License"); 7 | you may not use this file except in compliance with the License. 8 | You may obtain a copy of the License at 9 | 10 | http://www.apache.org/licenses/LICENSE-2.0 11 | 12 | Unless required by applicable law or agreed to in writing, software 13 | distributed under the License is distributed on an "AS IS" BASIS, 14 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | See the License for the specific language governing permissions and 16 | limitations under the License. 
17 | */ 18 | 19 | 20 | import 'source-map-support/register'; 21 | import { DataTransferECRStack } from '../lib/aws-data-replication-component-ecr-stack'; 22 | import { App, Aspects, Stack } from "aws-cdk-lib"; 23 | 24 | import { 25 | AwsSolutionsChecks, 26 | NagPackSuppression, 27 | NagSuppressions, 28 | } from "cdk-nag"; 29 | 30 | const app = new App(); 31 | 32 | function stackSuppressions( 33 | stacks: Stack[], 34 | suppressions: NagPackSuppression[] 35 | ) { 36 | stacks.forEach((s) => 37 | NagSuppressions.addStackSuppressions(s, suppressions, true) 38 | ); 39 | } 40 | 41 | stackSuppressions([ 42 | new DataTransferECRStack(app, 'DataTransferECRStack'), 43 | ], [ 44 | { id: 'AwsSolutions-IAM5', reason: 'some policies need to get dynamic resources' }, 45 | { id: 'AwsSolutions-IAM4', reason: 'these policies is used by CDK Customer Resource lambda' }, 46 | { id: 'AwsSolutions-L1', reason: 'not applicable to use the latest lambda runtime version' }, 47 | { id: "AwsSolutions-ECS2", reason: "We need to create a dynamic ECS Service" }, 48 | ]); 49 | 50 | Aspects.of(app).add(new AwsSolutionsChecks()); -------------------------------------------------------------------------------- /source/cdk.json: -------------------------------------------------------------------------------- 1 | { 2 | "app": "npx ts-node bin/aws-data-replication-component-ecr.ts", 3 | "context": { 4 | "aws-cdk:enableDiffNoFail": "true", 5 | "@aws-cdk/core:stackRelativeExports": "true" 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /source/ecr/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM alpine 2 | 3 | RUN apk update \ 4 | && apk add skopeo aws-cli jq 5 | 6 | WORKDIR /drh 7 | 8 | ENV IMAGE alpine 9 | ENV TAG latest 10 | ENV SOURCE_TYPE Amazon_ECR 11 | ENV AWS_DEFAULT_REGION us-west-2 12 | ENV AWS_ACCOUNT_ID 111111111111 13 | 14 | ENV SRC_REGION us-west-2 15 | ENV SRC_ACCOUNT_ID 987654321098 16 | 
ENV SRC_CREDENTIAL src 17 | ENV SRC_CREDENTIAL_NAME srcNmae 18 | 19 | ENV DEST_REGION cn-north-1 20 | ENV DEST_ACCOUNT_ID 123456789012 21 | ENV DEST_PREFIX '' 22 | ENV DEST_CREDENTIAL dest 23 | ENV DEST_CREDENTIAL_NAME desNmae 24 | 25 | COPY copy.sh . 26 | RUN chmod +x copy.sh 27 | CMD ["sh", "copy.sh"] -------------------------------------------------------------------------------- /source/ecr/copy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | set -e 4 | 5 | echo "[Init] Get Image Repo Name and Tag" 6 | echo "repo is $IMAGE and tag is $TAG" 7 | 8 | echo "[Init] Get Secrets Manager info" 9 | 10 | get_aksk() 11 | { 12 | echo "Get AK/SK in Secrets Manager" 13 | if [ -z "$1" ]; then 14 | echo "No credential is provided, no ak/sk" 15 | ak='0' 16 | sk='0' 17 | else 18 | echo "Get $1 from $AWS_DEFAULT_REGION" 19 | cred_secret_manager=$(aws secretsmanager get-secret-value --secret-id $1 --version-stage AWSCURRENT) 20 | ak=$(echo $cred_secret_manager | jq -c '.SecretString | fromjson | .access_key_id' | tr -d '"') 21 | sk=$(echo $cred_secret_manager | jq -c '.SecretString | fromjson | .secret_access_key' | tr -d '"') 22 | fi 23 | } 24 | 25 | get_aksk $SRC_CREDENTIAL_NAME 26 | src_ak=$ak 27 | src_sk=$sk 28 | get_aksk $DEST_CREDENTIAL_NAME 29 | dest_ak=$ak 30 | dest_sk=$sk 31 | 32 | # function to get ecr login password 33 | # Usage: get_cred region account_id ak sk 34 | get_cred() 35 | { 36 | # echo "All params are $@" 37 | if [ -z "$4" ]; then 38 | # In current account 39 | echo "Get login pwd in region $1 in current account" 40 | cred=`aws ecr get-login-password --region $1` 41 | ACCOUNT_ID=$AWS_ACCOUNT_ID 42 | else 43 | ACCOUNT_ID=$2 44 | echo "Read AK/SK" 45 | # echo $3 46 | # echo $4 47 | # export AWS_ACCESS_KEY_ID=$3 48 | # export AWS_SECRET_ACCESS_KEY=$4 49 | echo "Get login pwd in region $1" 50 | cred=$(AWS_ACCESS_KEY_ID=$3 AWS_SECRET_ACCESS_KEY=$4 AWS_DEFAULT_REGION=$1 aws ecr get-login-password --region 
$1) 51 | # echo "cred is $cred" 52 | fi 53 | 54 | # Get ecr domain name 55 | if [ "$1" = "cn-north-1" ] || [ "$1" = "cn-northwest-1" ]; then 56 | domain=$ACCOUNT_ID.dkr.ecr.$1.amazonaws.com.cn 57 | else 58 | domain=$ACCOUNT_ID.dkr.ecr.$1.amazonaws.com 59 | fi 60 | echo "domain is $domain" 61 | } 62 | 63 | 64 | echo "[Source] Get Source Info" 65 | if [ "$SOURCE_TYPE" = "Amazon_ECR" ]; then 66 | echo "Source Type is ECR" 67 | get_cred $SRC_REGION $SRC_ACCOUNT_ID $src_ak $src_sk 68 | src_cred=$cred 69 | src_domain=$domain 70 | # echo "src_cred is $src_cred" 71 | # echo "src_domain is $src_domain" 72 | else 73 | echo "Source Type is NOT Amazon ECR" 74 | fi 75 | 76 | 77 | echo "[Destination] Get Destination Info" 78 | 79 | get_cred $DEST_REGION $DEST_ACCOUNT_ID $dest_ak $dest_sk 80 | dest_cred=$cred 81 | dest_domain=$domain 82 | 83 | # echo "dest_cred is $dest_cred" 84 | # echo "dest_domain is $dest_domain" 85 | 86 | echo "[Destination] Create ECR repo" 87 | # echo "Create ecr repo $IMAGE" 88 | if [ -n "$DEST_ACCOUNT_ID" ]; then 89 | echo "Set env" 90 | export AWS_ACCESS_KEY_ID=$dest_ak 91 | export AWS_SECRET_ACCESS_KEY=$dest_sk 92 | export AWS_DEFAULT_REGION=$DEST_REGION 93 | fi 94 | aws ecr create-repository --repository-name $IMAGE --region $DEST_REGION >/dev/null || true 95 | 96 | 97 | echo "[Copy] Start copying" 98 | start_time=$(date +%s) 99 | 100 | 101 | # echo $dest_pwd | skopeo login --username AWS --password-stdin $dest_domain 102 | if [ "$SOURCE_TYPE" = "Amazon_ECR" ]; then 103 | skopeo copy docker://$src_domain/$IMAGE:$TAG docker://$dest_domain/$IMAGE:$TAG --src-creds AWS:$src_cred --dest-creds AWS:$dest_cred 104 | else 105 | skopeo copy docker://$IMAGE:$TAG docker://$dest_domain/$IMAGE:$TAG --dest-creds AWS:$dest_cred 106 | fi 107 | 108 | end_time=$(date +%s) 109 | cost_time=`expr $end_time - $start_time` 110 | echo "Time elapsed to copy is $(($cost_time/60))min $(($cost_time%60))s" 111 | 
-------------------------------------------------------------------------------- /source/jest.config.js: --------------------------------------------------------------------------------
// Jest configuration for the CDK unit tests under source/test.
module.exports = {
  "roots": [
    "/test"
  ],
  testMatch: ['**/*.test.ts'],
  "transform": {
    "^.+\\.tsx?$": "ts-jest"
  },
  coverageReporters: [
    "text",
    ["lcov", { "projectRoot": "../" }]
  ]
}
-------------------------------------------------------------------------------- /source/lambda/ecr_helper/.coveragerc: --------------------------------------------------------------------------------
[run]
omit =
    tests/*
    .venv-*/*
    test/*
    */__init__.py
source =
    .
-------------------------------------------------------------------------------- /source/lambda/ecr_helper/lambda_function.py: --------------------------------------------------------------------------------
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import os
import logging

from util.ecr_helper import BaseHelper, ECRHelper

logger = logging.getLogger()
logger.setLevel(logging.INFO)


def lambda_handler(event, context):
    """Build the list of images to replicate for the Step Functions task.

    Configuration is read entirely from environment variables (the `event`
    payload is not used). Returns
    ``{"Payload": [{"repositoryName": ..., "imageTag": ...}, ...]}``.

    - Amazon_ECR source: ECRHelper lists the whole registry (SRC_LIST=ALL)
      or expands the selected list stored in SSM (SRC_LIST=SELECTED).
    - Any other (public) source: BaseHelper reads the selected list from
      SSM; listing ALL images of a public registry is not possible, so any
      other combination returns an empty payload.
    """
    params = {
        'source_type': os.environ['SOURCE_TYPE'],
        'src_list': os.environ['SRC_LIST'],
        'src_image_list': os.environ['SELECTED_IMAGE_PARAM'],
        'src_region': os.environ['SRC_REGION'],
        'src_account_id': os.environ['SRC_ACCOUNT_ID'],
        'src_credential_name': os.environ['SRC_CREDENTIAL_NAME']
    }
    logger.info(params)

    result = []

    if params['source_type'] == 'Amazon_ECR':
        image_helper = ECRHelper(params)
        result = image_helper.generate_repo_tag_map_list()
    elif params['source_type'] != 'Amazon_ECR' and params['src_list'] == 'SELECTED':
        image_helper = BaseHelper(params)
        result = image_helper.generate_repo_tag_map_list()
    else:
        # Unsupported combination, e.g. a public source with SRC_LIST=ALL.
        logger.info("sourceType is not (Amazon_ECR + X)/SELECTED, it is: " +
                    params['source_type'] + " " + params['src_list'])

    return {"Payload": result}
-------------------------------------------------------------------------------- /source/lambda/ecr_helper/test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awslabs/amazon-ecr-data-replication-hub-plugin/cbc83ae7cb29f25dd4e486c6c4d17fb6a7d7b8bd/source/lambda/ecr_helper/test/__init__.py --------------------------------------------------------------------------------
-------------------------------------------------------------------------------- /source/lambda/ecr_helper/test/conftest.py: --------------------------------------------------------------------------------
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0

import os

import pytest


@pytest.fixture(autouse=True)
def default_environment_variables():
    """Mocked AWS environment variables such as AWS credentials and region"""
    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
    os.environ["AWS_REGION"] = "us-east-1"
    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"

    os.environ["SOURCE_TYPE"] = "Amazon_ECR"
-------------------------------------------------------------------------------- /source/lambda/ecr_helper/test/requirements-test.txt: --------------------------------------------------------------------------------
moto==3.1.18
pytest==7.1.2
pytest-cov==3.0.0
pyyaml
-------------------------------------------------------------------------------- /source/lambda/ecr_helper/test/test_lambda_function.py: --------------------------------------------------------------------------------
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Unit tests for lambda_function, using moto-mocked ECR and SSM."""

import pytest
import os
import boto3
from moto import mock_ecr, mock_ssm


@pytest.fixture
def ecr_client():
    # Seed a mocked ECR registry: two repositories, three tagged images each.
    with mock_ecr():
        region = os.environ.get("AWS_REGION")
        ecr = boto3.client("ecr", region_name=region)
        # create ecr repository 01
        repository_name = "test-repository-01"
        ecr.create_repository(repositoryName=repository_name)

        # upload images to the above ecr repository
        ecr.put_image(
            repositoryName=repository_name,
            imageManifest="test_01",
            imageTag="latest"
        )
        ecr.put_image(
            repositoryName=repository_name,
            imageManifest="test_01",
            imageTag="v1.3.0"
        )
        ecr.put_image(
            repositoryName=repository_name,
            imageManifest="test_02",
            imageTag="v1.2.0"
        )

        # create ecr repository 02
        repository_name = "ubuntu"
        ecr.create_repository(repositoryName=repository_name)

        # upload images to the above ecr repository
        ecr.put_image(
            repositoryName=repository_name,
            imageManifest="test_01",
            imageTag="latest"
        )
        ecr.put_image(
            repositoryName=repository_name,
            imageManifest="test_02",
            imageTag="v2.3.0"
        )
        ecr.put_image(
            repositoryName=repository_name,
            imageManifest="test_03",
            imageTag="v2.2.0"
        )

        yield


@pytest.fixture
def ssm_client():
    # Mocked SSM parameter holding a SELECTED image list (whitespace is
    # stripped by the helper, so the multi-line value is fine).
    with mock_ssm():
        region = os.environ.get("AWS_REGION")
        ssm = boto3.client("ssm", region_name=region)
        ssm.put_parameter(
            Name="test_ssm_param_name",
            Value="""
            ubuntu:v2.2.0,
            test-repository-01
            """,
            Type='String'
        )

        yield


# Test Amazon ECR with all images
@pytest.fixture
def env_variables_01():
    """Mocked AWS environment variables such as AWS credentials and region"""
    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
    os.environ["AWS_REGION"] = "us-east-1"
    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"

    os.environ["SOURCE_TYPE"] = "Amazon_ECR"
    os.environ["SRC_LIST"] = "ALL"
    os.environ["SRC_REGION"] = "us-east-1"
    os.environ["SRC_ACCOUNT_ID"] = ""
    os.environ["SELECTED_IMAGE_PARAM"] = ""
    os.environ["SRC_CREDENTIAL_NAME"] = "test_key"

    yield


def test_lambda_function_01(ecr_client, env_variables_01):
    import lambda_function

    response = lambda_function.lambda_handler(
        {},
        None,
    )
    # 2 repositories x 3 tags each = 6 images
    assert len(response['Payload']) == 6


# Test Amazon ECR with selected images
@pytest.fixture
def env_variables_02():
    """Mocked AWS environment variables such as AWS credentials and region"""
    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
    os.environ["AWS_REGION"] = "us-east-1"
    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"

    os.environ["SOURCE_TYPE"] = "Amazon_ECR"
    os.environ["SRC_LIST"] = "SELECTED"
    os.environ["SRC_REGION"] = "us-east-1"
    os.environ["SRC_ACCOUNT_ID"] = ""
    os.environ["SELECTED_IMAGE_PARAM"] = "test_ssm_param_name"
    os.environ["SRC_CREDENTIAL_NAME"] = "test_key"

    yield


def test_lambda_function_02(ecr_client, ssm_client, env_variables_02):
    import lambda_function

    response = lambda_function.lambda_handler(
        {},
        None,
    )
    assert len(response['Payload']) == 2
    assert response['Payload'] == [{'repositoryName': 'ubuntu', 'imageTag': 'v2.2.0'},
                                   {'repositoryName': 'test-repository-01', 'imageTag': 'latest'}]


# Test Public repos
@pytest.fixture
def env_variables_03():
    """Mocked AWS environment variables such as AWS credentials and region"""
    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
    os.environ["AWS_REGION"] = "us-east-1"
    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"

    os.environ["SOURCE_TYPE"] = "Public"
    os.environ["SRC_LIST"] = "SELECTED"
    os.environ["SRC_REGION"] = "us-east-1"
    os.environ["SRC_ACCOUNT_ID"] = ""
    os.environ["SELECTED_IMAGE_PARAM"] = "test_ssm_param_name"
    os.environ["SRC_CREDENTIAL_NAME"] = "test_key"

    yield


def test_lambda_function_03(ecr_client, ssm_client, env_variables_03):
    import lambda_function

    response = lambda_function.lambda_handler(
        {},
        None,
    )
    assert len(response['Payload']) == 2
    assert response['Payload'] == [{'repositoryName': 'ubuntu', 'imageTag': 'v2.2.0'},
                                   {'repositoryName': 'test-repository-01', 'imageTag': 'latest'}]


# Test Amazon ECR repos with tag ALL_TAGS
@pytest.fixture
def env_variables_04():
    """Mocked AWS environment variables such as AWS credentials and region"""
    os.environ["AWS_ACCESS_KEY_ID"] = "mocked-aws-access-key-id"
    os.environ["AWS_SECRET_ACCESS_KEY"] = "mocked-aws-secret-access-key"
    os.environ["AWS_SESSION_TOKEN"] = "mocked-aws-session-token"
    os.environ["AWS_REGION"] = "us-east-1"
    os.environ["AWS_DEFAULT_REGION"] = "us-east-1"

    os.environ["SOURCE_TYPE"] = "Amazon_ECR"
    os.environ["SRC_LIST"] = "SELECTED"
    os.environ["SRC_REGION"] = "us-east-1"
    os.environ["SRC_ACCOUNT_ID"] = ""
    os.environ["SELECTED_IMAGE_PARAM"] = "test_ssm_param_name_all"
    os.environ["SRC_CREDENTIAL_NAME"] = "test_key"

    yield


@pytest.fixture
def ssm_client_02():
    # SSM parameter using the ALL_TAGS pseudo tag for the ubuntu repository.
    with mock_ssm():
        region = os.environ.get("AWS_REGION")
        ssm = boto3.client("ssm", region_name=region)
        ssm.put_parameter(
            Name="test_ssm_param_name_all",
            Value="""
            ubuntu:ALL_TAGS,
            test-repository-01
            """,
            Type='String'
        )

        yield


def test_lambda_function_04(ecr_client, ssm_client_02, env_variables_04):
    import lambda_function

    response = lambda_function.lambda_handler(
        {},
        None,
    )
    # ALL_TAGS expands ubuntu to its 3 tags; test-repository-01 keeps 'latest'.
    assert len(response['Payload']) == 4
    assert response['Payload'] == [{'repositoryName': 'ubuntu', 'imageTag': 'latest'},
                                   {'repositoryName': 'ubuntu', 'imageTag': 'v2.3.0'},
                                   {'repositoryName': 'ubuntu', 'imageTag': 'v2.2.0'},
                                   {'repositoryName': 'test-repository-01', 'imageTag': 'latest'}]
-------------------------------------------------------------------------------- /source/lambda/ecr_helper/util/ecr_helper.py: --------------------------------------------------------------------------------
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: Apache-2.0 3 | import os 4 | import json 5 | import logging 6 | import re 7 | 8 | import base64 9 | import boto3 10 | from botocore import config 11 | 12 | logger = logging.getLogger() 13 | logger.setLevel(logging.INFO) 14 | 15 | solution_version = os.environ.get("SOLUTION_VERSION", "v1.0.0") 16 | solution_id = os.environ.get("SOLUTION_ID", "SO8003") 17 | user_agent_config = { 18 | "user_agent_extra": f"AwsSolution/{solution_id}/{solution_version}" 19 | } 20 | default_config = config.Config(**user_agent_config) 21 | default_region = os.environ.get("AWS_REGION") 22 | 23 | secretsmanager_client = boto3.client('secretsmanager', config=default_config) 24 | ssm_client = boto3.client('ssm', config=default_config) 25 | 26 | 27 | class BaseHelper: 28 | """ 29 | Base Helper Class""" 30 | 31 | def __init__(self, params): 32 | self._params = params 33 | 34 | def _get_ssm_parameter(self, parameter_name, decrypt=False): 35 | """Get the value of an SSM parameter.""" 36 | response = ssm_client.get_parameter( 37 | Name=parameter_name, 38 | WithDecryption=decrypt 39 | ) 40 | return response['Parameter']['Value'] 41 | 42 | def _split_selected_images(self, src_image_list): 43 | """Split the list of selected images into a list of dictionaries.""" 44 | logger.info("input srcImageList: %s", src_image_list) 45 | result = [] 46 | src_image_list = re.sub(r'(\r|\n|\ |\\n)', '', src_image_list) 47 | 48 | for image in src_image_list.split(','): 49 | image_name = image.split(':')[0] 50 | image_tag = image.split(':')[1] if len( 51 | image.split(':')) >= 2 else "latest" 52 | result.append( 53 | {"repositoryName": image_name, "imageTag": image_tag}) 54 | return result 55 | 56 | def generate_repo_tag_map_list(self): 57 | """Generate a list of repository and tag map 58 | Args: 59 | src_list_type (str): Source list type: ALL | SELECTED 60 | 61 | Return: 62 | [ 63 | { 64 | "repositoryName": "ubuntu", 65 | "imageTag": "latest" 66 | }, 67 | { 68 | "repositoryName": 
"ubuntu", 69 | "imageTag": "1.2.0" 70 | } 71 | ] 72 | """ 73 | result = [] 74 | 75 | ssm_image_list = self._get_ssm_parameter( 76 | self._params['src_image_list'], False) 77 | logger.info(ssm_image_list) 78 | result = self._split_selected_images(ssm_image_list) 79 | 80 | return result 81 | 82 | 83 | class ECRHelper(BaseHelper): 84 | """Helper Class for ECR Task""" 85 | 86 | def __init__(self, params): 87 | super().__init__(params) 88 | self._params = params 89 | self._ecr = self._generate_client() 90 | 91 | def generate_repo_tag_map_list(self): 92 | """Generate a list of repository and tag map 93 | Args: 94 | src_list_type (str): Source list type: ALL | SELECTED 95 | 96 | Return: 97 | [ 98 | { 99 | "repositoryName": "ubuntu", 100 | "imageTag": "latest" 101 | }, 102 | { 103 | "repositoryName": "ubuntu", 104 | "imageTag": "1.2.0" 105 | } 106 | ] 107 | """ 108 | result = [] 109 | 110 | if self._params['src_list'] == 'ALL': 111 | # Use ECR API to get the full list of repos and tags 112 | 113 | repo_name_list = self._get_ecr_repositories_name() 114 | for repo_name in repo_name_list: 115 | image_tags = self._get_ecr_image_tags(repo_name) 116 | for image_tag in image_tags: 117 | result.append( 118 | {"repositoryName": repo_name, "imageTag": image_tag}) 119 | 120 | elif self._params['src_list'] == 'SELECTED': 121 | # If the version of input is ALL_TAGS and the sourceType is Amazon_ECR, the function will get all the tags of this image. 
122 | ssm_image_list = self._get_ssm_parameter( 123 | self._params['src_image_list'], False) 124 | logger.info(ssm_image_list) 125 | result = self._split_selected_images(ssm_image_list) 126 | else: 127 | logger.info("sourceType is not (Amazon_ECR + ALL)/SELECTED, it is: " + 128 | self._params['source_type'] + " " + self._params['src_list']) 129 | 130 | return result 131 | 132 | def _generate_client(self): 133 | """Generate the ECR client.""" 134 | 135 | # Get the AK/SK if source is NOT in current AWS account 136 | if self._params['src_account_id']: 137 | secret_name = self._params['src_credential_name'] 138 | secret = None 139 | decoded_binary_secret = None 140 | 141 | response = secretsmanager_client.get_secret_value( 142 | SecretId=secret_name) 143 | if 'SecretString' in response: 144 | secret = response['SecretString'] 145 | else: 146 | binary_secret_data = response['SecretBinary'] 147 | decoded_binary_secret = base64.b64decode(binary_secret_data) 148 | secret = decoded_binary_secret.decode('utf-8') 149 | 150 | secret_dict = json.loads(secret) 151 | return boto3.client('ecr', 152 | region_name=self._params['src_region'], 153 | aws_access_key_id=secret_dict['access_key_id'], 154 | aws_secret_access_key=secret_dict['secret_access_key']) 155 | else: 156 | return boto3.client('ecr', region_name=self._params['src_region']) 157 | 158 | def _get_ecr_repositories_name(self): 159 | """Get the list of repositories in an Amazon ECR registry.""" 160 | response = self._ecr.describe_repositories() 161 | repos = response.get('repositories') 162 | while "nextToken" in response: 163 | response = self._ecr.describe_repositories( 164 | nextToken=response['nextToken'] 165 | ) 166 | repos.extend(response['repositories']) 167 | 168 | repo_name_list = [repo['repositoryName'] for repo in repos] 169 | return repo_name_list 170 | 171 | def _get_ecr_image_tags(self, repo_name): 172 | """ 173 | Get the list of tags for a specific repository in an Amazon ECR registry. 
174 | For example: 175 | Repo: prod_ubuntu 176 | [ 177 | { 178 | Image Tag: 1.3, latest 179 | Digest: sha256002 180 | }, 181 | { 182 | Image Tag: 1.2 183 | Digest: sha256001 184 | } 185 | ] 186 | Return: 187 | tags_list: [1.3, latest, 1.2] 188 | 189 | """ 190 | image_tags = [] 191 | response = self._ecr.describe_images( 192 | repositoryName=repo_name, 193 | filter={ 194 | 'tagStatus': 'TAGGED' 195 | } 196 | ) 197 | image_details = response.get('imageDetails') 198 | while "nextToken" in response: 199 | response = self._ecr.describe_repositories( 200 | repositoryName=repo_name, 201 | filter={ 202 | 'tagStatus': 'TAGGED' 203 | }, 204 | nextToken=response['nextToken'] 205 | ) 206 | image_details.extend(response.get('imageDetails')) 207 | for image_detail in image_details: 208 | image_tags.extend(image_detail.get('imageTags')) 209 | 210 | return image_tags 211 | 212 | def _split_selected_images(self, src_image_list): 213 | """Split the list of selected images into a list of dictionaries.""" 214 | logger.info("input srcImageList: %s", src_image_list) 215 | result = [] 216 | src_image_list = re.sub(r'(\r|\n|\ |\\n)', '', src_image_list) 217 | 218 | for image in src_image_list.split(','): 219 | repo_name = image.split(':')[0] 220 | image_tag = image.split(':')[1] if len( 221 | image.split(':')) >= 2 else "latest" 222 | 223 | # Handle the ALL_TAGS 224 | if image_tag == "ALL_TAGS": 225 | tmp_image_tags = self._get_ecr_image_tags(repo_name) 226 | for tmp_image_tag in tmp_image_tags: 227 | result.append({"repositoryName": repo_name, 228 | "imageTag": tmp_image_tag}) 229 | else: 230 | result.append( 231 | {"repositoryName": repo_name, "imageTag": image_tag}) 232 | 233 | return result 234 | -------------------------------------------------------------------------------- /source/lambda/step-func.ts: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 
3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 15 | */ 16 | 17 | import * as AWS from "aws-sdk"; 18 | 19 | /** 20 | * This lambda should be triggered when stack is created or updated. 21 | * The purpose is to ensure replication task is started immediately (step functions is triggered) on create or update 22 | * Ideally, the step functions should be triggered by event bridge rule, but this doesn't work all the time. 23 | * This is a workaround to resolve the issue. 24 | * 25 | * @param event:any - Not used. 26 | */ 27 | 28 | exports.handler = async (event: any) => { 29 | const stateMachineArn: string = process.env.STATE_MACHINE_ARN ? 
process.env.STATE_MACHINE_ARN : 'null' 30 | 31 | const sfn = new AWS.StepFunctions(); 32 | 33 | var queryParams = { 34 | stateMachineArn: stateMachineArn, 35 | statusFilter: 'RUNNING' 36 | }; 37 | 38 | var execParams = { 39 | stateMachineArn: stateMachineArn, 40 | }; 41 | 42 | // Check if any running executions 43 | const list = await sfn.listExecutions(queryParams).promise(); 44 | console.log(list.executions); 45 | 46 | if (list.executions.length == 0) { 47 | //if not, start a new one 48 | const executionArn = await sfn.startExecution(execParams).promise(); 49 | console.log(executionArn); 50 | } 51 | 52 | 53 | } -------------------------------------------------------------------------------- /source/lib/aws-data-replication-component-ecr-stack.ts: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | import { 18 | Construct, 19 | } from 'constructs'; 20 | import { 21 | Aws, 22 | Fn, 23 | Stack, 24 | StackProps, 25 | CfnParameter, 26 | CfnCondition, 27 | Duration, 28 | CfnResource, 29 | RemovalPolicy, 30 | aws_iam as iam, 31 | aws_dynamodb as ddb, 32 | aws_sns as sns, 33 | aws_kms as kms, 34 | aws_stepfunctions as sfn, 35 | aws_stepfunctions_tasks as tasks, 36 | aws_lambda as lambda, 37 | aws_ecs as ecs, 38 | aws_ec2 as ec2, 39 | aws_ssm as ssm, 40 | aws_sns_subscriptions as subscriptions, 41 | aws_events as events, 42 | aws_events_targets as targets, 43 | aws_secretsmanager as secretsmanager, 44 | aws_logs as logs, 45 | custom_resources as cr 46 | } from 'aws-cdk-lib'; 47 | import { NagSuppressions } from "cdk-nag"; 48 | 49 | import * as path from 'path'; 50 | 51 | const { VERSION } = process.env; 52 | 53 | /** 54 | * cfn-nag suppression rule interface 55 | */ 56 | interface CfnNagSuppressRule { 57 | readonly id: string; 58 | readonly reason: string; 59 | } 60 | 61 | 62 | export function addCfnNagSuppressRules(resource: CfnResource, rules: CfnNagSuppressRule[]) { 63 | resource.addMetadata('cfn_nag', { 64 | rules_to_suppress: rules 65 | }); 66 | } 67 | 68 | /*** 69 | * Main Stack 70 | */ 71 | export class DataTransferECRStack extends Stack { 72 | private paramGroups: any[] = []; 73 | private paramLabels: any = {}; 74 | 75 | private addToParamGroups(label: string, ...param: string[]) { 76 | this.paramGroups.push({ 77 | Label: { default: label }, 78 | Parameters: param 79 | 80 | }); 81 | }; 82 | 83 | private addToParamLabels(label: string, param: string) { 84 | this.paramLabels[param] = { 85 | default: label 86 | } 87 | } 88 | 89 | 90 | constructor(scope: Construct, id: string, props?: StackProps) { 91 | super(scope, id, props); 92 | 93 | // The code that defines your stack goes here 94 | 95 | const sourceType = new CfnParameter(this, 'sourceType', { 96 | description: 'Choose type of source container registry, for example Amazon_ECR, or Public 
from Docker Hub, gco.io, etc.', 97 | type: 'String', 98 | default: 'Amazon_ECR', 99 | allowedValues: ['Amazon_ECR', 'Public'] 100 | }) 101 | this.addToParamLabels('Source Type', sourceType.logicalId) 102 | 103 | // Only required for ECR 104 | const srcRegion = new CfnParameter(this, 'srcRegion', { 105 | description: 'Source Region Name (only required if source type is Amazon ECR), for example, us-west-1', 106 | type: 'String', 107 | default: '', 108 | }) 109 | this.addToParamLabels('Source Region Name', srcRegion.logicalId) 110 | 111 | // Only required for ECR 112 | const srcAccountId = new CfnParameter(this, 'srcAccountId', { 113 | description: 'Source AWS Account ID (only required if source type is Amazon ECR), leave it blank if source is in current account', 114 | type: 'String', 115 | default: '', 116 | }) 117 | this.addToParamLabels('Source AWS Account ID', srcAccountId.logicalId) 118 | // 119 | const srcList = new CfnParameter(this, 'srcList', { 120 | description: 'Type of Source Image List, either ALL or SELECTED, for public registry, please use SELECTED only', 121 | type: 'String', 122 | default: 'ALL', 123 | allowedValues: ['ALL', 'SELECTED'] 124 | }) 125 | this.addToParamLabels('Source Image List Type', srcList.logicalId) 126 | 127 | const srcImageList = new CfnParameter(this, 'srcImageList', { 128 | description: 'Selected Image List delimited by comma, for example, ubuntu:latest,alpine:latest..., leave it blank if Type is ALL. 
For ECR source, using ALL_TAGS tag to get all tags.', 129 | type: 'String', 130 | default: '', 131 | }) 132 | this.addToParamLabels('Source Image List', srcImageList.logicalId) 133 | 134 | // Currently, only required if source type is ECR 135 | const srcCredential = new CfnParameter(this, 'srcCredential', { 136 | description: 'The secret name in Secrets Manager only when using AK/SK credentials to pull images from source Amazon ECR, leave it blank for public registry', 137 | type: 'String', 138 | default: '', 139 | }) 140 | this.addToParamLabels('Source Credentials', srcCredential.logicalId) 141 | 142 | 143 | const destRegion = new CfnParameter(this, 'destRegion', { 144 | description: 'Destination Region Name, for example, cn-north-1', 145 | type: 'String', 146 | }) 147 | this.addToParamLabels('Destination Region Name', destRegion.logicalId) 148 | 149 | const destAccountId = new CfnParameter(this, 'destAccountId', { 150 | description: 'Destination AWS Account ID, leave it blank if destination is in current account', 151 | type: 'String', 152 | default: '', 153 | }) 154 | this.addToParamLabels('Destination AWS Account ID', destAccountId.logicalId) 155 | 156 | const destPrefix = new CfnParameter(this, 'destPrefix', { 157 | description: 'Destination Repo Prefix', 158 | type: 'String', 159 | default: '', 160 | }) 161 | this.addToParamLabels('Destination Repo Prefix', destPrefix.logicalId) 162 | 163 | const destCredential = new CfnParameter(this, 'destCredential', { 164 | description: 'The secret name in Secrets Manager only when using AK/SK credentials to push images to destination Amazon ECR', 165 | type: 'String', 166 | default: '', 167 | }) 168 | this.addToParamLabels('Destination Credentials', destCredential.logicalId) 169 | 170 | const ecsClusterName = new CfnParameter(this, 'ecsClusterName', { 171 | description: 'ECS Cluster Name to run ECS task (Please make sure the cluster exists)', 172 | type: 'String' 173 | }) 174 | this.addToParamLabels('ECS Cluster Name', 
ecsClusterName.logicalId) 175 | 176 | const ecsVpcId = new CfnParameter(this, 'ecsVpcId', { 177 | description: 'VPC ID to run ECS task, e.g. vpc-bef13dc7', 178 | type: 'AWS::EC2::VPC::Id' 179 | }) 180 | this.addToParamLabels('VPC ID', ecsVpcId.logicalId) 181 | 182 | const ecsSubnetA = new CfnParameter(this, 'ecsSubnetA', { 183 | description: 'First Subnet ID to run ECS task, e.g. subnet-97bfc4cd', 184 | type: 'AWS::EC2::Subnet::Id' 185 | }) 186 | this.addToParamLabels('First Subnet ID', ecsSubnetA.logicalId) 187 | 188 | const ecsSubnetB = new CfnParameter(this, 'ecsSubnetB', { 189 | description: 'Second Subnet ID to run ECS task, e.g. subnet-7ad7de32', 190 | type: 'AWS::EC2::Subnet::Id' 191 | }) 192 | this.addToParamLabels('Second Subnet ID', ecsSubnetB.logicalId) 193 | 194 | const alarmEmail = new CfnParameter(this, 'alarmEmail', { 195 | description: 'Alarm Email address to receive notification in case of any failure', 196 | // default: '', 197 | allowedPattern: '\\w[-\\w.+]*@([A-Za-z0-9][-A-Za-z0-9]+\\.)+[A-Za-z]{2,14}', 198 | type: 'String', 199 | }) 200 | this.addToParamLabels('Alarm Email', alarmEmail.logicalId) 201 | 202 | this.addToParamGroups('Type', sourceType.logicalId) 203 | this.addToParamGroups('Source Information', srcRegion.logicalId, srcAccountId.logicalId, srcList.logicalId, srcImageList.logicalId, srcCredential.logicalId) 204 | this.addToParamGroups('Destination Information', destRegion.logicalId, destAccountId.logicalId, destPrefix.logicalId, destCredential.logicalId) 205 | this.addToParamGroups('ECS Cluster Information', ecsClusterName.logicalId, ecsVpcId.logicalId, ecsSubnetA.logicalId, ecsSubnetB.logicalId) 206 | this.addToParamGroups('Notification Information', alarmEmail.logicalId) 207 | 208 | this.templateOptions.description = `(SO8003) - Data Transfer Hub - ECR Plugin - Template version ${VERSION}`; 209 | 210 | this.templateOptions.metadata = { 211 | 'AWS::CloudFormation::Interface': { 212 | ParameterGroups: this.paramGroups, 213 | 
ParameterLabels: this.paramLabels, 214 | } 215 | } 216 | 217 | const isSelectedImage = new CfnCondition(this, 'isSelectedImage', { 218 | expression: Fn.conditionEquals('SELECTED', srcList), 219 | }); 220 | 221 | 222 | const isSrcInCurrentAccount = new CfnCondition(this, 'isSrcInCurrentAccount', { 223 | expression: Fn.conditionAnd( 224 | // Source Account ID is blank 225 | Fn.conditionEquals('', srcAccountId), 226 | // Source Type is Amazon ECR 227 | Fn.conditionEquals('Amazon_ECR', sourceType)), 228 | 229 | }); 230 | 231 | const isDestInCurrentAccount = new CfnCondition(this, 'isDestInCurrentAccount', { 232 | // Destination in Current Account 233 | expression: Fn.conditionEquals('', destAccountId), 234 | }); 235 | 236 | const selectedImages = Fn.conditionIf(isSelectedImage.logicalId, srcImageList.valueAsString, 'Not Applicable').toString(); 237 | 238 | 239 | // Set up SSM for selected image list 240 | const selectedImageParam = new ssm.StringParameter(this, 'selectedImageParam', { 241 | description: `Parameter to store the selected image list delimited by comma for stack ${Aws.STACK_NAME}`, 242 | // parameterName: 'SelectedImageList', 243 | stringValue: selectedImages, 244 | }); 245 | 246 | 247 | // Setup DynamoDB 248 | const imageTable = new ddb.Table(this, 'ECRTransferTable', { 249 | partitionKey: { name: 'Image', type: ddb.AttributeType.STRING }, 250 | sortKey: { name: 'Tag', type: ddb.AttributeType.STRING }, 251 | billingMode: ddb.BillingMode.PAY_PER_REQUEST, 252 | removalPolicy: RemovalPolicy.DESTROY, 253 | pointInTimeRecovery: true, 254 | }) 255 | 256 | const cfnTable = imageTable.node.defaultChild as ddb.CfnTable 257 | addCfnNagSuppressRules(cfnTable, [ 258 | { 259 | id: 'W74', 260 | reason: 'This table is set to use DEFAULT encryption, the key is owned by DDB.' 
261 | }, 262 | ]) 263 | 264 | const listImagesLambda = new lambda.Function(this, 'ListImagesFunction', { 265 | code: lambda.AssetCode.fromAsset(path.join(__dirname, '../lambda/ecr_helper'), { 266 | }), 267 | runtime: lambda.Runtime.PYTHON_3_9, 268 | handler: 'lambda_function.lambda_handler', 269 | memorySize: 256, 270 | timeout: Duration.minutes(15), 271 | description: 'Data Transfer Hub ECR Plugin - List Image Handler', 272 | environment: { 273 | SOURCE_TYPE: sourceType.valueAsString, 274 | SRC_ACCOUNT_ID: srcAccountId.valueAsString, 275 | SRC_LIST: srcList.valueAsString, 276 | SRC_REGION: srcRegion.valueAsString, 277 | SRC_CREDENTIAL_NAME: srcCredential.valueAsString, 278 | SELECTED_IMAGE_PARAM: selectedImageParam.parameterName, 279 | } 280 | }); 281 | 282 | const srcSecretParam = secretsmanager.Secret.fromSecretNameV2(this, 'srcSecretParam', srcCredential.valueAsString); 283 | const desSecretParam = secretsmanager.Secret.fromSecretNameV2(this, 'desSecretParam', destCredential.valueAsString); 284 | 285 | listImagesLambda.addToRolePolicy( 286 | new iam.PolicyStatement({ 287 | actions: [ 288 | "ecr:DescribeRepositories", 289 | "ecr:DescribeImages", 290 | ], 291 | resources: [ 292 | `arn:${Aws.PARTITION}:ecr:${srcRegion.valueAsString}:${Aws.ACCOUNT_ID}:repository/*` 293 | ] 294 | }) 295 | ); 296 | 297 | selectedImageParam.grantRead(listImagesLambda); 298 | srcSecretParam.grantRead(listImagesLambda); 299 | 300 | const vpc = ec2.Vpc.fromVpcAttributes(this, 'ECSVpc', { 301 | vpcId: ecsVpcId.valueAsString, 302 | availabilityZones: Fn.getAzs(), 303 | publicSubnetIds: [ecsSubnetA.valueAsString, ecsSubnetB.valueAsString] 304 | }) 305 | 306 | const cluster = ecs.Cluster.fromClusterAttributes(this, 'ECSCluster', { 307 | clusterName: ecsClusterName.valueAsString, 308 | vpc: vpc, 309 | securityGroups: [] 310 | }) 311 | 312 | const containerlogGroup = new logs.LogGroup(this, `DTH-ECR-Container-LogGroup`, { 313 | retention: 365 314 | }); 315 | const cfncontainerlogGroup = 
containerlogGroup.node.defaultChild as logs.CfnLogGroup 316 | addCfnNagSuppressRules(cfncontainerlogGroup, [ 317 | { 318 | id: 'W84', 319 | reason: 'Log group data is always encrypted in CloudWatch Logs using an AWS Managed KMS Key' 320 | }, 321 | ]) 322 | 323 | // Create ECS executionRole and executionPolicy 324 | const ecsTaskExecutionRole = new iam.Role(this, `DTH-ECR-TaskExecutionRole`, { 325 | assumedBy: new iam.ServicePrincipal('ecs-tasks.amazonaws.com') 326 | }); 327 | 328 | const taskExecutionPolicy = new iam.Policy(this, 'TaskExecutionPolicy', { 329 | policyName: `${Aws.STACK_NAME}TaskExecutionPolicy`, 330 | statements: [ 331 | new iam.PolicyStatement({ 332 | actions: [ 333 | "logs:CreateLogStream", 334 | "logs:PutLogEvents" 335 | ], 336 | resources: [ 337 | containerlogGroup.logGroupArn 338 | ] 339 | }), 340 | ] 341 | }); 342 | taskExecutionPolicy.node.addDependency(containerlogGroup); 343 | taskExecutionPolicy.attachToRole(ecsTaskExecutionRole); 344 | 345 | const taskDefinition = new ecs.TaskDefinition(this, 'ECRTransferTask', { 346 | memoryMiB: '1024', 347 | cpu: '512', 348 | compatibility: ecs.Compatibility.FARGATE, 349 | family: `${Aws.STACK_NAME}-ECRTransferTask`, 350 | executionRole: ecsTaskExecutionRole.withoutPolicyUpdates() 351 | }); 352 | srcSecretParam.grantRead(taskDefinition.taskRole); 353 | desSecretParam.grantRead(taskDefinition.taskRole); 354 | 355 | const ecrRegistry = 'public.ecr.aws/aws-gcr-solutions' 356 | const ecrImageName = 'data-transfer-hub-ecr' 357 | const ecrImageTag = VERSION 358 | 359 | const ecrImageUrl = `${ecrRegistry}/${ecrImageName}:${ecrImageTag}` 360 | 361 | const containerDefinition = taskDefinition.addContainer('DefaultContainer', { 362 | image: ecs.ContainerImage.fromRegistry(ecrImageUrl), 363 | environment: { 364 | SOURCE_TYPE: sourceType.valueAsString, 365 | AWS_DEFAULT_REGION: this.region, 366 | AWS_ACCOUNT_ID: this.account, 367 | SRC_REGION: srcRegion.valueAsString, 368 | SRC_ACCOUNT_ID: 
srcAccountId.valueAsString, 369 | SRC_CREDENTIAL_NAME: srcCredential.valueAsString, 370 | DEST_REGION: destRegion.valueAsString, 371 | DEST_ACCOUNT_ID: destAccountId.valueAsString, 372 | DEST_PREFIX: destPrefix.valueAsString, 373 | DEST_CREDENTIAL_NAME: destCredential.valueAsString, 374 | 375 | }, 376 | logging: ecs.LogDrivers.awsLogs({ 377 | streamPrefix: 'DTH-ECR', 378 | logGroup: containerlogGroup, 379 | }) 380 | }); 381 | 382 | 383 | const ecrSrcReadOnlyPolicy = new iam.Policy(this, 'ECRSrcReadOnlyPolicy', { 384 | policyName: `${Aws.STACK_NAME}ECRSrcReadOnlyPolicy`, 385 | statements: [ 386 | new iam.PolicyStatement({ 387 | actions: [ 388 | "ecr:GetAuthorizationToken", 389 | ], 390 | resources: [ 391 | '*' 392 | ] 393 | }), 394 | new iam.PolicyStatement({ 395 | actions: [ 396 | "ecr:BatchCheckLayerAvailability", 397 | "ecr:GetDownloadUrlForLayer", 398 | "ecr:BatchGetImage", 399 | ], 400 | resources: [ 401 | `arn:${Aws.PARTITION}:ecr:${srcRegion.valueAsString}:${Aws.ACCOUNT_ID}:repository/*` 402 | 403 | ] 404 | }), 405 | ] 406 | }); 407 | 408 | const cfnecrSrcReadOnlyPolicy = ecrSrcReadOnlyPolicy.node.defaultChild as iam.CfnPolicy 409 | addCfnNagSuppressRules(cfnecrSrcReadOnlyPolicy, [ 410 | { 411 | id: 'W12', 412 | reason: 'This IAM policy need * resource' 413 | }, 414 | ]) 415 | 416 | const ecrSrcPolicy = ecrSrcReadOnlyPolicy.node.defaultChild as iam.CfnPolicy 417 | ecrSrcPolicy.cfnOptions.condition = isSrcInCurrentAccount 418 | 419 | ecrSrcReadOnlyPolicy.attachToRole(taskDefinition.taskRole); 420 | 421 | const ecrDestWritePolicy = new iam.Policy(this, 'ECRDestWritePolicy', { 422 | policyName: `${Aws.STACK_NAME}ECRDestWritePolicy`, 423 | statements: [ 424 | new iam.PolicyStatement({ 425 | actions: [ 426 | "ecr:GetAuthorizationToken", 427 | ], 428 | resources: [ 429 | '*' 430 | ] 431 | }), 432 | new iam.PolicyStatement({ 433 | actions: [ 434 | "ecr:CreateRepository", 435 | "ecr:CompleteLayerUpload", 436 | "ecr:UploadLayerPart", 437 | "ecr:InitiateLayerUpload", 
438 | "ecr:PutImage", 439 | "ecr:BatchCheckLayerAvailability", 440 | "ecr:GetDownloadUrlForLayer", 441 | "ecr:BatchGetImage", 442 | ], 443 | resources: [ 444 | `arn:${Aws.PARTITION}:ecr:${destRegion.valueAsString}:${Aws.ACCOUNT_ID}:repository/*` 445 | 446 | ] 447 | }), 448 | ] 449 | }); 450 | const cfnecrDestWritePolicy = ecrDestWritePolicy.node.defaultChild as iam.CfnPolicy 451 | addCfnNagSuppressRules(cfnecrDestWritePolicy, [ 452 | { 453 | id: 'W12', 454 | reason: 'This IAM policy need * resource' 455 | }, 456 | ]) 457 | 458 | const ecrDestPolicy = ecrDestWritePolicy.node.defaultChild as iam.CfnPolicy 459 | ecrDestPolicy.cfnOptions.condition = isDestInCurrentAccount 460 | ecrDestWritePolicy.attachToRole(taskDefinition.taskRole); 461 | 462 | 463 | const submitJob = new tasks.LambdaInvoke(this, 'Submit Lambda', { 464 | lambdaFunction: listImagesLambda, 465 | // Lambda's result is in the attribute `Payload` 466 | outputPath: '$.Payload' 467 | }); 468 | 469 | const clusterSG = new ec2.SecurityGroup(this, 'clusterSG', { 470 | allowAllOutbound: true, 471 | description: `SG for ${Aws.STACK_NAME} Fargate Tasks`, 472 | vpc: vpc, 473 | }); 474 | const cfnclusterSG = clusterSG.node.defaultChild as ec2.CfnSecurityGroup 475 | addCfnNagSuppressRules(cfnclusterSG, [ 476 | { 477 | id: 'W5', 478 | reason: 'Egress of 0.0.0.0/0 is required' 479 | }, 480 | { 481 | id: 'W40', 482 | reason: 'Egress IPProtocol of -1 is required' 483 | }, 484 | ]) 485 | 486 | const runTask = new tasks.EcsRunTask(this, 'Run Fargate Task', { 487 | integrationPattern: sfn.IntegrationPattern.RUN_JOB, 488 | cluster, 489 | taskDefinition, 490 | assignPublicIp: true, 491 | containerOverrides: [{ 492 | containerDefinition, 493 | environment: [ 494 | { name: 'IMAGE', value: sfn.JsonPath.stringAt('$.repositoryName') }, 495 | { name: 'TAG', value: sfn.JsonPath.stringAt('$.imageTag') }, 496 | ], 497 | }], 498 | launchTarget: new tasks.EcsFargateLaunchTarget(), 499 | resultPath: '$.result', 500 | securityGroups: 
[clusterSG] 501 | }); 502 | 503 | 504 | const putSuccessInDDBTask = new tasks.DynamoPutItem(this, 'Log Success in DynamoDB', { 505 | item: { 506 | Image: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.repositoryName')), 507 | Tag: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.imageTag')), 508 | Execution: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$$.Execution.Name')), 509 | Status: tasks.DynamoAttributeValue.fromString('Done'), 510 | }, 511 | table: imageTable, 512 | returnValues: tasks.DynamoReturnValues.NONE, 513 | resultPath: '$.result' 514 | }); 515 | 516 | const putFailureInDDBTask = new tasks.DynamoPutItem(this, 'Log Failure in DynamoDB', { 517 | item: { 518 | Image: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.repositoryName')), 519 | Tag: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.imageTag')), 520 | Execution: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$$.Execution.Name')), 521 | ErrorMessage: tasks.DynamoAttributeValue.fromString(sfn.JsonPath.stringAt('$.result.Error')), 522 | Status: tasks.DynamoAttributeValue.fromString('Error'), 523 | }, 524 | table: imageTable, 525 | returnValues: tasks.DynamoReturnValues.NONE, 526 | resultPath: '$.result' 527 | }); 528 | 529 | const myKeyAlias = kms.Alias.fromAliasName(this, 'AwsSnsDefaultKey', 'alias/aws/sns'); 530 | 531 | const topic = new sns.Topic(this, 532 | 'EcrReplicationTopic', 533 | { 534 | masterKey: myKeyAlias, 535 | } 536 | ); 537 | topic.addSubscription(new subscriptions.EmailSubscription(alarmEmail.valueAsString)); 538 | 539 | const snsTask = new tasks.SnsPublish(this, 'Publish To SNS', { 540 | topic, 541 | integrationPattern: sfn.IntegrationPattern.REQUEST_RESPONSE, 542 | message: sfn.TaskInput.fromObject({ 543 | error: "Failed to copy image", 544 | execution: sfn.JsonPath.stringAt('$$.Execution.Name'), 545 | image: sfn.JsonPath.stringAt('$.repositoryName'), 546 | tag: 
sfn.JsonPath.stringAt('$.imageTag'), 547 | }) 548 | }); 549 | 550 | const endState = new sfn.Pass(this, 'EndState'); 551 | 552 | const map = new sfn.Map(this, 'Map State', { 553 | maxConcurrency: 10, 554 | itemsPath: sfn.JsonPath.stringAt('$.Payload'), 555 | }); 556 | 557 | const retryParam: sfn.RetryProps = { 558 | backoffRate: 2, 559 | interval: Duration.seconds(60), 560 | maxAttempts: 3, 561 | } 562 | 563 | map.iterator(runTask 564 | .addRetry(retryParam) 565 | .addCatch(putFailureInDDBTask.next(snsTask), { resultPath: '$.result' }) 566 | .next(putSuccessInDDBTask)); 567 | 568 | submitJob.next(map).next(endState) 569 | 570 | const logGroup = new logs.LogGroup(this, `DTH-ECR-StepFunction-LogGroup`,{ 571 | logGroupName: `/aws/vendedlogs/states/${Fn.select(6, Fn.split(":", listImagesLambda.functionArn))}-SM-log` 572 | }); 573 | 574 | // Create role for State Machine 575 | const ecrStateMachineRole = new iam.Role(this, `DTH-ECR-ecrStateMachineRole`, { 576 | assumedBy: new iam.ServicePrincipal('states.amazonaws.com') 577 | }); 578 | 579 | const taskDefArnNoVersion = Stack.of(this).formatArn({ 580 | service: 'ecs', 581 | resource: 'task-definition', 582 | resourceName: taskDefinition.family 583 | }) 584 | 585 | const ecrStateMachineRolePolicy = new iam.Policy(this, 'ecrStateMachineRolePolicy'); 586 | 587 | ecrStateMachineRolePolicy.addStatements( 588 | new iam.PolicyStatement({ 589 | actions: [ 590 | 'lambda:InvokeFunction' 591 | ], 592 | resources: [ 593 | listImagesLambda.functionArn 594 | ] 595 | }), 596 | new iam.PolicyStatement({ 597 | actions: [ 598 | 'ecs:RunTask' 599 | ], 600 | resources: [ 601 | taskDefArnNoVersion 602 | ] 603 | }), 604 | new iam.PolicyStatement({ 605 | actions: [ 606 | "ecs:StopTask", 607 | "ecs:DescribeTasks" 608 | ], 609 | resources: [ 610 | '*' 611 | ] 612 | }), 613 | new iam.PolicyStatement({ 614 | actions: [ 615 | "iam:PassRole" 616 | ], 617 | resources: [ 618 | taskDefinition.taskRole.roleArn, 619 | taskDefinition.executionRole!.roleArn
620 | ] 621 | }), 622 | new iam.PolicyStatement({ 623 | actions: [ 624 | "dynamodb:PutItem" 625 | ], 626 | resources: [ 627 | imageTable.tableArn 628 | ] 629 | }), 630 | new iam.PolicyStatement({ 631 | actions: [ 632 | "sns:Publish" 633 | ], 634 | resources: [ 635 | topic.topicArn 636 | ] 637 | }), 638 | new iam.PolicyStatement({ 639 | actions: [ 640 | "events:PutTargets", 641 | "events:PutRule", 642 | "events:DescribeRule" 643 | ], 644 | resources: [ 645 | `arn:${Aws.PARTITION}:events:${Aws.REGION}:${Aws.ACCOUNT_ID}:rule/StepFunctionsGetEventsForECSTaskRule`, 646 | ] 647 | }), 648 | new iam.PolicyStatement({ 649 | actions: [ 650 | 'logs:CreateLogDelivery', 651 | 'logs:GetLogDelivery', 652 | 'logs:UpdateLogDelivery', 653 | 'logs:DeleteLogDelivery', 654 | 'logs:ListLogDeliveries', 655 | 'logs:PutResourcePolicy', 656 | 'logs:DescribeResourcePolicies', 657 | 'logs:DescribeLogGroups' 658 | ], 659 | resources: [ 660 | '*' 661 | ] 662 | }), 663 | ); 664 | ecrStateMachineRolePolicy.node.addDependency(listImagesLambda, taskDefinition, imageTable, topic, logGroup); 665 | ecrStateMachineRolePolicy.attachToRole(ecrStateMachineRole); 666 | const cfnecrStateMachineRolePolicy = ecrStateMachineRolePolicy.node.defaultChild as iam.CfnPolicy 667 | addCfnNagSuppressRules(cfnecrStateMachineRolePolicy, [ 668 | { 669 | id: 'W12', 670 | reason: '[*] Access granted as per documentation: https://docs.aws.amazon.com/step-functions/latest/dg/cw-logs.html' 671 | }, 672 | { 673 | id: 'W76', 674 | reason: 'SPCM complexity greater then 25 is appropriate for the logic implemented' 675 | } 676 | ]) 677 | 678 | const ecrStateMachine = new sfn.StateMachine(this, 'ECRReplicationStateMachine', { 679 | stateMachineName: `${Aws.STACK_NAME}-ECRReplicationSM`, 680 | role: ecrStateMachineRole.withoutPolicyUpdates(), 681 | definition: submitJob, 682 | logs: { 683 | destination: logGroup, 684 | level: sfn.LogLevel.ALL, 685 | }, 686 | tracingEnabled: true, 687 | }); 688 | const cfnlogGroup = 
logGroup.node.defaultChild as logs.CfnLogGroup 689 | addCfnNagSuppressRules(cfnlogGroup, [ 690 | { 691 | id: 'W84', 692 | reason: 'Log group data is always encrypted in CloudWatch Logs using an AWS Managed KMS Key' 693 | }, 694 | ]) 695 | 696 | ecrStateMachine.node.addDependency(containerDefinition, taskDefinition, submitJob, logGroup, ecrStateMachineRole, ecrStateMachineRolePolicy) 697 | 698 | const smRuleRole = new iam.Role(this, 'ECRReplicationSMExecRole', { 699 | assumedBy: new iam.ServicePrincipal('events.amazonaws.com'), 700 | }) 701 | smRuleRole.addToPolicy(new iam.PolicyStatement({ 702 | actions: [ 703 | "states:StartExecution", 704 | ], 705 | resources: [ 706 | ecrStateMachine.stateMachineArn, 707 | ] 708 | })) 709 | 710 | const ecrStateMachineTarget = new targets.SfnStateMachine(ecrStateMachine, { role: smRuleRole }); 711 | const smRule = new events.Rule(this, 'ECRReplicationScheduleRule', { 712 | schedule: events.Schedule.rate(Duration.days(1)), 713 | targets: [ecrStateMachineTarget], 714 | }); 715 | smRule.node.addDependency(ecrStateMachine, smRuleRole) 716 | 717 | const checkExecutionLambdaPolicy = new iam.Policy(this, 'CheckExecutionLambdaPolicy', { 718 | policyName: `${Aws.STACK_NAME}CheckExecutionLambdaPolicy`, 719 | statements: [ 720 | new iam.PolicyStatement({ 721 | actions: [ 722 | "states:StartExecution", 723 | "states:ListExecutions", 724 | "states:ListStateMachines", 725 | "states:DescribeExecution", 726 | "states:DescribeStateMachineForExecution", 727 | "states:GetExecutionHistory", 728 | "states:ListActivities", 729 | "states:DescribeStateMachine", 730 | "states:DescribeActivity", 731 | ], 732 | resources: [ 733 | '*' 734 | ] 735 | }), 736 | ] 737 | }); 738 | 739 | const cfncheckExecutionLambdaPolicy = checkExecutionLambdaPolicy.node.defaultChild as iam.CfnPolicy 740 | addCfnNagSuppressRules(cfncheckExecutionLambdaPolicy, [ 741 | { 742 | id: 'W12', 743 | reason: 'This IAM policy need * resource' 744 | }, 745 | ]) 746 | 747 | const 
checkExecutionLambdaRole = new iam.Role(this, 'CheckExecutionFunctionRole', { 748 | assumedBy: new iam.ServicePrincipal('lambda.amazonaws.com'), 749 | }) 750 | 751 | const checkExecutionLambda = new lambda.Function(this, 'CheckExecutionFunction', { 752 | runtime: lambda.Runtime.NODEJS_16_X, 753 | handler: 'step-func.handler', 754 | code: lambda.Code.fromAsset(path.join(__dirname, '../lambda')), 755 | memorySize: 256, 756 | timeout: Duration.minutes(15), 757 | // tracing: lambda.Tracing.ACTIVE, 758 | environment: { 759 | STATE_MACHINE_ARN: ecrStateMachine.stateMachineArn 760 | }, 761 | role: checkExecutionLambdaRole.withoutPolicyUpdates() 762 | }); 763 | checkExecutionLambda.node.addDependency(checkExecutionLambdaRole, checkExecutionLambdaPolicy) 764 | 765 | checkExecutionLambdaPolicy.attachToRole(checkExecutionLambda.role!) 766 | ecrStateMachine.grantStartExecution(checkExecutionLambda) 767 | ecrStateMachine.grantRead(checkExecutionLambda) 768 | 769 | //Run checkExecutionLambda on Create 770 | const lambdaTrigger = new cr.AwsCustomResource(this, 'StatefunctionTrigger', { 771 | policy: cr.AwsCustomResourcePolicy.fromStatements([new iam.PolicyStatement({ 772 | actions: ['lambda:InvokeFunction'], 773 | effect: iam.Effect.ALLOW, 774 | resources: [checkExecutionLambda.functionArn] 775 | })]), 776 | timeout: Duration.minutes(15), 777 | onCreate: { 778 | service: 'Lambda', 779 | action: 'invoke', 780 | parameters: { 781 | FunctionName: checkExecutionLambda.functionName, 782 | InvocationType: 'Event' 783 | }, 784 | physicalResourceId: cr.PhysicalResourceId.of('JobSenderTriggerPhysicalId') 785 | }, 786 | onUpdate: { 787 | service: 'Lambda', 788 | action: 'invoke', 789 | parameters: { 790 | FunctionName: checkExecutionLambda.functionName, 791 | InvocationType: 'Event' 792 | }, 793 | physicalResourceId: cr.PhysicalResourceId.of('JobSenderTriggerPhysicalId') 794 | } 795 | }) 796 | lambdaTrigger.node.addDependency(ecrStateMachine, smRule) 797 | } 798 | } 799 | 
-------------------------------------------------------------------------------- /source/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "aws-data-replication-component-ecr", 3 | "version": "0.1.0", 4 | "license": "Apache-2.0", 5 | "author": { 6 | "name": "Amazon Web Services", 7 | "url": "https://aws.amazon.com/solutions" 8 | }, 9 | "bin": { 10 | "aws-data-replication-component-ecr": "bin/aws-data-replication-component-ecr.js" 11 | }, 12 | "scripts": { 13 | "cleanup": "tsc --build .. --clean && rm -rf node_modules && rm -f package-lock.json", 14 | "cleanup:tsc": "tsc --build ./ --clean", 15 | "build": "tsc", 16 | "watch": "tsc -w", 17 | "test": "jest --coverage", 18 | "cdk": "cdk" 19 | }, 20 | "devDependencies": { 21 | "@types/jest": "^26.0.10", 22 | "@types/node": "10.17.27", 23 | "@types/aws-lambda": "^8.10.61", 24 | "jest": "^26.4.2", 25 | "ts-jest": "^26.2.0", 26 | "aws-cdk": "2.69.0", 27 | "aws-cdk-lib": "2.69.0", 28 | "aws-sdk": "2.814.0", 29 | "ts-node": "^8.1.0", 30 | "typescript": "~3.9.7" 31 | }, 32 | "dependencies": { 33 | "aws-cdk": "2.69.0", 34 | "aws-cdk-lib": "2.69.0", 35 | "cdk-nag": "2.23.5", 36 | "constructs": "10.1.85", 37 | "source-map-support": "^0.5.16" 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /source/run-all-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script runs all tests for the root CDK project, as well as any microservices, Lambda functions, or dependency 4 | # source code packages. These include unit tests, integration tests, and snapshot tests. 5 | # 6 | # This script is called by the ../initialize-repo.sh file and the buildspec.yml file. It is important that this script 7 | # be tested and validated to ensure that all available test fixtures are run. 8 | # 9 | # The if/then blocks are for error handling. 
They will cause the script to stop executing if an error is thrown from the 10 | # node process running the test case(s). Removing them or not using them for additional calls will result in the 11 | # script continuing to execute despite an error being thrown. 12 | 13 | [ "$DEBUG" == 'true' ] && set -x 14 | set -e 15 | 16 | setup_python_env() { 17 | if [ -d "./.venv-test" ]; then 18 | echo "Reusing already setup python venv in ./.venv-test. Delete ./.venv-test if you want a fresh one created." 19 | return 20 | fi 21 | 22 | echo "Setting up python venv" 23 | python3 -m venv .venv-test 24 | echo "Initiating virtual environment" 25 | source .venv-test/bin/activate 26 | 27 | echo "Installing python packages" 28 | # install test dependencies in the python virtual environment 29 | pip install --upgrade pip 30 | pip3 install -r test/requirements-test.txt 31 | # pip3 install -r requirements.txt --target . 32 | 33 | echo "deactivate virtual environment" 34 | deactivate 35 | } 36 | 37 | run_python_test() { 38 | local component_path=$1 39 | local component_name=$2 40 | 41 | echo "------------------------------------------------------------------------------" 42 | echo "[Test] Run python unit test with coverage for $component_path $component_name" 43 | echo "------------------------------------------------------------------------------" 44 | cd $component_path 45 | 46 | if [ "${CLEAN:-true}" = "true" ]; then 47 | rm -fr .venv-test 48 | fi 49 | 50 | setup_python_env 51 | 52 | echo "Initiating virtual environment" 53 | source .venv-test/bin/activate 54 | 55 | # setup coverage report path 56 | mkdir -p $source_dir/test/coverage-reports 57 | coverage_report_path=$source_dir/test/coverage-reports/$component_name.coverage.xml 58 | echo "coverage report path set to $coverage_report_path" 59 | 60 | # Use -vv for debugging 61 | python3 -m pytest -s --cov --cov-report=term-missing --cov-report "xml:$coverage_report_path" 62 | 63 | # The pytest --cov with its parameters and .coveragerc
generates a xml cov-report with `coverage/sources` list 64 | # with absolute path for the source directories. To avoid dependencies of tools (such as SonarQube) on different 65 | # absolute paths for source directories, this substitution is used to convert each absolute source directory 66 | # path to the corresponding project relative path. The $source_dir holds the absolute path for source directory. 67 | sed -i -e "s,$source_dir,source,g" $coverage_report_path 68 | 69 | echo "deactivate virtual environment" 70 | deactivate 71 | 72 | if [ "${CLEAN:-true}" = "true" ]; then 73 | rm -fr .venv-test 74 | rm .coverage 75 | rm -fr .pytest_cache 76 | rm -fr __pycache__ test/__pycache__ 77 | fi 78 | } 79 | 80 | prepare_jest_coverage_report() { 81 | local component_name=$1 82 | 83 | if [ ! -d "coverage" ]; then 84 | echo "ValidationError: Missing required directory coverage after running unit tests" 85 | exit 129 86 | fi 87 | 88 | # prepare coverage reports 89 | rm -fr coverage/lcov-report 90 | mkdir -p $coverage_reports_top_path/jest 91 | coverage_report_path=$coverage_reports_top_path/jest/$component_name 92 | rm -fr $coverage_report_path 93 | mv coverage $coverage_report_path 94 | } 95 | 96 | run_javascript_test() { 97 | local component_path=$1 98 | local component_name=$2 99 | 100 | echo "------------------------------------------------------------------------------" 101 | echo "[Test] Run javascript unit test with coverage for $component_path $component_name" 102 | echo "------------------------------------------------------------------------------" 103 | echo "cd $component_path" 104 | cd $component_path 105 | 106 | # install and build for unit testing 107 | npm install 108 | 109 | # run unit tests 110 | npm run test 111 | 112 | # prepare coverage reports 113 | prepare_jest_coverage_report $component_name 114 | } 115 | 116 | run_cdk_project_test() { 117 | local component_path=$1 118 | local component_name=solutions-constructs 119 | 120 | echo 
"------------------------------------------------------------------------------" 121 | echo "[Test] $component_name" 122 | echo "------------------------------------------------------------------------------" 123 | cd $component_path 124 | 125 | # install and build for unit testing 126 | npm install 127 | 128 | ## Option to suppress the Override Warning messages while synthesizing using CDK 129 | # export overrideWarningsEnabled=false 130 | 131 | # run unit tests 132 | npm run test -- -u 133 | 134 | # prepare coverage reports 135 | prepare_jest_coverage_report $component_name 136 | } 137 | 138 | # Run unit tests 139 | echo "Running unit tests" 140 | 141 | # Get reference for source folder 142 | source_dir="$(pwd -P)" 143 | coverage_reports_top_path=$source_dir/test/coverage-reports 144 | 145 | # Test the CDK project 146 | construct_dir=$source_dir 147 | run_cdk_project_test $construct_dir 148 | 149 | # Test the attached Lambda function 150 | run_python_test $construct_dir/lambda/ecr_helper ecr_helper 151 | 152 | # Return to the source/ level 153 | cd $source_dir -------------------------------------------------------------------------------- /source/test/aws-data-replication-component-ecr.test.ts: -------------------------------------------------------------------------------- 1 | /* 2 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | 4 | Licensed under the Apache License, Version 2.0 (the "License"); 5 | you may not use this file except in compliance with the License. 6 | You may obtain a copy of the License at 7 | 8 | http://www.apache.org/licenses/LICENSE-2.0 9 | 10 | Unless required by applicable law or agreed to in writing, software 11 | distributed under the License is distributed on an "AS IS" BASIS, 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | See the License for the specific language governing permissions and 14 | limitations under the License. 
15 | */ 16 | 17 | import { App, Stack } from "aws-cdk-lib"; 18 | import { Match, Template } from "aws-cdk-lib/assertions"; 19 | import * as main from '../lib/aws-data-replication-component-ecr-stack'; 20 | 21 | beforeEach(() => { 22 | jest.resetModules(); 23 | process.env = {}; 24 | }); 25 | 26 | describe("MainStack", () => { 27 | test("Test main stack with default setting", () => { 28 | const app = new App(); 29 | 30 | // WHEN 31 | const stack = new main.DataTransferECRStack(app, "MyTestStack"); 32 | const template = Template.fromStack(stack); 33 | 34 | template.resourceCountIs("AWS::StepFunctions::StateMachine", 1); 35 | }); 36 | 37 | }); 38 | -------------------------------------------------------------------------------- /source/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2018", 4 | "module": "commonjs", 5 | "lib": ["es2018"], 6 | "declaration": true, 7 | "strict": true, 8 | "noImplicitAny": true, 9 | "strictNullChecks": true, 10 | "noImplicitThis": true, 11 | "alwaysStrict": true, 12 | "noUnusedLocals": false, 13 | "noUnusedParameters": false, 14 | "noImplicitReturns": true, 15 | "noFallthroughCasesInSwitch": false, 16 | "inlineSourceMap": true, 17 | "inlineSources": true, 18 | "experimentalDecorators": true, 19 | "strictPropertyInitialization": false, 20 | "typeRoots": ["./node_modules/@types"] 21 | }, 22 | "exclude": ["cdk.out"] 23 | } 24 | --------------------------------------------------------------------------------