├── source ├── cdk │ ├── lib │ │ ├── fixity │ │ │ ├── lambda │ │ │ │ ├── fixity_driver │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── requirements.txt │ │ │ │ │ └── app.py │ │ │ │ └── test_fixity.py │ │ │ ├── images │ │ │ │ └── fixity.jpeg │ │ │ ├── hasher │ │ │ │ ├── s3pcat_0.1.0_linux-amd64.tar.gz │ │ │ │ ├── Dockerfile │ │ │ │ └── hash.sh │ │ │ ├── scripts │ │ │ │ ├── generate_inventory.sh │ │ │ │ └── run_hash_job.sh │ │ │ ├── fixity-repository-stack.ts │ │ │ ├── Makefile │ │ │ └── README.md │ │ ├── autoingest │ │ │ ├── lambda │ │ │ │ ├── autoingest_driver │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── requirements.txt │ │ │ │ │ └── app.py │ │ │ │ └── test_autoingest.py │ │ │ ├── images │ │ │ │ └── autoingest.jpeg │ │ │ ├── Dockerfile │ │ │ ├── Makefile │ │ │ ├── README.md │ │ │ └── autoingest-stack.ts │ │ ├── mediasync │ │ │ ├── lambda │ │ │ │ ├── mediasync_driver │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── requirements.txt │ │ │ │ │ └── app.py │ │ │ │ └── events │ │ │ │ │ └── event.json │ │ │ ├── copier │ │ │ │ ├── ssc.sh │ │ │ │ ├── stream.sh │ │ │ │ └── Dockerfile │ │ │ ├── images │ │ │ │ └── mediasync.jpeg │ │ │ ├── scripts │ │ │ │ ├── generate_inventory.sh │ │ │ │ └── run_copy_job.sh │ │ │ ├── mediasync-repository-stack.ts │ │ │ ├── Makefile │ │ │ └── README.md │ │ ├── provision-stack.ts │ │ └── subscriber-stack.ts │ ├── .npmignore │ ├── jest.config.js │ ├── .gitignore │ ├── README.md │ ├── tsconfig.json │ ├── package.json │ ├── cdk.json │ ├── bin │ │ └── cdk.ts │ └── test │ │ └── cdk.test.ts ├── package-lock.json └── custom-resource │ ├── package.json │ ├── lib │ ├── metrics │ │ ├── index.js │ │ └── index.spec.js │ └── cfn │ │ ├── index.js │ │ └── index.spec.js │ └── index.js ├── images ├── main.png └── sc.jpeg ├── tests ├── python │ ├── test-requirements.txt │ └── quickstart_test.py └── deployment │ └── testrole.yaml ├── .github ├── PULL_REQUEST_TEMPLATE.md └── ISSUE_TEMPLATE │ ├── feature_request.md │ └── bug_report.md ├── deployment ├── cdk-solution-helper │ ├── package.json │ ├── index.js │ └── README.md ├── run-unit-tests.sh └── build-s3-dist.sh ├── CODE_OF_CONDUCT.md ├── solution-manifest.yaml ├── docs ├── security.md ├── developer.md └── faqs.md ├── NOTICE.txt ├── CHANGELOG.md ├── CONTRIBUTING.md ├── .gitignore ├── Makefile ├── README.md └── LICENSE.txt /source/cdk/lib/fixity/lambda/fixity_driver/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /source/cdk/lib/autoingest/lambda/autoingest_driver/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /source/cdk/lib/mediasync/lambda/mediasync_driver/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /source/cdk/lib/fixity/lambda/fixity_driver/requirements.txt: -------------------------------------------------------------------------------- 1 | jsonpickle==3.0.1 2 | -------------------------------------------------------------------------------- /source/cdk/lib/autoingest/lambda/autoingest_driver/requirements.txt: -------------------------------------------------------------------------------- 1 | jsonpickle==3.0.1 -------------------------------------------------------------------------------- /source/cdk/lib/mediasync/lambda/mediasync_driver/requirements.txt: 
-------------------------------------------------------------------------------- 1 | jsonpickle==3.0.1 2 | -------------------------------------------------------------------------------- /images/main.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-solutions/media-exchange-on-aws/main/images/main.png -------------------------------------------------------------------------------- /images/sc.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-solutions/media-exchange-on-aws/main/images/sc.jpeg -------------------------------------------------------------------------------- /source/cdk/.npmignore: -------------------------------------------------------------------------------- 1 | *.ts 2 | !*.d.ts 3 | 4 | # CDK asset staging directory 5 | .cdk.staging 6 | cdk.out 7 | -------------------------------------------------------------------------------- /source/cdk/lib/mediasync/copier/ssc.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | aws s3 cp $1 $2 --expected-size $3 --source-region $4 3 | -------------------------------------------------------------------------------- /tests/python/test-requirements.txt: -------------------------------------------------------------------------------- 1 | boto3==1.26.57 2 | requests==2.31.0 3 | pytest==7.3.1 4 | jsonpickle==3.0.1 5 | -------------------------------------------------------------------------------- /source/package-lock.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "source", 3 | "lockfileVersion": 2, 4 | "requires": true, 5 | "packages": {} 6 | } 7 | -------------------------------------------------------------------------------- /source/cdk/lib/fixity/images/fixity.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-solutions/media-exchange-on-aws/main/source/cdk/lib/fixity/images/fixity.jpeg -------------------------------------------------------------------------------- /source/cdk/lib/mediasync/copier/stream.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | aws s3 cp $1 - --expected-size $3 --source-region $4 | aws s3 cp - $2 --expected-size $3 3 | -------------------------------------------------------------------------------- /source/cdk/lib/mediasync/images/mediasync.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-solutions/media-exchange-on-aws/main/source/cdk/lib/mediasync/images/mediasync.jpeg -------------------------------------------------------------------------------- /source/cdk/lib/autoingest/images/autoingest.jpeg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-solutions/media-exchange-on-aws/main/source/cdk/lib/autoingest/images/autoingest.jpeg -------------------------------------------------------------------------------- /source/cdk/lib/fixity/hasher/s3pcat_0.1.0_linux-amd64.tar.gz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aws-solutions/media-exchange-on-aws/main/source/cdk/lib/fixity/hasher/s3pcat_0.1.0_linux-amd64.tar.gz --------------------------------------------------------------------------------
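The two copier helpers above take the same positional arguments: `ssc.sh` asks S3 to copy the object server-side, while `stream.sh` pipes the bytes through the container with two `aws s3 cp` processes. A minimal sketch of how they might be invoked — the bucket names, object key, size, and region here are hypothetical, not part of the solution:

```bash
# args: <source s3 url> <destination s3 url> <expected size in bytes> <source region>

# Server-side copy: S3 performs the copy; no data flows through the container.
./ssc.sh s3://example-mediaexchange-bucket/asset.mxf \
         s3://example-destination-bucket/asset.mxf 1073741824 us-east-1

# Streaming copy: download to stdout, re-upload from stdin.
./stream.sh s3://example-mediaexchange-bucket/asset.mxf \
            s3://example-destination-bucket/asset.mxf 1073741824 us-east-1
```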
/source/cdk/jest.config.js: -------------------------------------------------------------------------------- 1 | module.exports = { 2 | testEnvironment: 'node', 3 | roots: ['<rootDir>/test'], 4 | testMatch: ['**/*.test.ts'], 5 | transform: { 6 | '^.+\\.tsx?$': 'ts-jest' 7 | } 8 | }; 9 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | *Issue #, if available:* 2 | 3 | *Description of changes:* 4 | 5 | By submitting this pull request, I confirm that you can use, modify, copy, and redistribute this contribution, under the terms of your choice. -------------------------------------------------------------------------------- /source/cdk/.gitignore: -------------------------------------------------------------------------------- 1 | *.js 2 | !jest.config.js 3 | *.d.ts 4 | node_modules 5 | asset.* 6 | **/autoingest/deployment 7 | **/fixity/deployment 8 | **/mediasync/deployment 9 | 10 | # CDK asset staging directory 11 | .cdk.staging 12 | cdk.out 13 | -------------------------------------------------------------------------------- /deployment/cdk-solution-helper/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "cdk-solution-helper", 3 | "description": "Cleans-up synthesized templates from the AWS Cloud Development Kit (CDK) and prepares them for use with the AWS Solutions publishing pipeline.", 4 | "license": "Apache-2.0" 5 | } 6 | -------------------------------------------------------------------------------- /source/cdk/lib/autoingest/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM public.ecr.aws/lambda/python:3.9 2 | ARG wd=/var/task/ 3 | COPY app.py requirements.txt ${wd} 4 | RUN python3.9 -m pip install -r requirements.txt -t "${wd}" 5 | # Command can be overwritten by providing a different command in the template directly. 6 | CMD ["app.lambda_handler"] -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments.
5 | -------------------------------------------------------------------------------- /solution-manifest.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | id: SO0133 3 | name: media-exchange-on-aws 4 | version: 1.2.3 5 | cloudformation_templates: 6 | - template: aws-media-exchange.template 7 | main_template: true 8 | - template: agreement.template 9 | - template: publisher.template 10 | - template: subscriber.template 11 | - template: provision.template 12 | -------------------------------------------------------------------------------- /source/cdk/lib/fixity/hasher/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM amazon/aws-cli:latest 2 | 3 | RUN amazon-linux-extras install epel -y \ 4 | && yum update -y \ 5 | && yum install -y \ 6 | xxhash \ 7 | && yum clean all 8 | 9 | ADD s3pcat_0.1.0_linux-amd64.tar.gz /usr/local/bin/ 10 | 11 | COPY ./hash.sh /usr/local/bin/ 12 | RUN chmod +x /usr/local/bin/hash.sh 13 | 14 | ENTRYPOINT ["/usr/local/bin/hash.sh"] 15 | CMD [] 16 | -------------------------------------------------------------------------------- /source/cdk/lib/mediasync/lambda/events/event.json: -------------------------------------------------------------------------------- 1 | { 2 | "invocationSchemaVersion": "1.0", 3 | "invocationId": "YXNkbGZqYWRmaiBhc2RmdW9hZHNmZGpmaGFzbGtkaGZza2RmaAo", 4 | "job": { 5 | "id": "f3cc4f60-61f6-4a2b-8a21-d07600c373ce" 6 | }, 7 | "tasks": [ 8 | { 9 | "taskId": "dGFza2lkZ29lc2hlcmUK", 10 | "s3Key": "sintel_trailer-audio.flac", 11 | "s3VersionId": "1", 12 | "s3BucketArn": "arn:aws:s3:::prodey-media-samples-us-east-2" 13 | } 14 | ] 15 | } 16 | -------------------------------------------------------------------------------- /source/cdk/lib/mediasync/copier/Dockerfile: -------------------------------------------------------------------------------- 1 | 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | # SPDX-License-Identifier: Apache-2.0 4 | 5 | 6 | FROM amazon/aws-cli:latest 7 | 8 | COPY stream.sh /usr/local/bin/ 9 | RUN chmod +x /usr/local/bin/stream.sh 10 | 11 | COPY ssc.sh /usr/local/bin/ 12 | RUN chmod +x /usr/local/bin/ssc.sh 13 | 14 | 15 | RUN aws configure set default.s3.max_concurrent_requests 64 && aws configure set default.s3.multipart_chunksize 64MB 16 | 17 | ENTRYPOINT ["/bin/bash"] 18 | CMD [] 19 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this solution 4 | title: '' 5 | labels: enhancement 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Is your feature request related to a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] 12 | 13 | **Describe the feature you'd like** 14 | A clear and concise description of what you want to happen. 15 | 16 | **Additional context** 17 | Add any other context or screenshots about the feature request here. 18 | -------------------------------------------------------------------------------- /source/cdk/README.md: -------------------------------------------------------------------------------- 1 | # Welcome to your CDK TypeScript project 2 | 3 | This is a blank project for CDK development with TypeScript. 4 | 5 | The `cdk.json` file tells the CDK Toolkit how to execute your app. 
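For example, with the stacks registered in `bin/cdk.ts` later in this repository, a single stack can be synthesized or deployed by name (a sketch; account, region, and bootstrap state depend on your environment):

```bash
# Emit the CloudFormation template for one stack into cdk.out/
npx cdk synth MediaSyncRepositoryStack

# Deploy it using the active AWS credentials
npx cdk deploy MediaSyncRepositoryStack
```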
6 | 7 | ## Useful commands 8 | 9 | - `npm run build` compile typescript to js 10 | - `npm run watch` watch for changes and compile 11 | - `npm run test` perform the jest unit tests 12 | - `cdk deploy` deploy this stack to your default AWS account/region 13 | - `cdk diff` compare deployed stack with current state 14 | - `cdk synth` emits the synthesized CloudFormation template 15 | -------------------------------------------------------------------------------- /source/custom-resource/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "me-custom-resource", 3 | "version": "1.2.0", 4 | "engines": { 5 | "node": ">=12" 6 | }, 7 | "description": "cfn custom resources for media exchange", 8 | "main": "index.js", 9 | "scripts": { 10 | "pretest": "npm i --quiet", 11 | "test": "jest --coverage" 12 | }, 13 | "dependencies": { 14 | "axios": "^1.4.0", 15 | "uuid": "^9.0.0" 16 | }, 17 | "devDependencies": { 18 | "chai": "^4.2.0", 19 | "axios-mock-adapter": "^1.17.0", 20 | "lodash": "^4.17.15", 21 | "jest": "^29.2.2" 22 | }, 23 | "private": true, 24 | "author": { 25 | "name": "Amazon Web Services", 26 | "url": "https://aws.amazon.com/solutions" 27 | }, 28 | "license": "Apache-2.0" 29 | } 30 | -------------------------------------------------------------------------------- /source/cdk/tsconfig.json: -------------------------------------------------------------------------------- 1 | { 2 | "compilerOptions": { 3 | "target": "ES2020", 4 | "module": "commonjs", 5 | "lib": [ 6 | "es2020" 7 | ], 8 | "declaration": true, 9 | "strict": true, 10 | "noImplicitAny": true, 11 | "strictNullChecks": true, 12 | "noImplicitThis": true, 13 | "alwaysStrict": true, 14 | "noUnusedLocals": false, 15 | "noUnusedParameters": false, 16 | "noImplicitReturns": true, 17 | "noFallthroughCasesInSwitch": false, 18 | "inlineSourceMap": true, 19 | "inlineSources": true, 20 | "experimentalDecorators": true, 21 | "strictPropertyInitialization": false, 22 | "typeRoots": [ 23 | "./node_modules/@types" 24 | ] 25 | }, 26 | "exclude": [ 27 | "node_modules", 28 | "cdk.out" 29 | ] 30 | } 31 | -------------------------------------------------------------------------------- /source/cdk/lib/fixity/scripts/generate_inventory.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | # SPDX-License-Identifier: Apache-2.0 5 | 6 | ## generate_inventory.sh <bucket name> > <inventory file> 7 | 8 | [[ -z "$1" ]] && { echo "Error: <bucket name> is required"; exit 1; } 9 | 10 | if ! command -v aws &> /dev/null 11 | then 12 | echo "awscli is not installed; please install the AWS CLI by following the install guide here: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html" 13 | exit 14 | fi 15 | 16 | if ! command -v jq &> /dev/null 17 | then 18 | echo "jq is not installed; please download and install jq from here: https://stedolan.github.io/jq/download/" 19 | exit 20 | fi 21 | 22 | aws s3api list-objects-v2 --bucket $1 --no-fetch-owner --page-size 100 --query "Contents[].['$1', Key]" | jq -r ".[]| @csv" 23 | -------------------------------------------------------------------------------- /source/cdk/lib/mediasync/scripts/generate_inventory.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 | # SPDX-License-Identifier: Apache-2.0 5 | 6 | ## generate_inventory.sh <bucket name> > <inventory file> 7 | 8 | [[ -z "$1" ]] && { echo "Error: <bucket name> is required"; exit 1; } 9 | 10 | if ! command -v aws &> /dev/null 11 | then 12 | echo "awscli is not installed; please install the AWS CLI by following the install guide here: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html" 13 | exit 14 | fi 15 | 16 | if ! command -v jq &> /dev/null 17 | then 18 | echo "jq is not installed; please download and install jq from here: https://stedolan.github.io/jq/download/" 19 | exit 20 | fi 21 | 22 | aws s3api list-objects-v2 --bucket $1 --no-fetch-owner --page-size 100 --query "Contents[].['$1', Key]" | jq -r ".[]| @csv" 23 | -------------------------------------------------------------------------------- /source/cdk/lib/fixity/hasher/hash.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -xe 2 | 3 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | # SPDX-License-Identifier: Apache-2.0 5 | 6 | # usage: ./hash.sh <bucket> <key> <workers> 7 | 8 | [[ -z $1 ]] && { echo "Error: <bucket> is required"; exit 1; } 9 | [[ -z $2 ]] && { echo "Error: <key> is required"; exit 1; } 10 | [[ -z $3 ]] && { echo "Error: <workers> is required"; exit 1; } 11 | 12 | BUCKET=$1 13 | KEY=$2 14 | WORKERS=$3 15 | 16 | AWS_REGION=us-west-2 s3pcat --bucket $BUCKET --key $KEY --workers $WORKERS | tee >(md5sum | cut -d ' ' -f1 > /tmp/MD5.result) >(sha1sum | cut -d ' ' -f1 > /tmp/SHA1.result) >(xxhsum | cut -d ' ' -f1 > /tmp/xxhsum.result) > /dev/null 17 | 18 | aws s3api put-object-tagging --bucket $BUCKET --key $KEY --tagging "TagSet=[{Key=Content-MD5,Value=$(cat /tmp/MD5.result)},{Key=Content-SHA1,Value=$(cat /tmp/SHA1.result)},{Key=Content-XXHash,Value=$(cat /tmp/xxhsum.result)}]" 19 | -------------------------------------------------------------------------------- /source/cdk/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "cdk", 3 | "version": "0.1.0", 4 | "description": "AWS CDK app that defines the Media Exchange on AWS solution stacks.", 5 | "license": "Apache-2.0", 6 | "bin": { 7 | "cdk": "bin/cdk.js" 8 | }, 9 | "scripts": { 10 | "build": "tsc", 11 | "watch": "tsc -w", 12 | "test": "jest --coverage", 13 | "cdk": "cdk" 14 | }, 15 | "devDependencies": { 16 | "@types/jest": "^29.4.0", 17 | "@types/node": "^20.2.5", 18 | "aws-cdk": "^2.80.0", 19 | "jest": "^29.5.0", 20 | "ts-jest": "^29.0.5", 21 | "ts-node": "^10.9.1", 22 | "typescript": "^5.0.0" 23 | }, 24 | "dependencies": { 25 | "@aws-cdk/aws-servicecatalogappregistry-alpha": "^2.35.0-alpha.0", 26 | "aws-cdk-lib": "^2.102.0", 27 | "cdk-nag": "^2.23.5", 28 | "constructs": "^10.0.0", 29 | "source-map-support": "^0.5.21" 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /docs/security.md: -------------------------------------------------------------------------------- 1 | This solution uses AWS best practices for securing the files shared through the object storage area. 2 | 3 | * Files are encrypted by default at rest and in transit. 4 | * This solution uses AWS Key Management Service (AWS KMS) to store a customer master key (CMK) that has been established with specific account level permissions.
The publisher account can use the CMK to encrypt, and the subscriber account can use it to decrypt, the Amazon S3 bucket-level data keys that in turn decrypt each of the files in the shared MediaExchange Amazon S3 bucket. 5 | * The S3 bucket is configured with specific permissions so that the publisher account can write to it and the subscriber account can read from it. 6 | * The MediaExchange S3 bucket is configured with a lifecycle policy to delete the shared files after a configurable number of days. 7 | * In addition, the actions on files in the MediaExchange S3 bucket are tracked by access logs that are delivered to the Logs S3 bucket, which is made available to the publisher account. 8 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: bug 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Describe the bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | Steps to reproduce the behavior. 15 | 16 | **Expected behavior** 17 | A clear and concise description of what you expected to happen. 18 | 19 | **Please complete the following information about the solution:** 20 | 21 | - [ ] Version: [e.g. v1.0.0] 22 | To get the version of the solution, you can look at the description of the created CloudFormation stack. For example, "(SO0133) - media-exchange-on-aws v1.1.0". You can also find the version from [releases](https://github.com/aws-solutions/media-exchange-on-aws/releases). 23 | - [ ] Region: [e.g. us-east-1] 24 | - [ ] Was the solution modified from the version published on this repository? 25 | - [ ] If the answer to the previous question was yes, are the changes available on GitHub? 26 | - [ ] Have you checked your [service quotas](https://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html) for the services this solution uses? 27 | - [ ] Were there any errors in the CloudWatch Logs? 28 | 29 | **Screenshots** 30 | If applicable, add screenshots to help explain your problem (please **DO NOT include sensitive information**). 31 | 32 | **Additional context** 33 | Add any other context about the problem here. 34 | -------------------------------------------------------------------------------- /source/cdk/lib/autoingest/Makefile: -------------------------------------------------------------------------------- 1 | all: help 2 | 3 | help: 4 | @echo 'deploys AutoIngest utility' 5 | 6 | GUIDED ?= --guided 7 | IMAGEVERSION ?= latest 8 | ENV ?= dev 9 | VERSION ?= 1.2.0 10 | 11 | 12 | STACKPREFIX = mediaexchange-tools 13 | CURRENT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) 14 | IMAGENAME = mediasync/aws-cli 15 | ACCOUNT_ID = $(shell aws sts get-caller-identity --query Account --output text) 16 | 17 | PARAMETER_OVERRIDES := Environment=$(ENV) 18 | AWS_REGION ?= $(shell aws configure get region --output text) 19 | 20 | configure: 21 | @mkdir -p build 22 | 23 | %-build: deployment/%.json configure 24 | @echo "Building lambda..."
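# Note: the two commands below build the Lambda inside a Docker container via
# SAM (--use-container), stage the artifacts under ./build, and then stamp the
# solution VERSION into the generated template with sed.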
25 | @sam build -s $(CURRENT_DIR) -b $(CURRENT_DIR)/build --template $(CURRENT_DIR)/$< --use-container $(DEBUG) 26 | 27 | @sed -i -e "s/__VERSION__/$(VERSION)/g" $(CURRENT_DIR)/build/template.yaml 28 | 29 | autoingest-stack: autoingest-build 30 | sam deploy -t $(CURRENT_DIR)/build/template.yaml --stack-name $(STACKPREFIX)-autoingest-$(ENV) --no-confirm-changeset --no-fail-on-empty-changeset --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM --config-env autoingest $(GUIDED) --region $(AWS_REGION) --parameter-overrides $(PARAMETER_OVERRIDES) 31 | 32 | install: autoingest-stack 33 | 34 | clean: ## clears the build directory 35 | @rm -rf build/* 36 | aws cloudformation delete-stack --stack-name $(STACKPREFIX)-autoingest-$(ENV) 37 | aws cloudformation wait stack-delete-complete --stack-name $(STACKPREFIX)-autoingest-$(ENV) 38 | 39 | .PHONY: install clean build 40 | -------------------------------------------------------------------------------- /source/cdk/lib/fixity/fixity-repository-stack.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance 5 | * with the License. A copy of the License is located at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES 10 | * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions 11 | * and limitations under the License. 12 | */ 13 | import * as cdk from "aws-cdk-lib"; 14 | import { Construct } from "constructs"; 15 | import * as ecr from "aws-cdk-lib/aws-ecr"; 16 | 17 | export class FixityRepositoryStack extends cdk.Stack { 18 | constructor(scope: Construct, id: string, props?: cdk.StackProps) { 19 | super(scope, id, props); 20 | /** 21 | * CloudFormation Template Description 22 | */ 23 | this.templateOptions.description = `CloudFormation template for creating ECR Repository.`; 24 | 25 | /** 26 | * Cfn Parameters 27 | */ 28 | const repositoryName = new cdk.CfnParameter(this, "RepositoryName", { 29 | type: "String", 30 | description: "Repository Name", 31 | }); 32 | 33 | /** 34 | * ECR Repository 35 | */ 36 | new ecr.Repository(this, "Repo", { // NOSONAR 37 | repositoryName: repositoryName.valueAsString, 38 | lifecycleRules: [ 39 | { 40 | rulePriority: 1, 41 | description: "keep only one image", 42 | tagStatus: ecr.TagStatus.ANY, 43 | maxImageCount: 1, 44 | }, 45 | ], 46 | }); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /source/cdk/lib/mediasync/mediasync-repository-stack.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance 5 | * with the License. A copy of the License is located at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES 10 | * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions 11 | * and limitations under the License.
12 | */ 13 | import * as cdk from "aws-cdk-lib"; 14 | import { Construct } from "constructs"; 15 | import * as ecr from "aws-cdk-lib/aws-ecr"; 16 | 17 | export class MediaSyncRepositoryStack extends cdk.Stack { 18 | constructor(scope: Construct, id: string, props?: cdk.StackProps) { 19 | super(scope, id, props); 20 | /** 21 | * CloudFormation Template Description 22 | */ 23 | this.templateOptions.description = `CloudFormation template for creating ECR Repository.`; 24 | 25 | /** 26 | * Cfn Parameters 27 | */ 28 | const repositoryName = new cdk.CfnParameter(this, "RepositoryName", { 29 | type: "String", 30 | description: "Repository Name", 31 | }); 32 | 33 | /** 34 | * Create ECR Repository 35 | */ 36 | new ecr.Repository(this, "Repo", { // NOSONAR 37 | repositoryName: repositoryName.valueAsString, 38 | lifecycleRules: [ 39 | { 40 | rulePriority: 1, 41 | description: "keep only one image", 42 | tagStatus: ecr.TagStatus.ANY, 43 | maxImageCount: 1, 44 | }, 45 | ], 46 | }); 47 | } 48 | } 49 | -------------------------------------------------------------------------------- /NOTICE.txt: -------------------------------------------------------------------------------- 1 | Media Exchange on AWS 2 | 3 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | 5 | Licensed under the Apache License Version 2.0 (the "License"). You may not use this file except in compliance with the License. A copy of the License is located at http://www.apache.org/licenses/ or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and limitations under the License. 6 | 7 | ********************** 8 | THIRD PARTY COMPONENTS 9 | ********************** 10 | This software includes third party software subject to the following copyrights: 11 | 12 | AWS SDK under the Apache License Version 2.0 13 | aws-cdk-lib under the Apache License Version 2.0 14 | cdk-nag under the Apache License Version 2.0 15 | constructs under the Apache License Version 2.0 16 | source-map-support under the Massachusetts Institute of Technology (MIT) license 17 | @types/jest under the Massachusetts Institute of Technology (MIT) license 18 | @types/node under the Massachusetts Institute of Technology (MIT) license 19 | aws-cdk under the Apache License Version 2.0 20 | jest under the Massachusetts Institute of Technology (MIT) license 21 | ts-jest under the Massachusetts Institute of Technology (MIT) license 22 | ts-node under the Massachusetts Institute of Technology (MIT) license 23 | typescript under the Apache License Version 2.0 24 | @aws-cdk/aws-servicecatalogappregistry-alpha under the Apache License Version 2.0 25 | AWS SDK Mock under the Apache License Version 2.0 26 | requests under the Apache License Version 2.0 27 | pytest under the Massachusetts Institute of Technology (MIT) license 28 | boto3 under the Apache License Version 2.0 29 | botocore under the Apache License Version 2.0 30 | awscli under the Apache License Version 2.0 31 | jsonpickle under a New BSD license (https://github.com/jsonpickle/jsonpickle/raw/master/COPYING) 32 | axios under the Massachusetts Institute of Technology (MIT) license 33 | uuid under the Massachusetts Institute of Technology (MIT) license 34 | -------------------------------------------------------------------------------- /source/cdk/cdk.json: -------------------------------------------------------------------------------- 1 | { 2
| "app": "npx ts-node --prefer-ts-exts bin/cdk.ts", 3 | "watch": { 4 | "include": [ 5 | "**" 6 | ], 7 | "exclude": [ 8 | "README.md", 9 | "cdk*.json", 10 | "**/*.d.ts", 11 | "**/*.js", 12 | "tsconfig.json", 13 | "package*.json", 14 | "yarn.lock", 15 | "node_modules", 16 | "test" 17 | ] 18 | }, 19 | "context": { 20 | "@aws-cdk/aws-lambda:recognizeLayerVersion": true, 21 | "@aws-cdk/core:checkSecretUsage": true, 22 | "@aws-cdk/core:target-partitions": [ 23 | "aws", 24 | "aws-cn" 25 | ], 26 | "@aws-cdk-containers/ecs-service-extensions:enableDefaultLogDriver": true, 27 | "@aws-cdk/aws-ec2:uniqueImdsv2TemplateName": true, 28 | "@aws-cdk/aws-ecs:arnFormatIncludesClusterName": true, 29 | "@aws-cdk/aws-iam:minimizePolicies": true, 30 | "@aws-cdk/core:validateSnapshotRemovalPolicy": true, 31 | "@aws-cdk/aws-codepipeline:crossAccountKeyAliasStackSafeResourceName": true, 32 | "@aws-cdk/aws-s3:createDefaultLoggingPolicy": true, 33 | "@aws-cdk/aws-sns-subscriptions:restrictSqsDescryption": true, 34 | "@aws-cdk/aws-apigateway:disableCloudWatchRole": true, 35 | "@aws-cdk/core:enablePartitionLiterals": true, 36 | "@aws-cdk/aws-events:eventsTargetQueueSameAccount": true, 37 | "@aws-cdk/aws-iam:standardizedServicePrincipals": true, 38 | "@aws-cdk/aws-ecs:disableExplicitDeploymentControllerForCircuitBreaker": true, 39 | "@aws-cdk/aws-iam:importedRoleStackSafeDefaultPolicyName": true, 40 | "@aws-cdk/aws-s3:serverAccessLogsUseBucketPolicy": true, 41 | "@aws-cdk/aws-route53-patters:useCertificate": true, 42 | "@aws-cdk/customresources:installLatestAwsSdkDefault": false, 43 | "@aws-cdk/aws-rds:databaseProxyUniqueResourceName": true, 44 | "@aws-cdk/aws-codedeploy:removeAlarmsFromDeploymentGroup": true, 45 | "@aws-cdk/aws-apigateway:authorizerChangeDeploymentLogicalId": true, 46 | "@aws-cdk/aws-ec2:launchTemplateDefaultUserData": true, 47 | "@aws-cdk/aws-secretsmanager:useAttachedSecretResourcePolicyForSecretTargetAttachments": true, 48 | "@aws-cdk/aws-redshift:columnId": true 49 | } 50 | } 51 | -------------------------------------------------------------------------------- /deployment/cdk-solution-helper/index.js: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * SPDX-License-Identifier: Apache-2.0 4 | */ 5 | 6 | // Imports 7 | const fs = require("fs"); 8 | 9 | // Paths 10 | const global_s3_assets = "../global-s3-assets"; 11 | 12 | // For each template in global_s3_assets ... 13 | fs.readdirSync(global_s3_assets).forEach((file) => { 14 | // Import and parse template file 15 | const raw_template = fs.readFileSync(`${global_s3_assets}/${file}`); 16 | let template = JSON.parse(raw_template); 17 | 18 | // Clean-up Lambda function code dependencies 19 | const resources = template.Resources ? 
template.Resources : {}; 20 | const lambdaFunctions = Object.keys(resources).filter(function (key) { 21 | return resources[key].Type === "AWS::Lambda::Function"; 22 | }); 23 | 24 | lambdaFunctions.forEach(function (f) { 25 | const fn = template.Resources[f]; 26 | let prop; 27 | if (fn.Properties.hasOwnProperty("Code")) { 28 | prop = fn.Properties.Code; 29 | } else if (fn.Properties.hasOwnProperty("Content")) { 30 | prop = fn.Properties.Content; 31 | } 32 | 33 | if (prop.hasOwnProperty("S3Bucket")) { 34 | // Set the S3 key reference 35 | let artifactHash = Object.assign(prop.S3Key); 36 | const assetPath = `asset${artifactHash}`; 37 | prop.S3Key = `__SOLUTION_NAME__/__VERSION__/${assetPath}`; 38 | 39 | // Set the S3 bucket reference 40 | prop.S3Bucket = { 41 | "Fn::Sub": "__ASSET_BUCKET_NAME__-${AWS::Region}", 42 | }; 43 | } else { 44 | console.warn(`No S3Bucket Property found for ${JSON.stringify(prop)}`); 45 | } 46 | }); 47 | 48 | // Clean-up parameters section 49 | const parameters = template.Parameters ? template.Parameters : {}; 50 | const assetParameters = Object.keys(parameters).filter(function (key) { 51 | return key.includes("AssetParameters"); 52 | }); 53 | assetParameters.forEach(function (a) { 54 | template.Parameters[a] = undefined; 55 | }); 56 | 57 | // Output modified template file 58 | const output_template = JSON.stringify(template, null, 2); 59 | fs.writeFileSync(`${global_s3_assets}/${file}`, output_template); 60 | }); 61 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # Change Log 2 | 3 | All notable changes to this project will be documented in this file. 4 | 5 | The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), 6 | and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 7 | 8 | ## [1.2.3] - 2023-10-30 9 | 10 | ### Q4 2023 release 11 | 12 | - Security updates 13 | 14 | ### Contributors 15 | 16 | - @e-thoman 17 | 18 | ## [1.2.2] - 2023-09-28 19 | 20 | ### Q3 2023 release 21 | 22 | - Library updates 23 | - Improved unit tests 24 | 25 | ### Contributors 26 | 27 | - @e-thoman 28 | 29 | ## [1.2.1] - 2023-07-05 30 | 31 | ### Q3 2023 release 32 | 33 | - Updated metrics reporting so that the solution accurately sends in data for main stack deployments 34 | - Added ability to opt out of metrics reporting for optional function deployments (Fixity, Mediasync, Autoingest) 35 | 36 | ### Contributors 37 | 38 | - @e-thoman 39 | 40 | ## [1.2.0] - 2023-06-08 41 | 42 | ### Q2 2023 release 43 | 44 | - Updated PyTest dependencies to patch security vulnerabilities 45 | - Fixed spelling mistakes on Maintainer input fields 46 | - AutoIngest lambda now dynamically iterates version number 47 | - Added App Registry 48 | - CDK Conversion for main deployment and all 3 functions 49 | - Email address now support + symbol 50 | - Added unit testing 51 | - Fixed makefiles for optional deployments 52 | 53 | ### Contributors 54 | 55 | - @e-thoman 56 | 57 | ## [1.1.0] - 2022-07-07 58 | 59 | ### Q3 2022 release 60 | 61 | - Disabled ACL on the exchange bucket. 62 | - Deprecated AutoACL tool. 63 | - Deprecated lambda function for s3 events. 64 | 65 | ## [1.0.1] - 2021-07-02 66 | 67 | ### Patch release 68 | 69 | - Documentation updates and bug fixes. 
70 | 71 | ## [1.0.0] - 2021-06-08 72 | 73 | ### Q2 2021 release 74 | 75 | - Solution Launch 76 | 77 | ## [0.3] - 2021-03-16 78 | 79 | ### Q1 2021 release 80 | 81 | - New tools: AutoACL, AutoIngest 82 | - Improvements and Bug Fixes 83 | - Documentation update 84 | 85 | ## [0.2] - 2021-02-20 86 | 87 | ### Q1 2021 release 88 | 89 | - Simplified project structure 90 | - Additional security improvements 91 | - Documentation update 92 | 93 | ## [0.1] - 2020-05-19 94 | 95 | ### Initial release 96 | 97 | - Open source 98 | -------------------------------------------------------------------------------- /source/custom-resource/lib/metrics/index.js: -------------------------------------------------------------------------------- 1 | /********************************************************************************************************************* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * 3 | * * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance * 5 | * with the License. A copy of the License is located at * 6 | * * 7 | * http://www.apache.org/licenses/LICENSE-2.0 * 8 | * * 9 | * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES * 10 | * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions * 11 | * and limitations under the License. * 12 | *********************************************************************************************************************/ 13 | 14 | const axios = require('axios'); 15 | 16 | const sanitizeData = (config) => { 17 | // Remove lambda arn from config to avoid sending AccountId 18 | delete config['ServiceToken']; 19 | delete config['Resource']; 20 | 21 | return config; 22 | }; 23 | 24 | const send = async (config) => { 25 | let data; 26 | 27 | const metrics = { 28 | Solution: config.SolutionId, 29 | UUID: config.UUID, 30 | TimeStamp: new Date().toISOString(), 31 | Data: sanitizeData(config) 32 | }; 33 | 34 | const params = { 35 | method: 'post', 36 | port: 443, 37 | url: 'https://metrics.awssolutionsbuilder.com/generic', 38 | headers: { 39 | 'Content-Type': 'application/json' 40 | }, 41 | data: metrics 42 | }; 43 | 44 | data = await axios(params); 45 | 46 | return data.status; 47 | }; 48 | 49 | module.exports = { 50 | send, 51 | sanitizeData 52 | }; 53 | -------------------------------------------------------------------------------- /source/custom-resource/lib/cfn/index.js: -------------------------------------------------------------------------------- 1 | /********************************************************************************************************************* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * 3 | * * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance * 5 | * with the License. A copy of the License is located at * 6 | * * 7 | * http://www.apache.org/licenses/LICENSE-2.0 * 8 | * * 9 | * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES * 10 | * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions * 11 | * and limitations under the License. 
* 12 | *********************************************************************************************************************/ 13 | 14 | const axios = require('axios'); 15 | 16 | let sendResponse = async (event, context, responseStatus, responseData) => { 17 | let data; 18 | 19 | const responseBody = JSON.stringify({ 20 | Status: responseStatus, 21 | Reason: 'See the details in CloudWatch Log Stream: ' + context.logStreamName, 22 | PhysicalResourceId: event.LogicalResourceId, 23 | StackId: event.StackId, 24 | RequestId: event.RequestId, 25 | LogicalResourceId: event.LogicalResourceId, 26 | Data: responseData 27 | }); 28 | 29 | const params = { 30 | url: event.ResponseURL, 31 | port: 443, 32 | method: 'put', 33 | headers: { 34 | 'content-type': '', 35 | 'content-length': responseBody.length 36 | }, 37 | data: responseBody 38 | }; 39 | 40 | data = await axios(params); 41 | 42 | return data.status; 43 | }; 44 | 45 | module.exports = { 46 | send: sendResponse 47 | }; 48 | -------------------------------------------------------------------------------- /source/cdk/lib/fixity/scripts/run_hash_job.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | # SPDX-License-Identifier: Apache-2.0 5 | 6 | ## run_hash_job.sh <bucket> <key> 7 | 8 | [[ -z "$1" ]] && { echo "Error: <bucket> is required"; exit 1; } 9 | [[ -z "$2" ]] && { echo "Error: <key> is required"; exit 1; } 10 | 11 | 12 | ENV="${ENV:-dev}" 13 | STACK_NAME=mediaexchange-tools-fixity-$ENV 14 | BUCKET=$1 15 | KEY=$2 16 | 17 | if ! command -v aws &> /dev/null 18 | then 19 | echo "awscli is not installed; please install the AWS CLI by following the install guide here: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html" 20 | exit 21 | fi 22 | 23 | DEFAULT_REGION=$(aws configure get region --output text) 24 | AWS_REGION=${AWS_REGION:-$DEFAULT_REGION} 25 | LAMBDA_ARN=$(aws cloudformation describe-stacks --stack-name $STACK_NAME --query "Stacks[0].Outputs[?OutputKey == 'FixtyDriverFunctionArn'].OutputValue" --output text) 26 | ROLE_ARN=$(aws cloudformation describe-stacks --stack-name $STACK_NAME --query "Stacks[0].Outputs[?OutputKey == 'FixtyS3BatchIAMRoleArn'].OutputValue" --output text) 27 | 28 | # JOB_QUEUE_ARN=$(aws cloudformation describe-stacks --stack-name $STACK_NAME --query "Stacks[0].Outputs[?OutputKey == 'JobQueue'].OutputValue" --output text) 29 | 30 | ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 31 | 32 | ETAG=$(aws s3api head-object --bucket $BUCKET --key $KEY --query "ETag" --output text) 33 | 34 | MANIFEST="{\"Spec\":{\"Format\":\"S3BatchOperations_CSV_20180820\",\"Fields\":[\"Bucket\",\"Key\"]},\"Location\":{\"ObjectArn\":\"arn:aws:s3:::$BUCKET/$KEY\",\"ETag\":$ETAG}}" 35 | 36 | REPORT="{\"Bucket\":\"arn:aws:s3:::$BUCKET\",\"Prefix\":\"$KEY\",\"Format\":\"Report_CSV_20180820\",\"Enabled\":true,\"ReportScope\":\"AllTasks\"}" 37 | 38 | OPERATION="{\"LambdaInvoke\":{\"FunctionArn\":\"$LAMBDA_ARN\"}}" 39 | 40 | JobId=$(aws \ 41 | s3control create-job \ 42 | --account-id $ACCOUNT_ID \ 43 | --no-confirmation-required \ 44 | --manifest $MANIFEST \ 45 | --operation $OPERATION \ 46 | --report $REPORT \ 47 | --role-arn $ROLE_ARN \ 48 | --client-request-token "$(uuidgen)" \ 49 | --priority 10 \ 50 | --description "fixity" --query "JobId" --output text); 51 | 52 | echo "See your job status at https://$AWS_REGION.console.aws.amazon.com/batch/v2/home?region=$AWS_REGION#dashboard" 53 |
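Taken together with `generate_inventory.sh` above, the fixity job can be driven end to end. A sketch with hypothetical bucket and key names (`ENV` and `AWS_REGION` default as in the script):

```bash
# 1. Build a CSV manifest (bucket,key per line) of the objects to verify.
./generate_inventory.sh example-media-bucket > inventory.csv

# 2. Put the manifest where S3 Batch Operations can read it.
aws s3 cp inventory.csv s3://example-manifest-bucket/fixity/inventory.csv

# 3. Start the S3 Batch Operations job that invokes the fixity driver Lambda.
ENV=dev ./run_hash_job.sh example-manifest-bucket fixity/inventory.csv
```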
-------------------------------------------------------------------------------- /source/cdk/lib/mediasync/scripts/run_copy_job.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 4 | # SPDX-License-Identifier: Apache-2.0 5 | 6 | ## run_copy_job.sh <bucket> <key> 7 | 8 | [[ -z "$1" ]] && { echo "Error: <bucket> is required"; exit 1; } 9 | [[ -z "$2" ]] && { echo "Error: <key> is required"; exit 1; } 10 | 11 | 12 | ENV="${ENV:-dev}" 13 | STACK_NAME=mediaexchange-tools-mediasync-$ENV 14 | BUCKET=$1 15 | KEY=$2 16 | 17 | if ! command -v aws &> /dev/null 18 | then 19 | echo "awscli is not installed; please install the AWS CLI by following the install guide here: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html" 20 | exit 21 | fi 22 | 23 | 24 | DEFAULT_REGION=$(aws configure get region --output text) 25 | AWS_REGION=${AWS_REGION:-$DEFAULT_REGION} 26 | LAMBDA_ARN=$(aws cloudformation describe-stacks --stack-name $STACK_NAME --query "Stacks[0].Outputs[?OutputKey == 'LambdaFunctionArn'].OutputValue" --output text) 27 | ROLE_ARN=$(aws cloudformation describe-stacks --stack-name $STACK_NAME --query "Stacks[0].Outputs[?OutputKey == 'S3BatchRoleArn'].OutputValue" --output text) 28 | 29 | # JOB_QUEUE_ARN=$(aws cloudformation describe-stacks --stack-name $STACK_NAME --query "Stacks[0].Outputs[?OutputKey == 'JobQueue'].OutputValue" --output text) 30 | 31 | ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text) 32 | 33 | ETAG=$(aws s3api head-object --bucket $BUCKET --key $KEY --query "ETag" --output text) 34 | 35 | MANIFEST="{\"Spec\":{\"Format\":\"S3BatchOperations_CSV_20180820\",\"Fields\":[\"Bucket\",\"Key\"]},\"Location\":{\"ObjectArn\":\"arn:aws:s3:::$BUCKET/$KEY\",\"ETag\":$ETAG}}" 36 | 37 | REPORT="{\"Bucket\":\"arn:aws:s3:::$BUCKET\",\"Prefix\":\"$KEY\",\"Format\":\"Report_CSV_20180820\",\"Enabled\":true,\"ReportScope\":\"AllTasks\"}" 38 | 39 | OPERATION="{\"LambdaInvoke\":{\"FunctionArn\":\"$LAMBDA_ARN\"}}" 40 | 41 | JobId=$(aws \ 42 | s3control create-job \ 43 | --account-id $ACCOUNT_ID \ 44 | --no-confirmation-required \ 45 | --manifest $MANIFEST \ 46 | --operation $OPERATION \ 47 | --report $REPORT \ 48 | --role-arn $ROLE_ARN \ 49 | --client-request-token "$(uuidgen)" \ 50 | --priority 10 \ 51 | --description "MediaSync" --query "JobId" --output text); 52 | 53 | echo "See your job status at https://$AWS_REGION.console.aws.amazon.com/batch/v2/home?region=$AWS_REGION#dashboard" 54 | -------------------------------------------------------------------------------- /source/cdk/bin/cdk.ts: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | import "source-map-support/register"; 3 | import * as cdk from "aws-cdk-lib"; 4 | import { MEStack } from "../lib/me-stack"; 5 | import { AgreementStack } from "../lib/agreement-stack"; 6 | import { SubscriberStack } from "../lib/subscriber-stack"; 7 | import { PublisherStack } from "../lib/publisher-stack"; 8 | import { ProvisionStack } from "../lib/provision-stack"; 9 | import { FixityStack } from "../lib/fixity/fixity-stack"; 10 | import { FixityRepositoryStack } from "../lib/fixity/fixity-repository-stack"; 11 | import { MediaSyncStack } from "../lib/mediasync/mediasync-stack"; 12 | import { MediaSyncRepositoryStack } from "../lib/mediasync/mediasync-repository-stack"; 13 | import { AutoIngestStack } from "../lib/autoingest/autoingest-stack"; 14 | import {
DefaultStackSynthesizer } from "aws-cdk-lib"; 15 | 16 | const app = new cdk.App(); 17 | 18 | // Fixity 19 | new FixityStack(app, "FixityStack", { // NOSONAR 20 | synthesizer: new DefaultStackSynthesizer({ 21 | generateBootstrapVersionRule: false, 22 | }), 23 | }); 24 | 25 | new FixityRepositoryStack(app, "FixityRepositoryStack", { // NOSONAR 26 | synthesizer: new DefaultStackSynthesizer({ 27 | generateBootstrapVersionRule: false, 28 | }), 29 | }); 30 | 31 | // MediaSync 32 | new MediaSyncStack(app, "MediaSyncStack", { // NOSONAR 33 | synthesizer: new DefaultStackSynthesizer({ 34 | generateBootstrapVersionRule: false, 35 | }), 36 | }); 37 | 38 | new MediaSyncRepositoryStack(app, "MediaSyncRepositoryStack", { // NOSONAR 39 | synthesizer: new DefaultStackSynthesizer({ 40 | generateBootstrapVersionRule: false, 41 | }), 42 | }); 43 | 44 | // Autoingest 45 | new AutoIngestStack(app, "AutoIngestStack", { // NOSONAR 46 | synthesizer: new DefaultStackSynthesizer({ 47 | generateBootstrapVersionRule: false, 48 | }), 49 | }); 50 | 51 | // Media Exchange Solution Templates 52 | new MEStack(app, "MEStack", { // NOSONAR 53 | synthesizer: new DefaultStackSynthesizer({ 54 | generateBootstrapVersionRule: false, 55 | }), 56 | }); 57 | 58 | new AgreementStack(app, "AgreementStack", { // NOSONAR 59 | synthesizer: new DefaultStackSynthesizer({ 60 | generateBootstrapVersionRule: false, 61 | }), 62 | }); 63 | 64 | new SubscriberStack(app, "SubscriberStack", { // NOSONAR 65 | synthesizer: new DefaultStackSynthesizer({ 66 | generateBootstrapVersionRule: false, 67 | }), 68 | }); 69 | 70 | new PublisherStack(app, "PublisherStack", { // NOSONAR 71 | synthesizer: new DefaultStackSynthesizer({ 72 | generateBootstrapVersionRule: false, 73 | }), 74 | }); 75 | 76 | new ProvisionStack(app, "ProvisionStack", { // NOSONAR 77 | synthesizer: new DefaultStackSynthesizer({ 78 | generateBootstrapVersionRule: false, 79 | }), 80 | }); 81 | -------------------------------------------------------------------------------- /source/custom-resource/lib/metrics/index.spec.js: -------------------------------------------------------------------------------- 1 | /********************************************************************************************************************* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * 3 | * * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance * 5 | * with the License. A copy of the License is located at * 6 | * * 7 | * http://www.apache.org/licenses/LICENSE-2.0 * 8 | * * 9 | * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES * 10 | * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions * 11 | * and limitations under the License. 
* 12 | *********************************************************************************************************************/ 13 | 14 | const axios = require('axios'); 15 | const expect = require('chai').expect; 16 | const MockAdapter = require('axios-mock-adapter'); 17 | 18 | const lambda = require('./index.js'); 19 | 20 | const _config = { 21 | SolutionId: 'SO0133', 22 | UUID: '999-999', 23 | ServiceToken: 'lambda-arn', 24 | Resource: 'AnonymizedMetric' 25 | }; 26 | 27 | describe('#SEND METRICS', () => { 28 | it('should return "200" on a send metrics success', async () => { 29 | const mock = new MockAdapter(axios); 30 | mock.onPost().reply(200, {}); 31 | 32 | lambda.send(_config, (_err, res) => { 33 | expect(res).to.equal(200); 34 | }); 35 | }); 36 | 37 | it('should return "Network Error" when the connection times out', async () => { 38 | const mock = new MockAdapter(axios); 39 | mock.onPut().networkError(); 40 | 41 | await lambda.send(_config).catch(err => { 42 | expect(err.toString()).to.equal('Error: Request failed with status code 404'); 43 | }); 44 | }); 45 | 46 | it('should remove ServiceToken and Resource from metrics data', () => { 47 | const sanitizedData = lambda.sanitizeData(_config); 48 | expect(sanitizedData.ServiceToken).to.be.undefined; 49 | expect(sanitizedData.Resource).to.be.undefined; 50 | }); 51 | }); 52 | -------------------------------------------------------------------------------- /source/custom-resource/index.js: -------------------------------------------------------------------------------- 1 | /********************************************************************************************************************* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * 3 | * * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance * 5 | * with the License. A copy of the License is located at * 6 | * * 7 | * http://www.apache.org/licenses/LICENSE-2.0 * 8 | * * 9 | * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES * 10 | * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions * 11 | * and limitations under the License. * 12 | *********************************************************************************************************************/ 13 | const cfn = require('./lib/cfn'); 14 | const Metrics = require('./lib/metrics'); 15 | const { v4: uuidv4 } = require('uuid'); 16 | 17 | exports.handler = async (event, context) => { 18 | console.log(`REQUEST:: ${JSON.stringify(event, null, 2)}`); 19 | let config = event.ResourceProperties; 20 | let responseData = {}; 21 | 22 | // Each resource handler returns a JSON object that is sent back to CloudFormation.
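  // For reference (derived from the handling below): ResourceProperties carries
  // Resource ('UUID' | 'AnonymizedMetric'), SendAnonymizedMetric ('Yes' opts in),
  // and, for the metrics case, SolutionId and UUID, which lib/metrics forwards;
  // any other Resource value just logs a message and reports SUCCESS.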
23 | try { 24 | console.log(`RESOURCE:: ${config.Resource}`); 25 | 26 | if (event.RequestType === 'Create') { 27 | switch (config.Resource) { 28 | case 'UUID': 29 | responseData = { UUID: uuidv4() }; 30 | break; 31 | 32 | case 'AnonymizedMetric': 33 | if (config.SendAnonymizedMetric === "Yes") { 34 | await Metrics.send(config); 35 | } 36 | break; 37 | default: 38 | console.log(config.Resource, ': not defined as a custom resource, sending success response'); 39 | } 40 | } 41 | 42 | const response = await cfn.send(event, context, 'SUCCESS', responseData); 43 | console.log(`RESPONSE:: ${JSON.stringify(responseData, null, 2)}`); 44 | console.log(`CFN STATUS:: ${response}`); 45 | } catch (err) { 46 | console.error(JSON.stringify(err, null, 2)); 47 | await cfn.send(event, context, 'FAILED'); 48 | } 49 | }; 50 | -------------------------------------------------------------------------------- /source/cdk/lib/mediasync/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | all: help 5 | 6 | help: 7 | @echo 'deploys MediaSync transfer utility' 8 | 9 | GUIDED ?= --guided 10 | IMAGEVERSION ?= latest 11 | ENV ?= dev 12 | VERSION ?= 1.2.0 13 | 14 | 15 | STACKPREFIX = mediaexchange-tools 16 | CURRENT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) 17 | IMAGENAME = mediasync/aws-cli 18 | ACCOUNT_ID = $(shell aws sts get-caller-identity --query Account --output text) 19 | 20 | PARAMETER_OVERRIDES := Environment=$(ENV) 21 | AWS_REGION ?= $(shell aws configure get region --output text) 22 | 23 | configure: 24 | @mkdir -p build 25 | 26 | %-image-build: 27 | @echo "Building docker image..." 28 | docker buildx build --platform=linux/amd64 -t $(IMAGENAME):$(IMAGEVERSION) copier/ 29 | 30 | %-image-push: %-image-build 31 | @echo "Publishing Docker image to registry..." 32 | @aws ecr get-login-password | docker login --username AWS --password-stdin $(ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com 33 | 34 | docker tag ${IMAGENAME}:latest $(ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/${IMAGENAME}:${IMAGEVERSION} 35 | docker push $(ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/${IMAGENAME}:${IMAGEVERSION} 36 | 37 | %-stack-build: deployment/%.json configure 38 | @echo "Building template..." 39 | @sam build -s $(CURRENT_DIR) -b $(CURRENT_DIR)/build/$*/ --template $(CURRENT_DIR)/$< --use-container 40 | 41 | @sed -i -e "s/__VERSION__/$(VERSION)/g" $(CURRENT_DIR)/build/$*/template.yaml 42 | 43 | 44 | %-stack-install: %-stack-build 45 | @echo "deploying cloudformation template" 46 | sam deploy -t $(CURRENT_DIR)/build/$*/template.yaml --stack-name $(STACKPREFIX)-$*-$(ENV) --no-confirm-changeset --no-fail-on-empty-changeset --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM --parameter-overrides $(PARAMETER_OVERRIDES) --config-env $* $(GUIDED) --region $(AWS_REGION) 47 | 48 | %-stack-delete: 49 | @echo "deleting cloudformation stack" 50 | aws cloudformation delete-stack --stack-name $(STACKPREFIX)-$*-$(ENV) 51 | aws cloudformation wait stack-delete-complete --stack-name $(STACKPREFIX)-$*-$(ENV) 52 | 53 | %-repository-clean: 54 | @echo "Cleaning docker images from registry..." 
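# ECR will not delete a repository that still contains images, so the loop
# below enumerates every image digest in the repository and batch-deletes
# them before the repository stack itself is removed by `clean`.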
55 | for tag in $(shell aws ecr list-images --repository-name $* --query "imageIds[].imageDigest" --output text); do \ 56 | aws ecr batch-delete-image --repository-name $* --image-ids imageDigest=$$tag; \ 57 | done 58 | 59 | mediasync-repository-stack-install: PARAMETER_OVERRIDES += 'RepositoryName=${IMAGENAME}' 60 | 61 | mediasync-stack-install: PARAMETER_OVERRIDES += 'ImageName=$(ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/$(IMAGENAME)' 62 | 63 | publish: mediasync-repository-stack-install $(IMAGENAME)-image-push 64 | install: mediasync-repository-stack-install $(IMAGENAME)-image-push mediasync-stack-install 65 | 66 | clean: mediasync-stack-delete $(IMAGENAME)-repository-clean mediasync-repository-stack-delete 67 | @rm -rf build/* 68 | 69 | .PHONY: configure install clean 70 | -------------------------------------------------------------------------------- /source/custom-resource/lib/cfn/index.spec.js: -------------------------------------------------------------------------------- 1 | /********************************************************************************************************************* 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * 3 | * * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance * 5 | * with the License. A copy of the License is located at * 6 | * * 7 | * http://www.apache.org/licenses/LICENSE-2.0 * 8 | * * 9 | * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES * 10 | * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions * 11 | * and limitations under the License. * 12 | *********************************************************************************************************************/ 13 | 14 | const axios = require('axios'); 15 | const expect = require('chai').expect; 16 | const MockAdapter = require('axios-mock-adapter'); 17 | 18 | const lambda = require('./index.js'); 19 | const _event = { 20 | RequestType: 'Create', 21 | ServiceToken: 'arn:aws:lambda', 22 | ResponseURL: 'https://cloudformation', 23 | StackId: 'arn:aws:cloudformation', 24 | RequestId: '63e8ffa2-3059-4607-a450-119d473c73bc', 25 | LogicalResourceId: 'Uuid', 26 | ResourceType: 'Custom::UUID', 27 | ResourceProperties: { 28 | ServiceToken: 'arn:aws:lambda', 29 | Resource: 'abc' 30 | } 31 | }; 32 | 33 | const _context = { 34 | logStreamName: 'cloudwatch' 35 | }; 36 | 37 | const _responseStatus = 'ok'; 38 | const _responseData = { 39 | test: 'testing' 40 | }; 41 | 42 | describe('#CFN RESPONSE::', () => { 43 | it('should return "200" on a send cfn response success', async () => { 44 | const mock = new MockAdapter(axios); 45 | mock.onPut().reply(200, {}); 46 | 47 | lambda.send(_event, _context, _responseStatus, _responseData, (err, res) => { 48 | expect(res.status).to.equal(200); 49 | }); 50 | }); 51 | 52 | it('should return "Network Error" when the connection times out', async () => { 53 | const mock = new MockAdapter(axios); 54 | mock.onPut().networkError(); 55 | 56 | await lambda.send(_event, _context, _responseStatus, _responseData).catch(err => { 57 | expect(err.toString()).to.equal('Error: Network Error'); 58 | }); 59 | }); 60 | }); 61 | -------------------------------------------------------------------------------- /source/cdk/lib/autoingest/README.md: -------------------------------------------------------------------------------- 1 | # AutoIngest 2 | 3 | ## Table of contents 4
| 5 | - [Solution Overview](#solution-overview) 6 | - [Architecture Diagram](#architecture-diagram) 7 | - [Customizing the Solution](#customizing-the-solution) 8 | - [Prerequisites for Customization](#prerequisites-for-customization) 9 | - [Deploy](#deploy) 10 | - [Cleanup](#cleanup) 11 | - [Cost](#cost) 12 | - [License](#license) 13 | 14 | 15 | 16 | # Solution Overview 17 | 18 | Subscribers to a MediaExchange bucket can use this component to automatically ingest assets from MediaExchange into a subscriber-owned S3 bucket. This optional component is deployed in the subscriber’s account. 19 | 20 | 21 | 22 | # Architecture Diagram 23 | 24 | ![Architecture](images/autoingest.jpeg) 25 | 26 | 27 | 28 | ## Customizing the Solution 29 | 30 | 31 | 32 | ### Prerequisites for Customization 33 | 34 | - GNU make 35 | - Docker desktop 36 | - Node 37 | - [AWS CDK](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html) 38 | - [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) 39 | - [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) 40 | 41 | 42 | 43 | ### Deploy 44 | 45 | - Initialize a shell with AWS credentials to deploy to the target (subscriber) account. You can do this by adding AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN as environment variables or by selecting the appropriate profile by setting the AWS_PROFILE environment variable. 46 | - Run the build script in the deployment folder of the root directory to generate template files for this solution (or use the existing ones in the development directory of this function). 47 | - At the command prompt type `make install`. 48 | - Follow the on-screen instructions for the configuration parameters: 49 | - Specify the source bucket name. 50 | - Specify the destination bucket name. 51 | - Specify the SNS topic ARN from the subscriber onboarding summary. 52 | - Specify the destination bucket prefix. 53 | 54 | 55 | 56 | ### Cleanup 57 | 58 | - Initialize a shell with AWS credentials to deploy to the target (subscriber) account. You can do this by adding AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN as environment variables or by selecting the appropriate profile by setting the AWS_PROFILE environment variable. 59 | - At the command prompt type `make clean`. 60 | 61 | 62 | 63 | ## Cost 64 | 65 | - S3 API pricing for GET / PUT. See [here](https://aws.amazon.com/s3/pricing/). 66 | - There is no cost for egress in the same region. 67 | - AWS Lambda pricing. See [here](https://aws.amazon.com/lambda/pricing/). 68 | 69 | 70 | 71 | # License 72 | 73 | See license [here](https://github.com/aws-solutions/media-exchange-on-aws/blob/main/LICENSE.txt) 74 | -------------------------------------------------------------------------------- /source/cdk/lib/fixity/Makefile: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | all: help 5 | 6 | help: 7 | @echo 'deploys fixity utility' 8 | 9 | GUIDED ?= --guided 10 | IMAGEVERSION ?= latest 11 | ENV ?= dev 12 | VERSION ?= 1.2.0 13 | 14 | 15 | STACKPREFIX = mediaexchange-tools 16 | CURRENT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) 17 | IMAGENAME = fixity/hasher 18 | ACCOUNT_ID = $(shell aws sts get-caller-identity --query Account --output text) 19 | 20 | PARAMETER_OVERRIDES := Environment=$(ENV) 21 | AWS_REGION ?= $(shell aws configure get region --output text) 22 | 23 | configure: 24 | @mkdir -p build 25 | 26 | %-image-build: 27 | @echo "Building docker image..." 28 | docker buildx build --platform=linux/amd64 -t $(IMAGENAME):$(IMAGEVERSION) hasher/ 29 | 30 | %-image-push: %-image-build 31 | @echo "Publishing Docker image to registry..." 32 | @aws ecr get-login-password | docker login --username AWS --password-stdin $(ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com 33 | 34 | docker tag ${IMAGENAME}:${IMAGEVERSION} $(ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/${IMAGENAME}:${IMAGEVERSION} 35 | docker push $(ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/${IMAGENAME}:${IMAGEVERSION} 36 | 37 | %-stack-build: deployment/%.json configure 38 | @echo "Building template..." 39 | @sam build -s $(CURRENT_DIR) -b $(CURRENT_DIR)/build/$*/ --template $(CURRENT_DIR)/$< --use-container 40 | 41 | @sed -i -e "s/__VERSION__/$(VERSION)/g" $(CURRENT_DIR)/build/$*/template.yaml 42 | 43 | %-stack-install: %-stack-build 44 | 45 | @echo "deploying cloudformation template" 46 | sam deploy -t $(CURRENT_DIR)/build/$*/template.yaml --stack-name $(STACKPREFIX)-$*-$(ENV) --no-confirm-changeset --no-fail-on-empty-changeset --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM --parameter-overrides $(PARAMETER_OVERRIDES) --config-env $* $(GUIDED) --region $(AWS_REGION) 47 | 48 | %-stack-delete: 49 | @echo "deleting cloudformation stack" 50 | aws cloudformation delete-stack --stack-name $(STACKPREFIX)-$*-$(ENV) 51 | aws cloudformation wait stack-delete-complete --stack-name $(STACKPREFIX)-$*-$(ENV) 52 | 53 | %-repository-clean: 54 | @echo "Cleaning docker images from registry..." 55 | for tag in $(shell aws ecr list-images --repository-name $* --query "imageIds[].imageDigest" --output text); do \ 56 | aws ecr batch-delete-image --repository-name $* --image-ids imageDigest=$$tag; \ 57 | done 58 | 59 | %-stack-outputs: 60 | @aws cloudformation describe-stacks --stack-name $(STACKPREFIX)-$*-$(ENV) --query "Stacks[0].Outputs[]" --output table 61 | 62 | fixity-repository-stack-install: PARAMETER_OVERRIDES += 'RepositoryName=${IMAGENAME}' 63 | fixity-stack-install: PARAMETER_OVERRIDES += 'ImageName=$(ACCOUNT_ID).dkr.ecr.$(AWS_REGION).amazonaws.com/$(IMAGENAME)' 64 | 65 | install: fixity-repository-stack-install $(IMAGENAME)-image-push fixity-stack-install 66 | 67 | outputs: fixity-stack-outputs 68 | 69 | clean: fixity-stack-delete $(IMAGENAME)-repository-clean fixity-repository-stack-delete 70 | @rm -rf build/* 71 | 72 | .PHONY: configure install clean 73 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community.
5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *main* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
61 | -------------------------------------------------------------------------------- /source/cdk/test/cdk.test.ts: -------------------------------------------------------------------------------- 1 | import { Template } from "aws-cdk-lib/assertions"; 2 | import * as MediaExchange from "../lib/me-stack"; 3 | import * as Agreement from "../lib/agreement-stack"; 4 | import * as Provision from "../lib/provision-stack"; 5 | import * as Subscriber from "../lib/subscriber-stack"; 6 | import * as Publisher from "../lib/publisher-stack"; 7 | import * as Fixity from "../lib/fixity/fixity-stack"; 8 | import * as FixityRepo from "../lib/fixity/fixity-repository-stack"; 9 | import * as MediaSync from "../lib/mediasync/mediasync-stack"; 10 | import * as MediaSyncRepo from "../lib/mediasync/mediasync-repository-stack"; 11 | import * as Autoingest from "../lib/autoingest/autoingest-stack"; 12 | import { Stack } from "aws-cdk-lib"; 13 | 14 | const regexHashedFileName = /[A-Fa-f0-9]{64}(\.[a-z]{3,4})$/; 15 | const replaceHashedName = "[HASH REMOVED]$1"; 16 | 17 | expect.addSnapshotSerializer({ 18 | test: (val) => typeof val === 'string' && regexHashedFileName.test(val), 19 | serialize: (val) => JSON.stringify(val.replace(regexHashedFileName, replaceHashedName)), 20 | }); 21 | 22 | test("ME Stack Test", () => { 23 | const stack = new Stack(); 24 | const meTest = new MediaExchange.MEStack(stack, "MediaExchange"); 25 | const template = Template.fromStack(meTest); 26 | expect(template.toJSON()).toMatchSnapshot(); 27 | }); 28 | 29 | test("Agreement Stack Test", () => { 30 | const stack = new Stack(); 31 | const agreementTest = new Agreement.AgreementStack(stack, "Agreement"); 32 | const template = Template.fromStack(agreementTest); 33 | expect(template.toJSON()).toMatchSnapshot(); 34 | }); 35 | 36 | test("Provision Stack Test", () => { 37 | const stack = new Stack(); 38 | const provisionTest = new Provision.ProvisionStack(stack, "Provision"); 39 | const template = Template.fromStack(provisionTest); 40 | expect(template.toJSON()).toMatchSnapshot(); 41 | }); 42 | 43 | test("Subscriber Stack Test", () => { 44 | const stack = new Stack(); 45 | const subscriberTest = new Subscriber.SubscriberStack(stack, "Subscriber"); 46 | const template = Template.fromStack(subscriberTest); 47 | expect(template.toJSON()).toMatchSnapshot(); 48 | }); 49 | 50 | test("Publisher Stack Test", () => { 51 | const stack = new Stack(); 52 | const publisherTest = new Publisher.PublisherStack(stack, "Publisher"); 53 | const template = Template.fromStack(publisherTest); 54 | expect(template.toJSON()).toMatchSnapshot(); 55 | }); 56 | 57 | test("Fixity Stack Test", () => { 58 | const stack = new Stack(); 59 | const fixityTest = new Fixity.FixityStack(stack, "Fixity"); 60 | const template = Template.fromStack(fixityTest); 61 | expect(template.toJSON()).toMatchSnapshot(); 62 | }); 63 | 64 | test("Fixity Repo Stack Test", () => { 65 | const stack = new Stack(); 66 | const fixityRepoTest = new FixityRepo.FixityRepositoryStack( 67 | stack, 68 | "FixityRepo" 69 | ); 70 | const template = Template.fromStack(fixityRepoTest); 71 | expect(template.toJSON()).toMatchSnapshot(); 72 | }); 73 | 74 | test("MediaSync Stack Test", () => { 75 | const stack = new Stack(); 76 | const mediaSyncTest = new MediaSync.MediaSyncStack(stack, "MediaSync"); 77 | const template = Template.fromStack(mediaSyncTest); 78 | expect(template.toJSON()).toMatchSnapshot(); 79 | }); 80 | 81 | test("Mediasync Repo Stack Test", () => { 82 | const stack = new Stack(); 83 | const 
mediaSyncRepoTest = new MediaSyncRepo.MediaSyncRepositoryStack( 84 | stack, 85 | "MediaSyncRepo" 86 | ); 87 | const template = Template.fromStack(mediaSyncRepoTest); 88 | expect(template.toJSON()).toMatchSnapshot(); 89 | }); 90 | 91 | test("AutoIngest Stack Test", () => { 92 | const stack = new Stack(); 93 | const autoIngestTest = new Autoingest.AutoIngestStack(stack, "AutoIngest"); 94 | const template = Template.fromStack(autoIngestTest); 95 | expect(template.toJSON()).toMatchSnapshot(); 96 | }); 97 | -------------------------------------------------------------------------------- /tests/deployment/testrole.yaml: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | AWSTemplateFormatVersion: '2010-09-09' 5 | Description: > 6 | CloudFormation template for the Media Exchange publisher/subscriber roles used for testing. 7 | 8 | 9 | Outputs: 10 | SubscriberRoleArn: 11 | Description: Subscriber Role Arn 12 | Value: 13 | Fn::GetAtt: 14 | - SubscriberRole 15 | - Arn 16 | PublisherRoleArn: 17 | Description: Publisher Role Arn 18 | Value: 19 | Fn::GetAtt: 20 | - PublisherRole 21 | - Arn 22 | 23 | Metadata: 24 | 25 | License: 26 | Description: | 27 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 28 | 29 | Licensed under the Apache License, Version 2.0 (the "License"). 30 | You may not use this file except in compliance with the License. 31 | You may obtain a copy of the License at 32 | 33 | http://www.apache.org/licenses/LICENSE-2.0 34 | 35 | Unless required by applicable law or agreed to in writing, software 36 | distributed under the License is distributed on an "AS IS" BASIS, 37 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 38 | See the License for the specific language governing permissions and 39 | limitations under the License. 40 | 41 | Parameters: 42 | TestAccountId: 43 | Type: String 44 | Description: The AWS account ID of the MediaExchange account.
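    # Note (added for clarity): the roles below trust this account ID; per docs/developer.md, this is the MediaExchange account from which the test script assumes these roles.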
45 | 46 | Resources: 47 | 48 | SubscriberRole: 49 | Type: AWS::IAM::Role 50 | Properties: 51 | RoleName: subscriber-role 52 | AssumeRolePolicyDocument: 53 | Statement: 54 | - 55 | Effect: Allow 56 | Action: sts:AssumeRole 57 | Principal: 58 | AWS: 59 | Fn::Sub: ${TestAccountId} 60 | Version: "2012-10-17" 61 | Path: / 62 | Policies: 63 | - 64 | PolicyName: KMSAndS3 65 | PolicyDocument: 66 | Version: 2012-10-17 67 | Statement: 68 | - 69 | Effect: Allow 70 | Action: 71 | - s3:Get* 72 | - s3:List* 73 | Resource: '*' 74 | - 75 | Effect: Allow 76 | Action: 77 | - kms:Decrypt 78 | - kms:DescribeKey 79 | Resource: '*' 80 | 81 | 82 | PublisherRole: 83 | Type: AWS::IAM::Role 84 | Properties: 85 | RoleName: publisher-role 86 | AssumeRolePolicyDocument: 87 | Statement: 88 | - 89 | Effect: Allow 90 | Action: sts:AssumeRole 91 | Principal: 92 | AWS: 93 | Fn::Sub: ${TestAccountId} 94 | Version: "2012-10-17" 95 | Path: / 96 | Policies: 97 | - 98 | PolicyName: KMSAndS3 99 | PolicyDocument: 100 | Version: 2012-10-17 101 | Statement: 102 | - 103 | Effect: Allow 104 | Action: 105 | - s3:Get* 106 | - s3:List* 107 | Resource: '*' 108 | - 109 | Effect: Allow 110 | Action: 111 | - s3:Put* 112 | - s3:Delete* 113 | - s3:AbortMultipartUpload 114 | - s3:ListMultipartUploadParts 115 | Resource: 'arn:aws:s3:::*' 116 | - 117 | Effect: Allow 118 | Action: 119 | - kms:Encrypt 120 | - kms:Decrypt 121 | - kms:ReEncrypt* 122 | - kms:GenerateDataKey* 123 | - kms:DescribeKey 124 | Resource: '*' 125 | -------------------------------------------------------------------------------- /docs/developer.md: -------------------------------------------------------------------------------- 1 | ## Table of contents 2 | 3 | - [Customizing the Solution](#customizing-the-solution) 4 | - [Prerequisites](#prerequisites) 5 | - [Deploy](#deploy) 6 | - [Setup](#setup) 7 | - [Testing](#testing) 8 | - [Cleanup](#cleanup) 9 | - [Usage](#usage) 10 | - [Developer Mode](#developer-mode) 11 | 12 | 13 | 14 | # Customizing the Solution 15 | 16 | 17 | 18 | ## Prerequisites 19 | 20 | - GNU make 21 | - Docker desktop 22 | - Node 23 | - [AWS CDK](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html) 24 | - [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) 25 | - [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) 26 | 27 | 28 | 29 | ## Deploy 30 | 31 | - Initialize a shell with the necessary credentials to deploy to the MediaExchange account. You can do this by adding AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN as environment variables or by selecting the appropriate profile by setting the AWS_PROFILE environment variable. 32 | - At the command prompt type `make install`. 33 | - Follow the on-screen instructions for configuration parameters. 34 | 35 | 36 | 37 | ## Setup 38 | 39 | - After install, at the command prompt type `make provision`. 40 | - Follow the on-screen instructions for configuration parameters. 41 | Note: The Service Catalog interface is the preferred way of onboarding. 42 | 43 | 44 | 45 | # Developer Mode 46 | 47 | This method bypasses the Service Catalog setup to deploy a single publisher, subscriber and transfer agreement. 48 | 49 | - Initialize a shell with the necessary credentials for the publisher account. You can do this by adding AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN as environment variables or by selecting the appropriate profile by setting the AWS_PROFILE environment variable. 50 | 51 | 1.
Navigate to the MediaExchangeOnAWS (root) directory. 52 | 1. `make testrole-stack` 53 | 1. Enter the MediaExchange ACCOUNT_ID for parameter TestAccountId. 54 | 1. Enter 'n' for "Save arguments to configuration file" (Y/n) 55 | 56 | - Initialize a shell with the necessary credentials for the subscriber account. 57 | 58 | 1. Navigate to the MediaExchangeOnAWS (root) directory. 59 | 1. `make testrole-stack` 60 | 1. Enter the MediaExchange ACCOUNT_ID for parameter TestAccountId. 61 | 1. Enter 'n' for "Save arguments to configuration file" (Y/n) 62 | 63 | - Initialize a shell with the necessary credentials for the MediaExchange account. 64 | 1. Navigate to the MediaExchangeOnAWS (root) directory. 65 | 1. `make quickstart` 66 | 1. Follow the instructions to provide publisher and subscriber information. The default values are printed out for the MediaExchange ACCOUNT_ID. 67 | 68 | 69 | 70 | ## Testing 71 | 72 | The tests are run from the MediaExchange account. The test script assumes a role in the publisher and subscriber accounts to run the tests. 73 | 74 | - Initialize a shell with the necessary credentials for the MediaExchange account. 75 | 1. Navigate to the MediaExchangeOnAWS (root) directory. 76 | 1. `make test` 77 | 78 | 79 | 80 | # Usage 81 | 82 | - Sharing assets 83 | 84 | ``` 85 | $ aws s3 cp <media file> s3://<mediaexchange bucket>/<prefix>/ 86 | ``` 87 | 88 | - Receiving assets 89 | 90 | ``` 91 | $ aws s3 cp s3://<mediaexchange bucket>/<media file> <download path> 92 | ``` 93 | 94 | 95 | 96 | ## Cleanup 97 | 98 | - Initialize a shell with the necessary credentials for the MediaExchange account. You can do this by adding AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN as environment variables or by selecting the appropriate profile by setting the AWS_PROFILE environment variable. 99 | - At the command prompt type `make clean`. 100 | - The uninstall process retains certain S3 buckets. These buckets need to be cleaned up manually. 101 | - Find the bucket name(s) with mediaexchange and delete their contents. 102 | - Versioned buckets that fail to delete with the command above will require additional steps to clean up. Please refer to the instructions [here](https://docs.aws.amazon.com/AmazonS3/latest/userguide/RemDelMarker.html). 103 | -------------------------------------------------------------------------------- /tests/python/quickstart_test.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | import json 5 | import pytest 6 | import os 7 | import boto3 8 | import time 9 | import requests 10 | import hashlib 11 | import jsonpickle 12 | import tempfile 13 | 14 | @pytest.fixture() 15 | def config(): 16 | tmpdir = tempfile.mkdtemp() 17 | predictable_filename = 'media-exchange-test-object' 18 | 19 | path = os.path.join(tmpdir, predictable_filename) 20 | 21 | onboarding_info = {} 22 | onboarding_info['FILE_NAME'] = path 23 | 24 | with open(onboarding_info['FILE_NAME'], 'wb') as f: 25 | f.write(os.urandom(1024*100)) #100 KB 26 | 27 | hasher = hashlib.sha256() 28 | with open(onboarding_info['FILE_NAME'], 'rb') as f: 29 | hasher.update(f.read()) 30 | 31 | onboarding_info['CHECKSUM'] = hasher.hexdigest() 32 | 33 | with open('./publisher.env') as f: 34 | data = f.read() 35 | parts = data.split(' ') 36 | for part in parts: 37 | v = part.split('=') 38 | onboarding_info[v[0]] = v[1].strip() 39 | 40 | with open('./subscriber.env') as f: 41 | data = f.read() 42 | parts = data.split(' ') 43 | for part in parts: 44 | v = part.split('=') 45 | onboarding_info[v[0]] = v[1].strip() 46 | 47 | #override PUBLISHER_ROLE 48 | publisher_role = onboarding_info['PUBLISHER_ROLE'] 49 | if publisher_role.endswith(':root'): 50 | onboarding_info['PUBLISHER_ROLE'] = publisher_role.replace(":root", ":role/publisher-role") 51 | 52 | #override SUBSCRIBER_ROLE 53 | subscriber_role = onboarding_info['SUBSCRIBER_ROLE'] 54 | if subscriber_role.endswith(':root'): 55 | onboarding_info['SUBSCRIBER_ROLE'] = subscriber_role.replace(":root", ":role/subscriber-role") 56 | 57 | yield onboarding_info 58 | 59 | os.remove(onboarding_info['FILE_NAME']) 60 | 61 | 62 | def test_push_pull(config): 63 | 64 | sts = boto3.client("sts") 65 | resp = sts.assume_role( 66 | RoleArn=config['PUBLISHER_ROLE'], 67 | RoleSessionName="mediaexchange-test-session" 68 | ) 69 | 70 | session = boto3.session.Session(aws_access_key_id=resp['Credentials']['AccessKeyId'], aws_secret_access_key=resp['Credentials']['SecretAccessKey'], aws_session_token=resp['Credentials']['SessionToken'], region_name=config['AWS_REGION']) 71 | 72 | s3_client = session.client('s3') 73 | with open(config['FILE_NAME'], 'rb') as f: 74 | s3_client.put_object( 75 | Body=f, 76 | Bucket=config['MEDIAEXCHANGE_BUCKET_NAME'], 77 | Key=config['FILE_NAME'] 78 | ) 79 | 80 | resp = sts.assume_role( 81 | RoleArn=config['SUBSCRIBER_ROLE'], 82 | RoleSessionName="mediaexchange-test-session" 83 | ) 84 | 85 | session = boto3.session.Session(aws_access_key_id=resp['Credentials']['AccessKeyId'], aws_secret_access_key=resp['Credentials']['SecretAccessKey'], aws_session_token=resp['Credentials']['SessionToken'], region_name=config['AWS_REGION']) 86 | 87 | s3_client = session.client('s3') 88 | resp = s3_client.list_objects_v2( 89 | Bucket=config['MEDIAEXCHANGE_BUCKET_NAME'], 90 | FetchOwner=True 91 | ) 92 | found = False 93 | for content in resp['Contents']: 94 | if content['Key'] == config['FILE_NAME']: 95 | found = True 96 | assert found 97 | 98 | s3 = session.resource('s3') 99 | test_object = s3.Object(config['MEDIAEXCHANGE_BUCKET_NAME'],config['FILE_NAME']) 100 | test_object.download_file(config['FILE_NAME']+'.1') 101 | 102 | hasher = hashlib.sha256() 103 | with open(config['FILE_NAME']+'.1', 'rb') as f: 104 | hasher.update(f.read()) 105 | 106 | assert config['CHECKSUM'] == hasher.hexdigest() 107 | 108 | #delete object 109 | resp = sts.assume_role( 110 | RoleArn=config['PUBLISHER_ROLE'], 111 | RoleSessionName="mediaexchange-test-session" 112 | ) 113 |
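    # Re-assume the publisher role for cleanup: per the test IAM policies in tests/deployment/testrole.yaml, only the publisher role has s3:Delete* on the exchange bucket.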
114 | session = boto3.session.Session(aws_access_key_id=resp['Credentials']['AccessKeyId'], aws_secret_access_key=resp['Credentials']['SecretAccessKey'], aws_session_token=resp['Credentials']['SessionToken'], region_name=config['AWS_REGION']) 115 | 116 | s3_client = session.client('s3') 117 | s3_client.delete_object(Bucket=config['MEDIAEXCHANGE_BUCKET_NAME'],Key=config['FILE_NAME']) 118 | -------------------------------------------------------------------------------- /deployment/run-unit-tests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | # SPDX-License-Identifier: Apache-2.0 4 | # This script should be run from the repo's deployment directory 5 | # cd deployment 6 | # ./run-unit-tests.sh 7 | 8 | # Run unit tests 9 | echo "Running unit tests" 10 | 11 | # Make sure working directory is the directory containing this script 12 | cd "$(dirname "${BASH_SOURCE[0]}")" 13 | 14 | 15 | echo "------------------------------------------------------------------------------" 16 | echo "Installing Dependencies And Testing CDK" 17 | echo "------------------------------------------------------------------------------" 18 | # Go to cdk directory 19 | cdk_dir=`cd ../source/cdk; pwd` 20 | custom_resource_dir=`cd ../source/custom-resource; pwd` 21 | cd "${cdk_dir}" 22 | 23 | prepare_jest_coverage_report() { 24 | local component_name=$1 25 | 26 | if [ ! -d "coverage" ]; then 27 | echo "ValidationError: Missing required directory coverage after running unit tests" 28 | exit 129 29 | fi 30 | 31 | # prepare coverage reports 32 | rm -fr ../coverage/lcov-report 33 | mkdir -p $coverage_reports_top_path/jest 34 | coverage_report_path=$coverage_reports_top_path/jest/$component_name 35 | rm -fr $coverage_report_path 36 | mv coverage $coverage_report_path 37 | rm -fr coverage 38 | } 39 | 40 | run_cdk_project_test() { 41 | local component_path="$1" 42 | local component_path_name="$2" 43 | local component_name=solutions-constructs-${component_path_name} 44 | 45 | echo "------------------------------------------------------------------------------" 46 | echo "[Test] $component_name" 47 | echo "------------------------------------------------------------------------------" 48 | cd "$component_path" 49 | 50 | # install and build for unit testing 51 | npm install 52 | 53 | # run unit tests 54 | npm run test 55 | 56 | # prepare coverage reports 57 | prepare_jest_coverage_report $component_name 58 | } 59 | 60 | # Get reference for source folder 61 | slnroot_dir="$(dirname "$cdk_dir")" 62 | coverage_reports_top_path="../coverage-reports" 63 | 64 | # Test the CDK project 65 | run_cdk_project_test "$cdk_dir" "cdk" 66 | run_cdk_project_test "$custom_resource_dir" "custom" 67 | 68 | # Make sure we clean up 69 | cleanup_before_exit() { 70 | cleanup $? 71 | } 72 | 73 | cleanup() { 74 | # Reset the signals to default behavior 75 | trap - SIGINT SIGTERM EXIT 76 | echo "------------------------------------------------------------------------------" 77 | echo "Cleaning up" 78 | echo "------------------------------------------------------------------------------" 79 | 80 | # Deactivate and remove the temporary python virtualenv used to run this script 81 | deactivate 82 | rm -rf $VENV 83 | rm -rf __pycache__ 84 | rm -rf .pytest_cache 85 | exit ${1:-0} 86 | } 87 | 88 | # Create and activate a temporary Python environment for this script. 
89 | 90 | echo "------------------------------------------------------------------------------" 91 | echo "Creating a temporary Python virtualenv for this script" 92 | echo "------------------------------------------------------------------------------" 93 | if [ -n "${VIRTUAL_ENV:-}" ]; then 94 | echo "ERROR: Do not run this script inside Virtualenv. Type \`deactivate\` and run again."; 95 | exit 1; 96 | fi 97 | if ! command -v python3 &>/dev/null; then 98 | echo "ERROR: install Python3 before running this script" 99 | exit 1 100 | fi 101 | VENV=$(mktemp -d) 102 | python3 -m venv $VENV 103 | source $VENV/bin/activate 104 | 105 | # Trap exits so we are sure to clean up the virtual environment 106 | trap cleanup_before_exit SIGINT SIGTERM EXIT 107 | 108 | # Install packages into the virtual environment 109 | pushd ../../source 110 | pip3 install \ 111 | moto \ 112 | mock \ 113 | coverage \ 114 | pylint \ 115 | pytest \ 116 | jsonpickle 117 | popd 118 | 119 | if [ $? -ne 0 ]; then 120 | echo "ERROR: Failed to install required Python libraries." 121 | exit 1 122 | fi 123 | 124 | 125 | # Unit tests for python lambdas 126 | source_dir=`cd ../source; pwd` 127 | coverage_report_path_var="../source/coverage.xml" 128 | coverage run -m pytest 129 | coverage xml -i 130 | sed -i.orig -e "s,$source_dir,source,g" $coverage_report_path_var 131 | rm -f ../source/*.orig 132 | mv $coverage_report_path_var coverage-reports/coverage.xml 133 | 134 | 135 | 136 | cleanup $? -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build/ 2 | .aws-sam 3 | .scannerwork 4 | # Created by https://www.gitignore.io/api/osx,linux,python,windows,pycharm,visualstudiocode 5 | 6 | ### Linux ### 7 | *~ 8 | 9 | # temporary files which can be created if a process still has a handle open of a deleted file 10 | .fuse_hidden* 11 | 12 | # KDE directory preferences 13 | .directory 14 | 15 | # Linux trash folder which might appear on any partition or disk 16 | .Trash-* 17 | 18 | # .nfs files are created when an open file is removed but is still being accessed 19 | .nfs* 20 | 21 | ### OSX ### 22 | *.DS_Store 23 | .AppleDouble 24 | .LSOverride 25 | 26 | # Icon must end with two \r 27 | Icon 28 | 29 | # Thumbnails 30 | ._* 31 | 32 | # Files that might appear in the root of a volume 33 | .DocumentRevisions-V100 34 | .fseventsd 35 | .Spotlight-V100 36 | .TemporaryItems 37 | .Trashes 38 | .VolumeIcon.icns 39 | .com.apple.timemachine.donotpresent 40 | 41 | # Directories potentially created on remote AFP share 42 | .AppleDB 43 | .AppleDesktop 44 | Network Trash Folder 45 | Temporary Items 46 | .apdisk 47 | 48 | ### PyCharm ### 49 | # Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and Webstorm 50 | # Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 51 | 52 | # User-specific stuff: 53 | .idea/**/workspace.xml 54 | .idea/**/tasks.xml 55 | .idea/dictionaries 56 | 57 | # Sensitive or high-churn files: 58 | .idea/**/dataSources/ 59 | .idea/**/dataSources.ids 60 | .idea/**/dataSources.xml 61 | .idea/**/dataSources.local.xml 62 | .idea/**/sqlDataSources.xml 63 | .idea/**/dynamic.xml 64 | .idea/**/uiDesigner.xml 65 | 66 | # Gradle: 67 | .idea/**/gradle.xml 68 | .idea/**/libraries 69 | 70 | # CMake 71 | cmake-build-debug/ 72 | 73 | # Mongo Explorer plugin: 74 | .idea/**/mongoSettings.xml 75 | 76 | ## File-based project format: 77 | *.iws 78 | 79 | ##
Plugin-specific files: 80 | 81 | # IntelliJ 82 | /out/ 83 | 84 | # mpeltonen/sbt-idea plugin 85 | .idea_modules/ 86 | 87 | # JIRA plugin 88 | atlassian-ide-plugin.xml 89 | 90 | # Cursive Clojure plugin 91 | .idea/replstate.xml 92 | 93 | # Ruby plugin and RubyMine 94 | /.rakeTasks 95 | 96 | # Crashlytics plugin (for Android Studio and IntelliJ) 97 | com_crashlytics_export_strings.xml 98 | crashlytics.properties 99 | crashlytics-build.properties 100 | fabric.properties 101 | 102 | ### PyCharm Patch ### 103 | # Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721 104 | 105 | # *.iml 106 | # modules.xml 107 | # .idea/misc.xml 108 | # *.ipr 109 | 110 | # Sonarlint plugin 111 | .idea/sonarlint 112 | 113 | ### Python ### 114 | # Byte-compiled / optimized / DLL files 115 | __pycache__/ 116 | *.py[cod] 117 | *$py.class 118 | 119 | # node modules 120 | node_modules 121 | 122 | # C extensions 123 | *.so 124 | 125 | # Distribution / packaging 126 | .Python 127 | build/ 128 | develop-eggs/ 129 | dist/ 130 | downloads/ 131 | eggs/ 132 | .eggs/ 133 | lib64/ 134 | parts/ 135 | sdist/ 136 | var/ 137 | wheels/ 138 | *.egg-info/ 139 | .installed.cfg 140 | *.egg 141 | 142 | # PyInstaller 143 | # Usually these files are written by a python script from a template 144 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 145 | *.manifest 146 | *.spec 147 | 148 | # Installer logs 149 | pip-log.txt 150 | pip-delete-this-directory.txt 151 | 152 | # Unit test / coverage reports 153 | htmlcov/ 154 | .tox/ 155 | .coverage 156 | .coverage.* 157 | .cache 158 | .pytest_cache/ 159 | nosetests.xml 160 | coverage.xml 161 | *.cover 162 | .hypothesis/ 163 | 164 | # Translations 165 | *.mo 166 | *.pot 167 | 168 | # Flask stuff: 169 | instance/ 170 | .webassets-cache 171 | 172 | # Scrapy stuff: 173 | .scrapy 174 | 175 | # Sphinx documentation 176 | docs/_build/ 177 | 178 | # PyBuilder 179 | target/ 180 | 181 | # Jupyter Notebook 182 | .ipynb_checkpoints 183 | 184 | # pyenv 185 | .python-version 186 | 187 | # celery beat schedule file 188 | celerybeat-schedule.* 189 | 190 | # SageMath parsed files 191 | *.sage.py 192 | 193 | # Environments 194 | .env 195 | .venv 196 | env/ 197 | venv/ 198 | ENV/ 199 | env.bak/ 200 | venv.bak/ 201 | 202 | # Spyder project settings 203 | .spyderproject 204 | .spyproject 205 | 206 | # Rope project settings 207 | .ropeproject 208 | 209 | # mkdocs documentation 210 | /site 211 | 212 | # mypy 213 | .mypy_cache/ 214 | 215 | ### VisualStudioCode ### 216 | .vscode/* 217 | !.vscode/settings.json 218 | !.vscode/tasks.json 219 | !.vscode/launch.json 220 | !.vscode/extensions.json 221 | .history 222 | 223 | ### Windows ### 224 | # Windows thumbnail cache files 225 | Thumbs.db 226 | ehthumbs.db 227 | ehthumbs_vista.db 228 | 229 | # Folder config file 230 | Desktop.ini 231 | 232 | # Recycle Bin used on file shares 233 | $RECYCLE.BIN/ 234 | 235 | # Windows Installer files 236 | *.cab 237 | *.msi 238 | *.msm 239 | *.msp 240 | 241 | # Windows shortcuts 242 | *.lnk 243 | 244 | # Build folder 245 | 246 | */build/* 247 | 248 | # End of https://www.gitignore.io/api/osx,linux,python,windows,pycharm,visualstudiocode 249 | 250 | *.toml 251 | 252 | *.env 253 | media-exchange-base/test/template.yaml 254 | .classpath 255 | .gradle 256 | .project 257 | .vscode 258 | .factorypath 259 | .settings 260 | build.gradle 261 | checkstyle.* 262 | gradle/* 263 | 264 | .envrc 265 | 266 | **/global-s3-assets 267 | **/regional-s3-assets 268 | **/open-source 269 | 270 | 
cleanup.txt 271 | 272 | # lambda assets 273 | asset*/ 274 | 275 | # coverage 276 | coverage-reports 277 | 278 | # optional deployments 279 | **/autoingest/deployment 280 | **/fixity/deployment 281 | **/mediasync/deployment 282 | 283 | -------------------------------------------------------------------------------- /source/cdk/lib/autoingest/lambda/autoingest_driver/app.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | import os 5 | import logging 6 | import boto3 7 | import jsonpickle 8 | from botocore.exceptions import ClientError 9 | from time import sleep 10 | import urllib 11 | from random import randint 12 | from botocore import config 13 | 14 | logger = logging.getLogger() 15 | logger.setLevel(os.environ['LogLevel']) 16 | 17 | solution_identifier = os.environ['SOLUTION_IDENTIFIER'] 18 | 19 | user_agent_extra_param = {"user_agent_extra":solution_identifier} 20 | 21 | presetConfig = config.Config() 22 | if os.environ['SendAnonymizedMetric'] == 'Yes': 23 | presetConfig = config.Config(**user_agent_extra_param) 24 | 25 | s3client = boto3.client('s3', config=presetConfig) 26 | 27 | 28 | def match_bucket_name(source_bucket): 29 | if (source_bucket != os.environ['SOURCE_BUCKET_NAME']): 30 | raise ClientError({ 31 | 'Error': { 32 | 'Code': '400', 33 | 'Message': 'source bucket name does not match the environment variable SOURCE_BUCKET_NAME\r' + source_bucket + ' != ' + os.environ['SOURCE_BUCKET_NAME'] 34 | }, 35 | 'ResponseMetadata': {} 36 | }, 'match_bucket_name') 37 | 38 | 39 | def check_object(source_bucket,source_key): 40 | 41 | pre_flight_response = s3client.head_object( 42 | Bucket=source_bucket, 43 | Key=source_key 44 | ) 45 | logger.debug('## PREFLIGHT_RESPONSE\r' + jsonpickle.encode(dict(**pre_flight_response))) 46 | 47 | size = pre_flight_response['ContentLength'] 48 | #1 TB 49 | if (size > 1099511627776): 50 | logger.warning("the object size is " + str(size) + ". The lambda function may time out.") 51 | 52 | def copy_object(source_bucket, source_key, source_version, destination_bucket, prefix): 53 | 54 | s3client.copy(CopySource={'Bucket': source_bucket,'Key': source_key, 'VersionId': source_version}, Bucket=destination_bucket, Key='{}/{}'.format(prefix,source_key)) 55 | 56 | 57 | def lambda_handler(event, _): 58 | 59 | logger.debug('## EVENT\r' + jsonpickle.encode(dict(**event))) 60 | 61 | # there is never more than one record in the payload!
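    # For reference (illustrative shape, taken from test_autoingest.py): each SQS
    # record body is an SNS notification envelope whose 'Message' field is itself
    # a JSON-encoded S3 event, which is why the handler below decodes twice:
    #   body -> {"Type": "Notification", "Message": "{\"bucket\": ..., \"object\": ..., \"reason\": \"PutObject\"}"}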
62 | try: 63 | records = event['Records'] 64 | if not records: 65 | raise ClientError({ 66 | 'Error': { 67 | 'Code': '400', 68 | 'Message': 'no records found in EVENT\r' + jsonpickle.encode(dict(**event)) 69 | }, 70 | 'ResponseMetadata': {} 71 | }, 'lambda_handler') 72 | 73 | record = records[0] 74 | 75 | message = jsonpickle.decode(jsonpickle.decode(record['body'])['Message']) 76 | 77 | logger.info('## MESSAGE\r' + jsonpickle.encode(dict(**message))) 78 | 79 | source_bucket = message['bucket']['name'] 80 | source_key = urllib.parse.unquote_plus(message['object']['key']) 81 | source_version = message['object']['version-id'] 82 | 83 | result_code = '0' 84 | result_string = 'Successfully copied' 85 | 86 | 87 | 88 | match_bucket_name(source_bucket) 89 | 90 | 91 | if (message['reason'] == 'PutObject' or message['reason'] == 'CopyObject' or message['reason'] == 'CompleteMultipartUpload'): 92 | check_object(source_bucket, source_key) 93 | copy_object(source_bucket, source_key, source_version, os.environ['DESTINATION_BUCKET_NAME'], os.environ['DESTINATION_PREFIX']) 94 | return {'ResultCode': result_code, 'ResultString': result_string} 95 | else: 96 | result_code = '-1' 97 | result_string = 'did not process ' + message['reason'] + ' event' 98 | 99 | except ClientError as e: 100 | # If request timed out, mark as a temp failure 101 | # and S3 Batch Operations will mark the task for retry. If 102 | # any other exceptions are received, mark as permanent failure. 103 | error_code = e.response['Error']['Code'] 104 | error_message = e.response['Error']['Message'] 105 | 106 | logger.debug(error_message) 107 | 108 | if error_code == 'TooManyRequestsException': 109 | result_code = 'TemporaryFailure' 110 | result_string = 'Retry request to batch due to throttling.' 111 | elif error_code == 'RequestTimeout': 112 | result_code = 'TemporaryFailure' 113 | result_string = 'Retry request to Amazon S3 due to timeout.' 114 | elif (error_code == '304'): 115 | result_code = 'Succeeded' 116 | result_string = 'Not modified' 117 | elif (error_code == '400'): 118 | result_code = 'Succeeded' 119 | result_string = error_message 120 | elif (error_code == 'SlowDown'): 121 | result_code = 'TemporaryFailure' 122 | result_string = 'Retry request to s3 due to throttling.' 123 | else: 124 | result_code = 'PermanentFailure' 125 | result_string = '{}: {}'.format(error_code, error_message) 126 | 127 | if (result_code == 'TemporaryFailure'): 128 | #cooloff anytime between 1-10s. SQS does not support exponential backoff based retry 129 | logger.info("cooloff..") 130 | sleep(randint(1,10)) # NOSONAR 131 | #retry 132 | raise 133 | 134 | except Exception as e: 135 | # Catch all exceptions to permanently fail the task 136 | result_code = 'PermanentFailure' 137 | result_string = 'Exception: {}'.format(e) 138 | #absorb the error 139 | 140 | finally: 141 | logger.info(result_code + " # " + result_string + " # " + source_key) 142 | -------------------------------------------------------------------------------- /deployment/cdk-solution-helper/README.md: -------------------------------------------------------------------------------- 1 | # cdk-solution-helper 2 | 3 | A lightweight helper function that cleans up synthesized templates from the AWS Cloud Development Kit (CDK) and prepares 4 | them for use with the AWS Solutions publishing pipeline.
This function performs the following tasks: 5 | 6 | #### Lambda function preparation 7 | 8 | Replaces the AssetParameter-style properties that identify source code for Lambda functions with the common variables 9 | used by the AWS Solutions publishing pipeline. 10 | 11 | - `Code.S3Bucket` is assigned the `%%BUCKET_NAME%%` placeholder value. 12 | - `Code.S3Key` is assigned the `%%SOLUTION_NAME%%`/`%%VERSION%%` placeholder value. 13 | - `Handler` is given a prefix identical to the artifact hash, enabling the Lambda function to properly find the handler in the extracted source code package. 14 | 15 | These placeholders are then replaced with the appropriate values using the default find/replace operation run by the pipeline. 16 | 17 | Before: 18 | 19 | ``` 20 | "examplefunction67F55935": { 21 | "Type": "AWS::Lambda::Function", 22 | "Properties": { 23 | "Code": { 24 | "S3Bucket": { 25 | "Ref": "AssetParametersd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7S3Bucket54E71A95" 26 | }, 27 | "S3Key": { 28 | "Fn::Join": [ 29 | "", 30 | [ 31 | { 32 | "Fn::Select": [ 33 | 0, 34 | { 35 | "Fn::Split": [ 36 | "||", 37 | { 38 | "Ref": "AssetParametersd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7S3VersionKeyC789D8B1" 39 | } 40 | ] 41 | } 42 | ] 43 | }, 44 | { 45 | "Fn::Select": [ 46 | 1, 47 | { 48 | "Fn::Split": [ 49 | "||", 50 | { 51 | "Ref": "AssetParametersd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7S3VersionKeyC789D8B1" 52 | } 53 | ] 54 | } 55 | ] 56 | } 57 | ] 58 | ] 59 | } 60 | }, ... 61 | Handler: "index.handler", ... 62 | ``` 63 | 64 | After helper function run: 65 | 66 | ``` 67 | "examplefunction67F55935": { 68 | "Type": "AWS::Lambda::Function", 69 | "Properties": { 70 | "Code": { 71 | "S3Bucket": "%%BUCKET_NAME%%", 72 | "S3Key": "%%SOLUTION_NAME%%/%%VERSION%%/assetd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7.zip" 73 | }, ... 74 | "Handler": "assetd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7/index.handler" 75 | ``` 76 | 77 | After build script run: 78 | 79 | ``` 80 | "examplefunction67F55935": { 81 | "Type": "AWS::Lambda::Function", 82 | "Properties": { 83 | "Code": { 84 | "S3Bucket": "solutions", 85 | "S3Key": "trademarked-solution-name/v1.0.0/asset.d513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7.zip" 86 | }, ... 87 | "Handler": "assetd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7/index.handler" 88 | ``` 89 | 90 | After CloudFormation deployment: 91 | 92 | ``` 93 | "examplefunction67F55935": { 94 | "Type": "AWS::Lambda::Function", 95 | "Properties": { 96 | "Code": { 97 | "S3Bucket": "solutions-us-east-1", 98 | "S3Key": "trademarked-solution-name/v1.0.0/asset.d513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7.zip" 99 | }, ... 100 | "Handler": "assetd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7/index.handler" 101 | ``` 102 | 103 | #### Template cleanup 104 | 105 | Cleans up the parameters section and improves readability by removing the AssetParameter-style fields that would have 106 | been used to specify Lambda source code properties. This allows solution-specific parameters to be highlighted and 107 | removes unnecessary clutter.
108 | 109 | Before: 110 | 111 | ``` 112 | "Parameters": { 113 | "AssetParametersd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7S3Bucket54E71A95": { 114 | "Type": "String", 115 | "Description": "S3 bucket for asset \"d513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7\"" 116 | }, 117 | "AssetParametersd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7S3VersionKeyC789D8B1": { 118 | "Type": "String", 119 | "Description": "S3 key for asset version \"d513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7\"" 120 | }, 121 | "AssetParametersd513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7ArtifactHash7AA751FE": { 122 | "Type": "String", 123 | "Description": "Artifact hash for asset \"d513e93e266931de36e1c7e79c27b196f84ab928fce63d364d9152ca501551f7\"" 124 | }, 125 | "CorsEnabled" : { 126 | "Description" : "Would you like to enable Cross-Origin Resource Sharing (CORS) for the image handler API? Select 'Yes' if so.", 127 | "Default" : "No", 128 | "Type" : "String", 129 | "AllowedValues" : [ "Yes", "No" ] 130 | }, 131 | "CorsOrigin" : { 132 | "Description" : "If you selected 'Yes' above, please specify an origin value here. A wildcard (*) value will support any origin.", 133 | "Default" : "*", 134 | "Type" : "String" 135 | } 136 | } 137 | ``` 138 | 139 | After: 140 | 141 | ``` 142 | "Parameters": { 143 | "CorsEnabled" : { 144 | "Description" : "Would you like to enable Cross-Origin Resource Sharing (CORS) for the image handler API? Select 'Yes' if so.", 145 | "Default" : "No", 146 | "Type" : "String", 147 | "AllowedValues" : [ "Yes", "No" ] 148 | }, 149 | "CorsOrigin" : { 150 | "Description" : "If you selected 'Yes' above, please specify an origin value here. A wildcard (*) value will support any origin.", 151 | "Default" : "*", 152 | "Type" : "String" 153 | } 154 | } 155 | 156 | ``` 157 | 158 | --- 159 | 160 | © Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 161 | -------------------------------------------------------------------------------- /source/cdk/lib/autoingest/lambda/test_autoingest.py: -------------------------------------------------------------------------------- 1 | ####################################################################################################################### 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # 3 | # # 4 | # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # 5 | # with the License. A copy of the License is located at # 6 | # # 7 | # http://www.apache.org/licenses/LICENSE-2.0 # 8 | # # 9 | # or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES # 10 | # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions # 11 | # and limitations under the License. 
# 12 | ####################################################################################################################### 13 | import json 14 | import os 15 | import unittest 16 | import boto3 17 | import mock 18 | from moto import mock_s3 19 | from botocore.exceptions import ClientError 20 | 21 | S3_BUCKET_NAME = 'exchangebucket' 22 | DESTINATION_S3_BUCKET_NAME = 'actualtestbucketname' 23 | DEFAULT_REGION = 'us-east-1' 24 | S3_TEST_FILE_KEY = 'test_app3.json' 25 | S3_TEST_FILE_CONTENT = [ 26 | {"company": "amazon", "price": 15}, 27 | {"company": "test", "price": 25} 28 | ] 29 | 30 | @mock_s3 31 | @mock.patch.dict(os.environ, {'SOURCE_BUCKET_NAME': S3_BUCKET_NAME,'SOLUTION_IDENTIFIER': 'SO0133', 'LogLevel': 'INFO', 'DESTINATION_BUCKET_NAME': DESTINATION_S3_BUCKET_NAME, 'DESTINATION_PREFIX': 'ingest', 'SendAnonymizedMetric': 'No'}) 32 | class TestAutoIngestLambdaFunction(unittest.TestCase): 33 | def setUp(self): 34 | # S3 setup 35 | self.s3 = boto3.resource('s3', region_name=DEFAULT_REGION) 36 | self.s3_bucket = self.s3.create_bucket(Bucket=S3_BUCKET_NAME) # Fake Media Exchange Bucket 37 | self.s3.BucketVersioning(S3_BUCKET_NAME).enable() 38 | self.destination_s3_bucket = self.s3.create_bucket(Bucket=DESTINATION_S3_BUCKET_NAME) # Fake ingest bucket 39 | self.s3_bucket.put_object(Key=S3_TEST_FILE_KEY, 40 | Body=json.dumps(S3_TEST_FILE_CONTENT)) # Emulate file in ME bucket 41 | self.S3_TEST_FILE_VERSION = self.s3.Bucket(S3_BUCKET_NAME).Object(S3_TEST_FILE_KEY).version_id # Save file version 42 | 43 | def test_match_bucket_name_success(self): 44 | from autoingest_driver.app import match_bucket_name 45 | file_content = match_bucket_name(S3_BUCKET_NAME) 46 | self.assertIsNone(file_content) 47 | 48 | def test_match_bucket_name_error(self): 49 | from autoingest_driver.app import match_bucket_name 50 | self.assertRaises(ClientError, match_bucket_name, DEFAULT_REGION) 51 | 52 | def test_check_object_success(self): 53 | from autoingest_driver.app import check_object 54 | file_content = check_object(S3_BUCKET_NAME, S3_TEST_FILE_KEY) 55 | self.assertIsNone(file_content) 56 | 57 | def test_check_object_error(self): 58 | from autoingest_driver.app import check_object 59 | self.assertRaises(ClientError, check_object, S3_BUCKET_NAME, DEFAULT_REGION) 60 | 61 | def test_copy_object_success(self): 62 | from autoingest_driver.app import copy_object 63 | file_content = copy_object(S3_BUCKET_NAME, S3_TEST_FILE_KEY, self.S3_TEST_FILE_VERSION, DESTINATION_S3_BUCKET_NAME, 'ingest') 64 | self.assertIsNone(file_content) 65 | 66 | def test_copy_object_error(self): 67 | from autoingest_driver.app import copy_object 68 | self.assertRaises(Exception, copy_object, S3_TEST_FILE_KEY, S3_BUCKET_NAME, self.S3_TEST_FILE_VERSION, DESTINATION_S3_BUCKET_NAME, 'ingest') 69 | 70 | def test_handler_success(self): 71 | from autoingest_driver.app import lambda_handler 72 | event = { 73 | "Records": [ 74 | { 75 | "messageId": "aa3b554c-f909-4453-a846-da9f90f11c24", 76 | "receiptHandle": "AQEBH1Dm1PA4UtuL0uPas5m0rJdmPSPv3ulb7Q+vda4ZCJNvgdo5vDARZUByMxASaWX+MWQU9sEIxqgQXCc2wz7splIQ542h7dZF3FDlWTlMbaVB4a8litsFVy28PrBuWBxwJYLdIkEul+lvVDdl1ht4h4YHQlb61oG5AhTB0+6AByoQWf2RKB/tIRO+iTAc2Pm0Fk/aC/LE6r2LkeTVFTOK6NymWB+beEFdhsSCEyAsiODy7tOceQlzurwroqUeU+WYLoCwQLypiaokZ1OiXNTRyhrlEiJem4cRV28f2i7F68A4b6okRo8xubsRjpDTx4Y8hlpj5wJS7TDihTIBalVOHiP6LvhfFS850xus6AoVB0b8kMbZxxbbEjFCqtPLa7D90vEVbPQBFcEo+Rb7lg2aVziAABhsxJK53IVeIel8OrnBtzEo4Dzfk/S7LhJWndVH", 77 | "body": "{\n \"Type\" : \"Notification\",\n \"MessageId\" : 
\"fe39e72c-90d8-567b-bae3-bbc575fdbb02\",\n \"TopicArn\" : \"test-string\",\n \"Message\" : \"{\\\"version\\\":\\\"0\\\",\\\"bucket\\\":{\\\"name\\\":\\\"exchangebucket\\\"},\\\"object\\\":{\\\"key\\\":\\\"test_app3.json\\\",\\\"size\\\":3248,\\\"etag\\\":\\\"cdce56146d5c79a7bb8e35b89d73a304\\\",\\\"version-id\\\":\\\"%s\\\",\\\"sequencer\\\":\\\"00646FE2E3E5AA55B7\\\"},\\\"request-id\\\":\\\"WH6BBZSWBQJ64B8C\\\",\\\"requester\\\":\\\"test-requester\\\",\\\"source-ip-address\\\":\\\"test\\\",\\\"reason\\\":\\\"PutObject\\\"}\",\n \"Timestamp\" : \"2023-05-25T22:36:21.738Z\",\n \"SignatureVersion\" : \"1\"\n}" % self.S3_TEST_FILE_VERSION, 78 | "attributes": { 79 | "ApproximateReceiveCount": "1", 80 | "SentTimestamp": "1685054181791", 81 | "SenderId": "AIDAIT2UOQQY3AUEKVGXU", 82 | "ApproximateFirstReceiveTimestamp": "1685054181801" 83 | }, 84 | "messageAttributes": {}, 85 | "md5OfBody": "056d8e151714a82293532cd8a15c5e77", 86 | "eventSource": "aws:sqs", 87 | "awsRegion": "us-east-1" 88 | } 89 | ] 90 | } 91 | result = lambda_handler(event, {}) 92 | self.assertEqual(result, {'ResultCode': '0', 'ResultString': 'Successfully copied'}) 93 | 94 | -------------------------------------------------------------------------------- /source/cdk/lib/fixity/README.md: -------------------------------------------------------------------------------- 1 | # Fixity 2 | 3 | ## Table of contents 4 | 5 | - [Solution Overview](#solution-overview) 6 | - [Architecture Diagram](#architecture-diagram) 7 | - [Customizing the Solution](#customizing-the-solution) 8 | - [Prerequisites for Customization](#prerequisites-for-customization) 9 | - [Deploy](#deploy) 10 | - [Cleanup](#cleanup) 11 | - [Usage](#usage) 12 | - [Performance](#performance) 13 | - [Cost](#cost) 14 | - [License](#license) 15 | 16 | 17 | 18 | # Solution Overview 19 | 20 | This optional utility lets publishers (at source) and subscribers (at destination) compute checksums at scale to ensure file integrity. It uses AWS Batch and Amazon Elastic Compute Cloud (Amazon EC2) Spot Instances to orchestrate the computation infrastructure. It calculates checksums by streaming the objects directly from Amazon S3, so no local instance storage is required. 21 | 22 | 23 | 24 | # Architecture Diagram 25 | 26 | ![Architecture](images/fixity.jpeg) 27 | 28 | This utility reads the objects from S3 and computes MD5, SHA1, xxHash, and xxHash64 checksums. The resultant checksums are stored as tags on the source S3 objects. This process is executed in containers managed by AWS Batch. 29 | 30 | It offers two ways to initiate checksumming. 31 | 32 | The first method uses S3 Batch Operations as the frontend. S3 Batch Operations works with a CSV-formatted inventory list file. You can use S3 [inventory reports](https://docs.aws.amazon.com/AmazonS3/latest/userguide/storage-inventory.html) if you already have one. Otherwise, you can generate an inventory list by using the included scripts/generate_inventory.sh script. Please note that the script works for hundreds of files. If you have thousands of objects in the bucket, inventory reports are the way to go. S3 Batch jobs invoke a Lambda function that performs a few basic checks before handing off the actual fixity operation to a script. For files smaller than 10 GB, this script runs in containers on AWS Batch with Fargate. For files larger than 10 GB, it runs on EC2 Spot Instances, which opens up the use of larger, higher-performance instance types.
It produces the following checksums and stores them as custom tags on the S3 objects. 33 | 34 | - md5sum 35 | - sha1sum 36 | - xxhsum 37 | 38 | This process works well if you have lots of objects that need checksumming. 39 | 40 | There is also an API that can be used to invoke the checksumming process one object at a time. The API takes a bucket name and key as parameters. It uses the same underlying AWS Batch infrastructure to compute the checksums. 41 | 42 | 43 | 44 | ## Customizing the Solution 45 | 46 | 47 | 48 | ### Prerequisites for Customization 49 | 50 | - GNU make 51 | - Docker desktop 52 | - Node 53 | - [AWS CDK](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html) 54 | - [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) 55 | - [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) 56 | 57 | 58 | 59 | ### Deploy 60 | 61 | - Initialize a shell with AWS credentials to deploy to the target (publisher / subscriber) account. You can do this by adding AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN as environment variables or by selecting the appropriate profile by setting the AWS_PROFILE environment variable. 62 | - Run the build script in the deployment folder of the root directory to generate template files for this solution (or use the existing ones in the development directory of this function). 63 | - At the command prompt type `make install`. 64 | - Follow the on-screen instructions for the configuration parameters. 65 | 66 | 67 | 68 | ### Cleanup 69 | 70 | - Initialize a shell with AWS credentials for the account where you have deployed this. You can do this by adding AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN as environment variables or by selecting the appropriate profile by setting the AWS_PROFILE environment variable. 71 | - Navigate to the MediaExchangeOnAWS/tools/fixity directory. 72 | - At the command prompt type `make outputs`. And make a note of the value of _FlowLogBucketName_. 73 | - At the command prompt type `make clean`. 74 | - This process leaves a VPC Flow Log Bucket. This bucket needs to be cleaned up manually. You noted _FlowLogBucketName_ in the first step. 75 | - Run the following command to remove the bucket and its contents. 76 | ``` 77 | $ aws s3 rm s3://<FlowLogBucketName> --recursive 78 | $ aws s3 rb s3://<FlowLogBucketName> 79 | ``` 80 | 81 | 82 | 83 | ## Usage 84 | 85 | 1. Sign in to your AWS account and navigate to S3. 86 | 1. On the left menu, select Batch Operations. 87 | 1. Choose Create Job: 88 | 89 | - Select the Region where you have installed fixity. 90 | - For the manifest, select CSV or S3 inventory report based on what you prepared. 91 | - Choose Next. 92 | - Select Invoke AWS Lambda function. 93 | - In the section below, select Choose from functions in your account and select the Lambda function ending with fixity. 94 | - Choose Next. 95 | - Under Additional options, enter an appropriate description. 96 | - For the completion report, select Failed tasks only and select a destination S3 bucket. 97 | - Under the permissions section, select Choose from existing IAM roles, and select the IAM role ending in fixity_role in the same Region. 98 | - Choose Next. 99 | - Review the job on the last page and choose Create job. 100 | 101 | 1. Once the job is created, it goes from new to awaiting user confirmation state. Choose Run job when ready. 102 | 1. The S3 Batch job invokes the Lambda function, which drops checksum jobs into an AWS Batch job queue.
103 | 104 | 105 | 106 | ## Performance 107 | 108 | Expected performance per object: 109 | 110 | - 16 seconds for 1 GB 111 | - 23 seconds for 5 GB 112 | - 43 seconds for 10 GB 113 | - 2 minutes for 50 GB 114 | - 4 minutes for 100 GB 115 | - 20 minutes for 500 GB 116 | - 39 minutes for 1 TB 117 | 118 | Out of the box, it can run 256 checksums in parallel. 119 | 120 | 121 | 122 | ## Cost 123 | 124 | 1. S3 API pricing for GET / PUT. See [here](https://aws.amazon.com/s3/pricing/). 125 | 1. S3 Batch Operations pricing. See [here](https://aws.amazon.com/s3/pricing/). 126 | 1. There is no cost for egress in the same Region. 127 | 1. There is no additional charge for AWS Batch. 128 | 1. AWS Lambda pricing. See [here](https://aws.amazon.com/lambda/pricing/). 129 | 1. AWS Fargate Spot pricing. See [here](https://aws.amazon.com/fargate/pricing/). 130 | 131 | 132 | 133 | # License 134 | 135 | See license [here](https://github.com/aws-solutions/media-exchange-on-aws/blob/main/LICENSE.txt) 136 | -------------------------------------------------------------------------------- /source/cdk/lib/mediasync/README.md: -------------------------------------------------------------------------------- 1 | # MediaSync 2 | 3 | ## Table of contents 4 | 5 | - [Solution Overview](#solution-overview) 6 | - [Architecture Diagram](#architecture-diagram) 7 | - [Customizing the Solution](#customizing-the-solution) 8 | - [Prerequisites for Customization](#prerequisites-for-customization) 9 | - [Deploy](#deploy) 10 | - [Cleanup](#cleanup) 11 | - [Usage](#usage) 12 | - [Performance](#performance) 13 | - [Cost](#cost) 14 | - [License](#license) 15 | 16 | 17 | 18 | # Solution Overview 19 | 20 | This optional utility moves assets between Amazon S3 buckets. When you deploy it, it enables a new toolset in the AWS Management Console that helps move large (100s of GBs) files or hundreds of thousands of small files. The MediaSync utility scales up by running the copy operation in parallel across thousands of concurrent processes. It can handle file sizes up to 5 TB, is resilient, and is cost effective. The utility uses AWS Fargate Spot for its compute environment. 21 | 22 | 23 | 24 | # Architecture Diagram 25 | 26 | ![Architecture](images/mediasync.jpeg) 27 | 28 | MediaSync uses S3 Batch Operations. S3 Batch Operations works with a CSV-formatted inventory list file. You can use S3 inventory reports if you already have one. Otherwise, you can generate an inventory list by using the included scripts/generate_inventory.sh script. Please note that the script works best if there are fewer than one hundred thousand objects in the source bucket. If you have more objects in the bucket, inventory reports are the way to go. 29 | 30 | S3 Batch jobs invoke an AWS Lambda function that performs a few basic checks before handing off the actual copy operation to a script. This script runs in containers in AWS Batch on AWS Fargate. The copy operation itself uses S3 server-side copy, so the containers themselves do not handle the actual bytes. If the object is small (<500 MB), the copy happens in Lambda.
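For reference, the CSV manifest that S3 Batch Operations consumes is simply one object per line in `bucket,key` form (optionally with a trailing version ID). A minimal hand-written manifest might look like this (bucket and keys are placeholders):

```
examplebucket,assets/title1/master.mxf
examplebucket,assets/title2/master.mxf
```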
31 | 32 | 33 | 34 | ## Customizing the Solution 35 | 36 | 37 | 38 | ### Prerequisites for Customization 39 | 40 | - GNU make 41 | - Docker Desktop 42 | - Node 43 | - [AWS CDK](https://docs.aws.amazon.com/cdk/v2/guide/getting_started.html) 44 | - [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-install.html) 45 | - [AWS SAM CLI](https://docs.aws.amazon.com/serverless-application-model/latest/developerguide/serverless-sam-cli-install.html) 46 | 47 | 48 | 49 | ### Deploy 50 | 51 | - Initialize a shell with the necessary credentials to deploy to the target (publisher / subscriber) account. You can do this by adding AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN as environment variables, or by selecting the appropriate profile with the AWS_PROFILE environment variable. 52 | - Run the build script in the deployment folder of the root directory to generate template files for this solution (or use existing ones in the development directory of this function). 53 | - (optional) Build and publish a custom container: 54 | 55 | - At the command prompt type `make publish`. This publishes the custom container to a private ECR repository. 56 | - Follow the on-screen instructions for configuration parameters. 57 | 58 | - Deploy MediaSync: 59 | - At the command prompt type `make install`. 60 | - Follow the on-screen instructions for configuration parameters. 61 | - If you have built a custom image in the previous step, specify that in the ImageName parameter. Otherwise leave the default of amazon/aws-cli. 62 | - Specify the destination bucket name. 63 | 64 | 65 | 66 | ### Cleanup 67 | 68 | - Initialize a shell with the necessary credentials to the account where you have deployed this. You can do this by adding AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and AWS_SESSION_TOKEN as environment variables, or by selecting the appropriate profile with the AWS_PROFILE environment variable. 69 | 70 | 71 | - Remove MediaSync: 72 | - Navigate to the MediaExchangeOnAWS/tools/mediasync directory. 73 | - At the command prompt type `make outputs` and make a note of the value of _FlowLogBucketName_. 74 | - At the command prompt type `make clean`. 75 | - This process leaves a VPC Flow Log bucket that needs to be cleaned up manually, using the _FlowLogBucketName_ you noted in the first step. 76 | - Run the following commands to remove the bucket and its contents. 77 | ``` 78 | $ aws s3 rm s3://<FlowLogBucketName> --recursive 79 | $ aws s3 rb s3://<FlowLogBucketName> 80 | ``` 81 | - (Optional) Remove custom container images, if you chose to use them. 82 | - At the command prompt type `make clean-repository mediasync-repository-delete-stack`. 83 | 84 | 85 | 86 | ## Usage 87 | 88 | 1. Sign in to your AWS account and navigate to S3. 89 | 1. On the left menu, select Batch Operations. 90 | 1. Choose Create Job: 91 | 92 | - Select the Region where you have installed MediaSync. 93 | - For the manifest, select CSV or S3 inventory report based on what you prepared. 94 | - Choose Next. 95 | - Select Invoke AWS Lambda function. 96 | - In the section below, select Choose from functions in your account and select the Lambda function starting with MediaSync. 97 | - Choose Next. 98 | - Under Additional options, enter an appropriate description. 99 | - For the completion report, select Failed tasks only and select a destination S3 bucket. 100 | - Under Permissions, select Choose from existing IAM roles, and select the IAM role starting with MediaSync in the same Region. 101 | - Choose Next. 102 | - Review the job on the last page and choose Create job. 103 | 104 | 1. Once the job is created, it goes from the new state to the awaiting user confirmation state. Choose Run job when ready. 105 | 1. The S3 Batch job invokes the Lambda function that drops copy jobs into an AWS Batch job queue. Tasks from this queue are executed on AWS Fargate. 106 | 107 | There is a helper script available in scripts/run_copy_job.sh that automates all of these steps. The script takes the inventory bucket name and key as inputs.
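For example, a hypothetical invocation (the inventory bucket and manifest key are placeholders):

```
$ ./scripts/run_copy_job.sh my-inventory-bucket inventory/manifest.csv
```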
108 | 109 | 110 | 111 | ## Performance 112 | 113 | Single-object performance is: 114 | 115 | - 24 seconds for 1 GB 116 | - 32 seconds for 5 GB 117 | - 40 seconds for 10 GB 118 | - 2 minutes for 100 GB 119 | - 7 minutes 30 seconds for 500 GB 120 | - 11 minutes for 1 TB 121 | - 27 minutes for 5 TB 122 | 123 | It runs many of these transfers in parallel. It takes about three hours to copy 1 PB of assets between two buckets in the same Region. 124 | 125 | 126 | 127 | ## Cost 128 | 129 | 1. S3 API pricing for GET / PUT. See [here](https://aws.amazon.com/s3/pricing/). 130 | 1. S3 Batch Operations pricing. See [here](https://aws.amazon.com/s3/pricing/). 131 | 1. There is no cost for egress in the same Region. 132 | 1. There is no additional charge for AWS Batch. 133 | 1. AWS Lambda pricing. See [here](https://aws.amazon.com/lambda/pricing/). 134 | 1. AWS Fargate Spot pricing. See [here](https://aws.amazon.com/fargate/pricing/). 135 | 136 | 137 | 138 | # License 139 | 140 | See license [here](https://github.com/aws-solutions/media-exchange-on-aws/blob/main/LICENSE.txt) 141 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | all: help 2 | help: 3 | @echo 'deploys Media Exchange CloudFormation templates' 4 | 5 | SOLUTION_NAME = "media-exchange-on-aws" 6 | VERSION ?= 1.1.0 7 | 8 | GUIDED ?= --guided 9 | ENV ?= dev 10 | CURRENT_DIR := $(dir $(abspath $(lastword $(MAKEFILE_LIST)))) 11 | STACKPREFIX=mediaexchange 12 | 13 | 14 | ACCOUNT_ID := $(shell aws sts get-caller-identity --query Account --output text) 15 | PARAMETER_OVERRIDES := Environment=$(ENV) 16 | AWS_REGION ?= $(shell aws configure get region --output text) 17 | 18 | 19 | %-stack-build: deployment/%.yaml configure 20 | @echo "Building template..."
21 | @sam build -s $(CURRENT_DIR) -b $(CURRENT_DIR)/build/$*/ --template $(CURRENT_DIR)/$< --use-container 22 | 23 | %-stack-deploy: %-stack-build 24 | @echo "deploying cloudformation template" 25 | sam deploy -t $(CURRENT_DIR)/build/$*/template.yaml --stack-name $(STACKPREFIX)-$*-$(ENV) --no-confirm-changeset --no-fail-on-empty-changeset --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM --resolve-s3 --parameter-overrides $(PARAMETER_OVERRIDES) --config-env $* $(GUIDED) --region $(AWS_REGION) 26 | 27 | %-stack-delete: 28 | @echo "deleting cloudformation stack" 29 | aws cloudformation delete-stack --stack-name $(STACKPREFIX)-$*-$(ENV) 30 | aws cloudformation wait stack-delete-complete --stack-name $(STACKPREFIX)-$*-$(ENV) 31 | 32 | quickstart: publisher-stack-deploy subscriber-stack-deploy agreement-stack-deploy 33 | quickclean: agreement-stack-delete publisher-stack-delete subscriber-stack-delete 34 | 35 | ##Testing 36 | TEST_ACCOUNT_ID ?= $(ACCOUNT_ID) 37 | 38 | ifeq ($(PUBLISHER_ACCOUNT_ID), $(ACCOUNT_ID)) 39 | PUBLISHER_ROLE ?= arn:aws:iam::$(ACCOUNT_ID):role/publisher-role 40 | endif 41 | 42 | ifeq ($(SUBSCRIBER_ACCOUNT_ID), $(ACCOUNT_ID)) 43 | SUBSCRIBER_ROLE ?= arn:aws:iam::$(ACCOUNT_ID):role/subscriber-role 44 | endif 45 | 46 | testrole-stack-deploy: 47 | sam deploy -t $(CURRENT_DIR)/tests/deployment/testrole.yaml --stack-name $(STACKPREFIX)-testrole-$(ENV) --no-confirm-changeset --no-fail-on-empty-changeset --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM --resolve-s3 --parameter-overrides $(PARAMETER_OVERRIDES) --config-env $* $(GUIDED) --region $(AWS_REGION) 48 | 49 | testrole-stack-deploy: PARAMETER_OVERRIDES += TestAccountId=$(TEST_ACCOUNT_ID) 50 | 51 | 52 | ################ 53 | TEMPLATE_OUTPUT_BUCKET ?= $(STACKPREFIX)-cftemplates-$(AWS_REGION)-$(ACCOUNT_ID) 54 | 55 | EXT_VERSION := $(VERSION)-$(shell date +"%s") 56 | 57 | configure: 58 | @aws s3api head-bucket --bucket $(TEMPLATE_OUTPUT_BUCKET) || aws s3 mb s3://$(TEMPLATE_OUTPUT_BUCKET) 59 | @cd $(CURRENT_DIR)/deployment/ && ./build-s3-dist.sh $(TEMPLATE_OUTPUT_BUCKET) $(SOLUTION_NAME) $(EXT_VERSION) 60 | 61 | @for product in publisher subscriber agreement; do \ 62 | aws s3 cp $(CURRENT_DIR)/deployment/global-s3-assets/$$product.template s3://$(TEMPLATE_OUTPUT_BUCKET)/$(SOLUTION_NAME)/$(EXT_VERSION)/$$product.template --no-progress --only-show-errors; \ 63 | done 64 | 65 | install: configure 66 | 67 | @sam deploy -t $(CURRENT_DIR)/deployment/global-s3-assets/media-exchange-on-aws.template $(GUIDED) --stack-name $(STACKPREFIX)-servicecatalog-stack-$(ENV) --no-confirm-changeset --no-fail-on-empty-changeset --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM --parameter-overrides Environment=$(ENV) --config-env servicecatalog-stack --region $(AWS_REGION) 68 | 69 | @sam deploy -t $(CURRENT_DIR)/deployment/global-s3-assets/provision.template --stack-name $(STACKPREFIX)-selfprovision-$(ENV) --no-confirm-changeset --no-fail-on-empty-changeset --capabilities CAPABILITY_IAM CAPABILITY_NAMED_IAM --parameter-overrides Environment=$(ENV) PublisherAccountId=$(ACCOUNT_ID) PublisherName=self SubscriberAccountId=$(ACCOUNT_ID) SubscriberName=self --config-env selfprovision-stack --role-arn arn:aws:iam::$(ACCOUNT_ID):role/mediaexchange-$(AWS_REGION)-$(ENV)-cfn-deploy --region $(AWS_REGION) 70 | 71 | provision: 72 | 73 | @sam deploy -t $(CURRENT_DIR)/deployment/global-s3-assets/provision.template $(GUIDED) --stack-name $(STACKPREFIX)-provision-$(ENV) --no-confirm-changeset --no-fail-on-empty-changeset --capabilities CAPABILITY_IAM 
CAPABILITY_NAMED_IAM --parameter-overrides Environment=$(ENV) --config-env provision-stack --role-arn arn:aws:iam::$(ACCOUNT_ID):role/mediaexchange-$(AWS_REGION)-$(ENV)-cfn-deploy --region $(AWS_REGION) 74 | 75 | summarize: 76 | 77 | @echo AWS S3 Console URL 78 | @aws cloudformation describe-stacks --stack-name $(shell aws cloudformation describe-stacks --stack-name $(STACKPREFIX)-provision-$(ENV) --query "Stacks[0].Outputs[?OutputKey == 'AgreementStackArn'].OutputValue" --output text) --query "Stacks[0].Outputs[?OutputKey == 'ConsoleUrl'].OutputValue" --output text 79 | 80 | @echo Publisher onboarding summary 81 | @aws cloudformation describe-stacks --stack-name $(shell aws cloudformation describe-stacks --stack-name $(STACKPREFIX)-provision-$(ENV) --query "Stacks[0].Outputs[?OutputKey == 'AgreementStackArn'].OutputValue" --output text) --query "Stacks[0].Outputs[?OutputKey == 'PublisherOnboardingSummary'].OutputValue" --output text | sed 's/ /\n/g' 82 | 83 | @echo Subscriber onboarding summary 84 | @aws cloudformation describe-stacks --stack-name $(shell aws cloudformation describe-stacks --stack-name $(STACKPREFIX)-provision-$(ENV) --query "Stacks[0].Outputs[?OutputKey == 'AgreementStackArn'].OutputValue" --output text) --query "Stacks[0].Outputs[?OutputKey == 'SubscriberOnboardingSummary'].OutputValue" --output text | sed 's/ /\n/g' 85 | 86 | 87 | test: 88 | ifneq ($(PUBLISHER_ACCOUNT_ID), $(ACCOUNT_ID)) 89 | $(info ****ACTION**** please deploy cloudformation template :testrole.yaml: to create the test role in $(PUBLISHER_ACCOUNT_ID)) 90 | endif 91 | ifneq ($(SUBSCRIBER_ACCOUNT_ID), $(ACCOUNT_ID)) 92 | $(info ****ACTION**** please deploy cloudformation template :testrole.yaml: to create the test role in $(SUBSCRIBER_ACCOUNT_ID)) 93 | endif 94 | 95 | @echo saving subscriber onboarding info at tests/subscriber.env 96 | @aws cloudformation describe-stacks --stack-name $(shell aws cloudformation describe-stacks --stack-name $(STACKPREFIX)-provision-$(ENV) --query "Stacks[0].Outputs[?OutputKey == 'AgreementStackArn'].OutputValue" --output text) --query "Stacks[0].Outputs[?OutputKey == 'SubscriberOnboardingSummary'].OutputValue" --output text > $(CURRENT_DIR)/tests/subscriber.env 97 | 98 | @echo saving publisher onboarding info at tests/publisher.env 99 | @aws cloudformation describe-stacks --stack-name $(shell aws cloudformation describe-stacks --stack-name $(STACKPREFIX)-provision-$(ENV) --query "Stacks[0].Outputs[?OutputKey == 'AgreementStackArn'].OutputValue" --output text) --query "Stacks[0].Outputs[?OutputKey == 'PublisherOnboardingSummary'].OutputValue" --output text > $(CURRENT_DIR)/tests/publisher.env 100 | 101 | #TODO: containers?
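# Example: run the integration tests with explicit (hypothetical) publisher and
# subscriber account IDs; when an ID matches the current account, the local
# test role from testrole.yaml is used:
#   PUBLISHER_ACCOUNT_ID=111111111111 SUBSCRIBER_ACCOUNT_ID=222222222222 make test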
102 | @cd $(CURRENT_DIR)/tests; VAR=value python3 -m pytest -s python/ 103 | 104 | 105 | testclean: 106 | - aws cloudformation delete-stack --stack-name $(STACKPREFIX)-provision-$(ENV) --region $(AWS_REGION) 107 | - aws cloudformation wait stack-delete-complete --stack-name $(STACKPREFIX)-provision-$(ENV) --region $(AWS_REGION) 108 | 109 | clean: testclean 110 | aws cloudformation delete-stack --stack-name $(STACKPREFIX)-selfprovision-$(ENV) --region $(AWS_REGION) 111 | aws cloudformation wait stack-delete-complete --stack-name $(STACKPREFIX)-selfprovision-$(ENV) --region $(AWS_REGION) 112 | 113 | aws cloudformation delete-stack --stack-name $(STACKPREFIX)-servicecatalog-stack-$(ENV) --region $(AWS_REGION) 114 | aws cloudformation wait stack-delete-complete --stack-name $(STACKPREFIX)-servicecatalog-stack-$(ENV) --region $(AWS_REGION) 115 | 116 | 117 | .PHONY: install provision test clean testclean quickstart quickclean 118 | -------------------------------------------------------------------------------- /deployment/build-s3-dist.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # This script will perform the following tasks: 4 | # 1. Remove any old dist files from previous runs. 5 | # 2. Install dependencies for the cdk-solution-helper; responsible for 6 | # converting standard 'cdk synth' output into solution assets. 7 | # 3. Build and synthesize your CDK project. 8 | # 4. Run the cdk-solution-helper on template outputs and organize 9 | # those outputs into the /global-s3-assets folder. 10 | # 5. Organize source code artifacts into the /regional-s3-assets folder. 11 | # 6. Remove any temporary files used for staging. 12 | # 13 | # This script should be run from the repo's deployment directory 14 | # cd deployment 15 | # ./build-s3-dist.sh dist-bucket-name source-bucket-base-name solution-name version-code 16 | # 17 | # Parameters: 18 | # - dist-bucket-base-name: Name for the S3 bucket location where the assets are stored. 19 | # - source-bucket-base-name: Name for the S3 bucket location where the template will source the Lambda 20 | # code from. The template will append '-[region_name]' to this bucket name.
21 | # For example: ./build-s3-dist.sh solutions solutions-reference my-solution v1.0.0 22 | # The template will then expect the source code to be located in the solutions-reference-[region_name] bucket 23 | # - solution-name: name of the solution for consistency 24 | # - version-code: version of the package 25 | [ "$DEBUG" == 'true' ] && set -x 26 | set -e 27 | 28 | # Check to see if input has been provided: 29 | if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ] || [ -z "$4" ]; then 30 | echo "Please provide all required parameters for the build script" 31 | echo "For example: ./build-s3-dist.sh solutions solutions-reference trademarked-solution-name v1.0.0" 32 | exit 1 33 | fi 34 | 35 | asset_bucket_name="$1" 36 | bucket_name="$2" 37 | solution_name="$3" 38 | solution_version="$4" 39 | 40 | # Get reference for all important folders 41 | template_dir="$PWD" 42 | staging_dist_dir="$template_dir/staging" 43 | template_dist_dir="$template_dir/global-s3-assets" 44 | build_dist_dir="$template_dir/regional-s3-assets" 45 | source_dir="$template_dir/../source" 46 | tools_dir_autoingest="$template_dir/../source/cdk/lib/autoingest/deployment" 47 | tools_dir_mediasync="$template_dir/../source/cdk/lib/mediasync/deployment" 48 | tools_dir_fixity="$template_dir/../source/cdk/lib/fixity/deployment" 49 | 50 | echo "------------------------------------------------------------------------------" 51 | echo "[Init] Remove any old dist files from previous runs" 52 | echo "------------------------------------------------------------------------------" 53 | rm -rf $template_dist_dir 54 | mkdir -p $template_dist_dir 55 | 56 | rm -rf $build_dist_dir 57 | mkdir -p $build_dist_dir 58 | 59 | rm -rf $staging_dist_dir 60 | mkdir -p $staging_dist_dir 61 | 62 | 63 | echo "------------------------------------------------------------------------------" 64 | echo "[Synth] CDK Project" 65 | echo "------------------------------------------------------------------------------" 66 | cd $source_dir/cdk 67 | npm install 68 | # run synth inside the if condition so the failure is caught even with 'set -e' 69 | if ! npx aws-cdk synth --output=$staging_dist_dir
70 | then 71 | echo "******************************************************************************" 72 | echo "cdk-nag found errors" 73 | echo "******************************************************************************" 74 | exit 1 75 | fi 76 | 77 | cd $staging_dist_dir 78 | rm tree.json manifest.json cdk.out 79 | 80 | echo "------------------------------------------------------------------------------" 81 | echo "Run Cdk Helper and update template placeholders" 82 | echo "------------------------------------------------------------------------------" 83 | 84 | mv MEStack.template.json $template_dist_dir/$solution_name.template 85 | mv SubscriberStack.template.json $template_dist_dir/subscriber.template 86 | mv PublisherStack.template.json $template_dist_dir/publisher.template 87 | mv ProvisionStack.template.json $template_dist_dir/provision.template 88 | mv AgreementStack.template.json $template_dist_dir/agreement.template 89 | 90 | # Get asset folder names dynamically 91 | fixityAsset=$(jq -r 'first(.Resources | to_entries[] | select(.key | startswith("DriverFunction"))) | .value | .Metadata | ."aws:asset:path"' FixityStack.template.json) 92 | MediaSyncAsset=$(jq -r 'first(.Resources | to_entries[] | select(.key | startswith("MediaSyncDriverFunction"))) | .value | .Metadata | ."aws:asset:path"' MediaSyncStack.template.json) 93 | AutoIngestAsset=$(jq -r 'first(.Resources | to_entries[] | select(.key | startswith("DriverFunction"))) | .value | .Metadata | ."aws:asset:path"' AutoIngestStack.template.json) 94 | 95 | cd $template_dir/../source/cdk/lib/autoingest 96 | mkdir -p deployment 97 | cd $template_dir/../source/cdk/lib/mediasync 98 | mkdir -p deployment 99 | cd $template_dir/../source/cdk/lib/fixity 100 | mkdir -p deployment 101 | 102 | cd $staging_dist_dir 103 | mv FixityRepositoryStack.template.json $tools_dir_fixity/fixity-repository.json 104 | mv FixityStack.template.json $tools_dir_fixity/fixity.json 105 | cp -R $fixityAsset $tools_dir_fixity/../ 106 | 107 | mv MediaSyncRepositoryStack.template.json $tools_dir_mediasync/mediasync-repository.json 108 | mv MediaSyncStack.template.json $tools_dir_mediasync/mediasync.json 109 | cp -R $MediaSyncAsset $tools_dir_mediasync/../ 110 | 111 | mv AutoIngestStack.template.json $tools_dir_autoingest/autoingest.json 112 | cp -R $AutoIngestAsset $tools_dir_autoingest/../ 113 | 114 | # Run the helper to clean-up the templates 115 | echo "Run the helper to clean-up the templates" 116 | echo "node $template_dir/cdk-solution-helper/index" 117 | node $template_dir/cdk-solution-helper/index \ 118 | || { echo "(cdk-solution-helper) ERROR: there is likely output above."; exit 1; } 119 | 120 | for file in $template_dist_dir/*.template 121 | do 122 | replace="s/__ASSET_BUCKET_NAME__/$asset_bucket_name/g" 123 | sed -i.orig -e $replace $file 124 | 125 | replace="s/__BUCKET_NAME__/$bucket_name/g" 126 | sed -i.orig -e $replace $file 127 | 128 | replace="s/__SOLUTION_NAME__/$solution_name/g" 129 | sed -i.orig -e $replace $file 130 | 131 | replace="s/__VERSION__/$solution_version/g" 132 | sed -i.orig -e $replace $file 133 | done 134 | 135 | echo "------------------------------------------------------------------------------" 136 | echo "[Packing] Source code artifacts" 137 | echo "------------------------------------------------------------------------------" 138 | # ... For each asset.* source code artifact in the temporary /staging folder... 139 | cd $staging_dist_dir 140 | for d in `find .
-mindepth 1 -maxdepth 1 -type d`; do 141 | # Rename the artifact, removing the period for handler compatibility 142 | pfname="$(basename -- $d)" 143 | # Skip optional assets for deployment ([[ ]] is required for the glob match) 144 | if [[ "$pfname" != *"$fixityAsset"* && "$pfname" != *"$MediaSyncAsset"* && "$pfname" != *"$AutoIngestAsset"* ]]; then 145 | fname="$(echo $pfname | sed -e 's/\.//g')" 146 | mv $d $fname 147 | 148 | # Zip artifacts from asset folder 149 | cd $fname 150 | rm -rf node_modules/ 151 | if [ -f "package.json" ] 152 | then 153 | npm install --production 154 | fi 155 | zip -rq ../$fname.zip * 156 | cd .. 157 | 158 | # Copy the zipped artifact from /staging to /regional-s3-assets 159 | mv $fname.zip $build_dist_dir 160 | fi 161 | done 162 | 163 | echo "------------------------------------------------------------------------------" 164 | echo "[Cleanup] Remove temporary files" 165 | echo "------------------------------------------------------------------------------" 166 | rm -rf $staging_dist_dir 167 | rm -f $template_dist_dir/*.orig 168 | 169 | echo "------------------------------------------------------------------------------" 170 | echo "S3 Packaging Complete" 171 | echo "------------------------------------------------------------------------------" 172 | -------------------------------------------------------------------------------- /source/cdk/lib/fixity/lambda/fixity_driver/app.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | import os 5 | import logging 6 | import boto3 7 | import json 8 | import urllib 9 | import jsonpickle 10 | from botocore.exceptions import ClientError 11 | import unicodedata 12 | from botocore import config 13 | 14 | solution_identifier= os.environ['SOLUTION_IDENTIFIER'] 15 | 16 | user_agent_extra_param = {"user_agent_extra":solution_identifier} 17 | 18 | presetConfig = config.Config() 19 | if os.environ['SendAnonymizedMetric'] == 'Yes': 20 | presetConfig = config.Config(**user_agent_extra_param) 21 | 22 | logger = logging.getLogger() 23 | logger.setLevel(os.environ['LogLevel']) 24 | 25 | batchclient = boto3.client('batch', config=presetConfig) 26 | s3client = boto3.client('s3', config=presetConfig) 27 | 28 | class ObjectDeletedError(Exception): 29 | pass 30 | 31 | class UnsupportedStorageClassError(Exception): 32 | pass 33 | 34 | class UnsupportedTextFormatError(Exception): 35 | pass 36 | 37 | def api_handler(event, _): 38 | 39 | body = '' 40 | status = 200  # default to success; error branches below override this 41 | source_bucket = '' 42 | source_key = '' 43 | 44 | try: 45 | logger.debug('## EVENT\r' + jsonpickle.encode(dict(**event))) 46 | 47 | if event['queryStringParameters'] and 'bucket' in event['queryStringParameters'] and 'key' in event['queryStringParameters']: 48 | 49 | source_bucket = event['queryStringParameters']['bucket'] 50 | source_key= event['queryStringParameters']['key'] 51 | 52 | batch_job_id = _submit_job(source_bucket, source_key) 53 | body = {"JobId" : batch_job_id } 54 | 55 | else: 56 | status = 400 57 | body = {"Error": {"Code": 400, "Message": ' \'bucket\' and \'key\' are required query parameters'}} 58 | 59 | 60 | except ClientError as e: 61 | error_code = e.response['Error']['Code'] 62 | error_message = e.response['Error']['Message'] 63 | 64 | logger.debug(error_message) 65 | body = {"Error": {"Code": error_code, "Message": error_message}} 66 | status = 500 67 | 68 | except Exception as e: 69 | logger.error(e) 70 | 71 | body = {"Error":
{"Code": 500, "Message": "internal server error"} } 72 | status = 500 73 | 74 | return { 75 | "statusCode": status, 76 | "body": json.dumps(body) 77 | } 78 | 79 | 80 | def s3_batch_handler(event, _): 81 | 82 | logger.debug('## EVENT\r' + jsonpickle.encode(dict(**event))) 83 | 84 | invocation_id = event['invocationId'] 85 | invocation_schema_version = event['invocationSchemaVersion'] 86 | 87 | task_id = event['tasks'][0]['taskId'] 88 | source_key = urllib.parse.unquote_plus(event['tasks'][0]['s3Key']) 89 | s3_bucket_arn = event['tasks'][0]['s3BucketArn'] 90 | source_bucket = s3_bucket_arn.split(':::')[-1] 91 | 92 | results = [] 93 | # Prepare result code and string 94 | result_code = None 95 | result_string = None 96 | 97 | try: 98 | batch_job_id = _submit_job(source_bucket, source_key) 99 | result_code = 'Succeeded' 100 | result_string = 'https://console.aws.amazon.com/batch/v2/home?region=' + os.environ['AWS_REGION'] + '#jobs/detail/'+ batch_job_id 101 | 102 | except ClientError as e: 103 | # If request timed out, mark as a temp failure 104 | # and S3 Batch Operations will make the task for retry. If 105 | # any other exceptions are received, mark as permanent failure. 106 | error_code = e.response['Error']['Code'] 107 | error_message = e.response['Error']['Message'] 108 | 109 | logger.debug(error_message) 110 | 111 | if error_code == 'TooManyRequestsException': 112 | result_code = 'TemporaryFailure' 113 | result_string = 'Retry request to batch due to throttling.' 114 | elif error_code == 'RequestTimeout': 115 | result_code = 'TemporaryFailure' 116 | result_string = 'Retry request to Amazon S3 due to timeout.' 117 | elif (error_code == '304'): 118 | result_code = 'Succeeded' 119 | result_string = 'Not modified' 120 | elif (error_code == 'SlowDown'): 121 | result_code = 'TemporaryFailure' 122 | result_string = 'Retry request to s3 due to throttling.' 
123 | else: 124 | result_code = 'PermanentFailure' 125 | result_string = '{}: {}'.format(error_code, error_message) 126 | 127 | except Exception as e: 128 | # Catch all exceptions to permanently fail the task 129 | result_code = 'PermanentFailure' 130 | result_string = 'Exception: {}'.format(e) 131 | 132 | finally: 133 | results.append({ 134 | 'taskId': task_id, 135 | 'resultCode': result_code, 136 | 'resultString': result_string 137 | }) 138 | logger.info(result_code + " # " + result_string) 139 | 140 | return { 141 | 'invocationSchemaVersion': invocation_schema_version, 142 | 'treatMissingKeysAs': 'PermanentFailure', 143 | 'invocationId': invocation_id, 144 | 'results': results 145 | } 146 | 147 | 148 | def _submit_job(source_bucket, source_key): 149 | 150 | logger.debug("preflight check start") 151 | 152 | #preflight checks _read_ 153 | pre_flight_response = s3client.head_object( 154 | Bucket=source_bucket, 155 | Key=source_key 156 | ) 157 | 158 | logger.debug('## PREFLIGHT_RESPONSE\r' + jsonpickle.encode(dict(**pre_flight_response))) 159 | 160 | if 'DeleteMarker' in pre_flight_response and pre_flight_response['DeleteMarker'] == True: 161 | raise ObjectDeletedError( source_key + ' is deleted') 162 | 163 | logger.debug("preflight check end") 164 | 165 | unsupported_storage_class = False 166 | 167 | #Storage class check 168 | if 'StorageClass' in pre_flight_response and pre_flight_response['StorageClass'] in ['GLACIER', 'DEEP_ARCHIVE']: 169 | #check restore status: 170 | if 'Restore' in pre_flight_response: 171 | restore = pre_flight_response['Restore'] 172 | logger.debug(restore) 173 | if 'ongoing-request="false"' not in restore: 174 | logger.info('restore is in progress') 175 | raise UnsupportedStorageClassError( source_key + ' is restoring from ' + pre_flight_response['StorageClass']) 176 | else: 177 | unsupported_storage_class = True 178 | 179 | if (unsupported_storage_class): 180 | raise UnsupportedStorageClassError( source_key + ' is in unsupported StorageClass ' + pre_flight_response['StorageClass']) 181 | 182 | #NFC for unicodedata 183 | if unicodedata.is_normalized('NFC', source_key) == False: 184 | raise UnsupportedTextFormatError( source_key + ' is not in Normalized Form C' ) 185 | 186 | # use bigger containers for 10GB+ 187 | logger.debug("job definition selection") 188 | job_definition = os.environ['JOB_SIZE_SMALL'] if pre_flight_response['ContentLength'] < int(os.environ['JOB_SIZE_THRESHOLD']) else os.environ['JOB_SIZE_LARGE'] 189 | logger.debug("job definition is " + job_definition) 190 | 191 | logger.debug("job submission start") 192 | 193 | #submit job 194 | response = batchclient.submit_job( 195 | jobName="Fixity", 196 | jobQueue=os.environ['JOB_QUEUE'], 197 | jobDefinition=job_definition, 198 | parameters={ 199 | 'Bucket': source_bucket, 200 | 'Key': source_key 201 | }, 202 | propagateTags=True, 203 | tags={ 204 | 'Bucket': source_bucket, 205 | 'Key': source_key, 206 | 'Size': str(pre_flight_response['ContentLength']) 207 | } 208 | ) 209 | 210 | logger.debug('## BATCH_RESPONSE\r' + jsonpickle.encode(dict(**response))) 211 | logger.debug("job submission complete") 212 | 213 | return response['jobId'] 214 | -------------------------------------------------------------------------------- /source/cdk/lib/provision-stack.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License").
You may not use this file except in compliance 5 | * with the License. A copy of the License is located at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES 10 | * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions 11 | * and limitations under the License. 12 | */ 13 | import * as cdk from "aws-cdk-lib"; 14 | import { Construct } from "constructs"; 15 | import * as serviceCatalog from "aws-cdk-lib/aws-servicecatalog"; 16 | 17 | export class ProvisionStack extends cdk.Stack { 18 | constructor(scope: Construct, id: string, props?: cdk.StackProps) { 19 | super(scope, id, props); 20 | /** 21 | * CloudFormation Template Description 22 | */ 23 | const solutionId = "SO0133"; 24 | const solutionName = "Media Exchange on AWS"; 25 | this.templateOptions.description = `(${solutionId}PROV) - ${solutionName} - media exchange publisher/subscriber/agreement provisioning. Version: __VERSION__`; 26 | /** 27 | * Cfn Parameters 28 | */ 29 | const environment = new cdk.CfnParameter(this, "Environment", { 30 | type: "String", 31 | description: "Deployment Environment Name", 32 | allowedPattern: "[A-Za-z0-9]+", 33 | default: "dev", 34 | constraintDescription: 35 | "Malformed input-Parameter Environment must only contain uppercase and lowercase letters and numbers", 36 | maxLength: 10, 37 | minLength: 2, 38 | }); 39 | const publisherName = new cdk.CfnParameter(this, "PublisherName", { 40 | type: "String", 41 | description: "The name of the publisher", 42 | allowedPattern: "[A-Za-z0-9]+", 43 | constraintDescription: 44 | "Malformed input-Parameter PublisherName must only contain uppercase and lowercase letters and numbers", 45 | maxLength: 64, 46 | minLength: 3, 47 | }); 48 | const publisherAccountId = new cdk.CfnParameter( 49 | this, 50 | "PublisherAccountId", 51 | { 52 | type: "String", 53 | description: 54 | "The AWS accountId of the publisher. This parameter is ignored if you specify PublisherRole.", 55 | allowedPattern: "^\\d{12}$", 56 | constraintDescription: 57 | "Malformed input-Parameter PublisherAccountId must be a 12 digit number", 58 | } 59 | ); 60 | const subscriberName = new cdk.CfnParameter(this, "SubscriberName", { 61 | type: "String", 62 | description: "A name for the subscriber account.", 63 | allowedPattern: "[A-Za-z0-9-]+", 64 | constraintDescription: 65 | "Malformed input-Parameter SubscriberName must only contain uppercase and lowercase letters and numbers", 66 | maxLength: 64, 67 | minLength: 3, 68 | }); 69 | const subscriberAccountId = new cdk.CfnParameter( 70 | this, 71 | "SubscriberAccountId", 72 | { 73 | type: "String", 74 | description: 75 | "The accountId of the Subscriber.
This parameter is ignored if you specify SubscriberRole.", 76 | allowedPattern: "^\\d{12}$", 77 | constraintDescription: 78 | "Malformed input-Parameter SubscriberAccountId must be a 12 digit number.", 79 | } 80 | ); 81 | 82 | /** 83 | * Template metadata 84 | */ 85 | this.templateOptions.metadata = { 86 | "AWS::CloudFormation::Interface": { 87 | ParameterGroups: [ 88 | { 89 | Label: { default: "Deployment Configuration" }, 90 | Parameters: [environment.logicalId], 91 | }, 92 | { 93 | Label: { default: "Configuration" }, 94 | Parameters: [ 95 | publisherName.logicalId, 96 | subscriberName.logicalId, 97 | publisherAccountId.logicalId, 98 | subscriberAccountId.logicalId, 99 | ], 100 | }, 101 | ], 102 | }, 103 | }; 104 | // Get publisher product Id from serviceCatalog 105 | const publisherProductId = serviceCatalog.Product.fromProductArn( 106 | this, 107 | "PublisherProductId", 108 | cdk.Fn.importValue( 109 | `mxc-${cdk.Aws.REGION}-${environment.valueAsString}-sc-publisher-productid` 110 | ) 111 | ); 112 | 113 | // Publisher provisioned product 114 | const publisher = new serviceCatalog.CfnCloudFormationProvisionedProduct( 115 | this, 116 | "Publisher", 117 | { 118 | provisionedProductName: `mediaexchange-${publisherName.valueAsString}-publisher`, 119 | productId: publisherProductId.productId, 120 | provisioningArtifactName: "latest", 121 | provisioningParameters: [ 122 | { 123 | key: "Environment", 124 | value: environment.valueAsString, 125 | }, 126 | { 127 | key: "PublisherName", 128 | value: publisherName.valueAsString, 129 | }, 130 | { 131 | key: "PublisherAccountId", 132 | value: publisherAccountId.valueAsString, 133 | }, 134 | ], 135 | } 136 | ); 137 | 138 | // Subscriber provisioned product 139 | const subscriberProductId = serviceCatalog.Product.fromProductArn( 140 | this, 141 | "SubscriberProductId", 142 | cdk.Fn.importValue( 143 | `mxc-${cdk.Aws.REGION}-${environment.valueAsString}-sc-subscriber-productid` 144 | ) 145 | ); 146 | 147 | const subscriber = new serviceCatalog.CfnCloudFormationProvisionedProduct( 148 | this, 149 | "Subscriber", 150 | { 151 | provisionedProductName: `mediaexchange-${subscriberName.valueAsString}-subscriber`, 152 | productId: subscriberProductId.productId, 153 | provisioningArtifactName: "latest", 154 | provisioningParameters: [ 155 | { 156 | key: "Environment", 157 | value: environment.valueAsString, 158 | }, 159 | { 160 | key: "SubscriberName", 161 | value: subscriberName.valueAsString, 162 | }, 163 | { 164 | key: "SubscriberAccountId", 165 | value: subscriberAccountId.valueAsString, 166 | }, 167 | { 168 | key: "Email", 169 | value: "nomail@nomail.com", 170 | }, 171 | ], 172 | } 173 | ); 174 | 175 | // Agreement provisioned product 176 | const agreementProductId = serviceCatalog.Product.fromProductArn( 177 | this, 178 | "AgreementProductId", 179 | cdk.Fn.importValue( 180 | `mxc-${cdk.Aws.REGION}-${environment.valueAsString}-sc-agreement-productid` 181 | ) 182 | ); 183 | 184 | const agreement = new serviceCatalog.CfnCloudFormationProvisionedProduct( 185 | this, 186 | "Agreement", 187 | { 188 | provisionedProductName: `mediaexchange-${publisherName.valueAsString}-${subscriberName.valueAsString}-transfer-agreement`, 189 | productId: agreementProductId.productId, 190 | provisioningArtifactName: "latest", 191 | provisioningParameters: [ 192 | { 193 | key: "Environment", 194 | value: environment.valueAsString, 195 | }, 196 | { 197 | key: "PublisherName", 198 | value: publisherName.valueAsString, 199 | }, 200 | { 201 | key: "SubscriberName", 202 | value: 
subscriberName.valueAsString, 203 | }, 204 | ], 205 | } 206 | ); 207 | agreement.node.addDependency(publisher); 208 | agreement.node.addDependency(subscriber); 209 | 210 | // outputs 211 | new cdk.CfnOutput(this, "AgreementStackArn", { // NOSONAR 212 | // NOSONAR 213 | description: "Agreement Stack Arn", 214 | value: agreement.attrCloudformationStackArn, 215 | }); 216 | } 217 | } 218 | -------------------------------------------------------------------------------- /source/cdk/lib/fixity/lambda/test_fixity.py: -------------------------------------------------------------------------------- 1 | ####################################################################################################################### 2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # 3 | # # 4 | # Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance # 5 | # with the License. A copy of the License is located at # 6 | # # 7 | # http://www.apache.org/licenses/LICENSE-2.0 # 8 | # # 9 | # or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES # 10 | # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions # 11 | # and limitations under the License. # 12 | ####################################################################################################################### 13 | import json 14 | import os 15 | import unittest 16 | import boto3 17 | import mock 18 | import pytest 19 | from moto import mock_s3, mock_batch, mock_iam, mock_ec2 20 | from botocore.exceptions import ClientError 21 | 22 | S3_BUCKET_NAME = 'buckettestname' 23 | DEFAULT_REGION = 'us-east-1' 24 | S3_TEST_FILE_KEY = 'BigBunnySample.mp4' 25 | S3_TEST_FILE_CONTENT = [ 26 | {"company": "amazon", "price": 15}, 27 | {"company": "test", "price": 25} 28 | ] 29 | awsSolutionId = 'AwsSolution/SO0133/1.1.0' 30 | invocationId = 'test-invocation-id' 31 | taskId = 'test-task-id' 32 | 33 | @mock_batch 34 | @mock_ec2 35 | @mock_s3 36 | @mock_iam 37 | class TestFixityLambdaFunction(unittest.TestCase): 38 | def setUp(self): 39 | # S3 setup 40 | self.s3 = boto3.resource('s3', region_name=DEFAULT_REGION) 41 | self.s3_bucket = self.s3.create_bucket(Bucket=S3_BUCKET_NAME) 42 | self.s3.BucketVersioning(S3_BUCKET_NAME).enable() 43 | self.s3_bucket.put_object(Key=S3_TEST_FILE_KEY, 44 | Body=json.dumps(S3_TEST_FILE_CONTENT)) # Emulate file in ME bucket 45 | self.S3_TEST_FILE_VERSION = self.s3.Bucket(S3_BUCKET_NAME).Object(S3_TEST_FILE_KEY).version_id # Save file version 46 | 47 | client = boto3.client("batch") 48 | iam = boto3.resource("iam") 49 | service_role = iam.create_role( 50 | RoleName="BatchServiceRole", AssumeRolePolicyDocument="AWSBatchServiceRole" 51 | ) 52 | instance_profile = iam.create_instance_profile( 53 | InstanceProfileName="InstanceProfile" 54 | ) 55 | instance_profile.add_role(RoleName=service_role.name) 56 | 57 | ec2 = boto3.resource("ec2", region_name=DEFAULT_REGION) 58 | vpc = ec2.create_vpc(CidrBlock="172.16.0.0/16") # NOSONAR 59 | vpc.wait_until_available() 60 | subnet = ec2.create_subnet(CidrBlock="172.16.0.1/24", VpcId=vpc.id) # NOSONAR 61 | 62 | response = client.create_compute_environment( 63 | computeEnvironmentName="compute_environment", 64 | type="UNMANAGED", 65 | state="ENABLED", 66 | serviceRole=service_role.arn, 67 | ) 68 | 69 | compute_environment_arn = response.get('computeEnvironmentArn') 70 | 71 | # aws-batch job queue mock 72 | job_qs = 
client.create_job_queue( 73 | jobQueueName='test_job_q', 74 | state='ENABLED', 75 | priority=1, 76 | computeEnvironmentOrder=[ 77 | { 78 | 'order': 1, 79 | 'computeEnvironment': compute_environment_arn 80 | }, 81 | ] 82 | ) 83 | self.job_q_arn = job_qs.get('jobQueueArn') 84 | 85 | # aws-batch job definition mock 86 | job_definition = client.register_job_definition( 87 | jobDefinitionName='test_job_definition', 88 | type='container', 89 | containerProperties={ 90 | 'image': 'string', 91 | 'vcpus': 123, 92 | 'memory': 123 93 | }, 94 | ) 95 | self.job_definition_arn = job_definition.get('jobDefinitionArn') 96 | 97 | 98 | def test_submit_job_success(self): 99 | with mock.patch.dict(os.environ, {'SendAnonymizedMetric': 'No', "JOB_QUEUE": self.job_q_arn, "JOB_SIZE_SMALL": self.job_definition_arn, "JOB_SIZE_LARGE": self.job_definition_arn, 'JOB_SIZE_THRESHOLD': '10737418240', 'LogLevel': 'INFO', 'SOLUTION_IDENTIFIER': awsSolutionId}): 100 | from fixity_driver.app import _submit_job 101 | file_content = _submit_job(S3_BUCKET_NAME, S3_TEST_FILE_KEY) 102 | self.assertEqual(type(file_content), str) 103 | 104 | def test_submit_job_error(self): 105 | with mock.patch.dict(os.environ, {'SendAnonymizedMetric': 'No', "JOB_QUEUE": self.job_q_arn, "JOB_SIZE_SMALL": self.job_definition_arn, "JOB_SIZE_LARGE": self.job_definition_arn, 'JOB_SIZE_THRESHOLD': '10737418240', 'LogLevel': 'INFO', 'SOLUTION_IDENTIFIER': awsSolutionId}): 106 | from fixity_driver.app import _submit_job 107 | self.assertRaises(ClientError, _submit_job, S3_TEST_FILE_KEY, S3_BUCKET_NAME) 108 | 109 | def test_s3_batch_handler_success(self): 110 | with mock.patch.dict(os.environ, {'SendAnonymizedMetric': 'No', "JOB_QUEUE": self.job_q_arn, "JOB_SIZE_SMALL": self.job_definition_arn, "JOB_SIZE_LARGE": self.job_definition_arn, 'JOB_SIZE_THRESHOLD': '10737418240', 'LogLevel': 'INFO', 'SOLUTION_IDENTIFIER': awsSolutionId, 'AWS_REGION': 'us-east-1'}): 111 | from fixity_driver.app import s3_batch_handler 112 | event = {'invocationId': invocationId, 'job': {'id': '9357a3a7-5e34-4fa9-a1df-e1a4299b90b7'}, 'tasks': [{'taskId': 'taskId', 's3BucketArn': 'arn:aws:s3:::buckettestname', 's3Key': S3_TEST_FILE_KEY, 's3VersionId': None}], 'invocationSchemaVersion': '1.0'} 113 | file_content = s3_batch_handler(event, '_') 114 | self.assertEqual(file_content.get('results')[0].get('resultCode'), 'Succeeded') 115 | 116 | def test_s3_batch_handler_error(self): 117 | with mock.patch.dict(os.environ, {'SendAnonymizedMetric': 'No', "JOB_QUEUE": self.job_q_arn, "JOB_SIZE_SMALL": self.job_definition_arn, "JOB_SIZE_LARGE": self.job_definition_arn, 'JOB_SIZE_THRESHOLD': '10737418240', 'LogLevel': 'INFO', 'SOLUTION_IDENTIFIER': awsSolutionId, 'AWS_REGION': 'us-east-1'}): 118 | from fixity_driver.app import s3_batch_handler 119 | event = {'invocationId': invocationId, 'job': {'id': '9357a3a7-5e34-4fa9-a1df-e1a4299b90b7'}, 'tasks': [{'taskId': 'taskId', 's3BucketArn': 'arn:aws:s3:::buckettestname', 's3Key': 'BigBunnySamp.mp4', 's3VersionId': None}], 'invocationSchemaVersion': '1.0'} 120 | file_content = s3_batch_handler(event, '_') 121 | self.assertEqual(file_content, {'invocationSchemaVersion': '1.0', 'treatMissingKeysAs': 'PermanentFailure', 'invocationId': invocationId, 'results': [{'taskId': 'taskId', 'resultCode': 'PermanentFailure', 'resultString': '404: Not Found'}]}) 122 | 123 | def test_s3_api_handler_success(self): 124 | with mock.patch.dict(os.environ, {'SendAnonymizedMetric': 'No', "JOB_QUEUE": self.job_q_arn, "JOB_SIZE_SMALL": self.job_definition_arn, 
"JOB_SIZE_LARGE": self.job_definition_arn, 'JOB_SIZE_THRESHOLD': '10737418240', 'LogLevel': 'INFO', 'SOLUTION_IDENTIFIER': awsSolutionId, 'AWS_REGION': 'us-east-1'}): 125 | from fixity_driver.app import api_handler 126 | event = {'queryStringParameters': {'bucket': 'buckettestname', 'key': S3_TEST_FILE_KEY}, 'invocationId': invocationId,'invocationSchemaVersion': '1.0'} 127 | file_content = api_handler(event, '_') 128 | self.assertEqual(file_content.get('statusCode'), 400) 129 | 130 | def test_s3_api_handler_error(self): 131 | with mock.patch.dict(os.environ, {'SendAnonymizedMetric': 'No', "JOB_QUEUE": self.job_q_arn, "JOB_SIZE_SMALL": self.job_definition_arn, "JOB_SIZE_LARGE": self.job_definition_arn, 'JOB_SIZE_THRESHOLD': '10737418240', 'LogLevel': 'INFO', 'SOLUTION_IDENTIFIER': awsSolutionId, 'AWS_REGION': 'us-east-1'}): 132 | from fixity_driver.app import api_handler 133 | event = {'queryStringParameters': {'bucket': 'buckettestname', 'key': 'BigBunnySamp.mp4'}, 'invocationId': invocationId,'invocationSchemaVersion': '1.0'} 134 | file_content = api_handler(event, '_') 135 | self.assertEqual(file_content, {'statusCode': 500, 'body': '{"Error": {"Code": "404", "Message": "Not Found"}}'}) 136 | 137 | 138 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ## Notice 2 | 3 | **This solution is no longer being maintained.** 4 | 5 | --- 6 | 7 | **[🚀 Solution Landing Page](https://aws.amazon.com/solutions/implementations/media-exchange-on-aws/)** | **[🚧 Feature request](https://github.com/aws-solutions/media-exchange-on-aws/issues/new?assignees=&labels=feature-request%2C+enhancement&template=feature_request.md&title=)** | **[🐛 Bug Report](https://github.com/aws-solutions/media-exchange-on-aws/issues/new?assignees=&labels=bug%2C+triage&template=bug_report.md&title=)** 8 | 9 | Note: If you want to use the solution without building from source, navigate to Solution Landing Page 10 | 11 | ## Table of contents 12 | 13 | - [Solution Overview](#solution-overview) 14 | - [Architecture Diagram](#architecture-diagram) 15 | - [Solution Components](#solution-components) 16 | - [Onboarding Tool](#onboarding-tool) 17 | - [AutoIngest](#autoingest) 18 | - [MediaSync](#mediasync) 19 | - [Fixity](#fixity) 20 | - [Customizing the Solution](#customizing-the-solution) 21 | - [Usage](#usage) 22 | - [Developers](#developers) 23 | - [File Structure](#file-structure) 24 | - [License](#license) 25 | 26 | 27 | 28 | # Solution Overview 29 | 30 | Traditional file transfer services for Media supply chain are expensive, can add unnecessary hours to a workflow making quick-turn challenging, and not optimized for in-cloud media ecosystems. AWS Customers and ISV Partners can now take advantage of utilizing AWS as a common media storage foundation to create direct hand-offs within the ecosystem. Using this new Solution, called Media Exchange On AWS, AWS Customers and ISV Partners can ensure optimal quality, sustain a consistent understanding of the asset state and confirm asset receipt. The goal of this project is to re-examine media supply-chain with cloud-optimizations in mind, and define an asset transfer solution that can be deployed with CloudFormation templates. 31 | 32 | The Media Exchange Solution puts a shared object store (S3 bucket) between publishers and subscribers in a separate, secured AWS account. 
Publishers copy the assets into this shared storage area so that the subscribers can pull the content from the S3 bucket. The assets do not leave the S3 data plane in this process, so (a) there are no per-GB data egress and transfer costs within the same Region, (b) egress, schedule, and delivery-time dependencies are eliminated, (c) transfers are extremely fast, and (d) there are no servers to manage. The assets are secure by default, encrypted at rest and in transit. 33 | In the standard deployment model, assets are stored in Media Exchange only for the purposes of transfer. Each publisher-subscriber transfer relationship gets its own S3 bucket to share assets. Publishers have write permissions to this bucket; a subscriber can only view assets under their assigned bucket and import assets that are shared with them. Publishers and subscribers perform these operations from the respective AWS accounts they have onboarded into Media Exchange. The primary benefit of this process is that there are no credentials to share between publishers and subscribers. All operations on the assets are tracked via AWS CloudTrail and S3 logs. The assets shared in the Media Exchange account are lifecycle-deleted after a publisher-defined period. 34 | This is a seamless addition to the current workflow for your customers who use S3 as part of their media supply chain. It uses the standard S3 interface, which means most of the tools and services you are using today will continue to work. This solution can also integrate with native AWS file transfer offerings, such as DataSync and Snow\* devices, to help transfer assets between physical locations for customers who do not currently integrate S3 into the media supply chain. You can also use this to move content across large geographical distances by leveraging AWS global infrastructure. 35 | 36 | For instructions on how to build applications with Media Exchange on AWS, read the API reference and builder's guide in the [Implementation Guide](https://docs.aws.amazon.com/solutions/latest/media-exchange-on-aws/welcome.html). 37 | 38 | Please refer to the [FAQs](docs/faqs.md) for more details. 39 | 40 | 41 | 42 | # Architecture Diagram 43 | 44 | ![Architecture](images/main.png) 45 | 46 | The Media Exchange On AWS solution helps build a transfer architecture that puts a shared S3 bucket between publisher and subscribers. In addition, it enables S3 events from the shared bucket to be routed to the publishers and subscribers over EventBridge and SNS. The shared S3 bucket at the core of the architecture is configured with a bucket policy so that the publisher account (or a designated role in the publisher account) has read/write permissions and the subscriber account (or a designated role in the subscriber account) has permissions to read from this bucket; a simplified sketch of such a policy is shown below. This model allows assets to be transferred from publisher to subscriber without having to share credentials. The assets in flight are secured using AWS security best practices. You can read more about security [here](docs/security.md). 47 | The solution is designed to help you build this target transfer architecture. You can manage an arbitrary number of publishers, subscribers, and their relationships out of the same deployment. When you deploy the solution, it enables a set of products in AWS Service Catalog so that a Media Exchange administrator can onboard new publishers and subscribers and establish a relationship between them, enabling the transfer architecture shown in the diagram. The base system gets deployed in the dedicated account, but other optional components can be deployed in the publisher and subscriber accounts, further simplifying the asset-transfer workflow.
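As a rough illustration of that bucket policy model (the account IDs and bucket name are placeholders; the policy the solution actually deploys is generated per transfer relationship and is more restrictive):

```
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "PublisherReadWrite",
      "Effect": "Allow",
      "Principal": { "AWS": "arn:aws:iam::111111111111:root" },
      "Action": ["s3:PutObject", "s3:GetObject", "s3:ListBucket"],
      "Resource": [
        "arn:aws:s3:::example-exchange-bucket",
        "arn:aws:s3:::example-exchange-bucket/*"
      ]
    },
    {
      "Sid": "SubscriberRead",
      "Effect": "Allow",
      "Principal": { "AWS": "arn:aws:iam::222222222222:root" },
      "Action": ["s3:GetObject", "s3:ListBucket"],
      "Resource": [
        "arn:aws:s3:::example-exchange-bucket",
        "arn:aws:s3:::example-exchange-bucket/*"
      ]
    }
  ]
}
```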
48 | 49 | 50 | 51 | # Solution Components 52 | 53 | 54 | 55 | ## Onboarding Tool 56 | 57 | ![Onboarding tool](images/sc.jpeg) 58 | 59 | When you deploy the Media Exchange on AWS solution, it adds deployable products on AWS Service Catalog. AWS Service Catalog deploys infrastructure for a number of publisher and subscriber transfers by deploying a unique, isolated set of resources for each of the transfer relationships. 60 | 61 | 62 | 63 | ## AutoIngest 64 | 65 | Subscribers to a MediaExchange bucket have the option to automatically ingest assets using this component. It moves assets from Media Exchange into a subscriber-owned S3 bucket. This optional component is deployed in the subscriber’s account. See [here](source/cdk/lib/autoingest) 66 | 67 | 68 | 69 | ## MediaSync 70 | 71 | This optional utility moves assets between two Amazon S3 buckets. When you deploy the solution, it enables a new toolset in the AWS Management Console that helps move large (100s of GBs) files or hundreds of thousands of small files. The MediaSync utility scales up by running the copy operation in parallel across thousands of concurrent processes. It can handle file sizes up to 5 TB, is resilient, and is cost effective. The utility uses S3 server-side copy to move assets between buckets and AWS Fargate Spot for its compute environment. For details, go [here](source/cdk/lib/mediasync) 72 | 73 | 74 | 75 | ## Fixity 76 | 77 | This optional utility computes checksums at scale by publishers (at source) or by subscribers (at destination) to ensure file integrity. It uses AWS Batch and Amazon Elastic Compute Cloud (Amazon EC2) Spot Instances to orchestrate the computation infrastructure. It calculates checksums by streaming the objects directly from Amazon S3, so there is no need for local instance storage. For details, go [here](source/cdk/lib/fixity) 78 | 79 | 80 | 81 | # Customizing the Solution 82 | 83 | Please refer to the developer guide [here](docs/developer.md) 84 | 85 | 86 | 87 | # Usage 88 | 89 | You will need three AWS accounts to deploy this effectively: (a) publisher, (b) subscriber, and (c) MediaExchange. The CloudFormation templates are deployed in the (c) MediaExchange account. It is also possible to install in a single account for testing. See the implementation guide for instructions. 90 | 91 | 92 | 93 | ## Developers 94 | 95 | Please refer to the developer guide [here](docs/developer.md) 96 | 97 | 98 | 99 | # File structure 100 | 101 |
102 | ├── deployment                          [folder containing build scripts]
103 | ├── docs                                [folder containing documentation]
104 | │   ├── developer.md
105 | │   ├── faqs.md
106 | │   └── security.md
107 | ├── images                              [folder containing images]
108 | ├── tests                               [folder containing integration/system tests]
109 | │   ├── deployment                      [folder containing build scripts for tests]
110 | │   └── python
111 |     └── tools                               [folder containing the optional tools/utilities]
112 |     ├── autoingest                      [ingest media exchange assets into subscriber’s s3 bucket]
113 |     ├── fixity                          [checksums at scale]
114 |     └── mediasync                       [easily move assets between s3 buckets]
115 | 
116 | 117 | 118 | 119 | --- 120 | 121 | ## Collection of operational metrics 122 | 123 | This solution collects anonymized operational metrics to help AWS improve the quality and features of the solution. For more information, including how to disable this capability, please see the [implementation guide](https://docs.aws.amazon.com/solutions/latest/media-exchange-on-aws/welcome.html). 124 | 125 | --- 126 | 127 | # License 128 | 129 | See license [here](https://github.com/aws-solutions/media-exchange-on-aws/blob/main/LICENSE.txt) 130 | -------------------------------------------------------------------------------- /source/cdk/lib/mediasync/lambda/mediasync_driver/app.py: -------------------------------------------------------------------------------- 1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | import os 5 | import logging 6 | import boto3 7 | import json 8 | import urllib 9 | import jsonpickle 10 | from botocore.exceptions import ClientError 11 | import unicodedata 12 | from botocore import config 13 | 14 | solution_identifier= os.environ['SOLUTION_IDENTIFIER'] 15 | 16 | user_agent_extra_param = {"user_agent_extra":solution_identifier} 17 | 18 | presetConfig = config.Config() 19 | if os.environ['SendAnonymizedMetric'] == 'Yes': 20 | presetConfig = config.Config(**user_agent_extra_param) 21 | 22 | logger = logging.getLogger() 23 | logger.setLevel(os.environ['LogLevel']) 24 | 25 | batchclient = boto3.client('batch', config=presetConfig) 26 | s3client = boto3.client('s3', config=presetConfig) 27 | 28 | class ObjectDeletedError(Exception): 29 | pass 30 | 31 | class UnsupportedStorageClassError(Exception): 32 | pass 33 | 34 | class UnsupportedTextFormatError(Exception): 35 | pass 36 | 37 | def get_bucket_region(bucket): 38 | 39 | bucket_location_resp = s3client.get_bucket_location( 40 | Bucket=bucket 41 | ) 42 | bucket_region=bucket_location_resp['LocationConstraint'] or 'us-east-1' # LocationConstraint is None for us-east-1 43 | 44 | logger.info("bucket_name="+ bucket +",bucket_region=" + bucket_region) 45 | 46 | return bucket_region 47 | 48 | def pre_flight_check(source_bucket, source_key): 49 | #preflight checks _read_ 50 | logger.debug("preflight check start") 51 | 52 | pre_flight_response = s3client.head_object( 53 | Bucket=source_bucket, 54 | Key=source_key 55 | ) 56 | logger.debug('## PREFLIGHT_RESPONSE\r' + jsonpickle.encode(dict(**pre_flight_response))) 57 | logger.debug("preflight check end") 58 | return pre_flight_response 59 | 60 | 61 | def check_if_deleted(source_key, pre_flight_response): 62 | 63 | if 'DeleteMarker' in pre_flight_response and pre_flight_response['DeleteMarker'] == True: 64 | raise ObjectDeletedError( source_key + ' is deleted') 65 | return "Exists" 66 | 67 | def check_if_supported_storage_class(source_key, pre_flight_response): 68 | 69 | unsupported_storage_class = False 70 | 71 | #Storage class check 72 | if 'StorageClass' in pre_flight_response and pre_flight_response['StorageClass'] in ['GLACIER', 'DEEP_ARCHIVE']: 73 | #check restore status: 74 | if 'Restore' in pre_flight_response: 75 | restore = pre_flight_response['Restore'] 76 | logger.debug(restore) 77 | if 'ongoing-request="false"' not in restore: 78 | logger.info('restore is in progress') 79 | raise UnsupportedStorageClassError( source_key + ' is restoring from ' + pre_flight_response['StorageClass']) 80 | else: 81 | unsupported_storage_class = True 82 | 83 | if (unsupported_storage_class): 84 | raise UnsupportedStorageClassError( source_key + ' is in unsupported StorageClass
' + pre_flight_response['StorageClass']) 85 | 86 | #keys must be in Unicode Normalization Form C 87 | if not unicodedata.is_normalized('NFC', source_key): 88 | raise UnsupportedTextFormatError(source_key + ' is not in Normalized Form C') 89 | 90 | def submit_job(s3_batch_job_id, source_bucket, source_key, destination_bucket, size): 91 | 92 | source_bucket_region = get_bucket_region(source_bucket) 93 | 94 | job_definition = os.environ['JOB_DEFINITION'] if get_bucket_region(destination_bucket) == source_bucket_region else os.environ['JOB_DEFINITION_X_REGION'] 95 | 96 | logger.debug("job submission start") 97 | 98 | #submit job 99 | response = batchclient.submit_job( 100 | jobName="MediaSyncJob", 101 | jobQueue=os.environ['JOB_QUEUE'], 102 | jobDefinition=job_definition, 103 | parameters={ 104 | 'SourceS3Uri': 's3://' + source_bucket + '/' + source_key, 105 | 'DestinationS3Uri': 's3://' + destination_bucket + '/' + source_key, 106 | 'Size': str(size), 107 | 'SourceBucketRegion': source_bucket_region 108 | }, 109 | tags={ 110 | 'S3BatchJobId': s3_batch_job_id, 111 | 'SourceBucket': source_bucket, 112 | 'DestinationBucket': destination_bucket, 113 | 'Key': source_key, 114 | 'Size': str(size) 115 | } 116 | ) 117 | 118 | logger.debug('## BATCH_RESPONSE\r' + jsonpickle.encode(dict(**response))) 119 | logger.debug("job submission complete") 120 | 121 | job_id = '#' if 'jobId' not in response else response['jobId'] 122 | 123 | return job_id 124 | 125 | 126 | def in_place_copy(source_bucket, source_key, destination_bucket): 127 | 128 | 129 | copy_response = s3client.copy_object( 130 | Bucket=destination_bucket, 131 | CopySource={'Bucket': source_bucket, 'Key': source_key}, 132 | Key=source_key 133 | ) 134 | 135 | logger.debug('## COPY_RESPONSE\r' + jsonpickle.encode(dict(**copy_response))) 136 | 137 | def is_can_submit_jobs(): 138 | 139 | # we don't have a good way of checking how many jobs are pending yet 140 | # without having to build an API 141 | 142 | disable_pending_jobs_test = os.environ['DISABLE_PENDING_JOBS_CHECK'] 143 | 144 | if (disable_pending_jobs_test == 'False'): 145 | ##check how many jobs are pending 146 | listjobs = batchclient.list_jobs( 147 | jobQueue=os.environ['JOB_QUEUE'], 148 | jobStatus='RUNNABLE', 149 | maxResults=int(os.environ['MAX_NUMBER_OF_PENDING_JOBS']) 150 | ) 151 | 152 | if ('nextToken' in listjobs): 153 | return False 154 | else: 155 | logger.debug("Pending jobs check is disabled") 156 | 157 | return True 158 | 159 | def lambda_handler(event, _): 160 | 161 | logger.debug('## EVENT\r' + jsonpickle.encode(dict(**event))) 162 | 163 | destination_bucket = os.environ['DESTINATION_BUCKET_NAME'] 164 | 165 | s3_batch_job_id = event['job']['id'] 166 | invocation_id = event['invocationId'] 167 | invocation_schema_version = event['invocationSchemaVersion'] 168 | 169 | task_id = event['tasks'][0]['taskId'] 170 | source_key = urllib.parse.unquote_plus(event['tasks'][0]['s3Key']) 171 | s3_bucket_arn = event['tasks'][0]['s3BucketArn'] 172 | source_bucket = s3_bucket_arn.split(':::')[-1] 173 | 174 | results = [] 175 | # Prepare result code and string 176 | result_code = None 177 | result_string = None 178 | 179 | minsizeforbatch = int(os.environ['MN_SIZE_FOR_BATCH_IN_BYTES']) 180 | 181 | # Copy the object to the destination bucket under the same key 182 | try: 183 | 184 | pre_flight_response = pre_flight_check(source_bucket, source_key) 185 | 186 | check_if_deleted(source_key, pre_flight_response) 187 | size = pre_flight_response['ContentLength'] 188 | 189 | if (size > minsizeforbatch): 190 | 191 | 
check_if_supported_storage_class(source_key, pre_flight_response) 192 | 193 | if not is_can_submit_jobs(): 194 | 195 | logger.info("too many jobs pending. returning slowdown") 196 | result_code = 'TemporaryFailure' 197 | result_string = 'Retry request to batch due to too many pending jobs.' 198 | 199 | else: 200 | 201 | batch_job_id = submit_job(s3_batch_job_id, source_bucket, source_key, destination_bucket, size) 202 | result_code = 'Succeeded' 203 | result_string = 'https://console.aws.amazon.com/batch/v2/home?region=' + os.environ['AWS_REGION'] + '#jobs/detail/' + batch_job_id 204 | 205 | else: 206 | # below the batch-size threshold; copy directly in Lambda 207 | in_place_copy(source_bucket, source_key, destination_bucket) 208 | result_string = 'Lambda copy complete' 209 | result_code = 'Succeeded' 210 | 211 | 212 | except ClientError as e: 213 | # If the request timed out, mark it as a temporary failure; 214 | # S3 Batch Operations will retry the task. If 215 | # any other exceptions are received, mark as permanent failure. 216 | error_code = e.response['Error']['Code'] 217 | error_message = e.response['Error']['Message'] 218 | 219 | logger.debug(error_message) 220 | 221 | if error_code == 'TooManyRequestsException': 222 | result_code = 'TemporaryFailure' 223 | result_string = 'Retry request to batch due to throttling.' 224 | elif error_code == 'RequestTimeout': 225 | result_code = 'TemporaryFailure' 226 | result_string = 'Retry request to Amazon S3 due to timeout.' 227 | elif (error_code == '304'): 228 | result_code = 'Succeeded' 229 | result_string = 'Not modified' 230 | elif (error_code == 'SlowDown'): 231 | result_code = 'TemporaryFailure' 232 | result_string = 'Retry request to s3 due to throttling.' 233 | else: 234 | result_code = 'PermanentFailure' 235 | result_string = '{}: {}'.format(error_code, error_message) 236 | 237 | except Exception as e: 238 | # Catch all exceptions to permanently fail the task 239 | result_code = 'PermanentFailure' 240 | result_string = 'Exception: {}'.format(e) 241 | 242 | finally: 243 | results.append({ 244 | 'taskId': task_id, 245 | 'resultCode': result_code, 246 | 'resultString': result_string 247 | }) 248 | logger.info(result_code + " # " + result_string) 249 | 250 | return { 251 | 'invocationSchemaVersion': invocation_schema_version, 252 | 'treatMissingKeysAs': 'PermanentFailure', 253 | 'invocationId': invocation_id, 254 | 'results': results 255 | } 256 | --------------------------------------------------------------------------------
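The driver above is invoked by S3 Batch Operations, one task per manifest entry. For local poking, a minimal harness like the following can feed it a synthetic event; every environment value, bucket, and ARN here is a placeholder, and the event mirrors only the fields the handler actually reads.

```python
# Sketch: exercise the MediaSync driver locally with a synthetic
# S3 Batch Operations event. All names and values are hypothetical.
import os

# Environment the module reads at import time (example values only)
os.environ.update({
    'SOLUTION_IDENTIFIER': 'AwsSolution/SO0133/test',
    'SendAnonymizedMetric': 'No',
    'LogLevel': 'INFO',
    'DESTINATION_BUCKET_NAME': 'example-destination-bucket',
    'MN_SIZE_FOR_BATCH_IN_BYTES': str(5 * 1024 ** 3),  # 5 GiB threshold
    'JOB_QUEUE': 'example-job-queue',
    'JOB_DEFINITION': 'example-job-definition',
    'JOB_DEFINITION_X_REGION': 'example-job-definition-x-region',
    'MAX_NUMBER_OF_PENDING_JOBS': '96',
    'DISABLE_PENDING_JOBS_CHECK': 'True',
    'AWS_REGION': 'us-east-1',
})

from app import lambda_handler  # import after env is set; the module reads env at import

event = {
    'invocationSchemaVersion': '1.0',
    'invocationId': 'example-invocation-id',
    'job': {'id': 'example-s3batch-job-id'},
    'tasks': [{
        'taskId': 'task-0001',
        's3Key': 'path/to/source-object.mxf',
        's3BucketArn': 'arn:aws:s3:::example-source-bucket',
    }],
}

print(lambda_handler(event, None))
```

Note that the copy path still calls live AWS APIs (`head_object`, then `copy_object` or `submit_job`), so credentials and real buckets are needed for an end-to-end run; the repository also ships a sample event at `source/cdk/lib/mediasync/lambda/events/event.json`.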
/source/cdk/lib/autoingest/autoingest-stack.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance 5 | * with the License. A copy of the License is located at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES 10 | * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions 11 | * and limitations under the License. 
12 | */ 13 | import * as cdk from "aws-cdk-lib"; 14 | import { Construct } from "constructs"; 15 | import * as iam from "aws-cdk-lib/aws-iam"; 16 | import * as kms from "aws-cdk-lib/aws-kms"; 17 | import * as logs from "aws-cdk-lib/aws-logs"; 18 | import * as lambda from "aws-cdk-lib/aws-lambda"; 19 | import * as lambdaEventSources from "aws-cdk-lib/aws-lambda-event-sources"; 20 | import * as sns from "aws-cdk-lib/aws-sns"; 21 | import * as snsSubs from "aws-cdk-lib/aws-sns-subscriptions"; 22 | import * as sqs from "aws-cdk-lib/aws-sqs"; 23 | 24 | export class AutoIngestStack extends cdk.Stack { 25 | constructor(scope: Construct, id: string, props?: cdk.StackProps) { 26 | super(scope, id, props); 27 | /** 28 | * CloudFormation Template Description 29 | */ 30 | this.templateOptions.description = `CDK template for AutoIngest.`; 31 | 32 | /** 33 | * Cfn Parameters 34 | */ 35 | const environment = new cdk.CfnParameter(this, "Environment", { 36 | type: "String", 37 | description: "Deployment Environment Name", 38 | allowedPattern: "[A-Za-z0-9]+", 39 | default: "dev", 40 | constraintDescription: 41 | "Malformed input-Parameter Environment must only contain uppercase and lowercase letters and numbers", 42 | maxLength: 10, 43 | minLength: 2, 44 | }); 45 | const notificationTopicArn = new cdk.CfnParameter( 46 | this, 47 | "NotificationTopicArn", 48 | { 49 | type: "String", 50 | description: 51 | "MediaExchange Notifications topic from SubscriberOnBoardingSummary", 52 | } 53 | ); 54 | const destinationBucket = new cdk.CfnParameter(this, "DestinationBucket", { 55 | type: "String", 56 | description: "Destination S3 Bucket Name", 57 | }); 58 | const mediaExchangeBucket = new cdk.CfnParameter( 59 | this, 60 | "MediaExchangeBucket", 61 | { 62 | type: "String", 63 | description: "MediaExchange S3 Bucket Name", 64 | } 65 | ); 66 | const destinationPrefix = new cdk.CfnParameter(this, "DestinationPrefix", { 67 | type: "String", 68 | description: "Destination prefix for S3 Bucket ingestion", 69 | default: "ingest", 70 | }); 71 | 72 | /** 73 | * Template metadata 74 | */ 75 | this.templateOptions.metadata = { 76 | "AWS::CloudFormation::Interface": { 77 | ParameterGroups: [ 78 | { 79 | Label: { default: "Deployment Configuration" }, 80 | Parameters: [environment.logicalId], 81 | }, 82 | { 83 | Label: { default: "Copy Configuration" }, 84 | Parameters: [ 85 | notificationTopicArn.logicalId, 86 | mediaExchangeBucket.logicalId, 87 | destinationBucket.logicalId, 88 | destinationPrefix.logicalId, 89 | ], 90 | }, 91 | ], 92 | }, 93 | }; 94 | 95 | /** 96 | * Mapping for sending anonymized metrics to AWS Solution Builders API 97 | */ 98 | new cdk.CfnMapping(this, 'AnonymizedData', { // NOSONAR 99 | mapping: { 100 | SendAnonymizedData: { 101 | Data: 'Yes' 102 | } 103 | } 104 | }); 105 | 106 | /** 107 | * Roles and policy for lambda creation 108 | */ 109 | const driverFunctionRole = new iam.Role( 110 | this, 111 | "AWSLambdaBasicExecutionRole", 112 | { 113 | assumedBy: new iam.ServicePrincipal("lambda.amazonaws.com"), 114 | } 115 | ); 116 | 117 | const customResourcePolicy = new iam.Policy(this, "CustomResourcePolicy", { 118 | statements: [ 119 | new iam.PolicyStatement({ 120 | sid: "S3Write", 121 | resources: [ 122 | `arn:${cdk.Aws.PARTITION}:s3:::${destinationBucket.valueAsString}/*`, 123 | ], 124 | actions: [ 125 | "s3:GetObject", 126 | "s3:PutObject", 127 | "s3:PutObjectAcl", 128 | "s3:PutObjectVersionAcl", 129 | "s3:PutObjectTagging", 130 | "s3:PutObjectVersionTagging", 131 | "s3:ListBucket", 132 | ], 133 | }), 134 | 
new iam.PolicyStatement({ 135 | sid: "S3Read", 136 | resources: [ 137 | `arn:${cdk.Aws.PARTITION}:s3:::${mediaExchangeBucket.valueAsString}/*`, 138 | ], 139 | actions: [ 140 | "s3:GetObject", 141 | "s3:GetObjectVersion", 142 | "s3:GetObjectTagging", 143 | "s3:GetObjectVersionTagging", 144 | "s3:AbortMultipartUpload", 145 | "s3:ListMultipartUploadParts", 146 | ], 147 | }), 148 | new iam.PolicyStatement({ 149 | sid: "kms", 150 | resources: ["*"], 151 | actions: [ 152 | "kms:Encrypt", 153 | "kms:Decrypt", 154 | "kms:ReEncrypt*", 155 | "kms:GenerateDataKey*", 156 | "kms:DescribeKey", 157 | ], 158 | }), 159 | ], 160 | }); 161 | customResourcePolicy.attachToRole(driverFunctionRole); 162 | 163 | // KMS 164 | const kmsPolicy = new iam.PolicyDocument({ 165 | statements: [ 166 | new iam.PolicyStatement({ 167 | sid: "KeyManagement", 168 | effect: iam.Effect.ALLOW, 169 | actions: [ 170 | "kms:Create*", 171 | "kms:Describe*", 172 | "kms:Enable*", 173 | "kms:List*", 174 | "kms:Put*", 175 | "kms:Update*", 176 | "kms:Revoke*", 177 | "kms:Disable*", 178 | "kms:Get*", 179 | "kms:Delete*", 180 | "kms:TagResource", 181 | "kms:UntagResource", 182 | "kms:ScheduleKeyDeletion", 183 | "kms:CancelKeyDeletion", 184 | ], 185 | principals: [ 186 | new iam.ArnPrincipal( 187 | `arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:root` 188 | ), 189 | ], 190 | resources: ["*"], 191 | }), 192 | new iam.PolicyStatement({ 193 | sid: "Allow access for Key User (SNS Service Principal)", 194 | effect: iam.Effect.ALLOW, 195 | actions: ["kms:GenerateDataKey*", "kms:Decrypt"], 196 | principals: [new iam.ServicePrincipal("sns.amazonaws.com")], 197 | resources: ["*"], 198 | }), 199 | new iam.PolicyStatement({ 200 | sid: "Allow access for Key User (Lambda Function)", 201 | effect: iam.Effect.ALLOW, 202 | actions: ["kms:Decrypt"], 203 | principals: [new iam.ArnPrincipal(driverFunctionRole.roleArn)], 204 | resources: ["*"], 205 | }), 206 | ], 207 | }); 208 | 209 | const cmk = new kms.Key(this, "CMK", { 210 | description: "Symmetric Key for Encrypting Objects in Media Exchange", 211 | pendingWindow: cdk.Duration.days(7), 212 | enabled: true, 213 | enableKeyRotation: true, 214 | policy: kmsPolicy, 215 | }); 216 | 217 | const dlq = new sqs.Queue(this, "DLQ", { 218 | encryption: sqs.QueueEncryption.SQS_MANAGED, 219 | retentionPeriod: cdk.Duration.seconds(1209600), 220 | }); 221 | 222 | const nq = new sqs.Queue(this, "NQ", { 223 | encryption: sqs.QueueEncryption.KMS, 224 | retentionPeriod: cdk.Duration.seconds(86400), 225 | encryptionMasterKey: cmk, 226 | dataKeyReuse: cdk.Duration.seconds(86400), 227 | visibilityTimeout: cdk.Duration.seconds(900), 228 | }); 229 | 230 | // The Autoingest function 231 | const driverFunction = new lambda.Function(this, "DriverFunction", { 232 | runtime: lambda.Runtime.PYTHON_3_8, 233 | handler: "app.lambda_handler", 234 | description: "Lambda function triggered by MediaExchange SNS notifications via SQS", 235 | environment: { 236 | SOURCE_BUCKET_NAME: mediaExchangeBucket.valueAsString, 237 | DESTINATION_BUCKET_NAME: destinationBucket.valueAsString, 238 | DESTINATION_PREFIX: destinationPrefix.valueAsString, 239 | SOLUTION_IDENTIFIER: "AwsSolution/SO0133/__VERSION__-Autoingest", 240 | LogLevel: "INFO", 241 | SendAnonymizedMetric: cdk.Fn.findInMap('AnonymizedData', 'SendAnonymizedData', 'Data') 242 | }, 243 | functionName: `${cdk.Aws.STACK_NAME}-custom-resource`, 244 | role: driverFunctionRole, 245 | code: lambda.Code.fromAsset("lib/autoingest/lambda/autoingest_driver/"), 246 | timeout: cdk.Duration.seconds(900), 247 | 
deadLetterQueue: dlq, 248 | deadLetterQueueEnabled: true, 249 | }); 250 | driverFunction.node.addDependency(driverFunctionRole); 251 | driverFunction.node.addDependency(customResourcePolicy); 252 | 253 | const topic = sns.Topic.fromTopicArn( 254 | this, 255 | "MXCEvent", 256 | notificationTopicArn.valueAsString 257 | ); 258 | 259 | topic.addSubscription(new snsSubs.SqsSubscription(nq)); 260 | const eventSourceSQS = new lambdaEventSources.SqsEventSource(nq); 261 | 262 | driverFunction.addEventSource(eventSourceSQS); 263 | 264 | // Add log group 265 | new logs.LogGroup( // NOSONAR 266 | this, 267 | "driverFunctionLogGroup", 268 | { 269 | logGroupName: `/aws/lambda/${driverFunction.functionName}`, 270 | retention: logs.RetentionDays.ONE_MONTH, 271 | } 272 | ); 273 | } 274 | } 275 | -------------------------------------------------------------------------------- /source/cdk/lib/subscriber-stack.ts: -------------------------------------------------------------------------------- 1 | /** 2 | * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. 3 | * 4 | * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance 5 | * with the License. A copy of the License is located at 6 | * 7 | * http://www.apache.org/licenses/LICENSE-2.0 8 | * 9 | * or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES 10 | * OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions 11 | * and limitations under the License. 12 | */ 13 | 14 | import * as cdk from "aws-cdk-lib"; 15 | import { Construct } from "constructs"; 16 | import * as iam from "aws-cdk-lib/aws-iam"; 17 | import * as kms from "aws-cdk-lib/aws-kms"; 18 | import * as sns from "aws-cdk-lib/aws-sns"; 19 | import * as sqs from "aws-cdk-lib/aws-sqs"; 20 | import * as events from "aws-cdk-lib/aws-events"; 21 | 22 | export class SubscriberStack extends cdk.Stack { 23 | constructor(scope: Construct, id: string, props?: cdk.StackProps) { 24 | super(scope, id, props); 25 | /** 26 | * CloudFormation Template Description 27 | */ 28 | const solutionId = "SO0133"; 29 | const solutionName = "Media Exchange on AWS"; 30 | this.templateOptions.description = `(${solutionId}SUB) - ${solutionName} - setup a subscriber. 
Version: __VERSION__`; 31 | 32 | /** 33 | * Cfn Parameters 34 | */ 35 | const environment = new cdk.CfnParameter(this, "Environment", { 36 | type: "String", 37 | description: "Deployment Environment Name", 38 | allowedPattern: "[A-Za-z0-9]+", 39 | default: "dev", 40 | constraintDescription: 41 | "Malformed input-Parameter Environment must only contain uppercase and lowercase letters and numbers", 42 | maxLength: 10, 43 | minLength: 2, 44 | }); 45 | const subscriberName = new cdk.CfnParameter(this, "SubscriberName", { 46 | type: "String", 47 | description: "A name for the subscriber account", 48 | allowedPattern: "[A-Za-z0-9]+", 49 | constraintDescription: 50 | "Malformed input-Parameter SubscriberName must only contain uppercase and lowercase letters and numbers", 51 | maxLength: 64, 52 | minLength: 3, 53 | }); 54 | const email = new cdk.CfnParameter(this, "Email", { 55 | type: "String", 56 | description: "The email address for the transfer notifications.", 57 | allowedPattern: 58 | "^[_A-Za-z0-9-\\+]+(\\.[_A-Za-z0-9-]+)*@[A-Za-z0-9-]+(\\.[A-Za-z0-9]+)*(\\.[A-Za-z]{2,})$", 59 | }); 60 | const subscriberAccountId = new cdk.CfnParameter( 61 | this, 62 | "SubscriberAccountId", 63 | { 64 | type: "String", 65 | description: 66 | "The AWS accountId of the subscriber. This parameter is ignored if you specify SubscriberRole.", 67 | allowedPattern: "^\\d{12}$", 68 | constraintDescription: 69 | "Malformed input-Parameter SubscriberAccountId must be a 12 digit number", 70 | } 71 | ); 72 | const subscriberRole = new cdk.CfnParameter(this, "SubscriberRole", { 73 | type: "String", 74 | description: 75 | "Subscriber's Role. Defaults to arn:aws:iam::$SubscriberAccountId:root.", 76 | allowedPattern: "[A-Za-z0-9:/-]*", 77 | default: "", 78 | }); 79 | 80 | /** 81 | * Conditions 82 | */ 83 | const hasRole = new cdk.CfnCondition(this, "HasRole", { 84 | expression: cdk.Fn.conditionNot( 85 | cdk.Fn.conditionEquals(subscriberRole.valueAsString, "") 86 | ), 87 | }); 88 | 89 | /** 90 | * Template metadata 91 | */ 92 | this.templateOptions.metadata = { 93 | "AWS::CloudFormation::Interface": { 94 | ParameterGroups: [ 95 | { 96 | Label: { default: "Deployment Configuration" }, 97 | Parameters: [environment.logicalId], 98 | }, 99 | { 100 | Label: { default: "Subscriber Configuration" }, 101 | Parameters: [ 102 | subscriberName.logicalId, 103 | subscriberRole.logicalId, 104 | subscriberAccountId.logicalId, 105 | email.logicalId, 106 | ], 107 | }, 108 | ], 109 | }, 110 | }; 111 | 112 | // KMS 113 | const kmsPolicy = new iam.PolicyDocument({ 114 | statements: [ 115 | new iam.PolicyStatement({ 116 | sid: "KeyManagement", 117 | effect: iam.Effect.ALLOW, 118 | actions: [ 119 | "kms:Create*", 120 | "kms:Describe*", 121 | "kms:Enable*", 122 | "kms:List*", 123 | "kms:Put*", 124 | "kms:Update*", 125 | "kms:Revoke*", 126 | "kms:Disable*", 127 | "kms:Get*", 128 | "kms:Delete*", 129 | "kms:TagResource", 130 | "kms:UntagResource", 131 | "kms:ScheduleKeyDeletion", 132 | "kms:CancelKeyDeletion", 133 | "kms:GenerateDataKey*", 134 | "kms:Decrypt", 135 | ], 136 | principals: [ 137 | new iam.ArnPrincipal( 138 | `arn:${cdk.Aws.PARTITION}:iam::${cdk.Aws.ACCOUNT_ID}:root` 139 | ), 140 | ], 141 | resources: ["*"], 142 | }), 143 | new iam.PolicyStatement({ 144 | sid: "Allow access for Key User (SNS Service Principal)", 145 | effect: iam.Effect.ALLOW, 146 | actions: ["kms:GenerateDataKey*", "kms:Decrypt"], 147 | principals: [new iam.ServicePrincipal("sns.amazonaws.com")], 148 | resources: ["*"], 149 | }), 150 | new iam.PolicyStatement({ 151 | sid: 
"Allow access for Key User (Events Service Principal)", 152 | effect: iam.Effect.ALLOW, 153 | actions: ["kms:Decrypt", "kms:GenerateDataKey*"], 154 | principals: [new iam.ServicePrincipal("events.amazonaws.com")], 155 | resources: ["*"], 156 | }), 157 | new iam.PolicyStatement({ 158 | sid: "Allow access for Key User (x-account permissions for subscriber)", 159 | effect: iam.Effect.ALLOW, 160 | actions: ["kms:Decrypt", "kms:GenerateDataKey*"], 161 | principals: [ 162 | new iam.AccountPrincipal(subscriberAccountId.valueAsString), 163 | ], 164 | resources: ["*"], 165 | }), 166 | ], 167 | }); 168 | 169 | const cmk = new kms.Key(this, "CMK", { 170 | description: "Symmetric Key for Encrypting Objects in Media Exchange", 171 | pendingWindow: cdk.Duration.days(7), 172 | enabled: true, 173 | enableKeyRotation: true, 174 | policy: kmsPolicy, 175 | }); 176 | 177 | // SNS Topic for subscribers 178 | const notificationTopic = new sns.Topic(this, "NotificationTopic", { 179 | displayName: "SNS Topic for MediaExchange Subscriber Notifications", 180 | masterKey: cmk, 181 | }); 182 | 183 | notificationTopic.addToResourcePolicy( 184 | new iam.PolicyStatement({ 185 | sid: "Allow_Publish_Events", 186 | effect: iam.Effect.ALLOW, 187 | principals: [new iam.ServicePrincipal("events.amazonaws.com")], 188 | actions: ["sns:Publish"], 189 | resources: [notificationTopic.topicArn], 190 | }) 191 | ); 192 | 193 | notificationTopic.addToResourcePolicy( 194 | new iam.PolicyStatement({ 195 | sid: "Allow_X_Account_Subscribe", 196 | effect: iam.Effect.ALLOW, 197 | principals: [ 198 | new iam.AccountPrincipal(subscriberAccountId.valueAsString), 199 | ], 200 | actions: ["sns:Subscribe"], 201 | resources: [notificationTopic.topicArn], 202 | }) 203 | ); 204 | 205 | const dlq = new sqs.Queue(this, "DLQ", { 206 | encryption: sqs.QueueEncryption.KMS, 207 | encryptionMasterKey: cmk, 208 | dataKeyReuse: cdk.Duration.seconds(86400), 209 | retentionPeriod: cdk.Duration.seconds(1209600), 210 | }); 211 | 212 | dlq.addToResourcePolicy( 213 | new iam.PolicyStatement({ 214 | effect: iam.Effect.ALLOW, 215 | principals: [new iam.ServicePrincipal("events.amazonaws.com")], 216 | actions: ["sqs:SendMessage"], 217 | resources: [dlq.queueArn], 218 | }) 219 | ); 220 | 221 | new events.CfnEventBusPolicy( // NOSONAR 222 | this, 223 | "PutEventsPermission", 224 | { 225 | action: "events:PutEvents", 226 | principal: subscriberAccountId.valueAsString, 227 | statementId: `Sid${subscriberName.valueAsString}${environment.valueAsString}${cdk.Aws.REGION}Events`, 228 | } 229 | ); 230 | 231 | // Subscriber role is root role if not specified 232 | const subscriberRoleOutput = cdk.Fn.conditionIf( 233 | hasRole.logicalId, 234 | subscriberRole.valueAsString, 235 | `arn:${cdk.Aws.PARTITION}:iam::${subscriberAccountId.valueAsString}:root` 236 | ); 237 | 238 | // outputs 239 | new cdk.CfnOutput(this, "SubscriberNotificationsEmail", { // NOSONAR 240 | // NOSONAR 241 | description: "Subscriber's notifications Email Address", 242 | value: email.valueAsString, 243 | exportName: `mxc-${cdk.Aws.REGION}-${environment.valueAsString}-subscriber-${subscriberName.valueAsString}-email`, 244 | }); 245 | new cdk.CfnOutput(this, "SubscriberRoleOut", { // NOSONAR 246 | // NOSONAR 247 | description: "Subscriber's S3 Access role", 248 | value: subscriberRoleOutput.toString(), 249 | exportName: `mxc-${cdk.Aws.REGION}-${environment.valueAsString}-subscriber-${subscriberName.valueAsString}-role`, 250 | }); 251 | new cdk.CfnOutput(this, "SubscriberNotificationsTopic", { // NOSONAR 252 | // 
NOSONAR 253 | description: "Subscriber's notifications topic name", 254 | value: notificationTopic.topicArn, 255 | exportName: `mxc-${cdk.Aws.REGION}-${environment.valueAsString}-subscriber-${subscriberName.valueAsString}-notificationtopic`, 256 | }); 257 | new cdk.CfnOutput(this, "SubscriberNotificationsDLQ", { // NOSONAR 258 | // NOSONAR 259 | description: "Subscriber's notifications dead letter queue Arn", 260 | value: dlq.queueArn, 261 | exportName: `mxc-${cdk.Aws.REGION}-${environment.valueAsString}-subscriber-${subscriberName.valueAsString}-notificationdlq`, 262 | }); 263 | } 264 | } 265 | -------------------------------------------------------------------------------- /docs/faqs.md: -------------------------------------------------------------------------------- 1 | ## Table of contents 2 | - [General](#general) 3 | - [Security](#security) 4 | - [Management](#management) 5 | - [Pricing](#pricing) 6 | - [Performance and Scalability](#performance-and-scalability) 7 | - [Reliability](#reliability) 8 | 9 | 10 | # General 11 | 12 | ### What is Media Exchange on AWS? 13 | 14 | It is a simple, secure, reliable, and scalable way of transferring files over AWS. It uses Amazon S3 as the underlying transport layer; Amazon S3 offers 11 9's of durability, industry-standard security and compliance controls, and very high bandwidth for transfers. There are no credentials to share and manage. On top of that, it is more cost effective to own and operate when compared to 3rd-party transfer services. 15 | 16 | ### What are the benefits of Media Exchange on AWS? 17 | 18 | Traditional data transfer services are expensive, with licensing and per-gigabyte transfer fees. When compared to traditional file transfer services, Media Exchange on AWS is more secure and cheaper to operate, with extremely high transfer speeds between senders and recipients. It facilitates direct account-to-account transfers in AWS, thus minimizing egress. There are no licensing fees or servers to manage. 19 | 20 | It improves overall security with compliance controls for access and audit, with features like AES-256 encryption, AWS-managed encryption keys, and TLS 1.2 for transport. It integrates with available AWS security related offerings for maximum control. Moreover, with direct account-to-account transfers there are no credentials to manage. 21 | 22 | It improves quality by minimizing generational loss (lossless transfer) and package/conform risk by shifting ownership of transcode/package to the sender, where package quality/conformance is under the sender's control. 23 | 24 | It also enables workflow automation/integration with notifications, access logs, and delivery receipts. 25 | 26 | ### How does Media Exchange on AWS work with other AWS services? 27 | 28 | It uses Amazon S3 as the underlying storage and transport for transferring files between two AWS customer accounts. The files are secured with AWS KMS managed encryption keys. It uses AWS IAM for authentication and access control. 29 | 30 | ### Who are the primary users of Media Exchange on AWS? 31 | 32 | AWS customers who are using Amazon S3 as part of their cloud native or hybrid media supply chain workflow are the primary users of Media Exchange on AWS. 33 | 34 | ### What type of files can I transfer with Media Exchange on AWS? 35 | 36 | Media Exchange on AWS is built for file-based workflows. Any file type that can be stored in Amazon S3 can also be transferred using Media Exchange on AWS, as the sketch below illustrates. 
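To make the transfer model concrete, here is a hedged sketch of a publisher-side upload with boto3; the file, bucket, key, and KMS key ARN are placeholders (real values come from the publisher's onboarding outputs), not names defined by this solution.

```python
# Sketch: publisher pushes one file into the shared MediaExchange bucket.
# Bucket, key, and KMS key ARN are hypothetical placeholders.
import boto3

s3 = boto3.client("s3")

s3.upload_file(
    Filename="master_2398.mxf",
    Bucket="example-mediaexchange-bucket",
    Key="deliveries/master_2398.mxf",
    ExtraArgs={
        # SSE-KMS mirrors how shared objects are encrypted at rest
        "ServerSideEncryption": "aws:kms",
        "SSEKMSKeyId": "arn:aws:kms:us-east-1:111122223333:key/example-key-id",
    },
)
```

The same call works for any file type, which is the point of the answer above: if a tool can write to Amazon S3, it can publish through Media Exchange on AWS.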
37 | 38 | ### Do I need to be an Amazon S3 user to take advantage of Media Exchange on AWS? 39 | 40 | Media Exchange on AWS uses Amazon S3 as the underlying storage and transport. You do not need to have all of your files in Amazon S3 to take advantage of it, but you will need to use tools/workflows that can interface with Amazon S3. 41 | 42 | 43 | # Security 44 | 45 | ### How does Media Exchange on AWS ensure that my files are secure? 46 | 47 | - _Encryption_: files are encrypted at rest with AES-256 and secured in transit with TLS. 48 | - _Key Management_: the encryption keys are managed by AWS Key Management Service. 49 | - _Authentication_: users are authenticated at the account level with AWS Identity and Access Management. Media Exchange on AWS grants access at the account level, so there are no additional credentials to manage. 50 | - _Access Control_: it allocates a bucket per transfer agreement. Content publishers have write permissions to the shared bucket and the subscribers have read permissions to the files. Moreover, all the files shared with Media Exchange on AWS have singular access control that enables read permissions for the subscribers and write permissions for the publishers. On top of that, the encryption keys used to protect the files have similar levels of access control: encrypt permissions for publishers and decrypt for subscribers. 51 | - _Audit_: all actions on the files are tracked in Amazon S3 access logs, which are made available to the publishers. Media Exchange is deployed in an AWS account separate from the publisher's and subscriber's primary accounts. All of the security & compliance tools/processes that you use today are applicable and can be used on this account. 52 | 53 | ### How quickly can I remove access and/or cancel a transfer? 54 | 55 | Publishers are in full control at all phases of the transfer. They can remove object-level permissions or even delete any of the files at any point in time. 56 | 57 | 58 | # Management 59 | 60 | ### Who is a publisher? 61 | 62 | A publisher in Media Exchange on AWS is the party sending files. 63 | 64 | ### Who is a subscriber? 65 | 66 | A subscriber in Media Exchange on AWS is the party receiving files. 67 | 68 | ### How do I transfer files with Media Exchange on AWS? 69 | 70 | Publishers copy the files over to the exchange bucket corresponding to the subscriber's transfer agreement in Media Exchange on AWS. The subscribers get a notification that the files are available and download them into their account. 71 | 72 | ### I currently publish files directly from/to S3 bucket(s). Why should I consider using Media Exchange on AWS? 73 | 74 | It enables account-to-account transfers without having to manage shared credentials, create roles, or maintain bucket policies. The publishers and subscribers do not have to change their security posture to use Media Exchange on AWS. In this case, publishers are pushing to a shared bucket external to their account and the subscribers are pulling from a bucket that is not in their account. 75 | 76 | ### I am using my (custom/3rd party) application to move files to/from Amazon S3. Will it work? 77 | 78 | Media Exchange on AWS operates with Amazon S3 APIs. It is very likely that your current application will continue to operate without modifications. 79 | 80 | ### What type of workflow automation does it support? 81 | 82 | It does not come with a workflow orchestrator. It integrates with your workflow automation through standardized event notifications (see the sketch below). 
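As a hedged illustration of that event-driven integration, the sketch below polls an SQS queue subscribed to the MediaExchange notifications topic and copies each announced object; the queue URL, bucket names, and message shape are assumptions for illustration, not the solution's documented schema.

```python
# Sketch: subscriber-side worker that drains an SQS queue subscribed to
# the MediaExchange notifications topic. All names and the payload shape
# are hypothetical; adapt them to the actual notification schema.
import json
import boto3

QUEUE_URL = "https://sqs.us-east-1.amazonaws.com/111122223333/example-mxc-notifications"
DESTINATION_BUCKET = "example-subscriber-bucket"

sqs = boto3.client("sqs")
s3 = boto3.client("s3")

def poll_once():
    resp = sqs.receive_message(
        QueueUrl=QUEUE_URL, MaxNumberOfMessages=10, WaitTimeSeconds=20
    )
    for msg in resp.get("Messages", []):
        body = json.loads(msg["Body"])
        # SNS-delivered messages wrap the original payload in a "Message" field
        payload = json.loads(body["Message"]) if "Message" in body else body
        for record in payload.get("Records", []):  # assumed S3-event-style shape
            bucket = record["s3"]["bucket"]["name"]
            key = record["s3"]["object"]["key"]
            s3.copy_object(
                Bucket=DESTINATION_BUCKET,
                Key=key,
                CopySource={"Bucket": bucket, "Key": key},
            )
        sqs.delete_message(QueueUrl=QUEUE_URL, ReceiptHandle=msg["ReceiptHandle"])

if __name__ == "__main__":
    poll_once()
```

This is essentially what the included autoingest stack automates with a Lambda function behind an SNS-to-SQS subscription.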
83 | 84 | ### How do I integrate my media processing workflow with Media Exchange on AWS? 85 | 86 | It works with Amazon S3 APIs. Event notifications are delivered over Amazon SNS or Amazon EventBridge. You will have to configure your workflow with the respective S3 buckets for sending and receiving files. Similarly, you can trigger your workflow automation steps by configuring it to receive notifications from Amazon SNS or EventBridge. 87 | 88 | ### What type of analytics does it offer? 89 | 90 | It produces Amazon S3 access logs for all activities on the shared objects. Publishers can use these to build their own reporting. 91 | 92 | ### Who owns the files in transit? 93 | 94 | Publishers have full control over the files in transit. 95 | 96 | ### Can I customize pricing or terms for select customers? 97 | 98 | This is not supported at this time. 99 | 100 | ### Can I remove an asset that I have shared? 101 | 102 | Yes, you can cancel the transfer and remove the files whenever you want. However, if the recipients have already made a copy, Media Exchange on AWS has no control over those copies. 103 | 104 | 105 | # Pricing 106 | 107 | ### How much does it cost to transfer files using Media Exchange on AWS? 108 | 109 | There is no additional charge for using Media Exchange on AWS. You pay for the underlying AWS resources (e.g., S3, EC2) you create to store and move your files. It is pay-as-you-go and there are no servers to manage. 110 | 111 | ### What should I expect in AWS charges for transferring files? 112 | 113 | There are no data transfer fees if the files are transferred within the same region. The main operational expense of using Media Exchange on AWS is the S3 storage fees associated with keeping the files in the temporary storage bucket. You can reduce the storage fees by configuring a shorter retention duration, and by deleting the files from the temporary storage area after the subscriber has copied them. 114 | 115 | In addition to storage, there are S3 charges for the GET and PUT API calls and for key management in AWS KMS. Standard data transfer charges apply when files are transferred to another region or over the internet. 116 | 117 | ### What should I expect to pay in data transfer charges? 118 | 119 | There are no data transfer charges for moving the files into the Media Exchange on AWS bucket. There are no data transfer fees when transferring to other buckets within the same region. If you are delivering to buckets in another region, standard AWS cross-region transfer fees apply. Likewise, if you are moving files to your data center, standard AWS data transfer fees apply based on the transport mechanism (Direct Connect vs. internet). 120 | 121 | ### How does Media Exchange on AWS compare to my (custom / 3rd party) file transfer service? 122 | 123 | It is simple, secure, and easy to operate. In terms of pricing, there are no licensing fees and there are no servers to manage. You only pay for the charges incurred for the underlying AWS services, which could potentially net out at `1/100th` of the cost in a comparable setting. 124 | 125 | 126 | # Performance and Scalability 127 | 128 | ### How many transfers can be done in Media Exchange on AWS? 129 | 130 | Media Exchange on AWS supports hundreds of concurrent transfers. 131 | 132 | ### I am transferring hundreds of GBs with my file transfer service. How does Media Exchange on AWS compare? 133 | 134 | There is very high bandwidth between S3 buckets. You can expect `100GB/s` transfer speed in the same region if you are using the included MediaSync utility. Cross-region transfers take advantage of AWS managed network connectivity. 
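For a feel of client-side tuning on large objects, here is a hedged sketch using boto3's managed transfer configuration; the part size and concurrency values are illustrative only, and the included MediaSync utility uses its own AWS Batch-based copier rather than this client-side path.

```python
# Sketch: parallel multipart copy between buckets with boto3's transfer
# manager. Values and bucket names are illustrative placeholders.
import boto3
from boto3.s3.transfer import TransferConfig

config = TransferConfig(
    multipart_threshold=64 * 1024 * 1024,   # use multipart above 64 MiB
    multipart_chunksize=128 * 1024 * 1024,  # 128 MiB parts
    max_concurrency=32,                     # copy parts in parallel
)

s3 = boto3.client("s3")
s3.copy(
    CopySource={"Bucket": "example-mediaexchange-bucket", "Key": "deliveries/master.mxf"},
    Bucket="example-subscriber-bucket",
    Key="ingest/master.mxf",
    Config=config,
)
```

Same-region bucket-to-bucket copies like this never leave the AWS network, which is where the quoted throughput comes from.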
135 | 136 | 137 | # Reliability 138 | 139 | ### How does Media Exchange on AWS ensure that files are delivered at source quality with no data loss in the process? 140 | 141 | The underlying transport, Amazon S3, is designed to provide 99.999999999% durability of objects over a given year. This durability level corresponds to an average annual expected loss of 0.000000001% of objects. For example, if you transfer 10,000,000 objects with Media Exchange on AWS, you can on average expect to incur a loss of a single object once every 10,000 years. 142 | 143 | In addition, Amazon S3 calculates checksums on all network traffic to detect corruption of data packets when storing or retrieving data. This is available and enabled by default in the AWS SDKs and command line interfaces. Most 3rd-party tools that use the AWS SDK take advantage of this automatically. 144 | 145 | Moreover, if you are looking to compute checksums at the source and destination, there is an included tool, `fixity`, that can help compute checksums at scale. 146 | 
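As a hedged illustration of the idea behind source/destination fixity checks (this is not the included `fixity` tool), the sketch below streams two objects from S3 and compares their SHA-256 digests; the bucket and key names are placeholders.

```python
# Sketch: stream an S3 object and compute its SHA-256, so a published
# object and its received copy can be compared. Names are placeholders;
# the solution's fixity tool does this at scale rather than per object.
import hashlib
import boto3

def sha256_of_object(bucket: str, key: str) -> str:
    s3 = boto3.client("s3")
    body = s3.get_object(Bucket=bucket, Key=key)["Body"]
    digest = hashlib.sha256()
    for chunk in iter(lambda: body.read(8 * 1024 * 1024), b""):  # 8 MiB reads
        digest.update(chunk)
    return digest.hexdigest()

src = sha256_of_object("example-mediaexchange-bucket", "deliveries/master.mxf")
dst = sha256_of_object("example-subscriber-bucket", "ingest/master.mxf")
print("match" if src == dst else "MISMATCH")
```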
-------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. 
For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | --------------------------------------------------------------------------------