├── labs
│ ├── fargate-dev-workshop
│ │ ├── __init__.py
│ │ ├── configs
│ │ │ ├── hadolint.yml
│ │ │ ├── stack.json
│ │ │ ├── exclude-patterns.txt
│ │ │ ├── docker_build_base.yml
│ │ │ ├── buildspec_lint.yml
│ │ │ ├── appspec.json
│ │ │ ├── service-definition.json
│ │ │ ├── buildspec_secrets.yml
│ │ │ ├── task-definition.json
│ │ │ ├── secrets_config.json
│ │ │ ├── docker_build_deploy.yml
│ │ │ ├── deployment-group.json
│ │ │ └── produce-configs.py
│ │ ├── cdk.json
│ │ ├── app
│ │ │ └── index.html
│ │ ├── cdk-deploy-to.sh
│ │ ├── iam
│ │ │ └── CodeDeployDemo-Trust.json
│ │ ├── Dockerfile
│ │ ├── requirements.txt
│ │ ├── ecs_development_workshop
│ │ │ ├── code_pipeline_configuration.py
│ │ │ ├── ecs_inf_fargate.py
│ │ │ └── docker_build_to_ecr_pipeline.py
│ │ ├── app.py
│ │ ├── setup.py
│ │ ├── r53_health_check.yaml
│ │ └── README.md
│ ├── fargate-ops-cdk
│ │ ├── fargate_ops_cdk
│ │ │ ├── __init__.py
│ │ │ ├── fargate_workshop_ops_cluster.py
│ │ │ ├── fargate_workshop_ops_stack.py
│ │ │ ├── fargate_workshop_ops_failed.py
│ │ │ ├── fargate_workshop_ops_crystal_backend.py
│ │ │ ├── fargate_workshop_ops_node_backend.py
│ │ │ └── fargate_workshop_ops_frontend.py
│ │ ├── cdk.json
│ │ ├── requirements.txt
│ │ ├── setup.py
│ │ └── app.py
│ ├── fargate-workshop-cdk
│ │ ├── requirements.txt
│ │ ├── fargate_workshop_cdk
│ │ │ ├── __init__.py
│ │ │ ├── fargate_workshop_discovery_stack.py
│ │ │ ├── fargate_workshop_dataeng_cluster.py
│ │ │ ├── fargate_workshop_network_stack.py
│ │ │ ├── fargate_workshop_dataeng_sftp.py
│ │ │ ├── fargate_workshop_dataeng_lambda.py
│ │ │ ├── fargate_workshop_dataeng_batch.py
│ │ │ ├── kafka-producer.py
│ │ │ ├── fargate_workshop_dataeng_streams.py
│ │ │ ├── fargate_workshop_dataeng_s3sink.py
│ │ │ └── fargate_workshop_dataeng_stack.py
│ │ ├── cdk.json
│ │ ├── setup.py
│ │ └── app.py
│ ├── pipeline-generator-cdk
│ │ ├── code_pipeline_generator
│ │ │ ├── __init__.py
│ │ │ ├── code_pipeline_generator.egg-info
│ │ │ │ ├── top_level.txt
│ │ │ │ ├── dependency_links.txt
│ │ │ │ ├── requires.txt
│ │ │ │ ├── SOURCES.txt
│ │ │ │ └── PKG-INFO
│ │ │ ├── code_pipeline_generic_build_project.py
│ │ │ ├── code_pipeline_configuration.py
│ │ │ └── code_pipeline_generator_stack.py
│ │ ├── cdk.json
│ │ ├── cdk.context.json
│ │ ├── requirements.txt
│ │ ├── app.py
│ │ ├── setup.py
│ │ └── README.md
│ ├── batch-processing
│ │ ├── data
│ │ │ ├── customer.xml
│ │ │ └── plane.xml
│ │ ├── sfn
│ │ │ ├── Dockerfile
│ │ │ ├── buildspec.yml
│ │ │ └── app.py
│ │ ├── sftp
│ │ │ ├── Dockerfile
│ │ │ ├── buildspec.yml
│ │ │ └── app.py
│ │ └── solutions
│ │   └── batch_sfn.json
│ ├── stream-processing-pipelines
│ │ ├── kafka_to_docdb
│ │ │ ├── app.py
│ │ │ ├── Dockerfile
│ │ │ └── buildspec.yml
│ │ ├── s3sinkhc
│ │ │ ├── Dockerfile
│ │ │ └── set_env.sh
│ │ ├── s3sink
│ │ │ ├── Dockerfile
│ │ │ ├── set_env.sh
│ │ │ └── buildspec.yml
│ │ └── solutions
│ │   └── streamproc.py
│ ├── requirements.txt
│ └── cloud9-iam.json
├── diagrams
│ ├── data-eng-lab-seq.png
│ ├── fargate-dev-lab.png
│ ├── Microservices with Fargate.png
│ ├── fargate-dev-cicd-pipeline.png
│ ├── Data Engineering with Fargate.png
│ ├── Fargate Operations and Security.png
│ ├── Data Engineering with Fargate-Stream.png
│ ├── data-eng-lab-seq.puml
│ ├── data_engineering_stream.puml
│ ├── data_engineering.puml
│ ├── microservice.puml
│ └── operations.puml
├── .gitignore
├── LICENSE-SUMMARY
├── README.md
├── LICENSE-SAMPLECODE
├── CONTRIBUTING.md
└── LICENSE
/labs/fargate-dev-workshop/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/hadolint.yml:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/stack.json:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/labs/fargate-ops-cdk/fargate_ops_cdk/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/requirements.txt:
--------------------------------------------------------------------------------
1 | -e .
2 |
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/fargate_workshop_cdk/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/code_pipeline_generator/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/labs/fargate-ops-cdk/cdk.json:
--------------------------------------------------------------------------------
1 | {
2 | "app": "python3 app.py"
3 | }
4 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/cdk.json:
--------------------------------------------------------------------------------
1 | {
2 | "app": "python3 app.py"
3 | }
4 |
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/cdk.json:
--------------------------------------------------------------------------------
1 | {
2 | "app": "python3 app.py"
3 | }
4 |
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/cdk.json:
--------------------------------------------------------------------------------
1 | {
2 | "app": "python3 app.py"
3 | }
4 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/exclude-patterns.txt:
--------------------------------------------------------------------------------
1 | configs/secrets_config.json
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/code_pipeline_generator/code_pipeline_generator.egg-info/top_level.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/cdk.context.json:
--------------------------------------------------------------------------------
1 | {
2 | "@aws-cdk/core:enableStackNameDuplicates": "true"
3 | }
4 |
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/code_pipeline_generator/code_pipeline_generator.egg-info/dependency_links.txt:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/code_pipeline_generator/code_pipeline_generator.egg-info/requires.txt:
--------------------------------------------------------------------------------
1 | aws-cdk.core
2 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/app/index.html:
--------------------------------------------------------------------------------
1 | <html>
2 | <body>
3 | Hello!! This is Fargate container!
4 | </body>
5 | </html>
--------------------------------------------------------------------------------
/diagrams/data-eng-lab-seq.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/amazon-ecs-fargate-workshop-dev-ops-data/HEAD/diagrams/data-eng-lab-seq.png
--------------------------------------------------------------------------------
/diagrams/fargate-dev-lab.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/amazon-ecs-fargate-workshop-dev-ops-data/HEAD/diagrams/fargate-dev-lab.png
--------------------------------------------------------------------------------
/diagrams/Microservices with Fargate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/amazon-ecs-fargate-workshop-dev-ops-data/HEAD/diagrams/Microservices with Fargate.png
--------------------------------------------------------------------------------
/diagrams/fargate-dev-cicd-pipeline.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/amazon-ecs-fargate-workshop-dev-ops-data/HEAD/diagrams/fargate-dev-cicd-pipeline.png
--------------------------------------------------------------------------------
/diagrams/Data Engineering with Fargate.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/amazon-ecs-fargate-workshop-dev-ops-data/HEAD/diagrams/Data Engineering with Fargate.png
--------------------------------------------------------------------------------
/diagrams/Fargate Operations and Security.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/amazon-ecs-fargate-workshop-dev-ops-data/HEAD/diagrams/Fargate Operations and Security.png
--------------------------------------------------------------------------------
/diagrams/Data Engineering with Fargate-Stream.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/aws-samples/amazon-ecs-fargate-workshop-dev-ops-data/HEAD/diagrams/Data Engineering with Fargate-Stream.png
--------------------------------------------------------------------------------
/labs/batch-processing/data/customer.xml:
--------------------------------------------------------------------------------
1 |
2 | bob
3 |
4 | 1234
5 | 4567
6 |
7 |
8 |
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/requirements.txt:
--------------------------------------------------------------------------------
1 | -e .
2 | aws_cdk.aws_codebuild
3 | aws_cdk.aws_codecommit
4 | aws_cdk.aws_codepipeline
5 | aws_cdk.aws_codepipeline_actions
6 | aws_cdk.aws_ec2
7 | aws_cdk.aws_ecs
8 | aws_cdk.aws_ecs_patterns
--------------------------------------------------------------------------------
/labs/stream-processing-pipelines/kafka_to_docdb/app.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | import time
4 |
5 | while True:
6 | print("I'm here")
7 | time.sleep(60)
8 |
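9 | # NOTE: placeholder main loop that just keeps the task running; the lab
10 | # replaces it with the Kafka-to-DocumentDB consumer (cf. solutions/streamproc.py).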
--------------------------------------------------------------------------------
/labs/batch-processing/sfn/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:latest
2 |
3 | RUN apt-get update
4 | RUN apt-get -y install python3-pip
5 | RUN pip3 install xmltodict
6 | RUN pip3 install boto3
7 |
8 | COPY app.py /opt/app.py
9 |
10 | CMD ["/usr/bin/python3", "/opt/app.py"]
11 |
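12 | # app.py downloads an XML document from S3, converts it to JSON with
13 | # xmltodict, and uploads the result back to S3.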
--------------------------------------------------------------------------------
/labs/batch-processing/sftp/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:latest
2 |
3 | RUN apt-get update
4 | RUN apt-get -y install python3-pip
5 | RUN pip3 install paramiko
6 | RUN pip3 install boto3
7 |
8 | COPY app.py /opt/app.py
9 |
10 | CMD ["/usr/bin/python3", "/opt/app.py"]
11 |
--------------------------------------------------------------------------------
/labs/batch-processing/data/plane.xml:
--------------------------------------------------------------------------------
1 |
2 | Eagle
3 | F15
4 |
5 | C
6 | E
7 |
8 |
9 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | fargate-workshop.code-workspace
2 | .DS_Store
3 | .vscode/settings.json
4 | .env/
5 | labs/fargate-workshop-cdk/cdk.context.json
6 | labs/fargate-workshop-cdk/cdk.out/
7 | labs/fargate-workshop-cdk/fargate_workshop_cdk/__pycache__/
8 | .env
9 | cdk.out
10 | site/public/
11 | cdk.context.json
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/cdk-deploy-to.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
3 |
4 | # cdk-deploy-to.sh
5 | export CDK_DEPLOY_ACCOUNT=$1
6 | shift
7 | export CDK_DEPLOY_REGION=$1
8 | shift
9 | #cdk deploy fargate-developerPipline
10 | #"$@"
11 | cdk list
12 |
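13 | # Example usage (account id and region are illustrative):
14 | #   ./cdk-deploy-to.sh 123456789012 us-west-2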
--------------------------------------------------------------------------------
/labs/stream-processing-pipelines/s3sinkhc/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM devshawn/kafka-connect-healthcheck
2 |
3 | RUN apk add curl
4 | RUN apk add wget
5 | RUN wget -O /usr/local/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && chmod +x /usr/local/bin/jq
6 | COPY set_env.sh /etc/set_env.sh
7 |
8 | CMD ["/etc/set_env.sh"]
--------------------------------------------------------------------------------
/LICENSE-SUMMARY:
--------------------------------------------------------------------------------
1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 |
3 | The documentation is made available under the Creative Commons Attribution-ShareAlike 4.0 International License. See the LICENSE file.
4 |
5 | The sample code within this documentation is made available under the MIT-0 license. See the LICENSE-SAMPLECODE file.
6 |
--------------------------------------------------------------------------------
/labs/stream-processing-pipelines/kafka_to_docdb/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:latest
2 |
3 | RUN apt-get update
4 | RUN apt-get -y install python3-pip
5 | RUN pip3 install kafka-python
6 | RUN pip3 install pymongo
7 | RUN pip3 install boto3
8 |
9 | COPY app.py /opt/app.py
10 | COPY rds-combined-ca-bundle.pem /opt/rds-combined-ca-bundle.pem
11 |
12 | CMD ["/usr/bin/python3", "/opt/app.py"]
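13 |
14 | # rds-combined-ca-bundle.pem supplies the CA certificates needed for TLS
15 | # connections to Amazon DocumentDB.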
--------------------------------------------------------------------------------
/labs/stream-processing-pipelines/s3sink/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM confluentinc/cp-kafka-connect
2 |
3 | RUN apt-get update
4 | RUN apt-get install -y python3 python3-pip
5 | RUN pip3 install awscli
6 |
7 | RUN wget -O /usr/local/bin/jq https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 && chmod +x /usr/local/bin/jq
8 | COPY set_env.sh /etc/set_env.sh
9 |
10 | CMD ["/etc/set_env.sh"]
--------------------------------------------------------------------------------
/labs/requirements.txt:
--------------------------------------------------------------------------------
1 | aws-cdk.aws-ec2
2 | aws-cdk.aws-docdb
3 | aws-cdk.aws-msk
4 | aws-cdk.aws_ecr
5 | aws-cdk.aws_ecs_patterns
6 | aws-cdk.aws-s3
7 | aws-cdk.aws_servicediscovery
8 | aws-cdk.aws_events
9 | aws-cdk.aws_lambda
10 | aws-cdk.aws_events_targets
11 | aws-cdk.aws_codepipeline
12 | aws-cdk.aws_codepipeline_actions
13 | aws-cdk.aws_codecommit
14 | aws-cdk.aws_codebuild
15 | aws-cdk.aws_transfer
16 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/iam/CodeDeployDemo-Trust.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Sid": "",
6 | "Effect": "Allow",
7 | "Principal": {
8 | "Service": [
9 | "codedeploy.amazonaws.com"
10 | ]
11 | },
12 | "Action": "sts:AssumeRole"
13 | }
14 | ]
15 | }
--------------------------------------------------------------------------------
/labs/stream-processing-pipelines/s3sinkhc/set_env.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
3 |
4 | set -x
5 |
6 | # Look up this task's private IP from the ECS task metadata endpoint
7 | JSON=$(/usr/bin/curl ${ECS_CONTAINER_METADATA_URI}/task)
8 | echo $JSON
9 | TASK=$(echo $JSON | /usr/local/bin/jq -r '.Containers[0].Networks[0].IPv4Addresses[0]')
10 | echo $TASK
11 |
12 | # Report Kafka Connect health for this worker, identified by its IP
13 | HEALTHCHECK_CONNECT_WORKER_ID=$TASK /usr/local/bin/kafka-connect-healthcheck
14 |
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/code_pipeline_generator/code_pipeline_generator.egg-info/SOURCES.txt:
--------------------------------------------------------------------------------
1 | README.md
2 | setup.py
3 | code_pipeline_generator/code_pipeline_generator.egg-info/PKG-INFO
4 | code_pipeline_generator/code_pipeline_generator.egg-info/SOURCES.txt
5 | code_pipeline_generator/code_pipeline_generator.egg-info/dependency_links.txt
6 | code_pipeline_generator/code_pipeline_generator.egg-info/requires.txt
7 | code_pipeline_generator/code_pipeline_generator.egg-info/top_level.txt
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM httpd:alpine
2 |
3 | RUN apk -q add libcap
4 |
5 | # Change access rights to conf, logs, bin from root to www-data
6 | RUN chown -hR www-data:www-data /usr/local/apache2/
7 |
8 | # setcap to bind to privileged ports as non-root
9 | RUN setcap 'cap_net_bind_service=+ep' /usr/local/apache2/bin/httpd
10 | RUN getcap /usr/local/apache2/bin/httpd
11 |
12 | COPY ./app/ /usr/local/apache2/htdocs/
13 |
14 | # Run as www-data
15 | USER www-data
16 |
--------------------------------------------------------------------------------
/labs/fargate-ops-cdk/requirements.txt:
--------------------------------------------------------------------------------
1 | aws-cdk.aws-ec2
2 | aws-cdk.aws-docdb
3 | aws-cdk.aws-msk
4 | aws-cdk.aws_ecr
5 | aws-cdk.aws_ecs_patterns
6 | aws-cdk.aws-s3
7 | aws-cdk.aws_servicediscovery
8 | aws-cdk.aws_events
9 | aws-cdk.aws_lambda
10 | aws-cdk.aws_events_targets
11 | aws-cdk.aws_codepipeline
12 | aws-cdk.aws_codepipeline_actions
13 | aws-cdk.aws_codecommit
14 | aws-cdk.aws_codebuild
15 | aws-cdk.aws_cloudtrail
16 | aws-cdk.aws_cloudwatch
17 | aws-cdk.aws_transfer
18 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/docker_build_base.yml:
--------------------------------------------------------------------------------
1 | version: 0.2
2 | phases:
3 | install:
4 | commands:
5 | - apt-get update
6 | - apt-get install -y jq
7 | pre_build:
8 | commands:
9 | - echo 'starting build...'
10 | - export IMAGE_TAG=`echo $CODEBUILD_BUILD_ID | awk -F":" '{print $2}'`
11 | - $(aws ecr get-login --no-include-email --region $AWS_DEFAULT_REGION)
12 | build:
13 | commands:
14 | #build docker image
15 | - docker build -t $project_name:$tag .
16 | - docker tag $project_name:$tag $ecr:$tag
17 | - docker push $ecr:$tag
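18 | # $project_name, $tag and $ecr are assumed to arrive as CodeBuild
19 | # environment variables supplied by the pipeline stack.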
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/buildspec_lint.yml:
--------------------------------------------------------------------------------
1 | version: 0.2
2 |
3 | phases:
4 | pre_build:
5 | commands:
6 | - echo Pulling the hadolint docker image
7 | - docker pull hadolint/hadolint:v1.16.2
8 | build:
9 | commands:
10 | - echo Build started on `date`
11 | - cd $CODEBUILD_SRC_DIR
12 | - echo Scanning with Hadolint...
13 | - result=`docker run --rm -i hadolint/hadolint:v1.16.2 hadolint -f json - < Dockerfile`
14 | post_build:
15 | commands:
16 | - echo $result
17 | - echo Build completed on `date`
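18 | # Note: if hadolint exits nonzero, the command substitution above fails
19 | # the build phase, so Dockerfile lint errors stop the pipeline here.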
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ECS Fargate Workshop
2 |
3 | This workshop teaches you best practices for using ECS Fargate. It has three independent modules targeted at different use cases.
4 |
5 | * Development
6 | * Operations
7 | * Data engineering
8 |
9 | Visit the [main workshop site](https://ecs-fargate-dev-ops-data.workshop.aws/) to see the documentation.
10 |
11 | ## License Summary
12 |
13 | The documentation is made available under the Creative Commons Attribution-ShareAlike 4.0 International License. See the LICENSE file.
14 |
15 | The sample code within this documentation is made available under the MIT-0 license. See the LICENSE-SAMPLECODE file.
16 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/requirements.txt:
--------------------------------------------------------------------------------
1 | aws-cdk.aws-cloudformation
2 | aws-cdk.aws-codepipeline
3 | aws-cdk.aws-codepipeline-actions
4 | aws-cdk.aws-ecr
5 | aws-cdk.aws-ecr-assets
6 | aws-cdk.aws-s3
7 | aws-cdk.aws-s3-assets
8 | aws_cdk.aws_ecs_patterns
9 | aws_cdk.aws_logs
10 |
11 | aws_cdk.aws_elasticloadbalancingv2
12 | aws_cdk.aws_autoscaling
13 | aws_cdk.aws_elasticloadbalancingv2_targets
14 | aws_cdk.custom-resources
15 |
16 | aws_cdk.aws_cloudwatch
17 |
18 | aws-cdk.aws-sqs
19 | aws-cdk.aws-ssm
20 |
21 | aws-cdk.core
22 | aws-cdk.cx-api
23 | aws-cdk.region-info
24 |
25 | aws-cdk.aws-events
26 | aws-cdk.aws-events-targets
27 | aws-cdk.aws-lambda
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/fargate_workshop_cdk/fargate_workshop_discovery_stack.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_servicediscovery as cloudmap,
5 | core
6 | )
7 |
8 |
9 | class FargateWorkshopDiscoveryStack(core.Stack):
10 |
11 | def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
12 | super().__init__(scope, id, **kwargs)
13 |
14 | # CloudMap namespace
15 | self.namespace = cloudmap.HttpNamespace(
16 | scope = self,
17 | id = 'CloudMap',
18 | name = 'FargateWorkshopNamespace'
19 | )
20 |
21 |
22 |
23 |
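24 | # Services registered in this namespace are discovered at runtime, e.g.:
25 | #   aws servicediscovery discover-instances --namespace-name FargateWorkshopNamespace --service-name $MSK_SERVICE
26 | # (see stream-processing-pipelines/s3sink/set_env.sh)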
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/fargate_workshop_cdk/fargate_workshop_dataeng_cluster.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_ecs as ecs,
5 | aws_ec2 as ec2,
6 | core
7 | )
8 |
9 |
10 | class FargateWorkshopDataengClusterStack(core.Stack):
11 |
12 | def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc, **kwargs) -> None:
13 | super().__init__(scope, id, **kwargs)
14 |
15 | # Fargate cluster
16 | self.cluster = ecs.Cluster(
17 | scope = self,
18 | id = 'StreamProcessingCluster',
19 | vpc = vpc
20 | )
21 |
22 |
23 |
24 |
25 |
--------------------------------------------------------------------------------
/labs/stream-processing-pipelines/s3sink/set_env.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
3 |
4 | set -x
5 |
6 | # Look up this task's private IP from the ECS task metadata endpoint
7 | JSON=$(curl ${ECS_CONTAINER_METADATA_URI}/task)
8 | echo $JSON
9 | TASK=$(echo $JSON | jq -r '.Containers[0].Networks[0].IPv4Addresses[0]')
10 | echo $TASK
11 |
12 | # Resolve the MSK cluster ARN registered in Cloud Map, then fetch its TLS bootstrap brokers
13 | BROKER_ARN=$(aws servicediscovery discover-instances --region $REGION --namespace-name FargateWorkshopNamespace --service-name $MSK_SERVICE | jq -r '.Instances[0].Attributes.broker_arn')
14 | BOOTSTRAP_SERVERS=$(aws kafka get-bootstrap-brokers --region $REGION --cluster-arn $BROKER_ARN | jq -r '.BootstrapBrokerStringTls')
15 | # Start Kafka Connect, advertising this task's IP to the rest of the cluster
16 | CONNECT_REST_ADVERTISED_HOST_NAME=$TASK CONNECT_BOOTSTRAP_SERVERS=$BOOTSTRAP_SERVERS /etc/confluent/docker/run
17 |
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/code_pipeline_generator/code_pipeline_generic_build_project.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_codebuild,
5 | core,
6 | )
7 |
8 | class genericBuild(core.Construct):
9 | def __init__(self, scope: core.Construct, id: str, **kwargs):
10 | super().__init__(scope, id, **kwargs)
11 | self.Project = aws_codebuild.PipelineProject(
12 | self,
13 | id=id,
14 | project_name=id,
15 | environment = aws_codebuild.BuildEnvironment(
16 | build_image=aws_codebuild.LinuxBuildImage.STANDARD_2_0,
17 | compute_type=aws_codebuild.ComputeType.MEDIUM
18 | )
19 | )
20 |
--------------------------------------------------------------------------------
/diagrams/data-eng-lab-seq.puml:
--------------------------------------------------------------------------------
1 | @startuml
2 |
3 | ' Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
4 |
5 | participant CDK #lightblue
6 | participant CodePipeline #lightblue
7 |
8 | == Lab 1 ==
9 | CDK --> MSK: Deploy as prereq
10 | CDK --> DocumentDB: Deploy as prereq
11 | CDK --> S3: Deploy as prereq
12 | CDK --> DataGenerator: Deploy as prereq
13 | CDK --> SFTP: Deploy as prereq
14 | CDK --> CodePipeline: Deploy as prereq
15 | CodePipeline --> FargateCluster: Deploy
16 | == Lab 2 ==
17 | CodePipeline --> FargateService: Deploy
18 | == Lab 3 ==
19 | CodePipeline --> KafkaConnect: Deploy
20 | CodePipeline --> StreamProcessor: Deploy
21 | == Lab 4 ==
22 | CDK --> Scheduler: Deploy
23 | CodePipeline --> ScheduledTask: Deploy
24 | CodePipeline --> Batch: Deploy
25 |
26 | @enduml
27 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/appspec.json:
--------------------------------------------------------------------------------
1 | {
2 | "version": 0.0,
3 | "Resources": [
4 | {
5 | "TargetService": {
6 | "Type": "AWS::ECS::Service",
7 | "Properties": {
8 | "TaskDefinition": "",
9 | "LoadBalancerInfo": {
10 | "ContainerName": "fargate_task_container",
11 | "ContainerPort": 80
12 | },
13 | "PlatformVersion": "LATEST",
14 | "NetworkConfiguration": {
15 | "awsvpcConfiguration": {
16 | "subnets": [
17 | "PLACEHOLDER_SUBNET"
18 | ],
19 | "securityGroups": [
20 | "PLACEHOLDER_SECURITY_GROUP"
21 | ],
22 | "assignPublicIp": "DISABLED"
23 | }
24 | }
25 | }
26 | }
27 | }
28 | ]
29 | }
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/service-definition.json:
--------------------------------------------------------------------------------
1 | {
2 | "taskDefinition": "trivia-backend",
3 | "cluster": "default",
4 | "loadBalancers": [
5 | {
6 | "targetGroupArn": "PLACEHOLDER_TARGET_GROUP",
7 | "containerName": "fargate_task_container",
8 | "containerPort": 80
9 | }
10 | ],
11 | "desiredCount": 3,
12 | "launchType": "FARGATE",
13 | "schedulingStrategy": "REPLICA",
14 | "deploymentController": {
15 | "type": "CODE_DEPLOY"
16 | },
17 | "networkConfiguration": {
18 | "awsvpcConfiguration": {
19 | "subnets": [
20 | "PLACEHOLDER_SUBNET"
21 | ],
22 | "securityGroups": [
23 | "PLACEHOLDER_SECURITY_GROUP"
24 | ],
25 | "assignPublicIp": "DISABLED"
26 | }
27 | }
28 | }
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
3 |
4 |
5 | from aws_cdk import core
6 | from code_pipeline_generator.code_pipeline_generator_stack import CodePipelineGeneratorStack
7 | from code_pipeline_generator.code_pipeline_configuration import ContainerPipelineConfiguration
8 |
9 |
10 | app = core.App()
11 |
12 | # Bootstrap Developer Pipeline
13 | developerPipeline = ContainerPipelineConfiguration(
14 | projectName = "Fargate-Developer"
15 | )
16 | CodePipelineGeneratorStack(app, "fargate-developerPipline",developerPipeline)
17 |
18 | # Bootstrap Operations Pipeline
19 | operationsPipeline = ContainerPipelineConfiguration(
20 | projectName = "Fargate-Operations",
21 | allTest=False
22 | )
23 | CodePipelineGeneratorStack(app,"fargate-operationsPipeline",operationsPipeline)
24 | app.synth()
25 |
--------------------------------------------------------------------------------
/labs/fargate-ops-cdk/fargate_ops_cdk/fargate_workshop_ops_cluster.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_ecs as ecs,
5 | aws_ec2 as ec2,
6 | aws_cloudtrail as cloudtrail,
7 | core
8 | )
9 |
10 |
11 | class FargateWorkshopOpsCluster(core.Stack):
12 |
13 | def __init__(self, scope: core.Construct, id: str, vpc: ec2.IVpc, **kwargs) -> None:
14 | super().__init__(scope, id, **kwargs)
15 |
16 | self.trail = cloudtrail.Trail(self, 'ECSWorkshopCloudTrail')
17 |
18 | self.cluster = ecs.Cluster(
19 | scope = self,
20 | id = 'OpsCluster',
21 | vpc = vpc
22 | )
23 | # Adding service discovery namespace to cluster
24 | self.cluster.add_default_cloud_map_namespace(
25 | name="service",
26 | )
27 |
28 |
29 |
30 |
31 |
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/code_pipeline_generator/code_pipeline_configuration.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | class ContainerPipelineConfiguration:
4 | AllTest = None
5 | EndToEndTest = None
6 | IntegrationTests = None
7 | LoadTest = None
8 | LowerEnvironmentDeployment = None
9 | ProjectName = None
10 | UnitTest = None
11 |
12 | def __init__(
13 | self,
14 | projectName,
15 | allTest=True,
16 | endToEndTests = False,
17 | integrationTests = False,
18 | loadTest = False,
19 | lowerEnvironmentDeployment = True,
20 | unitTest=False,
21 | ):
22 | self.ProjectName = projectName
23 | self.AllTest = allTest
24 | self.UnitTest = unitTest
25 | self.EndToEndTest = endToEndTests
26 | self.IntegrationTests = integrationTests
27 | self.LoadTest = loadTest
28 | self.LowerEnvironmentDeployment = lowerEnvironmentDeployment
29 |
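30 | # Example (from app.py): ContainerPipelineConfiguration(projectName="Fargate-Operations", allTest=False)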
--------------------------------------------------------------------------------
/LICENSE-SAMPLECODE:
--------------------------------------------------------------------------------
1 | Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this
4 | software and associated documentation files (the "Software"), to deal in the Software
5 | without restriction, including without limitation the rights to use, copy, modify,
6 | merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
7 | permit persons to whom the Software is furnished to do so.
8 |
9 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
10 | INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
11 | PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
12 | HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
13 | OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
14 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
15 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/buildspec_secrets.yml:
--------------------------------------------------------------------------------
1 | version: 0.2
2 |
3 | phases:
4 | pre_build:
5 | commands:
6 | - echo Setting CodeCommit Credentials
7 | - git config --global credential.helper '!aws codecommit credential-helper $@'
8 | - git config --global credential.UseHttpPath true
9 | - echo Copying secrets_config.json to the application directory
10 | - cp configs/secrets_config.json secrets_config.json
11 | - echo Switching to the application directory
12 | - echo Installing pip and truffleHog
13 | - curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
14 | - python get-pip.py
15 | - pip install truffleHog
16 | build:
17 | commands:
18 | - echo Build started on `date`
19 | - echo Scanning with truffleHog...$commituri
20 | - trufflehog --regex --rules secrets_config.json --entropy=False --max_depth 1 --exclude_paths ./configs/exclude-patterns.txt $commituri
21 | post_build:
22 | commands:
23 | - echo Build completed on `date`
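24 | # truffleHog scans the commit at $commituri with the regexes from
25 | # secrets_config.json, skipping paths listed in exclude-patterns.txt.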
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/ecs_development_workshop/code_pipeline_configuration.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | class ContainerPipelineConfiguration:
4 | AllTest = None
5 | EndToEndTest = None
6 | IntegrationTests = None
7 | LoadTest = None
8 | LowerEnvironmentDeployment = None
9 | ProjectName = None
10 | UnitTest = None
11 | stage = None
12 |
13 | def __init__(
14 | self,
15 | projectName,
16 | stage,
17 | allTest=True,
18 | endToEndTests = False,
19 | integrationTests = False,
20 | loadTest = False,
21 | lowerEnvironmentDeployment = True,
22 | unitTest=False,
23 | ):
24 | self.stage = stage
25 | self.ProjectName = projectName
26 | self.AllTest = allTest
27 | self.UnitTest = unitTest
28 | self.EndToEndTest = endToEndTests
29 | self.IntegrationTests = integrationTests
30 | self.LoadTest = loadTest
31 | self.LowerEnvironmentDeployment = lowerEnvironmentDeployment
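32 |
33 | # Example (from app.py): ContainerPipelineConfiguration(projectName="fargate-dev-workshop", stage="test")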
--------------------------------------------------------------------------------
/labs/cloud9-iam.json:
--------------------------------------------------------------------------------
1 | {
2 | "Version": "2012-10-17",
3 | "Statement": [
4 | {
5 | "Sid": "FullAccess",
6 | "Effect": "Allow",
7 | "Action": [
8 | "s3:*",
9 | "kafka:*",
10 | "rds:*",
11 | "ecr:*",
12 | "ecs:*",
13 | "ec2:*",
14 | "servicediscovery:*",
15 | "lambda:*",
16 | "ssm:*",
17 | "codepipeline:*",
18 | "codecommit:*",
19 | "codebuild:*",
20 | "cloudwatch:*",
21 | "transfer:*",
22 | "elasticloadbalancing:*",
23 | "cloudformation:*",
24 | "autoscaling:*",
25 | "application-autoscaling:*",
26 | "iam:*",
27 | "events:*",
28 | "logs:*",
29 | "route53:*",
30 | "codedeploy:*",
31 | "kms:*"
32 | ],
33 | "Resource": ["*"]
34 | }
35 | ]
36 | }
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
3 |
4 |
5 | from aws_cdk import core
6 | import os
7 |
8 | from ecs_development_workshop.docker_build_to_ecr_pipeline import DockerBuildToEcrPipeline
9 | from ecs_development_workshop.code_pipeline_configuration import ContainerPipelineConfiguration
10 | from ecs_development_workshop.ecs_inf_fargate import EcsInfFargate
11 |
12 | app = core.App()
13 |
14 | # Bootstrap Developer Pipeline
15 | developerPipelineTest = ContainerPipelineConfiguration(
16 | projectName = "fargate-dev-workshop",
17 | stage = "test"
18 | )
19 |
20 | DockerBuildToEcrPipeline(
21 | app,
22 | "pipeline-to-ecr",
23 | config = developerPipelineTest,
24 | env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']}
25 | )
26 |
27 | EcsInfFargate(
28 | app,
29 | "ecs-inf-test",
30 | config = developerPipelineTest,
31 | env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']}
32 | )
33 |
34 | app.synth()
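35 |
36 | # CDK_DEFAULT_ACCOUNT / CDK_DEFAULT_REGION are populated by the CDK CLI from the
37 | # active AWS credentials, so run this app through `cdk synth` / `cdk deploy`.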
--------------------------------------------------------------------------------
/labs/batch-processing/sfn/buildspec.yml:
--------------------------------------------------------------------------------
1 | version: 0.2
2 |
3 | env:
4 | parameter-store:
5 | REPOSITORY_URI: "repo_uri_batch"
6 | REGION: "region"
7 | IMG_NAME: "image_batch"
8 |
9 | phases:
10 | pre_build:
11 | commands:
12 | - echo Logging in to Amazon ECR...
13 | - aws --version
14 | - $(aws ecr get-login --region $REGION --no-include-email)
15 | - COMMIT_HASH=$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | cut -c 1-7)
16 | - IMAGE_TAG=${COMMIT_HASH:=latest}
17 | build:
18 | commands:
19 | - echo Build started on `date`
20 | - echo Building the Docker image...
21 | - docker build -t $REPOSITORY_URI:latest .
22 | - docker tag $REPOSITORY_URI:latest $REPOSITORY_URI:$IMAGE_TAG
23 | post_build:
24 | commands:
25 | - echo Build completed on `date`
26 | - echo Pushing the Docker images...
27 | - docker push $REPOSITORY_URI:latest
28 | - docker push $REPOSITORY_URI:$IMAGE_TAG
29 | - echo Writing image definitions file...
30 | - printf '[{"name":"%s","imageUri":"%s"}]' $IMG_NAME $REPOSITORY_URI:$IMAGE_TAG > imagedefinitions.json
31 | artifacts:
32 | files: imagedefinitions.json
33 |
--------------------------------------------------------------------------------
/labs/batch-processing/sftp/buildspec.yml:
--------------------------------------------------------------------------------
1 | version: 0.2
2 |
3 | env:
4 | parameter-store:
5 | REPOSITORY_URI: "repo_uri_sftp"
6 | REGION: "region"
7 | IMG_NAME: "image_sftp"
8 |
9 | phases:
10 | pre_build:
11 | commands:
12 | - echo Logging in to Amazon ECR...
13 | - aws --version
14 | - $(aws ecr get-login --region $REGION --no-include-email)
15 | - COMMIT_HASH=$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | cut -c 1-7)
16 | - IMAGE_TAG=${COMMIT_HASH:=latest}
17 | build:
18 | commands:
19 | - echo Build started on `date`
20 | - echo Building the Docker image...
21 | - docker build -t $REPOSITORY_URI:latest .
22 | - docker tag $REPOSITORY_URI:latest $REPOSITORY_URI:$IMAGE_TAG
23 | post_build:
24 | commands:
25 | - echo Build completed on `date`
26 | - echo Pushing the Docker images...
27 | - docker push $REPOSITORY_URI:latest
28 | - docker push $REPOSITORY_URI:$IMAGE_TAG
29 | - echo Writing image definitions file...
30 | - printf '[{"name":"%s","imageUri":"%s"}]' $IMG_NAME $REPOSITORY_URI:$IMAGE_TAG > imagedefinitions.json
31 | artifacts:
32 | files: imagedefinitions.json
33 |
--------------------------------------------------------------------------------
/labs/stream-processing-pipelines/kafka_to_docdb/buildspec.yml:
--------------------------------------------------------------------------------
1 | version: 0.2
2 |
3 | env:
4 | parameter-store:
5 | REPOSITORY_URI: "repo_uri"
6 | REGION: "region"
7 | IMG_NAME: "image_streamproc"
8 |
9 | phases:
10 | pre_build:
11 | commands:
12 | - echo Logging in to Amazon ECR...
13 | - aws --version
14 | - $(aws ecr get-login --region $REGION --no-include-email)
15 | - COMMIT_HASH=$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | cut -c 1-7)
16 | - IMAGE_TAG=${COMMIT_HASH:=latest}
17 | build:
18 | commands:
19 | - echo Build started on `date`
20 | - echo Building the Docker image...
21 | - docker build -t $REPOSITORY_URI:latest .
22 | - docker tag $REPOSITORY_URI:latest $REPOSITORY_URI:$IMAGE_TAG
23 | post_build:
24 | commands:
25 | - echo Build completed on `date`
26 | - echo Pushing the Docker images...
27 | - docker push $REPOSITORY_URI:latest
28 | - docker push $REPOSITORY_URI:$IMAGE_TAG
29 | - echo Writing image definitions file...
30 | - printf '[{"name":"%s","imageUri":"%s"}]' $IMG_NAME $REPOSITORY_URI:$IMAGE_TAG > imagedefinitions.json
31 | artifacts:
32 | files: imagedefinitions.json
--------------------------------------------------------------------------------
/labs/batch-processing/sfn/app.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | import boto3
4 | import os
5 | import xmltodict
6 | import json
7 |
8 | # XML input file location in S3 is passed as an environment variable
9 | BUCKET=os.environ['BUCKET']
10 | S3PATH=os.environ['S3PATH']
11 | print("Bucket: " + BUCKET)
12 | print("S3 path: " + S3PATH)
13 |
14 | # Local storage (volume mounted under /opt/data)
15 | path_parts = os.path.split(S3PATH)
16 | path_name = path_parts[1]
17 | local_path = "/opt/data/{0}".format(path_name)
18 | print("Local path: " + local_path)
19 |
20 | # Download input file
21 | s3 = boto3.client('s3')
22 | s3.download_file(BUCKET, S3PATH, local_path)
23 | print("Downloaded from s3")
24 |
25 | # Convert to JSON
26 | with open(local_path, 'rb') as input_file:
27 | output = json.dumps(xmltodict.parse(input_file), indent=4)
28 | print("Converted to JSON")
29 |
30 | # Write file
31 | output_path = local_path + ".json"
32 | with open(output_path, 'w') as output_file:
33 | output_file.write(output)
34 | print("Wrote output")
35 |
36 | # Upload to S3
37 | s3.upload_file(output_path, BUCKET, S3PATH + ".json")
38 | print("Uploaded to s3")
39 |
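40 | # Example task environment (values illustrative):
41 | #   BUCKET=my-ingest-bucket S3PATH=data/customer.xml
42 | # which writes the converted file to s3://my-ingest-bucket/data/customer.xml.json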
--------------------------------------------------------------------------------
/labs/fargate-ops-cdk/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | import setuptools
4 |
5 |
6 | with open("README.md") as fp:
7 | long_description = fp.read()
8 |
9 |
10 | setuptools.setup(
11 | name="fargate_ops_cdk",
12 | version="0.0.1",
13 |
14 | description="Fargate Operations Workshop CDK",
15 | long_description=long_description,
16 | long_description_content_type="text/markdown",
17 |
18 | author="author",
19 |
20 | package_dir={"": "fargate_ops_cdk"},
21 | packages=setuptools.find_packages(where="fargate_ops_cdk"),
22 |
23 | install_requires=[
24 | "aws-cdk.core",
25 | ],
26 |
27 | python_requires=">=3.6",
28 |
29 | classifiers=[
30 | "Development Status :: 4 - Beta",
31 |
32 | "Intended Audience :: Developers",
33 |
34 | "License :: OSI Approved :: Apache Software License",
35 |
36 | "Programming Language :: JavaScript",
37 | "Programming Language :: Python :: 3 :: Only",
38 | "Programming Language :: Python :: 3.6",
39 | "Programming Language :: Python :: 3.7",
40 | "Programming Language :: Python :: 3.8",
41 |
42 | "Topic :: Software Development :: Code Generators",
43 | "Topic :: Utilities",
44 |
45 | "Typing :: Typed",
46 | ],
47 | )
48 |
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | import setuptools
4 |
5 |
6 | with open("README.md") as fp:
7 | long_description = fp.read()
8 |
9 |
10 | setuptools.setup(
11 | name="fargate_workshop_cdk",
12 | version="0.0.1",
13 |
14 | description="An empty CDK Python app",
15 | long_description=long_description,
16 | long_description_content_type="text/markdown",
17 |
18 | author="author",
19 |
20 | package_dir={"": "fargate_workshop_cdk"},
21 | packages=setuptools.find_packages(where="fargate_workshop_cdk"),
22 |
23 | install_requires=[
24 | "aws-cdk.core",
25 | ],
26 |
27 | python_requires=">=3.6",
28 |
29 | classifiers=[
30 | "Development Status :: 4 - Beta",
31 |
32 | "Intended Audience :: Developers",
33 |
34 | "License :: OSI Approved :: Apache Software License",
35 |
36 | "Programming Language :: JavaScript",
37 | "Programming Language :: Python :: 3 :: Only",
38 | "Programming Language :: Python :: 3.6",
39 | "Programming Language :: Python :: 3.7",
40 | "Programming Language :: Python :: 3.8",
41 |
42 | "Topic :: Software Development :: Code Generators",
43 | "Topic :: Utilities",
44 |
45 | "Typing :: Typed",
46 | ],
47 | )
48 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | import setuptools
4 |
5 |
6 | with open("README.md") as fp:
7 | long_description = fp.read()
8 |
9 |
10 | setuptools.setup(
11 | name="ecs_development_workshop",
12 | version="0.0.1",
13 |
14 | description="An empty CDK Python app",
15 | long_description=long_description,
16 | long_description_content_type="text/markdown",
17 |
18 | author="author",
19 |
20 | package_dir={"": "ecs_development_workshop"},
21 | packages=setuptools.find_packages(where="ecs_development_workshop"),
22 |
23 | install_requires=[
24 | "aws-cdk.core",
25 | ],
26 |
27 | python_requires=">=3.6",
28 |
29 | classifiers=[
30 | "Development Status :: 4 - Beta",
31 |
32 | "Intended Audience :: Developers",
33 |
34 | "License :: OSI Approved :: Apache Software License",
35 |
36 | "Programming Language :: JavaScript",
37 | "Programming Language :: Python :: 3 :: Only",
38 | "Programming Language :: Python :: 3.6",
39 | "Programming Language :: Python :: 3.7",
40 | "Programming Language :: Python :: 3.8",
41 |
42 | "Topic :: Software Development :: Code Generators",
43 | "Topic :: Utilities",
44 |
45 | "Typing :: Typed",
46 | ],
47 | )
48 |
--------------------------------------------------------------------------------
/labs/fargate-ops-cdk/fargate_ops_cdk/fargate_workshop_ops_stack.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_ec2 as ec2,
5 | aws_ecs as ecs,
6 | aws_iam as iam,
7 | core
8 | )
9 | from typing import List
10 |
11 |
12 | class FargateWorkshopOpsStack(core.Stack):
13 |
14 | def __init__(self, scope: core.Construct, id: str, **kwargs):
15 | super().__init__(scope, id, **kwargs)
16 |
17 | self.vpc = ec2.Vpc(
18 | self, "BaseVPC",
19 | cidr='10.0.0.0/24',
20 | enable_dns_support=True,
21 | enable_dns_hostnames=True,
22 | )
23 |
24 | self.services_3000_sec_group = ec2.SecurityGroup(
25 | self, "FrontendToBackendSecurityGroup",
26 | allow_all_outbound=True,
27 | description="Security group for frontend service to talk to backend services",
28 | vpc=self.vpc
29 | )
30 |
31 | self.sec_grp_ingress_self_3000 = ec2.CfnSecurityGroupIngress(
32 | self, "InboundSecGrp3000",
33 | ip_protocol='TCP',
34 | source_security_group_id=self.services_3000_sec_group.security_group_id,
35 | from_port=3000,
36 | to_port=3000,
37 | group_id=self.services_3000_sec_group.security_group_id
38 | )
39 |
40 |
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/setup.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | import setuptools
4 |
5 |
6 | with open("README.md") as fp:
7 | long_description = fp.read()
8 |
9 |
10 | setuptools.setup(
11 | name="code_pipeline_generator",
12 | version="0.0.1",
13 |
14 | description="An empty CDK Python app",
15 | long_description=long_description,
16 | long_description_content_type="text/markdown",
17 |
18 | author="author",
19 |
20 | package_dir={"": "code_pipeline_generator"},
21 | packages=setuptools.find_packages(where="code_pipeline_generator"),
22 |
23 | install_requires=[
24 | "aws-cdk.core",
25 | ],
26 |
27 | python_requires=">=3.6",
28 |
29 | classifiers=[
30 | "Development Status :: 4 - Beta",
31 |
32 | "Intended Audience :: Developers",
33 |
34 | "License :: OSI Approved :: Apache Software License",
35 |
36 | "Programming Language :: JavaScript",
37 | "Programming Language :: Python :: 3 :: Only",
38 | "Programming Language :: Python :: 3.6",
39 | "Programming Language :: Python :: 3.7",
40 | "Programming Language :: Python :: 3.8",
41 |
42 | "Topic :: Software Development :: Code Generators",
43 | "Topic :: Utilities",
44 |
45 | "Typing :: Typed",
46 | ],
47 | )
48 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/task-definition.json:
--------------------------------------------------------------------------------
1 | {
2 | "family": "PLACEHOLDER",
3 | "containerDefinitions": [
4 | {
5 | "portMappings": [
6 | {
7 | "hostPort": 80,
8 | "protocol": "tcp",
9 | "containerPort": 80
10 | }
11 | ],
12 | "image": "",
13 | "essential": true,
14 | "name": "fargate_task_container",
15 | "logConfiguration": {
16 | "logDriver": "awslogs",
17 | "options": {
18 | "awslogs-group": "fargate-dev-workshop-test",
19 | "awslogs-region": "us-west-2",
20 | "awslogs-stream-prefix": "fargate-dev-workshop-test"
21 | }
22 | },
23 | "privileged": false,
24 | "linuxParameters": {
25 | "capabilities": {
26 | "drop": [
27 | "SYS_ADMIN",
28 | "NET_ADMIN"
29 | ]
30 | }
31 | }
32 | }
33 | ],
34 | "cpu": "512",
35 | "memory": "1024",
36 | "taskRoleArn": "PLACEHOLDER_TASK_ROLE",
37 | "executionRoleArn": "PLACEHOLDER_EXECUTION_ROLE",
38 | "requiresCompatibilities": [
39 | "FARGATE"
40 | ],
41 | "networkMode": "awsvpc"
42 | }
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/secrets_config.json:
--------------------------------------------------------------------------------
1 | {
2 | "Slack Token": "(xox[p|b|o|a]-[0-9]{12}-[0-9]{12}-[0-9]{12}-[a-z0-9]{32})",
3 | "RSA private key": "-----BEGIN RSA PRIVATE KEY-----",
4 | "SSH (OPENSSH) private key": "-----BEGIN OPENSSH PRIVATE KEY-----",
5 | "SSH (DSA) private key": "-----BEGIN DSA PRIVATE KEY-----",
6 | "SSH (EC) private key": "-----BEGIN EC PRIVATE KEY-----",
7 | "PGP private key block": "-----BEGIN PGP PRIVATE KEY BLOCK-----",
8 | "Facebook Oauth": "[f|F][a|A][c|C][e|E][b|B][o|O][o|O][k|K].*['|\"][0-9a-f]{32}['|\"]",
9 | "Twitter Oauth": "[t|T][w|W][i|I][t|T][t|T][e|E][r|R].*['|\"][0-9a-zA-Z]{35,44}['|\"]",
10 | "GitHub": "[g|G][i|I][t|T][h|H][u|U][b|B].*['|\"][0-9a-zA-Z]{35,40}['|\"]",
11 | "Google Oauth": "(\"client_secret\":\"[a-zA-Z0-9-_]{24}\")",
12 | "AWS API Key": "AKIA[0-9A-Z]{16}",
13 | "Heroku API Key": "[h|H][e|E][r|R][o|O][k|K][u|U].*[0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12}",
14 | "Generic Secret": "[s|S][e|E][c|C][r|R][e|E][t|T].*['|\"][0-9a-zA-Z]{32,45}['|\"]",
15 | "Generic API Key": "[a|A][p|P][i|I][_]?[k|K][e|E][y|Y].*['|\"][0-9a-zA-Z]{32,45}['|\"]",
16 | "Slack Webhook": "https://hooks.slack.com/services/T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8}/[a-zA-Z0-9_]{24}",
17 | "Google (GCP) Service-account": "\"type\": \"service_account\"",
18 | "Twilio API Key": "SK[a-z0-9]{32}",
19 | "Password in URL": "[a-zA-Z]{3,10}://[^/\\s:@]{3,20}:[^/\\s:@]{3,20}@.{1,100}[\"'\\s]"
20 | }
--------------------------------------------------------------------------------
/diagrams/data_engineering_stream.puml:
--------------------------------------------------------------------------------
1 | @startuml Data Engineering with Fargate
2 |
3 | ' Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
4 |
5 | !define AWSPuml https://raw.githubusercontent.com/awslabs/aws-icons-for-plantuml/master/dist
6 | !include AWSPuml/AWSCommon.puml
7 | !include AWSPuml/NetworkingAndContentDelivery/all.puml
8 | !include AWSPuml/Compute/all.puml
9 | !include AWSPuml/Database/all.puml
10 | !include AWSPuml/Mobile/all.puml
11 | !include AWSPuml/Storage/all.puml
12 | !include AWSPuml/Analytics/all.puml
13 | !include AWSPuml/ApplicationIntegration/all.puml
14 | !include AWSPuml/ManagementAndGovernance/all.puml
15 |
16 | skinparam frame {
17 | backgroundColor<> lightblue
18 | }
19 |
20 | title Stream Processing with Fargate
21 |
22 | actor "Users" as client
23 | package "DMZ" {
24 | ELBApplicationLoadBalancer(elb, "ALB", "load balancer", "Kafka Connect endpoint")
25 | }
26 | package "Fargate" {
27 | ECSService(kafkaconnect, "Kafka Connect", "Microservice", "")
28 | ECSService(kafkaconsumer, "Kafka Consumer", "Stream processor", "")
29 | }
30 | package "Other AWS" {
31 | S3Bucket(bucket, "Kafka S3 Storage", "Object Storage", "kafka data")
32 | ManagedStreamingforKafka(kafka, "Kafka Cluster", "Streaming Ingest", "")
33 | DocumentDBwithMongoDBcompatibility(docdb, "DocumentDB", "Mongo-compatible database", "")
34 | }
35 |
36 | client --> elb
37 | elb --> kafkaconnect
38 | kafkaconnect --> bucket
39 | kafkaconnect <-> kafka
40 |
41 | kafka --> kafkaconsumer
42 | kafkaconsumer --> docdb
43 |
44 | @enduml
45 |
--------------------------------------------------------------------------------
/labs/stream-processing-pipelines/s3sink/buildspec.yml:
--------------------------------------------------------------------------------
1 | version: 0.2
2 |
3 | env:
4 | parameter-store:
5 | REPOSITORY_URI: "repo_uri_s3_sink"
6 | HC_REPOSITORY_URI: "repo_uri_s3_sink_hc"
7 | REGION: "region"
8 | IMG_NAME: "image_s3sink"
9 | HC_IMG_NAME: "image_s3sink_hc"
10 |
11 | phases:
12 | pre_build:
13 | commands:
14 | - echo Logging in to Amazon ECR...
15 | - aws --version
16 | - $(aws ecr get-login --region $REGION --no-include-email)
17 | - COMMIT_HASH=$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | cut -c 1-7)
18 | - IMAGE_TAG=${COMMIT_HASH:=latest}
19 | build:
20 | commands:
21 | - echo Build started on `date`
22 | - echo Building the Docker image...
23 | - docker build -t $REPOSITORY_URI:latest .
24 | - docker tag $REPOSITORY_URI:latest $REPOSITORY_URI:$IMAGE_TAG
25 | - docker build -t $HC_REPOSITORY_URI:latest $CODEBUILD_SRC_DIR_codehc
26 | - docker tag $HC_REPOSITORY_URI:latest $HC_REPOSITORY_URI:$IMAGE_TAG
27 | post_build:
28 | commands:
29 | - echo Build completed on `date`
30 | - echo Pushing the Docker images...
31 | - docker push $REPOSITORY_URI:latest
32 | - docker push $REPOSITORY_URI:$IMAGE_TAG
33 | - docker push $HC_REPOSITORY_URI:latest
34 | - docker push $HC_REPOSITORY_URI:$IMAGE_TAG
35 | - echo Writing image definitions file...
36 | - printf '[{"name":"%s","imageUri":"%s"}, {"name":"%s","imageUri":"%s"}]' $IMG_NAME $REPOSITORY_URI:$IMAGE_TAG $HC_IMG_NAME $HC_REPOSITORY_URI:$IMAGE_TAG > imagedefinitions.json
37 | artifacts:
38 | files: imagedefinitions.json
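39 |
40 | # $CODEBUILD_SRC_DIR_codehc is CodeBuild's path variable for a secondary source
41 | # input (assumed here to be the s3sinkhc health-check build context).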
--------------------------------------------------------------------------------
/diagrams/data_engineering.puml:
--------------------------------------------------------------------------------
1 | @startuml Data Engineering with Fargate
2 |
3 | ' Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
4 |
5 | !define AWSPuml https://raw.githubusercontent.com/awslabs/aws-icons-for-plantuml/master/dist
6 | !include AWSPuml/AWSCommon.puml
7 | !include AWSPuml/NetworkingAndContentDelivery/all.puml
8 | !include AWSPuml/Compute/all.puml
9 | !include AWSPuml/Database/all.puml
10 | !include AWSPuml/Mobile/all.puml
11 | !include AWSPuml/Storage/all.puml
12 | !include AWSPuml/Analytics/all.puml
13 | !include AWSPuml/ApplicationIntegration/all.puml
14 | !include AWSPuml/ManagementAndGovernance/all.puml
15 |
16 | skinparam frame {
17 | backgroundColor<> lightblue
18 | }
19 |
20 | title Batch Processing with Fargate
21 |
22 | package "Fargate" {
23 | ECSContainer1(schtask, "Scheduled Task", "Workers", "")
24 | ECSService(batchworker, "Batch Processors", "Workers", "")
25 | }
26 | package "Other AWS" {
27 | together {
28 | S3Bucket(batchbucket, "S3 Storage", "Object Storage", "outputs")
29 | S3Bucket(ingestbucket, "S3 Storage", "Object Storage", "inputs")
30 | }
31 | together {
32 | StepFunctions(sfn, "Step Functions", "Job Scheduler", "")
33 | CloudWatchEventTimeBased(cw, "Cloud Cron", "CloudWatch Alarm", "")
34 | }
35 | }
36 | component sftp as "SFTP Server"
37 |
38 | sfn -[#blue]-> batchworker: Invoke
39 | ingestbucket -[#blue]-> batchworker: Process
40 | batchworker -[#blue]-> batchbucket: Store
41 |
42 | cw -[#red]-> schtask: Invoke
43 | sftp -[#red]-> schtask: Process
44 |
45 | Fargate -[hidden]> sftp
46 | cw -[hidden]> batchbucket
47 |
48 | @enduml
49 |
--------------------------------------------------------------------------------
/labs/fargate-ops-cdk/fargate_ops_cdk/fargate_workshop_ops_failed.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_ecs as ecs,
5 | aws_ec2 as ec2,
6 | aws_logs as logs,
7 | aws_ecs_patterns as ecs_patterns,
8 | core
9 | )
10 |
11 | class FargateWorkshopOpsFailed(core.Stack):
12 |
13 |     def __init__(self, scope: core.Construct, id: str, cluster: ecs.ICluster, vpc, private_subnets, sec_group, desired_service_count, **kwargs):
14 | super().__init__(scope, id, **kwargs)
15 | self.cluster = cluster
16 | self.vpc = vpc
17 | self.private_subnets = private_subnets
18 | self.sec_group = sec_group
19 |
20 | self.service_discovery = cluster.default_cloud_map_namespace
21 | self.desired_service_count = desired_service_count
22 |
23 | self.task_definition = ecs.FargateTaskDefinition(
24 | self, "FailedServiceTaskDef",
25 | cpu=256,
26 | memory_limit_mib=512,
27 | )
28 |
29 | self.task_definition.add_container(
30 | "FailedServiceContainer",
31 | image=ecs.ContainerImage.from_registry("mbednarz/fargate_issue"),
32 | logging=ecs.AwsLogDriver(stream_prefix="ecsdemo-nodejs", log_retention=logs.RetentionDays.THREE_DAYS),
33 | )
34 |
35 | self.fargate_service = ecs.FargateService(
36 | self, "FailedFargateService",
37 | service_name="Fargate-Backend-Failed",
38 | task_definition=self.task_definition,
39 | cluster=self.cluster,
40 | max_healthy_percent=100,
41 | min_healthy_percent=0,
42 | vpc_subnets={
43 | "subnet_name" : "Private"
44 | },
45 | desired_count=self.desired_service_count,
46 | security_group=self.sec_group,
47 | )
48 |
--------------------------------------------------------------------------------
/diagrams/microservice.puml:
--------------------------------------------------------------------------------
1 | @startuml Microservices with Fargate
2 |
3 | ' Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
4 |
5 | !define AWSPuml https://raw.githubusercontent.com/awslabs/aws-icons-for-plantuml/master/dist
6 | !include AWSPuml/AWSCommon.puml
7 | !include AWSPuml/NetworkingAndContentDelivery/all.puml
8 | !include AWSPuml/Compute/all.puml
9 | !include AWSPuml/Database/all.puml
10 | !include AWSPuml/Mobile/all.puml
11 |
12 | skinparam frame {
13 | backgroundColor<<mesh>> lightblue
14 | }
15 |
16 | title Microservices with Fargate
17 |
18 | actor "Client" as client
19 | package "DMZ" {
20 | ELBApplicationLoadBalancer(elb, "ALB", "load balancer", "External communication")
21 | APIGateway(apigw, "API Gateway", "portal", "API endpoints")
22 | }
23 | package "Private" {
24 | DocumentDBwithMongoDBcompatibility(docdb, "DocumentDB", "Mongo-compatible database", "Database")
25 | EC2Instance(billing, "Back office system", "billing", "")
26 | }
27 | package "Fargate" {
28 | frame "Service Mesh" <<mesh>> as appmesh {
29 | ECSService(frontend, "Front end service", "Microservice", "Service")
30 | together {
31 | ECSService(svcA, "Enterprise", "Microservice", "Service")
32 | ECSService(svcB, "Yorktown", "Microservice", "Service")
33 | ECSService(svcC, "Lexington", "Microservice", "Service")
34 | }
35 | }
36 | }
37 | CloudMap(cloudmap, "Cloud Map", "service discovery", "")
38 |
39 | client --> apigw
40 | apigw --> elb
41 | elb --> frontend
42 | frontend --> svcA
43 | frontend --> svcB
44 | frontend --> svcC
45 | svcA --> cloudmap
46 | cloudmap --> docdb
47 | svcB --> cloudmap
48 | cloudmap --> billing
49 |
50 | note right of cloudmap: Use CloudMap for service discovery.
51 | note top of appmesh: Service mesh for microservice communication
52 | legend
53 | Pattern for each environment
54 | end legend
55 |
56 | @enduml
57 |
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/README.md:
--------------------------------------------------------------------------------
1 |
2 | # Welcome to your CDK Python project!
3 |
4 | _// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: CC-BY-SA-4.0_
5 |
6 | The `cdk.json` file tells the CDK Toolkit how to execute your app.
7 |
8 | This project is set up like a standard Python project. The initialization
9 | process also creates a virtualenv within this project, stored under the .env
10 | directory. To create the virtualenv it assumes that there is a `python3`
11 | (or `python` for Windows) executable in your path with access to the `venv`
12 | package. If for any reason the automatic creation of the virtualenv fails,
13 | you can create the virtualenv manually.
14 |
15 | To manually create a virtualenv on MacOS and Linux:
16 |
17 | ```
18 | $ python3 -m venv .env
19 | ```
20 |
21 | After the init process completes and the virtualenv is created, you can use the following
22 | step to activate your virtualenv.
23 |
24 | ```
25 | $ source .env/bin/activate
26 | ```
27 |
28 | If you are on a Windows platform, you would activate the virtualenv like this:
29 |
30 | ```
31 | % .env\Scripts\activate.bat
32 | ```
33 |
34 | Once the virtualenv is activated, you can install the required dependencies.
35 |
36 | ```
37 | $ pip install -r requirements.txt
38 | ```
39 |
40 | At this point you can now synthesize the CloudFormation template for this code.
41 |
42 | ```
43 | $ cdk synth
44 | ```
45 |
46 | To add additional dependencies, for example other CDK libraries, just add
47 | them to your `setup.py` file and rerun the `pip install -r requirements.txt`
48 | command.
49 |
50 | # Useful commands
51 |
52 | * `cdk ls` list all stacks in the app
53 | * `cdk synth` emits the synthesized CloudFormation template
54 | * `cdk deploy` deploy this stack to your default AWS account/region
55 | * `cdk diff` compare deployed stack with current state
56 | * `cdk docs` open CDK documentation
57 |
58 | Enjoy!
59 |
--------------------------------------------------------------------------------
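The README above suggests adding further CDK construct libraries to `setup.py` and re-running `pip install -r requirements.txt`. A sketch of what that might look like, assuming setuptools; the module names and version pins are illustrative and must match the CDK release you actually use:

```
# setup.py (excerpt) -- illustrative only; pin every aws-cdk.* module to the
# same version so the install resolves cleanly.
from setuptools import setup, find_packages

setup(
    name="code_pipeline_generator",
    version="0.0.1",
    packages=find_packages(),
    install_requires=[
        "aws-cdk.core==1.32.0",              # example version pin
        "aws-cdk.aws-codepipeline==1.32.0",
        "aws-cdk.aws-codebuild==1.32.0",
        "aws-cdk.aws-codecommit==1.32.0",
    ],
)
```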
/labs/batch-processing/solutions/batch_sfn.json:
--------------------------------------------------------------------------------
1 | {
2 | "Comment": "This workflow processes a list of XML files.",
3 | "StartAt": "ProcessAllItems",
4 | "States": {
5 | "ProcessAllItems": {
6 | "Type": "Map",
7 | "ItemsPath": "$",
8 | "Parameters": {
9 | "InputFile.$": "$$.Map.Item.Value"
10 | },
11 | "MaxConcurrency": 5,
12 | "Iterator": {
13 | "StartAt": "ProcessFile",
14 | "States": {
15 | "ProcessFile": {
16 | "Type": "Task",
17 | "Resource": "arn:aws:states:::ecs:runTask.sync",
18 | "Parameters": {
19 | "LaunchType":"FARGATE",
20 | "Cluster":"arn:aws:ecs:::cluster/",
21 | "TaskDefinition":"arn:aws:ecs:::task-definition/:",
22 | "NetworkConfiguration":{
23 | "AwsvpcConfiguration": {
24 | "Subnets":[
25 | "",""
26 | ],
27 | "SecurityGroups": [
28 | ""
29 | ]
30 | }
31 | },
32 | "Overrides":{
33 | "ContainerOverrides":[
34 | {
35 | "Name":"BatchContainer",
36 | "Environment":[
37 | {
38 | "Name":"S3PATH",
39 | "Value.$":"$.InputFile"
40 | }
41 | ]
42 | }
43 | ]
44 | }
45 | },
46 | "End": true
47 | }
48 | }
49 | },
50 | "Next": "StopDone"
51 | },
52 | "StopDone": {
53 | "Type": "Succeed"
54 | }
55 | }
56 | }
--------------------------------------------------------------------------------
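The Map state above fans out over whatever JSON array the execution receives (`ItemsPath` is `$`), binding each element to the `S3PATH` environment variable of one Fargate task via the container override. A sketch of starting the workflow with boto3, using a placeholder state machine ARN and example S3 URIs:

```
import json
import boto3

sfn = boto3.client("stepfunctions")

# ItemsPath is "$", so the execution input is just a JSON array; each
# element becomes one ProcessFile task with S3PATH set to that value.
input_files = [
    "s3://my-ingest-bucket/customer.xml",
    "s3://my-ingest-bucket/plane.xml",
]

response = sfn.start_execution(
    stateMachineArn="arn:aws:states:us-east-1:123456789012:stateMachine:BatchSfn",
    input=json.dumps(input_files),
)
print(response["executionArn"])
```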
/labs/fargate-ops-cdk/fargate_ops_cdk/fargate_workshop_ops_crystal_backend.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_ecs as ecs,
5 | aws_ec2 as ec2,
6 | aws_logs as logs,
7 | aws_ecs_patterns as ecs_patterns,
8 | core
9 | )
10 |
11 | class FargateWorkshopOpsCrystalBackend(core.Stack):
12 |
13 |     def __init__(self, scope: core.Construct, id: str, cluster: ecs.ICluster, vpc, private_subnets, sec_group, desired_service_count, **kwargs):
14 | super().__init__(scope, id, **kwargs)
15 | self.cluster = cluster
16 | self.vpc = vpc
17 | self.private_subnets = private_subnets
18 | self.sec_group = sec_group
19 |
20 | self.service_discovery = cluster.default_cloud_map_namespace
21 | self.desired_service_count = desired_service_count
22 |
23 |
24 | self.task_definition = ecs.FargateTaskDefinition(
25 | self, "BackendCrystalServiceTaskDef",
26 | cpu=256,
27 | memory_limit_mib=512,
28 | )
29 |
30 | self.task_definition.add_container(
31 | "BackendCrystalServiceContainer",
32 | image=ecs.ContainerImage.from_registry("adam9098/ecsdemo-crystal"),
33 | logging=ecs.AwsLogDriver(stream_prefix="ecsdemo-crystal", log_retention=logs.RetentionDays.THREE_DAYS),
34 | )
35 |
36 | self.fargate_service = ecs.FargateService(
37 | self, "BackendCrystalFargateService",
38 | service_name="Fargate-Backend-Crystal",
39 | task_definition=self.task_definition,
40 | cluster=self.cluster,
41 | max_healthy_percent=100,
42 | min_healthy_percent=0,
43 | vpc_subnets={
44 | "subnet_name" : "Private"
45 | },
46 | desired_count=self.desired_service_count,
47 | cloud_map_options={
48 | "name": "ecsdemo-crystal"
49 | },
50 | security_group=self.sec_group
51 | )
52 |
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/fargate_workshop_cdk/fargate_workshop_network_stack.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_ec2 as ec2,
5 | core
6 | )
7 | import itertools
8 |
9 |
10 | class FargateWorkshopNetworkStack(core.Stack):
11 |
12 | def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
13 | super().__init__(scope, id, **kwargs)
14 |
15 | # VPC with private and public subnets
16 | self.vpc = ec2.Vpc( self, "FargateVpc", max_azs=3)
17 |
18 | # import default VPC
19 | self.default_vpc = ec2.Vpc.from_lookup(self, "DefaultVPC",
20 | # This imports the default VPC but you can also
21 | # specify a 'vpcName' or 'tags'.
22 | is_default=True
23 | )
24 | self.default_vpc_cidr_block = '172.31.0.0/16'
25 |
26 | # peering connection
27 | self.peer = ec2.CfnVPCPeeringConnection(
28 | scope = self,
29 | id = "VpcPeer",
30 | peer_vpc_id = self.default_vpc.vpc_id,
31 | vpc_id = self.vpc.vpc_id,
32 | peer_region = self.region
33 | )
34 |
35 | # routes
36 | ii = 0
37 | for subnet in itertools.chain(self.vpc.private_subnets,self.vpc.public_subnets):
38 | route = ec2.CfnRoute(self,
39 | "PeerRoute{0}".format(ii),
40 | route_table_id= subnet.route_table.route_table_id,
41 | destination_cidr_block= self.default_vpc_cidr_block,
42 | vpc_peering_connection_id= self.peer.ref
43 | )
44 | ii = ii + 1
45 | subnet = self.default_vpc.public_subnets[0]
46 | route = ec2.CfnRoute(self,
47 | "PeerRoute{0}".format(ii),
48 | route_table_id= subnet.route_table.route_table_id,
49 | destination_cidr_block= self.vpc.vpc_cidr_block,
50 | vpc_peering_connection_id= self.peer.ref
51 | )
52 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/docker_build_deploy.yml:
--------------------------------------------------------------------------------
1 | version: 0.2
2 |
3 | phases:
4 | install:
5 | commands:
6 | - apt-get update
7 | - apt-get install -y jq
8 | - npm install -g npm@6.4.1
9 |
10 | pre_build:
11 | commands:
12 | - echo 'starting build...'
13 | - export IMAGE_TAG=build-`echo $CODEBUILD_BUILD_ID | awk -F":" '{print $2}'`
14 | - $(aws ecr get-login --no-include-email --region $AWS_DEFAULT_REGION)
15 | build:
16 | commands:
17 | # Set up environment variables like image tag and repo
18 |       - echo Entered the build phase...
19 |
20 | #docker commands
21 |
22 | - echo $project_name:$IMAGE_TAG
23 |
24 | - docker build -t $project_name:$IMAGE_TAG .
25 | - docker tag $project_name:$IMAGE_TAG $ecr:$IMAGE_TAG
26 |       - docker push $ecr:$IMAGE_TAG
27 |
28 | # Consume base image from the pipeline
29 |       - export BASE_IMAGE=`jq -r '.ImageURI' <$CODEBUILD_SRC_DIR_BaseImage/imageDetail.json`
31 | - sed -i "s|$project_name:release|$BASE_IMAGE|g" Dockerfile
32 |
33 | # Generate appspec and task definition files (filling in subnet IDs, security group IDs, etc)
34 | - cd configs
35 | - mkdir build
36 | - npm ci
37 |       - node produce-config.js -g test -s "${project_name}test"
38 |       - node produce-config.js -g prod -s "${project_name}prod"
39 |       # - node produce-config.js -g test -s "${project_name}test" -h "${project_name}hooks-test"
40 |       # - node produce-config.js -g prod -s "${project_name}prod" -h "${project_name}hooks-prod"
41 | artifacts:
42 | secondary-artifacts:
43 | BuildArtifact:
44 | files:
45 | - configs/build/appspec-prod.json
46 | - configs/build/appspec-test.json
47 | - configs/build/task-definition-test.json
48 | - configs/build/task-definition-prod.json
49 | discard-paths: yes
50 | ImageDetails:
51 | files:
52 | - imageDetail.json
53 | discard-paths: yes
--------------------------------------------------------------------------------
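The `jq` and `sed` commands above pull the base image URI recorded by the upstream pipeline stage out of `imageDetail.json` and splice it into the Dockerfile's `FROM` line. A rough Python equivalent, with `myproject` standing in for the real `$project_name`:

```
import json

# Read the base image URI recorded by the upstream stage
with open("imageDetail.json") as f:
    base_image = json.load(f)["ImageURI"]

with open("Dockerfile") as f:
    dockerfile = f.read()

# Splice it into the FROM line, mirroring:
#   sed -i "s|$project_name:release|$BASE_IMAGE|g" Dockerfile
dockerfile = dockerfile.replace("myproject:release", base_image)

with open("Dockerfile", "w") as f:
    f.write(dockerfile)
```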
/labs/fargate-workshop-cdk/fargate_workshop_cdk/fargate_workshop_dataeng_sftp.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_codepipeline as codepipeline,
5 | aws_codecommit as codecommit,
6 | aws_codebuild as codebuild,
7 | aws_iam as iam,
8 | aws_codepipeline_actions as actions,
9 | aws_ecs as ecs,
10 | aws_ecs_patterns as ecs_patterns,
11 | aws_ecr as ecr,
12 | aws_ec2 as ec2,
13 | aws_ssm as ssm,
14 | aws_applicationautoscaling as applicationautoscaling,
15 | core
16 | )
17 |
18 |
19 | class FargateWorkshopDataengSftpStack(core.Stack):
20 |
21 | def __init__(self, scope: core.Construct, id: str, cluster: ecs.ICluster, repo: ecr.IRepository, **kwargs) -> None:
22 | super().__init__(scope, id, **kwargs)
23 |
24 | # service skeleton
25 |         sftp_task = ecs_patterns.ScheduledFargateTask(
26 | scope = self,
27 | id = "SftpTaskDef",
28 | cluster=cluster,
29 | desired_task_count=1,
30 | schedule = applicationautoscaling.Schedule.rate(duration = core.Duration.minutes(5)),
31 | scheduled_fargate_task_image_options = ecs_patterns.ScheduledFargateTaskImageOptions(
32 | image=ecs.ContainerImage.from_ecr_repository(repository = repo, tag = 'latest'),
33 | cpu=1024,
34 | memory_limit_mib=2048
35 | )
36 |
37 | )
38 |         sftp_task.task_definition.task_role.add_to_policy(
39 | statement = iam.PolicyStatement(
40 | resources = ['*'],
41 | actions = ['servicediscovery:DiscoverInstances', 'secretsmanager:Get*', 'ec2:Describe*']
42 | )
43 | )
44 | ssm.StringParameter(
45 | scope = self,
46 | id = 'SSMParamSftpImageName',
47 |             string_value = sftp_task.task_definition.default_container.container_name,
48 | parameter_name = 'image_sftp'
49 | )
50 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/deployment-group.json:
--------------------------------------------------------------------------------
1 | {
2 | "applicationName": "AppECS-default-trivia-demo-backend-PLACEHOLDER_STAGE",
3 | "autoRollbackConfiguration": {
4 | "enabled": true,
5 | "events": [
6 | "DEPLOYMENT_FAILURE",
7 | "DEPLOYMENT_STOP_ON_REQUEST",
8 | "DEPLOYMENT_STOP_ON_ALARM"
9 | ]
10 | },
11 | "blueGreenDeploymentConfiguration": {
12 | "deploymentReadyOption": {
13 | "actionOnTimeout": "CONTINUE_DEPLOYMENT",
14 | "waitTimeInMinutes": 0
15 | },
16 | "terminateBlueInstancesOnDeploymentSuccess": {
17 | "action": "TERMINATE",
18 | "terminationWaitTimeInMinutes": 5
19 | }
20 | },
21 | "deploymentStyle": {
22 | "deploymentOption": "WITH_TRAFFIC_CONTROL",
23 | "deploymentType": "BLUE_GREEN"
24 | },
25 | "loadBalancerInfo": {
26 | "targetGroupPairInfoList": [
27 | {
28 | "targetGroups": [
29 | {
30 | "name": "TARGET_GROUP_PLACEHOLDER1"
31 | },
32 | {
33 | "name": "TARGET_GROUP_PLACEHOLDER2"
34 | }
35 | ],
36 | "prodTrafficRoute": {
37 | "listenerArns": [
38 | "arn:aws:elasticloadbalancing:us-east-1:1234567890:listener/app/ecs-demo/1c70194522f801da/PLACEHOLDER"
39 | ]
40 | },
41 | "testTrafficRoute": {
42 | "listenerArns": [
43 | "arn:aws:elasticloadbalancing:us-east-1:1234567890:listener/app/ecs-demo/1c70194522f801da/PLACEHOLDER"
44 | ]
45 | }
46 | }
47 | ]
48 | },
49 | "alarmConfiguration": {
50 | "enabled": true,
51 | "ignorePollAlarmFailure": false,
52 | "alarms": [
53 | {
54 | "name": "PLACEHOLDER_ALARM"
55 | }
56 | ]
57 | },
58 | "serviceRoleArn": "PLACEHOLDER_ROLE",
59 | "ecsServices": [
60 | {
61 | "serviceName": "trivia-backend-demo-PLACEHOLDER_STAGE",
62 | "clusterName": "default"
63 | }
64 | ]
65 | }
--------------------------------------------------------------------------------
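Once the placeholders above are filled in, this file maps directly onto the CodeDeploy `CreateDeploymentGroup` API. A sketch with boto3; note the API also requires a `deploymentGroupName`, which is absent from the JSON and supplied here with an example value:

```
import json
import boto3

with open("deployment-group.json") as f:
    config = json.load(f)

codedeploy = boto3.client("codedeploy")
codedeploy.create_deployment_group(
    deploymentGroupName="trivia-backend-demo-test",  # example name; not in the JSON
    **config,
)
```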
/labs/fargate-ops-cdk/fargate_ops_cdk/fargate_workshop_ops_node_backend.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_ecs as ecs,
5 | aws_ec2 as ec2,
6 | aws_logs as logs,
7 | aws_ecs_patterns as ecs_patterns,
8 | core
9 | )
10 |
11 | class FargateWorkshopOpsNodeBackend(core.Stack):
12 |
13 |     def __init__(self, scope: core.Construct, id: str, cluster: ecs.ICluster, vpc, private_subnets, sec_group, desired_service_count, **kwargs):
14 | super().__init__(scope, id, **kwargs)
15 | self.cluster = cluster
16 | self.vpc = vpc
17 | self.private_subnets = private_subnets
18 | self.sec_group = sec_group
19 |
20 | self.service_discovery = cluster.default_cloud_map_namespace
21 | self.desired_service_count = desired_service_count
22 |
23 | self.task_definition = ecs.FargateTaskDefinition(
24 | self, "BackendNodeServiceTaskDef",
25 | cpu=256,
26 | memory_limit_mib=512,
27 | )
28 |
29 | self.task_definition.add_container(
30 | "BackendNodeServiceContainer",
31 | image=ecs.ContainerImage.from_registry("brentley/ecsdemo-nodejs"),
32 | logging=ecs.LogDrivers.firelens(
33 | options={
34 | "Name": "cloudwatch",
35 | "log_key": "log",
36 | "region": "us-west-2",
37 | "delivery_stream": "my-stream",
38 | "log_group_name": "firelens-fluent-bit",
39 | "auto_create_group": "true",
40 | "log_stream_prefix": "from-fluent-bit"}
41 | )
42 | )
43 |
44 | self.fargate_service = ecs.FargateService(
45 | self, "BackendNodeFargateService",
46 | service_name="Fargate-Backend-NodeJS",
47 | task_definition=self.task_definition,
48 | cluster=self.cluster,
49 | max_healthy_percent=100,
50 | min_healthy_percent=0,
51 | vpc_subnets={
52 | "subnet_name" : "Private"
53 | },
54 | desired_count=self.desired_service_count,
55 | security_group=self.sec_group,
56 | cloud_map_options={
57 | "name": "ecsdemo-nodejs"
58 | },
59 | )
60 |
--------------------------------------------------------------------------------
/labs/stream-processing-pipelines/solutions/streamproc.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from kafka import KafkaConsumer
4 | import pymongo
5 | import hashlib
6 | import boto3
7 | import os
8 |
9 | # environment variables for service discovery
10 | TOPIC_NAME = os.environ['TOPIC_NAME']
11 | NAMESPACE = os.environ['NAMESPACE']
12 | MSK_SERVICE = os.environ['MSK_SERVICE']
13 | DDB_SERVICE = os.environ['DDB_SERVICE']
14 |
15 | print("Looking up broker ARN")
16 | svc_client = boto3.client('servicediscovery')
17 | response = svc_client.discover_instances(
18 | NamespaceName=NAMESPACE,
19 | ServiceName=MSK_SERVICE
20 | )
21 | broker_arn = response['Instances'][0]['Attributes']['broker_arn']
22 | print("Got broker ARN {0}".format(broker_arn))
23 | print("Looking up broker string")
24 | msk_client = boto3.client('kafka')
25 | response = msk_client.get_bootstrap_brokers(
26 | ClusterArn=broker_arn
27 | )
28 | broker_string = response['BootstrapBrokerStringTls']
29 | print("Got broker string {0}".format(broker_string))
30 |
31 | print("Looking up DocumentDB endpoint")
32 | response = svc_client.discover_instances(
33 | NamespaceName=NAMESPACE,
34 | ServiceName=DDB_SERVICE
35 | )
36 | ddb_endpoint = ''
37 | for svc_instance in response['Instances']:
38 | svc_instance_id = svc_instance['InstanceId']
39 | if 'ReadEndpoint' not in svc_instance_id:
40 | ddb_endpoint = svc_instance['Attributes']['endpoint']
41 | docdbuser = svc_instance['Attributes']['user']
42 | docdbpass = svc_instance['Attributes']['password']
43 |
44 | # To consume latest messages and auto-commit offsets
45 | print("")
46 | consumer = KafkaConsumer(TOPIC_NAME,
47 | bootstrap_servers=broker_string,
48 | security_protocol = 'SSL')
49 |
50 | client = pymongo.MongoClient("mongodb://{0}:{1}@{2}:27017/?ssl=true&ssl_ca_certs=/opt/rds-combined-ca-bundle.pem&replicaSet=rs0&readPreference=secondaryPreferred".format(
51 | docdbuser,
52 | docdbpass,
53 | ddb_endpoint
54 | ))
55 | db = client.kafka
56 | col = db.hashed
57 |
58 | # This loop will run forever as long as we're getting messages
59 | for message in consumer:
60 | raw_value = message.value.decode('utf-8')
61 | print("Found record {0}".format(raw_value))
62 | hashvalue = hashlib.sha224(raw_value.encode('utf-8')).hexdigest()
63 | col.insert_one({'value':hashvalue})
64 |
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/fargate_workshop_cdk/fargate_workshop_dataeng_lambda.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_docdb as docdb,
5 | aws_msk as msk,
6 | aws_ec2 as ec2,
7 | aws_s3 as s3,
8 | aws_servicediscovery as cloudmap,
9 | aws_events as events,
10 | aws_lambda as lambda_,
11 | aws_events_targets as targets,
12 | aws_iam as iam,
13 | core
14 | )
15 |
16 | class FargateWorkshopDataengLambdaStack(core.Stack):
17 | def __init__(self, scope: core.Construct, id: str, kafkaClientFirewall: ec2.ISecurityGroup, vpc: ec2.IVpc, kafkaCloudMap: cloudmap.Service, **kwargs) -> None:
18 |
19 | super().__init__(scope, id, **kwargs)
20 |
21 | # Kafka data producer
22 | lambdaFn = lambda_.Function(
23 | self, "KafkaProducer",
24 | code=lambda_.AssetCode('fargate_workshop_cdk/function.zip'),
25 | handler="kafka-producer.main",
26 | timeout=core.Duration.seconds(300),
27 | runtime=lambda_.Runtime.PYTHON_3_7,
28 | description = 'Simple Kafka producer for Fargate workshop',
29 | environment = {'NAMESPACE': kafkaCloudMap.namespace.namespace_name, 'SERVICE': kafkaCloudMap.service_name, 'TOPIC_NAME': 'MyTopic'},
30 | memory_size = 512,
31 | security_group = kafkaClientFirewall,
32 | vpc = vpc
33 | )
34 | lambdaFn.add_to_role_policy(
35 | statement = iam.PolicyStatement(
36 | resources = ['*'],
37 | actions = ['servicediscovery:DiscoverInstances']
38 | )
39 | )
40 | lambdaFn.add_to_role_policy(
41 | statement = iam.PolicyStatement(
42 | resources = ['*'],
43 | actions = ['kafka:GetBootstrapBrokers']
44 | )
45 | )
46 | # Run every 5 minutes
47 | # See https://docs.aws.amazon.com/lambda/latest/dg/tutorial-scheduled-events-schedule-expressions.html
48 | rule = events.Rule(
49 | self, "Rule",
50 | schedule=events.Schedule.rate(
51 | duration = core.Duration.minutes(5),
52 | ),
53 | )
54 | rule.add_target(targets.LambdaFunction(lambdaFn))
55 |
56 |
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/fargate_workshop_cdk/fargate_workshop_dataeng_batch.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_codepipeline as codepipeline,
5 | aws_codecommit as codecommit,
6 | aws_codebuild as codebuild,
7 | aws_iam as iam,
8 | aws_codepipeline_actions as actions,
9 | aws_ecs as ecs,
10 | aws_ecs_patterns as ecs_patterns,
11 | aws_ecr as ecr,
12 | aws_ec2 as ec2,
13 | aws_ssm as ssm,
14 | aws_applicationautoscaling as applicationautoscaling,
15 | aws_s3 as s3,
16 | core
17 | )
18 |
19 |
20 | class FargateWorkshopDataengBatchStack(core.Stack):
21 |
22 | def __init__(self, scope: core.Construct, id: str, cluster: ecs.ICluster, repo: ecr.IRepository, **kwargs) -> None:
23 | super().__init__(scope, id, **kwargs)
24 |
25 | # bucket
26 | self.xmlBucket = s3.Bucket(
27 | scope = self,
28 | id = "XmlBucket",
29 | block_public_access = s3.BlockPublicAccess.BLOCK_ALL,
30 | encryption = s3.BucketEncryption.S3_MANAGED
31 | )
32 | core.CfnOutput(
33 | scope = self,
34 | id = "XmlBucketName",
35 | value=self.xmlBucket.bucket_name
36 | )
37 |
38 | # service skeleton
39 | batch_task_definition = ecs.FargateTaskDefinition(
40 | scope = self,
41 | id = "BatchTaskDef",
42 | cpu=2048,
43 | memory_limit_mib=4096,
44 | volumes = [ecs.Volume(name='storage')]
45 | )
46 | batch_container = batch_task_definition.add_container(
47 | id = "BatchContainer",
48 | image=ecs.ContainerImage.from_ecr_repository(repository = repo, tag = 'latest'),
49 | logging=ecs.LogDrivers.aws_logs(stream_prefix="BatchProcessing"),
50 | environment = {'BUCKET': self.xmlBucket.bucket_name }
51 | )
52 | batch_container.add_mount_points(ecs.MountPoint(container_path = '/opt/data', read_only = False, source_volume = 'storage'))
53 | batch_task_definition.task_role.add_to_policy(
54 | statement = iam.PolicyStatement(
55 | resources = [self.xmlBucket.bucket_arn, self.xmlBucket.bucket_arn + '/*'],
56 | actions = ['s3:*']
57 | )
58 | )
59 | ssm.StringParameter(
60 | scope = self,
61 | id = 'SSMParamBatchImageName',
62 | string_value = batch_container.container_name,
63 | parameter_name = 'image_batch'
64 | )
65 |
66 |
--------------------------------------------------------------------------------
/labs/batch-processing/sftp/app.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | import boto3
4 | import os
5 | import paramiko
6 | import traceback
7 | import io
8 | import base64
9 | from botocore.exceptions import ClientError
10 |
11 | def create_sftp_client(host, port, username, password, keyfiledata, keyfiletype):
12 | sftp = None
13 | key = None
14 | transport = None
15 | try:
16 | if keyfiledata is not None:
17 | # Get private key used to authenticate user.
18 | keyfile = io.StringIO(keyfiledata)
19 | if keyfiletype == 'DSA':
20 | # The private key is a DSA type key.
21 | key = paramiko.DSSKey.from_private_key(keyfile)
22 | else:
23 | # The private key is a RSA type key.
24 | key = paramiko.RSAKey.from_private_key(keyfile)
25 |
26 | # Create Transport object using supplied method of authentication.
27 | transport = paramiko.Transport((host, port))
28 | transport.connect(None, username, password, key)
29 |
30 | sftp = paramiko.SFTPClient.from_transport(transport)
31 |
32 | return sftp
33 | except Exception as e:
34 | print('An error occurred creating SFTP client: %s: %s' % (e.__class__, e))
35 | traceback.print_exc()
36 | if sftp is not None:
37 | sftp.close()
38 | if transport is not None:
39 | transport.close()
40 | pass
41 |
42 | # Look up SFTP server
43 | print("Looking up SFTP information")
44 | svc_client = boto3.client('servicediscovery')
45 | response = svc_client.discover_instances(
46 | NamespaceName='FargateWorkshopNamespace',
47 | ServiceName='SFTP'
48 | )
49 | sftp_vpce_id = response['Instances'][0]['Attributes']['vpce_id']
50 | sftp_user = response['Instances'][0]['Attributes']['user']
51 | sftp_bucket = response['Instances'][0]['Attributes']['bucket']
52 |
53 | # Look up VPC endpoint
54 | ec2_client = boto3.client('ec2')
55 | response = ec2_client.describe_vpc_endpoints(DryRun = False, VpcEndpointIds = [sftp_vpce_id])
56 | sftp_host = response['VpcEndpoints'][0]['DnsEntries'][0]['DnsName']
57 | print("Got SFTP host {0} and user {1}".format(sftp_host, sftp_user))
58 |
59 | # Look up SSH key
60 | secret_name = "sftpkey"
61 | sm_client = boto3.client('secretsmanager')
62 | get_secret_value_response = sm_client.get_secret_value( SecretId=secret_name)
63 | keyfiledata = get_secret_value_response['SecretString']
64 |
65 | # Connect
66 | sftp = create_sftp_client(sftp_host, 22, sftp_user, '', keyfiledata, keyfiletype = 'RSA')
67 |
68 | # List files
69 | filepath = "/" + sftp_bucket
70 | remote_files = sftp.listdir(path=filepath)
71 | for r in remote_files:
72 | print("Found file " + r)
73 |
74 | # Close
75 | if sftp: sftp.close()
76 |
--------------------------------------------------------------------------------
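The script above only lists the remote directory. A hedged sketch of the natural next step, fetching each file with `SFTPClient.get`; the local target directory is an example:

```
import os
import paramiko

def download_all(sftp: paramiko.SFTPClient, remote_dir: str, local_dir: str):
    """Fetch every file in remote_dir into local_dir over SFTP."""
    os.makedirs(local_dir, exist_ok=True)
    for name in sftp.listdir(path=remote_dir):
        remote_path = remote_dir + "/" + name
        local_path = os.path.join(local_dir, name)
        print("Downloading {0} -> {1}".format(remote_path, local_path))
        sftp.get(remote_path, local_path)

# e.g. download_all(sftp, "/" + sftp_bucket, "/opt/data")
```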
/diagrams/operations.puml:
--------------------------------------------------------------------------------
1 | @startuml Fargate Operations and Security
2 |
3 | ' Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
4 |
5 | !define AWSPuml https://raw.githubusercontent.com/awslabs/aws-icons-for-plantuml/master/dist
6 | !include AWSPuml/AWSCommon.puml
7 | !include AWSPuml/NetworkingAndContentDelivery/all.puml
8 | !include AWSPuml/Compute/all.puml
9 | !include AWSPuml/Database/all.puml
10 | !include AWSPuml/Mobile/all.puml
11 | !include AWSPuml/DeveloperTools/all.puml
12 | !include AWSPuml/ManagementAndGovernance/all.puml
13 | !include AWSPuml/SecurityIdentityAndCompliance/all.puml
14 |
15 | skinparam frame {
16 | backgroundColor<<mesh>> lightblue
17 | }
18 | skinparam package {
19 | backgroundColor<<tooling>> lightgreen
20 | backgroundColor<<dmz>> LightSlateGray
21 | backgroundColor<<private>> LightGoldenRodYellow
22 | backgroundColor<<fargate>> PaleVioletRed
23 | }
24 |
25 | title Fargate Operations and Security
26 |
27 | actor "Developer" as client
28 | package "Tooling" <> {
29 | EC2ContainerRegistry(ecr, "Registry", "container registry", "Private container registry")
30 | CodePipeline(pipeline, "CI/CD", "Deployment pipeline", "")
31 | component clair as "CoreOS Clair"
32 | }
33 | package "DMZ" <> {
34 | ELBApplicationLoadBalancer(elb, "ALB", "load balancer", "External communication")
35 | APIGateway(apigw, "API Gateway", "portal", "API endpoints")
36 | }
37 | package "Private" <> {
38 | DocumentDBwithMongoDBcompatibility(docdb, "DocumentDB", "Mongo-compatible database", "Database")
39 | EC2Instance(billing, "Back office system", "billing", "")
40 | }
41 | package "Fargate" <> {
42 | frame "Service Mesh" <> as appmesh {
43 | ECSService(frontend, "Front end service", "Microservice", "Service")
44 | node "Task" {
45 | ECSContainer1(taskcode, "Container", "Business logic", "")
46 | ECSContainer1(tasksec, "Sidecar", "Dynamic scan", "")
47 | }
48 | }
49 | }
50 | CloudMap(cloudmap, "Cloud Map", "service discovery", "")
51 | package "Ops" <> {
52 | CloudWatch(logs, "CloudWatch logs", "Logging", "Cluster and tasks")
53 | CloudWatch(metrics, "CloudWatch metrics", "Metrics", "Cluster and tasks")
54 | AutoScaling(scaling, "Service autoscaling", "Scaling", "")
55 | IAMPermissions(perms, "Permissions", "RBAC", "Cluster and tasks")
56 | }
57 |
58 | pipeline --> ecr
59 | ecr -[#red]-> clair: scan_on_push
60 | ecr --> Fargate: imagepull
61 | apigw --> elb
62 | elb --> frontend
63 | frontend --> cloudmap
64 | cloudmap --> docdb
65 | cloudmap --> billing
66 | Fargate --> Ops
67 |
68 | note right of cloudmap: Use CloudMap for service discovery.
69 | note top of appmesh: Service mesh for microservice communication
70 | legend
71 | Pattern for each environment
72 | Color coding indicates network boundaries
73 | end legend
74 |
75 | @enduml
76 |
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/fargate_workshop_cdk/kafka-producer.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from kafka import KafkaProducer
4 | from kafka import KafkaConsumer
5 | from kafka import KafkaAdminClient
6 | from kafka.admin import NewTopic
7 | import time
8 | import os
9 | import boto3
10 |
11 | # constants
12 | MIN_TIME_REMAINING_MILLIS = 30 * 1000
13 | SLEEP_SECONDS = 5
14 |
15 | # environment variables
16 | NAMESPACE = os.environ['NAMESPACE']
17 | SERVICE = os.environ['SERVICE']
18 | TOPIC_NAME = os.environ['TOPIC_NAME']
19 |
20 | def main(event, context):
21 |
22 | print("Looking up broker ARN")
23 | svc_client = boto3.client('servicediscovery')
24 | response = svc_client.discover_instances(
25 | NamespaceName=NAMESPACE,
26 | ServiceName=SERVICE
27 | )
28 | broker_arn = response['Instances'][0]['Attributes']['broker_arn']
29 | print("Got broker ARN {0}".format(broker_arn))
30 |
31 | print("Looking up broker string")
32 | msk_client = boto3.client('kafka')
33 | response = msk_client.get_bootstrap_brokers(
34 | ClusterArn=broker_arn
35 | )
36 | broker_string = response['BootstrapBrokerStringTls']
37 | print("Got broker string {0}".format(broker_string))
38 |
39 | # make sure topic exists
40 | print("Checking if topic {0} exists".format(TOPIC_NAME))
41 | kclient = KafkaConsumer(bootstrap_servers=broker_string, security_protocol = 'SSL')
42 | existing_topics = kclient.topics()
43 | if TOPIC_NAME in existing_topics:
44 | print("Topic {0} exists".format(TOPIC_NAME))
45 | else:
46 | print("Topic {0} does not exist, creating".format(TOPIC_NAME))
47 | topic_list = []
48 | topic = NewTopic(name=TOPIC_NAME, num_partitions=1, replication_factor=1)
49 | topic_list.append(topic)
50 | kadmin = KafkaAdminClient(bootstrap_servers=broker_string, security_protocol = 'SSL')
51 | kadmin.create_topics(new_topics = topic_list)
52 | kadmin.close()
53 | kclient.close()
54 |
55 | producer = KafkaProducer(bootstrap_servers=broker_string, security_protocol = 'SSL')
56 |
57 | while True:
58 | remaining_time_millis = context.get_remaining_time_in_millis()
59 |
60 | if remaining_time_millis < MIN_TIME_REMAINING_MILLIS:
61 | print("Time left ({0}) is less than time required ({1}), exiting".format(str(remaining_time_millis), str(MIN_TIME_REMAINING_MILLIS)))
62 | break
63 | else:
64 | print("Time left ({0}) is greater than time required ({1}), sending".format(str(remaining_time_millis), str(MIN_TIME_REMAINING_MILLIS)))
65 | msg = "Kafka message sent at {0}".format(str(time.time()))
66 | producer.send(TOPIC_NAME, msg.encode('utf-8'))
67 | producer.flush()
68 | time.sleep(SLEEP_SECONDS)
69 |
70 | producer.close()
71 | print("All done")
72 |
--------------------------------------------------------------------------------
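The handler above touches only one method of the Lambda context object, `get_remaining_time_in_millis()`, so it can be exercised outside Lambda with a small stand-in. A sketch, assuming NAMESPACE, SERVICE and TOPIC_NAME are set in the environment and that AWS credentials and network reachability to the MSK brokers are available:

```
import time

class FakeContext:
    """Stands in for the Lambda context; only the one method main() uses."""
    def __init__(self, budget_millis):
        self._deadline_millis = time.time() * 1000 + budget_millis

    def get_remaining_time_in_millis(self):
        return int(self._deadline_millis - time.time() * 1000)

# Pasted at the bottom of kafka-producer.py (the hyphen in the filename
# prevents a normal import), this would publish for roughly one minute:
# main({}, FakeContext(60 * 1000))
```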
/labs/pipeline-generator-cdk/code_pipeline_generator/code_pipeline_generator.egg-info/PKG-INFO:
--------------------------------------------------------------------------------
1 | Metadata-Version: 2.1
2 | Name: code-pipeline-generator
3 | Version: 0.0.1
4 | Summary: An empty CDK Python app
5 | Home-page: UNKNOWN
6 | Author: author
7 | License: UNKNOWN
8 | Description:
9 | # Welcome to your CDK Python project!
10 |
11 | The `cdk.json` file tells the CDK Toolkit how to execute your app.
12 |
13 | This project is set up like a standard Python project. The initialization
14 | process also creates a virtualenv within this project, stored under the .env
15 | directory. To create the virtualenv it assumes that there is a `python3`
16 | (or `python` for Windows) executable in your path with access to the `venv`
17 | package. If for any reason the automatic creation of the virtualenv fails,
18 | you can create the virtualenv manually.
19 |
20 | To manually create a virtualenv on MacOS and Linux:
21 |
22 | ```
23 | $ python3 -m venv .env
24 | ```
25 |
26 | After the init process completes and the virtualenv is created, you can use the following
27 | step to activate your virtualenv.
28 |
29 | ```
30 | $ source .env/bin/activate
31 | ```
32 |
33 | If you are on a Windows platform, you would activate the virtualenv like this:
34 |
35 | ```
36 | % .env\Scripts\activate.bat
37 | ```
38 |
39 | Once the virtualenv is activated, you can install the required dependencies.
40 |
41 | ```
42 | $ pip install -r requirements.txt
43 | ```
44 |
45 | At this point you can now synthesize the CloudFormation template for this code.
46 |
47 | ```
48 | $ cdk synth
49 | ```
50 |
51 | To add additional dependencies, for example other CDK libraries, just add
52 | them to your `setup.py` file and rerun the `pip install -r requirements.txt`
53 | command.
54 |
55 | # Useful commands
56 |
57 | * `cdk ls` list all stacks in the app
58 | * `cdk synth` emits the synthesized CloudFormation template
59 | * `cdk deploy` deploy this stack to your default AWS account/region
60 | * `cdk diff` compare deployed stack with current state
61 | * `cdk docs` open CDK documentation
62 |
63 | Enjoy!
64 |
65 | Platform: UNKNOWN
66 | Classifier: Development Status :: 4 - Beta
67 | Classifier: Intended Audience :: Developers
68 | Classifier: License :: OSI Approved :: Apache Software License
69 | Classifier: Programming Language :: JavaScript
70 | Classifier: Programming Language :: Python :: 3 :: Only
71 | Classifier: Programming Language :: Python :: 3.6
72 | Classifier: Programming Language :: Python :: 3.7
73 | Classifier: Programming Language :: Python :: 3.8
74 | Classifier: Topic :: Software Development :: Code Generators
75 | Classifier: Topic :: Utilities
76 | Classifier: Typing :: Typed
77 | Requires-Python: >=3.6
78 | Description-Content-Type: text/markdown
79 |
--------------------------------------------------------------------------------
/labs/fargate-ops-cdk/fargate_ops_cdk/fargate_workshop_ops_frontend.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_ecs as ecs,
5 | aws_ec2 as ec2,
6 | aws_ecs_patterns as ecs_patterns,
7 | core
8 | )
9 |
10 |
11 | class FargateWorkshopOpsFrontend(core.Stack):
12 |
13 |     def __init__(self, scope: core.Construct, id: str, cluster: ecs.ICluster, vpc, sec_group, desired_service_count, **kwargs):
14 | super().__init__(scope, id, **kwargs)
15 | self.cluster = cluster
16 | self.vpc = vpc
17 | self.sec_group = sec_group
18 | self.desired_service_count = desired_service_count
19 |
20 | self.fargate_load_balanced_service = ecs_patterns.ApplicationLoadBalancedFargateService(self, "FrontendFargateLBService",
21 | cluster=self.cluster,
22 | desired_count=self.desired_service_count,
23 | service_name="Fargate-Frontend",
24 | cpu=256,
25 | memory_limit_mib=512,
26 | public_load_balancer=True,
27 | task_image_options={
28 | "image": ecs.ContainerImage.from_registry("brentley/ecsdemo-frontend"),
29 | "container_port": 3000,
30 | "enable_logging": True,
31 | "environment": {
32 | "CRYSTAL_URL": "http://ecsdemo-crystal.service:3000/crystal",
33 | "NODEJS_URL": "http://ecsdemo-nodejs.service:3000"
34 | }
35 | },
36 | )
37 |
38 | self.fargate_load_balanced_service.service.connections.security_groups[0].add_ingress_rule(
39 | peer = ec2.Peer.ipv4(self.vpc.vpc_cidr_block),
40 | connection = ec2.Port.tcp(3000),
41 | description="Allow http inbound from VPC"
42 | )
43 |
44 | self.sec_grp_ingress_backend_to_frontend_3000 = ec2.CfnSecurityGroupIngress(
45 | self, "InboundBackendSecGrp3000",
46 | ip_protocol='TCP',
47 | source_security_group_id=self.fargate_load_balanced_service.service.connections.security_groups[0].security_group_id,
48 | from_port=3000,
49 | to_port=3000,
50 | group_id=self.sec_group.security_group_id
51 | )
52 |
53 | self.sec_grp_ingress_frontend_to_backend_3000 = ec2.CfnSecurityGroupIngress(
54 | self, "InboundFrontendtoBackendSecGrp3000",
55 | ip_protocol='TCP',
56 | source_security_group_id=self.sec_group.security_group_id,
57 | from_port=3000,
58 | to_port=3000,
59 | group_id=self.fargate_load_balanced_service.service.connections.security_groups[0].security_group_id,
60 | )
61 |
62 | scaling = self.fargate_load_balanced_service.service.auto_scale_task_count(
63 | min_capacity=3,
64 | max_capacity=6
65 | )
66 | scaling.scale_on_cpu_utilization(
67 | "CpuScaling",
68 | target_utilization_percent=30,
69 | scale_in_cooldown=core.Duration.seconds(60),
70 | scale_out_cooldown=core.Duration.seconds(60),
71 | )
72 |
73 | core.CfnOutput(
74 | self, "LoadBalancerDNS",
75 | value = self.fargate_load_balanced_service.load_balancer.load_balancer_dns_name
76 | )
77 |
--------------------------------------------------------------------------------
/labs/pipeline-generator-cdk/code_pipeline_generator/code_pipeline_generator_stack.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from code_pipeline_configuration import ContainerPipelineConfiguration
4 | from code_pipeline_generic_build_project import genericBuild
5 | from aws_cdk import (
6 | aws_codebuild,
7 | aws_codecommit,
8 | aws_codepipeline,
9 | aws_codepipeline_actions,
10 | core,
11 | )
12 |
13 |
14 | class CodePipelineGeneratorStack(core.Stack):
15 |
16 | def __init__(self, scope: core.Construct, id: str, config: ContainerPipelineConfiguration, **kwargs) -> None:
17 | super().__init__(scope, id, **kwargs)
18 |
19 | sourceOutput = aws_codepipeline.Artifact(
20 | artifact_name=config.ProjectName+"-SourceOutput"
21 | )
22 |
23 | pipelineStages = []
24 |
25 | commit = aws_codecommit.Repository(
26 | self,
27 | config.ProjectName + "-codeRepo",
28 | repository_name=config.ProjectName+"-Repository"
29 | )
30 | pipelineStages.append(
31 | aws_codepipeline.StageOptions(
32 | stage_name="Source",
33 | actions=[
34 | aws_codepipeline_actions.CodeCommitSourceAction(
35 | action_name="Commit",
36 | repository=commit,
37 | output=sourceOutput
38 | )]
39 | )
40 | )
41 |
42 | build = genericBuild(self, config.ProjectName+"-Build")
43 | pipelineStages.append(
44 | aws_codepipeline.StageOptions(
45 | stage_name="Build",
46 | actions=[
47 | aws_codepipeline_actions.CodeBuildAction(
48 | action_name="Build",
49 | project=build.Project,
50 | input=sourceOutput
51 | )]
52 | )
53 | )
54 |
55 | if(config.AllTest or config.UnitTest):
56 | unitTest = genericBuild(self, config.ProjectName+"-UnitTests")
57 | pipelineStages[1].actions.append(
58 | aws_codepipeline_actions.CodeBuildAction(
59 | action_name="UnitTests",
60 |                     project=unitTest.Project,
61 | input=sourceOutput
62 | )
63 | )
64 |
65 | containerLinting=genericBuild(self, config.ProjectName+"-ContainerLinting")
66 | pipelineStages[1].actions.append(
67 | aws_codepipeline_actions.CodeBuildAction(
68 | action_name="Linting",
69 |                     project=containerLinting.Project,
70 | input=sourceOutput
71 | )
72 | )
73 |
74 |
75 | if(config.AllTest or config.IntegrationTests):
76 | integrationTest=genericBuild(
77 | self, config.ProjectName+"-IntegrationTests")
78 |
79 | if(config.AllTest or config.EndToEndTest):
80 | endToEnd=genericBuild(self, config.ProjectName+"-EndToEndTests")
81 |
82 | if(config.AllTest or config.LoadTest):
83 | loadTest=genericBuild(self, config.ProjectName+"-LoadTests")
84 |
85 | pipeline=aws_codepipeline.Pipeline(
86 | self,
87 | config.ProjectName+"-PipeLine",
88 | pipeline_name=config.ProjectName+"-Pipeline",
89 | stages=pipelineStages,
90 | )
91 |
--------------------------------------------------------------------------------
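The stack above only reads a handful of attributes from its `config` argument (`ProjectName` plus the test toggles). A sketch of synthesizing it with a stand-in configuration object; the real `ContainerPipelineConfiguration` constructor lives in `code_pipeline_configuration.py` and is not shown here:

```
from aws_cdk import core
from code_pipeline_generator_stack import CodePipelineGeneratorStack

class StubConfig:
    """Stand-in exposing only the attributes the stack reads."""
    ProjectName = "demo"
    AllTest = False
    UnitTest = True
    IntegrationTests = False
    EndToEndTest = False
    LoadTest = False

app = core.App()
CodePipelineGeneratorStack(app, "demo-pipeline", config=StubConfig())
app.synth()
```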
/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contributing Guidelines
2 |
3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
4 | documentation, we greatly value feedback and contributions from our community.
5 |
6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
7 | information to effectively respond to your bug report or contribution.
8 |
9 |
10 | ## Reporting Bugs/Feature Requests
11 |
12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features.
13 |
14 | When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
16 |
17 | * A reproducible test case or series of steps
18 | * The version of our code being used
19 | * Any modifications you've made relevant to the bug
20 | * Anything unusual about your environment or deployment
21 |
22 |
23 | ## Contributing via Pull Requests
24 | Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
25 |
26 | 1. You are working against the latest source on the *master* branch.
27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
29 |
30 | To send us a pull request, please:
31 |
32 | 1. Fork the repository.
33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
34 | 3. Ensure local tests pass.
35 | 4. Commit to your fork using clear commit messages.
36 | 5. Send us a pull request, answering any default questions in the pull request interface.
37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
38 |
39 | GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
41 |
42 |
43 | ## Finding contributions to work on
44 | Looking at the existing issues is a great way to find something to contribute to. Since our projects use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), any 'help wanted' issue is a great place to start.
45 |
46 |
47 | ## Code of Conduct
48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
50 | opensource-codeofconduct@amazon.com with any additional questions or comments.
51 |
52 |
53 | ## Security issue notifications
54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
55 |
56 |
57 | ## Licensing
58 |
59 | See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
60 |
61 | We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
62 |
--------------------------------------------------------------------------------
/labs/fargate-ops-cdk/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
3 |
4 |
5 | from aws_cdk import core
6 | import os
7 |
8 | from fargate_ops_cdk.fargate_workshop_ops_stack import *
9 | from fargate_ops_cdk.fargate_workshop_ops_cluster import *
10 | from fargate_ops_cdk.fargate_workshop_ops_frontend import *
11 | from fargate_ops_cdk.fargate_workshop_ops_node_backend import *
12 | from fargate_ops_cdk.fargate_workshop_ops_crystal_backend import *
13 | from fargate_ops_cdk.fargate_workshop_ops_failed import *
14 |
15 | class FargateDemo(core.App):
16 |
17 | def __init__(self, **kwargs):
18 | super().__init__(**kwargs)
19 |
20 | self.stack_name = "FargateWorkshopOps"
21 |
22 | self.base_module = FargateWorkshopOpsStack(self, self.stack_name + "-base",
23 | env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
24 |
25 | self.ops_cluster = FargateWorkshopOpsCluster(self, self.stack_name + "-cluster",
26 | vpc = self.base_module.vpc,
27 | env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
28 |
29 | self.ops_cluster_frontend = FargateWorkshopOpsFrontend(self, self.stack_name + "-frontend",
30 | self.ops_cluster.cluster, self.base_module.vpc,
31 | self.base_module.services_3000_sec_group,
32 | desired_service_count=3,
33 | env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
34 |
35 |         self.ops_cluster_node_backend = FargateWorkshopOpsNodeBackend(self, self.stack_name + "-nodejs-backend",
36 | self.ops_cluster.cluster, self.base_module.vpc, self.base_module.vpc.private_subnets,
37 | self.base_module.services_3000_sec_group,
38 | desired_service_count=3,
39 | env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
40 |
41 |         self.ops_cluster_crystal_backend = FargateWorkshopOpsCrystalBackend(self, self.stack_name + "-crystal-backend",
42 | self.ops_cluster.cluster, self.base_module.vpc, self.base_module.vpc.private_subnets,
43 | self.base_module.services_3000_sec_group,
44 | desired_service_count=3,
45 | env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
46 |
47 |         self.ops_cluster_failed = FargateWorkshopOpsFailed(self, self.stack_name + "-failed",
48 | self.ops_cluster.cluster, self.base_module.vpc, self.base_module.vpc.private_subnets,
49 | self.base_module.services_3000_sec_group,
50 | desired_service_count=1,
51 | env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
52 |
53 | if __name__ == '__main__':
54 | app = FargateDemo()
55 | app.synth()
56 |
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/app.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
3 |
4 |
5 | from aws_cdk import core
6 | import os
7 |
8 | from fargate_workshop_cdk.fargate_workshop_network_stack import *
9 | from fargate_workshop_cdk.fargate_workshop_discovery_stack import *
10 | from fargate_workshop_cdk.fargate_workshop_dataeng_stack import *
11 | from fargate_workshop_cdk.fargate_workshop_dataeng_lambda import *
12 | from fargate_workshop_cdk.fargate_workshop_dataeng_cluster import *
13 | from fargate_workshop_cdk.fargate_workshop_dataeng_streams import *
14 | from fargate_workshop_cdk.fargate_workshop_dataeng_s3sink import *
15 | from fargate_workshop_cdk.fargate_workshop_dataeng_sftp import *
16 | from fargate_workshop_cdk.fargate_workshop_dataeng_batch import *
17 |
18 | app = core.App()
19 | project = 'FargateWorkshop'
20 |
21 | # Network stack is common
22 | network = FargateWorkshopNetworkStack(app, "fargate-workshop-network", env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
23 | private_subnets = network.vpc.private_subnets
24 | private_subnet_ids = [n.subnet_id for n in private_subnets]
25 |
26 | # Start discovery stack
27 | discovery = FargateWorkshopDiscoveryStack(app, "fargate-workshop-discovery", env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
28 |
29 | # Now build stacks for other tracks
30 | dataeng = FargateWorkshopDataengStack(app, "fargate-workshop-dataeng", private_subnet_ids, network.vpc, network.default_vpc_cidr_block, project, discovery.namespace, env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
31 | FargateWorkshopDataengLambdaStack(app, "fargate-workshop-dataeng-lambda", dataeng.kafkaClientFirewall, vpc = network.vpc, kafkaCloudMap = dataeng.kafkaCloudMap, env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
32 | dataeng_cluster = FargateWorkshopDataengClusterStack(app, "fargate-workshop-dataeng-cluster", vpc = network.vpc, env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
33 | FargateWorkshopDataengStreamsStack(app, "fargate-workshop-dataeng-streams",
34 | repo_arn = dataeng.docker_repo.repository_arn,
35 | cluster = dataeng_cluster.cluster,
36 | repo = dataeng.docker_repo,
37 | clientFirewall = dataeng.unifiedClientFirewall,
38 | docdbClientFirewall = dataeng.clientFirewall,
39 | cmnamespace = dataeng.kafkaCloudMap.namespace.namespace_name,
40 | cmmsk = dataeng.kafkaCloudMap.service_name,
41 | cmddb = dataeng.docdbCloudMap.service_name,
42 | env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
43 | FargateWorkshopDataengS3SinkStack(app, "fargate-workshop-dataeng-kafkaconnect",
44 | cluster = dataeng_cluster.cluster,
45 | kafkaClientFirewall = dataeng.unifiedClientFirewall,
46 | lbFirewall = dataeng.lbFirewall,
47 | kcrepo = dataeng.docker_repo_s3sink,
48 | hcrepo = dataeng.docker_repo_s3sinkhc,
49 | cmnamespace = dataeng.kafkaCloudMap.namespace.namespace_name,
50 | cmmsk = dataeng.kafkaCloudMap.service_name,
51 | vpc = network.vpc,
52 | env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
53 | FargateWorkshopDataengSftpStack(app, "fargate-workshop-dataeng-sftp",
54 | cluster = dataeng_cluster.cluster,
55 | repo = dataeng.docker_repo_sftp,
56 | env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
57 | FargateWorkshopDataengBatchStack(app, "fargate-workshop-dataeng-batch",
58 | cluster = dataeng_cluster.cluster,
59 | repo = dataeng.docker_repo_batch,
60 | env={'account': os.environ['CDK_DEFAULT_ACCOUNT'], 'region': os.environ['CDK_DEFAULT_REGION']})
61 |
62 | app.synth()
63 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/r53_health_check.yaml:
--------------------------------------------------------------------------------
1 | Parameters:
2 |
3 | url:
4 | Type: String
5 |     Description: URL of your load balancer
6 |
7 |
8 | Resources:
9 | cloudfronthealth1:
10 | Type: 'AWS::Route53::HealthCheck'
11 | Properties:
12 | HealthCheckConfig:
13 | Port: 80
14 | Type: HTTP
15 | ResourcePath: '/'
16 | FullyQualifiedDomainName: !Ref url
17 | RequestInterval: 10
18 | FailureThreshold: 3
19 | HealthCheckTags:
20 | -
21 | Key: Name
22 | Value: cloudfronthealth1
23 |
24 | cloudfronthealth2:
25 | Type: 'AWS::Route53::HealthCheck'
26 | Properties:
27 | HealthCheckConfig:
28 | Port: 80
29 | Type: HTTP
30 | ResourcePath: '/'
31 | FullyQualifiedDomainName: !Ref url
32 | RequestInterval: 10
33 | FailureThreshold: 3
34 | HealthCheckTags:
35 | -
36 | Key: Name
37 | Value: cloudfronthealth2
38 |
39 | cloudfronthealth3:
40 | Type: 'AWS::Route53::HealthCheck'
41 | Properties:
42 | HealthCheckConfig:
43 | Port: 80
44 | Type: HTTP
45 | ResourcePath: '/'
46 | FullyQualifiedDomainName: !Ref url
47 | RequestInterval: 10
48 | FailureThreshold: 3
49 | HealthCheckTags:
50 | -
51 | Key: Name
52 | Value: cloudfronthealth3
53 |
54 | cloudfronthealth4:
55 | Type: 'AWS::Route53::HealthCheck'
56 | Properties:
57 | HealthCheckConfig:
58 | Port: 80
59 | Type: HTTP
60 | ResourcePath: '/'
61 | FullyQualifiedDomainName: !Ref url
62 | RequestInterval: 10
63 | FailureThreshold: 3
64 | HealthCheckTags:
65 | -
66 | Key: Name
67 | Value: cloudfronthealth4
68 |
69 | cloudfronthealth5:
70 | Type: 'AWS::Route53::HealthCheck'
71 | Properties:
72 | HealthCheckConfig:
73 | Port: 80
74 | Type: HTTP
75 | ResourcePath: '/'
76 | FullyQualifiedDomainName: !Ref url
77 | RequestInterval: 10
78 | FailureThreshold: 3
79 | HealthCheckTags:
80 | -
81 | Key: Name
82 | Value: cloudfronthealth5
83 |
84 |   cloudfronthealth6:
85 | Type: 'AWS::Route53::HealthCheck'
86 | Properties:
87 | HealthCheckConfig:
88 | Port: 80
89 | Type: HTTP
90 | ResourcePath: '/'
91 | FullyQualifiedDomainName: !Ref url
92 | RequestInterval: 10
93 | FailureThreshold: 3
94 | HealthCheckTags:
95 | -
96 | Key: Name
97 | Value: cloudfronthealth6
98 |
99 |
100 | cloudfronthealth7:
101 | Type: 'AWS::Route53::HealthCheck'
102 | Properties:
103 | HealthCheckConfig:
104 | Port: 80
105 | Type: HTTP
106 | ResourcePath: '/'
107 | FullyQualifiedDomainName: !Ref url
108 | RequestInterval: 10
109 | FailureThreshold: 3
110 | HealthCheckTags:
111 | -
112 | Key: Name
113 | Value: cloudfronthealth7
114 |
115 | cloudfronthealth8:
116 | Type: 'AWS::Route53::HealthCheck'
117 | Properties:
118 | HealthCheckConfig:
119 | Port: 80
120 | Type: HTTP
121 | ResourcePath: '/'
122 | FullyQualifiedDomainName: !Ref url
123 | RequestInterval: 10
124 | FailureThreshold: 3
125 | HealthCheckTags:
126 | -
127 | Key: Name
128 | Value: cloudfronthealth8
129 |
130 | cloudfronthealth9:
131 | Type: 'AWS::Route53::HealthCheck'
132 | Properties:
133 | HealthCheckConfig:
134 | Port: 80
135 | Type: HTTP
136 | ResourcePath: '/'
137 | FullyQualifiedDomainName: !Ref url
138 | RequestInterval: 10
139 | FailureThreshold: 3
140 | HealthCheckTags:
141 | -
142 | Key: Name
143 | Value: cloudfronthealth9
144 |
145 | cloudfronthealth10:
146 | Type: 'AWS::Route53::HealthCheck'
147 | Properties:
148 | HealthCheckConfig:
149 | Port: 80
150 | Type: HTTP
151 | ResourcePath: '/'
152 | FullyQualifiedDomainName: !Ref url
153 | RequestInterval: 10
154 | FailureThreshold: 3
155 | HealthCheckTags:
156 | -
157 | Key: Name
158 | Value: cloudfronthealth10
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/fargate_workshop_cdk/fargate_workshop_dataeng_streams.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_codepipeline as codepipeline,
5 | aws_codecommit as codecommit,
6 | aws_codebuild as codebuild,
7 | aws_iam as iam,
8 | aws_codepipeline_actions as actions,
9 | aws_ecs as ecs,
10 | aws_ecr as ecr,
11 | aws_ec2 as ec2,
12 | aws_ssm as ssm,
13 | core
14 | )
15 |
16 |
17 | class FargateWorkshopDataengStreamsStack(core.Stack):
18 |
19 | def __init__(self, scope: core.Construct, id: str, repo_arn: str, cluster: ecs.ICluster, repo: ecr.IRepository,
20 | clientFirewall: ec2.ISecurityGroup,
21 | docdbClientFirewall: ec2.ISecurityGroup,
22 | cmnamespace: str,
23 | cmmsk: str,
24 | cmddb: str,
25 | **kwargs) -> None:
26 | super().__init__(scope, id, **kwargs)
27 |
28 | # service skeleton
29 | streamproc_task_definition = ecs.FargateTaskDefinition(
30 | scope = self,
31 | id = "StreamProcTaskDef",
32 | cpu=1024,
33 | memory_limit_mib=2048
34 | )
35 | streamproc_container = streamproc_task_definition.add_container(
36 | id = "StreamProcContainer",
37 | image=ecs.ContainerImage.from_ecr_repository(repository = repo, tag = 'latest'),
38 | logging=ecs.LogDrivers.aws_logs(stream_prefix="StreamProcessing"),
39 | environment = {'NAMESPACE': cmnamespace, 'MSK_SERVICE': cmmsk, 'TOPIC_NAME': 'MyTopic', 'DDB_SERVICE': cmddb},
40 | )
41 | streamproc_task_definition.add_to_task_role_policy(
42 | statement = iam.PolicyStatement(
43 | resources = ['*'],
44 | actions = ['servicediscovery:DiscoverInstances']
45 | )
46 | )
47 | streamproc_task_definition.add_to_task_role_policy(
48 | statement = iam.PolicyStatement(
49 | resources = ['*'],
50 | actions = ['kafka:GetBootstrapBrokers']
51 | )
52 | )
53 | streamproc_service = ecs.FargateService(
54 | scope = self,
55 | id = "StreamProcessingService",
56 | task_definition=streamproc_task_definition,
57 | assign_public_ip = False,
58 | security_group = clientFirewall,
59 | cluster=cluster,
60 | desired_count = 1
61 | )
62 | streamproc_scaling = streamproc_service.auto_scale_task_count(max_capacity=10)
63 | streamproc_scaling.scale_on_cpu_utilization("CpuScaling",
64 | target_utilization_percent=70
65 | )
66 | ssm.StringParameter(
67 | scope = self,
68 | id = 'SSMParamStreamProcImageName',
69 | string_value = streamproc_container.container_name,
70 | parameter_name = 'image_streamproc'
71 | )
72 |
73 | # pipeline
74 | self.cbproject = codebuild.PipelineProject(
75 | scope = self,
76 | id = 'KafkaToDocdbBuildImage',
77 | cache = codebuild.Cache.local(codebuild.LocalCacheMode.DOCKER_LAYER),
78 | environment = codebuild.BuildEnvironment(
79 | build_image = codebuild.LinuxBuildImage.UBUNTU_14_04_DOCKER_18_09_0,
80 | privileged = True,
81 | compute_type = codebuild.ComputeType.LARGE
82 | )
83 | )
84 | self.cbproject.add_to_role_policy(
85 | statement = iam.PolicyStatement(
86 | resources = ['*'],
87 | actions = ['ssm:GetParameters', 'ecr:GetAuthorizationToken']
88 | )
89 | )
90 | self.cbproject.add_to_role_policy(
91 | statement = iam.PolicyStatement(
92 | resources = [repo_arn],
93 | actions = ['ecr:*']
94 | )
95 | )
96 | self.pipeline = codepipeline.Pipeline(
97 | scope = self,
98 | id = "KafkaToDocDb",
99 | pipeline_name = 'KafkaToDocdb'
100 | )
101 | self.pipeline.add_stage(
102 | stage_name='Source',
103 | actions = [
104 | actions.CodeCommitSourceAction(
105 | repository = codecommit.Repository.from_repository_name(scope=self, id = 'FargateStreamProcessorRepo', repository_name = 'FargateStreamProcessor'),
106 | action_name = "Get-Code",
107 | output = codepipeline.Artifact('code')
108 | )
109 | ]
110 | )
111 | self.pipeline.add_stage(
112 | stage_name = 'Build',
113 | actions = [
114 | actions.CodeBuildAction(
115 | input = codepipeline.Artifact('code'),
116 | project = self.cbproject,
117 | outputs = [codepipeline.Artifact('image')],
118 | action_name = 'Build-Image'
119 | )
120 | ]
121 | )
122 | self.pipeline.add_stage(
123 | stage_name = 'Deploy',
124 | actions = [
125 | actions.EcsDeployAction(
126 | service = streamproc_service,
127 | input = codepipeline.Artifact('image'),
128 | action_name = 'Deploy-Image'
129 | )
130 | ]
131 | )
132 |
133 | core.CfnOutput(
134 | self, "IgnoredOutput",
135 | value=docdbClientFirewall.security_group_id
136 | )
137 |
138 |
139 |
140 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/ecs_development_workshop/ecs_inf_fargate.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from ecs_development_workshop.code_pipeline_configuration import ContainerPipelineConfiguration
4 |
5 | from aws_cdk import (
6 | aws_ecs as ecs,
7 | aws_ec2 as ec2,
8 | aws_iam as iam,
9 | aws_ecr as ecr,
10 | aws_elasticloadbalancingv2 as elbv2,
11 | aws_ecs_patterns,
12 | aws_autoscaling as autoscaling,
13 | aws_cloudwatch,
14 | aws_elasticloadbalancingv2_targets as elbvs_targets,
15 | aws_logs as logs,
16 | core
17 | )
18 |
19 | import json
20 |
21 |
22 | class EcsInfFargate(core.Stack):
23 |
24 | def __init__(self, scope: core.Construct, id: str, config: ContainerPipelineConfiguration, **kwargs) -> None:
25 | super().__init__(scope, id, **kwargs)
26 |
27 | #VPC
28 | vpc = ec2.Vpc(self, "TheVPC",
29 | cidr ="10.0.0.0/16"
30 | )
31 |
32 | #IAM roles
33 | service_task_def_exe_role = iam.Role(self, "ServiceTaskDefExecutionRole",
34 | assumed_by = iam.ServicePrincipal('ecs-tasks.amazonaws.com')
35 | )
36 | service_task_def_exe_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('service-role/AmazonECSTaskExecutionRolePolicy'))
37 |
38 | service_task_def_role = iam.Role(self,'ServiceTaskDefTaskRole',
39 | assumed_by = iam.ServicePrincipal('ecs-tasks.amazonaws.com')
40 | )
41 |
42 | code_deploy_role = iam.Role(self, "CodeDeployRole",
43 | assumed_by = iam.ServicePrincipal('codedeploy.amazonaws.com')
44 | )
45 | code_deploy_role.add_managed_policy(iam.ManagedPolicy.from_aws_managed_policy_name('AWSCodeDeployRoleForECS'))
46 |
47 | # Fargate cluster
48 | cluster = ecs.Cluster(
49 | scope = self,
50 | id = "ecs-cluster",
51 | cluster_name = config.ProjectName + "-" + config.stage,
52 | vpc = vpc
53 | )
54 |
55 | load_balancer = elbv2.ApplicationLoadBalancer(
56 | self, "load_balancer",
57 | vpc=vpc,
58 | internet_facing=True
59 | )
60 |
61 | #Security Group
62 | service_sg = ec2.SecurityGroup(self, "service_sg",vpc=vpc)
63 |         service_sg.connections.allow_from(load_balancer, ec2.Port.tcp(80))
64 |
65 | #ECR Repo
66 | image_repo = ecr.Repository.from_repository_name(self, "image_repo",
67 | repository_name = config.ProjectName
68 | )
69 |
70 | log_group = logs.LogGroup(self, "log_group",
71 | log_group_name= config.ProjectName + "-" + config.stage,
72 | removal_policy=core.RemovalPolicy.DESTROY,
73 | retention=None
74 | )
75 |
76 | #ECS Task Def
77 | fargate_task_definition = ecs.FargateTaskDefinition(
78 | scope = self,
79 | id = "fargate_task_definition",
80 | cpu=256,
81 | memory_limit_mib=512,
82 | execution_role = service_task_def_exe_role,
83 | task_role = service_task_def_role,
84 | family = config.ProjectName + "-" + config.stage
85 | )
86 |
87 | container = fargate_task_definition.add_container(
88 | id = "fargate_task_container",
89 | image=ecs.ContainerImage.from_ecr_repository(repository = image_repo, tag = 'release')
90 | )
91 |
92 | container.add_port_mappings(ecs.PortMapping(container_port=80, host_port=80, protocol = ecs.Protocol.TCP))
93 |
94 | #ECS Fargate Service
95 | fargate_service = ecs.FargateService(
96 | scope = self,
97 | id = "fargate_service",
98 | security_group = service_sg,
99 | cluster=cluster,
100 | desired_count=5,
101 | deployment_controller = ecs.DeploymentController(type = ecs.DeploymentControllerType.CODE_DEPLOY),
102 | task_definition = fargate_task_definition,
103 | service_name = config.ProjectName + "-" + config.stage
104 | )
105 |
106 | #Main Env
107 |         listener_health_check_main = elbv2.HealthCheck(
108 | healthy_http_codes = '200',
109 | interval = core.Duration.seconds(5),
110 | healthy_threshold_count = 2,
111 | unhealthy_threshold_count = 3,
112 | timeout = core.Duration.seconds(4)
113 | )
114 | #Test Env
115 |         listener_health_check_test = elbv2.HealthCheck(
116 | healthy_http_codes = '200',
117 | interval = core.Duration.seconds(5),
118 | healthy_threshold_count = 2,
119 | unhealthy_threshold_count = 3,
120 | timeout = core.Duration.seconds(4)
121 | )
122 |
123 | listener_main = load_balancer.add_listener("load_balancer_listener_1",
124 | port = 80,
125 | )
126 |
127 |         listener_main_targets = listener_main.add_targets("load_balancer_target_1", port=80,
128 |             health_check = listener_health_check_main,
129 | targets=[fargate_service]
130 | )
131 |
132 | listener_test = load_balancer.add_listener("load_balancer_listener_2",
133 | port = 8080,
134 | )
135 |
136 |         listener_test_targets = listener_test.add_targets("load_balancer_target_2", port=80,
137 |             health_check = listener_health_check_test,
138 | targets=[fargate_service]
139 | )
140 |
141 | #Alarms: monitor 500s on target group
142 | aws_cloudwatch.Alarm(self,"TargetGroup5xx",
143 |             metric = listener_main_targets.metric_http_code_target(elbv2.HttpCodeTarget.TARGET_5XX_COUNT),
144 | threshold = 1,
145 | evaluation_periods = 1,
146 | period = core.Duration.minutes(1)
147 | )
148 |
149 | aws_cloudwatch.Alarm(self,"TargetGroup25xx",
150 |             metric = listener_test_targets.metric_http_code_target(elbv2.HttpCodeTarget.TARGET_5XX_COUNT),
151 | threshold = 1,
152 | evaluation_periods = 1,
153 | period = core.Duration.minutes(1)
154 | )
155 |
156 | #Alarms: monitor unhealthy hosts on target group
157 | aws_cloudwatch.Alarm(self,"TargetGroupUnhealthyHosts",
158 |             metric = listener_main_targets.metric('UnHealthyHostCount'),
159 | threshold = 1,
160 | evaluation_periods = 1,
161 | period = core.Duration.minutes(1)
162 | )
163 |
164 | aws_cloudwatch.Alarm(self,"TargetGroup2UnhealthyHosts",
165 |             metric = listener_test_targets.metric('UnHealthyHostCount'),
166 | threshold = 1,
167 | evaluation_periods = 1,
168 | period = core.Duration.minutes(1)
169 | )
170 |
171 | core.CfnOutput(self,"lburl",
172 | value = load_balancer.load_balancer_dns_name,
173 | export_name = "LoadBalancerUrl"
174 | )
175 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/configs/produce-configs.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | import json
4 | import boto3
5 | import sys
6 |
7 | #command example
8 | #python produce-configs.py fargate-dev-workshop test 11111111111.dkr.ecr.us-west-2.amazonaws.com/fargate-dev-workshop:9b2de43d-e787-4f1e-b9df-1eb73319fa4d
9 |
10 |
11 | #stack prefix given by cdk app.py
12 | stack_prefix = "ecs-inf-"
13 |
14 | # arg 1: the project name
15 | project_name = sys.argv[1]
16 |
17 | # arg 2: the application environment (e.g. test or prod)
18 | app_env = sys.argv[2]
19 |
20 | # arg 3: the new container image for the ECS task definition
21 | image = sys.argv[3]
22 |
23 | #'ecs-inf-prod'
24 | stack_name = stack_prefix + app_env
25 |
26 | account_number = boto3.client('sts').get_caller_identity().get('Account')
27 | region = boto3.Session().region_name  # get_caller_identity() does not return a region
28 |
29 | # create a CloudFormation client in the detected region
30 | cloudformation = boto3.client('cloudformation', region_name=region)
31 |
32 | inf_stack = cloudformation.describe_stack_resources(
33 | StackName=stack_name
34 | )
35 |
36 | stack_alarms = []
37 |
38 | #Deployment Group Replacements
39 | for stack in inf_stack['StackResources']:
40 | #ECS Cluster Name
41 | if("ecscluster" in stack['LogicalResourceId']):
42 | ecs_cluster = stack['PhysicalResourceId']
43 |
44 | if("servicesg" in stack['LogicalResourceId']):
45 | if( "sg-" in stack['PhysicalResourceId']):
46 | ecs_task_sg = stack['PhysicalResourceId']
47 |
48 |     if("ServiceTaskDefExecutionRoleDefaultPolicy" in stack['LogicalResourceId']):
49 | ecs_task_exe_role_policy = stack['PhysicalResourceId']
50 | elif("ServiceTaskDefExecutionRole" in stack['LogicalResourceId']):
51 | ecs_task_exe_role_arn = "arn:aws:iam::" + account_number + ":role/" + stack['PhysicalResourceId']
52 |
53 | if("ServiceTaskDefTaskRole" in stack['LogicalResourceId']):
54 | ecs_task_role_arn = "arn:aws:iam::" + account_number + ":role/" + stack['PhysicalResourceId']
55 |
56 | if("TheVPCPrivateSubnet1Subnet" in stack['LogicalResourceId']):
57 | ecs_private_subnet1 = stack['PhysicalResourceId']
58 |
59 | if("TheVPCPrivateSubnet2Subnet" in stack['LogicalResourceId']):
60 | ecs_private_subnet2 = stack['PhysicalResourceId']
61 |
62 | if("ecsclusterLBPublicListenerECSGroup" in stack['LogicalResourceId']):
63 | ecs_cluster_public_listener = stack['PhysicalResourceId']
64 |
65 | if("CodeDeployRole" in stack['LogicalResourceId']):
66 | code_deploy_role = "arn:aws:iam::" + account_number + ":role/" + stack['PhysicalResourceId']
67 |
68 | if("loadbalancerloadbalancerlistener1loadbalancertarget1Group" in stack['LogicalResourceId']):
69 | load_balancer_listern_tg_arn1 = stack['PhysicalResourceId']
70 | load_balancer_listern_tg_name1 = stack['PhysicalResourceId'].split("/",1)[1].split("/",1)[0]
71 | elif("loadbalancerloadbalancerlistener1" in stack['LogicalResourceId']):
72 | load_balancer_listern_arn1 = stack['PhysicalResourceId']
73 |
74 | if("loadbalancerloadbalancerlistener2loadbalancertarget2Group" in stack['LogicalResourceId']):
75 | load_balancer_listern_tg_arn2 = stack['PhysicalResourceId']
76 | load_balancer_listern_tg_name2 = stack['PhysicalResourceId'].split("/",1)[1].split("/",1)[0]
77 | elif("loadbalancerloadbalancerlistener2" in stack['LogicalResourceId']):
78 | load_balancer_listern_arn2 = stack['PhysicalResourceId']
79 |
80 | if("TargetGroup25xx" in stack['LogicalResourceId']):
81 | stack_alarms.append({ "name": stack['PhysicalResourceId']})
82 | if("TargetGroup2UnhealthyHosts" in stack['LogicalResourceId']):
83 | stack_alarms.append({ "name": stack['PhysicalResourceId']})
84 | if("TargetGroup5xx" in stack['LogicalResourceId']):
85 | stack_alarms.append({ "name": stack['PhysicalResourceId']})
86 | if("TargetGroupUnhealthyHosts" in stack['LogicalResourceId']):
87 | stack_alarms.append({ "name": stack['PhysicalResourceId']})
88 |
89 | #edit deployment group
90 | with open('./deployment-group.json', 'r') as deploy_group_file:
91 | json_data = json.load(deploy_group_file)
92 | json_data['ecsServices'][0]['clusterName'] = ecs_cluster
93 | json_data['ecsServices'][0]['serviceName'] = project_name + "-" + app_env
94 | json_data['loadBalancerInfo']['targetGroupPairInfoList'][0]['targetGroups'][0]['name'] = load_balancer_listern_tg_name1
95 | json_data['loadBalancerInfo']['targetGroupPairInfoList'][0]['targetGroups'][1]['name'] = load_balancer_listern_tg_name2
96 | json_data['loadBalancerInfo']['targetGroupPairInfoList'][0]['prodTrafficRoute']['listenerArns'] = [load_balancer_listern_arn1]
97 | json_data['loadBalancerInfo']['targetGroupPairInfoList'][0]['testTrafficRoute']['listenerArns'] = [load_balancer_listern_arn2]
98 | json_data['serviceRoleArn'] = code_deploy_role
99 | json_data['alarmConfiguration']['alarms'] = stack_alarms
100 | json_data['applicationName'] = project_name + "-" + app_env
101 | with open('./deployment-group-' + app_env +'.json', 'w+') as file:
102 | json.dump(json_data, file, indent=2)
103 |
104 | #edit Service definition
105 | with open('./service-definition.json', 'r') as file:
106 | json_data = json.load(file)
107 | json_data['cluster'] = ecs_cluster
108 | json_data['networkConfiguration']['awsvpcConfiguration']['securityGroups'] = [ecs_task_sg]
109 | json_data['networkConfiguration']['awsvpcConfiguration']['subnets'] = [ecs_private_subnet1,ecs_private_subnet2]
110 | json_data['taskDefinition'] = project_name + "-" + app_env
111 | json_data['loadBalancers'][0]['targetGroupArn'] = load_balancer_listern_tg_arn1
112 | json_data['loadBalancers'][0]['containerName'] = project_name + "-" + app_env
113 | with open('./service-definition-' + app_env +'.json', 'w+') as file:
114 | json.dump(json_data, file, indent=2)
115 |
116 | #edit ecs definition
117 | with open('./task-definition.json', 'r') as file:
118 | json_data = json.load(file)
119 | json_data['taskRoleArn'] = ecs_task_role_arn
120 | json_data['executionRoleArn'] = ecs_task_exe_role_arn
121 | json_data['family'] = project_name + '-' + app_env
122 | json_data['containerDefinitions'][0]['image'] = image
123 | json_data['containerDefinitions'][0]['name'] = project_name + "-" + app_env
124 | json_data['containerDefinitions'][0]['logConfiguration']['options']['awslogs-region'] = region
125 | with open('./task-definition-' + app_env +'.json', 'w+') as file:
126 | json.dump(json_data, file, indent=2)
127 |
128 | #edit appspec
129 | with open('./appspec.json', 'r') as file:
130 | json_data = json.load(file)
131 | json_data['Resources'][0]['TargetService']['Properties']['TaskDefinition'] = project_name + "-" + app_env
132 | json_data['Resources'][0]['TargetService']['Properties']['LoadBalancerInfo']['ContainerName'] = project_name + "-" + app_env
133 | json_data['Resources'][0]['TargetService']['Properties']['NetworkConfiguration']['awsvpcConfiguration']['subnets'] = [ecs_private_subnet1,ecs_private_subnet2]
134 | json_data['Resources'][0]['TargetService']['Properties']['NetworkConfiguration']['awsvpcConfiguration']['securityGroups'] = [ecs_task_sg]
135 | with open('./appsec' + app_env +'.json', 'w+') as file:
136 | json.dump(json_data, file, indent=2)
137 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/ecs_development_workshop/docker_build_to_ecr_pipeline.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from ecs_development_workshop.code_pipeline_configuration import ContainerPipelineConfiguration
4 | #from ecs_development_workshop.code_pipeline_generic_build_project import genericBuild
5 | from aws_cdk import (
6 | aws_codebuild,
7 | aws_iam as iam,
8 | aws_ecs as ecs,
9 | aws_codecommit,
10 | aws_codedeploy,
11 | aws_codepipeline as codepipeline,
12 | aws_codepipeline_actions as codepipeline_actions,
13 | aws_ecr as ecr,
14 | aws_events as events,
15 | aws_lambda as lambda_,
16 | aws_autoscaling as autoscaling,
17 | aws_events_targets as targets,
18 | aws_elasticloadbalancingv2 as elbv2,
19 | aws_cloudwatch as cloudwatch,
20 | core,
21 | )
22 |
23 | class DockerBuildToEcrPipeline(core.Stack):
24 |
25 | # def __init__(self, scope: core.Construct, id: str, cluster: ecs.Cluster, asg_1: autoscaling.IAutoScalingGroup, asg_2: autoscaling.IAutoScalingGroup, lb: elbv2.IApplicationLoadBalancer, config: ContainerPipelineConfiguration, **kwargs) -> None:
26 | # super().__init__(scope, id, **kwargs)
27 | #
28 | def __init__(self, scope: core.Construct, id: str, config: ContainerPipelineConfiguration, **kwargs) -> None:
29 | super().__init__(scope, id, **kwargs)
30 |
31 | sourceOutput = codepipeline.Artifact(
32 | artifact_name=config.ProjectName+"-SourceOutput"
33 | )
34 |
35 | #pipelineStages = []
36 |
37 | # create lambda function
38 | #self.function = lambda_.Function(
39 | # self, "lambda_function",
40 | # runtime=lambda_.Runtime.PYTHON_3_7,
41 | # handler="pipeline_starter.handler",
42 | # code=lambda_.AssetCode('ecs_development_workshop/pipeline_starter.zip')
43 | #)
44 |
45 | #Code Repo
46 | commit = aws_codecommit.Repository(
47 | self,
48 | config.ProjectName + "-apprepo",
49 | repository_name=config.ProjectName+"-app-repo"
50 | )
51 |
52 | #Container Repo
53 | self.docker_repo = ecr.Repository(
54 | scope = self,
55 | id = config.ProjectName,
56 | removal_policy=core.RemovalPolicy.DESTROY,
57 | repository_name = config.ProjectName
58 | )
59 |
60 | pipeline = codepipeline.Pipeline(self, "MyPipeline",
61 | pipeline_name = config.ProjectName + "-commit-to-ecr"
62 | )
63 |
64 | source_output = codepipeline.Artifact()
65 |
66 | source_action = codepipeline_actions.CodeCommitSourceAction(
67 | action_name="CodeCommit",
68 | repository=commit,
69 | output=source_output
70 | )
71 |
72 | #docker file linting
73 | cb_docker_build_lint = aws_codebuild.PipelineProject(
74 | self, "DockerLint",
75 | project_name= config.ProjectName + "-docker-lint",
76 | build_spec=aws_codebuild.BuildSpec.from_source_filename(
77 | filename='configs/buildspec_lint.yml'),
78 | environment=aws_codebuild.BuildEnvironment(
79 | build_image=aws_codebuild.LinuxBuildImage.UBUNTU_14_04_NODEJS_10_1_0,
80 | privileged=True,
81 | ),
82 | # pass the ecr repo uri into the codebuild project so codebuild knows where to push
83 | environment_variables={
84 | 'ecr': aws_codebuild.BuildEnvironmentVariable(
85 | value=self.docker_repo.repository_uri),
86 | 'project_name': aws_codebuild.BuildEnvironmentVariable(
87 | value=config.ProjectName)
88 | },
89 | description='linting the container dockerfile for best practices',
90 | timeout=core.Duration.minutes(60),
91 | )
92 |
93 |
94 | #docker code repo secret scan
95 | cb_docker_build_secretscan = aws_codebuild.PipelineProject(
96 | self, "DockerSecretScan",
97 | project_name= config.ProjectName + "-docker-secretscan",
98 | build_spec=aws_codebuild.BuildSpec.from_source_filename(
99 | filename='configs/buildspec_secrets.yml'),
100 | environment=aws_codebuild.BuildEnvironment(
101 | privileged=True,
102 | ),
103 | # pass the ecr repo uri into the codebuild project so codebuild knows where to push
104 | environment_variables={
105 | 'commituri': aws_codebuild.BuildEnvironmentVariable(
106 | value=commit.repository_clone_url_http),
107 | 'ecr': aws_codebuild.BuildEnvironmentVariable(
108 | value=self.docker_repo.repository_uri),
109 | 'project_name': aws_codebuild.BuildEnvironmentVariable(
110 | value=config.ProjectName)
111 | },
112 | description='Scanning container for secrets',
113 | timeout=core.Duration.minutes(60),
114 | )
115 |
116 | cb_docker_build_secretscan.add_to_role_policy(
117 | statement = iam.PolicyStatement(
118 | resources = ['*'],
119 | actions = ['codecommit:*']
120 | )
121 | )
122 |
123 | #push to ecr repo
124 | cb_docker_build_push = aws_codebuild.PipelineProject(
125 | self, "DockerBuild",
126 | project_name= config.ProjectName + "-docker-build",
127 | build_spec=aws_codebuild.BuildSpec.from_source_filename(
128 | filename='configs/docker_build_base.yml'),
129 | environment=aws_codebuild.BuildEnvironment(
130 | privileged=True,
131 | ),
132 | # pass the ecr repo uri into the codebuild project so codebuild knows where to push
133 | environment_variables={
134 | 'ecr': aws_codebuild.BuildEnvironmentVariable(
135 | value=self.docker_repo.repository_uri),
136 | 'tag': aws_codebuild.BuildEnvironmentVariable(
137 | value="release"),
138 | 'project_name': aws_codebuild.BuildEnvironmentVariable(
139 | value=config.ProjectName)
140 | },
141 | description='Deploy to ECR',
142 | timeout=core.Duration.minutes(60),
143 | )
144 |
145 | pipeline.add_stage(
146 | stage_name="Source",
147 | actions=[source_action]
148 | )
149 |
150 | pipeline.add_stage(
151 | stage_name='Lint',
152 | actions=[
153 | codepipeline_actions.CodeBuildAction(
154 | action_name='DockerLintImages',
155 | input=source_output,
156 | project= cb_docker_build_lint,
157 | run_order=1,
158 | )
159 | ]
160 | )
161 |
162 | pipeline.add_stage(
163 | stage_name='SecretScan',
164 | actions=[
165 | codepipeline_actions.CodeBuildAction(
166 | action_name='DockerSecretScanImages',
167 | input=source_output,
168 | project= cb_docker_build_secretscan,
169 | run_order=1,
170 | )
171 | ]
172 | )
173 |
174 | pipeline.add_stage(
175 | stage_name='Build',
176 | actions=[
177 | codepipeline_actions.CodeBuildAction(
178 | action_name='DockerBuildImages',
179 | input=source_output,
180 | project= cb_docker_build_push,
181 | run_order=1,
182 | )
183 | ]
184 | )
185 |
186 | self.docker_repo.grant_pull_push(cb_docker_build_push)
187 |
188 |
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/fargate_workshop_cdk/fargate_workshop_dataeng_s3sink.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_codepipeline as codepipeline,
5 | aws_codecommit as codecommit,
6 | aws_codebuild as codebuild,
7 | aws_iam as iam,
8 | aws_codepipeline_actions as actions,
9 | aws_ecs as ecs,
10 | aws_ecr as ecr,
11 | aws_s3 as s3,
12 | aws_ec2 as ec2,
13 | aws_ssm as ssm,
14 | aws_elasticloadbalancingv2 as elbv2,
15 | core
16 | )
17 |
18 |
19 | class FargateWorkshopDataengS3SinkStack(core.Stack):
20 |
21 | def __init__(self, scope: core.Construct, id: str, cluster: ecs.ICluster,
22 | kafkaClientFirewall: ec2.ISecurityGroup,
23 | lbFirewall: ec2.ISecurityGroup,
24 | kcrepo: ecr.IRepository,
25 | hcrepo: ecr.IRepository,
26 | cmnamespace: str,
27 | cmmsk: str,
28 | vpc: ec2.IVpc,
29 | **kwargs) -> None:
30 | super().__init__(scope, id, **kwargs)
31 |
32 | # S3 buckets
33 | self.kafkaConnectBucket = s3.Bucket(
34 | scope = self,
35 | id = "KafkaConnectBucket",
36 | block_public_access = s3.BlockPublicAccess.BLOCK_ALL,
37 | encryption = s3.BucketEncryption.S3_MANAGED
38 | )
39 | core.CfnOutput(
40 | scope = self,
41 | id = "KafkaConnectBucketName",
42 | value=self.kafkaConnectBucket.bucket_name
43 | )
44 |
45 | # service skeleton
46 | kc_task_definition = ecs.FargateTaskDefinition(
47 | scope = self,
48 | id = "KafkaConnectTaskDef",
49 | cpu=4096,
50 | memory_limit_mib=8192
51 | )
52 | kc_container = kc_task_definition.add_container(
53 | id = "KafkaConnectContainer",
54 | image=ecs.ContainerImage.from_ecr_repository(repository = kcrepo, tag = 'latest'),
55 | logging=ecs.LogDrivers.aws_logs(stream_prefix="KafkaConnect"),
56 | environment = {'CONNECT_PLUGIN_PATH': "/usr/share/java",
57 | 'MSK_SERVICE': cmmsk,
58 | 'CONNECT_GROUP_ID': 'KcS3SinkGroup',
59 | 'CONNECT_CONFIG_STORAGE_TOPIC': 'kc_config',
60 | 'CONNECT_OFFSET_STORAGE_TOPIC': 'kc_offset',
61 | 'CONNECT_STATUS_STORAGE_TOPIC': 'kc_status',
62 | 'CONNECT_VALUE_CONVERTER': 'org.apache.kafka.connect.storage.StringConverter',
63 | 'CONNECT_KEY_CONVERTER': 'org.apache.kafka.connect.storage.StringConverter',
64 | 'CONNECT_REST_PORT': '8083',
65 | 'CONNECT_CONSUMER_AUTO_OFFSET_RESET': 'latest',
66 | 'CONNECT_OFFSET_FLUSH_INTERVAL_MS': '120000',
67 | 'CONNECT_OFFSET_FLUSH_TIMEOUT_MS': '20000',
68 | 'CONNECT_INTERNAL_KEY_CONVERTER': 'org.apache.kafka.connect.json.JsonConverter',
69 | 'CONNECT_INTERNAL_VALUE_CONVERTER': 'org.apache.kafka.connect.json.JsonConverter',
70 | 'CONNECT_INTERNAL_KEY_CONVERTER_SCHEMAS_ENABLE': 'false',
71 | 'CONNECT_INTERNAL_VALUE_CONVERTER_SCHEMAS_ENABLE': 'false',
72 | 'CONNECT_SECURITY_PROTOCOL': 'SSL',
73 | 'CONNECT_CONSUMER_SECURITY_PROTOCOL': 'SSL',
74 | 'CONNECT_PRODUCER_SECURITY_PROTOCOL': 'SSL',
75 | 'REGION': self.region
76 | }
77 | )
78 | kc_container.add_port_mappings(ecs.PortMapping(container_port=8083, host_port=8083, protocol = ecs.Protocol.TCP))
79 | hc_container = kc_task_definition.add_container(
80 | id = "HealthCheckContainer",
81 | image=ecs.ContainerImage.from_ecr_repository(repository = hcrepo, tag = 'latest'),
82 | logging=ecs.LogDrivers.aws_logs(stream_prefix="KafkaConnectHc")
83 | )
84 | hc_container.add_port_mappings(ecs.PortMapping(container_port=18083, host_port=18083, protocol = ecs.Protocol.TCP))
85 | kc_task_definition.add_to_task_role_policy(
86 | statement = iam.PolicyStatement(
87 | resources = ['*'],
88 | actions = ['servicediscovery:DiscoverInstances']
89 | )
90 | )
91 | kc_task_definition.add_to_task_role_policy(
92 | statement = iam.PolicyStatement(
93 | resources = ['*'],
94 | actions = ['kafka:GetBootstrapBrokers']
95 | )
96 | )
97 | kc_task_definition.add_to_task_role_policy(
98 | statement = iam.PolicyStatement(
99 | resources = [self.kafkaConnectBucket.bucket_arn, self.kafkaConnectBucket.bucket_arn + '/*'],
100 | actions = ['s3:*']
101 | )
102 | )
103 | kc_svc = ecs.FargateService(
104 | scope = self,
105 | id = "KafkaConnectSvc",
106 | task_definition=kc_task_definition,
107 | security_group = kafkaClientFirewall,
108 | cluster=cluster,
109 | desired_count=1
110 | )
111 | kc_scaling = kc_svc.auto_scale_task_count(max_capacity=10)
112 | kc_scaling.scale_on_cpu_utilization("CpuScaling",
113 | target_utilization_percent=70
114 | )
115 | ssm.StringParameter(
116 | scope = self,
117 | id = 'SSMParamS3SinkImageName',
118 | string_value = kc_container.container_name,
119 | parameter_name = 'image_s3sink'
120 | )
121 | ssm.StringParameter(
122 | scope = self,
123 | id = 'SSMParamS3SinkHCImageName',
124 | string_value = hc_container.container_name,
125 | parameter_name = 'image_s3sink_hc'
126 | )
127 |
128 | # Create ALB
129 | self.lb = elbv2.ApplicationLoadBalancer(
130 | self, "KafkaConnectALB",
131 | vpc=vpc,
132 | security_group = lbFirewall,
133 | internet_facing=False
134 | )
135 | listener = self.lb.add_listener(
136 | "KafkaConnectListener",
137 | port=8083,
138 | protocol = elbv2.ApplicationProtocol.HTTP,
139 | open=False
140 | )
141 |
142 | health_check = elbv2.HealthCheck(
143 | interval=core.Duration.seconds(120),
144 | path="/",
145 | port = '18083',
146 | timeout=core.Duration.seconds(60)
147 | )
148 |
149 | # Attach ALB to ECS Service
150 | listener.add_targets(
151 | "KafkaConnectSvcListener",
152 | port=8083,
153 | protocol = elbv2.ApplicationProtocol.HTTP,
154 | targets=[kc_svc],
155 | health_check=health_check,
156 | )
157 | core.CfnOutput(
158 | scope = self,
159 | id = "KafkaConnectAlbDns",
160 | value=self.lb.load_balancer_dns_name
161 | )
162 |
163 | # pipeline
164 | self.cbproject = codebuild.PipelineProject(
165 | scope = self,
166 | id = 'KafkaS3SinkBuildImage',
167 | cache = codebuild.Cache.local(codebuild.LocalCacheMode.DOCKER_LAYER),
168 | environment = codebuild.BuildEnvironment(
169 | build_image = codebuild.LinuxBuildImage.UBUNTU_14_04_DOCKER_18_09_0,
170 | privileged = True,
171 | compute_type = codebuild.ComputeType.LARGE
172 | )
173 | )
174 | self.cbproject.add_to_role_policy(
175 | statement = iam.PolicyStatement(
176 | resources = ['*'],
177 | actions = ['ssm:GetParameters', 'ecr:GetAuthorizationToken']
178 | )
179 | )
180 | self.cbproject.add_to_role_policy(
181 | statement = iam.PolicyStatement(
182 | resources = ['*'],
183 | actions = ['ecr:*']
184 | )
185 | )
186 | self.pipeline = codepipeline.Pipeline(
187 | scope = self,
188 | id = "KafkaS3Sink",
189 | pipeline_name = 'KafkaS3Sink'
190 | )
191 | self.pipeline.add_stage(
192 | stage_name='Source',
193 | actions = [
194 | actions.CodeCommitSourceAction(
195 | repository = codecommit.Repository.from_repository_name(scope=self, id = 'FargateKcRepo', repository_name = 'FargateS3Sink'),
196 | action_name = "Get-Code-Kc",
197 | output = codepipeline.Artifact('code')
198 | ),
199 | actions.CodeCommitSourceAction(
200 | repository = codecommit.Repository.from_repository_name(scope=self, id = 'FargateHcRepo', repository_name = 'FargateS3SinkHealthCheck'),
201 | action_name = "Get-Code-Hc",
202 | output = codepipeline.Artifact('codehc')
203 | )
204 | ]
205 | )
206 | self.pipeline.add_stage(
207 | stage_name = 'Build',
208 | actions = [
209 | actions.CodeBuildAction(
210 | input = codepipeline.Artifact('code'),
211 | extra_inputs = [codepipeline.Artifact('codehc')],
212 | project = self.cbproject,
213 | outputs = [codepipeline.Artifact('image')],
214 | action_name = 'Build-Image'
215 | )
216 | ]
217 | )
218 | self.pipeline.add_stage(
219 | stage_name = 'Deploy',
220 | actions = [
221 | actions.EcsDeployAction(
222 | service = kc_svc,
223 | input = codepipeline.Artifact('image'),
224 | action_name = 'Deploy-Image'
225 | )
226 | ]
227 | )
228 |
--------------------------------------------------------------------------------
/labs/fargate-workshop-cdk/fargate_workshop_cdk/fargate_workshop_dataeng_stack.py:
--------------------------------------------------------------------------------
1 | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: MIT-0
2 |
3 | from aws_cdk import (
4 | aws_docdb as docdb,
5 | aws_msk as msk,
6 | aws_ec2 as ec2,
7 | aws_ecr as ecr,
8 | aws_s3 as s3,
9 | aws_servicediscovery as cloudmap,
10 | aws_events as events,
11 | aws_lambda as lambda_,
12 | aws_events_targets as targets,
13 | aws_iam as iam,
14 | aws_ssm as ssm,
15 | aws_transfer as transfer,
16 |
17 | core
18 | )
19 | from typing import List
20 |
21 |
22 | class FargateWorkshopDataengStack(core.Stack):
23 |
24 | def __init__(self, scope: core.Construct, id: str, subnets: List[str], vpc: ec2.IVpc, default_vpc_cidr_block: str, project: str, namespace: cloudmap.HttpNamespace, **kwargs) -> None:
25 | super().__init__(scope, id, **kwargs)
26 |
27 | # firewall for load balancers
28 | self.lbFirewall = ec2.SecurityGroup(
29 | scope = self,
30 | id = 'LbFirewall',
31 | vpc = vpc,
32 | description = 'Load balancer firewall'
33 | )
34 | self.lbFirewall.add_ingress_rule(
35 | peer = ec2.Peer.ipv4(vpc.vpc_cidr_block),
36 | connection = ec2.Port.all_traffic()
37 | )
38 | self.lbFirewall.add_ingress_rule(
39 | peer = ec2.Peer.ipv4(default_vpc_cidr_block),
40 | connection = ec2.Port.all_traffic()
41 | )
42 |
43 | # unified client firewall for both MSK and DocumentDB
44 | self.unifiedClientFirewall = ec2.SecurityGroup(
45 | scope = self,
46 | id = 'UnifiedClientFirewall',
47 | vpc = vpc,
48 | description = 'Client access firewall for DocumentDB and MSK'
49 | )
50 | self.unifiedClientFirewall.add_ingress_rule(
51 | peer = self.lbFirewall,
52 | connection = ec2.Port.all_traffic()
53 | )
54 |
55 | # DocumentDB cluster
56 | projTag = core.CfnTag(key = 'Project', value = project)
57 | subnetGroup = docdb.CfnDBSubnetGroup(
58 | scope = self,
59 | id = 'DatabaseSubnetGroup',
60 | db_subnet_group_description = 'Subnet group for database',
61 | subnet_ids = subnets,
62 | tags = [projTag, core.CfnTag(key = 'Name', value = 'DocDbSubnetGroup')]
63 | )
64 |
65 | self.clientFirewall = ec2.SecurityGroup(
66 | scope = self,
67 | id = 'DatabaseClientFirewall',
68 | vpc = vpc,
69 | description = 'Client access firewall for DocumentDB'
70 | )
71 | self.dbFirewall = ec2.SecurityGroup(
72 | scope = self,
73 | id = 'DatabaseInternalFirewall',
74 | vpc = vpc,
75 | allow_all_outbound = True,
76 | description = 'Firewall for DocumentDB'
77 | )
78 | self.dbFirewall.add_ingress_rule(
79 | peer = self.clientFirewall,
80 | connection = ec2.Port.all_traffic()
81 | )
82 | self.dbFirewall.add_ingress_rule(
83 | peer = self.unifiedClientFirewall,
84 | connection = ec2.Port.all_traffic()
85 | )
86 | self.dbFirewall.add_ingress_rule(
87 | peer = ec2.Peer.ipv4(default_vpc_cidr_block),
88 | connection = ec2.Port.all_traffic()
89 | )
90 | self.docdbCluster = docdb.CfnDBCluster(
91 | scope=self,
92 | id='DataStore',
93 | db_subnet_group_name = subnetGroup.ref,
94 | master_username = 'DocDbMaster',
95 | master_user_password = 'DocDbPass',
96 | vpc_security_group_ids = [self.dbFirewall.security_group_id]
97 | )
98 | self.docdbInstances = [
99 | docdb.CfnDBInstance(
100 | scope = self,
101 | id="DataStore-Instance-{0}".format(str(i)),
102 | db_cluster_identifier = self.docdbCluster.ref,
103 | db_instance_class = 'db.r5.xlarge'
104 | )
105 | for i in range(3)
106 | ]
107 | self.docdbCloudMap = namespace.create_service(
108 | id = 'DbSvc'
109 | )
110 | self.docdbCloudMap.register_non_ip_instance(
111 | id = 'dbEndpoint',
112 | custom_attributes = { 'endpoint': self.docdbCluster.attr_endpoint, 'user': 'DocDbMaster', 'password': 'DocDbPass'}
113 | )
114 | self.docdbCloudMap.register_non_ip_instance(
115 | id = 'dbReadEndpoint',
116 | custom_attributes = { 'endpoint': self.docdbCluster.attr_read_endpoint }
117 | )
118 |
119 | # MSK cluster
120 | self.kafkaClientFirewall = ec2.SecurityGroup(
121 | scope = self,
122 | id = 'KafkaClientFirewall',
123 | vpc = vpc,
124 | description = 'Client access firewall for Kafka'
125 | )
126 | self.kafkaFirewall = ec2.SecurityGroup(
127 | scope = self,
128 | id = 'KafkaInternalFirewall',
129 | vpc = vpc,
130 | allow_all_outbound = True,
131 | description = 'Firewall for Kafka'
132 | )
133 | self.kafkaFirewall.add_ingress_rule(
134 | peer = self.kafkaClientFirewall,
135 | connection = ec2.Port.all_traffic()
136 | )
137 | self.kafkaFirewall.add_ingress_rule(
138 | peer = self.unifiedClientFirewall,
139 | connection = ec2.Port.all_traffic()
140 | )
141 | self.kafkaFirewall.add_ingress_rule(
142 | peer = ec2.Peer.ipv4(default_vpc_cidr_block),
143 | connection = ec2.Port.all_traffic()
144 | )
145 | self.kafkaFirewall.add_ingress_rule(
146 | peer = self.kafkaFirewall,
147 | connection = ec2.Port.all_traffic()
148 | )
149 | num_brokers = len(subnets)
150 | if num_brokers < 3:
151 | num_brokers = 2 * num_brokers
152 | self.kafka = msk.CfnCluster(
153 | scope = self,
154 | id = 'kafka',
155 | cluster_name = 'kafkafargateworkshop',
156 | kafka_version = '2.2.1',
157 | number_of_broker_nodes = num_brokers,
158 | enhanced_monitoring = 'PER_TOPIC_PER_BROKER',
159 | broker_node_group_info = msk.CfnCluster.BrokerNodeGroupInfoProperty(
160 | client_subnets = subnets,
161 | instance_type = 'kafka.m5.large',
162 | security_groups = [self.kafkaFirewall.security_group_id]
163 | )
164 | )
165 | self.kafkaCloudMap = namespace.create_service(
166 | id = 'KafkaSvc'
167 | )
168 | self.kafkaCloudMap.register_non_ip_instance(
169 | id = 'KafkaBrokerArn',
170 | custom_attributes = { 'broker_arn': self.kafka.ref }
171 | )
172 |
173 | # ECR
174 | self.docker_repo = ecr.Repository(
175 | scope = self,
176 | id = "FargateImageRepository"
177 | )
178 | ssm.StringParameter(
179 | scope = self,
180 | id = 'SSMParamRegion',
181 | string_value = self.region,
182 | parameter_name = 'region'
183 | )
184 | ssm.StringParameter(
185 | scope = self,
186 | id = 'SSMParamRepoUri',
187 | string_value = self.docker_repo.repository_uri,
188 | parameter_name = 'repo_uri'
189 | )
190 | self.docker_repo_s3sink = ecr.Repository(
191 | scope = self,
192 | id = "FargateImageRepositoryS3Sink"
193 | )
194 | self.docker_repo_s3sinkhc = ecr.Repository(
195 | scope = self,
196 | id = "FargateImageRepositoryS3SinkHC"
197 | )
198 | ssm.StringParameter(
199 | scope = self,
200 | id = 'SSMParamRepoUriS3Sink',
201 | string_value = self.docker_repo_s3sink.repository_uri,
202 | parameter_name = 'repo_uri_s3_sink'
203 | )
204 | ssm.StringParameter(
205 | scope = self,
206 | id = 'SSMParamRepoUriS3SinkHC',
207 | string_value = self.docker_repo_s3sinkhc.repository_uri,
208 | parameter_name = 'repo_uri_s3_sink_hc'
209 | )
210 | self.docker_repo_sftp = ecr.Repository(
211 | scope = self,
212 | id = "FargateImageRepositorySftp"
213 | )
214 | ssm.StringParameter(
215 | scope = self,
216 | id = 'SSMParamRepoUriSftp',
217 | string_value = self.docker_repo_sftp.repository_uri,
218 | parameter_name = 'repo_uri_sftp'
219 | )
220 | self.docker_repo_batch = ecr.Repository(
221 | scope = self,
222 | id = "FargateImageRepositoryBatch"
223 | )
224 | ssm.StringParameter(
225 | scope = self,
226 | id = 'SSMParamRepoUriBatch',
227 | string_value = self.docker_repo_batch.repository_uri,
228 | parameter_name = 'repo_uri_batch'
229 | )
230 |
231 | # SFTP server
232 | self.sftpBucket = s3.Bucket(
233 | scope = self,
234 | id = "SFTPBucket",
235 | block_public_access = s3.BlockPublicAccess.BLOCK_ALL,
236 | encryption = s3.BucketEncryption.S3_MANAGED
237 | )
238 | core.CfnOutput(
239 | scope = self,
240 | id = "SFTPBucketName",
241 | value=self.sftpBucket.bucket_name
242 | )
243 | self.sftp_role = iam.Role(
244 | scope = self,
245 | id = "SFTPRole",
246 | assumed_by=iam.ServicePrincipal("transfer.amazonaws.com")
247 | )
248 | self.sftp_role.add_to_policy(
249 | statement = iam.PolicyStatement(
250 | resources = [self.sftpBucket.bucket_arn, self.sftpBucket.bucket_arn + '/*'],
251 | actions = ['s3:*']
252 | )
253 | )
254 | self.sftp_vpce = vpc.add_interface_endpoint(id = "SftpEndpoint", service = ec2.InterfaceVpcEndpointAwsService.TRANSFER )
255 | self.sftp_vpce.connections.allow_default_port_from(other = ec2.Peer.ipv4(vpc.vpc_cidr_block))
256 | self.sftp_vpce.connections.allow_default_port_from(other = ec2.Peer.ipv4(default_vpc_cidr_block))
257 | self.sftp_vpce.connections.allow_from(other = ec2.Peer.ipv4(vpc.vpc_cidr_block), port_range = ec2.Port.tcp(22))
258 | self.sftp_vpce.connections.allow_from(other = ec2.Peer.ipv4(default_vpc_cidr_block), port_range = ec2.Port.tcp(22))
259 | self.sftp = transfer.CfnServer(
260 | scope = self,
261 | id = "SFTP",
262 | endpoint_type = 'VPC_ENDPOINT',
263 | endpoint_details = transfer.CfnServer.EndpointDetailsProperty(vpc_endpoint_id = self.sftp_vpce.vpc_endpoint_id),
264 | identity_provider_type = 'SERVICE_MANAGED'
265 | )
266 | self.sftp_user = transfer.CfnUser(
267 | scope = self,
268 | id = "SFTPUser",
269 | role = self.sftp_role.role_arn,
270 | server_id = self.sftp.attr_server_id,
271 | user_name = "sftpuser"
272 | )
273 | core.CfnOutput(
274 | scope = self,
275 | id = "SFTPHostVpceOut",
276 | value=self.sftp_vpce.vpc_endpoint_id
277 | )
278 | core.CfnOutput(
279 | scope = self,
280 | id = "SFTPUserOut",
281 | value=self.sftp_user.attr_user_name
282 | )
283 | self.sftpCloudMap = namespace.create_service(
284 | id = 'SftpSvc',
285 | name = 'SFTP'
286 | )
287 | self.sftpCloudMap.register_non_ip_instance(
288 | id = 'sftpEndpoint',
289 | custom_attributes = { 'vpce_id': self.sftp_vpce.vpc_endpoint_id, 'user': 'sftpuser', 'bucket': self.sftpBucket.bucket_name}
290 | )
291 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | Creative Commons Attribution-ShareAlike 4.0 International Public License
2 |
3 | By exercising the Licensed Rights (defined below), You accept and agree to be bound by the terms and conditions of this Creative Commons Attribution-ShareAlike 4.0 International Public License ("Public License"). To the extent this Public License may be interpreted as a contract, You are granted the Licensed Rights in consideration of Your acceptance of these terms and conditions, and the Licensor grants You such rights in consideration of benefits the Licensor receives from making the Licensed Material available under these terms and conditions.
4 |
5 | Section 1 – Definitions.
6 |
7 | a. Adapted Material means material subject to Copyright and Similar Rights that is derived from or based upon the Licensed Material and in which the Licensed Material is translated, altered, arranged, transformed, or otherwise modified in a manner requiring permission under the Copyright and Similar Rights held by the Licensor. For purposes of this Public License, where the Licensed Material is a musical work, performance, or sound recording, Adapted Material is always produced where the Licensed Material is synched in timed relation with a moving image.
8 |
9 | b. Adapter's License means the license You apply to Your Copyright and Similar Rights in Your contributions to Adapted Material in accordance with the terms and conditions of this Public License.
10 |
11 | c. BY-SA Compatible License means a license listed at creativecommons.org/compatiblelicenses, approved by Creative Commons as essentially the equivalent of this Public License.
12 |
13 | d. Copyright and Similar Rights means copyright and/or similar rights closely related to copyright including, without limitation, performance, broadcast, sound recording, and Sui Generis Database Rights, without regard to how the rights are labeled or categorized. For purposes of this Public License, the rights specified in Section 2(b)(1)-(2) are not Copyright and Similar Rights.
14 |
15 | e. Effective Technological Measures means those measures that, in the absence of proper authority, may not be circumvented under laws fulfilling obligations under Article 11 of the WIPO Copyright Treaty adopted on December 20, 1996, and/or similar international agreements.
16 |
17 | f. Exceptions and Limitations means fair use, fair dealing, and/or any other exception or limitation to Copyright and Similar Rights that applies to Your use of the Licensed Material.
18 |
19 | g. License Elements means the license attributes listed in the name of a Creative Commons Public License. The License Elements of this Public License are Attribution and ShareAlike.
20 |
21 | h. Licensed Material means the artistic or literary work, database, or other material to which the Licensor applied this Public License.
22 |
23 | i. Licensed Rights means the rights granted to You subject to the terms and conditions of this Public License, which are limited to all Copyright and Similar Rights that apply to Your use of the Licensed Material and that the Licensor has authority to license.
24 |
25 | j. Licensor means the individual(s) or entity(ies) granting rights under this Public License.
26 |
27 | k. Share means to provide material to the public by any means or process that requires permission under the Licensed Rights, such as reproduction, public display, public performance, distribution, dissemination, communication, or importation, and to make material available to the public including in ways that members of the public may access the material from a place and at a time individually chosen by them.
28 |
29 | l. Sui Generis Database Rights means rights other than copyright resulting from Directive 96/9/EC of the European Parliament and of the Council of 11 March 1996 on the legal protection of databases, as amended and/or succeeded, as well as other essentially equivalent rights anywhere in the world.
30 |
31 | m. You means the individual or entity exercising the Licensed Rights under this Public License. Your has a corresponding meaning.
32 |
33 | Section 2 – Scope.
34 |
35 | a. License grant.
36 |
37 | 1. Subject to the terms and conditions of this Public License, the Licensor hereby grants You a worldwide, royalty-free, non-sublicensable, non-exclusive, irrevocable license to exercise the Licensed Rights in the Licensed Material to:
38 |
39 | A. reproduce and Share the Licensed Material, in whole or in part; and
40 |
41 | B. produce, reproduce, and Share Adapted Material.
42 |
43 | 2. Exceptions and Limitations. For the avoidance of doubt, where Exceptions and Limitations apply to Your use, this Public License does not apply, and You do not need to comply with its terms and conditions.
44 |
45 | 3. Term. The term of this Public License is specified in Section 6(a).
46 |
47 | 4. Media and formats; technical modifications allowed. The Licensor authorizes You to exercise the Licensed Rights in all media and formats whether now known or hereafter created, and to make technical modifications necessary to do so. The Licensor waives and/or agrees not to assert any right or authority to forbid You from making technical modifications necessary to exercise the Licensed Rights, including technical modifications necessary to circumvent Effective Technological Measures. For purposes of this Public License, simply making modifications authorized by this Section 2(a)(4) never produces Adapted Material.
48 |
49 | 5. Downstream recipients.
50 |
51 | A. Offer from the Licensor – Licensed Material. Every recipient of the Licensed Material automatically receives an offer from the Licensor to exercise the Licensed Rights under the terms and conditions of this Public License.
52 |
53 | B. Additional offer from the Licensor – Adapted Material. Every recipient of Adapted Material from You automatically receives an offer from the Licensor to exercise the Licensed Rights in the Adapted Material under the conditions of the Adapter’s License You apply.
54 |
55 | C. No downstream restrictions. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, the Licensed Material if doing so restricts exercise of the Licensed Rights by any recipient of the Licensed Material.
56 |
57 | 6. No endorsement. Nothing in this Public License constitutes or may be construed as permission to assert or imply that You are, or that Your use of the Licensed Material is, connected with, or sponsored, endorsed, or granted official status by, the Licensor or others designated to receive attribution as provided in Section 3(a)(1)(A)(i).
58 |
59 | b. Other rights.
60 |
61 | 1. Moral rights, such as the right of integrity, are not licensed under this Public License, nor are publicity, privacy, and/or other similar personality rights; however, to the extent possible, the Licensor waives and/or agrees not to assert any such rights held by the Licensor to the limited extent necessary to allow You to exercise the Licensed Rights, but not otherwise.
62 |
63 | 2. Patent and trademark rights are not licensed under this Public License.
64 |
65 | 3. To the extent possible, the Licensor waives any right to collect royalties from You for the exercise of the Licensed Rights, whether directly or through a collecting society under any voluntary or waivable statutory or compulsory licensing scheme. In all other cases the Licensor expressly reserves any right to collect such royalties.
66 |
67 | Section 3 – License Conditions.
68 |
69 | Your exercise of the Licensed Rights is expressly made subject to the following conditions.
70 |
71 | a. Attribution.
72 |
73 | 1. If You Share the Licensed Material (including in modified form), You must:
74 |
75 | A. retain the following if it is supplied by the Licensor with the Licensed Material:
76 |
77 | i. identification of the creator(s) of the Licensed Material and any others designated to receive attribution, in any reasonable manner requested by the Licensor (including by pseudonym if designated);
78 |
79 | ii. a copyright notice;
80 |
81 | iii. a notice that refers to this Public License;
82 |
83 | iv. a notice that refers to the disclaimer of warranties;
84 |
85 | v. a URI or hyperlink to the Licensed Material to the extent reasonably practicable;
86 |
87 | B. indicate if You modified the Licensed Material and retain an indication of any previous modifications; and
88 |
89 | C. indicate the Licensed Material is licensed under this Public License, and include the text of, or the URI or hyperlink to, this Public License.
90 |
91 | 2. You may satisfy the conditions in Section 3(a)(1) in any reasonable manner based on the medium, means, and context in which You Share the Licensed Material. For example, it may be reasonable to satisfy the conditions by providing a URI or hyperlink to a resource that includes the required information.
92 |
93 | 3. If requested by the Licensor, You must remove any of the information required by Section 3(a)(1)(A) to the extent reasonably practicable.
94 |
95 | b. ShareAlike. In addition to the conditions in Section 3(a), if You Share Adapted Material You produce, the following conditions also apply.
96 |
97 | 1. The Adapter’s License You apply must be a Creative Commons license with the same License Elements, this version or later, or a BY-SA Compatible License.
98 |
99 | 2. You must include the text of, or the URI or hyperlink to, the Adapter's License You apply. You may satisfy this condition in any reasonable manner based on the medium, means, and context in which You Share Adapted Material.
100 |
101 | 3. You may not offer or impose any additional or different terms or conditions on, or apply any Effective Technological Measures to, Adapted Material that restrict exercise of the rights granted under the Adapter's License You apply.
102 |
103 | Section 4 – Sui Generis Database Rights.
104 |
105 | Where the Licensed Rights include Sui Generis Database Rights that apply to Your use of the Licensed Material:
106 |
107 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right to extract, reuse, reproduce, and Share all or a substantial portion of the contents of the database;
108 |
109 | b. if You include all or a substantial portion of the database contents in a database in which You have Sui Generis Database Rights, then the database in which You have Sui Generis Database Rights (but not its individual contents) is Adapted Material, including for purposes of Section 3(b); and
110 |
111 | c. You must comply with the conditions in Section 3(a) if You Share all or a substantial portion of the contents of the database.
112 | For the avoidance of doubt, this Section 4 supplements and does not replace Your obligations under this Public License where the Licensed Rights include other Copyright and Similar Rights.
113 |
114 | Section 5 – Disclaimer of Warranties and Limitation of Liability.
115 |
116 | a. Unless otherwise separately undertaken by the Licensor, to the extent possible, the Licensor offers the Licensed Material as-is and as-available, and makes no representations or warranties of any kind concerning the Licensed Material, whether express, implied, statutory, or other. This includes, without limitation, warranties of title, merchantability, fitness for a particular purpose, non-infringement, absence of latent or other defects, accuracy, or the presence or absence of errors, whether or not known or discoverable. Where disclaimers of warranties are not allowed in full or in part, this disclaimer may not apply to You.
117 |
118 | b. To the extent possible, in no event will the Licensor be liable to You on any legal theory (including, without limitation, negligence) or otherwise for any direct, special, indirect, incidental, consequential, punitive, exemplary, or other losses, costs, expenses, or damages arising out of this Public License or use of the Licensed Material, even if the Licensor has been advised of the possibility of such losses, costs, expenses, or damages. Where a limitation of liability is not allowed in full or in part, this limitation may not apply to You.
119 |
120 | c. The disclaimer of warranties and limitation of liability provided above shall be interpreted in a manner that, to the extent possible, most closely approximates an absolute disclaimer and waiver of all liability.
121 |
122 | Section 6 – Term and Termination.
123 |
124 | a. This Public License applies for the term of the Copyright and Similar Rights licensed here. However, if You fail to comply with this Public License, then Your rights under this Public License terminate automatically.
125 |
126 | b. Where Your right to use the Licensed Material has terminated under Section 6(a), it reinstates:
127 |
128 | 1. automatically as of the date the violation is cured, provided it is cured within 30 days of Your discovery of the violation; or
129 |
130 | 2. upon express reinstatement by the Licensor.
131 |
132 | c. For the avoidance of doubt, this Section 6(b) does not affect any right the Licensor may have to seek remedies for Your violations of this Public License.
133 |
134 | d. For the avoidance of doubt, the Licensor may also offer the Licensed Material under separate terms or conditions or stop distributing the Licensed Material at any time; however, doing so will not terminate this Public License.
135 |
136 | e. Sections 1, 5, 6, 7, and 8 survive termination of this Public License.
137 |
138 | Section 7 – Other Terms and Conditions.
139 |
140 | a. The Licensor shall not be bound by any additional or different terms or conditions communicated by You unless expressly agreed.
141 |
142 | b. Any arrangements, understandings, or agreements regarding the Licensed Material not stated herein are separate from and independent of the terms and conditions of this Public License.
143 |
144 | Section 8 – Interpretation.
145 |
146 | a. For the avoidance of doubt, this Public License does not, and shall not be interpreted to, reduce, limit, restrict, or impose conditions on any use of the Licensed Material that could lawfully be made without permission under this Public License.
147 |
148 | b. To the extent possible, if any provision of this Public License is deemed unenforceable, it shall be automatically reformed to the minimum extent necessary to make it enforceable. If the provision cannot be reformed, it shall be severed from this Public License without affecting the enforceability of the remaining terms and conditions.
149 |
150 | c. No term or condition of this Public License will be waived and no failure to comply consented to unless expressly agreed to by the Licensor.
151 |
152 | d. Nothing in this Public License constitutes or may be interpreted as a limitation upon, or waiver of, any privileges and immunities that apply to the Licensor or You, including from the legal processes of any jurisdiction or authority.
153 |
--------------------------------------------------------------------------------
/labs/fargate-dev-workshop/README.md:
--------------------------------------------------------------------------------
1 | # Fargate for Devs
2 |
3 | _// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. // SPDX-License-Identifier: CC-BY-SA-4.0_
4 |
5 | ## Lab 1: Prerequisites
6 |
7 | First, we'll use the [AWS CDK](https://docs.aws.amazon.com/cdk/latest/guide/home.html) to deploy some prerequisites. Our guiding principle is that we'll use the CDK to deploy static infrastructure and prerequisites that are out of scope for this lab, and use a CI/CD pipeline to deploy the rest. For example, if we're building a stream processor, we might assume that the Kafka cluster is already in operation, but we need to deploy our actual stream processing application.
8 |
9 | #### Note your account and region
10 |
11 | Pick an AWS region to work in, such as `us-west-2`. We'll refer to this as `REGION` going forward.
12 |
13 | Also note your AWS account number. You can find it in the console or by running `aws sts get-caller-identity` on the CLI. We'll refer to this as `ACCOUNT` going forward.
14 |
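   | If you like, you can capture both values as shell variables once your CLI is configured (a convenience sketch; the variable names are our own and are not used by later commands):
   |
   |     ACCOUNT=$(aws sts get-caller-identity --query Account --output text)
   |     REGION=$(aws configure get region)
   |     echo $ACCOUNT $REGION
   |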
15 | #### Set up a Cloud9 IDE
16 |
17 | In the AWS console, go to the Cloud9 service and select `Create environment`. Call your new IDE `FargateIDE` and click `Next Step`. On the next screen, change the instance type to `m4.large` and click `Next step` again. On the final page, click `Create environment`. Make sure that you leave the VPC settings at the default values.
18 |
19 | Once the environment builds, you'll automatically be redirected to the IDE. Take a minute to explore the interface, and note that you can change the color scheme if you like (AWS Cloud9 menu -> Preferences -> Themes).
20 |
21 | Next, let's update the Cloud9 environment to let you run the labs from the environment.
22 |
23 | * Go to the IAM console and create an instance profile for the Cloud9 VM.
24 | * Go to the `Roles` section.
25 | * Click `Create role`.
26 | * Select `AWS service` for the entity and leave the service set to `EC2`.
27 | * On the next screen, choose `Create policy`.
28 | * Switch to the JSON tab and paste in the contents of the file `cloud9-iam.json`.
29 | * Call the policy `Cloud9-fargate-policy`.
30 | * Click `Create policy`.
31 | * Switch back to the browser tab with the new role, and assign the policy you just made.
32 | * Call the role `Cloud9-fargate-role`.
33 | * Click `Create role`.
34 | * Once this new profile is created, go to EC2, find the Cloud9 instance, and assign the new instance profile to it.
35 | * Go to Cloud9 Preferences and under AWS Credentials disable `AWS managed temporary credentials`.
36 |
37 | #### Deploy other prerequisites using CDK
38 |
39 | Git clone the workshop repo:
40 |
41 |     git clone https://github.com/aws-samples/amazon-ecs-fargate-workshop-dev-ops-data
42 |     mkdir ~/environment/fargate-dev-workshop
43 |     cp -r ~/environment/amazon-ecs-fargate-workshop-dev-ops-data/labs/fargate-dev-workshop/* ~/environment/fargate-dev-workshop/
44 |     cd ~/environment/fargate-dev-workshop
45 |
46 | In your Cloud9 environment, install the CDK and update some dependencies:
47 | 
48 |     npm install -g aws-cdk@1.19.0
49 |
50 | Update to the latest version of pip:
51 | 
52 |     curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
53 |     python get-pip.py
54 |
55 | Now we install the AWS CLI and some CDK modules:
56 | 
57 |     pip install awscli
58 |     pip install --upgrade aws-cdk.core
59 |     pip install -r requirements.txt
60 |
61 | Create the file `~/.aws/config` with these lines:
62 |
63 |     [default]
64 |     region=REGION
65 |     account=ACCOUNT
66 |
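   | Equivalently, you can let the CLI write these entries for you (substitute your real values; `aws configure set` writes arbitrary keys to the config file):
   |
   |     aws configure set region REGION
   |     aws configure set account ACCOUNT
   |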
67 | Set an environment variable that we'll use later:
68 | 
69 |     AWS_REGION=`aws configure get region`
70 |     echo $AWS_REGION
71 |
72 | We're now ready to deploy the prerequisites. Run the following, making sure to substitute the proper values for your `ACCOUNT` and `REGION`.
73 |
74 |     touch ~/.aws/credentials
75 |     cdk bootstrap aws://ACCOUNT/REGION
76 |     cdk synth
77 |     cdk deploy pipeline-to-ecr
78 |
79 | ## Lab 2: Deploy Docker image to ECR
80 |
81 | #### Pipeline Review
82 |
83 | The pipeline is made up of four steps:
84 | 
85 | 1 - the pipeline is triggered by a commit to the master branch of the git repository
86 | 2 - the container is linted to check for adherence to best practices
87 | 3 - the container is scanned for secrets / passwords to ensure no secrets are stored in the container
88 | 4 - the container is built and pushed to a container repository (ECR)
89 |
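   | To preview the lint stage locally before pushing, you can run hadolint against the Dockerfile yourself (a local sketch; the pipeline runs its own lint step with the configuration in `configs/`):
   |
   |     docker run --rm -i hadolint/hadolint < Dockerfile
   |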
91 |
92 | ### Connect to Git (CodeCommit)
93 |
94 | Let's connect to a Git repository. We'll do this manually, as setting up development tools is often done by hand.
95 | In the console, navigate to CodeCommit.
96 |
97 | Follow the instructions on the console to clone your new repo into a local folder in Cloud9.
98 |
99 | #### Initial Push
100 |
101 |     git init .
102 |     git remote add origin YOUR-CODECOMMIT-REPO-URL
103 |     git add .
104 |     git commit -m "Initial commit"
105 |     git push origin master
106 |
107 | #### Monitor your pipeline
108 |
109 | In the AWS Console, navigate to CodePipeline.
110 | Select your pipeline.
111 | 
112 | Your pipeline should have failed in the linting stage.
113 | For more information, click `Details` on the linting stage, then `Link to execution details`.
114 | You should be able to see the log for this step in the pipeline.
115 |
116 | #### Dockerfile best practices
117 |
118 | It is a best practice not to use the `:latest` tag when referencing a container image.
119 | Doing so can lead to an unwanted version being deployed.
120 | 
121 | Similarly, it is suggested to pin the version of any package you install in your containers.
122 | This avoids the chance of an unwanted package version being deployed.
123 | 
124 | If you do not specify a `USER` in a Dockerfile, the container will run with root permissions.
125 | It is best practice to ensure the container is limited to least privilege.
126 | This will be reviewed later when we dive into the ECS task definition.
127 | A non-root user can be declared directly in the Dockerfile, as sketched below.
128 |
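   | A hypothetical illustration of declaring a non-root user (this is not the workshop's Dockerfile, and `appuser` is an invented name):
   |
   |     # create an unprivileged user and switch to it for all subsequent instructions
   |     RUN adduser -D -u 1000 appuser
   |     USER appuser
   |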
129 | In the Dockerfile, change the following. Before:
130 | 
131 |     FROM httpd:latest
132 |     RUN apk -q add libcap
133 | 
134 | After:
135 | 
136 |     FROM httpd:2.4.41
137 |     RUN apk -q add libcap=2.27-r0 --no-cache
138 |
139 | These changes ensure we are pulling a specific version of the base Docker image, as well as of the package we add to our container.
140 |
141 | Push your changes
142 |
143 |     git add .
144 |     git commit -m "fixed docker FROM to a pinned version"
145 |     git push origin master
146 |
147 | ## Lab 3: Deploy an ECS / Fargate cluster
148 |
149 | #### Infrastructure Deployment
150 | 
151 | Before we deploy our VPC, ECS cluster, and service, let's enable CloudWatch Container Insights.
152 | Container Insights is a useful feature that gives you insight into the resource usage of your containers.
153 | 
154 | Running the following command turns on CloudWatch Container Insights for all ECS clusters subsequently deployed into this AWS account.
155 |
156 |     aws ecs put-account-setting --name "containerInsights" --value "enabled"
157 |
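   | You can verify the setting took effect (a quick check; `--effective-settings` returns the resolved value for your account):
   |
   |     aws ecs list-account-settings --name containerInsights --effective-settings
   |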
158 | Now we will deploy the test environment stack:
159 | 
160 |     cdk deploy ecs-inf-test
161 |
162 | This stack will deploy the VPC, ECS Cluster, Load Balancer, and AutoScaling groups.
163 |
164 | When the stack has finished deploying, it should display the load balancer's URL as an output:
165 | 
166 |     ecs-inf-test.lburl = ecs-i-loadb-11111111-11111111.us-west-2.elb.amazonaws.com
167 | 
168 | Open a web browser and navigate to the load balancer URL provided.
169 |
170 | ## Lab 4: Blue/Green deploy to ECS
171 |
172 | In this lab you will update your container image and then deploy it in a blue/green fashion.
173 |
174 | Before you can do that, you will delete the service we just deployed.
175 | In the AWS console, navigate to the ECS console and select the cluster for the workshop.
176 | Select the deployed service and delete it.
177 | Go to Tasks, and stop all running tasks.
178 |
179 | #### Navigate back to the configs
180 |
181 | In your Cloud9 editor, open the file `configs/docker_build_base.yml`.
182 |
183 | Change the following.
184 | Before:
185 | 
186 |     - docker build -t $project_name:$tag .
187 |     - docker tag $project_name:$tag $ecr:$tag
188 |     - docker push $ecr
189 | 
190 | After:
191 | 
192 |     - docker build -t $project_name:$IMAGE_TAG .
193 |     - docker tag $project_name:$IMAGE_TAG $ecr:$IMAGE_TAG
194 |     - docker push $ecr
195 |
196 | These changes modify the tag applied to the container image in your ECR repository.
197 | The new tag will be the GUID of the build which produced the image.
198 | You can also use this GUID to trace back which CodeBuild action actually built the container.
199 |
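   | For reference, a build ID tag like this is commonly derived from CodeBuild's built-in `CODEBUILD_BUILD_ID` variable, which has the form `project-name:build-guid`. A sketch of how `IMAGE_TAG` might be set (an assumption for illustration, not a quote from this pipeline's buildspec):
   |
   |     IMAGE_TAG=$(echo $CODEBUILD_BUILD_ID | cut -d':' -f2)
   |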
200 | Open `app/index.html` and add some text to the body of the HTML to visualize the changes made.
201 |
202 | Push your changes
203 |
204 | git add .
205 | git commit -m "Update pipeline to tag container with build number"
206 | git push origin master
207 |
208 | There are a few files which are needed for the next steps:
209 |
210 | * ECS task definition
211 | * ECS service definition
212 | * CodeDeploy deployment group
213 | * CodeDeploy appspec
214 |
215 | These files currently exist in the `/configs` directory. Also in this directory is a Python script, `produce-configs.py`.
216 | 
217 | Wait until your previous push has completed and the image has been built.
218 | 
219 | This script produces the config files needed for the deployment, querying the environment we deployed previously to populate variables.
220 | You will need to pass in the most recent Docker image tag (the placeholders below follow the `ACCOUNT`/`REGION` convention from Lab 1):
221 | 
222 |     python produce-configs.py fargate-dev-workshop test ACCOUNT.dkr.ecr.REGION.amazonaws.com/fargate-dev-workshop:BUILD-GUID
223 |
224 | Once we have created the necessary config files, we can begin to create our new service.
225 |
226 | #### Create ECS service
227 |
228 |     aws ecs create-service --region us-west-2 --service-name fargate-dev-workshop-test --cli-input-json file://./service-definition-test.json
229 |
230 | #### Create the CodeDeploy application
231 |
232 |     aws deploy create-application --region us-west-2 --application-name fargate-dev-workshop-test --compute-platform ECS
233 |
234 |
235 | #### Create the CodeDeploy deployment group
236 |
237 |     aws deploy create-deployment-group --region us-west-2 \
238 |         --deployment-group-name ecs-fargate-workshop-test-dg --cli-input-json file://./deployment-group-test.json
239 |
240 | #### Deploy the changes
241 |
242 |     aws ecs deploy --region us-west-2 --service 'fargate-dev-workshop-test' \
243 |         --cluster 'fargate-dev-workshop-test' --codedeploy-application 'fargate-dev-workshop-test' \
244 |         --codedeploy-deployment-group 'ecs-fargate-workshop-test-dg' \
245 |         --task-definition task-definition-test.json --codedeploy-appspec appsectest.json
246 |
247 | You will now be able to monitor your deployment in the AWS CodeDeploy console.
248 |
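   | If you prefer the CLI, you can watch the deployment from your terminal (a convenience sketch using the application and deployment group names created above):
   |
   |     DEPLOYMENT_ID=$(aws deploy list-deployments --application-name fargate-dev-workshop-test \
   |         --deployment-group-name ecs-fargate-workshop-test-dg --query 'deployments[0]' --output text)
   |     aws deploy get-deployment --deployment-id $DEPLOYMENT_ID --query 'deploymentInfo.status'
   |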
249 | ## Lab 5: Container Observability
250 |
251 | #### Deploy CloudFormation to create some traffic
252 | Replace the `ParameterValue` with your specific load balancer URL:
253 | 
254 |     aws cloudformation create-stack --stack-name ecs-fargate-workshop-traffic \
255 |         --template-body file://./r53_health_check.yaml \
256 |         --parameters ParameterKey=url,ParameterValue="YOUR LOAD BALANCER URL"
257 | Navigate to the CloudWatch console.
258 | Near the top left of the page, where the `Overview` drop-down menu is, select `Container Insights`.
259 | In the drop-down menu under Container Insights, take a look at the different metrics captured per ECS service, ECS cluster, and ECS task.
260 | 
261 | You can select a time frame on the graphs and also view the container logs from the time span you specified.
262 |
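   | The same metrics are also queryable from the CLI (a quick sketch; Container Insights publishes metrics under the `ECS/ContainerInsights` namespace):
   |
   |     aws cloudwatch list-metrics --namespace ECS/ContainerInsights
   |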
263 | ## Lab 6: Update Task Definition
264 |
265 | ### Review the Task Definition
266 | 
267 | Ensure the container is running without elevated privileges.
268 | The default is `false`; however, it's suggested to also state this explicitly in your task definition.
269 |
270 | "privileged": true|false
271 |
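   | You can confirm what a registered task definition actually contains (a quick check; substitute your task definition's family name):
   |
   |     aws ecs describe-task-definition --task-definition YOUR-TASK-FAMILY \
   |         --query 'taskDefinition.containerDefinitions[0].privileged'
   |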
272 | #### Linux Capabilities
273 | Linux capabilities control what privileges the container has on the host. It is best practice to apply least privilege here as well, limiting what the container is allowed to ask of Docker. For example, the container should not need access to the logs: when it logs to stderr and stdout, Docker takes care of this for the container. Nor should the container need to edit the network, since networking is provided for it. In this example we drop two of the most privileged capabilities.
274 |
275 | "linuxParameters": {
276 | "capabilities": {
277 | "drop": ["SYS_ADMIN","NET_ADMIN"]
278 | }
279 | }
280 |
281 | #### ulimits
282 | Amazon ECS task definitions for Fargate support the ulimits parameter to define the resource limits to set for a container.
283 |
284 | Fargate tasks use the default resource limit values with the exception of the nofile resource limit parameter, which Fargate overrides.
285 | The nofile resource limit sets a restriction on the number of open files that a container can use.
286 | The default nofile soft limit is 1024 and the hard limit is 4096 for Fargate tasks. These limits can be adjusted in a task definition if your tasks need to handle a larger number of files.
287 |
288 | "ulimits": [
289 | {
290 | "name": "cpu",
291 | "softLimit": 0,
292 | "hardLimit": 0
293 | }
294 | ],
295 |
296 | Run the following commands to update the task definition which you will deploy.
297 | Due to low utilization, we will reconfigure the task to use the smallest amount of compute Fargate offers.
298 |
299 | Open `app/index.html` and add some text to the body of the HTML to visualize the changes made.
300 |
301 | Push your changes
302 |
303 |     git add .
304 |     git commit -m "Update pipeline to tag container with build number"
305 |     git push origin master
306 |
307 | Wait for the container to be published in ECR.
308 | Then run the following commands in the `/configs/` directory:
309 | 
310 |     sed -i '/cpu/c\ \"cpu\" : \"256\",' task-definition-test.json
311 |     sed -i '/memory/c\ \"memory\" : \"512\",' task-definition-test.json
312 |     sed -i '/image/c\ \"image\" : \"YOUR-CONTAINER-REPO:BUILD-GUID\",' task-definition-test.json
313 |
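   | Optionally, sanity-check the edits before deploying:
   |
   |     grep -E '"(cpu|memory|image)"' task-definition-test.json
   |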
314 | Deploy the updated container and task definition specifying fewer resources.
315 |
316 |     aws ecs deploy --region us-west-2 --service 'fargate-dev-workshop-test' \
317 |         --cluster 'fargate-dev-workshop-test' --codedeploy-application 'fargate-dev-workshop-test' \
318 |         --codedeploy-deployment-group 'ecs-fargate-workshop-test-dg' \
319 |         --task-definition task-definition-test.json --codedeploy-appspec appsectest.json
320 |
321 | ## Lab 7: Clean up
322 |
323 |     aws deploy delete-deployment-group --deployment-group-name 'ecs-fargate-workshop-test-dg' --application-name fargate-dev-workshop-test
324 |     aws deploy delete-application --application-name fargate-dev-workshop-test
325 |     aws cloudformation delete-stack --stack-name ecs-fargate-workshop-traffic
326 |     cdk destroy ecs-inf-test
327 |     cdk destroy pipeline-to-ecr
328 |
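   | Note: `cdk destroy` can fail if the ECR repository still contains images. If so, you may need to force-delete the repository first (the repository name here is an assumption; check the pipeline stack for the actual name):
   |
   |     # --force deletes the repository even if it still contains images
   |     aws ecr delete-repository --repository-name fargate-dev-workshop --force
   |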
--------------------------------------------------------------------------------