├── .github └── PULL_REQUEST_TEMPLATE.md ├── .gitignore ├── 1-no-container ├── README.md ├── db.json ├── index.js ├── package.json └── server.js ├── 2-containerized ├── README.md ├── deploy.sh ├── infrastructure │ └── ecs.yml └── services │ └── api │ ├── Dockerfile │ ├── db.json │ ├── package.json │ ├── rule.json │ └── server.js ├── 3-microservices ├── README.md ├── deploy.sh ├── infrastructure │ └── ecs.yml └── services │ ├── posts │ ├── Dockerfile │ ├── db.json │ ├── package.json │ ├── rule.json │ └── server.js │ ├── threads │ ├── Dockerfile │ ├── db.json │ ├── package.json │ ├── rule.json │ └── server.js │ └── users │ ├── Dockerfile │ ├── db.json │ ├── package.json │ ├── rule.json │ └── server.js ├── CODE_OF_CONDUCT.md ├── CONTRIBUTING.md ├── LICENSE ├── README.md └── images ├── microservice-containers.png ├── monolithic-containers.png └── monolithic-no-container.png /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | *Issue #, if available:* 2 | 3 | *Description of changes:* 4 | 5 | 6 | By submitting this pull request, I confirm that my contribution is made under the terms of the Apache 2.0 license. 7 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | node_modules 3 | -------------------------------------------------------------------------------- /1-no-container/README.md: -------------------------------------------------------------------------------- 1 | ## Basic Node.js Server 2 | 3 | This is an example of a basic monolithic node.js service that has been designed to run directly on a server, without a container. 4 | 5 | ### Architecture 6 | 7 | Since Node.js programs run a single threaded event loop it is necessary to use the node `cluster` functionality in order to get maximum usage out of a multi-core server. 
8 | 9 | In this example `cluster` is used to spawn one worker process per core, and the processes share a single port using round robin load balancing built into Node.js 10 | 11 | We can use an Application Load Balancer to round robin requests across multiple servers, providing horizontal scaling. 12 | 13 | ![Reference diagram of the basic node application deployment](../images/monolithic-no-container.png) 14 | -------------------------------------------------------------------------------- /1-no-container/db.json: -------------------------------------------------------------------------------- 1 | { 2 | "users": [ 3 | { 4 | "id": 1, 5 | "username": "marceline", 6 | "name": "Marceline Abadeer", 7 | "bio": "1000 year old vampire queen, musician" 8 | }, 9 | { 10 | "id": 2, 11 | "username": "finn", 12 | "name": "Finn 'the Human' Mertens", 13 | "bio": "Adventurer and hero, last human, defender of good" 14 | }, 15 | { 16 | "id": 3, 17 | "username": "pb", 18 | "name": "Bonnibel Bubblegum", 19 | "bio": "Scientist, bearer of candy power, ruler of the candy kingdom" 20 | }, 21 | { 22 | "id": 4, 23 | "username": "jake", 24 | "name": "Jake the Dog", 25 | "bio": "Former criminal, now magical dog adventurer, and father" 26 | } 27 | ], 28 | 29 | "threads": [ 30 | { 31 | "id": 1, 32 | "title": "What's up with the Lich?", 33 | "createdBy": 4 34 | }, 35 | { 36 | "id": 2, 37 | "title": "Party at the candy kingdom tomorrow", 38 | "createdBy": 3 39 | }, 40 | { 41 | "id": 3, 42 | "title": "In search of a new guitar", 43 | "createdBy": 1 44 | } 45 | ], 46 | 47 | "posts": [ 48 | { 49 | "thread": 1, 50 | "text": "Has anyone checked on the lich recently?", 51 | "user": 4 52 | }, 53 | { 54 | "thread": 1, 55 | "text": "I'll stop by and see how he's doing tomorrow!", 56 | "user": 2 57 | }, 58 | { 59 | "thread": 2, 60 | "text": "Come party with the candy people tomorrow!", 61 | "user": 3 62 | }, 63 | { 64 | "thread": 2, 65 | "text": "Mathematical!", 66 | "user": 2 67 | }, 68 | { 69 | "thread": 
2, 70 | "text": "I'll bring my guitar", 71 | "user": 1 72 | }, 73 | { 74 | "thread": 3, 75 | "text": "I need a new guitar to play the most savory licks in Ooo", 76 | "user": 1 77 | } 78 | ] 79 | } 80 | -------------------------------------------------------------------------------- /1-no-container/index.js: -------------------------------------------------------------------------------- 1 | const cluster = require('cluster'); 2 | const http = require('http'); 3 | const numCPUs = require('os').cpus().length; 4 | 5 | if (cluster.isMaster) { 6 | console.log(`Leader ${process.pid} is running`); 7 | 8 | // Fork workers. 9 | for (let i = 0; i < numCPUs; i++) { 10 | cluster.fork(); 11 | } 12 | 13 | cluster.on('exit', (worker, code, signal) => { 14 | console.log(`worker ${worker.process.pid} died`); 15 | }); 16 | } else { 17 | require('./server.js'); 18 | 19 | console.log(`Worker ${process.pid} started`); 20 | } 21 | -------------------------------------------------------------------------------- /1-no-container/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "koa": "^1.2.5", 4 | "koa-router": "^5.4.0" 5 | }, 6 | "scripts": { 7 | "start": "node index.js" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /1-no-container/server.js: -------------------------------------------------------------------------------- 1 | const app = require('koa')(); 2 | const router = require('koa-router')(); 3 | const db = require('./db.json'); 4 | 5 | // Log requests 6 | app.use(function *(next){ 7 | const start = new Date; 8 | yield next; 9 | const ms = new Date - start; 10 | console.log('%s %s - %s', this.method, this.url, ms); 11 | }); 12 | 13 | router.get('/api/users', function *(next) { 14 | this.body = db.users; 15 | }); 16 | 17 | router.get('/api/users/:userId', function *(next) { 18 | const id = parseInt(this.params.userId); 19 | this.body = db.users.find((user) 
=> user.id == id); 20 | }); 21 | 22 | router.get('/api/threads', function *() { 23 | this.body = db.threads; 24 | }); 25 | 26 | router.get('/api/threads/:threadId', function *() { 27 | const id = parseInt(this.params.threadId); 28 | this.body = db.threads.find((thread) => thread.id == id); 29 | }); 30 | 31 | router.get('/api/posts/in-thread/:threadId', function *() { 32 | const id = parseInt(this.params.threadId); 33 | this.body = db.posts.filter((post) => post.thread == id); 34 | }); 35 | 36 | router.get('/api/posts/by-user/:userId', function *() { 37 | const id = parseInt(this.params.userId); 38 | this.body = db.posts.filter((post) => post.user == id); 39 | }); 40 | 41 | router.get('/api/', function *() { 42 | this.body = "API ready to receive requests"; 43 | }); 44 | 45 | router.get('/', function *() { 46 | this.body = "Ready to receive requests"; 47 | }); 48 | 49 | app.use(router.routes()); 50 | app.use(router.allowedMethods()); 51 | 52 | app.listen(3000); 53 | -------------------------------------------------------------------------------- /2-containerized/README.md: -------------------------------------------------------------------------------- 1 | ## Deploying in containers 2 | 3 | In this example we take our node application and put it into a container for deployment on EC2 Container Service. 4 | 5 | ![Reference architecture of the containerized monolith](../images/monolithic-containers.png) 6 | 7 | ### Why containers? 8 | 9 | __Dependency Control__: Containers wrap application code in a unit of deployment that captures a snapshot of the code as well as its dependencies, which solves a few problems: 10 | 11 | - The version of `node` on a local developer's machine may not match the version on the production servers, or the version on the CI server, allowing developers to ship code that runs locally but fails in production. On the other hand a container will ship with a specific version of node included. 
12 | - If `package.json` dependencies aren't rigorously shrinkwrapped then `npm install` may end up installing different package versions locally, on a CI server, and on the production servers. Containers solve this by including all the npm dependencies with the application code. 13 | - Even if dependencies are locked down using a shrinkwrap file a particular package you depend on [may be unavailable, or removed](http://blog.npmjs.org/post/141577284765/kik-left-pad-and-npm). If this happens it doesn't stop a container from working, because the container still has a copy of the package from the moment that the container was built. 14 | 15 | __Improved Pipeline__: The container also allows an engineering organization to create a standard pipeline for the application lifecycle. For example: 16 | 17 | 1. Developers build and run container locally. 18 | 2. CI server runs the same container and executes integration tests against it to make sure it passes expectations. 19 | 3. Same container is shipped to a staging environment where its runtime behavior can be checked using load tests or manual QA. 20 | 4. Same container is finally shipped to production. 21 | 22 | Being able to ship the exact same container through all four stages of the process makes delivering a high quality, reliable application considerably easier. 23 | 24 | __No mutations to machines:__ When applications are deployed directly onto instances you run the risk of a bad deploy corrupting an instance configuration in a way that is hard to recover from. For example imagine a deployed application which requires some custom configurations in `/etc`. This can become a very fragile deploy as well as one that is hard to roll back if needed. However with a containerized application the container carries its own filesystem with its own `/etc` and any custom configuration changes that are part of this container will be sandboxed to that application's environment only. 
The underlying instance's configurations stay the same. In fact a container can't even make persistent filesystem changes without an explicit mounted volume which grants the container access to a limited area on the host instance. 25 | 26 | ## Why EC2 Container Service? 27 | 28 | EC2 Container Service provides orchestration for your containers. It automates the process of launching containers across your fleet of instances according to rules you specify, then automates keeping track of where those containers are running so that you can use a load balancer to get traffic to them. It also has built in features to roll out deploys with zero downtime, gather metrics and logs from your containers, and auto scale the number of containers you are running based on metrics. 29 | 30 | ## Application Changes for Docker 31 | 32 | 1. __Single process instead of `cluster`.__ The first and biggest change involved with containerizing this application is getting rid of `cluster`. With docker containers the goal is to run a single process per container, rather than a cluster of processes. 33 | 34 | The reason for this change is that a lightweight container with a single process in it allows for greater granularity and flexibility in container placement onto infrastructure. A large container that has four processes in it and requires four cores of CPU power can only be run on an instance of a particular size. However by breaking that up into four containers that each have a single process in them we can now make use of two smaller instances that will each run two containers, or even four tiny instances that will each run a single container. Or we could go the opposite direction and easily run 64 of these small containers on a single massive instance. 35 | 36 | 2. __Create `Dockerfile`:__ This file is basically a build script that creates the container. The base container that the dockerfile starts from contains a specific version of node.js. 
Then the rest of the commands add both the application code and the `node_modules` folder into the container. The result is a container image that is a reliable unit of deployment. The container can be run locally, or run on a remote server. It will run the same in both places. 37 | 38 | ## Deployment 39 | 40 | 1. Launch an ECS cluster using the Cloudformation template: 41 | 42 | ``` 43 | $ aws cloudformation deploy \ 44 | --template-file infrastructure/ecs.yml \ 45 | --region \ 46 | --stack-name \ 47 | --capabilities CAPABILITY_NAMED_IAM 48 | ``` 49 | 50 | 2. Deploy the services onto your cluster: 51 | 52 | ``` 53 | $ ./deploy.sh 54 | ``` 55 | -------------------------------------------------------------------------------- /2-containerized/deploy.sh: -------------------------------------------------------------------------------- 1 | REGION=$1 2 | STACK_NAME=$2 3 | 4 | DEPLOYABLE_SERVICES=( 5 | api 6 | ); 7 | 8 | PRIMARY='\033[0;34m' 9 | NC='\033[0m' # No Color 10 | 11 | # Fetch the stack metadata for use later 12 | printf "${PRIMARY}* Fetching current stack state${NC}\n"; 13 | 14 | QUERY=$(cat <<-EOF 15 | [ 16 | Stacks[0].Outputs[?OutputKey==\`ClusterName\`].OutputValue, 17 | Stacks[0].Outputs[?OutputKey==\`ALBArn\`].OutputValue, 18 | Stacks[0].Outputs[?OutputKey==\`ECSRole\`].OutputValue, 19 | Stacks[0].Outputs[?OutputKey==\`Url\`].OutputValue, 20 | Stacks[0].Outputs[?OutputKey==\`VPCId\`].OutputValue 21 | ] 22 | EOF) 23 | 24 | RESULTS=$(aws cloudformation describe-stacks \ 25 | --stack-name $STACK_NAME \ 26 | --region $REGION \ 27 | --query "$QUERY" \ 28 | --output text); 29 | RESULTS_ARRAY=($RESULTS) 30 | 31 | CLUSTER_NAME=${RESULTS_ARRAY[0]} 32 | ALB_ARN=${RESULTS_ARRAY[1]} 33 | ECS_ROLE=${RESULTS_ARRAY[2]} 34 | URL=${RESULTS_ARRAY[3]} 35 | VPCID=${RESULTS_ARRAY[4]} 36 | 37 | printf "${PRIMARY}* Authenticating with EC2 Container Repository${NC}\n"; 38 | 39 | `aws ecr get-login --region $REGION --no-include-email` 40 | 41 | # Tag for versioning the container 
images, currently set to timestamp 42 | TAG=`date +%s` 43 | 44 | for SERVICE_NAME in "${DEPLOYABLE_SERVICES[@]}" 45 | do 46 | printf "${PRIMARY}* Locating the ECR repository for service \`${SERVICE_NAME}\`${NC}\n"; 47 | 48 | # Find the ECR repo to push to 49 | REPO=`aws ecr describe-repositories \ 50 | --region $REGION \ 51 | --repository-names "$SERVICE_NAME" \ 52 | --query "repositories[0].repositoryUri" \ 53 | --output text` 54 | 55 | if [ "$?" != "0" ]; then 56 | # The repository was not found, create it 57 | printf "${PRIMARY}* Creating new ECR repository for service \`${SERVICE_NAME}\`${NC}\n"; 58 | 59 | REPO=`aws ecr create-repository \ 60 | --region $REGION \ 61 | --repository-name "$SERVICE_NAME" \ 62 | --query "repository.repositoryUri" \ 63 | --output text` 64 | fi 65 | 66 | printf "${PRIMARY}* Building \`${SERVICE_NAME}\`${NC}\n"; 67 | 68 | # Build the container, and assign a tag to it for versioning 69 | (cd services/$SERVICE_NAME && npm install); 70 | docker build -t $SERVICE_NAME ./services/$SERVICE_NAME 71 | docker tag $SERVICE_NAME:latest $REPO:$TAG 72 | 73 | # Push the tag up so we can make a task definition for deploying it 74 | printf "${PRIMARY}* Pushing \`${SERVICE_NAME}\`${NC}\n"; 75 | 76 | docker push $REPO:$TAG 77 | 78 | printf "${PRIMARY}* Creating new task definition for \`${SERVICE_NAME}\`${NC}\n"; 79 | 80 | # Build an create the task definition for the container we just pushed 81 | CONTAINER_DEFINITIONS=$(cat <<-EOF 82 | [{ 83 | "name": "$SERVICE_NAME", 84 | "image": "$REPO:$TAG", 85 | "cpu": 256, 86 | "memory": 256, 87 | "portMappings": [{ 88 | "containerPort": 3000, 89 | "hostPort": 0 90 | }], 91 | "essential": true 92 | }] 93 | EOF) 94 | 95 | TASK_DEFINITION_ARN=`aws ecs register-task-definition \ 96 | --region $REGION \ 97 | --family $SERVICE_NAME \ 98 | --container-definitions "$CONTAINER_DEFINITIONS" \ 99 | --query "taskDefinition.taskDefinitionArn" \ 100 | --output text` 101 | 102 | # Ensure that the service exists in ECS 103 | 
STATUS=`aws ecs describe-services \ 104 | --region $REGION \ 105 | --cluster $CLUSTER_NAME \ 106 | --services $SERVICE_NAME \ 107 | --query "services[0].status" \ 108 | --output text` 109 | 110 | if [ "$STATUS" != "ACTIVE" ]; then 111 | # New service that needs to be deployed because it hasn't 112 | # been created yet. 113 | if [ -e "./services/$SERVICE_NAME/rule.json" ]; then 114 | # If this service has a rule setup for routing traffic to the service, then 115 | # create a target group for the service, and a rule on the ELB for routing 116 | # traffic to the target group. 117 | printf "${PRIMARY}* Setting up web facing service \`${SERVICE_NAME}\`${NC}\n"; 118 | printf "${PRIMARY}* Creating target group for service \`${SERVICE_NAME}\`${NC}\n"; 119 | 120 | TARGET_GROUP_ARN=`aws elbv2 create-target-group \ 121 | --region $REGION \ 122 | --name $SERVICE_NAME \ 123 | --vpc-id $VPCID \ 124 | --port 80 \ 125 | --protocol HTTP \ 126 | --health-check-protocol HTTP \ 127 | --health-check-path / \ 128 | --health-check-interval-seconds 6 \ 129 | --health-check-timeout-seconds 5 \ 130 | --healthy-threshold-count 2 \ 131 | --unhealthy-threshold-count 2 \ 132 | --query "TargetGroups[0].TargetGroupArn" \ 133 | --output text` 134 | 135 | printf "${PRIMARY}* Locating load balancer listener \`${SERVICE_NAME}\`${NC}\n"; 136 | 137 | LISTENER_ARN=`aws elbv2 describe-listeners \ 138 | --region $REGION \ 139 | --load-balancer-arn $ALB_ARN \ 140 | --query "Listeners[0].ListenerArn" \ 141 | --output text` 142 | 143 | if [ "$LISTENER_ARN" == "None" ]; then 144 | printf "${PRIMARY}* Creating listener for load balancer${NC}\n"; 145 | 146 | LISTENER_ARN=`aws elbv2 create-listener \ 147 | --region $REGION \ 148 | --load-balancer-arn $ALB_ARN \ 149 | --port 80 \ 150 | --protocol HTTP \ 151 | --query "Listeners[0].ListenerArn" \ 152 | --default-actions Type=forward,TargetGroupArn=$TARGET_GROUP_ARN \ 153 | --output text` 154 | fi 155 | 156 | printf "${PRIMARY}* Adding rule to load balancer 
listener \`${SERVICE_NAME}\`${NC}\n"; 157 | 158 | # Manipulate the template to customize it with the target group and listener 159 | RULE_DOC=`cat ./services/$SERVICE_NAME/rule.json | 160 | jq ".ListenerArn=\"$LISTENER_ARN\" | .Actions[0].TargetGroupArn=\"$TARGET_GROUP_ARN\""` 161 | 162 | aws elbv2 create-rule \ 163 | --region $REGION \ 164 | --cli-input-json "$RULE_DOC" 165 | 166 | printf "${PRIMARY}* Creating new web facing service \`${SERVICE_NAME}\`${NC}\n"; 167 | 168 | LOAD_BALANCERS=$(cat <<-EOF 169 | [{ 170 | "targetGroupArn": "$TARGET_GROUP_ARN", 171 | "containerName": "$SERVICE_NAME", 172 | "containerPort": 3000 173 | }] 174 | EOF) 175 | 176 | RESULT=`aws ecs create-service \ 177 | --region $REGION \ 178 | --cluster $CLUSTER_NAME \ 179 | --load-balancers "$LOAD_BALANCERS" \ 180 | --service-name $SERVICE_NAME \ 181 | --role $ECS_ROLE \ 182 | --task-definition $TASK_DEFINITION_ARN \ 183 | --desired-count 1` 184 | else 185 | # This service doesn't have a web interface, just create it without load balancer settings 186 | printf "${PRIMARY}* Creating new background service \`${SERVICE_NAME}\`${NC}\n"; 187 | RESULT=`aws ecs create-service \ 188 | --region $REGION \ 189 | --cluster $CLUSTER_NAME \ 190 | --service-name $SERVICE_NAME \ 191 | --task-definition $TASK_DEFINITION_ARN \ 192 | --desired-count 1` 193 | fi 194 | else 195 | # The service already existed, just update the existing service. 
196 | printf "${PRIMARY}* Updating service \`${SERVICE_NAME}\` with task definition \`${TASK_DEFINITION_ARN}\`${NC}\n"; 197 | RESULT=`aws ecs update-service \ 198 | --region $REGION \ 199 | --cluster $CLUSTER_NAME \ 200 | --service $SERVICE_NAME \ 201 | --task-definition $TASK_DEFINITION_ARN` 202 | fi 203 | done 204 | 205 | printf "${PRIMARY}* Done, application is at: http://${URL}${NC}\n"; 206 | printf "${PRIMARY}* (It may take a minute for the container to register as healthy and begin receiving traffic.)${NC}\n"; 207 | -------------------------------------------------------------------------------- /2-containerized/infrastructure/ecs.yml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Parameters: 3 | DesiredCapacity: 4 | Type: Number 5 | Default: '2' 6 | Description: Number of instances to launch in your ECS cluster. 7 | MaxSize: 8 | Type: Number 9 | Default: '2' 10 | Description: Maximum number of instances that can be launched in your ECS cluster. 11 | InstanceType: 12 | Description: EC2 instance type 13 | Type: String 14 | Default: t2.micro 15 | AllowedValues: [t2.micro, t2.small, t2.medium, t2.large, t2.xlarge, t3.micro, t3.small, 16 | t3.medium, t3.large, t3.xlarge, m4.large, m4.xlarge, m5.large, m5.xlarge, c4.large, 17 | c4.xlarge, c5.large, c5.xlarge, r4.large, r4.xlarge, r5.large, r5.xlarge, i3.large, 18 | i3.xlarge] 19 | ConstraintDescription: Please choose a valid instance type. 
20 | ECSAMI: 21 | Description: AMI ID 22 | Type: AWS::SSM::Parameter::Value 23 | Default: /aws/service/ecs/optimized-ami/amazon-linux/recommended/image_id 24 | Mappings: 25 | SubnetConfig: 26 | VPC: 27 | CIDR: '10.0.0.0/16' 28 | PublicOne: 29 | CIDR: '10.0.0.0/24' 30 | PublicTwo: 31 | CIDR: '10.0.1.0/24' 32 | Resources: 33 | # VPC into which stack instances will be placed 34 | VPC: 35 | Type: AWS::EC2::VPC 36 | Properties: 37 | EnableDnsSupport: true 38 | EnableDnsHostnames: true 39 | CidrBlock: !FindInMap ['SubnetConfig', 'VPC', 'CIDR'] 40 | PublicSubnetOne: 41 | Type: AWS::EC2::Subnet 42 | Properties: 43 | AvailabilityZone: 44 | Fn::Select: 45 | - 0 46 | - Fn::GetAZs: {Ref: 'AWS::Region'} 47 | VpcId: !Ref 'VPC' 48 | CidrBlock: !FindInMap ['SubnetConfig', 'PublicOne', 'CIDR'] 49 | MapPublicIpOnLaunch: true 50 | PublicSubnetTwo: 51 | Type: AWS::EC2::Subnet 52 | Properties: 53 | AvailabilityZone: 54 | Fn::Select: 55 | - 1 56 | - Fn::GetAZs: {Ref: 'AWS::Region'} 57 | VpcId: !Ref 'VPC' 58 | CidrBlock: !FindInMap ['SubnetConfig', 'PublicTwo', 'CIDR'] 59 | MapPublicIpOnLaunch: true 60 | InternetGateway: 61 | Type: AWS::EC2::InternetGateway 62 | GatewayAttachement: 63 | Type: AWS::EC2::VPCGatewayAttachment 64 | Properties: 65 | VpcId: !Ref 'VPC' 66 | InternetGatewayId: !Ref 'InternetGateway' 67 | PublicRouteTable: 68 | Type: AWS::EC2::RouteTable 69 | Properties: 70 | VpcId: !Ref 'VPC' 71 | PublicRoute: 72 | Type: AWS::EC2::Route 73 | DependsOn: GatewayAttachement 74 | Properties: 75 | RouteTableId: !Ref 'PublicRouteTable' 76 | DestinationCidrBlock: '0.0.0.0/0' 77 | GatewayId: !Ref 'InternetGateway' 78 | PublicSubnetOneRouteTableAssociation: 79 | Type: AWS::EC2::SubnetRouteTableAssociation 80 | Properties: 81 | SubnetId: !Ref PublicSubnetOne 82 | RouteTableId: !Ref PublicRouteTable 83 | PublicSubnetTwoRouteTableAssociation: 84 | Type: AWS::EC2::SubnetRouteTableAssociation 85 | Properties: 86 | SubnetId: !Ref PublicSubnetTwo 87 | RouteTableId: !Ref PublicRouteTable 88 | 89 
| # ECS Resources 90 | ECSCluster: 91 | Type: AWS::ECS::Cluster 92 | EcsSecurityGroup: 93 | Type: AWS::EC2::SecurityGroup 94 | Properties: 95 | GroupDescription: ECS Security Group 96 | VpcId: !Ref 'VPC' 97 | EcsSecurityGroupHTTPinbound: 98 | Type: AWS::EC2::SecurityGroupIngress 99 | Properties: 100 | GroupId: !Ref 'EcsSecurityGroup' 101 | IpProtocol: tcp 102 | FromPort: '80' 103 | ToPort: '80' 104 | CidrIp: 0.0.0.0/0 105 | EcsSecurityGroupSSHinbound: 106 | Type: AWS::EC2::SecurityGroupIngress 107 | Properties: 108 | GroupId: !Ref 'EcsSecurityGroup' 109 | IpProtocol: tcp 110 | FromPort: '22' 111 | ToPort: '22' 112 | CidrIp: 0.0.0.0/0 113 | EcsSecurityGroupALBports: 114 | Type: AWS::EC2::SecurityGroupIngress 115 | Properties: 116 | GroupId: !Ref 'EcsSecurityGroup' 117 | IpProtocol: tcp 118 | FromPort: '31000' 119 | ToPort: '61000' 120 | SourceSecurityGroupId: !Ref 'EcsSecurityGroup' 121 | CloudwatchLogsGroup: 122 | Type: AWS::Logs::LogGroup 123 | Properties: 124 | LogGroupName: !Join ['-', [ECSLogGroup, !Ref 'AWS::StackName']] 125 | RetentionInDays: 14 126 | ECSALB: 127 | Type: AWS::ElasticLoadBalancingV2::LoadBalancer 128 | Properties: 129 | Scheme: internet-facing 130 | LoadBalancerAttributes: 131 | - Key: idle_timeout.timeout_seconds 132 | Value: '30' 133 | Subnets: 134 | - !Ref PublicSubnetOne 135 | - !Ref PublicSubnetTwo 136 | SecurityGroups: [!Ref 'EcsSecurityGroup'] 137 | ECSAutoScalingGroup: 138 | Type: AWS::AutoScaling::AutoScalingGroup 139 | Properties: 140 | VPCZoneIdentifier: 141 | - !Ref PublicSubnetOne 142 | - !Ref PublicSubnetTwo 143 | LaunchConfigurationName: !Ref 'ContainerInstances' 144 | MinSize: '1' 145 | MaxSize: !Ref 'MaxSize' 146 | DesiredCapacity: !Ref 'DesiredCapacity' 147 | CreationPolicy: 148 | ResourceSignal: 149 | Timeout: PT15M 150 | UpdatePolicy: 151 | AutoScalingReplacingUpdate: 152 | WillReplace: 'true' 153 | ContainerInstances: 154 | Type: AWS::AutoScaling::LaunchConfiguration 155 | Properties: 156 | ImageId: !Ref ECSAMI 157 | 
SecurityGroups: [!Ref 'EcsSecurityGroup'] 158 | InstanceType: !Ref 'InstanceType' 159 | IamInstanceProfile: !Ref 'EC2InstanceProfile' 160 | UserData: 161 | Fn::Base64: !Sub | 162 | #!/bin/bash -xe 163 | echo ECS_CLUSTER=${ECSCluster} >> /etc/ecs/ecs.config 164 | yum install -y aws-cfn-bootstrap 165 | /opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackName} --resource ECSAutoScalingGroup --region ${AWS::Region} 166 | ECSServiceRole: 167 | Type: AWS::IAM::Role 168 | Properties: 169 | AssumeRolePolicyDocument: 170 | Statement: 171 | - Effect: Allow 172 | Principal: 173 | Service: [ecs.amazonaws.com] 174 | Action: ['sts:AssumeRole'] 175 | Path: / 176 | Policies: 177 | - PolicyName: ecs-service 178 | PolicyDocument: 179 | Statement: 180 | - Effect: Allow 181 | Action: 182 | - 'elasticloadbalancing:DeregisterInstancesFromLoadBalancer' 183 | - 'elasticloadbalancing:DeregisterTargets' 184 | - 'elasticloadbalancing:Describe*' 185 | - 'elasticloadbalancing:RegisterInstancesWithLoadBalancer' 186 | - 'elasticloadbalancing:RegisterTargets' 187 | - 'ec2:Describe*' 188 | - 'ec2:AuthorizeSecurityGroupIngress' 189 | Resource: '*' 190 | EC2Role: 191 | Type: AWS::IAM::Role 192 | Properties: 193 | AssumeRolePolicyDocument: 194 | Statement: 195 | - Effect: Allow 196 | Principal: 197 | Service: [ec2.amazonaws.com] 198 | Action: ['sts:AssumeRole'] 199 | Path: / 200 | Policies: 201 | - PolicyName: ecs-service 202 | PolicyDocument: 203 | Statement: 204 | - Effect: Allow 205 | Action: 206 | - 'ecs:CreateCluster' 207 | - 'ecs:DeregisterContainerInstance' 208 | - 'ecs:DiscoverPollEndpoint' 209 | - 'ecs:Poll' 210 | - 'ecs:RegisterContainerInstance' 211 | - 'ecs:StartTelemetrySession' 212 | - 'ecs:Submit*' 213 | - 'logs:CreateLogStream' 214 | - 'logs:PutLogEvents' 215 | - 'ecr:GetAuthorizationToken' 216 | - 'ecr:BatchGetImage' 217 | - 'ecr:GetDownloadUrlForLayer' 218 | Resource: '*' 219 | AutoscalingRole: 220 | Type: AWS::IAM::Role 221 | Properties: 222 | AssumeRolePolicyDocument: 223 | 
Statement: 224 | - Effect: Allow 225 | Principal: 226 | Service: [application-autoscaling.amazonaws.com] 227 | Action: ['sts:AssumeRole'] 228 | Path: / 229 | Policies: 230 | - PolicyName: service-autoscaling 231 | PolicyDocument: 232 | Statement: 233 | - Effect: Allow 234 | Action: 235 | - 'application-autoscaling:*' 236 | - 'cloudwatch:DescribeAlarms' 237 | - 'cloudwatch:PutMetricAlarm' 238 | - 'ecs:DescribeServices' 239 | - 'ecs:UpdateService' 240 | Resource: '*' 241 | EC2InstanceProfile: 242 | Type: AWS::IAM::InstanceProfile 243 | Properties: 244 | Path: / 245 | Roles: [!Ref 'EC2Role'] 246 | 247 | Outputs: 248 | ClusterName: 249 | Description: The name of the ECS cluster, used by the deploy script 250 | Value: !Ref 'ECSCluster' 251 | Export: 252 | Name: !Join [':', [!Ref "AWS::StackName", "ClusterName" ]] 253 | Url: 254 | Description: The url at which the application is available 255 | Value: !Join ['', [!GetAtt 'ECSALB.DNSName']] 256 | ALBArn: 257 | Description: The ARN of the ALB, exported for later use in creating services 258 | Value: !Ref 'ECSALB' 259 | Export: 260 | Name: !Join [':', [!Ref "AWS::StackName", "ALBArn" ]] 261 | ECSRole: 262 | Description: The ARN of the ECS role, exports for later use in creating services 263 | Value: !GetAtt 'ECSServiceRole.Arn' 264 | Export: 265 | Name: !Join [':', [!Ref "AWS::StackName", "ECSRole" ]] 266 | VPCId: 267 | Description: The ID of the VPC that this stack is deployed in 268 | Value: !Ref 'VPC' 269 | Export: 270 | Name: !Join [':', [!Ref "AWS::StackName", "VPCId" ]] 271 | -------------------------------------------------------------------------------- /2-containerized/services/api/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mhart/alpine-node:7.10.1 2 | 3 | WORKDIR /srv 4 | ADD . . 
5 | RUN npm install 6 | 7 | EXPOSE 3000 8 | CMD ["node", "server.js"] 9 | -------------------------------------------------------------------------------- /2-containerized/services/api/db.json: -------------------------------------------------------------------------------- 1 | { 2 | "users": [ 3 | { 4 | "id": 1, 5 | "username": "marceline", 6 | "name": "Marceline Abadeer", 7 | "bio": "1000 year old vampire queen, musician" 8 | }, 9 | { 10 | "id": 2, 11 | "username": "finn", 12 | "name": "Finn 'the Human' Mertens", 13 | "bio": "Adventurer and hero, last human, defender of good" 14 | }, 15 | { 16 | "id": 3, 17 | "username": "pb", 18 | "name": "Bonnibel Bubblegum", 19 | "bio": "Scientist, bearer of candy power, ruler of the candy kingdom" 20 | }, 21 | { 22 | "id": 4, 23 | "username": "jake", 24 | "name": "Jake the Dog", 25 | "bio": "Former criminal, now magical dog adventurer, and father" 26 | } 27 | ], 28 | 29 | "threads": [ 30 | { 31 | "id": 1, 32 | "title": "What's up with the Lich?", 33 | "createdBy": 4 34 | }, 35 | { 36 | "id": 2, 37 | "title": "Party at the candy kingdom tomorrow", 38 | "createdBy": 3 39 | }, 40 | { 41 | "id": 3, 42 | "title": "In search of a new guitar", 43 | "createdBy": 1 44 | } 45 | ], 46 | 47 | "posts": [ 48 | { 49 | "thread": 1, 50 | "text": "Has anyone checked on the lich recently?", 51 | "user": 4 52 | }, 53 | { 54 | "thread": 1, 55 | "text": "I'll stop by and see how he's doing tomorrow!", 56 | "user": 2 57 | }, 58 | { 59 | "thread": 2, 60 | "text": "Come party with the candy people tomorrow!", 61 | "user": 3 62 | }, 63 | { 64 | "thread": 2, 65 | "text": "Mathematical!", 66 | "user": 2 67 | }, 68 | { 69 | "thread": 2, 70 | "text": "I'll bring my guitar", 71 | "user": 1 72 | }, 73 | { 74 | "thread": 3, 75 | "text": "I need a new guitar to play the most savory licks in Ooo", 76 | "user": 1 77 | } 78 | ] 79 | } 80 | -------------------------------------------------------------------------------- /2-containerized/services/api/package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "koa": "^1.2.5", 4 | "koa-router": "^5.4.0" 5 | }, 6 | "scripts": { 7 | "start": "node server.js" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /2-containerized/services/api/rule.json: -------------------------------------------------------------------------------- 1 | { 2 | "ListenerArn": "placeholder", 3 | "Conditions": [ 4 | { 5 | "Field": "path-pattern", 6 | "Values": [ 7 | "/api*" 8 | ] 9 | } 10 | ], 11 | "Priority": 99, 12 | "Actions": [ 13 | { 14 | "Type": "forward", 15 | "TargetGroupArn": "placeholder" 16 | } 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /2-containerized/services/api/server.js: -------------------------------------------------------------------------------- 1 | const app = require('koa')(); 2 | const router = require('koa-router')(); 3 | const db = require('./db.json'); 4 | 5 | // Log requests 6 | app.use(function *(next){ 7 | const start = new Date; 8 | yield next; 9 | const ms = new Date - start; 10 | console.log('%s %s - %s', this.method, this.url, ms); 11 | }); 12 | 13 | router.get('/api/users', function *(next) { 14 | this.body = db.users; 15 | }); 16 | 17 | router.get('/api/users/:userId', function *(next) { 18 | const id = parseInt(this.params.userId); 19 | this.body = db.users.find((user) => user.id == id); 20 | }); 21 | 22 | router.get('/api/threads', function *() { 23 | this.body = db.threads; 24 | }); 25 | 26 | router.get('/api/threads/:threadId', function *() { 27 | const id = parseInt(this.params.threadId); 28 | this.body = db.threads.find((thread) => thread.id == id); 29 | }); 30 | 31 | router.get('/api/posts', function *() { 32 | this.body = db.posts; 33 | }); 34 | 35 | router.get('/api/posts/in-thread/:threadId', function *() { 36 | const id = parseInt(this.params.threadId); 37 | this.body = 
db.posts.filter((post) => post.thread == id); 38 | }); 39 | 40 | router.get('/api/posts/by-user/:userId', function *() { 41 | const id = parseInt(this.params.userId); 42 | this.body = db.posts.filter((post) => post.user == id); 43 | }); 44 | 45 | router.get('/api/', function *() { 46 | this.body = "API ready to receive requests"; 47 | }); 48 | 49 | router.get('/', function *() { 50 | this.body = "Ready to receive requests"; 51 | }); 52 | 53 | app.use(router.routes()); 54 | app.use(router.allowedMethods()); 55 | 56 | app.listen(3000); 57 | 58 | console.log('Worker started'); 59 | -------------------------------------------------------------------------------- /3-microservices/README.md: -------------------------------------------------------------------------------- 1 | ## From Monolith To Microservices 2 | 3 | In this example we take our monolithic application deployed on ECS and split it up into microservices. 4 | 5 | ![Reference architecture of microservices on EC2 Container Service](../images/microservice-containers.png) 6 | 7 | ## Why Microservices? 8 | 9 | __Isolation of crashes:__ Even the best engineering organizations can and do have fatal crashes in production. In addition to following all the standard best practices for handling crashes gracefully, one approach that can limit the impact of such crashes is building microservices. Good microservice architecture means that if one micro piece of your service is crashing then only that part of your service will go down. The rest of your service can continue to work properly. 10 | 11 | __Isolation for security:__ In a monolithic application if one feature of the application has a security breach, for example a vulnerability that allows remote code execution then you must assume that an attacker could have gained access to every other feature of the system as well. This can be dangerous if for example your avatar upload feature has a security issue which ends up compromising your database with user passwords. 
Separating out your features into microservices using EC2 Container Service allows you to lock down access to AWS resources by giving each service its own IAM role. When microservice best practices are followed the result is that if an attacker compromises one service they only gain access to the resources of that service, and can't horizontally access other resources from other services without breaking into those services as well. 12 | 13 | __Independent scaling:__ When features are broken out into microservices then the amount of infrastructure and number of instances of each microservice class can be scaled up and down independently. This makes it easier to measure the infrastructure cost of a particular feature, identify features that may need to be optimized first, as well as keep performance reliable for other features if one particular feature is going out of control on its resource needs. 14 | 15 | __Development velocity__: Microservices can enable a team to build faster by lowering the risk of development. In a monolith adding a new feature can potentially impact every other feature that the monolith contains. Developers must carefully consider the impact of any code they add, and ensure that they don't break anything. On the other hand a proper microservice architecture has new code for a new feature going into a new service. Developers can be confident that any code they write will actually not be able to impact the existing code at all unless they explicitly write a connection between two microservices.
16 | 17 | ## Application Changes for Microservices 18 | 19 | __Define microservice boundaries:__ Defining the boundaries for services is specific to your application's design, but for this REST API one fairly clear approach to breaking it up is to make one service for each of the top level classes of objects that the API serves: 20 | 21 | ``` 22 | /api/users/* -> A service for all user related REST paths 23 | /api/posts/* -> A service for all post related REST paths 24 | /api/threads/* -> A service for all thread related REST paths 25 | ``` 26 | 27 | So each service will only serve one particular class of REST object, and nothing else. This will give us some significant advantages in our ability to independently monitor and independently scale each service. 28 | 29 | __Stitching microservices together:__ Once we have created three separate microservices we need a way to stitch these separate services back together into one API that we can expose to clients. This is where Amazon Application Load Balancer (ALB) comes in. We can create rules on the ALB that direct requests that match a specific path to a specific service. The ALB looks like one API to clients and they don't need to even know that there are multiple microservices working together behind the scenes. 30 | 31 | __Chipping away slowly:__ It is not always possible to fully break apart a monolithic service in one go as it is with this simple example. If our monolith was too complicated to break apart all at once we can still use ALB to redirect just a subset of the traffic from the monolithic service out to a microservice. The rest of the traffic would continue on to the monolith exactly as it did before. 32 | 33 | Once we have verified this new microservice works we can remove the old code paths that are no longer being executed in the monolith. Whenever ready, repeat the process by splitting another small portion of the code out into a new service.
In this way even very complicated monoliths can be gradually broken apart in a safe manner that will not risk existing features. 34 | 35 | ## Deployment 36 | 37 | 1. Launch an ECS cluster using the Cloudformation template: 38 | 39 | ``` 40 | $ aws cloudformation deploy \ 41 | --template-file infrastructure/ecs.yml \ 42 | --region \ 43 | --stack-name \ 44 | --capabilities CAPABILITY_NAMED_IAM 45 | ``` 46 | 47 | 2. Deploy the services onto your cluster: 48 | 49 | ``` 50 | $ ./deploy.sh 51 | ``` 52 | -------------------------------------------------------------------------------- /3-microservices/deploy.sh: -------------------------------------------------------------------------------- 1 | REGION=$1 2 | STACK_NAME=$2 3 | 4 | DEPLOYABLE_SERVICES=( 5 | users 6 | threads 7 | posts 8 | ); 9 | 10 | PRIMARY='\033[0;34m' 11 | NC='\033[0m' # No Color 12 | 13 | # Fetch the stack metadata for use later 14 | printf "${PRIMARY}* Fetching current stack state${NC}\n"; 15 | 16 | QUERY=$(cat <<-EOF 17 | [ 18 | Stacks[0].Outputs[?OutputKey==\`ClusterName\`].OutputValue, 19 | Stacks[0].Outputs[?OutputKey==\`ALBArn\`].OutputValue, 20 | Stacks[0].Outputs[?OutputKey==\`ECSRole\`].OutputValue, 21 | Stacks[0].Outputs[?OutputKey==\`Url\`].OutputValue, 22 | Stacks[0].Outputs[?OutputKey==\`VPCId\`].OutputValue 23 | ] 24 | EOF) 25 | 26 | RESULTS=$(aws cloudformation describe-stacks \ 27 | --stack-name $STACK_NAME \ 28 | --region $REGION \ 29 | --query "$QUERY" \ 30 | --output text); 31 | RESULTS_ARRAY=($RESULTS) 32 | 33 | CLUSTER_NAME=${RESULTS_ARRAY[0]} 34 | ALB_ARN=${RESULTS_ARRAY[1]} 35 | ECS_ROLE=${RESULTS_ARRAY[2]} 36 | URL=${RESULTS_ARRAY[3]} 37 | VPCID=${RESULTS_ARRAY[4]} 38 | 39 | printf "${PRIMARY}* Authenticating with EC2 Container Repository${NC}\n"; 40 | 41 | `aws ecr get-login --region $REGION --no-include-email` 42 | 43 | # Tag for versioning the container images, currently set to timestamp 44 | TAG=`date +%s` 45 | 46 | for SERVICE_NAME in "${DEPLOYABLE_SERVICES[@]}" 47 | do 48 | 
printf "${PRIMARY}* Locating the ECR repository for service \`${SERVICE_NAME}\`${NC}\n"; 49 | 50 | # Find the ECR repo to push to 51 | REPO=`aws ecr describe-repositories \ 52 | --region $REGION \ 53 | --repository-names "$SERVICE_NAME" \ 54 | --query "repositories[0].repositoryUri" \ 55 | --output text` 56 | 57 | if [ "$?" != "0" ]; then 58 | # The repository was not found, create it 59 | printf "${PRIMARY}* Creating new ECR repository for service \`${SERVICE_NAME}\`${NC}\n"; 60 | 61 | REPO=`aws ecr create-repository \ 62 | --region $REGION \ 63 | --repository-name "$SERVICE_NAME" \ 64 | --query "repository.repositoryUri" \ 65 | --output text` 66 | fi 67 | 68 | printf "${PRIMARY}* Building \`${SERVICE_NAME}\`${NC}\n"; 69 | 70 | # Build the container, and assign a tag to it for versioning 71 | (cd services/$SERVICE_NAME && npm install); 72 | docker build -t $SERVICE_NAME ./services/$SERVICE_NAME 73 | docker tag $SERVICE_NAME:latest $REPO:$TAG 74 | 75 | # Push the tag up so we can make a task definition for deploying it 76 | printf "${PRIMARY}* Pushing \`${SERVICE_NAME}\`${NC}\n"; 77 | 78 | docker push $REPO:$TAG 79 | 80 | printf "${PRIMARY}* Creating new task definition for \`${SERVICE_NAME}\`${NC}\n"; 81 | 82 | # Build an create the task definition for the container we just pushed 83 | CONTAINER_DEFINITIONS=$(cat <<-EOF 84 | [{ 85 | "name": "$SERVICE_NAME", 86 | "image": "$REPO:$TAG", 87 | "cpu": 256, 88 | "memory": 256, 89 | "portMappings": [{ 90 | "containerPort": 3000, 91 | "hostPort": 0 92 | }], 93 | "essential": true 94 | }] 95 | EOF) 96 | 97 | TASK_DEFINITION_ARN=`aws ecs register-task-definition \ 98 | --region $REGION \ 99 | --family $SERVICE_NAME \ 100 | --container-definitions "$CONTAINER_DEFINITIONS" \ 101 | --query "taskDefinition.taskDefinitionArn" \ 102 | --output text` 103 | 104 | # Ensure that the service exists in ECS 105 | STATUS=`aws ecs describe-services \ 106 | --region $REGION \ 107 | --cluster $CLUSTER_NAME \ 108 | --services $SERVICE_NAME \ 
109 | --query "services[0].status" \ 110 | --output text` 111 | 112 | if [ "$STATUS" != "ACTIVE" ]; then 113 | # New service that needs to be deployed because it hasn't 114 | # been created yet. 115 | if [ -e "./services/$SERVICE_NAME/rule.json" ]; then 116 | # If this service has a rule setup for routing traffic to the service, then 117 | # create a target group for the service, and a rule on the ELB for routing 118 | # traffic to the target group. 119 | printf "${PRIMARY}* Setting up web facing service \`${SERVICE_NAME}\`${NC}\n"; 120 | printf "${PRIMARY}* Creating target group for service \`${SERVICE_NAME}\`${NC}\n"; 121 | 122 | TARGET_GROUP_ARN=`aws elbv2 create-target-group \ 123 | --region $REGION \ 124 | --name $SERVICE_NAME \ 125 | --vpc-id $VPCID \ 126 | --port 80 \ 127 | --protocol HTTP \ 128 | --health-check-protocol HTTP \ 129 | --health-check-path / \ 130 | --health-check-interval-seconds 6 \ 131 | --health-check-timeout-seconds 5 \ 132 | --healthy-threshold-count 2 \ 133 | --unhealthy-threshold-count 2 \ 134 | --query "TargetGroups[0].TargetGroupArn" \ 135 | --output text` 136 | 137 | printf "${PRIMARY}* Locating load balancer listener \`${SERVICE_NAME}\`${NC}\n"; 138 | 139 | LISTENER_ARN=`aws elbv2 describe-listeners \ 140 | --region $REGION \ 141 | --load-balancer-arn $ALB_ARN \ 142 | --query "Listeners[0].ListenerArn" \ 143 | --output text` 144 | 145 | if [ "$LISTENER_ARN" == "None" ]; then 146 | printf "${PRIMARY}* Creating listener for load balancer${NC}\n"; 147 | 148 | LISTENER_ARN=`aws elbv2 create-listener \ 149 | --region $REGION \ 150 | --load-balancer-arn $ALB_ARN \ 151 | --port 80 \ 152 | --protocol HTTP \ 153 | --query "Listeners[0].ListenerArn" \ 154 | --default-actions Type=forward,TargetGroupArn=$TARGET_GROUP_ARN \ 155 | --output text` 156 | fi 157 | 158 | printf "${PRIMARY}* Adding rule to load balancer listener \`${SERVICE_NAME}\`${NC}\n"; 159 | 160 | # Manipulate the template to customize it with the target group and listener 161 | 
RULE_DOC=`cat ./services/$SERVICE_NAME/rule.json | 162 | jq ".ListenerArn=\"$LISTENER_ARN\" | .Actions[0].TargetGroupArn=\"$TARGET_GROUP_ARN\""` 163 | 164 | aws elbv2 create-rule \ 165 | --region $REGION \ 166 | --cli-input-json "$RULE_DOC" 167 | 168 | printf "${PRIMARY}* Creating new web facing service \`${SERVICE_NAME}\`${NC}\n"; 169 | 170 | LOAD_BALANCERS=$(cat <<-EOF 171 | [{ 172 | "targetGroupArn": "$TARGET_GROUP_ARN", 173 | "containerName": "$SERVICE_NAME", 174 | "containerPort": 3000 175 | }] 176 | EOF) 177 | 178 | RESULT=`aws ecs create-service \ 179 | --region $REGION \ 180 | --cluster $CLUSTER_NAME \ 181 | --load-balancers "$LOAD_BALANCERS" \ 182 | --service-name $SERVICE_NAME \ 183 | --role $ECS_ROLE \ 184 | --task-definition $TASK_DEFINITION_ARN \ 185 | --desired-count 1` 186 | else 187 | # This service doesn't have a web interface, just create it without load balancer settings 188 | printf "${PRIMARY}* Creating new background service \`${SERVICE_NAME}\`${NC}\n"; 189 | RESULT=`aws ecs create-service \ 190 | --region $REGION \ 191 | --cluster $CLUSTER_NAME \ 192 | --service-name $SERVICE_NAME \ 193 | --task-definition $TASK_DEFINITION_ARN \ 194 | --desired-count 1` 195 | fi 196 | else 197 | # The service already existed, just update the existing service. 
198 | printf "${PRIMARY}* Updating service \`${SERVICE_NAME}\` with task definition \`${TASK_DEFINITION_ARN}\`${NC}\n"; 199 | RESULT=`aws ecs update-service \ 200 | --region $REGION \ 201 | --cluster $CLUSTER_NAME \ 202 | --service $SERVICE_NAME \ 203 | --task-definition $TASK_DEFINITION_ARN` 204 | fi 205 | done 206 | 207 | printf "${PRIMARY}* Done, application is at: http://${URL}${NC}\n"; 208 | printf "${PRIMARY}* (It may take a minute for the container to register as healthy and begin receiving traffic.)${NC}\n"; 209 | -------------------------------------------------------------------------------- /3-microservices/infrastructure/ecs.yml: -------------------------------------------------------------------------------- 1 | AWSTemplateFormatVersion: '2010-09-09' 2 | Parameters: 3 | DesiredCapacity: 4 | Type: Number 5 | Default: '2' 6 | Description: Number of instances to launch in your ECS cluster. 7 | MaxSize: 8 | Type: Number 9 | Default: '2' 10 | Description: Maximum number of instances that can be launched in your ECS cluster. 11 | InstanceType: 12 | Description: EC2 instance type 13 | Type: String 14 | Default: t2.micro 15 | AllowedValues: [t2.micro, t2.small, t2.medium, t2.large, t2.xlarge, t3.micro, t3.small, 16 | t3.medium, t3.large, t3.xlarge, m4.large, m4.xlarge, m5.large, m5.xlarge, c4.large, 17 | c4.xlarge, c5.large, c5.xlarge, r4.large, r4.xlarge, r5.large, r5.xlarge, i3.large, 18 | i3.xlarge] 19 | ConstraintDescription: Please choose a valid instance type. 
20 | ECSAMI: 21 | Description: AMI ID 22 | Type: AWS::SSM::Parameter::Value 23 | Default: /aws/service/ecs/optimized-ami/amazon-linux/recommended/image_id 24 | Mappings: 25 | SubnetConfig: 26 | VPC: 27 | CIDR: '10.0.0.0/16' 28 | PublicOne: 29 | CIDR: '10.0.0.0/24' 30 | PublicTwo: 31 | CIDR: '10.0.1.0/24' 32 | Resources: 33 | # VPC into which stack instances will be placed 34 | VPC: 35 | Type: AWS::EC2::VPC 36 | Properties: 37 | EnableDnsSupport: true 38 | EnableDnsHostnames: true 39 | CidrBlock: !FindInMap ['SubnetConfig', 'VPC', 'CIDR'] 40 | PublicSubnetOne: 41 | Type: AWS::EC2::Subnet 42 | Properties: 43 | AvailabilityZone: 44 | Fn::Select: 45 | - 0 46 | - Fn::GetAZs: {Ref: 'AWS::Region'} 47 | VpcId: !Ref 'VPC' 48 | CidrBlock: !FindInMap ['SubnetConfig', 'PublicOne', 'CIDR'] 49 | MapPublicIpOnLaunch: true 50 | PublicSubnetTwo: 51 | Type: AWS::EC2::Subnet 52 | Properties: 53 | AvailabilityZone: 54 | Fn::Select: 55 | - 1 56 | - Fn::GetAZs: {Ref: 'AWS::Region'} 57 | VpcId: !Ref 'VPC' 58 | CidrBlock: !FindInMap ['SubnetConfig', 'PublicTwo', 'CIDR'] 59 | MapPublicIpOnLaunch: true 60 | InternetGateway: 61 | Type: AWS::EC2::InternetGateway 62 | GatewayAttachement: 63 | Type: AWS::EC2::VPCGatewayAttachment 64 | Properties: 65 | VpcId: !Ref 'VPC' 66 | InternetGatewayId: !Ref 'InternetGateway' 67 | PublicRouteTable: 68 | Type: AWS::EC2::RouteTable 69 | Properties: 70 | VpcId: !Ref 'VPC' 71 | PublicRoute: 72 | Type: AWS::EC2::Route 73 | DependsOn: GatewayAttachement 74 | Properties: 75 | RouteTableId: !Ref 'PublicRouteTable' 76 | DestinationCidrBlock: '0.0.0.0/0' 77 | GatewayId: !Ref 'InternetGateway' 78 | PublicSubnetOneRouteTableAssociation: 79 | Type: AWS::EC2::SubnetRouteTableAssociation 80 | Properties: 81 | SubnetId: !Ref PublicSubnetOne 82 | RouteTableId: !Ref PublicRouteTable 83 | PublicSubnetTwoRouteTableAssociation: 84 | Type: AWS::EC2::SubnetRouteTableAssociation 85 | Properties: 86 | SubnetId: !Ref PublicSubnetTwo 87 | RouteTableId: !Ref PublicRouteTable 88 | 89 
| # ECS Resources 90 | ECSCluster: 91 | Type: AWS::ECS::Cluster 92 | EcsSecurityGroup: 93 | Type: AWS::EC2::SecurityGroup 94 | Properties: 95 | GroupDescription: ECS Security Group 96 | VpcId: !Ref 'VPC' 97 | EcsSecurityGroupHTTPinbound: 98 | Type: AWS::EC2::SecurityGroupIngress 99 | Properties: 100 | GroupId: !Ref 'EcsSecurityGroup' 101 | IpProtocol: tcp 102 | FromPort: '80' 103 | ToPort: '80' 104 | CidrIp: 0.0.0.0/0 105 | EcsSecurityGroupSSHinbound: 106 | Type: AWS::EC2::SecurityGroupIngress 107 | Properties: 108 | GroupId: !Ref 'EcsSecurityGroup' 109 | IpProtocol: tcp 110 | FromPort: '22' 111 | ToPort: '22' 112 | CidrIp: 0.0.0.0/0 113 | EcsSecurityGroupALBports: 114 | Type: AWS::EC2::SecurityGroupIngress 115 | Properties: 116 | GroupId: !Ref 'EcsSecurityGroup' 117 | IpProtocol: tcp 118 | FromPort: '31000' 119 | ToPort: '61000' 120 | SourceSecurityGroupId: !Ref 'EcsSecurityGroup' 121 | CloudwatchLogsGroup: 122 | Type: AWS::Logs::LogGroup 123 | Properties: 124 | LogGroupName: !Join ['-', [ECSLogGroup, !Ref 'AWS::StackName']] 125 | RetentionInDays: 14 126 | ECSALB: 127 | Type: AWS::ElasticLoadBalancingV2::LoadBalancer 128 | Properties: 129 | Scheme: internet-facing 130 | LoadBalancerAttributes: 131 | - Key: idle_timeout.timeout_seconds 132 | Value: '30' 133 | Subnets: 134 | - !Ref PublicSubnetOne 135 | - !Ref PublicSubnetTwo 136 | SecurityGroups: [!Ref 'EcsSecurityGroup'] 137 | ECSAutoScalingGroup: 138 | Type: AWS::AutoScaling::AutoScalingGroup 139 | Properties: 140 | VPCZoneIdentifier: 141 | - !Ref PublicSubnetOne 142 | - !Ref PublicSubnetTwo 143 | LaunchConfigurationName: !Ref 'ContainerInstances' 144 | MinSize: '1' 145 | MaxSize: !Ref 'MaxSize' 146 | DesiredCapacity: !Ref 'DesiredCapacity' 147 | CreationPolicy: 148 | ResourceSignal: 149 | Timeout: PT15M 150 | UpdatePolicy: 151 | AutoScalingReplacingUpdate: 152 | WillReplace: 'true' 153 | ContainerInstances: 154 | Type: AWS::AutoScaling::LaunchConfiguration 155 | Properties: 156 | ImageId: !Ref ECSAMI 157 | 
SecurityGroups: [!Ref 'EcsSecurityGroup'] 158 | InstanceType: !Ref 'InstanceType' 159 | IamInstanceProfile: !Ref 'EC2InstanceProfile' 160 | UserData: 161 | Fn::Base64: !Sub | 162 | #!/bin/bash -xe 163 | echo ECS_CLUSTER=${ECSCluster} >> /etc/ecs/ecs.config 164 | yum install -y aws-cfn-bootstrap 165 | /opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackName} --resource ECSAutoScalingGroup --region ${AWS::Region} 166 | ECSServiceRole: 167 | Type: AWS::IAM::Role 168 | Properties: 169 | AssumeRolePolicyDocument: 170 | Statement: 171 | - Effect: Allow 172 | Principal: 173 | Service: [ecs.amazonaws.com] 174 | Action: ['sts:AssumeRole'] 175 | Path: / 176 | Policies: 177 | - PolicyName: ecs-service 178 | PolicyDocument: 179 | Statement: 180 | - Effect: Allow 181 | Action: 182 | - 'elasticloadbalancing:DeregisterInstancesFromLoadBalancer' 183 | - 'elasticloadbalancing:DeregisterTargets' 184 | - 'elasticloadbalancing:Describe*' 185 | - 'elasticloadbalancing:RegisterInstancesWithLoadBalancer' 186 | - 'elasticloadbalancing:RegisterTargets' 187 | - 'ec2:Describe*' 188 | - 'ec2:AuthorizeSecurityGroupIngress' 189 | Resource: '*' 190 | EC2Role: 191 | Type: AWS::IAM::Role 192 | Properties: 193 | AssumeRolePolicyDocument: 194 | Statement: 195 | - Effect: Allow 196 | Principal: 197 | Service: [ec2.amazonaws.com] 198 | Action: ['sts:AssumeRole'] 199 | Path: / 200 | Policies: 201 | - PolicyName: ecs-service 202 | PolicyDocument: 203 | Statement: 204 | - Effect: Allow 205 | Action: 206 | - 'ecs:CreateCluster' 207 | - 'ecs:DeregisterContainerInstance' 208 | - 'ecs:DiscoverPollEndpoint' 209 | - 'ecs:Poll' 210 | - 'ecs:RegisterContainerInstance' 211 | - 'ecs:StartTelemetrySession' 212 | - 'ecs:Submit*' 213 | - 'logs:CreateLogStream' 214 | - 'logs:PutLogEvents' 215 | - 'ecr:GetAuthorizationToken' 216 | - 'ecr:BatchGetImage' 217 | - 'ecr:GetDownloadUrlForLayer' 218 | Resource: '*' 219 | AutoscalingRole: 220 | Type: AWS::IAM::Role 221 | Properties: 222 | AssumeRolePolicyDocument: 223 | 
Statement: 224 | - Effect: Allow 225 | Principal: 226 | Service: [application-autoscaling.amazonaws.com] 227 | Action: ['sts:AssumeRole'] 228 | Path: / 229 | Policies: 230 | - PolicyName: service-autoscaling 231 | PolicyDocument: 232 | Statement: 233 | - Effect: Allow 234 | Action: 235 | - 'application-autoscaling:*' 236 | - 'cloudwatch:DescribeAlarms' 237 | - 'cloudwatch:PutMetricAlarm' 238 | - 'ecs:DescribeServices' 239 | - 'ecs:UpdateService' 240 | Resource: '*' 241 | EC2InstanceProfile: 242 | Type: AWS::IAM::InstanceProfile 243 | Properties: 244 | Path: / 245 | Roles: [!Ref 'EC2Role'] 246 | 247 | Outputs: 248 | ClusterName: 249 | Description: The name of the ECS cluster, used by the deploy script 250 | Value: !Ref 'ECSCluster' 251 | Export: 252 | Name: !Join [':', [!Ref "AWS::StackName", "ClusterName" ]] 253 | Url: 254 | Description: The url at which the application is available 255 | Value: !Join ['', [!GetAtt 'ECSALB.DNSName']] 256 | ALBArn: 257 | Description: The ARN of the ALB, exported for later use in creating services 258 | Value: !Ref 'ECSALB' 259 | Export: 260 | Name: !Join [':', [!Ref "AWS::StackName", "ALBArn" ]] 261 | ECSRole: 262 | Description: The ARN of the ECS role, exports for later use in creating services 263 | Value: !GetAtt 'ECSServiceRole.Arn' 264 | Export: 265 | Name: !Join [':', [!Ref "AWS::StackName", "ECSRole" ]] 266 | VPCId: 267 | Description: The ID of the VPC that this stack is deployed in 268 | Value: !Ref 'VPC' 269 | Export: 270 | Name: !Join [':', [!Ref "AWS::StackName", "VPCId" ]] 271 | -------------------------------------------------------------------------------- /3-microservices/services/posts/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mhart/alpine-node:7.10.1 2 | 3 | WORKDIR /srv 4 | ADD . . 
5 | RUN npm install 6 | 7 | EXPOSE 3000 8 | CMD ["node", "server.js"] 9 | -------------------------------------------------------------------------------- /3-microservices/services/posts/db.json: -------------------------------------------------------------------------------- 1 | { 2 | "posts": [ 3 | { 4 | "thread": 1, 5 | "text": "Has anyone checked on the lich recently?", 6 | "user": 4 7 | }, 8 | { 9 | "thread": 1, 10 | "text": "I'll stop by and see how he's doing tomorrow!", 11 | "user": 2 12 | }, 13 | { 14 | "thread": 2, 15 | "text": "Come party with the candy people tomorrow!", 16 | "user": 3 17 | }, 18 | { 19 | "thread": 2, 20 | "text": "Mathematical!", 21 | "user": 2 22 | }, 23 | { 24 | "thread": 2, 25 | "text": "I'll bring my guitar", 26 | "user": 1 27 | }, 28 | { 29 | "thread": 3, 30 | "text": "I need a new guitar to play the most savory licks in Ooo", 31 | "user": 1 32 | } 33 | ] 34 | } 35 | -------------------------------------------------------------------------------- /3-microservices/services/posts/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "koa": "^1.2.5", 4 | "koa-router": "^5.4.0" 5 | }, 6 | "scripts": { 7 | "start": "node server.js" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /3-microservices/services/posts/rule.json: -------------------------------------------------------------------------------- 1 | { 2 | "ListenerArn": "placeholder", 3 | "Conditions": [ 4 | { 5 | "Field": "path-pattern", 6 | "Values": [ 7 | "/api/posts*" 8 | ] 9 | } 10 | ], 11 | "Priority": 3, 12 | "Actions": [ 13 | { 14 | "Type": "forward", 15 | "TargetGroupArn": "placeholder" 16 | } 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /3-microservices/services/posts/server.js: -------------------------------------------------------------------------------- 1 | const app = require('koa')(); 2 | 
const router = require('koa-router')(); 3 | const db = require('./db.json'); 4 | 5 | // Log requests 6 | app.use(function *(next){ 7 | const start = new Date; 8 | yield next; 9 | const ms = new Date - start; 10 | console.log('%s %s - %s', this.method, this.url, ms); 11 | }); 12 | 13 | router.get('/api/posts/in-thread/:threadId', function *() { 14 | const id = parseInt(this.params.threadId); 15 | this.body = db.posts.filter((post) => post.thread == id); 16 | }); 17 | 18 | router.get('/api/posts', function *() { 19 | this.body = db.posts; 20 | }); 21 | 22 | router.get('/api/posts/by-user/:userId', function *() { 23 | const id = parseInt(this.params.userId); 24 | this.body = db.posts.filter((post) => post.user == id); 25 | }); 26 | 27 | router.get('/api/', function *() { 28 | this.body = "API ready to receive requests"; 29 | }); 30 | 31 | router.get('/', function *() { 32 | this.body = "Ready to receive requests"; 33 | }); 34 | 35 | app.use(router.routes()); 36 | app.use(router.allowedMethods()); 37 | 38 | app.listen(3000); 39 | 40 | console.log('Worker started'); 41 | -------------------------------------------------------------------------------- /3-microservices/services/threads/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mhart/alpine-node:7.10.1 2 | 3 | WORKDIR /srv 4 | ADD . . 
5 | RUN npm install 6 | 7 | EXPOSE 3000 8 | CMD ["node", "server.js"] 9 | -------------------------------------------------------------------------------- /3-microservices/services/threads/db.json: -------------------------------------------------------------------------------- 1 | { 2 | "threads": [ 3 | { 4 | "id": 1, 5 | "title": "What's up with the Lich?", 6 | "createdBy": 4 7 | }, 8 | { 9 | "id": 2, 10 | "title": "Party at the candy kingdom tomorrow", 11 | "createdBy": 3 12 | }, 13 | { 14 | "id": 3, 15 | "title": "In search of a new guitar", 16 | "createdBy": 1 17 | } 18 | ] 19 | } 20 | -------------------------------------------------------------------------------- /3-microservices/services/threads/package.json: -------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "koa": "^1.2.5", 4 | "koa-router": "^5.4.0" 5 | }, 6 | "scripts": { 7 | "start": "node server.js" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /3-microservices/services/threads/rule.json: -------------------------------------------------------------------------------- 1 | { 2 | "ListenerArn": "placeholder", 3 | "Conditions": [ 4 | { 5 | "Field": "path-pattern", 6 | "Values": [ 7 | "/api/threads*" 8 | ] 9 | } 10 | ], 11 | "Priority": 2, 12 | "Actions": [ 13 | { 14 | "Type": "forward", 15 | "TargetGroupArn": "placeholder" 16 | } 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /3-microservices/services/threads/server.js: -------------------------------------------------------------------------------- 1 | const app = require('koa')(); 2 | const router = require('koa-router')(); 3 | const db = require('./db.json'); 4 | 5 | // Log requests 6 | app.use(function *(next){ 7 | const start = new Date; 8 | yield next; 9 | const ms = new Date - start; 10 | console.log('%s %s - %s', this.method, this.url, ms); 11 | }); 12 | 13 | 
router.get('/api/threads', function *() { 14 | this.body = db.threads; 15 | }); 16 | 17 | router.get('/api/threads/:threadId', function *() { 18 | const id = parseInt(this.params.threadId); 19 | this.body = db.threads.find((thread) => thread.id == id); 20 | }); 21 | 22 | router.get('/api/', function *() { 23 | this.body = "API ready to receive requests"; 24 | }); 25 | 26 | router.get('/', function *() { 27 | this.body = "Ready to receive requests"; 28 | }); 29 | 30 | app.use(router.routes()); 31 | app.use(router.allowedMethods()); 32 | 33 | app.listen(3000); 34 | 35 | console.log('Worker started'); 36 | -------------------------------------------------------------------------------- /3-microservices/services/users/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mhart/alpine-node:7.10.1 2 | 3 | WORKDIR /srv 4 | ADD . . 5 | RUN npm install 6 | 7 | EXPOSE 3000 8 | CMD ["node", "server.js"] 9 | -------------------------------------------------------------------------------- /3-microservices/services/users/db.json: -------------------------------------------------------------------------------- 1 | { 2 | "users": [ 3 | { 4 | "id": 1, 5 | "username": "marceline", 6 | "name": "Marceline Abadeer", 7 | "bio": "1000 year old vampire queen, musician" 8 | }, 9 | { 10 | "id": 2, 11 | "username": "finn", 12 | "name": "Finn 'the Human' Mertens", 13 | "bio": "Adventurer and hero, last human, defender of good" 14 | }, 15 | { 16 | "id": 3, 17 | "username": "pb", 18 | "name": "Bonnibel Bubblegum", 19 | "bio": "Scientist, bearer of candy power, ruler of the candy kingdom" 20 | }, 21 | { 22 | "id": 4, 23 | "username": "jake", 24 | "name": "Jake the Dog", 25 | "bio": "Former criminal, now magical dog adventurer, and father" 26 | } 27 | ] 28 | } 29 | -------------------------------------------------------------------------------- /3-microservices/services/users/package.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "dependencies": { 3 | "koa": "^1.2.5", 4 | "koa-router": "^5.4.0" 5 | }, 6 | "scripts": { 7 | "start": "node server.js" 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /3-microservices/services/users/rule.json: -------------------------------------------------------------------------------- 1 | { 2 | "ListenerArn": "placeholder", 3 | "Conditions": [ 4 | { 5 | "Field": "path-pattern", 6 | "Values": [ 7 | "/api/users*" 8 | ] 9 | } 10 | ], 11 | "Priority": 1, 12 | "Actions": [ 13 | { 14 | "Type": "forward", 15 | "TargetGroupArn": "placeholder" 16 | } 17 | ] 18 | } 19 | -------------------------------------------------------------------------------- /3-microservices/services/users/server.js: -------------------------------------------------------------------------------- 1 | const app = require('koa')(); 2 | const router = require('koa-router')(); 3 | const db = require('./db.json'); 4 | 5 | // Log requests 6 | app.use(function *(next){ 7 | const start = new Date; 8 | yield next; 9 | const ms = new Date - start; 10 | console.log('%s %s - %s', this.method, this.url, ms); 11 | }); 12 | 13 | router.get('/api/users', function *(next) { 14 | this.body = db.users; 15 | }); 16 | 17 | router.get('/api/users/:userId', function *(next) { 18 | const id = parseInt(this.params.userId); 19 | this.body = db.users.find((user) => user.id == id); 20 | }); 21 | 22 | router.get('/api/', function *() { 23 | this.body = "API ready to receive requests"; 24 | }); 25 | 26 | router.get('/', function *() { 27 | this.body = "Ready to receive requests"; 28 | }); 29 | 30 | app.use(router.routes()); 31 | app.use(router.allowedMethods()); 32 | 33 | app.listen(3000); 34 | 35 | console.log('Worker started'); 36 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: 
-------------------------------------------------------------------------------- 1 | ## Code of Conduct 2 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 3 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 4 | opensource-codeofconduct@amazon.com with any additional questions or comments. 5 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing Guidelines 2 | 3 | Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional 4 | documentation, we greatly value feedback and contributions from our community. 5 | 6 | Please read through this document before submitting any issues or pull requests to ensure we have all the necessary 7 | information to effectively respond to your bug report or contribution. 8 | 9 | 10 | ## Reporting Bugs/Feature Requests 11 | 12 | We welcome you to use the GitHub issue tracker to report bugs or suggest features. 13 | 14 | When filing an issue, please check [existing open](https://github.com/awslabs/amazon-ecs-nodejs-microservices/issues), or [recently closed](https://github.com/awslabs/amazon-ecs-nodejs-microservices/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already 15 | reported the issue. Please try to include as much information as you can. Details like these are incredibly useful: 16 | 17 | * A reproducible test case or series of steps 18 | * The version of our code being used 19 | * Any modifications you've made relevant to the bug 20 | * Anything unusual about your environment or deployment 21 | 22 | 23 | ## Contributing via Pull Requests 24 | Contributions via pull requests are much appreciated. 
Before sending us a pull request, please ensure that: 25 | 26 | 1. You are working against the latest source on the *master* branch. 27 | 2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already. 28 | 3. You open an issue to discuss any significant work - we would hate for your time to be wasted. 29 | 30 | To send us a pull request, please: 31 | 32 | 1. Fork the repository. 33 | 2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change. 34 | 3. Ensure local tests pass. 35 | 4. Commit to your fork using clear commit messages. 36 | 5. Send us a pull request, answering any default questions in the pull request interface. 37 | 6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation. 38 | 39 | GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and 40 | [creating a pull request](https://help.github.com/articles/creating-a-pull-request/). 41 | 42 | 43 | ## Finding contributions to work on 44 | Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels ((enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/amazon-ecs-nodejs-microservices/labels/help%20wanted) issues is a great place to start. 45 | 46 | 47 | ## Code of Conduct 48 | This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct). 49 | For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact 50 | opensource-codeofconduct@amazon.com with any additional questions or comments. 
51 | 52 | 53 | ## Security issue notifications 54 | If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue. 55 | 56 | 57 | ## Licensing 58 | 59 | See the [LICENSE](https://github.com/awslabs/amazon-ecs-nodejs-microservices/blob/master/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution. 60 | 61 | We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes. 62 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 
25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. 
If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. 
You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. 
(Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Node.js Microservices Deployed on EC2 Container Service 2 | 3 | This is a reference architecture that shows the evolution of a Node.js application from a monolithic 4 | application that is deployed directly onto instances with no containerization or orchestration, to a 5 | containerized microservices architecture orchestrated using Amazon EC2 Container Service. 
6 | 7 | - [Part One: The base Node.js application](1-no-container/) 8 | - [Part Two: Moving the application to a container deployed using ECS](2-containerized/) 9 | - [Part Three: Breaking the monolith apart into microservices on ECS](3-microservices/) 10 | -------------------------------------------------------------------------------- /images/microservice-containers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awslabs/amazon-ecs-nodejs-microservices/7b550c52640d451c46fcdb9f03628eba85139a32/images/microservice-containers.png -------------------------------------------------------------------------------- /images/monolithic-containers.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awslabs/amazon-ecs-nodejs-microservices/7b550c52640d451c46fcdb9f03628eba85139a32/images/monolithic-containers.png -------------------------------------------------------------------------------- /images/monolithic-no-container.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/awslabs/amazon-ecs-nodejs-microservices/7b550c52640d451c46fcdb9f03628eba85139a32/images/monolithic-no-container.png --------------------------------------------------------------------------------