├── code_hash.txt
├── .dockerignore
├── cdk
│   ├── cdk.json
│   ├── requirements.txt
│   └── app.py
├── .gitignore
├── mu.yml
├── kubernetes
│   ├── service.yaml
│   └── deployment.yaml
├── docker-compose.yml
├── README.md
├── package.json
├── ecs-params.yml.template
├── ecs-params.yml
├── test
│   └── test.js
├── Dockerfile
├── Dockerfile.cdk
├── LICENSE
├── server.js
├── startup.sh
└── startup-cdk.sh

/code_hash.txt:
--------------------------------------------------------------------------------
NOHASH
--------------------------------------------------------------------------------
/.dockerignore:
--------------------------------------------------------------------------------
node_modules
npm-debug.log
.git
--------------------------------------------------------------------------------
/cdk/cdk.json:
--------------------------------------------------------------------------------
{
  "app": "python3 app.py"
}
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
node_modules
cdk.out
cdk.context.json
--------------------------------------------------------------------------------
/mu.yml:
--------------------------------------------------------------------------------
---
service:
  desiredCount: 3
  maxSize: 6
  port: 3000
  discoveryTTL: 5
--------------------------------------------------------------------------------
/cdk/requirements.txt:
--------------------------------------------------------------------------------
aws_cdk.aws_ecs_patterns
aws_cdk.aws_ec2
aws_cdk.aws_ecs
aws_cdk.aws_servicediscovery
aws_cdk.core
--------------------------------------------------------------------------------
/kubernetes/service.yaml:
--------------------------------------------------------------------------------
apiVersion: v1
kind: Service
metadata:
  name: ecsdemo-nodejs
spec:
  selector:
    app: ecsdemo-nodejs
  ports:
    - protocol: TCP
      port: 80
      targetPort: 3000
--------------------------------------------------------------------------------
/docker-compose.yml:
--------------------------------------------------------------------------------
version: '3'
services:
  ecsdemo-nodejs:
    image: brentley/ecsdemo-nodejs
    ports:
      - "3000:3000"
    logging:
      driver: awslogs
      options:
        awslogs-group: ecsdemo-nodejs
        awslogs-region: ${AWS_REGION}
        awslogs-stream-prefix: ecsdemo-nodejs
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
![Build Status](https://codebuild.us-east-2.amazonaws.com/badges?uuid=eyJlbmNyeXB0ZWREYXRhIjoidDRkc2xFKzlVaVpRZXRiK1VrV3crbVNQMnJBNHdzQjQrVlQwaXdqVHdlYW94RU9jODN0R0I4WGJieEJLdjRldktxN3pCMWNTbW9YVUVPUmNzcVFCd0FjPSIsIml2UGFyYW1ldGVyU3BlYyI6Ikg0S29yNDFQUEVHMWVoN2wiLCJtYXRlcmlhbFNldFNlcmlhbCI6MX0%3D&branch=master)

# Amazon ECS Workshop

This is part of an Amazon ECS workshop at https://ecsworkshop.com
--------------------------------------------------------------------------------
/package.json:
--------------------------------------------------------------------------------
{
  "name": "example-nodejs",
  "version": "1.0.0",
  "license": "ISC",
  "main": "server.js",
  "scripts": {
    "test": "mocha"
  },
"dependencies": { 10 | "express": "^4.16.2", 11 | "express-healthcheck": "^0.1.0", 12 | "internal-ip": "^3.0.1", 13 | "ip": "^1.1.5", 14 | "morgan": "^1.9.0" 15 | }, 16 | "devDependencies": { 17 | "mocha": "^4.1.0", 18 | "supertest": "^3.0.0" 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /ecs-params.yml.template: -------------------------------------------------------------------------------- 1 | version: 1 2 | task_definition: 3 | task_execution_role: $ecsTaskExecutionRole 4 | ecs_network_mode: awsvpc 5 | task_size: 6 | mem_limit: 0.5GB 7 | cpu_limit: 256 8 | run_params: 9 | network_configuration: 10 | awsvpc_configuration: 11 | subnets: 12 | - "$subnet_1" 13 | - "$subnet_2" 14 | - "$subnet_3" 15 | security_groups: 16 | - "$security_group" 17 | assign_public_ip: DISABLED 18 | service_discovery: 19 | container_name: ecsdemo-nodejs 20 | service_discovery_service: 21 | name: ecsdemo-nodejs 22 | description: string 23 | -------------------------------------------------------------------------------- /ecs-params.yml: -------------------------------------------------------------------------------- 1 | version: 1 2 | task_definition: 3 | task_execution_role: ecsTaskExecutionRole 4 | ecs_network_mode: awsvpc 5 | task_size: 6 | mem_limit: 0.5GB 7 | cpu_limit: 256 8 | run_params: 9 | network_configuration: 10 | awsvpc_configuration: 11 | subnets: 12 | - "subnet-0412684667a7a5147" 13 | - "subnet-08467586f86110cea" 14 | - "subnet-0afe24995d24d9e0f" 15 | security_groups: 16 | - "sg-03eef5d8f0eefa149" 17 | assign_public_ip: DISABLED 18 | service_discovery: 19 | container_name: ecsdemo-nodejs 20 | service_discovery_service: 21 | name: ecsdemo-nodejs 22 | description: string 23 | -------------------------------------------------------------------------------- /test/test.js: -------------------------------------------------------------------------------- 1 | var request = require('supertest'); 2 | describe('loading express', function () { 3 | var server; 4 | beforeEach(function () { 5 | server = require('../server'); 6 | }); 7 | after(function (done) { 8 | server.close(done); 9 | }); 10 | it('responds to /', function testSlash(done) { 11 | request(server) 12 | .get('/') 13 | .expect(200, done); 14 | }); 15 | it('responds to /health', function testSlash(done) { 16 | request(server) 17 | .get('/health') 18 | .expect(200, done); 19 | }); 20 | it('404 everything else', function testPath(done) { 21 | request(server) 22 | .get('/foo/bar') 23 | .expect(404, done); 24 | }); 25 | }); 26 | -------------------------------------------------------------------------------- /kubernetes/deployment.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: apps/v1 2 | kind: Deployment 3 | metadata: 4 | name: ecsdemo-nodejs 5 | labels: 6 | app: ecsdemo-nodejs 7 | namespace: default 8 | spec: 9 | replicas: 3 10 | selector: 11 | matchLabels: 12 | app: ecsdemo-nodejs 13 | strategy: 14 | rollingUpdate: 15 | maxSurge: 25% 16 | maxUnavailable: 25% 17 | type: RollingUpdate 18 | template: 19 | metadata: 20 | labels: 21 | app: ecsdemo-nodejs 22 | spec: 23 | containers: 24 | - image: brentley/ecsdemo-nodejs:latest 25 | imagePullPolicy: Always 26 | name: ecsdemo-nodejs 27 | ports: 28 | - containerPort: 3000 29 | protocol: TCP 30 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # FROM node:alpine 2 | FROM 

# set the default NODE_ENV to production
# for dev/test build with: docker build --build-arg NODE=development .
# and the testing npms will be included
ARG NODE=production
ENV NODE_ENV ${NODE}

# copy package info early to install npms and delete npm command
WORKDIR /usr/src/app
COPY package*.json ./
RUN apk -U add curl jq bash nodejs nodejs-npm python3 py3-pip && \
    pip3 install awscli netaddr && \
    npm install && apk del --purge nodejs-npm && \
    rm -rvf /var/cache/* /root/.npm /tmp/*

# copy the code
COPY . .
HEALTHCHECK --interval=10s --timeout=3s \
  CMD curl -f -s http://localhost:3000/health/ || exit 1
EXPOSE 3000
ENTRYPOINT ["bash","/usr/src/app/startup.sh"]
--------------------------------------------------------------------------------
/Dockerfile.cdk:
--------------------------------------------------------------------------------
# FROM node:alpine
FROM alpine:3.6

# set the default NODE_ENV to production
# for dev/test build with: docker build --build-arg NODE=development .
# and the testing npms will be included
ARG NODE=production
ENV NODE_ENV ${NODE}

# copy package info early to install npms and delete npm command
WORKDIR /usr/src/app
COPY package*.json ./
RUN apk -U add curl jq bash nodejs nodejs-npm python3 py3-pip && \
    pip3 install awscli netaddr && \
    npm install && apk del --purge nodejs-npm && \
    rm -rvf /var/cache/* /root/.npm /tmp/*

# copy the code
COPY . .
RUN mv /usr/src/app/startup-cdk.sh /usr/src/app/startup.sh
HEALTHCHECK --interval=10s --timeout=3s \
  CMD curl -f -s http://localhost:3000/health/ || exit 1
EXPOSE 3000
ENTRYPOINT ["bash","/usr/src/app/startup.sh"]
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2018 Brent Langston

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/server.js:
--------------------------------------------------------------------------------
// use the express framework
var express = require('express');
var app = express();

var fs = require('fs');
var code_hash = fs.readFileSync('code_hash.txt','utf8');
console.log (code_hash);
console.log('The IPADDRESS is:', process.env.IP);
console.log('The message is:', process.env.AZ);
console.log('The hash is: %s', code_hash);

var ipaddress = process.env.IP;
var message = process.env.AZ;

// morgan: generate apache style logs to the console
var morgan = require('morgan')
app.use(morgan('combined'));

// express-healthcheck: respond on /health route for LB checks
app.use('/health', require('express-healthcheck')());

// main route
app.get('/', function (req, res) {
  res.set({
    'Content-Type': 'text/plain'
  })
  res.send(`Node.js backend: Hello! from ${message} commit ${code_hash}`);
  // res.send(`Hello World! from ${ipaddress} in AZ-${az} which has been up for ` + process.uptime() + 'ms');
});

app.get('/nodejs', function (req, res) {
  res.set({
    'Content-Type': 'text/plain'
  })
  res.send(`Node.js backend: Hello! from ${message} commit ${code_hash}`);
  // res.send(`Hello World! from ${ipaddress} in AZ-${az} which has been up for ` + process.uptime() + 'ms');
});

app.get('/nodejs/api', function (req, res) {
  res.send({
    from: 'Node.js backend',
    message: message,
    commit: code_hash
  });
});

// start the server and log the listening port
var server = app.listen(3000, function() {
  var port = server.address().port;
  console.log('Example app listening on port %s!', port);
});

// export the server to make tests work
module.exports = server;
--------------------------------------------------------------------------------
/startup.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -x

IP=$(ip route show |grep -o src.* |cut -f2 -d" ")
# kubernetes sets routes differently -- so we will discover our IP differently
if [[ ${IP} == "" ]]; then
  IP=$(hostname -i)
fi
SUBNET=$(echo ${IP} | cut -f1 -d.)
NETWORK=$(echo ${IP} | cut -f3 -d.)
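# The checks below infer the environment from the task IP alone: the first
# octet (SUBNET) selects the orchestrator (10.x => ECS, 192.x => kubernetes)
# and the third octet (NETWORK) is mapped to an availability zone letter.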

case "${SUBNET}" in
  10)
    orchestrator=ecs
    ;;
  192)
    orchestrator=kubernetes
    ;;
  *)
    orchestrator=unknown
    ;;
esac

if [[ "${orchestrator}" == 'ecs' ]]; then
  case "${NETWORK}" in
    100)
      zone=a
      color=Crimson
      ;;
    101)
      zone=b
      color=CornflowerBlue
      ;;
    102)
      zone=c
      color=LightGreen
      ;;
    *)
      zone=unknown
      color=Yellow
      ;;
  esac
fi

if [[ "${orchestrator}" == 'kubernetes' ]]; then
  if ((0<=${NETWORK} && ${NETWORK}<32))
  then
    zone=a
  elif ((32<=${NETWORK} && ${NETWORK}<64))
  then
    zone=b
  elif ((64<=${NETWORK} && ${NETWORK}<96))
  then
    zone=c
  elif ((96<=${NETWORK} && ${NETWORK}<128))
  then
    zone=a
  elif ((128<=${NETWORK} && ${NETWORK}<160))
  then
    zone=b
  elif ((160<=${NETWORK}))
  then
    zone=c
  else
    zone=unknown
  fi
fi

if [[ ${orchestrator} == 'unknown' ]]; then
  zone=$(curl -m2 -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.availabilityZone' | grep -o .$)
fi

# Am I on ec2 instances?
if [[ ${zone} == "unknown" ]]; then
  zone=$(curl -m2 -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.availabilityZone' | grep -o .$)
fi

# Still no luck? Perhaps we're running fargate!
if [[ -z ${zone} ]]; then
  ip_addr=$(curl -m2 -s ${ECS_CONTAINER_METADATA_URI} | jq -r '.Networks[].IPv4Addresses[]')
  declare -a subnets=( $(aws ec2 describe-subnets | jq -r .Subnets[].CidrBlock| sed ':a;N;$!ba;s/\n/ /g') )
  for sub in "${subnets[@]}"; do
    ip_match=$(echo -e "from netaddr import IPNetwork, IPAddress\nif IPAddress('$ip_addr') in IPNetwork('$sub'):\n print('true')" | python3)
    if [[ $ip_match == "true" ]];then
      zone=$(aws ec2 describe-subnets | jq -r --arg sub "$sub" '.Subnets[] | select(.CidrBlock==$sub) | .AvailabilityZone' | grep -o .$)
    fi
  done
fi

export CODE_HASH="$(cat code_hash.txt)"
export IP
export AZ="${IP} in AZ-${zone}"

# exec container command
exec node server.js
--------------------------------------------------------------------------------
/startup-cdk.sh:
--------------------------------------------------------------------------------
#!/bin/bash

set -x

IP=$(ip route show |grep -o src.* |cut -f2 -d" ")
# kubernetes sets routes differently -- so we will discover our IP differently
if [[ ${IP} == "" ]]; then
  IP=$(hostname -i)
fi
SUBNET=$(echo ${IP} | cut -f1 -d.)
NETWORK=$(echo ${IP} | cut -f3 -d.)
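# This script mirrors startup.sh; the only difference is that the Fargate
# fallback below also exports AWS_DEFAULT_REGION before calling the AWS CLI.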

case "${SUBNET}" in
  10)
    orchestrator=ecs
    ;;
  192)
    orchestrator=kubernetes
    ;;
  *)
    orchestrator=unknown
    ;;
esac

if [[ "${orchestrator}" == 'ecs' ]]; then
  case "${NETWORK}" in
    100)
      zone=a
      color=Crimson
      ;;
    101)
      zone=b
      color=CornflowerBlue
      ;;
    102)
      zone=c
      color=LightGreen
      ;;
    *)
      zone=unknown
      color=Yellow
      ;;
  esac
fi

if [[ "${orchestrator}" == 'kubernetes' ]]; then
  if ((0<=${NETWORK} && ${NETWORK}<32))
  then
    zone=a
  elif ((32<=${NETWORK} && ${NETWORK}<64))
  then
    zone=b
  elif ((64<=${NETWORK} && ${NETWORK}<96))
  then
    zone=c
  elif ((96<=${NETWORK} && ${NETWORK}<128))
  then
    zone=a
  elif ((128<=${NETWORK} && ${NETWORK}<160))
  then
    zone=b
  elif ((160<=${NETWORK}))
  then
    zone=c
  else
    zone=unknown
  fi
fi

if [[ ${orchestrator} == 'unknown' ]]; then
  zone=$(curl -m2 -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.availabilityZone' | grep -o .$)
fi

# Am I on ec2 instances?
if [[ ${zone} == "unknown" ]]; then
  zone=$(curl -m2 -s http://169.254.169.254/latest/dynamic/instance-identity/document | jq -r '.availabilityZone' | grep -o .$)
fi

# Still no luck? Perhaps we're running fargate!
if [[ -z ${zone} ]]; then
  export AWS_DEFAULT_REGION=$REGION
  ip_addr=$(curl -m2 -s ${ECS_CONTAINER_METADATA_URI} | jq -r '.Networks[].IPv4Addresses[]')
  declare -a subnets=( $(aws ec2 describe-subnets | jq -r .Subnets[].CidrBlock| sed ':a;N;$!ba;s/\n/ /g') )
  for sub in "${subnets[@]}"; do
    ip_match=$(echo -e "from netaddr import IPNetwork, IPAddress\nif IPAddress('$ip_addr') in IPNetwork('$sub'):\n print('true')" | python3)
    if [[ $ip_match == "true" ]];then
      zone=$(aws ec2 describe-subnets | jq -r --arg sub "$sub" '.Subnets[] | select(.CidrBlock==$sub) | .AvailabilityZone' | grep -o .$)
    fi
  done
fi

export CODE_HASH="$(cat code_hash.txt)"
export IP
export AZ="${IP} in AZ-${zone}"

# exec container command
exec node server.js
--------------------------------------------------------------------------------
/cdk/app.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3

# cdk: 1.25.0
from aws_cdk import (
    aws_ec2,
    aws_ecs,
    aws_ecs_patterns,
    aws_iam,
    aws_servicediscovery,
    core,
)

from os import getenv


# Creating a construct that will populate the required objects created in the platform repo such as vpc, ecs cluster, and service discovery namespace
class BasePlatform(core.Construct):

    def __init__(self, scope: core.Construct, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)
        self.environment_name = 'ecsworkshop'

        # The base platform stack is where the VPC was created, so all we need is the name to do a lookup and import it into this stack for use
        self.vpc = aws_ec2.Vpc.from_lookup(
            self, "VPC",
            vpc_name='{}-base/BaseVPC'.format(self.environment_name)
        )

        self.sd_namespace = aws_servicediscovery.PrivateDnsNamespace.from_private_dns_namespace_attributes(
            self, "SDNamespace",
            namespace_name=core.Fn.import_value('NSNAME'),
            namespace_arn=core.Fn.import_value('NSARN'),
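            # the namespace attributes are imported from CloudFormation exports created by the base platform stack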
            namespace_id=core.Fn.import_value('NSID')
        )

        self.ecs_cluster = aws_ecs.Cluster.from_cluster_attributes(
            self, "ECSCluster",
            cluster_name=core.Fn.import_value('ECSClusterName'),
            security_groups=[],
            vpc=self.vpc,
            default_cloud_map_namespace=self.sd_namespace
        )

        self.services_sec_grp = aws_ec2.SecurityGroup.from_security_group_id(
            self, "ServicesSecGrp",
            security_group_id=core.Fn.import_value('ServicesSecGrp')
        )


class NodejsService(core.Stack):

    def __init__(self, scope: core.Stack, id: str, **kwargs):
        super().__init__(scope, id, **kwargs)

        self.base_platform = BasePlatform(self, self.stack_name)

        self.fargate_task_def = aws_ecs.TaskDefinition(
            self, "TaskDef",
            compatibility=aws_ecs.Compatibility.EC2_AND_FARGATE,
            cpu='256',
            memory_mib='512',
        )

        self.container = self.fargate_task_def.add_container(
            "NodeServiceContainerDef",
            image=aws_ecs.ContainerImage.from_registry("brentley/ecsdemo-nodejs:cdk"),
            memory_reservation_mib=512,
            logging=aws_ecs.LogDriver.aws_logs(
                stream_prefix='ecsworkshop-nodejs'
            ),
            environment={
                "REGION": getenv('AWS_DEFAULT_REGION')
            },
        )

        self.container.add_port_mappings(
            aws_ecs.PortMapping(
                container_port=3000
            )
        )

        self.fargate_service = aws_ecs.FargateService(
            self, "NodejsFargateService",
            service_name='ecsdemo-nodejs',
            task_definition=self.fargate_task_def,
            cluster=self.base_platform.ecs_cluster,
            security_group=self.base_platform.services_sec_grp,
            desired_count=1,
            cloud_map_options=aws_ecs.CloudMapOptions(
                cloud_map_namespace=self.base_platform.sd_namespace,
                name='ecsdemo-nodejs'
            )
        )

        self.fargate_task_def.add_to_task_role_policy(
            aws_iam.PolicyStatement(
                actions=['ec2:DescribeSubnets'],
                resources=['*']
            )
        )

        # Enable Service Autoscaling
        # self.autoscale = self.fargate_service.auto_scale_task_count(
        #     min_capacity=1,
        #     max_capacity=10
        # )

        # self.autoscale.scale_on_cpu_utilization(
        #     "CPUAutoscaling",
        #     target_utilization_percent=50,
        #     scale_in_cooldown=core.Duration.seconds(30),
        #     scale_out_cooldown=core.Duration.seconds(30)
        # )

_env = core.Environment(account=getenv('AWS_ACCOUNT_ID'), region=getenv('AWS_DEFAULT_REGION'))
environment = "ecsworkshop"
stack_name = "{}-nodejs".format(environment)
app = core.App()
NodejsService(app, stack_name, env=_env)
app.synth()
--------------------------------------------------------------------------------