├── .gitignore
├── user-data.sh
├── SendJobs.py
├── role.json
├── GetJobs.py
├── README.md
└── doEverything.sh

/.gitignore:
--------------------------------------------------------------------------------
*~
--------------------------------------------------------------------------------
/user-data.sh:
--------------------------------------------------------------------------------
#!/bin/sh
PARALLEL=2     # Number of parallel processes to run
SCRIPT=GetJobs.py
COMMAND="cp"   # For testing
#PARAMS=" "
PARAMS="/var/tmp .out batch-queue eu-west-1 $COMMAND"
yum update -y
wget -O $SCRIPT ""   # URL of the GetJobs.py script to download (fill in)
for i in $(seq $PARALLEL)
do
    LOGFILE=./${SCRIPT}.$i.log
    echo "Starting $i of $PARALLEL - log file is $LOGFILE ..."
    nohup python ./$SCRIPT $PARAMS > $LOGFILE 2>&1 &
done
--------------------------------------------------------------------------------
/SendJobs.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import json
import os

from sys import argv, exit

import boto
import boto.s3
import boto.sqs

from boto.s3.key import Key
from boto.sqs.message import Message

def uploadDir(localDir, s3BucketName, s3InputPrefix, s3OutputPrefix, sqsQueueName, awsRegion):
    files = os.listdir(localDir)
    s3 = boto.s3.connect_to_region(awsRegion)
    s3Bucket = s3.get_bucket(s3BucketName)
    sqs = boto.sqs.connect_to_region(awsRegion)
    sqsQueue = sqs.lookup(sqsQueueName)
    for fileName in files:
        localPath = os.path.join(localDir, fileName)
        remotePath = s3InputPrefix + fileName
        print "Uploading %s to s3://%s/%s ..." % (localPath, s3BucketName, remotePath)
        # Upload to S3
        key = Key(s3Bucket)
        key.key = remotePath
        key.set_contents_from_filename(localPath)
        # Send message to SQS
        print "Sending message to SQS queue ..."
        messageBody = json.dumps(['process', s3BucketName, s3InputPrefix, s3OutputPrefix, fileName])
        m = Message()
        m.set_body(messageBody)
        sqsQueue.write(m)
        print "Done!"
    print "All done!"
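
# Note on the message format (comment added for clarity): each SQS message body
# is a JSON list of the form ['process', s3BucketName, s3InputPrefix,
# s3OutputPrefix, fileName]; GetJobs.py expects exactly this structure when it
# parses incoming messages.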

def main():
    if len(argv) < 7:
        print "Usage: %s <local dir> <S3 bucket> <S3 input prefix> <S3 output prefix> <SQS queue> <AWS region>" % argv[0]
        exit(1)
    localDir = argv[1]
    s3BucketName = argv[2]
    s3InputPrefix = argv[3]
    s3OutputPrefix = argv[4]
    sqsQueueName = argv[5]
    awsRegion = argv[6]
    uploadDir(localDir, s3BucketName, s3InputPrefix, s3OutputPrefix, sqsQueueName, awsRegion)

if __name__ == '__main__':
    main()
--------------------------------------------------------------------------------
/role.json:
--------------------------------------------------------------------------------
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": [
        "s3:AbortMultipartUpload",
        "s3:DeleteBucketPolicy",
        "s3:DeleteBucketWebsite",
        "s3:DeleteObject",
        "s3:DeleteObjectVersion",
        "s3:GetBucketAcl",
        "s3:GetBucketLocation",
        "s3:GetBucketLogging",
        "s3:GetBucketNotification",
        "s3:GetBucketPolicy",
        "s3:GetBucketRequestPayment",
        "s3:GetBucketVersioning",
        "s3:GetBucketWebsite",
        "s3:GetLifecycleConfiguration",
        "s3:GetObject",
        "s3:GetObjectAcl",
        "s3:GetObjectTorrent",
        "s3:GetObjectVersion",
        "s3:GetObjectVersionAcl",
        "s3:GetObjectVersionTorrent",
        "s3:ListAllMyBuckets",
        "s3:ListBucket",
        "s3:ListBucketMultipartUploads",
        "s3:ListBucketVersions",
        "s3:ListMultipartUploadParts",
        "s3:PutBucketAcl",
        "s3:PutBucketLogging",
        "s3:PutBucketNotification",
        "s3:PutBucketPolicy",
        "s3:PutBucketRequestPayment",
        "s3:PutBucketVersioning",
        "s3:PutBucketWebsite",
        "s3:PutLifecycleConfiguration",
        "s3:PutObject",
        "s3:PutObjectAcl",
        "s3:PutObjectVersionAcl"
      ],
      "Sid": "Stmt1373286835000",
      "Resource": [
        "arn:aws:s3:::<S3 Bucket Name>",
        "arn:aws:s3:::<S3 Bucket Name>/*"
      ],
      "Effect": "Allow"
    },
    {
      "Action": [
        "sqs:AddPermission",
        "sqs:ChangeMessageVisibility",
        "sqs:ChangeMessageVisibilityBatch",
        "sqs:DeleteMessage",
        "sqs:DeleteMessageBatch",
        "sqs:GetQueueAttributes",
        "sqs:GetQueueUrl",
        "sqs:ListQueues",
        "sqs:ReceiveMessage",
        "sqs:RemovePermission",
        "sqs:SendMessage",
        "sqs:SendMessageBatch",
        "sqs:SetQueueAttributes"
      ],
      "Sid": "Stmt1373286994000",
      "Resource": [
        "arn:aws:sqs:eu-west-1:<AWS Account>:<SQS Queue Name>"
      ],
      "Effect": "Allow"
    }
  ]
}
--------------------------------------------------------------------------------
/GetJobs.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python

import json
import os
import subprocess
import signal

from sys import argv, exit

import boto
import boto.s3
import boto.sqs

from boto.s3.key import Key
from boto.sqs.message import Message

def getJobs(workDir, outputExtension, sqsQueueName, awsRegion, command):
    s3 = boto.s3.connect_to_region(awsRegion)
    sqs = boto.sqs.connect_to_region(awsRegion)
    sqsQueue = sqs.lookup(sqsQueueName)
    while True:
        print "Getting messages from SQS queue..."
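        # wait_time_seconds=20 enables SQS long polling: the receive call waits
        # up to 20 seconds for a message to arrive instead of returning
        # immediately, which reduces empty responses (and API calls) while the
        # queue is idle.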
        messages = sqsQueue.get_messages(wait_time_seconds=20)
        if messages:
            for m in messages:
                print m.get_body()
                job = json.loads(m.get_body())
                print "Message received: '%s'" % job
                action = job[0]
                if action == 'process':
                    s3BucketName = job[1]
                    s3InputPrefix = job[2]
                    s3OutputPrefix = job[3]
                    fileName = job[4]
                    status = process(s3, s3BucketName, s3InputPrefix, s3OutputPrefix, fileName,
                                     workDir, outputExtension, command)
                    if status:
                        print "Message processed correctly ..."
                        m.delete()
                        print "Message deleted"

def process(s3, s3BucketName, s3InputPrefix, s3OutputPrefix, fileName, workDir, outputExtension, command):
    s3Bucket = s3.get_bucket(s3BucketName)
    localInputPath = os.path.join(workDir, fileName)
    localOutputPath = localInputPath + outputExtension
    remoteInputPath = s3InputPrefix + fileName
    remoteOutputPath = s3OutputPrefix + fileName + outputExtension
    print "Downloading %s from s3://%s/%s ..." % (localInputPath, s3BucketName, remoteInputPath)
    key = s3Bucket.get_key(remoteInputPath)
    key.get_contents_to_filename(localInputPath)
    full_command = [command, localInputPath, localOutputPath]
    print "Executing: %s" % ' '.join(full_command)
    returncode = subprocess.call(full_command)
    if returncode != 0:
        print "Return Code not '0'!"
        return False
    print "Uploading %s to s3://%s/%s ..." % (localOutputPath, s3BucketName, remoteOutputPath)
    key = Key(s3Bucket)
    key.key = remoteOutputPath
    key.set_contents_from_filename(localOutputPath)
    return True

def signal_handler(sig, frame):
    print "Exiting..."
    exit(0)

def main():
    if len(argv) < 6:
        print "Usage: %s <work dir> <output extension> <SQS queue> <AWS region> <command>" % argv[0]
        exit(1)
    workDir = argv[1]
    outputExtension = argv[2]
    sqsQueueName = argv[3]
    awsRegion = argv[4]
    command = argv[5]
    getJobs(workDir, outputExtension, sqsQueueName, awsRegion, command)

if __name__ == '__main__':
    signal.signal(signal.SIGINT, signal_handler)
    main()
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Sample Implementation of Batch Processing on Amazon Web Services (AWS)

This is a Sample Implementation for the [AWS Reference Architecture for Batch Processing](http://aws.amazon.com/architecture/).

It is implemented in Python, using [boto](http://aws.amazon.com/sdkforpython/), and the new [AWS Command Line Interface (CLI)](http://aws.amazon.com/cli/).

Two tools are provided:
* SendJobs.py - uploads files from a (local) directory to S3 and puts "job" requests to process those files as messages in an SQS queue
* GetJobs.py - gets "job" messages from an SQS queue, processes the referenced files, and uploads the outcome of the processing back to S3

The setup leverages [EC2](http://aws.amazon.com/ec2/) [Auto Scaling](http://aws.amazon.com/autoscaling/) to keep a group of instances that is empty (i.e. no instance is running) when there are no "job" requests in the SQS queue and grows when there is work to do.
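
For reference, each "job" travels through SQS as a simple JSON message body, produced by SendJobs.py and parsed by GetJobs.py:

    ["process", "<S3 bucket name>", "<S3 input prefix>", "<S3 output prefix>", "<file name>"]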

## Tutorial

### Install AWS CLI

The new [AWS Command Line Interface (CLI) tool](http://aws.amazon.com/cli/)
is Python based, so you can install it using "pip"

    pip install awscli

or using "easy_install"

    easy_install awscli

Before using the AWS CLI, you first need to specify your AWS account credentials and default AWS region as described
[here](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html).

The awscli package includes a very useful command completion feature;
e.g. to enable tab completion for bash, use the built-in command complete (note that this setting is not persistent across shell sessions):

    complete -C aws_completer aws

### Create an S3 Bucket to host input and output files

You can create a bucket from the [S3 web console](http://console.aws.amazon.com/s3/) or using the CLI:

    aws s3api create-bucket --bucket <S3 bucket name> \
      --create-bucket-configuration '{ "location_constraint": "<AWS region>" }'

### Create an SQS Queue to centralize "job" requests

You can create a queue from the [SQS web console](http://console.aws.amazon.com/sqs/) or using the CLI.
The "VisibilityTimeout" is expressed in seconds and should be larger than the maximum processing time required for a "job".
It could also be extended for a single "job", but that is not part of this implementation.

    aws sqs create-queue --queue-name <SQS queue name> --attributes VisibilityTimeout=60

### Create an IAM Role to delegate access to processing instances

From the [IAM web console](http://console.aws.amazon.com/iam/) -> Roles -> Create Role ->
write a role name. Under "AWS Service Roles" select "Amazon EC2".
Select a "Custom Policy", write a policy name and see the "role.json" file
for a sample role giving access to an S3 bucket and an SQS queue.
You should replace "AWS Account", "S3 Bucket Name" and "SQS Queue Name" in the policy with yours.
Write down the Instance Profile ARN from the Summary tab, you'll need it later.

### Create Auto Scaling Launch Configuration

For this sample I'm using a default Amazon Linux EBS-backed AMI; you can find the AMI ID for your region [here](http://aws.amazon.com/amazon-linux-ami).
The user data script provided automatically configures and runs multiple parallel "GetJobs.py" scripts per node to get "jobs" from the queue and process them, uploading the final result back to S3. You probably need to edit the "user-data.sh" file before launching the command below; the parameters you are most likely to change are shown next.
Alternatively you can create your own AMI that starts one or more parallel "GetJobs.py" scripts at boot.
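
For example, these are the parameters at the top of the "user-data.sh" file in this repository that you typically want to review (the values shown are the testing defaults):

    PARALLEL=2     # Number of parallel GetJobs.py processes per instance
    COMMAND="cp"   # Command used to process each file (a plain copy, for testing)
    PARAMS="/var/tmp .out batch-queue eu-west-1 $COMMAND"   # work dir, output extension, SQS queue, AWS region, command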

    aws autoscaling create-launch-configuration --launch-configuration-name asl-batch \
      --image-id <AMI ID> --instance-type <instance type> \
      --iam-instance-profile <instance profile ARN> --user-data "`cat user-data.sh`"

If you want to be able to log in to the instances launched by Auto Scaling, you can add the following parameters to the previous command

    --key-name <key pair name> --security-groups <security group>

### Create Auto Scaling Group

    aws autoscaling create-auto-scaling-group --auto-scaling-group-name asg-batch \
      --launch-configuration-name asl-batch --min-size 0 \
      --max-size <maximum number of instances> \
      --availability-zones <availability zones> \
      --default-cooldown 300

### Create Auto Scaling "Up" Policy

    aws autoscaling put-scaling-policy --auto-scaling-group-name asg-batch --policy-name ash-batch-upscale-policy \
      --scaling-adjustment <number of instances to run> \
      --adjustment-type ExactCapacity

Write down the "PolicyARN", you need it in the next step to set up the alarm.

### Create CloudWatch Alarm to trigger "Up" scaling Policy

    aws cloudwatch put-metric-alarm --alarm-name StartBatchProcessing --metric-name ApproximateNumberOfMessagesVisible \
      --namespace "AWS/SQS" --statistic Average --period 60 --evaluation-periods 2 --threshold 1 \
      --comparison-operator GreaterThanOrEqualToThreshold --dimensions Name=QueueName,Value=batch-queue \
      --alarm-actions <"Up" PolicyARN>

### Create Auto Scaling "Down" Policy

    aws autoscaling put-scaling-policy --auto-scaling-group-name asg-batch --policy-name ash-batch-downscale-policy \
      --scaling-adjustment 0 --adjustment-type ExactCapacity

Write down the "PolicyARN", you need it in the next step to set up the alarm.

### Create CloudWatch Alarm to trigger "Down" scaling Policy

    aws cloudwatch put-metric-alarm --alarm-name StopBatchProcessing --metric-name ApproximateNumberOfMessagesVisible \
      --namespace "AWS/SQS" --statistic Average --period 60 --evaluation-periods 2 --threshold 0 \
      --comparison-operator LessThanOrEqualToThreshold --dimensions Name=QueueName,Value=batch-queue \
      --alarm-actions <"Down" PolicyARN>

### Send the jobs uploading files from a directory

The directory can be local or on an EC2 instance.

    ./SendJobs.py <local dir> <S3 bucket name> input/ output/ <SQS queue name> <AWS region>

To get help, run the tool without options

    ./SendJobs.py

After a few minutes the first CloudWatch Alarm should trigger the "Up" scaling Policy
to start EC2 Instances configured to consume "jobs" from the SQS queue.
When all "jobs" are processed and the SQS queue is "empty", the second CloudWatch Alarm should trigger
the "Down" scaling Policy to shut down and terminate the EC2 Instances.
You should find the output of the processing in the S3 bucket under the "output/" prefix.

### Change the Launch Configuration of an Auto Scaling Group

If later on you need to change the Launch Configuration, create a new one and update the Auto Scaling Group, e.g.

    aws autoscaling update-auto-scaling-group --launch-configuration-name asl-batch-v2 \
      --auto-scaling-group-name asg-batch
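
Once the group references the new launch configuration, the old one is no longer needed and can optionally be deleted (this clean-up step is not part of the walkthrough above):

    aws autoscaling delete-launch-configuration --launch-configuration-name asl-batch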
--------------------------------------------------------------------------------
/doEverything.sh:
--------------------------------------------------------------------------------
#!/bin/bash
## This script breaks out some of the bash commands
## listed in the README.md file to be easier to
## follow along in class
##
## \author Hans J. Johnson

## Tutorial
### Install AWS CLI

AWS_COMMAND=$(which aws)
if [ ! -f "${AWS_COMMAND}" ]; then
  echo "ERROR: Missing aws command, follow instructions in README.md"
  exit 2
fi

if ! python -c "import boto.sqs" &> /dev/null; then
  echo "ERROR: boto library for aws services not installed in python" >&2
  exit 2
fi
# TODO: Test for awscli
# pip install awscli
complete -C aws_completer aws   # CLI tab completion (only useful in interactive shells)

#####################################################
### Create an S3 Bucket to host input and output files
# You can create a bucket from the [S3 web console](http://console.aws.amazon.com/s3/) or using the CLI:
S3_BUCKET_NAME="hjtest2"     ## Your bucket name; use all lower case letters
YOUR_AWS_REGION="us-east-1"  ## Your AWS region
aws s3 mb s3://${S3_BUCKET_NAME} \
  --region ${YOUR_AWS_REGION}
if [ $? -ne 0 ]; then
  echo "FAIL: create bucket"
fi

#####################################################
### Create an SQS Queue to centralize "job" requests
SQS_QUEUE_NAME="batch-queue"  ## Your SQS queue name
aws sqs create-queue \
  --queue-name ${SQS_QUEUE_NAME} \
  --attributes VisibilityTimeout=60


#####################################################
### Create an IAM Role to delegate access to processing instances

## NOTE: THIS NEEDS TO BE DONE FROM THE AWS web console
# From the [IAM web console](http://console.aws.amazon.com/iam/) -> Roles -> Create Role ->
# write a role name. Under "AWS Service Roles" select "Amazon EC2".
# Select a "Custom Policy", write a policy name and see the "role.json" file
# for a sample role giving access to an S3 bucket and an SQS queue.
# You should replace "AWS Account", "S3 Bucket Name" and "SQS Queue Name" in the policy with yours.
# Write down the Instance Profile ARN from the Summary tab, you'll need it later.

INSTANCE_PROFILE_ARN="arn:aws:iam::236198936632:instance-profile/HansDLTRole"

#####################################################
### Create Auto Scaling Launch Configuration
LAUNCH_CONFIGURATION_NAME="asl-batch"
LINUX_AMI_ID=ami-146e2a7c  ## AMI IDs are region specific
INSTANCE_TYPE=m3.medium    ## The EC2 Instance Type to launch, e.g. t1.micro
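
## Optional sanity check (not part of the original walkthrough): AMI IDs are
## region specific, so confirm that the chosen AMI exists in the selected
## region before building the launch configuration on top of it.
aws ec2 describe-images --image-ids ${LINUX_AMI_ID} --region ${YOUR_AWS_REGION} > /dev/null
if [ $? -ne 0 ]; then
  echo "ERROR: AMI ${LINUX_AMI_ID} not found in region ${YOUR_AWS_REGION}" >&2
  exit 2
fi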

aws autoscaling create-launch-configuration \
  --launch-configuration-name ${LAUNCH_CONFIGURATION_NAME} \
  --image-id ${LINUX_AMI_ID} \
  --instance-type ${INSTANCE_TYPE} \
  --iam-instance-profile ${INSTANCE_PROFILE_ARN} \
  --user-data "`cat user-data.sh`"

# If you want to be able to log in to the instances launched by Auto Scaling, you can add the following parameters to the previous command
# --key-name <key pair name> \
# --security-groups <security group>

#####################################################
### Create Auto Scaling Group
AUTO_SCALING_GROUP_NAME="asg-batch2"  ## Your Auto Scaling group name
NUM_INSTANCES_TO_START=3              ## Maximum number of instances in the group

## NOTE: You must restrict your AZ's to those available in your VPC
AZ_IN_YOUR_DEFAULT_VPC="us-east-1b us-east-1c"  ## All AZs in the region,
## e.g. for "eu-west-1" you can use "eu-west-1a" "eu-west-1b" "eu-west-1c"
aws autoscaling create-auto-scaling-group \
  --auto-scaling-group-name ${AUTO_SCALING_GROUP_NAME} \
  --launch-configuration-name ${LAUNCH_CONFIGURATION_NAME} \
  --min-size 0 \
  --max-size ${NUM_INSTANCES_TO_START} \
  --availability-zones ${AZ_IN_YOUR_DEFAULT_VPC} \
  --default-cooldown 300

#####################################################
### Create Auto Scaling "Up" Policy
AUTO_SCALE_UP_POLICY_NAME="ash-batch-upscale-policy"
NUM_JOBS_TO_UPSCALE=2
aws autoscaling put-scaling-policy \
  --auto-scaling-group-name ${AUTO_SCALING_GROUP_NAME} \
  --policy-name ${AUTO_SCALE_UP_POLICY_NAME} \
  --scaling-adjustment ${NUM_JOBS_TO_UPSCALE} \
  --adjustment-type ExactCapacity 2>&1 | tee UP_POLICY_ARN.log

# HACK TO GET the policy ARN from the command output
UP_POLICY_ARN=$(grep PolicyARN UP_POLICY_ARN.log | awk -F\" '{print $4}')
# Write down the "PolicyARN", you need it in the next step to set up the alarm.

### Create CloudWatch Alarm to trigger "Up" scaling Policy
aws cloudwatch put-metric-alarm \
  --alarm-name StartBatchProcessing \
  --metric-name ApproximateNumberOfMessagesVisible \
  --namespace "AWS/SQS" \
  --statistic Average \
  --period 60 \
  --evaluation-periods 2 \
  --threshold 1 \
  --comparison-operator GreaterThanOrEqualToThreshold \
  --dimensions Name=QueueName,Value=batch-queue \
  --alarm-actions ${UP_POLICY_ARN}

### Create Auto Scaling "Down" Policy
AUTO_SCALE_DOWN_POLICY_NAME="ash-batch-downscale-policy"
aws autoscaling put-scaling-policy \
  --auto-scaling-group-name ${AUTO_SCALING_GROUP_NAME} \
  --policy-name ${AUTO_SCALE_DOWN_POLICY_NAME} \
  --scaling-adjustment 0 \
  --adjustment-type ExactCapacity 2>&1 | tee DOWN_POLICY_ARN.log

DOWN_POLICY_ARN=$(grep PolicyARN DOWN_POLICY_ARN.log | awk -F\" '{print $4}')

# Write down the "PolicyARN", you need it in the next step to set up the alarm.
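
## Optional guard (not in the original script): if the policy ARN could not be
## parsed out of the put-scaling-policy output, fail early rather than creating
## a CloudWatch alarm with an empty action.
if [ -z "${DOWN_POLICY_ARN}" ]; then
  echo "ERROR: could not determine the Down scaling policy ARN" >&2
  exit 2
fi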

### Create CloudWatch Alarm to trigger "Down" scaling Policy
aws cloudwatch put-metric-alarm \
  --alarm-name StopBatchProcessing \
  --metric-name ApproximateNumberOfMessagesVisible \
  --namespace "AWS/SQS" \
  --statistic Average \
  --period 60 \
  --evaluation-periods 2 \
  --threshold 0 \
  --comparison-operator LessThanOrEqualToThreshold \
  --dimensions Name=QueueName,Value=batch-queue \
  --alarm-actions ${DOWN_POLICY_ARN}

### Send the jobs uploading files from a directory

# The directory can be local or on an EC2 instance.
#
#   ./SendJobs.py <local dir> <S3 bucket name> input/ output/ <SQS queue name> <AWS region>
#
# To get help, run the tool without options
#
#   ./SendJobs.py
#
# After a few minutes the first CloudWatch Alarm should trigger the "Up" scaling Policy
# to start EC2 Instances configured to consume "jobs" from the SQS queue.
# When all "jobs" are processed and the SQS queue is "empty", the second CloudWatch Alarm should trigger
# the "Down" scaling Policy to shut down and terminate the EC2 Instances.
# You should find the output of the processing in the S3 bucket under the "output/" prefix.
#
### Change the Launch Configuration of an Auto Scaling Group
#
# If later on you need to change the Launch Configuration, create a new one and update the Auto Scaling Group, e.g.
#
#   aws autoscaling update-auto-scaling-group \
#     --launch-configuration-name ${LAUNCH_CONFIGURATION_NAME}-v2 \
#     --auto-scaling-group-name ${AUTO_SCALING_GROUP_NAME}
--------------------------------------------------------------------------------