├── .gitignore
├── src
│   ├── publish
│   │   ├── __pycache__
│   │   │   └── util.cpython-36.pyc
│   │   ├── tables
│   │   │   ├── tensorflow_keras_pillow
│   │   │   └── tensorflow_keras
│   │   ├── util.py
│   │   ├── arn_table.py
│   │   └── publish.py
│   └── build_scripts
│       ├── run-all.sh
│       ├── aws_regions
│       ├── setup-server.sh
│       ├── build-layer.sh
│       └── aws-build-lambda.sh
├── LICENSE
├── README.md
└── arn_tables
    ├── tensorflow_keras_pillow.md
    └── tensorflow_keras.md

--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
test/
env/
build/

--------------------------------------------------------------------------------
/src/publish/__pycache__/util.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/antonpaquin/Tensorflow-Lambda-Layer/HEAD/src/publish/__pycache__/util.cpython-36.pyc

--------------------------------------------------------------------------------
/src/publish/tables/tensorflow_keras_pillow:
--------------------------------------------------------------------------------
build target,tf version,keras version,pillow version,size,arn
tf_keras_pillow,1.8.0,2.2.4,5.4.1,230M,{arn}

--------------------------------------------------------------------------------
/src/build_scripts/run-all.sh:
--------------------------------------------------------------------------------
#! /bin/bash

# Provision the build server, then build every target listed in
# build_targets.list from inside the mounted build directory
./setup-server.sh

for f in $(cat build_targets.list); do
    pushd build
    ./build-layer.sh "$f"
    popd
done

--------------------------------------------------------------------------------
/src/build_scripts/aws_regions:
--------------------------------------------------------------------------------
us-east-2
us-east-1
us-west-1
us-west-2
ap-south-1
ap-northeast-2
ap-southeast-1
ap-southeast-2
ap-northeast-1
ca-central-1
eu-central-1
eu-west-1
eu-west-2
eu-west-3
eu-north-1
sa-east-1

--------------------------------------------------------------------------------
/src/publish/tables/tensorflow_keras:
--------------------------------------------------------------------------------
build target,tf version,keras version,size,arn
tf_1.0_keras,1.0.1,2.2.4,169M,`{arn}`
tf_1.1_keras,1.1.0,2.2.4,174M,`{arn}`
tf_1.2_keras,1.2.1,2.2.4,178M,`{arn}`
tf_1.3_keras,1.3.0,2.2.4,184M,`{arn}`
tf_1.4_keras,1.4.1,2.2.4,199M,`{arn}`
tf_1.5_keras,1.5.1,2.2.4,213M,`{arn}`
tf_1.6_keras,1.6.0,2.2.4,215M,`{arn}`
tf_1.7_keras,1.7.1,2.2.4,218M,`{arn}`
tf_1.8_keras,1.8.0,2.2.4,223M,`{arn}`
tf_1.9_keras,1.9.0,2.2.4,227M,`{arn}`
tf_1.10_keras,1.10.1,2.2.4,236M,`{arn}`
tf_1.11_keras,1.11.0,2.2.4,231M,`{arn}`
tf_1.12_keras,1.12.0,2.2.4,282M,`{arn}`
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2019 Anton Paquin

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/src/publish/util.py:
--------------------------------------------------------------------------------
#! /usr/bin/env python

import os

import boto3


# Walk three levels up from this file to find the repository root
project_root = os.path.abspath(__file__)
for _ in range(3):
    project_root = os.path.dirname(project_root)

release_target = '2'
aws_bucket = 'antonpaquin-lambda-zip-'

target_regions = [
    'us-east-2',
    'us-east-1',
    'us-west-1',
    'us-west-2',
    'ap-south-1',
    'ap-northeast-2',
    'ap-southeast-1',
    'ap-southeast-2',
    'ap-northeast-1',
    'ca-central-1',
    'eu-central-1',
    'eu-west-1',
    'eu-west-2',
    'eu-west-3',
    'eu-north-1',
    'sa-east-1',
]


def s3_list_all(bucket, prefix):
    """Yield every object under `prefix`, paging with Marker as needed."""
    s3 = boto3.client('s3')
    request = {
        'Bucket': bucket,
        'Prefix': prefix,
    }
    has_more = True
    while has_more:
        response = s3.list_objects(**request)
        has_more = response['IsTruncated']
        if has_more:
            request['Marker'] = response['Contents'][-1]['Key']
        for item in response['Contents']:
            yield item


def fmt_build_name(name):
    # Replace anything that isn't alphanumeric with an underscore, so the
    # result is a valid lambda layer name
    allowed_chars = set([
        *map(chr, range(ord('a'), ord('z') + 1)),
        *map(chr, range(ord('A'), ord('Z') + 1)),
        *map(chr, range(ord('0'), ord('9') + 1)),
    ])
    return ''.join([ch if ch in allowed_chars else '_' for ch in name])
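
# Example usage (assumes AWS credentials with s3:ListBucket on the bucket):
#
#     for obj in s3_list_all(aws_bucket + 'us-east-1', 'layers/2/'):
#         print(obj['Key'])
#
# A single list_objects call returns at most 1000 keys; the Marker loop in
# s3_list_all pages through the rest.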
--------------------------------------------------------------------------------
/src/build_scripts/setup-server.sh:
--------------------------------------------------------------------------------
#! /bin/bash

# Attach and mount a larger storage device
mkdir build
sudo mkfs.ext4 /dev/nvme1n1
sudo mount /dev/nvme1n1 build
sudo chown -R ec2-user:ec2-user build

# Unzip the transfer file sent to the server
mv build_targets.zip build
mv build-layer.sh build
mv aws_regions build
pushd build
unzip build_targets.zip
rm build_targets.zip
popd

# Install some libraries needed to build openssl and python
sudo yum groupinstall -y \
    development

sudo yum install -y \
    zlib-devel \
    openssl-devel

# Install openssl from source
wget https://github.com/openssl/openssl/archive/OpenSSL_1_0_2l.tar.gz
tar -zxvf OpenSSL_1_0_2l.tar.gz
pushd openssl-OpenSSL_1_0_2l/
./config shared
make
sudo make install
export LD_LIBRARY_PATH=/usr/local/ssl/lib/
popd
rm -rf OpenSSL_1_0_2l.tar.gz openssl-OpenSSL_1_0_2l/

# Install python from source
wget https://www.python.org/ftp/python/3.6.6/Python-3.6.6.tar.xz
tar xJf Python-3.6.6.tar.xz
pushd Python-3.6.6
./configure
make
sudo make install
popd
sudo rm -rf Python-3.6.6.tar.xz Python-3.6.6

# Start up the installation virtualenv
sudo env PATH=$PATH pip3 install --upgrade virtualenv

# Add the "epel" yum repo and install inotify-tools
wget https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
sudo yum install -y \
    epel-release-latest-7.noarch.rpm
rm epel-release-latest-7.noarch.rpm
sudo yum install -y \
    inotify-tools

sudo pip install --upgrade awscli
--------------------------------------------------------------------------------
/src/publish/arn_table.py:
--------------------------------------------------------------------------------
#! /usr/bin/env python

import os

import boto3

from util import (
    project_root,
    release_target,
    aws_bucket,
    target_regions,
    s3_list_all,
    fmt_build_name,
)


def lambda_latest_version(name, region):
    lmbda = boto3.client('lambda', region_name=region)
    resp = lmbda.list_layer_versions(LayerName=name)
    return sorted(resp['LayerVersions'], key=lambda x: x['Version'])[-1]


def write_table(table_src, data):
    header = data[0].split(',')
    out_path = os.path.join(project_root, 'arn_tables', table_src + '.md')
    with open(out_path, 'w') as out_f:
        out_f.write('# ' + table_src + '\n\n')
        for region in target_regions:
            out_f.write('### ' + region + '\n')
            out_f.write(' | '.join(header))
            out_f.write('\n')
            out_f.write(' | '.join(['---' for _ in header]))
            out_f.write('\n')
            for row in data[1:]:
                if not row:
                    # Skip the empty string left over from the trailing newline
                    continue
                try:
                    fields = row.split(',')
                    build_target = fields[0]
                    lambda_version = lambda_latest_version(fmt_build_name(build_target), region)
                    context = {
                        'arn': lambda_version['LayerVersionArn'],
                    }
                    fmt_fields = [field.format(**context) for field in fields]
                    out_f.write(' | '.join(fmt_fields))
                    out_f.write('\n')
                except Exception:
                    print('Could not resolve ARN for row: {}'.format(row))
            out_f.write('\n\n')


def main():
    table_dir = os.path.join(project_root, 'src', 'publish', 'tables')
    table_srcs = os.listdir(table_dir)

    for table_src in table_srcs:
        table_fname = os.path.join(table_dir, table_src)
        with open(table_fname, 'r') as in_f:
            data = in_f.read().split('\n')
        write_table(table_src, data)


if __name__ == '__main__':
    main()
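
# For reference, each region section emitted to arn_tables/<table>.md looks
# roughly like this (the ARN below is illustrative, not a real one):
#
#     ### us-east-1
#     build target | tf version | keras version | size | arn
#     --- | --- | --- | --- | ---
#     tf_1.8_keras | 1.8.0 | 2.2.4 | 223M | `arn:aws:lambda:us-east-1:123456789012:layer:tf_1_8_keras:1`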
--------------------------------------------------------------------------------
/src/publish/publish.py:
--------------------------------------------------------------------------------
#! /usr/bin/env python

import os

import boto3

from util import (
    project_root,
    release_target,
    aws_bucket,
    target_regions,
    s3_list_all,
    fmt_build_name,
)


def get_description(name):
    with open(os.path.join(project_root, 'build_targets', name, 'description.txt'), 'r') as in_f:
        res = in_f.read()
    return res.strip()


def get_release_layers():
    release_items = s3_list_all(aws_bucket + target_regions[0], 'layers/{}/'.format(release_target))
    for item in release_items:
        if item['Key'].endswith('layer.zip'):
            build_target = item['Key'].split('/')[2]
            yield {
                'key': item['Key'],
                'lambda_name': fmt_build_name(build_target),
                'description': get_description(build_target),
                'build_target': build_target,
            }


def publish_layer(layer_spec):
    for region in target_regions:
        lmbda = boto3.client('lambda', region_name=region)
        resp = lmbda.publish_layer_version(
            LayerName=layer_spec['lambda_name'],
            Description=layer_spec['description'],
            Content={
                'S3Bucket': aws_bucket + region,
                'S3Key': layer_spec['key'],
            },
            CompatibleRuntimes=[
                'python3.6',
            ],
            LicenseInfo='MIT',
        )
        layer_version = resp['Version']
        # Make the new layer version publicly attachable
        lmbda.add_layer_version_permission(
            LayerName=layer_spec['lambda_name'],
            VersionNumber=layer_version,
            StatementId='publish',
            Action='lambda:GetLayerVersion',
            Principal='*',
        )
        print('Published {} in region {}'.format(layer_spec['lambda_name'], region))


def main():
    for layer_spec in get_release_layers():
        try:
            publish_layer(layer_spec)
        except Exception as err:
            print('Could not publish {}'.format(layer_spec['lambda_name']))
            print(err)


if __name__ == '__main__':
    main()
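
# Note: add_layer_version_permission with Principal='*' is what makes each
# published layer version attachable by anyone who has the ARN. Running this
# script assumes credentials that can call PublishLayerVersion and
# AddLayerVersionPermission in every target region.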
--------------------------------------------------------------------------------
/src/build_scripts/build-layer.sh:
--------------------------------------------------------------------------------
#! /bin/bash

LAMBDA_S3_BUCKET_PREFIX="antonpaquin-lambda-zip-"
RELEASE=2
LAYER_NAME="$1"
LAYER_ROOT="/home/ec2-user/build/$LAYER_NAME"
LAYER_UPLOAD_NAME="$(echo "$LAYER_NAME" | sed 's/[^a-zA-Z0-9]/_/g')"

pushd "$LAYER_NAME"

virtualenv -p python3 env
source env/bin/activate

# Pip install necessary python libraries
pip3 install -r requirements.txt

# We want to find out what files python actually uses in the process of
# running our script, so we'll set up a listener for all accessed files
# in the virtualenv
inotifywait \
    -m \
    -e access \
    -o inotifywait.list \
    --format "%w%f" \
    -r \
    "$VIRTUAL_ENV/lib/python3.6/site-packages/" &

# Make sure to save the PID so it can be killed later
INOTIFY="$!"

# Sleep to give inotify time to set up the watches
sleep 1

# Run a test, which should touch every file that the layer will need to run
mkdir build
cp test.py build

if [ -f pre_hook.sh ]; then
    ./pre_hook.sh
fi

pushd build
python3 test.py
kill $INOTIFY
popd

# Copy over all of the used files to the build directory
pushd build
for f in $(cat "$LAYER_ROOT/inotifywait.list"); do
    if [ -f "$f" ]; then
        REL="$(dirname "$f" | sed 's/.*site-packages\///g')"
        mkdir -p "$REL"
        cp "$f" "$REL"
    fi
done

# Copy all the python files, because they're small and tend to break
# things if they're absent
pushd "$VIRTUAL_ENV/lib/python3.6/site-packages/"
find . -name "*.py" | cut -c 3- > "$LAYER_ROOT/pydep.list"
popd

for f in $(cat "$LAYER_ROOT/pydep.list"); do
    cp "$VIRTUAL_ENV/lib/python3.6/site-packages/$f" "$LAYER_ROOT/build/$f" 2>/dev/null
done
popd

# And start the final zipping process
pushd build

# Strip unnecessary symbols from binaries
find . -name "*.so" | xargs strip

# Remove the leftover test script
rm test.py
popd

if [ -f hook.sh ]; then
    ./hook.sh
fi

# Zip up the build for lambda
mv build python
zip -r9 lambda.zip python/

# Freeze the env for later reference
pip freeze > pip.txt

# And copy it to an s3 bucket
for region in $(cat ../aws_regions); do
    aws s3 cp lambda.zip s3://"$LAMBDA_S3_BUCKET_PREFIX""$region"/layers/$RELEASE/$LAYER_NAME/layer.zip
    aws s3 cp pip.txt s3://"$LAMBDA_S3_BUCKET_PREFIX""$region"/layers/$RELEASE/$LAYER_NAME/pip.txt
done

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Tensorflow-Lambda-Layer
Lets you import Tensorflow + Keras from an AWS Lambda function

## What is this?
It's a lambda layer that includes Tensorflow, Keras, and Numpy. You can use it to deploy serverless machine learning models.

Serverless is especially nice when you want to serve a model that will be accessed infrequently, without paying for an always-on EC2 instance.

If you're a single developer or small org and all you want to do is [show off your binary classifier](http://isitanime.website), it's usually possible to stay within the free tier limits if you set things up right.

And even at larger scale, serverless brings a lot of benefits, like transparent scaling and the ability to mostly ignore the hardware.

The problem is that some packages (like Tensorflow) are too large for a standard deployment package, which makes them hard to use. This repo is an attempt to alleviate that problem.

## How do I use it?
Pick an ARN from the tables for the region and Tensorflow version you want (for example, `arn:aws:lambda:us-west-2:347034527139:layer:tf_1_11_keras:1`).

Tables:
- [tensorflow and keras](https://github.com/antonpaquin/Tensorflow-Lambda-Layer/blob/master/arn_tables/tensorflow_keras.md)
- [tensorflow, keras, and PIL](https://github.com/antonpaquin/Tensorflow-Lambda-Layer/blob/master/arn_tables/tensorflow_keras_pillow.md)

In the AWS Lambda management console, create a new function you want to use Tensorflow in, or pick an existing function.

Then click:
- Layers
- Add a layer
- Provide a layer version ARN

Paste the ARN in, add the layer, and you should be able to use the libraries as normal.
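
For example, once the layer is attached, a handler along these lines should run (a minimal sketch against the TF 1.x API; the handler name and the computation are placeholders):

```python
import numpy as np
import tensorflow as tf


def handler(event, context):
    # The layer provides the libraries; your model weights still need to
    # come from your deployment package, S3, or similar
    x = tf.constant(np.eye(3))
    with tf.Session() as sess:
        total = sess.run(tf.reduce_sum(x))
    return {'result': float(total)}
```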
## Build it yourself
The code involved in generating these layers is all included in `src` and `build_targets`.
It's a collection of shell scripts that construct, upload, and publish the lambda zipfiles. It reads AWS credentials from `$HOME/.aws`, and spawns an EC2 instance to actually run the build process (note: the instance is not shut down automatically, so remember to terminate it).

If you have a set of dependencies you'd like built into a layer, you should add a new directory, following this structure (a minimal sketch follows the list):

- `requirements.txt`: The pip packages to install
- `description.txt`: The description that will be attached to the published layer
- `hook.sh`: Extra commands that run as the last phase of the build step
- `test.py`: A python file that should trigger an access of every file used by the library in the course of its execution
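
For instance, a hypothetical `numpy_scipy` target (the name and packages here are only illustrations) would have a `requirements.txt` listing `numpy` and `scipy`, a one-line `description.txt`, and a `test.py` like:

```python
# Every import and call here touches files the layer needs at runtime,
# so the inotifywait listener in build-layer.sh records them for inclusion
import numpy as np
import scipy.linalg

m = np.eye(4) + np.random.rand(4, 4)
scipy.linalg.inv(m)
print('ok')
```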
If you send this in a pull request and it seems like something people will use, I'll run the build the next chance I get and add it here.

Or you can run it yourself with `aws-build-lambda.sh`. Make sure you know what this script is doing before you run it!

## I think you should build a layer with ...
Let me know! It's fairly low cost to add a new layer. It doesn't even need to involve tensorflow.

## Caveats
This repo minimizes a deployment package by:

- Only copying python source files and the files accessed when `test.py` is run
- Stripping symbols from shared objects

These steps usually produce good results, but they may end up leaving out something essential. If you see an error that you're not expecting, file an issue including the error and the code that generates it, and I'll see what I can fix.

Tensorflow 1.12 + Keras clocks in at 282M, which is too big to fit into a lambda layer. Unless I can find a way to further reduce the size, I can't support this combination.

--------------------------------------------------------------------------------
/src/build_scripts/aws-build-lambda.sh:
--------------------------------------------------------------------------------
#! /bin/bash

ROOT_DIR="/home/anton/Programming/TensorflowLambdaLayer"
SSH_KEY="/home/anton/.ssh/Nimitz-120518.pem"
SSH_KEY_NAME="Nimitz-12.05.18"

# Test for the shell dependencies of this script: "aws" and "jq"
if command -v aws > /dev/null 2>&1; then
    echo "aws found"
else
    echo "This script requires the \"aws\" command line utility"
    exit 1
fi
if command -v jq > /dev/null 2>&1; then
    echo "jq found"
else
    echo "This script requires the \"jq\" command line utility"
    exit 1
fi

# We need a security group to allow global SSH. If it doesn't exist already,
# create it
if aws ec2 describe-security-groups --group-names "GlobalSSH" > /dev/null; then
    echo "Security group already exists"
else
    aws ec2 create-security-group \
        --group-name "GlobalSSH" \
        --description "Allow 22 traffic in"

    aws ec2 authorize-security-group-ingress \
        --group-name "GlobalSSH" \
        --protocol "tcp" \
        --port 22 \
        --cidr "0.0.0.0/0"
fi

# Create the dev instance to build the python deploy zip
# A t3.medium seems to work out fine
INSTANCE_ID=$(\
    aws ec2 run-instances \
        --image-id "ami-4fffc834" \
        --key-name "$SSH_KEY_NAME" \
        --security-groups "GlobalSSH" \
        --instance-type "t3.medium" \
        --placement "AvailabilityZone=us-east-1b" \
        --count 1 \
        --block-device-mappings 'DeviceName=/dev/sda1,Ebs={VolumeSize=100,VolumeType=gp2}' \
    | jq -r .Instances[0].InstanceId \
)
echo "Spawned instance: $INSTANCE_ID"

# Build a zipfile of all the files we need to send to the instance
# First step: if one already exists, remove it
if [ -f "$ROOT_DIR/build/transfer.zip" ]; then
    rm "$ROOT_DIR/build/transfer.zip"
fi

# Zip the source directory
pushd "$ROOT_DIR/build_targets"
zip -r build_targets.zip *
mv build_targets.zip "$ROOT_DIR/build/build_targets.zip"
popd

ls -1 "$ROOT_DIR/build_targets" > "$ROOT_DIR/build/build_targets.list"

# Add all files that we need to build to the zipfile
zip \
    -r "$ROOT_DIR/build/transfer.zip" \
    --junk-paths \
    "$ROOT_DIR/build/build_targets.zip" \
    "$ROOT_DIR/src/build_scripts/setup-server.sh" \
    "$ROOT_DIR/src/build_scripts/build-layer.sh" \
    "$ROOT_DIR/src/build_scripts/run-all.sh" \
    "$ROOT_DIR/src/build_scripts/aws_regions" \
    "$ROOT_DIR/build/build_targets.list"

# Also add aws credentials so that we can cli it to an s3 bucket
pushd "$HOME"
zip \
    -r "$ROOT_DIR/build/transfer.zip" \
    -g \
    ".aws/"
popd

# Wait for the instance to start -- it takes a while to boot up
echo "Waiting for instance to start..."
aws ec2 wait instance-running \
    --instance-ids "$INSTANCE_ID"
echo "Started"

# Get the public IP of the instance so we can SSH to it
echo "Fetching public ip..."
INSTANCE_IP=$(\
    aws ec2 describe-instances \
        --instance-ids "$INSTANCE_ID" \
        --query "Reservations[].Instances[].PublicIpAddress" \
        --output=text
)
echo "Found ip: $INSTANCE_IP"

# Write a helpful SSH host file pointing to the instance
cat > ssh_aws <