├── test ├── fixtures │ ├── creds.test.json │ └── message.test.json ├── watch.test.sh ├── template.test.js ├── manual.ecs-conex.sh ├── lib │ └── utils.sh ├── cleanup.test.js └── utils.test.sh ├── scripts ├── send-job.sh ├── bootstrap.sh ├── watch.sh └── cleanup.js ├── .eslintrc ├── .circleci └── config.yml ├── docs ├── logging.md ├── npm.md ├── removing-images.md ├── alarms.md ├── debugging-failures.md └── getting-started.md ├── Dockerfile ├── package.json ├── cloudformation ├── ci.template.js └── ecs-conex.template.js ├── ecs-conex.sh ├── changelog.md ├── readme.md └── utils.sh /test/fixtures/creds.test.json: -------------------------------------------------------------------------------- 1 | { 2 | "AccessKeyId": "test_AccessKeyId", 3 | "SecretAccessKey": "test_SecretAccessKey", 4 | "SessionToken": "test_SessionToken" 5 | } 6 | -------------------------------------------------------------------------------- /test/fixtures/message.test.json: -------------------------------------------------------------------------------- 1 | { 2 | "ref": "test", 3 | "after": "test", 4 | "before": "test", 5 | "repository": { 6 | "name": "test", 7 | "owner": { 8 | "name": "test" 9 | } 10 | }, 11 | "pusher": { 12 | "name": "test" 13 | }, 14 | "deleted": "test" 15 | } 16 | -------------------------------------------------------------------------------- /test/watch.test.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | source $(dirname $0)/lib/utils.sh 5 | 6 | testId=0 7 | PASSED=0 8 | FAILED=0 9 | 10 | subShell () { 11 | exitCode=$(echo $?) 12 | assert 'equal' $exitCode 1 13 | } 14 | 15 | tag_test 'GithubAccessToken with inadequate permissions should throw helpful error message' 16 | export GithubAccessToken=ffffffffffffffffffffffffffffffffffffffff 17 | subShell $(./$(dirname $0)/../scripts/watch.sh) -------------------------------------------------------------------------------- /scripts/send-job.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | Owner=$1 6 | Repo=$2 7 | AfterSha=$3 8 | BeforeSha=${4:-0000000000000000000000000000000000000000} 9 | Topic=$5 10 | 11 | aws sns publish \ 12 | --topic-arn ${Topic} \ 13 | --subject "webhook" \ 14 | --message "{\"ref\":\"refs/heads/test-branch\",\"after\":\"${AfterSha}\",\"before\":\"${BeforeSha}\",\"repository\":{\"name\":\"${Repo}\",\"owner\":{\"name\":\"${Owner}\"}},\"pusher\":{\"name\":\"test-user\"}}" 15 | -------------------------------------------------------------------------------- /test/template.test.js: -------------------------------------------------------------------------------- 1 | var test = require('tape'); 2 | var cf = require('@mapbox/cloudfriend'); 3 | var path = require('path'); 4 | 5 | test('template is valid', function(assert) { 6 | cf.validate(path.resolve(__dirname, '..', 'cloudformation', 'ecs-conex.template.js')) 7 | .then(function() { 8 | assert.pass('success'); 9 | assert.end(); 10 | }) 11 | .catch(function(err) { 12 | assert.ifError(err, 'failure'); 13 | assert.end(); 14 | }); 15 | }); 16 | -------------------------------------------------------------------------------- /.eslintrc: -------------------------------------------------------------------------------- 1 | { 2 | "rules": { 3 | "indent": [2, 2], 4 | "quotes": [2, "single"], 5 | "quote-props": [2, "as-needed"], 6 | "no-console": [1], 7 | "semi": [2, "always"], 8 | "space-before-function-paren": [2, "never"], 9 | 
"object-curly-spacing": [2, "always"], 10 | "array-bracket-spacing": [2, "never"], 11 | "comma-spacing": [2, { "before": false, "after": true }], 12 | "key-spacing": [2, { "beforeColon": false, "afterColon": true }] 13 | }, 14 | "env": { 15 | "node": true, 16 | "es6": true 17 | }, 18 | "globals": { 19 | "process": true, 20 | "module": true, 21 | "require": true 22 | }, 23 | "extends": "eslint:recommended" 24 | } 25 | -------------------------------------------------------------------------------- /scripts/bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eux 4 | 5 | # Log docker client into ECR 6 | eval "$(aws ecr get-login --region us-east-1 --no-include-email)" || \ 7 | eval "$(aws ecr get-login --region us-east-1)" 8 | 9 | # Make sure the ECR repository exists 10 | aws ecr describe-repositories --region us-east-1 --repository-names ecs-conex > /dev/null 2>&1 || \ 11 | aws ecr create-repository --region us-east-1 --repository-name ecs-conex > /dev/null 12 | 13 | # Fetch the ECR repository URI 14 | desc=$(aws ecr describe-repositories --region us-east-1 --repository-names ecs-conex --output json) 15 | uri=$(node -e "console.log(${desc}.repositories[0].repositoryUri);") 16 | 17 | # Build the docker image 18 | docker build -t ecs-conex ./ 19 | 20 | # Tag the image into the ECR repository 21 | docker tag ecs-conex "${uri}:$(git rev-parse HEAD)" 22 | 23 | # Push the image into the ECR repository 24 | docker push "${uri}:$(git rev-parse HEAD)" 25 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | jobs: 3 | build: 4 | working_directory: ~/mapbox/ecs-conex 5 | environment: 6 | SHELL: /bin/bash 7 | docker: 8 | - image: circleci/node:8.11 9 | steps: 10 | - checkout 11 | - setup_remote_docker: 12 | docker_layer_caching: true 13 | - run: docker info >/dev/null 2>&1 || service docker start 14 | - run: npm install 15 | - run: npm test 16 | node6: 17 | working_directory: ~/mapbox/ecs-conex 18 | environment: 19 | SHELL: /bin/bash 20 | docker: 21 | - image: circleci/node:6.11 22 | steps: 23 | - checkout 24 | - setup_remote_docker: 25 | docker_layer_caching: true 26 | - run: docker info >/dev/null 2>&1 || service docker start 27 | - run: npm install 28 | - run: npm test 29 | workflows: 30 | version: 2 31 | build_and_test: 32 | jobs: 33 | - build 34 | - node6 35 | -------------------------------------------------------------------------------- /docs/logging.md: -------------------------------------------------------------------------------- 1 | # Logging 2 | 3 | When used in conjunction with [ecs-watchbot](https://github.com/mapbox/ecs-watchbot), logs from ecs-conex containers will be written to a [CloudWatch Log Group](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html). Watchbot creates this resource with the name `--`. Within your log group, individual message logs will contain the `MessageId` — `39340547-4ec7-413f-bcd4-cdfbdf21a61c` in the following example — in the log stream. 4 | 5 | The logs will be formatted using [fastlog](https://github.com/willwhite/fastlog), allowing you to separate them from other logs that may be written to the same file. 
An example log output: 6 | 7 | ``` 8 | [Tue, 05 Jul 2016 06:10:51 GMT] [ecs-conex] [39340547-4ec7-413f-bcd4-cdfbdf21a61c] processing commit abcd by chuck to refs/heads/my-branch of my-org/my-repo 9 | ``` 10 | 11 | This log breaks down as follows, where `messageId` is a common identifier for all the ecs-conex logs related to processing a single push: 12 | 13 | ``` 14 | [timestamp] [ecs-conex] [messageId] message 15 | ``` 16 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu 2 | 3 | # Installations 4 | RUN apt-get update -qq && apt-get install -y curl git python-pip parallel jq wget 5 | RUN pip install awscli 6 | RUN curl https://nodejs.org/dist/v8.9.4/node-v8.9.4-linux-x64.tar.gz | tar zxC /usr/local --strip-components=1 7 | 8 | # Setup watchbot for logging and env var decryption 9 | RUN wget https://s3.amazonaws.com/watchbot-binaries/linux/v4.11.1/watchbot -O /usr/local/bin/watchbot 10 | RUN chmod +x /usr/local/bin/watchbot 11 | RUN npm install -g decrypt-kms-env@^2.0.1 12 | 13 | # Setup application directory 14 | RUN mkdir -p /usr/local/src/ecs-conex 15 | WORKDIR /usr/local/src/ecs-conex 16 | 17 | ENV conex_docker_version "17.12.1" 18 | RUN curl -sL https://download.docker.com/linux/static/stable/x86_64/docker-${conex_docker_version}-ce.tgz > docker-${conex_docker_version}-ce.tgz 19 | RUN tar -xzf docker-${conex_docker_version}-ce.tgz && cp docker/docker /usr/local/bin/docker && chmod 755 /usr/local/bin/docker 20 | 21 | # Copy files into the container 22 | COPY ./*.sh ./ 23 | 24 | # Use docker on the host instead of running docker-in-docker 25 | # https://jpetazzo.github.io/2015/09/03/do-not-use-docker-in-docker-for-ci/ 26 | VOLUME /var/run/docker.sock 27 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "ecs-conex", 3 | "version": "0.5.0", 4 | "description": "ECS Container Express", 5 | "main": "ecs-conex.sh", 6 | "scripts": { 7 | "pretest": "eslint cloudformation test", 8 | "test": "test/utils.test.sh && test/watch.test.sh && tape test/template.test.js && tape test/cleanup.test.js", 9 | "template": "node_modules/.bin/build-template cloudformation/ecs-conex.template.js" 10 | }, 11 | "bin": { 12 | "cleanup": "scripts/cleanup.js", 13 | "ecs-conex-watch": "scripts/watch.sh" 14 | }, 15 | "repository": { 16 | "type": "git", 17 | "url": "git+https://github.com/mapbox/ecs-conex.git" 18 | }, 19 | "author": "Mapbox", 20 | "license": "ISC", 21 | "bugs": { 22 | "url": "https://github.com/mapbox/ecs-conex/issues" 23 | }, 24 | "homepage": "https://github.com/mapbox/ecs-conex#readme", 25 | "dependencies": { 26 | "@mapbox/cloudfriend": "^1.10.0", 27 | "@mapbox/hookshot": "^5.0.1", 28 | "ajv": "^6.0.0", 29 | "aws-sdk": "^2.282.1", 30 | "d3-queue": "^3.0.2", 31 | "inquirer": "^6.0.0", 32 | "minimist": "^1.2.0", 33 | "moment": "^2.14.1", 34 | "request": "^2.74.0", 35 | "underscore": "^1.8.3" 36 | }, 37 | "devDependencies": { 38 | "@mapbox/watchbot": "^4.11.1", 39 | "eslint": "^5.2.0", 40 | "pinkie-promise": "^2.0.1", 41 | "sinon": "^6.1.4", 42 | "tape": "^4.6.0" 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /docs/npm.md: -------------------------------------------------------------------------------- 1 | ## Working with GitHub repositories requiring private npm modules 
2 | 3 | In order to require private npm modules, you will need to export an npm access token environment variable in the ecs-conex build, and write that access token to an `.npmrc` file in your GitHub repository's Dockerfile. 4 | 5 | 1. Your ecs-conex CloudFormation stack was provided with an npm access token in the parameter `NPMAccessToken`. This exposes your token to the GitHub repository you are watching. 6 | 2. In your GitHub repository's Dockerfile, specify that you'd like to pass the npm access token at build-time, write this token to `.npmrc` prior to dependency installation: 7 | 8 | ```sh 9 | # define build arguments 10 | ARG NPMAccessToken 11 | 12 | # create .npmrc file 13 | RUN echo "//registry.npmjs.org/:_authToken=$NPMAccessToken" > ./.npmrc 14 | ONBUILD COPY .npmrc ./ 15 | 16 | # install app dependencies. DO NOT change directories before doing so 17 | RUN npm install 18 | 19 | # Clean up 20 | RUN rm -f ./.npmrc 21 | ``` 22 | 23 | Note that your Dockerfile **must place the `.npmrc` file in the directory where you will `npm install`**. 24 | 25 | During local Docker builds, be sure to pass in the NPMAccessToken as part of the build arg: 26 | `docker build --build-arg NPMAccessToken=ABCDEFGHIJKLMNOP -t your-repo ./` 27 | 28 | Checkout the [NPM docs](https://docs.npmjs.com/private-modules/docker-and-private-modules) for more on Docker and private modules. 29 | -------------------------------------------------------------------------------- /test/manual.ecs-conex.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | 5 | # IMPORTANT: must be run from the root directory of ecs-conex 6 | # env vars must be set: 7 | # - GithubAccessToken 8 | # - AWS_ACCESS_KEY_ID 9 | # - AWS_SECRET_ACCESS_KEY 10 | # - AWS_SESSION_TOKEN (optional) 11 | # - TMPDIR 12 | 13 | Owner=$1 14 | Repo=$2 15 | AccountId=$3 16 | AfterSha=$4 17 | BeforeSha=${5:-0000000000000000000000000000000000000000} 18 | GithubAccessToken=${GithubAccessToken} 19 | AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} 20 | AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} 21 | AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN:-} 22 | NPMAccessToken=${NPMAccessToken} 23 | ApproximateReceiveCount="0" 24 | 25 | docker build -t ecs-conex ./ 26 | docker run \ 27 | -v $TMPDIR:/mnt/data \ 28 | -v /var/run/docker.sock:/var/run/docker.sock \ 29 | -e MessageId=test \ 30 | -e AccountId=${AccountId} \ 31 | -e StackRegion=us-east-1 \ 32 | -e GithubAccessToken=${GithubAccessToken} \ 33 | -e Message="{\"ref\":\"refs/heads/test-branch\",\"after\":\"${AfterSha}\",\"before\":\"${BeforeSha}\",\"repository\":{\"name\":\"${Repo}\",\"owner\":{\"name\":\"${Owner}\"}},\"pusher\":{\"name\":\"test-user\"}}" \ 34 | -e AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID} \ 35 | -e AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY} \ 36 | -e AWS_SESSION_TOKEN=${AWS_SESSION_TOKEN} \ 37 | -e NPMAccessToken=${NPMAccessToken} \ 38 | -e ApproximateReceiveCount=${ApproximateReceiveCount} \ 39 | ecs-conex 40 | -------------------------------------------------------------------------------- /cloudformation/ci.template.js: -------------------------------------------------------------------------------- 1 | var cf = require('cloudfriend'); 2 | 3 | module.exports = { 4 | AWSTemplateFormatVersion: '2010-09-09', 5 | Description: 'test user for ecs-conex', 6 | Resources: { 7 | User: { 8 | Type: 'AWS::IAM::User', 9 | Properties: { 10 | Policies: [ 11 | { 12 | PolicyName: 'validate-templates', 13 | PolicyDocument: { 14 | Statement: [ 15 | { 16 | 
Action: ['ecr:*'],
17 |               Effect: 'Allow',
18 |               Resource: cf.join(['arn:aws:ecr:*:', cf.accountId, ':repository/ecs-conex-test'])
19 |             },
20 |             {
21 |               Action: ['ecr:GetAuthorizationToken', 'ecr:CreateRepository'],
22 |               Effect: 'Allow',
23 |               Resource: '*'
24 |             },
25 |             {
26 |               Action: ['cloudformation:ValidateTemplate'],
27 |               Effect: 'Allow',
28 |               Resource: '*'
29 |             }
30 |           ]
31 |         }
32 |       }
33 |     ]
34 |   }
35 | },
36 | AccessKey: {
37 |   Type: 'AWS::IAM::AccessKey',
38 |   Properties: {
39 |     UserName: cf.ref('User')
40 |   }
41 | }
42 | },
43 | Outputs: {
44 |   AccessKeyId: {
45 |     Value: cf.ref('AccessKey')
46 |   },
47 |   SecretAccessKey: {
48 |     Value: cf.getAtt('AccessKey', 'SecretAccessKey')
49 |   }
50 | }
51 | };
52 | 
--------------------------------------------------------------------------------
/docs/removing-images.md:
--------------------------------------------------------------------------------
1 | # Removing old ECR registry images
2 | 
3 | ```sh
4 | node scripts/cleanup.js <user> <repo> [options]
5 | ```
6 | 
7 | The cleanup script accepts the following options:
8 | 
9 | * `--maximum` The number of images to keep in the ECR registry. For example, if you want to keep 700 images in the ECR registry, you would pass the `--maximum=700` flag. The default value is 750.
10 | * `--blacklist` A comma-separated list of imageTags not subject to deletion. For example, if you want to ensure that imageTags `<tag1>` and `<tag2>` are not deleted, you would pass the `--blacklist=<tag1>,<tag2>` flag.
11 | * `--region` The AWS region to clean up. Defaults to `us-east-1`. If a registry exists in multiple regions, you'll need to run cleanup for each.
12 | 
13 | You will need to have two environment variables set in your terminal:
14 | 
15 | * `GithubAccessToken`, and
16 | * `RegistryId`, which you can retrieve from your repository URL. The URL should have the format `<registry-id>.dkr.ecr.<region>.amazonaws.com`, where `<region>` is the AWS region that contains your ECR registry.
17 | 
18 | If the ECR registry size is not greater than the desired maximum, the cleanup script will not run. There are certain types of imageTags that will never be subject to deletion:
19 | 
20 | * ImageTags that do not resemble a Gitsha (a 40-character hexadecimal string),
21 | * ImageTags that are specified in the `--blacklist` flag,
22 | * ImageTags that cannot be retrieved from GitHub, and
23 | * ImageTags that don't have associated commit dates on GitHub.
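For example, a hypothetical invocation that keeps at most 500 images, protects two release tags, and runs against `us-west-2` (the owner, repository, and tag values below are placeholders):

```sh
# GithubAccessToken and RegistryId must already be exported
node scripts/cleanup.js my-org my-repo --maximum=500 --blacklist=v1.0.0,v1.1.0 --region=us-west-2
```

The script prints a `[will-delete]`/`[wont-delete]` line per image and asks for confirmation before deleting anything.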
24 | -------------------------------------------------------------------------------- /ecs-conex.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | set -o pipefail 5 | 6 | regions=(us-east-1 us-west-2 eu-west-1) 7 | bucket_regions=($ImageBucketRegions) 8 | tmpdir="$(mktemp -d /tmp/XXXXXX)" 9 | source utils.sh 10 | 11 | function main() { 12 | echo "checking docker version compatibility" 13 | version-check 14 | 15 | echo "checking docker configuration" 16 | docker version > /dev/null 17 | 18 | echo "checking environment configuration" 19 | MessageId=${MessageId} 20 | Message=${Message} 21 | AccountId=${AccountId} 22 | GithubAccessToken=${GithubAccessToken} 23 | StackRegion=${StackRegion} 24 | ApproximateReceiveCount=${ApproximateReceiveCount} 25 | 26 | echo "checking job receive count" 27 | check_receives 28 | 29 | echo "parsing received message" 30 | parse_message 31 | 32 | echo "processing commit ${after} by ${user} to ${ref} of ${owner}/${repo}" 33 | 34 | status="pending" 35 | echo "sending ${status} status to github" 36 | github_status "${status}" "ecs-conex is building an image" 37 | [ "${deleted}" == "true" ] && exit 0 38 | 39 | git clone -q https://${GithubAccessToken}@github.com/${owner}/${repo} ${tmpdir} 40 | cd ${tmpdir} && git checkout -q $after || exit 3 41 | 42 | echo "looking for dockerfile" 43 | check_dockerfile ./Dockerfile 44 | 45 | echo "gather local credentials and setup --build-arg" 46 | credentials ./Dockerfile 47 | 48 | echo "logging into ECR repositories in ${regions[*]}" 49 | ecr_logins "${regions[@]}" 50 | 51 | echo "building new image" 52 | docker build --no-cache ${args} --tag ${repo}:${after} ${tmpdir} 53 | 54 | echo "writing images to ECR" 55 | docker_push 56 | 57 | echo "conditionally saving image tarball" 58 | docker_save 59 | 60 | echo "conditionally writing image tarballs to S3" 61 | bucket_push 62 | 63 | echo "completed successfully" 64 | } 65 | 66 | trap "cleanup" EXIT 67 | main 2>&1 68 | -------------------------------------------------------------------------------- /changelog.md: -------------------------------------------------------------------------------- 1 | ### v0.5.0 2 | - Pushes tarballs of images to S3 optionally 3 | - Detects the appropriate Docker versions to include, and alarms if missing 4 | - Documentation updates 5 | - Runs on @mapbox/ecs-watchbot version 4+ 6 | 7 | ### v0.4.0 8 | 9 | - ecs-conex's container can now run on a host using Docker 1.12.6 or 17.03.1-ce 10 | - Fail helpfully when the GithubAccessToken used with conex watch does not have adequate permissions. 
11 | 12 | ### v0.3.1 13 | 14 | - ecs-watchbot@1.0.4 allows LogAggregationFunction to be truly an optional stack parameter 15 | - fixes package.json version identifier 16 | 17 | ### v0.3.0 18 | 19 | - Ensures that both git tag and git sha tagged docker images get pushed to registry 20 | - GithubAccessToken stack parameter gets passed via `--build-arg` for accessing private repositories 21 | - Fixes an issue where secure environment variables were not being decrypted 22 | 23 | ### v0.2.0 24 | 25 | - Builds are performed with `--no-cache`, and images that were produced are cleaned up after being uploaded to ECR 26 | - CloudFormation template overhaul w/ Watchbot v0.0.7 27 | - Adds NPMAccessToken stack parameter and passes to builds via `--build-arg` if requested 28 | - Passes AWS credentials from the host EC2 to the build via `--build-arg` if requested 29 | - Logs are aggregated in CloudWatch Logs, optionally sent to a Lambda function via `SubscriptionFilter` if a function's ARN is provided 30 | - Failure notifications now contain information about the resource that ran the task, as well as excerpts from the logs prior to the failure. 31 | - Removes a job from SQS if it has been tried 3 times 32 | - Will no longer overwrite an existing image in ECR 33 | - Adds unit tests 34 | 35 | ### v0.1.0 36 | 37 | - All logs sent to syslog 38 | - Runs docker v1.11.1 39 | - Provides Github status notifications 40 | - Quiet build logs 41 | - Handles push events from deleted branches 42 | 43 | ### v0.0.1 44 | 45 | - First sketch of ECS container express 46 | -------------------------------------------------------------------------------- /readme.md: -------------------------------------------------------------------------------- 1 | # [deprecated] ecs-conex 2 | 3 | :warning: This repository is deprecated and will no longer be maintained :warning:. 4 | 5 | If you’re looking for alternatives to building docker images for AWS ECS, we recommend checking out [AWS CodeBuild](https://aws.amazon.com/codebuild/). 6 | 7 | ## What is ecs-conex? 8 | 9 | ECS Container Express is a continuous integration service for building [Docker](https://www.docker.com/) images and uploading them to [ECR](https://aws.amazon.com/ecr/) repositories in response to push events to Github repositories. 10 | 11 | ### Dockerfile 12 | 13 | The [Dockerfile](https://docs.docker.com/engine/reference/builder/) contains the commands required to build an image, or snapshot of your repository, when you push to GitHub. This file is located in the root directory of your application code. If you are using private npm modules, your Dockerfile might require some additional commands as listed over [here](https://github.com/mapbox/ecs-conex/blob/master/docs/npm.md) 14 | 15 | ### ECR Repository 16 | 17 | ecs-conex will create one ECR repository for each Github repository, and each time a push is made to the Github repository, a Docker image will be created for the most recent commit in the push. The image will be tagged with the SHA of that most recent commit. Also, if the most recent commit represents a git tag, the tag's name will also become an image in the ECR repository. 18 | 19 | ## Usage 20 | 21 | You only need to run ecs-conex's `watch.sh` script once to subscribe your repository to the ecs-conex webhook. For more information about associating these resources, see the [Getting started](./docs/getting-started.md) documentation. 
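For example, to register the repository in your current working directory with a hypothetical stack named `ecs-conex-production` in `us-east-1` (the token value is a placeholder):

```sh
$ GithubAccessToken=abcdefghi /path/to/ecs-conex/scripts/watch.sh us-east-1:ecs-conex-production
```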
22 | 23 | ## Documentation 24 | 25 | - [Getting started](./docs/getting-started.md) 26 | - [Working with NPM private modules](./docs/npm.md) 27 | - [Logging](./docs/logging.md) 28 | - [Debugging failures](./docs/debugging-failures.md) 29 | - [Removing old ECR registry images](./docs/removing-images.md) 30 | -------------------------------------------------------------------------------- /docs/alarms.md: -------------------------------------------------------------------------------- 1 | ### MaxPendingTime 2 | 3 | #### What 4 | 5 | Time between tasks getting created and actually starting (i.e. staying in PENDING state) has gone above 120 seconds. 6 | 7 | #### Problem 8 | 9 | Long pending times could contribute to backup of minutes to hours for building docker images. When the cluster is under heavy load it's possible for the Agent Control Service (ACS) to throttle state change requests from the ecs-agent. This usually causes a buildup of tasks hanging in the `PENDING` state for a long time. One way to check if the cluster is in this state is to check the number of tasks in the `PENDING` state on the cluster. If there are more `PENDING` than 10 tasks per host instance on the cluster, it's very likely the ACS service is throttling your state change requests. 10 | 11 | #### Solution 12 | 13 | If the ACS service is getting throttled (see above), check through the running tasks and see if there is one service or particular family of tasks that is starting and stopping very rapidly on the cluster. If it's possible, scale down that process to return conex pending time to normal. 14 | 15 | ### Docker major version mismatch 16 | 17 | #### What 18 | 19 | The docker major version on the ecs-conex worker is higher/lower than the docker major version on the host EC2 20 | 21 | #### Problem 22 | 23 | ecs-conex workers do not run the same version of Docker as their host EC2s. While this is not problematic, we would recommend ensuring that the two versions of Docker at least have matching major versions. 24 | 25 | #### Solution 26 | 27 | Update the `conex_docker_version` variable in the Dockerfile, to the _most recent version_ of Docker that has the same major version as the host Docker version. For example, if the `host_docker_version` is `18.03.1`, the `conex_docker_version` can be `18.*` (and not necessarily `18.03.1`), but **cannot** be `17.*`. A full list of permissible Docker versions is available on https://download.docker.com/linux/static/stable/aarch64/ 28 | -------------------------------------------------------------------------------- /docs/debugging-failures.md: -------------------------------------------------------------------------------- 1 | # Debugging failures 2 | 3 | When a build fails, a notification is sent to an SNS topic and forwarded to the `WatchbotNotificationEmail` that was provided when the ecs-conex stack was created. 
A notification will look similar to this:
4 | 
5 | ```
6 | At Tue, 26 Jul 2016 23:29:50 GMT, processing message a7492004-8ca8-4322-9299-2e82bb649163 failed on ecs-conex-production
7 | 
8 | Task outcome: delete & notify
9 | Task stopped reason: Essential container in task exited
10 | 
11 | Message information:
12 | MessageId: a7492004-8ca8-4322-9299-2e82bb649163
13 | Subject: webhook
14 | Message: {"ref":"refs/heads/test-branch","after":"81e48385715d60cae6f6d9ae818d8148590a9902","before":"c2abf76a55709b2f5eb27eeb1c0d33d4408ea963","repository":{"name":"ecs-conex","owner":{"name":"mapbox"}},"pusher":{"name":"rclark"}}
15 | SentTimestamp: 1469575768248
16 | ApproximateFirstReceiveTimestamp: 1469575768250
17 | ApproximateReceiveCount: 1
18 | 
19 | Runtime resources:
20 | Cluster ARN: arn:aws:ecs:us-east-1:123456789012:cluster/ecs-cluster-production
21 | Instance ARN: arn:aws:ecs:us-east-1:123456789012:container-instance/2e14b317-0909-4ecc-ab88-d94fe64d2167
22 | Task ARN: arn:aws:ecs:us-east-1:123456789012:task/798b49eb-49d7-4abb-a305-82a6e723caf6
23 | ```
24 | 
25 | First of all, check the `Message` JSON to identify the commit that caused the failure, the repository that was being built, and the person responsible for the commit.
26 | 
27 | When used in conjunction with [ecs-watchbot](https://github.com/mapbox/ecs-watchbot), logs from ecs-conex containers will be written to a [CloudWatch Log Group](http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-logs-loggroup.html). Watchbot creates this resource with the name `--`. Within your log group, individual message logs will contain the `MessageId` — `a7492004-8ca8-4322-9299-2e82bb649163` in this example — in the log stream.
28 | 
29 | If questions remain, the `Runtime resources` identify the ECS cluster, the EC2 instance, and the ECS task that attempted the build. You can use these for closer inspection via further ECS API requests.
30 | 
--------------------------------------------------------------------------------
/scripts/watch.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -eu
4 | 
5 | GithubAccessToken=${GithubAccessToken}
6 | stack=${1:-"us-east-1:ecs-conex-production"}
7 | region=$(echo $stack | cut -d : -f 1)
8 | name=$(echo $stack | cut -d : -f 2)
9 | 
10 | outputs=$(aws cloudformation describe-stacks --region ${region} --stack-name ${name} --query 'Stacks[0].Outputs' --output json)
11 | secret=$(node -e "console.log(${outputs}.find(function(o) { return o.OutputKey === 'WatchbotWebhookSecretOutput'}).OutputValue);")
12 | url=$(node -e "console.log(${outputs}.find(function(o) { return o.OutputKey === 'WatchbotWebhookEndpointOutput'}).OutputValue);")
13 | 
14 | remote=$(git config --get remote.origin.url)
15 | repo=$(node -e "console.log(require('path').basename('${remote}', '.git'));")
16 | owner=$(node -e "console.log(require('path').dirname('${remote}').split(':').slice(-1)[0].split('/').slice(-1)[0]);")
17 | 
18 | # Does the GithubAccessToken have adequate permissions?
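# (HEAD request to the GitHub hooks endpoint; any response code above 399 is treated as inadequate permissions)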
19 | responseCode=$(curl -IsL https://api.github.com/repos/${owner}/${repo}/hooks?access_token=${GithubAccessToken} | head -n 1 | cut -d' ' -f 2)
20 | 
21 | if [[ ${responseCode} -gt 399 ]];
22 | then
23 |   echo "Make sure that the GitHub user corresponding to the token stored in ${GithubAccessToken} is listed as a collaborator and has permission to read from ${repo}"
24 |   exit 1
25 | fi
26 | 
27 | hooks=$(curl -sL https://api.github.com/repos/${owner}/${repo}/hooks?access_token=${GithubAccessToken})
28 | 
29 | existing=$(node -e "var exists = ${hooks}.find(function(hook) { return hook.config.url === '${url}'; }); console.log(exists ? exists.id : '');")
30 | 
31 | if [ -z "$existing" ]; then
32 |   curl -sL \
33 |     --request POST \
34 |     --header "Content-Type: application/json" \
35 |     --data "{\"name\":\"web\",\"active\":true,\"config\":{\"url\":\"${url}\",\"secret\":\"${secret}\",\"content_type\":\"json\"}}" \
36 |     https://api.github.com/repos/${owner}/${repo}/hooks?access_token=${GithubAccessToken}
37 | else
38 |   curl -sL \
39 |     --request PATCH \
40 |     --header "Content-Type: application/json" \
41 |     --data "{\"name\":\"web\",\"active\":true,\"config\":{\"url\":\"${url}\",\"secret\":\"${secret}\",\"content_type\":\"json\"}}" \
42 |     https://api.github.com/repos/${owner}/${repo}/hooks/${existing}?access_token=${GithubAccessToken}
43 | fi
--------------------------------------------------------------------------------
/test/lib/utils.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -eu
4 | set -o pipefail
5 | 
6 | function copy_func() {
7 |   declare -F $1 >/dev/null || (echo "Error: Can't find function '$1' to copy" && exit 255)
8 |   eval "$(echo "${2}()"; declare -f ${1} | tail -n +2)"
9 | }
10 | 
11 | function tag_test() {
12 |   label=$1
13 |   echo -e "\n# test ${label}"
14 | }
15 | 
16 | function assert () {
17 |   testId=$((testId+1))
18 |   evaluation=$1
19 |   result=$2
20 |   expected=$3
21 |   message=${4:-""}
22 | 
23 |   # equal
24 |   if [ "${evaluation}" == "equal" ]; then
25 |     if [[ -z ${message} ]]; then
26 |       message="should be equal"
27 |     fi
28 | 
29 |     if [ "${result}" != "${expected}" ]; then
30 |       failed "${message}" "${expected}" "${result}"
31 |     else
32 |       passed "${message}"
33 |     fi
34 |   fi
35 | 
36 |   # not equal
37 |   if [ "${evaluation}" == "notEqual" ]; then
38 |     if [[ -z ${message} ]]; then
39 |       message="should not equal"
40 |     fi
41 | 
42 |     if [ "${result}" == "${expected}" ]; then
43 |       failed "${message}" "${expected}" "${result}"
44 |     else
45 |       passed "${message}"
46 |     fi
47 |   fi
48 | 
49 |   # contains
50 |   if [ "${evaluation}" == "contains" ]; then
51 |     if [[ -z ${message} ]]; then
52 |       message="should contain"
53 |     fi
54 | 
55 |     if [[ "${result}" != *"${expected}"* ]]; then
56 |       failed "${message}" "string that contains: \"${expected}\"" "${result}"
57 |     else
58 |       passed "${message}"
59 |     fi
60 |   fi
61 | 
62 |   # does not contain
63 |   if [ "${evaluation}" == "doesNotContain" ]; then
64 |     if [[ -z ${message} ]]; then
65 |       message="should not contain"
66 |     fi
67 | 
68 |     if [[ "${result}" == *"${expected}"* ]]; then
69 |       failed "${message}" "string that does not contain: \"${expected}\"" "${result}"
70 |     else
71 |       passed "${message}"
72 |     fi
73 |   fi
74 | }
75 | 
76 | function passed() {
77 |   PASSED=$((PASSED+1))
78 |   message=$1
79 |   echo -e "ok ${testId} ${message}"
80 | }
81 | 
82 | function failed() {
83 |   FAILED=$((FAILED+1))
84 |   message=$1
85 |   expected=$2
86 |   actual=$3
87 |   echo -e "not ok ${testId} ${message}\n ---\n expected: ${expected}\n actual: ${actual}\n ---"
88 | }
89 | 
90 | function summarize() {
91 |   echo -e "\n# tests $((PASSED + FAILED))"
92 |   echo -e "# pass ${PASSED}"
93 |   echo -e "# fail ${FAILED}\n"
94 | }
--------------------------------------------------------------------------------
/docs/getting-started.md:
--------------------------------------------------------------------------------
1 | # Getting started
2 | 
3 | ## Set up ecs-conex in your AWS account
4 | 
5 | This only needs to be performed once per account. More instruction and scripts coming soon.
6 | 
7 | ## Have ecs-conex watch a GitHub repository
8 | 
9 | Once ecs-conex is running in your AWS account, you can ask it to build a Docker image each time you push changes to a GitHub repository.
10 | 
11 | 1. Make sure you have awscli installed.
12 | 2. Set up the GitHub repository. You will need a `Dockerfile` at the root level of the repository to specify how the image should be built.
13 | 3. When it was created or updated, your ecs-conex CloudFormation stack was provided with a GitHub access token to allow it to read from repositories. Make sure that the GitHub user corresponding to that token has permission to read from the GitHub repository you are adding to ecs-conex, either by being listed as a collaborator or by being part of a team that has read permission.
14 | 4. Clone the ecs-conex repository locally.
15 | 5. In your GitHub account, generate an access token with `admin:repo_hook` and `repo` scopes. This token will be used to add the ecs-conex repo hook to the GitHub repository you're adding to ecs-conex. (After you've added your repository, you can safely delete this token.)
16 | 6. If you are not already a collaborator on the GitHub repository you're adding to ecs-conex, add yourself as a collaborator.
17 | 7. Set your GitHub access token as an environment variable named `GithubAccessToken`.
18 | 8. Run the `watch.sh` script from `ecs-conex/scripts/` in the root directory of your repository to register the GitHub repository with ecs-conex.
19 | 
20 | In the example below, we assume:
21 | - that an ecs-conex stack has already been created in `us-east-1` called `ecs-conex-production`,
22 | - a new GitHub repository called `my-github-repo` is already created,
23 | - you have generated a personal GitHub access token `abcdefghi` with `admin:repo_hook` and `repo` scopes, and
24 | - awscli is installed and properly configured.
25 | 26 | ```sh 27 | $ git clone https://github.com/mapbox/ecs-conex 28 | $ mkdir my-github-repo 29 | $ cd my-github-repo 30 | $ git init 31 | $ git remote add origin git@github.com:my-username/my-github-repo 32 | $ echo "FROM ubuntu" > Dockerfile 33 | $ git commit -am "my first commit" 34 | $ git push --set-upstream origin master 35 | $ GithubAccessToken=abcdefghi ../ecs-conex/scripts/watch.sh us-east-1:ecs-conex-production 36 | ``` 37 | 38 | You can check to see if your repository is being watched by looking at Settings > Webhooks & Services for your repository: 39 | 40 | ``` 41 | https://github.com/my-username/my-github-repo/settings/hooks 42 | ``` 43 | -------------------------------------------------------------------------------- /utils.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -eu 4 | set -o pipefail 5 | 6 | function after_image() { 7 | local region=$1 8 | local sha=${2:-${after}} 9 | echo ${AccountId}.dkr.ecr.${region}.amazonaws.com/${repo}:${sha} 10 | } 11 | 12 | function login() { 13 | local region=$1 14 | eval "$(aws ecr get-login --region ${region} --no-include-email)" || \ 15 | eval "$(aws ecr get-login --region ${region})" 16 | } 17 | 18 | function ensure_repo() { 19 | local region=$1 20 | aws ecr describe-repositories \ 21 | --region ${region} \ 22 | --repository-names ${repo} > /dev/null 2>&1 || create_repo ${region} 23 | } 24 | 25 | function create_repo() { 26 | local region=$1 27 | aws ecr create-repository \ 28 | --region ${region} \ 29 | --repository-name ${repo} > /dev/null 30 | } 31 | 32 | function image_exists() { 33 | local region=$1 34 | local imgtag=${2:-${after}} 35 | aws ecr batch-get-image \ 36 | --region ${region} \ 37 | --repository-name ${repo} \ 38 | --image-ids imageTag=${imgtag} \ 39 | --output text | grep -q IMAGES 40 | } 41 | 42 | function github_status() { 43 | local status=$1 44 | local description=$2 45 | curl -s \ 46 | --request POST \ 47 | --header "Content-Type: application/json" \ 48 | --data "{\"state\":\"${status}\",\"description\":\"${description}\",\"context\":\"ecs-conex\"}" \ 49 | ${status_url} > /dev/null 50 | } 51 | 52 | function check_dockerfile() { 53 | filepath=$1 54 | if [ ! 
-f ${filepath} ]; then 55 | echo "no Dockerfile found" 56 | exit 0 57 | fi 58 | } 59 | 60 | function check_receives() { 61 | if [ $ApproximateReceiveCount -gt 3 ]; then 62 | echo "Job received $ApproximateReceiveCount times, aborting build" 63 | return 3 64 | fi 65 | } 66 | 67 | function parse_message() { 68 | ref=$(node -e "console.log(${Message}.ref);") 69 | after=$(node -e "console.log(${Message}.after);") 70 | repo=$(node -e "console.log(${Message}.repository.name);") 71 | owner=$(node -e "console.log(${Message}.repository.owner.name);") 72 | user=$(node -e "console.log(${Message}.pusher.name);") 73 | deleted=$(node -e "console.log(${Message}.deleted);") 74 | status_url="https://api.github.com/repos/${owner}/${repo}/statuses/${after}?access_token=${GithubAccessToken}" 75 | } 76 | 77 | function credentials() { 78 | filepath=${1} 79 | args="" 80 | 81 | NPMAccessToken=$(printenv | grep NPMAccessToken | sed 's/.*=//') 82 | if [[ -n $NPMAccessToken ]] && grep "ARG NPMAccessToken" ${filepath} > /dev/null 2>&1; then 83 | args+="--build-arg NPMAccessToken=${NPMAccessToken}" 84 | fi 85 | 86 | 87 | GithubAccessToken=$(printenv | grep GithubAccessToken | sed 's/.*=//') 88 | if grep "ARG GithubAccessToken" ${filepath} > /dev/null 2>&1; then 89 | args+=" --build-arg GithubAccessToken=${GithubAccessToken}" 90 | fi 91 | 92 | 93 | role=$(curl -s http://169.254.169.254/latest/meta-data/iam/security-credentials/) 94 | if [[ -z $role ]]; then 95 | return 96 | fi 97 | 98 | creds=$(curl -s http://169.254.169.254/latest/meta-data/iam/security-credentials/${role}) 99 | accessKeyId=$(node -e "console.log(${creds}.AccessKeyId)") 100 | secretAccessKey=$(node -e "console.log(${creds}.SecretAccessKey)") 101 | sessionToken=$(node -e "console.log(${creds}.SessionToken)") 102 | 103 | if [[ -n $accessKeyId ]] && [[ $accessKeyId != "undefined" ]] && grep "ARG AWS_ACCESS_KEY_ID" ${filepath} > /dev/null 2>&1; then 104 | args+=" --build-arg AWS_ACCESS_KEY_ID=${accessKeyId}" 105 | fi 106 | 107 | if [[ -n $secretAccessKey ]] && [[ $secretAccessKey != "undefined" ]] && grep "ARG AWS_SECRET_ACCESS_KEY" ${filepath} > /dev/null 2>&1; then 108 | args+=" --build-arg AWS_SECRET_ACCESS_KEY=${secretAccessKey}" 109 | fi 110 | 111 | if [[ -n $sessionToken ]] && [[ $sessionToken != "undefined" ]] && grep "ARG AWS_SESSION_TOKEN" ${filepath} > /dev/null 2>&1; then 112 | args+=" --build-arg AWS_SESSION_TOKEN=${sessionToken}" 113 | fi 114 | } 115 | 116 | function exact_match() { 117 | if git describe --tags --exact-match 2> /dev/null; then 118 | local tag="$(git describe --tags --exact-match)" 119 | if image_exists ${region} ${tag}; then 120 | echo "found existing image for ${tag} in ${region}, skipping push" >&2 121 | else 122 | echo "pushing ${tag} to ${region}" >&2 123 | docker tag ${repo}:${after} "$(after_image ${region} ${tag})" 124 | echo "$(after_image ${region} ${tag})" 125 | fi 126 | fi 127 | } 128 | 129 | function ecr_logins() { 130 | local regions=$1 131 | for region in "$@"; do 132 | login ${region} 133 | done 134 | } 135 | 136 | function docker_push() { 137 | local queue="" 138 | 139 | for region in "${regions[@]}"; do 140 | ensure_repo ${region} 141 | 142 | # tag + add current image to queue by exact tag match (omitted if no exact match) 143 | queue="${queue} $(exact_match)" 144 | 145 | if image_exists ${region}; then 146 | echo "found existing image for ${after} in ${region}, skipping push" 147 | continue 148 | fi 149 | 150 | echo "pushing ${after} to ${region}" 151 | 152 | # tag + add current image to queue by gitsha 153 
| docker tag ${repo}:${after} "$(after_image ${region})" 154 | queue="${queue} $(after_image ${region})" 155 | done 156 | 157 | parallel docker push {} ::: $queue 158 | } 159 | 160 | function bucket_push() { 161 | [ "$ImageBucketPrefix" == "" ] && echo "nothing to do" && return 162 | 163 | for region in "${bucket_regions[@]}"; do 164 | echo "copying to ${region}" 165 | aws s3 cp ${tmpdir}/${repo}-${after}.tar.gz s3://${ImageBucketPrefix}-${region}/images/${repo}/${after}.tar.gz --only-show-errors 166 | done 167 | } 168 | 169 | function docker_save() { 170 | [ "$ImageBucketPrefix" == "" ] && echo "nothing to do" && return 171 | 172 | image_file=${tmpdir}/${repo}-${after}.tar.gz 173 | echo "saving image to ${image_file}" 174 | docker save ${repo}:${after} | gzip > ${image_file} 175 | } 176 | 177 | function version-check() { 178 | conex_docker_version=${conex_docker_version} 179 | host_docker_version=$(curl -s --unix-socket /var/run/docker.sock http://localhost/info | jq -r .ServerVersion) 180 | 181 | major_conex_docker_version=$(echo "${conex_docker_version}" | cut -d "." -f 1) 182 | major_host_docker_version=$(echo "${host_docker_version}" | cut -d "." -f 1) 183 | 184 | if [ $major_host_docker_version -ne $major_conex_docker_version ]; then 185 | echo "Docker versions don't match on the client and the host." 186 | aws sns publish \ 187 | --topic-arn ${NotificationTopic} \ 188 | --subject "Version mismatch between docker on ecs-conex and the host" \ 189 | --message "The docker versions don't match on ecs-conex and the host EC2. Host Docker version: ${host_docker_version} and Local Docker version: ${conex_docker_version}. For more information, look at: https://github.com/mapbox/ecs-conex/blob/master/docs/alarms.md#docker-major-version-mismatch" 190 | fi 191 | } 192 | 193 | 194 | function cleanup() { 195 | exit_code=$? 
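# capture main()'s exit status immediately; any command below would overwrite $?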
196 | 197 | parse_message 198 | 199 | if [ "${exit_code}" == "0" ]; then 200 | github_status "success" "ecs-conex successfully completed" 201 | else 202 | github_status "failure" "ecs-conex failed to build an image" 203 | fi 204 | 205 | rm -rf ${tmpdir} 206 | 207 | local imageId=$(docker images -q ${repo}:${after}) 208 | if [ -n "${imageId}" ]; then 209 | docker rmi -f ${imageId} 210 | fi 211 | } 212 | -------------------------------------------------------------------------------- /scripts/cleanup.js: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env node 2 | 3 | var AWS = require('aws-sdk'); 4 | var inquirer = require('inquirer'); 5 | var minimist = require('minimist'); 6 | var moment = require('moment'); 7 | var queue = require('d3-queue').queue; 8 | var request = require('request'); 9 | var _ = require('underscore'); 10 | 11 | module.exports = { 12 | validateInputs: validateInputs, 13 | confirmInputs: confirmInputs, 14 | listImages: listImages, 15 | validateECRSize: validateECRSize, 16 | isGitSha: isGitSha, 17 | isBlacklisted: isBlacklisted, 18 | getTimeStamps: getTimeStamps, 19 | assignTimeStamps: assignTimeStamps, 20 | dateCheck: dateCheck, 21 | toDelete: toDelete, 22 | deleteImages: deleteImages, 23 | mergeByProperty: mergeByProperty, 24 | wontDelete: wontDelete, 25 | willDelete: willDelete 26 | }; 27 | 28 | if (!module.parent) { 29 | var arguments = process.argv.slice(2); 30 | validateInputs(arguments, function(err, params) { 31 | if (err) throw new Error(err); 32 | confirmInputs(params, function(confirmation) { 33 | if (confirmation === false) process.exit(1); 34 | var ecr = new AWS.ECR({ region: params.region }); 35 | listImages(ecr, params, function(err, res) { 36 | if (err) throw new Error(err); 37 | var result = res.imageIds; 38 | validateECRSize(result, params); 39 | isGitSha(result); 40 | isBlacklisted(result, params); 41 | getTimeStamps(result, params, function(err, res) { 42 | if (err) throw new Error(err); 43 | assignTimeStamps(result, res); 44 | dateCheck(result); 45 | var imagesToDelete = toDelete(result, params); 46 | deleteImages(ecr, params, imagesToDelete, function(err) { 47 | if (err) throw new Error(err); 48 | else console.log('[info] Successfully removed images from ECR'); 49 | }); 50 | }); 51 | }); 52 | }); 53 | }); 54 | } 55 | 56 | function validateInputs(arguments, callback) { 57 | var params = {}; 58 | var argv = minimist(arguments); 59 | 60 | if (!argv._[0] || !argv._[1]) return callback('GitHub user name and repository name are required'); 61 | if (argv.maximum && !_.isNumber(argv.maximum)) return callback('Desired maximum number of images to leave in ECR should be a number'); 62 | if (argv.maximum && (argv.maximum < 0 || argv.maximum > 1000)) return callback('Desired maximum number of images to leave in ECR should be between 0 and 1000'); 63 | if (argv.blacklist) try { 64 | var blacklistArr = argv.blacklist.split(','); 65 | } catch(err) { 66 | return callback('Blacklisted imageTags must be a comma-separated list'); 67 | } 68 | if (!process.env.GithubAccessToken) return callback(new Error('GithubAccessToken env var must be set')); 69 | 70 | params.user = argv._[0]; 71 | params.repo = argv._[1]; 72 | params.region = argv.region || process.env.AWS_DEFAULT_REGION || 'us-east-1'; 73 | params.maximum = argv.maximum || 750; 74 | params.blacklist = (blacklistArr) ? 
blacklistArr : []; 75 | params.githubAccessToken = process.env.GithubAccessToken; 76 | params.registryId = process.env.RegistryId; 77 | 78 | return callback(null, params); 79 | } 80 | 81 | function confirmInputs(params, callback) { 82 | console.log(''); 83 | console.log(_.omit(params, 'githubAccessToken', 'registryId')); 84 | console.log(''); 85 | inquirer.prompt([ 86 | { 87 | type: 'confirm', 88 | name: 'confirmation', 89 | message: 'Ready to delete images? Any imageTags not blacklisted above are subject to deletion.', 90 | default: false 91 | } 92 | ]).then(function(answer) { 93 | return callback(answer.confirmation); 94 | }); 95 | } 96 | 97 | function listImages(ecr, params, callback) { 98 | var data = { imageIds:[] }; 99 | ecr.listImages({ repositoryName: params.repo }).eachItem(function(err, item) { 100 | if (err) { 101 | callback && callback(err); 102 | callback = false; 103 | } else if (!item) { 104 | callback(null, data); 105 | } else { 106 | data.imageIds.push(item); 107 | } 108 | }); 109 | } 110 | 111 | function validateECRSize(array, params) { 112 | var count = array.length; 113 | if (count < params.maximum) { 114 | throw new Error('The repository ' + params.user + '/' + params.repo + ' has ' + count + ' images, which is less than the desired ' + params.maximum + ' image maximum. No clean-up required.'); 115 | } 116 | } 117 | 118 | function isGitSha(array) { 119 | for (var i = 0; i < array.length; i++) { 120 | if (array[i].imageTag !== undefined && array[i].imageTag.match(/^[a-z0-9]{40}$/)) { 121 | array[i]['ableToDelete'] = true; 122 | } else { 123 | wontDelete(array[i], 'Did not resemble a GitSha', true); 124 | } 125 | } 126 | } 127 | 128 | function isBlacklisted(array, params) { 129 | for (var i = 0; i < array.length; i++) { 130 | if (params.blacklist !== null && params.blacklist.indexOf(array[i].imageTag) !== -1) { 131 | wontDelete(array[i], 'ImageTag is blacklisted', true); 132 | } 133 | } 134 | } 135 | 136 | function getTimeStamps(array, params, callback) { 137 | var q = queue(10); 138 | for (var i = 0; i < array.length; i++) { 139 | var match = _.isMatch(array[i], { ableToDelete: true }); 140 | if (match) { 141 | var options = { 142 | url: 'https://api.github.com/repos/' + params.user + '/' + params.repo + '/commits/' + array[i].imageTag + '?access_token=' + params.githubAccessToken, 143 | headers: { 'User-agent': 'request' } 144 | }; 145 | q.defer(request, options); 146 | } 147 | } 148 | 149 | q.awaitAll(function(error, response) { 150 | if (error) return callback(error); 151 | return callback(null, response); 152 | }); 153 | } 154 | 155 | function assignTimeStamps(array, response) { 156 | var dates = []; 157 | for (var i = 0; i < response.length; i++) { 158 | if (response[i].statusCode !== 200) { 159 | var commit = response[i].request.uri.pathname.match(/\/([a-z0-9]*)$/)[1]; 160 | wontDelete(array[i], 'ImageTag could not be retrieved from GitHub'); 161 | dates.push({ imageTag: commit, ableToDelete: false }); 162 | } else { 163 | var result = JSON.parse(response[i].body); 164 | dates.push({ imageTag: result.sha, date: moment(result.commit.author.date).unix() }); 165 | } 166 | } 167 | 168 | mergeByProperty(array, dates, 'imageTag'); 169 | return array; 170 | } 171 | 172 | function dateCheck(array) { 173 | for (var i = 0; i < array.length; i++) { 174 | if (array[i].ableToDelete === true && !array[i].date) { 175 | wontDelete(array[i], 'ImageTag date could not be mapped from GitHub', true); 176 | } 177 | } 178 | } 179 | 180 | function toDelete(array, params) { 181 | var 
ableToDelete = []; 182 | for (var i = 0; i < array.length; i++) { 183 | var deletable = _.isMatch(array[i], { ableToDelete: true }); 184 | if (deletable) ableToDelete.push(array[i]); 185 | } 186 | 187 | var deleteCount = array.length - params.maximum; 188 | var sorted = _.sortBy(ableToDelete, function(o) { return o.date; }); 189 | var toDelete = sorted.splice(0, deleteCount); 190 | return toDelete; 191 | } 192 | 193 | function deleteImages(ecr, params, array, callback) { 194 | for (var i = 0; i < array.length; i++) { 195 | willDelete(array, i); 196 | array[i] = _.pick(array[i], 'imageTag', 'imageDigest'); 197 | } 198 | 199 | var q = queue(1); 200 | 201 | while (array.length) { 202 | q.defer(ecr.batchDeleteImage.bind(ecr), { 203 | imageIds: array.splice(0, 100), 204 | repositoryName: params.repo, 205 | registryId: params.registryId 206 | }); 207 | } 208 | 209 | q.awaitAll(function(err, data) { 210 | if (err) return callback(err); 211 | return callback(err, _(data).flatten()); 212 | }); 213 | } 214 | 215 | // Utility functions 216 | 217 | function mergeByProperty(arr1, arr2, prop) { 218 | _.each(arr2, function(arr2object) { 219 | var arr1object = _.find(arr1, function(arr1object) { 220 | return arr1object[prop] === arr2object[prop]; 221 | }); 222 | arr1object ? _.extend(arr1object, arr2object) : console.log('[warning] Image tag ' + arr2object.imageTag + ' was queried for a commit date, but does not map to an ECR image.'); 223 | }); 224 | } 225 | 226 | function wontDelete(object, message, tag) { 227 | console.log('[wont-delete] [' + object.imageDigest + '] [' + object.imageTag + '] ' + message); 228 | if (tag) object['ableToDelete'] = false; 229 | } 230 | 231 | function willDelete(array, index) { 232 | console.log('[will-delete] [' + array[index].imageDigest + '] [' + array[index].imageTag + '] Deleting image ' + (index + 1) + ' of ' + array.length); 233 | } 234 | -------------------------------------------------------------------------------- /cloudformation/ecs-conex.template.js: -------------------------------------------------------------------------------- 1 | 'use strict'; 2 | var watchbot = require('@mapbox/watchbot'); 3 | var cf = require('@mapbox/cloudfriend'); 4 | var hookshot = require('@mapbox/hookshot'); 5 | 6 | var webhook = hookshot.github('ConexWebhookFunction', 'WatchbotWebhook'); 7 | 8 | // Build watchbot resources 9 | var watcher = watchbot.template({ 10 | prefix: 'Watchbot', 11 | service: 'ecs-conex', 12 | serviceVersion: cf.ref('GitSha'), 13 | family: cf.ref('Family'), 14 | maxSize: 10, 15 | minSize: 1, 16 | reservation: { memory: 512 }, 17 | mounts: '/root', 18 | env: { 19 | AWS_DEFAULT_REGION: cf.region, 20 | StackRegion: cf.region, 21 | AccountId: cf.accountId, 22 | GithubAccessToken: cf.ref('GithubAccessToken'), 23 | NPMAccessToken: cf.ref('NPMAccessToken'), 24 | ImageBucketPrefix: cf.ref('ImageBucketPrefix'), 25 | ImageBucketRegions: cf.ref('ImageBucketRegions'), 26 | NotificationTopic: cf.ref('AlarmSNSTopic') 27 | }, 28 | command: 'eval $(decrypt-kms-env) && timeout 3600 ./ecs-conex.sh', 29 | notificationTopic: cf.ref('AlarmSNSTopic'), 30 | cluster: cf.ref('Cluster'), 31 | alarmOnEachFailure: true, 32 | alarmThreshold: 20, 33 | alarmPeriods: 6, 34 | messageTimeout: 1200, 35 | permissions: [ 36 | { 37 | Effect: 'Allow', 38 | Action: [ 39 | 'ecr:BatchCheckLayerAvailability', 40 | 'ecr:BatchGetImage', 41 | 'ecr:CreateRepository', 42 | 'ecr:DescribeRepositories', 43 | 'ecr:GetAuthorizationToken', 44 | 'ecr:GetDownloadUrlForLayer', 45 | 'ecr:InitiateLayerUpload', 46 | 
'ecr:CompleteLayerUpload',
47 | 'ecr:UploadLayerPart',
48 | 'ecr:PutImage'
49 | ],
50 | Resource: '*'
51 | },
52 | {
53 | Effect: 'Allow',
54 | Action: [
55 | 's3:PutObject'
56 | ],
57 | Resource: [
58 | cf.sub('arn:aws:s3:::${ImageBucketPrefix}-*/images/*')
59 | ]
60 | }
61 | ]
62 | });
63 | 
64 | // Main ecs-conex template
65 | var conex = {
66 | Parameters: {
67 | GitSha: {
68 | Description: 'The SHA of the task repository to use',
69 | Type: 'String'
70 | },
71 | GithubAccessToken: {
72 | Description: '[secure] A Github access token with permission to clone private repositories',
73 | Type: 'String'
74 | },
75 | NPMAccessToken: {
76 | Type: 'String',
77 | Description: '[secure] npm access token used to install private packages',
78 | Default: ''
79 | },
80 | ImageBucketPrefix: {
81 | Type: 'String',
82 | Description: 'The prefix for buckets to write .tar.gz images into',
83 | Default: ''
84 | },
85 | ImageBucketRegions: {
86 | Type: 'String',
87 | Description: 'Space-delimited list of region suffixes for image buckets',
88 | Default: ''
89 | },
90 | NumberOfWorkers: {
91 | Type: 'Number',
92 | Description: 'The number of concurrent build jobs ecs-conex will perform',
93 | Default: 4
94 | },
95 | Cluster: {
96 | Description: 'The ARN of the ECS cluster to run on',
97 | Type: 'String'
98 | },
99 | AlarmEmail: {
100 | Description: 'An email address to subscribe to alarms',
101 | Type: 'String',
102 | Default: 'devnull@mapbox.com'
103 | },
104 | LogAggregationFunction: {
105 | Description: 'The ARN of a Lambda function that will receive log events from CloudWatch',
106 | Type: 'String',
107 | Default: 'none'
108 | },
109 | Family: {
110 | Description: 'An optional family name for ecs-conex tasks',
111 | Type: 'String',
112 | Default: 'ecs-conex'
113 | }
114 | },
115 | Resources: {
116 | AlarmSNSTopic: {
117 | Type: 'AWS::SNS::Topic',
118 | Description: 'Subscribe to this topic to receive emails when tasks fail or retry',
119 | Properties: {
120 | Subscription: [
121 | {
122 | Endpoint: cf.ref('AlarmEmail'),
123 | Protocol: 'email'
124 | }
125 | ]
126 | }
127 | },
128 | MaxPendingTime: {
129 | Type: 'AWS::CloudWatch::Alarm',
130 | Properties: {
131 | AlarmDescription: 'https://github.com/mapbox/ecs-conex/blob/master/docs/alarms.md#maxpendingtime',
132 | Period: 60,
133 | EvaluationPeriods: 5,
134 | Statistic: 'Maximum',
135 | Threshold: 120,
136 | ComparisonOperator: 'GreaterThanThreshold',
137 | Namespace: 'Mapbox/ecs-watchbot',
138 | MetricName: cf.join(['WatchbotWorkerPending', cf.stackName]),
139 | AlarmActions: [cf.ref('AlarmSNSTopic')]
140 | }
141 | },
142 | ConexWebhookFunction: {
143 | Type: 'AWS::Lambda::Function',
144 | Properties: {
145 | Role: cf.getAtt('ConexWebhookFunctionRole', 'Arn'),
146 | Description: cf.join(['watchbot webhooks for ', cf.stackName]),
147 | Handler: 'index.webhooks',
148 | Runtime: 'nodejs6.10',
149 | Timeout: 30,
150 | MemorySize: 128,
151 | Code: {
152 | ZipFile: cf.join('\n', [
153 | 'var AWS = require("aws-sdk");',
154 | cf.join(['var sns = new AWS.SNS({ region: "', cf.region, '" });']),
155 | cf.join(['var topic = "', watcher.ref.topic, '";']),
156 | cf.join(['var secret = "', cf.ref('WatchbotUserKey'), '";']),
157 | 'var crypto = require("crypto");',
158 | 'module.exports.webhooks = function(event, context) {',
159 | ' var body = event.body;',
160 | ' var hash = "sha1=" + crypto.createHmac("sha1", secret).update(new Buffer(JSON.stringify(body))).digest("hex");',
161 | ' if (event.signature !== hash) return context.done("invalid: signature does not match");',
162 | ' if (body.zen) return context.done(null, "ignored ping request");',
163 | ' var push = {',
164 | ' ref: event.body.ref,',
165 | ' after: event.body.after,',
166 | ' before: event.body.before,',
167 | ' deleted: event.body.deleted,',
168 | ' repository: {',
169 | ' name: event.body.repository.name,',
170 | ' owner: { name: event.body.repository.owner.name }',
171 | ' },',
172 | ' pusher: { name: event.body.pusher.name }',
173 | ' };',
174 | ' var params = {',
175 | ' TopicArn: topic,',
176 | ' Subject: "webhook",',
177 | ' Message: JSON.stringify(push)',
178 | ' };',
179 | ' sns.publish(params, function(err) {',
180 | ' if (err) return context.done("error: " + err.message);',
181 | ' context.done(null, "success");',
182 | ' });',
183 | '};'
184 | ])
185 | }
186 | }
187 | },
188 | ConexWebhookFunctionRole: {
189 | Type: 'AWS::IAM::Role',
190 | Properties: {
191 | AssumeRolePolicyDocument: {
192 | Statement: [
193 | {
194 | Sid: 'webhookrole',
195 | Effect: 'Allow',
196 | Principal: { Service: 'lambda.amazonaws.com' },
197 | Action: 'sts:AssumeRole'
198 | }
199 | ]
200 | },
201 | Policies: [
202 | {
203 | PolicyName: 'WatchbotWebhookPolicy',
204 | PolicyDocument: {
205 | Statement: [
206 | {
207 | Effect: 'Allow',
208 | Action: ['logs:*'],
209 | Resource: ['arn:aws:logs:*:*:*']
210 | },
211 | {
212 | Effect: 'Allow',
213 | Action: ['sns:Publish'],
214 | Resource: [watcher.ref.topic]
215 | }
216 | ]
217 | }
218 | }
219 | ]
220 | }
221 | },
222 | WatchbotUserKey: {
223 | Type: 'AWS::IAM::AccessKey',
224 | Description: 'AWS access keys to authenticate as the Watchbot user',
225 | Properties: {
226 | Status: 'Active',
227 | UserName: cf.ref('WatchbotUser')
228 | }
229 | },
230 | WatchbotUser: {
231 | Type: 'AWS::IAM::User',
232 | Description: 'An AWS user with permission to publish to the work topic',
233 | Properties: {
234 | Policies: [
235 | {
236 | PolicyName: cf.join('', [cf.stackName, 'publish-to-sns']),
237 | PolicyDocument: {
238 | Statement: [
239 | {
240 | Effect: 'Allow',
241 | Action: [
242 | 'sns:Publish'
243 | ],
244 | Resource: [
245 | cf.ref('WatchbotTopic')
246 | ]
247 | }
248 | ]
249 | }
250 | }
251 | ]
252 | }
253 | }
254 | },
255 | Outputs: {
256 | WorkTopic: {
257 | Description: 'The ARN of ecs-conex\'s SNS topic. Send messages to this topic to have builds processed',
258 | Value: watcher.ref.topic
259 | },
260 | LogGroup: {
261 | Description: 'The name of the CloudWatch LogGroup where ecs-conex logs are sent',
262 | Value: watcher.ref.logGroup
263 | }
264 | }
265 | };
266 | 
267 | webhook.Resources.WatchbotWebhookStage.Properties.StageName = 'watchbot';
268 | webhook.Resources.WatchbotWebhookResource.Properties.PathPart = 'webhooks';
269 | webhook.Resources.WatchbotWebhookPermission.Properties.FunctionName = cf.ref('ConexWebhookFunction');
270 | webhook.Resources.WatchbotWebhookMethod.Properties.Integration.Uri = cf.sub('arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${ConexWebhookFunction.Arn}/invocations');
271 | 
272 | webhook.Resources.WatchbotWebhookMethod.Properties.Integration.RequestTemplates = {
273 | 'application/json': '{"signature":"$input.params(\'X-Hub-Signature\')","body":$input.json(\'$\')}'
274 | };
275 | 
276 | watcher.Resources.WatchbotTask.Properties.ContainerDefinitions[0].MountPoints.push({
277 | ContainerPath: '/var/run/docker.sock',
278 | SourceVolume: 'docker-sock'
279 | });
280 | 
281 | watcher.Resources.WatchbotTask.Properties.Volumes.push({
282 | Host: {
283 | SourcePath: '/var/run/docker.sock'
284 | },
285 | Name: 'docker-sock'
286 | });
287 | 
288 | // Rollup the template
289 | module.exports = cf.merge(watcher, conex, webhook);
290 | 
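A minimal sketch (not part of the repository) of rendering the assembled template above, assuming the `cf` helper here is @mapbox/cloudfriend and that it exposes a build() that resolves a template module to its final JSON form; the path below is illustrative:

// Sketch: print the merged watcher + conex + webhook template for review.
var cf = require('@mapbox/cloudfriend');
var path = require('path');

cf.build(path.resolve('cloudformation', 'ecs-conex.template.js'))
  .then(function(template) {
    // The resolved template is a plain object, ready for inspection or upload.
    console.log(JSON.stringify(template, null, 2));
  })
  .catch(function(err) {
    console.error(err.message);
    process.exit(1);
  });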
--------------------------------------------------------------------------------
/test/cleanup.test.js:
--------------------------------------------------------------------------------
1 | /* eslint-disable no-console*/
2 | 
3 | var cleanup = require(__dirname + '/../scripts/cleanup.js');
4 | var inquirer = require('inquirer');
5 | var Promise = require('pinkie-promise');
6 | var sinon = require('sinon');
7 | var test = require('tape');
8 | var _ = require('underscore');
9 | 
10 | test('validateInputs: no user', function(assert) {
11 | var arguments = ['repo', '--maximum=1', '--blacklist=tag1,tag2'];
12 | 
13 | cleanup.validateInputs(arguments, function(err, res) {
14 | assert.equal(err, 'GitHub user name and repository name are required');
15 | assert.equal(res, undefined);
16 | assert.end();
17 | });
18 | });
19 | 
20 | test('validateInputs: no repo', function(assert) {
21 | var arguments = ['user', '--maximum=1', '--blacklist=tag1,tag2'];
22 | 
23 | cleanup.validateInputs(arguments, function(err, res) {
24 | assert.equal(err, 'GitHub user name and repository name are required');
25 | assert.equal(res, undefined);
26 | assert.end();
27 | });
28 | });
29 | 
30 | test('validateInputs: no maximum', function(assert) {
31 | var arguments = ['user', 'repo', '--blacklist=tag1,tag2'];
32 | 
33 | cleanup.validateInputs(arguments, function(err, res) {
34 | assert.equal(err, null);
35 | assert.equal(res.maximum, 750);
36 | assert.end();
37 | });
38 | });
39 | 
40 | test('validateInputs: no blacklist', function(assert) {
41 | var arguments = ['user', 'repo', '--maximum=1'];
42 | 
43 | cleanup.validateInputs(arguments, function(err, res) {
44 | assert.equal(err, null);
45 | assert.deepEqual(res.blacklist, []);
46 | assert.end();
47 | });
48 | });
49 | 
50 | test('validateInputs', function(assert) {
51 | var arguments = ['user', 'repo', '--maximum=1', '--blacklist=tag1,tag2'];
52 | 
53 | cleanup.validateInputs(arguments, function(err, res) {
54 | assert.equal(err, null);
55 | assert.equal(res.user, 'user');
56 | assert.equal(res.repo, 'repo');
57 | assert.equal(res.maximum, 1);
58 | assert.deepEqual(res.blacklist, ['tag1', 'tag2']);
59 | assert.end();
60 | });
61 | });
62 | 
63 | test('confirmInputs: true', function(assert) {
64 | var params = { user: 'user', repo: 'repo', maximum: 1, blacklist: ['tag1', 'tag2'] };
65 | sinon.stub(inquirer, 'prompt').callsFake(function(questions) {
66 | assert.deepEqual(questions, [{
67 | type: 'confirm',
68 | name: 'confirmation',
69 | message: 'Ready to delete images? Any imageTags not blacklisted above are subject to deletion.',
70 | default: false
71 | }]);
72 | 
73 | return Promise.resolve({ confirmation: true });
74 | });
75 | 
76 | cleanup.confirmInputs(params, function(answer) {
77 | assert.equal(answer, true);
78 | inquirer.prompt.restore();
79 | assert.end();
80 | });
81 | });
82 | 
83 | test('confirmInputs: false', function(assert) {
84 | var params = { user: 'user', repo: 'repo', maximum: 1, blacklist: ['tag1', 'tag2'] };
85 | 
86 | assert.plan(2);
87 | sinon.stub(inquirer, 'prompt').callsFake(function(questions) {
88 | assert.deepEqual(questions, [{
89 | type: 'confirm',
90 | name: 'confirmation',
91 | message: 'Ready to delete images? Any imageTags not blacklisted above are subject to deletion.',
92 | default: false
93 | }]);
94 | 
95 | return Promise.resolve({ confirmation: false });
96 | });
97 | 
98 | cleanup.confirmInputs(params, function(answer) {
99 | assert.equal(answer, false);
100 | inquirer.prompt.restore();
101 | });
102 | });
103 | 
104 | test('listImages', function(assert) {
105 | var params = { repo: 'repo' };
106 | 
107 | assert.plan(1);
108 | var ecr = {
109 | listImages: function(object) {
110 | assert.deepEqual(object, { repositoryName: 'repo' });
111 | var counter = 0;
112 | var eachItem = function(handler) {
113 | if (++counter >= 5) {
114 | handler(null, null);
115 | } else {
116 | handler(null, {});
117 | }
118 | };
119 | return { eachItem: eachItem };
120 | }
121 | };
122 | 
123 | cleanup.listImages(ecr, params, function() {});
124 | });
125 | 
126 | test('validateECRSize', function(assert) {
127 | var result = [
128 | { imageTag: 'tag1' },
129 | { imageTag: 'tag2' },
130 | { imageTag: 'tag3' }
131 | ];
132 | 
133 | assert.equal(cleanup.validateECRSize(result, { maximum: 2 }), undefined);
134 | assert.equal(cleanup.validateECRSize(result, { maximum: 3 }), undefined);
135 | assert.throws(function() { cleanup.validateECRSize(result, { maximum: 4 }); }, /The repository undefined\/undefined has 3 images, which is less than the desired 4 image maximum. No clean-up required./);
136 | assert.end();
137 | });
138 | 
139 | test('isGitSha: true', function(assert) {
140 | var array = [{ imageTag: 'c5332a6c78cf23d86f28b8987a3ca78af46b7f48' }];
141 | cleanup.isGitSha(array);
142 | 
143 | assert.equal(array[0].ableToDelete, true);
144 | assert.end();
145 | });
146 | 
147 | test('isGitSha: false', function(assert) {
148 | var array = [
149 | { },
150 | { imageTag: '' },
151 | { imageTag: 'v0.1.0' },
152 | { imageTag: 'c5332a6c78cf23d86f28b8987a3ca78af46b7f4' },
153 | { imageTag: 'c5332a6c78cf23d86f28b8987a3ca78af46b7f488' },
154 | { imageTag: 'C5332A6C78Cf23D86F28B8987A3CA78AF46B7F48' }
155 | ];
156 | cleanup.isGitSha(array);
157 | 
158 | _.each(array, function(e) {
159 | assert.equal(e.ableToDelete, false);
160 | });
161 | assert.end();
162 | });
163 | 
164 | test('isBlacklisted: true', function(assert) {
165 | var params = { blacklist: ['tag'] };
166 | var array = [{ imageTag: 'tag', ableToDelete: true }];
167 | cleanup.isBlacklisted(array, params);
168 | 
169 | assert.equal(array[0].ableToDelete, false);
170 | assert.end();
171 | });
172 | 
173 | test('isBlacklisted: false', function(assert) {
174 | var params = { blacklist: ['tag'] };
175 | var array = [{ imageTag: '', ableToDelete: true }];
176 | cleanup.isBlacklisted(array, params);
177 | 
178 | assert.equal(array[0].ableToDelete, true);
179 | assert.end();
180 | });
181 | 
182 | test('getTimeStamps', function(assert) {
183 | var array = [{ imageTag: 'tag', ableToDelete: true }];
184 | var params = { user: 'user', repo: 'repo', githubAccessToken: 'token' };
185 | 
186 | assert.plan(3);
187 | cleanup.getTimeStamps(array, params, function(err, res) {
188 | assert.ok(res[0].statusCode);
189 | assert.ok(res[0].request.uri.pathname);
190 | assert.ok(res[0].body);
191 | });
192 | });
193 | 
194 | test('assignTimeStamps: 200 status code', function(assert) {
195 | var array = [{ imageTag: 'tag', ableToDelete: true }];
196 | var response = [{ statusCode: 200, body: '{"sha":"tag","commit":{"author":{"date":"2016-07-20T18:27:53Z"}}}', request: { uri: { pathname: '/repos/user/repo/commits/tag' } } }];
197 | cleanup.assignTimeStamps(array, response);
198 | 
199 | assert.equal(array[0].imageTag, 'tag');
200 | assert.equal(array[0].ableToDelete, true);
201 | assert.equal(array[0].date, 1469039273);
202 | assert.end();
203 | });
204 | 
205 | test('assignTimeStamps: 401 status code', function(assert) {
206 | var array = [{ imageTag: 'tag', ableToDelete: true }];
207 | var response = [{ statusCode: 401, body: '{"sha":"tag","commit":{"author":{"date":"2016-07-20T18:27:53Z"}}}', request: { uri: { pathname: '/repos/user/repo/commits/tag' } } }];
208 | cleanup.assignTimeStamps(array, response);
209 | 
210 | assert.equal(array[0].imageTag, 'tag');
211 | assert.equal(array[0].ableToDelete, false);
212 | assert.ok(!array[0].date);
213 | assert.end();
214 | });
215 | 
216 | test('dateCheck: true', function(assert) {
217 | var array = [{ imageTag: 'tag', imageDigest: 'digest', date: 1469641800, ableToDelete: true }];
218 | cleanup.dateCheck(array);
219 | 
220 | assert.equal(array[0].ableToDelete, true);
221 | assert.end();
222 | });
223 | 
224 | test('dateCheck: false', function(assert) {
225 | var array = [{ imageTag: 'tag', imageDigest: 'digest', ableToDelete: true }];
226 | cleanup.dateCheck(array);
227 | 
228 | assert.equal(array[0].ableToDelete, false);
229 | assert.end();
230 | });
231 | 
232 | test('toDelete', function(assert) {
233 | var results = [
234 | { imageTag: 'tag1', imageDigest: 'digest1', ableToDelete: true, date: 5 },
235 | { imageTag: 'tag2', imageDigest: 'digest2', ableToDelete: true, date: 4 },
236 | { imageTag: 'tag3', imageDigest: 'digest3', ableToDelete: true, date: 3 },
237 | { imageTag: 'tag4', imageDigest: 'digest4', ableToDelete: true, date: 2 },
238 | { imageTag: 'tag5', imageDigest: 'digest5', ableToDelete: false, date: 1 }
239 | ];
240 | 
241 | var max1 = cleanup.toDelete(results, { maximum: 1 });
242 | var max2 = cleanup.toDelete(results, { maximum: 2 });
243 | var max3 = cleanup.toDelete(results, { maximum: 3 });
244 | var max4 = cleanup.toDelete(results, { maximum: 4 });
245 | var max5 = cleanup.toDelete(results, { maximum: 5 });
246 | 
247 | assert.equal(max1.length, 4);
248 | assert.equal(_.first(max1).date, 2);
249 | assert.equal(_.last(max1).date, 5);
250 | 
251 | assert.equal(max2.length, 3);
252 | assert.equal(_.first(max2).date, 2);
253 | assert.equal(_.last(max2).date, 4);
254 | 
255 | assert.equal(max3.length, 2);
256 | assert.equal(_.first(max3).date, 2);
257 | assert.equal(_.last(max3).date, 3);
258 | 
259 | assert.equal(max4.length, 1);
260 | assert.equal(max4[0].date, 2);
261 | 
262 | assert.equal(max5.length, 0);
263 | assert.end();
264 | });
265 | 
266 | test('deleteImages', function(assert) {
267 | var array = [];
268 | for (var i = 0; i < 250; i++) {
269 | array.push({ imageTag: 'tag' + i, imageDigest: 'digest' });
270 | }
271 | var params = { repo: 'repo', registryId: 'registryId' };
272 | 
273 | var counter = 0;
274 | var ecr = {
275 | batchDeleteImage: function(params, callback) {
276 | if (counter++ < 2) {
277 | assert.deepEqual(params.imageIds.length, 100);
278 | } else {
279 | assert.deepEqual(params.imageIds.length, 50);
280 | }
281 | assert.equal(params.repositoryName, 'repo');
282 | assert.equal(params.registryId, 'registryId');
283 | callback(null, params.imageIds);
284 | }
285 | };
286 | 
287 | cleanup.deleteImages(ecr, params, array, function(err, list) {
288 | assert.ifError(err);
289 | assert.equal(list.length, 250);
290 | assert.end();
291 | });
292 | });
293 | 
294 | test('mergeByProperty: mergeable', function(assert) {
295 | var params = { deleteCount: 0 };
296 | var arr1 = [{ imageTag: 'tag', imageDigest: 'digest', ableToDelete: true }];
297 | var arr2 = [{ imageTag: 'tag', date: 1469641800 }];
298 | cleanup.mergeByProperty(arr1, arr2, 'imageTag', params);
299 | 
300 | assert.equal(arr1.length, 1);
301 | assert.equal(arr1[0].imageTag, 'tag');
302 | assert.equal(arr1[0].imageDigest, 'digest');
303 | assert.equal(arr1[0].ableToDelete, true);
304 | assert.equal(arr1[0].date, 1469641800);
305 | assert.end();
306 | });
307 | 
308 | test('mergeByProperty: not mergeable', function(assert) {
309 | var params = { deleteCount: 0 };
310 | var arr1 = [{ imageTag: 'tag1', imageDigest: 'digest', ableToDelete: true }];
311 | var arr2 = [{ imageTag: 'tag2', date: 1469641800 }];
312 | cleanup.mergeByProperty(arr1, arr2, 'imageTag', params);
313 | 
314 | assert.equal(arr1.length, 1);
315 | assert.equal(arr1[0].imageTag, 'tag1');
316 | assert.equal(arr1[0].imageDigest, 'digest');
317 | assert.equal(arr1[0].ableToDelete, true);
318 | assert.ok(!arr1[0].date);
319 | assert.end();
320 | });
321 | 
322 | test('wontDelete', function(assert) {
323 | var object = { imageDigest: 'digest', imageTag: 'tag' };
324 | var message = 'test';
325 | 
326 | assert.plan(2);
327 | sinon.stub(console, 'log').callsFake(function(msg) {
328 | console.log.restore();
329 | assert.equal(msg, '[wont-delete] [digest] [tag] test');
330 | });
331 | 
332 | cleanup.wontDelete(object, message, true);
333 | assert.equal(object.ableToDelete, false);
334 | });
335 | 
336 | test('willDelete', function(assert) {
337 | var array = [{ imageDigest: 'digest', imageTag: 'tag' }];
338 | var index = 0;
339 | 
340 | assert.plan(1);
341 | sinon.stub(console, 'log').callsFake(function(msg) {
342 | console.log.restore();
343 | assert.equal(msg, '[will-delete] [digest] [tag] Deleting image 1 of 1');
344 | });
345 | 
346 | cleanup.willDelete(array, index);
347 | });
348 | 
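The suite above pins down the cleanup script's argument contract: a GitHub user and repository name are required, --maximum defaults to 750, and --blacklist defaults to an empty list. A minimal sketch (not part of the repository) of driving validateInputs directly, assuming the argument array mirrors process.argv.slice(2); the names and tag below are illustrative:

// Sketch: validate arguments for a run such as
//   node scripts/cleanup.js some-user some-repo --maximum=750 --blacklist=v1.0.0
var cleanup = require('./scripts/cleanup.js');

cleanup.validateInputs(['some-user', 'some-repo', '--maximum=750', '--blacklist=v1.0.0'], function(err, params) {
  // err is a string such as 'GitHub user name and repository name are required'
  if (err) return console.error(err);
  console.log(params.user, params.repo, params.maximum, params.blacklist);
});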
--------------------------------------------------------------------------------
/test/utils.test.sh:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | set -eu
4 | source $(dirname $0)/../utils.sh
5 | source $(dirname $0)/lib/utils.sh
6 | FAILED=0
7 | PASSED=0
8 | 
9 | # initialize test id counter
10 | testId=0
11 | 
12 | # after_image() 1 param test
13 | tag_test "after_image() with 1 param"
14 | export AccountId=1
15 | export repo=repo
16 | export after=2
17 | 
18 | log=$(after_image us-east-1)
19 | expected="1.dkr.ecr.us-east-1.amazonaws.com/repo:2"
20 | assert "equal" "${log}" "${expected}"
21 | 
22 | # after_image() 2 param test
23 | tag_test "after_image() with 2 params"
24 | export AccountId=1
25 | export repo=repo
26 | export after=2
27 | 
28 | log=$(after_image us-east-1 v1.0.0)
29 | expected="1.dkr.ecr.us-east-1.amazonaws.com/repo:v1.0.0"
30 | assert "equal" "${log}" "${expected}"
31 | 
32 | # login() test
33 | tag_test "login()"
34 | test_region=us-east-1
35 | 
36 | function aws() {
37 | if [ "${1}" != "ecr" ]; then
38 | echo "First argument must be ecr"
39 | elif [ "${2}" != "get-login" ]; then
40 | echo "Second argument must be get-login"
41 | elif [ "${4}" != "${test_region}" ]; then
42 | echo "Fourth argument must be region"
43 | else
44 | echo "echo All good"
45 | fi
46 | }
47 | 
48 | log=$(login ${test_region})
49 | expected="All good"
50 | assert "equal" "${log}" "${expected}"
51 | 
52 | # ecr_logins() test
53 | tag_test "ecr_logins()"
54 | test_regions=(us-east-1 us-west-2 eu-west-1)
55 | logged_into=""
56 | 
57 | function login() {
58 | logged_into="${logged_into}|$1"
59 | }
60 | 
61 | ecr_logins "${test_regions[@]}"
62 | assert "equal" "$logged_into" "|us-east-1|us-west-2|eu-west-1"
63 | 
64 | # ensure_repo() setup
65 | copy_func create_repo old_create_repo
66 | test_region=us-east-1
67 | FAILURE_MESSAGE=""
68 | CALLED=0
69 | 
70 | function aws() {
71 | if [ "${1}" != "ecr" ]; then
72 | FAILURE_MESSAGE="First argument must be ecr"
73 | elif [ "${2}" != "describe-repositories" ]; then
74 | FAILURE_MESSAGE="Second argument must be describe-repositories"
75 | elif [ "${4}" != "${test_region}" ]; then
76 | FAILURE_MESSAGE="Fourth argument must be region"
77 | elif [ "${6}" == "exists" ]; then
78 | return 0
79 | elif [ "${6}" == "not_exists" ]; then
80 | return 1
81 | else
82 | FAILURE_MESSAGE="${6} must be exists or not_exists"
83 | fi
84 | }
85 | 
86 | function create_repo() {
87 | CALLED=1
88 | }
89 | 
90 | # ensure_repo() exists test
91 | tag_test "ensure_repo() exists"
92 | repo=exists
93 | 
94 | ensure_repo ${test_region}
95 | assert "equal" "${CALLED}" "0"
96 | assert "equal" "${FAILURE_MESSAGE}" "" "should not have any failures"
97 | 
98 | # ensure_repo() doesn't exist test
99 | tag_test "ensure_repo() doesn't exist"
100 | repo="not_exists"
101 | 
102 | ensure_repo ${test_region}
103 | assert "equal" "${CALLED}" "1"
104 | assert "equal" "${FAILURE_MESSAGE}" "" "should not have any failures"
105 | 
106 | # ensure_repo() cleanup
107 | copy_func old_create_repo create_repo
108 | 
109 | # create_repo() test
110 | tag_test "create_repo()"
111 | repo=repo
112 | test_region=us-east-1
113 | FAILURE_MESSAGE=""
114 | CALLED=0
115 | 
116 | function aws() {
117 | if [ "${1}" != "ecr" ]; then
118 | FAILURE_MESSAGE="First argument must be ecr"
119 | elif [ "$2" != "create-repository" ]; then
120 | FAILURE_MESSAGE="Second argument must be create-repository"
121 | elif [ "$4" != "${test_region}" ]; then
122 | FAILURE_MESSAGE="Fourth argument must be region"
123 | elif [ "$6" != "repo" ]; then
124 | FAILURE_MESSAGE="Sixth argument must be repo"
125 | else
126 | CALLED=1
127 | fi
128 | }
129 | 
130 | create_repo ${test_region}
131 | assert "equal" "${FAILURE_MESSAGE}" "" "should not have any failures"
132 | assert "equal" "${CALLED}" "1"
133 | 
134 | # image_exists() test
135 | tag_test "image_exists()"
136 | 
137 | function aws() {
138 | if [ ${4} == "us-east-1" ]; then
139 | echo "IMAGES"
140 | else
141 | echo "FAILURES"
142 | fi
143 | }
144 | 
145 | repo=repo after=test image_exists us-east-1 ; assert "equal" "$?" "0" "finds existing image"
146 | repo=repo after=test image_exists us-west-1 || assert "equal" "$?" "1" "finds no image"
147 | 
148 | function aws() {
149 | if [ ${8} == "imageTag=v1.0" ]; then
150 | echo "IMAGES"
151 | else
152 | echo "FAILURES"
153 | fi
154 | }
155 | 
156 | repo=repo after=test image_exists us-east-1 v1.0 ; assert "equal" "$?" "0" "finds existing tagged image"
157 | repo=repo after=test image_exists us-east-1 v2.0 || assert "equal" "$?" "1" "finds no tagged image"
158 | repo=repo after=test image_exists us-east-1 || assert "equal" "$?" "1" "finds no tagged image"
159 | 
160 | # github_status() test
161 | tag_test "github_status()"
162 | test_status="good"
163 | test_description="clear"
164 | status_url="https://api.github.com/repos/someone/stuff"
165 | FAILURE_MESSAGE=""
166 | CALLED=0
167 | 
168 | function curl() {
169 | if [ "${3}" != "POST" ]; then
170 | FAILURE_MESSAGE="Must be a POST request"
171 | elif [ "${7}" != "{\"state\":\"${test_status}\",\"description\":\"${test_description}\",\"context\":\"ecs-conex\"}" ]; then
172 | FAILURE_MESSAGE="Must post correct body"
173 | elif [ "${8}" != "${status_url}" ]; then
174 | FAILURE_MESSAGE="Must post to the status url"
175 | else
176 | CALLED=1
177 | fi
178 | }
179 | 
180 | github_status ${test_status} ${test_description}
181 | assert "equal" "${FAILURE_MESSAGE}" "" "should not have any failures"
182 | assert "equal" "${CALLED}" "1"
183 | 
184 | # check_dockerfile() test
185 | tag_test "check_dockerfile()"
186 | filepath="/fake/file/path"
187 | log=$(check_dockerfile ${filepath})
188 | assert "equal" "${log}" "no Dockerfile found"
189 | assert "equal" "$?" "0"
190 | 
191 | filepath="ecs-conex.sh"
192 | log=$(check_dockerfile ${filepath})
193 | assert "equal" "${log}" ""
194 | 
195 | # check_receives() test
196 | tag_test "check_receives()"
197 | ApproximateReceiveCount=3
198 | check_receives && assert "equal" "$?" "0"
199 | 
200 | ApproximateReceiveCount=4
201 | check_receives || assert "equal" "$?" "3"
202 | 
203 | # parse_message() test
204 | tag_test "parse_message()"
205 | Message=$(cat ./test/fixtures/message.test.json)
206 | GithubAccessToken=test
207 | parse_message
208 | assert "equal" "${status_url}" "https://api.github.com/repos/test/test/statuses/test?access_token=test"
209 | 
210 | # credentials() setup
211 | tmpdocker=$(mktemp /tmp/dockerfile-XXXXXX)
212 | tmpcreds=$(cat ./test/fixtures/creds.test.json)
213 | 
214 | function curl () {
215 | nullRole=$(printenv | grep nullRole | sed 's/.*=//')
216 | role=test_role
217 | 
218 | if [[ "${nullRole}" == "1" ]]; then
219 | echo ""
220 | elif [[ "${2}" != *"${role}"* ]]; then
221 | echo ${role}
222 | else
223 | echo ${creds}
224 | fi
225 | }
226 | 
227 | function write_dockerfile() {
228 | creds=$1
229 | echo "ARG NPMAccessToken" > ${tmpdocker}
230 | echo "ARG GithubAccessToken" >> ${tmpdocker}
231 | echo "ARG AWS_ACCESS_KEY_ID" >> ${tmpdocker}
232 | echo "ARG AWS_SECRET_ACCESS_KEY" >> ${tmpdocker}
233 | echo "ARG AWS_SESSION_TOKEN" >> ${tmpdocker}
234 | }
235 | 
236 | function clear_dockerfile() {
237 | echo "" > ${tmpdocker}
238 | }
239 | 
240 | # credentials() no npm token in env test
241 | tag_test "credentials() missing npm token in env"
242 | export NPMAccessToken=""
243 | write_dockerfile "${tmpcreds}"
244 | credentials ${tmpdocker}
245 | assert "doesNotContain" "${args}" "NPMAccessToken=${NPMAccessToken}"
246 | 
247 | # credentials() no npm token in dockerfile test
248 | tag_test "credentials() missing npm token in dockerfile"
249 | export NPMAccessToken=test_NPMAccessToken
250 | clear_dockerfile
251 | credentials ${tmpdocker}
252 | assert "doesNotContain" "${args}" "NPMAccessToken=${NPMAccessToken}"
253 | 
254 | # credentials() no github token in dockerfile test
255 | tag_test "credentials() missing github token in dockerfile"
256 | export GithubAccessToken=test_GithubAccessToken
257 | clear_dockerfile
258 | credentials ${tmpdocker}
259 | assert "doesNotContain" "${args}" "GithubAccessToken=${GithubAccessToken}"
260 | 
261 | # credentials() no role test
262 | tag_test "credentials() missing role"
263 | export nullRole=1
264 | write_dockerfile "${tmpcreds}"
265 | credentials ${tmpdocker}
266 | assert "equal" "${args}" "--build-arg NPMAccessToken=test_NPMAccessToken --build-arg GithubAccessToken=test_GithubAccessToken"
267 | 
268 | # credentials() role test
269 | tag_test "credentials() role"
270 | export nullRole=""
271 | write_dockerfile "${tmpcreds}"
272 | credentials ${tmpdocker}
273 | assert "contains" "${args}" "NPMAccessToken=${NPMAccessToken}"
274 | assert "contains" "${args}" "GithubAccessToken=${GithubAccessToken}"
275 | assert "contains" "${args}" "AWS_ACCESS_KEY_ID=$(node -e "console.log(${creds}.AccessKeyId)")"
276 | assert "contains" "${args}" "AWS_SECRET_ACCESS_KEY=$(node -e "console.log(${creds}.SecretAccessKey)")"
277 | assert "contains" "${args}" "AWS_SESSION_TOKEN=$(node -e "console.log(${creds}.SessionToken)")"
278 | 
279 | # credentials() missing build arguments in dockerfile test
280 | tag_test "credentials() missing build arguments in dockerfile"
281 | clear_dockerfile
282 | credentials ${tmpdocker}
283 | assert "equal" "${args}" "" "should be empty"
284 | 
285 | # credentials() missing build arguments in creds test
286 | tag_test "credentials() missing build arguments in creds"
287 | write_dockerfile "{}"
288 | credentials ${tmpdocker}
289 | assert "equal" "${args}" "--build-arg NPMAccessToken=test_NPMAccessToken --build-arg GithubAccessToken=test_GithubAccessToken"
290 | 
291 | # exact_match() test
292 | AccountId=1
293 | region=us-east-1
294 | repo=test
295 | 
296 | function git () {
297 | echo "test_tag"
298 | }
299 | 
300 | function docker() {
301 | if [ ${1} == "tag" ]; then
302 | assert "equal" "${3}" "1.dkr.ecr.us-east-1.amazonaws.com/test:test_tag"
303 | elif [ ${1} == "push" ]; then
304 | assert "equal" "${2}" "1.dkr.ecr.us-east-1.amazonaws.com/test:test_tag"
305 | else
306 | FAILURE="Tried to push a tag that already exists"
307 | fi
308 | }
309 | 
310 | FAILURE=""
311 | log="$(exact_match)"
312 | assert "equal" "$FAILURE" ""
313 | assert "contains" "$log" "test_tag"
314 | 
315 | function git () {
316 | echo "v1.0"
317 | }
318 | 
319 | FAILURE=""
320 | log="$(exact_match)"
321 | assert "equal" "$FAILURE" ""
322 | assert "contains" "$log" ""
323 | 
324 | # docker_push() test
325 | tag_test "docker_push()"
326 | regions=(us-east-1)
327 | repo=test
328 | after=test
329 | FAILURE=""
330 | 
331 | function ensure_repo() {
332 | if [ "${1}" != "us-east-1" ]; then
333 | FAILURE="Region not passed into ensure_repo"
334 | fi
335 | }
336 | 
337 | function login() {
338 | FAILURE="Should expect prior login"
339 | }
340 | 
341 | function image_exists {
342 | return 1
343 | }
344 | 
345 | function after_image {
346 | if [ "${1}" != "us-east-1" ]; then
347 | FAILURE="Region not passed into after_image"
348 | else
349 | echo "some_after_image"
350 | fi
351 | }
352 | 
353 | function docker() {
354 | if [ ${1} == "tag" ]; then
355 | assert "equal" "${3}" "some_after_image"
356 | elif [ ${1} == "push" ]; then
357 | assert "equal" "${2}" "some_after_image"
358 | else
359 | FAILURE="should call docker tag or docker push"
360 | fi
361 | }
362 | # export functions explicitly for parallel, which is called from within docker_push
363 | export -f assert passed failed docker
364 | 
365 | function git() {
366 | exit 1
367 | }
368 | 
369 | function exact_match() {
370 | assert "equal" "${FAILURE}" ""
371 | }
372 | 
373 | log=$(docker_push)
374 | assert "equal" "$?" "0"
375 | assert "contains" "${log}" "pushing test to us-east-1"
376 | assert "equal" "${FAILURE}" "" "should not have any failures"
377 | 
378 | # docker_push() test to region with existing images
379 | tag_test "docker_push() test to region with existing images"
380 | function image_exists() {
381 | if [ "$1" == "us-west-2" ]; then
382 | return 0
383 | else
384 | return 1
385 | fi
386 | }
387 | 
388 | regions=(us-east-1 us-west-2)
389 | log=$(docker_push)
390 | assert "equal" "$?" "0"
391 | assert "contains" "${log}" "pushing test to us-east-1"
392 | assert "contains" "${log}" "found existing image for test in us-west-2, skipping push"
393 | assert "equal" "${FAILURE}" "" "should not have any failures"
394 | 
395 | # bucket_push() no ImageBucketPrefix
396 | tag_test "bucket_push"
397 | export ImageBucketPrefix=""
398 | 
399 | log=$(bucket_push)
400 | expected="nothing to do"
401 | assert "equal" "${log}" "${expected}"
402 | 
403 | # bucket_push() multiple regions
404 | export ImageBucketPrefix="something"
405 | export bucket_regions=(region1 region2)
406 | export tmpdir="temp"
407 | export repo="repo"
408 | export after="1"
409 | 
410 | function aws() {
411 | if [ ${1} != "s3" ]; then
412 | FAILURE="should call aws s3"
413 | elif [ ${2} != "cp" ]; then
414 | FAILURE="should call cp"
415 | elif [ ${3} != "temp/repo-1.tar.gz" ]; then
416 | FAILURE="should call correct tmpdir and repo"
417 | fi
418 | }
419 | 
420 | log=$(bucket_push)
421 | assert "equal" "${FAILURE}" "" "should not have any failures"
422 | assert "contains" "${log}" "copying to region1"
423 | assert "contains" "${log}" "copying to region2"
424 | 
425 | # docker_save() no ImageBucketPrefix
426 | tag_test "docker_save"
427 | export ImageBucketPrefix=""
428 | 
429 | log=$(docker_save)
430 | expected="nothing to do"
431 | assert "equal" "${log}" "${expected}"
432 | 
433 | # docker_save()
434 | export ImageBucketPrefix="something"
435 | export tmpdir="test"
436 | export repo="repo"
437 | export after=1
438 | export image_file="file"
439 | 
440 | function docker() {
441 | if [ ${1} == "save" ]; then
442 | assert "equal" "$*" "save repo:1" "calls docker save with repo:after" >&2
443 | else
444 | FAILURE="should call docker save"
445 | fi
446 | }
447 | 
448 | log=$(docker_save)
449 | expected="saving image to test/repo-1.tar.gz"
450 | assert "equal" "${log}" "${expected}" "saves image to tmpdir/repo-after"
451 | assert "equal" "${FAILURE}" "" "should not have any failures"
452 | 
453 | # cleanup()
454 | tag_test "cleanup()"
455 | tmpdir=$(mktemp -d /tmp/ecs-conex-test-XXXXXX)
456 | Message=$(cat ./test/fixtures/message.test.json)
457 | GithubAccessToken=test
458 | status=""
459 | message=""
460 | FAILURE=""
461 | 
462 | function docker() {
463 | if [ ${1} == "images" ]; then
464 | assert "equal" "$*" "images -q test:test" "calls images with repo:tag" >&2
465 | echo "12345678"
466 | elif [ ${1} == "rmi" ]; then
467 | assert "equal" "$*" "rmi -f 12345678" "calls rmi with image ID" >&2
468 | else
469 | FAILURE="should call docker images or docker rmi"
470 | fi
471 | }
472 | 
473 | function github_status() {
474 | github_status=$1
475 | github_message=$2
476 | }
477 | 
478 | false || cleanup
479 | assert "equal" "${github_status}" "failure"
480 | assert "equal" "${github_message}" "ecs-conex failed to build an image"
481 | assert "equal" "${FAILURE}" "" "should not have any failures"
482 | 
483 | function docker() {
484 | if [ ${1} == "images" ]; then
485 | assert "equal" "$*" "images -q test:test" "calls images with repo:tag" >&2
486 | echo ""
487 | else
488 | FAILURE="should call docker images only"
489 | fi
490 | }
491 | 
492 | true
493 | cleanup
494 | assert "equal" "${github_status}" "success"
495 | assert "equal" "${github_message}" "ecs-conex successfully completed"
496 | assert "equal" "${FAILURE}" "" "should not have any failures"
497 | 
498 | if [ -d ${tmpdir} ]; then
499 | FAILURE="directory was not deleted"
500 | rm -rf ${tmpdir}
501 | fi
502 | assert "equal" "${FAILURE}" "" "should not have any failures"
503 | 
504 | # summary
505 | summarize
506 | 
507 | if [[ ${FAILED} != 0 ]]; then
508 | exit 1
509 | else
510 | exit 0
511 | fi
512 | 
--------------------------------------------------------------------------------
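For reference, a minimal sketch (not part of the repository) of the signature scheme the inline ConexWebhookFunction verifies: the sender computes an HMAC-SHA1 of the JSON-serialized body with the shared secret and supplies it via the X-Hub-Signature header, which the API Gateway request template above maps into event.signature. The secret and payload values here are illustrative.

// Sketch: compute the "sha1=<hex>" value the Lambda compares against event.signature.
var crypto = require('crypto');

function sign(secret, body) {
  // The Lambda hashes JSON.stringify(body), so the sender must sign the same serialization.
  return 'sha1=' + crypto.createHmac('sha1', secret).update(JSON.stringify(body)).digest('hex');
}

console.log(sign('illustrative-secret', { ref: 'refs/heads/test-branch', zen: 'ping' }));
// prints "sha1=" followed by a 40-character hex digest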