├── .gitignore ├── acceptance ├── fixtures │ ├── public │ │ └── index.html │ └── src │ │ └── code.txt ├── dockerfiles │ ├── backend │ │ ├── src │ │ │ ├── public │ │ │ │ └── index.html │ │ │ └── server.js │ │ └── Dockerfile │ ├── database │ │ ├── data │ │ │ └── data.json │ │ └── Dockerfile │ ├── config │ │ ├── config │ │ │ └── config.json │ │ └── Dockerfile │ ├── application │ │ └── Dockerfile │ ├── rsync │ │ ├── Dockerfile │ │ └── etc │ │ │ └── rsyncd.conf │ ├── base │ │ └── Dockerfile │ └── build.sh ├── Galleyfile.js ├── Galleyfile.coffee ├── test_commands.coffee └── acceptance_spec.coffee ├── gulpfile.js ├── .travis.yml ├── docs └── images │ └── galley-red.png ├── lib ├── commands │ ├── version.coffee │ ├── list.coffee │ ├── stop_env.coffee │ ├── config.coffee │ ├── pull.coffee │ ├── cleanup.coffee │ ├── help.coffee │ └── run.coffee ├── lib │ ├── promise_utils.coffee │ ├── progress_line.coffee │ ├── console_reporter.coffee │ ├── localhost_forwarder.coffee │ ├── docker_config.coffee │ ├── docker_args.coffee │ ├── stdin_command_interceptor.coffee │ ├── overlay_output_stream.coffee │ ├── docker_utils.coffee │ ├── rsyncer.coffee │ └── service_helpers.coffee ├── bin │ └── watch.coffee └── index.coffee ├── descriptions └── rsync │ ├── Dockerfile │ └── etc │ └── rsyncd.conf ├── config └── build.coffee ├── LICENSE ├── spec ├── promise_utils_spec.coffee ├── util │ └── test_reporter.coffee ├── docker_args_spec.coffee ├── run_spec.coffee └── service_helpers_spec.coffee ├── package.json ├── CONTRIBUTING.md ├── CHANGELOG.md ├── gulpfile.coffee └── README.md /.gitignore: -------------------------------------------------------------------------------- 1 | /node_modules 2 | /build 3 | -------------------------------------------------------------------------------- /acceptance/fixtures/public/index.html: -------------------------------------------------------------------------------- 1 | Hello, Source! 
2 | -------------------------------------------------------------------------------- /acceptance/fixtures/src/code.txt: -------------------------------------------------------------------------------- 1 | println 'Hello World!' 2 | -------------------------------------------------------------------------------- /acceptance/dockerfiles/backend/src/public/index.html: -------------------------------------------------------------------------------- 1 | Hello, World! 2 | -------------------------------------------------------------------------------- /acceptance/dockerfiles/database/data/data.json: -------------------------------------------------------------------------------- 1 | { 2 | "data": "ok" 3 | } 4 | -------------------------------------------------------------------------------- /acceptance/dockerfiles/config/config/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "config": "ok" 3 | } 4 | -------------------------------------------------------------------------------- /gulpfile.js: -------------------------------------------------------------------------------- 1 | require('coffee-script/register'); 2 | require('./gulpfile.coffee'); 3 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: node_js 2 | node_js: 3 | - "4.1" 4 | 5 | script: gulp test 6 | -------------------------------------------------------------------------------- /docs/images/galley-red.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/google-fabric/galley/HEAD/docs/images/galley-red.png -------------------------------------------------------------------------------- /acceptance/dockerfiles/config/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM galley-integration-base 2 | 3 | COPY 
config /config 4 | VOLUME /config 5 | -------------------------------------------------------------------------------- /acceptance/dockerfiles/application/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM galley-integration-base 2 | 3 | CMD ["curl", "http://backend:9615/"] 4 | -------------------------------------------------------------------------------- /acceptance/Galleyfile.js: -------------------------------------------------------------------------------- 1 | // CoffeeScript shim for local testing 2 | require('coffee-script/register'); 3 | module.exports = require('./Galleyfile.coffee'); 4 | -------------------------------------------------------------------------------- /acceptance/dockerfiles/database/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM galley-integration-base 2 | 3 | COPY data /data 4 | 5 | CMD ["/usr/local/bin/http-server", "/data"] 6 | EXPOSE 8080 7 | -------------------------------------------------------------------------------- /acceptance/dockerfiles/backend/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM galley-integration-base 2 | 3 | COPY src /src 4 | 5 | WORKDIR /src 6 | 7 | CMD ["/usr/bin/node", "server.js"] 8 | EXPOSE 9615 9 | -------------------------------------------------------------------------------- /lib/commands/version.coffee: -------------------------------------------------------------------------------- 1 | module.exports = (options, done) -> 2 | cliPackage = require '../../../package' 3 | console.log "galley version #{cliPackage.version}" 4 | 5 | done?() 6 | -------------------------------------------------------------------------------- /descriptions/rsync/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | 3 | RUN apt-get update && apt-get install -y \ 4 | rsync 5 | 6 | COPY etc /etc 7 | 8 | 
EXPOSE 873 9 | CMD ["/usr/bin/rsync", "--no-detach", "--daemon"] 10 | -------------------------------------------------------------------------------- /descriptions/rsync/etc/rsyncd.conf: -------------------------------------------------------------------------------- 1 | uid = root 2 | gid = root 3 | use chroot = yes 4 | 5 | log file = /dev/stdout 6 | 7 | [root] 8 | read only = false 9 | path = / 10 | comment = docker volume 11 | -------------------------------------------------------------------------------- /acceptance/dockerfiles/rsync/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM galley-integration-base 2 | 3 | RUN apt-get update && apt-get install -y \ 4 | rsync 5 | 6 | COPY etc /etc 7 | 8 | EXPOSE 873 9 | CMD ["/usr/bin/rsync", "--no-detach", "--daemon"] 10 | -------------------------------------------------------------------------------- /acceptance/dockerfiles/base/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:14.04 2 | 3 | RUN apt-get update && apt-get install -y \ 4 | curl \ 5 | nodejs \ 6 | npm 7 | 8 | RUN ln -s /usr/bin/nodejs /usr/bin/node 9 | RUN npm install -g http-server@0.7.4 10 | -------------------------------------------------------------------------------- /config/build.coffee: -------------------------------------------------------------------------------- 1 | module.exports = 2 | build_dir: 'build' 3 | 4 | src: 'lib' 5 | dest: 'build/lib' 6 | 7 | spec_src: 'spec' 8 | spec_dest: 'build/spec' 9 | 10 | acceptance_src: 'acceptance' 11 | acceptance_dest: 'build/acceptance' 12 | -------------------------------------------------------------------------------- /acceptance/dockerfiles/rsync/etc/rsyncd.conf: -------------------------------------------------------------------------------- 1 | uid = root 2 | gid = root 3 | use chroot = yes 4 | 5 | pid file = /var/run/rsyncd.pid 6 | log file = /dev/stdout 7 | 8 | [root] 9 | read only = 
false 10 | path = / 11 | comment = docker volume 12 | -------------------------------------------------------------------------------- /acceptance/dockerfiles/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | cd "${BASH_SOURCE%/*}" 6 | 7 | docker build -t galley-integration-base base 8 | 9 | docker build -t galley-integration-backend:original backend 10 | docker build -t galley-integration-application:original application 11 | docker build -t galley-integration-config:original config 12 | docker build -t galley-integration-database:original database 13 | docker build -t galley-integration-rsync:original rsync 14 | -------------------------------------------------------------------------------- /lib/lib/promise_utils.coffee: -------------------------------------------------------------------------------- 1 | RSVP = require 'rsvp' 2 | 3 | # Iterates over a list, applying "cb" to each element in turn. Chains promises such that if a cb 4 | # call returns a promise, the next iteration of the loop won't happen until that promise resolves. 5 | # 6 | # Explicitly being serial here. If you want parallel promise resolution, use RSVP.all. 7 | # 8 | # Returns a promise that resolves when the entire array has resolved. 9 | promiseEach = (list, cb) -> 10 | loopPromise = RSVP.resolve() 11 | for el in list 12 | do (el) -> 13 | loopPromise = loopPromise.then -> cb(el) 14 | loopPromise 15 | 16 | module.exports = { 17 | promiseEach 18 | } 19 | -------------------------------------------------------------------------------- /lib/lib/progress_line.coffee: -------------------------------------------------------------------------------- 1 | spin = require 'term-spinner' 2 | 3 | # Class to wrap state of a string written to an output stream, so that it can be cleared and 4 | # overwritten by a new string. 
5 | module.exports = 6 | class ProgressLine 7 | constructor: (@stream, @colorFn = (v) -> v) -> 8 | @spinner = spin.new() 9 | @currentStr = '' 10 | 11 | set: (str) -> 12 | unless @stream.isTTY 13 | return @stream.write str 14 | 15 | @spinner.next() 16 | 17 | @stream.moveCursor(-@currentStr.length, 0) 18 | @currentStr = @currentStr.trim() 19 | 20 | nextStr = if str?[0] is '[' then str else "#{@spinner.current} #{str}" 21 | 22 | if @currentStr.length > nextStr.length 23 | nextStr = nextStr + Array(@currentStr.length - nextStr.length + 1).join ' ' 24 | 25 | @stream.write @colorFn(nextStr) 26 | @currentStr = nextStr 27 | 28 | clear: -> 29 | return unless @stream.isTTY 30 | 31 | @set '' 32 | @stream.moveCursor(-@currentStr.length - 1, 0) 33 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2015 Twitter, Inc 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /acceptance/Galleyfile.coffee: -------------------------------------------------------------------------------- 1 | # You can test this Galleyfile locally w/ galley commands! There's a bit of a trick to getting 2 | # galley to resolve correctly. 3 | # 4 | # In the directory above this, run: 5 | # $ npm link; npm link galley 6 | # 7 | # This will make it so that galley-cli run in this directory will recur up to find galley's own 8 | # package.json, and from there be able to resolve 'galley'. 9 | 10 | module.exports = 11 | CONFIG: 12 | rsync: 13 | image: 'galley-integration-rsync' 14 | module: 'root' 15 | suffix: 'galley-integration-rsync' 16 | 17 | ADDONS: 18 | 'backend-addon': 19 | 'application': 20 | links: 21 | 'galley-integration': ['backend'] 22 | 23 | 'application': 24 | image: 'galley-integration-application' 25 | source: '/src' 26 | 27 | 'backend': 28 | image: 'galley-integration-backend' 29 | links: 30 | 'galley-integration': ['database:db'] 31 | source: '/src/public' 32 | volumesFrom: ['config'] 33 | 34 | 'config': 35 | image: 'galley-integration-config' 36 | 37 | 'database': 38 | image: 'galley-integration-database' 39 | stateful: true 40 | -------------------------------------------------------------------------------- /lib/commands/list.coffee: -------------------------------------------------------------------------------- 1 | _ = require 'lodash' 2 | chalk = require 'chalk' 3 | RSVP = require 'rsvp' 4 | 5 | help = require './help' 6 | 7 | ServiceHelpers = require '../lib/service_helpers' 8 | 9 | listServices = (galleyfilePath, out, serviceEnvMap, serviceAddonMap) -> 10 | out.write "#{ 
chalk.bold 'Galleyfile:' } #{galleyfilePath}\n" 11 | 12 | alphabetizedKeys = _.keys(serviceEnvMap) 13 | alphabetizedKeys.sort() 14 | 15 | for key in alphabetizedKeys 16 | out.write ' ' + chalk.blue key 17 | envs = (".#{env}" for env in serviceEnvMap[key]) 18 | if envs.length > 0 19 | out.write chalk.gray(" [#{ envs.join(', ') }]") 20 | 21 | addons = serviceAddonMap[key] or [] 22 | if addons.length > 0 23 | out.write chalk.green(" -a #{ addons.join(' ')}") 24 | out.write '\n' 25 | 26 | RSVP.resolve() 27 | 28 | module.exports = (args, commandOptions, done) -> 29 | serviceEnvMap = ServiceHelpers.envsByService commandOptions.config 30 | serviceAddonMap = ServiceHelpers.addonsByService commandOptions.config 31 | 32 | listServices commandOptions.configPath, (commandOptions.stdout or process.stdout), serviceEnvMap, serviceAddonMap 33 | .then -> done?() 34 | .catch (e) -> 35 | console.error e?.stack or 'Aborting. ' 36 | process.exit -1 37 | -------------------------------------------------------------------------------- /lib/lib/console_reporter.coffee: -------------------------------------------------------------------------------- 1 | chalk = require 'chalk' 2 | 3 | ProgressLine = require './progress_line' 4 | 5 | class ConsoleReporter 6 | constructor: (@stream) -> 7 | @inLine = false 8 | 9 | maybeSpace: -> 10 | if @inLine 11 | @stream.write ' ' 12 | 13 | startService: (serviceName) -> 14 | @stream.write chalk.blue(serviceName + ':') 15 | @inLine = true 16 | @ 17 | 18 | startTask: (job) -> 19 | @maybeSpace() 20 | @stream.write chalk.gray(job + '…') 21 | @inLine = true 22 | @ 23 | 24 | startProgress: (msg) -> 25 | @maybeSpace() 26 | @stream.write msg + ' ' if msg 27 | @inLine = true 28 | new ProgressLine @stream, chalk.gray 29 | 30 | succeedTask: (msg = 'done!') -> 31 | @maybeSpace() 32 | @stream.write chalk.green(msg) 33 | @ 34 | 35 | completeTask: (msg) -> 36 | @maybeSpace() 37 | @stream.write chalk.cyan(msg) 38 | @ 39 | 40 | finish: -> 41 | if @inLine 42 | 
@stream.write '\n' 43 | @inLine = false 44 | @ 45 | 46 | error: (err) -> 47 | @maybeSpace() 48 | @stream.write chalk.red(err) + '\n' 49 | @inLine = false 50 | @ 51 | 52 | message: (msg = '') -> 53 | @stream.write msg + '\n' 54 | @inLine = false 55 | @ 56 | 57 | module.exports = ConsoleReporter 58 | -------------------------------------------------------------------------------- /acceptance/dockerfiles/backend/src/server.js: -------------------------------------------------------------------------------- 1 | var http = require('http'); 2 | var fs = require('fs'); 3 | 4 | // Server that returns contents of a local file (public/index.html), the 5 | // contents of a "config" file from /config/config.json (mapped in by 6 | // "volumesFrom"), and the output of another HTTP request to the "database" 7 | // container. 8 | 9 | http.createServer(function (req, res) { 10 | http.get({host: 'db', port: '8080', path: '/data.json'}, function(dbres) { 11 | dbres.on('data', function(d) { 12 | try { 13 | var index = fs.readFileSync('public/index.html').toString(); 14 | var config = fs.readFileSync('/config/config.json').toString(); 15 | 16 | json = { 17 | index: index, 18 | config: JSON.parse(config), 19 | database: JSON.parse(d.toString()), 20 | } 21 | 22 | res.writeHead(200, {'Content-Type': 'application/json'}); 23 | res.end(JSON.stringify(json, null, 2)); 24 | } catch(e) { 25 | res.writeHead(500, {'Content-Type': 'application/json'}); 26 | res.end(JSON.stringify({error: e.toString()})) 27 | } 28 | }); 29 | }).on('error', function(e) { 30 | res.writeHead(500, {'Content-Type': 'application/json'}); 31 | res.end(JSON.stringify({error: e.toString()})) 32 | }); 33 | }).listen(9615); 34 | -------------------------------------------------------------------------------- /spec/promise_utils_spec.coffee: -------------------------------------------------------------------------------- 1 | expect = require 'expect' 2 | _ = require 'lodash' 3 | RSVP = require 'rsvp' 4 | 5 | PromiseUtils = 
require '../lib/lib/promise_utils' 6 | 7 | describe 'PromiseUtils', -> 8 | describe 'promiseEach', -> 9 | it 'resolves after each promise resolves', -> 10 | handledValues = {} 11 | values = ['a', 'b', 'c'] 12 | 13 | PromiseUtils.promiseEach values, (val) -> 14 | new RSVP.Promise (resolve, reject) -> 15 | process.nextTick -> 16 | handledValues[val] = true 17 | resolve() 18 | .then -> 19 | expect(handledValues).toEqual 20 | 'a': true 21 | 'b': true 22 | 'c': true 23 | 24 | it 'rejects if a value rejects', -> 25 | handledValues = {} 26 | values = ['a', 'b', 'c'] 27 | 28 | succeeded = false 29 | 30 | PromiseUtils.promiseEach values, (val) -> 31 | new RSVP.Promise (resolve, reject) -> 32 | process.nextTick -> 33 | return reject('expected') if val == 'b' 34 | 35 | handledValues[val] = true 36 | resolve() 37 | .then -> 38 | succeeded = true 39 | .catch (err) -> 40 | throw err unless err == 'expected' 41 | .then -> 42 | expect(succeeded).toBe false 43 | expect(handledValues).toEqual 44 | 'a': true 45 | -------------------------------------------------------------------------------- /spec/util/test_reporter.coffee: -------------------------------------------------------------------------------- 1 | chalk = require 'chalk' 2 | 3 | # Test double for the "ConsoleReporter" class that keeps track of what 4 | # "tasks" were called for each service, in order. Makes it easy for the 5 | # acceptance tests to determine what Galley did when starting up a 6 | # service. 
7 | class TestReporter 8 | constructor: -> 9 | @services = {} 10 | @currentService = null 11 | 12 | startService: (serviceName) -> 13 | @currentService = serviceName 14 | @services[@currentService] = [] 15 | @ 16 | 17 | startTask: (job) -> 18 | @lastTask = job 19 | @ 20 | 21 | startProgress: -> 22 | { 23 | set: -> 24 | clear: -> 25 | } 26 | 27 | succeedTask: (msg = 'done!') -> 28 | @services[@currentService].push @lastTask 29 | @lastTask = null 30 | @ 31 | 32 | completeTask: (msg) -> 33 | if @lastTask 34 | @services[@currentService].push @lastTask 35 | else 36 | @services[@currentService].push msg 37 | 38 | @lastTask = null 39 | @ 40 | 41 | finish: -> 42 | if @lastTask 43 | @services[@currentService].push @lastTask 44 | 45 | @currentService = null 46 | @lastTask = null 47 | @ 48 | 49 | error: (err) -> 50 | @currentService = null 51 | @lastTask = null 52 | @lastError = err 53 | @ 54 | 55 | message: (msg) -> 56 | @currentService = null 57 | @lastTask = null 58 | @ 59 | 60 | module.exports = TestReporter 61 | -------------------------------------------------------------------------------- /lib/lib/localhost_forwarder.coffee: -------------------------------------------------------------------------------- 1 | tcpProxy = require 'tcp-proxy' 2 | 3 | # Class to run TCP proxies from the localhost in to a boot2docker VM. Useful for when you need 4 | # devices outside of the host machine to connect in to a container. 5 | class LocalhostForwarder 6 | constructor: (modem, reporter) -> 7 | @modem = modem 8 | @reporter = reporter 9 | 10 | # Returns either a receipt object with a "stop" method, or null if forwarding is 11 | # unnecessary. 12 | # 13 | # ports: array of port numbers that are expected to be exposed on the Docker host, 14 | # which we will expose on the Galley host. 15 | forward: (ports) -> 16 | # If we're connecting to Docker via socket, assume that containers are running on the same host 17 | # as Galley, so there's no need to forward. 
18 | return if @modem.socketPath? 19 | 20 | servers = for port in ports 21 | server = tcpProxy.createServer 22 | target: 23 | host: @modem.host 24 | port: port 25 | 26 | server.listen port 27 | 28 | # "do" shenanigans since "port" mutates inside the loop. 29 | server.on 'error', do (port) => 30 | (err) => @reporter.error "Failure proxying to #{@modem.host}:#{port}: #{err}" 31 | 32 | server 33 | 34 | new LocalhostForwarderReceipt(servers) 35 | 36 | class LocalhostForwarderReceipt 37 | constructor: (servers) -> 38 | @servers = servers 39 | 40 | stop: -> 41 | server.close() for server in @servers 42 | @servers = [] 43 | 44 | module.exports = LocalhostForwarder 45 | -------------------------------------------------------------------------------- /lib/commands/stop_env.coffee: -------------------------------------------------------------------------------- 1 | _ = require 'lodash' 2 | ConsoleReporter = require '../lib/console_reporter' 3 | Docker = require 'dockerode' 4 | RSVP = require 'rsvp' 5 | help = require './help' 6 | 7 | stopContainer = (name, container, reporter) -> 8 | new RSVP.Promise (resolve, reject) -> 9 | reporter.startService(name).startTask('Stopping') 10 | 11 | container.stop (err, data) -> 12 | if err and err.statusCode isnt 304 13 | reporter.error err.json or "Error #{err.statusCode} stopping container" 14 | else 15 | reporter.succeedTask().finish() 16 | 17 | resolve() 18 | 19 | module.exports = (args, options, done) -> 20 | docker = new Docker() 21 | 22 | if args.length is 0 23 | return help args, options, done 24 | 25 | envRegExps = [] 26 | for env in args 27 | envRegExps.push new RegExp("\.#{env}$") 28 | 29 | reporter = options.reporter or new ConsoleReporter(process.stderr) 30 | 31 | docker.listContainers (err, containerInfos) -> 32 | throw err if err 33 | 34 | promise = RSVP.resolve() 35 | found = false 36 | 37 | for containerInfo in containerInfos 38 | id = containerInfo.Id 39 | 40 | for name in containerInfo.Names 41 | for envRegExp in 
envRegExps when name.match(envRegExp) 42 | found = true 43 | 44 | do (id, name) -> 45 | promise = promise.then -> 46 | stopContainer name, docker.getContainer(id), reporter 47 | 48 | if found 49 | promise.then done 50 | else 51 | reporter.message 'No containers found to stop' 52 | done?() 53 | -------------------------------------------------------------------------------- /package.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "galley", 3 | "version": "1.2.6", 4 | "description": "Galley is a Docker container orchestator for development and testing.", 5 | "main": "build/lib/index.js", 6 | "scripts": { 7 | "prepublish": "gulp build", 8 | "test": "gulp test acceptance" 9 | }, 10 | "author": "Twitter, Inc.", 11 | "license": "MIT", 12 | "repository": "twitter-fabric/galley", 13 | "bugs": { 14 | "url": "https://github.com/twitter-fabric/galley/issues", 15 | "email": "galley-discuss@googlegroups.com" 16 | }, 17 | "files": [ 18 | "build/lib", 19 | "LICENSE", 20 | "README.md" 21 | ], 22 | "engines": { 23 | "node": ">= 5.0.0" 24 | }, 25 | "devDependencies": { 26 | "coffee-script": "^1.10.0", 27 | "del": "^2.2.1", 28 | "expect": "^1.3.0", 29 | "gulp": "^3.8.10", 30 | "gulp-changed": "^1.0.0", 31 | "gulp-coffee": "^2.3.2", 32 | "gulp-load-plugins": "^1.2.4", 33 | "gulp-mocha": "^2.0.0", 34 | "gulp-notify": "^2.0.1", 35 | "gulp-plumber": "^1.1.0", 36 | "gulp-shell": "^0.5.2", 37 | "run-sequence": "^1.2.1", 38 | "semver": "^5.1.1", 39 | "stream-buffers": "^3.0.0" 40 | }, 41 | "optionalDependencies": { 42 | "fsevents": "^1.0.12" 43 | }, 44 | "dependencies": { 45 | "chalk": "^1.1.3", 46 | "charm": "git://github.com/crashlytics/node-charm", 47 | "chokidar": "^1.6.0", 48 | "dockerode": "^2.0.4", 49 | "home-dir": "^1.0.0", 50 | "is-running": "^2.0.0", 51 | "lodash": "^3.0.0", 52 | "minimist": "^1.1.0", 53 | "rsvp": "^3.0.16", 54 | "rsync": "^0.4.0", 55 | "tcp-proxy": "0.0.1", 56 | "term-spinner": "^1.0.0" 57 | } 58 | } 59 | 
-------------------------------------------------------------------------------- /lib/bin/watch.coffee: -------------------------------------------------------------------------------- 1 | _ = require 'lodash' 2 | chokidar = require 'chokidar' 3 | fs = require 'fs' 4 | path = require 'path' 5 | 6 | # Separate binary to watch for changes and send a 'change' message back to the parent when they 7 | # happen. We isolate this into its own process because the fsevents code has a habit of segfaulting 8 | # when removing lots of files at once. 9 | 10 | DEBOUNCE_INTERVAL_MS = 50 11 | 12 | notifyTimeout = null 13 | 14 | watchHandler = (path) -> 15 | clearTimeout(notifyTimeout) if notifyTimeout 16 | notifyTimeout = setTimeout(notifyHandler, DEBOUNCE_INTERVAL_MS) 17 | 18 | notifyHandler = -> 19 | notifyTimeout = null 20 | 21 | # If the parent process has since died (it was killed in a way that prevented it from cleanly 22 | # killing us) then this call will raise a "channel closed" exception and terminate us, which is 23 | # perfectly fine. 
24 | process.send 'change' 25 | 26 | source = process.argv[2] 27 | 28 | # If a .dockerignore exists, respect its ignored files including the default ignored 29 | ignoredFilesList = ['.DS_Store', '.git'] 30 | dockerignorePath = path.resolve(source, '.dockerignore') 31 | if fs.existsSync(dockerignorePath) 32 | dockerignoreLines = fs.readFileSync(dockerignorePath).toString('utf8').split('\n') 33 | ignoredFilesList = ignoredFilesList.concat(_.compact(dockerignoreLines)) 34 | 35 | ignoredFilesList = _.map(ignoredFilesList, (line) -> 36 | line.replace(/\./g, '\\.') 37 | ) 38 | ignoredFilesRegex = new RegExp(ignoredFilesList.join('|')) 39 | 40 | watcher = chokidar.watch source, 41 | ignored: ignoredFilesRegex 42 | ignoreInitial: true 43 | .on 'add', watchHandler 44 | .on 'addDir', watchHandler 45 | .on 'change', watchHandler 46 | .on 'unlink', watchHandler 47 | .on 'unlinkDir', watchHandler 48 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | We love pull requests from everyone. By participating in this project, you 4 | agree to abide by the Twitter Open Source [code of conduct]. 
5 | 6 | [code of conduct]: https://engineering.twitter.com/opensource/code-of-conduct 7 | 8 | Fork, then clone the repo: 9 | 10 | git clone git@github.com:your-username/galley.git 11 | 12 | Make sure your machine is set up for galley, the getting started guide in the [README] 13 | 14 | Test your local galley changes 15 | 16 | gulp watch # compiles and watches your local galley 17 | npm link # from the galley repo, symlinks your local version of galley to be globally installed 18 | cd ../directory-with-galleyfile 19 | npm link galley # symlinks from your local node modules to your global galley 20 | # Check that your local galley is running 21 | 22 | Make sure the tests pass: 23 | 24 | gulp compile # (if you're not running gulp watch) 25 | gulp test 26 | gulp acceptance 27 | 28 | Make your change. Add tests for your change. Make the tests pass: 29 | 30 | gulp compile # (if you're not running gulp watch) 31 | gulp test 32 | gulp acceptance 33 | 34 | Push to your fork and [submit a pull request][pr]. 35 | 36 | [pr]: https://github.com/twitter-fabric/galley/compare 37 | 38 | At this point you're waiting on us. We like to at least comment on pull requests 39 | within three business days (and, typically, one business day). We may suggest 40 | some changes or improvements or alternatives. 41 | 42 | Some things that will increase the chance that your pull request is accepted: 43 | 44 | * Write tests. 45 | * Follow our [style guide][style]. 46 | * Write a [good commit message][commit]. 
47 | 48 | [style]: https://github.com/polarmobile/coffeescript-style-guide 49 | [commit]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html 50 | -------------------------------------------------------------------------------- /lib/lib/docker_config.coffee: -------------------------------------------------------------------------------- 1 | fs = require 'fs' 2 | path = require 'path' 3 | url = require 'url' 4 | _ = require 'lodash' 5 | homeDir = require 'home-dir' 6 | 7 | # Used to look up auth credentials when we know we're going to the official 8 | # DockerHub registry. 9 | DEFAULT_DOCKERHUB_HOST = 'https://index.docker.io/v1/' 10 | 11 | module.exports = 12 | # Checks the user's docker config file for login information. The file is a JSON hash, and there are two 13 | # versions, in two different locations. 14 | # 15 | # The old version in ~/.dockercfg (pre-docker 1.7) is keyed by registry host name. 16 | # { 17 | # "docker.crash.io": { 18 | # "auth": "base64user:pass", 19 | # "email": "email@email" 20 | # } 21 | # } 22 | # 23 | # The new version in ~/.docker/config.json 24 | # { 25 | # "auths": { 26 | # "docker.crash.io": { 27 | # "auth": "base64user:pass", 28 | # "email": "email@email" 29 | # }, 30 | # "https://index.docker.io/v1/": { 31 | # "auth": "base64user:pass", 32 | # "email": "email@email" 33 | # } 34 | # } 35 | # } 36 | authConfig: (host = DEFAULT_DOCKERHUB_HOST) -> 37 | hostConfig = try 38 | dockerOneSevenConfig = path.resolve homeDir(), '.docker/config.json' 39 | config = if fs.existsSync(dockerOneSevenConfig) 40 | configFile = fs.readFileSync dockerOneSevenConfig 41 | config = JSON.parse configFile.toString() 42 | config['auths'] 43 | else 44 | configFile = fs.readFileSync path.resolve(homeDir(), '.dockercfg') 45 | JSON.parse configFile.toString() 46 | 47 | config[host] 48 | catch e 49 | # If file doesn't exist don't explode, just don't have auth 50 | throw e unless e?.code is 'ENOENT' 51 | 52 | if hostConfig? 
53 | authBuffer = new Buffer hostConfig.auth, 'base64' 54 | [username, password] = authBuffer.toString().split ':' 55 | 56 | username: username 57 | password: password 58 | serveraddress: host 59 | -------------------------------------------------------------------------------- /lib/commands/config.coffee: -------------------------------------------------------------------------------- 1 | path = require 'path' 2 | fs = require 'fs' 3 | homeDir = require 'home-dir' 4 | minimist = require 'minimist' 5 | RSVP = require 'rsvp' 6 | chalk = require 'chalk' 7 | _ = require 'lodash' 8 | help = require './help' 9 | 10 | 11 | newConfigHashItem = (option, value) -> 12 | optionsHash = {} 13 | switch option 14 | when 'configDir' 15 | exists = fs.existsSync path.resolve(value) 16 | if !exists 17 | process.stdout.write chalk.yellow "Warning: " 18 | process.stdout.write "#{value} does not exist\n" 19 | optionsHash[option] = value 20 | else 21 | # JSON parse gives us "true" -> true 22 | optionsHash[option] = JSON.parse(value) 23 | optionsHash 24 | 25 | setConfigOption = (option, value) -> 26 | new RSVP.Promise (resolve, reject) -> 27 | galleycfgPath = path.resolve(homeDir(), '.galleycfg') 28 | existingGalleycfgHash = {} 29 | exists = fs.existsSync galleycfgPath 30 | if exists 31 | process.stdout.write 'Updating ~/.galleycfg\n' 32 | galleycfg = fs.readFileSync galleycfgPath 33 | existingGalleycfgHash = JSON.parse galleycfg.toString() 34 | else 35 | process.stdout.write 'Creating ~/.galleycfg\n' 36 | 37 | galleycfgHash = _.merge(existingGalleycfgHash, newConfigHashItem option, value) 38 | 39 | fs.writeFile galleycfgPath, JSON.stringify(galleycfgHash, false, 2), (err) -> 40 | reject err if err 41 | resolve() 42 | 43 | module.exports = (args, options, done) -> 44 | argv = minimist args, 45 | boolean: [ 46 | 'help' 47 | ] 48 | 49 | if argv._.length isnt 3 or argv.help 50 | return help args, options, done 51 | 52 | configPromise = RSVP.resolve() 53 | if argv['_'][0] == 'set' 54 | option 
= argv['_'][1] 55 | value = argv['_'][2] 56 | configPromise = configPromise.then -> 57 | setConfigOption(option, value) 58 | 59 | configPromise 60 | .then -> 61 | process.stdout.write chalk.green 'done!\n' 62 | done?() 63 | .catch (err) -> 64 | process.stdout.write chalk.red err 65 | process.stdout.write chalk.red err.stack 66 | process.stdout.write chalk.red '\nAborting.\n' 67 | process.exit 1 68 | 69 | -------------------------------------------------------------------------------- /lib/index.coffee: -------------------------------------------------------------------------------- 1 | path = require 'path' 2 | fs = require 'fs' 3 | _ = require 'lodash' 4 | chalk = require 'chalk' 5 | homeDir = require 'home-dir' 6 | minimist = require 'minimist' 7 | 8 | commands = 9 | pull: require './commands/pull' 10 | 'stop-env': require './commands/stop_env' 11 | cleanup: require './commands/cleanup' 12 | run: require './commands/run' 13 | list: require './commands/list' 14 | 15 | help: require './commands/help' 16 | version: require './commands/version' 17 | config: require './commands/config' 18 | 19 | loadGlobalOptionsSync = -> 20 | globalConfigPath = path.resolve(homeDir(), '.galleycfg') 21 | if fs.existsSync(globalConfigPath) 22 | JSON.parse fs.readFileSync(globalConfigPath, { encoding: 'utf-8' }) 23 | else 24 | {} 25 | 26 | printHelp = (prefix) -> 27 | commands.help [], prefix: prefix 28 | 29 | runCommand = (prefix, args, commands, opts) -> 30 | argv = minimist args, 31 | boolean: ['help'] 32 | 33 | if argv['help'] 34 | printHelp argv._ 35 | 36 | else unless args.length 37 | printHelp [] 38 | process.exit 1 39 | 40 | else if (command = commands[args[0]])? 
41 | try 42 | commandOpts = _.merge {}, opts, 43 | prefix: [args[0]] 44 | 45 | command args.slice(1), commandOpts, (statusCode = 0) -> process.exit statusCode 46 | catch err 47 | if typeof err is 'string' 48 | process.stdout.write chalk.red err 49 | else 50 | process.stdout.write err?.stack 51 | 52 | process.stdout.write chalk.red '\nAborting\n' 53 | process.exit -1 54 | 55 | else 56 | console.log "Error: Command not found: #{args[0]}" 57 | printHelp [] 58 | process.exit 1 59 | 60 | run = (galleyfilePath, argv) -> 61 | # Convert SIGTERM and SIGINT directly into exits so that we can listen for 'exit' events to shut 62 | # down our child watcher process. 63 | sigHandler = -> process.exit(0) 64 | process.once 'SIGTERM', sigHandler 65 | process.once 'SIGINT', sigHandler 66 | 67 | opts = 68 | config: require galleyfilePath 69 | configPath: galleyfilePath 70 | globalOptions: loadGlobalOptionsSync() 71 | 72 | args = process.argv.slice 2 73 | runCommand [], args, commands, opts 74 | 75 | module.exports = run 76 | -------------------------------------------------------------------------------- /spec/docker_args_spec.coffee: -------------------------------------------------------------------------------- 1 | expect = require 'expect' 2 | _ = require 'lodash' 3 | 4 | DockerArgs = require '../lib/lib/docker_args' 5 | 6 | # Small regression-like tests to make sure we retain the right formatting. 
7 | 8 | describe 'DockerArgs', -> 9 | describe 'formatEnvVariables', -> 10 | it 'should be a list for Docker', -> 11 | envVars = 12 | 'VAR1': 'value1' 13 | 'VAR2': 'value2' 14 | 15 | expect(DockerArgs.formatEnvVariables(envVars)).toEqual ['VAR1=value1', 'VAR2=value2'] 16 | 17 | it 'excludes nulls but includes empty strings', -> 18 | envVars = 19 | 'VAR1': '' 20 | 'VAR2': null 21 | 22 | expect(DockerArgs.formatEnvVariables(envVars)).toEqual ['VAR1='] 23 | 24 | describe 'formatLinks', -> 25 | it 'should format links, looking up container names', -> 26 | links = ['mongo', 'project-service-mysql:mysql'] 27 | containerNames = 28 | 'mongo': 'mongo.dev' 29 | 'project-service-mysql': 'thundering_tesla' 30 | 31 | expect(DockerArgs.formatLinks(links, containerNames)).toEqual [ 32 | 'mongo.dev:mongo' 33 | 'thundering_tesla:mysql' 34 | ] 35 | 36 | describe 'formatPortBindings', -> 37 | it 'should return portBindings and exposedPorts', -> 38 | ports = ['3200:3000', '8506', '5555:4444/udp', '7777/udp'] 39 | expect(DockerArgs.formatPortBindings(ports)).toEqual 40 | portBindings: {'3000/tcp': [{'HostPort': '3200'}], '8506/tcp': [{'HostPort': null}], '4444/udp': [{'HostPort': '5555'}], '7777/udp': [{'HostPort': null}]} 41 | exposedPorts: {'3000/tcp': {}, '8506/tcp': {}, '4444/udp': {}, '7777/udp': {}} 42 | 43 | describe 'formatVolumes', -> 44 | it 'should return volumes', -> 45 | volumes = ['/kittens', '/etc/puppies'] 46 | expect(DockerArgs.formatVolumes(volumes)).toEqual 47 | '/kittens': {} 48 | '/etc/puppies': {} 49 | 50 | describe 'formatVolumesFrom', -> 51 | it 'looks up some services but not others', -> 52 | volumesFrom = ['srv-config', 'www.rsync'] 53 | containerNames = 54 | 'srv-config': 'srv-config.dev' 55 | 56 | expect(DockerArgs.formatVolumesFrom(volumesFrom, containerNames)).toEqual [ 57 | 'srv-config.dev' 58 | 'www.rsync' 59 | ] 60 | -------------------------------------------------------------------------------- /CHANGELOG.md: 
### Next Release
#### Bug fixes
#### Features

### v1.2.6 (2020-04-27):
- Fix a bug that improperly truncated command line args containing `=`

### v1.2.5 (2019-03-18):
#### Bug fixes
- Fix a bug that was closing the `process.stderr` stream inappropriately

### v1.2.4 (2018-07-03):
#### Bug fixes
- Fix a bug that prevented passing a valid uid with the `--user` cli flag

### v1.2.3 (2016-12-05):
#### Bug fixes
- Fix edge case bug around rsync execution race condition on startup

### v1.2.2 (2016-11-07):
#### Bug fixes
- Fix additional errors with watch logic

### v1.2.1 (2016-11-07):
#### Bug fixes
- Fix a bug with the new watch ignore logic not creating a proper regex.

### v1.2.0 (2016-11-03):
#### Features
- Files in `.dockerignore` will be ignored by the rsync container.
  This should improve performance in projects with many files that would be ignored (e.g. `node_modules`).

### v1.1.2 (2016-09-23):
#### Bug fixes
- Fix a bug that prevented some errors from getting logged.

### v1.1.1 (2016-06-27):
#### Bug fixes
- Fix a bug that prevented starting services that aliased links.
40 | 41 | ### v1.1.0 (2016-06-24): 42 | #### Bug fixes 43 | - Fix recreation logic for missing links on Docker >= 1.10 (#40) 44 | 45 | ### v1.0.3 (2016-05-12): 46 | #### Bug fixes 47 | - Updates dependencies for Node 6 compatibility 48 | - Fixes crash when pulling containers with Docker 1.11 49 | 50 | ### v1.0.2 (2016-01-27): 51 | #### Features 52 | - "/udp" can now be used when specifying port mappings 53 | 54 | #### Bug fixes 55 | - Deleting the primary service container at the end of a run now removes its 56 | volumes as well 57 | 58 | ### v1.0.1 (2016-01-14): 59 | #### Features 60 | - Using a custom command reports the auto-generated container name 61 | - The `--as-service` flag on a custom command will maintain the service’s 62 | default container name and port bindings 63 | - `stop-env` can now take any number of environments 64 | 65 | #### Bug fixes 66 | - Galleyfile port bindings now work regardless of what ports are `EXPOSE`d in 67 | the Dockerfile 68 | - DockerHub credentials are now used for pulls to the default Docker registry 69 | 70 | ### v1.0.0 (2015-10-20): 71 | 72 | Initial release! 73 | -------------------------------------------------------------------------------- /lib/lib/docker_args.coffee: -------------------------------------------------------------------------------- 1 | # Helpers for converting our config format into arguments to pass to Docker's API 2 | 3 | _ = require 'lodash' 4 | 5 | # Formats a hash of env variable name to value to the array of VAR=VALUE strings Docker expects 6 | formatEnvVariables = (envVars) -> 7 | out = [] 8 | for name, value of envVars 9 | out.push "#{name}=#{value}" if value? 
10 | out 11 | 12 | # given links for a single service from the config file 13 | # and a map of service -> container name 14 | # generate the Link option for the Docker API 15 | formatLinks = (links, containerNameMap) -> 16 | for link in links 17 | service = link.split(':')[0] 18 | alias = link.split(':')[1] or service 19 | containerName = containerNameMap[service] 20 | "#{containerName}:#{alias}" 21 | 22 | # Given a list of ports in one of the following forms: 23 | # ":", "", ":/", "/", 24 | # returns a hash of portBindings and exposedPorts. We always expose every port specified so we 25 | # can map ports regardless of EXPOSE commands in the Dockerfile. 26 | # 27 | # These go into the HostConfig.Ports and ExposedPorts keys for container creation, respectively. 28 | formatPortBindings = (ports) -> 29 | portBindings = {} 30 | exposedPorts = {} 31 | 32 | for port in ports 33 | [dst, src, protocol] = port.match(/(?:(\d+)?:)?(\d+)\/?(tcp|udp)?/).slice(1) 34 | protocol ||= 'tcp' 35 | dst ||= null 36 | 37 | # If dst is null then Docker will allocate an unused port 38 | portBindings["#{src}/#{protocol}"] = [{'HostPort': dst}] 39 | exposedPorts["#{src}/#{protocol}"] = {} 40 | 41 | {portBindings, exposedPorts} 42 | 43 | # Formats the container option for exported volumes. The format for the create endpoint is a hash of: 44 | # : {} 45 | formatVolumes = (volumes) -> 46 | _.zipObject _.map volumes, (volume) -> [volume, {}] 47 | 48 | # Formats the VolumesFrom parameter, which doesn't need much formatting. It looks up service names 49 | # in the containerNameMap, but if a name is missing it assumes it's a container name. This 50 | # assumption is necessary for passing in the rsync source container name without faking it as a 51 | # service in containerNameMap. 
52 | formatVolumesFrom = (volumesFrom, containerNameMap) -> 53 | for name in volumesFrom 54 | containerNameMap[name] or name 55 | 56 | module.exports = { 57 | formatEnvVariables 58 | formatLinks 59 | formatPortBindings 60 | formatVolumes 61 | formatVolumesFrom 62 | } 63 | -------------------------------------------------------------------------------- /lib/commands/pull.coffee: -------------------------------------------------------------------------------- 1 | _ = require 'lodash' 2 | chalk = require 'chalk' 3 | Docker = require 'dockerode' 4 | RSVP = require 'rsvp' 5 | minimist = require 'minimist' 6 | 7 | help = require './help' 8 | 9 | ProgressLine = require '../lib/progress_line' 10 | DockerConfig = require '../lib/docker_config' 11 | DockerUtils = require '../lib/docker_utils' 12 | ServiceHelpers = require '../lib/service_helpers' 13 | 14 | parseArgs = (args) -> 15 | argv = minimist args, 16 | alias: 17 | 'add': 'a' 18 | 19 | [service, envArr...] = (argv._[0] or '').split '.' 20 | env = envArr.join '.' 
21 | 22 | options = {} 23 | 24 | _.merge options, _.pick argv, [ 25 | 'add' 26 | ] 27 | 28 | options.add = ServiceHelpers.normalizeMultiArgs options.add 29 | 30 | {service, env, options} 31 | 32 | pullService = (docker, servicesConfig, service, env) -> 33 | prereqsArray = ServiceHelpers.generatePrereqServices(service, servicesConfig) 34 | 35 | prereqPromise = RSVP.resolve() 36 | _.forEach prereqsArray, (prereq) -> 37 | imageName = servicesConfig[prereq].image 38 | progressLine = new ProgressLine process.stderr, chalk.gray 39 | 40 | prereqPromise = prereqPromise 41 | .then -> 42 | process.stderr.write chalk.blue(prereq + ':') 43 | process.stderr.write chalk.gray(' Pulling… ') 44 | 45 | DockerUtils.downloadImage(docker, imageName, DockerConfig.authConfig, progressLine.set.bind(progressLine)) 46 | .finally -> 47 | progressLine.clear() 48 | .then -> 49 | process.stderr.write chalk.green(' done!') 50 | .catch (err) -> 51 | if err?.statusCode is 404 52 | throw "Image #{imageName} not found in registry" 53 | else 54 | throw err 55 | .catch (err) -> 56 | if err? and err isnt '' and typeof err is 'string' or err.json? 57 | process.stderr.write chalk.red(' ' + chalk.bold('Error:') + ' ' + (err?.json or err).trim()) 58 | throw '' 59 | else 60 | throw err 61 | .finally -> 62 | process.stderr.write '\n' 63 | 64 | prereqPromise 65 | 66 | module.exports = (args, commandOptions, done) -> 67 | {service, env, options} = parseArgs(args) 68 | 69 | unless service? and not _.isEmpty(service) 70 | return help args, commandOptions, done 71 | 72 | {servicesConfig} = ServiceHelpers.processConfig commandOptions.config, env, options.add 73 | docker = new Docker() 74 | 75 | pullService docker, servicesConfig, service, env 76 | .then -> done?() 77 | .catch (e) -> 78 | console.error e?.stack or 'Aborting. 
' 79 | process.exit -1 80 | 81 | -------------------------------------------------------------------------------- /spec/run_spec.coffee: -------------------------------------------------------------------------------- 1 | expect = require 'expect' 2 | _ = require 'lodash' 3 | 4 | Run = require '../lib/commands/run' 5 | 6 | describe 'parseArgs', -> 7 | describe 'addons option support', -> 8 | describe 'with a single value', -> 9 | TEST_ARGS = '--configDir acceptance -a database --entrypoint ls application.foo'.split(' ') 10 | 11 | it 'should generate addon options with an array with one value', -> 12 | addons = Run.parseArgs(TEST_ARGS).options.add 13 | expect(addons).toEqual(['database']) 14 | 15 | describe 'with a multiple values through multiple params', -> 16 | TEST_ARGS = '--configDir acceptance -a database -a config --entrypoint ls application.foo'.split(' ') 17 | 18 | it 'should generate addon options with an array with multiple values', -> 19 | addons = Run.parseArgs(TEST_ARGS).options.add 20 | expect(addons).toEqual(['database', 'config']) 21 | 22 | describe 'with a multiple values through a single delimited param', -> 23 | TEST_ARGS = '--configDir acceptance -a database,config --entrypoint ls application.foo'.split(' ') 24 | 25 | it 'should generate addon options with an array with multiple values', -> 26 | addons = Run.parseArgs(TEST_ARGS).options.add 27 | expect(addons).toEqual(['database', 'config']) 28 | 29 | describe 'with a trailing comma', -> 30 | TEST_ARGS = '--configDir acceptance -a database, --entrypoint ls application.foo'.split(' ') 31 | 32 | it 'should generate addon options that do not include an empty string', -> 33 | addons = Run.parseArgs(TEST_ARGS).options.add 34 | expect(addons).toEqual(['database']) 35 | 36 | describe 'with a mix of delimited and non-delimited params', -> 37 | TEST_ARGS = '--configDir acceptance -a database,config -a other --entrypoint ls application.foo'.split(' ') 38 | 39 | it 'should generate addon options with an 
array with multiple values', -> 40 | addons = Run.parseArgs(TEST_ARGS).options.add 41 | expect(addons).toEqual(['database', 'config', 'other']) 42 | 43 | describe 'with the long param name', -> 44 | TEST_ARGS = '--configDir acceptance --add database,config --entrypoint ls application.foo'.split(' ') 45 | 46 | it 'should generate the addon options as usual', -> 47 | addons = Run.parseArgs(TEST_ARGS).options.add 48 | expect(addons).toEqual(['database', 'config']) 49 | 50 | describe 'with the parameter not specified', -> 51 | TEST_ARGS = '--configDir acceptance --entrypoint ls application.foo'.split(' ') 52 | 53 | it 'should generate an empty array of addons', -> 54 | addons = Run.parseArgs(TEST_ARGS).options.add 55 | expect(addons).toEqual([]) 56 | -------------------------------------------------------------------------------- /acceptance/test_commands.coffee: -------------------------------------------------------------------------------- 1 | # Helper to wrap Galley commands (and child_process) in promises to 2 | # better use them with Mocha. 3 | 4 | child_process = require 'child_process' 5 | RSVP = require 'rsvp' 6 | stream = require 'stream' 7 | streamBuffers = require 'stream-buffers' 8 | 9 | TestReporter = require '../spec/util/test_reporter' 10 | 11 | cleanupCommand = require '../lib/commands/cleanup' 12 | runCommand = require '../lib/commands/run' 13 | listCommand = require '../lib/commands/list' 14 | stopEnvCommand = require '../lib/commands/stop_env' 15 | 16 | GALLEYFILE = require './Galleyfile' 17 | 18 | exec = (cmd) -> 19 | new RSVP.Promise (resolve, reject) -> 20 | child_process.exec cmd, (err, stdout, stderr) -> 21 | if err 22 | reject(err) 23 | else 24 | resolve 25 | stdout: stdout.toString() 26 | stderr: stderr.toString() 27 | 28 | # args is the array of args as would be passed on the command line to "galley run" 29 | # 30 | # runOpts may contain a "stdin" that is used as the contents of stdin for the command. 
31 | run = (args, runOpts = {}) -> 32 | new RSVP.Promise (resolve, reject) -> 33 | stdin = new stream.Readable 34 | stdin.push runOpts.stdin or '' 35 | stdin.push null 36 | 37 | options = 38 | config: GALLEYFILE 39 | stdin: stdin 40 | stdout: new streamBuffers.WritableStreamBuffer(frequency: 0) 41 | stderr: new streamBuffers.WritableStreamBuffer(frequency: 0) 42 | reporter: new TestReporter 43 | 44 | runCommand args, options, (statusCode = 0) -> 45 | if statusCode isnt 0 46 | reject new Error(options.reporter.lastError or options.stderr.getContentsAsString("utf8")) 47 | else 48 | resolve 49 | reporter: options.reporter 50 | statusCode: statusCode 51 | stderr: options.stderr.getContentsAsString("utf8") 52 | stdout: options.stdout.getContentsAsString("utf8") 53 | 54 | cleanup = -> 55 | new RSVP.Promise (resolve, reject) -> 56 | options = 57 | config: GALLEYFILE 58 | reporter: new TestReporter 59 | # In acceptance tests we don't want to mess with your global Docker state 60 | preserveUntagged: true 61 | 62 | cleanupCommand [], options, -> 63 | resolve 64 | reporter: options.reporter 65 | 66 | list = -> 67 | new RSVP.Promise (resolve, reject) -> 68 | outBuffer = new streamBuffers.WritableStreamBuffer(frequency: 0) 69 | options = 70 | config: GALLEYFILE 71 | configPath: './Galleyfile.js' 72 | stdout: outBuffer 73 | 74 | listCommand [], options, -> 75 | resolve 76 | # Strip out chalk ASCII codes before returning 77 | out: outBuffer.getContentsAsString().replace(/\x1b\[\d+m/g, '') 78 | 79 | stopEnv = (env) -> 80 | new RSVP.Promise (resolve, reject) -> 81 | stopEnvCommand [env], {reporter: new TestReporter}, resolve 82 | 83 | module.exports = { 84 | exec 85 | run 86 | cleanup 87 | list 88 | stopEnv 89 | } 90 | -------------------------------------------------------------------------------- /gulpfile.coffee: -------------------------------------------------------------------------------- 1 | gulp = require 'gulp' 2 | $ = require('gulp-load-plugins')() 3 | fs = require 'fs' 
4 | mocha = require 'gulp-mocha' 5 | shell = require 'gulp-shell' 6 | runSequence = require 'run-sequence' 7 | semver = require 'semver' 8 | 9 | 10 | config = 11 | build: require './config/build.coffee' 12 | 13 | alertError = $.notify.onError (error) -> 14 | message = error?.stack or error?.message or error?.toString() or 'Something went wrong' 15 | "Error: #{ message }" 16 | 17 | gulp.task 'clean', (cb) -> 18 | fs = require 'fs' 19 | dirs = [config.build.build_dir, config.build.dest, config.build.spec_dest, config.build.acceptance_dest] 20 | glob = [] 21 | 22 | for dir in dirs 23 | fs.mkdirSync dir unless fs.existsSync dir 24 | glob.push "#{ dir }/**", "!#{ dir }" 25 | 26 | require('del') glob, cb 27 | 28 | gulp.task 'compile', -> 29 | gulp.src([ 30 | "#{ config.build.src }/**/*.coffee" 31 | ]) 32 | .pipe($.plumber errorHandler: alertError) 33 | .pipe($.changed config.build.dest) 34 | .pipe($.coffee bare: true) 35 | .pipe(gulp.dest config.build.dest) 36 | 37 | gulp.src([ 38 | "#{ config.build.spec_src }/**/*.coffee" 39 | ]) 40 | .pipe($.plumber errorHandler: alertError) 41 | .pipe($.changed config.build.dest) 42 | .pipe($.coffee bare: true) 43 | .pipe(gulp.dest config.build.spec_dest) 44 | 45 | gulp.src([ 46 | "#{ config.build.acceptance_src }/*.coffee" 47 | ]) 48 | .pipe($.plumber errorHandler: alertError) 49 | .pipe($.changed config.build.dest) 50 | .pipe($.coffee bare: true) 51 | .pipe(gulp.dest config.build.acceptance_dest) 52 | 53 | gulp.task 'build', (cb) -> 54 | runSequence 'clean', ['compile'], cb 55 | 56 | gulp.task 'watch', (cb) -> 57 | gulp.watch [ 58 | "#{ config.build.src }/**/*.coffee" 59 | "#{ config.build.spec_src }/**/*.coffee" 60 | "#{ config.build.acceptance_src }/**/*.coffee" 61 | ], ['compile'] 62 | 63 | 64 | cb() 65 | 66 | mochaArgs = -> 67 | args = {} 68 | args 69 | 70 | gulp.task 'test', -> 71 | gulp.src("#{config.build.spec_dest}/**/*.js") 72 | .pipe(mocha(mochaArgs())) 73 | 74 | gulp.task 'acceptance:build', shell.task [ 75 | 
'./acceptance/dockerfiles/build.sh' 76 | ] 77 | 78 | gulp.task 'acceptance:test', -> 79 | gulp.src("#{config.build.acceptance_dest}/**/*.js") 80 | .pipe(mocha(mochaArgs())) 81 | 82 | gulp.task 'acceptance', (cb) -> 83 | runSequence 'acceptance:build', 'acceptance:test', cb 84 | 85 | # ------------------------------------------------------------------------------ 86 | # Bump Version 87 | # ------------------------------------------------------------------------------ 88 | do -> 89 | bumpVersion = (type) -> 90 | (cb) -> 91 | pkg = JSON.parse fs.readFileSync('./package.json', 'utf8') 92 | pkg.version = pkg.version?.replace /[^\.\d]/g, '' 93 | 94 | if type in ['patch', 'major', 'minor'] 95 | pkg.version = semver.inc pkg.version, type 96 | else 97 | pkg.version = [pkg.version, type].join '' 98 | 99 | fs.writeFileSync 'package.json', JSON.stringify(pkg, null, 2) + '\n' 100 | cb() 101 | 102 | gulp.task 'bump', bumpVersion 'alpha' 103 | gulp.task 'bump:local', bumpVersion 'alpha' 104 | gulp.task 'bump:patch', bumpVersion 'patch' 105 | gulp.task 'bump:minor', bumpVersion 'minor' 106 | gulp.task 'bump:major', bumpVersion 'major' 107 | 108 | gulp.task 'default', (cb) -> 109 | runSequence 'build', 'watch', cb 110 | -------------------------------------------------------------------------------- /lib/lib/stdin_command_interceptor.coffee: -------------------------------------------------------------------------------- 1 | # Class to manage piping from stdin into a container while listening for CTRL-P CTRL-... commands. 2 | # 3 | # Commands are: 4 | # 5 | # CTRL-Q: Detach the container. Docker can handle this by itself, but now that we're trapping 6 | # CTRL-P we do this ourselves. 7 | # CTRL-R: Reload Galley. Also happens if you send a SIGHUP to the process. Causes Galley to run 8 | # through checking all the containers as if it were just started. 9 | # CTRL-C: Stop the container. 
Useful for when you have a RestartPolicy that is causing an app to 10 | # reload itself on CTRL-C, but you actually really do want to stop the container. 11 | # CTRL-P: Passes a CTRL-P through to the stream. 12 | 13 | events = require 'events' 14 | 15 | CTRL_C = '\u0003' 16 | CTRL_P = '\u0010' 17 | CTRL_Q = '\u0011' 18 | CTRL_R = '\u0012' 19 | 20 | class StdinCommandInterceptor extends events.EventEmitter 21 | constructor: (stdin) -> 22 | @stdin = stdin 23 | 24 | # We create a bound instance of this method so that we can removeListener it. 25 | @stdinDataHandler = @onStdinData.bind(@) 26 | 27 | # Pipes data from the STDIN given to this instance's constructor through to the inputStream. If 28 | # STDIN is a TTY, intercepts control sequences to close itself and trigger a resolution. 29 | start: (inputStream) -> 30 | @inputStream = inputStream 31 | @previousKey = null 32 | 33 | if @stdin.isTTY 34 | @stdin.setRawMode true 35 | 36 | @inputStream.setEncoding 'utf8' 37 | @stdin.setEncoding 'utf8' 38 | 39 | @stdin.on 'data', @stdinDataHandler 40 | else 41 | @stdin.pipe @inputStream 42 | 43 | # We dig into Dockerode's internal data to get the socket because it's the only reliable way 44 | # to detect the close, for example if the container restarts or shuts down unexpectedly. 45 | # 46 | # Reasons why the socket to STDIN closes: 47 | # - Someone called #stop on us, which caused us to destroy the @inputStream. This tends to 48 | # happen after we have already triggered a command (see Run command's maybePipeStdStreams) 49 | # so we don't want to trigger anything additional. In this case, @inputStream will have 50 | # already been set to null. 51 | # - Our own STDIN has EOF'd (e.g. it was piped in from a local file). In this case, @stdin 52 | # will no longer be "readable". We don't trigger a command here, either, because we assume 53 | # that the consumer in the container will react to the EOF, finish its task, and close the 54 | # output stream. 
(Reacting too early to EOF creates a race condition where we would terminate 55 | # before the container's process wrote all of its output.) 56 | # - Docker has detached and left the container running. This used to be do-able by pressing 57 | # CTRL-P CTRL-Q before we trapped it for our own purposes, but in case it is possible through 58 | # some other means we detect it and trigger a 'detach' command to notify our listeners and 59 | # keep them from thinking that the RestartPolicy triggered and they should re-attach. 60 | @inputStream._output.socket.on 'close', => 61 | @_trigger 'detach' if @inputStream and @stdin.readable 62 | 63 | stop: -> 64 | return unless @inputStream 65 | 66 | # Set our @inputStream to null up front so that when the inputStream is destroyed and its 67 | # socket closes we know not to trigger the 'detach' from the handler above. 68 | inputStream = @inputStream 69 | @inputStream = null 70 | 71 | inputStream.destroy() 72 | 73 | if @stdin.isTTY 74 | @stdin.removeListener 'data', @stdinDataHandler 75 | @stdin.setRawMode false 76 | else 77 | @stdin.unpipe inputStream 78 | 79 | # Looks at each keystroke to check and see if the user is doing a CTRL-P escape sequence. If so, 80 | # traps it to potentially resolve the command. Otherwise passes the value through. 81 | onStdinData: (key) -> 82 | if @previousKey is CTRL_P 83 | @previousKey = null 84 | switch key 85 | when CTRL_C then @_trigger 'stop' 86 | when CTRL_P then @inputStream.write(CTRL_P) 87 | when CTRL_Q then @_trigger 'detach' 88 | when CTRL_R then @_trigger 'reload' 89 | else 90 | @previousKey = key 91 | # In Node 0.12.2 calling setImmediate is important for avoiding an occasional blocking read 92 | # to STDIN that locks up the program until more input comes in. 93 | setImmediate => 94 | @inputStream?.write(key) 95 | 96 | # Called from outside by a SIGHUP handler. Has the same effect as CTRL-P CTRL-R, which causes 97 | # Galley to recheck all containers. 
98 | sighup: -> @_trigger 'reload' 99 | 100 | _trigger: (command) -> @emit 'command', {command} 101 | 102 | module.exports = StdinCommandInterceptor 103 | -------------------------------------------------------------------------------- /lib/lib/overlay_output_stream.coffee: -------------------------------------------------------------------------------- 1 | _ = require 'lodash' 2 | charm = require 'charm' 3 | stream = require 'stream' 4 | 5 | # Wrapper for an output stream (e.g. stdout) that adds an "overlay" in the lower right hand 6 | # corner. Overlay can show a "status" (consistent, purple on white) or a "flash" (goes away after 7 | # a few seconds, white on purple). 8 | # 9 | # Attaching to a non-TTY stream will cause this to pass data through with no modifications. 10 | # 11 | # Not entirely great at what happens when STDOUT lines wrap or the terminal is resized: sometimes 12 | # the overlay will not get erased completely. It seems to work well enough in practice, however. 13 | class OverlayOutputStream extends stream.Writable 14 | constructor: (@stream, options) -> 15 | super options 16 | @charm = charm(@stream) 17 | @isTTY = @stream.isTTY 18 | @statusMessage = '' 19 | @currentOverlayText = '' 20 | 21 | # Needed so that the stream's dimensions can be used for resizing the Docker container 22 | @columns = @stream.columns 23 | @rows = @stream.rows 24 | 25 | @lastStreamColumns = @stream.columns 26 | 27 | handleResize = => 28 | @writeOverlay() 29 | @columns = @stream.columns 30 | @rows = @stream.rows 31 | @emit 'resize' 32 | 33 | # Handling wrapping is much more reliable with a bit of debounce 34 | @stream.on 'resize', _.debounce handleResize, 100 35 | @stream.on 'drain', => @emit 'drain' 36 | 37 | # Sets a message that permanently sits in the lower-right as the stream 38 | # scrolls by behind it. 
39 | setOverlayStatus: (status) -> 40 | @statusMessage = status 41 | if @hasOverlay 42 | @writeOverlay() 43 | 44 | # Pops a message up (over the status) for a few seconds, then disappears (and 45 | # status, if any, re-appears). 46 | flashOverlayMessage: (message) -> 47 | if @unsetFlashTimeout 48 | clearTimeout @unsetFlashTimeout 49 | 50 | @unsetFlashTimeout = setTimeout @unsetOverlayFlash.bind(@), 2000 51 | @flashMessage = message 52 | @writeOverlay() 53 | 54 | unsetOverlayFlash: -> 55 | @flashMessage = null 56 | @writeOverlay() 57 | 58 | clearOverlay: -> 59 | return unless @hasOverlay 60 | 61 | @charm.push(true) 62 | 63 | # Width from the start of the overlay to the right side of the window. Starts as the length 64 | # of the text (since it was printed right-aligned) but we add in any new columns that have 65 | # appeared, or subtract any that have disappeared (our text will be wrapped in that case). 66 | widthOnLine = @currentOverlayText.length + (@stream.columns - @lastStreamColumns) + 1 67 | overlayDidWrap = @lastStreamColumns > @stream.columns + 1 68 | @lastStreamColumns = @stream.columns 69 | 70 | @charm.position(@stream.columns - widthOnLine, @stream.rows) 71 | 72 | # If we wrapped, the start of our text is one above the bottom row, so we have to move up. 73 | if overlayDidWrap 74 | @charm.up(1) 75 | 76 | @charm.delete('char', @currentOverlayText.length + 1) 77 | 78 | # We then delete the line that has the wrapped characters on it. 79 | if overlayDidWrap 80 | @charm.down(1) 81 | @charm.delete('line', 1) 82 | 83 | @charm.pop(true) 84 | 85 | # After the cursor position is restored, we scroll a line to cover up the newly blank one, 86 | # and have to move the cursor down one line to compensate. 
87 | if overlayDidWrap 88 | @charm.scroll(-1) 89 | @charm.down(1) 90 | 91 | writeOverlay: -> 92 | return unless @isTTY 93 | 94 | @clearOverlay() 95 | @charm.push(true) 96 | 97 | if @flashMessage 98 | text = @flashMessage 99 | @charm.background(13) 100 | @charm.foreground('white') 101 | else if @statusMessage 102 | @charm.foreground(13) 103 | @charm.background('white') 104 | text = @statusMessage 105 | else 106 | @currentOverlayText = '' 107 | @charm.pop(true) 108 | return 109 | 110 | @currentOverlayText = if text then " #{text} " else '' 111 | 112 | @charm.position(@stream.columns - @currentOverlayText.length, @stream.rows) 113 | @charm.write(@currentOverlayText) 114 | @charm.pop(true) 115 | @hasOverlay = true 116 | 117 | # Proxy our writes through to the underlying stream, wrapped in 118 | # making our status disappear and re-appear. 119 | _write: (chunk, encoding, cb) -> 120 | # We only redraw if the chunk contains a newline. This is a compromise from redrawing every 121 | # time, to workaround issues with TTY push and pop. When drawing every time, there's a case 122 | # where the cursor can be put just outside the terminal window (easy to do with rspec's one-dot- 123 | # at-a-time output). Pushing that position and popping it ends up bringing the cursor back 124 | # inside the window, so the line never ends up wrapping. 125 | # 126 | # With this solution, we allow writes on one line (and possibly terminal code shenanigans) to 127 | # overwrite the charm, but preserve the wrapping behavior for one-dot-at-a-time output. 
128 | redraw = chunk.toString().indexOf('\n') isnt -1 129 | 130 | @clearOverlay() if redraw 131 | ret = @stream.write chunk, encoding, cb 132 | @writeOverlay() if redraw 133 | ret 134 | 135 | end: -> 136 | @clearOverlay() 137 | super 138 | 139 | module.exports = OverlayOutputStream 140 | -------------------------------------------------------------------------------- /lib/commands/cleanup.coffee: -------------------------------------------------------------------------------- 1 | _ = require 'lodash' 2 | chalk = require 'chalk' 3 | minimist = require 'minimist' 4 | 5 | ConsoleReporter = require '../lib/console_reporter' 6 | Docker = require 'dockerode' 7 | DockerUtils = require '../lib/docker_utils' 8 | PromiseUtils = require '../lib/promise_utils' 9 | ServiceHelpers = require '../lib/service_helpers' 10 | RSVP = require 'rsvp' 11 | help = require './help' 12 | 13 | # Docker lists out all the names a container is known by, which includes names from containers that 14 | # link to it. We use the heuristic that if there's only one "/" in the name then it's the "real" 15 | # name for the container. (The "/" is used to separate the container name and its link name.) 16 | findContainerName = (info) -> 17 | for name in info.Names 18 | return name.substr(1) if (name.match(/\//g) or []).length is 1 19 | 20 | # Given a service name and env, reverse-engineered from a container name, use the env to generate 21 | # the flattened config so we can see how the service was configured. Important for determining if 22 | # a service is "stateful" or not. 23 | lookupServiceConfig = (service, env, options, configCache) -> 24 | unless configCache[env]? 25 | configCache[env] = ServiceHelpers.processConfig(options.config, env, []).servicesConfig 26 | 27 | configCache[env][service] 28 | 29 | # Removes a stopped container if it either does not have a Galleyesq name (no ".") or if it is for 30 | # a service that does not appear to be stateful. 
31 | maybeRemoveContainer = (docker, info, options, configCache) -> 32 | containerName = findContainerName(info) 33 | 34 | [service, envParts...] = containerName.split('.') 35 | env = envParts.join('.') 36 | 37 | if env.length and (config = lookupServiceConfig service, env, options, configCache) 38 | anonymousContainer = false 39 | statefulContainer = config.stateful 40 | else 41 | # If there's no env or if the service isn't in our config, treat it as anonymous (i.e. not 42 | # managed by Galley). 43 | anonymousContainer = true 44 | statefulContainer = false 45 | 46 | # Short-circuit out here without reporting anything. 47 | return RSVP.resolve() if anonymousContainer and not options.unprotectAnonymous 48 | 49 | options.reporter.startService containerName 50 | 51 | removePromise = unless statefulContainer and not options.unprotectStateful 52 | options.reporter.startTask 'Removing' 53 | 54 | DockerUtils.removeContainer docker.getContainer(info.Id), { v: true } 55 | .then -> 56 | options.reporter.succeedTask() 57 | else 58 | options.reporter.completeTask 'Preserving stateful service' 59 | RSVP.resolve({}) 60 | 61 | removePromise 62 | .finally -> 63 | options.reporter.finish() 64 | 65 | # Removes a dangling image, ignoring any "still in use" errors since those can come up naturally 66 | # when an old container refers to an image whose original tag has moved on. 67 | removeImage = (docker, info) -> 68 | DockerUtils.removeImage docker.getImage(info.Id) 69 | .catch (err) -> 70 | # Sometimes containers are using untagged images, for example a long-lived stateful container 71 | # after a newer image has been downloaded. 
72 | if err.statusCode is 409 then return 73 | else throw err 74 | 75 | module.exports = (args, commandOptions, done) -> 76 | argv = minimist args, 77 | boolean: [ 78 | 'help' 79 | 'unprotectAnonymous' 80 | 'unprotectStateful' 81 | ] 82 | 83 | if argv._.length isnt 0 or argv.help 84 | return help args, commandOptions, done 85 | 86 | docker = new Docker() 87 | 88 | options = 89 | unprotectStateful: argv.unprotectStateful 90 | unprotectAnonymous: argv.unprotectAnonymous 91 | stderr: commandOptions.stderr or process.stderr 92 | reporter: commandOptions.reporter or new ConsoleReporter(process.stderr) 93 | config: commandOptions.config 94 | 95 | # Cache used to store env -> serviceConfigs 96 | configCache = {} 97 | 98 | DockerUtils.listContainers docker, filters: '{"status": ["exited"]}' 99 | .then ({infos}) -> 100 | options.reporter.message 'Removing stopped containers…' 101 | PromiseUtils.promiseEach infos, (info) -> 102 | maybeRemoveContainer(docker, info, options, configCache) 103 | .then -> 104 | DockerUtils.listImages docker, filters: '{"dangling": ["true"]}' 105 | .then ({infos}) -> 106 | # Short-circuit out for acceptance tests. We can't effectively test this part because your 107 | # global state is arbitrary (and we don't want to affect it). 108 | return if commandOptions.preserveUntagged 109 | 110 | options.reporter.message() 111 | count = 0 112 | 113 | progressLine = options.reporter.startProgress 'Deleting dangling images…' 114 | updateProgress = -> progressLine.set "#{count} / #{infos.length}" 115 | updateProgress() 116 | 117 | PromiseUtils.promiseEach infos, (info) -> 118 | removeImage docker, info 119 | .then -> 120 | count++ 121 | updateProgress() 122 | .then -> 123 | progressLine.clear() 124 | options.reporter.succeedTask() 125 | options.reporter.finish() 126 | .then -> 127 | done() 128 | .catch (err) -> 129 | if err? and err isnt '' and typeof err is 'string' or err.json? 
130 | message = (err?.json or (err if typeof err is 'string') or err?.message or 'Unknown error').trim() 131 | message = message.replace /^Error: /, '' 132 | options.reporter.error chalk.bold('Error:') + ' ' + message 133 | 134 | options.reporter.finish() 135 | options.stderr.write err?.stack if err?.stack 136 | -------------------------------------------------------------------------------- /lib/lib/docker_utils.coffee: -------------------------------------------------------------------------------- 1 | url = require 'url' 2 | _ = require 'lodash' 3 | RSVP = require 'rsvp' 4 | 5 | # Series of wrappers around Dockerode methods to turn them in to RSVP promises. The convention is 6 | # for the promise to resolve to a hash with a "container" key and optionally a key relating to the 7 | # result of the method (e.g. "info", "stream"). 8 | 9 | # Resolves to image and info 10 | inspectImage = (image) -> 11 | new RSVP.Promise (resolve, reject) -> 12 | image.inspect (err, info) -> 13 | if err then reject(err) else resolve({image, info}) 14 | 15 | removeImage = (image, opts = {}) -> 16 | new RSVP.Promise (resolve, reject) -> 17 | image.remove opts, (err) -> 18 | if err then reject(err) else resolve() 19 | 20 | createContainer = (docker, opts) -> 21 | new RSVP.Promise (resolve, reject) -> 22 | docker.createContainer opts, (err, container) -> 23 | if err then reject(err) else resolve({container}) 24 | 25 | # Resolves to container and info 26 | inspectContainer = (container) -> 27 | new RSVP.Promise (resolve, reject) -> 28 | container.inspect (err, info) -> 29 | if err then reject(err) else resolve({container, info}) 30 | 31 | startContainer = (container, opts = {}) -> 32 | new RSVP.Promise (resolve, reject) -> 33 | container.start opts, (err) -> 34 | if err 35 | err.container = container 36 | reject(err) 37 | else 38 | resolve({container}) 39 | 40 | stopContainer = (container, opts = {}) -> 41 | new RSVP.Promise (resolve, reject) -> 42 | container.stop opts, (err) -> 43 | if 
err then reject(err) else resolve({container}) 44 | 45 | restartContainer = (container, opts = {}) -> 46 | new RSVP.Promise (resolve, reject) -> 47 | container.restart opts, (err) -> 48 | if err then reject(err) else resolve({container}) 49 | 50 | pauseContainer = (container, opts = {}) -> 51 | new RSVP.Promise (resolve, reject) -> 52 | container.pause opts, (err) -> 53 | if err then reject(err) else resolve({container}) 54 | 55 | unpauseContainer = (container, opts = {}) -> 56 | new RSVP.Promise (resolve, reject) -> 57 | container.unpause opts, (err) -> 58 | if err then reject(err) else resolve({container}) 59 | 60 | # Resolves to container and stream 61 | attachContainer = (container, opts) -> 62 | new RSVP.Promise (resolve, reject) -> 63 | container.attach opts, (err, stream) -> 64 | if err then reject(err) else resolve({container, stream}) 65 | 66 | # Resolves to container and completion result 67 | waitContainer = (container) -> 68 | new RSVP.Promise (resolve, reject) -> 69 | container.wait (err, result) -> 70 | if err then reject(err) else resolve({container, result}) 71 | 72 | removeContainer = (container, opts) -> 73 | new RSVP.Promise (resolve, reject) -> 74 | container.remove opts, (err) -> 75 | if err then reject(err) else resolve() 76 | 77 | resizeContainer = (container, ttyStream) -> 78 | new RSVP.Promise (resolve, reject) -> 79 | dimensions = 80 | h: ttyStream.rows 81 | w: ttyStream.columns 82 | 83 | if dimensions.h? and dimensions.w? 84 | container.resize dimensions, (err) -> 85 | if err then reject(err) else resolve({container}) 86 | else 87 | resolve({container}) 88 | 89 | # Downloads the image by name and returns a promise that will resolve when it's complete. Periodically 90 | # calls the progressCb function with either undefined or an interesting progress string. 91 | # 92 | # Does not resolve to a value. 
93 | downloadImage = (docker, imageName, authConfigFn, progressCb = ->) -> 94 | new RSVP.Promise (resolve, reject) -> 95 | opts = {} 96 | 97 | # Check and see if we're trying to do "repository/image" and, if the repository has a "." in 98 | # it, grab the credentials. The "." check is the same that the docker command line / tool uses 99 | # to decide whether the repository is a remote server vs. on its default hub.docker.com registry. 100 | # 101 | # If there's no ".", we look up auth anyway in the case of private repos on hub.docker.com. 102 | if imageName.indexOf('/') isnt -1 103 | repository = imageName.split('/')[0] 104 | opts.authconfig = if repository.indexOf('.') isnt -1 105 | authConfigFn(repository) 106 | else 107 | authConfigFn() 108 | 109 | docker.pull imageName, opts, (err, stream) -> 110 | return reject(err) if err 111 | 112 | stream.on 'data', (byteBuffer) -> 113 | # Docker sends along a nice summary of the download progress, including an ASCII progress 114 | # bar and estimation of time remaining for the current download. Send that along to our 115 | # progress callback. 116 | # 117 | # At least with Docker 1.11 running in Docker for Mac, the byteBuffer can contain more than 118 | # one newline-separated JSON object, so we split, look for errors across all of them, and 119 | # report on the first one. 120 | # 121 | # TODO(finneganh): Do better about multiple simultaneous statuses 122 | statusArr = byteBuffer.toString().split('\n').filter((str) -> str.length).map((json) -> JSON.parse(json)) 123 | statusArr.forEach (status) -> 124 | reject(status.error) if status.error? 
125 | 126 | progressCb statusArr[0].progress or statusArr[0].status 127 | 128 | stream.on 'end', -> resolve() 129 | 130 | listContainers = (docker, opts = {}) -> 131 | new RSVP.Promise (resolve, reject) -> 132 | docker.listContainers opts, (err, infos) -> 133 | if err then reject(err) else resolve({infos}) 134 | 135 | listImages = (docker, opts = {}) -> 136 | new RSVP.Promise (resolve, reject) -> 137 | docker.listImages opts, (err, infos) -> 138 | if err then reject(err) else resolve({infos}) 139 | 140 | module.exports = { 141 | inspectImage 142 | removeImage 143 | createContainer 144 | inspectContainer 145 | startContainer 146 | stopContainer 147 | restartContainer 148 | pauseContainer 149 | unpauseContainer 150 | attachContainer 151 | resizeContainer 152 | waitContainer 153 | removeContainer 154 | downloadImage 155 | listContainers 156 | listImages 157 | } 158 | -------------------------------------------------------------------------------- /lib/lib/rsyncer.coffee: -------------------------------------------------------------------------------- 1 | _ = require 'lodash' 2 | child_process = require 'child_process' 3 | chokidar = require 'chokidar' 4 | fs = require 'fs' 5 | path = require 'path' 6 | RSVP = require 'rsvp' 7 | Rsync = require('rsync') 8 | 9 | WATCH_CHILD_PATH = path.resolve __dirname, '../bin/watch.js' 10 | 11 | MAX_EXECUTION_RETRIES = 3 12 | EXECUTION_RETRY_WAIT_TIME = 1000 13 | 14 | # Class to watch a directory for changes and use rsync to bring a remote directory up-to-date. 
15 | class Rsyncer 16 | # Options: 17 | # src: full local path to a directory to watch and sync 18 | # dest: remote path to sync to 19 | # host: remote server running Rsync daemon 20 | # port: port Rsync is running on 21 | # module: remote module on the server 22 | constructor: (options) -> 23 | @source = options.src 24 | 25 | # True if an rsync process is currently running on our behalf 26 | @syncing = false 27 | 28 | # True if, after the current rsync process completes we should immediately kick off another 29 | # one because files may have changed in the meantime. 30 | @needsResync = false 31 | 32 | # True if there's a watch child process running. 33 | @watching = false 34 | @watchChild = null 35 | 36 | # Callback for us to send change / sync / waiting activity to. Set in the call to watch. 37 | @activityCb = null 38 | 39 | @rsync = Rsync.build 40 | source: "#{options.src}/" 41 | destination: "rsync://#{options.host}:#{options.port}/#{options.module}#{options.dest}/" 42 | flags: 'av' 43 | .set 'delete' 44 | 45 | # .dockerignore is a pretty good set of files not to bother to sync 46 | dockerignorePath = path.resolve(options.src, '.dockerignore') 47 | if fs.existsSync(dockerignorePath) 48 | @rsync.set 'exclude-from', dockerignorePath 49 | 50 | # Syncs the local "src" to "dest". progressCb is called periodically with arrays of file paths 51 | # as rsync reports them being synced. 52 | # 53 | # Returns a promise that resolves to the list of files synched. 54 | sync: (progressCb) -> 55 | new RSVP.Promise (resolve, reject) => 56 | statusLines = [] 57 | @syncing = true 58 | numRetries = 0 59 | 60 | completionHandler = (error, code, cmd) => 61 | @syncing = false 62 | 63 | # First 2 lines are about synchronizing files list and being done. Last lines are a summary 64 | # of the number of bytes transferred. Clear them both out to leave the list of files. 65 | # Additionally filter to remove lines just about directories. 
66 | pathStatusLines = statusLines[2...-2] 67 | fileStatusLines = _.filter pathStatusLines, (line) -> line.slice(-1) isnt '/' 68 | 69 | if error 70 | # Attempt to retry execution in case the connection was closed unexpectedly. 71 | if numRetries < MAX_EXECUTION_RETRIES 72 | setTimeout (=> @rsync.execute completionHandler, stdoutHandler), EXECUTION_RETRY_WAIT_TIME 73 | numRetries++ 74 | else reject(error) 75 | else resolve(fileStatusLines) 76 | 77 | # "data" ends up being some chunk of rsync's output, which is for the most part newline- 78 | # separated file paths. 79 | stdoutHandler = (data) -> 80 | newStatusLines = _.filter data.toString().split('\n'), (line) -> line isnt '' 81 | statusLines = statusLines.concat newStatusLines 82 | progressCb(newStatusLines) 83 | 84 | @rsync.execute completionHandler, stdoutHandler 85 | 86 | # Cause a sync to occur. We assume that this is called at a reasonably debounced interval. If 87 | # called while the sync is already in progress, schedules a resync to occur right after that sync 88 | # completes to pick up the additional changes. The resync's result is collapsed into the original 89 | # sync's result. 90 | # 91 | # If no sync is going on, returns a promise that will resolve to a list of files synched, 92 | # including any files from later resyncs. 93 | # 94 | # If a sync is currently going on, returns undefined. 95 | scheduleSync: (progressCb, accumFiles = []) -> 96 | if @syncing 97 | @needsResync = true 98 | return 99 | 100 | syncPromise = @sync(progressCb) 101 | .then (newFiles) => 102 | accumFiles.push.apply accumFiles, newFiles 103 | 104 | if @needsResync 105 | @needsResync = false 106 | @scheduleSync progressCb, accumFiles 107 | else 108 | accumFiles 109 | 110 | # Called to watch the "src" directory provided to the constructor and run rsync if its contents 111 | # change. 
  #
  # Calls the activityCb with an event, the watched path, a list of files (or null), and an error
  # (or null)
  #  'watching': Rsyncer is waiting for the directory to change. Called right after watch is called
  #     and again after every sync has completed.
  #  'changed': A change has been detected and a sync will be kicked off. Not called if additional
  #     changes are detected during a sync, as the scheduleSync method will roll those in to the
  #     same rsync call.
  #  'syncing': A sync is in progress, and rsync is writing changed file paths to stdout.
  #  'synched': A sync has just completed. Includes the list of file paths that rsync reported
  #     changing as the 3rd argument.
  #  'error': Something unfortunate happened. Includes the caught error as the 4th argument.
  watch: (@activityCb = ->) ->
    # For consistency, schedule this callback to happen soon rather than during the initial call
    # to watch.
    process.nextTick @activityCb.bind(null, 'watching', @source, null, null)

    @startWatchChild()

    # We need to make sure to stop the child when we exit. Pattern of binding to SIGINT and
    # SIGTERM (found in the galley binary) and to uncaughtException courtesy of:
    # https://www.exratione.com/2013/05/die-child-process-die/
    process.once 'exit', @stop.bind @
    process.once 'uncaughtException', (error) =>
      if process.listeners('uncaughtException').length is 0
        @stop()
      throw error

  # Since fsevents can get a bit segfaulty, we isolate the watching into a child process so that
  # it can crash without bringing down all of Galley.
142 | startWatchChild: -> 143 | @watching = true 144 | @watchChild = child_process.fork WATCH_CHILD_PATH, [@source], silent: true 145 | 146 | @watchChild.on 'message', (msg) => 147 | switch msg 148 | when 'change' then @receivedChange() 149 | 150 | # Triggered when either we are exiting the child (in which case @watching will be false) or 151 | # when the process crashes. If it crashes, we restart it, but also treat it as a "change" event 152 | # since file system changes are what would cause it to crash. 153 | @watchChild.on 'exit', => 154 | return unless @watching 155 | 156 | # TODO(finneganh): Maybe detect crash loop? 157 | @startWatchChild() 158 | @receivedChange() 159 | 160 | receivedChange: -> 161 | syncPromise = @scheduleSync @activityCb.bind(null, 'syncing', @source, null, null) 162 | 163 | if syncPromise 164 | @activityCb('changed', @source, null, null) 165 | 166 | syncPromise 167 | .then (files) => 168 | @activityCb('synched', @source, files, null) 169 | @activityCb('watching', @source, null, null) 170 | .catch (err) => 171 | @activityCb('error', @source, null, err) 172 | 173 | stop: -> 174 | @watching = false 175 | @watchChild?.kill 'SIGTERM' 176 | @watchChild = null 177 | @activityCb = null 178 | 179 | module.exports = Rsyncer 180 | -------------------------------------------------------------------------------- /acceptance/acceptance_spec.coffee: -------------------------------------------------------------------------------- 1 | _ = require 'lodash' 2 | expect = require 'expect' 3 | RSVP = require 'rsvp' 4 | 5 | testCommands = require './test_commands' 6 | 7 | #################################################################################################### 8 | # Acceptance tests for Galley. 9 | # 10 | # These tests work with a small set of containers that show off the primary capabilities of 11 | # Galley. The "application" container links to "backend", and its command is to curl backend. 
12 | # backend runs a tiny webserver that outputs JSON of a local file, a file mapped from a volume 13 | # container ("config"), and an HTTP request to another container, "database". 14 | # 15 | # This setup means that we can check the JSON output of application and see that 2 levels of link 16 | # and one volume container are connected correctly. 17 | # 18 | # We use a custom "TestReporter" on the run command to see what tasks Galley performed for each of 19 | # the services (e.g. Creating, Restarting) 20 | #################################################################################################### 21 | 22 | 23 | ENV = 'galley-integration' 24 | 25 | # This is what we expect from application when it curls to service and gets data. 26 | APPLICATION_SUCCESS = 27 | index: 'Hello, World!\n' 28 | config: 29 | config: "ok" 30 | database: 31 | data: "ok" 32 | 33 | # We tag from ":original" to ":latest" so that we can eventually update "latest" 34 | # to test the staleness behavior. 35 | resetTags = -> 36 | RSVP.all([ 37 | testCommands.exec 'docker tag galley-integration-backend:original galley-integration-backend' 38 | testCommands.exec 'docker tag galley-integration-database:original galley-integration-database' 39 | testCommands.exec 'docker tag galley-integration-application:original galley-integration-application' 40 | testCommands.exec 'docker tag galley-integration-config:original galley-integration-config' 41 | testCommands.exec 'docker tag galley-integration-rsync:original galley-integration-rsync' 42 | ]) 43 | 44 | removeContainers = -> 45 | testCommands.exec "docker rm -v backend.#{ENV} database.#{ENV} config.#{ENV}" 46 | 47 | describe 'galley', -> 48 | @timeout 15000 49 | 50 | # To establish a baseline, we first start up all the services, then stop them. This means that 51 | # each test run should have a consistent starting place. 
52 | before -> 53 | @timeout 0 54 | 55 | resetTags() 56 | .then -> 57 | testCommands.run ['-a', 'backend-addon', "application.#{ENV}"] 58 | .then -> 59 | testCommands.stopEnv ENV 60 | 61 | afterEach -> 62 | @timeout 0 63 | 64 | testCommands.stopEnv ENV 65 | .then -> 66 | resetTags() 67 | 68 | after -> 69 | @timeout 0 70 | 71 | testCommands.stopEnv ENV 72 | .then -> 73 | removeContainers() 74 | 75 | describe 'cleanup', -> 76 | it 'removes everything except stateful', -> 77 | testCommands.cleanup() 78 | .then ({reporter}) -> 79 | expect(reporter.services).toEqual 80 | 'backend.galley-integration': ['Removing'] 81 | 'database.galley-integration': ['Preserving stateful service'] 82 | 'config.galley-integration': ['Removing'] 83 | 84 | .finally -> 85 | # Need to put things back to how the "before" block sets things up so that the next set 86 | # of tests can run. 87 | testCommands.run ['-a', 'backend-addon', "application.#{ENV}"] 88 | .then -> 89 | testCommands.stopEnv ENV 90 | 91 | describe 'list', -> 92 | it 'prints addons and services with environments', -> 93 | testCommands.list() 94 | .then ({out}) -> 95 | expect(out).toEqual ''' 96 | Galleyfile: ./Galleyfile.js 97 | application -a backend-addon 98 | backend [.galley-integration] 99 | config 100 | database 101 | 102 | ''' 103 | 104 | describe 'basics', -> 105 | # Base test to show that we're starting everything up correctly. 106 | it 'starts up prereq services', -> 107 | testCommands.run ['-a', 'backend-addon', "application.#{ENV}"] 108 | .then ({stdout, reporter}) -> 109 | expect(JSON.parse(stdout)).toEqual APPLICATION_SUCCESS 110 | 111 | expect(reporter.services).toEqual 112 | 'config': ['Checking', 'Starting'] 113 | 'database': ['Checking', 'Starting'] 114 | 'backend': ['Checking', 'Starting'] 115 | 'application': ['Checking', 'Creating', 'Starting'] 116 | 117 | # Starts everything twice to show that the containers still running are just checked and 118 | # preserved. 
119 | it 'preserves running services', -> 120 | testCommands.run ['-a', 'backend-addon', "application.#{ENV}"] 121 | .then -> 122 | testCommands.run ['-a', 'backend-addon', "application.#{ENV}"] 123 | .then ({stdout, reporter}) -> 124 | expect(JSON.parse(stdout)).toEqual APPLICATION_SUCCESS 125 | 126 | expect(reporter.services).toEqual 127 | 'config': ['Checking', 'Starting'] # As a volume container, is never still running 128 | 'database': ['Checking'] 129 | 'backend': ['Checking'] 130 | 'application': ['Checking', 'Creating', 'Starting'] 131 | 132 | describe 'commands', -> 133 | it 'allows source, entrypoint, and new command', -> 134 | testCommands.run ['--entrypoint', 'cat', '-s', 'acceptance/fixtures/src', "application.#{ENV}", '/src/code.txt'] 135 | .then ({stdout, stderr, reporter}) -> 136 | expect(stdout).toEqual "println 'Hello World!'\n" 137 | 138 | it 'sets env variables, pipes stdin through correctly', -> 139 | testCommands.run ['-e', 'COUNT_CMD=/usr/bin/wc', "application.#{ENV}", '/bin/sh', '-c', '$COUNT_CMD'], stdin: 'kittens puppies' 140 | .then ({stdout, reporter}) -> 141 | expect(stdout).toEqual ' 0 2 15\n' 142 | 143 | describe 'links', -> 144 | # If a container was deleted, it is created. 145 | it 'creates removed linked-to services', -> 146 | testCommands.exec "docker rm -f backend.#{ENV}" 147 | .then -> 148 | testCommands.run ['-a', 'backend-addon', "application.#{ENV}"] 149 | .then ({stdout, reporter}) -> 150 | expect(JSON.parse(stdout)).toEqual APPLICATION_SUCCESS 151 | 152 | expect(reporter.services).toEqual 153 | 'config': ['Checking', 'Starting'] 154 | 'database': ['Checking', 'Starting'] 155 | 'backend': ['Checking', 'Creating', 'Starting'] 156 | 'application': ['Checking', 'Creating', 'Starting'] 157 | 158 | # If a container's link is missing, that container is created, and then linking container is 159 | # *re*-created to pick up a link to the new container. 
160 | it 'recreates services with removed linked-to services', -> 161 | testCommands.exec "docker rm -f database.#{ENV}" 162 | .then -> 163 | testCommands.run ['-a', 'backend-addon', "application.#{ENV}"] 164 | .then ({stdout, reporter}) -> 165 | expect(JSON.parse(stdout)).toEqual APPLICATION_SUCCESS 166 | 167 | expect(reporter.services).toEqual 168 | 'config': ['Checking', 'Starting'] 169 | 'database': ['Checking', 'Creating', 'Starting'] 170 | 'backend': ['Checking', 'Removing', 'Creating', 'Starting'] 171 | 'application': ['Checking', 'Creating', 'Starting'] 172 | 173 | # Test that if the 'backend' service is already running that it gets linked to for 174 | # application, rather than recreated. We map source over to an alternate directory 175 | # so that we can see in application that it's actually contacting this service we 176 | # hand-started. 177 | it 'uses existing backend container', -> 178 | testCommands.run ['-s', 'acceptance/fixtures/public', '--rsync', '--detach', "backend.#{ENV}"] 179 | .then ({stdout, reporter}) -> 180 | # FWIW sometimes this fails due to failures in previous runs of the tests 181 | # preventing containers from being removed consistently. If that's the case, 182 | # a second run of this test should make things right. 
183 | expect(reporter.services).toEqual 184 | 'backend (rsync)': ['Checking', 'Creating', 'Starting', 'Syncing'] 185 | 'config': ['Checking', 'Starting'] 186 | 'database': ['Checking', 'Starting'] 187 | 'backend': ['Checking', 'Removing', 'Creating', 'Starting'] 188 | 189 | testCommands.run ['-a', 'backend-addon', "application.#{ENV}"] 190 | .then ({stdout, reporter}) -> 191 | expect(JSON.parse(stdout)).toEqual _.merge {}, APPLICATION_SUCCESS, 192 | index: 'Hello, Source!\n' 193 | 194 | expect(reporter.services).toEqual 195 | 'config': ['Checking', 'Starting'] 196 | 'database': ['Checking'] 197 | 'backend': ['Checking'] 198 | 'application': ['Checking', 'Creating', 'Starting'] 199 | .finally -> 200 | testCommands.exec 'docker rm -vf backend.galley-integration-rsync' 201 | 202 | # Go back to the container without the mounted source to avoid polluting other tests 203 | testCommands.run ['--detach', "backend.#{ENV}"] 204 | 205 | describe 'volumes', -> 206 | it 'recreates after volume is deleted', -> 207 | testCommands.exec "docker rm -f config.#{ENV}" 208 | .then -> 209 | testCommands.run ['-a', 'backend-addon', "application.#{ENV}"] 210 | .then ({stdout, reporter}) -> 211 | expect(JSON.parse(stdout)).toEqual APPLICATION_SUCCESS 212 | 213 | expect(reporter.services).toEqual 214 | 'config': ['Checking', 'Creating', 'Starting'] 215 | 'database': ['Checking', 'Starting'] 216 | 'backend': ['Checking', 'Removing', 'Creating', 'Starting'] 217 | 'application': ['Checking', 'Creating', 'Starting'] 218 | -------------------------------------------------------------------------------- /lib/commands/help.coffee: -------------------------------------------------------------------------------- 1 | _ = require 'lodash' 2 | chalk = require 'chalk' 3 | minimist = require 'minimist' 4 | 5 | # Uses stderr 6 | print = console.warn 7 | 8 | commonOptionsHelp = -> 9 | print ' --help Show this help message' 10 | 11 | rootHelp = -> 12 | print "#{chalk.bold 'Usage:'} galley COMMAND [arg...]" 
13 | print '' 14 | print 'A tool to manage dependencies among local Docker containers' 15 | print '' 16 | print chalk.bold 'Commands' 17 | print ' help Print help' 18 | print ' cleanup Clean up docker images & containers to save disk space' 19 | print ' config Set up your galley configuration' 20 | print ' list List available services, and their environments and addons' 21 | print ' pull Download images for a service and its dependencies' 22 | print ' run Execute a command inside of a service’s container' 23 | print ' stop-env Stop all containers in an environment' 24 | print ' version Show the Galley version information' 25 | print '' 26 | print 'Run "galley COMMAND --help" for more information on a command.' 27 | 28 | listHelp = -> 29 | print "#{chalk.bold 'Usage:'} galley list" 30 | print '' 31 | print 'Parses your Galleyfile and lists the available services and ' 32 | print 'the environments for which they have defined behavior.' 33 | print '' 34 | print 'Services that do not have environments listed behave the same way' 35 | print 'under any given environment (e.g. same links, same ports).' 36 | print '' 37 | 38 | pullHelp = -> 39 | print "#{chalk.bold 'Usage:'} galley pull [OPTIONS] SERVICE[.ENV]" 40 | print '' 41 | print 'Downloads the latest version of SERVICE’s image from the Docker registry, as well' 42 | print 'as the latest versions of all of SERVICE’s dependencies.' 43 | print '' 44 | print 'If ENV is provided, uses it as a key to look up dependencies in the Galleyfile.' 45 | print '' 46 | print "#{chalk.bold 'Note:'} Does not affect existing containers, running or not." 47 | print 'When you run galley run, non-stateful services will be restarted to pick up new images.' 
48 | print '' 49 | print "#{chalk.bold 'Options'}" 50 | print ' -a, --add="SERVICE1,SERVICE2" Includes the specified add-on service(s) as part of' 51 | print ' this SERVICE’s dependencies when downloading updates' 52 | commonOptionsHelp() 53 | 54 | cleanupHelp = -> 55 | print "#{chalk.bold 'Usage:'} galley cleanup [OPTIONS]" 56 | print '' 57 | print 'Removes stopped containers (and their volumes) and cleans up dangling images to save' 58 | print 'disk space.' 59 | print '' 60 | print 'Containers are only stopped if their names match a service from the current Galleyfile' 61 | print 'and that service is not “stateful.”' 62 | print '' 63 | print "#{chalk.bold 'Options'}" 64 | print ' --unprotectAnonymous false If true, then stopped containers that don’t match a' 65 | print ' Galleyfile service are still removed, along with their' 66 | print " volumes. #{chalk.bold 'Use with caution.'}" 67 | print ' --unprotectStateful false If true, then “stateful” containers (such as MySQL) will be' 68 | print ' removed if they’re stopped.' 69 | commonOptionsHelp() 70 | 71 | configHelp = -> 72 | print "#{chalk.bold 'Usage:'} galley config COMMAND SETTING" 73 | print '' 74 | print 'Sets up your ~/.galleycfg file' 75 | print '' 76 | print chalk.bold 'Command' 77 | print ' set Used to set values for particular config settings ' 78 | print chalk.bold 'Settings' 79 | print ' rsync [false] Default behavior for source mapping with the rsync container' 80 | 81 | runHelp = -> 82 | print "#{chalk.bold 'Usage:'} galley run [OPTIONS] SERVICE[.ENV] [COMMAND [ARG...]]" 83 | print '' 84 | print 'Ensures that SERVICE’s dependencies are started, then runs the service. The container' 85 | print 'is started in the foreground by default, with STDIN piped in to the container. Galley' 86 | print 'will remove the container on process exit and return the same status code the process did.' 
87 | print '' 88 | print 'You can detach from the container with CTRL-P CTRL-Q, which will leave it running but' 89 | print 'exit Galley.' 90 | print '' 91 | print 'If no command is provided, Galley starts the container as a service. It is given the' 92 | print 'name “service.env” and any ports specified in this env are bound to the host.' 93 | print '' 94 | print 'If a command is specified, Galley does not bind ports automatically and gives the' 95 | print 'container a random name, so that it doesn’t conflict with any service versions of' 96 | print 'the container already running. Use --as-service to override this behavior to run' 97 | print 'the custom command with the default port binding and container name.' 98 | print '' 99 | print 'If run from a TTY, starts the container as a TTY as well. Otherwise, binds the' 100 | print 'container’s STDOUT and STDERR to the process’s STDOUT and STDERR.' 101 | print '' 102 | print 'The provided ENV is used as a suffix for container names, and as a key to look up' 103 | print 'dependencies in the Galleyfile.' 104 | print '' 105 | print chalk.bold 'Options' 106 | print ' -a, --add="SERVICE1,SERVICE2" Starts the specified add-on service(s) as part' 107 | print ' of this SERVICE’s dependencies' 108 | commonOptionsHelp() 109 | print ' --as-service=false When using a custom run command, still names the' 110 | print ' container and binds ports as if the default command' 111 | print ' were used.' 112 | print ' -d, --detach=false Starts the container in the background and does not' 113 | print ' automatically remove it on exit' 114 | print ' -e, --env=[] Set environment variables as “NAME=VALUE”' 115 | print ' --entrypoint="" Override the image’s default ENTRYPOINT with another' 116 | print ' command. If specified as blank, will override the image' 117 | print ' to not use an entrypoint (Docker default).' 118 | print ' --publish-all, -P=true Binds the primary service’s ports per the “ports”' 119 | print ' Galleyfile configuration. 
The default for this is false' 120 | print ' if a command is specified. Use this flag to override in' 121 | print ' that circumstance.' 122 | print ' --recreate="stale" Specify which linked containers should be re-created. Can' 123 | print ' be “all”, ”stale”, or ”missing-link”. (“stale” implies ' 124 | print ' “missing-link”) The primary service container is always ' 125 | print ' recreated. Containers for services marked “stateful” are ' 126 | print ' never recreated unless --unprotectStateful is true.' 127 | print ' --repairSourceOwnership=false After the command exits, ensure that files in the' 128 | print ' service’s source directory inside of the container are' 129 | print ' not owned by root' 130 | print ' --restart=false Uses Docker’s restart policy to restart the service’s' 131 | print ' process when it exits. Useful for cycling Rails apps' 132 | print ' without shutting down the container (which destroys links).' 133 | print ' --rsync=false If true, starts up the “rsync” container from the' 134 | print ' Galleyfile and uses it to sync the source directory from' 135 | print ' the host to the container. Note: files are only synched' 136 | print ' from outside the container to inside the container, not' 137 | print ' the reverse.' 138 | print ' -s, --source="" Provide a directory to volume mount over the services’s' 139 | print ' “source” directory.' 140 | print ' --unprotectStateful=false If true, then “stateful” containers (such as MySQL) may be' 141 | print ' recreated by the --recreate rules.' 
142 | print ' -u, --user="" Username or UID for the primary service’s container' 143 | print ' -v, --volume="" Map a host directory in to a container' 144 | print ' Format: hostDir[:containerDir]' 145 | print ' --volumes-from=[] List of containers whose volumes should be mapped in to' 146 | print ' the primary service' 147 | print ' -w, --workdir="" Execute the command from within this directory inside the' 148 | print ' container' 149 | 150 | stopEnvHelp = -> 151 | print "#{chalk.bold 'Usage:'} galley stop-env ENV" 152 | print '' 153 | print 'Stops all running containers that have the “.ENV” suffix.' 154 | 155 | versionHelp = -> 156 | print "#{chalk.bold 'Usage:'} galley version" 157 | print '' 158 | print 'Provides the currently running version of galley' 159 | 160 | HELPS = 161 | '_': rootHelp 162 | 'cleanup': cleanupHelp 163 | 'config': configHelp 164 | 'pull': pullHelp 165 | 'list': listHelp 166 | 'run': runHelp 167 | 'stop-env': stopEnvHelp 168 | 'version': versionHelp 169 | 170 | printHelp = (args, helps) -> 171 | if _.isFunction helps 172 | helps() 173 | else 174 | command = args[0] or '_' 175 | args = args.slice 1 176 | 177 | if helps[command]? 
178 | printHelp args, helps[command] 179 | else 180 | print "Error: Unrecognized argument: #{command}" 181 | printHelp args, helps['_'] 182 | process.exit 1 183 | 184 | module.exports = (args, options, done) -> 185 | argv = minimist args 186 | 187 | if options.prefix[0] is 'help' 188 | # This is the case for: galley help service pull 189 | printHelp argv._, HELPS 190 | else 191 | # This case is for: galley service pull --help 192 | printHelp options.prefix, HELPS 193 | 194 | done?() 195 | -------------------------------------------------------------------------------- /lib/lib/service_helpers.coffee: -------------------------------------------------------------------------------- 1 | RSVP = require 'rsvp' 2 | _ = require 'lodash' 3 | path = require 'path' 4 | 5 | DEFAULT_SERVICE_CONFIG = 6 | binds: [] 7 | command: null 8 | entrypoint: null 9 | env: {} 10 | image: null 11 | links: [] 12 | ports: [] 13 | restart: false 14 | source: null 15 | stateful: false 16 | user: '' 17 | volumesFrom: [] 18 | 19 | ENV_COLLAPSED_ARRAY_CONFIG_KEYS = ['links', 'ports', 'volumesFrom'] 20 | 21 | # Accept a value for argv options and normalize it to handle multiple comma delimited values in strings 22 | # while ensuring that the return value is always a flat array of Strings. Used by both -a and 23 | # --volumes-from. 
24 | # 25 | # e.g.: 26 | # 27 | # undefined -> [] 28 | # 'beta' -> ['beta'] 29 | # ['beta'] -> ['beta'] 30 | # 'beta,other' -> ['beta', 'other'] 31 | # ['beta', 'other'] -> ['beta', 'other'] 32 | # ['beta', 'other,third'] -> ['beta', 'other', 'third'] 33 | # 'beta,' -> ['beta'] 34 | # ',beta' -> ['beta'] 35 | normalizeMultiArgs = (addonOptions) -> 36 | toReturn = addonOptions 37 | 38 | # minimist will turn repeatable args (like --add) into an array, but in case there's just one, 39 | # or none, let's always present the value as an array for simplicity of consumption 40 | if _.isString(toReturn) 41 | toReturn = [toReturn] 42 | else if _.isUndefined(toReturn) 43 | toReturn = [] 44 | 45 | # Now, for each entry in the array, handle comma delimited multiple values by flat-mapping a split 46 | # on commas, and rejecting empty strings 47 | _.flatten(_.map(toReturn, (val) -> 48 | if val.indexOf(",") > -1 49 | _.filter(val.split(","), (val) -> val.length > 0) 50 | else 51 | val 52 | )) 53 | 54 | # Converts the "--volume" argv value into an array of volume mappings, resolving any host paths 55 | # relative to the current working directory. 56 | normalizeVolumeArgs = (volumeOptions) -> 57 | # Compensate for minimist giving a string value for single use and an array for multiple use 58 | volumes = if _.isArray(volumeOptions) then volumeOptions else [volumeOptions] 59 | 60 | _.map volumes, (volume) -> 61 | [host_path, container_path] = volume.split(':') 62 | "#{path.resolve(host_path)}:#{container_path}" 63 | 64 | 65 | lookupEnvValue = (hash, env, defaultValue) -> 66 | defaultValue = [] if defaultValue is undefined 67 | 68 | [env, namespace] = env.split('.') 69 | val = hash["#{env}.#{namespace}"] || hash[env] 70 | 71 | # Use existance check rather than just falsey so that val can be '' 72 | if val? 
then val else defaultValue 73 | 74 | lookupEnvArray = (value, env) -> 75 | value = value or [] 76 | if _.isArray value 77 | value 78 | else 79 | lookupEnvValue value, env 80 | 81 | # serviceConfig: a hash from the Galleyfile for a particular service 82 | # env: string of the form "env" or "env.namespace" 83 | # 84 | # Returns serviceConfig flattened down to only the given env 85 | collapseServiceConfigEnv = (serviceConfig, env) -> 86 | collapsedServiceConfig = _.mapValues serviceConfig, (value, key) -> 87 | if ENV_COLLAPSED_ARRAY_CONFIG_KEYS.indexOf(key) isnt -1 88 | collapseEnvironment value, env, [] 89 | else if key is 'env' 90 | # value here is a hash of 'ENV_VAR_NAME': 91 | _.mapValues value, (envValue, envKey) -> 92 | collapseEnvironment envValue, env, null 93 | else 94 | value 95 | 96 | collapsedServiceConfig 97 | 98 | # service: the service who's serviceConfig is being examined 99 | # env: the requested env 100 | # serviceConfig: the service config, collapsed by env for this service 101 | # requestedAddons: the addons requested in the command 102 | # globalAddons: the possible addons that can be requested 103 | # 104 | # Given a service, an env, and addons, this adds the requested addons to the serviceConfig 105 | # including links, ports, volumes and env variables. Supports addons with environments. 106 | combineAddons = (service, env, serviceConfig, requestedAddons, globalAddons) -> 107 | for addonName in requestedAddons 108 | addon = globalAddons[addonName] 109 | throw "Addon #{addonName} not found in ADDONS list in Galleyfile" unless addon? 110 | 111 | serviceAddon = addon[service] 112 | if serviceAddon? 113 | for key in ENV_COLLAPSED_ARRAY_CONFIG_KEYS 114 | if serviceAddon[key]? 115 | addonValue = collapseEnvironment serviceAddon[key], env, [] 116 | serviceConfig[key] = serviceConfig[key].concat addonValue 117 | 118 | if serviceAddon.env? 
119 | addonEnv = _.mapValues serviceAddon.env, (envValue, envKey) -> 120 | collapseEnvironment envValue, env, null 121 | serviceConfig.env = _.merge {}, serviceConfig.env, addonEnv 122 | serviceConfig 123 | 124 | addDefaultNames = (globalConfig, service, env, serviceConfig) -> 125 | serviceConfig.name = service 126 | serviceConfig.containerName = "#{service}.#{env}" 127 | serviceConfig.image ||= _.compact([globalConfig.registry, service]).join '/' 128 | serviceConfig 129 | 130 | # Takes a Galleyfile configuration, environment suffix, and list of addons from the command line, 131 | # and returns a hash of the format: 132 | # servicesConfig: service name to serviceConfig map 133 | # globalConfig: global configuration data (e.g. rsync or registry info) 134 | # 135 | # The servicesConfig is processed and normalized in the following ways: 136 | # - Missing keys and values are added so that each service has a value for everything in 137 | # DEFAULT_SERVICES_CONFIG 138 | # - Image and container names are filled in 139 | # - Environments are "collapsed": e.g. the service's "links" values are those links specified for 140 | # the passed in "env" value. 141 | # - Addons are expanded and merged in to the other values. 142 | # 143 | # Callers of this method therefore need not worry about any further parameterization based on env 144 | # or addons. 145 | processConfig = (galleyfileValue, env, requestedAddons) -> 146 | globalConfig = galleyfileValue.CONFIG or {} 147 | globalAddons = galleyfileValue.ADDONS or {} 148 | 149 | unless globalConfig.rsync? 
150 | globalConfig.rsync = 151 | image: 'galley/rsync' 152 | module: 'root' 153 | 154 | servicesConfig = _.mapValues galleyfileValue, (serviceConfig, service) -> 155 | return if service is 'CONFIG' 156 | 157 | # TOOD(finneganh): Raise exception if unrecognized key in serviceConfig 158 | 159 | serviceConfig = _.merge {}, DEFAULT_SERVICE_CONFIG, serviceConfig 160 | 161 | serviceConfig = collapseServiceConfigEnv serviceConfig, env 162 | serviceConfig = combineAddons service, env, serviceConfig, requestedAddons, globalAddons 163 | serviceConfig = addDefaultNames globalConfig, service, env, serviceConfig 164 | serviceConfig 165 | 166 | # Remove the globalConfig from the servicesConfig to keep it from accidentally being used as a 167 | # service called "CONFIG". 168 | delete servicesConfig.CONFIG 169 | 170 | {globalConfig, servicesConfig} 171 | 172 | collapseEnvironment = (configValue, env, defaultValue) -> 173 | if _.isObject(configValue) and not _.isArray(configValue) 174 | lookupEnvValue configValue, env, defaultValue 175 | else 176 | if configValue? 
then configValue else defaultValue 177 | 178 | envsFromServiceConfig = (serviceConfig) -> 179 | definedEnvs = for parametrizeableKey, value of _.pick(serviceConfig, ['links', 'ports', 'volumesFrom']) 180 | if not value or _.isArray(value) 181 | [] 182 | else 183 | _.keys(value) 184 | 185 | envEnvs = for variable, value of serviceConfig['env'] 186 | if _.isArray(value) or _.isString(value) 187 | [] 188 | else 189 | _.keys(value) 190 | 191 | definedEnvs = definedEnvs.concat(envEnvs) 192 | _.unique _.flatten definedEnvs 193 | 194 | envsByService = (galleyfileValue) -> 195 | serviceList = _.mapValues galleyfileValue, (serviceConfig, service) -> 196 | return if service is 'CONFIG' or service is 'ADDONS' 197 | envsFromServiceConfig serviceConfig 198 | 199 | delete serviceList.CONFIG 200 | delete serviceList.ADDONS 201 | serviceList 202 | 203 | addonsByService = (galleyfileValue) -> 204 | addons = galleyfileValue['ADDONS'] or {} 205 | _.tap {}, (serviceAddonMap) -> 206 | for addon, servicesMap of addons 207 | for service, infoMap of servicesMap 208 | serviceAddonMap[service] ||= [] 209 | serviceAddonMap[service].push addon 210 | serviceAddonMap[service].sort() 211 | 212 | # Generates an array of prerequisite services of a service, from the configuration file. 213 | # The last element of returned array is the requested service, 214 | # and strict ordering is maintained for the rest, such that no service comes before 215 | # a service that it depends on. 
216 | generatePrereqServices = (service, servicesConfig) -> 217 | _.uniq generatePrereqsRecursively(service, servicesConfig).reverse() 218 | 219 | # foundServices: contains the ordered list of services that have been discovered by the depth first recursion 220 | # pendingServices: contains the immediate dependency chain in order to reject circular dependecies 221 | # both are built up recursively 222 | generatePrereqsRecursively = (service, servicesConfig, pendingServices = [], foundServices = []) -> 223 | nextFoundServices = foundServices.concat service 224 | serviceConfig = servicesConfig[service] 225 | 226 | throw "Missing config for service: #{service}" unless serviceConfig 227 | 228 | links = serviceConfig.links 229 | volumesFroms = serviceConfig.volumesFrom 230 | 231 | prereqs = links.concat volumesFroms 232 | 233 | _.forEach prereqs, (prereqName) -> 234 | prereqService = prereqName.split(':')[0] 235 | 236 | if (circularIndex = pendingServices.indexOf(prereqService)) isnt -1 237 | dependencyServices = foundServices.slice(circularIndex) 238 | dependencyServices.push service, prereqService 239 | # trigger error handling for the circular dependency 240 | throw "Circular dependency for #{prereqService}: #{dependencyServices.join ' -> '}" 241 | 242 | nextPendingServices = pendingServices.concat service 243 | nextFoundServices = generatePrereqsRecursively(prereqService, servicesConfig, nextPendingServices, nextFoundServices) 244 | nextFoundServices 245 | 246 | module.exports = { 247 | DEFAULT_SERVICE_CONFIG 248 | normalizeMultiArgs 249 | normalizeVolumeArgs 250 | addDefaultNames 251 | generatePrereqServices 252 | collapseEnvironment 253 | combineAddons 254 | collapseServiceConfigEnv 255 | processConfig 256 | envsByService 257 | addonsByService 258 | } 259 | 260 | -------------------------------------------------------------------------------- /spec/service_helpers_spec.coffee: -------------------------------------------------------------------------------- 1 | 
expect = require 'expect' 2 | _ = require 'lodash' 3 | 4 | ServiceHelpers = require '../lib/lib/service_helpers' 5 | 6 | describe 'normalizeMultiArgs', -> 7 | describe 'with a non-delimited string', -> 8 | it 'should be an array with one value', -> 9 | expect(ServiceHelpers.normalizeMultiArgs('beta')).toEqual ['beta'] 10 | 11 | describe 'with a delimited string with two values', -> 12 | it 'should be an array with two values', -> 13 | expect(ServiceHelpers.normalizeMultiArgs('beta,other')).toEqual ['beta', 'other'] 14 | 15 | describe 'with a delimited string with a bad leading comma', -> 16 | it 'should be an array with one value', -> 17 | expect(ServiceHelpers.normalizeMultiArgs(',other')).toEqual ['other'] 18 | 19 | describe 'with a delimited string with a bad trailing comma', -> 20 | it 'should be an array with one value', -> 21 | expect(ServiceHelpers.normalizeMultiArgs('beta,')).toEqual ['beta'] 22 | 23 | describe 'with an array with one value', -> 24 | it 'should be an array with one value', -> 25 | expect(ServiceHelpers.normalizeMultiArgs(['beta'])).toEqual ['beta'] 26 | 27 | describe 'with an array with one value', -> 28 | it 'should be an array with one value', -> 29 | expect(ServiceHelpers.normalizeMultiArgs(['beta'])).toEqual ['beta'] 30 | 31 | describe 'with an array with two values', -> 32 | it 'should be an array with two values', -> 33 | expect(ServiceHelpers.normalizeMultiArgs(['beta', 'other'])).toEqual ['beta', 'other'] 34 | 35 | describe 'with an array with two values, one of which is delimited', -> 36 | it 'should be an array with three values', -> 37 | expect(ServiceHelpers.normalizeMultiArgs(['beta', 'other,third'])).toEqual ['beta', 'other', 'third'] 38 | 39 | describe 'normalizeVolumeArgs', -> 40 | it 'handles a single value', -> 41 | expect(ServiceHelpers.normalizeVolumeArgs '/host:/container').toEqual ['/host:/container'] 42 | it 'handles multiple values', -> 43 | volumes = ['/host1:/container1', '/host2:/container2'] 44 | 
expect(ServiceHelpers.normalizeVolumeArgs volumes).toEqual volumes 45 | it 'resolves relative paths', -> 46 | expect(ServiceHelpers.normalizeVolumeArgs ['host:/container']).toEqual [ 47 | "#{process.cwd()}/host:/container" 48 | ] 49 | 50 | describe 'generatePrereqServices', -> 51 | describe 'generates simple dependency chain', -> 52 | config = 53 | service: 54 | links: ['service_two'] 55 | volumesFrom: [] 56 | service_two: 57 | links: ['service_three'] 58 | volumesFrom: [] 59 | service_three: 60 | links: ['service_four'] 61 | volumesFrom: ['service_five'] 62 | service_four: 63 | links: [] 64 | volumesFrom: [] 65 | service_five: 66 | links: [] 67 | volumesFrom: [] 68 | it 'should generate correctly ordered list', -> 69 | expect(ServiceHelpers.generatePrereqServices 'service', config).toEqual ['service_five', 'service_four', 'service_three', 'service_two', 'service'] 70 | 71 | describe 'does not have duplicate service entries, keeps the earliest', -> 72 | config = 73 | service: 74 | links: ['service_two'] 75 | volumesFrom: [] 76 | service_two: 77 | links: ['service_three', 'service_four'] 78 | volumesFrom: [] 79 | service_three: 80 | links: ['service_four'] 81 | volumesFrom: [] 82 | service_four: 83 | links: [] 84 | volumesFrom: [] 85 | it 'should generate correctly ordered list', -> 86 | expect(ServiceHelpers.generatePrereqServices 'service', config).toEqual ['service_four', 'service_three', 'service_two', 'service'] 87 | 88 | describe 'fails on circular dependency', -> 89 | config = 90 | service: 91 | links: ['service_another'] 92 | service_another: 93 | links: ['service'] 94 | it 'should throw', -> 95 | expect( -> ServiceHelpers.generatePrereqServices('service', config)).toThrow('Circular dependency for service: service -> service_another -> service') 96 | 97 | describe 'collapseEnvironment', -> 98 | describe 'not parameterized', -> 99 | CONFIG_STRING_VALUE = 'foo' 100 | CONFIG_ARRAY_VALUE = ['foo', 'bar'] 101 | 102 | it 'does not modify a string', -> 103 | 
expect(ServiceHelpers.collapseEnvironment CONFIG_STRING_VALUE, 'dev').toEqual CONFIG_STRING_VALUE 104 | 105 | it 'does not modify an array', -> 106 | expect(ServiceHelpers.collapseEnvironment CONFIG_ARRAY_VALUE, 'dev').toEqual CONFIG_ARRAY_VALUE 107 | 108 | describe 'parameterized', -> 109 | CONFIG_VALUE = 110 | 'dev': 'foo' 111 | 'test': 'bar' 112 | 'test.cucumber': 'baz' 113 | 114 | it 'returns defaultValue when env is missing', -> 115 | expect(ServiceHelpers.collapseEnvironment CONFIG_VALUE, 'prod', ['default']).toEqual ['default'] 116 | 117 | it 'finds named environment', -> 118 | expect(ServiceHelpers.collapseEnvironment CONFIG_VALUE, 'dev', null).toEqual 'foo' 119 | 120 | it 'finds namespaced environment', -> 121 | expect(ServiceHelpers.collapseEnvironment CONFIG_VALUE, 'test.cucumber', null).toEqual 'baz' 122 | 123 | it 'falls back when namespace is missing', -> 124 | expect(ServiceHelpers.collapseEnvironment CONFIG_VALUE, 'dev.cucumber', null).toEqual 'foo' 125 | 126 | describe 'collapseServiceConfigEnv', -> 127 | describe 'array parameterization', -> 128 | CONFIG = 129 | image: 'my-image' 130 | links: 131 | 'dev': ['service'] 132 | 'dev.namespace': ['better-service'] 133 | 'test': ['mock-service'] 134 | ports: 135 | 'dev': ['3000'] 136 | volumesFrom: 137 | 'test': ['container'] 138 | 139 | it 'collapses down to just the environment', -> 140 | expect(ServiceHelpers.collapseServiceConfigEnv(CONFIG, 'dev.namespace')).toEqual 141 | image: 'my-image' 142 | links: ['better-service'] 143 | ports: ['3000'] 144 | volumesFrom: [] 145 | 146 | describe 'env parameterization', -> 147 | CONFIG = 148 | env: 149 | 'HOSTNAME': 'docker' 150 | 'NOTHING': '' 151 | 'TEST_ONLY_VALUE': 152 | 'test': 'true' 153 | 'RAILS_ENV': 154 | 'dev': 'development' 155 | 'test': 'test' 156 | 157 | it 'paramerizes the env variables', -> 158 | expect(ServiceHelpers.collapseServiceConfigEnv(CONFIG, 'dev.namespace')).toEqual 159 | env: 160 | 'HOSTNAME': 'docker' 161 | 'NOTHING': '' 162 | 
'TEST_ONLY_VALUE': null 163 | 'RAILS_ENV': 'development' 164 | 165 | describe 'combineAddons', -> 166 | describe 'addons', -> 167 | describe 'array parameter merging', -> 168 | EXPECTED = 169 | links: ['database', 'addon-service'] 170 | 171 | describe 'without env', -> 172 | ADDONS = 173 | 'my-addon': 174 | 'service': 175 | links: ['addon-service'] 176 | CONFIG = 177 | links: ['database'] 178 | 179 | it 'merges addons array parameters with addon', -> 180 | expect(ServiceHelpers.combineAddons('service', 'dev', CONFIG, ['my-addon'], ADDONS)).toEqual EXPECTED 181 | 182 | describe 'with addon env', -> 183 | ADDONS = 184 | 'my-addon': 185 | 'service': 186 | links: 187 | 'dev': ['addon-service'] 188 | CONFIG = 189 | links: ['database'] 190 | 191 | it 'merges addons array parameters with addon env', -> 192 | expect(ServiceHelpers.combineAddons('service', 'dev', CONFIG, ['my-addon'], ADDONS)).toEqual EXPECTED 193 | 194 | describe 'with addon namespaced env', -> 195 | ADDONS = 196 | 'my-addon': 197 | 'service': 198 | links: 199 | 'dev.namespace': ['addon-service'] 200 | CONFIG = 201 | links: ['database'] 202 | 203 | it 'merges addons array parameters with namespaced addon env', -> 204 | expect(ServiceHelpers.combineAddons('service', 'dev.namespace', CONFIG, ['my-addon'], ADDONS)).toEqual EXPECTED 205 | 206 | describe 'env parameter merging', -> 207 | describe 'with no base env', -> 208 | ADDONS = 209 | 'my-addon': 210 | 'service': 211 | env: 212 | 'HOSTNAME': 'docker-addon' 213 | 'CUSTOM': 214 | 'dev.namespace': 'custom-value' 215 | 216 | CONFIG = {} 217 | it 'parametrizes the env variables', -> 218 | expect(ServiceHelpers.combineAddons('service', 'dev.namespace', CONFIG, ['my-addon'], ADDONS)).toEqual 219 | env: 220 | 'HOSTNAME': 'docker-addon' 221 | 'CUSTOM': 'custom-value' 222 | 223 | describe 'with a base env', -> 224 | ADDONS = 225 | 'my-addon': 226 | 'service': 227 | env: 228 | 'HOSTNAME': 'docker-addon' 229 | 'CUSTOM': 230 | 'dev.namespace': 'custom-value' 231 | 232 
| CONFIG = 233 | env: 234 | 'HOSTNAME': 'docker' 235 | 'TEST_ONLY_VALUE': null 236 | 'RAILS_ENV': 'development' 237 | 238 | it 'parametrizes the env variables', -> 239 | expect(ServiceHelpers.combineAddons('service', 'dev.namespace', CONFIG, ['my-addon'], ADDONS)).toEqual 240 | env: 241 | 'HOSTNAME': 'docker-addon' 242 | 'TEST_ONLY_VALUE': null 243 | 'RAILS_ENV': 'development' 244 | 'CUSTOM': 'custom-value' 245 | 246 | describe 'with multiple addons', -> 247 | ADDONS = 248 | 'my-addon': 249 | 'service': 250 | env: 251 | 'HOSTNAME': 'docker-addon' 252 | 'my-second-addon': 253 | 'service': 254 | env: 255 | 'CUSTOM': 256 | 'dev.namespace': 'custom-value' 257 | CONFIG = 258 | env: 259 | 'HOSTNAME': 'docker' 260 | 'TEST_ONLY_VALUE': null 261 | 'RAILS_ENV': 'development' 262 | 263 | it 'paramerizes the env variables', -> 264 | expect(ServiceHelpers.combineAddons('service', 'dev.namespace', CONFIG, ['my-addon', 'my-second-addon'], ADDONS)).toEqual 265 | env: 266 | 'HOSTNAME': 'docker-addon' 267 | 'TEST_ONLY_VALUE': null 268 | 'RAILS_ENV': 'development' 269 | 'CUSTOM': 'custom-value' 270 | 271 | describe 'addDefaultNames', -> 272 | GLOBAL_CONFIG = registry: 'docker.example.tv' 273 | 274 | it 'preserves existing image name', -> 275 | expect(ServiceHelpers.addDefaultNames(GLOBAL_CONFIG, 'database', 'dev', {image: 'mysql'})).toEqual 276 | containerName: 'database.dev' 277 | image: 'mysql' 278 | name: 'database' 279 | 280 | it 'adds missing image name', -> 281 | expect(ServiceHelpers.addDefaultNames(GLOBAL_CONFIG, 'application', 'dev', {})).toEqual 282 | containerName: 'application.dev' 283 | image: 'docker.example.tv/application' 284 | name: 'application' 285 | 286 | it 'tolerates no registry', -> 287 | expect(ServiceHelpers.addDefaultNames({}, 'application', 'dev', {})).toEqual 288 | containerName: 'application.dev' 289 | image: 'application' 290 | name: 'application' 291 | 292 | describe 'envsByService', -> 293 | describe 'envs', -> 294 | CONFIG = 295 | service: 296 | 
image: 'my-image' 297 | links: 298 | 'dev': ['service'] 299 | 'dev.namespace': ['better-service'] 300 | 'test': ['mock-service'] 301 | env: 302 | 'HOSTNAME': 'docker' 303 | 'TEST_ONLY_VALUE': 304 | 'test': 'true' 305 | 'RAILS_ENV': 306 | 'dev': 'development' 307 | 'test': 'test' 308 | 'other': 'foo' 309 | ports: 310 | 'dev': ['3000'] 311 | volumesFrom: 312 | 'test': ['container'] 313 | application: 314 | image: 'application' 315 | 316 | it 'processes services', -> 317 | expect(ServiceHelpers.envsByService(CONFIG)).toEqual 318 | 'application': [] 319 | 'service': ['dev', 'dev.namespace', 'test', 'other'] 320 | 321 | describe 'addonsByService', -> 322 | describe 'envs', -> 323 | CONFIG = 324 | ADDONS: 325 | myaddon: 326 | service: 327 | links: 328 | 'dev': ['database'] 329 | service2: 330 | links: 331 | 'dev': ['database'] 332 | myaddon2: 333 | service: {} 334 | service3: {} 335 | 336 | it 'processes addons', -> 337 | expect(ServiceHelpers.addonsByService(CONFIG)).toEqual { 338 | 'service': ['myaddon', 'myaddon2'] 339 | 'service2': ['myaddon'] 340 | 'service3': ['myaddon2'] 341 | } 342 | 343 | describe 'processConfig', -> 344 | describe 'naming', -> 345 | CONFIG = 346 | CONFIG: 347 | registry: 'docker.example.tv' 348 | 'application': {} 349 | 'database': 350 | image: 'mysql' 351 | 352 | it 'processes services', -> 353 | expect(ServiceHelpers.processConfig(CONFIG, 'dev', []).servicesConfig).toEqual 354 | 'application': 355 | binds: [] 356 | command: null 357 | containerName: 'application.dev' 358 | entrypoint: null 359 | env: {} 360 | image: 'docker.example.tv/application' 361 | links: [] 362 | name: 'application' 363 | ports: [] 364 | restart: false 365 | source: null 366 | stateful: false 367 | user: '' 368 | volumesFrom: [] 369 | 'database': 370 | binds: [] 371 | command: null 372 | containerName: 'database.dev' 373 | env: {} 374 | entrypoint: null 375 | image: 'mysql' 376 | links: [] 377 | name: 'database' 378 | ports: [] 379 | restart: false 380 | source: null 
381 | stateful: false 382 | user: '' 383 | volumesFrom: [] 384 | 385 | it 'returns global config', -> 386 | expect(ServiceHelpers.processConfig(CONFIG, 'dev', []).globalConfig).toEqual 387 | registry: 'docker.example.tv' 388 | # rsync config is default 389 | rsync: 390 | image: 'galley/rsync' 391 | module: 'root' 392 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | ![Galley](docs/images/galley-red.png) 2 | 3 | 4 | [![Build Status](https://travis-ci.org/twitter-fabric/galley.svg?branch=master)](https://travis-ci.org/twitter-fabric/galley) 5 | 6 | ## Overview 7 | 8 | Galley is a command-line tool for orchestrating [Docker](https://www.docker.com/) containers in development 9 | and test environments. Galley automatically starts a container’s dependencies and connects them using Docker’s 10 | `Links` and `VolumesFrom` mappings. Use Galley to start up a web server that connects to a database. Then, use it to 11 | start up a web server, its database, an intermediate data service (and its database), some queues, worker processes, and 12 | the monitoring server they all connect to. 13 | 14 | **Latest version:** 1.2.6 15 | 16 | - Fix a bug that improperly truncated command line args containing `=` 17 | 18 | ### What makes Galley different? 19 | 20 | Galley was built to support [Fabric](http://fabric.io)’s internal development process: multiple teams 21 | sharing a dozen or more services across a 22 | variety of source code repositories. What is under active development by one team might just be a dependency to 23 | another, so Galley gives engineers the flexibility to start the service or services they’re working with using 24 | local source code, while getting pre-built images for any dependencies. 
25 | 26 | Galley keeps service dependencies in a central “Galleyfile” configuration so that you can always start up any 27 | service in your system, along with any necessary transitive dependencies. 28 | 29 | ### Features 30 | 31 | - Run Docker containers, linking them to their dependencies 32 | - Dynamic mapping of local source code into containers 33 | - Custom environments to easily run isolated development and test containers side-by-side 34 | - “Addons” to define optional configuration for services 35 | - Automatic re-use of running containers to support developing multiple services simultaneously 36 | - Protected “stateful” containers (*e.g.* databases) 37 | - JavaScript-based configuration for higher-order service definitions 38 | 39 | Galley also has special support for running under a VM, such as when using [docker-machine](https://docs.docker.com/machine/) on Mac OS X: 40 | 41 | - Built-in `rsync` support for massively-improved disk performance with VirtualBox for local source code. 42 | - Port forwarding to let other machines or mobile devices connect to containers in the VM 43 | 44 | And, for continuous integration machines: 45 | 46 | - A `--repairSourceOwnership` flag keeps containers running as root from generating files that only root can delete 47 | - Cleanup command to free up disk space from unused images 48 | 49 | ### Bug Reports and Discussion 50 | 51 | If you find something wrong with Galley, please let us know on the 52 | [issues](https://github.com/twitter-fabric/galley/issues) page. 53 | 54 | You can also chat with us or ask for help on the 55 | [galley-discuss@googlegroups.com](https://groups.google.com/forum/#!forum/galley-discuss) mailing list. 56 | 57 | ## Galley concepts 58 | 59 | To use Galley you define a set of **services** in a central **Galleyfile**. These definitions specify 60 | Docker options for each service (image, links, volumes, *etc.*). 
61 | 
62 | When you use `galley run <service>.<env>`, you provide a **primary service** that you want to interact with, and the 
63 | **environment** to run it in. Environments are used in service definitions to vary the configuration, for example to 
64 | specify different dependencies between “dev” and “test” modes. 
65 | 
66 | Environments can also have a namespace, such as `.dev.1` or `test.cucumber`. If a service does not have a 
67 | configuration for a namespaced environment, the one for the base environment is used instead. 
68 | 
69 | Not all services must have environment-specific configurations. For a service with no environment configuration, the 
70 | service's base environment configuration is used. 
71 | 
72 | ## Quick start 
73 | 
74 | *Note that Galley requires node >= version 5 to run* 
75 | 
76 | ```console 
77 | $ npm install -g galley-cli 
78 | $ git clone https://github.com/twitter-fabric/galley-template.git 
79 | $ cd galley-template 
80 | $ galley run demo.dev 
81 | ``` 
82 | 
83 | ## Setting up Galley 
84 | 
85 | ### Installation 
86 | 
87 | Galley is distributed as a command-line tool, `galley`, and a library. Install the command-line 
88 | tool globally from the [galley-cli NPM package](https://www.npmjs.com/package/galley-cli): 
89 | 
90 | ```console 
91 | $ npm install -g galley-cli 
92 | ``` 
93 | 
94 | ### Create a Galleyfile package 
95 | 
96 | Galley keeps your system’s configuration in a central Galleyfile. This file must be in a directory with 
97 | an NPM package.json file that depends on the [galley NPM package](https://www.npmjs.com/package/galley). 
98 | You will typically symlink the Galleyfile into the local directory where you keep your repositories. 
99 | 
100 | When you run the `galley` tool, it recursively walks up your directories until it finds a Galleyfile 
101 | or a symlink to one. It then uses the galley library depended on by that package to execute your 
102 | commands. 
103 | 104 | The easiest way to get started with a Galleyfile is to clone our template: 105 | 106 | ```console 107 | $ git clone https://github.com/twitter-fabric/galley-template.git 108 | ``` 109 | 110 | You can also create an NPM package from scratch: 111 | 112 | ```console 113 | $ npm init 114 | $ npm install --save galley 115 | $ npm shrinkwrap 116 | ``` 117 | 118 | ### Writing a Galleyfile 119 | 120 | A Galleyfile is a JavaScript module that exports a configuration hash that defines your services and 121 | their dependencies. 122 | 123 | Services are expected to share a common Galleyfile that defines the dependencies among 124 | them. You should put your Galleyfile in a common place, and then symlink to it from a 125 | parent directory for your services. The `galley` CLI tool will search for a Galleyfile recursively 126 | from the directory it’s run in. 127 | 128 | #### Example 129 | 130 | The file below defines a Rails “www” service that depends on a MySQL DB in test and both a MySQL DB and a beanstalk 131 | queue in development. Additionally, it expects to have a “config-files” volume mounted in. The container’s source 132 | code is kept in `/code/www`, so you can use `galley run -s .` to map a local directory over it. 133 | 134 | ```javascript 135 | // Example Galleyfile.js for a small Rails app. 
136 | module.exports = { 137 | CONFIG: { 138 | registry: 'docker.example.biz' 139 | }, 140 | 141 | 'config-files': {}, 142 | 'beanstalk': {}, 143 | 144 | 'www-mysql': { 145 | image: 'mysql', 146 | stateful: true 147 | }, 148 | 149 | 'www': { 150 | env: { 151 | RAILS_ENV: { 152 | 'dev': 'development', 153 | 'test': 'test' 154 | } 155 | }, 156 | links: { 157 | 'dev': ['www-mysql:mysql', 'beanstalk'], 158 | 'test': ['www-mysql'] 159 | }, 160 | ports: { 161 | 'dev': ['3000:3000'] 162 | }, 163 | source: '/code/www', 164 | volumesFrom: ['config-files'] 165 | } 166 | }; 167 | ``` 168 | 169 | Then, from a common parent directory of your services' source directories: 170 | ```console 171 | $ ln -s ../../path/to/Galleyfile.js . 172 | ``` 173 | 174 | ### Running Galley 175 | 176 | Once you’ve written a Galleyfile and symlinked it, try it out: 177 | 178 | ```bash 179 | $ galley list 180 | ``` 181 | ``` 182 | Galleyfile: /path/to/found/Galleyfile.js 183 | beanstalk 184 | config-files 185 | www [.dev, .test] 186 | www-mysql 187 | ``` 188 | 189 | ## Command reference 190 | 191 | ### `run` 192 | 193 | **Examples:** 194 | ``` 195 | # Starts the www service with its dev dependencies. Runs the image’s default CMD. 196 | galley run www.dev 197 | 198 | # Maps the current directory in as the “source” directory, uses test dependencies, and runs “rake spec”. 199 | galley run -s . www.test rake spec 200 | ``` 201 | 202 | Starts up the given service, using the environment both to name containers and to affect the service configuration. 203 | Dependencies, either `links` or `volumesFrom`, will be started up first and recursively. Containers will be named 204 | `.`. STDOUT, STDERR, and STDIN are piped from the terminal to the container. 205 | 206 | When Galley exits it will remove the primary service container by default, but leave any dependencies running. 207 | 208 | Galley will *always* recreate the container for the named (“primary”) service. 
For dependencies, Galley will look 209 | for existing containers that match the `.` naming pattern, starting them if necessary. It will 210 | delete and recreate them if: 211 | 212 | - their source image doesn’t match the current image for their image name (*e.g.* if an image was built or pulled 213 | since first starting the container) 214 | - their current `Links` don’t include everything in the current configuration (typically because a container they 215 | depend upon has been recreated, but sometimes because an addon changes the configuration) 216 | 217 | If a service is configured to be “stateful” in the Galleyfile, Galley will not recreate it. 218 | This is useful for development database services that would get wiped if that happened, losing hard-won state. The 219 | `--recreate` and `--unprotectStateful` command line options affect these behaviors; see `galley run --help` for 220 | more info. 221 | 222 | Similar to `docker run`, you can provide a command and arguments after the service name to run those instead 223 | of the image’s default CMD. In this case, Galley will let Docker name the container randomly, to avoid naming 224 | conflicts with any other instances of that service that are running. 225 | 226 | You can use the `-a` option to enable any “addons” configured for your services (primary or otherwise). Addons can 227 | bring in additional dependencies or modify environment variables. 228 | 229 | If you’ve configured a “source” directory for the primary service, then you can use the `-s` option to map a local 230 | directory to it. (This is more convenient than `-v` for the common case of regularly mapping to the same 231 | destination.) 232 | 233 | Run also takes a number of parameters that are the equivalent to `docker run` parameters. See `galley run --help` 234 | for a complete list. 
235 | 236 | ### `list` 237 | 238 | **Examples:** 239 | ``` 240 | galley list 241 | ``` 242 | 243 | Prints the name of each service in the Galleyfile, along with the environments it’s configured for and which 244 | addons affect it. 245 | 246 | ### `stop-env` 247 | 248 | **Examples:** 249 | ``` 250 | # Stops all dev containers 251 | galley stop-env dev 252 | ``` 253 | 254 | Stops all containers whose names end in `.`. Useful for freeing up memory in your VM or as a prelude to a 255 | `galley cleanup` to wipe your slate. 256 | 257 | ### `pull` 258 | 259 | **Examples:** 260 | ``` 261 | # Fetches the www image and its “test” environment transitive dependencies 262 | galley pull www.test 263 | 264 | # Fetches “dev” images, including dependencies added by the “beta” addon 265 | galley pull -a beta www.dev 266 | ``` 267 | 268 | Pulls the latest image for the given primary service and any transitive dependencies that come from its 269 | environment. Can take `-a` to include addons in the dependency tree. 270 | 271 | `galley pull` just updates the local Docker images, it doesn’t cause any changes to running containers. A follow-up 272 | `galley run` will recreate any non-“stateful” containers for dependencies whose images have changed. 273 | 274 | ### `cleanup` 275 | 276 | **Examples:** 277 | ``` 278 | galley cleanup 279 | ``` 280 | 281 | Removes any stopped containers that match Galley’s naming conventions, provided they are not for “stateful” 282 | services. Removes their volumes as well. See `galley cleanup --help` for options that affect what’s removed. 283 | 284 | Deletes any dangling Docker images on the machine, to free up disk space. 285 | 286 | ## Galleyfile reference 287 | 288 | A Galleyfile is a JavaScript or CoffeeScript module that exports a configuration hash. The keys for the hash are 289 | the names of services in your system. Each service must have an entry, even if its value is just an empty hash. 
290 | 291 | Additionally, the special `CONFIG` key labels a hash of global configuration values. 292 | 293 | ### Global config 294 | 295 | ```javascript 296 | module.exports = { 297 | CONFIG: { 298 | registry: 'docker.example.biz', 299 | rsync: { 300 | image: 'docker.example.biz/rsync', 301 | module: 'root' 302 | } 303 | } 304 | … 305 | }; 306 | ``` 307 | 308 | **registry:** The Docker registry to use when services have default image names. 309 | 310 | **rsync:** Custom Docker image name and Rsync module name to use to make a container that runs an Rsync daemon. See 311 | [rsync support](#rsync-support) for more information. 312 | 313 | ### Service config 314 | 315 | ```javascript 316 | 'www': { 317 | env: { 318 | 'HOST': 'localhost', 319 | 'PROXY_FAYE': { 320 | 'test': '1' 321 | } 322 | }, 323 | ports: { 324 | 'dev': ['3000:3000'] 325 | }, 326 | links: { 327 | 'dev': ['mongo', 'beanstalk', 'data-service', 'redis'], 328 | 'test': ['mongo'], 329 | 'test.cucumber': ['mongo', 'beanstalk', 'data-service'], 330 | }, 331 | source: '/code/www', 332 | volumesFrom: ['config-files'] 333 | } 334 | ``` 335 | 336 | **addons**: Hash of name to a hash of additional configuration values. Additional configuration can include 337 | `links`, `ports`, `volumesFrom`, and `env`. When the addon is enabled via the `-a` flag to `run` or `pull`, array 338 | values (`links`, `ports`, `volumesFrom`) are concatenated with the service’s base configuration (and any other addons). `env` values are merged, with addons taking precidence over the base values. 339 | 340 | **binds**: Array of “Bind” strings to map host directories into the container. String format matches Docker: 341 | `"host_path:container_path"` 342 | 343 | **command**: Command to override the default from the image. Can either be a string, which Docker will run with 344 | `/bin/sh -c`, or an array of strings, which should be an executable and its arguments. 345 | 346 | **entrypoint**: Override the default entrypoint from the image. 
String path for an executable in the container. 347 | 348 | **env**: Hash of environment variable names and their values to set in the container. If the values are themselves 349 | hashes, they are assumed to be from Galley “env” to value. 350 | 351 | ```javascript 352 | 'my-app': { 353 | env: { 354 | // $HOST will always be "localhost" in the container 355 | 'HOST': 'localhost', 356 | 357 | // "galley run my-app.dev" will set $RAILS_ENV to "development" 358 | // "galley run my-app.test" will set $RAILS_ENV to "test" 359 | // "galley run my-app.test.cucumber" will also set $RAILS_ENV to "test" 360 | // "galley run my-app.other" will not have $RAILS_ENV defined 361 | 'RAILS_ENV': { 362 | 'dev': 'development', 363 | 'test': 'test' 364 | } 365 | } 366 | } 367 | ``` 368 | 369 | **image**: Image name to generate the container from. Defaults to the service’s name from the default registry. 370 | 371 | **links**: Array of links to make to other containers. Elements are either `"service_name"` or 372 | `"service_name:alias"` (where “alias” is the hostname this container will see the service as). Alternately, the value 373 | can be a hash of environment name to array of links. 374 | 375 | ```javascript 376 | 'data-service': { 377 | links: ['data-service-mysql:mysql'] 378 | }, 379 | 380 | 'my-app': { 381 | links: { 382 | 'dev': ['my-app-mysql:mysql', 'data-service'], 383 | 'test': ['my-app-mysql:mysql'] 384 | } 385 | }, 386 | 387 | 'data-service-mysql': { 388 | image: 'docker.example.biz/mysql' 389 | }, 390 | 391 | 'my-app-mysql': { 392 | image: 'docker.example.biz/mysql' 393 | }, 394 | ``` 395 | 396 | **ports**: Array of ports to publish when the service is run as the primary service. Array values are either 397 | `"host_port:container_port"` or `"container_port"`. If a host port is ommitted, Docker will assign a random host 398 | port to proxy in. Alternately, can be a hash of environment name to array of port values. Ports need not be 399 | exposed by the Dockerfile. 
400 | 401 | **restart**: Boolean. If `true`, applies a Docker `RestartPolicy` of “always” to the container. Default is `false`. 402 | 403 | **source**: String path to a source code directory inside the container. If `-s` is provided to `galley run`, Galley 404 | will bind that directory to the source directory in the container. 405 | 406 | **stateful**: Boolean. If `true`, Galley will not remove the container in `galley run` or `galley cleanup`, even if it 407 | is stale or missing links. Can be overridden for a command by the `--unprotectStateful` flag. Default is `false`. 408 | 409 | **user**: User to run the container as. 410 | 411 | **volumesFrom**: Array of services whose containers should be volume-mounted into this service’s container. 412 | Alternately, can be a hash of environment name to array of service names. 413 | 414 | ### Addons 415 | 416 | ```javascript 417 | # EXAMPLE 418 | module.exports = { 419 | … 420 | ADDONS: { 421 | 'beta': { 422 | 'www': { 423 | env: { 424 | 'USE_BETA_SERVICE': '1' 425 | }, 426 | links: ['beta', 'uploader'] 427 | }, 428 | 'uploader': { 429 | env: { 430 | 'USE_BETA_SERVICE': '1' 431 | } 432 | } 433 | } 434 | } 435 | … 436 | }; 437 | ``` 438 | 439 | Addons are extra configurations that can be applied from the command line. An addon can include 440 | additional `links`, `ports`, `volumesFrom`, and `env` values that are merged with a service’s 441 | base configuration. Addons are defined globally because they can affect multiple services. 442 | 443 | 444 | ### .galleycfg reference 445 | 446 | Galley can write a .galleycfg JSON configuration file into `~` when you run `galley config`. 447 | Currently, the only state read from the config file is the default value of the `--rsync` flag. 
448 | 449 | You can write to the .galleycfg file with: 450 | 451 | `galley config set key value` 452 | 453 | 454 | An example .galleycfg: 455 | 456 | ``` 457 | { 458 | "rsync": true 459 | } 460 | ``` 461 | 462 | 463 | ### Best practices 464 | 465 | - Mark any databases as “stateful” to keep them from being automatically recreated. This keeps your local 466 | development data from disappearing on you. 467 | - Use addons for optional dependencies that developers don’t need all the time. 468 | - Only publish ports from your “dev” environment so that they won’t conflict when you run “dev” and “test” 469 | simultaneously. 470 | - Use constants and loops in your Galleyfile if they’ll make your configuration clearer and easier to maintain. 471 | 472 | ### rsync support 473 | 474 | Galley includes built-in support for using rsync to copy local source changes into a container. This provides 475 | a significant speed boost over VirtualBox’s shared folders when working on Mac OS X with `docker-machine`. 476 | 477 | To use it, just add `--rsync` to your `galley run` commands when you use `--source`. 478 | 479 | You can turn on `--rsync` by default with: 480 | ```console 481 | $ galley config set rsync true 482 | ``` 483 | 484 | rsync support requires that an rsync server container be run and volume-mapped in to your service’s 485 | container. By default, Galley downloads and uses [galley/rsync](https://hub.docker.com/r/galley/rsync/), 486 | but you can specify your own container in the `CONFIG` section of your Galleyfile. 487 | 488 | **Caveat:** Galley’s rsyncing goes one way, from outside the container to inside it. Files changed or created 489 | inside the container are not copied back out to the local disk. In the cases where you need to have a 490 | bi-directional mapping, use `--rsync false` to temporarily disable rsync. 491 | 492 | Also note that `--rsync` only affects the `--source` mapping, not any `--volume` mappings that you specify. 
493 | 494 | ### Docker defaults 495 | 496 | Galley uses a handful of defaults when working with Docker containers that we’ve found are appropriate for 497 | development and testing. You should be aware of these, especially if you have a lot of other Docker experience. 498 | If these aren’t working out for you, let us know; we always want to learn about new use cases! 499 | 500 | (In these cases, the “primary service” is the one specified on the command line.) 501 | 502 | - If Galley is being run in a TTY, the primary service’s container is, too (`docker run -t`) 503 | - The primary service container is always run with STDIN allocated (`docker run -i`) 504 | - The primary service container is always removed when Galley stops (`docker run --rm`) 505 | - Volumes are always removed when removing a container (`docker rm -v`) 506 | - Containers are started with an `/etc/hosts` entry that points their service name to 127.0.0.1 507 | 508 | ## Frequently Asked Questions 509 | 510 | #### How is Galley different from Docker Compose? 511 | 512 | There’s a lot of intersection between Galley and Docker Compose for doing development and testing. 513 | Galley has been tuned to how we’ve been using containers, which we’ve been happy with but might 514 | not work for you. You may want to try both and see which is a better fit for your team and your system. 515 | 516 | Some things to highlight: 517 | 518 | * The Galleyfile configuration describes your entire system in one place to capture all of your 519 | dependencies. One team may be actively developing a service and need to run it with local changes, 520 | while another team could just need that service transitively and run it off of an image without 521 | ever cloning the source repo. 522 | 523 | * Docker Compose typically starts and stops several containers as a unit and merges their log 524 | output. Each Galley process focuses on a single container, starting up its dependencies only if 525 | they’re not already running. 
Galley processes share common containers within an environment, which
554 | 555 | Galley uses `gulp` for building: 556 | ``` 557 | $ gulp watch # watches the Galley directory for changes to compile 558 | $ gulp compile # compile galley before running tests (if you’re not running gulp watch) 559 | $ gulp test # runs mocha specs 560 | $ gulp acceptance # builds the acceptance images and runs some acceptance tests 561 | ``` 562 | -------------------------------------------------------------------------------- /lib/commands/run.coffee: -------------------------------------------------------------------------------- 1 | _ = require 'lodash' 2 | chalk = require 'chalk' 3 | Docker = require 'dockerode' 4 | RSVP = require 'rsvp' 5 | util = require 'util' 6 | path = require 'path' 7 | minimist = require 'minimist' 8 | spin = require 'term-spinner' 9 | running = require 'is-running' 10 | 11 | help = require './help' 12 | 13 | ConsoleReporter = require '../lib/console_reporter' 14 | DockerArgs = require '../lib/docker_args' 15 | DockerConfig = require '../lib/docker_config' 16 | DockerUtils = require '../lib/docker_utils' 17 | LocalhostForwarder = require '../lib/localhost_forwarder' 18 | OverlayOutputStream = require '../lib/overlay_output_stream' 19 | Rsyncer = require '../lib/rsyncer' 20 | ServiceHelpers = require '../lib/service_helpers' 21 | StdinCommandInterceptor = require '../lib/stdin_command_interceptor' 22 | 23 | RECREATE_OPTIONS = ['all', 'stale', 'missing-link'] 24 | 25 | # Returns the configuration to pass in to createContainer based on the options (argv) and service 26 | # configuration. 
27 | makeCreateOpts = (imageInfo, serviceConfig, servicesMap, options) -> 28 | containerNameMap = _.mapValues(servicesMap, 'containerName') 29 | 30 | volumesFrom = DockerArgs.formatVolumesFrom(serviceConfig.volumesFrom, containerNameMap) 31 | .concat(serviceConfig.containerVolumesFrom or []) 32 | 33 | createOpts = 34 | 'name': serviceConfig.containerName 35 | 'Image': imageInfo.Id 36 | 'Env': DockerArgs.formatEnvVariables(serviceConfig.env) 37 | 'Labels': 38 | 'io.fabric.galley.primary': 'false' 39 | 'User': "#{serviceConfig.user}" 40 | 'Volumes': DockerArgs.formatVolumes(serviceConfig.volumes) 41 | 'HostConfig': 42 | 'ExtraHosts': ["#{serviceConfig.name}:127.0.0.1"] 43 | 'Links': DockerArgs.formatLinks(serviceConfig.links, containerNameMap) 44 | # Binds actually require no formatting. We pre-process when parsing args to make sure that 45 | # the host path is absolute, but beyond that these are just an array of 46 | # "host_path:container_path" 47 | 'Binds': serviceConfig.binds 48 | 'VolumesFrom': volumesFrom 49 | 50 | if serviceConfig.publishPorts 51 | {portBindings, exposedPorts} = DockerArgs.formatPortBindings(serviceConfig.ports) 52 | createOpts['HostConfig']['PortBindings'] = portBindings 53 | createOpts['ExposedPorts'] = exposedPorts 54 | 55 | # Note container labels and values (as of Docker 1.6) can only be strings 56 | if serviceConfig.primary? 57 | createOpts['Labels']['io.fabric.galley.primary'] = 'true' 58 | createOpts['Labels']['io.fabric.galley.pid'] = "#{process.pid}" 59 | 60 | if serviceConfig.command? 61 | createOpts['Cmd'] = serviceConfig.command 62 | 63 | if serviceConfig.restart 64 | createOpts['HostConfig']['RestartPolicy'] = { 'Name': 'always' } 65 | 66 | if serviceConfig.entrypoint? 67 | # We special case no entrypoint ("--entrypoint=") to an empty array to get 68 | # Docker to use its default non-entrypoint. 
(null / false stuff will get the 69 | # image's default) 70 | createOpts['Entrypoint'] = if serviceConfig.entrypoint is '' then [] else serviceConfig.entrypoint 71 | 72 | if serviceConfig.workdir? 73 | # We allow relative workdirs, which become relative to either / or the image's default workdir, 74 | # if it's set. 75 | defaultWorkingDir = imageInfo.Config.WorkingDir or '/' 76 | createOpts['WorkingDir'] = path.resolve(defaultWorkingDir, serviceConfig.workdir) 77 | 78 | if serviceConfig.attach 79 | _.merge createOpts, 80 | 'Tty': options.stdin?.isTTY 81 | 'OpenStdin': true 82 | # Causes Docker to close the input stream, which will automatically close STDIN due to the pipe 83 | 'StdinOnce': true 84 | 85 | createOpts 86 | 87 | # Downloads an image by name, showing progress on stderr. 88 | # 89 | # Returns a promise that resolves when the download is complete. 90 | downloadServiceImage = (docker, imageName, options) -> 91 | options.reporter.startTask 'Downloading' 92 | 93 | progressLine = options.reporter.startProgress() 94 | 95 | DockerUtils.downloadImage docker, imageName, DockerConfig.authConfig, progressLine.set.bind(progressLine) 96 | .finally -> progressLine.clear() 97 | .then -> options.reporter.succeedTask() 98 | 99 | # Inspects an image by name, downloading it if necessary. 100 | # 101 | # Returns a promise that resolves to a hash of the format: 102 | # image: the image, guaranteed to be locally downloaded 103 | # info: result of Dockerode's inspect 104 | ensureImageAvailable = (docker, imageName, options) -> 105 | image = docker.getImage(imageName) 106 | 107 | DockerUtils.inspectImage image 108 | .catch (err) -> 109 | # A 404 is a legitimate error that an image of that name doesn't exist. So, try to pull it. 
110 | throw err unless err?.statusCode is 404 111 | 112 | downloadServiceImage docker, imageName, options 113 | .then -> 114 | DockerUtils.inspectImage image 115 | 116 | .then ({image, info}) -> 117 | {image, info} 118 | 119 | # Looks for an existing container for the service. If the options are to use a container with 120 | # no set name, pretends it couldn't find anything. 121 | # 122 | # Returns a promise that resolves to a hash of the format: 123 | # container: the container, or null if none was found 124 | # info: result of inspecting the container, or null if it wasn't found 125 | maybeInspectContainer = (docker, name) -> 126 | unless name 127 | RSVP.resolve {container: null, info: null} 128 | else 129 | DockerUtils.inspectContainer docker.getContainer(name) 130 | .then ({container, info}) -> {container, info} 131 | .catch (err) -> 132 | if err?.statusCode is 404 then {container: null, info: null} 133 | else throw err 134 | 135 | # Helper method to check and see if the running container's links differ from the ones we would 136 | # want to create it with. If they do differ, the only recourse is to remove and recreate the 137 | # container, since you may not (currently) modify links of a running container. 138 | isLinkMissing = (containerInfo, createOpts) -> 139 | # These are the links as reported back by Docker, which have the format: 140 | # /sourceContainer:/destContainer/alias 141 | # We do a bit of splitting and joining to convert them back to /sourceContainer:alias so that 142 | # we can compare them directly with the Links parameter we provide to create. 143 | currentLinks = _.map (containerInfo?.HostConfig?.Links or []), (link) -> 144 | [source, dest] = link.split(':') 145 | "#{source}:#{dest.split('/').pop()}" 146 | 147 | # Make a copy since sort mutates. 
148 | requestedLinks = createOpts.HostConfig.Links.concat() 149 | 150 | currentLinks.sort() 151 | requestedLinks.sort() 152 | 153 | if not _.isEqual(currentLinks, requestedLinks) 154 | RSVP.resolve(true) 155 | else 156 | docker = new Docker() 157 | # createOpts.HostConfig.Links looks like ['/container-name.env:link-name', '/foo.dev:foo'] 158 | # We need to use the actual container name, not it's aliased link name to be able to inspect it below. 159 | prereqs = createOpts.HostConfig.Links.map (link) -> link.split(':')[0].slice(1) 160 | prereqContainerInfos = prereqs.map (prereq) -> 161 | DockerUtils.inspectContainer docker.getContainer(prereq) 162 | .then ({container, info}) -> info.Created > containerInfo.Created 163 | RSVP.all _.compact(prereqContainerInfos) 164 | .then _.some 165 | 166 | # Docker API 1.20 switched from a "Volumes" map of container paths to filesystem paths to a "Mounts" 167 | # array of mount information. This function adapts to give the "Volumes" format in all cases. 168 | extractVolumesMap = (containerInfo) -> 169 | if containerInfo.Mounts? 170 | _.tap {}, (volumesMap) -> 171 | for mount in containerInfo.Mounts 172 | volumesMap[mount.Destination] = mount.Source 173 | else 174 | containerInfo.Volumes or {} 175 | 176 | # Compares the paths we expect volumes to have, based on the completedServicesMap, with the paths 177 | # for those volumes from the containerInfo. If there is a discrepency, the container will need to 178 | # be recreated in order to get the latest volumes. 179 | areVolumesOutOfDate = (containerInfo, serviceConfig, completedServicesMap) -> 180 | # This becomes an array of objects that map mount points within the container to directories on 181 | # the Docker host machine, for each service that our service takes its volumesFrom. 
182 | volumePathsArray = _.map (serviceConfig.volumesFrom or []), (service) -> 183 | completedServicesMap[service].volumePaths 184 | 185 | # Given the above, we can then merge down into an empty object to get a single map of mount 186 | # points to paths. We expect that this order is correct if services have colliding VOLUME 187 | # declarations, but YMMV. Best not to get in that situation. 188 | expectedVolumes = _.merge.apply _, [{}].concat volumePathsArray 189 | 190 | # We only validate the paths from expectedVolumes, rather than doing a full deep equality check, 191 | # since the container's *own* VOLUMEs will appear in its Volumes map, along with the ones from 192 | # VolumesFrom (which are the only ones we validate). 193 | containerVolumes = extractVolumesMap containerInfo 194 | for mountPoint, volumePath of expectedVolumes 195 | return true if containerVolumes[mountPoint] isnt volumePath 196 | 197 | return false 198 | 199 | # Returns true if the container's image doesn't match the one from imageInfo, which we looked up 200 | # from the image the service is configured to run with. 201 | isContainerImageStale = (containerInfo, imageInfo) -> 202 | imageInfo.Id isnt containerInfo.Config.Image 203 | 204 | # Logic for whether we should remove / recreate a given container rather than just 205 | # restart or keep it if it exits. 206 | containerNeedsRecreate = (containerInfo, imageInfo, serviceConfig, createOpts, options, servicesMap) -> 207 | # Normally we make sure to start the "primary" service fresh, deleting it if it exists to get a 208 | # clean slate, but we don't do so in case of upReload and instead rely on the staleness / restart 209 | # checks to determine whether it needs a recreation. 
210 | isLinkMissing(containerInfo, createOpts).then (linkIsMissing) -> 211 | if serviceConfig.forceRecreate then true 212 | else if serviceConfig.stateful and not options.unprotectStateful then false 213 | else if linkIsMissing then true 214 | else if areVolumesOutOfDate(containerInfo, serviceConfig, servicesMap) then true 215 | else switch options.recreate 216 | when 'all' then true 217 | when 'stale' then isContainerImageStale(containerInfo, imageInfo) 218 | else false 219 | 220 | # Checks the container metadata contained in labels to determine if the container was started by 221 | # another galley process as the primary container. 222 | containerIsCurrentlyGalleyManaged = (containerInfo) -> 223 | if containerInfo.Config.Labels? and containerInfo.Config.Labels['io.fabric.galley.primary'] is 'true' 224 | pid = parseInt(containerInfo.Config.Labels['io.fabric.galley.pid']) 225 | if pid is not process.pid 226 | running pid 227 | 228 | # If the given container exists, but options are provided, removes the given container. We want to 229 | # clear existing containers so that we can start fresh ones with the correct options configuration. 230 | # 231 | # Returns a promise that resolves to a hash of the format: 232 | # container: the container if it exists and wasn't removed, null if it didn't exist or was removed 233 | maybeRemoveContainer = (container, containerInfo, imageInfo, serviceConfig, createOpts, options, servicesMap) -> 234 | new RSVP.Promise (resolve, reject) -> 235 | unless container? 236 | options.reporter.completeTask 'not found.' 237 | resolve {container: null} 238 | else 239 | containerNeedsRecreate(containerInfo, imageInfo, serviceConfig, createOpts, options, servicesMap).then (needsRecreate) -> 240 | if needsRecreate 241 | # check to see if the container that needs to be recreated is already managed by 242 | # galley (somewhere else). If it is, we can't recreate it, since it will bust that galley 243 | # session. Instead, just error out. 
244 | if containerIsCurrentlyGalleyManaged(containerInfo) 245 | reject "Cannot be recreated, container is managed by another Galley process.\n 246 | Check that all images are up to date, and that addons requested here match 247 | those in the managed Galley container." 248 | options.reporter.completeTask('needs recreate').startTask('Removing') 249 | 250 | # When we want to get rid of a container, we want it gone. Use force to take it out if running. 251 | # Also, since this is for dev / testing purposes, delete associated volumes as well to keep 252 | # them from filling up the disk. 253 | promise = DockerUtils.removeContainer container, { force: true, v: true } 254 | .then -> 255 | options.reporter.succeedTask() 256 | {container: null} 257 | resolve promise 258 | else 259 | options.reporter.succeedTask 'ok' 260 | resolve {container} 261 | 262 | # Makes sure that a container exists for the given service. May delete and recreate a container 263 | # based on the logic of mayRemoveContainer. 264 | # 265 | # Returns a promise that resolves to a hash of the format: 266 | # container: the container, which has been created 267 | # info: result of Dockerode's inspect 268 | ensureContainerConfigured = (docker, imageInfo, service, serviceConfig, options, servicesMap) -> 269 | options.reporter.startTask 'Checking' 270 | 271 | createOpts = makeCreateOpts imageInfo, serviceConfig, servicesMap, options 272 | 273 | maybeInspectContainer docker, serviceConfig.containerName 274 | .then ({container, info: containerInfo}) -> 275 | maybeRemoveContainer container, containerInfo, imageInfo, serviceConfig, createOpts, options, servicesMap 276 | .then ({container}) -> {container, info: containerInfo} 277 | 278 | .then ({container, info}) -> 279 | # At this point, we either have an existing container that we're happy with, or no container 280 | # at all and we need to create it. 281 | if container? 
282 | servicesMap[service].freshlyCreated = false 283 | return {container, info} 284 | 285 | options.reporter.startTask 'Creating' 286 | DockerUtils.createContainer docker, createOpts 287 | .then ({container}) -> 288 | servicesMap[service].freshlyCreated = true 289 | DockerUtils.inspectContainer container 290 | 291 | .then ({container, info}) -> 292 | options.reporter.succeedTask() 293 | {container, info} 294 | 295 | # Makes sure that the container described by info is started, starting it up and unpausing it if 296 | # necessary. 297 | # 298 | # Returns a promise that resolves to a hash of the format: 299 | # container: the passed-through container, now started 300 | # info: an up-to-date inspection of the container 301 | ensureContainerRunning = (container, info, service, serviceConfig, options) -> 302 | actionPromise = null 303 | 304 | unless info.State.Running 305 | options.reporter.startTask 'Starting' 306 | actionPromise = DockerUtils.startContainer(container) 307 | else if info.State.Paused 308 | options.reporter.startTask 'Unpausing' 309 | actionPromise = DockerUtils.unpauseContainer(container) 310 | else 311 | # Nothing to do, so short-circuit. 312 | return RSVP.resolve {container, info} 313 | 314 | actionPromise 315 | .then -> 316 | DockerUtils.inspectContainer(container) 317 | .then ({container, info}) -> 318 | options.reporter.succeedTask() 319 | if serviceConfig.containerName is '' 320 | options.reporter.completeTask "#{chalk.gray 'Running as:'} #{chalk.bold info.Name.substring(1)}" 321 | {container, info} 322 | 323 | # If options.attach is true, calls DockerUtils.attachContainer to get a stream. We do this before 324 | # the container starts to make sure we get everything. 325 | # 326 | # Note that this only gets the container's output streams. We do input separately so that we can 327 | # close input (e.g. from non-interactive sources like pipes) and still read the rest of the 328 | # output. 
329 | # 330 | # Resolves to a promise with a hash of the format: 331 | # container: passed through container 332 | # stream: attachment stream for the container, or null if the container shouldn't be attached 333 | maybeAttachStream = (container, serviceConfig) -> 334 | new RSVP.Promise (resolve, reject) -> 335 | if serviceConfig?.attach 336 | promise = DockerUtils.attachContainer container, 337 | stream: true 338 | stdin: false 339 | stdout: true 340 | stderr: true 341 | .then ({container, stream}) -> {container, stream} 342 | resolve promise 343 | else 344 | resolve {container, stream: null} 345 | 346 | # Pipes the provided container and its stream to stdout/stderr. If "stream" is null, just 347 | # resolves immediately. 348 | # 349 | # Does a second attachment to stdin, which is kept separate so that stdin can close and still 350 | # allow stdout info to flow. 351 | # 352 | # Returns a promise that resolves when the stream "ends," a sign that either the process has 353 | # completed within the container, the user has detached or issued another CTRL-P command, or the 354 | # process is being restarted by the RestartPolicy. 
# The promise has a hash of the format:
#   container: the passed-through container
#   resolution: one of the following values:
#     "end": output stream has closed, either because the command completed or it's restarting
#     "detach": the user has triggered a detachment from the running container
#     "stop": the user has requested we stop the container
#     "reload": the user has requested Galley re-run, re-checking dependencies and recreating the
#       primary container if necessary
#     "unattached": we weren't asked to attach in the first place
maybePipeStdStreams = (container, outputStream, options) ->
  # A null outputStream means maybeAttachStream was told not to attach, so short-circuit.
  return RSVP.resolve({container, resolution: 'unattached'}) if outputStream is null

  # Second attachment, input-only: output continues to arrive on outputStream.
  DockerUtils.attachContainer container,
    stream: true
    stdin: true
    stdout: false
    stderr: false
  .then ({container, stream: inputStream}) ->
    options.stdinCommandInterceptor.start(inputStream)

    # Tells the container how big we are so that shell output looks nice, and keeps that
    # information up-to-date if you resize the host terminal window. We ignore the promises that
    # resizeContainer returns since it's ok to be async and/or fail.
    resizeHandler = -> DockerUtils.resizeContainer container, options.stdout

    # We declare these in a scope outside of the RSVP.Promise callback so that we can reference
    # them from the finally to removeListener them. (They must be defined inside of the RSVP.Promise
    # callback to have access to the resolve / reject callbacks.)
    stdinCommandInterceptorHandler = null
    outputStreamEndHandler = null

    new RSVP.Promise (resolve, reject) ->
      # This handler fires first if we intercept a command from the container.
      stdinCommandInterceptorHandler = ({command}) ->
        options.stdinCommandInterceptor.stop()

        # We need to manually disconnect the output stream, or else Docker may keep it open,
        # causing doubled output if we reattach.
        outputStream.destroy()

        resolve {container, resolution: command}

      # This handler fires first if the container exits cleanly, is stopped/killed externally, or
      # its process ends and is restarted by the RestartPolicy.
      outputStreamEndHandler = ->
        options.stdinCommandInterceptor.stop()
        resolve {container, resolution: 'end'}

      options.stdinCommandInterceptor.on 'command', stdinCommandInterceptorHandler
      outputStream.on 'end', outputStreamEndHandler

      if options.stdout.isTTY
        outputStream.setEncoding 'utf8'

        # For TTY we have a blended output of both STDOUT and STDERR
        outputStream.pipe options.stdout, end: false

        resizeHandler()
        options.stdout.on 'resize', resizeHandler
      else
        # Close our output streams when the container's output ends, but never close the
        # process's own stderr. Errors from closing are deliberately best-effort.
        outputStream.on 'end', ->
          try options.stdout.end() catch # ignore
          try options.stderr != process.stderr && options.stderr.end() catch # ignore

        # For non-TTY, we keep stdout and stderr separate, and pipe them appropriately to our
        # process's streams.
        container.modem.demuxStream outputStream, options.stdout, options.stderr

    .finally ->
      # Unhook everything so a later re-attachment (pipeStreamsLoop) starts clean.
      options.stdout.removeListener 'resize', resizeHandler
      options.stdinCommandInterceptor.removeListener 'command', stdinCommandInterceptorHandler
      outputStream.removeListener 'end', outputStreamEndHandler

# Records the started container's name and exported volume paths for "service" in
# completedServicesMap, for use by dependent services and HUP reloads.
updateCompletedServicesMap = (service, serviceConfig, containerInfo, completedServicesMap) ->
  completedServicesMap[service].containerName = containerInfo.Name

  exportedMounts = _.keys (containerInfo.Config.Volumes or {})
  exportedPaths = _.pick extractVolumesMap(containerInfo), exportedMounts

  # This will be a hash of "destination" paths (those inside the container) to
  # "source" paths in Docker's volume filesystems.
  completedServicesMap[service].volumePaths = exportedPaths

# Starts a given service, including downloading, creating, removing, and restarting and whatever
# else is necessary to get it going. Meant to be called in a loop with prerequisite services
# already started.
#
# If "options.primary" is false (or null) starts the service in the background with default options
# for its environment.
#
# Mutates the completedServicesMap to reflect any new state changes.
#
# Returns a promise that resolves to a hash of the format:
#   container: The container that was created
#   resolution: see maybePipeStdStreams
#
# If the container was attached, the promise resolves when the container's process completes, or
# when the stream is explicitly detached by the user. If the container was not attached, the
# promise resolves once the container has started.
startService = (docker, serviceConfig, service, options, completedServicesMap) ->
  # We write out our name as a prefix to both status messages and error messages
  options.reporter.startService service

  # This should never happen, since services in the prereq list should be unique
  if completedServicesMap[service]
    throw "Service already completed: #{service}"

  completedServicesMap[service] =
    containerName: null
    freshlyCreated: null
    volumePaths: null

  ensureImageAvailable docker, serviceConfig.image, options
  .then ({image, info: imageInfo}) ->
    ensureContainerConfigured docker, imageInfo, service, serviceConfig, options, completedServicesMap

  .then ({container, info: containerInfo}) ->
    # Attach before starting so we can be sure to get all of the output
    maybeAttachStream container, serviceConfig
    .then ({container, stream}) -> {container, stream, info: containerInfo}

  .then ({container, stream, info: containerInfo}) ->
    ensureContainerRunning container, containerInfo, service, serviceConfig, options
    .then ({container, info: containerInfo}) ->
      options.reporter.finish() unless options.leaveReporterOpen

      updateCompletedServicesMap service, serviceConfig, containerInfo, completedServicesMap

      forwarderReceipt = null

      maybeForwardPromise = if serviceConfig.localhost
        # Re-inspect now that the container is running so the published ports are populated.
        DockerUtils.inspectContainer container
        .then ({info}) ->
          # NetworkSettings.Ports looks like:
          #   { '3080/tcp': [ { HostIp: '0.0.0.0', HostPort: '3080' } ],
          #     '3081/tcp': [ { HostIp: '0.0.0.0', HostPort: '49180' } ] }
          #
          # Fix: read ports off the fresh "info" from this inspect. The previous code read the
          # stale outer "containerInfo", which made this re-inspection a no-op.
          ports = []
          for source, outs of (info.NetworkSettings.Ports or {})
            ports.push parseInt(outs[0].HostPort, 10)

          if ports.length
            forwarderReceipt = options.localhostForwarder.forward(ports)
      else
        RSVP.resolve()

      maybeForwardPromise
      .then ->
        pipeStreamsLoop container, stream, serviceConfig, options
      .finally ->
        forwarderReceipt.stop() if forwarderReceipt
      .then ({container, resolution}) -> {container, resolution}

# Calls maybePipeStdStreams and then loops to keep calling it if the stream ends while the
# container is still running. This lets us re-attach input and output streams when the container's
# process dies but is restarted by a RestartPolicy.
#
# Returns a promise that resolves to maybePipeStdStreams's container/resolution hash.
pipeStreamsLoop = (container, stream, serviceConfig, options) ->
  maybePipeStdStreams container, stream, options
  .then ({container, resolution}) ->
    if resolution is 'end'
      DockerUtils.inspectContainer container
      .then ({container, info}) ->
        # If the container is still going despite the stream having ended then we should try
        # to re-attach. It's likely that the container's RestartPolicy restarted the process.
        #
        # (The first call to maybeAttachStream happened before starting the service initially, back
        # in startService, which is why this call is down here and not at the beginning of
        # pipeStreamsLoop.)
        if info.State.Running or info.State.Restarting
          maybeAttachStream container, serviceConfig
          .then ({container, stream}) ->
            pipeStreamsLoop container, stream, serviceConfig, options
        else
          {container, resolution}
    else
      {container, resolution}

# We chown anything under the "source" directory to the original owner of the "source"
# directory, since if a Docker command created any files they'll be owned by root, which
# can cause problems when the directory is mapped out to the host.
#
# Returns a promise that resolves to true if we repaired source ownership, false otherwise.
maybeRepairSourceOwnership = (docker, config, service, options) ->
  serviceConfig = config[service] or {}
  # Only applies when the user asked for it and the service actually maps in source.
  unless options.repairSourceOwnership and serviceConfig.source?
    return RSVP.resolve(false)

  # This one-liner gets the user / group information via stat from the current directory ("."),
  # which we've set with WorkingDir to the service's source directory. It then recursively chowns
  # every file in that directory to that user / group.
  repairScript = "chown -R $(stat --format '%u:%g' .) ."

  createOpts =
    'Image': serviceConfig.image
    'Entrypoint': []
    'WorkingDir': serviceConfig.source
    'Cmd': [ 'bash', '-c', repairScript ]
    # We don't set ports or links or anything, as that's not relevant, but we do need the directory
    # bindings to affect files on the host machine.
    'HostConfig':
      'Binds': serviceConfig.binds

  options.reporter.startTask 'Repairing source ownership'

  DockerUtils.createContainer(docker, createOpts)
  .then ({container}) ->
    DockerUtils.startContainer container
  .then ({container}) ->
    DockerUtils.waitContainer container
  .then ({container, result}) ->
    # Clear out volumes to try and keep them from accumulating
    DockerUtils.removeContainer container, { v: true }
    .then ->
      if result.StatusCode is 0
        options.reporter.succeedTask().finish()
      else
        options.reporter.error("Failed with exit code #{result.StatusCode}")
  # NOTE(review): resolves true even when the chown container exited non-zero — "true" here
  # means "a repair was attempted", not "the repair succeeded".
  .then -> true

# Prints instructions for reattaching to / removing a container the user has detached from.
printDetachedMessage = (container, options) ->
  DockerUtils.inspectContainer(container)
  .then ({container, info}) ->
    # Docker reports names canonically as beginning with a '/', which looks lame. Remove it.
    name = info.Name.replace /^\//, ''

    options.reporter.message ''
    options.reporter.message ''
    options.reporter.message chalk.gray('Container detached: ') + chalk.bold(name)
    options.reporter.message chalk.gray('Reattach with:      ') + "docker attach #{name}"
    options.reporter.message chalk.gray('Remove with:        ') + "docker rm -fv #{name}"

# Gets the status code of a container, then removes it. Reports an error if the container did not
# exit cleanly with a code of 0.
#
# Resolves to a hash of the format:
#   container: the container
#   statusCode: if not null, the status code of the completed process
finalizeContainer = (container, options) ->
  DockerUtils.inspectContainer container
  .then ({container, info}) ->
    statusCode = info.State.ExitCode

    if statusCode? and statusCode isnt 0
      options.reporter.error "#{info.Config.Cmd.join ' '} failed with exit code #{statusCode}"

    # Since the process exited, we remove the container. (Equivalent of --rm in Docker.)
    DockerUtils.removeContainer container, { v: true }
    .then -> {container, statusCode}

# Returns a callback suitable for sending to Rsyncer's "watch" method. Keeps a general status of
# "watching" or "syncing" with the directory being watched. When a sync is complete, flashes
# a "synched" message with the amount of time that the sync took.
#
# Writes the status message using options.stdout, which is assumed to be an OverlayOutputStream,
# and reports error messages using the standard reporter.
makeRsyncerWatchCallback = (options) ->
  # lastTime marks when the first change was seen so "synched" can report total elapsed time.
  lastTime = null
  spinner = spin.new()

  (status, source, files, error) ->
    switch status
      when 'watching'
        options.stdout.setOverlayStatus? "Watching #{path.basename source}…"
      when 'changed'
        lastTime = Date.now() if lastTime is null
      when 'syncing'
        spinner.next()
        options.stdout.setOverlayStatus? "#{spinner.current} Synching #{path.basename source}…"
      when 'synched'
        files = _.uniq files
        if files.length is 1
          desc = path.basename(files[0])
        else
          desc = "#{files.length} files"

        options.stdout.flashOverlayMessage? "Synched #{desc} (#{Date.now() - lastTime}ms)"
        lastTime = null
      when 'error'
        options.reporter.error error

# Wrapper before startup to set up mapping source into the service's container, if requested. If
# no --source flag is provided, no-ops.
#
# If --source is provided but not --rsync, modifies the service's config to add a "binds" entry to
# bring the --source value in to the container.
#
# If --rsync is also specified, starts up an rsync container to hold the source, performs a sync,
# and starts a watcher. Additionally modifies the service's config to bring the rsync container's
# volume in as a volumesFrom.
#
# If rsync is not needed, resolves to an empty hash.
# Otherwise, resolves to a hash with the format:
#   rsyncer: The Rsyncer object, useful for stopping later
prepareServiceSource = (docker, globalConfig, config, service, env, options) ->
  # We do a lot of short-circuiting returns up top to avoid the extra indentation

  primaryServiceConfig = config[service]

  unless options.source
    return RSVP.resolve({})

  unless options.rsync
    # Plain --source: bind-mount the host directory straight into the container.
    primaryServiceConfig.binds.push "#{options.source}:#{primaryServiceConfig.source}"
    return RSVP.resolve({})

  rsyncConfig = globalConfig.rsync
  unless rsyncConfig?.image and rsyncConfig?.module
    return RSVP.reject '--rsync requires CONFIG.rsync image and module definitions'

  rsyncPort = rsyncConfig.port or 873
  suffix = rsyncConfig.suffix or 'rsync'

  # Synthesize a service config for the rsync sidecar container that exports the source volume.
  rsyncServiceConfig = _.merge {}, ServiceHelpers.DEFAULT_SERVICE_CONFIG,
    containerName: "#{service}.#{suffix}"
    image: rsyncConfig.image
    ports: ["#{rsyncPort}"]
    publishPorts: true
    volumes: [primaryServiceConfig.source]

  # We tell startService to not "finish" the reporter's service so that we can include a
  # "syncing" task on the same line.
  #
  # TODO(finneganh): Make this less awkward.
  options = _.merge {}, options, leaveReporterOpen: true

  startService docker, rsyncServiceConfig, "#{service} (rsync)", options, {}
  .then ({container}) ->
    DockerUtils.inspectContainer container
  .then ({container, info}) ->
    # Now that we have the container running, make sure that the primary service will pull in its
    # volume for source code.
    primaryServiceConfig.containerVolumesFrom.push info.Name

    options.reporter.startTask 'Syncing'
    progressLine = options.reporter.startProgress()
    # We don't have any text to display here, we just want the little progress spinner to spin.
    activityCb = -> progressLine.set ''

    # We do this to find what port rsync has been mapped to on the container host. We let it be
    # random (no ":" in the ports: value above) to avoid collision with other rsync containers.
    rsyncPortInfo = info.NetworkSettings.Ports["#{rsyncPort}/tcp"]

    rsyncer = new Rsyncer
      src: options.source
      dest: primaryServiceConfig.source
      # In boot2docker cases, local rsync will need to connect to the VM, which is what the Docker
      # modem has been talking to. If docker is running locally, the modem probably doesn't have a
      # host value (it would use 'socketPath' instead) so assume that 'localhost' will work.
      host: docker.modem.host or 'localhost'
      port: rsyncPortInfo[0].HostPort
      module: rsyncConfig.module

    # We do an initial sync before starting any other services so that the container will have
    # its latest files for when it starts up.
    rsyncer.sync activityCb
    .finally -> progressLine.clear()
    .then ->
      options.reporter.succeedTask().finish()

      rsyncer.watch makeRsyncerWatchCallback(options)

      {rsyncer}

# Starts up a dependency chain of services. services array must be in order so that dependencies
# come earlier.
#
# Returns a promise that resolves to the completedServicesMap, with service names keyed to hashes
# of the format:
#   containerName: name of the started container
#   freshlyCreated: true if the container was created this Galley run
#   volumePaths: map of container path -> host path for all volumes this container exports
startServices = (docker, config, services, options) ->
  completedServicesMap = {}

  # Chain the service startups sequentially so each prereq is up before its dependents start.
  loopPromise = RSVP.resolve()
  _.forEach services, (service) ->
    loopPromise = loopPromise.then ->
      startService docker, config[service], service, options, completedServicesMap

  loopPromise.then -> completedServicesMap

# Parses out our command line args to return an object of the format:
#
#   service: name of the service we're starting up
#   env: ".env" suffix to use when configuring and naming the service and its prereqs
#   options: global modifications to our behavior
#   serviceConfigOverrides: values to merge into the Galleyfile configuration for the service
parseArgs = (args) ->
  argv = minimist args,
    # stopEarly allows --opts after the service name to be passed along to the container in command
    stopEarly: true
    boolean: [
      'as-service'
      'detach'
      'localhost'
      'publish-all'
      'repairSourceOwnership'
      'restart'
      'rsync'
      'unprotectStateful'
    ]
    alias:
      'add': 'a'
      'detach': 'd'
      'env': 'e'
      'publish-all': 'P'
      'source': 's'
      'user': 'u'
      'volume': 'v'
      'workdir': 'w'

  # The positional argument is "<service>.<env>"; env may itself contain dots.
  [service, envArr...] = (argv._[0] or '').split '.'
  env = envArr.join '.'

  options =
    recreate: 'stale'

  _.merge options, _.pick argv, [
    'recreate'
    'unprotectStateful'
  ]

  # Provide support for pulling these options from the galleycfg file.
  # Since minimist automatically fills in "false" for absent boolean flags,
  # we need to look and see if the flag was actually set in the args,
  # then decide whether or not to include it in options, allowing
  # settings in the galleycfg file to be overridden by command line arguments.
  if '--rsync' in args
    _.merge options, _.pick argv, 'rsync'
  if '--repairSourceOwnership' in args
    _.merge options, _.pick argv, 'repairSourceOwnership'

  options.add = ServiceHelpers.normalizeMultiArgs argv.add
  options.source = path.resolve(argv.source) if argv.source

  if RECREATE_OPTIONS.indexOf(options.recreate) is -1
    throw "Unrecognized recreate option: '#{options.recreate}'"

  # Set up values to be merged in to the Galleyfile configuration for the primary service.
  serviceConfigOverrides =
    # Causes us to bind to stdin / stdout / stderr on starting up
    attach: true
    # Used to hold --volume values off the command line
    binds: []
    # List of containers to bring in volumes from. Not overlain on serviceConfig's volumesFrom
    # directly since that is a list of services that are started as pre-reqs, while these are
    # assumed to be containers.
    containerVolumesFrom: []
    env: {}
    # We will always want this service to be started completely fresh, to avoid any stale state
    forceRecreate: true
    # Also default to mapping this service's ports to the host
    publishPorts: true
    primary: true

  # The first element of argv._ is the service name, so if there's anything past that it means that
  # the user is specifying a command. In that case, we pull in that command, make the container
  # anonymous (so that it doesn't collide with a default version of the service already running),
  # and also don't publish ports to avoid collision. The --as-service flag disables the lack of
  # naming and port binding.
  if argv._.length > 1 and not argv['as-service']
    _.merge serviceConfigOverrides,
      command: argv._.slice(1)
      containerName: ''
      publishPorts: false
      restart: false

  _.merge serviceConfigOverrides, _.pick argv, [
    'entrypoint'
    'localhost'
    'user'
    'workdir'
  ]

  if argv['volumes-from']
    _.merge serviceConfigOverrides.containerVolumesFrom, ServiceHelpers.normalizeMultiArgs(argv['volumes-from'])

  # Same "was the flag actually passed?" dance as --rsync above, since these are booleans too.
  if '--restart' in args
    _.merge serviceConfigOverrides, _.pick argv, 'restart'

  if '--publish-all' in args or '-P' in args
    serviceConfigOverrides.publishPorts = argv['publish-all']

  # Type coercion to an array from either an array or a single value, or undefined.
  #
  # Adding to the "env" map will merge these values over any env that the service config has,
  # rather than replacing the "env" wholesale. This has the desired behavior of the command line
  # overriding the config values as well.
  for envVar in [].concat(argv.env or [])
    # Split on the first "=" only, so values may themselves contain "=".
    [name, val] = envVar.split /=(.+)/
    serviceConfigOverrides.env[name] = val

  if argv.detach then serviceConfigOverrides.attach = false
  if argv.name? then serviceConfigOverrides.containerName = argv.name
  if argv.volume?
    volumes = ServiceHelpers.normalizeVolumeArgs(argv.volume)
    serviceConfigOverrides.binds = serviceConfigOverrides.binds.concat(volumes)

  {service, env, options, serviceConfigOverrides}

# Method to actually perform the command, broken out so we can call it recursively in the case
# of a HUP reload.
#
# Resolves to promise with the hash:
#   statusCode: the statusCode of the container's process if it ran to completion, 0 if we detached,
#     or -1 if there was an error.
go = (docker, servicesConfig, services, options) ->
  # TODO(finneganh): Don't assume that the last service is the primary one once we implement
  # triggers. (Note: pop mutates the caller's array; the reload case below concats it back on.)
  service = services.pop()

  startServices docker, servicesConfig, services, options
  .then (completedServicesMap) ->
    # Pass through completedServicesMap so we can re-use any auto-generated name for the
    # service container when HUP-reloading below.
    startService docker, servicesConfig[service], service, options, completedServicesMap
    .then ({container, resolution}) -> {container, resolution, completedServicesMap}

  .then ({container, resolution, completedServicesMap}) ->
    switch resolution
      when 'unattached' then {statusCode: 0}

      when 'reload'
        options.reporter.message()
        options.reporter.message chalk.gray "#{chalk.bold 'Reload' } requested. Rechecking containers.\n"

        # If we're going around again, re-use the same container name, even if
        # auto-created. Strip off the leading '/' though or inspecting by name won't work.
        # We also disable the "forceRecreate" behavior so that if the container doesn't need
        # to be recreated due to volume / link changes it won't be.
        primaryServiceConfig = servicesConfig[service]
        primaryServiceConfig.containerName ||= completedServicesMap[service].containerName.replace /^\//, ''
        primaryServiceConfig.forceRecreate = false

        # We have to add "service" back on to the list of prereqs.
        #
        # TODO(finneganh): Clean this up a bit when triggers are in place and the
        # primary service is less special.
        go docker, servicesConfig, services.concat(service), options

      when 'detach'
        printDetachedMessage(container, options)
        .then -> {statusCode: null}

      when 'stop'
        DockerUtils.stopContainer container
        .then ->
          maybeRepairSourceOwnership docker, servicesConfig, service, options
        .then ->
          DockerUtils.removeContainer container
        .then ->
          # The official status code tends to be -1 when we stop the container forcefully, but
          # that looks weird so we fake it as a 0.
          {statusCode: 0}

      when 'end'
        maybeRepairSourceOwnership docker, servicesConfig, service, options
        .then ->
          finalizeContainer container, options
        .then ({statusCode}) -> {statusCode}

      else throw "UNKNOWN SERVICE RESOLUTION: #{resolution}"

  .catch (err) ->
    # Report non-empty string errors and Docker API errors (which carry a .json payload).
    # Fix: guard the .json access with ?. — the old "err.json?" dereferenced a null/undefined
    # err and threw a TypeError from inside the catch handler.
    if (typeof err is 'string' and err isnt '') or err?.json?
      message = err.json?.message?.trim() or ((err if typeof err is 'string') or err.message or 'Unknown error').trim()
      message = message.replace /^Error: /, ''
      options.reporter.error chalk.bold('Error:') + ' ' + message

    options.reporter.finish()
    options.stderr.write err?.stack if err?.stack

    {statusCode: -1}

module.exports = (args, commandOptions, done) ->
  {service, env, options, serviceConfigOverrides} = parseArgs(args)
  options = _.merge({}, commandOptions['globalOptions'], options)

  unless service? and not _.isEmpty(service)
    return help args, commandOptions, done

  docker = new Docker()

  options.stdin = commandOptions.stdin or process.stdin
  options.stderr = commandOptions.stderr or process.stderr
  options.stdout = commandOptions.stdout or new OverlayOutputStream(process.stdout)

  options.reporter = commandOptions.reporter or new ConsoleReporter(options.stderr)
  options.stdinCommandInterceptor = new StdinCommandInterceptor(options.stdin)

  options.localhostForwarder = new LocalhostForwarder(docker.modem, options.reporter)

  # Fix: the "<env>" placeholder was stripped from this message by tooling; restore it so the
  # user sees the expected "service.env" format.
  throw "Missing env for service #{service}. Format: #{service}.<env>" unless env

  {globalConfig, servicesConfig} = ServiceHelpers.processConfig(commandOptions.config, env, options.add)

  primaryServiceConfig = servicesConfig[service]
  _.merge primaryServiceConfig, serviceConfigOverrides

  # We want to generate this before prepareServiceSource so that its potential modifications
  # to "volumesFrom" don't appear as additional prereq services.
  services = ServiceHelpers.generatePrereqServices(service, servicesConfig)

  # Forward SIGHUP into the stdin interceptor so a hang-up triggers the "reload" resolution.
  sighupHandler = options.stdinCommandInterceptor.sighup.bind(options.stdinCommandInterceptor)
  process.on 'SIGHUP', sighupHandler

  prepareServiceSource docker, globalConfig, servicesConfig, service, env, options
  .then ({rsyncer}) ->
    go docker, servicesConfig, services, options
    .finally ->
      rsyncer?.stop()
  .then ({statusCode}) ->
    process.removeListener 'SIGHUP', sighupHandler
    options.stdinCommandInterceptor.stop()
    done statusCode
  .catch (err) ->
    console.error "UNCAUGHT EXCEPTION IN RUN COMMAND"
    console.error err
    console.error err?.stack if err?.stack
    process.exit 255

# Exposed for unit testing
module.exports.parseArgs = parseArgs