├── compose ├── cli │ ├── __init__.py │ ├── formatter.py │ ├── colors.py │ ├── multiplexer.py │ ├── docker_client.py │ ├── docopt_command.py │ ├── verbose_proxy.py │ ├── errors.py │ ├── log_printer.py │ ├── utils.py │ └── command.py ├── __init__.py ├── progress_stream.py ├── container.py ├── project.py └── config.py ├── tests ├── unit │ ├── __init__.py │ ├── cli │ │ ├── __init__.py │ │ ├── docker_client_test.py │ │ └── verbose_proxy_test.py │ ├── progress_stream_test.py │ ├── split_buffer_test.py │ ├── log_printer_test.py │ ├── container_test.py │ ├── cli_test.py │ ├── sort_service_test.py │ └── project_test.py ├── integration │ ├── __init__.py │ └── testcases.py ├── fixtures │ ├── env-file │ │ ├── test.env │ │ └── docker-compose.yml │ ├── no-composefile │ │ └── .gitignore │ ├── env │ │ ├── two.env │ │ ├── resolve.env │ │ └── one.env │ ├── simple-dockerfile │ │ ├── docker-compose.yml │ │ └── Dockerfile │ ├── build-ctx │ │ └── Dockerfile │ ├── build-path │ │ └── docker-compose.yml │ ├── dockerfile_with_entrypoint │ │ ├── docker-compose.yml │ │ └── Dockerfile │ ├── dockerfile-with-volume │ │ └── Dockerfile │ ├── multiple-composefiles │ │ ├── compose2.yml │ │ └── docker-compose.yml │ ├── user-composefile │ │ └── docker-compose.yml │ ├── volume-path │ │ ├── common │ │ │ └── services.yml │ │ └── docker-compose.yml │ ├── extends │ │ ├── common.yml │ │ ├── nested-intermediate.yml │ │ ├── nested.yml │ │ ├── circle-1.yml │ │ ├── circle-2.yml │ │ └── docker-compose.yml │ ├── longer-filename-composefile │ │ └── docker-compose.yaml │ ├── commands-composefile │ │ └── docker-compose.yml │ ├── environment-composefile │ │ └── docker-compose.yml │ ├── ports-composefile │ │ └── docker-compose.yml │ ├── UpperCaseDir │ │ └── docker-compose.yml │ ├── simple-composefile │ │ └── docker-compose.yml │ └── links-composefile │ │ └── docker-compose.yml └── __init__.py ├── .dockerignore ├── bin └── docker-compose ├── .gitignore ├── script ├── clean ├── shell ├── build-linux ├── 
build-linux-inner ├── build-osx ├── docs ├── test ├── wrapdocker ├── dev ├── ci ├── test-versions ├── .validate ├── validate-dco └── dind ├── MAINTAINERS ├── requirements.txt ├── requirements-dev.txt ├── MANIFEST.in ├── wercker.yml ├── tox.ini ├── docs ├── Dockerfile ├── mkdocs.yml ├── completion.md ├── env.md ├── install.md ├── production.md ├── wordpress.md ├── django.md ├── rails.md ├── cli.md ├── index.md ├── yml.md └── extends.md ├── Dockerfile ├── setup.py ├── ROADMAP.md ├── SWARM.md ├── README.md ├── CONTRIBUTING.md ├── contrib └── completion │ └── bash │ └── docker-compose ├── LICENSE └── CHANGES.md /compose/cli/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit/cli/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | venv 3 | -------------------------------------------------------------------------------- /tests/integration/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/fixtures/env-file/test.env: -------------------------------------------------------------------------------- 1 | FOO=1 -------------------------------------------------------------------------------- /tests/fixtures/no-composefile/.gitignore: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /tests/fixtures/env/two.env: -------------------------------------------------------------------------------- 1 | FOO=baz 2 | DOO=dah 3 | -------------------------------------------------------------------------------- /tests/fixtures/simple-dockerfile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | simple: 2 | build: . 3 | -------------------------------------------------------------------------------- /tests/fixtures/build-ctx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM busybox:latest 2 | CMD echo "success" 3 | -------------------------------------------------------------------------------- /tests/fixtures/build-path/docker-compose.yml: -------------------------------------------------------------------------------- 1 | foo: 2 | build: ../build-ctx/ 3 | -------------------------------------------------------------------------------- /tests/fixtures/dockerfile_with_entrypoint/docker-compose.yml: -------------------------------------------------------------------------------- 1 | service: 2 | build: . 
3 | -------------------------------------------------------------------------------- /bin/docker-compose: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from compose.cli.main import main 3 | main() 4 | -------------------------------------------------------------------------------- /tests/fixtures/env/resolve.env: -------------------------------------------------------------------------------- 1 | FILE_DEF=F1 2 | FILE_DEF_EMPTY= 3 | ENV_DEF 4 | NO_DEF 5 | -------------------------------------------------------------------------------- /tests/fixtures/simple-dockerfile/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM busybox:latest 2 | CMD echo "success" 3 | -------------------------------------------------------------------------------- /tests/fixtures/dockerfile-with-volume/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM busybox 2 | VOLUME /data 3 | CMD sleep 3000 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.egg-info 2 | *.pyc 3 | .tox 4 | /build 5 | /dist 6 | /docs/_site 7 | /venv 8 | docker-compose.spec 9 | -------------------------------------------------------------------------------- /tests/fixtures/dockerfile_with_entrypoint/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM busybox:latest 2 | ENTRYPOINT echo "From prebuilt entrypoint" 3 | -------------------------------------------------------------------------------- /tests/fixtures/multiple-composefiles/compose2.yml: -------------------------------------------------------------------------------- 1 | yetanother: 2 | image: busybox:latest 3 | command: /bin/sleep 300 4 | -------------------------------------------------------------------------------- 
/script/clean: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | find . -type f -name '*.pyc' -delete 3 | rm -rf docs/_site build dist docker-compose.egg-info 4 | -------------------------------------------------------------------------------- /tests/fixtures/env-file/docker-compose.yml: -------------------------------------------------------------------------------- 1 | web: 2 | image: busybox 3 | command: /bin/true 4 | env_file: ./test.env 5 | -------------------------------------------------------------------------------- /tests/fixtures/user-composefile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | service: 2 | image: busybox:latest 3 | user: notauser 4 | command: id 5 | -------------------------------------------------------------------------------- /tests/fixtures/volume-path/common/services.yml: -------------------------------------------------------------------------------- 1 | db: 2 | image: busybox 3 | volumes: 4 | - ./foo:/foo 5 | - ./bar:/bar 6 | -------------------------------------------------------------------------------- /compose/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from .service import Service # noqa:flake8 3 | 4 | __version__ = '1.1.0' 5 | -------------------------------------------------------------------------------- /tests/fixtures/extends/common.yml: -------------------------------------------------------------------------------- 1 | web: 2 | image: busybox 3 | command: /bin/true 4 | environment: 5 | - FOO=1 6 | - BAR=1 7 | -------------------------------------------------------------------------------- /tests/fixtures/longer-filename-composefile/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | definedinyamlnotyml: 2 | image: busybox:latest 3 | command: /bin/sleep 300 
-------------------------------------------------------------------------------- /tests/fixtures/volume-path/docker-compose.yml: -------------------------------------------------------------------------------- 1 | db: 2 | extends: 3 | file: common/services.yml 4 | service: db 5 | volumes: 6 | - ./bar:/bar 7 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | Aanand Prasad (@aanand) 2 | Ben Firshman (@bfirsh) 3 | Daniel Nephin (@dnephin) 4 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | if sys.version_info >= (2, 7): 4 | import unittest # NOQA 5 | else: 6 | import unittest2 as unittest # NOQA 7 | -------------------------------------------------------------------------------- /tests/fixtures/extends/nested-intermediate.yml: -------------------------------------------------------------------------------- 1 | webintermediate: 2 | extends: 3 | file: common.yml 4 | service: web 5 | environment: 6 | - "FOO=2" 7 | -------------------------------------------------------------------------------- /tests/fixtures/extends/nested.yml: -------------------------------------------------------------------------------- 1 | myweb: 2 | extends: 3 | file: nested-intermediate.yml 4 | service: webintermediate 5 | environment: 6 | - "BAR=2" 7 | -------------------------------------------------------------------------------- /tests/fixtures/commands-composefile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | implicit: 2 | image: composetest_test 3 | explicit: 4 | image: composetest_test 5 | command: [ "/bin/true" ] 6 | -------------------------------------------------------------------------------- /requirements.txt: 
-------------------------------------------------------------------------------- 1 | PyYAML==3.10 2 | docker-py==1.0.0 3 | dockerpty==0.3.2 4 | docopt==0.6.1 5 | requests==2.2.1 6 | six==1.7.3 7 | texttable==0.8.2 8 | websocket-client==0.11.0 9 | -------------------------------------------------------------------------------- /tests/fixtures/environment-composefile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | service: 2 | image: busybox:latest 3 | command: sleep 5 4 | 5 | environment: 6 | foo: bar 7 | hello: world 8 | -------------------------------------------------------------------------------- /tests/fixtures/ports-composefile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | 2 | simple: 3 | image: busybox:latest 4 | command: /bin/sleep 300 5 | ports: 6 | - '3000' 7 | - '49152:3001' 8 | -------------------------------------------------------------------------------- /tests/fixtures/UpperCaseDir/docker-compose.yml: -------------------------------------------------------------------------------- 1 | simple: 2 | image: busybox:latest 3 | command: /bin/sleep 300 4 | another: 5 | image: busybox:latest 6 | command: /bin/sleep 300 7 | -------------------------------------------------------------------------------- /tests/fixtures/simple-composefile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | simple: 2 | image: busybox:latest 3 | command: /bin/sleep 300 4 | another: 5 | image: busybox:latest 6 | command: /bin/sleep 300 7 | -------------------------------------------------------------------------------- /script/shell: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -ex 3 | docker build -t docker-compose . 
4 | exec docker run -v /var/run/docker.sock:/var/run/docker.sock -v `pwd`:/code -ti --rm --entrypoint bash docker-compose 5 | -------------------------------------------------------------------------------- /tests/fixtures/multiple-composefiles/docker-compose.yml: -------------------------------------------------------------------------------- 1 | simple: 2 | image: busybox:latest 3 | command: /bin/sleep 300 4 | another: 5 | image: busybox:latest 6 | command: /bin/sleep 300 7 | -------------------------------------------------------------------------------- /tests/fixtures/env/one.env: -------------------------------------------------------------------------------- 1 | # Keep the blank lines and comments in this file, please 2 | 3 | ONE=2 4 | TWO=1 5 | 6 | # (thanks) 7 | 8 | THREE=3 9 | 10 | FOO=bar 11 | # FOO=somethingelse 12 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | mock >= 1.0.1 2 | nose==1.3.4 3 | git+https://github.com/pyinstaller/pyinstaller.git@12e40471c77f588ea5be352f7219c873ddaae056#egg=pyinstaller 4 | unittest2==0.8.0 5 | flake8==2.3.0 6 | pep8==1.6.1 7 | -------------------------------------------------------------------------------- /tests/fixtures/extends/circle-1.yml: -------------------------------------------------------------------------------- 1 | foo: 2 | image: busybox 3 | bar: 4 | image: busybox 5 | web: 6 | extends: 7 | file: circle-2.yml 8 | service: web 9 | baz: 10 | image: busybox 11 | quux: 12 | image: busybox 13 | -------------------------------------------------------------------------------- /tests/fixtures/extends/circle-2.yml: -------------------------------------------------------------------------------- 1 | foo: 2 | image: busybox 3 | bar: 4 | image: busybox 5 | web: 6 | extends: 7 | file: circle-1.yml 8 | service: web 9 | baz: 10 | image: busybox 11 | quux: 12 | image: busybox 13 | 
-------------------------------------------------------------------------------- /script/build-linux: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | TAG="docker-compose" 6 | docker build -t "$TAG" . 7 | docker run \ 8 | --rm \ 9 | --user=user \ 10 | --volume="$(pwd):/code" \ 11 | --entrypoint="script/build-linux-inner" \ 12 | "$TAG" 13 | -------------------------------------------------------------------------------- /script/build-linux-inner: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | mkdir -p `pwd`/dist 6 | chmod 777 `pwd`/dist 7 | 8 | pyinstaller -F bin/docker-compose 9 | mv dist/docker-compose dist/docker-compose-Linux-x86_64 10 | dist/docker-compose-Linux-x86_64 --version 11 | -------------------------------------------------------------------------------- /tests/fixtures/links-composefile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | db: 2 | image: busybox:latest 3 | command: /bin/sleep 300 4 | web: 5 | image: busybox:latest 6 | command: /bin/sleep 300 7 | links: 8 | - db:db 9 | console: 10 | image: busybox:latest 11 | command: /bin/sleep 300 12 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include Dockerfile 2 | include LICENSE 3 | include requirements.txt 4 | include requirements-dev.txt 5 | include tox.ini 6 | include *.md 7 | include contrib/completion/bash/docker-compose 8 | recursive-include tests * 9 | global-exclude *.pyc 10 | global-exclude *.pyo 11 | global-exclude *.un~ 12 | -------------------------------------------------------------------------------- /wercker.yml: -------------------------------------------------------------------------------- 1 | box: wercker-labs/docker 2 | build: 3 | steps: 4 | - 
script: 5 | name: validate DCO 6 | code: script/validate-dco 7 | - script: 8 | name: run tests 9 | code: script/test 10 | - script: 11 | name: build binary 12 | code: script/build-linux 13 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py26,py27 3 | 4 | [testenv] 5 | usedevelop=True 6 | deps = 7 | -rrequirements.txt 8 | -rrequirements-dev.txt 9 | commands = 10 | nosetests -v {posargs} 11 | flake8 compose tests setup.py 12 | 13 | [flake8] 14 | # ignore line-length for now 15 | ignore = E501,E203 16 | exclude = compose/packages 17 | -------------------------------------------------------------------------------- /script/build-osx: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | rm -rf venv 4 | virtualenv venv 5 | venv/bin/pip install -r requirements.txt 6 | venv/bin/pip install -r requirements-dev.txt 7 | venv/bin/pip install . 
8 | venv/bin/pyinstaller -F bin/docker-compose 9 | mv dist/docker-compose dist/docker-compose-Darwin-x86_64 10 | dist/docker-compose-Darwin-x86_64 --version 11 | -------------------------------------------------------------------------------- /tests/fixtures/extends/docker-compose.yml: -------------------------------------------------------------------------------- 1 | myweb: 2 | extends: 3 | file: common.yml 4 | service: web 5 | command: sleep 300 6 | links: 7 | - "mydb:db" 8 | environment: 9 | # leave FOO alone 10 | # override BAR 11 | BAR: "2" 12 | # add BAZ 13 | BAZ: "2" 14 | mydb: 15 | image: busybox 16 | command: sleep 300 17 | -------------------------------------------------------------------------------- /script/docs: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -ex 3 | 4 | # import the existing docs build cmds from docker/docker 5 | DOCSPORT=8000 6 | GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null) 7 | DOCKER_DOCS_IMAGE="compose-docs$GIT_BRANCH" 8 | DOCKER_RUN_DOCS="docker run --rm -it -e NOCACHE" 9 | 10 | docker build -t "$DOCKER_DOCS_IMAGE" -f docs/Dockerfile . 11 | $DOCKER_RUN_DOCS -p $DOCSPORT:8000 "$DOCKER_DOCS_IMAGE" mkdocs serve 12 | -------------------------------------------------------------------------------- /script/test: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # See CONTRIBUTING.md for usage. 3 | 4 | set -ex 5 | 6 | TAG="docker-compose:$(git rev-parse --short HEAD)" 7 | 8 | docker build -t "$TAG" . 
9 | docker run \ 10 | --rm \ 11 | --volume="/var/run/docker.sock:/var/run/docker.sock" \ 12 | --volume="$(pwd):/code" \ 13 | -e DOCKER_VERSIONS \ 14 | -e "TAG=$TAG" \ 15 | --entrypoint="script/test-versions" \ 16 | "$TAG" \ 17 | "$@" 18 | -------------------------------------------------------------------------------- /docs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docs/base:latest 2 | MAINTAINER Sven Dowideit (@SvenDowideit) 3 | 4 | # to get the git info for this repo 5 | COPY . /src 6 | 7 | # Reset the /docs dir so we can replace the theme meta with the new repo's git info 8 | RUN git reset --hard 9 | 10 | RUN grep "__version" /src/compose/__init__.py | sed "s/.*'\(.*\)'/\1/" > /docs/VERSION 11 | COPY docs/* /docs/sources/compose/ 12 | COPY docs/mkdocs.yml /docs/mkdocs-compose.yml 13 | 14 | # Then build everything together, ready for mkdocs 15 | RUN /docs/build.sh 16 | -------------------------------------------------------------------------------- /script/wrapdocker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$DOCKER_VERSION" != "" ] && [ "$DOCKER_VERSION" != "default" ]; then 4 | ln -fs "/usr/local/bin/docker-$DOCKER_VERSION" "/usr/local/bin/docker" 5 | fi 6 | 7 | # If a pidfile is still around (for example after a container restart), 8 | # delete it so that docker can start. 9 | rm -rf /var/run/docker.pid 10 | docker -d $DOCKER_DAEMON_ARGS &>/var/log/docker.log & 11 | 12 | >&2 echo "Waiting for Docker to start..." 13 | while ! docker ps &>/dev/null; do 14 | sleep 1 15 | done 16 | 17 | >&2 echo ">" "$@" 18 | exec "$@" 19 | -------------------------------------------------------------------------------- /script/dev: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This is a script for running Compose inside a Docker container. It's handy for 3 | # development. 
4 | # 5 | # $ ln -s `pwd`/script/dev /usr/local/bin/docker-compose 6 | # $ cd /a/compose/project 7 | # $ docker-compose up 8 | # 9 | 10 | set -e 11 | 12 | # Follow symbolic links 13 | if [ -h "$0" ]; then 14 | DIR=$(readlink "$0") 15 | else 16 | DIR=$0 17 | fi 18 | DIR="$(dirname "$DIR")"/.. 19 | 20 | docker build -t docker-compose $DIR 21 | exec docker run -i -t -v /var/run/docker.sock:/var/run/docker.sock -v `pwd`:`pwd` -w `pwd` docker-compose $@ 22 | -------------------------------------------------------------------------------- /script/ci: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This should be run inside a container built from the Dockerfile 3 | # at the root of the repo: 4 | # 5 | # $ TAG="docker-compose:$(git rev-parse --short HEAD)" 6 | # $ docker build -t "$TAG" . 7 | # $ docker run --rm --volume="/var/run/docker.sock:/var/run/docker.sock" --volume="$(pwd)/.git:/code/.git" -e "TAG=$TAG" --entrypoint="script/ci" "$TAG" 8 | 9 | set -e 10 | 11 | >&2 echo "Validating DCO" 12 | script/validate-dco 13 | 14 | export DOCKER_VERSIONS=all 15 | . 
script/test-versions 16 | 17 | >&2 echo "Building Linux binary" 18 | su -c script/build-linux-inner user 19 | -------------------------------------------------------------------------------- /tests/unit/progress_stream_test.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | from tests import unittest 4 | 5 | from six import StringIO 6 | 7 | from compose import progress_stream 8 | 9 | 10 | class ProgressStreamTestCase(unittest.TestCase): 11 | 12 | def test_stream_output(self): 13 | output = [ 14 | '{"status": "Downloading", "progressDetail": {"current": ' 15 | '31019763, "start": 1413653874, "total": 62763875}, ' 16 | '"progress": "..."}', 17 | ] 18 | events = progress_stream.stream_output(output, StringIO()) 19 | self.assertEqual(len(events), 1) 20 | -------------------------------------------------------------------------------- /compose/cli/formatter.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | import os 4 | import texttable 5 | 6 | 7 | def get_tty_width(): 8 | tty_size = os.popen('stty size', 'r').read().split() 9 | if len(tty_size) != 2: 10 | return 80 11 | _, width = tty_size 12 | return int(width) 13 | 14 | 15 | class Formatter(object): 16 | def table(self, headers, rows): 17 | table = texttable.Texttable(max_width=get_tty_width()) 18 | table.set_cols_dtype(['t' for h in headers]) 19 | table.add_rows([headers] + rows) 20 | table.set_deco(table.HEADER) 21 | table.set_chars(['-', '|', '+', '-']) 22 | 23 | return table.draw() 24 | -------------------------------------------------------------------------------- /tests/unit/cli/docker_client_test.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import 
absolute_import 3 | import os 4 | 5 | import mock 6 | from tests import unittest 7 | 8 | from compose.cli import docker_client 9 | 10 | 11 | class DockerClientTestCase(unittest.TestCase): 12 | 13 | def test_docker_client_no_home(self): 14 | with mock.patch.dict(os.environ): 15 | del os.environ['HOME'] 16 | docker_client.docker_client() 17 | 18 | def test_docker_client_with_custom_timeout(self): 19 | with mock.patch.dict(os.environ): 20 | os.environ['DOCKER_CLIENT_TIMEOUT'] = timeout = "300" 21 | client = docker_client.docker_client() 22 | self.assertEqual(client.timeout, int(timeout)) 23 | -------------------------------------------------------------------------------- /script/test-versions: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This should be run inside a container built from the Dockerfile 3 | # at the root of the repo - script/test will do it automatically. 4 | 5 | set -e 6 | 7 | >&2 echo "Running lint checks" 8 | flake8 compose tests setup.py 9 | 10 | if [ "$DOCKER_VERSIONS" == "" ]; then 11 | DOCKER_VERSIONS="default" 12 | elif [ "$DOCKER_VERSIONS" == "all" ]; then 13 | DOCKER_VERSIONS="$ALL_DOCKER_VERSIONS" 14 | fi 15 | 16 | for version in $DOCKER_VERSIONS; do 17 | >&2 echo "Running tests against Docker $version" 18 | docker run \ 19 | --rm \ 20 | --privileged \ 21 | --volume="/var/lib/docker" \ 22 | -e "DOCKER_VERSION=$version" \ 23 | --entrypoint="script/dind" \ 24 | "$TAG" \ 25 | script/wrapdocker nosetests "$@" 26 | done 27 | -------------------------------------------------------------------------------- /docs/mkdocs.yml: -------------------------------------------------------------------------------- 1 | 2 | - ['compose/index.md', 'User Guide', 'Docker Compose' ] 3 | - ['compose/production.md', 'User Guide', 'Using Compose in production' ] 4 | - ['compose/extends.md', 'User Guide', 'Extending services in Compose'] 5 | - ['compose/install.md', 'Installation', 'Docker Compose'] 6 | - 
['compose/cli.md', 'Reference', 'Compose command line'] 7 | - ['compose/yml.md', 'Reference', 'Compose yml'] 8 | - ['compose/env.md', 'Reference', 'Compose ENV variables'] 9 | - ['compose/completion.md', 'Reference', 'Compose commandline completion'] 10 | - ['compose/django.md', 'Examples', 'Getting started with Compose and Django'] 11 | - ['compose/rails.md', 'Examples', 'Getting started with Compose and Rails'] 12 | - ['compose/wordpress.md', 'Examples', 'Getting started with Compose and Wordpress'] 13 | -------------------------------------------------------------------------------- /compose/cli/colors.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | NAMES = [ 3 | 'grey', 4 | 'red', 5 | 'green', 6 | 'yellow', 7 | 'blue', 8 | 'magenta', 9 | 'cyan', 10 | 'white' 11 | ] 12 | 13 | 14 | def get_pairs(): 15 | for i, name in enumerate(NAMES): 16 | yield(name, str(30 + i)) 17 | yield('intense_' + name, str(30 + i) + ';1') 18 | 19 | 20 | def ansi(code): 21 | return '\033[{0}m'.format(code) 22 | 23 | 24 | def ansi_color(code, s): 25 | return '{0}{1}{2}'.format(ansi(code), s, ansi(0)) 26 | 27 | 28 | def make_color_fn(code): 29 | return lambda s: ansi_color(code, s) 30 | 31 | 32 | for (name, code) in get_pairs(): 33 | globals()[name] = make_color_fn(code) 34 | 35 | 36 | def rainbow(): 37 | cs = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue', 38 | 'intense_cyan', 'intense_yellow', 'intense_green', 39 | 'intense_magenta', 'intense_red', 'intense_blue'] 40 | 41 | for c in cs: 42 | yield globals()[c] 43 | -------------------------------------------------------------------------------- /tests/unit/cli/verbose_proxy_test.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | from tests import unittest 4 | 5 | from compose.cli import verbose_proxy 6 | 7 | 8 | class 
VerboseProxyTestCase(unittest.TestCase): 9 | 10 | def test_format_call(self): 11 | expected = "(u'arg1', True, key=u'value')" 12 | actual = verbose_proxy.format_call( 13 | ("arg1", True), 14 | {'key': 'value'}) 15 | 16 | self.assertEqual(expected, actual) 17 | 18 | def test_format_return_sequence(self): 19 | expected = "(list with 10 items)" 20 | actual = verbose_proxy.format_return(list(range(10)), 2) 21 | self.assertEqual(expected, actual) 22 | 23 | def test_format_return(self): 24 | expected = "{u'Id': u'ok'}" 25 | actual = verbose_proxy.format_return({'Id': 'ok'}, 2) 26 | self.assertEqual(expected, actual) 27 | 28 | def test_format_return_no_result(self): 29 | actual = verbose_proxy.format_return(None, 2) 30 | self.assertEqual(None, actual) 31 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:wheezy 2 | 3 | RUN set -ex; \ 4 | apt-get update -qq; \ 5 | apt-get install -y \ 6 | python \ 7 | python-pip \ 8 | python-dev \ 9 | git \ 10 | apt-transport-https \ 11 | ca-certificates \ 12 | curl \ 13 | lxc \ 14 | iptables \ 15 | ; \ 16 | rm -rf /var/lib/apt/lists/* 17 | 18 | ENV ALL_DOCKER_VERSIONS 1.6.0-rc4 19 | 20 | RUN set -ex; \ 21 | curl https://test.docker.com/builds/Linux/x86_64/docker-1.6.0-rc4 -o /usr/local/bin/docker-1.6.0-rc4; \ 22 | chmod +x /usr/local/bin/docker-1.6.0-rc4 23 | 24 | # Set the default Docker to be run 25 | RUN ln -s /usr/local/bin/docker-1.6.0-rc4 /usr/local/bin/docker 26 | 27 | RUN useradd -d /home/user -m -s /bin/bash user 28 | WORKDIR /code/ 29 | 30 | ADD requirements.txt /code/ 31 | RUN pip install -r requirements.txt 32 | 33 | ADD requirements-dev.txt /code/ 34 | RUN pip install -r requirements-dev.txt 35 | 36 | ADD . 
/code/ 37 | RUN python setup.py install 38 | 39 | RUN chown -R user /code/ 40 | 41 | ENTRYPOINT ["/usr/local/bin/docker-compose"] 42 | -------------------------------------------------------------------------------- /script/.validate: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ -z "$VALIDATE_UPSTREAM" ]; then 4 | # this is kind of an expensive check, so let's not do this twice if we 5 | # are running more than one validate bundlescript 6 | 7 | VALIDATE_REPO='https://github.com/docker/fig.git' 8 | VALIDATE_BRANCH='master' 9 | 10 | if [ "$TRAVIS" = 'true' -a "$TRAVIS_PULL_REQUEST" != 'false' ]; then 11 | VALIDATE_REPO="https://github.com/${TRAVIS_REPO_SLUG}.git" 12 | VALIDATE_BRANCH="${TRAVIS_BRANCH}" 13 | fi 14 | 15 | VALIDATE_HEAD="$(git rev-parse --verify HEAD)" 16 | 17 | git fetch -q "$VALIDATE_REPO" "refs/heads/$VALIDATE_BRANCH" 18 | VALIDATE_UPSTREAM="$(git rev-parse --verify FETCH_HEAD)" 19 | 20 | VALIDATE_COMMIT_LOG="$VALIDATE_UPSTREAM..$VALIDATE_HEAD" 21 | VALIDATE_COMMIT_DIFF="$VALIDATE_UPSTREAM...$VALIDATE_HEAD" 22 | 23 | validate_diff() { 24 | if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then 25 | git diff "$VALIDATE_COMMIT_DIFF" "$@" 26 | fi 27 | } 28 | validate_log() { 29 | if [ "$VALIDATE_UPSTREAM" != "$VALIDATE_HEAD" ]; then 30 | git log "$VALIDATE_COMMIT_LOG" "$@" 31 | fi 32 | } 33 | fi 34 | -------------------------------------------------------------------------------- /compose/cli/multiplexer.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from threading import Thread 3 | 4 | try: 5 | from Queue import Queue, Empty 6 | except ImportError: 7 | from queue import Queue, Empty # Python 3.x 8 | 9 | 10 | # Yield STOP from an input generator to stop the 11 | # top-level loop without processing any more input. 
12 | STOP = object() 13 | 14 | 15 | class Multiplexer(object): 16 | def __init__(self, generators): 17 | self.generators = generators 18 | self.queue = Queue() 19 | 20 | def loop(self): 21 | self._init_readers() 22 | 23 | while True: 24 | try: 25 | item = self.queue.get(timeout=0.1) 26 | if item is STOP: 27 | break 28 | else: 29 | yield item 30 | except Empty: 31 | pass 32 | 33 | def _init_readers(self): 34 | for generator in self.generators: 35 | t = Thread(target=_enqueue_output, args=(generator, self.queue)) 36 | t.daemon = True 37 | t.start() 38 | 39 | 40 | def _enqueue_output(generator, queue): 41 | for item in generator: 42 | queue.put(item) 43 | -------------------------------------------------------------------------------- /compose/cli/docker_client.py: -------------------------------------------------------------------------------- 1 | from docker import Client 2 | from docker import tls 3 | import ssl 4 | import os 5 | 6 | 7 | def docker_client(): 8 | """ 9 | Returns a docker-py client configured using environment variables 10 | according to the same logic as the official Docker client. 
11 | """ 12 | cert_path = os.environ.get('DOCKER_CERT_PATH', '') 13 | if cert_path == '': 14 | cert_path = os.path.join(os.environ.get('HOME', ''), '.docker') 15 | 16 | base_url = os.environ.get('DOCKER_HOST') 17 | tls_config = None 18 | 19 | if os.environ.get('DOCKER_TLS_VERIFY', '') != '': 20 | parts = base_url.split('://', 1) 21 | base_url = '%s://%s' % ('https', parts[1]) 22 | 23 | client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem')) 24 | ca_cert = os.path.join(cert_path, 'ca.pem') 25 | 26 | tls_config = tls.TLSConfig( 27 | ssl_version=ssl.PROTOCOL_TLSv1, 28 | verify=True, 29 | assert_hostname=False, 30 | client_cert=client_cert, 31 | ca_cert=ca_cert, 32 | ) 33 | 34 | timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60)) 35 | return Client(base_url=base_url, tls=tls_config, version='1.15', timeout=timeout) 36 | -------------------------------------------------------------------------------- /tests/integration/testcases.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | from compose.service import Service 4 | from compose.config import make_service_dict 5 | from compose.cli.docker_client import docker_client 6 | from compose.progress_stream import stream_output 7 | from .. 
class DockerClientTestCase(unittest.TestCase):
    """Base class for tests that talk to a real Docker daemon.

    Provides one shared client per class plus per-test cleanup of any
    leftover 'composetest' containers and images.
    """

    @classmethod
    def setUpClass(cls):
        # One client for the whole class; construction is relatively costly.
        cls.client = docker_client()

    def setUp(self):
        # Remove containers left over from earlier runs. Only those whose
        # name mentions 'composetest', so a developer's own containers survive.
        for c in self.client.containers(all=True):
            if c['Names'] and 'composetest' in c['Names'][0]:
                self.client.kill(c['Id'])
                self.client.remove_container(c['Id'])
        # Likewise for images tagged by the test suite.
        for i in self.client.images():
            if isinstance(i.get('Tag'), basestring) and 'composetest' in i['Tag']:
                self.client.remove_image(i)

    def create_service(self, name, **kwargs):
        """Build a Service in the 'composetest' project.

        Defaults to a long-sleeping busybox container so the service stays
        up for the duration of a test.
        """
        kwargs['image'] = "busybox:latest"

        if 'command' not in kwargs:
            kwargs['command'] = ["/bin/sleep", "300"]

        return Service(
            project='composetest',
            client=self.client,
            **make_service_dict(name, kwargs, working_dir='.')
        )

    def check_build(self, *args, **kwargs):
        """Run a build and consume its output, discarding the progress text.

        Bug fix: the previous version leaked the /dev/null file handle; use
        a context manager so it is always closed.
        """
        build_output = self.client.build(*args, **kwargs)
        with open('/dev/null', 'w') as devnull:
            stream_output(build_output, devnull)
class SplitBufferTest(unittest.TestCase):
    """Exercise compose.cli.utils.split_buffer over various input chunkings."""

    def test_single_line_chunks(self):
        def reader():
            yield b'abc\n'
            yield b'def\n'
            yield b'ghi\n'

        self.assert_produces(reader, [b'abc\n', b'def\n', b'ghi\n'])

    def test_no_end_separator(self):
        def reader():
            yield b'abc\n'
            yield b'def\n'
            yield b'ghi'

        self.assert_produces(reader, [b'abc\n', b'def\n', b'ghi'])

    def test_multiple_line_chunk(self):
        def reader():
            yield b'abc\ndef\nghi'

        self.assert_produces(reader, [b'abc\n', b'def\n', b'ghi'])

    def test_chunked_line(self):
        def reader():
            yield b'a'
            yield b'b'
            yield b'c'
            yield b'\n'
            yield b'd'

        self.assert_produces(reader, [b'abc\n', b'd'])

    def test_preserves_unicode_sequences_within_lines(self):
        string = u"a\u2022c\n".encode('utf-8')

        def reader():
            yield string

        self.assert_produces(reader, [string])

    def assert_produces(self, reader, expectations):
        split = list(split_buffer(reader(), b'\n'))

        # Bug fix: zip() stops at the shorter sequence, so a splitter that
        # produced too few or too many chunks used to pass silently.
        # Compare lengths explicitly before the element-wise checks.
        self.assertEqual(len(split), len(expectations))

        for (actual, expected) in zip(split, expectations):
            self.assertEqual(type(actual), type(expected))
            self.assertEqual(actual, expected)
16 | On a Mac, install with `brew install bash-completion` 17 | 18 | Place the completion script in `/etc/bash_completion.d/` (`/usr/local/etc/bash_completion.d/` on a Mac), using e.g. 19 | 20 | curl -L https://raw.githubusercontent.com/docker/compose/1.1.0/contrib/completion/bash/docker-compose > /etc/bash_completion.d/docker-compose 21 | 22 | Completion will be available upon next login. 23 | 24 | Available completions 25 | --------------------- 26 | Depending on what you typed on the command line so far, it will complete 27 | 28 | - available docker-compose commands 29 | - options that are available for a particular command 30 | - service names that make sense in a given context (e.g. services with running or stopped instances or services based on images vs. services based on Dockerfiles). For `docker-compose scale`, completed service names will automatically have "=" appended. 31 | - arguments for selected options, e.g. `docker-compose kill -s` will complete some signals like SIGHUP and SIGUSR1. 32 | 33 | Enjoy working with Compose faster and with fewer typos!
def docopt_full_help(docstring, *args, **kwargs):
    """Parse with docopt, but print the full usage text on parse errors."""
    try:
        return docopt(docstring, *args, **kwargs)
    except DocoptExit:
        raise SystemExit(docstring)


class DocoptCommand(object):
    """Dispatch a command line to same-named handler methods on a subclass.

    The subclass's class docstring serves as the top-level docopt usage
    text; each handler method's docstring describes that subcommand.
    """

    def docopt_options(self):
        return {'options_first': True}

    def sys_dispatch(self):
        self.dispatch(sys.argv[1:], None)

    def dispatch(self, argv, global_options):
        options, handler, command_options = self.parse(argv, global_options)
        self.perform_command(options, handler, command_options)

    def perform_command(self, options, handler, command_options):
        handler(command_options)

    def parse(self, argv, global_options):
        """Resolve argv into (global options, handler, command options)."""
        options = docopt_full_help(getdoc(self), argv, **self.docopt_options())
        command = options['COMMAND']

        # No command given at all: show the top-level usage text and exit.
        if command is None:
            raise SystemExit(getdoc(self))

        # A valid command needs both a handler method with that name and a
        # docstring on it (the subcommand's usage text).
        if not hasattr(self, command):
            raise NoSuchCommand(command, self)

        handler = getattr(self, command)
        docstring = getdoc(handler)
        if docstring is None:
            raise NoSuchCommand(command, self)

        command_options = docopt_full_help(docstring, options['ARGS'], options_first=True)
        return options, handler, command_options


class NoSuchCommand(Exception):
    """Raised when argv names a command this dispatcher does not implement."""

    def __init__(self, command, supercommand):
        super(NoSuchCommand, self).__init__("No such command: %s" % command)

        self.command = command
        self.supercommand = supercommand
16 | Full URL, e.g. `DB_PORT=tcp://172.17.0.5:5432` 17 | 18 | name\_PORT\_num\_protocol
19 | Full URL, e.g. `DB_PORT_5432_TCP=tcp://172.17.0.5:5432` 20 | 21 | name\_PORT\_num\_protocol\_ADDR
22 | Container's IP address, e.g. `DB_PORT_5432_TCP_ADDR=172.17.0.5` 23 | 24 | name\_PORT\_num\_protocol\_PORT
25 | Exposed port number, e.g. `DB_PORT_5432_TCP_PORT=5432` 26 | 27 | name\_PORT\_num\_protocol\_PROTO
28 | Protocol (tcp or udp), e.g. `DB_PORT_5432_TCP_PROTO=tcp` 29 | 30 | name\_NAME
31 | Fully qualified container name, e.g. `DB_1_NAME=/myapp_web_1/myapp_db_1` 32 | 33 | [Docker links]: http://docs.docker.com/userguide/dockerlinks/ 34 | 35 | ## Compose documentation 36 | 37 | - [Installing Compose](install.md) 38 | - [User guide](index.md) 39 | - [Command line reference](cli.md) 40 | - [Yaml file reference](yml.md) 41 | - [Compose command line completion](completion.md) 42 | -------------------------------------------------------------------------------- /docs/install.md: -------------------------------------------------------------------------------- 1 | page_title: Installing Compose 2 | page_description: How to install Docker Compose 3 | page_keywords: compose, orchestration, install, installation, docker, documentation 4 | 5 | 6 | ## Installing Compose 7 | 8 | To install Compose, you'll need to install Docker first. You'll then install 9 | Compose with a `curl` command. 10 | 11 | ### Install Docker 12 | 13 | First, install Docker version 1.3 or greater: 14 | 15 | - [Instructions for Mac OS X](http://docs.docker.com/installation/mac/) 16 | - [Instructions for Ubuntu](http://docs.docker.com/installation/ubuntulinux/) 17 | - [Instructions for other systems](http://docs.docker.com/installation/) 18 | 19 | ### Install Compose 20 | 21 | To install Compose, run the following commands: 22 | 23 | curl -L https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose 24 | chmod +x /usr/local/bin/docker-compose 25 | 26 | > Note: If you get a "Permission denied" error, your `/usr/local/bin` directory probably isn't writable and you'll need to install Compose as the superuser. Run `sudo -i`, then the two commands above, then `exit`. 27 | 28 | Optionally, you can also install [command completion](completion.md) for the 29 | bash shell. 30 | 31 | Compose is available for OS X and 64-bit Linux. 
def read(*parts):
    """Return the UTF-8 text of a file located relative to this script."""
    here = os.path.dirname(__file__)
    with codecs.open(os.path.join(here, *parts), encoding='utf-8') as handle:
        return handle.read()


def find_version(*file_paths):
    """Return the value assigned to ``__version__`` in the given file.

    Raises RuntimeError when no such assignment can be found.
    """
    contents = read(*file_paths)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
                      contents, re.M)
    if match:
        return match.group(1)
    raise RuntimeError("Unable to find version string.")
def format_call(args, kwargs):
    """Render positional and keyword arguments as a printable call site."""
    rendered = [repr(value) for value in args]
    for item in six.iteritems(kwargs):
        rendered.append("{0!s}={1!r}".format(*item))
    return "({0})".format(", ".join(rendered))


def format_return(result, max_lines):
    """Summarize a return value for logging, truncated to *max_lines* lines.

    Collections are reported by type and size; other truthy values are
    pretty-printed; falsy values are returned unchanged.
    """
    if isinstance(result, (list, tuple, set)):
        return "({0} with {1} items)".format(type(result).__name__, len(result))

    if not result:
        return result

    lines = pprint.pformat(result).split('\n')
    if len(lines) > max_lines:
        return '\n'.join(lines[:max_lines]) + '\n...'
    return '\n'.join(lines)
31 | """ 32 | 33 | def __init__(self, obj_name, obj, log_name=None, max_lines=10): 34 | self.obj_name = obj_name 35 | self.obj = obj 36 | self.max_lines = max_lines 37 | self.log = logging.getLogger(log_name or __name__) 38 | 39 | def __getattr__(self, name): 40 | attr = getattr(self.obj, name) 41 | 42 | if not six.callable(attr): 43 | return attr 44 | 45 | return functools.partial(self.proxy_callable, name) 46 | 47 | def proxy_callable(self, call_name, *args, **kwargs): 48 | self.log.info("%s %s <- %s", 49 | self.obj_name, 50 | call_name, 51 | format_call(args, kwargs)) 52 | 53 | result = getattr(self.obj, call_name)(*args, **kwargs) 54 | self.log.info("%s %s -> %s", 55 | self.obj_name, 56 | call_name, 57 | format_return(result, self.max_lines)) 58 | return result 59 | -------------------------------------------------------------------------------- /ROADMAP.md: -------------------------------------------------------------------------------- 1 | # Roadmap 2 | 3 | ## More than just development environments 4 | 5 | Over time we will extend Compose's remit to cover test, staging and production environments. This is not a simple task, and will take many incremental improvements such as: 6 | 7 | - Compose’s brute-force “delete and recreate everything” approach is great for dev and testing, but it not sufficient for production environments. You should be able to define a "desired" state that Compose will intelligently converge to. 8 | - It should be possible to partially modify the config file for different environments (dev/test/staging/prod), passing in e.g. custom ports or volume mount paths. ([#426](https://github.com/docker/fig/issues/426)) 9 | - Compose should recommend a technique for zero-downtime deploys. 10 | 11 | ## Integration with Swarm 12 | 13 | Compose should integrate really well with Swarm so you can take an application you've developed on your laptop and run it on a Swarm cluster. 
14 | 15 | The current state of integration is documented in [SWARM.md](SWARM.md). 16 | 17 | ## Applications spanning multiple teams 18 | 19 | Compose works well for applications that are in a single repository and depend on services that are hosted on Docker Hub. If your application depends on another application within your organisation, Compose doesn't work as well. 20 | 21 | There are several ideas about how this could work, such as [including external files](https://github.com/docker/fig/issues/318). 22 | 23 | ## An even better tool for development environments 24 | 25 | Compose is a great tool for development environments, but it could be even better. For example: 26 | 27 | - [Compose could watch your code and automatically kick off builds when something changes.](https://github.com/docker/fig/issues/184) 28 | - It should be possible to define hostnames for containers which work from the host machine, e.g. “mywebcontainer.local”. This is needed by apps comprising multiple web services which generate links to one another (e.g. a frontend website and a separate admin webapp) 29 | -------------------------------------------------------------------------------- /SWARM.md: -------------------------------------------------------------------------------- 1 | Docker Compose/Swarm integration 2 | ================================ 3 | 4 | Eventually, Compose and Swarm aim to have full integration, meaning you can point a Compose app at a Swarm cluster and have it all just work as if you were using a single Docker host. 5 | 6 | However, the current extent of integration is minimal: Compose can create containers on a Swarm cluster, but the majority of Compose apps won’t work out of the box unless all containers are scheduled on one host, defeating much of the purpose of using Swarm in the first place. 
7 | 8 | Still, Compose and Swarm can be useful in a “batch processing” scenario (where a large number of containers need to be spun up and down to do independent computation) or a “shared cluster” scenario (where multiple teams want to deploy apps on a cluster without worrying about where to put them). 9 | 10 | A number of things need to happen before full integration is achieved, which are documented below. 11 | 12 | Links and networking 13 | -------------------- 14 | 15 | The primary thing stopping multi-container apps from working seamlessly on Swarm is getting them to talk to one another: enabling private communication between containers on different hosts hasn’t been solved in a non-hacky way. 16 | 17 | Long-term, networking is [getting overhauled](https://github.com/docker/docker/issues/9983) in such a way that it’ll fit the multi-host model much better. For now, **linked containers are automatically scheduled on the same host**. 18 | 19 | Building 20 | -------- 21 | 22 | `docker build` against a Swarm cluster is not implemented, so for now the `build` option will not work - you will need to manually build your service's image, push it somewhere and use `image` to instruct Compose to pull it. Here's an example using the Docker Hub: 23 | 24 | $ docker build -t myusername/web . 
#!/bin/bash
# Validate that every commit under review is signed off per the Docker DCO
# (Developer Certificate of Origin). Exits non-zero listing any offenders.

set -e

# Pulls in the validate_diff/validate_log helpers that describe the range
# of commits being validated.
source "$(dirname "$BASH_SOURCE")/.validate"

# Total added/deleted line counts, and the list of changed non-docs files.
adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }')
dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }')
notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')"

# awk prints nothing on an empty diff; default the sums to zero.
: ${adds:=0}
: ${dels:=0}

# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash"
githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+'

# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work
dcoPrefix='Signed-off-by:'
dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$"

# Reads a commit message on stdin; succeeds when a DCO sign-off is present.
check_dco() {
	grep -qE "$dcoRegex"
}

if [ $adds -eq 0 -a $dels -eq 0 ]; then
	echo '0 adds, 0 deletions; nothing to validate! :)'
elif [ -z "$notDocs" -a $adds -le 1 -a $dels -le 1 ]; then
	# Trivial docs-only changes are exempt from the DCO requirement.
	echo 'Congratulations! DCO small-patch-exception material!'
else
	commits=( $(validate_log --format='format:%H%n') )
	badCommits=()
	for commit in "${commits[@]}"; do
		if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then
			# no content (ie, Merge commit, etc)
			continue
		fi
		if ! git log -1 --format='format:%B' "$commit" | check_dco; then
			badCommits+=( "$commit" )
		fi
	done
	if [ ${#badCommits[@]} -eq 0 ]; then
		echo "Congratulations! All commits are properly signed with the DCO!"
	else
		{
			echo "These commits do not have a proper '$dcoPrefix' marker:"
			for commit in "${badCommits[@]}"; do
				echo " - $commit"
			done
			echo
			echo 'Please amend each commit to include a properly formatted DCO marker.'
			echo
			echo 'Visit the following URL for information about the Docker DCO:'
			echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work'
			echo
		} >&2
		false
	fi
fi
42 | 43 | Compose has commands for managing the whole lifecycle of your application: 44 | 45 | * Start, stop and rebuild services 46 | * View the status of running services 47 | * Stream the log output of running services 48 | * Run a one-off command on a service 49 | 50 | Installation and documentation 51 | ------------------------------ 52 | 53 | - Full documentation is available on [Docker's website](http://docs.docker.com/compose/). 54 | - Hop into #docker-compose on Freenode if you have any questions. 55 | 56 | Contributing 57 | ------------ 58 | 59 | [![Build Status](http://jenkins.dockerproject.com/buildStatus/icon?job=Compose Master)](http://jenkins.dockerproject.com/job/Compose%20Master/) 60 | 61 | Want to help build Compose? Check out our [contributing documentation](https://github.com/docker/compose/blob/master/CONTRIBUTING.md). 62 | 63 | -------------------------------------------------------------------------------- /tests/unit/log_printer_test.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | import os 4 | 5 | from compose.cli.log_printer import LogPrinter 6 | from .. 
class LogPrinterTest(unittest.TestCase):
    """Unit tests for compose.cli.log_printer.LogPrinter.

    LogPrinter is driven end-to-end: its output is written to a real OS
    pipe and read back for inspection.
    """

    def get_default_output(self, monochrome=False):
        # Helper: run a LogPrinter over one mock container that emits two
        # lines, returning everything the printer wrote.
        def reader(*args, **kwargs):
            yield "hello\nworld"

        container = MockContainer(reader)
        output = run_log_printer([container], monochrome=monochrome)
        return output

    def test_single_container(self):
        output = self.get_default_output()

        self.assertIn('hello', output)
        self.assertIn('world', output)

    def test_monochrome(self):
        # With monochrome on, no ANSI escape sequences may appear.
        output = self.get_default_output(monochrome=True)
        self.assertNotIn('\033[', output)

    def test_polychrome(self):
        # Default mode colours the output, so escape sequences must appear.
        output = self.get_default_output()
        self.assertIn('\033[', output)

    def test_unicode(self):
        # A UTF-8 encoded multi-byte glyph must survive the round trip intact.
        glyph = u'\u2022'.encode('utf-8')

        def reader(*args, **kwargs):
            yield glyph + b'\n'

        container = MockContainer(reader)
        output = run_log_printer([container])

        self.assertIn(glyph, output)


def run_log_printer(containers, monochrome=False):
    # Drive a LogPrinter to completion, capturing its output via an OS pipe.
    # The writer end must be closed before reading, otherwise read() would
    # block forever waiting for EOF.
    r, w = os.pipe()
    reader, writer = os.fdopen(r, 'r'), os.fdopen(w, 'w')
    printer = LogPrinter(containers, output=writer, monochrome=monochrome)
    printer.run()
    writer.close()
    return reader.read()


class MockContainer(object):
    # Minimal stand-in for compose.container.Container: just enough surface
    # (name properties, attach(), wait()) for LogPrinter to run against.
    def __init__(self, reader):
        self._reader = reader

    @property
    def name(self):
        return 'myapp_web_1'

    @property
    def name_without_project(self):
        # Used by LogPrinter to build the per-line log prefix.
        return 'web_1'

    def attach(self, *args, **kwargs):
        # LogPrinter treats the return value as a stream of log chunks.
        return self._reader()

    def wait(self, *args, **kwargs):
        # Report a clean exit once the "container" finishes.
        return 0
dedent(msg).strip() 8 | 9 | def __unicode__(self): 10 | return self.msg 11 | 12 | __str__ = __unicode__ 13 | 14 | 15 | class DockerNotFoundMac(UserError): 16 | def __init__(self): 17 | super(DockerNotFoundMac, self).__init__(""" 18 | Couldn't connect to Docker daemon. You might need to install docker-osx: 19 | 20 | https://github.com/noplay/docker-osx 21 | """) 22 | 23 | 24 | class DockerNotFoundUbuntu(UserError): 25 | def __init__(self): 26 | super(DockerNotFoundUbuntu, self).__init__(""" 27 | Couldn't connect to Docker daemon. You might need to install Docker: 28 | 29 | http://docs.docker.io/en/latest/installation/ubuntulinux/ 30 | """) 31 | 32 | 33 | class DockerNotFoundGeneric(UserError): 34 | def __init__(self): 35 | super(DockerNotFoundGeneric, self).__init__(""" 36 | Couldn't connect to Docker daemon. You might need to install Docker: 37 | 38 | http://docs.docker.io/en/latest/installation/ 39 | """) 40 | 41 | 42 | class ConnectionErrorBoot2Docker(UserError): 43 | def __init__(self): 44 | super(ConnectionErrorBoot2Docker, self).__init__(""" 45 | Couldn't connect to Docker daemon - you might need to run `boot2docker up`. 46 | """) 47 | 48 | 49 | class ConnectionErrorGeneric(UserError): 50 | def __init__(self, url): 51 | super(ConnectionErrorGeneric, self).__init__(""" 52 | Couldn't connect to Docker daemon at %s - is it running? 53 | 54 | If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable. 55 | """ % url) 56 | 57 | 58 | class ComposeFileNotFound(UserError): 59 | def __init__(self, supported_filenames): 60 | super(ComposeFileNotFound, self).__init__(""" 61 | Can't find a suitable configuration file. Are you in the right directory? 
class StreamOutputError(Exception):
    """Raised when a Docker output stream reports an error event."""
    pass


def stream_output(output, stream):
    """Render a Docker JSON progress stream (build/pull output) to *stream*.

    Returns the list of decoded events. When *stream* is a terminal,
    per-image progress lines are updated in place using cursor-movement
    escape sequences; otherwise events are written sequentially.
    """
    is_terminal = _is_terminal(stream)
    stream = codecs.getwriter('utf-8')(stream)
    all_events = []
    lines = {}   # image id -> row index of its progress line
    diff = 0

    for chunk in output:
        event = json.loads(chunk)
        all_events.append(event)

        if 'progress' in event or 'progressDetail' in event:
            image_id = event.get('id')
            if not image_id:
                continue

            if image_id in lines:
                # Known image: how many rows up is its line?
                diff = len(lines) - lines[image_id]
            else:
                # New image: allocate the next row for it.
                lines[image_id] = len(lines)
                stream.write("\n")
                diff = 0

            if is_terminal:
                # move cursor up `diff` rows
                stream.write("%c[%dA" % (27, diff))

        print_output_event(event, stream, is_terminal)

        if 'id' in event and is_terminal:
            # move cursor back down
            stream.write("%c[%dB" % (27, diff))

        stream.flush()

    return all_events


def _is_terminal(stream):
    """Return True when *stream* is attached to a tty.

    Bug fix: streams such as io.BytesIO define fileno() but raise when it
    is called, which used to crash stream_output; treat any failure here as
    "not a terminal" instead.
    """
    try:
        return os.isatty(stream.fileno())
    except (AttributeError, OSError, ValueError):
        return False


def print_output_event(event, stream, is_terminal):
    """Write a single decoded event to *stream*.

    Raises StreamOutputError when the event carries an error payload.
    """
    if 'errorDetail' in event:
        raise StreamOutputError(event['errorDetail']['message'])

    terminator = ''

    if is_terminal and 'stream' not in event:
        # erase current line, then re-home the cursor after writing
        # (removed a dead `pass` statement that followed this assignment)
        stream.write("%c[2K\r" % 27)
        terminator = "\r"
    elif 'progressDetail' in event:
        # Off-terminal, raw progress detail is noise; skip it entirely.
        return

    if 'time' in event:
        stream.write("[%s] " % event['time'])

    if 'id' in event:
        stream.write("%s: " % event['id'])

    if 'from' in event:
        stream.write("(from %s) " % event['from'])

    status = event.get('status', '')

    if 'progress' in event:
        stream.write("%s %s%s" % (status, event['progress'], terminator))
    elif 'progressDetail' in event:
        detail = event['progressDetail']
        if 'current' in detail:
            percentage = float(detail['current']) / float(detail['total']) * 100
            stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
        else:
            stream.write('%s%s' % (status, terminator))
    elif 'stream' in event:
        stream.write("%s%s" % (event['stream'], terminator))
    else:
        stream.write("%s%s\n" % (status, terminator))
return text 44 | 45 | for container in self.containers: 46 | if monochrome: 47 | color_fn = no_color 48 | else: 49 | color_fn = next(color_fns) 50 | generators.append(self._make_log_generator(container, color_fn)) 51 | 52 | return generators 53 | 54 | def _make_log_generator(self, container, color_fn): 55 | prefix = color_fn(self._generate_prefix(container)).encode('utf-8') 56 | # Attach to container before log printer starts running 57 | line_generator = split_buffer(self._attach(container), '\n') 58 | 59 | for line in line_generator: 60 | yield prefix + line 61 | 62 | exit_code = container.wait() 63 | yield color_fn("%s exited with code %s\n" % (container.name, exit_code)) 64 | yield STOP 65 | 66 | def _generate_prefix(self, container): 67 | """ 68 | Generate the prefix for a log line without colour 69 | """ 70 | name = container.name_without_project 71 | padding = ' ' * (self.prefix_width - len(name)) 72 | return ''.join([name, padding, ' | ']) 73 | 74 | def _attach(self, container): 75 | params = { 76 | 'stdout': True, 77 | 'stderr': True, 78 | 'stream': True, 79 | } 80 | params.update(self.attach_params) 81 | params = dict((name, 1 if value else 0) for (name, value) in list(params.items())) 82 | return container.attach(**params) 83 | -------------------------------------------------------------------------------- /compose/cli/utils.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | from __future__ import division 4 | import datetime 5 | import os 6 | import subprocess 7 | import platform 8 | 9 | 10 | def yesno(prompt, default=None): 11 | """ 12 | Prompt the user for a yes or no. 13 | 14 | Can optionally specify a default value, which will only be 15 | used if they enter a blank line. 16 | 17 | Unrecognised input (anything other than "y", "n", "yes", 18 | "no" or "") will return None. 
# http://stackoverflow.com/a/5164027
def prettydate(d):
    """
    Return a rough human-readable description of how long ago the UTC
    datetime ``d`` was, e.g. "just now", "5 minutes ago", "3 hours ago".

    Dates more than a week old (or in the future) fall back to an
    absolute date string such as "02 Jan 15".
    """
    diff = datetime.datetime.utcnow() - d
    s = diff.seconds
    if diff.days > 7 or diff.days < 0:
        return d.strftime('%d %b %y')
    elif diff.days == 1:
        return '1 day ago'
    elif diff.days > 1:
        return '{0} days ago'.format(diff.days)
    elif s <= 1:
        return 'just now'
    elif s < 60:
        return '{0} seconds ago'.format(s)
    elif s < 120:
        return '1 minute ago'
    elif s < 3600:
        # Integer division: this module does `from __future__ import
        # division`, so `s / 60` would yield e.g. "5.5 minutes ago".
        return '{0} minutes ago'.format(s // 60)
    elif s < 7200:
        return '1 hour ago'
    else:
        # Integer division for the same reason as above.
        return '{0} hours ago'.format(s // 3600)


def mkdir(path, permissions=0o700):
    """
    Create directory ``path`` if it does not already exist, set its
    permissions (by default owner-only rwx) and return the path.
    """
    if not os.path.exists(path):
        os.mkdir(path)

    os.chmod(path, permissions)

    return path


def split_buffer(reader, separator):
    """
    Given a generator which yields strings and a separator string,
    joins all input, splits on the separator and yields each chunk.

    Unlike string.split(), each chunk includes the trailing
    separator, except for the last one if none was found on the end
    of the input.
    """
    buffered = str('')
    separator = str(separator)

    for data in reader:
        buffered += data
        while True:
            index = buffered.find(separator)
            if index == -1:
                break
            # Use the separator's real length so that multi-character
            # separators are split correctly (the old `index + 1` only
            # worked for single-character separators).
            end = index + len(separator)
            yield buffered[:end]
            buffered = buffered[end:]

    # Emit any trailing data that had no separator after it.
    if len(buffered) > 0:
        yield buffered
If you're looking to contribute to Compose
but you're new to the project or maybe even to Python, here are the steps
that should get you started.
31 | 32 | ## Running the test suite 33 | 34 | Use the test script to run DCO check, linting checks and then the full test 35 | suite against different Python interpreters: 36 | 37 | $ script/test 38 | 39 | Tests are run against a Docker daemon inside a container, so that we can test 40 | against multiple Docker versions. By default they'll run against only the latest 41 | Docker version - set the `DOCKER_VERSIONS` environment variable to "all" to run 42 | against all supported versions: 43 | 44 | $ DOCKER_VERSIONS=all script/test 45 | 46 | Arguments to `script/test` are passed through to the `nosetests` executable, so 47 | you can specify a test directory, file, module, class or method: 48 | 49 | $ script/test tests/unit 50 | $ script/test tests/unit/cli_test.py 51 | $ script/test tests.integration.service_test 52 | $ script/test tests.integration.service_test:ServiceTest.test_containers 53 | 54 | Before pushing a commit you can check the DCO by invoking `script/validate-dco`. 55 | 56 | ## Building binaries 57 | 58 | Linux: 59 | 60 | $ script/build-linux 61 | 62 | OS X: 63 | 64 | $ script/build-osx 65 | 66 | Note that this only works on Mountain Lion, not Mavericks, due to a 67 | [bug in PyInstaller](http://www.pyinstaller.org/ticket/807). 68 | 69 | ## Release process 70 | 71 | 1. Open pull request that: 72 | - Updates the version in `compose/__init__.py` 73 | - Updates the binary URL in `docs/install.md` 74 | - Updates the script URL in `docs/completion.md` 75 | - Adds release notes to `CHANGES.md` 76 | 2. Create unpublished GitHub release with release notes 77 | 3. Build Linux version on any Docker host with `script/build-linux` and attach 78 | to release 79 | 4. Build OS X version on Mountain Lion with `script/build-osx` and attach to 80 | release as `docker-compose-Darwin-x86_64` and `docker-compose-Linux-x86_64`. 81 | 5. Publish GitHub release, creating tag 82 | 6. Update website with `script/deploy-docs` 83 | 7. 
# This script should be executed inside a docker container in privileged mode
# ('docker run --privileged', introduced in docker 0.6).
45 | # The bug seems to appear when the cgroup hierarchies are not 46 | # mounted on the exact same directories in the host, and in the 47 | # container. 48 | 49 | # Named, control-less cgroups are mounted with "-o name=foo" 50 | # (and appear as such under /proc//cgroup) but are usually 51 | # mounted on a directory named "foo" (without the "name=" prefix). 52 | # Systemd and OpenRC (and possibly others) both create such a 53 | # cgroup. To avoid the aforementioned bug, we symlink "foo" to 54 | # "name=foo". This shouldn't have any adverse effect. 55 | name="${SUBSYS#name=}" 56 | if [ "$name" != "$SUBSYS" ]; then 57 | ln -s "$SUBSYS" "$CGROUP/$name" 58 | fi 59 | 60 | # Likewise, on at least one system, it has been reported that 61 | # systemd would mount the CPU and CPU accounting controllers 62 | # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu" 63 | # but on a directory called "cpu,cpuacct" (note the inversion 64 | # in the order of the groups). This tries to work around it. 65 | if [ "$SUBSYS" = 'cpuacct,cpu' ]; then 66 | ln -s "$SUBSYS" "$CGROUP/cpu,cpuacct" 67 | fi 68 | done 69 | 70 | # Note: as I write those lines, the LXC userland tools cannot setup 71 | # a "sub-container" properly if the "devices" cgroup is not in its 72 | # own hierarchy. Let's detect this and issue a warning. 73 | if ! grep -q :devices: /proc/1/cgroup; then 74 | echo >&2 'WARNING: the "devices" cgroup should be in its own hierarchy.' 75 | fi 76 | if ! grep -qw devices /proc/1/cgroup; then 77 | echo >&2 'WARNING: it looks like the "devices" cgroup is not mounted.' 78 | fi 79 | 80 | # Mount /tmp 81 | mount -t tmpfs none /tmp 82 | 83 | if [ $# -gt 0 ]; then 84 | exec "$@" 85 | fi 86 | 87 | echo >&2 'ERROR: No command specified.' 88 | echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' 
89 | -------------------------------------------------------------------------------- /docs/production.md: -------------------------------------------------------------------------------- 1 | page_title: Using Compose in production 2 | page_description: Guide to using Docker Compose in production 3 | page_keywords: documentation, docs, docker, compose, orchestration, containers, production 4 | 5 | 6 | ## Using Compose in production 7 | 8 | While **Compose is not yet considered production-ready**, if you'd like to experiment and learn more about using it in production deployments, this guide 9 | can help. 10 | The project is actively working towards becoming 11 | production-ready; to learn more about the progress being made, check out the 12 | [roadmap](https://github.com/docker/compose/blob/master/ROADMAP.md) for details 13 | on how it's coming along and what still needs to be done. 14 | 15 | When deploying to production, you'll almost certainly want to make changes to 16 | your app configuration that are more appropriate to a live environment. These 17 | changes may include: 18 | 19 | - Removing any volume bindings for application code, so that code stays inside 20 | the container and can't be changed from outside 21 | - Binding to different ports on the host 22 | - Setting environment variables differently (e.g., to decrease the verbosity of 23 | logging, or to enable email sending) 24 | - Specifying a restart policy (e.g., `restart: always`) to avoid downtime 25 | - Adding extra services (e.g., a log aggregator) 26 | 27 | For this reason, you'll probably want to define a separate Compose file, say 28 | `production.yml`, which specifies production-appropriate configuration. 29 | 30 | > **Note:** The [extends](extends.md) keyword is useful for maintaining multiple 31 | > Compose files which re-use common services without having to manually copy and 32 | > paste. 
33 | 34 | Once you've got an alternate configuration file, make Compose use it 35 | by setting the `COMPOSE_FILE` environment variable: 36 | 37 | $ COMPOSE_FILE=production.yml 38 | $ docker-compose up -d 39 | 40 | > **Note:** You can also use the file for a one-off command without setting 41 | > an environment variable. You do this by passing the `-f` flag, e.g., 42 | > `docker-compose -f production.yml up -d`. 43 | 44 | ### Deploying changes 45 | 46 | When you make changes to your app code, you'll need to rebuild your image and 47 | recreate your app's containers. To redeploy a service called 48 | `web`, you would use: 49 | 50 | $ docker-compose build web 51 | $ docker-compose up --no-deps -d web 52 | 53 | This will first rebuild the image for `web` and then stop, destroy, and recreate 54 | *just* the `web` service. The `--no-deps` flag prevents Compose from also 55 | recreating any services which `web` depends on. 56 | 57 | ### Running Compose on a single server 58 | 59 | You can use Compose to deploy an app to a remote Docker host by setting the 60 | `DOCKER_HOST`, `DOCKER_TLS_VERIFY`, and `DOCKER_CERT_PATH` environment variables 61 | appropriately. For tasks like this, 62 | [Docker Machine](https://docs.docker.com/machine) makes managing local and 63 | remote Docker hosts very easy, and is recommended even if you're not deploying 64 | remotely. 65 | 66 | Once you've set up your environment variables, all the normal `docker-compose` 67 | commands will work with no further configuration. 68 | 69 | ### Running Compose on a Swarm cluster 70 | 71 | [Docker Swarm](https://docs.docker.com/swarm), a Docker-native clustering 72 | system, exposes the same API as a single Docker host, which means you can use 73 | Compose against a Swarm instance and run your apps across multiple hosts. 
page_description: Getting started with Docker Compose and Wordpress
42 | command: php -S 0.0.0.0:8000 -t /code 43 | ports: 44 | - "8000:8000" 45 | links: 46 | - db 47 | volumes: 48 | - .:/code 49 | db: 50 | image: orchardup/mysql 51 | environment: 52 | MYSQL_DATABASE: wordpress 53 | ``` 54 | 55 | Two supporting files are needed to get this working - first, `wp-config.php` is 56 | the standard Wordpress config file with a single change to point the database 57 | configuration at the `db` container: 58 | 59 | ``` 60 | 45454/tcp" 110 | self.assertEqual(container.human_readable_ports, expected) 111 | 112 | def test_get_local_port(self): 113 | self.container_dict['NetworkSettings']['Ports'].update({ 114 | "45454/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49197"}], 115 | }) 116 | container = Container(None, self.container_dict, has_been_inspected=True) 117 | 118 | self.assertEqual( 119 | container.get_local_port(45454, protocol='tcp'), 120 | '0.0.0.0:49197') 121 | 122 | def test_get(self): 123 | container = Container(None, { 124 | "Status": "Up 8 seconds", 125 | "HostConfig": { 126 | "VolumesFrom": ["volume_id"] 127 | }, 128 | }, has_been_inspected=True) 129 | 130 | self.assertEqual(container.get('Status'), "Up 8 seconds") 131 | self.assertEqual(container.get('HostConfig.VolumesFrom'), ["volume_id"]) 132 | self.assertEqual(container.get('Foo.Bar.DoesNotExist'), None) 133 | -------------------------------------------------------------------------------- /compose/cli/command.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | from requests.exceptions import ConnectionError, SSLError 4 | import logging 5 | import os 6 | import re 7 | import six 8 | 9 | from .. import config 10 | from ..project import Project 11 | from ..service import ConfigError 12 | from .docopt_command import DocoptCommand 13 | from .utils import call_silently, is_mac, is_ubuntu 14 | from .docker_client import docker_client 15 | from . 
class Command(DocoptCommand):
    """
    Top-level CLI command: resolves the Compose file, project name and
    Docker client, then hands control to the requested subcommand handler.
    """
    base_dir = '.'

    def dispatch(self, *args, **kwargs):
        """
        Dispatch to the subcommand, translating low-level SSL and
        connection failures into user-friendly errors.
        """
        try:
            super(Command, self).dispatch(*args, **kwargs)
        except SSLError as e:
            raise errors.UserError('SSL error: %s' % e)
        except ConnectionError:
            # Try to guess why the Docker daemon was unreachable and give a
            # platform-appropriate hint.
            if call_silently(['which', 'docker']) != 0:
                if is_mac():
                    raise errors.DockerNotFoundMac()
                elif is_ubuntu():
                    raise errors.DockerNotFoundUbuntu()
                else:
                    raise errors.DockerNotFoundGeneric()
            elif call_silently(['which', 'boot2docker']) == 0:
                raise errors.ConnectionErrorBoot2Docker()
            else:
                raise errors.ConnectionErrorGeneric(self.get_client().base_url)

    def perform_command(self, options, handler, command_options):
        """
        Build the Project for the parsed CLI options and invoke the
        subcommand handler with it (``help`` needs no project).
        """
        if options['COMMAND'] == 'help':
            # Skip looking up the compose file.
            handler(None, command_options)
            return

        if 'FIG_FILE' in os.environ:
            log.warn('The FIG_FILE environment variable is deprecated.')
            log.warn('Please use COMPOSE_FILE instead.')

        # Precedence: --file flag, then COMPOSE_FILE, then legacy FIG_FILE.
        explicit_config_path = options.get('--file') or os.environ.get('COMPOSE_FILE') or os.environ.get('FIG_FILE')
        project = self.get_project(
            self.get_config_path(explicit_config_path),
            project_name=options.get('--project-name'),
            verbose=options.get('--verbose'))

        handler(project, command_options)

    def get_client(self, verbose=False):
        """
        Return a Docker client.  When verbose, log version information and
        wrap the client in a proxy that logs every API call.
        """
        client = docker_client()
        if verbose:
            version_info = six.iteritems(client.version())
            log.info("Compose version %s", __version__)
            log.info("Docker base_url: %s", client.base_url)
            log.info("Docker version: %s",
                     ", ".join("%s=%s" % item for item in version_info))
            return verbose_proxy.VerboseProxy('docker', client)
        return client

    def get_project(self, config_path, project_name=None, verbose=False):
        """
        Build a Project from the config file, converting configuration
        errors into user-facing errors.
        """
        try:
            return Project.from_dicts(
                self.get_project_name(config_path, project_name),
                config.load(config_path),
                self.get_client(verbose=verbose))
        except ConfigError as e:
            raise errors.UserError(six.text_type(e))

    def get_project_name(self, config_path, project_name=None):
        """
        Determine the project name: the explicit argument, an environment
        variable, or the name of the directory containing the config file;
        the result is normalised to lowercase alphanumerics.  Falls back
        to 'default' when nothing usable remains.
        """
        def normalize_name(name):
            # Project names may only contain lowercase letters and digits.
            return re.sub(r'[^a-z0-9]', '', name.lower())

        if 'FIG_PROJECT_NAME' in os.environ:
            log.warn('The FIG_PROJECT_NAME environment variable is deprecated.')
            log.warn('Please use COMPOSE_PROJECT_NAME instead.')

        # Precedence: explicit argument, then COMPOSE_PROJECT_NAME, then
        # the legacy FIG_PROJECT_NAME.
        project_name = project_name or os.environ.get('COMPOSE_PROJECT_NAME') or os.environ.get('FIG_PROJECT_NAME')
        if project_name is not None:
            return normalize_name(project_name)

        project = os.path.basename(os.path.dirname(os.path.abspath(config_path)))
        if project:
            return normalize_name(project)

        return 'default'

    def get_config_path(self, file_path=None):
        """
        Resolve the path of the compose file.  With an explicit path, join
        it to base_dir; otherwise search base_dir for the supported
        filenames in order of preference, warning about ambiguous or
        deprecated names.  Raises ComposeFileNotFound when nothing matches.
        """
        if file_path:
            return os.path.join(self.base_dir, file_path)

        # Order matters: the first existing filename wins.
        supported_filenames = [
            'docker-compose.yml',
            'docker-compose.yaml',
            'fig.yml',
            'fig.yaml',
        ]

        def expand(filename):
            return os.path.join(self.base_dir, filename)

        candidates = [filename for filename in supported_filenames if os.path.exists(expand(filename))]

        if len(candidates) == 0:
            raise errors.ComposeFileNotFound(supported_filenames)

        winner = candidates[0]

        if len(candidates) > 1:
            log.warning("Found multiple config files with supported names: %s", ", ".join(candidates))
            log.warning("Using %s\n", winner)

        if winner == 'docker-compose.yaml':
            log.warning("Please be aware that .yml is the expected extension "
                        "in most cases, and using .yaml can cause compatibility "
                        "issues in future.\n")

        if winner.startswith("fig."):
            log.warning("%s is deprecated and will not be supported in future. "
                        "Please rename your config file to docker-compose.yml\n" % winner)

        return expand(winner)
test_project_name_from_environment_old_var(self): 49 | command = TopLevelCommand() 50 | name = 'namefromenv' 51 | with mock.patch.dict(os.environ): 52 | os.environ['FIG_PROJECT_NAME'] = name 53 | project_name = command.get_project_name(None) 54 | self.assertEquals(project_name, name) 55 | 56 | def test_project_name_from_environment_new_var(self): 57 | command = TopLevelCommand() 58 | name = 'namefromenv' 59 | with mock.patch.dict(os.environ): 60 | os.environ['COMPOSE_PROJECT_NAME'] = name 61 | project_name = command.get_project_name(None) 62 | self.assertEquals(project_name, name) 63 | 64 | def test_filename_check(self): 65 | self.assertEqual('docker-compose.yml', get_config_filename_for_files([ 66 | 'docker-compose.yml', 67 | 'docker-compose.yaml', 68 | 'fig.yml', 69 | 'fig.yaml', 70 | ])) 71 | 72 | self.assertEqual('docker-compose.yaml', get_config_filename_for_files([ 73 | 'docker-compose.yaml', 74 | 'fig.yml', 75 | 'fig.yaml', 76 | ])) 77 | 78 | self.assertEqual('fig.yml', get_config_filename_for_files([ 79 | 'fig.yml', 80 | 'fig.yaml', 81 | ])) 82 | 83 | self.assertEqual('fig.yaml', get_config_filename_for_files([ 84 | 'fig.yaml', 85 | ])) 86 | 87 | self.assertRaises(ComposeFileNotFound, lambda: get_config_filename_for_files([])) 88 | 89 | def test_get_project(self): 90 | command = TopLevelCommand() 91 | command.base_dir = 'tests/fixtures/longer-filename-composefile' 92 | project = command.get_project(command.get_config_path()) 93 | self.assertEqual(project.name, 'longerfilenamecomposefile') 94 | self.assertTrue(project.client) 95 | self.assertTrue(project.services) 96 | 97 | def test_help(self): 98 | command = TopLevelCommand() 99 | with self.assertRaises(SystemExit): 100 | command.dispatch(['-h'], None) 101 | 102 | def test_setup_logging(self): 103 | main.setup_logging() 104 | self.assertEqual(logging.getLogger().level, logging.DEBUG) 105 | self.assertEqual(logging.getLogger('requests').propagate, False) 106 | 107 | @mock.patch('compose.cli.main.dockerpty', 
class Container(object):
    """
    Represents a Docker container, constructed from the output of
    GET /containers/:id:/json.
    """
    def __init__(self, client, dictionary, has_been_inspected=False):
        self.client = client
        self.dictionary = dictionary
        self.has_been_inspected = has_been_inspected

    @classmethod
    def from_ps(cls, client, dictionary, **kwargs):
        """
        Construct a container object from the output of GET /containers/json.
        """
        new_dictionary = {
            'Id': dictionary['Id'],
            'Image': dictionary['Image'],
            # Normalise to the inspect-style name, which has a leading '/'.
            'Name': '/' + get_container_name(dictionary),
        }
        return cls(client, new_dictionary, **kwargs)

    @classmethod
    def from_id(cls, client, id):
        """Construct a container object by inspecting ``id`` via the API."""
        return cls(client, client.inspect_container(id))

    @classmethod
    def create(cls, client, **options):
        """Create a new container and return a Container object for it."""
        response = client.create_container(**options)
        return cls.from_id(client, response['Id'])

    @property
    def id(self):
        # Full container id as reported by the API.
        return self.dictionary['Id']

    @property
    def image(self):
        return self.dictionary['Image']

    @property
    def short_id(self):
        # Abbreviated id, as shown by `docker ps`.
        return self.id[:10]

    @property
    def name(self):
        # Strip the leading '/' from the inspect-style name.
        return self.dictionary['Name'][1:]

    @property
    def name_without_project(self):
        # Names look like '/project_service_number'; drop the project part.
        return '_'.join(self.dictionary['Name'].split('_')[1:])

    @property
    def number(self):
        """Instance number parsed from the name, or None if non-numeric."""
        try:
            return int(self.name.split('_')[-1])
        except ValueError:
            return None

    @property
    def ports(self):
        self.inspect_if_not_inspected()
        return self.get('NetworkSettings.Ports') or {}

    @property
    def human_readable_ports(self):
        """Ports formatted like `docker ps`, e.g. '0.0.0.0:80->80/tcp'."""
        def format_port(private, public):
            if not public:
                return private
            return '{HostIp}:{HostPort}->{private}'.format(
                private=private, **public[0])

        # .items() works on both Python 2 and 3 here, so six.iteritems()
        # is unnecessary.
        return ', '.join(format_port(*item)
                         for item in sorted(self.ports.items()))

    @property
    def human_readable_state(self):
        if self.is_running:
            return 'Ghost' if self.get('State.Ghost') else 'Up'
        else:
            return 'Exit %s' % self.get('State.ExitCode')

    @property
    def human_readable_command(self):
        entrypoint = self.get('Config.Entrypoint') or []
        cmd = self.get('Config.Cmd') or []
        return ' '.join(entrypoint + cmd)

    @property
    def environment(self):
        # Split only on the first '=' so values may themselves contain '='.
        return dict(var.split("=", 1) for var in self.get('Config.Env') or [])

    @property
    def is_running(self):
        return self.get('State.Running')

    def get(self, key):
        """Return a value from the container or None if the value is not set.

        :param key: a string using dotted notation for nested dictionary
            lookups
        """
        self.inspect_if_not_inspected()

        def get_value(dictionary, key):
            return (dictionary or {}).get(key)

        return reduce(get_value, key.split('.'), self.dictionary)

    def get_local_port(self, port, protocol='tcp'):
        """Return 'HostIp:HostPort' for a published port, or None."""
        port = self.ports.get("%s/%s" % (port, protocol))
        return "{HostIp}:{HostPort}".format(**port[0]) if port else None

    def start(self, **options):
        return self.client.start(self.id, **options)

    def stop(self, **options):
        return self.client.stop(self.id, **options)

    def kill(self, **options):
        return self.client.kill(self.id, **options)

    def restart(self):
        return self.client.restart(self.id)

    def remove(self, **options):
        return self.client.remove_container(self.id, **options)

    def inspect_if_not_inspected(self):
        if not self.has_been_inspected:
            self.inspect()

    def wait(self):
        return self.client.wait(self.id)

    def logs(self, *args, **kwargs):
        return self.client.logs(self.id, *args, **kwargs)

    def inspect(self):
        # Refresh (and cache) the full container dictionary from the API.
        self.dictionary = self.client.inspect_container(self.id)
        self.has_been_inspected = True
        return self.dictionary

    def links(self):
        """Return the names under which other containers link to this one."""
        links = []
        for container in self.client.containers():
            for name in container['Names']:
                bits = name.split('/')
                if len(bits) > 2 and bits[1] == self.name:
                    links.append(bits[2])
        return links

    def attach(self, *args, **kwargs):
        return self.client.attach(self.id, *args, **kwargs)

    def attach_socket(self, **kwargs):
        return self.client.attach_socket(self.id, **kwargs)

    def __repr__(self):
        # The format string previously had no placeholder, which made
        # `'' % self.name` raise a TypeError.
        return '<Container: %s>' % self.name

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self.id == other.id

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it
        # explicitly to keep != consistent with ==.
        return not self.__eq__(other)

    def __hash__(self):
        # Containers that compare equal (same id) must hash equally;
        # defining __eq__ alone leaves the default identity-based hash.
        return hash(self.id)


def get_container_name(container):
    """
    Extract the bare container name from an inspect dict ('Name') or a
    ps-style dict ('Names'); return None if neither key has a value.
    """
    if not container.get('Name') and not container.get('Names'):
        return None
    # inspect
    if 'Name' in container:
        return container['Name']
    # ps
    # A container may be listed under link aliases too; the name with the
    # fewest '/' segments is its own.
    shortest_name = min(container['Names'], key=lambda n: len(n.split('/')))
    return shortest_name.split('/')[-1]
26 | 27 | ### kill 28 | 29 | Forces running containers to stop by sending a `SIGKILL` signal. Optionally the 30 | signal can be passed, for example: 31 | 32 | $ docker-compose kill -s SIGINT 33 | 34 | ### logs 35 | 36 | Displays log output from services. 37 | 38 | ### port 39 | 40 | Prints the public port for a port binding 41 | 42 | ### ps 43 | 44 | Lists containers. 45 | 46 | ### pull 47 | 48 | Pulls service images. 49 | 50 | ### rm 51 | 52 | Removes stopped service containers. 53 | 54 | 55 | ### run 56 | 57 | Runs a one-off command on a service. 58 | 59 | For example, 60 | 61 | $ docker-compose run web python manage.py shell 62 | 63 | will start the `web` service and then run `manage.py shell` in python. 64 | Note that by default, linked services will also be started, unless they are 65 | already running. 66 | 67 | One-off commands are started in new containers with the same configuration as a 68 | normal container for that service, so volumes, links, etc will all be created as 69 | expected. When using `run`, there are two differences from bringing up a 70 | container normally: 71 | 72 | 1. the command will be overridden with the one specified. So, if you run 73 | `docker-compose run web bash`, the container's web command (which could default 74 | to, e.g., `python app.py`) will be overridden to `bash` 75 | 76 | 2. by default no ports will be created in case they collide with already opened 77 | ports. 78 | 79 | Links are also created between one-off commands and the other containers which 80 | are part of that service. So, for example, you could run: 81 | 82 | $ docker-compose run db psql -h db -U docker 83 | 84 | This would open up an interactive PostgreSQL shell for the linked `db` container 85 | (which would get created or started as needed). 
86 | 87 | If you do not want linked containers to start when running the one-off command, 88 | specify the `--no-deps` flag: 89 | 90 | $ docker-compose run --no-deps web python manage.py shell 91 | 92 | Similarly, if you do want the service's ports to be created and mapped to the 93 | host, specify the `--service-ports` flag: 94 | $ docker-compose run --service-ports web python manage.py shell 95 | 96 | ### scale 97 | 98 | Sets the number of containers to run for a service. 99 | 100 | Numbers are specified as arguments in the form `service=num`. For example: 101 | 102 | $ docker-compose scale web=2 worker=3 103 | 104 | ### start 105 | 106 | Starts existing containers for a service. 107 | 108 | ### stop 109 | 110 | Stops running containers without removing them. They can be started again with 111 | `docker-compose start`. 112 | 113 | ### up 114 | 115 | Builds, (re)creates, starts, and attaches to containers for a service. 116 | 117 | Linked services will be started, unless they are already running. 118 | 119 | By default, `docker-compose up` will aggregate the output of each container and, 120 | when it exits, all containers will be stopped. Running `docker-compose up -d`, 121 | will start the containers in the background and leave them running. 122 | 123 | By default, if there are existing containers for a service, `docker-compose up` will stop and recreate them (preserving mounted volumes with [volumes-from]), so that changes in `docker-compose.yml` are picked up. If you do not want containers stopped and recreated, use `docker-compose up --no-recreate`. This will still start any stopped containers, if needed. 
124 | 125 | [volumes-from]: http://docs.docker.io/en/latest/use/working_with_volumes/ 126 | 127 | ## Options 128 | 129 | ### --verbose 130 | 131 | Shows more output 132 | 133 | ### --version 134 | 135 | Prints version and exits 136 | 137 | ### -f, --file FILE 138 | 139 | Specifies an alternate Compose yaml file (default: `docker-compose.yml`) 140 | 141 | ### -p, --project-name NAME 142 | 143 | Specifies an alternate project name (default: current directory name) 144 | 145 | 146 | ## Environment Variables 147 | 148 | Several environment variables are available for you to configure Compose's behaviour. 149 | 150 | Variables starting with `DOCKER_` are the same as those used to configure the 151 | Docker command-line client. If you're using boot2docker, `$(boot2docker shellinit)` 152 | will set them to their correct values. 153 | 154 | ### COMPOSE\_PROJECT\_NAME 155 | 156 | Sets the project name, which is prepended to the name of every container started by Compose. Defaults to the `basename` of the current working directory. 157 | 158 | ### COMPOSE\_FILE 159 | 160 | Sets the path to the `docker-compose.yml` to use. Defaults to `docker-compose.yml` in the current working directory. 161 | 162 | ### DOCKER\_HOST 163 | 164 | Sets the URL of the docker daemon. As with the Docker client, defaults to `unix:///var/run/docker.sock`. 165 | 166 | ### DOCKER\_TLS\_VERIFY 167 | 168 | When set to anything other than an empty string, enables TLS communication with 169 | the daemon. 170 | 171 | ### DOCKER\_CERT\_PATH 172 | 173 | Configures the path to the `ca.pem`, `cert.pem`, and `key.pem` files used for TLS verification. Defaults to `~/.docker`. 
174 | 175 | ## Compose documentation 176 | 177 | - [Installing Compose](install.md) 178 | - [User guide](index.md) 179 | - [Yaml file reference](yml.md) 180 | - [Compose environment variables](env.md) 181 | - [Compose command line completion](completion.md) 182 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | page_title: Compose: Multi-container orchestration for Docker 2 | page_description: Introduction and Overview of Compose 3 | page_keywords: documentation, docs, docker, compose, orchestration, containers 4 | 5 | 6 | # Docker Compose 7 | 8 | ## Overview 9 | 10 | Compose is a tool for defining and running complex applications with Docker. 11 | With Compose, you define a multi-container application in a single file, then 12 | spin your application up in a single command which does everything that needs to 13 | be done to get it running. 14 | 15 | Compose is great for development environments, staging servers, and CI. We don't 16 | recommend that you use it in production yet. 17 | 18 | Using Compose is basically a three-step process. 19 | 20 | First, you define your app's environment with a `Dockerfile` so it can be 21 | reproduced anywhere: 22 | 23 | ```Dockerfile 24 | FROM python:2.7 25 | WORKDIR /code 26 | ADD requirements.txt /code/ 27 | RUN pip install -r requirements.txt 28 | ADD . /code 29 | CMD python app.py 30 | ``` 31 | 32 | Next, you define the services that make up your app in `docker-compose.yml` so 33 | they can be run together in an isolated environment: 34 | 35 | ```yaml 36 | web: 37 | build: . 38 | links: 39 | - db 40 | ports: 41 | - "8000:8000" 42 | db: 43 | image: postgres 44 | ``` 45 | 46 | Lastly, run `docker-compose up` and Compose will start and run your entire app. 
47 | 48 | Compose has commands for managing the whole lifecycle of your application: 49 | 50 | * Start, stop and rebuild services 51 | * View the status of running services 52 | * Stream the log output of running services 53 | * Run a one-off command on a service 54 | 55 | ## Compose documentation 56 | 57 | - [Installing Compose](install.md) 58 | - [Command line reference](cli.md) 59 | - [Yaml file reference](yml.md) 60 | - [Compose environment variables](env.md) 61 | - [Compose command line completion](completion.md) 62 | 63 | ## Quick start 64 | 65 | Let's get started with a walkthrough of getting a simple Python web app running 66 | on Compose. It assumes a little knowledge of Python, but the concepts 67 | demonstrated here should be understandable even if you're not familiar with 68 | Python. 69 | 70 | ### Installation and set-up 71 | 72 | First, [install Docker and Compose](install.md). 73 | 74 | Next, you'll want to make a directory for the project: 75 | 76 | $ mkdir composetest 77 | $ cd composetest 78 | 79 | Inside this directory, create `app.py`, a simple web app that uses the Flask 80 | framework and increments a value in Redis: 81 | 82 | ```python 83 | from flask import Flask 84 | from redis import Redis 85 | import os 86 | app = Flask(__name__) 87 | redis = Redis(host='redis', port=6379) 88 | 89 | @app.route('/') 90 | def hello(): 91 | redis.incr('hits') 92 | return 'Hello World! I have been seen %s times.' % redis.get('hits') 93 | 94 | if __name__ == "__main__": 95 | app.run(host="0.0.0.0", debug=True) 96 | ``` 97 | 98 | Next, define the Python dependencies in a file called `requirements.txt`: 99 | 100 | flask 101 | redis 102 | 103 | ### Create a Docker image 104 | 105 | Now, create a Docker image containing all of your app's dependencies. You 106 | specify how to build the image using a file called 107 | [`Dockerfile`](http://docs.docker.com/reference/builder/): 108 | 109 | FROM python:2.7 110 | ADD . 
/code 111 | WORKDIR /code 112 | RUN pip install -r requirements.txt 113 | 114 | This tells Docker to include Python, your code, and your Python dependencies in 115 | a Docker image. For more information on how to write Dockerfiles, see the 116 | [Docker user 117 | guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) 118 | and the 119 | [Dockerfile reference](http://docs.docker.com/reference/builder/). 120 | 121 | ### Define services 122 | 123 | Next, define a set of services using `docker-compose.yml`: 124 | 125 | web: 126 | build: . 127 | command: python app.py 128 | ports: 129 | - "5000:5000" 130 | volumes: 131 | - .:/code 132 | links: 133 | - redis 134 | redis: 135 | image: redis 136 | 137 | This defines two services: 138 | 139 | - `web`, which is built from the `Dockerfile` in the current directory. It also 140 | says to run the command `python app.py` inside the image, forward the exposed 141 | port 5000 on the container to port 5000 on the host machine, connect up the 142 | Redis service, and mount the current directory inside the container so we can 143 | work on code without having to rebuild the image. 144 | - `redis`, which uses the public image 145 | [redis](https://registry.hub.docker.com/_/redis/), which gets pulled from the 146 | Docker Hub registry. 147 | 148 | ### Build and run your app with Compose 149 | 150 | Now, when you run `docker-compose up`, Compose will pull a Redis image, build an 151 | image for your code, and start everything up: 152 | 153 | $ docker-compose up 154 | Pulling image redis... 155 | Building web... 156 | Starting composetest_redis_1... 157 | Starting composetest_web_1... 158 | redis_1 | [8] 02 Jan 18:43:35.576 # Server started, Redis version 2.8.3 159 | web_1 | * Running on http://0.0.0.0:5000/ 160 | 161 | The web app should now be listening on port 5000 on your Docker daemon host (if 162 | you're using Boot2docker, `boot2docker ip` will tell you its address). 
163 | 164 | If you want to run your services in the background, you can pass the `-d` flag 165 | (for daemon mode) to `docker-compose up` and use `docker-compose ps` to see what 166 | is currently running: 167 | 168 | $ docker-compose up -d 169 | Starting composetest_redis_1... 170 | Starting composetest_web_1... 171 | $ docker-compose ps 172 | Name Command State Ports 173 | ------------------------------------------------------------------- 174 | composetest_redis_1 /usr/local/bin/run Up 175 | composetest_web_1 /bin/sh -c python app.py Up 5000->5000/tcp 176 | 177 | The `docker-compose run` command allows you to run one-off commands for your 178 | services. For example, to see what environment variables are available to the 179 | `web` service: 180 | 181 | $ docker-compose run web env 182 | 183 | See `docker-compose --help` to see other available commands. 184 | 185 | If you started Compose with `docker-compose up -d`, you'll probably want to stop 186 | your services once you've finished with them: 187 | 188 | $ docker-compose stop 189 | 190 | At this point, you have seen the basics of how Compose works. 191 | 192 | - Next, try the quick start guide for [Django](django.md), 193 | [Rails](rails.md), or [Wordpress](wordpress.md). 194 | - See the reference guides for complete details on the [commands](cli.md), the 195 | [configuration file](yml.md) and [environment variables](env.md). 196 | 197 | ## Release Notes 198 | 199 | ### Version 1.2.0 (April 7, 2015) 200 | 201 | For complete information on this release, see the [1.2.0 Milestone project page](https://github.com/docker/compose/wiki/1.2.0-Milestone-Project-Page). 202 | In addition to bug fixes and refinements, this release adds the following: 203 | 204 | * The `extends` keyword, which adds the ability to extend services by sharing common configurations. For details, see 205 | [PR #972](https://github.com/docker/compose/pull/1088). 206 | 207 | * Better integration with Swarm. 
Swarm will now schedule inter-dependent 208 | containers on the same host. For details, see 209 | [PR #972](https://github.com/docker/compose/pull/972). 210 | -------------------------------------------------------------------------------- /tests/unit/sort_service_test.py: -------------------------------------------------------------------------------- 1 | from compose.project import sort_service_dicts, DependencyError 2 | from .. import unittest 3 | 4 | 5 | class SortServiceTest(unittest.TestCase): 6 | def test_sort_service_dicts_1(self): 7 | services = [ 8 | { 9 | 'links': ['redis'], 10 | 'name': 'web' 11 | }, 12 | { 13 | 'name': 'grunt' 14 | }, 15 | { 16 | 'name': 'redis' 17 | } 18 | ] 19 | 20 | sorted_services = sort_service_dicts(services) 21 | self.assertEqual(len(sorted_services), 3) 22 | self.assertEqual(sorted_services[0]['name'], 'grunt') 23 | self.assertEqual(sorted_services[1]['name'], 'redis') 24 | self.assertEqual(sorted_services[2]['name'], 'web') 25 | 26 | def test_sort_service_dicts_2(self): 27 | services = [ 28 | { 29 | 'links': ['redis', 'postgres'], 30 | 'name': 'web' 31 | }, 32 | { 33 | 'name': 'postgres', 34 | 'links': ['redis'] 35 | }, 36 | { 37 | 'name': 'redis' 38 | } 39 | ] 40 | 41 | sorted_services = sort_service_dicts(services) 42 | self.assertEqual(len(sorted_services), 3) 43 | self.assertEqual(sorted_services[0]['name'], 'redis') 44 | self.assertEqual(sorted_services[1]['name'], 'postgres') 45 | self.assertEqual(sorted_services[2]['name'], 'web') 46 | 47 | def test_sort_service_dicts_3(self): 48 | services = [ 49 | { 50 | 'name': 'child' 51 | }, 52 | { 53 | 'name': 'parent', 54 | 'links': ['child'] 55 | }, 56 | { 57 | 'links': ['parent'], 58 | 'name': 'grandparent' 59 | }, 60 | ] 61 | 62 | sorted_services = sort_service_dicts(services) 63 | self.assertEqual(len(sorted_services), 3) 64 | self.assertEqual(sorted_services[0]['name'], 'child') 65 | self.assertEqual(sorted_services[1]['name'], 'parent') 66 | 
self.assertEqual(sorted_services[2]['name'], 'grandparent') 67 | 68 | def test_sort_service_dicts_4(self): 69 | services = [ 70 | { 71 | 'name': 'child' 72 | }, 73 | { 74 | 'name': 'parent', 75 | 'volumes_from': ['child'] 76 | }, 77 | { 78 | 'links': ['parent'], 79 | 'name': 'grandparent' 80 | }, 81 | ] 82 | 83 | sorted_services = sort_service_dicts(services) 84 | self.assertEqual(len(sorted_services), 3) 85 | self.assertEqual(sorted_services[0]['name'], 'child') 86 | self.assertEqual(sorted_services[1]['name'], 'parent') 87 | self.assertEqual(sorted_services[2]['name'], 'grandparent') 88 | 89 | def test_sort_service_dicts_5(self): 90 | services = [ 91 | { 92 | 'links': ['parent'], 93 | 'name': 'grandparent' 94 | }, 95 | { 96 | 'name': 'parent', 97 | 'net': 'container:child' 98 | }, 99 | { 100 | 'name': 'child' 101 | } 102 | ] 103 | 104 | sorted_services = sort_service_dicts(services) 105 | self.assertEqual(len(sorted_services), 3) 106 | self.assertEqual(sorted_services[0]['name'], 'child') 107 | self.assertEqual(sorted_services[1]['name'], 'parent') 108 | self.assertEqual(sorted_services[2]['name'], 'grandparent') 109 | 110 | def test_sort_service_dicts_6(self): 111 | services = [ 112 | { 113 | 'links': ['parent'], 114 | 'name': 'grandparent' 115 | }, 116 | { 117 | 'name': 'parent', 118 | 'volumes_from': ['child'] 119 | }, 120 | { 121 | 'name': 'child' 122 | } 123 | ] 124 | 125 | sorted_services = sort_service_dicts(services) 126 | self.assertEqual(len(sorted_services), 3) 127 | self.assertEqual(sorted_services[0]['name'], 'child') 128 | self.assertEqual(sorted_services[1]['name'], 'parent') 129 | self.assertEqual(sorted_services[2]['name'], 'grandparent') 130 | 131 | def test_sort_service_dicts_7(self): 132 | services = [ 133 | { 134 | 'net': 'container:three', 135 | 'name': 'four' 136 | }, 137 | { 138 | 'links': ['two'], 139 | 'name': 'three' 140 | }, 141 | { 142 | 'name': 'two', 143 | 'volumes_from': ['one'] 144 | }, 145 | { 146 | 'name': 'one' 147 | } 148 | ] 
149 | 150 | sorted_services = sort_service_dicts(services) 151 | self.assertEqual(len(sorted_services), 4) 152 | self.assertEqual(sorted_services[0]['name'], 'one') 153 | self.assertEqual(sorted_services[1]['name'], 'two') 154 | self.assertEqual(sorted_services[2]['name'], 'three') 155 | self.assertEqual(sorted_services[3]['name'], 'four') 156 | 157 | def test_sort_service_dicts_circular_imports(self): 158 | services = [ 159 | { 160 | 'links': ['redis'], 161 | 'name': 'web' 162 | }, 163 | { 164 | 'name': 'redis', 165 | 'links': ['web'] 166 | }, 167 | ] 168 | 169 | try: 170 | sort_service_dicts(services) 171 | except DependencyError as e: 172 | self.assertIn('redis', e.msg) 173 | self.assertIn('web', e.msg) 174 | else: 175 | self.fail('Should have thrown an DependencyError') 176 | 177 | def test_sort_service_dicts_circular_imports_2(self): 178 | services = [ 179 | { 180 | 'links': ['postgres', 'redis'], 181 | 'name': 'web' 182 | }, 183 | { 184 | 'name': 'redis', 185 | 'links': ['web'] 186 | }, 187 | { 188 | 'name': 'postgres' 189 | } 190 | ] 191 | 192 | try: 193 | sort_service_dicts(services) 194 | except DependencyError as e: 195 | self.assertIn('redis', e.msg) 196 | self.assertIn('web', e.msg) 197 | else: 198 | self.fail('Should have thrown an DependencyError') 199 | 200 | def test_sort_service_dicts_circular_imports_3(self): 201 | services = [ 202 | { 203 | 'links': ['b'], 204 | 'name': 'a' 205 | }, 206 | { 207 | 'name': 'b', 208 | 'links': ['c'] 209 | }, 210 | { 211 | 'name': 'c', 212 | 'links': ['a'] 213 | } 214 | ] 215 | 216 | try: 217 | sort_service_dicts(services) 218 | except DependencyError as e: 219 | self.assertIn('a', e.msg) 220 | self.assertIn('b', e.msg) 221 | else: 222 | self.fail('Should have thrown an DependencyError') 223 | 224 | def test_sort_service_dicts_self_imports(self): 225 | services = [ 226 | { 227 | 'links': ['web'], 228 | 'name': 'web' 229 | }, 230 | ] 231 | 232 | try: 233 | sort_service_dicts(services) 234 | except DependencyError as 
e: 235 | self.assertIn('web', e.msg) 236 | else: 237 | self.fail('Should have thrown an DependencyError') 238 | -------------------------------------------------------------------------------- /docs/yml.md: -------------------------------------------------------------------------------- 1 | --- 2 | layout: default 3 | title: docker-compose.yml reference 4 | page_title: docker-compose.yml reference 5 | page_description: docker-compose.yml reference 6 | page_keywords: fig, composition, compose, docker 7 | --- 8 | 9 | # docker-compose.yml reference 10 | 11 | Each service defined in `docker-compose.yml` must specify exactly one of 12 | `image` or `build`. Other keys are optional, and are analogous to their 13 | `docker run` command-line counterparts. 14 | 15 | As with `docker run`, options specified in the Dockerfile (e.g., `CMD`, 16 | `EXPOSE`, `VOLUME`, `ENV`) are respected by default - you don't need to 17 | specify them again in `docker-compose.yml`. 18 | 19 | ### image 20 | 21 | Tag or partial image ID. Can be local or remote - Compose will attempt to 22 | pull if it doesn't exist locally. 23 | 24 | ``` 25 | image: ubuntu 26 | image: orchardup/postgresql 27 | image: a4bc65fd 28 | ``` 29 | 30 | ### build 31 | 32 | Path to a directory containing a Dockerfile. When the value supplied is a 33 | relative path, it is interpreted as relative to the location of the yml file 34 | itself. This directory is also the build context that is sent to the Docker daemon. 35 | 36 | Compose will build and tag it with a generated name, and use that image thereafter. 37 | 38 | ``` 39 | build: /path/to/build/dir 40 | ``` 41 | 42 | ### command 43 | 44 | Override the default command. 45 | 46 | ``` 47 | command: bundle exec thin -p 3000 48 | ``` 49 | 50 | 51 | ### links 52 | 53 | Link to containers in another service. Either specify both the service name and 54 | the link alias (`SERVICE:ALIAS`), or just the service name (which will also be 55 | used for the alias). 
56 | 57 | ``` 58 | links: 59 | - db 60 | - db:database 61 | - redis 62 | ``` 63 | 64 | An entry with the alias' name will be created in `/etc/hosts` inside containers 65 | for this service, e.g: 66 | 67 | ``` 68 | 172.17.2.186 db 69 | 172.17.2.186 database 70 | 172.17.2.187 redis 71 | ``` 72 | 73 | Environment variables will also be created - see the [environment variable 74 | reference](env.md) for details. 75 | 76 | ### external_links 77 | 78 | Link to containers started outside this `docker-compose.yml` or even outside 79 | of Compose, especially for containers that provide shared or common services. 80 | `external_links` follow semantics similar to `links` when specifying both the 81 | container name and the link alias (`CONTAINER:ALIAS`). 82 | 83 | ``` 84 | external_links: 85 | - redis_1 86 | - project_db_1:mysql 87 | - project_db_1:postgresql 88 | ``` 89 | 90 | ### ports 91 | 92 | Expose ports. Either specify both ports (`HOST:CONTAINER`), or just the container 93 | port (a random host port will be chosen). 94 | 95 | > **Note:** When mapping ports in the `HOST:CONTAINER` format, you may experience 96 | > erroneous results when using a container port lower than 60, because YAML will 97 | > parse numbers in the format `xx:yy` as sexagesimal (base 60). For this reason, 98 | > we recommend always explicitly specifying your port mappings as strings. 99 | 100 | ``` 101 | ports: 102 | - "3000" 103 | - "8000:8000" 104 | - "49100:22" 105 | - "127.0.0.1:8001:8001" 106 | ``` 107 | 108 | ### expose 109 | 110 | Expose ports without publishing them to the host machine - they'll only be 111 | accessible to linked services. Only the internal port can be specified. 112 | 113 | ``` 114 | expose: 115 | - "3000" 116 | - "8000" 117 | ``` 118 | 119 | ### volumes 120 | 121 | Mount paths as volumes, optionally specifying a path on the host machine 122 | (`HOST:CONTAINER`), or an access mode (`HOST:CONTAINER:ro`). 
123 | 124 | ``` 125 | volumes: 126 | - /var/lib/mysql 127 | - cache/:/tmp/cache 128 | - ~/configs:/etc/configs/:ro 129 | ``` 130 | 131 | ### volumes_from 132 | 133 | Mount all of the volumes from another service or container. 134 | 135 | ``` 136 | volumes_from: 137 | - service_name 138 | - container_name 139 | ``` 140 | 141 | ### environment 142 | 143 | Add environment variables. You can use either an array or a dictionary. 144 | 145 | Environment variables with only a key are resolved to their values on the 146 | machine Compose is running on, which can be helpful for secret or host-specific values. 147 | 148 | ``` 149 | environment: 150 | RACK_ENV: development 151 | SESSION_SECRET: 152 | 153 | environment: 154 | - RACK_ENV=development 155 | - SESSION_SECRET 156 | ``` 157 | 158 | ### env_file 159 | 160 | Add environment variables from a file. Can be a single value or a list. 161 | 162 | If you have specified a Compose file with `docker-compose -f FILE`, paths in 163 | `env_file` are relative to the directory that file is in. 164 | 165 | Environment variables specified in `environment` override these values. 166 | 167 | ``` 168 | env_file: .env 169 | 170 | env_file: 171 | - ./common.env 172 | - ./apps/web.env 173 | - /opt/secrets.env 174 | ``` 175 | 176 | Compose expects each line in an env file to be in `VAR=VAL` format. Lines 177 | beginning with `#` (i.e. comments) are ignored, as are blank lines. 178 | 179 | ``` 180 | # Set Rails/Rack environment 181 | RACK_ENV=development 182 | ``` 183 | 184 | ### extends 185 | 186 | Extend another service, in the current file or another, optionally overriding 187 | configuration. 188 | 189 | Here's a simple example. Suppose we have 2 files - **common.yml** and 190 | **development.yml**. 
We can use `extends` to define a service in 191 | **development.yml** which uses configuration defined in **common.yml**: 192 | 193 | **common.yml** 194 | 195 | ``` 196 | webapp: 197 | build: ./webapp 198 | environment: 199 | - DEBUG=false 200 | - SEND_EMAILS=false 201 | ``` 202 | 203 | **development.yml** 204 | 205 | ``` 206 | web: 207 | extends: 208 | file: common.yml 209 | service: webapp 210 | ports: 211 | - "8000:8000" 212 | links: 213 | - db 214 | environment: 215 | - DEBUG=true 216 | db: 217 | image: postgres 218 | ``` 219 | 220 | Here, the `web` service in **development.yml** inherits the configuration of 221 | the `webapp` service in **common.yml** - the `build` and `environment` keys - 222 | and adds `ports` and `links` configuration. It overrides one of the defined 223 | environment variables (DEBUG) with a new value, and the other one 224 | (SEND_EMAILS) is left untouched. 225 | 226 | For more on `extends`, see the [tutorial](extends.md#example) and 227 | [reference](extends.md#reference). 228 | 229 | ### net 230 | 231 | Networking mode. Use the same values as the docker client `--net` parameter. 232 | 233 | ``` 234 | net: "bridge" 235 | net: "none" 236 | net: "container:[name or id]" 237 | net: "host" 238 | ``` 239 | ### pid 240 | 241 | ``` 242 | pid: "host" 243 | ``` 244 | 245 | Sets the PID mode to the host PID mode. This turns on sharing between 246 | container and the host operating system the PID address space. Containers 247 | launched with this flag will be able to access and manipulate other 248 | containers in the bare-metal machine's namespace and vise-versa. 249 | 250 | ### dns 251 | 252 | Custom DNS servers. Can be a single value or a list. 253 | 254 | ``` 255 | dns: 8.8.8.8 256 | dns: 257 | - 8.8.8.8 258 | - 9.9.9.9 259 | ``` 260 | 261 | ### cap_add, cap_drop 262 | 263 | Add or drop container capabilities. 264 | See `man 7 capabilities` for a full list. 
265 | 266 | ``` 267 | cap_add: 268 | - ALL 269 | 270 | cap_drop: 271 | - NET_ADMIN 272 | - SYS_ADMIN 273 | ``` 274 | 275 | ### dns_search 276 | 277 | Custom DNS search domains. Can be a single value or a list. 278 | 279 | ``` 280 | dns_search: example.com 281 | dns_search: 282 | - dc1.example.com 283 | - dc2.example.com 284 | ``` 285 | 286 | ### working\_dir, entrypoint, user, hostname, domainname, mem\_limit, privileged, restart, stdin\_open, tty, cpu\_shares 287 | 288 | Each of these is a single value, analogous to its 289 | [docker run](https://docs.docker.com/reference/run/) counterpart. 290 | 291 | ``` 292 | cpu_shares: 73 293 | 294 | working_dir: /code 295 | entrypoint: /code/entrypoint.sh 296 | user: postgresql 297 | 298 | hostname: foo 299 | domainname: foo.com 300 | 301 | mem_limit: 1000000000 302 | privileged: true 303 | 304 | restart: always 305 | 306 | stdin_open: true 307 | tty: true 308 | ``` 309 | 310 | ## Compose documentation 311 | 312 | - [Installing Compose](install.md) 313 | - [User guide](index.md) 314 | - [Command line reference](cli.md) 315 | - [Compose environment variables](env.md) 316 | - [Compose command line completion](completion.md) 317 | -------------------------------------------------------------------------------- /contrib/completion/bash/docker-compose: -------------------------------------------------------------------------------- 1 | #!bash 2 | # 3 | # bash completion for docker-compose 4 | # 5 | # This work is based on the completion for the docker command. 6 | # 7 | # This script provides completion of: 8 | # - commands and their options 9 | # - service names 10 | # - filepaths 11 | # 12 | # To enable the completions either: 13 | # - place this file in /etc/bash_completion.d 14 | # or 15 | # - copy this file to e.g. ~/.docker-compose-completion.sh and add the line 16 | # below to your .bashrc after bash completion features are loaded 17 | # . 
~/.docker-compose-completion.sh 18 | 19 | 20 | # For compatibility reasons, Compose and therefore its completion supports several 21 | # stack compositon files as listed here, in descending priority. 22 | # Support for these filenames might be dropped in some future version. 23 | __docker-compose_compose_file() { 24 | local file 25 | for file in docker-compose.y{,a}ml fig.y{,a}ml ; do 26 | [ -e $file ] && { 27 | echo $file 28 | return 29 | } 30 | done 31 | echo docker-compose.yml 32 | } 33 | 34 | # Extracts all service names from the compose file. 35 | ___docker-compose_all_services_in_compose_file() { 36 | awk -F: '/^[a-zA-Z0-9]/{print $1}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null 37 | } 38 | 39 | # All services, even those without an existing container 40 | __docker-compose_services_all() { 41 | COMPREPLY=( $(compgen -W "$(___docker-compose_all_services_in_compose_file)" -- "$cur") ) 42 | } 43 | 44 | # All services that have an entry with the given key in their compose_file section 45 | ___docker-compose_services_with_key() { 46 | # flatten sections to one line, then filter lines containing the key and return section name. 47 | awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' "${compose_file:-$(__docker-compose_compose_file)}" | awk -F: -v key=": +$1:" '$0 ~ key {print $1}' 48 | } 49 | 50 | # All services that are defined by a Dockerfile reference 51 | __docker-compose_services_from_build() { 52 | COMPREPLY=( $(compgen -W "$(___docker-compose_services_with_key build)" -- "$cur") ) 53 | } 54 | 55 | # All services that are defined by an image 56 | __docker-compose_services_from_image() { 57 | COMPREPLY=( $(compgen -W "$(___docker-compose_services_with_key image)" -- "$cur") ) 58 | } 59 | 60 | # The services for which containers have been created, optionally filtered 61 | # by a boolean expression passed in as argument. 
62 | __docker-compose_services_with() { 63 | local containers names 64 | containers="$(docker-compose 2>/dev/null ${compose_file:+-f $compose_file} ${compose_project:+-p $compose_project} ps -q)" 65 | names=( $(docker 2>/dev/null inspect --format "{{if ${1:-true}}} {{ .Name }} {{end}}" $containers) ) 66 | names=( ${names[@]%_*} ) # strip trailing numbers 67 | names=( ${names[@]#*_} ) # strip project name 68 | COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") ) 69 | } 70 | 71 | # The services for which at least one running container exists 72 | __docker-compose_services_running() { 73 | __docker-compose_services_with '.State.Running' 74 | } 75 | 76 | # The services for which at least one stopped container exists 77 | __docker-compose_services_stopped() { 78 | __docker-compose_services_with 'not .State.Running' 79 | } 80 | 81 | 82 | _docker-compose_build() { 83 | case "$cur" in 84 | -*) 85 | COMPREPLY=( $( compgen -W "--no-cache" -- "$cur" ) ) 86 | ;; 87 | *) 88 | __docker-compose_services_from_build 89 | ;; 90 | esac 91 | } 92 | 93 | 94 | _docker-compose_docker-compose() { 95 | case "$prev" in 96 | --file|-f) 97 | _filedir y?(a)ml 98 | return 99 | ;; 100 | --project-name|-p) 101 | return 102 | ;; 103 | esac 104 | 105 | case "$cur" in 106 | -*) 107 | COMPREPLY=( $( compgen -W "--help -h --verbose --version --file -f --project-name -p" -- "$cur" ) ) 108 | ;; 109 | *) 110 | COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) 111 | ;; 112 | esac 113 | } 114 | 115 | 116 | _docker-compose_help() { 117 | COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) 118 | } 119 | 120 | 121 | _docker-compose_kill() { 122 | case "$prev" in 123 | -s) 124 | COMPREPLY=( $( compgen -W "SIGHUP SIGINT SIGKILL SIGUSR1 SIGUSR2" -- "$(echo $cur | tr '[:lower:]' '[:upper:]')" ) ) 125 | return 126 | ;; 127 | esac 128 | 129 | case "$cur" in 130 | -*) 131 | COMPREPLY=( $( compgen -W "-s" -- "$cur" ) ) 132 | ;; 133 | *) 134 | __docker-compose_services_running 135 | ;; 136 | esac 137 | } 
138 | 139 | 140 | _docker-compose_logs() { 141 | case "$cur" in 142 | -*) 143 | COMPREPLY=( $( compgen -W "--no-color" -- "$cur" ) ) 144 | ;; 145 | *) 146 | __docker-compose_services_all 147 | ;; 148 | esac 149 | } 150 | 151 | 152 | _docker-compose_port() { 153 | case "$prev" in 154 | --protocol) 155 | COMPREPLY=( $( compgen -W "tcp udp" -- "$cur" ) ) 156 | return; 157 | ;; 158 | --index) 159 | return; 160 | ;; 161 | esac 162 | 163 | case "$cur" in 164 | -*) 165 | COMPREPLY=( $( compgen -W "--protocol --index" -- "$cur" ) ) 166 | ;; 167 | *) 168 | __docker-compose_services_all 169 | ;; 170 | esac 171 | } 172 | 173 | 174 | _docker-compose_ps() { 175 | case "$cur" in 176 | -*) 177 | COMPREPLY=( $( compgen -W "-q" -- "$cur" ) ) 178 | ;; 179 | *) 180 | __docker-compose_services_all 181 | ;; 182 | esac 183 | } 184 | 185 | 186 | _docker-compose_pull() { 187 | case "$cur" in 188 | -*) 189 | COMPREPLY=( $( compgen -W "--allow-insecure-ssl" -- "$cur" ) ) 190 | ;; 191 | *) 192 | __docker-compose_services_from_image 193 | ;; 194 | esac 195 | } 196 | 197 | 198 | _docker-compose_restart() { 199 | case "$prev" in 200 | -t | --timeout) 201 | return 202 | ;; 203 | esac 204 | 205 | case "$cur" in 206 | -*) 207 | COMPREPLY=( $( compgen -W "-t --timeout" -- "$cur" ) ) 208 | ;; 209 | *) 210 | __docker-compose_services_running 211 | ;; 212 | esac 213 | } 214 | 215 | 216 | _docker-compose_rm() { 217 | case "$cur" in 218 | -*) 219 | COMPREPLY=( $( compgen -W "--force -f -v" -- "$cur" ) ) 220 | ;; 221 | *) 222 | __docker-compose_services_stopped 223 | ;; 224 | esac 225 | } 226 | 227 | 228 | _docker-compose_run() { 229 | case "$prev" in 230 | -e) 231 | COMPREPLY=( $( compgen -e -- "$cur" ) ) 232 | compopt -o nospace 233 | return 234 | ;; 235 | --entrypoint) 236 | return 237 | ;; 238 | esac 239 | 240 | case "$cur" in 241 | -*) 242 | COMPREPLY=( $( compgen -W "--allow-insecure-ssl -d --entrypoint -e --no-deps --rm --service-ports -T" -- "$cur" ) ) 243 | ;; 244 | *) 245 | 
__docker-compose_services_all 246 | ;; 247 | esac 248 | } 249 | 250 | 251 | _docker-compose_scale() { 252 | case "$prev" in 253 | =) 254 | COMPREPLY=("$cur") 255 | ;; 256 | *) 257 | COMPREPLY=( $(compgen -S "=" -W "$(___docker-compose_all_services_in_compose_file)" -- "$cur") ) 258 | compopt -o nospace 259 | ;; 260 | esac 261 | } 262 | 263 | 264 | _docker-compose_start() { 265 | __docker-compose_services_stopped 266 | } 267 | 268 | 269 | _docker-compose_stop() { 270 | case "$prev" in 271 | -t | --timeout) 272 | return 273 | ;; 274 | esac 275 | 276 | case "$cur" in 277 | -*) 278 | COMPREPLY=( $( compgen -W "-t --timeout" -- "$cur" ) ) 279 | ;; 280 | *) 281 | __docker-compose_services_running 282 | ;; 283 | esac 284 | } 285 | 286 | 287 | _docker-compose_up() { 288 | case "$prev" in 289 | -t | --timeout) 290 | return 291 | ;; 292 | esac 293 | 294 | case "$cur" in 295 | -*) 296 | COMPREPLY=( $( compgen -W "--allow-insecure-ssl -d --no-build --no-color --no-deps --no-recreate -t --timeout" -- "$cur" ) ) 297 | ;; 298 | *) 299 | __docker-compose_services_all 300 | ;; 301 | esac 302 | } 303 | 304 | 305 | _docker-compose() { 306 | local commands=( 307 | build 308 | help 309 | kill 310 | logs 311 | port 312 | ps 313 | pull 314 | restart 315 | rm 316 | run 317 | scale 318 | start 319 | stop 320 | up 321 | ) 322 | 323 | COMPREPLY=() 324 | local cur prev words cword 325 | _get_comp_words_by_ref -n : cur prev words cword 326 | 327 | # search subcommand and invoke its handler. 
328 | # special treatment of some top-level options 329 | local command='docker-compose' 330 | local counter=1 331 | local compose_file compose_project 332 | while [ $counter -lt $cword ]; do 333 | case "${words[$counter]}" in 334 | -f|--file) 335 | (( counter++ )) 336 | compose_file="${words[$counter]}" 337 | ;; 338 | -p|--project-name) 339 | (( counter++ )) 340 | compose_project="${words[$counter]}" 341 | ;; 342 | -*) 343 | ;; 344 | *) 345 | command="${words[$counter]}" 346 | break 347 | ;; 348 | esac 349 | (( counter++ )) 350 | done 351 | 352 | local completions_func=_docker-compose_${command} 353 | declare -F $completions_func >/dev/null && $completions_func 354 | 355 | return 0 356 | } 357 | 358 | complete -F _docker-compose docker-compose 359 | -------------------------------------------------------------------------------- /tests/unit/project_test.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from .. 
import unittest 3 | from compose.service import Service 4 | from compose.project import Project 5 | from compose.container import Container 6 | from compose import config 7 | 8 | import mock 9 | import docker 10 | 11 | 12 | class ProjectTest(unittest.TestCase): 13 | def test_from_dict(self): 14 | project = Project.from_dicts('composetest', [ 15 | { 16 | 'name': 'web', 17 | 'image': 'busybox:latest' 18 | }, 19 | { 20 | 'name': 'db', 21 | 'image': 'busybox:latest' 22 | }, 23 | ], None) 24 | self.assertEqual(len(project.services), 2) 25 | self.assertEqual(project.get_service('web').name, 'web') 26 | self.assertEqual(project.get_service('web').options['image'], 'busybox:latest') 27 | self.assertEqual(project.get_service('db').name, 'db') 28 | self.assertEqual(project.get_service('db').options['image'], 'busybox:latest') 29 | 30 | def test_from_dict_sorts_in_dependency_order(self): 31 | project = Project.from_dicts('composetest', [ 32 | { 33 | 'name': 'web', 34 | 'image': 'busybox:latest', 35 | 'links': ['db'], 36 | }, 37 | { 38 | 'name': 'db', 39 | 'image': 'busybox:latest', 40 | 'volumes_from': ['volume'] 41 | }, 42 | { 43 | 'name': 'volume', 44 | 'image': 'busybox:latest', 45 | 'volumes': ['/tmp'], 46 | } 47 | ], None) 48 | 49 | self.assertEqual(project.services[0].name, 'volume') 50 | self.assertEqual(project.services[1].name, 'db') 51 | self.assertEqual(project.services[2].name, 'web') 52 | 53 | def test_from_config(self): 54 | dicts = config.from_dictionary({ 55 | 'web': { 56 | 'image': 'busybox:latest', 57 | }, 58 | 'db': { 59 | 'image': 'busybox:latest', 60 | }, 61 | }) 62 | project = Project.from_dicts('composetest', dicts, None) 63 | self.assertEqual(len(project.services), 2) 64 | self.assertEqual(project.get_service('web').name, 'web') 65 | self.assertEqual(project.get_service('web').options['image'], 'busybox:latest') 66 | self.assertEqual(project.get_service('db').name, 'db') 67 | self.assertEqual(project.get_service('db').options['image'], 
'busybox:latest') 68 | 69 | def test_get_service(self): 70 | web = Service( 71 | project='composetest', 72 | name='web', 73 | client=None, 74 | image="busybox:latest", 75 | ) 76 | project = Project('test', [web], None) 77 | self.assertEqual(project.get_service('web'), web) 78 | 79 | def test_get_services_returns_all_services_without_args(self): 80 | web = Service( 81 | project='composetest', 82 | name='web', 83 | ) 84 | console = Service( 85 | project='composetest', 86 | name='console', 87 | ) 88 | project = Project('test', [web, console], None) 89 | self.assertEqual(project.get_services(), [web, console]) 90 | 91 | def test_get_services_returns_listed_services_with_args(self): 92 | web = Service( 93 | project='composetest', 94 | name='web', 95 | ) 96 | console = Service( 97 | project='composetest', 98 | name='console', 99 | ) 100 | project = Project('test', [web, console], None) 101 | self.assertEqual(project.get_services(['console']), [console]) 102 | 103 | def test_get_services_with_include_links(self): 104 | db = Service( 105 | project='composetest', 106 | name='db', 107 | ) 108 | web = Service( 109 | project='composetest', 110 | name='web', 111 | links=[(db, 'database')] 112 | ) 113 | cache = Service( 114 | project='composetest', 115 | name='cache' 116 | ) 117 | console = Service( 118 | project='composetest', 119 | name='console', 120 | links=[(web, 'web')] 121 | ) 122 | project = Project('test', [web, db, cache, console], None) 123 | self.assertEqual( 124 | project.get_services(['console'], include_deps=True), 125 | [db, web, console] 126 | ) 127 | 128 | def test_get_services_removes_duplicates_following_links(self): 129 | db = Service( 130 | project='composetest', 131 | name='db', 132 | ) 133 | web = Service( 134 | project='composetest', 135 | name='web', 136 | links=[(db, 'database')] 137 | ) 138 | project = Project('test', [web, db], None) 139 | self.assertEqual( 140 | project.get_services(['web', 'db'], include_deps=True), 141 | [db, web] 142 | ) 143 | 
def test_use_volumes_from_container(self):
    """volumes_from naming a raw container resolves to that container's id."""
    cid = 'aabbccddee'
    client = mock.create_autospec(docker.Client)
    client.inspect_container.return_value = dict(Name='aaa', Id=cid)
    project = Project.from_dicts('test', [
        {
            'name': 'test',
            'image': 'busybox:latest',
            'volumes_from': ['aaa'],
        },
    ], client)
    self.assertEqual(project.get_service('test')._get_volumes_from(), [cid])

def test_use_volumes_from_service_no_container(self):
    """volumes_from naming a service with no live container yields the
    conventional <project>_<service>_1 container name."""
    expected = 'test_vol_1'
    client = mock.create_autospec(docker.Client)
    client.containers.return_value = [
        {
            'Name': expected,
            'Names': [expected],
            'Id': expected,
            'Image': 'busybox:latest',
        },
    ]
    project = Project.from_dicts('test', [
        {'name': 'vol', 'image': 'busybox:latest'},
        {'name': 'test', 'image': 'busybox:latest', 'volumes_from': ['vol']},
    ], client)
    self.assertEqual(project.get_service('test')._get_volumes_from(), [expected])

@mock.patch.object(Service, 'containers')
def test_use_volumes_from_service_container(self, mock_return):
    """volumes_from naming a service with containers resolves to their ids."""
    ids = ['aabbccddee', '12345']
    mock_return.return_value = [
        mock.Mock(id=cid, spec=Container) for cid in ids
    ]
    project = Project.from_dicts('test', [
        {'name': 'vol', 'image': 'busybox:latest'},
        {'name': 'test', 'image': 'busybox:latest', 'volumes_from': ['vol']},
    ], None)
    self.assertEqual(project.get_service('test')._get_volumes_from(), ids)
mock.create_autospec(docker.Client) 206 | mock_client.inspect_container.return_value = container_dict 207 | project = Project.from_dicts('test', [ 208 | { 209 | 'name': 'test', 210 | 'image': 'busybox:latest', 211 | 'net': 'container:aaa' 212 | } 213 | ], mock_client) 214 | service = project.get_service('test') 215 | self.assertEqual(service._get_net(), 'container:' + container_id) 216 | 217 | def test_use_net_from_service(self): 218 | container_name = 'test_aaa_1' 219 | mock_client = mock.create_autospec(docker.Client) 220 | mock_client.containers.return_value = [ 221 | { 222 | "Name": container_name, 223 | "Names": [container_name], 224 | "Id": container_name, 225 | "Image": 'busybox:latest' 226 | } 227 | ] 228 | project = Project.from_dicts('test', [ 229 | { 230 | 'name': 'aaa', 231 | 'image': 'busybox:latest' 232 | }, 233 | { 234 | 'name': 'test', 235 | 'image': 'busybox:latest', 236 | 'net': 'container:aaa' 237 | } 238 | ], mock_client) 239 | 240 | service = project.get_service('test') 241 | self.assertEqual(service._get_net(), 'container:' + container_name) 242 | -------------------------------------------------------------------------------- /compose/project.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | import logging 4 | 5 | from functools import reduce 6 | from .config import get_service_name_from_net, ConfigurationError 7 | from .service import Service 8 | from .container import Container 9 | from docker.errors import APIError 10 | 11 | log = logging.getLogger(__name__) 12 | 13 | 14 | def sort_service_dicts(services): 15 | # Topological sort (Cormen/Tarjan algorithm). 
16 | unmarked = services[:] 17 | temporary_marked = set() 18 | sorted_services = [] 19 | 20 | def get_service_names(links): 21 | return [link.split(':')[0] for link in links] 22 | 23 | def get_service_dependents(service_dict, services): 24 | name = service_dict['name'] 25 | return [ 26 | service for service in services 27 | if (name in get_service_names(service.get('links', [])) or 28 | name in service.get('volumes_from', []) or 29 | name == get_service_name_from_net(service.get('net'))) 30 | ] 31 | 32 | def visit(n): 33 | if n['name'] in temporary_marked: 34 | if n['name'] in get_service_names(n.get('links', [])): 35 | raise DependencyError('A service can not link to itself: %s' % n['name']) 36 | if n['name'] in n.get('volumes_from', []): 37 | raise DependencyError('A service can not mount itself as volume: %s' % n['name']) 38 | else: 39 | raise DependencyError('Circular import between %s' % ' and '.join(temporary_marked)) 40 | if n in unmarked: 41 | temporary_marked.add(n['name']) 42 | for m in get_service_dependents(n, services): 43 | visit(m) 44 | temporary_marked.remove(n['name']) 45 | unmarked.remove(n) 46 | sorted_services.insert(0, n) 47 | 48 | while unmarked: 49 | visit(unmarked[-1]) 50 | 51 | return sorted_services 52 | 53 | 54 | class Project(object): 55 | """ 56 | A collection of services. 57 | """ 58 | def __init__(self, name, services, client): 59 | self.name = name 60 | self.services = services 61 | self.client = client 62 | 63 | @classmethod 64 | def from_dicts(cls, name, service_dicts, client): 65 | """ 66 | Construct a ServiceCollection from a list of dicts representing services. 
67 | """ 68 | project = cls(name, [], client) 69 | for service_dict in sort_service_dicts(service_dicts): 70 | links = project.get_links(service_dict) 71 | volumes_from = project.get_volumes_from(service_dict) 72 | net = project.get_net(service_dict) 73 | 74 | project.services.append(Service(client=client, project=name, links=links, net=net, 75 | volumes_from=volumes_from, **service_dict)) 76 | return project 77 | 78 | def get_service(self, name): 79 | """ 80 | Retrieve a service by name. Raises NoSuchService 81 | if the named service does not exist. 82 | """ 83 | for service in self.services: 84 | if service.name == name: 85 | return service 86 | 87 | raise NoSuchService(name) 88 | 89 | def get_services(self, service_names=None, include_deps=False): 90 | """ 91 | Returns a list of this project's services filtered 92 | by the provided list of names, or all services if service_names is None 93 | or []. 94 | 95 | If include_deps is specified, returns a list including the dependencies for 96 | service_names, in order of dependency. 97 | 98 | Preserves the original order of self.services where possible, 99 | reordering as needed to resolve dependencies. 100 | 101 | Raises NoSuchService if any of the named services do not exist. 
102 | """ 103 | if service_names is None or len(service_names) == 0: 104 | return self.get_services( 105 | service_names=[s.name for s in self.services], 106 | include_deps=include_deps 107 | ) 108 | else: 109 | unsorted = [self.get_service(name) for name in service_names] 110 | services = [s for s in self.services if s in unsorted] 111 | 112 | if include_deps: 113 | services = reduce(self._inject_deps, services, []) 114 | 115 | uniques = [] 116 | [uniques.append(s) for s in services if s not in uniques] 117 | return uniques 118 | 119 | def get_links(self, service_dict): 120 | links = [] 121 | if 'links' in service_dict: 122 | for link in service_dict.get('links', []): 123 | if ':' in link: 124 | service_name, link_name = link.split(':', 1) 125 | else: 126 | service_name, link_name = link, None 127 | try: 128 | links.append((self.get_service(service_name), link_name)) 129 | except NoSuchService: 130 | raise ConfigurationError('Service "%s" has a link to service "%s" which does not exist.' % (service_dict['name'], service_name)) 131 | del service_dict['links'] 132 | return links 133 | 134 | def get_volumes_from(self, service_dict): 135 | volumes_from = [] 136 | if 'volumes_from' in service_dict: 137 | for volume_name in service_dict.get('volumes_from', []): 138 | try: 139 | service = self.get_service(volume_name) 140 | volumes_from.append(service) 141 | except NoSuchService: 142 | try: 143 | container = Container.from_id(self.client, volume_name) 144 | volumes_from.append(container) 145 | except APIError: 146 | raise ConfigurationError('Service "%s" mounts volumes from "%s", which is not the name of a service or container.' 
def get_net(self, service_dict):
    """
    Resolve the 'net' option of a service dict.

    When the value refers to another service or container
    (``container:<name>``), replace it with the resolved Service or
    Container object; otherwise keep the literal value (e.g. 'host').
    The 'net' key is removed from service_dict as a side effect.
    Returns 'bridge' when no 'net' option is present.

    Raises ConfigurationError if the referenced name is neither a
    service nor a container.
    """
    if 'net' not in service_dict:
        # Docker's default network mode.
        return 'bridge'

    net_name = get_service_name_from_net(service_dict.get('net'))
    if net_name:
        try:
            net = self.get_service(net_name)
        except NoSuchService:
            try:
                net = Container.from_id(self.client, net_name)
            except APIError:
                # NOTE: fixed typo in the user-facing message ("Serivce" -> "Service").
                raise ConfigurationError('Service "%s" is trying to use the network of "%s", which is not the name of a service or container.' % (service_dict['name'], net_name))
    else:
        net = service_dict['net']

    del service_dict['net']
    return net

def start(self, service_names=None, **options):
    """Start the named services (all services by default)."""
    for service in self.get_services(service_names):
        service.start(**options)

def stop(self, service_names=None, **options):
    """Stop services in reverse dependency order, so dependents stop first."""
    for service in reversed(self.get_services(service_names)):
        service.stop(**options)

def kill(self, service_names=None, **options):
    """Kill services in reverse dependency order, so dependents die first."""
    for service in reversed(self.get_services(service_names)):
        service.kill(**options)

def restart(self, service_names=None, **options):
    """Restart the named services (all services by default)."""
    for service in self.get_services(service_names):
        service.restart(**options)

def build(self, service_names=None, no_cache=False):
    """Build the named services that have a build context; skip image-only ones."""
    for service in self.get_services(service_names):
        if service.can_be_built():
            service.build(no_cache)
        else:
            log.info('%s uses an image, skipping' % service.name)
insecure_registry=insecure_registry, 207 | detach=detach, 208 | do_build=do_build): 209 | running_containers.append(container) 210 | else: 211 | for container in service.start_or_create_containers( 212 | insecure_registry=insecure_registry, 213 | detach=detach, 214 | do_build=do_build): 215 | running_containers.append(container) 216 | 217 | return running_containers 218 | 219 | def pull(self, service_names=None, insecure_registry=False): 220 | for service in self.get_services(service_names, include_deps=True): 221 | service.pull(insecure_registry=insecure_registry) 222 | 223 | def remove_stopped(self, service_names=None, **options): 224 | for service in self.get_services(service_names): 225 | service.remove_stopped(**options) 226 | 227 | def containers(self, service_names=None, stopped=False, one_off=False): 228 | return [Container.from_ps(self.client, container) 229 | for container in self.client.containers(all=stopped) 230 | for service in self.get_services(service_names) 231 | if service.has_container(container, one_off=one_off)] 232 | 233 | def _inject_deps(self, acc, service): 234 | net_name = service.get_net_name() 235 | dep_names = (service.get_linked_names() + 236 | service.get_volumes_from_names() + 237 | ([net_name] if net_name else [])) 238 | 239 | if len(dep_names) > 0: 240 | dep_services = self.get_services( 241 | service_names=list(set(dep_names)), 242 | include_deps=True 243 | ) 244 | else: 245 | dep_services = [] 246 | 247 | dep_services.append(service) 248 | return acc + dep_services 249 | 250 | 251 | class NoSuchService(Exception): 252 | def __init__(self, name): 253 | self.name = name 254 | self.msg = "No such service: %s" % self.name 255 | 256 | def __str__(self): 257 | return self.msg 258 | 259 | 260 | class DependencyError(ConfigurationError): 261 | pass 262 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache 
License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 
40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | Copyright 2014 Docker, Inc. 180 | 181 | Licensed under the Apache License, Version 2.0 (the "License"); 182 | you may not use this file except in compliance with the License. 
183 | You may obtain a copy of the License at 184 | 185 | http://www.apache.org/licenses/LICENSE-2.0 186 | 187 | Unless required by applicable law or agreed to in writing, software 188 | distributed under the License is distributed on an "AS IS" BASIS, 189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 190 | See the License for the specific language governing permissions and 191 | limitations under the License. 192 | -------------------------------------------------------------------------------- /docs/extends.md: -------------------------------------------------------------------------------- 1 | page_title: Extending services in Compose 2 | page_description: How to use Docker Compose's "extends" keyword to share configuration between files and projects 3 | page_keywords: fig, composition, compose, docker, orchestration, documentation, docs 4 | 5 | 6 | ## Extending services in Compose 7 | 8 | Docker Compose's `extends` keyword enables sharing of common configurations 9 | among different files, or even different projects entirely. Extending services 10 | is useful if you have several applications that reuse commonly-defined services. 11 | Using `extends` you can define a service in one place and refer to it from 12 | anywhere. 13 | 14 | Alternatively, you can deploy the same application to multiple environments with 15 | a slightly different set of services in each case (or with changes to the 16 | configuration of some services). Moreover, you can do so without copy-pasting 17 | the configuration around. 18 | 19 | ### Understand the extends configuration 20 | 21 | When defining any service in `docker-compose.yml`, you can declare that you are 22 | extending another service like this: 23 | 24 | ```yaml 25 | web: 26 | extends: 27 | file: common-services.yml 28 | service: webapp 29 | ``` 30 | 31 | This instructs Compose to re-use the configuration for the `webapp` service 32 | defined in the `common-services.yml` file. 
Suppose that `common-services.yml` 33 | looks like this: 34 | 35 | ```yaml 36 | webapp: 37 | build: . 38 | ports: 39 | - "8000:8000" 40 | volumes: 41 | - "/data" 42 | ``` 43 | 44 | In this case, you'll get exactly the same result as if you wrote 45 | `docker-compose.yml` with that `build`, `ports` and `volumes` configuration 46 | defined directly under `web`. 47 | 48 | You can go further and define (or re-define) configuration locally in 49 | `docker-compose.yml`: 50 | 51 | ```yaml 52 | web: 53 | extends: 54 | file: common-services.yml 55 | service: webapp 56 | environment: 57 | - DEBUG=1 58 | cpu_shares: 5 59 | ``` 60 | 61 | You can also write other services and link your `web` service to them: 62 | 63 | ```yaml 64 | web: 65 | extends: 66 | file: common-services.yml 67 | service: webapp 68 | environment: 69 | - DEBUG=1 70 | cpu_shares: 5 71 | links: 72 | - db 73 | db: 74 | image: postgres 75 | ``` 76 | 77 | For full details on how to use `extends`, refer to the [reference](#reference). 78 | 79 | ### Example use case 80 | 81 | In this example, you’ll repurpose the example app from the [quick start 82 | guide](index.md). (If you're not familiar with Compose, it's recommended that 83 | you go through the quick start first.) This example assumes you want to use 84 | Compose both to develop an application locally and then deploy it to a 85 | production environment. 86 | 87 | The local and production environments are similar, but there are some 88 | differences. In development, you mount the application code as a volume so that 89 | it can pick up changes; in production, the code should be immutable from the 90 | outside. This ensures it’s not accidentally changed. The development environment 91 | uses a local Redis container, but in production another team manages the Redis 92 | service, which is listening at `redis-production.example.com`. 93 | 94 | To configure with `extends` for this sample, you must: 95 | 96 | 1. 
Define the web application as a Docker image in `Dockerfile` and a Compose 97 | service in `common.yml`. 98 | 99 | 2. Define the development environment in the standard Compose file, 100 | `docker-compose.yml`. 101 | 102 | - Use `extends` to pull in the web service. 103 | - Configure a volume to enable code reloading. 104 | - Create an additional Redis service for the application to use locally. 105 | 106 | 3. Define the production environment in a third Compose file, `production.yml`. 107 | 108 | - Use `extends` to pull in the web service. 109 | - Configure the web service to talk to the external, production Redis service. 110 | 111 | #### Define the web app 112 | 113 | Defining the web application requires the following: 114 | 115 | 1. Create an `app.py` file. 116 | 117 | This file contains a simple Python application that uses Flask to serve HTTP 118 | and increments a counter in Redis: 119 | 120 | from flask import Flask 121 | from redis import Redis 122 | import os 123 | 124 | app = Flask(__name__) 125 | redis = Redis(host=os.environ['REDIS_HOST'], port=6379) 126 | 127 | @app.route('/') 128 | def hello(): 129 | redis.incr('hits') 130 | return 'Hello World! I have been seen %s times.\n' % redis.get('hits') 131 | 132 | if __name__ == "__main__": 133 | app.run(host="0.0.0.0", debug=True) 134 | 135 | This code uses a `REDIS_HOST` environment variable to determine where to 136 | find Redis. 137 | 138 | 2. Define the Python dependencies in a `requirements.txt` file: 139 | 140 | flask 141 | redis 142 | 143 | 3. Create a `Dockerfile` to build an image containing the app: 144 | 145 | FROM python:2.7 146 | ADD . /code 147 | WORKDIR /code 148 | RUN pip install -r 149 | requirements.txt 150 | CMD python app.py 151 | 152 | 4. Create a Compose configuration file called `common.yml`: 153 | 154 | This configuration defines how to run the app. 155 | 156 | web: 157 | build: . 
158 | ports: 159 | - "5000:5000" 160 | 161 | Typically, you would have dropped this configuration into 162 | `docker-compose.yml` file, but in order to pull it into multiple files with 163 | `extends`, it needs to be in a separate file. 164 | 165 | #### Define the development environment 166 | 167 | 1. Create a `docker-compose.yml` file. 168 | 169 | The `extends` option pulls in the `web` service from the `common.yml` file 170 | you created in the previous section. 171 | 172 | web: 173 | extends: 174 | file: common.yml 175 | service: web 176 | volumes: 177 | - .:/code 178 | links: 179 | - redis 180 | environment: 181 | - REDIS_HOST=redis 182 | redis: 183 | image: redis 184 | 185 | The new addition defines a `web` service that: 186 | 187 | - Fetches the base configuration for `web` out of `common.yml`. 188 | - Adds `volumes` and `links` configuration to the base (`common.yml`) 189 | configuration. 190 | - Sets the `REDIS_HOST` environment variable to point to the linked redis 191 | container. This environment uses a stock `redis` image from the Docker Hub. 192 | 193 | 2. Run `docker-compose up`. 194 | 195 | Compose creates, links, and starts a web and redis container linked together. 196 | It mounts your application code inside the web container. 197 | 198 | 3. Verify that the code is mounted by changing the message in 199 | `app.py`—say, from `Hello world!` to `Hello from Compose!`. 200 | 201 | Don't forget to refresh your browser to see the change! 202 | 203 | #### Define the production environment 204 | 205 | You are almost done. Now, define your production environment: 206 | 207 | 1. Create a `production.yml` file. 208 | 209 | As with `docker-compose.yml`, the `extends` option pulls in the `web` service 210 | from `common.yml`. 211 | 212 | web: 213 | extends: 214 | file: common.yml 215 | service: web 216 | environment: 217 | - REDIS_HOST=redis-production.example.com 218 | 219 | 2. Run `docker-compose -f production.yml up`. 
220 | 221 | Compose creates *just* a web container and configures the Redis connection via 222 | the `REDIS_HOST` environment variable. This variable points to the production 223 | Redis instance. 224 | 225 | > **Note**: If you try to load up the webapp in your browser you'll get an 226 | > error—`redis-production.example.com` isn't actually a Redis server. 227 | 228 | You've now done a basic `extends` configuration. As your application develops, 229 | you can make any necessary changes to the web service in `common.yml`. Compose 230 | picks up both the development and production environments when you next run 231 | `docker-compose`. You don't have to do any copy-and-paste, and you don't have to 232 | manually keep both environments in sync. 233 | 234 | 235 | ### Reference 236 | 237 | You can use `extends` on any service together with other configuration keys. It 238 | always expects a dictionary that should always contain two keys: `file` and 239 | `service`. 240 | 241 | The `file` key specifies which file to look in. It can be an absolute path or a 242 | relative one—if relative, it's treated as relative to the current file. 243 | 244 | The `service` key specifies the name of the service to extend, for example `web` 245 | or `database`. 246 | 247 | You can extend a service that itself extends another. You can extend 248 | indefinitely. Compose does not support circular references and `docker-compose` 249 | returns an error if it encounters them. 250 | 251 | #### Adding and overriding configuration 252 | 253 | Compose copies configurations from the original service over to the local one, 254 | **except** for `links` and `volumes_from`. These exceptions exist to avoid 255 | implicit dependencies—you always define `links` and `volumes_from` 256 | locally. This ensures dependencies between services are clearly visible when 257 | reading the current file. Defining these locally also ensures changes to the 258 | referenced file don't result in breakage. 
259 | 260 | If a configuration option is defined in both the original service and the local 261 | service, the local value either *override*s or *extend*s the definition of the 262 | original service. This works differently for other configuration options. 263 | 264 | For single-value options like `image`, `command` or `mem_limit`, the new value 265 | replaces the old value. **This is the default behaviour - all exceptions are 266 | listed below.** 267 | 268 | ```yaml 269 | # original service 270 | command: python app.py 271 | 272 | # local service 273 | command: python otherapp.py 274 | 275 | # result 276 | command: python otherapp.py 277 | ``` 278 | 279 | In the case of `build` and `image`, using one in the local service causes 280 | Compose to discard the other, if it was defined in the original service. 281 | 282 | ```yaml 283 | # original service 284 | build: . 285 | 286 | # local service 287 | image: redis 288 | 289 | # result 290 | image: redis 291 | ``` 292 | 293 | ```yaml 294 | # original service 295 | image: redis 296 | 297 | # local service 298 | build: . 299 | 300 | # result 301 | build: . 
302 | ``` 303 | 304 | For the **multi-value options** `ports`, `expose`, `external_links`, `dns` and 305 | `dns_search`, Compose concatenates both sets of values: 306 | 307 | ```yaml 308 | # original service 309 | expose: 310 | - "3000" 311 | 312 | # local service 313 | expose: 314 | - "4000" 315 | - "5000" 316 | 317 | # result 318 | expose: 319 | - "3000" 320 | - "4000" 321 | - "5000" 322 | ``` 323 | 324 | In the case of `environment`, Compose "merges" entries together with 325 | locally-defined values taking precedence: 326 | 327 | ```yaml 328 | # original service 329 | environment: 330 | - FOO=original 331 | - BAR=original 332 | 333 | # local service 334 | environment: 335 | - BAR=local 336 | - BAZ=local 337 | 338 | # result 339 | environment: 340 | - FOO=original 341 | - BAR=local 342 | - BAZ=local 343 | ``` 344 | 345 | Finally, for `volumes`, Compose "merges" entries together with locally-defined 346 | bindings taking precedence: 347 | 348 | ```yaml 349 | # original service 350 | volumes: 351 | - /original-dir/foo:/foo 352 | - /original-dir/bar:/bar 353 | 354 | # local service 355 | volumes: 356 | - /local-dir/bar:/bar 357 | - /local-dir/baz/:/baz 358 | 359 | # result 360 | volumes: 361 | - /original-dir/foo:/foo 362 | - /local-dir/bar:/bar 363 | - /local-dir/baz/:/baz 364 | ``` -------------------------------------------------------------------------------- /CHANGES.md: -------------------------------------------------------------------------------- 1 | Change log 2 | ========== 3 | 4 | 1.1.0 (2015-02-25) 5 | ------------------ 6 | 7 | Fig has been renamed to Docker Compose, or just Compose for short. This has several implications for you: 8 | 9 | - The command you type is now `docker-compose`, not `fig`. 10 | - You should rename your fig.yml to docker-compose.yml. 11 | - If you’re installing via PyPi, the package is now `docker-compose`, so install it with `pip install docker-compose`. 
12 | 13 | Besides that, there’s a lot of new stuff in this release: 14 | 15 | - We’ve made a few small changes to ensure that Compose will work with Swarm, Docker’s new clustering tool (https://github.com/docker/swarm). Eventually you'll be able to point Compose at a Swarm cluster instead of a standalone Docker host and it’ll run your containers on the cluster with no extra work from you. As Swarm is still developing, integration is rough and lots of Compose features don't work yet. 16 | 17 | - `docker-compose run` now has a `--service-ports` flag for exposing ports on the given service. This is useful for e.g. running your webapp with an interactive debugger. 18 | 19 | - You can now link to containers outside your app with the `external_links` option in docker-compose.yml. 20 | 21 | - You can now prevent `docker-compose up` from automatically building images with the `--no-build` option. This will make fewer API calls and run faster. 22 | 23 | - If you don’t specify a tag when using the `image` key, Compose will default to the `latest` tag, rather than pulling all tags. 24 | 25 | - `docker-compose kill` now supports the `-s` flag, allowing you to specify the exact signal you want to send to a service’s containers. 26 | 27 | - docker-compose.yml now has an `env_file` key, analogous to `docker run --env-file`, letting you specify multiple environment variables in a separate file. This is great if you have a lot of them, or if you want to keep sensitive information out of version control. 28 | 29 | - docker-compose.yml now supports the `dns_search`, `cap_add`, `cap_drop`, `cpu_shares` and `restart` options, analogous to `docker run`’s `--dns-search`, `--cap-add`, `--cap-drop`, `--cpu-shares` and `--restart` options. 
30 | 31 | - Compose now ships with Bash tab completion - see the installation and usage docs at https://github.com/docker/compose/blob/1.1.0/docs/completion.md 32 | 33 | - A number of bugs have been fixed - see the milestone for details: https://github.com/docker/compose/issues?q=milestone%3A1.1.0+ 34 | 35 | Thanks @dnephin, @squebe, @jbalonso, @raulcd, @benlangfield, @albers, @ggtools, @bersace, @dtenenba, @petercv, @drewkett, @TFenby, @paulRbr, @Aigeruth and @salehe! 36 | 37 | 1.0.1 (2014-11-04) 38 | ------------------ 39 | 40 | - Added an `--allow-insecure-ssl` option to allow `fig up`, `fig run` and `fig pull` to pull from insecure registries. 41 | - Fixed `fig run` not showing output in Jenkins. 42 | - Fixed a bug where Fig couldn't build Dockerfiles with ADD statements pointing at URLs. 43 | 44 | 1.0.0 (2014-10-16) 45 | ------------------ 46 | 47 | The highlights: 48 | 49 | - [Fig has joined Docker.](https://www.orchardup.com/blog/orchard-is-joining-docker) Fig will continue to be maintained, but we'll also be incorporating the best bits of Fig into Docker itself. 50 | 51 | This means the GitHub repository has moved to [https://github.com/docker/fig](https://github.com/docker/fig) and our IRC channel is now #docker-fig on Freenode. 52 | 53 | - Fig can be used with the [official Docker OS X installer](https://docs.docker.com/installation/mac/). Boot2Docker will mount the home directory from your host machine so volumes work as expected. 54 | 55 | - Fig supports Docker 1.3. 56 | 57 | - It is now possible to connect to the Docker daemon using TLS by using the `DOCKER_CERT_PATH` and `DOCKER_TLS_VERIFY` environment variables. 58 | 59 | - There is a new `fig port` command which outputs the host port binding of a service, in a similar way to `docker port`. 60 | 61 | - There is a new `fig pull` command which pulls the latest images for a service. 62 | 63 | - There is a new `fig restart` command which restarts a service's containers. 
64 | 65 | - Fig creates multiple containers in service by appending a number to the service name (e.g. `db_1`, `db_2`, etc). As a convenience, Fig will now give the first container an alias of the service name (e.g. `db`). 66 | 67 | This link alias is also a valid hostname and added to `/etc/hosts` so you can connect to linked services using their hostname. For example, instead of resolving the environment variables `DB_PORT_5432_TCP_ADDR` and `DB_PORT_5432_TCP_PORT`, you could just use the hostname `db` and port `5432` directly. 68 | 69 | - Volume definitions now support `ro` mode, expanding `~` and expanding environment variables. 70 | 71 | - `.dockerignore` is supported when building. 72 | 73 | - The project name can be set with the `FIG_PROJECT_NAME` environment variable. 74 | 75 | - The `--env` and `--entrypoint` options have been added to `fig run`. 76 | 77 | - The Fig binary for Linux is now linked against an older version of glibc so it works on CentOS 6 and Debian Wheezy. 78 | 79 | Other things: 80 | 81 | - `fig ps` now works on Jenkins and makes fewer API calls to the Docker daemon. 82 | - `--verbose` displays more useful debugging output. 83 | - When starting a service where `volumes_from` points to a service without any containers running, that service will now be started. 84 | - Lots of docs improvements. Notably, environment variables are documented and official repositories are used throughout. 85 | 86 | Thanks @dnephin, @d11wtq, @marksteve, @rubbish, @jbalonso, @timfreund, @alunduil, @mieciu, @shuron, @moss, @suzaku and @chmouel! Whew. 87 | 88 | 0.5.2 (2014-07-28) 89 | ------------------ 90 | 91 | - Added a `--no-cache` option to `fig build`, which bypasses the cache just like `docker build --no-cache`. 92 | - Fixed the `dns:` fig.yml option, which was causing fig to error out. 93 | - Fixed a bug where fig couldn't start under Python 2.6. 94 | - Fixed a log-streaming bug that occasionally caused fig to exit. 95 | 96 | Thanks @dnephin and @marksteve! 
97 | 98 | 99 | 0.5.1 (2014-07-11) 100 | ------------------ 101 | 102 | - If a service has a command defined, `fig run [service]` with no further arguments will run it. 103 | - The project name now defaults to the directory containing fig.yml, not the current working directory (if they're different) 104 | - `volumes_from` now works properly with containers as well as services 105 | - Fixed a race condition when recreating containers in `fig up` 106 | 107 | Thanks @ryanbrainard and @d11wtq! 108 | 109 | 110 | 0.5.0 (2014-07-11) 111 | ------------------ 112 | 113 | - Fig now starts links when you run `fig run` or `fig up`. 114 | 115 | For example, if you have a `web` service which depends on a `db` service, `fig run web ...` will start the `db` service. 116 | 117 | - Environment variables can now be resolved from the environment that Fig is running in. Just specify it as a blank variable in your `fig.yml` and, if set, it'll be resolved: 118 | ``` 119 | environment: 120 | RACK_ENV: development 121 | SESSION_SECRET: 122 | ``` 123 | 124 | - `volumes_from` is now supported in `fig.yml`. All of the volumes from the specified services and containers will be mounted: 125 | 126 | ``` 127 | volumes_from: 128 | - service_name 129 | - container_name 130 | ``` 131 | 132 | - A host address can now be specified in `ports`: 133 | 134 | ``` 135 | ports: 136 | - "0.0.0.0:8000:8000" 137 | - "127.0.0.1:8001:8001" 138 | ``` 139 | 140 | - The `net` and `workdir` options are now supported in `fig.yml`. 141 | - The `hostname` option now works in the same way as the Docker CLI, splitting out into a `domainname` option. 142 | - TTY behaviour is far more robust, and resizes are supported correctly. 143 | - Load YAML files safely. 144 | 145 | Thanks to @d11wtq, @ryanbrainard, @rail44, @j0hnsmith, @binarin, @Elemecca, @mozz100 and @marksteve for their help with this release! 
146 | 147 | 148 | 0.4.2 (2014-06-18) 149 | ------------------ 150 | 151 | - Fix various encoding errors when using `fig run`, `fig up` and `fig build`. 152 | 153 | 0.4.1 (2014-05-08) 154 | ------------------ 155 | 156 | - Add support for Docker 0.11.0. (Thanks @marksteve!) 157 | - Make project name configurable. (Thanks @jefmathiot!) 158 | - Return correct exit code from `fig run`. 159 | 160 | 0.4.0 (2014-04-29) 161 | ------------------ 162 | 163 | - Support Docker 0.9 and 0.10 164 | - Display progress bars correctly when pulling images (no more ski slopes) 165 | - `fig up` now stops all services when any container exits 166 | - Added support for the `privileged` config option in fig.yml (thanks @kvz!) 167 | - Shortened and aligned log prefixes in `fig up` output 168 | - Only containers started with `fig run` link back to their own service 169 | - Handle UTF-8 correctly when streaming `fig build/run/up` output (thanks @mauvm and @shanejonas!) 170 | - Error message improvements 171 | 172 | 0.3.2 (2014-03-05) 173 | ------------------ 174 | 175 | - Added an `--rm` option to `fig run`. (Thanks @marksteve!) 176 | - Added an `expose` option to `fig.yml`. 177 | 178 | 0.3.1 (2014-03-04) 179 | ------------------ 180 | 181 | - Added contribution instructions. (Thanks @kvz!) 182 | - Fixed `fig rm` throwing an error. 183 | - Fixed a bug in `fig ps` on Docker 0.8.1 when there is a container with no command. 184 | 185 | 0.3.0 (2014-03-03) 186 | ------------------ 187 | 188 | - We now ship binaries for OS X and Linux. No more having to install with Pip! 189 | - Add `-f` flag to specify alternate `fig.yml` files 190 | - Add support for custom link names 191 | - Fix a bug where recreating would sometimes hang 192 | - Update docker-py to support Docker 0.8.0. 193 | - Various documentation improvements 194 | - Various error message improvements 195 | 196 | Thanks @marksteve, @Gazler and @teozkr! 
197 | 198 | 0.2.2 (2014-02-17) 199 | ------------------ 200 | 201 | - Resolve dependencies using Cormen/Tarjan topological sort 202 | - Fix `fig up` not printing log output 203 | - Stop containers in reverse order to starting 204 | - Fix scale command not binding ports 205 | 206 | Thanks to @barnybug and @dustinlacewell for their work on this release. 207 | 208 | 0.2.1 (2014-02-04) 209 | ------------------ 210 | 211 | - General improvements to error reporting (#77, #79) 212 | 213 | 0.2.0 (2014-01-31) 214 | ------------------ 215 | 216 | - Link services to themselves so run commands can access the running service. (#67) 217 | - Much better documentation. 218 | - Make service dependency resolution more reliable. (#48) 219 | - Load Fig configurations with a `.yaml` extension. (#58) 220 | 221 | Big thanks to @cameronmaske, @mrchrisadams and @damianmoore for their help with this release. 222 | 223 | 0.1.4 (2014-01-27) 224 | ------------------ 225 | 226 | - Add a link alias without the project name. This makes the environment variables a little shorter: `REDIS_1_PORT_6379_TCP_ADDR`. (#54) 227 | 228 | 0.1.3 (2014-01-23) 229 | ------------------ 230 | 231 | - Fix ports sometimes being configured incorrectly. (#46) 232 | - Fix log output sometimes not displaying. (#47) 233 | 234 | 0.1.2 (2014-01-22) 235 | ------------------ 236 | 237 | - Add `-T` option to `fig run` to disable pseudo-TTY. (#34) 238 | - Fix `fig up` requiring the ubuntu image to be pulled to recreate containers. (#33) Thanks @cameronmaske! 239 | - Improve reliability, fix arrow keys and fix a race condition in `fig run`. (#34, #39, #40) 240 | 241 | 0.1.1 (2014-01-17) 242 | ------------------ 243 | 244 | - Fix bug where ports were not exposed correctly (#29). Thanks @dustinlacewell! 
245 | 246 | 0.1.0 (2014-01-16) 247 | ------------------ 248 | 249 | - Containers are recreated on each `fig up`, ensuring config is up-to-date with `fig.yml` (#2) 250 | - Add `fig scale` command (#9) 251 | - Use `DOCKER_HOST` environment variable to find Docker daemon, for consistency with the official Docker client (was previously `DOCKER_URL`) (#19) 252 | - Truncate long commands in `fig ps` (#18) 253 | - Fill out CLI help banners for commands (#15, #16) 254 | - Show a friendlier error when `fig.yml` is missing (#4) 255 | - Fix bug with `fig build` logging (#3) 256 | - Fix bug where builds would time out if a step took a long time without generating output (#6) 257 | - Fix bug where streaming container output over the Unix socket raised an error (#7) 258 | 259 | Big thanks to @tomstuart, @EnTeQuAk, @schickling, @aronasorman and @GeoffreyPlitt. 260 | 261 | 0.0.2 (2014-01-02) 262 | ------------------ 263 | 264 | - Improve documentation 265 | - Try to connect to Docker on `tcp://localdocker:4243` and a UNIX socket in addition to `localhost`. 266 | - Improve `fig up` behaviour 267 | - Add confirmation prompt to `fig rm` 268 | - Add `fig build` command 269 | 270 | 0.0.1 (2013-12-20) 271 | ------------------ 272 | 273 | Initial release. 
# (blank lines ending CHANGES.md)
#
# --------------------------------------------------------------------------------
# /compose/config.py:
# --------------------------------------------------------------------------------
"""Loading, validation, and merging of docker-compose.yml service definitions."""
import os
import yaml
import six


# Configuration keys that are passed through to docker-py when creating a
# container (after validation and path/environment resolution).
DOCKER_CONFIG_KEYS = [
    'cap_add',
    'cap_drop',
    'cpu_shares',
    'command',
    'detach',
    'dns',
    'dns_search',
    'domainname',
    'entrypoint',
    'env_file',
    'environment',
    'hostname',
    'image',
    'links',
    'mem_limit',
    'net',
    'pid',
    'ports',
    'privileged',
    'restart',
    'stdin_open',
    'tty',
    'user',
    'volumes',
    'volumes_from',
    'working_dir',
]

# Everything a service dictionary may legally contain: the docker-py keys
# plus keys consumed by Compose itself.
ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
    'build',
    'expose',
    'external_links',
    'name',
]

# Common misspellings of configuration keys, mapped to the intended key so
# the "unsupported option" error can suggest a correction.
DOCKER_CONFIG_HINTS = {
    'cpu_share': 'cpu_shares',
    'link': 'links',
    'port': 'ports',
    'privilege': 'privileged',
    'priviliged': 'privileged',
    'privilige': 'privileged',
    'volume': 'volumes',
    'workdir': 'working_dir',
}


def load(filename):
    """Parse the compose file at `filename` and return a list of service dicts.

    Relative paths inside the file are resolved against the file's directory.
    """
    working_dir = os.path.dirname(filename)
    return from_dictionary(load_yaml(filename), working_dir=working_dir, filename=filename)


def from_dictionary(dictionary, working_dir=None, filename=None):
    """Turn a parsed compose mapping into a list of validated service dicts.

    Raises ConfigurationError if any top-level value is not a mapping.
    """
    service_dicts = []

    for service_name, service_dict in list(dictionary.items()):
        if not isinstance(service_dict, dict):
            raise ConfigurationError('Service "%s" doesn\'t have any configuration options. All top level keys in your docker-compose.yml must map to a dictionary of configuration options.' % service_name)
        loader = ServiceLoader(working_dir=working_dir, filename=filename)
        service_dict = loader.make_service_dict(service_name, service_dict)
        service_dicts.append(service_dict)

    return service_dicts


def make_service_dict(name, service_dict, working_dir=None):
    """Convenience wrapper: build a single service dict without a filename."""
    return ServiceLoader(working_dir=working_dir).make_service_dict(name, service_dict)


class ServiceLoader(object):
    """Builds one service dictionary, resolving `env_file` and `extends`.

    `already_seen` carries the (filename, service_name) signatures visited so
    far along an `extends` chain, so circular references can be detected.
    """

    def __init__(self, working_dir, filename=None, already_seen=None):
        self.working_dir = working_dir
        self.filename = filename
        self.already_seen = already_seen or []

    def make_service_dict(self, name, service_dict):
        """Validate and fully resolve `service_dict` for the service `name`."""
        if self.signature(name) in self.already_seen:
            raise CircularReference(self.already_seen)

        service_dict = service_dict.copy()
        service_dict['name'] = name
        service_dict = resolve_environment(service_dict, working_dir=self.working_dir)
        service_dict = self.resolve_extends(service_dict)
        return process_container_options(service_dict, working_dir=self.working_dir)

    def resolve_extends(self, service_dict):
        """Merge in the configuration of the service named by `extends`, if any."""
        if 'extends' not in service_dict:
            return service_dict

        extends_options = process_extends_options(service_dict['name'], service_dict['extends'])

        if self.working_dir is None:
            raise Exception("No working_dir passed to ServiceLoader()")

        other_config_path = expand_path(self.working_dir, extends_options['file'])
        other_working_dir = os.path.dirname(other_config_path)
        # Record this (file, service) so a later hop back here raises
        # CircularReference instead of recursing forever.
        other_already_seen = self.already_seen + [self.signature(service_dict['name'])]
        other_loader = ServiceLoader(
            working_dir=other_working_dir,
            filename=other_config_path,
            already_seen=other_already_seen,
        )

        other_config = load_yaml(other_config_path)
        other_service_dict = other_config[extends_options['service']]
        other_service_dict = other_loader.make_service_dict(
            service_dict['name'],
            other_service_dict,
        )
        validate_extended_service_dict(
            other_service_dict,
            filename=other_config_path,
            service=extends_options['service'],
        )

        return merge_service_dicts(other_service_dict, service_dict)

    def signature(self, name):
        """Identity of a service definition: the file it lives in plus its name."""
        return (self.filename, name)


def process_extends_options(service_name, extends_options):
    """Validate an `extends` value: must be a dict with only file/service keys."""
    error_prefix = "Invalid 'extends' configuration for %s:" % service_name

    if not isinstance(extends_options, dict):
        raise ConfigurationError("%s must be a dictionary" % error_prefix)

    if 'service' not in extends_options:
        raise ConfigurationError(
            "%s you need to specify a service, e.g. 'service: web'" % error_prefix
        )

    for k, _ in extends_options.items():
        if k not in ['file', 'service']:
            raise ConfigurationError(
                "%s unsupported configuration option '%s'" % (error_prefix, k)
            )

    return extends_options


def validate_extended_service_dict(service_dict, filename, service):
    """Reject extending services with options that would create implicit deps.

    `links`, `volumes_from` and `net: container:...` must always be declared
    locally, so a service that uses them cannot be extended.
    """
    error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)

    if 'links' in service_dict:
        raise ConfigurationError("%s services with 'links' cannot be extended" % error_prefix)

    if 'volumes_from' in service_dict:
        raise ConfigurationError("%s services with 'volumes_from' cannot be extended" % error_prefix)

    if 'net' in service_dict:
        if get_service_name_from_net(service_dict['net']) is not None:
            raise ConfigurationError("%s services with 'net: container' cannot be extended" % error_prefix)


def process_container_options(service_dict, working_dir=None):
    """Reject unknown keys, then resolve volume host paths and the build path."""
    for k in service_dict:
        if k not in ALLOWED_KEYS:
            msg = "Unsupported config option for %s service: '%s'" % (service_dict['name'], k)
            if k in DOCKER_CONFIG_HINTS:
                msg += " (did you mean '%s'?)" % DOCKER_CONFIG_HINTS[k]
            raise ConfigurationError(msg)

    service_dict = service_dict.copy()

    if 'volumes' in service_dict:
        service_dict['volumes'] = resolve_host_paths(service_dict['volumes'], working_dir=working_dir)

    if 'build' in service_dict:
        service_dict['build'] = resolve_build_path(service_dict['build'], working_dir=working_dir)

    return service_dict


def merge_service_dicts(base, override):
    """Merge `override` on top of `base` per the documented `extends` rules.

    environment and volumes merge entry-wise (override wins); list options
    concatenate; `image`/`build` displace each other; all other keys are
    simply replaced by the override value.
    """
    d = base.copy()

    if 'environment' in base or 'environment' in override:
        d['environment'] = merge_environment(
            base.get('environment'),
            override.get('environment'),
        )

    if 'volumes' in base or 'volumes' in override:
        d['volumes'] = merge_volumes(
            base.get('volumes'),
            override.get('volumes'),
        )

    # `image` and `build` are mutually exclusive: whichever the override
    # specifies discards the other from the base.
    if 'image' in override and 'build' in d:
        del d['build']

    if 'build' in override and 'image' in d:
        del d['image']

    list_keys = ['ports', 'expose', 'external_links']

    for key in list_keys:
        if key in base or key in override:
            d[key] = base.get(key, []) + override.get(key, [])

    # These may be a single string or a list; normalise before concatenating.
    list_or_string_keys = ['dns', 'dns_search']

    for key in list_or_string_keys:
        if key in base or key in override:
            d[key] = to_list(base.get(key)) + to_list(override.get(key))

    already_merged_keys = ['environment', 'volumes'] + list_keys + list_or_string_keys

    for k in set(ALLOWED_KEYS) - set(already_merged_keys):
        if k in override:
            d[k] = override[k]

    return d


def merge_environment(base, override):
    """Merge two environment specs into a dict; override entries win."""
    env = parse_environment(base)
    env.update(parse_environment(override))
    return env


def parse_links(links):
    """Turn a list of link specs into an {alias: source} mapping."""
    return dict(parse_link(l) for l in links)


def parse_link(link):
    """Split 'source:alias' into (alias, source); a bare name aliases itself."""
    if ':' in link:
        source, alias = link.split(':', 1)
        return (alias, source)
    else:
        return (link, link)


def get_env_files(options, working_dir=None):
    """Return absolute paths for the service's `env_file` entries (if any)."""
    if 'env_file' not in options:
        return {}

    if working_dir is None:
        raise Exception("No working_dir passed to get_env_files()")

    env_files = options.get('env_file', [])
    if not isinstance(env_files, list):
        env_files = [env_files]

    return [expand_path(working_dir, path) for path in env_files]


def resolve_environment(service_dict, working_dir=None):
    """Fold `env_file` contents and shell lookups into `environment`.

    env_file values are loaded first so inline `environment` entries take
    precedence; blank values are then resolved from os.environ.
    """
    service_dict = service_dict.copy()

    if 'environment' not in service_dict and 'env_file' not in service_dict:
        return service_dict

    env = {}

    if 'env_file' in service_dict:
        for f in get_env_files(service_dict, working_dir=working_dir):
            env.update(env_vars_from_file(f))
        del service_dict['env_file']

    env.update(parse_environment(service_dict.get('environment')))
    env = dict(resolve_env_var(k, v) for k, v in six.iteritems(env))

    service_dict['environment'] = env
    return service_dict


def parse_environment(environment):
    """Normalise an environment spec (list of KEY=VAL or mapping) to a dict."""
    if not environment:
        return {}

    if isinstance(environment, list):
        return dict(split_env(e) for e in environment)

    if isinstance(environment, dict):
        return environment

    raise ConfigurationError(
        "environment \"%s\" must be a list or mapping," %
        environment
    )


def split_env(env):
    """Split 'KEY=VAL' into (KEY, VAL); a bare KEY gets the value None."""
    if '=' in env:
        return env.split('=', 1)
    else:
        return env, None


def resolve_env_var(key, val):
    """Resolve a None value from the caller's environment, defaulting to ''."""
    if val is not None:
        return key, val
    elif key in os.environ:
        return key, os.environ[key]
    else:
        return key, ''


def env_vars_from_file(filename):
    """
    Read in a line delimited file of environment variables.

    Blank lines and lines starting with '#' are ignored.
    """
    if not os.path.exists(filename):
        raise ConfigurationError("Couldn't find env file: %s" % filename)
    env = {}
    # Use a context manager so the file handle is closed promptly
    # (previously the file object was left for the GC to clean up).
    with open(filename, 'r') as fileobj:
        for line in fileobj:
            line = line.strip()
            if line and not line.startswith('#'):
                k, v = split_env(line)
                env[k] = v
    return env


def resolve_host_paths(volumes, working_dir=None):
    """Make every host path in a list of volume specs absolute."""
    if working_dir is None:
        raise Exception("No working_dir passed to resolve_host_paths()")

    return [resolve_host_path(v, working_dir) for v in volumes]


def resolve_host_path(volume, working_dir):
    """Expand ~, environment variables and relative paths in a volume's host part."""
    container_path, host_path = split_volume(volume)
    if host_path is not None:
        host_path = os.path.expanduser(host_path)
        host_path = os.path.expandvars(host_path)
        return "%s:%s" % (expand_path(working_dir, host_path), container_path)
    else:
        return container_path


def resolve_build_path(build_path, working_dir=None):
    """Resolve `build` to an absolute path, ensuring it exists and is readable."""
    if working_dir is None:
        raise Exception("No working_dir passed to resolve_build_path")

    _path = expand_path(working_dir, build_path)
    if not os.path.exists(_path) or not os.access(_path, os.R_OK):
        raise ConfigurationError("build path %s either does not exist or is not accessible." % _path)
    else:
        return _path


def merge_volumes(base, override):
    """Merge volume lists keyed by container path; override bindings win."""
    d = dict_from_volumes(base)
    d.update(dict_from_volumes(override))
    return volumes_from_dict(d)


def dict_from_volumes(volumes):
    """Index volume specs as {container_path: host_path_or_None}."""
    if volumes:
        return dict(split_volume(v) for v in volumes)
    else:
        return {}


def volumes_from_dict(d):
    """Turn a {container: host} mapping back into a list of volume strings."""
    return [join_volume(v) for v in d.items()]


def split_volume(string):
    """Split 'host:container' into (container, host); bare paths get host None."""
    if ':' in string:
        (host, container) = string.split(':', 1)
        return (container, host)
    else:
        return (string, None)


def join_volume(pair):
    """Inverse of split_volume: render a (container, host) pair as a string."""
    (container, host) = pair
    if host is None:
        return container
    else:
        return ":".join((host, container))


def expand_path(working_dir, path):
    """Return `path` made absolute relative to `working_dir`."""
    return os.path.abspath(os.path.join(working_dir, path))


def to_list(value):
    """Coerce None -> [], a string -> [string]; pass lists through unchanged."""
    if value is None:
        return []
    elif isinstance(value, six.string_types):
        return [value]
    else:
        return value


def get_service_name_from_net(net_config):
    """Return the target of a 'container:<name>' net setting, else None."""
    if not net_config:
        return

    if not net_config.startswith('container:'):
        return

    _, net_name = net_config.split(':', 1)
    return net_name


def load_yaml(filename):
    """Safely parse a YAML file, converting I/O errors to ConfigurationError."""
    try:
        with open(filename, 'r') as fh:
            return yaml.safe_load(fh)
    except IOError as e:
        raise ConfigurationError(six.text_type(e))


class ConfigurationError(Exception):
    """Base error for invalid docker-compose.yml content."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return self.msg


class CircularReference(ConfigurationError):
    """Raised when an `extends` chain loops back on itself."""

    def __init__(self, trail):
        # trail: list of (filename, service_name) signatures already visited.
        self.trail = trail

    @property
    def msg(self):
        lines = [
            "{} in {}".format(service_name, filename)
            for (filename, service_name) in self.trail
        ]
        return
"Circular reference:\n {}".format("\n extends ".join(lines)) 434 | --------------------------------------------------------------------------------