├── compose ├── state.py ├── cli │ ├── __init__.py │ ├── formatter.py │ ├── colors.py │ ├── multiplexer.py │ ├── docker_client.py │ ├── docopt_command.py │ ├── verbose_proxy.py │ ├── errors.py │ ├── log_printer.py │ ├── utils.py │ └── command.py ├── __init__.py ├── utils.py ├── const.py ├── progress_stream.py ├── legacy.py └── container.py ├── tests ├── unit │ ├── __init__.py │ ├── cli │ │ ├── __init__.py │ │ ├── docker_client_test.py │ │ └── verbose_proxy_test.py │ ├── progress_stream_test.py │ ├── split_buffer_test.py │ ├── log_printer_test.py │ ├── container_test.py │ ├── sort_service_test.py │ ├── cli_test.py │ └── project_test.py ├── integration │ ├── __init__.py │ ├── resilience_test.py │ ├── testcases.py │ ├── legacy_test.py │ └── state_test.py ├── fixtures │ ├── env-file │ │ ├── test.env │ │ └── docker-compose.yml │ ├── no-composefile │ │ └── .gitignore │ ├── env │ │ ├── two.env │ │ ├── resolve.env │ │ └── one.env │ ├── simple-dockerfile │ │ ├── docker-compose.yml │ │ └── Dockerfile │ ├── build-path │ │ └── docker-compose.yml │ ├── dockerfile_with_entrypoint │ │ ├── docker-compose.yml │ │ └── Dockerfile │ ├── multiple-composefiles │ │ ├── compose2.yml │ │ └── docker-compose.yml │ ├── build-ctx │ │ └── Dockerfile │ ├── longer-filename-composefile │ │ └── docker-compose.yaml │ ├── user-composefile │ │ └── docker-compose.yml │ ├── volume-path │ │ ├── common │ │ │ └── services.yml │ │ └── docker-compose.yml │ ├── extends │ │ ├── common.yml │ │ ├── nonexistent-path-base.yml │ │ ├── nested-intermediate.yml │ │ ├── nested.yml │ │ ├── nonexistent-path-child.yml │ │ ├── circle-1.yml │ │ ├── circle-2.yml │ │ └── docker-compose.yml │ ├── dockerfile-with-volume │ │ └── Dockerfile │ ├── ports-composefile-scale │ │ └── docker-compose.yml │ ├── UpperCaseDir │ │ └── docker-compose.yml │ ├── ports-composefile │ │ └── docker-compose.yml │ ├── commands-composefile │ │ └── docker-compose.yml │ ├── simple-composefile │ │ └── docker-compose.yml │ ├── environment-composefile │ │ 
└── docker-compose.yml │ └── links-composefile │ │ └── docker-compose.yml └── __init__.py ├── .dockerignore ├── bin └── docker-compose ├── .gitignore ├── script ├── clean ├── shell ├── build-linux ├── build-linux-inner ├── build-osx ├── docs ├── test ├── ci ├── wrapdocker ├── dev ├── test-versions ├── prepare-osx └── dind ├── MAINTAINERS ├── requirements.txt ├── requirements-dev.txt ├── MANIFEST.in ├── tox.ini ├── docs ├── Dockerfile ├── env.md ├── Makefile ├── completion.md ├── install.md ├── README.md ├── production.md ├── wordpress.md ├── django.md ├── rails.md ├── cli.md ├── index.md └── yml.md ├── setup.py ├── ROADMAP.md ├── SWARM.md ├── README.md ├── Dockerfile ├── CONTRIBUTING.md ├── contrib └── completion │ └── bash │ └── docker-compose ├── experimental └── compose_swarm_networking.md └── LICENSE /compose/state.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /compose/cli/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/unit/cli/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/integration/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/fixtures/env-file/test.env: -------------------------------------------------------------------------------- 1 | FOO=1 -------------------------------------------------------------------------------- 
/tests/fixtures/no-composefile/.gitignore: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | build 3 | dist 4 | venv 5 | -------------------------------------------------------------------------------- /tests/fixtures/env/two.env: -------------------------------------------------------------------------------- 1 | FOO=baz 2 | DOO=dah 3 | -------------------------------------------------------------------------------- /tests/fixtures/simple-dockerfile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | simple: 2 | build: . 3 | -------------------------------------------------------------------------------- /tests/fixtures/build-path/docker-compose.yml: -------------------------------------------------------------------------------- 1 | foo: 2 | build: ../build-ctx/ 3 | -------------------------------------------------------------------------------- /tests/fixtures/dockerfile_with_entrypoint/docker-compose.yml: -------------------------------------------------------------------------------- 1 | service: 2 | build: . 
3 | -------------------------------------------------------------------------------- /bin/docker-compose: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | from compose.cli.main import main 3 | main() 4 | -------------------------------------------------------------------------------- /tests/fixtures/env/resolve.env: -------------------------------------------------------------------------------- 1 | FILE_DEF=F1 2 | FILE_DEF_EMPTY= 3 | ENV_DEF 4 | NO_DEF 5 | -------------------------------------------------------------------------------- /compose/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | 3 | __version__ = '1.4.0dev' 4 | -------------------------------------------------------------------------------- /tests/fixtures/multiple-composefiles/compose2.yml: -------------------------------------------------------------------------------- 1 | yetanother: 2 | image: busybox:latest 3 | command: top 4 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.egg-info 2 | *.pyc 3 | .tox 4 | /build 5 | /dist 6 | /docs/_site 7 | /venv 8 | docker-compose.spec 9 | -------------------------------------------------------------------------------- /script/clean: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | find . 
-type f -name '*.pyc' -delete 3 | rm -rf docs/_site build dist docker-compose.egg-info 4 | -------------------------------------------------------------------------------- /tests/fixtures/build-ctx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM busybox:latest 2 | LABEL com.docker.compose.test_image=true 3 | CMD echo "success" 4 | -------------------------------------------------------------------------------- /tests/fixtures/env-file/docker-compose.yml: -------------------------------------------------------------------------------- 1 | web: 2 | image: busybox 3 | command: /bin/true 4 | env_file: ./test.env 5 | -------------------------------------------------------------------------------- /tests/fixtures/longer-filename-composefile/docker-compose.yaml: -------------------------------------------------------------------------------- 1 | definedinyamlnotyml: 2 | image: busybox:latest 3 | command: top -------------------------------------------------------------------------------- /tests/fixtures/user-composefile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | service: 2 | image: busybox:latest 3 | user: notauser 4 | command: id 5 | -------------------------------------------------------------------------------- /tests/fixtures/volume-path/common/services.yml: -------------------------------------------------------------------------------- 1 | db: 2 | image: busybox 3 | volumes: 4 | - ./foo:/foo 5 | - ./bar:/bar 6 | -------------------------------------------------------------------------------- /tests/fixtures/simple-dockerfile/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM busybox:latest 2 | LABEL com.docker.compose.test_image=true 3 | CMD echo "success" 4 | -------------------------------------------------------------------------------- /tests/fixtures/extends/common.yml: 
-------------------------------------------------------------------------------- 1 | web: 2 | image: busybox 3 | command: /bin/true 4 | environment: 5 | - FOO=1 6 | - BAR=1 7 | -------------------------------------------------------------------------------- /tests/fixtures/dockerfile-with-volume/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM busybox:latest 2 | LABEL com.docker.compose.test_image=true 3 | VOLUME /data 4 | CMD top 5 | -------------------------------------------------------------------------------- /tests/fixtures/volume-path/docker-compose.yml: -------------------------------------------------------------------------------- 1 | db: 2 | extends: 3 | file: common/services.yml 4 | service: db 5 | volumes: 6 | - ./bar:/bar 7 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | Aanand Prasad (@aanand) 2 | Ben Firshman (@bfirsh) 3 | Daniel Nephin (@dnephin) 4 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | if sys.version_info >= (2, 7): 4 | import unittest # NOQA 5 | else: 6 | import unittest2 as unittest # NOQA 7 | -------------------------------------------------------------------------------- /tests/fixtures/extends/nonexistent-path-base.yml: -------------------------------------------------------------------------------- 1 | dnebase: 2 | build: nonexistent.path 3 | command: /bin/true 4 | environment: 5 | - FOO=1 6 | - BAR=1 -------------------------------------------------------------------------------- /tests/fixtures/ports-composefile-scale/docker-compose.yml: -------------------------------------------------------------------------------- 1 | 2 | simple: 3 | image: busybox:latest 4 | command: /bin/sleep 300 5 | 
ports: 6 | - '3000' 7 | -------------------------------------------------------------------------------- /tests/fixtures/UpperCaseDir/docker-compose.yml: -------------------------------------------------------------------------------- 1 | simple: 2 | image: busybox:latest 3 | command: top 4 | another: 5 | image: busybox:latest 6 | command: top 7 | -------------------------------------------------------------------------------- /tests/fixtures/dockerfile_with_entrypoint/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM busybox:latest 2 | LABEL com.docker.compose.test_image=true 3 | ENTRYPOINT echo "From prebuilt entrypoint" 4 | -------------------------------------------------------------------------------- /tests/fixtures/extends/nested-intermediate.yml: -------------------------------------------------------------------------------- 1 | webintermediate: 2 | extends: 3 | file: common.yml 4 | service: web 5 | environment: 6 | - "FOO=2" 7 | -------------------------------------------------------------------------------- /tests/fixtures/extends/nested.yml: -------------------------------------------------------------------------------- 1 | myweb: 2 | extends: 3 | file: nested-intermediate.yml 4 | service: webintermediate 5 | environment: 6 | - "BAR=2" 7 | -------------------------------------------------------------------------------- /tests/fixtures/ports-composefile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | 2 | simple: 3 | image: busybox:latest 4 | command: top 5 | ports: 6 | - '3000' 7 | - '49152:3001' 8 | -------------------------------------------------------------------------------- /tests/fixtures/commands-composefile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | implicit: 2 | image: composetest_test 3 | explicit: 4 | image: composetest_test 5 | command: [ "/bin/true" ] 6 | 
-------------------------------------------------------------------------------- /tests/fixtures/simple-composefile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | simple: 2 | image: busybox:latest 3 | command: top 4 | another: 5 | image: busybox:latest 6 | command: top 7 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | PyYAML==3.10 2 | docker-py==1.2.3 3 | dockerpty==0.3.4 4 | docopt==0.6.1 5 | requests==2.6.1 6 | six==1.7.3 7 | texttable==0.8.2 8 | websocket-client==0.11.0 9 | -------------------------------------------------------------------------------- /tests/fixtures/multiple-composefiles/docker-compose.yml: -------------------------------------------------------------------------------- 1 | simple: 2 | image: busybox:latest 3 | command: top 4 | another: 5 | image: busybox:latest 6 | command: top 7 | -------------------------------------------------------------------------------- /tests/fixtures/environment-composefile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | service: 2 | image: busybox:latest 3 | command: top 4 | 5 | environment: 6 | foo: bar 7 | hello: world 8 | -------------------------------------------------------------------------------- /script/shell: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -ex 3 | docker build -t docker-compose . 
4 | exec docker run -v /var/run/docker.sock:/var/run/docker.sock -v `pwd`:/code -ti --rm --entrypoint bash docker-compose 5 | -------------------------------------------------------------------------------- /tests/fixtures/env/one.env: -------------------------------------------------------------------------------- 1 | # Keep the blank lines and comments in this file, please 2 | 3 | ONE=2 4 | TWO=1 5 | 6 | # (thanks) 7 | 8 | THREE=3 9 | 10 | FOO=bar 11 | # FOO=somethingelse 12 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | mock >= 1.0.1 2 | nose==1.3.4 3 | git+https://github.com/pyinstaller/pyinstaller.git@12e40471c77f588ea5be352f7219c873ddaae056#egg=pyinstaller 4 | unittest2==0.8.0 5 | flake8==2.3.0 6 | pep8==1.6.1 7 | -------------------------------------------------------------------------------- /tests/fixtures/extends/nonexistent-path-child.yml: -------------------------------------------------------------------------------- 1 | dnechild: 2 | extends: 3 | file: nonexistent-path-base.yml 4 | service: dnebase 5 | image: busybox 6 | command: /bin/true 7 | environment: 8 | - BAR=2 -------------------------------------------------------------------------------- /tests/fixtures/extends/circle-1.yml: -------------------------------------------------------------------------------- 1 | foo: 2 | image: busybox 3 | bar: 4 | image: busybox 5 | web: 6 | extends: 7 | file: circle-2.yml 8 | service: web 9 | baz: 10 | image: busybox 11 | quux: 12 | image: busybox 13 | -------------------------------------------------------------------------------- /tests/fixtures/extends/circle-2.yml: -------------------------------------------------------------------------------- 1 | foo: 2 | image: busybox 3 | bar: 4 | image: busybox 5 | web: 6 | extends: 7 | file: circle-1.yml 8 | service: web 9 | baz: 10 | image: busybox 11 | quux: 12 | image: 
busybox 13 | -------------------------------------------------------------------------------- /compose/utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import hashlib 3 | 4 | 5 | def json_hash(obj): 6 | dump = json.dumps(obj, sort_keys=True, separators=(',', ':')) 7 | h = hashlib.sha256() 8 | h.update(dump) 9 | return h.hexdigest() 10 | -------------------------------------------------------------------------------- /tests/fixtures/links-composefile/docker-compose.yml: -------------------------------------------------------------------------------- 1 | db: 2 | image: busybox:latest 3 | command: top 4 | web: 5 | image: busybox:latest 6 | command: top 7 | links: 8 | - db:db 9 | console: 10 | image: busybox:latest 11 | command: top 12 | -------------------------------------------------------------------------------- /script/build-linux: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | TAG="docker-compose" 6 | docker build -t "$TAG" . 
7 | docker run \ 8 | --rm \ 9 | --user=user \ 10 | --volume="$(pwd):/code" \ 11 | --entrypoint="script/build-linux-inner" \ 12 | "$TAG" 13 | -------------------------------------------------------------------------------- /script/build-linux-inner: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | mkdir -p `pwd`/dist 6 | chmod 777 `pwd`/dist 7 | 8 | pyinstaller -F bin/docker-compose 9 | mv dist/docker-compose dist/docker-compose-Linux-x86_64 10 | dist/docker-compose-Linux-x86_64 version 11 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include Dockerfile 2 | include LICENSE 3 | include requirements.txt 4 | include requirements-dev.txt 5 | include tox.ini 6 | include *.md 7 | include contrib/completion/bash/docker-compose 8 | recursive-include tests * 9 | global-exclude *.pyc 10 | global-exclude *.pyo 11 | global-exclude *.un~ 12 | -------------------------------------------------------------------------------- /tests/fixtures/extends/docker-compose.yml: -------------------------------------------------------------------------------- 1 | myweb: 2 | extends: 3 | file: common.yml 4 | service: web 5 | command: top 6 | links: 7 | - "mydb:db" 8 | environment: 9 | # leave FOO alone 10 | # override BAR 11 | BAR: "2" 12 | # add BAZ 13 | BAZ: "2" 14 | mydb: 15 | image: busybox 16 | command: top 17 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py26,py27 3 | 4 | [testenv] 5 | usedevelop=True 6 | deps = 7 | -rrequirements.txt 8 | -rrequirements-dev.txt 9 | commands = 10 | nosetests -v {posargs} 11 | flake8 compose tests setup.py 12 | 13 | [flake8] 14 | # ignore line-length for now 15 | ignore = E501,E203 16 | exclude = 
compose/packages 17 | -------------------------------------------------------------------------------- /compose/const.py: -------------------------------------------------------------------------------- 1 | 2 | DEFAULT_TIMEOUT = 10 3 | LABEL_CONTAINER_NUMBER = 'com.docker.compose.container-number' 4 | LABEL_ONE_OFF = 'com.docker.compose.oneoff' 5 | LABEL_PROJECT = 'com.docker.compose.project' 6 | LABEL_SERVICE = 'com.docker.compose.service' 7 | LABEL_VERSION = 'com.docker.compose.version' 8 | LABEL_CONFIG_HASH = 'com.docker.compose.config-hash' 9 | -------------------------------------------------------------------------------- /script/build-osx: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -ex 3 | 4 | PATH="/usr/local/bin:$PATH" 5 | 6 | rm -rf venv 7 | virtualenv -p /usr/local/bin/python venv 8 | venv/bin/pip install -r requirements.txt 9 | venv/bin/pip install -r requirements-dev.txt 10 | venv/bin/pip install . 11 | venv/bin/pyinstaller -F bin/docker-compose 12 | mv dist/docker-compose dist/docker-compose-Darwin-x86_64 13 | dist/docker-compose-Darwin-x86_64 version 14 | -------------------------------------------------------------------------------- /script/docs: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | set -ex 3 | 4 | # import the existing docs build cmds from docker/docker 5 | DOCSPORT=8000 6 | GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null) 7 | DOCKER_DOCS_IMAGE="compose-docs$GIT_BRANCH" 8 | DOCKER_RUN_DOCS="docker run --rm -it -e NOCACHE" 9 | 10 | docker build -t "$DOCKER_DOCS_IMAGE" -f docs/Dockerfile . 11 | $DOCKER_RUN_DOCS -p $DOCSPORT:8000 "$DOCKER_DOCS_IMAGE" mkdocs serve 12 | -------------------------------------------------------------------------------- /script/test: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # See CONTRIBUTING.md for usage. 
3 | 4 | set -ex 5 | 6 | TAG="docker-compose:$(git rev-parse --short HEAD)" 7 | 8 | docker build -t "$TAG" . 9 | docker run \ 10 | --rm \ 11 | --volume="/var/run/docker.sock:/var/run/docker.sock" \ 12 | -e DOCKER_VERSIONS \ 13 | -e "TAG=$TAG" \ 14 | -e "affinity:image==$TAG" \ 15 | --entrypoint="script/test-versions" \ 16 | "$TAG" \ 17 | "$@" 18 | -------------------------------------------------------------------------------- /script/ci: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This should be run inside a container built from the Dockerfile 3 | # at the root of the repo: 4 | # 5 | # $ TAG="docker-compose:$(git rev-parse --short HEAD)" 6 | # $ docker build -t "$TAG" . 7 | # $ docker run --rm --volume="/var/run/docker.sock:/var/run/docker.sock" --volume="$(pwd)/.git:/code/.git" -e "TAG=$TAG" --entrypoint="script/ci" "$TAG" 8 | 9 | set -e 10 | 11 | export DOCKER_VERSIONS=all 12 | . script/test-versions 13 | 14 | >&2 echo "Building Linux binary" 15 | su -c script/build-linux-inner user 16 | -------------------------------------------------------------------------------- /script/wrapdocker: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ "$DOCKER_VERSION" != "" ] && [ "$DOCKER_VERSION" != "default" ]; then 4 | ln -fs "/usr/local/bin/docker-$DOCKER_VERSION" "/usr/local/bin/docker" 5 | fi 6 | 7 | # If a pidfile is still around (for example after a container restart), 8 | # delete it so that docker can start. 9 | rm -rf /var/run/docker.pid 10 | docker -d $DOCKER_DAEMON_ARGS &>/var/log/docker.log & 11 | 12 | >&2 echo "Waiting for Docker to start..." 13 | while ! 
docker ps &>/dev/null; do 14 | sleep 1 15 | done 16 | 17 | >&2 echo ">" "$@" 18 | exec "$@" 19 | -------------------------------------------------------------------------------- /script/dev: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This is a script for running Compose inside a Docker container. It's handy for 3 | # development. 4 | # 5 | # $ ln -s `pwd`/script/dev /usr/local/bin/docker-compose 6 | # $ cd /a/compose/project 7 | # $ docker-compose up 8 | # 9 | 10 | set -e 11 | 12 | # Follow symbolic links 13 | if [ -h "$0" ]; then 14 | DIR=$(readlink "$0") 15 | else 16 | DIR=$0 17 | fi 18 | DIR="$(dirname "$DIR")"/.. 19 | 20 | docker build -t docker-compose $DIR 21 | exec docker run -i -t -v /var/run/docker.sock:/var/run/docker.sock -v `pwd`:`pwd` -w `pwd` docker-compose $@ 22 | -------------------------------------------------------------------------------- /compose/cli/formatter.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | import os 4 | import texttable 5 | 6 | 7 | def get_tty_width(): 8 | tty_size = os.popen('stty size', 'r').read().split() 9 | if len(tty_size) != 2: 10 | return 80 11 | _, width = tty_size 12 | return int(width) 13 | 14 | 15 | class Formatter(object): 16 | def table(self, headers, rows): 17 | table = texttable.Texttable(max_width=get_tty_width()) 18 | table.set_cols_dtype(['t' for h in headers]) 19 | table.add_rows([headers] + rows) 20 | table.set_deco(table.HEADER) 21 | table.set_chars(['-', '|', '+', '-']) 22 | 23 | return table.draw() 24 | -------------------------------------------------------------------------------- /tests/unit/cli/docker_client_test.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | import os 4 | 5 | import mock 
6 | from tests import unittest 7 | 8 | from compose.cli import docker_client 9 | 10 | 11 | class DockerClientTestCase(unittest.TestCase): 12 | 13 | def test_docker_client_no_home(self): 14 | with mock.patch.dict(os.environ): 15 | del os.environ['HOME'] 16 | docker_client.docker_client() 17 | 18 | def test_docker_client_with_custom_timeout(self): 19 | with mock.patch.dict(os.environ): 20 | os.environ['DOCKER_CLIENT_TIMEOUT'] = timeout = "300" 21 | client = docker_client.docker_client() 22 | self.assertEqual(client.timeout, int(timeout)) 23 | -------------------------------------------------------------------------------- /script/test-versions: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This should be run inside a container built from the Dockerfile 3 | # at the root of the repo - script/test will do it automatically. 4 | 5 | set -e 6 | 7 | >&2 echo "Running lint checks" 8 | flake8 compose tests setup.py 9 | 10 | if [ "$DOCKER_VERSIONS" == "" ]; then 11 | DOCKER_VERSIONS="default" 12 | elif [ "$DOCKER_VERSIONS" == "all" ]; then 13 | DOCKER_VERSIONS="$ALL_DOCKER_VERSIONS" 14 | fi 15 | 16 | for version in $DOCKER_VERSIONS; do 17 | >&2 echo "Running tests against Docker $version" 18 | docker run \ 19 | --rm \ 20 | --privileged \ 21 | --volume="/var/lib/docker" \ 22 | -e "DOCKER_VERSION=$version" \ 23 | --entrypoint="script/dind" \ 24 | "$TAG" \ 25 | script/wrapdocker nosetests "$@" 26 | done 27 | -------------------------------------------------------------------------------- /docs/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docs/base:hugo 2 | MAINTAINER Mary Anthony (@moxiegirl) 3 | 4 | # To get the git info for this repo 5 | COPY . /src 6 | 7 | COPY . 
/docs/content/compose/ 8 | 9 | # Sed to process GitHub Markdown 10 | # 1-2 Remove comment code from metadata block 11 | # 3 Change ](/word to ](/project/ in links 12 | # 4 Change ](word.md) to ](/project/word) 13 | # 5 Remove .md extension from link text 14 | # 6 Change ](../ to ](/project/word) 15 | # 7 Change ](../../ to ](/project/ --> not implemented 16 | # 17 | # 18 | RUN find /docs/content/compose -type f -name "*.md" -exec sed -i.old \ 19 | -e '/^/g' \ 20 | -e '/^/g' \ 21 | -e 's/\(\]\)\([(]\)\(\/\)/\1\2\/compose\//g' \ 22 | -e 's/\(\][(]\)\([A-z].*\)\(\.md\)/\1\/compose\/\2/g' \ 23 | -e 's/\([(]\)\(.*\)\(\.md\)/\1\2/g' \ 24 | -e 's/\(\][(]\)\(\.\.\/\)/\1\/compose\//g' {} \; 25 | -------------------------------------------------------------------------------- /compose/cli/colors.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | NAMES = [ 3 | 'grey', 4 | 'red', 5 | 'green', 6 | 'yellow', 7 | 'blue', 8 | 'magenta', 9 | 'cyan', 10 | 'white' 11 | ] 12 | 13 | 14 | def get_pairs(): 15 | for i, name in enumerate(NAMES): 16 | yield(name, str(30 + i)) 17 | yield('intense_' + name, str(30 + i) + ';1') 18 | 19 | 20 | def ansi(code): 21 | return '\033[{0}m'.format(code) 22 | 23 | 24 | def ansi_color(code, s): 25 | return '{0}{1}{2}'.format(ansi(code), s, ansi(0)) 26 | 27 | 28 | def make_color_fn(code): 29 | return lambda s: ansi_color(code, s) 30 | 31 | 32 | for (name, code) in get_pairs(): 33 | globals()[name] = make_color_fn(code) 34 | 35 | 36 | def rainbow(): 37 | cs = ['cyan', 'yellow', 'green', 'magenta', 'red', 'blue', 38 | 'intense_cyan', 'intense_yellow', 'intense_green', 39 | 'intense_magenta', 'intense_red', 'intense_blue'] 40 | 41 | for c in cs: 42 | yield globals()[c] 43 | -------------------------------------------------------------------------------- /tests/unit/cli/verbose_proxy_test.py: -------------------------------------------------------------------------------- 1 | 
class VerboseProxyTestCase(unittest.TestCase):
    """Tests for the call/return formatting helpers in
    compose.cli.verbose_proxy."""

    def test_format_call(self):
        # Positional args are repr()'d; keyword args rendered as k=v.
        args = ("arg1", True)
        kwargs = {'key': 'value'}
        self.assertEqual(
            "(u'arg1', True, key=u'value')",
            verbose_proxy.format_call(args, kwargs))

    def test_format_return_sequence(self):
        # Sequences are summarised by type and length, not dumped in full.
        result = verbose_proxy.format_return(list(range(10)), 2)
        self.assertEqual("(list with 10 items)", result)

    def test_format_return(self):
        result = verbose_proxy.format_return({'Id': 'ok'}, 2)
        self.assertEqual("{u'Id': u'ok'}", result)

    def test_format_return_no_result(self):
        # None is passed straight through unformatted.
        self.assertEqual(None, verbose_proxy.format_return(None, 2))
db.containers()[0] 29 | self.assertEqual(container.get('Volumes')['/var/db'], host_path) 30 | 31 | 32 | class Crash(Exception): 33 | pass 34 | 35 | 36 | def crash(*args, **kwargs): 37 | raise Crash() 38 | -------------------------------------------------------------------------------- /compose/cli/multiplexer.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from threading import Thread 3 | 4 | try: 5 | from Queue import Queue, Empty 6 | except ImportError: 7 | from queue import Queue, Empty # Python 3.x 8 | 9 | 10 | # Yield STOP from an input generator to stop the 11 | # top-level loop without processing any more input. 12 | STOP = object() 13 | 14 | 15 | class Multiplexer(object): 16 | def __init__(self, generators): 17 | self.generators = generators 18 | self.queue = Queue() 19 | 20 | def loop(self): 21 | self._init_readers() 22 | 23 | while True: 24 | try: 25 | item = self.queue.get(timeout=0.1) 26 | if item is STOP: 27 | break 28 | else: 29 | yield item 30 | except Empty: 31 | pass 32 | 33 | def _init_readers(self): 34 | for generator in self.generators: 35 | t = Thread(target=_enqueue_output, args=(generator, self.queue)) 36 | t.daemon = True 37 | t.start() 38 | 39 | 40 | def _enqueue_output(generator, queue): 41 | for item in generator: 42 | queue.put(item) 43 | -------------------------------------------------------------------------------- /compose/cli/docker_client.py: -------------------------------------------------------------------------------- 1 | from docker import Client 2 | from docker import tls 3 | import ssl 4 | import os 5 | 6 | 7 | def docker_client(): 8 | """ 9 | Returns a docker-py client configured using environment variables 10 | according to the same logic as the official Docker client. 
11 | """ 12 | cert_path = os.environ.get('DOCKER_CERT_PATH', '') 13 | if cert_path == '': 14 | cert_path = os.path.join(os.environ.get('HOME', ''), '.docker') 15 | 16 | base_url = os.environ.get('DOCKER_HOST') 17 | tls_config = None 18 | 19 | if os.environ.get('DOCKER_TLS_VERIFY', '') != '': 20 | parts = base_url.split('://', 1) 21 | base_url = '%s://%s' % ('https', parts[1]) 22 | 23 | client_cert = (os.path.join(cert_path, 'cert.pem'), os.path.join(cert_path, 'key.pem')) 24 | ca_cert = os.path.join(cert_path, 'ca.pem') 25 | 26 | tls_config = tls.TLSConfig( 27 | ssl_version=ssl.PROTOCOL_TLSv1, 28 | verify=True, 29 | assert_hostname=False, 30 | client_cert=client_cert, 31 | ca_cert=ca_cert, 32 | ) 33 | 34 | timeout = int(os.environ.get('DOCKER_CLIENT_TIMEOUT', 60)) 35 | return Client(base_url=base_url, tls=tls_config, version='1.18', timeout=timeout) 36 | -------------------------------------------------------------------------------- /tests/unit/progress_stream_test.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | from tests import unittest 4 | 5 | from six import StringIO 6 | 7 | from compose import progress_stream 8 | 9 | 10 | class ProgressStreamTestCase(unittest.TestCase): 11 | 12 | def test_stream_output(self): 13 | output = [ 14 | '{"status": "Downloading", "progressDetail": {"current": ' 15 | '31019763, "start": 1413653874, "total": 62763875}, ' 16 | '"progress": "..."}', 17 | ] 18 | events = progress_stream.stream_output(output, StringIO()) 19 | self.assertEqual(len(events), 1) 20 | 21 | def test_stream_output_div_zero(self): 22 | output = [ 23 | '{"status": "Downloading", "progressDetail": {"current": ' 24 | '0, "start": 1413653874, "total": 0}, ' 25 | '"progress": "..."}', 26 | ] 27 | events = progress_stream.stream_output(output, StringIO()) 28 | self.assertEqual(len(events), 1) 29 | 30 | def test_stream_output_null_total(self): 
31 | output = [ 32 | '{"status": "Downloading", "progressDetail": {"current": ' 33 | '0, "start": 1413653874, "total": null}, ' 34 | '"progress": "..."}', 35 | ] 36 | events = progress_stream.stream_output(output, StringIO()) 37 | self.assertEqual(len(events), 1) 38 | -------------------------------------------------------------------------------- /script/prepare-osx: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | python_version() { 6 | python -V 2>&1 7 | } 8 | 9 | openssl_version() { 10 | python -c "import ssl; print ssl.OPENSSL_VERSION" 11 | } 12 | 13 | desired_python_version="2.7.9" 14 | desired_python_brew_version="2.7.9" 15 | python_formula="https://raw.githubusercontent.com/Homebrew/homebrew/1681e193e4d91c9620c4901efd4458d9b6fcda8e/Library/Formula/python.rb" 16 | 17 | desired_openssl_version="1.0.1j" 18 | desired_openssl_brew_version="1.0.1j_1" 19 | openssl_formula="https://raw.githubusercontent.com/Homebrew/homebrew/62fc2a1a65e83ba9dbb30b2e0a2b7355831c714b/Library/Formula/openssl.rb" 20 | 21 | PATH="/usr/local/bin:$PATH" 22 | 23 | if !(which brew); then 24 | ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)" 25 | fi 26 | 27 | brew update 28 | 29 | if !(python_version | grep "$desired_python_version"); then 30 | if brew list | grep python; then 31 | brew unlink python 32 | fi 33 | 34 | brew install "$python_formula" 35 | brew switch python "$desired_python_brew_version" 36 | fi 37 | 38 | if !(openssl_version | grep "$desired_openssl_version"); then 39 | if brew list | grep openssl; then 40 | brew unlink openssl 41 | fi 42 | 43 | brew install "$openssl_formula" 44 | brew switch openssl "$desired_openssl_brew_version" 45 | fi 46 | 47 | echo "*** Using $(python_version)" 48 | echo "*** Using $(openssl_version)" 49 | 50 | if !(which virtualenv); then 51 | pip install virtualenv 52 | fi 53 | 54 | 
class DockerClientTestCase(unittest.TestCase):
    """Base class for integration tests that talk to a live Docker daemon.

    Provides a shared client, automatic cleanup of test containers and
    images, and helpers for creating services and running builds.
    """

    @classmethod
    def setUpClass(cls):
        # One client shared by every test in the class.
        cls.client = docker_client()

    def tearDown(self):
        # Remove every container labelled as part of the 'composetest'
        # project...
        container_filter = {'label': '%s=composetest' % LABEL_PROJECT}
        for container in self.client.containers(all=True, filters=container_filter):
            self.client.kill(container['Id'])
            self.client.remove_container(container['Id'])
        # ...and every image labelled as a compose test image.
        image_filter = {'label': 'com.docker.compose.test_image'}
        for image in self.client.images(filters=image_filter):
            self.client.remove_image(image)

    def create_service(self, name, **kwargs):
        """Build a Service in the 'composetest' project, defaulting to a
        busybox image running `top` so the container stays alive."""
        if 'image' not in kwargs and 'build' not in kwargs:
            kwargs['image'] = 'busybox:latest'
        kwargs.setdefault('command', ["top"])

        service_dict = make_service_dict(name, kwargs, working_dir='.')
        return Service(
            project='composetest',
            client=self.client,
            **service_dict
        )

    def check_build(self, *args, **kwargs):
        """Run a build and consume its output stream, which raises on
        build errors reported by the daemon."""
        kwargs.setdefault('rm', True)
        build_output = self.client.build(*args, **kwargs)
        stream_output(build_output, open('/dev/null', 'w'))
class SplitBufferTest(unittest.TestCase):
    """split_buffer() re-chunks an arbitrary byte stream into
    separator-terminated pieces, preserving any unterminated remainder
    as the final item."""

    def test_single_line_chunks(self):
        def chunks():
            yield b'abc\n'
            yield b'def\n'
            yield b'ghi\n'

        self.assert_produces(chunks, [b'abc\n', b'def\n', b'ghi\n'])

    def test_no_end_separator(self):
        # The final chunk has no trailing separator; it must still come out.
        def chunks():
            yield b'abc\n'
            yield b'def\n'
            yield b'ghi'

        self.assert_produces(chunks, [b'abc\n', b'def\n', b'ghi'])

    def test_multiple_line_chunk(self):
        # A single chunk containing several lines is split apart.
        def chunks():
            yield b'abc\ndef\nghi'

        self.assert_produces(chunks, [b'abc\n', b'def\n', b'ghi'])

    def test_chunked_line(self):
        # A line delivered one byte at a time is reassembled.
        def chunks():
            for piece in (b'a', b'b', b'c', b'\n', b'd'):
                yield piece

        self.assert_produces(chunks, [b'abc\n', b'd'])

    def test_preserves_unicode_sequences_within_lines(self):
        # Multi-byte UTF-8 sequences must not be split or mangled.
        encoded = u"a\u2022c\n".encode('utf-8')

        def chunks():
            yield encoded

        self.assert_produces(chunks, [encoded])

    def assert_produces(self, reader, expectations):
        """Assert that split_buffer yields exactly *expectations*,
        matching both type and value of each piece."""
        produced = split_buffer(reader(), b'\n')

        for actual, expected in zip(produced, expectations):
            self.assertEqual(type(actual), type(expected))
            self.assertEqual(actual, expected)
['\"]([^'\"]*)['\"]", 21 | version_file, re.M) 22 | if version_match: 23 | return version_match.group(1) 24 | raise RuntimeError("Unable to find version string.") 25 | 26 | 27 | install_requires = [ 28 | 'docopt >= 0.6.1, < 0.7', 29 | 'PyYAML >= 3.10, < 4', 30 | 'requests >= 2.6.1, < 2.7', 31 | 'texttable >= 0.8.1, < 0.9', 32 | 'websocket-client >= 0.11.0, < 1.0', 33 | 'docker-py >= 1.2.3, < 1.3', 34 | 'dockerpty >= 0.3.4, < 0.4', 35 | 'six >= 1.3.0, < 2', 36 | ] 37 | 38 | 39 | tests_require = [ 40 | 'mock >= 1.0.1', 41 | 'nose', 42 | 'pyinstaller', 43 | 'flake8', 44 | ] 45 | 46 | 47 | if sys.version_info < (2, 7): 48 | tests_require.append('unittest2') 49 | 50 | 51 | setup( 52 | name='docker-compose', 53 | version=find_version("compose", "__init__.py"), 54 | description='Multi-container orchestration for Docker', 55 | url='https://www.docker.com/', 56 | author='Docker, Inc.', 57 | license='Apache License 2.0', 58 | packages=find_packages(exclude=['tests.*', 'tests']), 59 | include_package_data=True, 60 | test_suite='nose.collector', 61 | install_requires=install_requires, 62 | tests_require=tests_require, 63 | entry_points=""" 64 | [console_scripts] 65 | docker-compose=compose.cli.main:main 66 | """, 67 | ) 68 | -------------------------------------------------------------------------------- /compose/cli/docopt_command.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | import sys 4 | 5 | from inspect import getdoc 6 | from docopt import docopt, DocoptExit 7 | 8 | 9 | def docopt_full_help(docstring, *args, **kwargs): 10 | try: 11 | return docopt(docstring, *args, **kwargs) 12 | except DocoptExit: 13 | raise SystemExit(docstring) 14 | 15 | 16 | class DocoptCommand(object): 17 | def docopt_options(self): 18 | return {'options_first': True} 19 | 20 | def sys_dispatch(self): 21 | self.dispatch(sys.argv[1:], None) 22 | 23 | def dispatch(self, 
argv, global_options): 24 | self.perform_command(*self.parse(argv, global_options)) 25 | 26 | def perform_command(self, options, handler, command_options): 27 | handler(command_options) 28 | 29 | def parse(self, argv, global_options): 30 | options = docopt_full_help(getdoc(self), argv, **self.docopt_options()) 31 | command = options['COMMAND'] 32 | 33 | if command is None: 34 | raise SystemExit(getdoc(self)) 35 | 36 | handler = self.get_handler(command) 37 | docstring = getdoc(handler) 38 | 39 | if docstring is None: 40 | raise NoSuchCommand(command, self) 41 | 42 | command_options = docopt_full_help(docstring, options['ARGS'], options_first=True) 43 | return options, handler, command_options 44 | 45 | def get_handler(self, command): 46 | command = command.replace('-', '_') 47 | 48 | if not hasattr(self, command): 49 | raise NoSuchCommand(command, self) 50 | 51 | return getattr(self, command) 52 | 53 | 54 | class NoSuchCommand(Exception): 55 | def __init__(self, command, supercommand): 56 | super(NoSuchCommand, self).__init__("No such command: %s" % command) 57 | 58 | self.command = command 59 | self.supercommand = supercommand 60 | -------------------------------------------------------------------------------- /compose/cli/verbose_proxy.py: -------------------------------------------------------------------------------- 1 | 2 | import functools 3 | from itertools import chain 4 | import logging 5 | import pprint 6 | 7 | import six 8 | 9 | 10 | def format_call(args, kwargs): 11 | args = (repr(a) for a in args) 12 | kwargs = ("{0!s}={1!r}".format(*item) for item in six.iteritems(kwargs)) 13 | return "({0})".format(", ".join(chain(args, kwargs))) 14 | 15 | 16 | def format_return(result, max_lines): 17 | if isinstance(result, (list, tuple, set)): 18 | return "({0} with {1} items)".format(type(result).__name__, len(result)) 19 | 20 | if result: 21 | lines = pprint.pformat(result).split('\n') 22 | extra = '\n...' 
- Compose’s brute-force “delete and recreate everything” approach is great for dev and testing, but it is not sufficient for production environments. You should be able to define a "desired" state that Compose will intelligently converge to.
([#426](https://github.com/docker/fig/issues/426)) 9 | - Compose should recommend a technique for zero-downtime deploys. 10 | 11 | ## Integration with Swarm 12 | 13 | Compose should integrate really well with Swarm so you can take an application you've developed on your laptop and run it on a Swarm cluster. 14 | 15 | The current state of integration is documented in [SWARM.md](SWARM.md). 16 | 17 | ## Applications spanning multiple teams 18 | 19 | Compose works well for applications that are in a single repository and depend on services that are hosted on Docker Hub. If your application depends on another application within your organisation, Compose doesn't work as well. 20 | 21 | There are several ideas about how this could work, such as [including external files](https://github.com/docker/fig/issues/318). 22 | 23 | ## An even better tool for development environments 24 | 25 | Compose is a great tool for development environments, but it could be even better. For example: 26 | 27 | - [Compose could watch your code and automatically kick off builds when something changes.](https://github.com/docker/fig/issues/184) 28 | - It should be possible to define hostnames for containers which work from the host machine, e.g. “mywebcontainer.local”. This is needed by apps comprising multiple web services which generate links to one another (e.g. a frontend website and a separate admin webapp) 29 | -------------------------------------------------------------------------------- /SWARM.md: -------------------------------------------------------------------------------- 1 | Docker Compose/Swarm integration 2 | ================================ 3 | 4 | Eventually, Compose and Swarm aim to have full integration, meaning you can point a Compose app at a Swarm cluster and have it all just work as if you were using a single Docker host. 
5 | 6 | However, the current extent of integration is minimal: Compose can create containers on a Swarm cluster, but the majority of Compose apps won’t work out of the box unless all containers are scheduled on one host, defeating much of the purpose of using Swarm in the first place. 7 | 8 | Still, Compose and Swarm can be useful in a “batch processing” scenario (where a large number of containers need to be spun up and down to do independent computation) or a “shared cluster” scenario (where multiple teams want to deploy apps on a cluster without worrying about where to put them). 9 | 10 | A number of things need to happen before full integration is achieved, which are documented below. 11 | 12 | Links and networking 13 | -------------------- 14 | 15 | The primary thing stopping multi-container apps from working seamlessly on Swarm is getting them to talk to one another: enabling private communication between containers on different hosts hasn’t been solved in a non-hacky way. 16 | 17 | Long-term, networking is [getting overhauled](https://github.com/docker/docker/issues/9983) in such a way that it’ll fit the multi-host model much better. For now, **linked containers are automatically scheduled on the same host**. 18 | 19 | Building 20 | -------- 21 | 22 | `docker build` against a Swarm cluster is not implemented, so for now the `build` option will not work - you will need to manually build your service's image, push it somewhere and use `image` to instruct Compose to pull it. Here's an example using the Docker Hub: 23 | 24 | $ docker build -t myusername/web . 
25 | $ docker push myusername/web 26 | $ cat docker-compose.yml 27 | web: 28 | image: myusername/web 29 | links: ["db"] 30 | db: 31 | image: postgres 32 | $ docker-compose up -d 33 | -------------------------------------------------------------------------------- /tests/unit/log_printer_test.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | import os 4 | 5 | from compose.cli.log_printer import LogPrinter 6 | from .. import unittest 7 | 8 | 9 | class LogPrinterTest(unittest.TestCase): 10 | def get_default_output(self, monochrome=False): 11 | def reader(*args, **kwargs): 12 | yield "hello\nworld" 13 | 14 | container = MockContainer(reader) 15 | output = run_log_printer([container], monochrome=monochrome) 16 | return output 17 | 18 | def test_single_container(self): 19 | output = self.get_default_output() 20 | 21 | self.assertIn('hello', output) 22 | self.assertIn('world', output) 23 | 24 | def test_monochrome(self): 25 | output = self.get_default_output(monochrome=True) 26 | self.assertNotIn('\033[', output) 27 | 28 | def test_polychrome(self): 29 | output = self.get_default_output() 30 | self.assertIn('\033[', output) 31 | 32 | def test_unicode(self): 33 | glyph = u'\u2022'.encode('utf-8') 34 | 35 | def reader(*args, **kwargs): 36 | yield glyph + b'\n' 37 | 38 | container = MockContainer(reader) 39 | output = run_log_printer([container]) 40 | 41 | self.assertIn(glyph, output) 42 | 43 | 44 | def run_log_printer(containers, monochrome=False): 45 | r, w = os.pipe() 46 | reader, writer = os.fdopen(r, 'r'), os.fdopen(w, 'w') 47 | printer = LogPrinter(containers, output=writer, monochrome=monochrome) 48 | printer.run() 49 | writer.close() 50 | return reader.read() 51 | 52 | 53 | class MockContainer(object): 54 | def __init__(self, reader): 55 | self._reader = reader 56 | 57 | @property 58 | def name(self): 59 | return 'myapp_web_1' 60 | 61 | 
@property 62 | def name_without_project(self): 63 | return 'web_1' 64 | 65 | def attach(self, *args, **kwargs): 66 | return self._reader() 67 | 68 | def wait(self, *args, **kwargs): 69 | return 0 70 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Docker Compose 2 | ============== 3 | *(Previously known as Fig)* 4 | 5 | Compose is a tool for defining and running multi-container applications with 6 | Docker. With Compose, you define a multi-container application in a single 7 | file, then spin your application up in a single command which does everything 8 | that needs to be done to get it running. 9 | 10 | Compose is great for development environments, staging servers, and CI. We don't 11 | recommend that you use it in production yet. 12 | 13 | Using Compose is basically a three-step process. 14 | 15 | 1. Define your app's environment with a `Dockerfile` so it can be 16 | reproduced anywhere. 17 | 2. Define the services that make up your app in `docker-compose.yml` so 18 | they can be run together in an isolated environment: 19 | 3. Lastly, run `docker-compose up` and Compose will start and run your entire app. 20 | 21 | A `docker-compose.yml` looks like this: 22 | 23 | web: 24 | build: . 25 | ports: 26 | - "5000:5000" 27 | volumes: 28 | - .:/code 29 | links: 30 | - redis 31 | redis: 32 | image: redis 33 | 34 | Compose has commands for managing the whole lifecycle of your application: 35 | 36 | * Start, stop and rebuild services 37 | * View the status of running services 38 | * Stream the log output of running services 39 | * Run a one-off command on a service 40 | 41 | Installation and documentation 42 | ------------------------------ 43 | 44 | - Full documentation is available on [Docker's website](http://docs.docker.com/compose/). 
45 | - If you have any questions, you can talk in real-time with other developers in the #docker-compose IRC channel on Freenode. [Click here to join using IRCCloud.](https://www.irccloud.com/invite?hostname=irc.freenode.net&channel=%23docker-compose) 46 | 47 | Contributing 48 | ------------ 49 | 50 | [![Build Status](http://jenkins.dockerproject.org/buildStatus/icon?job=Compose%20Master)](http://jenkins.dockerproject.org/job/Compose%20Master/) 51 | 52 | Want to help build Compose? Check out our [contributing documentation](https://github.com/docker/compose/blob/master/CONTRIBUTING.md). 53 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM debian:wheezy 2 | 3 | RUN set -ex; \ 4 | apt-get update -qq; \ 5 | apt-get install -y \ 6 | gcc \ 7 | make \ 8 | zlib1g \ 9 | zlib1g-dev \ 10 | libssl-dev \ 11 | git \ 12 | apt-transport-https \ 13 | ca-certificates \ 14 | curl \ 15 | lxc \ 16 | iptables \ 17 | ; \ 18 | rm -rf /var/lib/apt/lists/* 19 | 20 | # Build Python 2.7.9 from source 21 | RUN set -ex; \ 22 | curl -LO https://www.python.org/ftp/python/2.7.9/Python-2.7.9.tgz; \ 23 | tar -xzf Python-2.7.9.tgz; \ 24 | cd Python-2.7.9; \ 25 | ./configure --enable-shared; \ 26 | make; \ 27 | make install; \ 28 | cd ..; \ 29 | rm -rf /Python-2.7.9; \ 30 | rm Python-2.7.9.tgz 31 | 32 | # Make libpython findable 33 | ENV LD_LIBRARY_PATH /usr/local/lib 34 | 35 | # Install setuptools 36 | RUN set -ex; \ 37 | curl -LO https://bootstrap.pypa.io/ez_setup.py; \ 38 | python ez_setup.py; \ 39 | rm ez_setup.py 40 | 41 | # Install pip 42 | RUN set -ex; \ 43 | curl -LO https://pypi.python.org/packages/source/p/pip/pip-7.0.1.tar.gz; \ 44 | tar -xzf pip-7.0.1.tar.gz; \ 45 | cd pip-7.0.1; \ 46 | python setup.py install; \ 47 | cd ..; \ 48 | rm -rf pip-7.0.1; \ 49 | rm pip-7.0.1.tar.gz 50 | 51 | ENV ALL_DOCKER_VERSIONS 1.6.0 1.7.0 52 | 53 | RUN set -ex; \ 54 | curl 
# Compose environment variables reference
22 | Full URL, e.g. `DB_PORT=tcp://172.17.0.5:5432` 23 | 24 | name\_PORT\_num\_protocol
25 | Full URL, e.g. `DB_PORT_5432_TCP=tcp://172.17.0.5:5432` 26 | 27 | name\_PORT\_num\_protocol\_ADDR
28 | Container's IP address, e.g. `DB_PORT_5432_TCP_ADDR=172.17.0.5` 29 | 30 | name\_PORT\_num\_protocol\_PORT
31 | Exposed port number, e.g. `DB_PORT_5432_TCP_PORT=5432` 32 | 33 | name\_PORT\_num\_protocol\_PROTO
34 | Protocol (tcp or udp), e.g. `DB_PORT_5432_TCP_PROTO=tcp` 35 | 36 | name\_NAME
37 | Fully qualified container name, e.g. `DB_1_NAME=/myapp_web_1/myapp_db_1` 38 | 39 | [Docker links]: http://docs.docker.com/userguide/dockerlinks/ 40 | 41 | ## Compose documentation 42 | 43 | - [User guide](/) 44 | - [Installing Compose](install.md) 45 | - [Get started with Django](django.md) 46 | - [Get started with Rails](rails.md) 47 | - [Get started with Wordpress](wordpress.md) 48 | - [Command line reference](cli.md) 49 | - [Yaml file reference](yml.md) 50 | - [Compose command line completion](completion.md) 51 | -------------------------------------------------------------------------------- /compose/cli/errors.py: -------------------------------------------------------------------------------- 1 | from __future__ import absolute_import 2 | from textwrap import dedent 3 | 4 | 5 | class UserError(Exception): 6 | def __init__(self, msg): 7 | self.msg = dedent(msg).strip() 8 | 9 | def __unicode__(self): 10 | return self.msg 11 | 12 | __str__ = __unicode__ 13 | 14 | 15 | class DockerNotFoundMac(UserError): 16 | def __init__(self): 17 | super(DockerNotFoundMac, self).__init__(""" 18 | Couldn't connect to Docker daemon. You might need to install docker-osx: 19 | 20 | https://github.com/noplay/docker-osx 21 | """) 22 | 23 | 24 | class DockerNotFoundUbuntu(UserError): 25 | def __init__(self): 26 | super(DockerNotFoundUbuntu, self).__init__(""" 27 | Couldn't connect to Docker daemon. You might need to install Docker: 28 | 29 | http://docs.docker.io/en/latest/installation/ubuntulinux/ 30 | """) 31 | 32 | 33 | class DockerNotFoundGeneric(UserError): 34 | def __init__(self): 35 | super(DockerNotFoundGeneric, self).__init__(""" 36 | Couldn't connect to Docker daemon. 
You might need to install Docker: 37 | 38 | http://docs.docker.io/en/latest/installation/ 39 | """) 40 | 41 | 42 | class ConnectionErrorBoot2Docker(UserError): 43 | def __init__(self): 44 | super(ConnectionErrorBoot2Docker, self).__init__(""" 45 | Couldn't connect to Docker daemon - you might need to run `boot2docker up`. 46 | """) 47 | 48 | 49 | class ConnectionErrorGeneric(UserError): 50 | def __init__(self, url): 51 | super(ConnectionErrorGeneric, self).__init__(""" 52 | Couldn't connect to Docker daemon at %s - is it running? 53 | 54 | If it's at a non-standard location, specify the URL with the DOCKER_HOST environment variable. 55 | """ % url) 56 | 57 | 58 | class ComposeFileNotFound(UserError): 59 | def __init__(self, supported_filenames): 60 | super(ComposeFileNotFound, self).__init__(""" 61 | Can't find a suitable configuration file in this directory or any parent. Are you in the right directory? 62 | 63 | Supported filenames: %s 64 | """ % ", ".join(supported_filenames)) 65 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: all binary build cross default docs docs-build docs-shell shell test test-unit test-integration test-integration-cli test-docker-py validate 2 | 3 | # env vars passed through directly to Docker's build scripts 4 | # to allow things like `make DOCKER_CLIENTONLY=1 binary` easily 5 | # `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these 6 | DOCKER_ENVS := \ 7 | -e BUILDFLAGS \ 8 | -e DOCKER_CLIENTONLY \ 9 | -e DOCKER_EXECDRIVER \ 10 | -e DOCKER_GRAPHDRIVER \ 11 | -e TESTDIRS \ 12 | -e TESTFLAGS \ 13 | -e TIMEOUT 14 | # note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds 15 | 16 | 
# to allow `make DOCSDIR=docs docs-shell` (to create a bind mount in docs) 17 | DOCS_MOUNT := $(if $(DOCSDIR),-v $(CURDIR)/$(DOCSDIR):/$(DOCSDIR)) 18 | 19 | # to allow `make DOCSPORT=9000 docs` 20 | DOCSPORT := 8000 21 | 22 | # Get the IP ADDRESS 23 | DOCKER_IP=$(shell python -c "import urlparse ; print urlparse.urlparse('$(DOCKER_HOST)').hostname or ''") 24 | HUGO_BASE_URL=$(shell test -z "$(DOCKER_IP)" && echo localhost || echo "$(DOCKER_IP)") 25 | HUGO_BIND_IP=0.0.0.0 26 | 27 | GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) 28 | DOCKER_IMAGE := docker$(if $(GIT_BRANCH),:$(GIT_BRANCH)) 29 | DOCKER_DOCS_IMAGE := docs-base$(if $(GIT_BRANCH),:$(GIT_BRANCH)) 30 | 31 | 32 | DOCKER_RUN_DOCS := docker run --rm -it $(DOCS_MOUNT) -e AWS_S3_BUCKET -e NOCACHE 33 | 34 | # for some docs workarounds (see below in "docs-build" target) 35 | GITCOMMIT := $(shell git rev-parse --short HEAD 2>/dev/null) 36 | 37 | default: docs 38 | 39 | docs: docs-build 40 | $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) 41 | 42 | docs-draft: docs-build 43 | $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 -e DOCKERHOST "$(DOCKER_DOCS_IMAGE)" hugo server --buildDrafts="true" --port=$(DOCSPORT) --baseUrl=$(HUGO_BASE_URL) --bind=$(HUGO_BIND_IP) 44 | 45 | 46 | docs-shell: docs-build 47 | $(DOCKER_RUN_DOCS) -p $(if $(DOCSPORT),$(DOCSPORT):)8000 "$(DOCKER_DOCS_IMAGE)" bash 48 | 49 | 50 | docs-build: 51 | # ( git remote | grep -v upstream ) || git diff --name-status upstream/release..upstream/docs ./ > ./changed-files 52 | # echo "$(GIT_BRANCH)" > GIT_BRANCH 53 | # echo "$(AWS_S3_BUCKET)" > AWS_S3_BUCKET 54 | # echo "$(GITCOMMIT)" > GITCOMMIT 55 | docker build -t "$(DOCKER_DOCS_IMAGE)" . 
import json
import os
import codecs


class StreamOutputError(Exception):
    """Raised when a stream event carries an `errorDetail` payload."""
    pass


def stream_output(output, stream):
    """Render a Docker JSON event stream (pull/build progress) to *stream*.

    Each chunk in *output* must be a complete JSON document. When *stream*
    is a terminal, per-image progress lines are updated in place using ANSI
    cursor movement; otherwise events are simply appended line by line.

    Returns the list of all decoded events.
    Raises StreamOutputError if any event reports an error.
    """
    is_terminal = hasattr(stream, 'fileno') and os.isatty(stream.fileno())
    stream = codecs.getwriter('utf-8')(stream)
    all_events = []
    # Maps image id -> the (0-based) row its progress line occupies.
    lines = {}
    diff = 0

    for chunk in output:
        event = json.loads(chunk)
        all_events.append(event)

        if 'progress' in event or 'progressDetail' in event:
            image_id = event.get('id')
            if not image_id:
                continue

            if image_id in lines:
                # How many rows above the cursor this image's line sits.
                diff = len(lines) - lines[image_id]
            else:
                # First sighting of this image: allocate a fresh bottom row.
                lines[image_id] = len(lines)
                stream.write("\n")
                diff = 0

            if is_terminal:
                # move cursor up `diff` rows to this image's own line
                stream.write("%c[%dA" % (27, diff))

        print_output_event(event, stream, is_terminal)

        if 'id' in event and is_terminal:
            # move cursor back down to the bottom row
            stream.write("%c[%dB" % (27, diff))

        stream.flush()

    return all_events


def print_output_event(event, stream, is_terminal):
    """Write one decoded Docker *event* to *stream*.

    Raises StreamOutputError if the event contains `errorDetail`.
    """
    if 'errorDetail' in event:
        raise StreamOutputError(event['errorDetail']['message'])

    terminator = ''

    if is_terminal and 'stream' not in event:
        # erase the current line and end the write with \r so the next
        # status for this image overwrites it in place
        stream.write("%c[2K\r" % 27)
        terminator = "\r"
    elif 'progressDetail' in event:
        # Without a terminal there is no way to update progress in place,
        # so raw progress events are suppressed entirely.
        return

    if 'time' in event:
        stream.write("[%s] " % event['time'])

    if 'id' in event:
        stream.write("%s: " % event['id'])

    if 'from' in event:
        stream.write("(from %s) " % event['from'])

    status = event.get('status', '')

    if 'progress' in event:
        stream.write("%s %s%s" % (status, event['progress'], terminator))
    elif 'progressDetail' in event:
        detail = event['progressDetail']
        total = detail.get('total')
        if 'current' in detail and total:
            percentage = float(detail['current']) / float(total) * 100
            stream.write('%s (%.1f%%)%s' % (status, percentage, terminator))
        else:
            stream.write('%s%s' % (status, terminator))
    elif 'stream' in event:
        stream.write("%s%s" % (event['stream'], terminator))
    else:
        stream.write("%s%s\n" % (status, terminator))
by adding in `~/.zshrc` 38 | 39 | fpath=(~/.zsh/completion $fpath) 40 | 41 | Make sure `compinit` is loaded or do it by adding in `~/.zshrc` 42 | 43 | autoload -Uz compinit && compinit -i 44 | 45 | Then reload your shell 46 | 47 | exec $SHELL -l 48 | 49 | ## Available completions 50 | 51 | Depending on what you typed on the command line so far, it will complete 52 | 53 | - available docker-compose commands 54 | - options that are available for a particular command 55 | - service names that make sense in a given context (e.g. services with running or stopped instances or services based on images vs. services based on Dockerfiles). For `docker-compose scale`, completed service names will automatically have "=" appended. 56 | - arguments for selected options, e.g. `docker-compose kill -s` will complete some signals like SIGHUP and SIGUSR1. 57 | 58 | Enjoy working with Compose faster and with less typos! 59 | 60 | ## Compose documentation 61 | 62 | - [User guide](/) 63 | - [Installing Compose](install.md) 64 | - [Get started with Django](django.md) 65 | - [Get started with Rails](rails.md) 66 | - [Get started with Wordpress](wordpress.md) 67 | - [Command line reference](cli.md) 68 | - [Yaml file reference](yml.md) 69 | - [Compose environment variables](env.md) 70 | -------------------------------------------------------------------------------- /tests/integration/legacy_test.py: -------------------------------------------------------------------------------- 1 | from docker.errors import APIError 2 | 3 | from compose import legacy 4 | from compose.project import Project 5 | from .testcases import DockerClientTestCase 6 | 7 | 8 | class LegacyTestCase(DockerClientTestCase): 9 | 10 | def setUp(self): 11 | super(LegacyTestCase, self).setUp() 12 | self.containers = [] 13 | 14 | db = self.create_service('db') 15 | web = self.create_service('web', links=[(db, 'db')]) 16 | nginx = self.create_service('nginx', links=[(web, 'web')]) 17 | 18 | self.services = [db, web, nginx] 19 
        self.project = Project('composetest', self.services, self.client)

        # Create a legacy container (old pre-1.3 naming convention, no
        # labels) for each service, so the migration code under test has
        # something to find.
        for service in self.services:
            service.ensure_image_exists()
            container = self.client.create_container(
                name='{}_{}_1'.format(self.project.name, service.name),
                **service.options
            )
            self.client.start(container)
            self.containers.append(container)

        # Create a single one-off legacy container (never started, so it
        # only shows up when stopped containers are included)
        self.containers.append(self.client.create_container(
            name='{}_{}_run_1'.format(self.project.name, self.services[0].name),
            **self.services[0].options
        ))

    def tearDown(self):
        """Force-remove every container created in setUp.

        Each call is best-effort: a test may already have killed or
        removed some of them, so APIError is swallowed.
        """
        super(LegacyTestCase, self).tearDown()
        for container in self.containers:
            try:
                self.client.kill(container)
            except APIError:
                pass
            try:
                self.client.remove_container(container)
            except APIError:
                pass

    def get_legacy_containers(self, **kwargs):
        """Shortcut: list the legacy containers for this project's services."""
        return list(legacy.get_legacy_containers(
            self.client,
            self.project.name,
            [s.name for s in self.services],
            **kwargs
        ))

    def test_get_legacy_container_names(self):
        # One running legacy container per service was created in setUp.
        self.assertEqual(len(self.get_legacy_containers()), len(self.services))

    def test_get_legacy_container_names_one_off(self):
        # Only the single stopped `_run_` container matches one_off=True.
        self.assertEqual(len(self.get_legacy_containers(stopped=True, one_off=True)), 1)

    def test_migration_to_labels(self):
        # Project.containers() must refuse to run while unlabelled legacy
        # containers exist, and must report their names in the error.
        with self.assertRaises(legacy.LegacyContainersError) as cm:
            self.assertEqual(self.project.containers(stopped=True), [])

        self.assertEqual(
            set(cm.exception.names),
            set(['composetest_db_1', 'composetest_web_1', 'composetest_nginx_1']),
        )

        # After migrating, the same call succeeds and finds all services.
        legacy.migrate_project_to_labels(self.project)
        self.assertEqual(len(self.project.containers(stopped=True)), len(self.services))
-------------------------------------------------------------------------------- 1 | 11 | 12 | 13 | # Install Docker Compose 14 | 15 | To install Compose, you'll need to install Docker first. You'll then install 16 | Compose with a `curl` command. 17 | 18 | ## Install Docker 19 | 20 | First, install Docker version 1.6 or greater: 21 | 22 | - [Instructions for Mac OS X](http://docs.docker.com/installation/mac/) 23 | - [Instructions for Ubuntu](http://docs.docker.com/installation/ubuntulinux/) 24 | - [Instructions for other systems](http://docs.docker.com/installation/) 25 | 26 | ## Install Compose 27 | 28 | To install Compose, run the following commands: 29 | 30 | curl -L https://github.com/docker/compose/releases/download/1.2.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose 31 | chmod +x /usr/local/bin/docker-compose 32 | 33 | > Note: If you get a "Permission denied" error, your `/usr/local/bin` directory probably isn't writable and you'll need to install Compose as the superuser. Run `sudo -i`, then the two commands above, then `exit`. 34 | 35 | Optionally, you can also install [command completion](completion.md) for the 36 | bash shell. 37 | 38 | Compose is available for OS X and 64-bit Linux. If you're on another platform, 39 | Compose can also be installed as a Python package: 40 | 41 | $ sudo pip install -U docker-compose 42 | 43 | No further steps are required; Compose should now be successfully installed. 44 | You can test the installation by running `docker-compose --version`. 45 | 46 | ### Upgrading 47 | 48 | If you're coming from Compose 1.2 or earlier, you'll need to remove or migrate your existing containers after upgrading Compose. This is because, as of version 1.3, Compose uses Docker labels to keep track of containers, and so they need to be recreated with labels added. 49 | 50 | If Compose detects containers that were created without labels, it will refuse to run so that you don't end up with two sets of them. 
If you want to keep using your existing containers (for example, because they have data volumes you want to preserve) you can migrate them with the following command: 51 | 52 | docker-compose migrate-to-labels 53 | 54 | Alternatively, if you're not worried about keeping them, you can remove them - Compose will just create new ones. 55 | 56 | docker rm -f myapp_web_1 myapp_db_1 ... 57 | 58 | ## Compose documentation 59 | 60 | - [User guide](/) 61 | - [Get started with Django](django.md) 62 | - [Get started with Rails](rails.md) 63 | - [Get started with Wordpress](wordpress.md) 64 | - [Command line reference](cli.md) 65 | - [Yaml file reference](yml.md) 66 | - [Compose environment variables](env.md) 67 | - [Compose command line completion](completion.md) 68 | -------------------------------------------------------------------------------- /compose/cli/log_printer.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | import sys 4 | 5 | from itertools import cycle 6 | 7 | from .multiplexer import Multiplexer, STOP 8 | from . 
import colors
from .utils import split_buffer


class LogPrinter(object):
    """Print the interleaved logs of several containers to one output
    stream, each line prefixed with the container's padded (and
    optionally coloured) name, until every container has exited.
    """

    def __init__(self, containers, attach_params=None, output=sys.stdout, monochrome=False):
        # attach_params: extra keyword args merged into Container.attach().
        # monochrome: if True, skip per-container colour cycling.
        self.containers = containers
        self.attach_params = attach_params or {}
        self.prefix_width = self._calculate_prefix_width(containers)
        self.generators = self._make_log_generators(monochrome)
        self.output = output

    def run(self):
        """Pump lines from all containers until every generator yields STOP."""
        mux = Multiplexer(self.generators)
        for line in mux.loop():
            self.output.write(line)

    def _calculate_prefix_width(self, containers):
        """
        Calculate the maximum width of container names so we can make the log
        prefixes line up like so:

        db_1  | Listening
        web_1 | Listening
        """
        prefix_width = 0
        for container in containers:
            prefix_width = max(prefix_width, len(container.name_without_project))
        return prefix_width

    def _make_log_generators(self, monochrome):
        """Build one log generator per container, assigning colours in turn."""
        # Cycle through the rainbow so neighbouring containers get
        # different colours; no_color is the identity when monochrome.
        color_fns = cycle(colors.rainbow())
        generators = []

        def no_color(text):
            return text

        for container in self.containers:
            if monochrome:
                color_fn = no_color
            else:
                color_fn = next(color_fns)
            generators.append(self._make_log_generator(container, color_fn))

        return generators

    def _make_log_generator(self, container, color_fn):
        """Yield prefixed log lines, then an exit message, then STOP."""
        prefix = color_fn(self._generate_prefix(container)).encode('utf-8')
        # Attach to container before log printer starts running
        line_generator = split_buffer(self._attach(container), '\n')

        for line in line_generator:
            yield prefix + line

        # The log stream has ended: report the container's exit status and
        # tell the multiplexer this generator is done.
        exit_code = container.wait()
        yield color_fn("%s exited with code %s\n" % (container.name, exit_code))
        yield STOP

    def _generate_prefix(self, container):
        """
        Generate the prefix for a log line without colour
        """
        name = container.name_without_project
        padding = ' ' * (self.prefix_width - len(name))
        return ''.join([name, padding, ' | '])

    def _attach(self, container):
        # Docker's attach endpoint expects 1/0 rather than True/False.
        params = {
            'stdout': True,
            'stderr': True,
            'stream': True,
        }
        params.update(self.attach_params)
        params = dict((name, 1 if value else 0) for (name, value) in list(params.items()))
        return container.attach(**params)
mountpoint -q $CGROUP/$SUBSYS; then 39 | mount -n -t cgroup -o "$SUBSYS" cgroup "$CGROUP/$SUBSYS" 40 | fi 41 | 42 | # The two following sections address a bug which manifests itself 43 | # by a cryptic "lxc-start: no ns_cgroup option specified" when 44 | # trying to start containers withina container. 45 | # The bug seems to appear when the cgroup hierarchies are not 46 | # mounted on the exact same directories in the host, and in the 47 | # container. 48 | 49 | # Named, control-less cgroups are mounted with "-o name=foo" 50 | # (and appear as such under /proc//cgroup) but are usually 51 | # mounted on a directory named "foo" (without the "name=" prefix). 52 | # Systemd and OpenRC (and possibly others) both create such a 53 | # cgroup. To avoid the aforementioned bug, we symlink "foo" to 54 | # "name=foo". This shouldn't have any adverse effect. 55 | name="${SUBSYS#name=}" 56 | if [ "$name" != "$SUBSYS" ]; then 57 | ln -s "$SUBSYS" "$CGROUP/$name" 58 | fi 59 | 60 | # Likewise, on at least one system, it has been reported that 61 | # systemd would mount the CPU and CPU accounting controllers 62 | # (respectively "cpu" and "cpuacct") with "-o cpuacct,cpu" 63 | # but on a directory called "cpu,cpuacct" (note the inversion 64 | # in the order of the groups). This tries to work around it. 65 | if [ "$SUBSYS" = 'cpuacct,cpu' ]; then 66 | ln -s "$SUBSYS" "$CGROUP/cpu,cpuacct" 67 | fi 68 | done 69 | 70 | # Note: as I write those lines, the LXC userland tools cannot setup 71 | # a "sub-container" properly if the "devices" cgroup is not in its 72 | # own hierarchy. Let's detect this and issue a warning. 73 | if ! grep -q :devices: /proc/1/cgroup; then 74 | echo >&2 'WARNING: the "devices" cgroup should be in its own hierarchy.' 75 | fi 76 | if ! grep -qw devices /proc/1/cgroup; then 77 | echo >&2 'WARNING: it looks like the "devices" cgroup is not mounted.' 
78 | fi 79 | 80 | # Mount /tmp 81 | mount -t tmpfs none /tmp 82 | 83 | if [ $# -gt 0 ]; then 84 | exec "$@" 85 | fi 86 | 87 | echo >&2 'ERROR: No command specified.' 88 | echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' 89 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to Compose 2 | 3 | Compose is a part of the Docker project, and follows the same rules and 4 | principles. Take a read of [Docker's contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) 5 | to get an overview. 6 | 7 | ## TL;DR 8 | 9 | Pull requests will need: 10 | 11 | - Tests 12 | - Documentation 13 | - [To be signed off](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work) 14 | - A logical series of [well written commits](https://github.com/alphagov/styleguides/blob/master/git.md) 15 | 16 | ## Development environment 17 | 18 | If you're looking contribute to Compose 19 | but you're new to the project or maybe even to Python, here are the steps 20 | that should get you started. 21 | 22 | 1. Fork [https://github.com/docker/compose](https://github.com/docker/compose) 23 | to your username. 24 | 2. Clone your forked repository locally `git clone git@github.com:yourusername/compose.git`. 25 | 3. Enter the local directory `cd compose`. 26 | 4. Set up a development environment by running `python setup.py develop`. This 27 | will install the dependencies and set up a symlink from your `docker-compose` 28 | executable to the checkout of the repository. When you now run 29 | `docker-compose` from anywhere on your machine, it will run your development 30 | version of Compose. 
31 | 32 | ## Running the test suite 33 | 34 | Use the test script to run linting checks and then the full test suite against 35 | different Python interpreters: 36 | 37 | $ script/test 38 | 39 | Tests are run against a Docker daemon inside a container, so that we can test 40 | against multiple Docker versions. By default they'll run against only the latest 41 | Docker version - set the `DOCKER_VERSIONS` environment variable to "all" to run 42 | against all supported versions: 43 | 44 | $ DOCKER_VERSIONS=all script/test 45 | 46 | Arguments to `script/test` are passed through to the `nosetests` executable, so 47 | you can specify a test directory, file, module, class or method: 48 | 49 | $ script/test tests/unit 50 | $ script/test tests/unit/cli_test.py 51 | $ script/test tests.integration.service_test 52 | $ script/test tests.integration.service_test:ServiceTest.test_containers 53 | 54 | ## Building binaries 55 | 56 | `script/build-linux` will build the Linux binary inside a Docker container: 57 | 58 | $ script/build-linux 59 | 60 | `script/build-osx` will build the Mac OS X binary inside a virtualenv: 61 | 62 | $ script/build-osx 63 | 64 | For official releases, you should build inside a Mountain Lion VM for proper 65 | compatibility. Run this script first to prepare the environment before 66 | building - it will use Homebrew to make sure Python is installed and 67 | up-to-date. 68 | 69 | $ script/prepare-osx 70 | 71 | ## Release process 72 | 73 | 1. Open pull request that: 74 | - Updates the version in `compose/__init__.py` 75 | - Updates the binary URL in `docs/install.md` 76 | - Adds release notes to `CHANGES.md` 77 | 2. Create unpublished GitHub release with release notes 78 | 3. Build Linux version on any Docker host with `script/build-linux` and attach 79 | to release 80 | 4. Build OS X version on Mountain Lion with `script/build-osx` and attach to 81 | release as `docker-compose-Darwin-x86_64` and `docker-compose-Linux-x86_64`. 82 | 5.
import logging
import re

from .container import get_container_name, Container


log = logging.getLogger(__name__)


# Old-style (pre-label) container names: <project>_<service>_[run_]<number>.
# TODO: remove this section when migrate_project_to_labels is removed
NAME_RE = re.compile(r'^([^_]+)_([^_]+)_(run_)?(\d+)$')

ERROR_MESSAGE_FORMAT = """
Compose found the following containers without labels:

{names_list}

As of Compose 1.3.0, containers are identified with labels instead of naming convention. If you want to continue using these containers, run:

    $ docker-compose migrate-to-labels

Alternatively, remove them:

    $ docker rm -f {rm_args}
"""


def check_for_legacy_containers(
        client,
        project,
        services,
        stopped=False,
        one_off=False):
    """Raise LegacyContainersError if containers named with the old
    (pre-label) convention exist for this project, so the user can
    migrate or remove them before Compose continues.
    """
    found = list(get_legacy_containers(
        client,
        project,
        services,
        stopped=stopped,
        one_off=one_off))

    if found:
        raise LegacyContainersError([container.name for container in found])


class LegacyContainersError(Exception):
    """Raised when unlabelled (pre-1.3) containers exist for a project."""

    def __init__(self, names):
        self.names = names

        self.msg = ERROR_MESSAGE_FORMAT.format(
            names_list="\n".join("    {}".format(name) for name in names),
            rm_args=" ".join(names),
        )

    def __unicode__(self):
        return self.msg

    __str__ = __unicode__


def add_labels(project, container):
    """Recreate *container* through its service so it picks up labels.

    Containers whose name does not belong to one of this project's
    services are left untouched.
    """
    project_name, service_name, one_off, number = NAME_RE.match(container.name).groups()
    if project_name != project.name or service_name not in project.service_names:
        return
    project.get_service(service_name).recreate_container(container)


def migrate_project_to_labels(project):
    """Migrate every legacy container of *project* to the label scheme."""
    log.info("Running migration to labels for project %s", project.name)

    for container in get_legacy_containers(
            project.client,
            project.name,
            project.service_names,
            stopped=True,
            one_off=False):
        add_labels(project, container)


def get_legacy_containers(
        client,
        project,
        services,
        stopped=False,
        one_off=False):
    """Yield Container objects for old-style containers of each service,
    in service order.
    """
    candidates = client.containers(all=stopped)

    for service in services:
        for candidate in candidates:
            candidate_name = get_container_name(candidate)
            if has_container(project, service, candidate_name, one_off=one_off):
                yield Container.from_ps(client, candidate)


def has_container(project, service, name, one_off=False):
    """True if *name* is an old-style name for this project/service pair."""
    if not (name and is_valid_name(name, one_off)):
        return False
    found_project, found_service, _number = parse_name(name)
    return (found_project, found_service) == (project, service)


def is_valid_name(name, one_off=False):
    """True if *name* matches the old naming convention.

    With one_off=True only `_run_` names match; otherwise only
    regular (non-run) names match.
    """
    match = NAME_RE.match(name)
    if match is None:
        return False
    if one_off:
        return match.group(3) == 'run_'
    return match.group(3) is None


def parse_name(name):
    """Split an old-style name into (project, service, container_number)."""
    project, service_name, _one_off, number = NAME_RE.match(name).groups()
    return (project, service_name, int(number))
25 | 0 of 4 drafts rendered 26 | 0 future content 27 | 12 pages created 28 | 0 paginator pages created 29 | 0 tags created 30 | 0 categories created 31 | in 55 ms 32 | Serving pages from /docs/public 33 | Web Server is available at http://0.0.0.0:8000/ 34 | Press Ctrl+C to stop 35 | 36 | 5. Open the available server in your browser. 37 | 38 | The documentation server has the complete menu but only the Docker Compose 39 | documentation resolves. You can't access the other project docs from this 40 | localized build. 41 | 42 | ## Tips on Hugo metadata and menu positioning 43 | 44 | The top of each Docker Compose documentation file contains TOML metadata. The metadata is commented out to prevent it from appearing in GitHub. 45 | 46 | 56 | 57 | The metadata alone has this structure: 58 | 59 | +++ 60 | title = "Extending services in Compose" 61 | description = "How to use Docker Compose's extends keyword to share configuration between files and projects" 62 | keywords = ["fig, composition, compose, docker, orchestration, documentation, docs"] 63 | [menu.main] 64 | parent="smn_workw_compose" 65 | weight=2 66 | +++ 67 | 68 | The `[menu.main]` section refers to navigation defined [in the main Docker menu](https://github.com/docker/docs-base/blob/hugo/config.toml). This metadata says *add a menu item called* Extending services in Compose *to the menu with the* `smn_workw_compose` *identifier*. If you locate the menu in the configuration, you'll find *Create multi-container applications* is the menu title. 69 | 70 | You can move an article in the tree by specifying a new parent. You can shift the location of the item by changing its weight. Higher numbers are heavier and shift the item to the bottom of the menu. Low or no numbers shift it up. 71 | 72 | 73 | ## Other key documentation repositories 74 | 75 | The `docker/docs-base` repository contains [the Hugo theme and menu configuration](https://github.com/docker/docs-base).
If you open the `Dockerfile` you'll see the `make docs` relies on this as a base image for building the Compose documentation. 76 | 77 | The `docker/docs.docker.com` repository contains [build system for building the Docker documentation site](https://github.com/docker/docs.docker.com). Fork this repository to build the entire documentation site. 78 | -------------------------------------------------------------------------------- /docs/production.md: -------------------------------------------------------------------------------- 1 | 11 | 12 | 13 | ## Using Compose in production 14 | 15 | While **Compose is not yet considered production-ready**, if you'd like to experiment and learn more about using it in production deployments, this guide 16 | can help. 17 | The project is actively working towards becoming 18 | production-ready; to learn more about the progress being made, check out the 19 | [roadmap](https://github.com/docker/compose/blob/master/ROADMAP.md) for details 20 | on how it's coming along and what still needs to be done. 21 | 22 | When deploying to production, you'll almost certainly want to make changes to 23 | your app configuration that are more appropriate to a live environment. These 24 | changes may include: 25 | 26 | - Removing any volume bindings for application code, so that code stays inside 27 | the container and can't be changed from outside 28 | - Binding to different ports on the host 29 | - Setting environment variables differently (e.g., to decrease the verbosity of 30 | logging, or to enable email sending) 31 | - Specifying a restart policy (e.g., `restart: always`) to avoid downtime 32 | - Adding extra services (e.g., a log aggregator) 33 | 34 | For this reason, you'll probably want to define a separate Compose file, say 35 | `production.yml`, which specifies production-appropriate configuration. 
36 | 37 | > **Note:** The [extends](extends.md) keyword is useful for maintaining multiple 38 | > Compose files which re-use common services without having to manually copy and 39 | > paste. 40 | 41 | Once you've got an alternate configuration file, make Compose use it 42 | by setting the `COMPOSE_FILE` environment variable: 43 | 44 | $ COMPOSE_FILE=production.yml 45 | $ docker-compose up -d 46 | 47 | > **Note:** You can also use the file for a one-off command without setting 48 | > an environment variable. You do this by passing the `-f` flag, e.g., 49 | > `docker-compose -f production.yml up -d`. 50 | 51 | ### Deploying changes 52 | 53 | When you make changes to your app code, you'll need to rebuild your image and 54 | recreate your app's containers. To redeploy a service called 55 | `web`, you would use: 56 | 57 | $ docker-compose build web 58 | $ docker-compose up --no-deps -d web 59 | 60 | This will first rebuild the image for `web` and then stop, destroy, and recreate 61 | *just* the `web` service. The `--no-deps` flag prevents Compose from also 62 | recreating any services which `web` depends on. 63 | 64 | ### Running Compose on a single server 65 | 66 | You can use Compose to deploy an app to a remote Docker host by setting the 67 | `DOCKER_HOST`, `DOCKER_TLS_VERIFY`, and `DOCKER_CERT_PATH` environment variables 68 | appropriately. For tasks like this, 69 | [Docker Machine](https://docs.docker.com/machine) makes managing local and 70 | remote Docker hosts very easy, and is recommended even if you're not deploying 71 | remotely. 72 | 73 | Once you've set up your environment variables, all the normal `docker-compose` 74 | commands will work with no further configuration. 
# compose/cli/utils.py -- small helper utilities for the CLI.
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division

import datetime
import os
import platform
import subprocess
import ssl


def yesno(prompt, default=None):
    """
    Prompt the user for a yes or no.

    Can optionally specify a default value, which will only be
    used if they enter a blank line.

    Unrecognised input (anything other than "y", "n", "yes",
    "no" or "") will return None.
    """
    # NOTE: raw_input is Python 2 only; this module predates Python 3 support.
    answer = raw_input(prompt).strip().lower()

    if answer == "y" or answer == "yes":
        return True
    elif answer == "n" or answer == "no":
        return False
    elif answer == "":
        return default
    else:
        return None


# http://stackoverflow.com/a/5164027
def prettydate(d):
    """
    Return a human-readable, approximate age for the naive UTC datetime ``d``,
    e.g. "just now", "2 minutes ago", "1 day ago".

    Dates more than a week old, or in the future, are rendered as an
    absolute date such as "03 Jan 15".
    """
    diff = datetime.datetime.utcnow() - d
    s = diff.seconds
    if diff.days > 7 or diff.days < 0:
        return d.strftime('%d %b %y')
    elif diff.days == 1:
        return '1 day ago'
    elif diff.days > 1:
        return '{0} days ago'.format(diff.days)
    elif s <= 1:
        return 'just now'
    elif s < 60:
        return '{0} seconds ago'.format(s)
    elif s < 120:
        return '1 minute ago'
    elif s < 3600:
        # Floor division is required here: ``from __future__ import division``
        # makes ``/`` true division, which produced "2.5 minutes ago".
        return '{0} minutes ago'.format(s // 60)
    elif s < 7200:
        return '1 hour ago'
    else:
        # Same floor-division fix as the minutes branch above.
        return '{0} hours ago'.format(s // 3600)


def mkdir(path, permissions=0o700):
    """
    Create ``path`` if it does not already exist, apply ``permissions``
    (default 0o700) to it, and return the path.

    The permissions are (re)applied even when the directory already exists.
    """
    if not os.path.exists(path):
        os.mkdir(path)

    os.chmod(path, permissions)

    return path


def find_candidates_in_parent_dirs(filenames, path):
    """
    Given a directory path to start, looks for filenames in the
    directory, and then each parent directory successively,
    until found.

    Returns tuple (candidates, path).
    """
    candidates = [filename for filename in filenames
                  if os.path.exists(os.path.join(path, filename))]

    if not candidates:
        parent_dir = os.path.join(path, '..')
        # Stop recursing at the filesystem root, where the parent resolves
        # to the same directory as ``path``.
        if os.path.abspath(parent_dir) != os.path.abspath(path):
            return find_candidates_in_parent_dirs(filenames, parent_dir)

    return (candidates, path)


def split_buffer(reader, separator):
    """
    Given a generator which yields strings and a separator string,
    joins all input, splits on the separator and yields each chunk.

    Unlike string.split(), each chunk includes the trailing
    separator, except for the last one if none was found on the end
    of the input.
    """
    buffered = str('')
    separator = str(separator)
    # Advance past the whole separator, not just one character, so that
    # multi-character separators are handled correctly (the previous code
    # used ``index + 1`` and corrupted output for len(separator) > 1).
    sep_len = len(separator)

    for data in reader:
        buffered += data
        while True:
            index = buffered.find(separator)
            if index == -1:
                break
            yield buffered[:index + sep_len]
            buffered = buffered[index + sep_len:]

    if buffered:
        yield buffered


def call_silently(*args, **kwargs):
    """
    Like subprocess.call(), but redirects stdout and stderr to /dev/null.
    """
    with open(os.devnull, 'w') as shutup:
        return subprocess.call(*args, stdout=shutup, stderr=shutup, **kwargs)


def is_mac():
    """Return True when running on OS X / macOS."""
    return platform.system() == 'Darwin'


def is_ubuntu():
    """Return True when running on an Ubuntu Linux distribution."""
    # NOTE(review): platform.linux_distribution() was removed in Python 3.8;
    # acceptable for the Python 2 interpreters this codebase targets.
    return platform.system() == 'Linux' and platform.linux_distribution()[0] == 'Ubuntu'


def get_version_info(scope):
    """
    Return a version report for the CLI.

    scope -- 'compose' for just the docker-compose version, or 'full' for
    the compose, docker-py, Python and OpenSSL versions.

    Raises RuntimeError for any other ``scope`` value.
    """
    # Imported lazily so that importing this utility module does not require
    # docker-py (or the parent package) to be importable first.
    from .. import __version__
    from docker import version as docker_py_version

    versioninfo = 'docker-compose version: %s' % __version__
    if scope == 'compose':
        return versioninfo
    if scope == 'full':
        return versioninfo + '\n' \
            + "docker-py version: %s\n" % docker_py_version \
            + "%s version: %s\n" % (platform.python_implementation(), platform.python_version()) \
            + "OpenSSL version: %s" % ssl.OPENSSL_VERSION
    raise RuntimeError('passed unallowed value to `cli.utils.get_version_info`')
17 | 18 | ## Define the project 19 | 20 | First, [Install Compose](install.md) and then download Wordpress into the 21 | current directory: 22 | 23 | $ curl https://wordpress.org/latest.tar.gz | tar -xvzf - 24 | 25 | This will create a directory called `wordpress`. If you wish, you can rename it 26 | to the name of your project. 27 | 28 | Next, inside that directory, create a `Dockerfile`, a file that defines what 29 | environment your app is going to run in. For more information on how to write 30 | Dockerfiles, see the 31 | [Docker user guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the 32 | [Dockerfile reference](http://docs.docker.com/reference/builder/). In this case, 33 | your Dockerfile should be: 34 | 35 | FROM orchardup/php5 36 | ADD . /code 37 | 38 | This tells Docker how to build an image defining a container that contains PHP 39 | and Wordpress. 40 | 41 | Next you'll create a `docker-compose.yml` file that will start your web service 42 | and a separate MySQL instance: 43 | 44 | web: 45 | build: . 46 | command: php -S 0.0.0.0:8000 -t /code 47 | ports: 48 | - "8000:8000" 49 | links: 50 | - db 51 | volumes: 52 | - .:/code 53 | db: 54 | image: orchardup/mysql 55 | environment: 56 | MYSQL_DATABASE: wordpress 57 | 58 | Two supporting files are needed to get this working - first, `wp-config.php` is 59 | the standard Wordpress config file with a single change to point the database 60 | configuration at the `db` container: 61 | 62 | 2 | +++ 3 | title = "Quickstart Guide: Compose and Django" 4 | description = "Getting started with Docker Compose and Django" 5 | keywords = ["documentation, docs, docker, compose, orchestration, containers"] 6 | [menu.main] 7 | parent="smn_workw_compose" 8 | weight=4 9 | +++ 10 | 11 | 12 | 13 | ## Quickstart Guide: Compose and Django 14 | 15 | 16 | This Quick-start Guide will demonstrate how to use Compose to set up and run a 17 | simple Django/PostgreSQL app. 
Before starting, you'll need to have 18 | [Compose installed](install.md). 19 | 20 | ### Define the project 21 | 22 | Start by setting up the three files you'll need to build the app. First, since 23 | your app is going to run inside a Docker container containing all of its 24 | dependencies, you'll need to define exactly what needs to be included in the 25 | container. This is done using a file called `Dockerfile`. To begin with, the 26 | Dockerfile consists of: 27 | 28 | FROM python:2.7 29 | ENV PYTHONUNBUFFERED 1 30 | RUN mkdir /code 31 | WORKDIR /code 32 | ADD requirements.txt /code/ 33 | RUN pip install -r requirements.txt 34 | ADD . /code/ 35 | 36 | This Dockerfile will define an image that is used to build a container that 37 | includes your application and has Python installed alongside all of your Python 38 | dependencies. For more information on how to write Dockerfiles, see the 39 | [Docker user guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/). 40 | 41 | Second, you'll define your Python dependencies in a file called 42 | `requirements.txt`: 43 | 44 | Django 45 | psycopg2 46 | 47 | Finally, this is all tied together with a file called `docker-compose.yml`. It 48 | describes the services that comprise your app (here, a web server and database), 49 | which Docker images they use, how they link together, what volumes will be 50 | mounted inside the containers, and what ports they expose. 51 | 52 | db: 53 | image: postgres 54 | web: 55 | build: . 56 | command: python manage.py runserver 0.0.0.0:8000 57 | volumes: 58 | - .:/code 59 | ports: 60 | - "8000:8000" 61 | links: 62 | - db 63 | 64 | See the [`docker-compose.yml` reference](yml.html) for more information on how 65 | this file works. 
66 | 67 | ### Build the project 68 | 69 | You can now start a Django project with `docker-compose run`: 70 | 71 | $ docker-compose run web django-admin.py startproject composeexample . 72 | 73 | First, Compose will build an image for the `web` service using the `Dockerfile`. 74 | It will then run `django-admin.py startproject composeexample .` inside a 75 | container built using that image. 76 | 77 | This will generate a Django app inside the current directory: 78 | 79 | $ ls 80 | Dockerfile docker-compose.yml composeexample manage.py requirements.txt 81 | 82 | ### Connect the database 83 | 84 | Now you need to set up the database connection. Replace the `DATABASES = ...` 85 | definition in `composeexample/settings.py` to read: 86 | 87 | DATABASES = { 88 | 'default': { 89 | 'ENGINE': 'django.db.backends.postgresql_psycopg2', 90 | 'NAME': 'postgres', 91 | 'USER': 'postgres', 92 | 'HOST': 'db', 93 | 'PORT': 5432, 94 | } 95 | } 96 | 97 | These settings are determined by the 98 | [postgres](https://registry.hub.docker.com/_/postgres/) Docker image specified 99 | in the Dockerfile. 100 | 101 | Then, run `docker-compose up`: 102 | 103 | Recreating myapp_db_1... 104 | Recreating myapp_web_1... 105 | Attaching to myapp_db_1, myapp_web_1 106 | myapp_db_1 | 107 | myapp_db_1 | PostgreSQL stand-alone backend 9.1.11 108 | myapp_db_1 | 2014-01-27 12:17:03 UTC LOG: database system is ready to accept connections 109 | myapp_db_1 | 2014-01-27 12:17:03 UTC LOG: autovacuum launcher started 110 | myapp_web_1 | Validating models... 111 | myapp_web_1 | 112 | myapp_web_1 | 0 errors found 113 | myapp_web_1 | January 27, 2014 - 12:12:40 114 | myapp_web_1 | Django version 1.6.1, using settings 'composeexample.settings' 115 | myapp_web_1 | Starting development server at http://0.0.0.0:8000/ 116 | myapp_web_1 | Quit the server with CONTROL-C. 
117 | 118 | Your Django app should nw be running at port 8000 on your Docker daemon (if 119 | you're using Boot2docker, `boot2docker ip` will tell you its address). 120 | 121 | You can also run management commands with Docker. To set up your database, for 122 | example, run `docker-compose up` and in another terminal run: 123 | 124 | $ docker-compose run web python manage.py syncdb 125 | 126 | ## More Compose documentation 127 | 128 | - [User guide](/) 129 | - [Installing Compose](install.md) 130 | - [Get started with Django](django.md) 131 | - [Get started with Rails](rails.md) 132 | - [Get started with Wordpress](wordpress.md) 133 | - [Command line reference](cli.md) 134 | - [Yaml file reference](yml.md) 135 | - [Compose environment variables](env.md) 136 | - [Compose command line completion](completion.md) 137 | -------------------------------------------------------------------------------- /compose/cli/command.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | from requests.exceptions import ConnectionError, SSLError 4 | import logging 5 | import os 6 | import re 7 | import six 8 | 9 | from .. import config 10 | from ..project import Project 11 | from ..service import ConfigError 12 | from .docopt_command import DocoptCommand 13 | from .utils import call_silently, is_mac, is_ubuntu, find_candidates_in_parent_dirs 14 | from .docker_client import docker_client 15 | from . import verbose_proxy 16 | from . import errors 17 | from .. import __version__ 18 | 19 | log = logging.getLogger(__name__) 20 | 21 | SUPPORTED_FILENAMES = [ 22 | 'docker-compose.yml', 23 | 'docker-compose.yaml', 24 | 'fig.yml', 25 | 'fig.yaml', 26 | ] 27 | 28 | 29 | class Command(DocoptCommand): 30 | base_dir = '.' 
31 | 32 | def dispatch(self, *args, **kwargs): 33 | try: 34 | super(Command, self).dispatch(*args, **kwargs) 35 | except SSLError as e: 36 | raise errors.UserError('SSL error: %s' % e) 37 | except ConnectionError: 38 | if call_silently(['which', 'docker']) != 0: 39 | if is_mac(): 40 | raise errors.DockerNotFoundMac() 41 | elif is_ubuntu(): 42 | raise errors.DockerNotFoundUbuntu() 43 | else: 44 | raise errors.DockerNotFoundGeneric() 45 | elif call_silently(['which', 'boot2docker']) == 0: 46 | raise errors.ConnectionErrorBoot2Docker() 47 | else: 48 | raise errors.ConnectionErrorGeneric(self.get_client().base_url) 49 | 50 | def perform_command(self, options, handler, command_options): 51 | if options['COMMAND'] in ('help', 'version'): 52 | # Skip looking up the compose file. 53 | handler(None, command_options) 54 | return 55 | 56 | if 'FIG_FILE' in os.environ: 57 | log.warn('The FIG_FILE environment variable is deprecated.') 58 | log.warn('Please use COMPOSE_FILE instead.') 59 | 60 | explicit_config_path = options.get('--file') or os.environ.get('COMPOSE_FILE') or os.environ.get('FIG_FILE') 61 | project = self.get_project( 62 | self.get_config_path(explicit_config_path), 63 | project_name=options.get('--project-name'), 64 | verbose=options.get('--verbose')) 65 | 66 | handler(project, command_options) 67 | 68 | def get_client(self, verbose=False): 69 | client = docker_client() 70 | if verbose: 71 | version_info = six.iteritems(client.version()) 72 | log.info("Compose version %s", __version__) 73 | log.info("Docker base_url: %s", client.base_url) 74 | log.info("Docker version: %s", 75 | ", ".join("%s=%s" % item for item in version_info)) 76 | return verbose_proxy.VerboseProxy('docker', client) 77 | return client 78 | 79 | def get_project(self, config_path, project_name=None, verbose=False): 80 | try: 81 | return Project.from_dicts( 82 | self.get_project_name(config_path, project_name), 83 | config.load(config_path), 84 | self.get_client(verbose=verbose)) 85 | except 
ConfigError as e: 86 | raise errors.UserError(six.text_type(e)) 87 | 88 | def get_project_name(self, config_path, project_name=None): 89 | def normalize_name(name): 90 | return re.sub(r'[^a-z0-9]', '', name.lower()) 91 | 92 | if 'FIG_PROJECT_NAME' in os.environ: 93 | log.warn('The FIG_PROJECT_NAME environment variable is deprecated.') 94 | log.warn('Please use COMPOSE_PROJECT_NAME instead.') 95 | 96 | project_name = project_name or os.environ.get('COMPOSE_PROJECT_NAME') or os.environ.get('FIG_PROJECT_NAME') 97 | if project_name is not None: 98 | return normalize_name(project_name) 99 | 100 | project = os.path.basename(os.path.dirname(os.path.abspath(config_path))) 101 | if project: 102 | return normalize_name(project) 103 | 104 | return 'default' 105 | 106 | def get_config_path(self, file_path=None): 107 | if file_path: 108 | return os.path.join(self.base_dir, file_path) 109 | 110 | (candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, self.base_dir) 111 | 112 | if len(candidates) == 0: 113 | raise errors.ComposeFileNotFound(SUPPORTED_FILENAMES) 114 | 115 | winner = candidates[0] 116 | 117 | if len(candidates) > 1: 118 | log.warning("Found multiple config files with supported names: %s", ", ".join(candidates)) 119 | log.warning("Using %s\n", winner) 120 | 121 | if winner == 'docker-compose.yaml': 122 | log.warning("Please be aware that .yml is the expected extension " 123 | "in most cases, and using .yaml can cause compatibility " 124 | "issues in future.\n") 125 | 126 | if winner.startswith("fig."): 127 | log.warning("%s is deprecated and will not be supported in future. 
" 128 | "Please rename your config file to docker-compose.yml\n" % winner) 129 | 130 | return os.path.join(path, winner) 131 | -------------------------------------------------------------------------------- /docs/rails.md: -------------------------------------------------------------------------------- 1 | 11 | 12 | ## Quickstart Guide: Compose and Rails 13 | 14 | This Quickstart guide will show you how to use Compose to set up and run a Rails/PostgreSQL app. Before starting, you'll need to have [Compose installed](install.md). 15 | 16 | ### Define the project 17 | 18 | Start by setting up the three files you'll need to build the app. First, since 19 | your app is going to run inside a Docker container containing all of its 20 | dependencies, you'll need to define exactly what needs to be included in the 21 | container. This is done using a file called `Dockerfile`. To begin with, the 22 | Dockerfile consists of: 23 | 24 | FROM ruby:2.2.0 25 | RUN apt-get update -qq && apt-get install -y build-essential libpq-dev 26 | RUN mkdir /myapp 27 | WORKDIR /myapp 28 | ADD Gemfile /myapp/Gemfile 29 | RUN bundle install 30 | ADD . /myapp 31 | 32 | That'll put your application code inside an image that will build a container with Ruby, Bundler and all your dependencies inside it. For more information on how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/). 33 | 34 | Next, create a bootstrap `Gemfile` which just loads Rails. It'll be overwritten in a moment by `rails new`. 35 | 36 | source 'https://rubygems.org' 37 | gem 'rails', '4.2.0' 38 | 39 | Finally, `docker-compose.yml` is where the magic happens. 
This file describes the services that comprise your app (a database and a web app), how to get each one's Docker image (the database just runs on a pre-made PostgreSQL image, and the web app is built from the current directory), and the configuration needed to link them together and expose the web app's port. 40 | 41 | db: 42 | image: postgres 43 | ports: 44 | - "5432" 45 | web: 46 | build: . 47 | command: bundle exec rails s -p 3000 -b '0.0.0.0' 48 | volumes: 49 | - .:/myapp 50 | ports: 51 | - "3000:3000" 52 | links: 53 | - db 54 | 55 | ### Build the project 56 | 57 | With those three files in place, you can now generate the Rails skeleton app 58 | using `docker-compose run`: 59 | 60 | $ docker-compose run web rails new . --force --database=postgresql --skip-bundle 61 | 62 | First, Compose will build the image for the `web` service using the 63 | `Dockerfile`. Then it'll run `rails new` inside a new container, using that 64 | image. Once it's done, you should have generated a fresh app: 65 | 66 | $ ls 67 | Dockerfile app docker-compose.yml tmp 68 | Gemfile bin lib vendor 69 | Gemfile.lock config log 70 | README.rdoc config.ru public 71 | Rakefile db test 72 | 73 | Uncomment the line in your new `Gemfile` which loads `therubyracer`, so you've 74 | got a Javascript runtime: 75 | 76 | gem 'therubyracer', platforms: :ruby 77 | 78 | Now that you've got a new `Gemfile`, you need to build the image again. (This, 79 | and changes to the Dockerfile itself, should be the only times you'll need to 80 | rebuild.) 81 | 82 | $ docker-compose build 83 | 84 | ### Connect the database 85 | 86 | The app is now bootable, but you're not quite there yet. By default, Rails 87 | expects a database to be running on `localhost` - so you need to point it at the 88 | `db` container instead. You also need to change the database and username to 89 | align with the defaults set by the `postgres` image. 90 | 91 | Open up your newly-generated `database.yml` file. 
# tests/unit/container_test.py -- unit tests for compose.container.Container
# and the get_container_name helper. No Docker daemon is required: fixtures
# are plain dicts shaped like the /containers/json API payload.
from __future__ import unicode_literals
from .. import unittest

import mock
import docker

from compose.container import Container
from compose.container import get_container_name


class ContainerTest(unittest.TestCase):
    """Tests for Container built from inspect/ps-style dictionaries."""

    def setUp(self):
        # Minimal /containers/json-style payload shared by most tests.
        # Note the container-number label is an int here; Container.number
        # coerces it with int(), so this also covers non-string values.
        self.container_dict = {
            "Id": "abc",
            "Image": "busybox:latest",
            "Command": "top",
            "Created": 1387384730,
            "Status": "Up 8 seconds",
            "Ports": None,
            "SizeRw": 0,
            "SizeRootFs": 0,
            "Names": ["/composetest_db_1", "/composetest_web_1/db"],
            "NetworkSettings": {
                "Ports": {},
            },
            "Config": {
                "Labels": {
                    "com.docker.compose.project": "composetest",
                    "com.docker.compose.service": "web",
                    "com.docker.compose.container-number": 7,
                },
            }
        }

    def test_from_ps(self):
        # from_ps keeps only Id, Image and the primary (non-link) name.
        container = Container.from_ps(None,
                                      self.container_dict,
                                      has_been_inspected=True)
        self.assertEqual(
            container.dictionary,
            {
                "Id": "abc",
                "Image": "busybox:latest",
                "Name": "/composetest_db_1",
            })

    def test_from_ps_prefixed(self):
        # Swarm prefixes names with the node (e.g. /swarm-host-1/...); the
        # prefix must be stripped when picking the container name.
        self.container_dict['Names'] = ['/swarm-host-1' + n for n in self.container_dict['Names']]

        container = Container.from_ps(None,
                                      self.container_dict,
                                      has_been_inspected=True)
        self.assertEqual(container.dictionary, {
            "Id": "abc",
            "Image": "busybox:latest",
            "Name": "/composetest_db_1",
        })

    def test_environment(self):
        # Config.Env "KEY=VALUE" strings are exposed as a dict.
        container = Container(None, {
            'Id': 'abc',
            'Config': {
                'Env': [
                    'FOO=BAR',
                    'BAZ=DOGE',
                ]
            }
        }, has_been_inspected=True)
        self.assertEqual(container.environment, {
            'FOO': 'BAR',
            'BAZ': 'DOGE',
        })

    def test_number(self):
        # number comes from the container-number label, coerced to int.
        container = Container(None, self.container_dict, has_been_inspected=True)
        self.assertEqual(container.number, 7)

    def test_name(self):
        # name strips the leading '/' from the primary name.
        container = Container.from_ps(None,
                                      self.container_dict,
                                      has_been_inspected=True)
        self.assertEqual(container.name, "composetest_db_1")

    def test_name_without_project(self):
        # Built from the service label + number, not from the full name.
        container = Container(None, self.container_dict, has_been_inspected=True)
        self.assertEqual(container.name_without_project, "web_7")

    def test_inspect_if_not_inspected(self):
        # First call inspects via the client; second call is a no-op.
        mock_client = mock.create_autospec(docker.Client)
        container = Container(mock_client, dict(Id="the_id"))

        container.inspect_if_not_inspected()
        mock_client.inspect_container.assert_called_once_with("the_id")
        self.assertEqual(container.dictionary,
                         mock_client.inspect_container.return_value)
        self.assertTrue(container.has_been_inspected)

        container.inspect_if_not_inspected()
        self.assertEqual(mock_client.inspect_container.call_count, 1)

    def test_human_readable_ports_none(self):
        # Empty Ports mapping renders as an empty string.
        container = Container(None, self.container_dict, has_been_inspected=True)
        self.assertEqual(container.human_readable_ports, '')

    def test_human_readable_ports_public_and_private(self):
        # Unpublished ports show bare; published ones as HostIp:HostPort->port.
        self.container_dict['NetworkSettings']['Ports'].update({
            "45454/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49197"}],
            "45453/tcp": [],
        })
        container = Container(None, self.container_dict, has_been_inspected=True)

        expected = "45453/tcp, 0.0.0.0:49197->45454/tcp"
        self.assertEqual(container.human_readable_ports, expected)

    def test_get_local_port(self):
        # get_local_port maps a private port/protocol to its host binding.
        self.container_dict['NetworkSettings']['Ports'].update({
            "45454/tcp": [{"HostIp": "0.0.0.0", "HostPort": "49197"}],
        })
        container = Container(None, self.container_dict, has_been_inspected=True)

        self.assertEqual(
            container.get_local_port(45454, protocol='tcp'),
            '0.0.0.0:49197')

    def test_get(self):
        # get() supports dotted paths and returns None for missing keys.
        container = Container(None, {
            "Status": "Up 8 seconds",
            "HostConfig": {
                "VolumesFrom": ["volume_id"]
            },
        }, has_been_inspected=True)

        self.assertEqual(container.get('Status'), "Up 8 seconds")
        self.assertEqual(container.get('HostConfig.VolumesFrom'), ["volume_id"])
        self.assertEqual(container.get('Foo.Bar.DoesNotExist'), None)


class GetContainerNameTestCase(unittest.TestCase):
    """Tests for get_container_name across inspect-, ps- and swarm-style
    payloads: link aliases (names containing a second '/') are ignored."""

    def test_get_container_name(self):
        self.assertIsNone(get_container_name({}))
        self.assertEqual(get_container_name({'Name': 'myproject_db_1'}), 'myproject_db_1')
        self.assertEqual(get_container_name({'Names': ['/myproject_db_1', '/myproject_web_1/db']}), 'myproject_db_1')
        self.assertEqual(get_container_name({'Names': ['/swarm-host-1/myproject_db_1', '/swarm-host-1/myproject_web_1/db']}), 'myproject_db_1')
52 | 53 | ### pull 54 | 55 | Pulls service images. 56 | 57 | ### restart 58 | 59 | Restarts services. 60 | 61 | ### rm 62 | 63 | Removes stopped service containers. 64 | 65 | 66 | ### run 67 | 68 | Runs a one-off command on a service. 69 | 70 | For example, 71 | 72 | $ docker-compose run web python manage.py shell 73 | 74 | will start the `web` service and then run `manage.py shell` in python. 75 | Note that by default, linked services will also be started, unless they are 76 | already running. 77 | 78 | One-off commands are started in new containers with the same configuration as a 79 | normal container for that service, so volumes, links, etc will all be created as 80 | expected. When using `run`, there are two differences from bringing up a 81 | container normally: 82 | 83 | 1. the command will be overridden with the one specified. So, if you run 84 | `docker-compose run web bash`, the container's web command (which could default 85 | to, e.g., `python app.py`) will be overridden to `bash` 86 | 87 | 2. by default no ports will be created in case they collide with already opened 88 | ports. 89 | 90 | Links are also created between one-off commands and the other containers which 91 | are part of that service. So, for example, you could run: 92 | 93 | $ docker-compose run db psql -h db -U docker 94 | 95 | This would open up an interactive PostgreSQL shell for the linked `db` container 96 | (which would get created or started as needed). 97 | 98 | If you do not want linked containers to start when running the one-off command, 99 | specify the `--no-deps` flag: 100 | 101 | $ docker-compose run --no-deps web python manage.py shell 102 | 103 | Similarly, if you do want the service's ports to be created and mapped to the 104 | host, specify the `--service-ports` flag: 105 | 106 | $ docker-compose run --service-ports web python manage.py shell 107 | 108 | 109 | ### scale 110 | 111 | Sets the number of containers to run for a service. 
112 | 113 | Numbers are specified as arguments in the form `service=num`. For example: 114 | 115 | $ docker-compose scale web=2 worker=3 116 | 117 | ### start 118 | 119 | Starts existing containers for a service. 120 | 121 | ### stop 122 | 123 | Stops running containers without removing them. They can be started again with 124 | `docker-compose start`. 125 | 126 | ### up 127 | 128 | Builds, (re)creates, starts, and attaches to containers for a service. 129 | 130 | Linked services will be started, unless they are already running. 131 | 132 | By default, `docker-compose up` will aggregate the output of each container and, 133 | when it exits, all containers will be stopped. Running `docker-compose up -d`, 134 | will start the containers in the background and leave them running. 135 | 136 | By default, if there are existing containers for a service, `docker-compose up` will stop and recreate them (preserving mounted volumes with [volumes-from]), so that changes in `docker-compose.yml` are picked up. If you do not want containers stopped and recreated, use `docker-compose up --no-recreate`. This will still start any stopped containers, if needed. 137 | 138 | [volumes-from]: http://docs.docker.io/en/latest/use/working_with_volumes/ 139 | 140 | ## Options 141 | 142 | ### --verbose 143 | 144 | Shows more output 145 | 146 | ### -v, --version 147 | 148 | Prints version and exits 149 | 150 | ### -f, --file FILE 151 | 152 | Specify what file to read configuration from. If not provided, Compose will look 153 | for `docker-compose.yml` in the current working directory, and then each parent 154 | directory successively, until found. 155 | 156 | 157 | ### -p, --project-name NAME 158 | 159 | Specifies an alternate project name (default: current directory name) 160 | 161 | 162 | ## Environment Variables 163 | 164 | Several environment variables are available for you to configure Compose's behaviour. 
165 | 166 | Variables starting with `DOCKER_` are the same as those used to configure the 167 | Docker command-line client. If you're using boot2docker, `eval "$(boot2docker shellinit)"` 168 | will set them to their correct values. 169 | 170 | ### COMPOSE\_PROJECT\_NAME 171 | 172 | Sets the project name, which is prepended to the name of every container started by Compose. Defaults to the `basename` of the current working directory. 173 | 174 | ### COMPOSE\_FILE 175 | 176 | Specify what file to read configuration from. If not provided, Compose will look 177 | for `docker-compose.yml` in the current working directory, and then each parent 178 | directory successively, until found. 179 | 180 | ### DOCKER\_HOST 181 | 182 | Sets the URL of the docker daemon. As with the Docker client, defaults to `unix:///var/run/docker.sock`. 183 | 184 | ### DOCKER\_TLS\_VERIFY 185 | 186 | When set to anything other than an empty string, enables TLS communication with 187 | the daemon. 188 | 189 | ### DOCKER\_CERT\_PATH 190 | 191 | Configures the path to the `ca.pem`, `cert.pem`, and `key.pem` files used for TLS verification. Defaults to `~/.docker`. 
class Container(object):
    """
    Represents a Docker container, constructed from the output of
    GET /containers/:id:/json.

    The wrapped ``dictionary`` may be a partial record (from the list
    endpoint); ``inspect_if_not_inspected()`` lazily upgrades it to the
    full inspect payload on first access to nested fields.
    """
    def __init__(self, client, dictionary, has_been_inspected=False):
        self.client = client
        self.dictionary = dictionary
        self.has_been_inspected = has_been_inspected

    @classmethod
    def from_ps(cls, client, dictionary, **kwargs):
        """
        Construct a container object from the output of GET /containers/json.

        Only Id/Image/Name are carried over; anything else is fetched on
        demand via inspect.
        """
        new_dictionary = {
            'Id': dictionary['Id'],
            'Image': dictionary['Image'],
            # Normalize to the inspect form, which has a leading '/'.
            'Name': '/' + get_container_name(dictionary),
        }
        return cls(client, new_dictionary, **kwargs)

    @classmethod
    def from_id(cls, client, id):
        """Construct a container by inspecting ``id`` through the client."""
        return cls(client, client.inspect_container(id))

    @classmethod
    def create(cls, client, **options):
        """Create a new container and return a :class:`Container` for it."""
        response = client.create_container(**options)
        return cls.from_id(client, response['Id'])

    @property
    def id(self):
        return self.dictionary['Id']

    @property
    def image(self):
        return self.dictionary['Image']

    @property
    def image_config(self):
        # Full image inspect payload; requires a live client.
        return self.client.inspect_image(self.image)

    @property
    def short_id(self):
        # First 10 characters of the id, as shown by `docker ps`.
        return self.id[:10]

    @property
    def name(self):
        # Inspect output prefixes names with '/'; strip it.
        return self.dictionary['Name'][1:]

    @property
    def name_without_project(self):
        return '{0}_{1}'.format(self.labels.get(LABEL_SERVICE), self.number)

    @property
    def number(self):
        """Instance number of this container within its service.

        :raises ValueError: if the container lacks the number label
            (e.g. it was not created by Compose).
        """
        number = self.labels.get(LABEL_CONTAINER_NUMBER)
        if not number:
            raise ValueError("Container {0} does not have a {1} label".format(
                self.short_id, LABEL_CONTAINER_NUMBER))
        return int(number)

    @property
    def ports(self):
        """Port mapping dict, e.g. {'80/tcp': [{'HostIp': ..., 'HostPort': ...}]}."""
        self.inspect_if_not_inspected()
        return self.get('NetworkSettings.Ports') or {}

    @property
    def human_readable_ports(self):
        """Ports rendered like `docker ps`: '0.0.0.0:8080->80/tcp, 443/tcp'."""
        def format_port(private, public):
            # Unpublished ports have no host binding; show the private side only.
            if not public:
                return private
            return '{HostIp}:{HostPort}->{private}'.format(
                private=private, **public[0])

        # .items() works on both Python 2 (list) and 3 (view); no need for six.
        return ', '.join(format_port(*item)
                         for item in sorted(self.ports.items()))

    @property
    def labels(self):
        return self.get('Config.Labels') or {}

    @property
    def log_config(self):
        return self.get('HostConfig.LogConfig') or None

    @property
    def human_readable_state(self):
        if self.is_running:
            return 'Ghost' if self.get('State.Ghost') else 'Up'
        else:
            return 'Exit %s' % self.get('State.ExitCode')

    @property
    def human_readable_command(self):
        entrypoint = self.get('Config.Entrypoint') or []
        cmd = self.get('Config.Cmd') or []
        return ' '.join(entrypoint + cmd)

    @property
    def environment(self):
        # Split on the first '=' only, so values may themselves contain '='.
        return dict(var.split("=", 1) for var in self.get('Config.Env') or [])

    @property
    def is_running(self):
        return self.get('State.Running')

    def get(self, key):
        """Return a value from the container or None if the value is not set.

        :param key: a string using dotted notation for nested dictionary
            lookups
        """
        self.inspect_if_not_inspected()

        def get_value(dictionary, key):
            return (dictionary or {}).get(key)

        return reduce(get_value, key.split('.'), self.dictionary)

    def get_local_port(self, port, protocol='tcp'):
        """Return 'HostIp:HostPort' for a published port, or None."""
        port = self.ports.get("%s/%s" % (port, protocol))
        return "{HostIp}:{HostPort}".format(**port[0]) if port else None

    def start(self, **options):
        return self.client.start(self.id, **options)

    def stop(self, **options):
        return self.client.stop(self.id, **options)

    def kill(self, **options):
        return self.client.kill(self.id, **options)

    def restart(self, **options):
        return self.client.restart(self.id, **options)

    def remove(self, **options):
        return self.client.remove_container(self.id, **options)

    def inspect_if_not_inspected(self):
        if not self.has_been_inspected:
            self.inspect()

    def wait(self):
        return self.client.wait(self.id)

    def logs(self, *args, **kwargs):
        return self.client.logs(self.id, *args, **kwargs)

    def inspect(self):
        """Refresh ``self.dictionary`` with the full inspect payload."""
        self.dictionary = self.client.inspect_container(self.id)
        self.has_been_inspected = True
        return self.dictionary

    # TODO: only used by tests, move to test module
    def links(self):
        links = []
        for container in self.client.containers():
            for name in container['Names']:
                bits = name.split('/')
                if len(bits) > 2 and bits[1] == self.name:
                    links.append(bits[2])
        return links

    def attach(self, *args, **kwargs):
        return self.client.attach(self.id, *args, **kwargs)

    def attach_socket(self, **kwargs):
        return self.client.attach_socket(self.id, **kwargs)

    def __repr__(self):
        # BUGFIX: the format string was empty (''), so formatting with two
        # arguments raised TypeError whenever a Container was repr()'d.
        return '<Container: %s (%s)>' % (self.name, self.id[:6])

    def __eq__(self, other):
        if type(self) != type(other):
            return False
        return self.id == other.id

    def __hash__(self):
        # Hash by id so equal containers hash equal (consistent with __eq__).
        return hash(self.id)


def get_container_name(container):
    """Extract the container name from either an inspect or a ps record.

    Returns None when neither 'Name' (inspect) nor 'Names' (ps) is present.
    """
    if not container.get('Name') and not container.get('Names'):
        return None
    # inspect
    if 'Name' in container:
        return container['Name']
    # ps: pick the name with the fewest '/'-separated parts, i.e. the
    # container's own name rather than an alias through another container.
    shortest_name = min(container['Names'], key=lambda n: len(n.split('/')))
    return shortest_name.split('/')[-1]
class SortServiceTest(unittest.TestCase):
    """Tests for compose.project.sort_service_dicts.

    A service must sort after every service it depends on, where a
    dependency is expressed via 'links', 'volumes_from' or
    'net: container:<name>'.  Dependency cycles (including a service
    depending on itself) must raise DependencyError naming the services
    involved.
    """

    def assert_sort_order(self, services, expected_names):
        # Helper: sort and compare the full resulting name order in one step
        # (covers both the length check and each positional check).
        sorted_services = sort_service_dicts(services)
        self.assertEqual([s['name'] for s in sorted_services], expected_names)

    def assert_dependency_error(self, services, *names):
        # Helper: sorting must raise DependencyError, and the message must
        # mention every service in `names`.  assertRaises as a context
        # manager replaces the try/except/else-self.fail() boilerplate.
        with self.assertRaises(DependencyError) as cm:
            sort_service_dicts(services)
        for name in names:
            self.assertIn(name, cm.exception.msg)

    def test_sort_service_dicts_1(self):
        # A service with no dependencies sorts ahead of a linked pair.
        self.assert_sort_order(
            [
                {'links': ['redis'], 'name': 'web'},
                {'name': 'grunt'},
                {'name': 'redis'},
            ],
            ['grunt', 'redis', 'web'])

    def test_sort_service_dicts_2(self):
        # Transitive links: web -> {redis, postgres}, postgres -> redis.
        self.assert_sort_order(
            [
                {'links': ['redis', 'postgres'], 'name': 'web'},
                {'name': 'postgres', 'links': ['redis']},
                {'name': 'redis'},
            ],
            ['redis', 'postgres', 'web'])

    def test_sort_service_dicts_3(self):
        # A three-deep chain of links sorts leaf-first.
        self.assert_sort_order(
            [
                {'name': 'child'},
                {'name': 'parent', 'links': ['child']},
                {'links': ['parent'], 'name': 'grandparent'},
            ],
            ['child', 'parent', 'grandparent'])

    def test_sort_service_dicts_4(self):
        # volumes_from counts as a dependency just like links.
        self.assert_sort_order(
            [
                {'name': 'child'},
                {'name': 'parent', 'volumes_from': ['child']},
                {'links': ['parent'], 'name': 'grandparent'},
            ],
            ['child', 'parent', 'grandparent'])

    def test_sort_service_dicts_5(self):
        # net: container:<name> counts as a dependency as well.
        self.assert_sort_order(
            [
                {'links': ['parent'], 'name': 'grandparent'},
                {'name': 'parent', 'net': 'container:child'},
                {'name': 'child'},
            ],
            ['child', 'parent', 'grandparent'])

    def test_sort_service_dicts_6(self):
        # Same chain as test 4, but with the input order reversed.
        self.assert_sort_order(
            [
                {'links': ['parent'], 'name': 'grandparent'},
                {'name': 'parent', 'volumes_from': ['child']},
                {'name': 'child'},
            ],
            ['child', 'parent', 'grandparent'])

    def test_sort_service_dicts_7(self):
        # All three dependency kinds mixed in one chain.
        self.assert_sort_order(
            [
                {'net': 'container:three', 'name': 'four'},
                {'links': ['two'], 'name': 'three'},
                {'name': 'two', 'volumes_from': ['one']},
                {'name': 'one'},
            ],
            ['one', 'two', 'three', 'four'])

    def test_sort_service_dicts_circular_imports(self):
        # Direct two-service cycle.
        self.assert_dependency_error(
            [
                {'links': ['redis'], 'name': 'web'},
                {'name': 'redis', 'links': ['web']},
            ],
            'redis', 'web')

    def test_sort_service_dicts_circular_imports_2(self):
        # Cycle plus an unrelated, acyclic service.
        self.assert_dependency_error(
            [
                {'links': ['postgres', 'redis'], 'name': 'web'},
                {'name': 'redis', 'links': ['web']},
                {'name': 'postgres'},
            ],
            'redis', 'web')

    def test_sort_service_dicts_circular_imports_3(self):
        # Three-service cycle: a -> b -> c -> a.
        self.assert_dependency_error(
            [
                {'links': ['b'], 'name': 'a'},
                {'name': 'b', 'links': ['c']},
                {'name': 'c', 'links': ['a']},
            ],
            'a', 'b')

    def test_sort_service_dicts_self_imports(self):
        # A service linking to itself is a (degenerate) cycle.
        self.assert_dependency_error(
            [
                {'links': ['web'], 'name': 'web'},
            ],
            'web')
With Compose, you define a multi-container application in a single 16 | file, then spin your application up in a single command which does everything 17 | that needs to be done to get it running. 18 | 19 | Compose is great for development environments, staging servers, and CI. We don't 20 | recommend that you use it in production yet. 21 | 22 | Using Compose is basically a three-step process. 23 | 24 | 1. Define your app's environment with a `Dockerfile` so it can be 25 | reproduced anywhere. 26 | 2. Define the services that make up your app in `docker-compose.yml` so 27 | they can be run together in an isolated environment: 28 | 3. Lastly, run `docker-compose up` and Compose will start and run your entire app. 29 | 30 | A `docker-compose.yml` looks like this: 31 | 32 | web: 33 | build: . 34 | ports: 35 | - "5000:5000" 36 | volumes: 37 | - .:/code 38 | links: 39 | - redis 40 | redis: 41 | image: redis 42 | 43 | Compose has commands for managing the whole lifecycle of your application: 44 | 45 | * Start, stop and rebuild services 46 | * View the status of running services 47 | * Stream the log output of running services 48 | * Run a one-off command on a service 49 | 50 | ## Compose documentation 51 | 52 | - [Installing Compose](install.md) 53 | - [Get started with Django](django.md) 54 | - [Get started with Rails](rails.md) 55 | - [Get started with Wordpress](wordpress.md) 56 | - [Command line reference](cli.md) 57 | - [Yaml file reference](yml.md) 58 | - [Compose environment variables](env.md) 59 | - [Compose command line completion](completion.md) 60 | 61 | ## Quick start 62 | 63 | Let's get started with a walkthrough of getting a simple Python web app running 64 | on Compose. It assumes a little knowledge of Python, but the concepts 65 | demonstrated here should be understandable even if you're not familiar with 66 | Python. 67 | 68 | ### Installation and set-up 69 | 70 | First, [install Docker and Compose](install.md). 
71 | 72 | Next, you'll want to make a directory for the project: 73 | 74 | $ mkdir composetest 75 | $ cd composetest 76 | 77 | Inside this directory, create `app.py`, a simple web app that uses the Flask 78 | framework and increments a value in Redis: 79 | 80 | from flask import Flask 81 | from redis import Redis 82 | import os 83 | app = Flask(__name__) 84 | redis = Redis(host='redis', port=6379) 85 | 86 | @app.route('/') 87 | def hello(): 88 | redis.incr('hits') 89 | return 'Hello World! I have been seen %s times.' % redis.get('hits') 90 | 91 | if __name__ == "__main__": 92 | app.run(host="0.0.0.0", debug=True) 93 | 94 | Next, define the Python dependencies in a file called `requirements.txt`: 95 | 96 | flask 97 | redis 98 | 99 | ### Create a Docker image 100 | 101 | Now, create a Docker image containing all of your app's dependencies. You 102 | specify how to build the image using a file called 103 | [`Dockerfile`](http://docs.docker.com/reference/builder/): 104 | 105 | FROM python:2.7 106 | ADD . /code 107 | WORKDIR /code 108 | RUN pip install -r requirements.txt 109 | CMD python app.py 110 | 111 | This tells Docker to: 112 | 113 | * Build an image starting with the Python 2.7 image. 114 | * Add the current directory `.` into the path `/code` in the image. 115 | * Set the working directory to `/code`. 116 | * Install your Python dependencies. 117 | * Set the default command for the container to `python app.py` 118 | 119 | For more information on how to write Dockerfiles, see the [Docker user guide](https://docs.docker.com/userguide/dockerimages/#building-an-image-from-a-dockerfile) and the [Dockerfile reference](http://docs.docker.com/reference/builder/). 120 | 121 | You can test that this builds by running `docker build -t web .`. 122 | 123 | ### Define services 124 | 125 | Next, define a set of services using `docker-compose.yml`: 126 | 127 | web: 128 | build: . 
129 | ports: 130 | - "5000:5000" 131 | volumes: 132 | - .:/code 133 | links: 134 | - redis 135 | redis: 136 | image: redis 137 | 138 | This defines two services: 139 | 140 | #### web 141 | 142 | * Builds from the `Dockerfile` in the current directory. 143 | * Forwards the exposed port 5000 on the container to port 5000 on the host machine. 144 | * Connects the web container to the Redis service via a link. 145 | * Mounts the current directory on the host to `/code` inside the container allowing you to modify the code without having to rebuild the image. 146 | 147 | #### redis 148 | 149 | * Uses the public [Redis](https://registry.hub.docker.com/_/redis/) image which gets pulled from the Docker Hub registry. 150 | 151 | ### Build and run your app with Compose 152 | 153 | Now, when you run `docker-compose up`, Compose will pull a Redis image, build an image for your code, and start everything up: 154 | 155 | $ docker-compose up 156 | Pulling image redis... 157 | Building web... 158 | Starting composetest_redis_1... 159 | Starting composetest_web_1... 160 | redis_1 | [8] 02 Jan 18:43:35.576 # Server started, Redis version 2.8.3 161 | web_1 | * Running on http://0.0.0.0:5000/ 162 | 163 | The web app should now be listening on port 5000 on your Docker daemon host (if 164 | you're using Boot2docker, `boot2docker ip` will tell you its address). In a browser, 165 | open `http://ip-from-boot2docker:5000` and you should get a message in your browser saying: 166 | 167 | `Hello World! I have been seen 1 times.` 168 | 169 | Refreshing the page will increment the number. 170 | 171 | If you want to run your services in the background, you can pass the `-d` flag 172 | (for daemon mode) to `docker-compose up` and use `docker-compose ps` to see what 173 | is currently running: 174 | 175 | $ docker-compose up -d 176 | Starting composetest_redis_1... 177 | Starting composetest_web_1... 
178 | $ docker-compose ps 179 | Name Command State Ports 180 | ------------------------------------------------------------------- 181 | composetest_redis_1 /usr/local/bin/run Up 182 | composetest_web_1 /bin/sh -c python app.py Up 5000->5000/tcp 183 | 184 | The `docker-compose run` command allows you to run one-off commands for your 185 | services. For example, to see what environment variables are available to the 186 | `web` service: 187 | 188 | $ docker-compose run web env 189 | 190 | See `docker-compose --help` to see other available commands. 191 | 192 | If you started Compose with `docker-compose up -d`, you'll probably want to stop 193 | your services once you've finished with them: 194 | 195 | $ docker-compose stop 196 | 197 | At this point, you have seen the basics of how Compose works. 198 | 199 | - Next, try the quick start guide for [Django](django.md), 200 | [Rails](rails.md), or [Wordpress](wordpress.md). 201 | - See the reference guides for complete details on the [commands](cli.md), the 202 | [configuration file](yml.md) and [environment variables](env.md). 203 | 204 | ## Release Notes 205 | 206 | ### Version 1.2.0 (April 7, 2015) 207 | 208 | For complete information on this release, see the [1.2.0 Milestone project page](https://github.com/docker/compose/wiki/1.2.0-Milestone-Project-Page). 209 | In addition to bug fixes and refinements, this release adds the following: 210 | 211 | * The `extends` keyword, which adds the ability to extend services by sharing common configurations. For details, see 212 | [PR #1088](https://github.com/docker/compose/pull/1088). 213 | 214 | * Better integration with Swarm. Swarm will now schedule inter-dependent 215 | containers on the same host. For details, see 216 | [PR #972](https://github.com/docker/compose/pull/972). 217 | 218 | ## Getting help 219 | 220 | Docker Compose is still in its infancy and under active development. 
If you need 221 | help, would like to contribute, or simply want to talk about the project with 222 | like-minded individuals, we have a number of open channels for communication. 223 | 224 | * To report bugs or file feature requests: please use the [issue tracker on Github](https://github.com/docker/compose/issues). 225 | 226 | * To talk about the project with people in real time: please join the `#docker-compose` channel on IRC. 227 | 228 | * To contribute code or documentation changes: please submit a [pull request on Github](https://github.com/docker/compose/pulls). 229 | 230 | For more information and resources, please visit the [Getting Help project page](https://docs.docker.com/project/get-help/). 231 | -------------------------------------------------------------------------------- /tests/unit/cli_test.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from __future__ import absolute_import 3 | import logging 4 | import os 5 | import tempfile 6 | import shutil 7 | from .. 
class CLITestCase(unittest.TestCase):
    """Tests for the top-level `docker-compose` command dispatcher.

    Covers project-name resolution, config-file discovery, help output,
    logging setup and the `run` subcommand's container-creation options.

    NOTE: `assertEquals` (deprecated alias, removed in Python 3.12) has been
    replaced throughout with `assertEqual`.
    """

    def test_default_project_name(self):
        # The project name defaults to the normalized basename of the cwd.
        cwd = os.getcwd()

        try:
            os.chdir('tests/fixtures/simple-composefile')
            command = TopLevelCommand()
            project_name = command.get_project_name(command.get_config_path())
            self.assertEqual('simplecomposefile', project_name)
        finally:
            os.chdir(cwd)

    def test_project_name_with_explicit_base_dir(self):
        command = TopLevelCommand()
        command.base_dir = 'tests/fixtures/simple-composefile'
        project_name = command.get_project_name(command.get_config_path())
        self.assertEqual('simplecomposefile', project_name)

    def test_project_name_with_explicit_uppercase_base_dir(self):
        # Project names are lowercased.
        command = TopLevelCommand()
        command.base_dir = 'tests/fixtures/UpperCaseDir'
        project_name = command.get_project_name(command.get_config_path())
        self.assertEqual('uppercasedir', project_name)

    def test_project_name_with_explicit_project_name(self):
        # Explicit names are normalized too (dashes stripped).
        command = TopLevelCommand()
        name = 'explicit-project-name'
        project_name = command.get_project_name(None, project_name=name)
        self.assertEqual('explicitprojectname', project_name)

    def test_project_name_from_environment_old_var(self):
        # The legacy FIG_PROJECT_NAME variable is still honoured.
        command = TopLevelCommand()
        name = 'namefromenv'
        with mock.patch.dict(os.environ):
            os.environ['FIG_PROJECT_NAME'] = name
            project_name = command.get_project_name(None)
        self.assertEqual(project_name, name)

    def test_project_name_from_environment_new_var(self):
        command = TopLevelCommand()
        name = 'namefromenv'
        with mock.patch.dict(os.environ):
            os.environ['COMPOSE_PROJECT_NAME'] = name
            project_name = command.get_project_name(None)
        self.assertEqual(project_name, name)

    def test_filename_check(self):
        files = [
            'docker-compose.yml',
            'docker-compose.yaml',
            'fig.yml',
            'fig.yaml',
        ]

        # Files placed directly in the base dir: earlier names win.
        # (These were stray string-literal statements before; now comments.)
        self.assertEqual('docker-compose.yml', get_config_filename_for_files(files[0:]))
        self.assertEqual('docker-compose.yaml', get_config_filename_for_files(files[1:]))
        self.assertEqual('fig.yml', get_config_filename_for_files(files[2:]))
        self.assertEqual('fig.yaml', get_config_filename_for_files(files[3:]))
        self.assertRaises(ComposeFileNotFound, lambda: get_config_filename_for_files([]))

        # Files placed in the parent of a subdir: lookup walks upwards.

        def get_config_filename_for_files_in_subdir(files):
            return get_config_filename_for_files(files, subdir=True)

        self.assertEqual('docker-compose.yml', get_config_filename_for_files_in_subdir(files[0:]))
        self.assertEqual('docker-compose.yaml', get_config_filename_for_files_in_subdir(files[1:]))
        self.assertEqual('fig.yml', get_config_filename_for_files_in_subdir(files[2:]))
        self.assertEqual('fig.yaml', get_config_filename_for_files_in_subdir(files[3:]))
        self.assertRaises(ComposeFileNotFound, lambda: get_config_filename_for_files_in_subdir([]))

    def test_get_project(self):
        command = TopLevelCommand()
        command.base_dir = 'tests/fixtures/longer-filename-composefile'
        project = command.get_project(command.get_config_path())
        self.assertEqual(project.name, 'longerfilenamecomposefile')
        self.assertTrue(project.client)
        self.assertTrue(project.services)

    def test_help(self):
        # -h prints usage and exits via SystemExit.
        command = TopLevelCommand()
        with self.assertRaises(SystemExit):
            command.dispatch(['-h'], None)

    def test_command_help(self):
        with self.assertRaises(SystemExit) as ctx:
            TopLevelCommand().dispatch(['help', 'up'], None)

        self.assertIn('Usage: up', str(ctx.exception))

    def test_command_help_dashes(self):
        # Commands with dashes in their names must resolve for `help` too.
        with self.assertRaises(SystemExit) as ctx:
            TopLevelCommand().dispatch(['help', 'migrate-to-labels'], None)

        self.assertIn('Usage: migrate-to-labels', str(ctx.exception))

    def test_command_help_nonexistent(self):
        with self.assertRaises(NoSuchCommand):
            TopLevelCommand().dispatch(['help', 'nonexistent'], None)

    def test_setup_logging(self):
        main.setup_logging()
        self.assertEqual(logging.getLogger().level, logging.DEBUG)
        # The noisy `requests` logger must not propagate to the root handler.
        self.assertEqual(logging.getLogger('requests').propagate, False)

    @mock.patch('compose.cli.main.dockerpty', autospec=True)
    def test_run_with_environment_merged_with_options_list(self, mock_dockerpty):
        # -e options override/extend the environment declared on the service.
        command = TopLevelCommand()
        mock_client = mock.create_autospec(docker.Client)
        mock_project = mock.Mock()
        mock_project.get_service.return_value = Service(
            'service',
            client=mock_client,
            environment=['FOO=ONE', 'BAR=TWO'],
            image='someimage')

        command.run(mock_project, {
            'SERVICE': 'service',
            'COMMAND': None,
            '-e': ['BAR=NEW', 'OTHER=THREE'],
            '--user': None,
            '--no-deps': None,
            '--allow-insecure-ssl': None,
            '-d': True,
            '-T': None,
            '--entrypoint': None,
            '--service-ports': None,
            '--rm': None,
        })

        _, _, call_kwargs = mock_client.create_container.mock_calls[0]
        self.assertEqual(
            call_kwargs['environment'],
            {'FOO': 'ONE', 'BAR': 'NEW', 'OTHER': 'THREE'})

    def test_run_service_with_restart_always(self):
        # Without --rm, the service's restart policy is applied...
        command = TopLevelCommand()
        mock_client = mock.create_autospec(docker.Client)
        mock_project = mock.Mock()
        mock_project.get_service.return_value = Service(
            'service',
            client=mock_client,
            restart='always',
            image='someimage')
        command.run(mock_project, {
            'SERVICE': 'service',
            'COMMAND': None,
            '-e': [],
            '--user': None,
            '--no-deps': None,
            '--allow-insecure-ssl': None,
            '-d': True,
            '-T': None,
            '--entrypoint': None,
            '--service-ports': None,
            '--rm': None,
        })
        _, _, call_kwargs = mock_client.create_container.mock_calls[0]
        self.assertEqual(call_kwargs['host_config']['RestartPolicy']['Name'], 'always')

        # ...but --rm suppresses it (a restarting container can't be removed).
        command = TopLevelCommand()
        mock_client = mock.create_autospec(docker.Client)
        mock_project = mock.Mock()
        mock_project.get_service.return_value = Service(
            'service',
            client=mock_client,
            restart='always',
            image='someimage')
        command.run(mock_project, {
            'SERVICE': 'service',
            'COMMAND': None,
            '-e': [],
            '--user': None,
            '--no-deps': None,
            '--allow-insecure-ssl': None,
            '-d': True,
            '-T': None,
            '--entrypoint': None,
            '--service-ports': None,
            '--rm': True,
        })
        _, _, call_kwargs = mock_client.create_container.mock_calls[0]
        self.assertNotIn('RestartPolicy', call_kwargs['host_config'])


def get_config_filename_for_files(filenames, subdir=None):
    """Create empty `filenames` in a fresh temp dir and return the basename
    of the config file Compose picks from there (or from a subdir when
    `subdir` is truthy, exercising the upward search)."""
    project_dir = tempfile.mkdtemp()
    try:
        make_files(project_dir, filenames)
        command = TopLevelCommand()
        if subdir:
            command.base_dir = tempfile.mkdtemp(dir=project_dir)
        else:
            command.base_dir = project_dir
        return os.path.basename(command.get_config_path())
    finally:
        # Always clean up the fixture tree, even when lookup raises.
        shutil.rmtree(project_dir)


def make_files(dirname, filenames):
    """Create an empty file in `dirname` for each name in `filenames`."""
    for fname in filenames:
        with open(os.path.join(dirname, fname), 'w') as f:
            f.write('')
--------------------------------------------------------------------------------
#!bash
#
# bash completion for docker-compose
#
# This work is based on the completion for the docker command.
#
# This script provides completion of:
#  - commands and their options
#  - service names
#  - filepaths
#
# To enable the completions either:
#  - place this file in /etc/bash_completion.d
#  or
#  - copy this file to e.g. ~/.docker-compose-completion.sh and add the line
#    below to your .bashrc after bash completion features are loaded
#    . ~/.docker-compose-completion.sh


# For compatibility reasons, Compose and therefore its completion supports several
# stack composition files as listed here, in descending priority.
# Support for these filenames might be dropped in some future version.
__docker-compose_compose_file() {
	local file
	for file in docker-compose.y{,a}ml fig.y{,a}ml ; do
		# Quote the expansion so a path containing whitespace cannot break
		# the test or be word-split on output.
		[ -e "$file" ] && {
			echo "$file"
			return
		}
	done
	echo docker-compose.yml
}

# Extracts all service names from the compose file.
___docker-compose_all_services_in_compose_file() {
	awk -F: '/^[a-zA-Z0-9]/{print $1}' "${compose_file:-$(__docker-compose_compose_file)}" 2>/dev/null
}

# All services, even those without an existing container
__docker-compose_services_all() {
	COMPREPLY=( $(compgen -W "$(___docker-compose_all_services_in_compose_file)" -- "$cur") )
}

# All services that have an entry with the given key in their compose_file section
___docker-compose_services_with_key() {
	# flatten sections to one line, then filter lines containing the key and return section name.
	# Note: "$1" inside the double-quoted -v assignment is the *shell* positional
	# parameter (the key, e.g. "build"), expanded before awk sees it.
	awk '/^[a-zA-Z0-9]/{printf "\n"};{printf $0;next;}' "${compose_file:-$(__docker-compose_compose_file)}" | awk -F: -v key=": +$1:" '$0 ~ key {print $1}'
}

# All services that are defined by a Dockerfile reference
__docker-compose_services_from_build() {
	COMPREPLY=( $(compgen -W "$(___docker-compose_services_with_key build)" -- "$cur") )
}

# All services that are defined by an image
__docker-compose_services_from_image() {
	COMPREPLY=( $(compgen -W "$(___docker-compose_services_with_key image)" -- "$cur") )
}

# The services for which containers have been created, optionally filtered
# by a boolean expression passed in as argument.
__docker-compose_services_with() {
	local containers names
	containers="$(docker-compose 2>/dev/null ${compose_file:+-f $compose_file} ${compose_project:+-p $compose_project} ps -q)"
	# Bail out when no containers exist: otherwise `docker inspect` would be
	# invoked with no container IDs at all and unconditionally fail.
	[ -n "$containers" ] || return
	names=( $(docker 2>/dev/null inspect --format "{{if ${1:-true}}} {{ .Name }} {{end}}" $containers) )
	names=( ${names[@]%_*} ) # strip trailing numbers
	names=( ${names[@]#*_} ) # strip project name
	COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") )
}

# The services for which at least one running container exists
__docker-compose_services_running() {
	__docker-compose_services_with '.State.Running'
}

# The services for which at least one stopped container exists
__docker-compose_services_stopped() {
	__docker-compose_services_with 'not .State.Running'
}


_docker-compose_build() {
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--no-cache" -- "$cur" ) )
			;;
		*)
			__docker-compose_services_from_build
			;;
	esac
}


# Completion for the top-level options, before any subcommand has been typed.
_docker-compose_docker-compose() {
	case "$prev" in
		--file|-f)
			_filedir "y?(a)ml"
			return
			;;
		--project-name|-p)
			return
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--help -h --verbose --version -v --file -f --project-name -p" -- "$cur" ) )
			;;
		*)
			COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) )
			;;
	esac
}


_docker-compose_help() {
	COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) )
}


_docker-compose_kill() {
	case "$prev" in
		-s)
			# Signal names are matched case-insensitively by upper-casing $cur.
			COMPREPLY=( $( compgen -W "SIGHUP SIGINT SIGKILL SIGUSR1 SIGUSR2" -- "$(echo $cur | tr '[:lower:]' '[:upper:]')" ) )
			return
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-s" -- "$cur" ) )
			;;
		*)
			__docker-compose_services_running
			;;
	esac
}


_docker-compose_logs() {
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--no-color" -- "$cur" ) )
			;;
		*)
			__docker-compose_services_all
			;;
	esac
}


_docker-compose_port() {
	case "$prev" in
		--protocol)
			COMPREPLY=( $( compgen -W "tcp udp" -- "$cur" ) )
			return
			;;
		--index)
			return
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--protocol --index" -- "$cur" ) )
			;;
		*)
			__docker-compose_services_all
			;;
	esac
}


_docker-compose_ps() {
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-q" -- "$cur" ) )
			;;
		*)
			__docker-compose_services_all
			;;
	esac
}


_docker-compose_pull() {
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--allow-insecure-ssl" -- "$cur" ) )
			;;
		*)
			__docker-compose_services_from_image
			;;
	esac
}


_docker-compose_restart() {
	case "$prev" in
		-t | --timeout)
			return
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-t --timeout" -- "$cur" ) )
			;;
		*)
			__docker-compose_services_running
			;;
	esac
}


_docker-compose_rm() {
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--force -f -v" -- "$cur" ) )
			;;
		*)
			__docker-compose_services_stopped
			;;
	esac
}


_docker-compose_run() {
	case "$prev" in
		-e)
			# Complete environment variable names; nospace so the user can
			# append "=value" directly.
			COMPREPLY=( $( compgen -e -- "$cur" ) )
			compopt -o nospace
			return
			;;
		--entrypoint|--user|-u)
			return
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--allow-insecure-ssl -d --entrypoint -e --no-deps --rm --service-ports -T --user -u" -- "$cur" ) )
			;;
		*)
			__docker-compose_services_all
			;;
	esac
}


_docker-compose_scale() {
	case "$prev" in
		=)
			# The cursor sits right after "service="; keep what was typed.
			COMPREPLY=("$cur")
			;;
		*)
			COMPREPLY=( $(compgen -S "=" -W "$(___docker-compose_all_services_in_compose_file)" -- "$cur") )
			compopt -o nospace
			;;
	esac
}


_docker-compose_start() {
	__docker-compose_services_stopped
}


_docker-compose_stop() {
	case "$prev" in
		-t | --timeout)
			return
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "-t --timeout" -- "$cur" ) )
			;;
		*)
			__docker-compose_services_running
			;;
	esac
}


_docker-compose_up() {
	case "$prev" in
		-t | --timeout)
			return
			;;
	esac

	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--allow-insecure-ssl -d --no-build --no-color --no-deps --no-recreate -t --timeout --x-smart-recreate" -- "$cur" ) )
			;;
		*)
			__docker-compose_services_all
			;;
	esac
}


_docker-compose_version() {
	case "$cur" in
		-*)
			COMPREPLY=( $( compgen -W "--short" -- "$cur" ) )
			;;
	esac
}
313 | 314 | _docker-compose() { 315 | local previous_extglob_setting=$(shopt -p extglob) 316 | shopt -s extglob 317 | 318 | local commands=( 319 | build 320 | help 321 | kill 322 | logs 323 | migrate-to-labels 324 | port 325 | ps 326 | pull 327 | restart 328 | rm 329 | run 330 | scale 331 | start 332 | stop 333 | up 334 | version 335 | ) 336 | 337 | COMPREPLY=() 338 | local cur prev words cword 339 | _get_comp_words_by_ref -n : cur prev words cword 340 | 341 | # search subcommand and invoke its handler. 342 | # special treatment of some top-level options 343 | local command='docker-compose' 344 | local counter=1 345 | local compose_file compose_project 346 | while [ $counter -lt $cword ]; do 347 | case "${words[$counter]}" in 348 | -f|--file) 349 | (( counter++ )) 350 | compose_file="${words[$counter]}" 351 | ;; 352 | -p|--project-name) 353 | (( counter++ )) 354 | compose_project="${words[$counter]}" 355 | ;; 356 | -*) 357 | ;; 358 | *) 359 | command="${words[$counter]}" 360 | break 361 | ;; 362 | esac 363 | (( counter++ )) 364 | done 365 | 366 | local completions_func=_docker-compose_${command} 367 | declare -F $completions_func >/dev/null && $completions_func 368 | 369 | eval "$previous_extglob_setting" 370 | return 0 371 | } 372 | 373 | complete -F _docker-compose docker-compose 374 | -------------------------------------------------------------------------------- /experimental/compose_swarm_networking.md: -------------------------------------------------------------------------------- 1 | # Experimental: Compose, Swarm and Multi-Host Networking 2 | 3 | The [experimental build of Docker](https://github.com/docker/docker/tree/master/experimental) has an entirely new networking system, which enables secure communication between containers on multiple hosts. In combination with Docker Swarm and Docker Compose, you can now run multi-container apps on multi-host clusters with the same tooling and configuration format you use to develop them locally. 
4 | 5 | > Note: This functionality is in the experimental stage, and contains some hacks and workarounds which will be removed as it matures. 6 | 7 | ## Prerequisites 8 | 9 | Before you start, you’ll need to install the experimental build of Docker, and the latest versions of Machine and Compose. 10 | 11 | - To install the experimental Docker build on a Linux machine, follow the instructions [here](https://github.com/docker/docker/tree/master/experimental#install-docker-experimental). 12 | 13 | - To install the experimental Docker build on a Mac, run these commands: 14 | 15 | $ curl -L https://experimental.docker.com/builds/Darwin/x86_64/docker-latest > /usr/local/bin/docker 16 | $ chmod +x /usr/local/bin/docker 17 | 18 | - To install Machine, follow the instructions [here](http://docs.docker.com/machine/). 19 | 20 | - To install Compose, follow the instructions [here](http://docs.docker.com/compose/install/). 21 | 22 | You’ll also need a [Docker Hub](https://hub.docker.com/account/signup/) account and a [Digital Ocean](https://www.digitalocean.com/) account. 23 | 24 | ## Set up a swarm with multi-host networking 25 | 26 | Set the `DIGITALOCEAN_ACCESS_TOKEN` environment variable to a valid Digital Ocean API token, which you can generate in the [API panel](https://cloud.digitalocean.com/settings/applications). 27 | 28 | DIGITALOCEAN_ACCESS_TOKEN=abc12345 29 | 30 | Start a consul server: 31 | 32 | docker-machine create -d digitalocean --engine-install-url https://experimental.docker.com consul 33 | docker $(docker-machine config consul) run -d -p 8500:8500 -h consul progrium/consul -server -bootstrap 34 | 35 | (In a real world setting you’d set up a distributed consul, but that’s beyond the scope of this guide!) 
36 | 37 | Create a Swarm token: 38 | 39 | SWARM_TOKEN=$(docker run swarm create) 40 | 41 | Create a Swarm master: 42 | 43 | docker-machine create -d digitalocean --swarm --swarm-master --swarm-discovery=token://$SWARM_TOKEN --engine-install-url="https://experimental.docker.com" --digitalocean-image "ubuntu-14-10-x64" --engine-opt=default-network=overlay:multihost --engine-label=com.docker.network.driver.overlay.bind_interface=eth0 --engine-opt=kv-store=consul:$(docker-machine ip consul):8500 swarm-0 44 | 45 | Create a Swarm node: 46 | 47 | docker-machine create -d digitalocean --swarm --swarm-discovery=token://$SWARM_TOKEN --engine-install-url="https://experimental.docker.com" --digitalocean-image "ubuntu-14-10-x64" --engine-opt=default-network=overlay:multihost --engine-label=com.docker.network.driver.overlay.bind_interface=eth0 --engine-opt=kv-store=consul:$(docker-machine ip consul):8500 --engine-label com.docker.network.driver.overlay.neighbor_ip=$(docker-machine ip swarm-0) swarm-1 48 | 49 | You can create more Swarm nodes if you want - it’s best to give them sensible names (swarm-2, swarm-3, etc). 50 | 51 | Finally, point Docker at your swarm: 52 | 53 | eval "$(docker-machine env --swarm swarm-0)" 54 | 55 | ## Run containers and get them communicating 56 | 57 | Now that you’ve got a swarm up and running, you can create containers on it just like a single Docker instance: 58 | 59 | $ docker run busybox echo hello world 60 | hello world 61 | 62 | If you run `docker ps -a`, you can see what node that container was started on by looking at its name (here it’s swarm-3): 63 | 64 | $ docker ps -a 65 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 66 | 41f59749737b busybox "echo hello world" 15 seconds ago Exited (0) 13 seconds ago swarm-3/trusting_leakey 67 | 68 | As you start more containers, they’ll be placed on different nodes across the cluster, thanks to Swarm’s default “spread” scheduling strategy. 
69 | 70 | Every container started on this swarm will use the “overlay:multihost” network by default, meaning they can all intercommunicate. Each container gets an IP address on that network, and an `/etc/hosts` file which will be updated on-the-fly with every other container’s IP address and name. That means that if you have a running container named ‘foo’, other containers can access it at the hostname ‘foo’. 71 | 72 | Let’s verify that multi-host networking is functioning. Start a long-running container: 73 | 74 | $ docker run -d --name long-running busybox top 75 | 76 | 77 | If you start a new container and inspect its /etc/hosts file, you’ll see the long-running container in there: 78 | 79 | $ docker run busybox cat /etc/hosts 80 | ... 81 | 172.21.0.6 long-running 82 | 83 | Verify that connectivity works between containers: 84 | 85 | $ docker run busybox ping long-running 86 | PING long-running (172.21.0.6): 56 data bytes 87 | 64 bytes from 172.21.0.6: seq=0 ttl=64 time=7.975 ms 88 | 64 bytes from 172.21.0.6: seq=1 ttl=64 time=1.378 ms 89 | 64 bytes from 172.21.0.6: seq=2 ttl=64 time=1.348 ms 90 | ^C 91 | --- long-running ping statistics --- 92 | 3 packets transmitted, 3 packets received, 0% packet loss 93 | round-trip min/avg/max = 1.140/2.099/7.975 ms 94 | 95 | ## Run a Compose application 96 | 97 | Here’s an example of a simple Python + Redis app using multi-host networking on a swarm. 98 | 99 | Create a directory for the app: 100 | 101 | $ mkdir composetest 102 | $ cd composetest 103 | 104 | Inside this directory, create 2 files. 105 | 106 | First, create `app.py` - a simple web app that uses the Flask framework and increments a value in Redis: 107 | 108 | from flask import Flask 109 | from redis import Redis 110 | import os 111 | app = Flask(__name__) 112 | redis = Redis(host='composetest_redis_1', port=6379) 113 | 114 | @app.route('/') 115 | def hello(): 116 | redis.incr('hits') 117 | return 'Hello World! I have been seen %s times.' 
% redis.get('hits') 118 | 119 | if __name__ == "__main__": 120 | app.run(host="0.0.0.0", debug=True) 121 | 122 | Note that we’re connecting to a host called `composetest_redis_1` - this is the name of the Redis container that Compose will start. 123 | 124 | Second, create a Dockerfile for the app container: 125 | 126 | FROM python:2.7 127 | RUN pip install flask redis 128 | ADD . /code 129 | WORKDIR /code 130 | CMD ["python", "app.py"] 131 | 132 | Build the Docker image and push it to the Hub (you’ll need a Hub account). Replace `` with your Docker Hub username: 133 | 134 | $ docker build -t /counter . 135 | $ docker push /counter 136 | 137 | Next, create a `docker-compose.yml`, which defines the configuration for the web and redis containers. Once again, replace `` with your Hub username: 138 | 139 | web: 140 | image: /counter 141 | ports: 142 | - "80:5000" 143 | redis: 144 | image: redis 145 | 146 | Now start the app: 147 | 148 | $ docker-compose up -d 149 | Pulling web (username/counter:latest)... 150 | swarm-0: Pulling username/counter:latest... : downloaded 151 | swarm-2: Pulling username/counter:latest... : downloaded 152 | swarm-1: Pulling username/counter:latest... : downloaded 153 | swarm-3: Pulling username/counter:latest... : downloaded 154 | swarm-4: Pulling username/counter:latest... : downloaded 155 | Creating composetest_web_1... 156 | Pulling redis (redis:latest)... 157 | swarm-2: Pulling redis:latest... : downloaded 158 | swarm-1: Pulling redis:latest... : downloaded 159 | swarm-3: Pulling redis:latest... : downloaded 160 | swarm-4: Pulling redis:latest... : downloaded 161 | swarm-0: Pulling redis:latest... : downloaded 162 | Creating composetest_redis_1... 
163 | 164 | Swarm has created containers for both web and redis, and placed them on different nodes, which you can check with `docker ps`: 165 | 166 | $ docker ps 167 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 168 | 92faad2135c9 redis "/entrypoint.sh redi 43 seconds ago Up 42 seconds swarm-2/composetest_redis_1 169 | adb809e5cdac username/counter "/bin/sh -c 'python 55 seconds ago Up 54 seconds 45.67.8.9:80->5000/tcp swarm-1/composetest_web_1 170 | 171 | You can also see that the web container has exposed port 80 on its swarm node. If you curl that IP, you’ll get a response from the container: 172 | 173 | $ curl http://45.67.8.9 174 | Hello World! I have been seen 1 times. 175 | 176 | If you hit it repeatedly, the counter will increment, demonstrating that the web and redis container are communicating: 177 | 178 | $ curl http://45.67.8.9 179 | Hello World! I have been seen 2 times. 180 | $ curl http://45.67.8.9 181 | Hello World! I have been seen 3 times. 182 | $ curl http://45.67.8.9 183 | Hello World! I have been seen 4 times. 184 | -------------------------------------------------------------------------------- /tests/unit/project_test.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | from .. 
import unittest 3 | from compose.service import Service 4 | from compose.project import Project 5 | from compose.container import Container 6 | from compose import config 7 | 8 | import mock 9 | import docker 10 | 11 | 12 | class ProjectTest(unittest.TestCase): 13 | def test_from_dict(self): 14 | project = Project.from_dicts('composetest', [ 15 | { 16 | 'name': 'web', 17 | 'image': 'busybox:latest' 18 | }, 19 | { 20 | 'name': 'db', 21 | 'image': 'busybox:latest' 22 | }, 23 | ], None) 24 | self.assertEqual(len(project.services), 2) 25 | self.assertEqual(project.get_service('web').name, 'web') 26 | self.assertEqual(project.get_service('web').options['image'], 'busybox:latest') 27 | self.assertEqual(project.get_service('db').name, 'db') 28 | self.assertEqual(project.get_service('db').options['image'], 'busybox:latest') 29 | 30 | def test_from_dict_sorts_in_dependency_order(self): 31 | project = Project.from_dicts('composetest', [ 32 | { 33 | 'name': 'web', 34 | 'image': 'busybox:latest', 35 | 'links': ['db'], 36 | }, 37 | { 38 | 'name': 'db', 39 | 'image': 'busybox:latest', 40 | 'volumes_from': ['volume'] 41 | }, 42 | { 43 | 'name': 'volume', 44 | 'image': 'busybox:latest', 45 | 'volumes': ['/tmp'], 46 | } 47 | ], None) 48 | 49 | self.assertEqual(project.services[0].name, 'volume') 50 | self.assertEqual(project.services[1].name, 'db') 51 | self.assertEqual(project.services[2].name, 'web') 52 | 53 | def test_from_config(self): 54 | dicts = config.from_dictionary({ 55 | 'web': { 56 | 'image': 'busybox:latest', 57 | }, 58 | 'db': { 59 | 'image': 'busybox:latest', 60 | }, 61 | }) 62 | project = Project.from_dicts('composetest', dicts, None) 63 | self.assertEqual(len(project.services), 2) 64 | self.assertEqual(project.get_service('web').name, 'web') 65 | self.assertEqual(project.get_service('web').options['image'], 'busybox:latest') 66 | self.assertEqual(project.get_service('db').name, 'db') 67 | self.assertEqual(project.get_service('db').options['image'], 
'busybox:latest') 68 | 69 | def test_get_service(self): 70 | web = Service( 71 | project='composetest', 72 | name='web', 73 | client=None, 74 | image="busybox:latest", 75 | ) 76 | project = Project('test', [web], None) 77 | self.assertEqual(project.get_service('web'), web) 78 | 79 | def test_get_services_returns_all_services_without_args(self): 80 | web = Service( 81 | project='composetest', 82 | name='web', 83 | image='foo', 84 | ) 85 | console = Service( 86 | project='composetest', 87 | name='console', 88 | image='foo', 89 | ) 90 | project = Project('test', [web, console], None) 91 | self.assertEqual(project.get_services(), [web, console]) 92 | 93 | def test_get_services_returns_listed_services_with_args(self): 94 | web = Service( 95 | project='composetest', 96 | name='web', 97 | image='foo', 98 | ) 99 | console = Service( 100 | project='composetest', 101 | name='console', 102 | image='foo', 103 | ) 104 | project = Project('test', [web, console], None) 105 | self.assertEqual(project.get_services(['console']), [console]) 106 | 107 | def test_get_services_with_include_links(self): 108 | db = Service( 109 | project='composetest', 110 | name='db', 111 | image='foo', 112 | ) 113 | web = Service( 114 | project='composetest', 115 | name='web', 116 | image='foo', 117 | links=[(db, 'database')] 118 | ) 119 | cache = Service( 120 | project='composetest', 121 | name='cache', 122 | image='foo' 123 | ) 124 | console = Service( 125 | project='composetest', 126 | name='console', 127 | image='foo', 128 | links=[(web, 'web')] 129 | ) 130 | project = Project('test', [web, db, cache, console], None) 131 | self.assertEqual( 132 | project.get_services(['console'], include_deps=True), 133 | [db, web, console] 134 | ) 135 | 136 | def test_get_services_removes_duplicates_following_links(self): 137 | db = Service( 138 | project='composetest', 139 | name='db', 140 | image='foo', 141 | ) 142 | web = Service( 143 | project='composetest', 144 | name='web', 145 | image='foo', 146 | 
links=[(db, 'database')] 147 | ) 148 | project = Project('test', [web, db], None) 149 | self.assertEqual( 150 | project.get_services(['web', 'db'], include_deps=True), 151 | [db, web] 152 | ) 153 | 154 | def test_use_volumes_from_container(self): 155 | container_id = 'aabbccddee' 156 | container_dict = dict(Name='aaa', Id=container_id) 157 | mock_client = mock.create_autospec(docker.Client) 158 | mock_client.inspect_container.return_value = container_dict 159 | project = Project.from_dicts('test', [ 160 | { 161 | 'name': 'test', 162 | 'image': 'busybox:latest', 163 | 'volumes_from': ['aaa'] 164 | } 165 | ], mock_client) 166 | self.assertEqual(project.get_service('test')._get_volumes_from(), [container_id]) 167 | 168 | def test_use_volumes_from_service_no_container(self): 169 | container_name = 'test_vol_1' 170 | mock_client = mock.create_autospec(docker.Client) 171 | mock_client.containers.return_value = [ 172 | { 173 | "Name": container_name, 174 | "Names": [container_name], 175 | "Id": container_name, 176 | "Image": 'busybox:latest' 177 | } 178 | ] 179 | project = Project.from_dicts('test', [ 180 | { 181 | 'name': 'vol', 182 | 'image': 'busybox:latest' 183 | }, 184 | { 185 | 'name': 'test', 186 | 'image': 'busybox:latest', 187 | 'volumes_from': ['vol'] 188 | } 189 | ], mock_client) 190 | self.assertEqual(project.get_service('test')._get_volumes_from(), [container_name]) 191 | 192 | @mock.patch.object(Service, 'containers') 193 | def test_use_volumes_from_service_container(self, mock_return): 194 | container_ids = ['aabbccddee', '12345'] 195 | mock_return.return_value = [ 196 | mock.Mock(id=container_id, spec=Container) 197 | for container_id in container_ids] 198 | 199 | project = Project.from_dicts('test', [ 200 | { 201 | 'name': 'vol', 202 | 'image': 'busybox:latest' 203 | }, 204 | { 205 | 'name': 'test', 206 | 'image': 'busybox:latest', 207 | 'volumes_from': ['vol'] 208 | } 209 | ], None) 210 | self.assertEqual(project.get_service('test')._get_volumes_from(), 
container_ids) 211 | 212 | def test_net_unset(self): 213 | mock_client = mock.create_autospec(docker.Client) 214 | project = Project.from_dicts('test', [ 215 | { 216 | 'name': 'test', 217 | 'image': 'busybox:latest', 218 | } 219 | ], mock_client) 220 | service = project.get_service('test') 221 | self.assertEqual(service._get_net(), None) 222 | self.assertNotIn('NetworkMode', service._get_container_host_config({})) 223 | 224 | def test_use_net_from_container(self): 225 | container_id = 'aabbccddee' 226 | container_dict = dict(Name='aaa', Id=container_id) 227 | mock_client = mock.create_autospec(docker.Client) 228 | mock_client.inspect_container.return_value = container_dict 229 | project = Project.from_dicts('test', [ 230 | { 231 | 'name': 'test', 232 | 'image': 'busybox:latest', 233 | 'net': 'container:aaa' 234 | } 235 | ], mock_client) 236 | service = project.get_service('test') 237 | self.assertEqual(service._get_net(), 'container:' + container_id) 238 | 239 | def test_use_net_from_service(self): 240 | container_name = 'test_aaa_1' 241 | mock_client = mock.create_autospec(docker.Client) 242 | mock_client.containers.return_value = [ 243 | { 244 | "Name": container_name, 245 | "Names": [container_name], 246 | "Id": container_name, 247 | "Image": 'busybox:latest' 248 | } 249 | ] 250 | project = Project.from_dicts('test', [ 251 | { 252 | 'name': 'aaa', 253 | 'image': 'busybox:latest' 254 | }, 255 | { 256 | 'name': 'test', 257 | 'image': 'busybox:latest', 258 | 'net': 'container:aaa' 259 | } 260 | ], mock_client) 261 | 262 | service = project.get_service('test') 263 | self.assertEqual(service._get_net(), 'container:' + container_name) 264 | -------------------------------------------------------------------------------- /tests/integration/state_test.py: -------------------------------------------------------------------------------- 1 | from __future__ import unicode_literals 2 | import tempfile 3 | import shutil 4 | import os 5 | 6 | from compose import config 7 | 
from compose.project import Project 8 | from compose.const import LABEL_CONFIG_HASH 9 | 10 | from .testcases import DockerClientTestCase 11 | 12 | 13 | class ProjectTestCase(DockerClientTestCase): 14 | def run_up(self, cfg, **kwargs): 15 | kwargs.setdefault('smart_recreate', True) 16 | kwargs.setdefault('timeout', 0.1) 17 | 18 | project = self.make_project(cfg) 19 | project.up(**kwargs) 20 | return set(project.containers(stopped=True)) 21 | 22 | def make_project(self, cfg): 23 | return Project.from_dicts( 24 | name='composetest', 25 | client=self.client, 26 | service_dicts=config.from_dictionary(cfg), 27 | ) 28 | 29 | 30 | class BasicProjectTest(ProjectTestCase): 31 | def setUp(self): 32 | super(BasicProjectTest, self).setUp() 33 | 34 | self.cfg = { 35 | 'db': {'image': 'busybox:latest'}, 36 | 'web': {'image': 'busybox:latest'}, 37 | } 38 | 39 | def test_no_change(self): 40 | old_containers = self.run_up(self.cfg) 41 | self.assertEqual(len(old_containers), 2) 42 | 43 | new_containers = self.run_up(self.cfg) 44 | self.assertEqual(len(new_containers), 2) 45 | 46 | self.assertEqual(old_containers, new_containers) 47 | 48 | def test_partial_change(self): 49 | old_containers = self.run_up(self.cfg) 50 | old_db = [c for c in old_containers if c.name_without_project == 'db_1'][0] 51 | old_web = [c for c in old_containers if c.name_without_project == 'web_1'][0] 52 | 53 | self.cfg['web']['command'] = '/bin/true' 54 | 55 | new_containers = self.run_up(self.cfg) 56 | self.assertEqual(len(new_containers), 2) 57 | 58 | preserved = list(old_containers & new_containers) 59 | self.assertEqual(preserved, [old_db]) 60 | 61 | removed = list(old_containers - new_containers) 62 | self.assertEqual(removed, [old_web]) 63 | 64 | created = list(new_containers - old_containers) 65 | self.assertEqual(len(created), 1) 66 | self.assertEqual(created[0].name_without_project, 'web_1') 67 | self.assertEqual(created[0].get('Config.Cmd'), ['/bin/true']) 68 | 69 | def test_all_change(self): 70 | 
old_containers = self.run_up(self.cfg) 71 | self.assertEqual(len(old_containers), 2) 72 | 73 | self.cfg['web']['command'] = '/bin/true' 74 | self.cfg['db']['command'] = '/bin/true' 75 | 76 | new_containers = self.run_up(self.cfg) 77 | self.assertEqual(len(new_containers), 2) 78 | 79 | unchanged = old_containers & new_containers 80 | self.assertEqual(len(unchanged), 0) 81 | 82 | new = new_containers - old_containers 83 | self.assertEqual(len(new), 2) 84 | 85 | 86 | class ProjectWithDependenciesTest(ProjectTestCase): 87 | def setUp(self): 88 | super(ProjectWithDependenciesTest, self).setUp() 89 | 90 | self.cfg = { 91 | 'db': { 92 | 'image': 'busybox:latest', 93 | 'command': 'tail -f /dev/null', 94 | }, 95 | 'web': { 96 | 'image': 'busybox:latest', 97 | 'command': 'tail -f /dev/null', 98 | 'links': ['db'], 99 | }, 100 | 'nginx': { 101 | 'image': 'busybox:latest', 102 | 'command': 'tail -f /dev/null', 103 | 'links': ['web'], 104 | }, 105 | } 106 | 107 | def test_up(self): 108 | containers = self.run_up(self.cfg) 109 | self.assertEqual( 110 | set(c.name_without_project for c in containers), 111 | set(['db_1', 'web_1', 'nginx_1']), 112 | ) 113 | 114 | def test_change_leaf(self): 115 | old_containers = self.run_up(self.cfg) 116 | 117 | self.cfg['nginx']['environment'] = {'NEW_VAR': '1'} 118 | new_containers = self.run_up(self.cfg) 119 | 120 | self.assertEqual( 121 | set(c.name_without_project for c in new_containers - old_containers), 122 | set(['nginx_1']), 123 | ) 124 | 125 | def test_change_middle(self): 126 | old_containers = self.run_up(self.cfg) 127 | 128 | self.cfg['web']['environment'] = {'NEW_VAR': '1'} 129 | new_containers = self.run_up(self.cfg) 130 | 131 | self.assertEqual( 132 | set(c.name_without_project for c in new_containers - old_containers), 133 | set(['web_1', 'nginx_1']), 134 | ) 135 | 136 | def test_change_root(self): 137 | old_containers = self.run_up(self.cfg) 138 | 139 | self.cfg['db']['environment'] = {'NEW_VAR': '1'} 140 | new_containers = 
self.run_up(self.cfg) 141 | 142 | self.assertEqual( 143 | set(c.name_without_project for c in new_containers - old_containers), 144 | set(['db_1', 'web_1', 'nginx_1']), 145 | ) 146 | 147 | def test_change_root_no_recreate(self): 148 | old_containers = self.run_up(self.cfg) 149 | 150 | self.cfg['db']['environment'] = {'NEW_VAR': '1'} 151 | new_containers = self.run_up(self.cfg, allow_recreate=False) 152 | 153 | self.assertEqual(new_containers - old_containers, set()) 154 | 155 | 156 | def converge(service, 157 | allow_recreate=True, 158 | smart_recreate=False, 159 | insecure_registry=False, 160 | do_build=True): 161 | """ 162 | If a container for this service doesn't exist, create and start one. If there are 163 | any, stop them, create+start new ones, and remove the old containers. 164 | """ 165 | plan = service.convergence_plan( 166 | allow_recreate=allow_recreate, 167 | smart_recreate=smart_recreate, 168 | ) 169 | 170 | return service.execute_convergence_plan( 171 | plan, 172 | insecure_registry=insecure_registry, 173 | do_build=do_build, 174 | timeout=0.1, 175 | ) 176 | 177 | 178 | class ServiceStateTest(DockerClientTestCase): 179 | """Test cases for Service.convergence_plan.""" 180 | 181 | def test_trigger_create(self): 182 | web = self.create_service('web') 183 | self.assertEqual(('create', []), web.convergence_plan(smart_recreate=True)) 184 | 185 | def test_trigger_noop(self): 186 | web = self.create_service('web') 187 | container = web.create_container() 188 | web.start() 189 | 190 | web = self.create_service('web') 191 | self.assertEqual(('noop', [container]), web.convergence_plan(smart_recreate=True)) 192 | 193 | def test_trigger_start(self): 194 | options = dict(command=["top"]) 195 | 196 | web = self.create_service('web', **options) 197 | web.scale(2) 198 | 199 | containers = web.containers(stopped=True) 200 | containers[0].stop() 201 | containers[0].inspect() 202 | 203 | self.assertEqual([c.is_running for c in containers], [False, True]) 204 | 205 | web 
= self.create_service('web', **options) 206 | self.assertEqual( 207 | ('start', containers[0:1]), 208 | web.convergence_plan(smart_recreate=True), 209 | ) 210 | 211 | def test_trigger_recreate_with_config_change(self): 212 | web = self.create_service('web', command=["top"]) 213 | container = web.create_container() 214 | 215 | web = self.create_service('web', command=["top", "-d", "1"]) 216 | self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True)) 217 | 218 | def test_trigger_recreate_with_image_change(self): 219 | repo = 'composetest_myimage' 220 | tag = 'latest' 221 | image = '{}:{}'.format(repo, tag) 222 | 223 | image_id = self.client.images(name='busybox')[0]['Id'] 224 | self.client.tag(image_id, repository=repo, tag=tag) 225 | 226 | try: 227 | web = self.create_service('web', image=image) 228 | container = web.create_container() 229 | 230 | # update the image 231 | c = self.client.create_container(image, ['touch', '/hello.txt']) 232 | self.client.commit(c, repository=repo, tag=tag) 233 | self.client.remove_container(c) 234 | 235 | web = self.create_service('web', image=image) 236 | self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True)) 237 | 238 | finally: 239 | self.client.remove_image(image) 240 | 241 | def test_trigger_recreate_with_build(self): 242 | context = tempfile.mkdtemp() 243 | base_image = "FROM busybox\nLABEL com.docker.compose.test_image=true\n" 244 | 245 | try: 246 | dockerfile = os.path.join(context, 'Dockerfile') 247 | 248 | with open(dockerfile, 'w') as f: 249 | f.write(base_image) 250 | 251 | web = self.create_service('web', build=context) 252 | container = web.create_container() 253 | 254 | with open(dockerfile, 'w') as f: 255 | f.write(base_image + 'CMD echo hello world\n') 256 | web.build() 257 | 258 | web = self.create_service('web', build=context) 259 | self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True)) 260 | finally: 261 | 
shutil.rmtree(context) 262 | 263 | 264 | class ConfigHashTest(DockerClientTestCase): 265 | def test_no_config_hash_when_one_off(self): 266 | web = self.create_service('web') 267 | container = web.create_container(one_off=True) 268 | self.assertNotIn(LABEL_CONFIG_HASH, container.labels) 269 | 270 | def test_no_config_hash_when_overriding_options(self): 271 | web = self.create_service('web') 272 | container = web.create_container(environment={'FOO': '1'}) 273 | self.assertNotIn(LABEL_CONFIG_HASH, container.labels) 274 | 275 | def test_config_hash_with_custom_labels(self): 276 | web = self.create_service('web', labels={'foo': '1'}) 277 | container = converge(web)[0] 278 | self.assertIn(LABEL_CONFIG_HASH, container.labels) 279 | self.assertIn('foo', container.labels) 280 | 281 | def test_config_hash_sticks_around(self): 282 | web = self.create_service('web', command=["top"]) 283 | container = converge(web)[0] 284 | self.assertIn(LABEL_CONFIG_HASH, container.labels) 285 | 286 | web = self.create_service('web', command=["top", "-d", "1"]) 287 | container = converge(web)[0] 288 | self.assertIn(LABEL_CONFIG_HASH, container.labels) 289 | -------------------------------------------------------------------------------- /docs/yml.md: -------------------------------------------------------------------------------- 1 | 10 | 11 | 12 | # docker-compose.yml reference 13 | 14 | Each service defined in `docker-compose.yml` must specify exactly one of 15 | `image` or `build`. Other keys are optional, and are analogous to their 16 | `docker run` command-line counterparts. 17 | 18 | As with `docker run`, options specified in the Dockerfile (e.g., `CMD`, 19 | `EXPOSE`, `VOLUME`, `ENV`) are respected by default - you don't need to 20 | specify them again in `docker-compose.yml`. 21 | 22 | ### image 23 | 24 | Tag or partial image ID. Can be local or remote - Compose will attempt to 25 | pull if it doesn't exist locally. 
26 | 27 | image: ubuntu 28 | image: orchardup/postgresql 29 | image: a4bc65fd 30 | 31 | ### build 32 | 33 | Path to a directory containing a Dockerfile. When the value supplied is a 34 | relative path, it is interpreted as relative to the location of the yml file 35 | itself. This directory is also the build context that is sent to the Docker daemon. 36 | 37 | Compose will build and tag it with a generated name, and use that image thereafter. 38 | 39 | build: /path/to/build/dir 40 | 41 | ### dockerfile 42 | 43 | Alternate Dockerfile. 44 | 45 | Compose will use an alternate file to build with. 46 | 47 | dockerfile: Dockerfile-alternate 48 | 49 | ### command 50 | 51 | Override the default command. 52 | 53 | command: bundle exec thin -p 3000 54 | 55 | 56 | ### links 57 | 58 | Link to containers in another service. Either specify both the service name and 59 | the link alias (`SERVICE:ALIAS`), or just the service name (which will also be 60 | used for the alias). 61 | 62 | links: 63 | - db 64 | - db:database 65 | - redis 66 | 67 | An entry with the alias' name will be created in `/etc/hosts` inside containers 68 | for this service, e.g: 69 | 70 | 172.17.2.186 db 71 | 172.17.2.186 database 72 | 172.17.2.187 redis 73 | 74 | Environment variables will also be created - see the [environment variable 75 | reference](env.md) for details. 76 | 77 | ### external_links 78 | 79 | Link to containers started outside this `docker-compose.yml` or even outside 80 | of Compose, especially for containers that provide shared or common services. 81 | `external_links` follow semantics similar to `links` when specifying both the 82 | container name and the link alias (`CONTAINER:ALIAS`). 83 | 84 | external_links: 85 | - redis_1 86 | - project_db_1:mysql 87 | - project_db_1:postgresql 88 | 89 | ### extra_hosts 90 | 91 | Add hostname mappings. Use the same values as the docker client `--add-host` parameter. 
92 | 93 | extra_hosts: 94 | - "somehost:162.242.195.82" 95 | - "otherhost:50.31.209.229" 96 | 97 | An entry with the ip address and hostname will be created in `/etc/hosts` inside containers for this service, e.g: 98 | 99 | 162.242.195.82 somehost 100 | 50.31.209.229 otherhost 101 | 102 | ### ports 103 | 104 | Expose ports. Either specify both ports (`HOST:CONTAINER`), or just the container 105 | port (a random host port will be chosen). 106 | 107 | > **Note:** When mapping ports in the `HOST:CONTAINER` format, you may experience 108 | > erroneous results when using a container port lower than 60, because YAML will 109 | > parse numbers in the format `xx:yy` as sexagesimal (base 60). For this reason, 110 | > we recommend always explicitly specifying your port mappings as strings. 111 | 112 | ports: 113 | - "3000" 114 | - "8000:8000" 115 | - "49100:22" 116 | - "127.0.0.1:8001:8001" 117 | 118 | ### expose 119 | 120 | Expose ports without publishing them to the host machine - they'll only be 121 | accessible to linked services. Only the internal port can be specified. 122 | 123 | expose: 124 | - "3000" 125 | - "8000" 126 | 127 | ### volumes 128 | 129 | Mount paths as volumes, optionally specifying a path on the host machine 130 | (`HOST:CONTAINER`), or an access mode (`HOST:CONTAINER:ro`). 131 | 132 | volumes: 133 | - /var/lib/mysql 134 | - cache/:/tmp/cache 135 | - ~/configs:/etc/configs/:ro 136 | 137 | ### volumes_from 138 | 139 | Mount all of the volumes from another service or container. 140 | 141 | volumes_from: 142 | - service_name 143 | - container_name 144 | 145 | ### environment 146 | 147 | Add environment variables. You can use either an array or a dictionary. 148 | 149 | Environment variables with only a key are resolved to their values on the 150 | machine Compose is running on, which can be helpful for secret or host-specific values. 
151 | 152 | environment: 153 | RACK_ENV: development 154 | SESSION_SECRET: 155 | 156 | environment: 157 | - RACK_ENV=development 158 | - SESSION_SECRET 159 | 160 | ### env_file 161 | 162 | Add environment variables from a file. Can be a single value or a list. 163 | 164 | If you have specified a Compose file with `docker-compose -f FILE`, paths in 165 | `env_file` are relative to the directory that file is in. 166 | 167 | Environment variables specified in `environment` override these values. 168 | 169 | env_file: .env 170 | 171 | env_file: 172 | - ./common.env 173 | - ./apps/web.env 174 | - /opt/secrets.env 175 | 176 | Compose expects each line in an env file to be in `VAR=VAL` format. Lines 177 | beginning with `#` (i.e. comments) are ignored, as are blank lines. 178 | 179 | # Set Rails/Rack environment 180 | RACK_ENV=development 181 | 182 | ### extends 183 | 184 | Extend another service, in the current file or another, optionally overriding 185 | configuration. 186 | 187 | Here's a simple example. Suppose we have 2 files - **common.yml** and 188 | **development.yml**. We can use `extends` to define a service in 189 | **development.yml** which uses configuration defined in **common.yml**: 190 | 191 | **common.yml** 192 | 193 | webapp: 194 | build: ./webapp 195 | environment: 196 | - DEBUG=false 197 | - SEND_EMAILS=false 198 | 199 | **development.yml** 200 | 201 | web: 202 | extends: 203 | file: common.yml 204 | service: webapp 205 | ports: 206 | - "8000:8000" 207 | links: 208 | - db 209 | environment: 210 | - DEBUG=true 211 | db: 212 | image: postgres 213 | 214 | Here, the `web` service in **development.yml** inherits the configuration of 215 | the `webapp` service in **common.yml** - the `build` and `environment` keys - 216 | and adds `ports` and `links` configuration. It overrides one of the defined 217 | environment variables (DEBUG) with a new value, and the other one 218 | (SEND_EMAILS) is left untouched. 

For more on `extends`, see the [tutorial](extends.md#example) and
[reference](extends.md#reference).

### labels

Add metadata to containers using [Docker labels](http://docs.docker.com/userguide/labels-custom-metadata/). You can use either an array or a dictionary.

It's recommended that you use reverse-DNS notation to prevent your labels from conflicting with those used by other software.

    labels:
      com.example.description: "Accounting webapp"
      com.example.department: "Finance"
      com.example.label-with-empty-value: ""

    labels:
      - "com.example.description=Accounting webapp"
      - "com.example.department=Finance"
      - "com.example.label-with-empty-value"

### log driver

Specify a logging driver for the service's containers, as with the ``--log-driver`` option for docker run ([documented here](http://docs.docker.com/reference/run/#logging-drivers-log-driver)).

Allowed values are currently ``json-file``, ``syslog`` and ``none``. The list will change over time as more drivers are added to the Docker engine.

The default value is json-file.

    log_driver: "json-file"
    log_driver: "syslog"
    log_driver: "none"

### net

Networking mode. Use the same values as the docker client `--net` parameter.

    net: "bridge"
    net: "none"
    net: "container:[name or id]"
    net: "host"

### pid

    pid: "host"

Sets the PID mode to the host PID mode. This turns on sharing of the PID
address space between the container and the host operating system. Containers
launched with this flag will be able to access and manipulate other
containers in the bare-metal machine's namespace and vice versa.

### dns

Custom DNS servers. Can be a single value or a list.
272 | 273 | dns: 8.8.8.8 274 | dns: 275 | - 8.8.8.8 276 | - 9.9.9.9 277 | 278 | ### cap_add, cap_drop 279 | 280 | Add or drop container capabilities. 281 | See `man 7 capabilities` for a full list. 282 | 283 | cap_add: 284 | - ALL 285 | 286 | cap_drop: 287 | - NET_ADMIN 288 | - SYS_ADMIN 289 | 290 | ### dns_search 291 | 292 | Custom DNS search domains. Can be a single value or a list. 293 | 294 | dns_search: example.com 295 | dns_search: 296 | - dc1.example.com 297 | - dc2.example.com 298 | 299 | ### devices 300 | 301 | List of device mappings. Uses the same format as the `--device` docker 302 | client create option. 303 | 304 | devices: 305 | - "/dev/ttyUSB0:/dev/ttyUSB0" 306 | 307 | ### security_opt 308 | 309 | Override the default labeling scheme for each container. 310 | 311 | security_opt: 312 | - label:user:USER 313 | - label:role:ROLE 314 | 315 | ### working\_dir, entrypoint, user, hostname, domainname, mac\_address, mem\_limit, privileged, restart, stdin\_open, tty, cpu\_shares, cpuset, read\_only 316 | 317 | Each of these is a single value, analogous to its 318 | [docker run](https://docs.docker.com/reference/run/) counterpart. 
319 | 320 | cpu_shares: 73 321 | cpuset: 0,1 322 | 323 | working_dir: /code 324 | entrypoint: /code/entrypoint.sh 325 | user: postgresql 326 | 327 | hostname: foo 328 | domainname: foo.com 329 | 330 | mac_address: 02:42:ac:11:65:43 331 | 332 | mem_limit: 1000000000 333 | privileged: true 334 | 335 | restart: always 336 | 337 | stdin_open: true 338 | tty: true 339 | read_only: true 340 | 341 | 342 | ## Compose documentation 343 | 344 | - [User guide](/) 345 | - [Installing Compose](install.md) 346 | - [Get started with Django](django.md) 347 | - [Get started with Rails](rails.md) 348 | - [Get started with Wordpress](wordpress.md) 349 | - [Command line reference](cli.md) 350 | - [Compose environment variables](env.md) 351 | - [Compose command line completion](completion.md) 352 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 
23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. 
For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. 
If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. 
You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | Copyright 2014 Docker, Inc. 180 | 181 | Licensed under the Apache License, Version 2.0 (the "License"); 182 | you may not use this file except in compliance with the License. 
183 | You may obtain a copy of the License at 184 | 185 | http://www.apache.org/licenses/LICENSE-2.0 186 | 187 | Unless required by applicable law or agreed to in writing, software 188 | distributed under the License is distributed on an "AS IS" BASIS, 189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 190 | See the License for the specific language governing permissions and 191 | limitations under the License. 192 | --------------------------------------------------------------------------------