├── .coveragerc ├── .dockerignore ├── .editorconfig ├── .github └── workflows │ ├── ci.yml │ └── release.yml ├── .gitignore ├── .readthedocs.yml ├── CONTRIBUTING.md ├── Dockerfile ├── Dockerfile-docs ├── LICENSE ├── MAINTAINERS ├── Makefile ├── README.md ├── docker ├── __init__.py ├── api │ ├── __init__.py │ ├── build.py │ ├── client.py │ ├── config.py │ ├── container.py │ ├── daemon.py │ ├── exec_api.py │ ├── image.py │ ├── network.py │ ├── plugin.py │ ├── secret.py │ ├── service.py │ ├── swarm.py │ └── volume.py ├── auth.py ├── client.py ├── constants.py ├── context │ ├── __init__.py │ ├── api.py │ ├── config.py │ └── context.py ├── credentials │ ├── __init__.py │ ├── constants.py │ ├── errors.py │ ├── store.py │ └── utils.py ├── errors.py ├── models │ ├── __init__.py │ ├── configs.py │ ├── containers.py │ ├── images.py │ ├── networks.py │ ├── nodes.py │ ├── plugins.py │ ├── resource.py │ ├── secrets.py │ ├── services.py │ ├── swarm.py │ └── volumes.py ├── tls.py ├── transport │ ├── __init__.py │ ├── basehttpadapter.py │ ├── npipeconn.py │ ├── npipesocket.py │ ├── sshconn.py │ └── unixconn.py ├── types │ ├── __init__.py │ ├── base.py │ ├── containers.py │ ├── daemon.py │ ├── healthcheck.py │ ├── networks.py │ ├── services.py │ └── swarm.py ├── utils │ ├── __init__.py │ ├── build.py │ ├── config.py │ ├── decorators.py │ ├── fnmatch.py │ ├── json_stream.py │ ├── ports.py │ ├── proxy.py │ ├── socket.py │ └── utils.py └── version.py ├── docs ├── _static │ └── custom.css ├── _templates │ └── page.html ├── api.rst ├── change-log.md ├── client.rst ├── conf.py ├── configs.rst ├── containers.rst ├── favicon_whale.png ├── images.rst ├── index.rst ├── networks.rst ├── nodes.rst ├── plugins.rst ├── secrets.rst ├── services.rst ├── swarm.rst ├── tls.rst ├── user_guides │ ├── index.rst │ ├── multiplex.rst │ └── swarm_services.md └── volumes.rst ├── pyproject.toml ├── pytest.ini ├── scripts ├── release.sh └── versions.py ├── tests ├── Dockerfile ├── Dockerfile-dind-certs ├── 
Dockerfile-ssh-dind ├── __init__.py ├── gpg-keys │ ├── ownertrust │ └── secret ├── helpers.py ├── integration │ ├── __init__.py │ ├── api_build_test.py │ ├── api_client_test.py │ ├── api_config_test.py │ ├── api_container_test.py │ ├── api_exec_test.py │ ├── api_healthcheck_test.py │ ├── api_image_test.py │ ├── api_network_test.py │ ├── api_plugin_test.py │ ├── api_secret_test.py │ ├── api_service_test.py │ ├── api_swarm_test.py │ ├── api_volume_test.py │ ├── base.py │ ├── client_test.py │ ├── conftest.py │ ├── context_api_test.py │ ├── credentials │ │ ├── __init__.py │ │ ├── create_gpg_key.sh │ │ ├── store_test.py │ │ └── utils_test.py │ ├── errors_test.py │ ├── models_containers_test.py │ ├── models_images_test.py │ ├── models_networks_test.py │ ├── models_nodes_test.py │ ├── models_resources_test.py │ ├── models_services_test.py │ ├── models_swarm_test.py │ ├── models_volumes_test.py │ ├── regression_test.py │ └── testdata │ │ └── dummy-plugin │ │ ├── config.json │ │ └── rootfs │ │ └── dummy │ │ └── file.txt ├── ssh │ ├── __init__.py │ ├── api_build_test.py │ ├── base.py │ ├── config │ │ ├── client │ │ │ ├── id_rsa │ │ │ └── id_rsa.pub │ │ └── server │ │ │ ├── known_ed25519 │ │ │ ├── known_ed25519.pub │ │ │ ├── sshd_config │ │ │ ├── unknown_ed25519 │ │ │ └── unknown_ed25519.pub │ └── connect_test.py └── unit │ ├── __init__.py │ ├── api_build_test.py │ ├── api_container_test.py │ ├── api_exec_test.py │ ├── api_image_test.py │ ├── api_network_test.py │ ├── api_test.py │ ├── api_volume_test.py │ ├── auth_test.py │ ├── client_test.py │ ├── context_test.py │ ├── dockertypes_test.py │ ├── errors_test.py │ ├── fake_api.py │ ├── fake_api_client.py │ ├── fake_stat.py │ ├── models_configs_test.py │ ├── models_containers_test.py │ ├── models_images_test.py │ ├── models_networks_test.py │ ├── models_resources_test.py │ ├── models_secrets_test.py │ ├── models_services_test.py │ ├── sshadapter_test.py │ ├── swarm_test.py │ ├── testdata │ └── certs │ │ ├── ca.pem │ │ ├── 
cert.pem │ │ └── key.pem │ ├── types_containers_test.py │ ├── utils_build_test.py │ ├── utils_config_test.py │ ├── utils_json_stream_test.py │ ├── utils_proxy_test.py │ └── utils_test.py └── tox.ini /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | source = docker 4 | 5 | [report] 6 | exclude_lines = 7 | if __name__ == .__main__.: 8 | 9 | [html] 10 | directory = html 11 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .git/ 2 | 3 | build 4 | dist 5 | *.egg-info 6 | *.egg/ 7 | *.pyc 8 | *.swp 9 | 10 | .tox 11 | .coverage 12 | html/* 13 | __pycache__ 14 | 15 | # Compiled Documentation 16 | docs/_build 17 | -------------------------------------------------------------------------------- /.editorconfig: -------------------------------------------------------------------------------- 1 | root = true 2 | 3 | [*] 4 | indent_style = space 5 | indent_size = 4 6 | insert_final_newline = true 7 | trim_trailing_whitespace = true 8 | max_line_length = 80 9 | 10 | [*.md] 11 | trim_trailing_whitespace = false 12 | 13 | [*.{yaml,yml}] 14 | indent_size = 2 15 | -------------------------------------------------------------------------------- /.github/workflows/ci.yml: -------------------------------------------------------------------------------- 1 | name: Python package 2 | 3 | on: [push, pull_request] 4 | 5 | env: 6 | DOCKER_BUILDKIT: '1' 7 | FORCE_COLOR: 1 8 | 9 | jobs: 10 | lint: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - uses: actions/checkout@v4 14 | - uses: actions/setup-python@v5 15 | with: 16 | python-version: '3.x' 17 | - run: pip install -U ruff==0.1.8 18 | - name: Run ruff 19 | run: ruff docker tests 20 | 21 | build: 22 | runs-on: ubuntu-22.04 23 | steps: 24 | - uses: actions/checkout@v4 25 | - uses: actions/setup-python@v5 26 | with: 27 | python-version: 
'3.x' 28 | - run: pip3 install build && python -m build . 29 | - uses: actions/upload-artifact@v4 30 | with: 31 | name: dist 32 | path: dist 33 | 34 | unit-tests: 35 | runs-on: ubuntu-latest 36 | strategy: 37 | matrix: 38 | python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"] 39 | 40 | steps: 41 | - uses: actions/checkout@v4 42 | - name: Set up Python ${{ matrix.python-version }} 43 | uses: actions/setup-python@v5 44 | with: 45 | python-version: ${{ matrix.python-version }} 46 | allow-prereleases: true 47 | - name: Install dependencies 48 | run: | 49 | python3 -m pip install --upgrade pip 50 | pip3 install '.[ssh,dev]' 51 | - name: Run unit tests 52 | run: | 53 | docker logout 54 | rm -rf ~/.docker 55 | py.test -v --cov=docker tests/unit 56 | 57 | integration-tests: 58 | runs-on: ubuntu-latest 59 | strategy: 60 | matrix: 61 | variant: [ "integration-dind", "integration-dind-ssl", "integration-dind-ssh" ] 62 | 63 | steps: 64 | - uses: actions/checkout@v4 65 | with: 66 | fetch-depth: 0 67 | fetch-tags: true 68 | - name: make ${{ matrix.variant }} 69 | run: | 70 | docker logout 71 | rm -rf ~/.docker 72 | make ${{ matrix.variant }} 73 | -------------------------------------------------------------------------------- /.github/workflows/release.yml: -------------------------------------------------------------------------------- 1 | name: Release 2 | 3 | on: 4 | workflow_dispatch: 5 | inputs: 6 | tag: 7 | description: "Release Tag WITHOUT `v` Prefix (e.g. 6.0.0)" 8 | required: true 9 | dry-run: 10 | description: 'Dry run' 11 | required: false 12 | type: boolean 13 | default: true 14 | 15 | env: 16 | DOCKER_BUILDKIT: '1' 17 | FORCE_COLOR: 1 18 | 19 | jobs: 20 | publish: 21 | runs-on: ubuntu-22.04 22 | steps: 23 | - uses: actions/checkout@v4 24 | 25 | - uses: actions/setup-python@v5 26 | with: 27 | python-version: '3.x' 28 | 29 | - name: Generate Package 30 | run: | 31 | pip3 install build 32 | python -m build . 
33 | env: 34 | # This is also supported by Hatch; see 35 | # https://github.com/ofek/hatch-vcs#version-source-environment-variables 36 | SETUPTOOLS_SCM_PRETEND_VERSION: ${{ inputs.tag }} 37 | 38 | - name: Publish to PyPI 39 | uses: pypa/gh-action-pypi-publish@release/v1 40 | if: '! inputs.dry-run' 41 | with: 42 | password: ${{ secrets.PYPI_API_TOKEN }} 43 | 44 | - name: Create GitHub release 45 | uses: ncipollo/release-action@v1 46 | if: '! inputs.dry-run' 47 | with: 48 | artifacts: "dist/*" 49 | generateReleaseNotes: true 50 | draft: true 51 | commit: ${{ github.sha }} 52 | token: ${{ secrets.GITHUB_TOKEN }} 53 | tag: ${{ inputs.tag }} 54 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | dist 3 | *.egg-info 4 | *.egg/ 5 | *.pyc 6 | *.swp 7 | 8 | .tox 9 | .coverage 10 | html/* 11 | 12 | # Compiled Documentation 13 | _build/ 14 | README.rst 15 | 16 | # setuptools_scm 17 | _version.py 18 | 19 | env/ 20 | venv/ 21 | .idea/ 22 | *.iml 23 | -------------------------------------------------------------------------------- /.readthedocs.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | sphinx: 4 | configuration: docs/conf.py 5 | 6 | build: 7 | os: ubuntu-22.04 8 | tools: 9 | python: '3.12' 10 | 11 | python: 12 | install: 13 | - method: pip 14 | path: . 15 | extra_requirements: 16 | - ssh 17 | - docs 18 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing guidelines 2 | 3 | See the [Docker contributing guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md). 4 | The following is specific to Docker SDK for Python. 5 | 6 | Thank you for your interest in the project. We look forward to your 7 | contribution. 
In order to make the process as fast and streamlined as possible, 8 | here is a set of guidelines we recommend you follow. 9 | 10 | ## Reporting issues 11 | 12 | We do our best to ensure bugs don't creep up in our releases, but some may 13 | still slip through. If you encounter one while using the SDK, please 14 | create an issue 15 | [in the tracker](https://github.com/docker/docker-py/issues/new) with 16 | the following information: 17 | 18 | - SDK version, Docker version and python version 19 | ``` 20 | pip freeze | grep docker && python --version && docker version 21 | ``` 22 | - OS, distribution and OS version 23 | - The issue you're encountering including a stacktrace if applicable 24 | - If possible, steps or a code snippet to reproduce the issue 25 | 26 | To save yourself time, please be sure to check our 27 | [documentation](https://docker-py.readthedocs.io/) and use the 28 | [search function](https://github.com/docker/docker-py/search) to find 29 | out if it has already been addressed, or is currently being looked at. 30 | 31 | ## Submitting pull requests 32 | 33 | Do you have a fix for an existing issue, or want to add a new functionality 34 | to the SDK? We happily welcome pull requests. Here are a few tips to 35 | make the review process easier on both the maintainers and yourself. 36 | 37 | ### 1. Sign your commits 38 | 39 | Please refer to the ["Sign your work"](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work) 40 | paragraph in the Docker contribution guidelines. 41 | 42 | ### 2. Make sure tests pass 43 | 44 | Before we can review your pull request, please ensure that nothing has been 45 | broken by your changes by running the test suite. You can do so simply by 46 | running `make test` in the project root. This also includes coding style using 47 | `ruff` 48 | 49 | ### 3. Write clear, self-contained commits 50 | 51 | Your commit message should be concise and describe the nature of the change. 
If you're looking to contribute to Docker SDK for Python but are new to the
To get the source code and run the unit tests, run:
8 | 9 | ARG VERSION=0.0.0.dev0 10 | RUN --mount=type=cache,target=/cache/pip \ 11 | PIP_CACHE_DIR=/cache/pip \ 12 | SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \ 13 | pip install .[ssh] 14 | -------------------------------------------------------------------------------- /Dockerfile-docs: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | ARG PYTHON_VERSION=3.12 4 | 5 | FROM python:${PYTHON_VERSION} 6 | 7 | ARG uid=1000 8 | ARG gid=1000 9 | 10 | RUN addgroup --gid $gid sphinx \ 11 | && useradd --uid $uid --gid $gid -M sphinx 12 | 13 | WORKDIR /src 14 | COPY . . 15 | 16 | ARG VERSION=0.0.0.dev0 17 | RUN --mount=type=cache,target=/cache/pip \ 18 | PIP_CACHE_DIR=/cache/pip \ 19 | SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \ 20 | pip install .[ssh,docs] 21 | 22 | USER sphinx 23 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | # Docker SDK for Python maintainers file 2 | # 3 | # This file describes who runs the docker/docker-py project and how. 4 | # This is a living document - if you see something out of date or missing, speak up! 5 | # 6 | # It is structured to be consumable by both humans and programs. 7 | # To extract its contents programmatically, use any TOML-compliant parser. 8 | # 9 | # This file is compiled into the MAINTAINERS file in docker/opensource. 10 | # 11 | [Org] 12 | [Org."Core maintainers"] 13 | people = [ 14 | "glours", 15 | "milas", 16 | ] 17 | [Org.Alumni] 18 | people = [ 19 | "aiordache", 20 | "aanand", 21 | "bfirsh", 22 | "dnephin", 23 | "mnowster", 24 | "mpetazzoni", 25 | "shin-", 26 | "ulyssessouza", 27 | ] 28 | 29 | [people] 30 | 31 | # A reference list of all people associated with the project. 32 | # All other sections should refer to people by their canonical key 33 | # in the people section. 
34 | 35 | # ADD YOURSELF HERE IN ALPHABETICAL ORDER 36 | 37 | [people.aanand] 38 | Name = "Aanand Prasad" 39 | Email = "aanand@docker.com" 40 | GitHub = "aanand" 41 | 42 | [people.aiordache] 43 | Name = "Anca Iordache" 44 | Email = "anca.iordache@docker.com" 45 | GitHub = "aiordache" 46 | 47 | [people.bfirsh] 48 | Name = "Ben Firshman" 49 | Email = "b@fir.sh" 50 | GitHub = "bfirsh" 51 | 52 | [people.dnephin] 53 | Name = "Daniel Nephin" 54 | Email = "dnephin@gmail.com" 55 | GitHub = "dnephin" 56 | 57 | [people.glours] 58 | Name = "Guillaume Lours" 59 | Email = "705411+glours@users.noreply.github.com" 60 | GitHub = "glours" 61 | 62 | [people.milas] 63 | Name = "Milas Bowman" 64 | Email = "devnull@milas.dev" 65 | GitHub = "milas" 66 | 67 | [people.mnowster] 68 | Name = "Mazz Mosley" 69 | Email = "mazz@houseofmnowster.com" 70 | GitHub = "mnowster" 71 | 72 | [people.mpetazzoni] 73 | Name = "Maxime Petazzoni" 74 | Email = "maxime.petazzoni@bulix.org" 75 | GitHub = "mpetazzoni" 76 | 77 | [people.shin-] 78 | Name = "Joffrey F" 79 | Email = "joffrey@docker.com" 80 | GitHub = "shin-" 81 | 82 | [people.ulyssessouza] 83 | Name = "Ulysses Domiciano Souza" 84 | Email = "ulysses.souza@docker.com" 85 | GitHub = "ulyssessouza" 86 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Docker SDK for Python 2 | 3 | [![Build Status](https://github.com/docker/docker-py/actions/workflows/ci.yml/badge.svg)](https://github.com/docker/docker-py/actions/workflows/ci.yml) 4 | 5 | A Python library for the Docker Engine API. It lets you do anything the `docker` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc. 6 | 7 | ## Installation 8 | 9 | The latest stable version [is available on PyPI](https://pypi.python.org/pypi/docker/). 
Install with pip: 10 | 11 | pip install docker 12 | 13 | > Older versions (< 6.0) required installing `docker[tls]` for SSL/TLS support. 14 | > This is no longer necessary and is a no-op, but is supported for backwards compatibility. 15 | 16 | ## Usage 17 | 18 | Connect to Docker using the default socket or the configuration in your environment: 19 | 20 | ```python 21 | import docker 22 | client = docker.from_env() 23 | ``` 24 | 25 | You can run containers: 26 | 27 | ```python 28 | >>> client.containers.run("ubuntu:latest", "echo hello world") 29 | 'hello world\n' 30 | ``` 31 | 32 | You can run containers in the background: 33 | 34 | ```python 35 | >>> client.containers.run("bfirsh/reticulate-splines", detach=True) 36 | 37 | ``` 38 | 39 | You can manage containers: 40 | 41 | ```python 42 | >>> client.containers.list() 43 | [, , ...] 44 | 45 | >>> container = client.containers.get('45e6d2de7c54') 46 | 47 | >>> container.attrs['Config']['Image'] 48 | "bfirsh/reticulate-splines" 49 | 50 | >>> container.logs() 51 | "Reticulating spline 1...\n" 52 | 53 | >>> container.stop() 54 | ``` 55 | 56 | You can stream logs: 57 | 58 | ```python 59 | >>> for line in container.logs(stream=True): 60 | ... print(line.strip()) 61 | Reticulating spline 2... 62 | Reticulating spline 3... 63 | ... 64 | ``` 65 | 66 | You can manage images: 67 | 68 | ```python 69 | >>> client.images.pull('nginx') 70 | 71 | 72 | >>> client.images.list() 73 | [, , ...] 74 | ``` 75 | 76 | [Read the full documentation](https://docker-py.readthedocs.io) to see everything you can do. 
class ConfigApiMixin:
    """Mixin exposing the config-related endpoints of the Docker Engine API.

    All endpoints here require API version 1.30 or later.
    """

    @utils.minimum_version('1.30')
    def create_config(self, name, data, labels=None, templating=None):
        """
        Create a config

        Args:
            name (string): Name of the config
            data (bytes): Config data to be stored
            labels (dict): A mapping of labels to assign to the config
            templating (dict): dictionary containing the name of the
                templating driver to be used expressed as
                ``{ name: <driver_name> }``

        Returns (dict): ID of the newly created config
        """
        # The API expects the payload as base64-encoded text.
        raw = data if isinstance(data, bytes) else data.encode('utf-8')
        encoded = base64.b64encode(raw).decode('ascii')
        body = {
            'Data': encoded,
            'Name': name,
            'Labels': labels,
            'Templating': templating,
        }
        url = self._url('/configs/create')
        return self._result(self._post_json(url, data=body), True)

    @utils.minimum_version('1.30')
    @utils.check_resource('id')
    def inspect_config(self, id):
        """
        Retrieve config metadata

        Args:
            id (string): Full ID of the config to inspect

        Returns (dict): A dictionary of metadata

        Raises:
            :py:class:`docker.errors.NotFound`
                if no config with that ID exists
        """
        return self._result(self._get(self._url('/configs/{0}', id)), True)

    @utils.minimum_version('1.30')
    @utils.check_resource('id')
    def remove_config(self, id):
        """
        Remove a config

        Args:
            id (string): Full ID of the config to remove

        Returns (boolean): True if successful

        Raises:
            :py:class:`docker.errors.NotFound`
                if no config with that ID exists
        """
        response = self._delete(self._url('/configs/{0}', id))
        # Raises NotFound/APIError on a non-2xx status.
        self._raise_for_status(response)
        return True

    @utils.minimum_version('1.30')
    def configs(self, filters=None):
        """
        List configs

        Args:
            filters (dict): A map of filters to process on the configs
                list. Available filters: ``names``

        Returns (list): A list of configs
        """
        params = {}
        if filters:
            params['filters'] = utils.convert_filters(filters)
        return self._result(self._get(self._url('/configs'), params=params), True)
class SecretApiMixin:
    """Mixin exposing the secret-related endpoints of the Docker Engine API.

    All endpoints here require API version 1.25 or later; custom secret
    drivers additionally require 1.31+.
    """

    @utils.minimum_version('1.25')
    def create_secret(self, name, data, labels=None, driver=None):
        """
        Create a secret

        Args:
            name (string): Name of the secret
            data (bytes): Secret data to be stored
            labels (dict): A mapping of labels to assign to the secret
            driver (DriverConfig): A custom driver configuration. If
                unspecified, the default ``internal`` driver will be used

        Returns (dict): ID of the newly created secret

        Raises:
            :py:class:`docker.errors.InvalidVersion`
                if ``driver`` is given but the API version is below 1.31
        """
        if not isinstance(data, bytes):
            data = data.encode('utf-8')

        # The API expects the payload as base64-encoded text.
        data = base64.b64encode(data)
        data = data.decode('ascii')
        body = {
            'Data': data,
            'Name': name,
            'Labels': labels
        }

        if driver is not None:
            if utils.version_lt(self._version, '1.31'):
                # The guard rejects versions *below* 1.31, so the feature
                # requires >= 1.31 (the old message incorrectly said "> 1.31").
                raise errors.InvalidVersion(
                    'Secret driver is only available for API version >= 1.31'
                )

            body['Driver'] = driver

        url = self._url('/secrets/create')
        return self._result(
            self._post_json(url, data=body), True
        )

    @utils.minimum_version('1.25')
    @utils.check_resource('id')
    def inspect_secret(self, id):
        """
        Retrieve secret metadata

        Args:
            id (string): Full ID of the secret to inspect

        Returns (dict): A dictionary of metadata

        Raises:
            :py:class:`docker.errors.NotFound`
                if no secret with that ID exists
        """
        url = self._url('/secrets/{0}', id)
        return self._result(self._get(url), True)

    @utils.minimum_version('1.25')
    @utils.check_resource('id')
    def remove_secret(self, id):
        """
        Remove a secret

        Args:
            id (string): Full ID of the secret to remove

        Returns (boolean): True if successful

        Raises:
            :py:class:`docker.errors.NotFound`
                if no secret with that ID exists
        """
        url = self._url('/secrets/{0}', id)
        res = self._delete(url)
        # Raises NotFound/APIError on a non-2xx status.
        self._raise_for_status(res)
        return True

    @utils.minimum_version('1.25')
    def secrets(self, filters=None):
        """
        List secrets

        Args:
            filters (dict): A map of filters to process on the secrets
                list. Available filters: ``names``

        Returns (list): A list of secrets
        """
        url = self._url('/secrets')
        params = {}
        if filters:
            params['filters'] = utils.convert_filters(filters)
        return self._result(self._get(url, params=params), True)
METAFILE = "meta.json"


def get_current_context_name():
    """Return the context name recorded in the Docker CLI config file.

    Falls back to ``"default"`` when no config file is found or when the
    file cannot be read/parsed.
    """
    docker_cfg_path = find_config_file()
    if not docker_cfg_path:
        return "default"
    try:
        with open(docker_cfg_path) as cfg:
            return json.load(cfg).get("currentContext", "default")
    except Exception:
        return "default"


def write_context_name_to_docker_config(name=None):
    """Persist ``name`` as ``currentContext`` in the Docker CLI config file.

    Passing ``None`` (or ``'default'``) clears the current-context entry.
    On failure, the exception is *returned* (not raised) so callers can
    decide how to report it.
    """
    if name == 'default':
        name = None
    docker_cfg_path = find_config_file()
    config = {}
    if docker_cfg_path:
        try:
            with open(docker_cfg_path) as cfg:
                config = json.load(cfg)
        except Exception as err:
            return err
    current_context = config.get("currentContext", None)
    if current_context and not name:
        del config["currentContext"]
    elif name:
        config["currentContext"] = name
    else:
        # Nothing to clear and nothing to set: leave the file untouched.
        return
    try:
        with open(docker_cfg_path, "w") as cfg:
            json.dump(config, cfg, indent=4)
    except Exception as err:
        return err


def get_context_id(name):
    """Return the stable context ID: the SHA-256 hex digest of its name."""
    return hashlib.sha256(name.encode('utf-8')).hexdigest()


def get_context_dir():
    """Return the directory holding context data, next to the CLI config."""
    base = os.path.dirname(find_config_file() or "")
    return os.path.join(base, "contexts")


def get_meta_dir(name=None):
    """Return the metadata directory, optionally for one named context."""
    meta_root = os.path.join(get_context_dir(), "meta")
    if not name:
        return meta_root
    return os.path.join(meta_root, get_context_id(name))


def get_meta_file(name):
    """Return the path of the metadata file for the named context."""
    return os.path.join(get_meta_dir(name), METAFILE)


def get_tls_dir(name=None, endpoint=""):
    """Return the TLS material directory, optionally per context/endpoint."""
    tls_root = os.path.join(get_context_dir(), "tls")
    if not name:
        return tls_root
    return os.path.join(tls_root, get_context_id(name), endpoint)


def get_context_host(path=None, tls=False):
    """Resolve ``path`` to a host URL, stripping the scheme prefix that
    parse_host adds to the default Unix socket."""
    host = utils.parse_host(path, IS_WINDOWS_PLATFORM, tls)
    if host == DEFAULT_UNIX_SOCKET and host.startswith("http+"):
        # remove http+ from default docker socket url
        host = host[len("http+"):]
    return host
class Store:
    """Interface to a ``docker-credential-*`` helper program, supporting
    the standard get/store/erase/list operations over its stdin/stdout
    JSON protocol.
    """

    def __init__(self, program, environment=None):
        """Create a store backed by the credential helper ``program``.

        ``program`` is the helper suffix (e.g. ``"pass"``); the full
        executable name is derived by prefixing ``docker-credential-``.
        ``environment`` optionally overrides environment variables passed
        to the helper process.
        """
        self.program = constants.PROGRAM_PREFIX + program
        self.exe = shutil.which(self.program)
        self.environment = environment
        if self.exe is None:
            # Defer the hard failure to first use; just warn at construction.
            warnings.warn(
                f'{self.program} not installed or not available in PATH',
                stacklevel=1,
            )

    def get(self, server):
        """Retrieve credentials for ``server``.

        Raises ``CredentialsNotFound`` when no credentials exist and
        ``StoreError`` on any other helper failure.
        """
        key = server if isinstance(server, bytes) else server.encode('utf-8')
        raw = self._execute('get', key)
        result = json.loads(raw.decode('utf-8'))

        # docker-credential-pass returns an (empty) object for unknown
        # servers, while other helpers exit non-zero; normalise both
        # cases to CredentialsNotFound.
        if result['Username'] == '' and result['Secret'] == '':
            raise errors.CredentialsNotFound(
                f'No matching credentials in {self.program}'
            )

        return result

    def store(self, server, username, secret):
        """Store credentials for ``server``. Raises ``StoreError`` on error."""
        payload = json.dumps({
            'ServerURL': server,
            'Username': username,
            'Secret': secret
        }).encode('utf-8')
        return self._execute('store', payload)

    def erase(self, server):
        """Erase credentials for ``server``. Raises ``StoreError`` on error."""
        key = server if isinstance(server, bytes) else server.encode('utf-8')
        self._execute('erase', key)

    def list(self):
        """List stored credentials. Requires v0.4.0+ of the helper."""
        return json.loads(self._execute('list', None).decode('utf-8'))

    def _execute(self, subcmd, data_input):
        # Run the helper with one subcommand, feeding data_input on stdin
        # and returning its stdout. All failures surface as StoreError.
        if self.exe is None:
            raise errors.StoreError(
                f'{self.program} not installed or not available in PATH'
            )
        env = create_environment_dict(self.environment)
        try:
            return subprocess.check_output(
                [self.exe, subcmd], input=data_input, env=env,
            )
        except subprocess.CalledProcessError as e:
            raise errors.process_store_error(e, self.program) from e
        except OSError as e:
            if e.errno == errno.ENOENT:
                raise errors.StoreError(
                    f'{self.program} not installed or not available in PATH'
                ) from e
            raise errors.StoreError(
                f'Unexpected OS error "{e.strerror}", errno={e.errno}'
            ) from e
import os 2 | 3 | 4 | def create_environment_dict(overrides): 5 | """ 6 | Create and return a copy of os.environ with the specified overrides 7 | """ 8 | result = os.environ.copy() 9 | result.update(overrides or {}) 10 | return result 11 | -------------------------------------------------------------------------------- /docker/models/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/docker/docker-py/526a9db743a80174aabb9b43dbb8b697b98ef497/docker/models/__init__.py -------------------------------------------------------------------------------- /docker/models/configs.py: -------------------------------------------------------------------------------- 1 | from ..api import APIClient 2 | from .resource import Collection, Model 3 | 4 | 5 | class Config(Model): 6 | """A config.""" 7 | id_attribute = 'ID' 8 | 9 | def __repr__(self): 10 | return f"<{self.__class__.__name__}: '{self.name}'>" 11 | 12 | @property 13 | def name(self): 14 | return self.attrs['Spec']['Name'] 15 | 16 | def remove(self): 17 | """ 18 | Remove this config. 19 | 20 | Raises: 21 | :py:class:`docker.errors.APIError` 22 | If config failed to remove. 23 | """ 24 | return self.client.api.remove_config(self.id) 25 | 26 | 27 | class ConfigCollection(Collection): 28 | """Configs on the Docker server.""" 29 | model = Config 30 | 31 | def create(self, **kwargs): 32 | obj = self.client.api.create_config(**kwargs) 33 | obj.setdefault("Spec", {})["Name"] = kwargs.get("name") 34 | return self.prepare_model(obj) 35 | create.__doc__ = APIClient.create_config.__doc__ 36 | 37 | def get(self, config_id): 38 | """ 39 | Get a config. 40 | 41 | Args: 42 | config_id (str): Config ID. 43 | 44 | Returns: 45 | (:py:class:`Config`): The config. 46 | 47 | Raises: 48 | :py:class:`docker.errors.NotFound` 49 | If the config does not exist. 50 | :py:class:`docker.errors.APIError` 51 | If the server returns an error. 
class Node(Model):
    """A single node participating in a swarm."""
    id_attribute = 'ID'

    @property
    def version(self):
        """
        The version number of the service. If this is not the same as the
        server, the :py:meth:`update` function will not work and you will
        need to call :py:meth:`reload` before calling it again.
        """
        version_info = self.attrs.get('Version')
        return version_info.get('Index')

    def update(self, node_spec):
        """
        Update the node's configuration.

        Args:
            node_spec (dict): Configuration settings to update. Any values
                not provided will be removed. Default: ``None``

        Returns:
            `True` if the request went through.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> node_spec = {'Availability': 'active',
                         'Name': 'node-name',
                         'Role': 'manager',
                         'Labels': {'foo': 'bar'}
                        }
            >>> node.update(node_spec)

        """
        return self.client.api.update_node(self.id, self.version, node_spec)

    def remove(self, force=False):
        """
        Remove this node from the swarm.

        Args:
            force (bool): Force remove an active node. Default: `False`

        Returns:
            `True` if the request was successful.

        Raises:
            :py:class:`docker.errors.NotFound`
                If the node doesn't exist in the swarm.

            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return self.client.api.remove_node(self.id, force=force)


class NodeCollection(Collection):
    """Swarm nodes known to the Docker server."""
    model = Node

    def get(self, node_id):
        """
        Get a node.

        Args:
            node_id (string): ID of the node to be inspected.

        Returns:
            A :py:class:`Node` object.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.
        """
        return self.prepare_model(self.client.api.inspect_node(node_id))

    def list(self, *args, **kwargs):
        """
        List swarm nodes.

        Args:
            filters (dict): Filters to process on the nodes list. Valid
                filters: ``id``, ``name``, ``membership`` and ``role``.
                Default: ``None``

        Returns:
            A list of :py:class:`Node` objects.

        Raises:
            :py:class:`docker.errors.APIError`
                If the server returns an error.

        Example:

            >>> client.nodes.list(filters={'role': 'manager'})
        """
        raw_nodes = self.client.api.nodes(*args, **kwargs)
        return [self.prepare_model(raw) for raw in raw_nodes]
class Collection:
    """
    Base class for the set of all server-side objects of one type
    (containers, images, nodes, ...).
    """

    #: The type of object this collection represents, set by subclasses
    model = None

    def __init__(self, client=None):
        #: The client pointing at the server that this collection of objects
        #: is on.
        self.client = client

    def __call__(self, *args, **kwargs):
        # Guard against pre-2.0 usage where e.g. `client.containers(...)`
        # was a method rather than a collection attribute.
        raise TypeError(
            f"'{self.__class__.__name__}' object is not callable. "
            "You might be trying to use the old (pre-2.0) API - "
            "use docker.APIClient if so."
        )

    def list(self):
        # Subclasses must implement listing.
        raise NotImplementedError

    def get(self, key):
        # Subclasses must implement retrieval by key.
        raise NotImplementedError

    def create(self, attrs=None):
        # Subclasses must implement creation.
        raise NotImplementedError

    def prepare_model(self, attrs):
        """
        Create a model from a set of attributes.
        """
        # An existing model instance is adopted into this collection;
        # a raw attribute dict is wrapped in a fresh model.
        if isinstance(attrs, Model):
            attrs.client = self.client
            attrs.collection = self
            return attrs
        if isinstance(attrs, dict):
            return self.model(attrs=attrs, client=self.client, collection=self)
        raise Exception(f"Can't create {self.model.__name__} from {attrs}")
61 | 62 | Returns: 63 | (list of :py:class:`Secret`): The secrets. 64 | 65 | Raises: 66 | :py:class:`docker.errors.APIError` 67 | If the server returns an error. 68 | """ 69 | resp = self.client.api.secrets(**kwargs) 70 | return [self.prepare_model(obj) for obj in resp] 71 | -------------------------------------------------------------------------------- /docker/models/volumes.py: -------------------------------------------------------------------------------- 1 | from ..api import APIClient 2 | from .resource import Collection, Model 3 | 4 | 5 | class Volume(Model): 6 | """A volume.""" 7 | id_attribute = 'Name' 8 | 9 | @property 10 | def name(self): 11 | """The name of the volume.""" 12 | return self.attrs['Name'] 13 | 14 | def remove(self, force=False): 15 | """ 16 | Remove this volume. 17 | 18 | Args: 19 | force (bool): Force removal of volumes that were already removed 20 | out of band by the volume driver plugin. 21 | Raises: 22 | :py:class:`docker.errors.APIError` 23 | If volume failed to remove. 24 | """ 25 | return self.client.api.remove_volume(self.id, force=force) 26 | 27 | 28 | class VolumeCollection(Collection): 29 | """Volumes on the Docker server.""" 30 | model = Volume 31 | 32 | def create(self, name=None, **kwargs): 33 | """ 34 | Create a volume. 35 | 36 | Args: 37 | name (str): Name of the volume. If not specified, the engine 38 | generates a name. 39 | driver (str): Name of the driver used to create the volume 40 | driver_opts (dict): Driver options as a key-value dictionary 41 | labels (dict): Labels to set on the volume 42 | 43 | Returns: 44 | (:py:class:`Volume`): The volume created. 45 | 46 | Raises: 47 | :py:class:`docker.errors.APIError` 48 | If the server returns an error. 
class TLSConfig:
    """
    TLS configuration.

    Args:
        client_cert (tuple of str): Path to client cert, path to client key.
        ca_cert (str): Path to CA cert file.
        verify (bool or str): This can be a bool or a path to a CA cert
            file to verify against. If ``True``, verify using ca_cert;
            if ``False`` or not specified, do not verify.
    """
    cert = None
    ca_cert = None
    verify = None

    def __init__(self, client_cert=None, ca_cert=None, verify=None):
        # Argument compatibility/mapping with
        # https://docs.docker.com/engine/articles/https/
        # Users can request TLS while disabling default CA pool
        # verification by leaving verify=False.

        # A client certificate is only accepted as a complete
        # (certificate, key) pair; anything else is reported to the caller
        # instead of silently producing a half-configured client.
        if client_cert:
            try:
                tls_cert, tls_key = client_cert
            except ValueError:
                raise errors.TLSParameterError(
                    'client_cert must be a tuple of'
                    ' (client certificate, key file)'
                ) from None

            missing = not (tls_cert and tls_key)
            unreadable = not missing and (
                not os.path.isfile(tls_cert) or not os.path.isfile(tls_key)
            )
            if missing or unreadable:
                raise errors.TLSParameterError(
                    'Path to a certificate and key files must be provided'
                    ' through the client_cert param'
                )
            self.cert = (tls_cert, tls_key)

        self.verify = verify
        self.ca_cert = ca_cert
        # When verification is requested with an explicit CA bundle, the
        # bundle must actually exist on disk.
        if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
            raise errors.TLSParameterError(
                'Invalid CA certificate provided for `ca_cert`.'
            )

    def configure_client(self, client):
        """
        Configure a client with these TLS options.
        """
        if self.verify and self.ca_cert:
            client.verify = self.ca_cert
        else:
            client.verify = self.verify

        if self.cert:
            client.cert = self.cert
class NpipeHTTPConnection(urllib3.connection.HTTPConnection):
    """A urllib3 HTTP connection transported over a Windows named pipe."""

    def __init__(self, npipe_path, timeout=60):
        # 'localhost' is a placeholder host name; the actual transport is
        # the named pipe at `npipe_path`.
        super().__init__(
            'localhost', timeout=timeout
        )
        self.npipe_path = npipe_path
        self.timeout = timeout

    def connect(self):
        # Replace the default TCP socket with an NpipeSocket bound to the
        # configured pipe path.
        sock = NpipeSocket()
        sock.settimeout(self.timeout)
        sock.connect(self.npipe_path)
        self.sock = sock


class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """Connection pool that produces named-pipe backed HTTP connections."""

    def __init__(self, npipe_path, timeout=60, maxsize=10):
        super().__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.npipe_path = npipe_path
        self.timeout = timeout

    def _new_conn(self):
        return NpipeHTTPConnection(
            self.npipe_path, self.timeout
        )

    # When re-using connections, urllib3 tries to call select() on our
    # NpipeSocket instance, causing a crash. To circumvent this, we override
    # _get_conn, where that check happens.
    def _get_conn(self, timeout):
        conn = None
        try:
            conn = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError as ae:  # self.pool is None
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.") from ae

        except queue.Empty:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more "
                    "connections are allowed."
                ) from None
            # Oh well, we'll create a new connection then

        return conn or self._new_conn()


class NpipeHTTPAdapter(BaseHTTPAdapter):
    """Requests transport adapter for ``npipe://`` base URLs (Windows)."""

    __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['npipe_path',
                                                           'pools',
                                                           'timeout',
                                                           'max_pool_size']

    def __init__(self, base_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
        self.npipe_path = base_url.replace('npipe://', '')
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        # LRU container of per-URL pools; evicted pools are closed by the
        # dispose callback.
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super().__init__()

    def get_connection(self, url, proxies=None):
        # Return the cached pool for this URL, creating one on first use.
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = NpipeHTTPConnectionPool(
                self.npipe_path, self.timeout,
                maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(self, request, proxies):
        # The select_proxy utility in requests errors out when the provided URL
        # doesn't have a hostname, like is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
        return request.path_url
class UnixHTTPConnection(urllib3.connection.HTTPConnection):
    """A urllib3 HTTP connection transported over a UNIX domain socket."""

    def __init__(self, base_url, unix_socket, timeout=60):
        # 'localhost' is a placeholder host name; the real endpoint is the
        # filesystem socket at `unix_socket`.
        super().__init__(
            'localhost', timeout=timeout
        )
        self.base_url = base_url
        self.unix_socket = unix_socket
        self.timeout = timeout

    def connect(self):
        # Replace the default TCP socket with an AF_UNIX stream socket.
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.timeout)
        sock.connect(self.unix_socket)
        self.sock = sock


class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """Connection pool that produces UNIX-socket backed HTTP connections."""

    def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
        super().__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.base_url = base_url
        self.socket_path = socket_path
        self.timeout = timeout

    def _new_conn(self):
        return UnixHTTPConnection(
            self.base_url, self.socket_path, self.timeout
        )


class UnixHTTPAdapter(BaseHTTPAdapter):
    """Requests transport adapter for ``http+unix://`` base URLs."""

    __attrs__ = requests.adapters.HTTPAdapter.__attrs__ + ['pools',
                                                           'socket_path',
                                                           'timeout',
                                                           'max_pool_size']

    def __init__(self, socket_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
        socket_path = socket_url.replace('http+unix://', '')
        # URLs may omit the leading slash of the socket path; normalize to
        # an absolute filesystem path.
        if not socket_path.startswith('/'):
            socket_path = f"/{socket_path}"
        self.socket_path = socket_path
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        # LRU container of per-URL pools; evicted pools are closed by the
        # dispose callback.
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super().__init__()

    def get_connection(self, url, proxies=None):
        # Return the cached pool for this URL, creating one on first use.
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            pool = UnixHTTPConnectionPool(
                url, self.socket_path, self.timeout,
                maxsize=self.max_pool_size
            )
            self.pools[url] = pool

        return pool

    def request_url(self, request, proxies):
        # The select_proxy utility in requests errors out when the provided URL
        # doesn't have a hostname, like is the case when using a UNIX socket.
        # Since proxies are an irrelevant notion in the case of UNIX sockets
        # anyway, we simply return the path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
        return request.path_url
CancellableStream: 9 | """ 10 | Stream wrapper for real-time events, logs, etc. from the server. 11 | 12 | Example: 13 | >>> events = client.events() 14 | >>> for event in events: 15 | ... print(event) 16 | >>> # and cancel from another thread 17 | >>> events.close() 18 | """ 19 | 20 | def __init__(self, stream, response): 21 | self._stream = stream 22 | self._response = response 23 | 24 | def __iter__(self): 25 | return self 26 | 27 | def __next__(self): 28 | try: 29 | return next(self._stream) 30 | except urllib3.exceptions.ProtocolError: 31 | raise StopIteration from None 32 | except OSError: 33 | raise StopIteration from None 34 | 35 | next = __next__ 36 | 37 | def close(self): 38 | """ 39 | Closes the event streaming. 40 | """ 41 | 42 | if not self._response.raw.closed: 43 | # find the underlying socket object 44 | # based on api.client._get_raw_response_socket 45 | 46 | sock_fp = self._response.raw._fp.fp 47 | 48 | if hasattr(sock_fp, 'raw'): 49 | sock_raw = sock_fp.raw 50 | 51 | if hasattr(sock_raw, 'sock'): 52 | sock = sock_raw.sock 53 | 54 | elif hasattr(sock_raw, '_sock'): 55 | sock = sock_raw._sock 56 | 57 | elif hasattr(sock_fp, 'channel'): 58 | # We're working with a paramiko (SSH) channel, which doesn't 59 | # support cancelable streams with the current implementation 60 | raise DockerException( 61 | 'Cancellable streams not supported for the SSH protocol' 62 | ) 63 | else: 64 | sock = sock_fp._sock 65 | 66 | if hasattr(urllib3.contrib, 'pyopenssl') and isinstance( 67 | sock, urllib3.contrib.pyopenssl.WrappedSocket): 68 | sock = sock.socket 69 | 70 | sock.shutdown(socket.SHUT_RDWR) 71 | sock.close() 72 | -------------------------------------------------------------------------------- /docker/types/healthcheck.py: -------------------------------------------------------------------------------- 1 | from .base import DictType 2 | 3 | 4 | class Healthcheck(DictType): 5 | """ 6 | Defines a healthcheck configuration for a container or service. 
class Healthcheck(DictType):
    """
    Defines a healthcheck configuration for a container or service.

    Args:
        test (:py:class:`list` or str): Test to perform to determine
            container health. Possible values:

            - Empty list: Inherit healthcheck from parent image
            - ``["NONE"]``: Disable healthcheck
            - ``["CMD", args...]``: exec arguments directly.
            - ``["CMD-SHELL", command]``: Run command in the system's
              default shell.

            If a string is provided, it will be used as a ``CMD-SHELL``
            command.
        interval (int): The time to wait between checks in nanoseconds. It
            should be 0 or at least 1000000 (1 ms).
        timeout (int): The time to wait before considering the check to
            have hung. It should be 0 or at least 1000000 (1 ms).
        retries (int): The number of consecutive failures needed to
            consider a container as unhealthy.
        start_period (int): Start period for the container to
            initialize before starting health-retries countdown in
            nanoseconds. It should be 0 or at least 1000000 (1 ms).
    """

    def __init__(self, **kwargs):
        # Accept both snake_case and CamelCase keyword spellings, the
        # latter matching the raw API field names.
        def option(snake, camel):
            return kwargs.get(snake, kwargs.get(camel))

        test = option('test', 'Test')
        # A bare string is shorthand for running it through the shell.
        if isinstance(test, str):
            test = ["CMD-SHELL", test]

        super().__init__({
            'Test': test,
            'Interval': option('interval', 'Interval'),
            'Timeout': option('timeout', 'Timeout'),
            'Retries': option('retries', 'Retries'),
            'StartPeriod': option('start_period', 'StartPeriod')
        })

    @property
    def test(self):
        return self['Test']

    @test.setter
    def test(self, value):
        # Apply the same string-to-CMD-SHELL normalization on assignment.
        self['Test'] = ["CMD-SHELL", value] if isinstance(value, str) else value

    @property
    def interval(self):
        return self['Interval']

    @interval.setter
    def interval(self, value):
        self['Interval'] = value

    @property
    def timeout(self):
        return self['Timeout']

    @timeout.setter
    def timeout(self, value):
        self['Timeout'] = value

    @property
    def retries(self):
        return self['Retries']

    @retries.setter
    def retries(self, value):
        self['Retries'] = value

    @property
    def start_period(self):
        return self['StartPeriod']

    @start_period.setter
    def start_period(self, value):
        self['StartPeriod'] = value
49 | if not isinstance(driver_opt, dict): 50 | raise TypeError('driver_opt must be a dictionary') 51 | self['DriverOpts'] = driver_opt 52 | 53 | 54 | class NetworkingConfig(dict): 55 | def __init__(self, endpoints_config=None): 56 | if endpoints_config: 57 | self["EndpointsConfig"] = endpoints_config 58 | 59 | 60 | class IPAMConfig(dict): 61 | """ 62 | Create an IPAM (IP Address Management) config dictionary to be used with 63 | :py:meth:`~docker.api.network.NetworkApiMixin.create_network`. 64 | 65 | Args: 66 | 67 | driver (str): The IPAM driver to use. Defaults to ``default``. 68 | pool_configs (:py:class:`list`): A list of pool configurations 69 | (:py:class:`~docker.types.IPAMPool`). Defaults to empty list. 70 | options (dict): Driver options as a key-value dictionary. 71 | Defaults to `None`. 72 | 73 | Example: 74 | 75 | >>> ipam_config = docker.types.IPAMConfig(driver='default') 76 | >>> network = client.create_network('network1', ipam=ipam_config) 77 | 78 | """ 79 | def __init__(self, driver='default', pool_configs=None, options=None): 80 | self.update({ 81 | 'Driver': driver, 82 | 'Config': pool_configs or [] 83 | }) 84 | 85 | if options: 86 | if not isinstance(options, dict): 87 | raise TypeError('IPAMConfig options must be a dictionary') 88 | self['Options'] = options 89 | 90 | 91 | class IPAMPool(dict): 92 | """ 93 | Create an IPAM pool config dictionary to be added to the 94 | ``pool_configs`` parameter of 95 | :py:class:`~docker.types.IPAMConfig`. 96 | 97 | Args: 98 | 99 | subnet (str): Custom subnet for this IPAM pool using the CIDR 100 | notation. Defaults to ``None``. 101 | iprange (str): Custom IP range for endpoints in this IPAM pool using 102 | the CIDR notation. Defaults to ``None``. 103 | gateway (str): Custom IP address for the pool's gateway. 104 | aux_addresses (dict): A dictionary of ``key -> ip_address`` 105 | relationships specifying auxiliary addresses that need to be 106 | allocated by the IPAM driver. 
class SwarmSpec(dict):
    """
    Describe a Swarm's configuration and options. Use
    :py:meth:`~docker.api.swarm.SwarmApiMixin.create_swarm_spec`
    to instantiate.
    """
    def __init__(self, version, task_history_retention_limit=None,
                 snapshot_interval=None, keep_old_snapshots=None,
                 log_entries_for_slow_followers=None, heartbeat_tick=None,
                 election_tick=None, dispatcher_heartbeat_period=None,
                 node_cert_expiry=None, external_cas=None, name=None,
                 labels=None, signing_ca_cert=None, signing_ca_key=None,
                 ca_force_rotate=None, autolock_managers=None,
                 log_driver=None):
        # Orchestration settings.
        if task_history_retention_limit is not None:
            self['Orchestration'] = {
                'TaskHistoryRetentionLimit': task_history_retention_limit
            }
        # Raft consensus settings are emitted together whenever any one of
        # them is provided (truthy).
        if any([snapshot_interval,
                keep_old_snapshots,
                log_entries_for_slow_followers,
                heartbeat_tick,
                election_tick]):
            self['Raft'] = {
                'SnapshotInterval': snapshot_interval,
                'KeepOldSnapshots': keep_old_snapshots,
                'LogEntriesForSlowFollowers': log_entries_for_slow_followers,
                'HeartbeatTick': heartbeat_tick,
                'ElectionTick': election_tick
            }

        if dispatcher_heartbeat_period:
            self['Dispatcher'] = {
                'HeartbeatPeriod': dispatcher_heartbeat_period
            }

        # Certificate authority configuration. Several fields are gated on
        # the negotiated API version and raise InvalidVersion when the
        # server is too old to understand them.
        ca_config = {}
        if node_cert_expiry is not None:
            ca_config['NodeCertExpiry'] = node_cert_expiry
        if external_cas:
            if version_lt(version, '1.25'):
                if len(external_cas) > 1:
                    raise InvalidVersion(
                        'Support for multiple external CAs is not available '
                        'for API version < 1.25'
                    )
                ca_config['ExternalCA'] = external_cas[0]
            else:
                ca_config['ExternalCAs'] = external_cas
        if signing_ca_key:
            if version_lt(version, '1.30'):
                raise InvalidVersion(
                    'signing_ca_key is not supported in API version < 1.30'
                )
            ca_config['SigningCAKey'] = signing_ca_key
        if signing_ca_cert:
            if version_lt(version, '1.30'):
                raise InvalidVersion(
                    'signing_ca_cert is not supported in API version < 1.30'
                )
            ca_config['SigningCACert'] = signing_ca_cert
        if ca_force_rotate is not None:
            if version_lt(version, '1.30'):
                # Fixed: the message previously referred to 'force_rotate',
                # which is not the name of any parameter; name the actual
                # kwarg for consistency with the sibling messages above.
                raise InvalidVersion(
                    'ca_force_rotate is not supported in API version < 1.30'
                )
            ca_config['ForceRotate'] = ca_force_rotate
        if ca_config:
            self['CAConfig'] = ca_config

        if autolock_managers is not None:
            if version_lt(version, '1.25'):
                raise InvalidVersion(
                    'autolock_managers is not supported in API version < 1.25'
                )

            self['EncryptionConfig'] = {'AutoLockManagers': autolock_managers}

        if log_driver is not None:
            if version_lt(version, '1.25'):
                raise InvalidVersion(
                    'log_driver is not supported in API version < 1.25'
                )

            self['TaskDefaults'] = {'LogDriver': log_driver}

        if name is not None:
            self['Name'] = name
        if labels is not None:
            self['Labels'] = labels
import json
import logging
import os

from ..constants import IS_WINDOWS_PLATFORM

# Standard config locations, relative to the user's home directory.
DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'

log = logging.getLogger(__name__)


def find_config_file(config_path=None):
    """
    Return the path of the first Docker client config file that exists
    on disk, checked in priority order: the explicit ``config_path``
    argument, ``$DOCKER_CONFIG``, ``~/.docker/config.json``, then the
    legacy ``~/.dockercfg``. Returns ``None`` if none exist.
    """
    # filter(None, ...) drops unset candidates (e.g. no DOCKER_CONFIG).
    paths = list(filter(None, [
        config_path,  # 1
        config_path_from_environment(),  # 2
        os.path.join(home_dir(), DOCKER_CONFIG_FILENAME),  # 3
        os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME),  # 4
    ]))

    log.debug(f"Trying paths: {repr(paths)}")

    for path in paths:
        if os.path.exists(path):
            log.debug(f"Found file at path: {path}")
            return path

    log.debug("No config file found")

    return None


def config_path_from_environment():
    """
    Return the config-file path derived from ``$DOCKER_CONFIG``
    (``$DOCKER_CONFIG/config.json``), or ``None`` when the variable
    is unset or empty.
    """
    config_dir = os.environ.get('DOCKER_CONFIG')
    if not config_dir:
        return None
    return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))


def home_dir():
    """
    Get the user's home directory, using the same logic as the Docker Engine
    client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
    """
    if IS_WINDOWS_PLATFORM:
        return os.environ.get('USERPROFILE', '')
    else:
        return os.path.expanduser('~')


def load_general_config(config_path=None):
    """
    Locate and parse the Docker client config file as JSON.

    Returns an empty dict when no config file is found or when the file
    cannot be parsed (e.g. a legacy ``.dockercfg`` file).
    """
    config_file = find_config_file(config_path)

    if not config_file:
        return {}

    try:
        with open(config_file) as f:
            return json.load(f)
    except (OSError, ValueError) as e:
        # In the case of a legacy `.dockercfg` file, we won't
        # be able to load any JSON data.
        log.debug(e)

    log.debug("All parsing attempts failed - returning empty config")
    return {}
import functools

from .. import errors
from . import utils


def check_resource(resource_name):
    """
    Decorator factory: ensure the wrapped API method receives a usable
    resource ID as its first positional argument.

    The ID may arrive positionally, as the keyword named ``resource_name``,
    or as a dict-like API object carrying an ``Id``/``ID`` key. Raises
    :py:class:`~docker.errors.NullResource` when no ID can be determined.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapped(self, resource_id=None, *args, **kwargs):
            # Allow the ID to be passed as a keyword (e.g. container=...).
            if resource_id is None and kwargs.get(resource_name):
                resource_id = kwargs.pop(resource_name)
            # Accept full API response dicts and extract their ID.
            if isinstance(resource_id, dict):
                resource_id = resource_id.get('Id', resource_id.get('ID'))
            if not resource_id:
                raise errors.NullResource(
                    'Resource ID was not provided'
                )
            return f(self, resource_id, *args, **kwargs)
        return wrapped
    return decorator


def minimum_version(version):
    """
    Decorator factory: raise :py:class:`~docker.errors.InvalidVersion`
    when the client's negotiated API version (``self._version``) is older
    than ``version``.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(self, *args, **kwargs):
            if utils.version_lt(self._version, version):
                raise errors.InvalidVersion(
                    f'{f.__name__} is not available for version < {version}',
                )
            return f(self, *args, **kwargs)
        return wrapper
    return decorator


def update_headers(f):
    """
    Decorator: merge the client's configured ``HttpHeaders`` (from
    ``self._general_configs``) into the request's ``headers`` kwarg
    before calling the wrapped method. Caller-supplied headers are kept
    and the configured ones are added on top.
    """
    # Fix: preserve the wrapped function's metadata (__name__, __doc__),
    # consistent with check_resource and minimum_version above.
    @functools.wraps(f)
    def inner(self, *args, **kwargs):
        if 'HttpHeaders' in self._general_configs:
            if not kwargs.get('headers'):
                kwargs['headers'] = self._general_configs['HttpHeaders']
            else:
                kwargs['headers'].update(self._general_configs['HttpHeaders'])
        return f(self, *args, **kwargs)
    return inner
11 | """ 12 | 13 | import re 14 | 15 | __all__ = ["fnmatch", "fnmatchcase", "translate"] 16 | 17 | _cache = {} 18 | _MAXCACHE = 100 19 | 20 | 21 | def _purge(): 22 | """Clear the pattern cache""" 23 | _cache.clear() 24 | 25 | 26 | def fnmatch(name, pat): 27 | """Test whether FILENAME matches PATTERN. 28 | 29 | Patterns are Unix shell style: 30 | 31 | * matches everything 32 | ? matches any single character 33 | [seq] matches any character in seq 34 | [!seq] matches any char not in seq 35 | 36 | An initial period in FILENAME is not special. 37 | Both FILENAME and PATTERN are first case-normalized 38 | if the operating system requires it. 39 | If you don't want this, use fnmatchcase(FILENAME, PATTERN). 40 | """ 41 | 42 | name = name.lower() 43 | pat = pat.lower() 44 | return fnmatchcase(name, pat) 45 | 46 | 47 | def fnmatchcase(name, pat): 48 | """Test whether FILENAME matches PATTERN, including case. 49 | This is a version of fnmatch() which doesn't case-normalize 50 | its arguments. 51 | """ 52 | 53 | try: 54 | re_pat = _cache[pat] 55 | except KeyError: 56 | res = translate(pat) 57 | if len(_cache) >= _MAXCACHE: 58 | _cache.clear() 59 | _cache[pat] = re_pat = re.compile(res) 60 | return re_pat.match(name) is not None 61 | 62 | 63 | def translate(pat): 64 | """Translate a shell PATTERN to a regular expression. 65 | 66 | There is no way to quote meta-characters. 67 | """ 68 | i, n = 0, len(pat) 69 | res = '^' 70 | while i < n: 71 | c = pat[i] 72 | i = i + 1 73 | if c == '*': 74 | if i < n and pat[i] == '*': 75 | # is some flavor of "**" 76 | i = i + 1 77 | # Treat **/ as ** so eat the "/" 78 | if i < n and pat[i] == '/': 79 | i = i + 1 80 | if i >= n: 81 | # is "**EOF" - to align with .gitignore just accept all 82 | res = f"{res}.*" 83 | else: 84 | # is "**" 85 | # Note that this allows for any # of /'s (even 0) because 86 | # the .* will eat everything, even /'s 87 | res = f"{res}(.*/)?" 
88 | else: 89 | # is "*" so map it to anything but "/" 90 | res = f"{res}[^/]*" 91 | elif c == '?': 92 | # "?" is any char except "/" 93 | res = f"{res}[^/]" 94 | elif c == '[': 95 | j = i 96 | if j < n and pat[j] == '!': 97 | j = j + 1 98 | if j < n and pat[j] == ']': 99 | j = j + 1 100 | while j < n and pat[j] != ']': 101 | j = j + 1 102 | if j >= n: 103 | res = f"{res}\\[" 104 | else: 105 | stuff = pat[i:j].replace('\\', '\\\\') 106 | i = j + 1 107 | if stuff[0] == '!': 108 | stuff = f"^{stuff[1:]}" 109 | elif stuff[0] == '^': 110 | stuff = f"\\{stuff}" 111 | res = f'{res}[{stuff}]' 112 | else: 113 | res = res + re.escape(c) 114 | 115 | return f"{res}$" 116 | -------------------------------------------------------------------------------- /docker/utils/json_stream.py: -------------------------------------------------------------------------------- 1 | import json 2 | import json.decoder 3 | 4 | from ..errors import StreamParseError 5 | 6 | json_decoder = json.JSONDecoder() 7 | 8 | 9 | def stream_as_text(stream): 10 | """ 11 | Given a stream of bytes or text, if any of the items in the stream 12 | are bytes convert them to text. 13 | This function can be removed once we return text streams 14 | instead of byte streams. 15 | """ 16 | for data in stream: 17 | if not isinstance(data, str): 18 | data = data.decode('utf-8', 'replace') 19 | yield data 20 | 21 | 22 | def json_splitter(buffer): 23 | """Attempt to parse a json object from a buffer. If there is at least one 24 | object, return it and the rest of the buffer, otherwise return None. 25 | """ 26 | buffer = buffer.strip() 27 | try: 28 | obj, index = json_decoder.raw_decode(buffer) 29 | rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():] 30 | return obj, rest 31 | except ValueError: 32 | return None 33 | 34 | 35 | def json_stream(stream): 36 | """Given a stream of text, return a stream of json objects. 
def line_splitter(buffer, separator='\n'):
    # Split off the first separator-terminated chunk of `buffer`. The
    # returned head keeps its trailing separator; returns None when no
    # separator is present yet (caller should buffer more input).
    index = buffer.find(str(separator))
    if index == -1:
        return None
    return buffer[:index + 1], buffer[index + 1:]


def split_buffer(stream, splitter=None, decoder=lambda a: a):
    """Given a generator which yields strings and a splitter function,
    joins all input, splits on the separator and yields each chunk.
    Unlike string.split(), each chunk includes the trailing
    separator, except for the last one if none was found on the end
    of the input.
    """
    splitter = splitter or line_splitter
    buffered = ''

    for data in stream_as_text(stream):
        buffered += data
        # Drain every complete chunk currently in the buffer before
        # reading more input.
        while True:
            buffer_split = splitter(buffered)
            if buffer_split is None:
                break

            item, buffered = buffer_split
            yield item

    # Whatever remains had no trailing separator; hand it to the decoder
    # as a final (possibly partial) chunk.
    if buffered:
        try:
            yield decoder(buffered)
        except Exception as e:
            raise StreamParseError(e) from e
def add_port_mapping(port_bindings, internal_port, external):
    # Append to the binding list for this internal port, creating the
    # list on first use.
    if internal_port in port_bindings:
        port_bindings[internal_port].append(external)
    else:
        port_bindings[internal_port] = [external]


def add_port(port_bindings, internal_port_range, external_range):
    """
    Record the bindings for one parsed port spec. When no external range
    was given, each internal port is bound to ``None`` (meaning an
    unspecified host port); otherwise internal and external ports are
    paired up positionally.
    """
    if external_range is None:
        for internal_port in internal_port_range:
            add_port_mapping(port_bindings, internal_port, None)
    else:
        ports = zip(internal_port_range, external_range)
        for internal_port, external_port in ports:
            add_port_mapping(port_bindings, internal_port, external_port)


def build_port_bindings(ports):
    """
    Parse a list of port-spec strings and return a dict mapping each
    internal port (e.g. ``'8080/tcp'``) to its list of external bindings.
    """
    port_bindings = {}
    for port in ports:
        internal_port_range, external_range = split_port(port)
        add_port(port_bindings, internal_port_range, external_range)
    return port_bindings


def _raise_invalid_port(port):
    # Shared error for any spec that PORT_SPEC fails to match.
    raise ValueError('Invalid port "%s", should be '
                     '[[remote_ip:]remote_port[-remote_port]:]'
                     'port[/protocol]' % port)


def port_range(start, end, proto, randomly_available_port=False):
    """
    Expand a (start, end) port pair into a list of port strings with
    ``proto`` appended. With no ``end``, returns a single-entry list;
    with ``randomly_available_port``, keeps the range as one
    ``"start-end"`` string instead of enumerating it.
    """
    if not start:
        return start
    if not end:
        return [start + proto]
    if randomly_available_port:
        return [f"{start}-{end}{proto}"]
    return [str(port) + proto for port in range(int(start), int(end) + 1)]


def split_port(port):
    """
    Parse a port-spec string into ``(internal_ports, external_bindings)``.

    ``external_bindings`` is ``None`` when no host part was given, a list
    of host ports, or a list of ``(host_ip, port)`` tuples when a host
    address was specified. Raises ``ValueError`` for malformed specs or
    mismatched range lengths.
    """
    if hasattr(port, 'legacy_repr'):
        # This is the worst hack, but it prevents a bug in Compose 1.14.0
        # https://github.com/docker/docker-py/issues/1668
        # TODO: remove once fixed in Compose stable
        port = port.legacy_repr()
    port = str(port)
    match = PORT_SPEC.match(port)
    if match is None:
        _raise_invalid_port(port)
    parts = match.groupdict()

    host = parts['host']
    proto = parts['proto'] or ''
    internal = port_range(parts['int'], parts['int_end'], proto)
    # A single internal port may bind to an external *range*, in which
    # case the range is kept as one string (randomly_available_port).
    external = port_range(
        parts['ext'], parts['ext_end'], '', len(internal) == 1)

    if host is None:
        if external is not None and len(internal) != len(external):
            raise ValueError('Port ranges don\'t match in length')
        return internal, external
    else:
        if not external:
            external = [None] * len(internal)
        elif len(internal) != len(external):
            raise ValueError('Port ranges don\'t match in length')
        return internal, [(host, ext_port) for ext_port in external]
    def inject_proxy_environment(self, environment):
        '''
        Given a list of strings representing environment variables, prepend the
        environment variables corresponding to the proxy settings.
        '''
        # An empty ProxyConfig has nothing to inject.
        if not self:
            return environment

        proxy_env = format_environment(self.get_environment())
        if not environment:
            return proxy_env
        # It is important to prepend our variables, because we want the
        # variables defined in "environment" to take precedence.
        return proxy_env + environment

    def __str__(self):
        # Human-readable summary of the configured proxy endpoints.
        return (
            'ProxyConfig('
            f'http={self.http}, https={self.https}, '
            f'ftp={self.ftp}, no_proxy={self.no_proxy}'
            ')'
        )
-------------------------------------------------------------------------------- /docs/_templates/page.html: -------------------------------------------------------------------------------- 1 | {% extends "!page.html" %} 2 | {% set css_files = css_files + ["_static/custom.css"] %} 3 | -------------------------------------------------------------------------------- /docs/api.rst: -------------------------------------------------------------------------------- 1 | Low-level API 2 | ============= 3 | 4 | The main object-orientated API is built on top of :py:class:`APIClient`. Each method on :py:class:`APIClient` maps one-to-one with a REST API endpoint, and returns the response that the API responds with. 5 | 6 | It's possible to use :py:class:`APIClient` directly. Some basic things (e.g. running a container) consist of several API calls and are complex to do with the low-level API, but it's useful if you need extra flexibility and power. 7 | 8 | .. py:module:: docker.api 9 | 10 | .. autoclass:: docker.api.client.APIClient 11 | 12 | Configs 13 | ------- 14 | 15 | .. py:module:: docker.api.config 16 | 17 | .. rst-class:: hide-signature 18 | .. autoclass:: ConfigApiMixin 19 | :members: 20 | :undoc-members: 21 | 22 | Containers 23 | ---------- 24 | 25 | .. py:module:: docker.api.container 26 | 27 | .. rst-class:: hide-signature 28 | .. autoclass:: ContainerApiMixin 29 | :members: 30 | :undoc-members: 31 | 32 | Images 33 | ------ 34 | 35 | .. py:module:: docker.api.image 36 | 37 | .. rst-class:: hide-signature 38 | .. autoclass:: ImageApiMixin 39 | :members: 40 | :undoc-members: 41 | 42 | Building images 43 | --------------- 44 | 45 | .. py:module:: docker.api.build 46 | 47 | .. rst-class:: hide-signature 48 | .. autoclass:: BuildApiMixin 49 | :members: 50 | :undoc-members: 51 | 52 | Networks 53 | -------- 54 | 55 | .. rst-class:: hide-signature 56 | .. 
autoclass:: docker.api.network.NetworkApiMixin 57 | :members: 58 | :undoc-members: 59 | 60 | Volumes 61 | ------- 62 | 63 | .. py:module:: docker.api.volume 64 | 65 | .. rst-class:: hide-signature 66 | .. autoclass:: VolumeApiMixin 67 | :members: 68 | :undoc-members: 69 | 70 | Executing commands in containers 71 | -------------------------------- 72 | 73 | .. py:module:: docker.api.exec_api 74 | 75 | .. rst-class:: hide-signature 76 | .. autoclass:: ExecApiMixin 77 | :members: 78 | :undoc-members: 79 | 80 | Swarms 81 | ------ 82 | 83 | .. py:module:: docker.api.swarm 84 | 85 | .. rst-class:: hide-signature 86 | .. autoclass:: SwarmApiMixin 87 | :members: 88 | :undoc-members: 89 | 90 | Services 91 | -------- 92 | 93 | .. py:module:: docker.api.service 94 | 95 | .. rst-class:: hide-signature 96 | .. autoclass:: ServiceApiMixin 97 | :members: 98 | :undoc-members: 99 | 100 | Plugins 101 | ------- 102 | 103 | .. py:module:: docker.api.plugin 104 | 105 | .. rst-class:: hide-signature 106 | .. autoclass:: PluginApiMixin 107 | :members: 108 | :undoc-members: 109 | 110 | Secrets 111 | ------- 112 | 113 | .. py:module:: docker.api.secret 114 | 115 | .. rst-class:: hide-signature 116 | .. autoclass:: SecretApiMixin 117 | :members: 118 | :undoc-members: 119 | 120 | The Docker daemon 121 | ----------------- 122 | 123 | .. py:module:: docker.api.daemon 124 | 125 | .. rst-class:: hide-signature 126 | .. autoclass:: DaemonApiMixin 127 | :members: 128 | :undoc-members: 129 | 130 | Configuration types 131 | ------------------- 132 | 133 | .. py:module:: docker.types 134 | 135 | .. autoclass:: ConfigReference 136 | .. autoclass:: ContainerSpec 137 | .. autoclass:: DNSConfig 138 | .. autoclass:: DriverConfig 139 | .. autoclass:: EndpointSpec 140 | .. autoclass:: Healthcheck 141 | .. autoclass:: IPAMConfig 142 | .. autoclass:: IPAMPool 143 | .. autoclass:: LogConfig 144 | .. autoclass:: Mount 145 | .. autoclass:: NetworkAttachmentConfig 146 | .. autoclass:: Placement 147 | .. 
autoclass:: PlacementPreference 148 | .. autoclass:: Privileges 149 | .. autoclass:: Resources 150 | .. autoclass:: RestartPolicy 151 | .. autoclass:: RollbackConfig 152 | .. autoclass:: SecretReference 153 | .. autoclass:: ServiceMode 154 | .. autoclass:: SwarmExternalCA 155 | .. autoclass:: SwarmSpec(*args, **kwargs) 156 | .. autoclass:: TaskTemplate 157 | .. autoclass:: Ulimit 158 | .. autoclass:: UpdateConfig 159 | -------------------------------------------------------------------------------- /docs/client.rst: -------------------------------------------------------------------------------- 1 | Client 2 | ====== 3 | .. py:module:: docker.client 4 | 5 | 6 | Creating a client 7 | ----------------- 8 | 9 | To communicate with the Docker daemon, you first need to instantiate a client. The easiest way to do that is by calling the function :py:func:`~docker.client.from_env`. It can also be configured manually by instantiating a :py:class:`~docker.client.DockerClient` class. 10 | 11 | .. autofunction:: from_env() 12 | 13 | Client reference 14 | ---------------- 15 | 16 | .. autoclass:: DockerClient() 17 | 18 | .. autoattribute:: configs 19 | .. autoattribute:: containers 20 | .. autoattribute:: images 21 | .. autoattribute:: networks 22 | .. autoattribute:: nodes 23 | .. autoattribute:: plugins 24 | .. autoattribute:: secrets 25 | .. autoattribute:: services 26 | .. autoattribute:: swarm 27 | .. autoattribute:: volumes 28 | 29 | .. automethod:: close() 30 | .. automethod:: df() 31 | .. automethod:: events() 32 | .. automethod:: info() 33 | .. automethod:: login() 34 | .. automethod:: ping() 35 | .. automethod:: version() 36 | -------------------------------------------------------------------------------- /docs/configs.rst: -------------------------------------------------------------------------------- 1 | Configs 2 | ======= 3 | 4 | .. py:module:: docker.models.configs 5 | 6 | Manage configs on the server. 
7 | 8 | Methods available on ``client.configs``: 9 | 10 | .. rst-class:: hide-signature 11 | .. py:class:: ConfigCollection 12 | 13 | .. automethod:: create 14 | .. automethod:: get 15 | .. automethod:: list 16 | 17 | 18 | Config objects 19 | -------------- 20 | 21 | .. autoclass:: Config() 22 | 23 | .. autoattribute:: id 24 | .. autoattribute:: name 25 | .. py:attribute:: attrs 26 | 27 | The raw representation of this object from the server. 28 | 29 | .. automethod:: reload 30 | .. automethod:: remove 31 | -------------------------------------------------------------------------------- /docs/containers.rst: -------------------------------------------------------------------------------- 1 | Containers 2 | ========== 3 | 4 | .. py:module:: docker.models.containers 5 | 6 | Run and manage containers on the server. 7 | 8 | Methods available on ``client.containers``: 9 | 10 | .. rst-class:: hide-signature 11 | .. autoclass:: ContainerCollection 12 | 13 | .. automethod:: run(image, command=None, **kwargs) 14 | .. automethod:: create(image, command=None, **kwargs) 15 | .. automethod:: get(id_or_name) 16 | .. automethod:: list(**kwargs) 17 | .. automethod:: prune 18 | 19 | Container objects 20 | ----------------- 21 | 22 | .. autoclass:: Container() 23 | 24 | .. py:attribute:: attrs 25 | .. autoattribute:: id 26 | .. autoattribute:: image 27 | .. autoattribute:: labels 28 | .. autoattribute:: name 29 | .. autoattribute:: short_id 30 | .. autoattribute:: status 31 | 32 | The raw representation of this object from the server. 33 | 34 | .. automethod:: attach 35 | .. automethod:: attach_socket 36 | .. automethod:: commit 37 | .. automethod:: diff 38 | .. automethod:: exec_run 39 | .. automethod:: export 40 | .. automethod:: get_archive 41 | .. automethod:: kill 42 | .. automethod:: logs 43 | .. automethod:: pause 44 | .. automethod:: put_archive 45 | .. automethod:: reload 46 | .. automethod:: remove 47 | .. automethod:: rename 48 | .. automethod:: resize 49 | .. 
automethod:: restart 50 | .. automethod:: start 51 | .. automethod:: stats 52 | .. automethod:: stop 53 | .. automethod:: top 54 | .. automethod:: unpause 55 | .. automethod:: update 56 | .. automethod:: wait 57 | -------------------------------------------------------------------------------- /docs/favicon_whale.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/docker/docker-py/526a9db743a80174aabb9b43dbb8b697b98ef497/docs/favicon_whale.png -------------------------------------------------------------------------------- /docs/images.rst: -------------------------------------------------------------------------------- 1 | Images 2 | ====== 3 | 4 | .. py:module:: docker.models.images 5 | 6 | Manage images on the server. 7 | 8 | Methods available on ``client.images``: 9 | 10 | .. rst-class:: hide-signature 11 | .. py:class:: ImageCollection 12 | 13 | .. automethod:: build 14 | .. automethod:: get 15 | .. automethod:: get_registry_data 16 | .. automethod:: list(**kwargs) 17 | .. automethod:: load 18 | .. automethod:: prune 19 | .. automethod:: pull 20 | .. automethod:: push 21 | .. automethod:: remove 22 | .. automethod:: search 23 | 24 | 25 | Image objects 26 | ------------- 27 | 28 | .. autoclass:: Image() 29 | 30 | .. py:attribute:: attrs 31 | 32 | The raw representation of this object from the server. 33 | 34 | .. autoattribute:: id 35 | .. autoattribute:: labels 36 | .. autoattribute:: short_id 37 | .. autoattribute:: tags 38 | 39 | 40 | 41 | .. automethod:: history 42 | .. automethod:: reload 43 | .. automethod:: save 44 | .. automethod:: tag 45 | 46 | RegistryData objects 47 | -------------------- 48 | 49 | .. autoclass:: RegistryData() 50 | 51 | .. py:attribute:: attrs 52 | 53 | The raw representation of this object from the server. 54 | 55 | .. autoattribute:: id 56 | .. autoattribute:: short_id 57 | 58 | 59 | 60 | .. automethod:: has_platform 61 | .. automethod:: pull 62 | .. 
automethod:: reload 63 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | Docker SDK for Python 2 | ===================== 3 | 4 | A Python library for the Docker Engine API. It lets you do anything the ``docker`` command does, but from within Python apps – run containers, manage containers, manage Swarms, etc. 5 | 6 | For more information about the Engine API, `see its documentation `_. 7 | 8 | Installation 9 | ------------ 10 | 11 | The latest stable version `is available on PyPI `_. Either add ``docker`` to your ``requirements.txt`` file or install with pip:: 12 | 13 | pip install docker 14 | 15 | Getting started 16 | --------------- 17 | 18 | To talk to a Docker daemon, you first need to instantiate a client. You can use :py:func:`~docker.client.from_env` to connect using the default socket or the configuration in your environment: 19 | 20 | .. code-block:: python 21 | 22 | import docker 23 | client = docker.from_env() 24 | 25 | You can now run containers: 26 | 27 | .. code-block:: python 28 | 29 | >>> client.containers.run("ubuntu", "echo hello world") 30 | 'hello world\n' 31 | 32 | You can run containers in the background: 33 | 34 | .. code-block:: python 35 | 36 | >>> client.containers.run("bfirsh/reticulate-splines", detach=True) 37 | 38 | 39 | You can manage containers: 40 | 41 | .. code-block:: python 42 | 43 | >>> client.containers.list() 44 | [, , ...] 45 | 46 | >>> container = client.containers.get('45e6d2de7c54') 47 | 48 | >>> container.attrs['Config']['Image'] 49 | "bfirsh/reticulate-splines" 50 | 51 | >>> container.logs() 52 | "Reticulating spline 1...\n" 53 | 54 | >>> container.stop() 55 | 56 | You can stream logs: 57 | 58 | .. code-block:: python 59 | 60 | >>> for line in container.logs(stream=True): 61 | ... print(line.strip()) 62 | Reticulating spline 2... 63 | Reticulating spline 3... 64 | ... 
65 | 66 | You can manage images: 67 | 68 | .. code-block:: python 69 | 70 | >>> client.images.pull('nginx') 71 | 72 | 73 | >>> client.images.list() 74 | [, , ...] 75 | 76 | That's just a taste of what you can do with the Docker SDK for Python. For more, :doc:`take a look at the reference `. 77 | 78 | .. toctree:: 79 | :hidden: 80 | :maxdepth: 2 81 | 82 | client 83 | configs 84 | containers 85 | images 86 | networks 87 | nodes 88 | plugins 89 | secrets 90 | services 91 | swarm 92 | volumes 93 | api 94 | tls 95 | user_guides/index 96 | change-log 97 | -------------------------------------------------------------------------------- /docs/networks.rst: -------------------------------------------------------------------------------- 1 | Networks 2 | ======== 3 | 4 | .. py:module:: docker.models.networks 5 | 6 | Create and manage networks on the server. For more information about networks, `see the Engine documentation `_. 7 | 8 | Methods available on ``client.networks``: 9 | 10 | .. rst-class:: hide-signature 11 | .. py:class:: NetworkCollection 12 | 13 | .. automethod:: create 14 | .. automethod:: get 15 | .. automethod:: list 16 | .. automethod:: prune 17 | 18 | Network objects 19 | ----------------- 20 | 21 | .. autoclass:: Network() 22 | 23 | .. autoattribute:: id 24 | .. autoattribute:: short_id 25 | .. autoattribute:: name 26 | .. autoattribute:: containers 27 | .. py:attribute:: attrs 28 | 29 | The raw representation of this object from the server. 30 | 31 | .. automethod:: connect 32 | .. automethod:: disconnect 33 | .. automethod:: reload 34 | .. automethod:: remove 35 | -------------------------------------------------------------------------------- /docs/nodes.rst: -------------------------------------------------------------------------------- 1 | Nodes 2 | ===== 3 | 4 | .. py:module:: docker.models.nodes 5 | 6 | Get and list nodes in a swarm. Before you can use these methods, you first need to :doc:`join or initialize a swarm `. 
7 | 8 | Methods available on ``client.nodes``: 9 | 10 | .. rst-class:: hide-signature 11 | .. py:class:: NodeCollection 12 | 13 | .. automethod:: get(id_or_name) 14 | .. automethod:: list(**kwargs) 15 | 16 | Node objects 17 | ------------ 18 | 19 | .. autoclass:: Node() 20 | 21 | .. autoattribute:: id 22 | .. autoattribute:: short_id 23 | .. py:attribute:: attrs 24 | 25 | The raw representation of this object from the server. 26 | 27 | .. autoattribute:: version 28 | 29 | .. automethod:: reload 30 | .. automethod:: update 31 | -------------------------------------------------------------------------------- /docs/plugins.rst: -------------------------------------------------------------------------------- 1 | Plugins 2 | ======= 3 | 4 | .. py:module:: docker.models.plugins 5 | 6 | Manage plugins on the server. 7 | 8 | Methods available on ``client.plugins``: 9 | 10 | .. rst-class:: hide-signature 11 | .. py:class:: PluginCollection 12 | 13 | .. automethod:: get 14 | .. automethod:: install 15 | .. automethod:: list 16 | 17 | 18 | Plugin objects 19 | -------------- 20 | 21 | .. autoclass:: Plugin() 22 | 23 | .. autoattribute:: id 24 | .. autoattribute:: short_id 25 | .. autoattribute:: name 26 | .. autoattribute:: enabled 27 | .. autoattribute:: settings 28 | .. py:attribute:: attrs 29 | 30 | The raw representation of this object from the server. 31 | 32 | .. automethod:: configure 33 | .. automethod:: disable 34 | .. automethod:: enable 35 | .. automethod:: reload 36 | .. automethod:: push 37 | .. automethod:: remove 38 | .. automethod:: upgrade 39 | -------------------------------------------------------------------------------- /docs/secrets.rst: -------------------------------------------------------------------------------- 1 | Secrets 2 | ======= 3 | 4 | .. py:module:: docker.models.secrets 5 | 6 | Manage secrets on the server. 7 | 8 | Methods available on ``client.secrets``: 9 | 10 | .. rst-class:: hide-signature 11 | .. 
Manage services on a swarm. For more information about services, `see the Engine documentation <https://docs.docker.com/engine/swarm/services/>`_.
client = docker.DockerClient(base_url='<https_url>', tls=tls_config)
The following instructions assume you're interested in getting output from
an ``exec`` command. These instructions are similarly applicable to the
output of ``attach``.
22 | 23 | With ``stream=False`` and ``demux=False``, the output is a string 24 | that contains both the `stdout` and the `stderr` output: 25 | 26 | >>> res = container.exec_run(cmd, stream=False, demux=False) 27 | >>> res.output 28 | b'hello stderr\nhello stdout\n' 29 | 30 | With ``stream=True``, and ``demux=False``, the output is a 31 | generator that yields strings containing the output of both 32 | `stdout` and `stderr`: 33 | 34 | >>> res = container.exec_run(cmd, stream=True, demux=False) 35 | >>> next(res.output) 36 | b'hello stdout\n' 37 | >>> next(res.output) 38 | b'hello stderr\n' 39 | >>> next(res.output) 40 | Traceback (most recent call last): 41 | File "", line 1, in 42 | StopIteration 43 | 44 | With ``stream=True`` and ``demux=True``, the generator now 45 | separates the streams, and yield tuples 46 | ``(stdout, stderr)``: 47 | 48 | >>> res = container.exec_run(cmd, stream=True, demux=True) 49 | >>> next(res.output) 50 | (b'hello stdout\n', None) 51 | >>> next(res.output) 52 | (None, b'hello stderr\n') 53 | >>> next(res.output) 54 | Traceback (most recent call last): 55 | File "", line 1, in 56 | StopIteration 57 | 58 | Finally, with ``stream=False`` and ``demux=True``, the output is a tuple ``(stdout, stderr)``: 59 | 60 | >>> res = container.exec_run(cmd, stream=False, demux=True) 61 | >>> res.output 62 | (b'hello stdout\n', b'hello stderr\n') -------------------------------------------------------------------------------- /docs/user_guides/swarm_services.md: -------------------------------------------------------------------------------- 1 | # Swarm services 2 | 3 | > Warning: 4 | > This is a stale document and may contain outdated information. 5 | > Refer to the API docs for updated classes and method signatures. 6 | 7 | Starting with Engine version 1.12 (API 1.24), it is possible to manage services 8 | using the Docker Engine API. 
Note that the engine needs to be part of a 9 | [Swarm cluster](../swarm.html) before you can use the service-related methods. 10 | 11 | ## Creating a service 12 | 13 | The `APIClient.create_service` method lets you create a new service inside the 14 | cluster. The method takes several arguments, `task_template` being mandatory. 15 | This dictionary of values is most easily produced by instantiating a 16 | `TaskTemplate` object. 17 | 18 | ```python 19 | container_spec = docker.types.ContainerSpec( 20 | image='busybox', command=['echo', 'hello'] 21 | ) 22 | task_tmpl = docker.types.TaskTemplate(container_spec) 23 | service_id = client.create_service(task_tmpl, name=name) 24 | ``` 25 | 26 | ## Listing services 27 | 28 | List all existing services using the `APIClient.services` method. 29 | 30 | ```python 31 | client.services(filters={'name': 'mysql'}) 32 | ``` 33 | 34 | ## Retrieving service configuration 35 | 36 | To retrieve detailed information and configuration for a specific service, you 37 | may use the `APIClient.inspect_service` method using the service's ID or name. 38 | 39 | ```python 40 | client.inspect_service(service='my_service_name') 41 | ``` 42 | 43 | ## Updating service configuration 44 | 45 | The `APIClient.update_service` method lets you update a service's configuration. 46 | The mandatory `version` argument (used to prevent concurrent writes) can be 47 | retrieved using `APIClient.inspect_service`. 48 | 49 | ```python 50 | container_spec = docker.types.ContainerSpec( 51 | image='busybox', command=['echo', 'hello world'] 52 | ) 53 | task_tmpl = docker.types.TaskTemplate(container_spec) 54 | 55 | svc_version = client.inspect_service(svc_id)['Version']['Index'] 56 | 57 | client.update_service( 58 | svc_id, svc_version, name='new_name', task_template=task_tmpl 59 | ) 60 | ``` 61 | 62 | ## Removing a service 63 | 64 | A service may be removed simply using the `APIClient.remove_service` method. 
65 | Either the service name or service ID can be used as argument. 66 | 67 | ```python 68 | client.remove_service('my_service_name') 69 | ``` 70 | -------------------------------------------------------------------------------- /docs/volumes.rst: -------------------------------------------------------------------------------- 1 | Volumes 2 | ======= 3 | 4 | .. py:module:: docker.models.volumes 5 | 6 | Manage volumes on the server. 7 | 8 | Methods available on ``client.volumes``: 9 | 10 | .. rst-class:: hide-signature 11 | .. py:class:: VolumeCollection 12 | 13 | .. automethod:: create 14 | .. automethod:: get 15 | .. automethod:: list 16 | .. automethod:: prune 17 | 18 | Volume objects 19 | -------------- 20 | 21 | .. autoclass:: Volume() 22 | 23 | .. autoattribute:: id 24 | .. autoattribute:: short_id 25 | .. autoattribute:: name 26 | .. py:attribute:: attrs 27 | 28 | The raw representation of this object from the server. 29 | 30 | 31 | .. automethod:: reload 32 | .. automethod:: remove 33 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["hatchling", "hatch-vcs"] 3 | build-backend = "hatchling.build" 4 | 5 | [project] 6 | name = "docker" 7 | dynamic = ["version"] 8 | description = "A Python library for the Docker Engine API." 
9 | readme = "README.md" 10 | license = "Apache-2.0" 11 | requires-python = ">=3.8" 12 | maintainers = [ 13 | { name = "Docker Inc.", email = "no-reply@docker.com" }, 14 | ] 15 | classifiers = [ 16 | "Development Status :: 5 - Production/Stable", 17 | "Environment :: Other Environment", 18 | "Intended Audience :: Developers", 19 | "License :: OSI Approved :: Apache Software License", 20 | "Operating System :: OS Independent", 21 | "Programming Language :: Python", 22 | "Programming Language :: Python :: 3", 23 | "Programming Language :: Python :: 3.8", 24 | "Programming Language :: Python :: 3.9", 25 | "Programming Language :: Python :: 3.10", 26 | "Programming Language :: Python :: 3.11", 27 | "Programming Language :: Python :: 3.12", 28 | "Topic :: Software Development", 29 | "Topic :: Utilities", 30 | ] 31 | 32 | dependencies = [ 33 | "requests >= 2.26.0", 34 | "urllib3 >= 1.26.0", 35 | "pywin32>=304; sys_platform == \"win32\"", 36 | ] 37 | 38 | [project.optional-dependencies] 39 | # ssh feature allows DOCKER_HOST=ssh://... style connections 40 | ssh = [ 41 | "paramiko>=2.4.3", 42 | ] 43 | # tls is always supported, the feature is a no-op for backwards compatibility 44 | tls = [] 45 | # websockets can be used as an alternate container attach mechanism but 46 | # by default docker-py hijacks the TCP connection and does not use Websockets 47 | # unless attach_socket(container, ws=True) is called 48 | websockets = [ 49 | "websocket-client >= 1.3.0", 50 | ] 51 | # docs are dependencies required to build the ReadTheDocs site 52 | # this is only needed for CI / working on the docs! 53 | docs = [ 54 | "myst-parser==0.18.0", 55 | "Sphinx==5.1.1", 56 | 57 | ] 58 | # dev are dependencies required to test & lint this project 59 | # this is only needed if you are making code changes to docker-py! 
60 | dev = [ 61 | "coverage==7.2.7", 62 | "pytest==7.4.2", 63 | "pytest-cov==4.1.0", 64 | "pytest-timeout==2.1.0", 65 | "ruff==0.1.8", 66 | ] 67 | 68 | [project.urls] 69 | Changelog = "https://docker-py.readthedocs.io/en/stable/change-log.html" 70 | Documentation = "https://docker-py.readthedocs.io" 71 | Homepage = "https://github.com/docker/docker-py" 72 | Source = "https://github.com/docker/docker-py" 73 | Tracker = "https://github.com/docker/docker-py/issues" 74 | 75 | [tool.hatch.version] 76 | source = "vcs" 77 | 78 | [tool.hatch.build.hooks.vcs] 79 | version-file = "docker/_version.py" 80 | 81 | [tool.hatch.build.targets.sdist] 82 | include = [ 83 | "/docker", 84 | ] 85 | 86 | [tool.ruff] 87 | target-version = "py38" 88 | extend-select = [ 89 | "B", 90 | "C", 91 | "F", 92 | "I", 93 | "UP", 94 | "W", 95 | ] 96 | ignore = [ 97 | "UP012", # unnecessary `UTF-8` argument (we want to be explicit) 98 | "C901", # too complex (there's a whole bunch of these) 99 | ] 100 | 101 | [tool.ruff.per-file-ignores] 102 | "**/__init__.py" = ["F401"] 103 | -------------------------------------------------------------------------------- /pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts = --tb=short -rxs 3 | 4 | junit_suite_name = docker-py 5 | junit_family = xunit2 6 | -------------------------------------------------------------------------------- /scripts/release.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Create the official release 4 | # 5 | 6 | VERSION=$1 7 | REPO=docker/docker-py 8 | GITHUB_REPO=git@github.com:$REPO 9 | 10 | if [ -z $VERSION ]; then 11 | echo "Usage: $0 VERSION [upload]" 12 | exit 1 13 | fi 14 | 15 | echo "##> Removing stale build files and other untracked files" 16 | git clean -x -d -i 17 | test -z "$(git clean -x -d -n)" || exit 1 18 | 19 | echo "##> Tagging the release as $VERSION" 20 | git tag $VERSION 21 | if [[ $? 
!= 0 ]]; then 22 | head_commit=$(git show --pretty=format:%H HEAD) 23 | tag_commit=$(git show --pretty=format:%H $VERSION) 24 | if [[ $head_commit != $tag_commit ]]; then 25 | echo "ERROR: tag already exists, but isn't the current HEAD" 26 | exit 1 27 | fi 28 | fi 29 | if [[ $2 == 'upload' ]]; then 30 | echo "##> Pushing tag to github" 31 | git push $GITHUB_REPO $VERSION || exit 1 32 | fi 33 | 34 | 35 | echo "##> sdist & wheel" 36 | python setup.py sdist bdist_wheel 37 | 38 | if [[ $2 == 'upload' ]]; then 39 | echo '##> Uploading sdist to pypi' 40 | twine upload dist/docker-$VERSION* 41 | fi 42 | -------------------------------------------------------------------------------- /scripts/versions.py: -------------------------------------------------------------------------------- 1 | import operator 2 | import re 3 | from collections import namedtuple 4 | 5 | import requests 6 | 7 | base_url = 'https://download.docker.com/linux/static/{0}/x86_64/' 8 | categories = [ 9 | 'edge', 10 | 'stable', 11 | 'test' 12 | ] 13 | 14 | STAGES = ['tp', 'beta', 'rc'] 15 | 16 | 17 | class Version(namedtuple('_Version', 'major minor patch stage edition')): 18 | 19 | @classmethod 20 | def parse(cls, version): 21 | edition = None 22 | version = version.lstrip('v') 23 | version, _, stage = version.partition('-') 24 | if stage: 25 | if not any(marker in stage for marker in STAGES): 26 | edition = stage 27 | stage = None 28 | elif '-' in stage: 29 | edition, stage = stage.split('-', 1) 30 | major, minor, patch = version.split('.', 2) 31 | return cls(major, minor, patch, stage, edition) 32 | 33 | @property 34 | def major_minor(self): 35 | return self.major, self.minor 36 | 37 | @property 38 | def order(self): 39 | """Return a representation that allows this object to be sorted 40 | correctly with the default comparator. 
41 | """ 42 | # non-GA releases should appear before GA releases 43 | # Order: tp -> beta -> rc -> GA 44 | if self.stage: 45 | for st in STAGES: 46 | if st in self.stage: 47 | stage = (STAGES.index(st), self.stage) 48 | break 49 | else: 50 | stage = (len(STAGES),) 51 | 52 | return (int(self.major), int(self.minor), int(self.patch)) + stage 53 | 54 | def __str__(self): 55 | stage = f'-{self.stage}' if self.stage else '' 56 | edition = f'-{self.edition}' if self.edition else '' 57 | return '.'.join(map(str, self[:3])) + edition + stage 58 | 59 | 60 | def main(): 61 | results = set() 62 | for url in [base_url.format(cat) for cat in categories]: 63 | res = requests.get(url) 64 | content = res.text 65 | versions = [Version.parse(v) for v in re.findall( 66 | r'"docker-([0-9]+\.[0-9]+\.[0-9]+-?.*)\.tgz"', content 67 | )] 68 | sorted_versions = sorted( 69 | versions, reverse=True, key=operator.attrgetter('order') 70 | ) 71 | latest = sorted_versions[0] 72 | results.add(str(latest)) 73 | print(' '.join(results)) 74 | 75 | if __name__ == '__main__': 76 | main() 77 | -------------------------------------------------------------------------------- /tests/Dockerfile: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | ARG PYTHON_VERSION=3.12 4 | FROM python:${PYTHON_VERSION} 5 | 6 | RUN apt-get update && apt-get -y install --no-install-recommends \ 7 | gnupg2 \ 8 | pass 9 | 10 | # Add SSH keys and set permissions 11 | COPY tests/ssh/config/client /root/.ssh 12 | COPY tests/ssh/config/server/known_ed25519.pub /root/.ssh/known_hosts 13 | RUN sed -i '1s;^;dpy-dind-ssh ;' /root/.ssh/known_hosts 14 | RUN chmod -R 600 /root/.ssh 15 | 16 | COPY ./tests/gpg-keys /gpg-keys 17 | RUN gpg2 --import gpg-keys/secret 18 | RUN gpg2 --import-ownertrust gpg-keys/ownertrust 19 | RUN yes | pass init $(gpg2 --no-auto-check-trustdb --list-secret-key | awk '/^sec/{getline; $1=$1; print}') 20 | RUN gpg2 --check-trustdb 21 | ARG 
CREDSTORE_VERSION=v0.6.3 22 | RUN curl -sSL -o /opt/docker-credential-pass.tar.gz \ 23 | https://github.com/docker/docker-credential-helpers/releases/download/$CREDSTORE_VERSION/docker-credential-pass-$CREDSTORE_VERSION-amd64.tar.gz && \ 24 | tar -xf /opt/docker-credential-pass.tar.gz -O > /usr/local/bin/docker-credential-pass && \ 25 | rm -rf /opt/docker-credential-pass.tar.gz && \ 26 | chmod +x /usr/local/bin/docker-credential-pass 27 | 28 | WORKDIR /src 29 | COPY . . 30 | 31 | ARG VERSION=0.0.0.dev0 32 | RUN --mount=type=cache,target=/cache/pip \ 33 | PIP_CACHE_DIR=/cache/pip \ 34 | SETUPTOOLS_SCM_PRETEND_VERSION=${VERSION} \ 35 | pip install .[dev,ssh,websockets] 36 | -------------------------------------------------------------------------------- /tests/Dockerfile-dind-certs: -------------------------------------------------------------------------------- 1 | # syntax=docker/dockerfile:1 2 | 3 | ARG PYTHON_VERSION=3.12 4 | 5 | FROM python:${PYTHON_VERSION} 6 | RUN mkdir /tmp/certs 7 | VOLUME /certs 8 | 9 | WORKDIR /tmp/certs 10 | RUN openssl genrsa -aes256 -passout pass:foobar -out ca-key.pem 4096 11 | RUN echo "[req]\nprompt=no\ndistinguished_name = req_distinguished_name\n[req_distinguished_name]\ncountryName=AU" > /tmp/config 12 | RUN openssl req -new -x509 -passin pass:foobar -config /tmp/config -days 365 -key ca-key.pem -sha256 -out ca.pem 13 | RUN openssl genrsa -out server-key.pem -passout pass:foobar 4096 14 | RUN openssl req -subj "/CN=docker" -sha256 -new -key server-key.pem -out server.csr 15 | RUN echo subjectAltName = DNS:docker,DNS:localhost > extfile.cnf 16 | RUN openssl x509 -req -days 365 -passin pass:foobar -sha256 -in server.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out server-cert.pem -extfile extfile.cnf 17 | RUN openssl genrsa -out key.pem 4096 18 | RUN openssl req -passin pass:foobar -subj '/CN=client' -new -key key.pem -out client.csr 19 | RUN echo extendedKeyUsage = clientAuth > extfile.cnf 20 | RUN openssl x509 -req -passin 
# set authorized keys for client passwordless connection
def untar_file(tardata, filename):
    """Return the raw bytes of *filename* extracted from the tar stream
    *tardata* (a readable file-like object positioned at the archive start).
    """
    with tarfile.open(mode='r', fileobj=tardata) as archive:
        member = archive.extractfile(filename)
        try:
            return member.read()
        finally:
            member.close()
def wait_on_condition(condition, delay=0.1, timeout=40):
    """Poll *condition* every *delay* seconds until it returns a truthy
    value; raise ``AssertionError`` once *timeout* seconds have elapsed.
    """
    deadline = time.time() + timeout
    while not condition():
        if time.time() > deadline:
            raise AssertionError(f"Timeout: {condition}")
        time.sleep(delay)
def ctrl_with(char):
    """Return the control-character byte for *char*, e.g. ``'c'`` ->
    ``b'\\x03'`` (Ctrl-C).

    Raises:
        Exception: if *char* is not exactly one lowercase ASCII letter.
    """
    # fullmatch (not match) so multi-character strings such as 'ab' are
    # rejected instead of silently encoding only their first letter.
    if re.fullmatch('[a-z]', char):
        return chr(ord(char) - ord('a') + 1).encode('ascii')
    else:
        raise Exception('char must be [a-z]')
class UnixconnTest(unittest.TestCase):
    """
    Test UNIX socket connection adapter.
    """

    def test_resource_warnings(self):
        """
        Test no warnings are produced when using the client.
        """

        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')

            client = docker.APIClient(version='auto', **kwargs_from_env())
            client.images()
            client.close()
            del client

        # The assertion message is shown only when the assertion FAILS,
        # i.e. when warnings WERE produced -- the old message claimed the
        # opposite ("No warnings produced"), which was misleading.
        assert len(w) == 0, f"Warnings were produced: {w[0].message}"
    def test_list_configs(self):
        """configs() with a name filter returns exactly the config created."""
        config_name = 'favorite_character'
        config_id = self.client.create_config(
            config_name, 'sakuya izayoi'
        )
        # Track the created config so it can be cleaned up afterwards
        # (presumably by the base-class teardown -- confirm in base.py).
        self.tmp_configs.append(config_id)

        data = self.client.configs(filters={'name': ['favorite_character']})
        assert len(data) == 1
        assert data[0]['ID'] == config_id['ID']
import helpers 2 | from .base import TEST_IMG, BaseAPIIntegrationTest 3 | 4 | SECOND = 1000000000 5 | 6 | 7 | def wait_on_health_status(client, container, status): 8 | def condition(): 9 | res = client.inspect_container(container) 10 | return res['State']['Health']['Status'] == status 11 | return helpers.wait_on_condition(condition) 12 | 13 | 14 | class HealthcheckTest(BaseAPIIntegrationTest): 15 | 16 | @helpers.requires_api_version('1.24') 17 | def test_healthcheck_shell_command(self): 18 | container = self.client.create_container( 19 | TEST_IMG, 'top', healthcheck={'test': 'echo "hello world"'}) 20 | self.tmp_containers.append(container) 21 | 22 | res = self.client.inspect_container(container) 23 | assert res['Config']['Healthcheck']['Test'] == [ 24 | 'CMD-SHELL', 'echo "hello world"' 25 | ] 26 | 27 | @helpers.requires_api_version('1.24') 28 | def test_healthcheck_passes(self): 29 | container = self.client.create_container( 30 | TEST_IMG, 'top', healthcheck={ 31 | 'test': "true", 32 | 'interval': 1 * SECOND, 33 | 'timeout': 1 * SECOND, 34 | 'retries': 1, 35 | }) 36 | self.tmp_containers.append(container) 37 | self.client.start(container) 38 | wait_on_health_status(self.client, container, "healthy") 39 | 40 | @helpers.requires_api_version('1.24') 41 | def test_healthcheck_fails(self): 42 | container = self.client.create_container( 43 | TEST_IMG, 'top', healthcheck={ 44 | 'test': "false", 45 | 'interval': 1 * SECOND, 46 | 'timeout': 1 * SECOND, 47 | 'retries': 1, 48 | }) 49 | self.tmp_containers.append(container) 50 | self.client.start(container) 51 | wait_on_health_status(self.client, container, "unhealthy") 52 | 53 | @helpers.requires_api_version('1.29') 54 | def test_healthcheck_start_period(self): 55 | container = self.client.create_container( 56 | TEST_IMG, 'top', healthcheck={ 57 | 'test': "echo 'x' >> /counter.txt && " 58 | "test `cat /counter.txt | wc -l` -ge 3", 59 | 'interval': 1 * SECOND, 60 | 'timeout': 1 * SECOND, 61 | 'retries': 1, 62 | 
'start_period': 3 * SECOND 63 | } 64 | ) 65 | 66 | self.tmp_containers.append(container) 67 | self.client.start(container) 68 | wait_on_health_status(self.client, container, "healthy") 69 | -------------------------------------------------------------------------------- /tests/integration/api_secret_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import docker 4 | 5 | from ..helpers import force_leave_swarm, requires_api_version 6 | from .base import BaseAPIIntegrationTest 7 | 8 | 9 | @requires_api_version('1.25') 10 | class SecretAPITest(BaseAPIIntegrationTest): 11 | @classmethod 12 | def setup_class(cls): 13 | client = cls.get_client_instance() 14 | force_leave_swarm(client) 15 | cls._init_swarm(client) 16 | 17 | @classmethod 18 | def teardown_class(cls): 19 | client = cls.get_client_instance() 20 | force_leave_swarm(client) 21 | 22 | def test_create_secret(self): 23 | secret_id = self.client.create_secret( 24 | 'favorite_character', 'sakuya izayoi' 25 | ) 26 | self.tmp_secrets.append(secret_id) 27 | assert 'ID' in secret_id 28 | data = self.client.inspect_secret(secret_id) 29 | assert data['Spec']['Name'] == 'favorite_character' 30 | 31 | def test_create_secret_unicode_data(self): 32 | secret_id = self.client.create_secret( 33 | 'favorite_character', 'いざよいさくや' 34 | ) 35 | self.tmp_secrets.append(secret_id) 36 | assert 'ID' in secret_id 37 | data = self.client.inspect_secret(secret_id) 38 | assert data['Spec']['Name'] == 'favorite_character' 39 | 40 | def test_inspect_secret(self): 41 | secret_name = 'favorite_character' 42 | secret_id = self.client.create_secret( 43 | secret_name, 'sakuya izayoi' 44 | ) 45 | self.tmp_secrets.append(secret_id) 46 | data = self.client.inspect_secret(secret_id) 47 | assert data['Spec']['Name'] == secret_name 48 | assert 'ID' in data 49 | assert 'Version' in data 50 | 51 | def test_remove_secret(self): 52 | secret_name = 'favorite_character' 53 | secret_id = 
self.client.create_secret( 54 | secret_name, 'sakuya izayoi' 55 | ) 56 | self.tmp_secrets.append(secret_id) 57 | 58 | assert self.client.remove_secret(secret_id) 59 | with pytest.raises(docker.errors.NotFound): 60 | self.client.inspect_secret(secret_id) 61 | 62 | def test_list_secrets(self): 63 | secret_name = 'favorite_character' 64 | secret_id = self.client.create_secret( 65 | secret_name, 'sakuya izayoi' 66 | ) 67 | self.tmp_secrets.append(secret_id) 68 | 69 | data = self.client.secrets(filters={'names': ['favorite_character']}) 70 | assert len(data) == 1 71 | assert data[0]['ID'] == secret_id['ID'] 72 | -------------------------------------------------------------------------------- /tests/integration/api_volume_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import docker 4 | 5 | from ..helpers import requires_api_version 6 | from .base import BaseAPIIntegrationTest 7 | 8 | 9 | class TestVolumes(BaseAPIIntegrationTest): 10 | def test_create_volume(self): 11 | name = 'perfectcherryblossom' 12 | self.tmp_volumes.append(name) 13 | result = self.client.create_volume(name) 14 | assert 'Name' in result 15 | assert result['Name'] == name 16 | assert 'Driver' in result 17 | assert result['Driver'] == 'local' 18 | 19 | def test_create_volume_invalid_driver(self): 20 | # special name to avoid exponential timeout loop 21 | # https://github.com/moby/moby/blob/9e00a63d65434cdedc444e79a2b33a7c202b10d8/pkg/plugins/client.go#L253-L254 22 | driver_name = 'this-plugin-does-not-exist' 23 | 24 | with pytest.raises(docker.errors.APIError) as cm: 25 | self.client.create_volume('perfectcherryblossom', driver_name) 26 | assert ( 27 | cm.value.response.status_code == 404 or 28 | cm.value.response.status_code == 400 29 | ) 30 | 31 | def test_list_volumes(self): 32 | name = 'imperishablenight' 33 | self.tmp_volumes.append(name) 34 | volume_info = self.client.create_volume(name) 35 | result = self.client.volumes() 36 | 
assert 'Volumes' in result 37 | volumes = result['Volumes'] 38 | assert volume_info in volumes 39 | 40 | def test_inspect_volume(self): 41 | name = 'embodimentofscarletdevil' 42 | self.tmp_volumes.append(name) 43 | volume_info = self.client.create_volume(name) 44 | result = self.client.inspect_volume(name) 45 | assert volume_info == result 46 | 47 | def test_inspect_nonexistent_volume(self): 48 | name = 'embodimentofscarletdevil' 49 | with pytest.raises(docker.errors.NotFound): 50 | self.client.inspect_volume(name) 51 | 52 | def test_remove_volume(self): 53 | name = 'shootthebullet' 54 | self.tmp_volumes.append(name) 55 | self.client.create_volume(name) 56 | self.client.remove_volume(name) 57 | 58 | @requires_api_version('1.25') 59 | def test_force_remove_volume(self): 60 | name = 'shootthebullet' 61 | self.tmp_volumes.append(name) 62 | self.client.create_volume(name) 63 | self.client.remove_volume(name, force=True) 64 | 65 | @requires_api_version('1.25') 66 | def test_prune_volumes(self): 67 | v = self.client.create_volume() 68 | self.tmp_volumes.append(v["Name"]) 69 | result = self.client.prune_volumes() 70 | assert v["Name"] in result['VolumesDeleted'] 71 | 72 | def test_remove_nonexistent_volume(self): 73 | name = 'shootthebullet' 74 | with pytest.raises(docker.errors.NotFound): 75 | self.client.remove_volume(name) 76 | -------------------------------------------------------------------------------- /tests/integration/base.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import unittest 4 | 5 | import docker 6 | from docker.utils import kwargs_from_env 7 | 8 | from .. import helpers 9 | 10 | TEST_IMG = 'alpine:3.10' 11 | TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION') 12 | 13 | 14 | class BaseIntegrationTest(unittest.TestCase): 15 | """ 16 | A base class for integration test cases. It cleans up the Docker server 17 | after itself. 
18 | """ 19 | 20 | def setUp(self): 21 | self.tmp_imgs = [] 22 | self.tmp_containers = [] 23 | self.tmp_folders = [] 24 | self.tmp_volumes = [] 25 | self.tmp_networks = [] 26 | self.tmp_plugins = [] 27 | self.tmp_secrets = [] 28 | self.tmp_configs = [] 29 | 30 | def tearDown(self): 31 | client = docker.from_env(version=TEST_API_VERSION) 32 | try: 33 | for img in self.tmp_imgs: 34 | try: 35 | client.api.remove_image(img) 36 | except docker.errors.APIError: 37 | pass 38 | for container in self.tmp_containers: 39 | try: 40 | client.api.remove_container(container, force=True, v=True) 41 | except docker.errors.APIError: 42 | pass 43 | for network in self.tmp_networks: 44 | try: 45 | client.api.remove_network(network) 46 | except docker.errors.APIError: 47 | pass 48 | for volume in self.tmp_volumes: 49 | try: 50 | client.api.remove_volume(volume) 51 | except docker.errors.APIError: 52 | pass 53 | 54 | for secret in self.tmp_secrets: 55 | try: 56 | client.api.remove_secret(secret) 57 | except docker.errors.APIError: 58 | pass 59 | 60 | for config in self.tmp_configs: 61 | try: 62 | client.api.remove_config(config) 63 | except docker.errors.APIError: 64 | pass 65 | 66 | for folder in self.tmp_folders: 67 | shutil.rmtree(folder) 68 | finally: 69 | client.close() 70 | 71 | 72 | class BaseAPIIntegrationTest(BaseIntegrationTest): 73 | """ 74 | A test case for `APIClient` integration tests. It sets up an `APIClient` 75 | as `self.client`. 
76 | """ 77 | 78 | def setUp(self): 79 | super().setUp() 80 | self.client = self.get_client_instance() 81 | 82 | def tearDown(self): 83 | super().tearDown() 84 | self.client.close() 85 | 86 | @staticmethod 87 | def get_client_instance(): 88 | return docker.APIClient( 89 | version=TEST_API_VERSION, timeout=60, **kwargs_from_env() 90 | ) 91 | 92 | @staticmethod 93 | def _init_swarm(client, **kwargs): 94 | return client.init_swarm( 95 | '127.0.0.1', listen_addr=helpers.swarm_listen_addr(), **kwargs 96 | ) 97 | 98 | def run_container(self, *args, **kwargs): 99 | container = self.client.create_container(*args, **kwargs) 100 | self.tmp_containers.append(container) 101 | self.client.start(container) 102 | exitcode = self.client.wait(container)['StatusCode'] 103 | 104 | if exitcode != 0: 105 | output = self.client.logs(container) 106 | raise Exception( 107 | f"Container exited with code {exitcode}:\n{output}") 108 | 109 | return container 110 | 111 | def create_and_start(self, image=TEST_IMG, command='top', **kwargs): 112 | container = self.client.create_container( 113 | image=image, command=command, **kwargs) 114 | self.tmp_containers.append(container) 115 | self.client.start(container) 116 | return container 117 | 118 | def execute(self, container, cmd, exit_code=0, **kwargs): 119 | exc = self.client.exec_create(container, cmd, **kwargs) 120 | output = self.client.exec_start(exc) 121 | actual_exit_code = self.client.exec_inspect(exc)['ExitCode'] 122 | msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format( 123 | " ".join(cmd), exit_code, actual_exit_code, output) 124 | assert actual_exit_code == exit_code, msg 125 | 126 | def init_swarm(self, **kwargs): 127 | return self._init_swarm(self.client, **kwargs) 128 | -------------------------------------------------------------------------------- /tests/integration/client_test.py: -------------------------------------------------------------------------------- 1 | import threading 2 | import unittest 3 | from 
datetime import datetime, timedelta 4 | 5 | import docker 6 | 7 | from ..helpers import requires_api_version 8 | from .base import TEST_API_VERSION 9 | 10 | 11 | class ClientTest(unittest.TestCase): 12 | client = docker.from_env(version=TEST_API_VERSION) 13 | 14 | def test_info(self): 15 | info = self.client.info() 16 | assert 'ID' in info 17 | assert 'Name' in info 18 | 19 | def test_ping(self): 20 | assert self.client.ping() is True 21 | 22 | def test_version(self): 23 | assert 'Version' in self.client.version() 24 | 25 | @requires_api_version('1.25') 26 | def test_df(self): 27 | data = self.client.df() 28 | assert 'LayersSize' in data 29 | assert 'Containers' in data 30 | assert 'Volumes' in data 31 | assert 'Images' in data 32 | 33 | 34 | class CancellableEventsTest(unittest.TestCase): 35 | client = docker.from_env(version=TEST_API_VERSION) 36 | 37 | def test_cancel_events(self): 38 | start = datetime.now() 39 | 40 | events = self.client.events(until=start + timedelta(seconds=5)) 41 | 42 | cancel_thread = threading.Timer(2, events.close) 43 | cancel_thread.start() 44 | 45 | for _ in events: 46 | pass 47 | 48 | self.assertLess(datetime.now() - start, timedelta(seconds=3)) 49 | -------------------------------------------------------------------------------- /tests/integration/conftest.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import warnings 3 | 4 | import pytest 5 | 6 | import docker.errors 7 | from docker.utils import kwargs_from_env 8 | 9 | from .base import TEST_IMG 10 | 11 | 12 | @pytest.fixture(autouse=True, scope='session') 13 | def setup_test_session(): 14 | warnings.simplefilter('error') 15 | c = docker.APIClient(version='auto', **kwargs_from_env()) 16 | try: 17 | c.inspect_image(TEST_IMG) 18 | except docker.errors.NotFound: 19 | print(f"\npulling {TEST_IMG}", file=sys.stderr) 20 | for data in c.pull(TEST_IMG, stream=True, decode=True): 21 | status = data.get("status") 22 | progress = 
data.get("progress") 23 | detail = f"{status} - {progress}" 24 | print(detail, file=sys.stderr) 25 | 26 | # Double make sure we now have busybox 27 | c.inspect_image(TEST_IMG) 28 | c.close() 29 | -------------------------------------------------------------------------------- /tests/integration/context_api_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import tempfile 3 | 4 | import pytest 5 | 6 | from docker import errors 7 | from docker.context import ContextAPI 8 | from docker.tls import TLSConfig 9 | 10 | from .base import BaseAPIIntegrationTest 11 | 12 | 13 | class ContextLifecycleTest(BaseAPIIntegrationTest): 14 | def test_lifecycle(self): 15 | assert ContextAPI.get_context().Name == "default" 16 | assert not ContextAPI.get_context("test") 17 | assert ContextAPI.get_current_context().Name == "default" 18 | 19 | dirpath = tempfile.mkdtemp() 20 | ca = tempfile.NamedTemporaryFile( 21 | prefix=os.path.join(dirpath, "ca.pem"), mode="r") 22 | cert = tempfile.NamedTemporaryFile( 23 | prefix=os.path.join(dirpath, "cert.pem"), mode="r") 24 | key = tempfile.NamedTemporaryFile( 25 | prefix=os.path.join(dirpath, "key.pem"), mode="r") 26 | 27 | # create context 'test 28 | docker_tls = TLSConfig( 29 | client_cert=(cert.name, key.name), 30 | ca_cert=ca.name) 31 | ContextAPI.create_context( 32 | "test", tls_cfg=docker_tls) 33 | 34 | # check for a context 'test' in the context store 35 | assert any(ctx.Name == "test" for ctx in ContextAPI.contexts()) 36 | # retrieve a context object for 'test' 37 | assert ContextAPI.get_context("test") 38 | # remove context 39 | ContextAPI.remove_context("test") 40 | with pytest.raises(errors.ContextNotFound): 41 | ContextAPI.inspect_context("test") 42 | # check there is no 'test' context in store 43 | assert not ContextAPI.get_context("test") 44 | 45 | ca.close() 46 | key.close() 47 | cert.close() 48 | 49 | def test_context_remove(self): 50 | ContextAPI.create_context("test") 51 | 
assert ContextAPI.inspect_context("test")["Name"] == "test" 52 | 53 | ContextAPI.remove_context("test") 54 | with pytest.raises(errors.ContextNotFound): 55 | ContextAPI.inspect_context("test") 56 | 57 | def test_load_context_without_orchestrator(self): 58 | ContextAPI.create_context("test") 59 | ctx = ContextAPI.get_context("test") 60 | assert ctx 61 | assert ctx.Name == "test" 62 | assert ctx.Orchestrator is None 63 | -------------------------------------------------------------------------------- /tests/integration/credentials/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/docker/docker-py/526a9db743a80174aabb9b43dbb8b697b98ef497/tests/integration/credentials/__init__.py -------------------------------------------------------------------------------- /tests/integration/credentials/create_gpg_key.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/sh 2 | haveged 3 | gpg --batch --gen-key <<-EOF 4 | %echo Generating a standard key 5 | Key-Type: DSA 6 | Key-Length: 1024 7 | Subkey-Type: ELG-E 8 | Subkey-Length: 1024 9 | Name-Real: Sakuya Izayoi 10 | Name-Email: sakuya@gensokyo.jp 11 | Expire-Date: 0 12 | EOF -------------------------------------------------------------------------------- /tests/integration/credentials/store_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | import random 3 | import shutil 4 | import sys 5 | 6 | import pytest 7 | 8 | from docker.credentials import ( 9 | DEFAULT_LINUX_STORE, 10 | DEFAULT_OSX_STORE, 11 | CredentialsNotFound, 12 | Store, 13 | StoreError, 14 | ) 15 | 16 | 17 | class TestStore: 18 | def teardown_method(self): 19 | for server in self.tmp_keys: 20 | try: 21 | self.store.erase(server) 22 | except StoreError: 23 | pass 24 | 25 | def setup_method(self): 26 | self.tmp_keys = [] 27 | if sys.platform.startswith('linux'): 28 | if 
shutil.which(f"docker-credential-{DEFAULT_LINUX_STORE}"): 29 | self.store = Store(DEFAULT_LINUX_STORE) 30 | elif shutil.which('docker-credential-pass'): 31 | self.store = Store('pass') 32 | else: 33 | raise Exception('No supported docker-credential store in PATH') 34 | elif sys.platform.startswith('darwin'): 35 | self.store = Store(DEFAULT_OSX_STORE) 36 | 37 | def get_random_servername(self): 38 | res = f'pycreds_test_{random.getrandbits(32):x}' 39 | self.tmp_keys.append(res) 40 | return res 41 | 42 | def test_store_and_get(self): 43 | key = self.get_random_servername() 44 | self.store.store(server=key, username='user', secret='pass') 45 | data = self.store.get(key) 46 | assert data == { 47 | 'ServerURL': key, 48 | 'Username': 'user', 49 | 'Secret': 'pass' 50 | } 51 | 52 | def test_get_nonexistent(self): 53 | key = self.get_random_servername() 54 | with pytest.raises(CredentialsNotFound): 55 | self.store.get(key) 56 | 57 | def test_store_and_erase(self): 58 | key = self.get_random_servername() 59 | self.store.store(server=key, username='user', secret='pass') 60 | self.store.erase(key) 61 | with pytest.raises(CredentialsNotFound): 62 | self.store.get(key) 63 | 64 | def test_unicode_strings(self): 65 | key = self.get_random_servername() 66 | key = key 67 | self.store.store(server=key, username='user', secret='pass') 68 | data = self.store.get(key) 69 | assert data 70 | self.store.erase(key) 71 | with pytest.raises(CredentialsNotFound): 72 | self.store.get(key) 73 | 74 | def test_list(self): 75 | names = (self.get_random_servername(), self.get_random_servername()) 76 | self.store.store(names[0], username='sakuya', secret='izayoi') 77 | self.store.store(names[1], username='reimu', secret='hakurei') 78 | data = self.store.list() 79 | assert names[0] in data 80 | assert data[names[0]] == 'sakuya' 81 | assert names[1] in data 82 | assert data[names[1]] == 'reimu' 83 | 84 | def test_execute_with_env_override(self): 85 | self.store.exe = 'env' 86 | self.store.environment = 
{'FOO': 'bar'} 87 | data = self.store._execute('--null', '') 88 | assert b'\0FOO=bar\0' in data 89 | assert 'FOO' not in os.environ 90 | 91 | def test_unavailable_store(self): 92 | some_unavailable_store = None 93 | with pytest.warns(UserWarning): 94 | some_unavailable_store = Store('that-does-not-exist') 95 | with pytest.raises(StoreError): 96 | some_unavailable_store.get('anything-this-does-not-matter') 97 | -------------------------------------------------------------------------------- /tests/integration/credentials/utils_test.py: -------------------------------------------------------------------------------- 1 | import os 2 | from unittest import mock 3 | 4 | from docker.credentials.utils import create_environment_dict 5 | 6 | 7 | @mock.patch.dict(os.environ) 8 | def test_create_environment_dict(): 9 | base = {'FOO': 'bar', 'BAZ': 'foobar'} 10 | os.environ = base # noqa: B003 11 | assert create_environment_dict({'FOO': 'baz'}) == { 12 | 'FOO': 'baz', 'BAZ': 'foobar', 13 | } 14 | assert create_environment_dict({'HELLO': 'world'}) == { 15 | 'FOO': 'bar', 'BAZ': 'foobar', 'HELLO': 'world', 16 | } 17 | 18 | assert os.environ == base 19 | -------------------------------------------------------------------------------- /tests/integration/errors_test.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from docker.errors import APIError 4 | 5 | from .base import TEST_IMG, BaseAPIIntegrationTest 6 | 7 | 8 | class ErrorsTest(BaseAPIIntegrationTest): 9 | def test_api_error_parses_json(self): 10 | container = self.client.create_container(TEST_IMG, ['sleep', '10']) 11 | self.client.start(container['Id']) 12 | with pytest.raises(APIError) as cm: 13 | self.client.remove_container(container['Id']) 14 | explanation = cm.value.explanation.lower() 15 | assert 'stop the container before' in explanation 16 | assert '{"message":' not in explanation 17 | self.client.remove_container(container['Id'], force=True) 18 | 
-------------------------------------------------------------------------------- /tests/integration/models_networks_test.py: -------------------------------------------------------------------------------- 1 | import docker 2 | 3 | from .. import helpers 4 | from .base import TEST_API_VERSION, BaseIntegrationTest 5 | 6 | 7 | class NetworkCollectionTest(BaseIntegrationTest): 8 | 9 | def test_create(self): 10 | client = docker.from_env(version=TEST_API_VERSION) 11 | name = helpers.random_name() 12 | network = client.networks.create(name, labels={'foo': 'bar'}) 13 | self.tmp_networks.append(network.id) 14 | assert network.name == name 15 | assert network.attrs['Labels']['foo'] == "bar" 16 | 17 | def test_get(self): 18 | client = docker.from_env(version=TEST_API_VERSION) 19 | name = helpers.random_name() 20 | network_id = client.networks.create(name).id 21 | self.tmp_networks.append(network_id) 22 | network = client.networks.get(network_id) 23 | assert network.name == name 24 | 25 | def test_list_remove(self): 26 | client = docker.from_env(version=TEST_API_VERSION) 27 | name = helpers.random_name() 28 | network = client.networks.create(name) 29 | self.tmp_networks.append(network.id) 30 | assert network.id in [n.id for n in client.networks.list()] 31 | assert network.id not in [ 32 | n.id for n in 33 | client.networks.list(ids=["fdhjklfdfdshjkfds"]) 34 | ] 35 | assert network.id in [ 36 | n.id for n in 37 | client.networks.list(ids=[network.id]) 38 | ] 39 | assert network.id not in [ 40 | n.id for n in 41 | client.networks.list(names=["fdshjklfdsjhkl"]) 42 | ] 43 | assert network.id in [ 44 | n.id for n in 45 | client.networks.list(names=[name]) 46 | ] 47 | network.remove() 48 | assert network.id not in [n.id for n in client.networks.list()] 49 | 50 | 51 | class NetworkTest(BaseIntegrationTest): 52 | 53 | def test_connect_disconnect(self): 54 | client = docker.from_env(version=TEST_API_VERSION) 55 | network = client.networks.create(helpers.random_name()) 56 | 
self.tmp_networks.append(network.id) 57 | container = client.containers.create("alpine", "sleep 300") 58 | self.tmp_containers.append(container.id) 59 | assert network.containers == [] 60 | network.connect(container) 61 | container.start() 62 | assert client.networks.get(network.id).containers == [container] 63 | network_containers = [ 64 | c 65 | for net in client.networks.list(ids=[network.id], greedy=True) 66 | for c in net.containers 67 | ] 68 | assert network_containers == [container] 69 | network.disconnect(container) 70 | assert network.containers == [] 71 | assert client.networks.get(network.id).containers == [] 72 | -------------------------------------------------------------------------------- /tests/integration/models_nodes_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import docker 4 | 5 | from .. import helpers 6 | from .base import TEST_API_VERSION 7 | 8 | 9 | class NodesTest(unittest.TestCase): 10 | def setUp(self): 11 | helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION)) 12 | 13 | def tearDown(self): 14 | helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION)) 15 | 16 | def test_list_get_update(self): 17 | client = docker.from_env(version=TEST_API_VERSION) 18 | client.swarm.init('127.0.0.1', listen_addr=helpers.swarm_listen_addr()) 19 | nodes = client.nodes.list() 20 | assert len(nodes) == 1 21 | assert nodes[0].attrs['Spec']['Role'] == 'manager' 22 | 23 | node = client.nodes.get(nodes[0].id) 24 | assert node.id == nodes[0].id 25 | assert node.attrs['Spec']['Role'] == 'manager' 26 | assert node.version > 0 27 | 28 | node = client.nodes.list()[0] 29 | assert not node.attrs['Spec'].get('Labels') 30 | node.update({ 31 | 'Availability': 'active', 32 | 'Name': 'node-name', 33 | 'Role': 'manager', 34 | 'Labels': {'foo': 'bar'} 35 | }) 36 | node.reload() 37 | assert node.attrs['Spec']['Labels'] == {'foo': 'bar'} 38 | 
-------------------------------------------------------------------------------- /tests/integration/models_resources_test.py: -------------------------------------------------------------------------------- 1 | import docker 2 | 3 | from .base import TEST_API_VERSION, BaseIntegrationTest 4 | 5 | 6 | class ModelTest(BaseIntegrationTest): 7 | 8 | def test_reload(self): 9 | client = docker.from_env(version=TEST_API_VERSION) 10 | container = client.containers.run("alpine", "sleep 300", detach=True) 11 | self.tmp_containers.append(container.id) 12 | first_started_at = container.attrs['State']['StartedAt'] 13 | container.kill() 14 | container.start() 15 | assert container.attrs['State']['StartedAt'] == first_started_at 16 | container.reload() 17 | assert container.attrs['State']['StartedAt'] != first_started_at 18 | -------------------------------------------------------------------------------- /tests/integration/models_swarm_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | import pytest 4 | 5 | import docker 6 | 7 | from .. 
import helpers 8 | from .base import TEST_API_VERSION 9 | 10 | 11 | class SwarmTest(unittest.TestCase): 12 | def setUp(self): 13 | helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION)) 14 | 15 | def tearDown(self): 16 | helpers.force_leave_swarm(docker.from_env(version=TEST_API_VERSION)) 17 | 18 | def test_init_update_leave(self): 19 | client = docker.from_env(version=TEST_API_VERSION) 20 | client.swarm.init( 21 | advertise_addr='127.0.0.1', snapshot_interval=5000, 22 | listen_addr=helpers.swarm_listen_addr() 23 | ) 24 | assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 5000 25 | client.swarm.update(snapshot_interval=10000) 26 | assert client.swarm.attrs['Spec']['Raft']['SnapshotInterval'] == 10000 27 | assert client.swarm.id 28 | assert client.swarm.leave(force=True) 29 | with pytest.raises(docker.errors.APIError) as cm: 30 | client.swarm.reload() 31 | assert ( 32 | cm.value.response.status_code == 406 or 33 | cm.value.response.status_code == 503 34 | ) 35 | 36 | def test_join_on_already_joined_swarm(self): 37 | client = docker.from_env(version=TEST_API_VERSION) 38 | client.swarm.init() 39 | join_token = client.swarm.attrs['JoinTokens']['Manager'] 40 | with pytest.raises(docker.errors.APIError) as cm: 41 | client.swarm.join( 42 | remote_addrs=['127.0.0.1'], 43 | join_token=join_token, 44 | ) 45 | assert cm.value.response.status_code == 503 46 | assert 'This node is already part of a swarm.' 
in cm.value.explanation 47 | -------------------------------------------------------------------------------- /tests/integration/models_volumes_test.py: -------------------------------------------------------------------------------- 1 | import docker 2 | 3 | from .base import TEST_API_VERSION, BaseIntegrationTest 4 | 5 | 6 | class VolumesTest(BaseIntegrationTest): 7 | def test_create_get(self): 8 | client = docker.from_env(version=TEST_API_VERSION) 9 | volume = client.volumes.create( 10 | 'dockerpytest_1', 11 | driver='local', 12 | labels={'labelkey': 'labelvalue'} 13 | ) 14 | self.tmp_volumes.append(volume.id) 15 | assert volume.id 16 | assert volume.name == 'dockerpytest_1' 17 | assert volume.attrs['Labels'] == {'labelkey': 'labelvalue'} 18 | 19 | volume = client.volumes.get(volume.id) 20 | assert volume.name == 'dockerpytest_1' 21 | 22 | def test_list_remove(self): 23 | client = docker.from_env(version=TEST_API_VERSION) 24 | volume = client.volumes.create('dockerpytest_1') 25 | self.tmp_volumes.append(volume.id) 26 | assert volume in client.volumes.list() 27 | assert volume in client.volumes.list(filters={'name': 'dockerpytest_'}) 28 | assert volume not in client.volumes.list(filters={'name': 'foobar'}) 29 | 30 | volume.remove() 31 | assert volume not in client.volumes.list() 32 | -------------------------------------------------------------------------------- /tests/integration/regression_test.py: -------------------------------------------------------------------------------- 1 | import io 2 | import random 3 | 4 | import pytest 5 | 6 | import docker 7 | 8 | from .base import TEST_IMG, BaseAPIIntegrationTest 9 | 10 | 11 | class TestRegressions(BaseAPIIntegrationTest): 12 | @pytest.mark.xfail(True, reason='Docker API always returns chunked resp') 13 | def test_443_handle_nonchunked_response_in_stream(self): 14 | dfile = io.BytesIO() 15 | with pytest.raises(docker.errors.APIError) as exc: 16 | for _line in self.client.build(fileobj=dfile, tag="a/b/c"): 17 | 
pass 18 | assert exc.value.is_error() 19 | dfile.close() 20 | 21 | def test_542_truncate_ids_client_side(self): 22 | self.client.start( 23 | self.client.create_container(TEST_IMG, ['true']) 24 | ) 25 | result = self.client.containers(all=True, trunc=True) 26 | assert len(result[0]['Id']) == 12 27 | 28 | def test_647_support_doubleslash_in_image_names(self): 29 | with pytest.raises(docker.errors.APIError): 30 | self.client.inspect_image('gensokyo.jp//kirisame') 31 | 32 | def test_649_handle_timeout_value_none(self): 33 | self.client.timeout = None 34 | ctnr = self.client.create_container(TEST_IMG, ['sleep', '2']) 35 | self.client.start(ctnr) 36 | self.client.stop(ctnr) 37 | 38 | def test_715_handle_user_param_as_int_value(self): 39 | ctnr = self.client.create_container(TEST_IMG, ['id', '-u'], user=1000) 40 | self.client.start(ctnr) 41 | self.client.wait(ctnr) 42 | logs = self.client.logs(ctnr) 43 | logs = logs.decode('utf-8') 44 | assert logs == '1000\n' 45 | 46 | def test_792_explicit_port_protocol(self): 47 | 48 | tcp_port, udp_port = random.sample(range(9999, 32000), 2) 49 | ctnr = self.client.create_container( 50 | TEST_IMG, ['sleep', '9999'], ports=[2000, (2000, 'udp')], 51 | host_config=self.client.create_host_config( 52 | port_bindings={'2000/tcp': tcp_port, '2000/udp': udp_port} 53 | ) 54 | ) 55 | self.tmp_containers.append(ctnr) 56 | self.client.start(ctnr) 57 | assert self.client.port( 58 | ctnr, 2000 59 | )[0]['HostPort'] == str(tcp_port) 60 | assert self.client.port( 61 | ctnr, '2000/tcp' 62 | )[0]['HostPort'] == str(tcp_port) 63 | assert self.client.port( 64 | ctnr, '2000/udp' 65 | )[0]['HostPort'] == str(udp_port) 66 | -------------------------------------------------------------------------------- /tests/integration/testdata/dummy-plugin/config.json: -------------------------------------------------------------------------------- 1 | { 2 | "description": "Dummy test plugin for docker python SDK", 3 | "documentation": 
import os
import shutil
import unittest

import pytest

import docker
from docker.utils import kwargs_from_env

from .. import helpers

TEST_IMG = 'alpine:3.10'
TEST_API_VERSION = os.environ.get('DOCKER_TEST_API_VERSION')


class BaseIntegrationTest(unittest.TestCase):
    """
    A base class for integration test cases. It cleans up the Docker server
    after itself.
    """

    def setUp(self):
        # Per-test registries of resources to dispose of in tearDown.
        for registry in ('tmp_imgs', 'tmp_containers', 'tmp_folders',
                         'tmp_volumes', 'tmp_networks', 'tmp_plugins',
                         'tmp_secrets', 'tmp_configs'):
            setattr(self, registry, [])

    def tearDown(self):
        cleanup = docker.from_env(version=TEST_API_VERSION,
                                  use_ssh_client=True)

        def best_effort(action, *args, **kwargs):
            # A failed removal must never mask the real test result.
            try:
                action(*args, **kwargs)
            except docker.errors.APIError:
                pass

        try:
            for image in self.tmp_imgs:
                best_effort(cleanup.api.remove_image, image)
            for container in self.tmp_containers:
                best_effort(cleanup.api.remove_container, container,
                            force=True, v=True)
            for network in self.tmp_networks:
                best_effort(cleanup.api.remove_network, network)
            for volume in self.tmp_volumes:
                best_effort(cleanup.api.remove_volume, volume)
            for secret in self.tmp_secrets:
                best_effort(cleanup.api.remove_secret, secret)
            for config in self.tmp_configs:
                best_effort(cleanup.api.remove_config, config)
            for folder in self.tmp_folders:
                shutil.rmtree(folder)
        finally:
            cleanup.close()


@pytest.mark.skipif(not os.environ.get('DOCKER_HOST', '').startswith('ssh://'),
                    reason='DOCKER_HOST is not an SSH target')
class BaseAPIIntegrationTest(BaseIntegrationTest):
    """
    A test case for `APIClient` integration tests. It sets up an `APIClient`
    as `self.client`.
    """

    @classmethod
    def setUpClass(cls):
        cls.client = cls.get_client_instance()
        cls.client.pull(TEST_IMG)

    def tearDown(self):
        super().tearDown()
        self.client.close()

    @staticmethod
    def get_client_instance():
        # Connection settings (DOCKER_HOST etc.) come from the environment.
        return docker.APIClient(
            version=TEST_API_VERSION,
            timeout=60,
            use_ssh_client=True,
            **kwargs_from_env()
        )

    @staticmethod
    def _init_swarm(client, **kwargs):
        return client.init_swarm(
            '127.0.0.1', listen_addr=helpers.swarm_listen_addr(), **kwargs
        )

    def run_container(self, *args, **kwargs):
        """Create, start and wait on a container; raise on non-zero exit."""
        container = self.client.create_container(*args, **kwargs)
        self.tmp_containers.append(container)
        self.client.start(container)
        exitcode = self.client.wait(container)['StatusCode']

        if exitcode != 0:
            output = self.client.logs(container)
            raise Exception(
                f"Container exited with code {exitcode}:\n{output}")

        return container

    def create_and_start(self, image=TEST_IMG, command='top', **kwargs):
        """Create and start a container, registering it for cleanup."""
        container = self.client.create_container(
            image=image, command=command, **kwargs)
        self.tmp_containers.append(container)
        self.client.start(container)
        return container

    def execute(self, container, cmd, exit_code=0, **kwargs):
        """Exec `cmd` inside `container` and assert on its exit code."""
        exec_id = self.client.exec_create(container, cmd, **kwargs)
        output = self.client.exec_start(exec_id)
        actual_exit_code = self.client.exec_inspect(exec_id)['ExitCode']
        msg = "Expected `{}` to exit with code {} but returned {}:\n{}".format(
            " ".join(cmd), exit_code, actual_exit_code, output)
        assert actual_exit_code == exit_code, msg

    def init_swarm(self, **kwargs):
        return self._init_swarm(self.client, **kwargs)
-------------------------------------------------------------------------------- 1 | -----BEGIN OPENSSH PRIVATE KEY----- 2 | b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn 3 | NhAAAAAwEAAQAAAYEAvwYl5Gy/aBGxNzyb9UtqddlyuQR1t6kE+UX/gmBtAE2MjDyFTOvi 4 | F1cn90DcaZ7z172zwUCQrNKh3rj8GcthrG7d+UJ5pYK3MxT4l16LAg9jfsK3DkD2Rri40M 5 | lFD9siUVUky6afM5NhfMN5WhiAdyZNYVHDFBMXpisUGJPy+NG+a1ypGqy5OWsAbonI0UrT 6 | K3IT0R2dp+9eUxvs0r3/LQf1B0VymD6movyXuXoh98hlMwmOM5/rhKKgBW+FfJaSI/EcNx 7 | F5gmFcBtL4PuOECENoCZyIU5XJscJMp72Z/e57oODS5RiUPrAwpyLzGqcnB3xpDZQc93xb 8 | bvzkbMT6WW0zYP/Z6Gt2X/DqSMLxPxRzT6g3LRpbcMRIEMY+XxN+MdH2JxdPLXowFCSQmR 9 | N2LBoDWm7EuKQ/pEYSPN3hWb4I90NQHkytFfW0TO47o3HPUc/lfRm+c2BBzf5fD8RFZY9D 10 | pVEX/WZZJzUCvMUYefe4w1031UCgjDv50Wlh9m6tAAAFeM2kMyHNpDMhAAAAB3NzaC1yc2 11 | EAAAGBAL8GJeRsv2gRsTc8m/VLanXZcrkEdbepBPlF/4JgbQBNjIw8hUzr4hdXJ/dA3Gme 12 | 89e9s8FAkKzSod64/BnLYaxu3flCeaWCtzMU+JdeiwIPY37Ctw5A9ka4uNDJRQ/bIlFVJM 13 | umnzOTYXzDeVoYgHcmTWFRwxQTF6YrFBiT8vjRvmtcqRqsuTlrAG6JyNFK0ytyE9Ednafv 14 | XlMb7NK9/y0H9QdFcpg+pqL8l7l6IffIZTMJjjOf64SioAVvhXyWkiPxHDcReYJhXAbS+D 15 | 7jhAhDaAmciFOVybHCTKe9mf3ue6Dg0uUYlD6wMKci8xqnJwd8aQ2UHPd8W2785GzE+llt 16 | M2D/2ehrdl/w6kjC8T8Uc0+oNy0aW3DESBDGPl8TfjHR9icXTy16MBQkkJkTdiwaA1puxL 17 | ikP6RGEjzd4Vm+CPdDUB5MrRX1tEzuO6Nxz1HP5X0ZvnNgQc3+Xw/ERWWPQ6VRF/1mWSc1 18 | ArzFGHn3uMNdN9VAoIw7+dFpYfZurQAAAAMBAAEAAAGBAKtnotyiz+Vb6r57vh2OvEpfAd 19 | gOrmpMWVArhSfBykz5SOIU9C+fgVIcPJpaMuz7WiX97Ku9eZP5tJGbP2sN2ejV2ovtICZp 20 | cmV9rcp1ZRpGIKr/oS5DEDlJS1zdHQErSlHcqpWqPzQSTOmcpOk5Dxza25g1u2vp7dCG2x 21 | NqvhySZ+ECViK/Vby1zL9jFzTlhTJ4vFtpzauA2AyPBCPdpHkNqMoLgNYncXLSYHpnos8p 22 | m9T+AAFGwBhVrGz0Mr0mhRDnV/PgbKplKT7l+CGceb8LuWmj/vzuP5Wv6dglw3hJnT2V5p 23 | nTBp3dJ6R006+yvr5T/Xb+ObGqFfgfenjLfHjqbJ/gZdGWt4Le84g8tmSkjJBJ2Yj3kynQ 24 | sdfv9k7JJ4t5euoje0XW0YVN1ih5DdyO4hHDRD1lSTFYT5Gl2sCTt28qsMC12rWzFkezJo 25 | Fhewq2Ddtg4AK6SxqH4rFQCmgOR/ci7jv9TXS9xEQxYliyN5aNymRTyXmwqBIzjNKR6QAA 26 | 
AMEAxpme2upng9LS6Epa83d1gnWUilYPbpb1C8+1FgpnBv9zkjFE1vY0Vu4i9LcLGlCQ0x 27 | PB1Z16TQlEluqiSuSA0eyaWSQBF9NyGsOCOZ63lpJs/2FRBfcbUvHhv8/g1fv/xvI+FnE+ 28 | DoAhz8V3byU8HUZer7pQY3hSxisdYdsaromxC8DSSPFQoxpxwh7WuP4c3veWkdL13h4fSN 29 | khGr3G1XGfsZOu6V6F1i7yMU6OcwBAxzPsHqZv66sT8lE6n4xjAAAAwQDzAaVaJqZ2ROoF 30 | loltJZUtE7o+zpoDzjOJyGYaCYTU4dHPN1aeYBjw8QfmJhdmZfJp9AeJDB/W0wzoHi2ONI 31 | chnQ1EdbCLk9pvA7rhfVdZaxPeHwniDp2iA/wZKTRG3hav9nEzS72uXuZprCsbBvGXeR0z 32 | iuIx5odVXG8qyuI9lDY6B/IoLg7zd+V6iw9mqWYlLLsgHiAvg32LAT4j0KoTufOqpnxqTQ 33 | P2EguTmxDWkfQmbEHdJvbD2tLQ90zMlwMAAADBAMk88wOA1i/TibH5gm/lAtKPcNKbrHfk 34 | 7O9gdSZd2HL0fLjptpOplS89Y7muTElsRDRGiKq+7KV/sxQRNcITkxdTKu8CKnftFWHrLk 35 | 9WHWVHXbu9h8ttsKeUr9i27ojxpe5I82of8k7fJTg1LxMnGzuDZfq1BGsQnOWrY7r1Yjcd 36 | 8EtSrwOB+J/S4U+rR6kwUEFYeBkhE599P1EtHTCm8kWh368di9Q+Y/VIOa3qRx4hxuiCLI 37 | qj4ZpdVMk2cCNcjwAAAAAB 38 | -----END OPENSSH PRIVATE KEY----- 39 | -------------------------------------------------------------------------------- /tests/ssh/config/client/id_rsa.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQC/BiXkbL9oEbE3PJv1S2p12XK5BHW3qQT5Rf+CYG0ATYyMPIVM6+IXVyf3QNxpnvPXvbPBQJCs0qHeuPwZy2Gsbt35QnmlgrczFPiXXosCD2N+wrcOQPZGuLjQyUUP2yJRVSTLpp8zk2F8w3laGIB3Jk1hUcMUExemKxQYk/L40b5rXKkarLk5awBuicjRStMrchPRHZ2n715TG+zSvf8tB/UHRXKYPqai/Je5eiH3yGUzCY4zn+uEoqAFb4V8lpIj8Rw3EXmCYVwG0vg+44QIQ2gJnIhTlcmxwkynvZn97nug4NLlGJQ+sDCnIvMapycHfGkNlBz3fFtu/ORsxPpZbTNg/9noa3Zf8OpIwvE/FHNPqDctGltwxEgQxj5fE34x0fYnF08tejAUJJCZE3YsGgNabsS4pD+kRhI83eFZvgj3Q1AeTK0V9bRM7jujcc9Rz+V9Gb5zYEHN/l8PxEVlj0OlURf9ZlknNQK8xRh597jDXTfVQKCMO/nRaWH2bq0= 2 | -------------------------------------------------------------------------------- /tests/ssh/config/server/known_ed25519: -------------------------------------------------------------------------------- 1 | -----BEGIN OPENSSH PRIVATE KEY----- 2 | b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW 3 | 
QyNTUxOQAAACCGsfNXVP18N7XC6IQGuuxXQRbTxlPGLj+5/CByj9eg4QAAAJgIMffcCDH3 4 | 3AAAAAtzc2gtZWQyNTUxOQAAACCGsfNXVP18N7XC6IQGuuxXQRbTxlPGLj+5/CByj9eg4Q 5 | AAAEDeXnt5AuNk4oTHjMU1vUsEwh64fuEPu4hXsG6wCVt/6Iax81dU/Xw3tcLohAa67FdB 6 | FtPGU8YuP7n8IHKP16DhAAAAEXJvb3RAMGRkZmQyMWRkYjM3AQIDBA== 7 | -----END OPENSSH PRIVATE KEY----- 8 | -------------------------------------------------------------------------------- /tests/ssh/config/server/known_ed25519.pub: -------------------------------------------------------------------------------- 1 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIax81dU/Xw3tcLohAa67FdBFtPGU8YuP7n8IHKP16Dh docker-py integration tests known 2 | -------------------------------------------------------------------------------- /tests/ssh/config/server/sshd_config: -------------------------------------------------------------------------------- 1 | IgnoreUserKnownHosts yes 2 | PubkeyAuthentication yes 3 | PermitRootLogin yes 4 | -------------------------------------------------------------------------------- /tests/ssh/config/server/unknown_ed25519: -------------------------------------------------------------------------------- 1 | -----BEGIN OPENSSH PRIVATE KEY----- 2 | b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW 3 | QyNTUxOQAAACCGsfNXVP18N7XC6IQGuuxXQRbTxlPGLj+5/CByj9eg4QAAAJgIMffcCDH3 4 | 3AAAAAtzc2gtZWQyNTUxOQAAACCGsfNXVP18N7XC6IQGuuxXQRbTxlPGLj+5/CByj9eg4Q 5 | AAAEDeXnt5AuNk4oTHjMU1vUsEwh64fuEPu4hXsG6wCVt/6Iax81dU/Xw3tcLohAa67FdB 6 | FtPGU8YuP7n8IHKP16DhAAAAEXJvb3RAMGRkZmQyMWRkYjM3AQIDBA== 7 | -----END OPENSSH PRIVATE KEY----- 8 | -------------------------------------------------------------------------------- /tests/ssh/config/server/unknown_ed25519.pub: -------------------------------------------------------------------------------- 1 | ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIIax81dU/Xw3tcLohAa67FdBFtPGU8YuP7n8IHKP16Dh docker-py integration tests unknown 2 | -------------------------------------------------------------------------------- 
import os
import unittest

import paramiko.ssh_exception
import pytest

import docker

from .base import TEST_API_VERSION


class SSHConnectionTest(unittest.TestCase):
    """Connection-level behaviour of the paramiko SSH transport."""

    @pytest.mark.skipif('UNKNOWN_DOCKER_SSH_HOST' not in os.environ,
                        reason='Unknown Docker SSH host not configured')
    def test_ssh_unknown_host(self):
        # Paramiko must reject a host that is absent from known_hosts
        # instead of silently trusting it.
        with self.assertRaises(paramiko.ssh_exception.SSHException) as raised:
            docker.APIClient(
                version=TEST_API_VERSION,
                timeout=60,
                # test only valid with Paramiko
                use_ssh_client=False,
                base_url=os.environ['UNKNOWN_DOCKER_SSH_HOST'],
            )
        self.assertIn('not found in known_hosts', str(raised.exception))
class ExecTest(BaseAPIClientTest):
    """Unit tests for the exec_* endpoints, verified against fake_request."""

    def test_exec_create(self):
        self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])

        posargs, kwargs = fake_request.call_args
        assert posargs[0] == 'POST', url_prefix + 'containers/{}/exec'.format(
            fake_api.FAKE_CONTAINER_ID
        )

        assert json.loads(kwargs['data']) == {
            'Tty': False,
            'AttachStdout': True,
            'Container': fake_api.FAKE_CONTAINER_ID,
            'Cmd': ['ls', '-1'],
            'Privileged': False,
            'AttachStdin': False,
            'AttachStderr': True,
            'User': ''
        }

        assert kwargs['headers'] == {'Content-Type': 'application/json'}

    def test_exec_start(self):
        self.client.exec_start(fake_api.FAKE_EXEC_ID)

        posargs, kwargs = fake_request.call_args
        assert posargs[1] == f"{url_prefix}exec/{fake_api.FAKE_EXEC_ID}/start"

        assert json.loads(kwargs['data']) == {
            'Tty': False,
            'Detach': False,
        }

        # An attached exec requests a hijacked (upgraded) connection.
        assert kwargs['headers'] == {
            'Content-Type': 'application/json',
            'Connection': 'Upgrade',
            'Upgrade': 'tcp'
        }

    def test_exec_start_detached(self):
        self.client.exec_start(fake_api.FAKE_EXEC_ID, detach=True)

        posargs, kwargs = fake_request.call_args
        assert posargs[1] == f"{url_prefix}exec/{fake_api.FAKE_EXEC_ID}/start"

        assert json.loads(kwargs['data']) == {
            'Tty': False,
            'Detach': True
        }

        # No connection upgrade is requested for a detached exec.
        assert kwargs['headers'] == {
            'Content-Type': 'application/json'
        }

    def test_exec_inspect(self):
        self.client.exec_inspect(fake_api.FAKE_EXEC_ID)

        posargs, _ = fake_request.call_args
        assert posargs[1] == f"{url_prefix}exec/{fake_api.FAKE_EXEC_ID}/json"

    def test_exec_resize(self):
        self.client.exec_resize(fake_api.FAKE_EXEC_ID, height=20, width=60)

        fake_request.assert_called_with(
            'POST',
            f"{url_prefix}exec/{fake_api.FAKE_EXEC_ID}/resize",
            params={'h': 20, 'w': 60},
            timeout=DEFAULT_TIMEOUT_SECONDS
        )
'some-value' 51 | } 52 | 53 | @requires_api_version('1.23') 54 | def test_create_volume_with_invalid_labels(self): 55 | name = 'perfectcherryblossom' 56 | with pytest.raises(TypeError): 57 | self.client.create_volume(name, labels=1) 58 | 59 | def test_create_volume_with_driver(self): 60 | name = 'perfectcherryblossom' 61 | driver_name = 'sshfs' 62 | self.client.create_volume(name, driver=driver_name) 63 | args = fake_request.call_args 64 | 65 | assert args[0][0] == 'POST' 66 | assert args[0][1] == f"{url_prefix}volumes/create" 67 | data = json.loads(args[1]['data']) 68 | assert 'Driver' in data 69 | assert data['Driver'] == driver_name 70 | 71 | def test_create_volume_invalid_opts_type(self): 72 | with pytest.raises(TypeError): 73 | self.client.create_volume( 74 | 'perfectcherryblossom', driver_opts='hello=world' 75 | ) 76 | 77 | with pytest.raises(TypeError): 78 | self.client.create_volume( 79 | 'perfectcherryblossom', driver_opts=['hello=world'] 80 | ) 81 | 82 | with pytest.raises(TypeError): 83 | self.client.create_volume( 84 | 'perfectcherryblossom', driver_opts='' 85 | ) 86 | 87 | @requires_api_version('1.24') 88 | def test_create_volume_with_no_specified_name(self): 89 | result = self.client.create_volume(name=None) 90 | assert 'Name' in result 91 | assert result['Name'] is not None 92 | assert 'Driver' in result 93 | assert result['Driver'] == 'local' 94 | assert 'Scope' in result 95 | assert result['Scope'] == 'local' 96 | 97 | def test_inspect_volume(self): 98 | name = 'perfectcherryblossom' 99 | result = self.client.inspect_volume(name) 100 | assert 'Name' in result 101 | assert result['Name'] == name 102 | assert 'Driver' in result 103 | assert result['Driver'] == 'local' 104 | args = fake_request.call_args 105 | 106 | assert args[0][0] == 'GET' 107 | assert args[0][1] == f'{url_prefix}volumes/{name}' 108 | 109 | def test_remove_volume(self): 110 | name = 'perfectcherryblossom' 111 | self.client.remove_volume(name) 112 | args = fake_request.call_args 113 
import unittest

import pytest

import docker
from docker.constants import DEFAULT_NPIPE, DEFAULT_UNIX_SOCKET, IS_WINDOWS_PLATFORM
from docker.context import Context, ContextAPI


class BaseContextTest(unittest.TestCase):
    """Behaviour of the default context and Context host resolution."""

    @pytest.mark.skipif(
        IS_WINDOWS_PLATFORM, reason='Linux specific path check'
    )
    def test_url_compatibility_on_linux(self):
        ctx = Context("test")
        # Host is the default socket URL with the 'unix:' scheme stripped.
        assert ctx.Host == DEFAULT_UNIX_SOCKET[5:]

    @pytest.mark.skipif(
        not IS_WINDOWS_PLATFORM, reason='Windows specific path check'
    )
    def test_url_compatibility_on_windows(self):
        ctx = Context("test")
        assert ctx.Host == DEFAULT_NPIPE

    def test_fail_on_default_context_create(self):
        # "default" is reserved and may not be created explicitly.
        with pytest.raises(docker.errors.ContextException):
            ContextAPI.create_context("default")

    def test_default_in_context_list(self):
        names = [ctx.Name for ctx in ContextAPI.contexts()]
        assert ("default" in names) is True

    def test_get_current_context(self):
        assert ContextAPI.get_current_context().Name == "default"

    def test_https_host(self):
        # tls=True rewrites the tcp:// scheme to https://.
        ctx = Context("test", host="tcp://testdomain:8080", tls=True)
        assert ctx.Host == "https://testdomain:8080"

    def test_context_inspect_without_params(self):
        meta = ContextAPI.inspect_context()
        assert meta["Name"] == "default"
        assert meta["Metadata"]["StackOrchestrator"] == "swarm"
        assert meta["Endpoints"]["docker"]["Host"] in (
            DEFAULT_NPIPE,
            DEFAULT_UNIX_SOCKET[5:],
        )
class CopyReturnMagicMock(mock.MagicMock):
    """
    A MagicMock which deep copies every return value.
    """

    def _mock_call(self, *args, **kwargs):
        result = super()._mock_call(*args, **kwargs)
        # Hand each caller its own copy so that mutating a returned
        # container cannot corrupt the canned fixture shared by later calls.
        if not isinstance(result, (dict, list)):
            return result
        return copy.deepcopy(result)
fake_api.get_fake_network_list()[1], 51 | 'start.return_value': None, 52 | 'wait.return_value': {'StatusCode': 0}, 53 | 'version.return_value': fake_api.get_fake_version() 54 | } 55 | mock_attrs.update(overrides) 56 | mock_client = CopyReturnMagicMock(**mock_attrs) 57 | 58 | mock_client._version = docker.constants.DEFAULT_DOCKER_API_VERSION 59 | return mock_client 60 | 61 | 62 | def make_fake_client(overrides=None): 63 | """ 64 | Returns a Client with a fake APIClient. 65 | """ 66 | client = docker.DockerClient(version=DEFAULT_DOCKER_API_VERSION) 67 | client.api = make_fake_api_client(overrides) 68 | return client 69 | -------------------------------------------------------------------------------- /tests/unit/fake_stat.py: -------------------------------------------------------------------------------- 1 | OBJ = { 2 | "read": "2015-02-11T19:20:46.667237763+02:00", 3 | "network": { 4 | "rx_bytes": 567224, 5 | "rx_packets": 3773, 6 | "rx_errors": 0, 7 | "rx_dropped": 0, 8 | "tx_bytes": 1176, 9 | "tx_packets": 13, 10 | "tx_errors": 0, 11 | "tx_dropped": 0 12 | }, 13 | "cpu_stats": { 14 | "cpu_usage": { 15 | "total_usage": 157260874053, 16 | "percpu_usage": [ 17 | 52196306950, 18 | 24118413549, 19 | 53292684398, 20 | 27653469156 21 | ], 22 | "usage_in_kernelmode": 37140000000, 23 | "usage_in_usermode": 62140000000 24 | }, 25 | "system_cpu_usage": 3.0881377e+14, 26 | "throttling_data": { 27 | "periods": 0, 28 | "throttled_periods": 0, 29 | "throttled_time": 0 30 | } 31 | }, 32 | "memory_stats": { 33 | "usage": 179314688, 34 | "max_usage": 258166784, 35 | "stats": { 36 | "active_anon": 90804224, 37 | "active_file": 2195456, 38 | "cache": 3096576, 39 | "hierarchical_memory_limit": 1.844674407371e+19, 40 | "inactive_anon": 85516288, 41 | "inactive_file": 798720, 42 | "mapped_file": 2646016, 43 | "pgfault": 101034, 44 | "pgmajfault": 1207, 45 | "pgpgin": 115814, 46 | "pgpgout": 75613, 47 | "rss": 176218112, 48 | "rss_huge": 12582912, 49 | "total_active_anon": 90804224, 50 
| "total_active_file": 2195456, 51 | "total_cache": 3096576, 52 | "total_inactive_anon": 85516288, 53 | "total_inactive_file": 798720, 54 | "total_mapped_file": 2646016, 55 | "total_pgfault": 101034, 56 | "total_pgmajfault": 1207, 57 | "total_pgpgin": 115814, 58 | "total_pgpgout": 75613, 59 | "total_rss": 176218112, 60 | "total_rss_huge": 12582912, 61 | "total_unevictable": 0, 62 | "total_writeback": 0, 63 | "unevictable": 0, 64 | "writeback": 0 65 | }, 66 | "failcnt": 0, 67 | "limit": 8039038976 68 | }, 69 | "blkio_stats": { 70 | "io_service_bytes_recursive": [ 71 | { 72 | "major": 8, 73 | "minor": 0, 74 | "op": "Read", 75 | "value": 72843264 76 | }, { 77 | "major": 8, 78 | "minor": 0, 79 | "op": "Write", 80 | "value": 4096 81 | }, { 82 | "major": 8, 83 | "minor": 0, 84 | "op": "Sync", 85 | "value": 4096 86 | }, { 87 | "major": 8, 88 | "minor": 0, 89 | "op": "Async", 90 | "value": 72843264 91 | }, { 92 | "major": 8, 93 | "minor": 0, 94 | "op": "Total", 95 | "value": 72847360 96 | } 97 | ], 98 | "io_serviced_recursive": [ 99 | { 100 | "major": 8, 101 | "minor": 0, 102 | "op": "Read", 103 | "value": 10581 104 | }, { 105 | "major": 8, 106 | "minor": 0, 107 | "op": "Write", 108 | "value": 1 109 | }, { 110 | "major": 8, 111 | "minor": 0, 112 | "op": "Sync", 113 | "value": 1 114 | }, { 115 | "major": 8, 116 | "minor": 0, 117 | "op": "Async", 118 | "value": 10581 119 | }, { 120 | "major": 8, 121 | "minor": 0, 122 | "op": "Total", 123 | "value": 10582 124 | } 125 | ], 126 | "io_queue_recursive": [], 127 | "io_service_time_recursive": [], 128 | "io_wait_time_recursive": [], 129 | "io_merged_recursive": [], 130 | "io_time_recursive": [], 131 | "sectors_recursive": [] 132 | } 133 | } 134 | -------------------------------------------------------------------------------- /tests/unit/models_configs_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from .fake_api import FAKE_CONFIG_NAME 4 | from .fake_api_client 
class CreateConfigsTest(unittest.TestCase):
    """Unit test for the Config model returned by configs.create()."""

    def test_create_config(self):
        client = make_fake_client()
        config = client.configs.create(name="super_config", data="config")
        # The expected value had been reduced to an empty f-string (an
        # HTML-unescaping pass stripped the angle-bracketed text, leaving
        # the FAKE_CONFIG_NAME import unused); restore the
        # "<Type: 'name'>" repr format asserted by the sibling secrets test.
        assert config.__repr__() == f"<Config: '{FAKE_CONFIG_NAME}'>"
import unittest

from .fake_api import FAKE_CONTAINER_ID
from .fake_api_client import make_fake_client


class ModelTest(unittest.TestCase):
    """Shared Model behaviour: reload() and hash/identity semantics."""

    def test_reload(self):
        client = make_fake_client()
        container = client.containers.get(FAKE_CONTAINER_ID)
        container.attrs['Name'] = "oldname"
        container.reload()
        # reload() triggers a second inspect call and replaces stale attrs.
        assert client.api.inspect_container.call_count == 2
        assert container.attrs['Name'] == "foobar"

    def test_hash(self):
        client = make_fake_client()
        bucket = {client.containers.get(FAKE_CONTAINER_ID)}
        assert len(bucket) == 1

        # A second Container with the same id hashes identically.
        bucket.add(client.containers.get(FAKE_CONTAINER_ID))
        assert len(bucket) == 1

        # An Image with the same id is still a distinct model.
        bucket.add(client.images.get(FAKE_CONTAINER_ID))
        assert len(bucket) == 2
class CreateServiceTest(unittest.TestCase):
    """Unit test for the Secret model returned by secrets.create()."""

    def test_secrets_repr(self):
        client = make_fake_client()
        secret = client.secrets.create(name="super_secret", data="secret")
        # The expected value had been reduced to an empty f-string (an
        # HTML-unescaping pass stripped the angle-bracketed text, leaving
        # the FAKE_SECRET_NAME import unused); restore the
        # "<Type: 'name'>" repr format.
        assert secret.__repr__() == f"<Secret: '{FAKE_SECRET_NAME}'>"
import unittest

import docker
from docker.transport.sshconn import SSHSocket


class SSHAdapterTest(unittest.TestCase):
    """URL parsing for the SSH transport (adapter and raw socket)."""

    @staticmethod
    def test_ssh_hostname_prefix_trim():
        # With shell_out=True the ssh:// scheme is stripped while the
        # user@host:port triple is preserved verbatim.
        adapter = docker.transport.SSHHTTPAdapter(
            base_url="ssh://user@hostname:1234", shell_out=True)
        assert adapter.ssh_host == "user@hostname:1234"

    @staticmethod
    def test_ssh_parse_url():
        sock = SSHSocket(host="user@hostname:1234")
        assert (sock.host, sock.port, sock.user) == \
            ("hostname", "1234", "user")

    @staticmethod
    def test_ssh_parse_hostname_only():
        sock = SSHSocket(host="hostname")
        assert (sock.host, sock.port, sock.user) == ("hostname", None, None)

    @staticmethod
    def test_ssh_parse_user_and_hostname():
        sock = SSHSocket(host="user@hostname")
        assert (sock.host, sock.port, sock.user) == ("hostname", None, "user")

    @staticmethod
    def test_ssh_parse_hostname_and_port():
        sock = SSHSocket(host="hostname:22")
        assert (sock.host, sock.port, sock.user) == ("hostname", "22", None)
/tests/unit/swarm_test.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | from ..helpers import requires_api_version 4 | from . import fake_api 5 | from .api_test import BaseAPIClientTest, fake_request, url_prefix 6 | 7 | 8 | class SwarmTest(BaseAPIClientTest): 9 | @requires_api_version('1.24') 10 | def test_node_update(self): 11 | node_spec = { 12 | 'Availability': 'active', 13 | 'Name': 'node-name', 14 | 'Role': 'manager', 15 | 'Labels': {'foo': 'bar'} 16 | } 17 | 18 | self.client.update_node( 19 | node_id=fake_api.FAKE_NODE_ID, version=1, node_spec=node_spec 20 | ) 21 | args = fake_request.call_args 22 | assert args[0][1] == ( 23 | f"{url_prefix}nodes/24ifsmvkjbyhk/update?version=1" 24 | ) 25 | assert json.loads(args[1]['data']) == node_spec 26 | assert args[1]['headers']['Content-Type'] == 'application/json' 27 | 28 | @requires_api_version('1.24') 29 | def test_join_swarm(self): 30 | remote_addr = ['1.2.3.4:2377'] 31 | listen_addr = '2.3.4.5:2377' 32 | join_token = 'A_BEAUTIFUL_JOIN_TOKEN' 33 | 34 | data = { 35 | 'RemoteAddrs': remote_addr, 36 | 'ListenAddr': listen_addr, 37 | 'JoinToken': join_token 38 | } 39 | 40 | self.client.join_swarm( 41 | remote_addrs=remote_addr, 42 | listen_addr=listen_addr, 43 | join_token=join_token 44 | ) 45 | 46 | args = fake_request.call_args 47 | 48 | assert (args[0][1] == f"{url_prefix}swarm/join") 49 | assert (json.loads(args[1]['data']) == data) 50 | assert (args[1]['headers']['Content-Type'] == 'application/json') 51 | 52 | @requires_api_version('1.24') 53 | def test_join_swarm_no_listen_address_takes_default(self): 54 | remote_addr = ['1.2.3.4:2377'] 55 | join_token = 'A_BEAUTIFUL_JOIN_TOKEN' 56 | 57 | data = { 58 | 'RemoteAddrs': remote_addr, 59 | 'ListenAddr': '0.0.0.0:2377', 60 | 'JoinToken': join_token 61 | } 62 | 63 | self.client.join_swarm(remote_addrs=remote_addr, join_token=join_token) 64 | 65 | args = fake_request.call_args 66 | 67 | assert (args[0][1] == 
f"{url_prefix}swarm/join") 68 | assert (json.loads(args[1]['data']) == data) 69 | assert (args[1]['headers']['Content-Type'] == 'application/json') 70 | -------------------------------------------------------------------------------- /tests/unit/testdata/certs/ca.pem: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/docker/docker-py/526a9db743a80174aabb9b43dbb8b697b98ef497/tests/unit/testdata/certs/ca.pem -------------------------------------------------------------------------------- /tests/unit/testdata/certs/cert.pem: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/docker/docker-py/526a9db743a80174aabb9b43dbb8b697b98ef497/tests/unit/testdata/certs/cert.pem -------------------------------------------------------------------------------- /tests/unit/testdata/certs/key.pem: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/docker/docker-py/526a9db743a80174aabb9b43dbb8b697b98ef497/tests/unit/testdata/certs/key.pem -------------------------------------------------------------------------------- /tests/unit/types_containers_test.py: -------------------------------------------------------------------------------- 1 | from docker.types.containers import ContainerConfig 2 | 3 | 4 | def test_uid_0_is_not_elided(): 5 | x = ContainerConfig(image='i', version='v', command='true', user=0) 6 | assert x['User'] == '0' 7 | -------------------------------------------------------------------------------- /tests/unit/utils_config_test.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | import shutil 4 | import tempfile 5 | import unittest 6 | from unittest import mock 7 | 8 | from pytest import fixture, mark 9 | 10 | from docker.utils import config 11 | 12 | 13 | class FindConfigFileTest(unittest.TestCase): 14 | 15 | 
@fixture(autouse=True) 16 | def tmpdir(self, tmpdir): 17 | self.mkdir = tmpdir.mkdir 18 | 19 | def test_find_config_fallback(self): 20 | tmpdir = self.mkdir('test_find_config_fallback') 21 | 22 | with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}): 23 | assert config.find_config_file() is None 24 | 25 | def test_find_config_from_explicit_path(self): 26 | tmpdir = self.mkdir('test_find_config_from_explicit_path') 27 | config_path = tmpdir.ensure('my-config-file.json') 28 | 29 | assert config.find_config_file(str(config_path)) == str(config_path) 30 | 31 | def test_find_config_from_environment(self): 32 | tmpdir = self.mkdir('test_find_config_from_environment') 33 | config_path = tmpdir.ensure('config.json') 34 | 35 | with mock.patch.dict(os.environ, {'DOCKER_CONFIG': str(tmpdir)}): 36 | assert config.find_config_file() == str(config_path) 37 | 38 | @mark.skipif("sys.platform == 'win32'") 39 | def test_find_config_from_home_posix(self): 40 | tmpdir = self.mkdir('test_find_config_from_home_posix') 41 | config_path = tmpdir.ensure('.docker', 'config.json') 42 | 43 | with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}): 44 | assert config.find_config_file() == str(config_path) 45 | 46 | @mark.skipif("sys.platform == 'win32'") 47 | def test_find_config_from_home_legacy_name(self): 48 | tmpdir = self.mkdir('test_find_config_from_home_legacy_name') 49 | config_path = tmpdir.ensure('.dockercfg') 50 | 51 | with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}): 52 | assert config.find_config_file() == str(config_path) 53 | 54 | @mark.skipif("sys.platform != 'win32'") 55 | def test_find_config_from_home_windows(self): 56 | tmpdir = self.mkdir('test_find_config_from_home_windows') 57 | config_path = tmpdir.ensure('.docker', 'config.json') 58 | 59 | with mock.patch.dict(os.environ, {'USERPROFILE': str(tmpdir)}): 60 | assert config.find_config_file() == str(config_path) 61 | 62 | 63 | class LoadConfigTest(unittest.TestCase): 64 | def test_load_config_no_file(self): 65 | 
folder = tempfile.mkdtemp() 66 | self.addCleanup(shutil.rmtree, folder) 67 | cfg = config.load_general_config(folder) 68 | assert cfg is not None 69 | assert isinstance(cfg, dict) 70 | assert not cfg 71 | 72 | def test_load_config_custom_headers(self): 73 | folder = tempfile.mkdtemp() 74 | self.addCleanup(shutil.rmtree, folder) 75 | 76 | dockercfg_path = os.path.join(folder, 'config.json') 77 | config_data = { 78 | 'HttpHeaders': { 79 | 'Name': 'Spike', 80 | 'Surname': 'Spiegel' 81 | }, 82 | } 83 | 84 | with open(dockercfg_path, 'w') as f: 85 | json.dump(config_data, f) 86 | 87 | cfg = config.load_general_config(dockercfg_path) 88 | assert 'HttpHeaders' in cfg 89 | assert cfg['HttpHeaders'] == { 90 | 'Name': 'Spike', 91 | 'Surname': 'Spiegel' 92 | } 93 | 94 | def test_load_config_detach_keys(self): 95 | folder = tempfile.mkdtemp() 96 | self.addCleanup(shutil.rmtree, folder) 97 | dockercfg_path = os.path.join(folder, 'config.json') 98 | config_data = { 99 | 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i' 100 | } 101 | with open(dockercfg_path, 'w') as f: 102 | json.dump(config_data, f) 103 | 104 | cfg = config.load_general_config(dockercfg_path) 105 | assert cfg == config_data 106 | 107 | def test_load_config_from_env(self): 108 | folder = tempfile.mkdtemp() 109 | self.addCleanup(shutil.rmtree, folder) 110 | dockercfg_path = os.path.join(folder, 'config.json') 111 | config_data = { 112 | 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i' 113 | } 114 | with open(dockercfg_path, 'w') as f: 115 | json.dump(config_data, f) 116 | 117 | with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}): 118 | cfg = config.load_general_config(None) 119 | assert cfg == config_data 120 | -------------------------------------------------------------------------------- /tests/unit/utils_json_stream_test.py: -------------------------------------------------------------------------------- 1 | from docker.utils.json_stream import json_splitter, json_stream, stream_as_text 2 | 3 | 4 | class TestJsonSplitter: 
5 | 6 | def test_json_splitter_no_object(self): 7 | data = '{"foo": "bar' 8 | assert json_splitter(data) is None 9 | 10 | def test_json_splitter_with_object(self): 11 | data = '{"foo": "bar"}\n \n{"next": "obj"}' 12 | assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}') 13 | 14 | def test_json_splitter_leading_whitespace(self): 15 | data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}' 16 | assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}') 17 | 18 | 19 | class TestStreamAsText: 20 | 21 | def test_stream_with_non_utf_unicode_character(self): 22 | stream = [b'\xed\xf3\xf3'] 23 | output, = stream_as_text(stream) 24 | assert output == '���' 25 | 26 | def test_stream_with_utf_character(self): 27 | stream = ['ěĝ'.encode()] 28 | output, = stream_as_text(stream) 29 | assert output == 'ěĝ' 30 | 31 | 32 | class TestJsonStream: 33 | 34 | def test_with_falsy_entries(self): 35 | stream = [ 36 | '{"one": "two"}\n{}\n', 37 | "[1, 2, 3]\n[]\n", 38 | ] 39 | output = list(json_stream(stream)) 40 | assert output == [ 41 | {'one': 'two'}, 42 | {}, 43 | [1, 2, 3], 44 | [], 45 | ] 46 | 47 | def test_with_leading_whitespace(self): 48 | stream = [ 49 | '\n \r\n {"one": "two"}{"x": 1}', 50 | ' {"three": "four"}\t\t{"x": 2}' 51 | ] 52 | output = list(json_stream(stream)) 53 | assert output == [ 54 | {'one': 'two'}, 55 | {'x': 1}, 56 | {'three': 'four'}, 57 | {'x': 2} 58 | ] 59 | -------------------------------------------------------------------------------- /tests/unit/utils_proxy_test.py: -------------------------------------------------------------------------------- 1 | import unittest 2 | 3 | from docker.utils.proxy import ProxyConfig 4 | 5 | HTTP = 'http://test:80' 6 | HTTPS = 'https://test:443' 7 | FTP = 'ftp://user:password@host:23' 8 | NO_PROXY = 'localhost,.localdomain' 9 | CONFIG = ProxyConfig(http=HTTP, https=HTTPS, ftp=FTP, no_proxy=NO_PROXY) 10 | ENV = { 11 | 'http_proxy': HTTP, 12 | 'HTTP_PROXY': HTTP, 13 | 'https_proxy': HTTPS, 14 | 'HTTPS_PROXY': 
HTTPS, 15 | 'ftp_proxy': FTP, 16 | 'FTP_PROXY': FTP, 17 | 'no_proxy': NO_PROXY, 18 | 'NO_PROXY': NO_PROXY, 19 | } 20 | 21 | 22 | class ProxyConfigTest(unittest.TestCase): 23 | 24 | def test_from_dict(self): 25 | config = ProxyConfig.from_dict({ 26 | 'httpProxy': HTTP, 27 | 'httpsProxy': HTTPS, 28 | 'ftpProxy': FTP, 29 | 'noProxy': NO_PROXY 30 | }) 31 | self.assertEqual(CONFIG.http, config.http) 32 | self.assertEqual(CONFIG.https, config.https) 33 | self.assertEqual(CONFIG.ftp, config.ftp) 34 | self.assertEqual(CONFIG.no_proxy, config.no_proxy) 35 | 36 | def test_new(self): 37 | config = ProxyConfig() 38 | self.assertIsNone(config.http) 39 | self.assertIsNone(config.https) 40 | self.assertIsNone(config.ftp) 41 | self.assertIsNone(config.no_proxy) 42 | 43 | config = ProxyConfig(http='a', https='b', ftp='c', no_proxy='d') 44 | self.assertEqual(config.http, 'a') 45 | self.assertEqual(config.https, 'b') 46 | self.assertEqual(config.ftp, 'c') 47 | self.assertEqual(config.no_proxy, 'd') 48 | 49 | def test_truthiness(self): 50 | assert not ProxyConfig() 51 | assert ProxyConfig(http='non-zero') 52 | assert ProxyConfig(https='non-zero') 53 | assert ProxyConfig(ftp='non-zero') 54 | assert ProxyConfig(no_proxy='non-zero') 55 | 56 | def test_environment(self): 57 | self.assertDictEqual(CONFIG.get_environment(), ENV) 58 | empty = ProxyConfig() 59 | self.assertDictEqual(empty.get_environment(), {}) 60 | 61 | def test_inject_proxy_environment(self): 62 | # Proxy config is non null, env is None. 63 | self.assertSetEqual( 64 | set(CONFIG.inject_proxy_environment(None)), 65 | {f'{k}={v}' for k, v in ENV.items()}) 66 | 67 | # Proxy config is null, env is None. 
68 | self.assertIsNone(ProxyConfig().inject_proxy_environment(None), None) 69 | 70 | env = ['FOO=BAR', 'BAR=BAZ'] 71 | 72 | # Proxy config is non null, env is non null 73 | actual = CONFIG.inject_proxy_environment(env) 74 | expected = [f'{k}={v}' for k, v in ENV.items()] + env 75 | # It's important that the first 8 variables are the ones from the proxy 76 | # config, and the last 2 are the ones from the input environment 77 | self.assertSetEqual(set(actual[:8]), set(expected[:8])) 78 | self.assertSetEqual(set(actual[-2:]), set(expected[-2:])) 79 | 80 | # Proxy is null, and is non null 81 | self.assertListEqual(ProxyConfig().inject_proxy_environment(env), env) 82 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py{37,38,39,310,311,312}, ruff 3 | skipsdist=True 4 | 5 | [testenv] 6 | usedevelop=True 7 | commands = 8 | py.test -v --cov=docker {posargs:tests/unit} 9 | extras = dev 10 | 11 | [testenv:ruff] 12 | commands = ruff docker tests setup.py 13 | extras = dev 14 | --------------------------------------------------------------------------------