├── .github ├── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── PULL_REQUEST_TEMPLATE.md └── workflows │ └── main.yml ├── .gitignore ├── CHANGELOG.md ├── CONTRIBUTING.md ├── Cargo.toml ├── LICENSE ├── Makefile ├── README.md ├── docker-api-stubs ├── base │ └── models.rs ├── build.sh └── lib │ ├── Cargo.toml │ └── src │ ├── lib.rs │ └── models.rs ├── examples ├── common.rs ├── container.rs ├── exec.rs ├── image.rs ├── network.rs ├── service.rs ├── system.rs └── volume.rs ├── src ├── api │ ├── config.rs │ ├── container.rs │ ├── exec.rs │ ├── image.rs │ ├── mod.rs │ ├── network.rs │ ├── node.rs │ ├── plugin.rs │ ├── secret.rs │ ├── service.rs │ ├── swarm.rs │ ├── system.rs │ ├── task.rs │ └── volume.rs ├── builder.rs ├── docker.rs ├── errors.rs ├── lib.rs ├── models.rs ├── opts │ ├── config.rs │ ├── container.rs │ ├── exec.rs │ ├── image.rs │ ├── mod.rs │ ├── network.rs │ ├── node.rs │ ├── plugin.rs │ ├── secret.rs │ ├── service.rs │ ├── swarm.rs │ ├── system.rs │ ├── task.rs │ └── volume.rs └── stream.rs └── tests ├── common.rs ├── container_tests.rs ├── docker_tests.rs ├── image_tests.rs └── network_tests.rs /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | --- 5 | 6 | 7 | 8 | 9 | - Crate version: 10 | - OS: 11 | - Output of running `docker version` on the command line: 12 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | --- 5 | 6 | 7 | 8 | -------------------------------------------------------------------------------- /.github/PULL_REQUEST_TEMPLATE.md: -------------------------------------------------------------------------------- 1 | 6 | 7 | ## What did you implement: 8 | 9 | 12 | 13 | Closes: #xxx 14 | 15 | ## How did you verify your change: 16 | 17 | ## What (if anything) would need to be called out in the CHANGELOG for the next release: -------------------------------------------------------------------------------- /.github/workflows/main.yml: -------------------------------------------------------------------------------- 1 | name: Main 2 | 3 | on: 4 | workflow_dispatch: 5 | push: 6 | paths-ignore: 7 | - "*.md" 8 | branches: 9 | - master 10 | tags: 11 | - "**" 12 | pull_request: 13 | paths-ignore: 14 | - "*.md" 15 | branches: 16 | - master 17 | 18 | env: 19 | CARGO_TERM_COLOR: always 20 | 21 | jobs: 22 | lint: 23 | strategy: 24 | matrix: 25 | os: [ubuntu-latest, macos-latest, windows-latest] 26 | runs-on: ${{ matrix.os }} 27 | steps: 28 | - name: Set up Rust 29 | uses: hecrj/setup-rust-action@v1 30 | with: 31 | components: clippy,rustfmt 32 | - uses: actions/checkout@v2 33 | - run: | 34 | cargo clippy --all-targets --all-features -- -D clippy::all 35 | cargo fmt --all -- --check 36 | 37 | test: 38 | needs: [lint] 39 | runs-on: ubuntu-latest 40 | steps: 41 | - name: Setup Rust 42 | uses: hecrj/setup-rust-action@v1 43 | with: 44 | rust-version: stable 45 | - name: Checkout 46 | uses: actions/checkout@v1 47 | - name: Test 48 | run: | 49 | docker pull ubuntu:latest 50 | cargo test --all-features --all-targets 51 | cargo test --doc 52 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | docker-api-stubs/build 2 | docker-api-stubs/lib/target 3 | .vscode 4 | .idea 5 | /target 6 | Cargo.lock 7 | 8 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | # 2 | - Bump default API version to v1.43 3 | - Move opts structures for config, secret and task from `api` module to `opts` module 4 | 5 | # 0.14.0 6 | - Add `ContainerCreateOptsBuilder::network_config` 7 | - `Docker` initializers like `new`, `tcp`, `unix`, `tls` now create an unversioned connector that will use the server's latest version instead of setting it to `LATEST_API_VERSION`. 8 | This means that by default this crate will be easier to use with older versions of Docker. 9 | - `Exec::start` and `Container::exec` now take `ExecStartOpts` options as an additional parameter 10 | - Add missing `attach_stdin` to `ExecCreateOpts` 11 | - `Exec::start` and `Container::exec` signature changed. It is now async and returns a result with `tty::Multiplexer` (same as attach) 12 | so that it can handle writing to STDIN. 13 | - Add `ContainerCreateOptsBuilder::log_driver_config` 14 | - `tty::Multiplexer` no longer relies on the Docker client lifetime - this means a change in function signature for `Container::attach`, `Container::exec` and `Exec::start` 15 | - Add `ImageFilter::Reference` variant 16 | - Signature of `ContainerCreateOptsBuilder` changed so that it takes an `Into<HostPort>` instead of a `u32` as an argument for `hostport`. The new type `HostPort` lets you specify the IP of the host in addition to the port. 17 | 18 | # 0.13.0 19 | - Fix `Container::attach` output when TTY is enabled on the container 20 | - Add `Image::build_par` that uses a multithreaded compression algorithm for creating the context directory archive. This method is behind the `par-compression` feature flag.
21 | - Add `ImageListOptsBuilder::shared_size` parameter 22 | - Add `SystemDataUsageOpts` parameter to `Docker::data_usage` 23 | - `Container::stop` and `Container::restart` now take `ContainerStopOpts` and `ContainerRestartOpts` as parameters respectively 24 | - Add `VolumeCreateOptsBuilder::cluster_spec` parameter 25 | - Add `ExecCreateOptsBuilder::console_size` parameter 26 | - Add `Volume::update` endpoint 27 | - Bumped default API version and models to v1.42 28 | - Add parameters Hostname, Domainname, IpcMode and PidMode to `ContainerCreateOptsBuilder` 29 | - Fix `Container::commit` and change the signature to take an optional container config as the second parameter 30 | 31 | # 0.12.0 32 | - Fix some integer fields that could be negative but previously were a usize like `ImageSummary::containers` 33 | - Fix deserialization of nullable map types like `ImageSummary::labels` 34 | - Rename `ExecContainerOpts` -> `ExecCreateOpts` 35 | - Rename `ExecCreateOpts::cmd` -> `ExecCreateOpts::command` 36 | - Rename `ContainerCreateOpts::cmd` -> `ContainerCreateOpts::command` 37 | - Change `Container::copy_from` argument type from `&Path` to a more generic `impl AsRef<Path>` 38 | - Change `ContainerCreateOpts::builder` to not require a name 39 | - Add a missing `ContainerOptsBuilder::image` method to set the image of the container 40 | - Rename `ContainerOptsBuilder` -> `ContainerCreateOptsBuilder` 41 | - Rename `RmContainerOpts` -> `ContainerRemoveOpts` 42 | - Rename `RmImageOpts` -> `ImageRemoveOpts` 43 | - Add `Clone` implementation to `ContainerCreateOpts` 44 | - Add `Clone` implementation to `ImageBuildOpts` 45 | - Add `Clone` implementation to `NetworkCreateOpts` 46 | 47 | # 0.11.0 48 | - *BREAKING* Rename `BuildOpts` -> `ImageBuildOpts` and `BuildOptsBuilder` -> `ImageBuildOptsBuilder` 49 | - Fix `Configs::create` endpoint 50 | 51 | # 0.10.0 52 | - *BREAKING* Use models generated from swagger schema 53 | - Remove `*Id` and `*IdRef` type aliases and use a common `Id` type for object IDs 54 | 55 | # 0.9.0 56 | - Fix `ContainersPruneInfo` deserialization 57 | - Logs endpoint now correctly returns `TtyChunk` instead of `Bytes` 58 | - *BREAKING* All API structs no longer have a `'docker` lifetime. This change makes it easier to create self-contained objects without the lifetime hell, and according to 59 | the hyper client documentation it is cheap to clone and cloning is the recommended way to share a client. 60 | - Add `Container::commit` that creates an image based on an existing container 61 | - Use Rust 2021 edition 62 | 63 | # 0.8.0 64 | - Make `ContainerInfo::state` and `ContainerSummary::state` strongly typed. 65 | - Fix `Docker::info` response deserialization 66 | - Fix `Docker::data_usage` response deserialization 67 | - Add missing `ContainerStatus::Configured` 68 | - Fix `ContainerInfo::mounts` field deserialization 69 | - Fix `ContainerDetails::config` field deserialization 70 | - Fix `LogConfig::config` field deserialization 71 | - Fix network creation [#11](https://github.com/vv9k/docker-api-rs/pull/11) 72 | - Add missing `NetworkInfo::created` field 73 | - Fix `Network::delete` endpoint 74 | - Fix `Network::disconnect` endpoint. It now uses the correct `ContainerDisconnectionOpts`.
75 | - Add `ContainerCreateOptsBuilder::security_opts` function to specify container security options [#12](https://github.com/vv9k/docker-api-rs/pull/12) 76 | - Add `NetworkCreateOptsBuilder::ipam` function to specify the IP address management config of a network 77 | 78 | # 0.7.0 79 | - Make `PidsStats`, `Descriptor` and `DistributionInspectInfo` struct fields publicly accessible. 80 | - Add the ability to push an image to a registry with `Image::push` or `Images::push`. 81 | - Add `online_cpus` field to `CpuStats` 82 | - Fix `Image::history` response deserialization 83 | - Fix `Container::logs` and `Service::logs` endpoints 84 | - Fix `Stats` field name from `network_stats` -> `networks` 85 | - Add missing clone implementations to some image API data types 86 | - All builder pattern methods now take an owned value and consume the builder on final build 87 | - Add a default implementation to `Isolation` to fix deserialization of `Info` 88 | - Fix `Docker::data_usage` response deserialization - fields of `VolumeInfo`: `labels`, `options`, `status` are now an `Option` 89 | - Add a way to initialize Docker with a different API version 90 | - Fix `ImageSummary` deserialization - `repo_tags` field is now an option as it can be null sometimes 91 | - Add `Docker::new_versioned`, `Docker::unix_versioned`, `Docker::tls_versioned`, `Docker::tcp_versioned` initializers that let the user specify the initially used client API version 92 | - Add `Docker::adjust_api_version` that verifies the API version returned by the server and adjusts the client version for further requests 93 | - Add `Id` and `IdRef` type aliases for each API type 94 | - Fix `Images::prune` response deserialization 95 | - Fix filter parameters serialization like `ImagePruneFilter` etc. 96 | - Fix `Images::clear_cache` response deserialization 97 | - Rename all `data` modules to `models` 98 | - Make `Change::kind` field into a strongly typed value 99 | - Fix `Container::changes` response deserialization 100 | 101 | # 0.6.0 102 | - `name` field of `ContainerCreateOpts` is now private. Use the `ContainerCreateOpts::builder` function that takes in a `name` parameter. 103 | - Use missing `name` parameter when creating a container [#6](https://github.com/vv9k/docker-api-rs/pull/6) 104 | - `NetworkSettings::global_ipv6_prefix_len` is now correctly a number 105 | - Fix return type of inspecting a container 106 | - Add new fields to `HostConfig` - `blkio_weight`, `blkio_weight_device`, `device_cgroup_rules`, `kernel_memory` 107 | - Fix name of `HealthcheckResult` field from `started` to `start`. 108 | 109 | # 0.5.1 110 | - Fix `ContainerConfig` deserialization (`cmd` field might be omitted from the response) 111 | 112 | # 0.5.0 113 | - Add missing `ContainerSpec` fields, use correct `TaskSpec` in `ServiceSpec` 114 | - Fix `ContainerConfig::exposed_ports` serialization 115 | 116 | 117 | # 0.4.0 118 | - Fix list endpoints 119 | - Add the ability to create an image with labels 120 | - Add lots of missing filter variants and document them 121 | - `ContainerOptsBuilder::expose` and `ContainerOptsBuilder::publish` now take a strongly typed `PublishPort` 122 | as an input parameter.
123 | - Add missing fields to `NetworkCreateOptsBuilder` 124 | - Add missing fields to `ContainerConnectionOptsBuilder` 125 | - Rename `Mount` to `MountPoint`, add `Mount` struct 126 | - Add missing fields to `TaskSpec` 127 | - Fix types on `ContainerConfig`, fix deserializing `ContainerConfig::exposed_ports` 128 | - Add logging on transport for easier debugging 129 | - Fix delete endpoints failing to deserialize the return type 130 | 131 | # 0.3.3 132 | - Fix return type of `Image::inspect` 133 | - Make API structs like `Container` thread-safe 134 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing to docker-api 2 | Contributing to docker-api isn't limited to just filing bugs; users are more than welcome to make suggestions, report any issues they may find, and make pull requests to help make docker-api better. 3 | 4 | ## Working on docker-api 5 | ### Prerequisites 6 | * The [rust toolchain](https://rustup.rs/) 7 | * [Git](https://git-scm.com/) 8 | 9 | 10 | ### Getting docker-api 11 | 1. Fork a copy of our repo 12 | 2. Open up Git in an environment of your choice 13 | 3. Run the following 14 | 15 | ```sh 16 | $ git clone https://github.com/YOUR-GITHUB-PROFILE-NAME/docker-api-rs.git 17 | $ cd docker-api-rs 18 | ``` 19 | 20 | 21 | ### Please pay attention to 22 | 1. open an issue describing the feature/bug you wish to contribute first to start a discussion; explain why, what and how 23 | 2. use rustfmt, see below how to configure 24 | 3. try to write tests covering code you produce as much as possible, especially critical code branches 25 | 4. add notes/highlights for the changelog in the pull request description 26 | 27 | 28 | ### Finding issues to fix 29 | After you've forked and cloned our repo, you can find issues to work on by heading over to our [issues list](https://github.com/vv9k/docker-api-rs/issues). 30 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "docker-api" 3 | version = "0.14.0" 4 | authors = ["Wojciech Kępka a rust interface to [Docker](https://www.docker.com/) containers 6 | 7 | ## Install 8 | 9 | Add the following to your `Cargo.toml` file 10 | 11 | ```toml 12 | [dependencies] 13 | docker-api = "0.14" 14 | ``` 15 | 16 | ## Supported API 17 | Default endpoints include: 18 | - Containers 19 | - Images 20 | - Networks 21 | - Volumes 22 | - Exec 23 | - System 24 | 25 | To enable swarm endpoints add a `swarm` feature to `Cargo.toml` like so: 26 | ```toml 27 | docker-api = { version = "0.14", features = ["swarm"] } 28 | ``` 29 | 30 | Swarm endpoints include: 31 | - Swarm 32 | - Nodes 33 | - Services 34 | - Tasks 35 | - Secrets 36 | - Configs 37 | - Plugins 38 | 39 | The latest stable version of this crate supports API version: **v1.42** 40 | Master branch supports: **v1.43** 41 | 42 | ## Features 43 | 44 | ### SSL Connection 45 | 46 | To enable an HTTPS connection to Docker, add a `tls` feature flag to `Cargo.toml`. 47 | 48 | ### Chrono 49 | 50 | To enable chrono DateTime timestamps, add a `chrono` feature flag to `Cargo.toml`. 51 | 52 | ### Default features 53 | 54 | By default only the `chrono` feature is enabled.
To disable it use: 55 | ```toml 56 | docker-api = { version = "0.14", default-features = false } 57 | ``` 58 | 59 | ## Usage 60 | 61 | Examples for most API endpoints can be found in the [examples directory](https://github.com/vv9k/docker-api-rs/tree/master/examples). 62 | 63 | 64 | ## Notice 65 | This crate is a fork of [shiplift](https://github.com/softprops/shiplift). 66 | 67 | ## License 68 | [MIT](https://raw.githubusercontent.com/vv9k/docker-api-rs/master/LICENSE) 69 | -------------------------------------------------------------------------------- /docker-api-stubs/base/models.rs: -------------------------------------------------------------------------------- 1 | #![allow( 2 | non_snake_case, 3 | clippy::redundant_field_names, 4 | clippy::new_without_default, 5 | clippy::too_many_arguments 6 | )] 7 | 8 | use serde::{Deserialize, Serialize}; 9 | use serde_json::Value; 10 | 11 | use std::collections::HashMap; 12 | 13 | use chrono::{DateTime, Utc}; 14 | -------------------------------------------------------------------------------- /docker-api-stubs/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | DOCKER_SWAGGER_URL="https://docs.docker.com/engine/api" 6 | DOCKER_API_VERSION="v1.43" 7 | DOCKER_SPEC_FILE="${DOCKER_API_VERSION}.yaml" 8 | DOCKER_FULL_URL="${DOCKER_SWAGGER_URL}/${DOCKER_SPEC_FILE}" 9 | RUSTGEN="https://github.com/vv9k/swagger-rustgen.git" 10 | BUILD_DIR=build 11 | BASE_DIR=$PWD 12 | 13 | mkdir $BUILD_DIR || true 14 | 15 | cd $BUILD_DIR 16 | echo $PWD 17 | 18 | curl -LO $DOCKER_FULL_URL 19 | 20 | git clone $RUSTGEN || true 21 | cd swagger-rustgen 22 | cargo build --release 23 | cd $BASE_DIR 24 | 25 | cat base/models.rs > lib/src/models.rs 26 | 27 | $BUILD_DIR/swagger-rustgen/target/release/swagger-gen generate models $BUILD_DIR/$DOCKER_SPEC_FILE >> lib/src/models.rs 28 | 29 | cd lib 30 | 31 | cargo fmt 32 | 33 | # Fix for https://github.com/vv9k/docker-api-rs/pull/29 34 | sed -r -i 's/(PortMap = HashMap)/\1Option<\2>/g' src/models.rs 35 | 36 | cargo fmt 37 | -------------------------------------------------------------------------------- /docker-api-stubs/lib/Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "docker-api-stubs" 3 | version = "0.6.0" 4 | authors = [ "Wojciech Kępka " ] 5 | description = "Generated Docker API swagger stubs for use in docker-api" 6 | license = "MIT" 7 | edition = "2021" 8 | 9 | documentation = "https://docs.rs/docker-api-stubs" 10 | homepage = "https://github.com/vv9k/docker-api-rs" 11 | repository = "https://github.com/vv9k/docker-api-rs" 12 | keywords = ["docker", "api", "stubs", "models"] 13 | 14 | [dependencies] 15 | chrono = { version = "0.4", features = ["serde"] } 16 | 17 | serde = { version = "1.0", features = ["derive"] } 18 | serde_json = "1" 19 | serde_with = { version = "2", features = ["macros"] } 20 | -------------------------------------------------------------------------------- /docker-api-stubs/lib/src/lib.rs: -------------------------------------------------------------------------------- 1 | pub mod models; 2 | -------------------------------------------------------------------------------- /examples/common.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use docker_api::{conn::TtyChunk, Docker, Result}; 4 | use std::str; 5 | 6 | #[cfg(unix)] 7 | pub fn new_docker() -> Result { 8 | 
Ok(Docker::unix("/var/run/docker.sock")) 9 | } 10 | 11 | #[cfg(not(unix))] 12 | pub fn new_docker() -> Result { 13 | Docker::new("tcp://127.0.0.1:8080") 14 | } 15 | 16 | pub fn print_chunk(chunk: TtyChunk) { 17 | match chunk { 18 | TtyChunk::StdOut(bytes) => { 19 | println!("Stdout: {}", str::from_utf8(&bytes).unwrap_or_default()) 20 | } 21 | TtyChunk::StdErr(bytes) => { 22 | eprintln!("Stdout: {}", str::from_utf8(&bytes).unwrap_or_default()) 23 | } 24 | TtyChunk::StdIn(_) => unreachable!(), 25 | } 26 | } 27 | 28 | fn main() {} 29 | -------------------------------------------------------------------------------- /examples/container.rs: -------------------------------------------------------------------------------- 1 | #![allow(clippy::suspicious_else_formatting)] 2 | mod common; 3 | use clap::Parser; 4 | use common::{new_docker, print_chunk}; 5 | use futures::StreamExt; 6 | use std::path::PathBuf; 7 | 8 | #[derive(Parser)] 9 | struct Opts { 10 | #[command(subcommand)] 11 | subcmd: Cmd, 12 | } 13 | 14 | #[derive(Parser)] 15 | enum Cmd { 16 | /// Attach to a running containers TTY. 17 | Attach { id: String }, 18 | /// Copy files from a container. 19 | CopyFrom { 20 | id: String, 21 | remote_path: PathBuf, 22 | local_path: PathBuf, 23 | }, 24 | /// Copy files into a container. 25 | CopyInto { 26 | local_path: PathBuf, 27 | id: String, 28 | remote_path: PathBuf, 29 | }, 30 | /// Create a new image from a container 31 | Commit { 32 | /// Container ID 33 | id: String, 34 | #[arg(short, long)] 35 | /// Repository name for the created image 36 | repo: Option, 37 | #[arg(short, long)] 38 | /// Tag name for the create image 39 | tag: Option, 40 | #[arg(short, long)] 41 | /// Commit message 42 | comment: Option, 43 | #[arg(short, long)] 44 | /// Author of the image (e.g., John Hannibal Smith ) 45 | author: Option, 46 | #[arg(short, long)] 47 | /// Whether to pause the container before committing 48 | pause: Option, 49 | #[arg(long)] 50 | /// Dockerfile instructions to apply while committing 51 | changes: Option, 52 | }, 53 | /// Create a new container. 54 | Create { 55 | image: String, 56 | #[arg(short, long = "name")] // for some reason naming field `name` makes clap error. Possibly a bug? 57 | /// The name of the container to create. 58 | nam: Option, 59 | }, 60 | /// Delete an existing container. 61 | Delete { 62 | id: String, 63 | #[arg(short, long)] 64 | force: bool, 65 | }, 66 | /// Execute a command in a running container. 67 | Exec { id: String, cmd: Vec }, 68 | /// Inspect a container. 69 | Inspect { id: String }, 70 | /// List active containers. 71 | List { 72 | #[arg(long, short)] 73 | /// List stopped and running containers. 74 | all: bool, 75 | }, 76 | /// Print logs of a container. 77 | Logs { 78 | id: String, 79 | #[arg(long)] 80 | stdout: bool, 81 | #[arg(long)] 82 | stderr: bool, 83 | }, 84 | /// Delete stopped containers. 85 | Prune { 86 | #[arg(long)] 87 | /// Prune containers before this timestamp. Can be a unix timestamp or duration 88 | /// string like `1h30m` 89 | until: Option, 90 | }, 91 | /// Get information about a file in container. 
92 | StatFile { id: String, path: PathBuf }, 93 | /// Stops the container 94 | Stop { 95 | id: String, 96 | #[arg(long)] 97 | /// Time in seconds to wait before stopping the container 98 | wait: Option, 99 | #[arg(long)] 100 | /// Example `SIGINT` 101 | signal: Option, 102 | }, 103 | /// Restarts the container 104 | Restart { 105 | id: String, 106 | #[arg(long)] 107 | /// Time in seconds to wait before restarting the container 108 | wait: Option, 109 | #[arg(long)] 110 | /// Example `SIGINT` 111 | signal: Option, 112 | }, 113 | /// Returns usage statistics of the container. 114 | Stats { id: String }, 115 | /// Returns information about running processes in the container. 116 | Top { 117 | id: String, 118 | /// Arguments passed to `ps` in the container. 119 | psargs: Option, 120 | }, 121 | } 122 | 123 | #[tokio::main] 124 | async fn main() -> Result<(), Box> { 125 | env_logger::init(); 126 | let opts: Opts = Opts::parse(); 127 | let docker = new_docker()?; 128 | 129 | match opts.subcmd { 130 | Cmd::Attach { id } => { 131 | let container = docker.containers().get(&id); 132 | let tty_multiplexer = container.attach().await?; 133 | 134 | let (mut reader, _writer) = tty_multiplexer.split(); 135 | 136 | while let Some(tty_result) = reader.next().await { 137 | match tty_result { 138 | Ok(chunk) => print_chunk(chunk), 139 | Err(e) => eprintln!("Error: {e}"), 140 | } 141 | } 142 | } 143 | Cmd::CopyFrom { 144 | id, 145 | remote_path, 146 | local_path, 147 | } => { 148 | use futures::TryStreamExt; 149 | use tar::Archive; 150 | let bytes = docker 151 | .containers() 152 | .get(&id) 153 | .copy_from(&remote_path) 154 | .try_concat() 155 | .await?; 156 | 157 | let mut archive = Archive::new(&bytes[..]); 158 | archive.unpack(&local_path)?; 159 | } 160 | Cmd::CopyInto { 161 | local_path, 162 | id, 163 | remote_path, 164 | } => { 165 | use std::{fs::File, io::Read}; 166 | 167 | let mut file = File::open(&local_path)?; 168 | let mut bytes = Vec::new(); 169 | file.read_to_end(&mut bytes) 170 | .expect("Cannot read file on the localhost."); 171 | 172 | if let Err(e) = docker 173 | .containers() 174 | .get(&id) 175 | .copy_file_into(remote_path, &bytes) 176 | .await 177 | { 178 | eprintln!("Error: {e}") 179 | } 180 | } 181 | Cmd::Commit { 182 | id, 183 | repo, 184 | tag, 185 | comment, 186 | author, 187 | pause, 188 | changes, 189 | } => { 190 | use docker_api::opts::ContainerCommitOpts; 191 | 192 | let mut opts = ContainerCommitOpts::builder(); 193 | 194 | if let Some(repo) = repo { 195 | opts = opts.repo(repo) 196 | } 197 | if let Some(tag) = tag { 198 | opts = opts.tag(tag) 199 | } 200 | if let Some(comment) = comment { 201 | opts = opts.comment(comment) 202 | } 203 | if let Some(author) = author { 204 | opts = opts.author(author) 205 | } 206 | if let Some(pause) = pause { 207 | opts = opts.pause(pause) 208 | } 209 | if let Some(changes) = changes { 210 | opts = opts.changes(changes) 211 | } 212 | match docker 213 | .containers() 214 | .get(id) 215 | .commit(&opts.build(), None) 216 | .await 217 | { 218 | Ok(id) => println!("{id:?}"), 219 | Err(e) => eprintln!("Error: {e}"), 220 | } 221 | } 222 | Cmd::Create { image, nam } => { 223 | use docker_api::opts::ContainerCreateOpts; 224 | let opts = if let Some(name) = nam { 225 | ContainerCreateOpts::builder() 226 | .image(image) 227 | .name(name) 228 | .build() 229 | } else { 230 | ContainerCreateOpts::builder().image(image).build() 231 | }; 232 | match docker.containers().create(&opts).await { 233 | Ok(info) => println!("{info:?}"), 234 | Err(e) => 
eprintln!("Error: {e}"), 235 | } 236 | } 237 | Cmd::Delete { id, force } => { 238 | use docker_api::opts::ContainerRemoveOpts; 239 | 240 | let opts = if force { 241 | ContainerRemoveOpts::builder().force(true).build() 242 | } else { 243 | Default::default() 244 | }; 245 | if let Err(e) = docker.containers().get(&id).remove(&opts).await { 246 | eprintln!("Error: {e}") 247 | } 248 | } 249 | Cmd::Exec { id, cmd } => { 250 | use docker_api::opts::ExecCreateOpts; 251 | let options = ExecCreateOpts::builder() 252 | .command(cmd) 253 | .attach_stdout(true) 254 | .attach_stderr(true) 255 | .build(); 256 | 257 | let container = docker.containers().get(&id); 258 | let mut stream = container 259 | .exec(&options, &Default::default()) 260 | .await 261 | .expect("exec stream"); 262 | while let Some(exec_result) = stream.next().await { 263 | match exec_result { 264 | Ok(chunk) => print_chunk(chunk), 265 | Err(e) => eprintln!("Error: {e}"), 266 | } 267 | } 268 | } 269 | Cmd::Inspect { id } => { 270 | match docker.containers().get(&id).inspect().await { 271 | Ok(container) => println!("{container:#?}"), 272 | Err(e) => eprintln!("Error: {e}"), 273 | }; 274 | } 275 | Cmd::List { all } => { 276 | use docker_api::opts::ContainerListOpts; 277 | 278 | let opts = if all { 279 | ContainerListOpts::builder().all(true).build() 280 | } else { 281 | Default::default() 282 | }; 283 | match docker.containers().list(&opts).await { 284 | Ok(containers) => { 285 | containers.into_iter().for_each(|container| { 286 | println!( 287 | "{}\t{}\t{:?}\t{}\t{}", 288 | &container.id.unwrap_or_default()[..12], 289 | container.image.unwrap_or_default(), 290 | container.state, 291 | container.status.unwrap_or_default(), 292 | container.names.map(|n| n[0].to_owned()).unwrap_or_default() 293 | ); 294 | }); 295 | } 296 | Err(e) => eprintln!("Error: {e}"), 297 | } 298 | } 299 | Cmd::Logs { id, stdout, stderr } => { 300 | use docker_api::opts::LogsOpts; 301 | let container = docker.containers().get(&id); 302 | let logs_stream = 303 | container.logs(&LogsOpts::builder().stdout(stdout).stderr(stderr).build()); 304 | 305 | let logs: Vec<_> = logs_stream 306 | .map(|chunk| match chunk { 307 | Ok(chunk) => chunk.to_vec(), 308 | Err(e) => { 309 | eprintln!("Error: {e}"); 310 | vec![] 311 | } 312 | }) 313 | .collect::>() 314 | .await 315 | .into_iter() 316 | .flatten() 317 | .collect::>(); 318 | print!("{}", String::from_utf8_lossy(&logs)); 319 | } 320 | Cmd::Prune { until } => { 321 | use docker_api::opts::{ContainerPruneFilter, ContainerPruneOpts}; 322 | 323 | let opts = if let Some(until) = until { 324 | ContainerPruneOpts::builder() 325 | .filter(vec![ContainerPruneFilter::Until(until)]) 326 | .build() 327 | } else { 328 | Default::default() 329 | }; 330 | 331 | if let Err(e) = docker.containers().prune(&opts).await { 332 | eprintln!("Error: {e}") 333 | } 334 | } 335 | Cmd::StatFile { id, path } => { 336 | let stats = docker.containers().get(&id).stat_file(path).await?; 337 | println!("{stats}"); 338 | } 339 | Cmd::Stats { id } => { 340 | while let Some(result) = docker.containers().get(&id).stats().next().await { 341 | match result { 342 | Ok(stat) => println!("{stat:?}"), 343 | Err(e) => eprintln!("Error: {e}"), 344 | } 345 | } 346 | } 347 | Cmd::Top { id, psargs } => { 348 | match docker.containers().get(&id).top(psargs.as_deref()).await { 349 | Ok(top) => println!("{top:#?}"), 350 | Err(e) => eprintln!("Error: {e}"), 351 | }; 352 | } 353 | Cmd::Stop { id, wait, signal } => { 354 | use docker_api::opts::ContainerStopOpts; 355 | 356 | let 
mut opts = ContainerStopOpts::builder(); 357 | if let Some(w) = wait { 358 | opts = opts.wait(std::time::Duration::from_secs(w as u64)); 359 | } 360 | if let Some(s) = signal { 361 | opts = opts.signal(s); 362 | } 363 | 364 | match docker.containers().get(&id).stop(&opts.build()).await { 365 | Ok(_) => println!("Container {id} stopped..."), 366 | Err(e) => eprintln!("Error: {e}"), 367 | }; 368 | } 369 | Cmd::Restart { id, wait, signal } => { 370 | use docker_api::opts::ContainerRestartOpts; 371 | 372 | let mut opts = ContainerRestartOpts::builder(); 373 | if let Some(w) = wait { 374 | opts = opts.wait(std::time::Duration::from_secs(w as u64)); 375 | } 376 | if let Some(s) = signal { 377 | opts = opts.signal(s); 378 | } 379 | 380 | match docker.containers().get(&id).restart(&opts.build()).await { 381 | Ok(_) => println!("Container {id} restarted..."), 382 | Err(e) => eprintln!("Error: {e}"), 383 | }; 384 | } 385 | } 386 | 387 | Ok(()) 388 | } 389 | -------------------------------------------------------------------------------- /examples/exec.rs: -------------------------------------------------------------------------------- 1 | mod common; 2 | use clap::Parser; 3 | use common::new_docker; 4 | use docker_api::{conn::TtyChunk, Exec}; 5 | 6 | #[derive(Parser)] 7 | pub struct Opts { 8 | #[command(subcommand)] 9 | subcmd: Cmd, 10 | } 11 | 12 | #[derive(Parser)] 13 | enum Cmd { 14 | /// Run a command in container and inspect it 15 | Inspect { 16 | /// The container to run the command in. 17 | container: String, 18 | /// Command to run. 19 | cmd: Vec, 20 | }, 21 | /// Resize the TTY session used by an exec instance. 22 | Resize { 23 | exec: String, 24 | width: u64, 25 | height: u64, 26 | }, 27 | } 28 | 29 | #[tokio::main] 30 | async fn main() -> Result<(), Box> { 31 | env_logger::init(); 32 | let opts: Opts = Opts::parse(); 33 | let docker = new_docker()?; 34 | 35 | match opts.subcmd { 36 | Cmd::Inspect { container, cmd } => { 37 | use docker_api::opts::ExecCreateOpts; 38 | use futures::StreamExt; 39 | 40 | // Create Opts with specified command 41 | let opts = ExecCreateOpts::builder() 42 | .command(cmd) 43 | .attach_stdout(true) 44 | .attach_stderr(true) 45 | .build(); 46 | 47 | let exec = Exec::create(docker, &container, &opts).await?; 48 | 49 | println!("{:#?}", exec.inspect().await?); 50 | 51 | let mut stream = exec.start(&Default::default()).await?; 52 | 53 | while let Some(Ok(chunk)) = stream.next().await { 54 | println!("{chunk:?}"); 55 | match chunk { 56 | TtyChunk::StdOut(buf) => { 57 | println!("STDOUT: {}", String::from_utf8_lossy(&buf)); 58 | } 59 | TtyChunk::StdErr(buf) => { 60 | println!("STDERR: {}", String::from_utf8_lossy(&buf)); 61 | } 62 | TtyChunk::StdIn(buf) => { 63 | println!("STDIN: {}", String::from_utf8_lossy(&buf)); 64 | } 65 | } 66 | } 67 | 68 | println!("{:#?}", exec.inspect().await?); 69 | } 70 | Cmd::Resize { 71 | exec, 72 | width, 73 | height, 74 | } => { 75 | use docker_api::opts::ExecResizeOpts; 76 | let exec = Exec::get(docker, &exec); 77 | 78 | // Resize its window with given parameters 79 | let resize_opts = ExecResizeOpts::builder() 80 | .width(width) 81 | .height(height) 82 | .build(); 83 | exec.resize(&resize_opts).await?; 84 | } 85 | } 86 | 87 | Ok(()) 88 | } 89 | -------------------------------------------------------------------------------- /examples/image.rs: -------------------------------------------------------------------------------- 1 | mod common; 2 | use clap::Parser; 3 | use common::new_docker; 4 | use futures::StreamExt; 5 | use 
std::path::PathBuf; 6 | 7 | #[derive(Parser)] 8 | pub struct Opts { 9 | #[command(subcommand)] 10 | subcmd: Cmd, 11 | } 12 | 13 | #[derive(Parser)] 14 | enum Cmd { 15 | /// Build an image. 16 | Build { 17 | /// A path to the directory containing Dockerfile for the image. 18 | path: PathBuf, 19 | #[cfg(feature = "par-compress")] 20 | #[arg(short, long)] 21 | /// Use multithreaded compression algorithm 22 | multithread: bool, 23 | #[arg(default_value = "latest")] 24 | tag: String, 25 | }, 26 | /// Delete an image. 27 | Delete { 28 | image: String, 29 | #[arg(short, long)] 30 | force: bool, 31 | #[arg(long)] 32 | noprune: bool, 33 | }, 34 | /// Export an image as a tar archive. 35 | Export { 36 | image: String, 37 | }, 38 | /// Inspect an image. 39 | Inspect { 40 | image: String, 41 | }, 42 | Import { 43 | path: PathBuf, 44 | }, 45 | /// List existing images. 46 | List { 47 | #[arg(long, short)] 48 | /// Show all images. By default only final layer images are shown. 49 | all: bool, 50 | }, 51 | /// Pull an image from image registry. 52 | Pull { 53 | /// The name or id of the image to pull. 54 | image: String, 55 | /// Username in case authentication is required. 56 | username: Option, 57 | /// Password in case authentication is required. 58 | password: Option, 59 | }, 60 | /// Search for an image. 61 | Search { 62 | image: String, 63 | }, 64 | Tag { 65 | /// Repository containing the image to tag. 66 | repo: String, 67 | /// The name or id of the image to tag. 68 | image: String, 69 | tag: String, 70 | }, 71 | Prune, 72 | } 73 | 74 | #[tokio::main] 75 | async fn main() -> Result<(), Box> { 76 | env_logger::init(); 77 | let docker = new_docker()?; 78 | let opts: Opts = Opts::parse(); 79 | 80 | match opts.subcmd { 81 | Cmd::Build { 82 | path, 83 | tag, 84 | #[cfg(feature = "par-compress")] 85 | multithread, 86 | } => { 87 | use docker_api::opts::ImageBuildOpts; 88 | let options = ImageBuildOpts::builder(path).tag(tag).build(); 89 | 90 | let images = docker.images(); 91 | 92 | #[cfg(feature = "par-compress")] 93 | { 94 | if multithread { 95 | let mut stream = images.build_par(&options); 96 | while let Some(build_result) = stream.next().await { 97 | match build_result { 98 | Ok(output) => println!("{output:?}"), 99 | Err(e) => eprintln!("Error: {e}"), 100 | } 101 | } 102 | } else { 103 | let mut stream = images.build(&options); 104 | while let Some(build_result) = stream.next().await { 105 | match build_result { 106 | Ok(output) => println!("{output:?}"), 107 | Err(e) => eprintln!("Error: {e}"), 108 | } 109 | } 110 | } 111 | } 112 | #[cfg(not(feature = "par-compress"))] 113 | { 114 | let mut stream = images.build(&options); 115 | while let Some(build_result) = stream.next().await { 116 | match build_result { 117 | Ok(output) => println!("{output:?}"), 118 | Err(e) => eprintln!("Error: {e}"), 119 | } 120 | } 121 | } 122 | } 123 | Cmd::Delete { 124 | image, 125 | force, 126 | noprune, 127 | } => { 128 | use docker_api::opts::ImageRemoveOpts; 129 | let opts = ImageRemoveOpts::builder() 130 | .force(force) 131 | .noprune(noprune) 132 | .build(); 133 | match docker.images().get(&image).remove(&opts).await { 134 | Ok(statuses) => { 135 | for status in statuses { 136 | println!("{status:?}"); 137 | } 138 | } 139 | Err(e) => eprintln!("Error: {e}"), 140 | }; 141 | } 142 | Cmd::Export { image } => { 143 | use docker_api::Error; 144 | use std::{fs::OpenOptions, io::Write}; 145 | let mut export_file = OpenOptions::new() 146 | .write(true) 147 | .create(true) 148 | .open(format!("{}.tar", &image))?; 149 | 
150 | while let Some(export_result) = docker.images().get(&image).export().next().await { 151 | match export_result.and_then(|bytes| export_file.write(&bytes).map_err(Error::from)) 152 | { 153 | Ok(n) => println!("copied {n} bytes"), 154 | Err(e) => eprintln!("Error: {e}"), 155 | } 156 | } 157 | } 158 | Cmd::Inspect { image } => { 159 | match docker.images().get(&image).inspect().await { 160 | Ok(image) => println!("{image:#?}"), 161 | Err(e) => eprintln!("Error: {e}"), 162 | }; 163 | } 164 | Cmd::Import { path } => { 165 | use std::fs::File; 166 | let f = File::open(path).expect("Unable to open file"); 167 | 168 | let reader = Box::from(f); 169 | 170 | let images = docker.images(); 171 | let mut stream = images.import(reader); 172 | 173 | while let Some(import_result) = stream.next().await { 174 | match import_result { 175 | Ok(output) => println!("{output:?}"), 176 | Err(e) => eprintln!("Error: {e}"), 177 | } 178 | } 179 | } 180 | Cmd::List { all } => { 181 | use docker_api::opts::ImageListOpts; 182 | 183 | let opts = if all { 184 | ImageListOpts::builder().all(true).build() 185 | } else { 186 | Default::default() 187 | }; 188 | match docker.images().list(&opts).await { 189 | Ok(images) => { 190 | images.into_iter().for_each(|image| { 191 | println!( 192 | "---------------------------------\nCreated: {}\nId: {}\nRepo tags: {}\nLabels:\n{}", 193 | image.created, 194 | image.id, 195 | image.repo_tags.join(","), 196 | image 197 | .labels 198 | .into_iter() 199 | .map(|(k, v)| format!(" - {k}={v}")) 200 | .collect::>() 201 | .join("\n"), 202 | ); 203 | }); 204 | } 205 | Err(e) => eprintln!("Error: {e}"), 206 | } 207 | } 208 | Cmd::Pull { 209 | image, 210 | username, 211 | password, 212 | } => { 213 | use docker_api::opts::{PullOpts, RegistryAuth}; 214 | let opts = if let (Some(username), Some(pass)) = (username, password) { 215 | let auth = RegistryAuth::builder() 216 | .username(username) 217 | .password(pass) 218 | .build(); 219 | PullOpts::builder().image(image).auth(auth).build() 220 | } else { 221 | PullOpts::builder().image(image).build() 222 | }; 223 | let images = docker.images(); 224 | let mut stream = images.pull(&opts); 225 | 226 | while let Some(pull_result) = stream.next().await { 227 | match pull_result { 228 | Ok(output) => println!("{output:?}"), 229 | Err(e) => eprintln!("{e}"), 230 | } 231 | } 232 | } 233 | Cmd::Search { image } => { 234 | match docker.images().search(image).await { 235 | Ok(results) => { 236 | for result in results { 237 | println!( 238 | "{} - {}", 239 | result.name.unwrap_or_default(), 240 | result.description.unwrap_or_default() 241 | ); 242 | } 243 | } 244 | Err(e) => eprintln!("Error: {e}"), 245 | }; 246 | } 247 | Cmd::Tag { 248 | repo, 249 | image: name, 250 | tag, 251 | } => { 252 | use docker_api::api::Image; 253 | use docker_api::opts::TagOpts; 254 | 255 | let tag_opts = TagOpts::builder().repo(repo).tag(tag).build(); 256 | 257 | let image = Image::new(docker, name); 258 | 259 | if let Err(e) = image.tag(&tag_opts).await { 260 | eprintln!("Error: {e}") 261 | } 262 | } 263 | Cmd::Prune => { 264 | match docker.images().prune(&Default::default()).await { 265 | Ok(info) => println!("{info:#?}"), 266 | Err(e) => eprintln!("Error: {e}"), 267 | }; 268 | } 269 | } 270 | 271 | Ok(()) 272 | } 273 | -------------------------------------------------------------------------------- /examples/network.rs: -------------------------------------------------------------------------------- 1 | mod common; 2 | use clap::Parser; 3 | use common::new_docker; 4 | 5 | 
#[derive(Parser)] 6 | pub struct Opts { 7 | #[command(subcommand)] 8 | subcmd: Cmd, 9 | } 10 | 11 | #[derive(Parser)] 12 | enum Cmd { 13 | /// Connect a container to a network. 14 | Connect { 15 | container: String, 16 | network: String, 17 | }, 18 | /// Create a new network. 19 | Create { 20 | network: String, 21 | #[arg(default_value = "bridge")] 22 | driver: String, 23 | }, 24 | /// Delete a network. 25 | Delete { 26 | network: String, 27 | }, 28 | /// Disconnect a container from a network. 29 | Disconnect { 30 | container: String, 31 | network: String, 32 | }, 33 | Inspect { 34 | network: String, 35 | }, 36 | List, 37 | Prune, 38 | } 39 | 40 | #[tokio::main] 41 | async fn main() -> Result<(), Box> { 42 | env_logger::init(); 43 | let docker = new_docker()?; 44 | let opts: Opts = Opts::parse(); 45 | 46 | match opts.subcmd { 47 | Cmd::Connect { container, network } => { 48 | use docker_api::opts::ContainerConnectionOpts; 49 | if let Err(e) = docker 50 | .networks() 51 | .get(&network) 52 | .connect(&ContainerConnectionOpts::builder(&container).build()) 53 | .await 54 | { 55 | eprintln!("Error: {e}") 56 | } 57 | } 58 | Cmd::Create { network, driver } => { 59 | use docker_api::opts::NetworkCreateOpts; 60 | match docker 61 | .networks() 62 | .create(&NetworkCreateOpts::builder(network).driver(driver).build()) 63 | .await 64 | { 65 | Ok(info) => println!("{info:#?}"), 66 | Err(e) => eprintln!("Error: {e}"), 67 | } 68 | } 69 | Cmd::Delete { network } => { 70 | if let Err(e) = docker.networks().get(&network).delete().await { 71 | eprintln!("Error: {e}") 72 | } 73 | } 74 | Cmd::Disconnect { container, network } => { 75 | use docker_api::opts::ContainerDisconnectionOpts; 76 | if let Err(e) = docker 77 | .networks() 78 | .get(network) 79 | .disconnect(&ContainerDisconnectionOpts::builder(container).build()) 80 | .await 81 | { 82 | eprintln!("Error: {e}") 83 | } 84 | } 85 | Cmd::Inspect { network } => { 86 | match docker.networks().get(&network).inspect().await { 87 | Ok(network_info) => println!("{network_info:#?}"), 88 | Err(e) => eprintln!("Error: {e}"), 89 | }; 90 | } 91 | Cmd::List => match docker.networks().list(&Default::default()).await { 92 | Ok(networks) => networks.into_iter().for_each(|net| { 93 | println!( 94 | "----------------------\nId: {}\nName: {}\nDriver: {}\nLabels:\n{}", 95 | net.id.unwrap_or_default(), 96 | net.name.unwrap_or_default(), 97 | net.driver.unwrap_or_default(), 98 | net.labels 99 | .unwrap_or_default() 100 | .iter() 101 | .map(|(k, v)| format!("{k}={v}")) 102 | .collect::>() 103 | .join(",") 104 | ) 105 | }), 106 | Err(e) => eprintln!("Error: {e}"), 107 | }, 108 | Cmd::Prune => { 109 | match docker.networks().prune(&Default::default()).await { 110 | Ok(info) => println!("{info:#?}"), 111 | Err(e) => eprintln!("Error: {e}"), 112 | }; 113 | } 114 | } 115 | 116 | Ok(()) 117 | } 118 | -------------------------------------------------------------------------------- /examples/service.rs: -------------------------------------------------------------------------------- 1 | #[cfg(feature = "swarm")] 2 | mod common; 3 | 4 | #[cfg(feature = "swarm")] 5 | use clap::Parser; 6 | #[cfg(feature = "swarm")] 7 | use common::new_docker; 8 | 9 | #[cfg(feature = "swarm")] 10 | #[derive(Parser)] 11 | pub struct Opts { 12 | #[command(subcommand)] 13 | subcmd: Cmd, 14 | } 15 | 16 | #[cfg(feature = "swarm")] 17 | #[derive(Parser)] 18 | enum Cmd { 19 | Delete { 20 | service: String, 21 | }, 22 | Inspect { 23 | service: String, 24 | }, 25 | List { 26 | #[arg(long)] 27 | with_status: bool, 28 
| }, 29 | Logs { 30 | service: String, 31 | #[arg(long)] 32 | stdout: bool, 33 | #[arg(long)] 34 | stderr: bool, 35 | }, 36 | } 37 | 38 | #[cfg(feature = "swarm")] 39 | #[tokio::main] 40 | async fn main() -> Result<(), Box> { 41 | env_logger::init(); 42 | let docker = new_docker()?; 43 | let opts: Opts = Opts::parse(); 44 | 45 | match opts.subcmd { 46 | Cmd::Delete { service } => { 47 | if let Err(e) = docker.services().get(&service).delete().await { 48 | eprintln!("Error: {e}") 49 | } 50 | } 51 | Cmd::Inspect { service } => { 52 | match docker.services().get(&service).inspect().await { 53 | Ok(service) => println!("{service:#?}"), 54 | Err(e) => eprintln!("Error: {e}"), 55 | }; 56 | } 57 | Cmd::List { with_status } => { 58 | use docker_api::opts::ServiceListOpts; 59 | 60 | match docker 61 | .services() 62 | .list(&ServiceListOpts::builder().status(with_status).build()) 63 | .await 64 | { 65 | Ok(services) => { 66 | for s in services { 67 | println!("{s:#?}") 68 | } 69 | } 70 | Err(e) => eprintln!("Error: {e}"), 71 | } 72 | } 73 | Cmd::Logs { 74 | service, 75 | stdout, 76 | stderr, 77 | } => { 78 | use docker_api::opts::LogsOpts; 79 | use futures::StreamExt; 80 | 81 | let service = docker.services().get(&service); 82 | let logs_stream = 83 | service.logs(&LogsOpts::builder().stdout(stdout).stderr(stderr).build()); 84 | 85 | let logs: Vec<_> = logs_stream 86 | .map(|chunk| match chunk { 87 | Ok(chunk) => chunk.to_vec(), 88 | Err(e) => { 89 | eprintln!("Error: {e}"); 90 | vec![] 91 | } 92 | }) 93 | .collect::>() 94 | .await 95 | .into_iter() 96 | .flatten() 97 | .collect::>(); 98 | print!("{}", String::from_utf8_lossy(&logs)); 99 | } 100 | } 101 | 102 | Ok(()) 103 | } 104 | 105 | #[cfg(not(feature = "swarm"))] 106 | fn main() -> Result<(), Box> { 107 | Ok(()) 108 | } 109 | -------------------------------------------------------------------------------- /examples/system.rs: -------------------------------------------------------------------------------- 1 | mod common; 2 | use clap::Parser; 3 | use common::new_docker; 4 | 5 | #[derive(Parser)] 6 | struct Opts { 7 | #[command(subcommand)] 8 | subcmd: Cmd, 9 | } 10 | 11 | #[derive(Parser)] 12 | enum Cmd { 13 | Info, 14 | Ping, 15 | Version, 16 | DataUsage, 17 | Events, 18 | } 19 | 20 | #[tokio::main] 21 | async fn main() -> Result<(), Box> { 22 | env_logger::init(); 23 | let docker = new_docker()?; 24 | let opts: Opts = Opts::parse(); 25 | 26 | match opts.subcmd { 27 | Cmd::Info => { 28 | match docker.info().await { 29 | Ok(info) => println!("{info:#?}"), 30 | Err(e) => eprintln!("Error: {e}"), 31 | }; 32 | } 33 | Cmd::Ping => { 34 | match docker.ping().await { 35 | Ok(ping) => println!("{ping:#?}"), 36 | Err(e) => eprintln!("Error: {e}"), 37 | }; 38 | } 39 | Cmd::Version => { 40 | match docker.version().await { 41 | Ok(ver) => println!("{ver:#?}"), 42 | Err(e) => eprintln!("Error: {e}"), 43 | }; 44 | } 45 | Cmd::DataUsage => { 46 | use docker_api::opts::{DataUsageType, SystemDataUsageOpts}; 47 | match docker 48 | .data_usage( 49 | &SystemDataUsageOpts::builder() 50 | .types([ 51 | DataUsageType::Image, 52 | DataUsageType::Container, 53 | DataUsageType::Volume, 54 | ]) 55 | .build(), 56 | ) 57 | .await 58 | { 59 | Ok(info) => println!("{info:#?}"), 60 | Err(e) => eprintln!("Error: {e}"), 61 | }; 62 | } 63 | Cmd::Events => { 64 | use futures::StreamExt; 65 | while let Some(event_result) = docker.events(&Default::default()).next().await { 66 | match event_result { 67 | Ok(event) => println!("{event:?}"), 68 | Err(e) => eprintln!("Error: {e}"), 69 
| } 70 | } 71 | } 72 | } 73 | 74 | Ok(()) 75 | } 76 | -------------------------------------------------------------------------------- /examples/volume.rs: -------------------------------------------------------------------------------- 1 | mod common; 2 | use clap::Parser; 3 | use common::new_docker; 4 | 5 | #[derive(Parser)] 6 | pub struct Opts { 7 | #[command(subcommand)] 8 | subcmd: Cmd, 9 | } 10 | 11 | #[derive(Parser)] 12 | enum Cmd { 13 | Create { 14 | volume: String, 15 | #[arg(default_value = "overlay2")] 16 | driver: String, 17 | }, 18 | Inspect { 19 | volume: String, 20 | }, 21 | Delete { 22 | volume: String, 23 | }, 24 | List, 25 | Prune, 26 | } 27 | 28 | #[tokio::main] 29 | async fn main() -> Result<(), Box> { 30 | env_logger::init(); 31 | let docker = new_docker()?; 32 | let opts: Opts = Opts::parse(); 33 | 34 | match opts.subcmd { 35 | Cmd::Create { volume, driver } => { 36 | use docker_api::opts::VolumeCreateOpts; 37 | match docker 38 | .volumes() 39 | .create( 40 | &VolumeCreateOpts::builder() 41 | .name(volume) 42 | .driver(driver) 43 | .build(), 44 | ) 45 | .await 46 | { 47 | Ok(info) => println!("{info:?}"), 48 | Err(e) => eprintln!("Error: {e}"), 49 | } 50 | } 51 | Cmd::Inspect { volume } => { 52 | match docker.volumes().get(&volume).inspect().await { 53 | Ok(info) => println!("{info:#?}"), 54 | Err(e) => eprintln!("Error: {e}"), 55 | }; 56 | } 57 | Cmd::Delete { volume } => { 58 | match docker.volumes().get(&volume).delete().await { 59 | Ok(info) => println!("{info:#?}"), 60 | Err(e) => eprintln!("Error: {e}"), 61 | }; 62 | } 63 | Cmd::List => { 64 | match docker.volumes().list(&Default::default()).await { 65 | Ok(volumes) => { 66 | for v in volumes.volumes.unwrap() { 67 | println!("{v:#?}") 68 | } 69 | } 70 | Err(e) => eprintln!("Error: {e}"), 71 | }; 72 | } 73 | Cmd::Prune => { 74 | match docker.volumes().prune(&Default::default()).await { 75 | Ok(info) => println!("{info:#?}"), 76 | Err(e) => eprintln!("Error: {e}"), 77 | }; 78 | } 79 | } 80 | 81 | Ok(()) 82 | } 83 | -------------------------------------------------------------------------------- /src/api/config.rs: -------------------------------------------------------------------------------- 1 | //! Configs are application configurations that can be used by services. 2 | //! Swarm mode must be enabled for these endpoints to work. 3 | 4 | use crate::{ 5 | conn::{Headers, Payload}, 6 | models, 7 | opts::{ConfigCreateOpts, ConfigListOpts}, 8 | Result, 9 | }; 10 | 11 | impl_api_ty!(Config => name); 12 | 13 | impl Config { 14 | impl_api_ep! { cfg: Config, resp 15 | Inspect -> &format!("/configs/{}", cfg.name), models::Config 16 | Delete -> &format!("/configs/{}", cfg.name), () 17 | } 18 | 19 | // TODO: add Config::update 20 | } 21 | 22 | impl Configs { 23 | impl_api_ep! { __: Config, resp 24 | List -> "/configs", models::Config 25 | } 26 | 27 | api_doc! { 28 | Config => Create 29 | | 30 | /// Create a new config. 
31 | pub async fn create(&self, opts: &ConfigCreateOpts) -> Result { 32 | use serde::Deserialize; 33 | #[derive(Deserialize)] 34 | struct ConfigCreateResponse { 35 | #[serde(rename = "Id")] 36 | pub id: String, 37 | } 38 | self.docker 39 | .post_json("/configs/create", Payload::Json(opts.serialize_vec()?), Headers::none()) 40 | .await 41 | .map(|resp: ConfigCreateResponse| { 42 | Config::new(self.docker.clone(), resp.id) 43 | }) 44 | }} 45 | } 46 | -------------------------------------------------------------------------------- /src/api/container.rs: -------------------------------------------------------------------------------- 1 | //! Create and manage containers. 2 | use crate::opts::{ 3 | ContainerCommitOpts, ContainerCreateOpts, ContainerListOpts, ContainerPruneOpts, 4 | ContainerRemoveOpts, ContainerRestartOpts, ContainerStopOpts, ExecStartOpts, 5 | }; 6 | use crate::{models, stream}; 7 | 8 | use std::{io, path::Path, str}; 9 | 10 | use futures_util::{Stream, TryStreamExt}; 11 | use hyper::Body; 12 | use serde::Deserialize; 13 | 14 | use crate::{ 15 | api::Exec, 16 | conn::{tty, Headers, Payload}, 17 | opts::ExecCreateOpts, 18 | Error, Result, 19 | }; 20 | use containers_api::url::{append_query, construct_ep, encoded_pair}; 21 | 22 | impl_api_ty!(Container => id); 23 | 24 | impl Container { 25 | impl_api_ep! {container: Container, resp 26 | Inspect -> &format!("/containers/{}/json", container.id), models::ContainerInspect200Response 27 | Logs -> &format!("/containers/{}/logs", container.id), () 28 | DeleteWithOpts -> &format!("/containers/{}", container.id), String, delete 29 | } 30 | 31 | api_doc! { Container => Top 32 | | 33 | /// Returns a `top` view of information about the container process. 34 | /// On Unix systems, this is done by running the ps command. This endpoint is not supported on Windows. 35 | pub async fn top(&self, psargs: Option<&str>) -> Result { 36 | let mut ep = format!("/containers/{}/top", self.id); 37 | if let Some(ref args) = psargs { 38 | append_query(&mut ep, encoded_pair("ps_args", args)); 39 | } 40 | self.docker.get_json(&ep).await 41 | }} 42 | 43 | api_doc! { Container => Attach 44 | | 45 | /// Attaches a [`TtyMultiplexer`](TtyMultiplexer) to the container. 46 | /// 47 | /// The [`TtyMultiplexer`](TtyMultiplexer) implements Stream for returning Stdout and Stderr chunks. It also implements [`AsyncWrite`](futures_util::io::AsyncWrite) for writing to Stdin. 48 | /// 49 | /// The multiplexer can be split into its read and write halves with the [`split`](TtyMultiplexer::split) method 50 | pub async fn attach(&self) -> Result { 51 | let inspect = self.inspect().await?; 52 | let is_tty = inspect.config.and_then(|c| c.tty).unwrap_or_default(); 53 | stream::attach( 54 | self.docker.clone(), 55 | format!( 56 | "/containers/{}/attach?stream=1&stdout=1&stderr=1&stdin=1", 57 | self.id 58 | ), 59 | Payload::empty(), 60 | is_tty, 61 | ) 62 | .await 63 | }} 64 | 65 | api_doc! { Container => Changes 66 | | 67 | /// Returns a set of changes made to the container instance. 68 | pub async fn changes(&self) -> Result> { 69 | self.docker 70 | .get_json(&format!("/containers/{}/changes", self.id)) 71 | .await 72 | }} 73 | 74 | api_doc! { Container => Export 75 | | 76 | /// Exports the current docker container into a tarball. 77 | pub fn export(&self) -> impl Stream>> + '_ { 78 | self.docker 79 | .get_stream(format!("/containers/{}/export", self.id)) 80 | .map_ok(|c| c.to_vec()) 81 | }} 82 | 83 | api_doc! 
{ Container => Stats 84 | | 85 | /// Returns a stream of stats specific to this container instance. 86 | pub fn stats(&self) -> impl Stream> + Unpin + '_ { 87 | let codec = asynchronous_codec::LinesCodec {}; 88 | 89 | let reader = Box::pin( 90 | self.docker 91 | .get_stream(format!("/containers/{}/stats", self.id)) 92 | .map_err(|e| io::Error::new(io::ErrorKind::Other, e)), 93 | ) 94 | .into_async_read(); 95 | 96 | Box::pin( 97 | asynchronous_codec::FramedRead::new(reader, codec) 98 | .map_err(Error::IO) 99 | .and_then(|s: String| async move { 100 | log::trace!("{}", s); 101 | serde_json::from_str(&s).map_err(Error::SerdeJsonError) 102 | }), 103 | ) 104 | }} 105 | 106 | api_doc! { Container => Start 107 | | 108 | /// Start the container instance. 109 | pub async fn start(&self) -> Result<()> { 110 | self.docker 111 | .post_string( 112 | &format!("/containers/{}/start", self.id), 113 | Payload::empty(), 114 | Headers::none(), 115 | ) 116 | .await 117 | .map(|_| ()) 118 | }} 119 | 120 | api_doc! { Container => Stop 121 | | 122 | /// Stop the container instance. 123 | pub async fn stop(&self, opts: &ContainerStopOpts) -> Result<()> { 124 | let ep = construct_ep(format!("/containers/{}/stop", self.id), opts.serialize()); 125 | self.docker 126 | .post_string(&ep, Payload::empty(), Headers::none()) 127 | .await 128 | .map(|_| ()) 129 | }} 130 | 131 | api_doc! { Container => Restart 132 | | 133 | /// Restart the container instance. 134 | pub async fn restart(&self, opts: &ContainerRestartOpts) -> Result<()> { 135 | let ep = construct_ep(format!("/containers/{}/restart", self.id), opts.serialize()); 136 | self.docker 137 | .post_string(&ep, Payload::empty(), Headers::none()) 138 | .await 139 | .map(|_| ()) 140 | }} 141 | 142 | api_doc! { Container => Kill 143 | | 144 | /// Kill the container instance. 145 | pub async fn kill(&self, signal: Option<&str>) -> Result<()> { 146 | let mut ep = format!("/containers/{}/kill", self.id); 147 | if let Some(sig) = signal { 148 | append_query(&mut ep, encoded_pair("signal", sig)); 149 | } 150 | self.docker 151 | .post_string(&ep, Payload::empty(), Headers::none()) 152 | .await 153 | .map(|_| ()) 154 | }} 155 | 156 | api_doc! { Container => Rename 157 | | 158 | /// Rename the container instance. 159 | pub async fn rename(&self, name: &str) -> Result<()> { 160 | self.docker 161 | .post_string( 162 | &format!( 163 | "/containers/{}/rename?{}", 164 | self.id, 165 | encoded_pair("name", name) 166 | ), 167 | Payload::empty(), 168 | Headers::none(), 169 | ) 170 | .await 171 | .map(|_| ()) 172 | }} 173 | 174 | api_doc! { Container => Pause 175 | | 176 | /// Pause the container instance. 177 | pub async fn pause(&self) -> Result<()> { 178 | self.docker 179 | .post_string( 180 | &format!("/containers/{}/pause", self.id), 181 | Payload::empty(), 182 | Headers::none(), 183 | ) 184 | .await 185 | .map(|_| ()) 186 | }} 187 | 188 | api_doc! { Container => Unpause 189 | | 190 | /// Unpause the container instance. 191 | pub async fn unpause(&self) -> Result<()> { 192 | self.docker 193 | .post_string( 194 | &format!("/containers/{}/unpause", self.id), 195 | Payload::empty(), 196 | Headers::none(), 197 | ) 198 | .await 199 | .map(|_| ()) 200 | }} 201 | 202 | api_doc! { Container => Wait 203 | | 204 | /// Wait until the container stops. 205 | pub async fn wait(&self) -> Result { 206 | self.docker 207 | .post_json( 208 | format!("/containers/{}/wait", self.id), 209 | Payload::empty(), 210 | Headers::none(), 211 | ) 212 | .await 213 | }} 214 | 215 | api_doc! 
{ Exec 216 | | 217 | /// Execute a command in this container. 218 | pub async fn exec( 219 | &self, 220 | create_opts: &ExecCreateOpts, 221 | start_opts: &ExecStartOpts, 222 | ) -> Result { 223 | Exec::create_and_start(self.docker.clone(), &self.id, create_opts, start_opts).await 224 | }} 225 | 226 | api_doc! { Container => Archive 227 | | 228 | /// Copy a file/folder from the container. The resulting stream is a tarball of the extracted 229 | /// files. 230 | /// 231 | /// If `path` is not an absolute path, it is relative to the container’s root directory. The 232 | /// resource specified by `path` must exist. To assert that the resource is expected to be a 233 | /// directory, `path` should end in `/` or `/`. (assuming a path separator of `/`). If `path` 234 | /// ends in `/.` then this indicates that only the contents of the path directory should be 235 | /// copied. A symlink is always resolved to its target. 236 | pub fn copy_from(&self, path: impl AsRef) -> impl Stream>> + '_ { 237 | self.docker 238 | .get_stream(format!( 239 | "/containers/{}/archive?{}", 240 | self.id, 241 | encoded_pair("path", path.as_ref().to_string_lossy()) 242 | )) 243 | .map_ok(|c| c.to_vec()) 244 | }} 245 | 246 | api_doc! { PutContainer => Archive 247 | | 248 | /// Copy a byte slice as file into (see `bytes`) the container. 249 | /// 250 | /// The file will be copied at the given location (see `path`) and will be owned by root 251 | /// with access mask 644. 252 | pub async fn copy_file_into>(&self, path: P, bytes: &[u8]) -> Result<()> { 253 | let path = path.as_ref(); 254 | 255 | let mut ar = tar::Builder::new(Vec::new()); 256 | let mut header = tar::Header::new_gnu(); 257 | header.set_size(bytes.len() as u64); 258 | header.set_mode(0o0644); 259 | ar.append_data( 260 | &mut header, 261 | path.to_path_buf() 262 | .iter() 263 | .skip(1) 264 | .collect::(), 265 | bytes, 266 | )?; 267 | let data = ar.into_inner()?; 268 | 269 | self.copy_to(Path::new("/"), data.into()).await.map(|_| ()) 270 | }} 271 | 272 | api_doc! { PutContainer => Archive 273 | | 274 | /// Copy a tarball (see `body`) to the container. 275 | /// 276 | /// The tarball will be copied to the container and extracted at the given location (see `path`). 277 | pub async fn copy_to(&self, path: &Path, body: Body) -> Result<()> { 278 | self.docker 279 | .put( 280 | &format!( 281 | "/containers/{}/archive?{}", 282 | self.id, 283 | encoded_pair("path", path.to_string_lossy()) 284 | ), 285 | Payload::XTar(body), 286 | ) 287 | .await 288 | .map(|_| ()) 289 | }} 290 | 291 | api_doc! { Container => ArchiveInfo 292 | | 293 | /// Get information about files in a container. 294 | pub async fn stat_file
<P>
(&self, path: P) -> Result 295 | where 296 | P: AsRef, 297 | { 298 | static PATH_STAT_HEADER: &str = "X-Docker-Container-Path-Stat"; 299 | let resp = self 300 | .docker 301 | .head(&format!( 302 | "/containers/{}/archive?{}", 303 | self.id, 304 | encoded_pair("path", path.as_ref().to_string_lossy()) 305 | )) 306 | .await?; 307 | if let Some(header) = resp.headers().get(PATH_STAT_HEADER) { 308 | let header = header.to_str().map_err(|e| { 309 | Error::InvalidResponse(format!("response header was invalid - {e}")) 310 | })?; 311 | 312 | base64::decode(header) 313 | .map_err(|e| { 314 | Error::InvalidResponse(format!("expected header to be valid base64 - {e}")) 315 | }) 316 | .and_then(|s| { 317 | str::from_utf8(s.as_slice()) 318 | .map(str::to_string) 319 | .map_err(|e| { 320 | Error::InvalidResponse(format!( 321 | "expected header to be valid utf8 - {e}" 322 | )) 323 | }) 324 | }) 325 | } else { 326 | Err(Error::InvalidResponse(format!("missing `{PATH_STAT_HEADER}` header"))) 327 | } 328 | }} 329 | 330 | api_doc! { Image => Commit 331 | | 332 | /// Create a new image from this container 333 | pub async fn commit(&self, opts: &ContainerCommitOpts, config: Option<&models::ContainerConfig>) -> Result { 334 | #[derive(Deserialize)] 335 | struct IdStruct { 336 | #[serde(rename = "Id")] 337 | id: String, 338 | } 339 | 340 | let payload = if let Some(config) = config { 341 | Payload::Json(serde_json::to_string(config)?) 342 | } else { 343 | Payload::Json("{}".into()) // empty json 344 | }; 345 | 346 | self.docker 347 | .post_json( 348 | format!( 349 | "/commit?{}", 350 | opts.with_container(self.id().as_ref()) 351 | .serialize() 352 | .unwrap_or_default() 353 | ), 354 | payload, 355 | Headers::none(), 356 | ) 357 | .await 358 | .map(|id: IdStruct| id.id) 359 | }} 360 | } 361 | 362 | impl Containers { 363 | impl_api_ep! {__: Container, resp 364 | List -> "/containers/json", models::ContainerSummary 365 | Prune -> "/containers/prune", models::ContainerPrune200Response 366 | } 367 | 368 | api_doc! { Containers => Create 369 | | 370 | /// Create a container 371 | pub async fn create(&self, opts: &ContainerCreateOpts) -> Result { 372 | let ep = if let Some(name) = opts.name() { 373 | construct_ep("/containers/create", Some(encoded_pair("name", name))) 374 | } else { 375 | "/containers/create".to_owned() 376 | }; 377 | self.docker 378 | .post_json(&ep, Payload::Json(opts.serialize_vec()?), Headers::none()) 379 | .await 380 | .map(|resp: models::ContainerCreateResponse| { 381 | Container::new(self.docker.clone(), resp.id) 382 | }) 383 | }} 384 | } 385 | -------------------------------------------------------------------------------- /src/api/exec.rs: -------------------------------------------------------------------------------- 1 | //! Run new commands inside running containers. 2 | 3 | use hyper::Body; 4 | 5 | use crate::{ 6 | conn::{tty, Headers, Payload}, 7 | models, 8 | opts::{ExecCreateOpts, ExecResizeOpts, ExecStartOpts}, 9 | stream, Docker, Result, 10 | }; 11 | 12 | api_doc! { Exec 13 | /// Interface for docker exec instance 14 | | 15 | pub struct Exec { 16 | docker: Docker, 17 | id: crate::Id, 18 | }} 19 | 20 | impl Exec { 21 | fn new(docker: Docker, id: impl Into) -> Self { 22 | Exec { 23 | docker, 24 | id: id.into(), 25 | } 26 | } 27 | 28 | /// Get a reference to a set of operations available to an already created exec instance. 29 | /// 30 | /// It's in callers responsibility to ensure that exec instance with specified id actually 31 | /// exists. 
Use [Exec::create](Exec::create) to ensure that the exec instance is created 32 | /// beforehand. 33 | pub fn get(docker: Docker, id: impl Into) -> Exec { 34 | Exec::new(docker, id) 35 | } 36 | 37 | api_doc! { Exec => Inspect 38 | | 39 | /// Inspect this Exec instance 40 | pub async fn inspect(&self) -> Result { 41 | Self::inspect_impl(&self.docker, self.id.as_ref()).await 42 | }} 43 | 44 | async fn inspect_impl(docker: &Docker, id: &str) -> Result { 45 | docker.get_json(&format!("/exec/{id}/json")).await 46 | } 47 | 48 | async fn create_impl( 49 | docker: Docker, 50 | container_id: &str, 51 | opts: &ExecCreateOpts, 52 | ) -> Result { 53 | #[derive(serde::Deserialize)] 54 | #[serde(rename_all = "PascalCase")] 55 | struct Response { 56 | id: String, 57 | } 58 | 59 | docker 60 | .post_json( 61 | &format!("/containers/{}/exec", container_id), 62 | Payload::Json(opts.serialize_vec()?), 63 | Headers::none(), 64 | ) 65 | .await 66 | .map(|resp: Response| resp.id.into()) 67 | } 68 | 69 | api_doc! { Exec => Create 70 | | 71 | /// Creates a new exec instance that will be executed in a container with id == container_id. 72 | pub async fn create( 73 | docker: Docker, 74 | container_id: impl AsRef, 75 | opts: &ExecCreateOpts, 76 | ) -> Result 77 | { 78 | Self::create_impl(docker.clone(), container_id.as_ref(), opts) 79 | .await 80 | .map(|id| Exec::new(docker, id)) 81 | }} 82 | 83 | async fn start_impl( 84 | docker: Docker, 85 | id: &str, 86 | opts: &ExecStartOpts, 87 | ) -> Result { 88 | let endpoint = format!("/exec/{}/start", id); 89 | let inspect_data = Self::inspect_impl(&docker, id).await?; 90 | let is_tty = inspect_data 91 | .process_config 92 | .and_then(|c| c.tty) 93 | .unwrap_or_default(); 94 | 95 | stream::attach( 96 | docker, 97 | endpoint, 98 | Payload::Json(opts.serialize_vec()?.into()), 99 | is_tty, 100 | ) 101 | .await 102 | } 103 | 104 | api_doc! { Exec => Start 105 | | 106 | /// Starts this exec instance returning a multiplexed tty stream. 107 | pub async fn start(&self, opts: &ExecStartOpts) -> Result { 108 | Self::start_impl(self.docker.clone(), self.id.as_ref(), opts).await 109 | }} 110 | 111 | pub(crate) async fn create_and_start( 112 | docker: Docker, 113 | container_id: impl AsRef, 114 | create_opts: &ExecCreateOpts, 115 | start_opts: &ExecStartOpts, 116 | ) -> Result { 117 | let container_id = container_id.as_ref(); 118 | let id = Self::create_impl(docker.clone(), container_id, create_opts).await?; 119 | 120 | Self::start_impl(docker, id.as_ref(), start_opts).await 121 | } 122 | 123 | api_doc! { Exec => Resize 124 | | 125 | /// Resize the TTY session used by an exec instance. This only works if the exec was created 126 | /// with `tty` enabled. 127 | pub async fn resize(&self, opts: &ExecResizeOpts) -> Result<()> { 128 | let body: Body = opts.serialize()?.into(); 129 | 130 | self.docker 131 | .post_json( 132 | &format!("/exec/{}/resize", &self.id), 133 | Payload::Json(body), 134 | Headers::none(), 135 | ) 136 | .await 137 | }} 138 | } 139 | -------------------------------------------------------------------------------- /src/api/image.rs: -------------------------------------------------------------------------------- 1 | //! Create and manage images. 
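Before moving on to images, here is a minimal sketch tying together the container lifecycle and exec endpoints from the two files above. It is illustrative only: the `ExecCreateOpts` builder methods (`command`, `attach_stdout`) follow the crate's usual opts-builder pattern but are not shown in these files, and `ContainerStopOpts`/`ExecStartOpts` are assumed to implement `Default`.

```rust
use docker_api::{
    opts::{ContainerStopOpts, ExecCreateOpts, ExecStartOpts},
    Docker, Result,
};

async fn run_once(docker: &Docker, id: &str) -> Result<()> {
    let container = docker.containers().get(id);
    container.start().await?;

    // Builder methods below are assumptions based on the crate's opts-builder pattern.
    let create_opts = ExecCreateOpts::builder()
        .command(["uname", "-a"])
        .attach_stdout(true)
        .build();

    // `Container::exec` creates and starts the exec instance, yielding a
    // `tty::Multiplexer` that can be used for reading output / writing to STDIN.
    let _multiplexer = container
        .exec(&create_opts, &ExecStartOpts::default())
        .await?;

    container.stop(&ContainerStopOpts::default()).await
}
```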
2 | 3 | use crate::{ 4 | models, 5 | opts::{ 6 | ClearCacheOpts, ImageBuildOpts, ImageListOpts, ImagePruneOpts, ImagePushOpts, 7 | ImageRemoveOpts, PullOpts, TagOpts, 8 | }, 9 | }; 10 | 11 | use std::io::Read; 12 | 13 | use futures_util::{stream::Stream, TryFutureExt, TryStreamExt}; 14 | 15 | use containers_api::{ 16 | conn::{Headers, Payload, AUTH_HEADER}, 17 | tarball, 18 | url::{construct_ep, encoded_pair, encoded_pairs}, 19 | }; 20 | 21 | use crate::Result; 22 | 23 | impl_api_ty!(Image => name); 24 | 25 | impl Image { 26 | impl_api_ep! {img: Image, resp 27 | Inspect -> &format!("/images/{}/json", img.name), models::ImageInspect 28 | } 29 | 30 | api_doc! { Image => Delete 31 | | 32 | /// Remove this image with options. 33 | /// 34 | /// Use [`delete`](Image::delete) to delete without options. 35 | pub async fn remove(&self, opts: &ImageRemoveOpts) -> Result> { 36 | let ep = 37 | containers_api::url::construct_ep(format!("/images/{}", self.name), opts.serialize()); 38 | self.docker.delete_json(ep.as_ref()).await 39 | }} 40 | 41 | api_doc! { Image => Delete 42 | | 43 | /// Delete this image with force. 44 | /// 45 | /// Use [`remove`](Image::remove) to delete with options. 46 | pub async fn delete(&self) -> Result> { 47 | self.docker 48 | .delete_json(&format!("/images/{}", self.name)) 49 | .await 50 | }} 51 | 52 | api_doc! { Image => History 53 | | 54 | /// Lists the history of the images set of changes. 55 | pub async fn history(&self) -> Result { 56 | self.docker 57 | .get_json(&format!("/images/{}/history", self.name)) 58 | .await 59 | }} 60 | 61 | api_doc! { Image => Get 62 | | 63 | /// Export this image to a tarball. 64 | pub fn export(&self) -> impl Stream>> + Unpin + '_ { 65 | Box::pin( 66 | self.docker 67 | .get_stream(format!("/images/{}/get", self.name)) 68 | .map_ok(|c| c.to_vec()), 69 | ) 70 | }} 71 | 72 | api_doc! { Image => Tag 73 | | 74 | /// Adds a tag to an image. 75 | pub async fn tag(&self, opts: &TagOpts) -> Result<()> { 76 | let ep = construct_ep(format!("/images/{}/tag", self.name), opts.serialize()); 77 | self.docker 78 | .post_string(&ep, Payload::empty(), Headers::none()) 79 | .await 80 | .map(|_| ()) 81 | }} 82 | 83 | api_doc! { Image => Push 84 | | 85 | /// Push an image to registry. 86 | pub async fn push(&self, opts: &ImagePushOpts) -> Result<()> { 87 | let ep = construct_ep(format!("/images/{}/push", self.name), opts.serialize()); 88 | 89 | let headers = opts 90 | .auth_header() 91 | .map(|auth| Headers::single(AUTH_HEADER, auth)) 92 | .unwrap_or_else(Headers::default); 93 | 94 | self.docker 95 | .post_string(&ep, Payload::empty(), Some(headers)) 96 | .await 97 | .map(|_| ()) 98 | }} 99 | 100 | api_doc! { Distribution => Inspect 101 | | 102 | /// Return image digest and platform information by contacting the registry. 103 | pub async fn distribution_inspect(&self) -> Result { 104 | self.docker 105 | .post_json( 106 | &format!("/distribution/{}/json", self.name), 107 | Payload::empty(), 108 | Headers::none(), 109 | ) 110 | .await 111 | }} 112 | } 113 | 114 | impl Images { 115 | impl_api_ep! {img: Image, resp 116 | List -> "/images/json", models::ImageSummary 117 | Prune -> "/images/prune", models::ImagePrune200Response 118 | } 119 | 120 | api_doc! { Image => Build 121 | | 122 | /// Builds a new image by reading a Dockerfile in a target directory. If speed is 123 | /// important consider using [`Image::build_par`](Image::build_par) that utilizes 124 | /// parallel compression on big directories, to use it enable `par-compression` feature. 
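The streaming endpoints above, such as `Image::export`, are consumed chunk by chunk. A minimal sketch of collecting an exported image into an in-memory tarball, assuming `futures_util` is available in the caller's crate as it is in this project's own examples:

```rust
use docker_api::{Docker, Result};
use futures_util::TryStreamExt;

async fn export_image(docker: &Docker, name: &str) -> Result<Vec<u8>> {
    let image = docker.images().get(name);
    let mut export = image.export();

    // Each stream item is a chunk of the tarball produced by the daemon.
    let mut tarball = Vec::new();
    while let Some(chunk) = export.try_next().await? {
        tarball.extend_from_slice(&chunk);
    }
    Ok(tarball)
}
```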
125 | pub fn build<'docker>( 126 | &'docker self, 127 | opts: &ImageBuildOpts, 128 | ) -> impl Stream> + Unpin + 'docker { 129 | let ep = construct_ep("/build", opts.serialize()); 130 | let mut bytes = vec![]; 131 | let tar_result = tarball::dir(&mut bytes, &opts.path); 132 | 133 | let docker = &self.docker; 134 | Box::pin( 135 | async move { 136 | tar_result?; 137 | 138 | let value_stream = 139 | docker.post_into_stream(ep, Payload::Tar(bytes), Headers::none()); 140 | 141 | Ok(value_stream) 142 | } 143 | .try_flatten_stream(), 144 | ) 145 | }} 146 | 147 | api_doc! { Image => Build 148 | | 149 | #[cfg(feature = "par-compress")] 150 | /// Builds a new image by reading a Dockerfile in a target directory. Uses parallel 151 | /// compression algorithm to speed up the execution. For a single-threaded version check 152 | /// [`Image::build`](Image::build). 153 | pub fn build_par<'docker>( 154 | &'docker self, 155 | opts: &ImageBuildOpts, 156 | ) -> impl Stream> + Unpin + 'docker { 157 | let ep = construct_ep("/build", opts.serialize()); 158 | 159 | let tar_result = tarball::dir_par(&opts.path); 160 | 161 | let docker = &self.docker; 162 | Box::pin( 163 | async move { 164 | let bytes = tar_result?; 165 | 166 | let value_stream = 167 | docker.post_into_stream(ep, Payload::Tar(bytes), Headers::none()); 168 | 169 | Ok(value_stream) 170 | } 171 | .try_flatten_stream(), 172 | ) 173 | }} 174 | 175 | api_doc! { Image => Search 176 | | 177 | /// Search for docker images by term. 178 | pub async fn search(&self, term: T) -> Result 179 | where 180 | T: AsRef, 181 | { 182 | self.docker 183 | .get_json(&construct_ep( 184 | "/images/search", 185 | Some(encoded_pair("term", term.as_ref())), 186 | )) 187 | .await 188 | }} 189 | 190 | api_doc! { Image => Pull 191 | | 192 | /// Pull and create a new docker images from an existing image. 193 | pub fn pull<'docker>( 194 | &'docker self, 195 | opts: &PullOpts, 196 | ) -> impl Stream> + Unpin + 'docker { 197 | let headers = opts.auth_header().map(|a| Headers::single(AUTH_HEADER, a)); 198 | 199 | Box::pin(self.docker.post_into_stream( 200 | construct_ep("/images/create", opts.serialize()), 201 | Payload::empty(), 202 | headers, 203 | )) 204 | }} 205 | 206 | api_doc! { Image => GetAll 207 | | 208 | /// Exports a collection of named images, 209 | /// either by name, name:tag, or image id, into a tarball. 210 | pub fn export<'docker>( 211 | &'docker self, 212 | names: Vec<&str>, 213 | ) -> impl Stream>> + 'docker { 214 | self.docker 215 | .get_stream(format!( 216 | "/images/get?{}", 217 | encoded_pairs(names.iter().map(|n| ("names", *n))) 218 | )) 219 | .map_ok(|c| c.to_vec()) 220 | }} 221 | 222 | api_doc! { Image => Load 223 | | 224 | /// Imports an image or set of images from a given tarball source. 225 | /// Source can be uncompressed on compressed via gzip, bzip2 or xz. 226 | pub fn import<'docker, R>( 227 | &'docker self, 228 | mut tarball: R, 229 | ) -> impl Stream> + Unpin + 'docker 230 | where 231 | R: Read + Send + 'docker, 232 | { 233 | Box::pin( 234 | async move { 235 | let mut bytes = Vec::default(); 236 | 237 | tarball.read_to_end(&mut bytes)?; 238 | 239 | let value_stream = self.docker.post_into_stream( 240 | "/images/load", 241 | Payload::Tar(bytes), 242 | Headers::none(), 243 | ); 244 | Ok(value_stream) 245 | } 246 | .try_flatten_stream(), 247 | ) 248 | }} 249 | 250 | api_doc! { Image => Push 251 | | 252 | /// Push an image to registry. 
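The `pull` endpoint above returns a stream of progress chunks rather than a single response. A minimal sketch of draining it; `PullOpts::builder().image(...)` is assumed here following the crate's opts-builder convention and is not shown in this file:

```rust
use docker_api::{opts::PullOpts, Docker};
use futures_util::StreamExt;

async fn pull_image(docker: &Docker, name: &str) {
    // Assumed builder method; the real option names live in the `opts` module.
    let opts = PullOpts::builder().image(name).build();

    let images = docker.images();
    let mut stream = images.pull(&opts);

    // Progress chunks keep arriving until the pull finishes or fails.
    while let Some(chunk) = stream.next().await {
        match chunk {
            Ok(update) => println!("{update:?}"),
            Err(e) => eprintln!("pull failed: {e}"),
        }
    }
}
```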
253 | pub async fn push(&self, name: impl Into, opts: &ImagePushOpts) -> Result<()> { 254 | let image = Image::new(self.docker.clone(), name); 255 | image.push(opts).await 256 | }} 257 | 258 | api_doc! { Build => Prune 259 | | 260 | /// Clear image build cache. 261 | pub async fn clear_cache( 262 | &self, 263 | opts: &ClearCacheOpts, 264 | ) -> Result { 265 | self.docker 266 | .post_json( 267 | construct_ep("/build/prune", opts.serialize()), 268 | Payload::empty(), 269 | Headers::none(), 270 | ) 271 | .await 272 | }} 273 | } 274 | -------------------------------------------------------------------------------- /src/api/mod.rs: -------------------------------------------------------------------------------- 1 | //! All api endpoints like containers, images, networks... 2 | pub mod container; 3 | pub mod exec; 4 | pub mod image; 5 | pub mod network; 6 | pub mod system; 7 | pub mod volume; 8 | 9 | #[cfg(feature = "swarm")] 10 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 11 | pub mod config; 12 | #[cfg(feature = "swarm")] 13 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 14 | pub mod node; 15 | #[cfg(feature = "swarm")] 16 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 17 | pub mod plugin; 18 | #[cfg(feature = "swarm")] 19 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 20 | pub mod secret; 21 | #[cfg(feature = "swarm")] 22 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 23 | pub mod service; 24 | #[cfg(feature = "swarm")] 25 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 26 | pub mod swarm; 27 | #[cfg(feature = "swarm")] 28 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 29 | pub mod task; 30 | 31 | pub use {container::*, exec::*, image::*, network::*, system::*, volume::*}; 32 | 33 | #[cfg(feature = "swarm")] 34 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 35 | pub use {config::*, node::*, plugin::*, secret::*, service::*, swarm::*, task::*}; 36 | -------------------------------------------------------------------------------- /src/api/network.rs: -------------------------------------------------------------------------------- 1 | //! Create and manage user-defined networks that containers can be attached to. 2 | 3 | use crate::{ 4 | conn::{Headers, Payload}, 5 | models, 6 | opts::{ 7 | ContainerConnectionOpts, ContainerDisconnectionOpts, NetworkCreateOpts, NetworkListOpts, 8 | NetworkPruneOpts, 9 | }, 10 | Result, 11 | }; 12 | 13 | impl_api_ty!(Network => id); 14 | 15 | impl Network { 16 | impl_api_ep! { net: Network, resp 17 | Inspect -> &format!("/networks/{}", net.id), models::Network 18 | Delete -> &format!("/networks/{}", net.id), () 19 | } 20 | 21 | api_doc! { Network => Connect 22 | | 23 | /// Connect a container to a network. 24 | pub async fn connect(&self, opts: &ContainerConnectionOpts) -> Result<()> { 25 | self.docker 26 | .post_string( 27 | &format!("/networks/{}/connect", self.id), 28 | Payload::Json(opts.serialize_vec()?), 29 | Headers::none(), 30 | ) 31 | .await 32 | .map(|_| ()) 33 | }} 34 | 35 | api_doc! { Network => Disconnect 36 | | 37 | /// Disconnect a container from a network. 38 | pub async fn disconnect(&self, opts: &ContainerDisconnectionOpts) -> Result<()> { 39 | self.docker 40 | .post_string( 41 | &format!("/networks/{}/disconnect", &self.id), 42 | Payload::Json(opts.serialize_vec()?), 43 | Headers::none(), 44 | ) 45 | .await 46 | .map(|_| ()) 47 | }} 48 | } 49 | 50 | impl Networks { 51 | impl_api_ep! 
{ __: Network, resp 52 | List -> "/networks", models::Network 53 | Prune -> "/networks/prune", models::NetworkPrune200Response 54 | } 55 | 56 | api_doc! { Network => Create 57 | | 58 | /// Create a new network. 59 | pub async fn create(&self, opts: &NetworkCreateOpts) -> Result { 60 | // #TODO: handle missing id and return warnings (?) 61 | self.docker 62 | .post_json( 63 | "/networks/create", 64 | Payload::Json(opts.serialize_vec()?), 65 | Headers::none(), 66 | ) 67 | .await 68 | .map(|resp: models::NetworkCreate201Response| { 69 | Network::new(self.docker.clone(), resp.id.unwrap_or_default()) 70 | }) 71 | }} 72 | } 73 | -------------------------------------------------------------------------------- /src/api/node.rs: -------------------------------------------------------------------------------- 1 | //! Manage Docker nodes 2 | //! 3 | //! Nodes are instances of the Engine participating in a swarm. 4 | //! Swarm mode must be enabled for these endpoints to work. 5 | 6 | use crate::{ 7 | conn::{Headers, Payload}, 8 | models, 9 | opts::{NodeListOpts, NodeUpdateOpts}, 10 | Result, 11 | }; 12 | use containers_api::url::encoded_pair; 13 | 14 | impl_api_ty!(Node => name); 15 | 16 | type Void = (); 17 | 18 | impl Node { 19 | impl_api_ep! {node: Node, resp 20 | Inspect -> &format!("/nodes/{}", node.name), models::Node 21 | ForceDelete -> &format!("/nodes/{}", node.name), Void 22 | } 23 | 24 | api_doc! { Node => Update 25 | | 26 | /// Update a node. 27 | pub async fn update(&self, opts: &NodeUpdateOpts) -> Result<()> { 28 | self.docker 29 | .post( 30 | &format!( 31 | "/nodes/{}/update?{}", 32 | self.name, 33 | encoded_pair("version", opts.version().to_string()) 34 | ), 35 | Payload::Json(opts.serialize_vec()?), 36 | Headers::none() 37 | ) 38 | .await 39 | .map(|_| ()) 40 | }} 41 | } 42 | 43 | impl Nodes { 44 | impl_api_ep! {node: Node, resp 45 | List -> "/nodes", models::Node 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/api/plugin.rs: -------------------------------------------------------------------------------- 1 | //! Install, create and manage plugins 2 | 3 | use crate::{ 4 | conn::{Headers, Payload}, 5 | models, 6 | opts::PluginListOpts, 7 | Result, 8 | }; 9 | use containers_api::url::{construct_ep, encoded_pair}; 10 | 11 | use std::path::Path; 12 | 13 | impl_api_ty!(Plugin => name); 14 | 15 | impl Plugin { 16 | impl_api_ep! {plug: Plugin, resp 17 | Inspect -> &format!("/plugins/{}/json", plug.name), models::Plugin 18 | ForceDelete -> &format!("/plugins/{}", plug.name), models::Plugin 19 | } 20 | 21 | api_doc! { Plugin => Enable 22 | | 23 | /// Enable a plugin. 24 | pub async fn enable(&self, timeout: Option) -> Result<()> { 25 | let query = timeout.map(|timeout| encoded_pair("timeout", timeout)); 26 | self.docker 27 | .post( 28 | &construct_ep(format!("/plugins/{}/enable", self.name), query), 29 | Payload::empty(), 30 | Headers::none() 31 | ) 32 | .await 33 | .map(|_| ()) 34 | }} 35 | 36 | api_doc! { Plugin => Disable 37 | | 38 | /// Disable a plugin. 39 | pub async fn disable(&self) -> Result<()> { 40 | self.docker 41 | .post(&format!("/plugins/{}/disable", self.name), Payload::empty(), Headers::none()) 42 | .await 43 | .map(|_| ()) 44 | }} 45 | 46 | api_doc! { Plugin => Push 47 | | 48 | /// Push a plugin to the registry. 49 | pub async fn push(&self) -> Result<()> { 50 | self.docker 51 | .post(&format!("/plugins/{}/push", self.name), Payload::empty(), Headers::none()) 52 | .await 53 | .map(|_| ()) 54 | }} 55 | 56 | api_doc! 
{ Plugin => Create 57 | | 58 | /// Create a plugin from a tar archive on the file system. The `path` parameter is a path 59 | /// to the tar containing plugin rootfs and manifest. 60 | pub async fn create
<P>
(&self, path: P) -> Result<()> 61 | where 62 | P: AsRef, 63 | { 64 | self.docker 65 | .post( 66 | &format!("/plugins/{}/create", self.name), 67 | Payload::Text(path.as_ref().to_string_lossy().to_string()), 68 | Headers::none() 69 | ) 70 | .await 71 | .map(|_| ()) 72 | }} 73 | } 74 | 75 | impl Plugins { 76 | impl_api_ep! {plug: Plugin, resp 77 | List -> "/plugins", models::Plugin 78 | } 79 | } 80 | -------------------------------------------------------------------------------- /src/api/secret.rs: -------------------------------------------------------------------------------- 1 | //! Secrets are sensitive data that can be used by services. Swarm mode must be enabled for these endpoints to work. 2 | 3 | use crate::{ 4 | conn::{Headers, Payload}, 5 | models, 6 | opts::{SecretCreateOpts, SecretListOpts}, 7 | Result, 8 | }; 9 | 10 | impl_api_ty!(Secret => name); 11 | 12 | impl Secret { 13 | impl_api_ep! { secret: Secret, resp 14 | Inspect -> &format!("/secrets/{}", secret.name), models::Secret 15 | Delete -> &format!("/secrets/{}", secret.name), () 16 | } 17 | // TODO: add Secret::update 18 | } 19 | 20 | impl Secrets { 21 | impl_api_ep! { __: Secret, resp 22 | List -> "/secrets", models::Secret 23 | } 24 | 25 | api_doc! { Secret => Create 26 | | 27 | /// Create a new secret. 28 | pub async fn create(&self, opts: &SecretCreateOpts) -> Result { 29 | use serde::Deserialize; 30 | #[derive(Deserialize)] 31 | struct SecretCreateResponse { 32 | #[serde(rename = "Id")] 33 | pub id: String, 34 | } 35 | self.docker 36 | .post_json("/secrets/create", Payload::Json(opts.serialize_vec()?), Headers::none()) 37 | .await 38 | .map(|resp: SecretCreateResponse| { 39 | Secret::new(self.docker.clone(), resp.id) 40 | }) 41 | }} 42 | } 43 | -------------------------------------------------------------------------------- /src/api/service.rs: -------------------------------------------------------------------------------- 1 | //! Manage and inspect services within a swarm. 2 | use crate::{ 3 | conn::{Headers, Payload, AUTH_HEADER}, 4 | models, 5 | opts::{ServiceListOpts, ServiceOpts}, 6 | Result, 7 | }; 8 | 9 | impl_api_ty!(Service => name); 10 | 11 | impl Service { 12 | api_doc! { Service => Create 13 | | 14 | /// Creates a new service from ServiceOpts. 15 | pub async fn create(&self, opts: &ServiceOpts) -> Result { 16 | let headers = opts 17 | .auth_header() 18 | .map(|a| Headers::single(AUTH_HEADER, a)); 19 | self.docker 20 | .post_json( 21 | "/services/create", 22 | Payload::Json(opts.serialize_vec()?), 23 | headers, 24 | ) 25 | .await 26 | }} 27 | 28 | impl_api_ep! { svc: Service, resp 29 | Inspect -> &format!("/services/{}", svc.name), models::Service 30 | Delete -> &format!("/services/{}", svc.name), models::ServiceUpdateResponse 31 | Logs -> &format!("/services/{}/logs", svc.name), () 32 | } 33 | } 34 | 35 | impl Services { 36 | impl_api_ep! { svc: Service, resp 37 | List -> "/services", models::Service 38 | } 39 | } 40 | -------------------------------------------------------------------------------- /src/api/swarm.rs: -------------------------------------------------------------------------------- 1 | //! Control and manage clusters of engines also known as Swarm 2 | 3 | use crate::{ 4 | conn::{Headers, Payload}, 5 | models, 6 | opts::{SwarmInitOpts, SwarmJoinOpts}, 7 | Docker, Result, 8 | }; 9 | 10 | api_doc! { Swarm 11 | | 12 | pub struct Swarm { 13 | docker: Docker, 14 | } 15 | } 16 | 17 | impl Swarm { 18 | pub fn new(docker: Docker) -> Self { 19 | Self { docker } 20 | } 21 | 22 | impl_api_ep! 
{_swarm: Swarm, resp 23 | Inspect -> "/swarm", models::Swarm 24 | } 25 | 26 | api_doc! { Swarm => Unlockkey 27 | | 28 | /// Get the unlock key. 29 | pub async fn get_unlock_key(&self) -> Result { 30 | self.docker.get_json("/swarm/unlockkey").await 31 | }} 32 | 33 | api_doc! { Swarm => Unlock 34 | | 35 | /// Unlock a locked manager. 36 | pub async fn unlock_manager(&self, key: &models::SwarmUnlockBodyParam) -> Result<()> { 37 | self.docker 38 | .post("/swarm/unlock", Payload::Json(serde_json::to_string(key)?), Headers::none()) 39 | .await 40 | .map(|_| ()) 41 | }} 42 | 43 | api_doc! { Swarm => Init 44 | | 45 | /// Initialize a new swarm. 46 | pub async fn initialize(&self, opts: &SwarmInitOpts) -> Result<()> { 47 | self.docker 48 | .post("/swarm/init", Payload::Json(opts.serialize_vec()?), Headers::none()) 49 | .await 50 | .map(|_| ()) 51 | }} 52 | 53 | api_doc! { Swarm => Join 54 | | 55 | /// Join an existing swarm. 56 | pub async fn join(&self, opts: &SwarmJoinOpts) -> Result<()> { 57 | self.docker 58 | .post("/swarm/join", Payload::Json(opts.serialize_vec()?), Headers::none()) 59 | .await 60 | .map(|_| ()) 61 | }} 62 | 63 | api_doc! { Swarm => Leave 64 | | 65 | /// Leave the current swarm. 66 | pub async fn leave(&self) -> Result<()> { 67 | self.docker 68 | .post("/swarm/leave?force=false", Payload::empty(), Headers::none()) 69 | .await 70 | .map(|_| ()) 71 | }} 72 | 73 | api_doc! { Swarm => Leave 74 | | 75 | /// Leave the current swarm forcefully, even if this is the last manager or that it will break the cluster. 76 | pub async fn force_leave(&self) -> Result<()> { 77 | self.docker 78 | .post("/swarm/leave?force=true", Payload::empty(), Headers::none()) 79 | .await 80 | .map(|_| ()) 81 | }} 82 | } 83 | -------------------------------------------------------------------------------- /src/api/system.rs: -------------------------------------------------------------------------------- 1 | use crate::{ 2 | models, 3 | opts::{EventsOpts, SystemDataUsageOpts}, 4 | Docker, Error, Result, 5 | }; 6 | use containers_api::url::construct_ep; 7 | use futures_util::{Stream, TryStreamExt}; 8 | 9 | use std::{convert::TryFrom, io}; 10 | 11 | impl Docker { 12 | api_doc! { System => Version 13 | | 14 | /// Returns the version of Docker that is running and various information about the system that Docker is running on. 15 | pub async fn version(&self) -> Result { 16 | self.get_json("/version").await 17 | }} 18 | 19 | api_doc! { System => Info 20 | | 21 | /// Returns system information about Docker instance that is running 22 | pub async fn info(&self) -> Result { 23 | self.get_json("/info").await 24 | }} 25 | 26 | api_doc! { System => Ping 27 | | 28 | /// This is a dummy endpoint you can use to test if the server is accessible 29 | pub async fn ping(&self) -> Result { 30 | self.get("/_ping") 31 | .await 32 | .and_then(|resp| models::PingInfo::try_from(resp.headers())) 33 | }} 34 | 35 | api_doc! 
{ System => Events 36 | | 37 | /// Returns a stream of Docker events 38 | pub fn events<'docker>( 39 | &'docker self, 40 | opts: &EventsOpts, 41 | ) -> impl Stream> + Unpin + 'docker { 42 | let ep = construct_ep("/events", opts.serialize()); 43 | let reader = Box::pin( 44 | self.get_stream(ep) 45 | .map_err(|e| io::Error::new(io::ErrorKind::Other, e)), 46 | ) 47 | .into_async_read(); 48 | 49 | Box::pin( 50 | asynchronous_codec::FramedRead::new(reader, asynchronous_codec::LinesCodec) 51 | .map_err(Error::IO) 52 | .and_then(|s: String| async move { 53 | serde_json::from_str(&s).map_err(Error::SerdeJsonError) 54 | }), 55 | ) 56 | }} 57 | 58 | api_doc! { System => DataUsage 59 | | 60 | /// Returns data usage of this Docker instance 61 | pub async fn data_usage(&self, opts: &SystemDataUsageOpts) -> Result { 62 | let ep = construct_ep("/system/df", opts.serialize()); 63 | self.get_json(&ep).await 64 | }} 65 | } 66 | -------------------------------------------------------------------------------- /src/api/task.rs: -------------------------------------------------------------------------------- 1 | //! A task is a container running on a swarm. It is the atomic scheduling unit of swarm. 2 | //! Swarm mode must be enabled for these endpoints to work. 3 | 4 | use crate::{models, opts::TaskListOpts, Result}; 5 | 6 | impl_api_ty!(Task => id); 7 | 8 | impl Task { 9 | impl_api_ep! { task: Task, resp 10 | Inspect -> &format!("/tasks/{}", task.id), models::Task 11 | Logs -> &format!("/tasks/{}/logs", task.id), () 12 | } 13 | } 14 | 15 | impl Tasks { 16 | impl_api_ep! { task: Task, resp 17 | List -> "/tasks", models::Task 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/api/volume.rs: -------------------------------------------------------------------------------- 1 | //! Create and manage persistent storage that can be attached to containers. 2 | 3 | use crate::{ 4 | conn::{Headers, Payload}, 5 | models, 6 | opts::{ClusterVolumeUpdateOpts, VolumeCreateOpts, VolumeListOpts, VolumePruneOpts}, 7 | Result, 8 | }; 9 | use containers_api::url; 10 | 11 | impl_api_ty!(Volume => name); 12 | 13 | impl Volume { 14 | impl_api_ep! {vol: Volume, resp 15 | Inspect -> &format!("/volumes/{}", vol.name), models::Volume 16 | Delete -> &format!("/volumes/{}", vol.name), () 17 | } 18 | 19 | api_doc! { Volume => Update 20 | | 21 | /// Update a volume. Valid only for Swarm cluster volumes 22 | pub async fn update(&self, opts: &ClusterVolumeUpdateOpts) -> Result<()> { 23 | let mut ep = format!("/volumes/{}", self.name()); 24 | url::append_query(&mut ep, url::encoded_pair("version", opts.version())); 25 | self.docker.put(&ep, Payload::Json(opts.serialize_vec()?)).await.map(|_| ()) 26 | }} 27 | } 28 | 29 | impl Volumes { 30 | impl_api_ep! {__: Volume, resp 31 | Prune -> "/volumes/prune", models::VolumePrune200Response 32 | } 33 | 34 | api_doc! { Volume => List 35 | | 36 | /// List available volumes 37 | pub async fn list(&self, opts: &VolumeListOpts) -> Result { 38 | let ep = url::construct_ep("/volumes", opts.serialize()); 39 | self.docker.get_json(&ep).await 40 | }} 41 | 42 | api_doc! { Volume => Create 43 | | 44 | /// Create a new volume. 45 | pub async fn create(&self, opts: &VolumeCreateOpts) -> Result { 46 | // #TODO: handle missing id and return warnings (?) 
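As an aside to the system endpoints above, a minimal sketch of taking a one-off data-usage snapshot and then following the daemon's event stream. It assumes the opts types implement `Default`, as the list opts used in this crate's own top-level example do:

```rust
use docker_api::Docker;
use futures_util::StreamExt;

async fn watch(docker: &Docker) -> docker_api::Result<()> {
    // One-off disk usage snapshot from /system/df.
    let _usage = docker.data_usage(&Default::default()).await?;

    // Follow /events until the stream ends or yields an error.
    let mut events = docker.events(&Default::default());
    while let Some(event) = events.next().await {
        let _event = event?;
        println!("received an event from the daemon");
    }
    Ok(())
}
```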
47 | self.docker 48 | .post_json( 49 | "/volumes/create", 50 | Payload::Json(opts.serialize_vec()?), 51 | Headers::none(), 52 | ) 53 | .await 54 | }} 55 | } 56 | -------------------------------------------------------------------------------- /src/builder.rs: -------------------------------------------------------------------------------- 1 | /// Necessary to work around https://github.com/rust-lang/rust/issues/52607. 2 | macro_rules! calculated_doc { 3 | ( 4 | $( 5 | #[doc = $doc:expr] 6 | $thing:item 7 | )* 8 | ) => { 9 | $( 10 | #[doc = $doc] 11 | $thing 12 | )* 13 | }; 14 | } 15 | 16 | macro_rules! impl_api_ty { 17 | ($(#[doc = $docs:expr])* $name:ident => $name_field:ident) => { 18 | paste::item! { 19 | 20 | calculated_doc!{ 21 | #[doc = concat!("Interface for accessing and manipulating Docker ", stringify!($name), ".\n", $($docs,)* "\n", api_url!($name))] 22 | #[derive(Debug)] 23 | pub struct [< $name >] { 24 | docker: crate::Docker, 25 | $name_field: crate::Id, 26 | } 27 | } 28 | impl [< $name >] { 29 | // TODO: this is possible on nightly, figure out what to do 30 | calculated_doc!{ 31 | #[doc = concat!("Exports an interface exposing operations against a ", stringify!($name), " instance.")] 32 | pub fn new(docker: crate::Docker, $name_field: impl Into) -> Self 33 | { 34 | [< $name >] { 35 | docker, 36 | $name_field: $name_field.into(), 37 | } 38 | } 39 | } 40 | 41 | calculated_doc!{ 42 | #[doc = concat!("A getter for ", stringify!($name), " ", stringify!($name_field))] 43 | pub fn $name_field(&self) -> &crate::Id { 44 | &self.$name_field 45 | } 46 | } 47 | 48 | 49 | } 50 | 51 | 52 | calculated_doc!{ 53 | #[doc = concat!("Interface for Docker ", stringify!($name), "s.", stringify!($name), ">")] 54 | #[derive(Debug)] 55 | pub struct [< $name s >] { 56 | docker: crate::Docker, 57 | } 58 | } 59 | 60 | impl [< $name s >] { 61 | calculated_doc!{ 62 | #[doc = concat!("Exports an interface for interacting with Docker ", stringify!($name), "s.")] 63 | pub fn new(docker: crate::Docker) -> Self { 64 | [< $name s >] { docker } 65 | } 66 | } 67 | 68 | calculated_doc!{ 69 | #[doc = concat!("Returns a reference to a set of operations available to a specific ", stringify!($name), ".")] 70 | pub fn get(&self, $name_field: impl Into) -> [< $name >] 71 | { 72 | [< $name >]::new(self.docker.clone(), $name_field) 73 | } 74 | } 75 | } 76 | 77 | } 78 | } 79 | } 80 | 81 | macro_rules! api_url { 82 | () => { 83 | concat!("https://docs.docker.com/engine/api/", version!()) 84 | }; 85 | (operation $ep:expr) => { 86 | concat!("\n[Api Reference](", api_url!(), "/#operation/", $ep, ")") 87 | }; 88 | (tag $ep:expr) => { 89 | concat!("\n[Api Reference](", api_url!(), "/#tag/", $ep, ")") 90 | }; 91 | ($base:ident) => { 92 | api_url!(tag stringify!($base)) 93 | }; 94 | ($base:ident => $op:ident) => { 95 | api_url!(operation concat!(stringify!($base), stringify!($op))) 96 | }; 97 | } 98 | 99 | macro_rules! api_doc { 100 | ( 101 | $base:ident => $op:ident 102 | $(#[doc = $doc:expr])* 103 | | 104 | $it:item 105 | ) => { 106 | calculated_doc!{ 107 | #[doc = concat!(api_url!($base => $op))] 108 | #[doc = "\n"] 109 | $( 110 | #[doc = $doc] 111 | )* 112 | $it 113 | } 114 | }; 115 | ( 116 | $base:ident 117 | $(#[doc = $doc:expr])* 118 | | 119 | $it:item 120 | ) => { 121 | calculated_doc!{ 122 | #[doc = concat!(api_url!($base))] 123 | #[doc = "\n"] 124 | $( 125 | #[doc = $doc] 126 | )* 127 | $it 128 | } 129 | }; 130 | } 131 | 132 | macro_rules! 
impl_api_ep { 133 | ( 134 | $it:ident: $base:ident, $resp:ident 135 | $( 136 | $op:ident -> $ep:expr, $ret:expr $(,$extra:expr)* 137 | )* 138 | ) => { 139 | $( 140 | impl_api_ep! {$op $it: $base -> $resp $ep, $ret $(,$extra)* } 141 | )* 142 | }; 143 | ( 144 | Inspect $it:ident: $base:ident -> $resp:ident $ep:expr, $ret:expr $(,$extra:expr)* 145 | ) => { 146 | paste::item! { 147 | api_doc! { $base => Inspect 148 | | 149 | #[doc = concat!("Inspect this ", stringify!($base), ".")] 150 | pub async fn inspect(&self) -> Result<$ret> { 151 | let $it = self; 152 | self.docker.get_json($ep).await 153 | }} 154 | } 155 | }; 156 | ( 157 | ForceDelete $it:ident: $base:ident -> $resp:ident $ep:expr, $ret:expr $(,$extra:expr)* 158 | ) => { 159 | 160 | paste::item! { 161 | async fn _delete(&self, force: bool) -> Result<$ret> { 162 | let query = if force { 163 | Some(containers_api::url::encoded_pair("force", force)) 164 | } else { 165 | None 166 | }; 167 | 168 | let $it = self; 169 | let ep = containers_api::url::construct_ep($ep, query); 170 | 171 | self.docker 172 | .delete_json(ep.as_ref()) 173 | .await 174 | } 175 | } 176 | paste::item! { 177 | api_doc! { $base => Delete 178 | | 179 | #[doc = concat!("Delete this ", stringify!($base), ".")] 180 | pub async fn force_delete(&self) -> Result<$ret> { 181 | self._delete(true).await 182 | }} 183 | } 184 | paste::item! { 185 | api_doc! { $base => Delete 186 | | 187 | #[doc = concat!("Delete this ", stringify!($base), ".")] 188 | pub async fn delete(&self) -> Result<$ret> { 189 | self._delete(false).await 190 | }} 191 | } 192 | }; 193 | ( 194 | Delete $it:ident: $base:ident -> $resp:ident $ep:expr, $ret:expr $(,$extra:expr)* 195 | ) => { 196 | paste::item! { 197 | api_doc! { $base => Delete 198 | | 199 | #[doc = concat!("Delete this ", stringify!($base), ".")] 200 | pub async fn delete(&self) -> Result<()> { 201 | let $it = self; 202 | self.docker.delete($ep).await.map(|_| ()) 203 | }} 204 | } 205 | }; 206 | ( 207 | DeleteWithOpts $it:ident: $base:ident -> $resp:ident $ep:expr, $ret:expr $(,$extra:expr)* 208 | ) => { 209 | impl_api_ep! { DeleteWithOpts $it: $base -> $resp $ep, $ret => $($extra)* } 210 | }; 211 | ( 212 | DeleteWithOpts $it:ident: $base:ident -> $resp:ident $ep:expr, $ret:expr => $fn:expr 213 | ) => { 214 | paste::item! { 215 | api_doc! { $base => Delete 216 | | 217 | #[doc = concat!("Delete this ", stringify!($base), ".")] 218 | #[doc = concat!("Use [`delete`](", stringify!($base), "::delete) to delete without options.")] 219 | pub async fn remove(&self, opts: &[< $base RemoveOpts >]) -> Result<$ret> { 220 | let $it = self; 221 | let ep = containers_api::url::construct_ep($ep, opts.serialize()); 222 | self.docker.$fn(ep.as_ref()).await 223 | }} 224 | } 225 | paste::item! { 226 | api_doc! { $base => Delete 227 | | 228 | #[doc = concat!("Delete this ", stringify!($base), ".")] 229 | #[doc = concat!("Use [`remove`](", stringify!($base), "::remove) to customize options.")] 230 | pub async fn delete(&self) -> Result<[< $ret >]> { 231 | let $it = self; 232 | self.docker.$fn($ep).await 233 | }} 234 | } 235 | }; 236 | ( 237 | List $it:ident: $base:ident -> $resp:ident $ep:expr, $ret:expr $(, $extra:expr)* 238 | ) => { 239 | paste::item! { 240 | api_doc! 
{ $base => List 241 | | 242 | #[doc = concat!("List available ", stringify!($base), "s.")] 243 | pub async fn list(&self, opts: &[< $base ListOpts >]) -> Result> { 244 | let ep = containers_api::url::construct_ep($ep, opts.serialize()); 245 | self.docker.get_json(&ep).await 246 | }} 247 | } 248 | }; 249 | ( 250 | Create $it:ident: $base:ident -> $resp:ident $ep:expr, $ret:expr $(, $extra:expr)* 251 | ) => { 252 | paste::item! { 253 | api_doc! { $base => Create 254 | | 255 | #[doc = concat!("Create a new ", stringify!($base), ".")] 256 | pub async fn create(&self, opts: &[< $base CreateOpts >]) -> Result<[< $base >]> { 257 | self.docker.post_json(&$ep, Payload::Json(opts.serialize_vec()?), Headers::none()).await 258 | .map(|$resp: [< $ret >]| [< $base >]::new(self.docker.clone(), $($extra)*)) 259 | }} 260 | } 261 | }; 262 | ( 263 | Prune $it:ident: $base:ident -> $resp:ident $ep:expr, $ret:expr $(, $extra:expr)* 264 | ) => { 265 | paste::item! { 266 | api_doc! { $base => Prune 267 | | 268 | #[doc = concat!("Delete stopped/unused ", stringify!($base), "s.")] 269 | pub async fn prune(&self, opts: &[< $base PruneOpts >]) -> Result<$ret> { 270 | self.docker 271 | .post_json( 272 | &containers_api::url::construct_ep($ep, opts.serialize()), 273 | crate::conn::Payload::empty(), 274 | crate::conn::Headers::none(), 275 | ).await 276 | }} 277 | } 278 | }; 279 | ( 280 | Logs $it:ident: $base:ident -> $resp:ident $ep:expr, $ret:expr $(, $extra:expr)* 281 | ) => { 282 | paste::item! { 283 | api_doc! { $base => Logs 284 | | 285 | #[doc = concat!("Returns a stream of logs from a ", stringify!($base), ".")] 286 | pub fn logs<'docker>( 287 | &'docker self, 288 | opts: &crate::opts::LogsOpts 289 | ) -> impl futures_util::Stream> + Unpin + 'docker { 290 | use containers_api::conn::tty; 291 | use futures_util::TryStreamExt; 292 | let $it = self; 293 | let ep = containers_api::url::construct_ep($ep, opts.serialize()); 294 | 295 | let stream = Box::pin(self.docker.get_stream(ep).map_err(|e| containers_api::conn::Error::Any(Box::new(e)))); 296 | 297 | Box::pin(tty::decode(stream).map_err(crate::Error::Error)) 298 | } 299 | }} 300 | }; 301 | } 302 | -------------------------------------------------------------------------------- /src/docker.rs: -------------------------------------------------------------------------------- 1 | //! Main entrypoint for interacting with the Docker API. 2 | //! 3 | //! API Reference: 4 | use crate::{ 5 | conn::{get_http_connector, Headers, Payload, Transport}, 6 | errors::{Error, Result}, 7 | ApiVersion, Containers, Images, Networks, Volumes, 8 | }; 9 | use containers_api::conn::RequestClient; 10 | 11 | #[cfg(feature = "swarm")] 12 | use crate::{Configs, Nodes, Plugins, Secrets, Services, Swarm, Tasks}; 13 | 14 | #[cfg(feature = "tls")] 15 | use crate::conn::get_https_connector; 16 | #[cfg(unix)] 17 | use crate::conn::get_unix_connector; 18 | 19 | use futures_util::{ 20 | io::{AsyncRead, AsyncWrite}, 21 | stream::Stream, 22 | }; 23 | use hyper::{body::Bytes, Body, Client, Response}; 24 | use serde::de::DeserializeOwned; 25 | use std::future::Future; 26 | use std::path::{Path, PathBuf}; 27 | use std::pin::Pin; 28 | 29 | /// Entrypoint interface for communicating with docker daemon 30 | #[derive(Debug, Clone)] 31 | pub struct Docker { 32 | version: Option, 33 | client: RequestClient, 34 | } 35 | 36 | impl Docker { 37 | /// Creates a new Docker instance by automatically choosing appropriate connection type based 38 | /// on provided `uri`. 
39 | /// 40 | /// Supported schemes are: 41 | /// - `unix://` only works when build target is `unix`, otherwise returns an Error 42 | /// - `tcp://` 43 | /// - `http://` 44 | /// 45 | /// To create a Docker instance utilizing TLS use explicit [Docker::tls](Docker::tls) 46 | /// constructor (this requires `tls` feature enabled). 47 | /// 48 | /// This creates an unversioned connector that'll use the latest server version, to use a specific version see 49 | /// [`Docker::unix_versioned`](Docker::unix_versioned). 50 | pub fn new(uri: impl AsRef) -> Result { 51 | Self::new_impl(uri.as_ref(), None) 52 | } 53 | 54 | /// Same as [`Docker::new`](Docker::new) but the API version can be explicitly specified. 55 | pub fn new_versioned(uri: impl AsRef, version: impl Into) -> Result { 56 | Self::new_impl(uri.as_ref(), Some(version.into())) 57 | } 58 | 59 | fn new_impl(uri: &str, version: Option) -> Result { 60 | let mut it = uri.split("://"); 61 | 62 | match it.next() { 63 | #[cfg(unix)] 64 | Some("unix") => { 65 | if let Some(path) = it.next() { 66 | Ok(Self::new_unix_impl(path, version)) 67 | } else { 68 | Err(Error::MissingAuthority) 69 | } 70 | } 71 | #[cfg(not(unix))] 72 | Some("unix") => Err(Error::UnsupportedScheme("unix".to_string())), 73 | Some("tcp") | Some("http") => { 74 | if let Some(host) = it.next() { 75 | Self::new_tcp_impl(host, version) 76 | } else { 77 | Err(Error::MissingAuthority) 78 | } 79 | } 80 | Some(scheme) => Err(Error::UnsupportedScheme(scheme.to_string())), 81 | None => unreachable!(), // This is never possible because calling split on an empty string 82 | // always returns at least one element 83 | } 84 | } 85 | 86 | #[cfg(unix)] 87 | #[cfg_attr(docsrs, doc(cfg(unix)))] 88 | /// Creates a new docker instance for a docker host listening on a given Unix socket. 89 | /// 90 | /// `socket_path` is the part of URI that comes after the `unix://`. For example a URI `unix:///run/docker.sock` has a 91 | /// `socket_path` == "/run/docker.sock". 92 | /// 93 | /// This creates an unversioned connector that'll use the latest server version, to use a specific version see 94 | /// [`Docker::unix_versioned`](Docker::unix_versioned). 95 | pub fn unix(socket_path: impl AsRef) -> Self { 96 | Self::new_unix_impl(socket_path.as_ref(), None) 97 | } 98 | 99 | #[cfg(unix)] 100 | #[cfg_attr(docsrs, doc(cfg(unix)))] 101 | /// Same as [`Docker::unix`](Docker::unix) but the API version can be explicitly specified. 102 | pub fn unix_versioned(socket_path: impl AsRef, version: impl Into) -> Self { 103 | Self::new_unix_impl(socket_path.as_ref(), Some(version.into())) 104 | } 105 | 106 | #[cfg(unix)] 107 | fn new_unix_impl(socket_path: impl Into, version: Option) -> Self { 108 | Docker { 109 | version, 110 | client: RequestClient::new( 111 | Transport::Unix { 112 | client: Client::builder() 113 | .pool_max_idle_per_host(0) 114 | .build(get_unix_connector()), 115 | path: socket_path.into(), 116 | }, 117 | Box::new(validate_response), 118 | ), 119 | } 120 | } 121 | 122 | #[cfg(feature = "tls")] 123 | #[cfg_attr(docsrs, doc(cfg(feature = "tls")))] 124 | /// Creates a new docker instance for a docker host listening on a given TCP socket `host`. 125 | /// `host` is the part of URI that comes after `tcp://` or `http://` or `https://` schemes, 126 | /// also known as authority part. 127 | /// 128 | /// `cert_path` specifies the base path in the filesystem containing a certificate (`cert.pem`) 129 | /// and a key (`key.pem`) that will be used by the client. 
If verify is `true` a CA file will be 130 | /// added (`ca.pem`) to the connector. 131 | /// 132 | /// Returns an error if the provided host will fail to parse as URL or reading the certificate 133 | /// files will fail. 134 | /// 135 | /// This creates an unversioned connector that'll use the latest server version, to use a specific version see 136 | /// [`Docker::unix_versioned`](Docker::unix_versioned). 137 | pub fn tls(host: impl AsRef, cert_path: impl AsRef, verify: bool) -> Result { 138 | Self::new_tls_impl(host.as_ref(), None, cert_path.as_ref(), verify) 139 | } 140 | 141 | #[cfg(feature = "tls")] 142 | #[cfg_attr(docsrs, doc(cfg(feature = "tls")))] 143 | /// Same as [`Docker::tls`](Docker::tls) but the API version can be explicitly specified. 144 | pub fn tls_versioned( 145 | host: impl AsRef, 146 | version: impl Into, 147 | cert_path: impl AsRef, 148 | verify: bool, 149 | ) -> Result { 150 | Self::new_tls_impl( 151 | host.as_ref(), 152 | Some(version.into()), 153 | cert_path.as_ref(), 154 | verify, 155 | ) 156 | } 157 | 158 | #[cfg(feature = "tls")] 159 | fn new_tls_impl( 160 | host: &str, 161 | version: Option, 162 | cert_path: &Path, 163 | verify: bool, 164 | ) -> Result { 165 | Ok(Self { 166 | version, 167 | client: RequestClient::new( 168 | Transport::EncryptedTcp { 169 | client: Client::builder().build(get_https_connector(cert_path, verify)?), 170 | host: url::Url::parse(&format!("https://{host}")).map_err(Error::InvalidUrl)?, 171 | }, 172 | Box::new(validate_response), 173 | ), 174 | }) 175 | } 176 | 177 | /// Creates a new docker instance for a docker host listening on a given TCP socket `host`. 178 | /// `host` is the part of URI that comes after `tcp://` or `http://` schemes, also known as 179 | /// authority part. 180 | /// 181 | /// TLS is supported with feature `tls` enabled through [Docker::tls](Docker::tls) constructor. 182 | /// 183 | /// Returns an error if the provided host will fail to parse as URL. 184 | /// 185 | /// This creates an unversioned connector that'll use the latest server version, to use a specific version see 186 | /// [`Docker::unix_versioned`](Docker::unix_versioned). 187 | pub fn tcp(host: impl AsRef) -> Result { 188 | Self::new_tcp_impl(host.as_ref(), None) 189 | } 190 | 191 | /// Same as [`Docker::tcp`](Docker::tcp) but the API version can be explicitly specified. 
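A minimal sketch of the constructors described above and a `ping` round-trip to confirm the daemon is reachable; the socket path, host, and certificate directory are placeholders:

```rust
use docker_api::Docker;

async fn connect() -> docker_api::Result<()> {
    // Local daemon over the default Unix socket (unix targets only).
    #[cfg(unix)]
    let docker = Docker::unix("/var/run/docker.sock");

    // Plain TCP; `http://` works as well.
    #[cfg(not(unix))]
    let docker = Docker::new("tcp://127.0.0.1:2375")?;

    // TLS needs the `tls` feature and a directory containing cert.pem/key.pem (and ca.pem):
    // let docker = Docker::tls("my-docker-host:2376", "/certs", true)?;

    docker.ping().await.map(|_| ())
}
```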
192 | pub fn tcp_versioned(host: impl AsRef, version: impl Into) -> Result { 193 | Self::new_tcp_impl(host.as_ref(), Some(version.into())) 194 | } 195 | 196 | fn new_tcp_impl(host: &str, version: Option) -> Result { 197 | Ok(Self { 198 | version, 199 | client: RequestClient::new( 200 | Transport::Tcp { 201 | client: Client::builder().build(get_http_connector()), 202 | host: url::Url::parse(&format!("tcp://{host}")).map_err(Error::InvalidUrl)?, 203 | }, 204 | Box::new(validate_response), 205 | ), 206 | }) 207 | } 208 | 209 | /// Exports an interface for interacting with Docker images 210 | pub fn images(&'_ self) -> Images { 211 | Images::new(self.clone()) 212 | } 213 | 214 | /// Exports an interface for interacting with Docker containers 215 | pub fn containers(&'_ self) -> Containers { 216 | Containers::new(self.clone()) 217 | } 218 | 219 | /// Exports an interface for interacting with Docker networks 220 | pub fn networks(&'_ self) -> Networks { 221 | Networks::new(self.clone()) 222 | } 223 | 224 | /// Exports an interface for interacting with Docker volumes 225 | pub fn volumes(&'_ self) -> Volumes { 226 | Volumes::new(self.clone()) 227 | } 228 | 229 | /// Verifies the API version returned by the server and adjusts the version used by this client 230 | /// in future requests. 231 | pub async fn adjust_api_version(&mut self) -> Result<()> { 232 | let server_version: ApiVersion = self.version().await.and_then(|v| { 233 | v.api_version 234 | .unwrap_or_default() 235 | .parse::() 236 | .map_err(Error::MalformedVersion) 237 | })?; 238 | 239 | self.version = Some(server_version); 240 | 241 | Ok(()) 242 | } 243 | 244 | //#################################################################################################### 245 | // 246 | // Utility functions to make requests 247 | // 248 | //#################################################################################################### 249 | 250 | fn make_endpoint(&self, endpoint: impl AsRef) -> String { 251 | if let Some(version) = self.version { 252 | version.make_endpoint(endpoint) 253 | } else { 254 | endpoint.as_ref().to_owned() 255 | } 256 | } 257 | 258 | pub(crate) async fn get(&self, endpoint: &str) -> Result> { 259 | self.client.get(self.make_endpoint(endpoint)).await 260 | } 261 | 262 | pub(crate) async fn get_json(&self, endpoint: &str) -> Result { 263 | self.client.get_json(self.make_endpoint(endpoint)).await 264 | } 265 | 266 | #[allow(dead_code)] 267 | pub(crate) async fn post( 268 | &self, 269 | endpoint: &str, 270 | body: Payload, 271 | headers: Option, 272 | ) -> Result> 273 | where 274 | B: Into, 275 | { 276 | self.client 277 | .post(self.make_endpoint(endpoint), body, headers) 278 | .await 279 | } 280 | 281 | pub(crate) async fn post_string( 282 | &self, 283 | endpoint: &str, 284 | body: Payload, 285 | headers: Option, 286 | ) -> Result 287 | where 288 | B: Into, 289 | { 290 | self.client 291 | .post_string(self.make_endpoint(endpoint), body, headers) 292 | .await 293 | } 294 | 295 | pub(crate) async fn post_json( 296 | &self, 297 | endpoint: impl AsRef, 298 | body: Payload, 299 | headers: Option, 300 | ) -> Result 301 | where 302 | T: DeserializeOwned, 303 | B: Into, 304 | { 305 | self.client 306 | .post_json(self.make_endpoint(endpoint), body, headers) 307 | .await 308 | } 309 | 310 | pub(crate) async fn put(&self, endpoint: &str, body: Payload) -> Result 311 | where 312 | B: Into, 313 | { 314 | self.client 315 | .put_string(self.make_endpoint(endpoint), body) 316 | .await 317 | } 318 | 319 | pub(crate) async fn 
delete(&self, endpoint: &str) -> Result { 320 | self.client 321 | .delete_string(self.make_endpoint(endpoint)) 322 | .await 323 | } 324 | 325 | pub(crate) async fn delete_json(&self, endpoint: &str) -> Result { 326 | self.client.delete_json(self.make_endpoint(endpoint)).await 327 | } 328 | 329 | pub(crate) async fn head(&self, endpoint: &str) -> Result> { 330 | self.client.head(self.make_endpoint(endpoint)).await 331 | } 332 | 333 | #[allow(dead_code)] 334 | /// Send a streaming post request. 335 | /// 336 | /// Use stream_post_into_values if the endpoint returns JSON values 337 | pub(crate) fn post_stream<'a, B>( 338 | &'a self, 339 | endpoint: impl AsRef + 'a, 340 | body: Payload, 341 | headers: Option, 342 | ) -> impl Stream> + 'a 343 | where 344 | B: Into + 'a, 345 | { 346 | self.client 347 | .post_stream(self.make_endpoint(endpoint), body, headers) 348 | } 349 | 350 | /// Send a streaming post request that returns a stream of JSON values 351 | /// 352 | /// When a received chunk does not contain a full JSON reads more chunks from the stream 353 | pub(crate) fn post_into_stream<'a, B, T>( 354 | &'a self, 355 | endpoint: impl AsRef + 'a, 356 | body: Payload, 357 | headers: Option, 358 | ) -> impl Stream> + 'a 359 | where 360 | B: Into + 'a, 361 | T: DeserializeOwned + 'a, 362 | { 363 | self.client 364 | .post_into_stream(self.make_endpoint(endpoint), body, headers) 365 | } 366 | 367 | pub(crate) fn get_stream<'a>( 368 | &'a self, 369 | endpoint: impl AsRef + Unpin + 'a, 370 | ) -> impl Stream> + 'a { 371 | self.client.get_stream(self.make_endpoint(endpoint)) 372 | } 373 | 374 | pub(crate) async fn post_upgrade_stream( 375 | self, 376 | endpoint: impl AsRef, 377 | body: Payload, 378 | ) -> Result 379 | where 380 | B: Into, 381 | { 382 | let ep = self.make_endpoint(endpoint); 383 | self.client.post_upgrade_stream(ep, body).await 384 | } 385 | } 386 | 387 | fn validate_response( 388 | response: Response, 389 | ) -> Pin>> + Send + Sync>> { 390 | use serde::{Deserialize, Serialize}; 391 | #[derive(Serialize, Deserialize)] 392 | struct ErrorResponse { 393 | message: String, 394 | } 395 | 396 | Box::pin(async move { 397 | log::trace!( 398 | "got response {} {:?}", 399 | response.status(), 400 | response.headers() 401 | ); 402 | let status = response.status(); 403 | 404 | use crate::conn::{self, hyper::StatusCode}; 405 | match status { 406 | // Success case: pass on the response 407 | StatusCode::OK 408 | | StatusCode::CREATED 409 | | StatusCode::SWITCHING_PROTOCOLS 410 | | StatusCode::NO_CONTENT => Ok(response), 411 | // Error case: try to deserialize error message 412 | _ => { 413 | let body = response.into_body(); 414 | let bytes = hyper::body::to_bytes(body) 415 | .await 416 | .map_err(conn::Error::from)?; 417 | let message_body = String::from_utf8(bytes.to_vec()).map_err(conn::Error::from)?; 418 | log::trace!("{message_body:#?}"); 419 | let message = serde_json::from_str::(&message_body) 420 | .map(|e| e.message) 421 | .unwrap_or_else(|_| { 422 | status 423 | .canonical_reason() 424 | .unwrap_or("unknown error code") 425 | .to_owned() 426 | }); 427 | Err(Error::Fault { 428 | code: status, 429 | message, 430 | }) 431 | } 432 | } 433 | }) 434 | } 435 | 436 | #[cfg(feature = "swarm")] 437 | impl Docker { 438 | /// Exports an interface for interacting with Docker services. 439 | pub fn services(&'_ self) -> Services { 440 | Services::new(self.clone()) 441 | } 442 | 443 | /// Exports an interface for interacting with Docker configs. 
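Because `validate_response` above maps non-success statuses to `Error::Fault`, callers can match on the HTTP status code and daemon message directly. A hedged sketch; the image name is a placeholder:

```rust
use docker_api::{Docker, Error};

async fn inspect_or_report(docker: &Docker) {
    let image = docker.images().get("this-image-does-not-exist");
    match image.inspect().await {
        Ok(_details) => println!("image exists"),
        // Non-2xx replies from the daemon surface as Error::Fault.
        Err(Error::Fault { code, message }) => {
            eprintln!("daemon replied with HTTP {code}: {message}")
        }
        Err(e) => eprintln!("transport or decoding error: {e}"),
    }
}
```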
444 | pub fn configs(&'_ self) -> Configs { 445 | Configs::new(self.clone()) 446 | } 447 | 448 | /// Exports an interface for interacting with Docker tasks. 449 | pub fn tasks(&'_ self) -> Tasks { 450 | Tasks::new(self.clone()) 451 | } 452 | 453 | /// Exports an interface for interacting with Docker secrets. 454 | pub fn secrets(&'_ self) -> Secrets { 455 | Secrets::new(self.clone()) 456 | } 457 | 458 | /// Exports an interface for interacting with Docker swarm. 459 | pub fn swarm(&'_ self) -> Swarm { 460 | Swarm::new(self.clone()) 461 | } 462 | 463 | /// Exports an interface for interacting with Docker nodes. 464 | pub fn nodes(&'_ self) -> Nodes { 465 | Nodes::new(self.clone()) 466 | } 467 | 468 | /// Exports an interface for interacting with Docker plugins. 469 | pub fn plugins(&'_ self) -> Plugins { 470 | Plugins::new(self.clone()) 471 | } 472 | } 473 | 474 | #[cfg(test)] 475 | mod tests { 476 | use super::{Docker, Error}; 477 | #[test] 478 | fn creates_correct_docker() { 479 | let d = Docker::new("tcp://127.0.0.1:80"); 480 | d.unwrap(); 481 | let d = Docker::new("http://127.0.0.1:80"); 482 | d.unwrap(); 483 | 484 | #[cfg(unix)] 485 | { 486 | let d = Docker::new("unix://127.0.0.1:80"); 487 | d.unwrap(); 488 | } 489 | #[cfg(not(unix))] 490 | { 491 | let d = Docker::new("unix://127.0.0.1:80"); 492 | assert!(d.is_err()); 493 | match d.unwrap_err() { 494 | Error::UnsupportedScheme(scheme) if &scheme == "unix" => {} 495 | e => panic!(r#"Expected Error::UnsupportedScheme("unix"), got {}"#, e), 496 | } 497 | } 498 | 499 | let d = Docker::new("rand://127.0.0.1:80"); 500 | match d.unwrap_err() { 501 | Error::UnsupportedScheme(scheme) if &scheme == "rand" => {} 502 | e => panic!(r#"Expected Error::UnsupportedScheme("rand"), got {e}"#), 503 | } 504 | 505 | let d = Docker::new("invalid_uri"); 506 | match d.unwrap_err() { 507 | Error::UnsupportedScheme(scheme) if &scheme == "invalid_uri" => {} 508 | e => panic!(r#"Expected Error::UnsupportedScheme("invalid_uri"), got {e}"#), 509 | } 510 | let d = Docker::new(""); 511 | match d.unwrap_err() { 512 | Error::UnsupportedScheme(scheme) if scheme.is_empty() => {} 513 | e => panic!(r#"Expected Error::UnsupportedScheme(""), got {e}"#), 514 | } 515 | } 516 | } 517 | -------------------------------------------------------------------------------- /src/errors.rs: -------------------------------------------------------------------------------- 1 | //! 
Representations of various client errors 2 | 3 | use hyper::{self, StatusCode}; 4 | use serde_json::Error as SerdeError; 5 | use thiserror::Error as ThisError; 6 | 7 | use futures_util::io::Error as IoError; 8 | 9 | /// Represents the result of all docker operations 10 | pub type Result = std::result::Result; 11 | 12 | #[derive(Debug, ThisError)] 13 | pub enum Error { 14 | #[error(transparent)] 15 | SerdeJsonError(#[from] SerdeError), 16 | #[error(transparent)] 17 | Hyper(#[from] hyper::Error), 18 | #[error(transparent)] 19 | Http(#[from] hyper::http::Error), 20 | #[error(transparent)] 21 | #[allow(clippy::upper_case_acronyms)] 22 | IO(#[from] IoError), 23 | #[error("The response is invalid - {0}")] 24 | InvalidResponse(String), 25 | #[error("error {code} - {message}")] 26 | Fault { code: StatusCode, message: String }, 27 | #[error("The HTTP connection was not upgraded by the docker host")] 28 | ConnectionNotUpgraded, 29 | #[error("Provided scheme `{0}` is not supported")] 30 | UnsupportedScheme(String), 31 | #[error("Provided URI is missing authority part after scheme")] 32 | MissingAuthority, 33 | #[error("Failed to parse url - {0}")] 34 | InvalidUrl(url::ParseError), 35 | #[error("Failed to parse uri - {0}")] 36 | InvalidUri(http::uri::InvalidUri), 37 | #[error("Invalid port - {0}")] 38 | InvalidPort(String), 39 | #[error("Invalid protocol - {0}")] 40 | InvalidProtocol(String), 41 | #[error(transparent)] 42 | MalformedVersion(#[from] containers_api::version::Error), 43 | #[error(transparent)] 44 | Error(#[from] containers_api::conn::Error), 45 | #[error(transparent)] 46 | Any(Box), 47 | #[error("{0}")] 48 | StringError(String), 49 | } 50 | 51 | impl Clone for Error { 52 | fn clone(&self) -> Self { 53 | match self { 54 | Error::SerdeJsonError(err) => Error::StringError(err.to_string()), 55 | Error::IO(err) => Error::StringError(err.to_string()), 56 | Error::Error(err) => Error::StringError(err.to_string()), 57 | e => e.clone(), 58 | } 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /src/lib.rs: -------------------------------------------------------------------------------- 1 | //! docker-api is a rust interface to [Docker](https://www.docker.com/) containers 2 | //! 3 | //! # example 4 | //! 5 | //! ```no_run 6 | //! # async { 7 | //! let docker = docker_api::Docker::new("tcp://127.0.0.1:80").unwrap(); 8 | //! 9 | //! match docker.images().list(&Default::default()).await { 10 | //! Ok(images) => { 11 | //! for image in images { 12 | //! println!("{0:?}", image.repo_tags); 13 | //! } 14 | //! }, 15 | //! Err(e) => eprintln!("Something bad happened! {e}"), 16 | //! } 17 | //! # }; 18 | //! ``` 19 | #![cfg_attr(docsrs, feature(doc_cfg))] 20 | 21 | /// Latest Docker API version supported by this crate. 22 | pub const LATEST_API_VERSION: ApiVersion = ApiVersion::new(1, Some(42), None); 23 | 24 | /// https://github.com/rust-lang/rust/issues/53749 25 | macro_rules! version { 26 | () => { 27 | "v1.42" 28 | }; 29 | } 30 | 31 | #[macro_use] 32 | mod builder; 33 | 34 | pub mod api; 35 | pub mod models; 36 | mod stream; 37 | pub mod conn { 38 | //! 
Connection related items 39 | pub(crate) use containers_api::conn::*; 40 | pub use containers_api::conn::{Error, Transport, TtyChunk}; 41 | } 42 | pub mod docker; 43 | pub mod errors; 44 | pub mod opts; 45 | 46 | pub use containers_api::id::Id; 47 | pub use containers_api::version::ApiVersion; 48 | 49 | pub use crate::{ 50 | api::{ 51 | container::{self, Container, Containers}, 52 | exec::{self, Exec}, 53 | image::{self, Image, Images}, 54 | network::{self, Network, Networks}, 55 | volume::{self, Volume, Volumes}, 56 | }, 57 | docker::Docker, 58 | errors::{Error, Result}, 59 | }; 60 | 61 | #[cfg(feature = "swarm")] 62 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 63 | pub use crate::api::{ 64 | config::{self, Config, Configs}, 65 | node::{self, Node, Nodes}, 66 | plugin::{self, Plugin, Plugins}, 67 | secret::{self, Secret, Secrets}, 68 | service::{self, Service, Services}, 69 | swarm::{self, Swarm}, 70 | task::{self, Task, Tasks}, 71 | }; 72 | -------------------------------------------------------------------------------- /src/models.rs: -------------------------------------------------------------------------------- 1 | //! Generated Docker models 2 | 3 | pub use docker_api_stubs::models::*; 4 | 5 | use crate::errors::{Error, Result}; 6 | 7 | use hyper::header::HeaderMap; 8 | use serde::{Deserialize, Serialize}; 9 | 10 | use std::convert::TryFrom; 11 | 12 | #[derive(Serialize, Debug)] 13 | pub struct PingInfo { 14 | pub api_version: String, 15 | pub builder_version: Option, 16 | pub docker_experimental: bool, 17 | pub cache_control: String, 18 | pub pragma: String, 19 | pub os_type: String, 20 | pub server: String, 21 | pub date: String, 22 | } 23 | 24 | impl TryFrom<&HeaderMap> for PingInfo { 25 | type Error = Error; 26 | 27 | fn try_from(value: &HeaderMap) -> Result { 28 | macro_rules! extract_str { 29 | ($id:literal) => {{ 30 | if let Some(val) = value.get($id) { 31 | val.to_str().map(ToString::to_string).map_err(|e| { 32 | Error::InvalidResponse(format!( 33 | "failed to convert header to string - {}", 34 | e 35 | )) 36 | })? 37 | } else { 38 | return Err(Error::InvalidResponse(format!( 39 | "expected `{}` field in headers", 40 | $id 41 | ))); 42 | } 43 | }}; 44 | } 45 | 46 | Ok(PingInfo { 47 | api_version: extract_str!("api-version"), 48 | builder_version: value 49 | .get("builder-version") 50 | .and_then(|v| v.to_str().map(ToString::to_string).ok()), 51 | docker_experimental: extract_str!("docker-experimental").parse().map_err(|e| { 52 | Error::InvalidResponse(format!("expected header value to be bool - {e}")) 53 | })?, 54 | cache_control: extract_str!("cache-control"), 55 | pragma: extract_str!("pragma"), 56 | os_type: extract_str!("ostype"), 57 | date: extract_str!("date"), 58 | server: extract_str!("server"), 59 | }) 60 | } 61 | } 62 | 63 | #[derive(Clone, Serialize, Deserialize, Debug)] 64 | #[serde(untagged)] 65 | /// Represents a response chunk from Docker api when building, pulling or importing an image. 
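///
/// A sketch of consuming these chunks from an image build stream, modeled on this crate's test
/// helpers (the daemon address, context path and tag below are placeholders):
/// ```no_run
/// # async {
/// use futures_util::StreamExt;
/// use docker_api::{models::ImageBuildChunk, opts::ImageBuildOpts, Docker};
/// let docker = Docker::new("tcp://127.0.0.1:80").unwrap();
/// let images = docker.images();
/// let opts = ImageBuildOpts::builder("/path/to/context").tag("myimage:latest").build();
/// let mut stream = images.build(&opts);
/// while let Some(chunk) = stream.next().await {
///     match chunk {
///         Ok(ImageBuildChunk::Digest { aux }) => println!("built image {}", aux.id),
///         Ok(other) => println!("{other:?}"),
///         Err(e) => eprintln!("{e}"),
///     }
/// }
/// # };
/// ```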
66 | pub enum ImageBuildChunk { 67 | Update { 68 | stream: String, 69 | }, 70 | Error { 71 | error: String, 72 | #[serde(rename = "errorDetail")] 73 | error_detail: ErrorDetail, 74 | }, 75 | Digest { 76 | aux: Aux, 77 | }, 78 | PullStatus { 79 | status: String, 80 | id: Option, 81 | progress: Option, 82 | #[serde(rename = "progressDetail")] 83 | progress_detail: Option, 84 | }, 85 | } 86 | 87 | #[derive(Clone, Serialize, Deserialize, Debug)] 88 | pub struct Aux { 89 | #[serde(rename = "ID")] 90 | pub id: String, 91 | } 92 | 93 | #[derive(Clone, Serialize, Deserialize, Debug)] 94 | pub struct ErrorDetail { 95 | pub message: String, 96 | } 97 | 98 | #[derive(Clone, Serialize, Deserialize, Debug)] 99 | pub struct ProgressDetail { 100 | pub current: Option, 101 | pub total: Option, 102 | } 103 | 104 | pub type Labels = std::collections::HashMap; 105 | -------------------------------------------------------------------------------- /src/opts/config.rs: -------------------------------------------------------------------------------- 1 | use crate::models::{Driver, Labels}; 2 | use crate::{Error, Result}; 3 | use containers_api::opts::{Filter, FilterItem}; 4 | use containers_api::{impl_filter_func, impl_opts_builder}; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | impl_opts_builder!(url => ConfigList); 8 | 9 | pub enum ConfigFilter { 10 | /// The ID of the config. 11 | Id(String), 12 | /// Label in the form of `label=key` 13 | LabelKey(String), 14 | /// Label in the form of `label=key=val` 15 | Label(String, String), 16 | /// The name of the config. 17 | Name(String), 18 | Names(String), 19 | } 20 | 21 | impl Filter for ConfigFilter { 22 | fn query_item(&self) -> FilterItem { 23 | use ConfigFilter::*; 24 | match &self { 25 | Id(id) => FilterItem::new("id", id.to_owned()), 26 | LabelKey(label) => FilterItem::new("label", label.to_owned()), 27 | Label(key, val) => FilterItem::new("label", format!("{key}={val}")), 28 | Name(name) => FilterItem::new("name", name.to_owned()), 29 | Names(names) => FilterItem::new("names", names.to_owned()), 30 | } 31 | } 32 | } 33 | 34 | impl ConfigListOptsBuilder { 35 | impl_filter_func!( 36 | /// Filter listed configs by variants of the enum. 37 | ConfigFilter 38 | ); 39 | } 40 | 41 | #[derive(Clone, Debug, Serialize, Deserialize)] 42 | #[serde(rename_all = "PascalCase")] 43 | /// Structure used to create a new config with [`Configs::create`](crate::Configs::create). 44 | pub struct ConfigCreateOpts { 45 | name: String, 46 | labels: Labels, 47 | data: String, 48 | templating: Driver, 49 | } 50 | 51 | impl ConfigCreateOpts { 52 | /// Create a new config with name and data. This function will take care of 53 | /// encoding the config's data as base64. 54 | pub fn new(name: N, data: D) -> Self 55 | where 56 | N: Into, 57 | D: AsRef, 58 | { 59 | Self { 60 | name: name.into(), 61 | labels: Labels::new(), 62 | data: base64::encode(data.as_ref()), 63 | templating: Driver { 64 | name: "".into(), 65 | options: None, 66 | }, 67 | } 68 | } 69 | 70 | /// Set the templating driver of this config. 
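///
/// A minimal sketch (the config name, contents, driver name and label are placeholders; the
/// `Driver` value mirrors the construction used in `ConfigCreateOpts::new`):
/// ```no_run
/// use docker_api::{models::Driver, opts::ConfigCreateOpts};
/// let opts = ConfigCreateOpts::new("my-config", "some config contents")
///     .set_templating(Driver { name: "golang".into(), options: None })
///     .add_label("env", "dev");
/// ```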
71 | pub fn set_templating(mut self, driver: Driver) -> Self { 72 | self.templating = driver; 73 | self 74 | } 75 | 76 | /// Add a label to this config 77 | pub fn add_label(mut self, key: K, val: V) -> Self 78 | where 79 | K: Into, 80 | V: Into, 81 | { 82 | self.labels.insert(key.into(), val.into()); 83 | self 84 | } 85 | 86 | pub fn serialize(&self) -> Result { 87 | serde_json::to_string(&self).map_err(Error::from) 88 | } 89 | 90 | pub fn serialize_vec(&self) -> Result> { 91 | serde_json::to_vec(&self).map_err(Error::from) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /src/opts/exec.rs: -------------------------------------------------------------------------------- 1 | use containers_api::{impl_field, impl_opts_builder, impl_str_field, impl_vec_field}; 2 | use serde::Serialize; 3 | 4 | impl_opts_builder!(json => ExecCreate); 5 | 6 | #[derive(Copy, Clone, PartialEq, Debug)] 7 | /// Initial size of the console 8 | pub struct ConsoleSize { 9 | pub height: u64, 10 | pub width: u64, 11 | } 12 | 13 | impl Serialize for ConsoleSize { 14 | fn serialize(&self, serializer: S) -> Result 15 | where 16 | S: serde::Serializer, 17 | { 18 | [self.height, self.width].serialize(serializer) 19 | } 20 | } 21 | 22 | impl ExecCreateOptsBuilder { 23 | impl_vec_field!( 24 | /// Command to run, as an array of strings. 25 | command => "Cmd" 26 | ); 27 | 28 | impl_vec_field!( 29 | /// A list of environment variables in the form 'VAR=value'. 30 | env => "Env" 31 | ); 32 | 33 | impl_field!( 34 | /// Attach to stdout of the exec command. 35 | attach_stdout: bool => "AttachStdout" 36 | ); 37 | 38 | impl_field!( 39 | /// Attach to stderr of the exec command. 40 | attach_stderr: bool => "AttachStderr" 41 | ); 42 | 43 | impl_field!( 44 | /// Attach to stdin of the exec command. 45 | attach_stdin: bool => "AttachStdin" 46 | ); 47 | 48 | impl_str_field!( 49 | /// Override the key sequence for detaching a container. Format is a single 50 | /// character [a-Z] or ctrl- where is one of: a-z, @, ^, [, , or _. 51 | detach_keys => "DetachKeys" 52 | ); 53 | 54 | impl_field!( 55 | /// Allocate a pseudo-TTY. 56 | tty: bool => "Tty" 57 | ); 58 | 59 | impl_field!( 60 | /// Runs the exec process with extended privileges. (Default: `false`) 61 | privileged: bool => "Privileged" 62 | ); 63 | 64 | impl_str_field!( 65 | /// The user, and optionally, group to run the exec process inside the container. 66 | /// Format is one of: user, user:group, uid, or uid:gid. 67 | user => "User" 68 | ); 69 | 70 | impl_str_field!( 71 | /// The working directory for the exec process inside the container. 72 | working_dir => "WorkingDir" 73 | ); 74 | 75 | impl_field!( 76 | /// Initial console size 77 | console_size: ConsoleSize => "ConsoleSize" 78 | ); 79 | } 80 | 81 | impl_opts_builder!(json => ExecResize); 82 | 83 | impl ExecResizeOptsBuilder { 84 | impl_field!(height: u64 => "Height"); 85 | impl_field!(width: u64 => "Width"); 86 | } 87 | 88 | impl_opts_builder!(json => ExecStart); 89 | 90 | impl ExecStartOptsBuilder { 91 | impl_field!( 92 | /// Detach from the command. 93 | detach: bool => "Detach" 94 | ); 95 | 96 | impl_field!( 97 | /// Allocate a pseudo-TTY. 
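///
/// As an illustrative sketch (the builder and setters here are the ones generated by the opts
/// macros in this module), exec start options that stay attached and allocate a TTY could be
/// built like this:
/// ```no_run
/// use docker_api::opts::ExecStartOpts;
/// let opts = ExecStartOpts::builder()
///     .detach(false)
///     .tty(true)
///     .build();
/// ```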
98 | tty: bool => "Tty" 99 | ); 100 | 101 | impl_field!( 102 | /// Initial console size 103 | console_size: ConsoleSize => "ConsoleSize" 104 | ); 105 | } 106 | -------------------------------------------------------------------------------- /src/opts/mod.rs: -------------------------------------------------------------------------------- 1 | //! Options used for configuring the behavior of certain API endpoints 2 | mod container; 3 | mod exec; 4 | mod image; 5 | mod network; 6 | mod system; 7 | mod volume; 8 | 9 | #[cfg(feature = "swarm")] 10 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 11 | mod config; 12 | #[cfg(feature = "swarm")] 13 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 14 | mod node; 15 | #[cfg(feature = "swarm")] 16 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 17 | mod plugin; 18 | #[cfg(feature = "swarm")] 19 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 20 | mod secret; 21 | #[cfg(feature = "swarm")] 22 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 23 | mod service; 24 | #[cfg(feature = "swarm")] 25 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 26 | mod swarm; 27 | #[cfg(feature = "swarm")] 28 | #[cfg_attr(docsrs, doc(cfg(feature = "swarm")))] 29 | mod task; 30 | 31 | pub use container::*; 32 | pub use exec::*; 33 | pub use image::*; 34 | pub use network::*; 35 | pub use system::*; 36 | pub use volume::*; 37 | 38 | #[cfg(feature = "swarm")] 39 | pub use config::*; 40 | #[cfg(feature = "swarm")] 41 | pub use node::*; 42 | #[cfg(feature = "swarm")] 43 | pub use plugin::*; 44 | #[cfg(feature = "swarm")] 45 | pub use secret::*; 46 | #[cfg(feature = "swarm")] 47 | pub use service::*; 48 | #[cfg(feature = "swarm")] 49 | pub use swarm::*; 50 | #[cfg(feature = "swarm")] 51 | pub use task::*; 52 | 53 | use containers_api::{impl_opts_builder, impl_url_bool_field, impl_url_field}; 54 | 55 | impl_opts_builder!(url => Logs); 56 | 57 | impl LogsOptsBuilder { 58 | impl_url_bool_field!( 59 | /// Keep connection after returning logs. 60 | follow => "follow" 61 | ); 62 | 63 | impl_url_bool_field!( 64 | /// Return logs from `stdout`. 65 | stdout => "stdout" 66 | ); 67 | 68 | impl_url_bool_field!( 69 | /// Return logs from `stderr`. 70 | stderr => "stderr" 71 | ); 72 | 73 | impl_url_bool_field!( 74 | /// Add timestamps to every log line. 75 | timestamps => "timestamps" 76 | ); 77 | 78 | impl_url_field!( 79 | /// Only return this number of log lines from the end of logs 80 | n_lines: usize => "tail" 81 | ); 82 | 83 | /// Return all log lines. 84 | pub fn all(mut self) -> Self { 85 | self.params.insert("tail", "all".into()); 86 | self 87 | } 88 | 89 | #[cfg(feature = "chrono")] 90 | /// Only return logs since this time. 91 | pub fn since(mut self, timestamp: &chrono::DateTime) -> Self 92 | where 93 | Tz: chrono::TimeZone, 94 | { 95 | self.params 96 | .insert("since", timestamp.timestamp().to_string()); 97 | self 98 | } 99 | 100 | #[cfg(not(feature = "chrono"))] 101 | /// Only return logs since this time, as a UNIX timestamp. 102 | pub fn since(mut self, timestamp: i64) -> Self { 103 | self.params.insert("since", timestamp.to_string()); 104 | self 105 | } 106 | 107 | #[cfg(feature = "chrono")] 108 | /// Only return logs before this time. 109 | pub fn until(mut self, timestamp: &chrono::DateTime) -> Self 110 | where 111 | Tz: chrono::TimeZone, 112 | { 113 | self.params 114 | .insert("until", timestamp.timestamp().to_string()); 115 | self 116 | } 117 | 118 | #[cfg(not(feature = "chrono"))] 119 | /// Only return logs before this time, as a UNIX timestamp. 
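///
/// A typical set of log options can be assembled with the builder, e.g. (illustrative sketch):
/// ```no_run
/// use docker_api::opts::LogsOptsBuilder;
/// let opts = LogsOptsBuilder::default()
///     .follow(true)
///     .stdout(true)
///     .stderr(true)
///     .n_lines(10)
///     .build();
/// ```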
120 | pub fn until(mut self, timestamp: i64) -> Self { 121 | self.params.insert("until", timestamp.to_string()); 122 | self 123 | } 124 | } 125 | 126 | #[cfg(test)] 127 | mod tests { 128 | use super::*; 129 | #[cfg(feature = "chrono")] 130 | #[test] 131 | fn logs_options() { 132 | let timestamp = chrono::NaiveDateTime::from_timestamp_opt(2_147_483_647, 0); 133 | let since = chrono::DateTime::::from_utc(timestamp.unwrap(), chrono::Utc); 134 | 135 | let options = LogsOptsBuilder::default() 136 | .follow(true) 137 | .stdout(true) 138 | .stderr(true) 139 | .timestamps(true) 140 | .all() 141 | .since(&since) 142 | .build(); 143 | 144 | let serialized = options.serialize().unwrap(); 145 | 146 | assert!(serialized.contains("follow=true")); 147 | assert!(serialized.contains("stdout=true")); 148 | assert!(serialized.contains("stderr=true")); 149 | assert!(serialized.contains("timestamps=true")); 150 | assert!(serialized.contains("tail=all")); 151 | assert!(serialized.contains("since=2147483647")); 152 | 153 | let options = LogsOptsBuilder::default().n_lines(5).until(&since).build(); 154 | 155 | let serialized = options.serialize().unwrap(); 156 | 157 | assert!(serialized.contains("tail=5")); 158 | assert!(serialized.contains("until=2147483647")); 159 | } 160 | 161 | #[cfg(not(feature = "chrono"))] 162 | #[test] 163 | fn logs_options() { 164 | let options = LogsOptsBuilder::default() 165 | .follow(true) 166 | .stdout(true) 167 | .stderr(true) 168 | .timestamps(true) 169 | .all() 170 | .since(2_147_483_647) 171 | .until(2_147_600_000) 172 | .build(); 173 | 174 | let serialized = options.serialize().unwrap(); 175 | 176 | assert!(serialized.contains("follow=true")); 177 | assert!(serialized.contains("stdout=true")); 178 | assert!(serialized.contains("stderr=true")); 179 | assert!(serialized.contains("timestamps=true")); 180 | assert!(serialized.contains("tail=all")); 181 | assert!(serialized.contains("since=2147483647")); 182 | assert!(serialized.contains("until=2147600000")); 183 | } 184 | } 185 | -------------------------------------------------------------------------------- /src/opts/network.rs: -------------------------------------------------------------------------------- 1 | use crate::{models::Ipam, Error, Result}; 2 | use containers_api::opts::{Filter, FilterItem}; 3 | use containers_api::{ 4 | impl_field, impl_filter_func, impl_map_field, impl_opts_builder, impl_str_field, impl_vec_field, 5 | }; 6 | 7 | use std::{collections::HashMap, convert::AsRef}; 8 | 9 | use serde::Serialize; 10 | use serde_json::{json, Value}; 11 | 12 | impl_opts_builder!(url => 13 | /// Options for filtering networks list results" 14 | NetworkList 15 | ); 16 | 17 | /// Used for [`NetworkFilter::Scope`](NetworkFilter::Scope). 18 | pub enum Scope { 19 | Swarm, 20 | Global, 21 | Local, 22 | } 23 | 24 | impl AsRef for Scope { 25 | fn as_ref(&self) -> &str { 26 | match &self { 27 | Scope::Swarm => "swarm", 28 | Scope::Global => "global", 29 | Scope::Local => "local", 30 | } 31 | } 32 | } 33 | 34 | pub enum NetworkType { 35 | Custom, 36 | Builtin, 37 | } 38 | 39 | impl AsRef for NetworkType { 40 | fn as_ref(&self) -> &str { 41 | match &self { 42 | NetworkType::Custom => "custom", 43 | NetworkType::Builtin => "builtin", 44 | } 45 | } 46 | } 47 | 48 | /// A single filter item used to filter the output of listing the networks. 49 | pub enum NetworkFilter { 50 | /// When set to true (or 1), returns all networks that are not in use by a container. 
51 | /// When set to false (or 0), only networks that are in use by one or more containers are returned. 52 | Dangling(bool), 53 | /// Matches a network's driver. 54 | Driver(String), 55 | /// Matches all or part of a network ID. 56 | Id(String), 57 | /// Label in the form of `label=key` 58 | LabelKey(String), 59 | /// Label in the form of `label=key=val` 60 | LabelKeyVal(String, String), 61 | /// Matches all or part of a network name. 62 | Name(String), 63 | Scope(Scope), 64 | Type(NetworkType), 65 | } 66 | 67 | impl Filter for NetworkFilter { 68 | fn query_item(&self) -> FilterItem { 69 | use NetworkFilter::*; 70 | 71 | match &self { 72 | Dangling(dangling) => FilterItem::new("dangling", dangling.to_string()), 73 | Driver(driver) => FilterItem::new("driver", driver.to_owned()), 74 | Id(id) => FilterItem::new("id", id.to_owned()), 75 | LabelKey(key) => FilterItem::new("label", key.to_owned()), 76 | LabelKeyVal(key, val) => FilterItem::new("label", format!("{key}={val}")), 77 | Name(name) => FilterItem::new("name", name.to_owned()), 78 | Scope(scope) => FilterItem::new("scope", scope.as_ref().to_owned()), 79 | Type(type_) => FilterItem::new("type", type_.as_ref().to_owned()), 80 | } 81 | } 82 | } 83 | 84 | impl NetworkListOptsBuilder { 85 | impl_filter_func!( 86 | /// Filter the list of networks by one of the variants of the filter. 87 | NetworkFilter 88 | ); 89 | } 90 | 91 | /// Interface for creating new docker network 92 | #[derive(Serialize, Debug, Clone)] 93 | pub struct NetworkCreateOpts { 94 | params: HashMap<&'static str, Value>, 95 | } 96 | 97 | impl NetworkCreateOpts { 98 | /// Return a new instance of a opts-builder for creating a network. 99 | pub fn builder(name: N) -> NetworkCreateOptsBuilder 100 | where 101 | N: AsRef, 102 | { 103 | NetworkCreateOptsBuilder::new(name.as_ref()) 104 | } 105 | 106 | /// Serializes the options as a JSON string. 107 | pub fn serialize(&self) -> Result { 108 | serde_json::to_string(&self.params).map_err(Error::from) 109 | } 110 | 111 | /// Serializes the options as a JSON bytes. 112 | pub fn serialize_vec(&self) -> Result> { 113 | serde_json::to_vec(&self.params).map_err(Error::from) 114 | } 115 | } 116 | 117 | #[derive(Default)] 118 | pub struct NetworkCreateOptsBuilder { 119 | params: HashMap<&'static str, Value>, 120 | } 121 | 122 | impl NetworkCreateOptsBuilder { 123 | pub(crate) fn new(name: &str) -> Self { 124 | let mut params = HashMap::new(); 125 | params.insert("Name", json!(name)); 126 | NetworkCreateOptsBuilder { params } 127 | } 128 | 129 | impl_field!( 130 | /// Check for networks with duplicate names. Since Network is primarily keyed based on a 131 | /// random ID and not on the name, and network name is strictly a user-friendly alias to 132 | /// the network which is uniquely identified using ID, there is no guaranteed way to check 133 | /// for duplicates. CheckDuplicate is there to provide a best effort checking of any 134 | /// networks which has the same name but it is not guaranteed to catch all name collisions. 135 | check_duplicate: bool => "CheckDuplicate" 136 | ); 137 | 138 | impl_str_field!( 139 | /// Name of the network driver plugin to use. 140 | driver => "Driver" 141 | ); 142 | 143 | impl_field!( 144 | /// Restrict external access to the network. 145 | internal: bool => "Internal" 146 | ); 147 | 148 | impl_field!( 149 | /// Globally scoped network is manually attachable by regular containers from workers 150 | /// in swarm mode. 
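///
/// For example, an attachable overlay network could be described like this (illustrative
/// sketch; the name and driver are placeholders):
/// ```no_run
/// use docker_api::opts::NetworkCreateOpts;
/// let opts = NetworkCreateOpts::builder("my-overlay")
///     .driver("overlay")
///     .attachable(true)
///     .build();
/// ```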
151 | attachable: bool => "Attachable" 152 | ); 153 | 154 | impl_field!( 155 | /// Ingress network is the network which provides the routing-mesh in swarm mode. 156 | ingress: bool => "Ingress" 157 | ); 158 | 159 | impl_field!( 160 | /// Enable IPv6 on the network. 161 | enable_ipv6: bool => "EnableIPv6" 162 | ); 163 | 164 | impl_map_field!(json 165 | /// Network specific options to be used by the drivers. 166 | options => "Options" 167 | ); 168 | 169 | impl_map_field!(json 170 | /// User-defined key/value metadata. 171 | labels => "Labels" 172 | ); 173 | 174 | impl_field!( 175 | /// IP Address Management configuration 176 | ipam: Ipam => "IPAM" 177 | ); 178 | 179 | pub fn build(&self) -> NetworkCreateOpts { 180 | NetworkCreateOpts { 181 | params: self.params.clone(), 182 | } 183 | } 184 | } 185 | #[derive(Serialize, Debug)] 186 | /// Interface for disconnecting a container from a network. 187 | pub struct ContainerDisconnectionOpts { 188 | params: HashMap<&'static str, Value>, 189 | } 190 | 191 | impl ContainerDisconnectionOpts { 192 | /// Serializes the options as a JSON string. 193 | pub fn serialize(&self) -> Result { 194 | serde_json::to_string(&self.params).map_err(Error::from) 195 | } 196 | 197 | /// Serializes the options as a JSON bytes. 198 | pub fn serialize_vec(&self) -> Result> { 199 | serde_json::to_vec(&self.params).map_err(Error::from) 200 | } 201 | 202 | /// Return a new instance of a builder for disconnecting a container from a network. 203 | pub fn builder(container_id: I) -> ContainerDisconnectionOptsBuilder 204 | where 205 | I: AsRef, 206 | { 207 | ContainerDisconnectionOptsBuilder::new(container_id.as_ref()) 208 | } 209 | } 210 | 211 | #[derive(Default)] 212 | pub struct ContainerDisconnectionOptsBuilder { 213 | params: HashMap<&'static str, Value>, 214 | } 215 | 216 | impl ContainerDisconnectionOptsBuilder { 217 | pub(crate) fn new(container_id: &str) -> Self { 218 | ContainerDisconnectionOptsBuilder { 219 | params: [("Container", json!(container_id.to_string()))].into(), 220 | } 221 | } 222 | 223 | impl_field!( 224 | /// Force the container to disconnect from the network. 225 | force: bool => "Force" 226 | ); 227 | 228 | pub fn build(self) -> ContainerDisconnectionOpts { 229 | ContainerDisconnectionOpts { 230 | params: self.params, 231 | } 232 | } 233 | } 234 | 235 | #[derive(Serialize, Debug)] 236 | /// Interface for connecting a container to a network. 237 | pub struct ContainerConnectionOpts { 238 | params: HashMap<&'static str, Value>, 239 | } 240 | 241 | impl ContainerConnectionOpts { 242 | /// Serializes the options as a JSON string. 243 | pub fn serialize(&self) -> Result { 244 | serde_json::to_string(&self.params).map_err(Error::from) 245 | } 246 | 247 | /// Serializes the options as a JSON bytes. 248 | pub fn serialize_vec(&self) -> Result> { 249 | serde_json::to_vec(&self.params).map_err(Error::from) 250 | } 251 | 252 | /// Return a new instance of a builder for connecting a container to a network. 
253 | pub fn builder<I>(container_id: I) -> ContainerConnectionOptsBuilder 254 | where 255 | I: AsRef<str>, 256 | { 257 | ContainerConnectionOptsBuilder::new(container_id.as_ref()) 258 | } 259 | } 260 | 261 | #[derive(Default)] 262 | pub struct ContainerConnectionOptsBuilder { 263 | params: HashMap<&'static str, Value>, 264 | container: String, 265 | } 266 | 267 | impl ContainerConnectionOptsBuilder { 268 | pub(crate) fn new(container_id: &str) -> Self { 269 | ContainerConnectionOptsBuilder { 270 | params: HashMap::new(), 271 | container: container_id.to_string(), 272 | } 273 | } 274 | 275 | /// Endpoint's IPAM configuration. 276 | pub fn ipam_config(mut self, config: EndpointIpamConfig) -> Self { 277 | self.params.insert("EndpointConfig", json!(config.params)); 278 | self 279 | } 280 | 281 | impl_vec_field!(aliases => "Aliases"); 282 | 283 | impl_vec_field!(links => "Links"); 284 | 285 | impl_str_field!( 286 | /// Unique ID of the network. 287 | network_id => "NetworkID" 288 | ); 289 | 290 | impl_str_field!( 291 | /// Unique ID for the service endpoint in a Sandbox. 292 | endpoint_id => "EndpointID" 293 | ); 294 | 295 | impl_str_field!( 296 | /// Gateway address for this network. 297 | gateway => "Gateway" 298 | ); 299 | 300 | impl_str_field!( 301 | /// IPv4 address. 302 | ipv4 => "IPAddress" 303 | ); 304 | 305 | impl_field!( 306 | /// Mask length of the IPv4 address. 307 | prefix_len: isize => "IPPrefixLen" 308 | ); 309 | 310 | impl_str_field!( 311 | /// IPv6 gateway address. 312 | ipv6_gateway => "IPv6Gateway" 313 | ); 314 | 315 | impl_str_field!( 316 | /// Global IPv6 address. 317 | ipv6 => "GlobalIPv6Address" 318 | ); 319 | 320 | impl_field!( 321 | /// Mask length of the global IPv6 address. 322 | ipv6_prefix_len: i64 => "GlobalIPv6PrefixLen" 323 | ); 324 | 325 | impl_str_field!( 326 | /// MAC address for the endpoint on this network. 327 | mac => "MacAddress" 328 | ); 329 | 330 | impl_map_field!(json 331 | /// DriverOpts is a mapping of driver options and values. These options are passed directly 332 | /// to the driver and are driver specific. 333 | driver_opts => "DriverOpts" 334 | ); 335 | 336 | pub fn build(self) -> ContainerConnectionOpts { 337 | let mut params = HashMap::new(); 338 | params.insert("EndpointConfig", json!(self.params)); 339 | params.insert("Container", json!(self.container)); 340 | ContainerConnectionOpts { params } 341 | } 342 | } 343 | 344 | #[derive(Default)] 345 | /// Used to configure endpoint IPAM configuration when connecting a container to a network. 346 | /// See [`ipam_config`](ContainerConnectionOptsBuilder::ipam_config).
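///
/// A minimal sketch of connecting a container with a static IPv4 address (the container name
/// and address are placeholders):
/// ```no_run
/// use docker_api::opts::{ContainerConnectionOpts, EndpointIpamConfig};
/// let ipam = EndpointIpamConfig::new().ipv4("172.18.0.10");
/// let opts = ContainerConnectionOpts::builder("my-container")
///     .ipam_config(ipam)
///     .build();
/// ```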
347 | pub struct EndpointIpamConfig { 348 | params: HashMap<&'static str, serde_json::Value>, 349 | } 350 | 351 | impl EndpointIpamConfig { 352 | pub fn new() -> Self { 353 | Self::default() 354 | } 355 | 356 | pub fn ipv4(mut self, address: A) -> Self 357 | where 358 | A: Into, 359 | { 360 | self.params.insert("IPv4Address", json!(address.into())); 361 | self 362 | } 363 | 364 | pub fn ipv6(mut self, address: A) -> Self 365 | where 366 | A: Into, 367 | { 368 | self.params.insert("IPv6Address", json!(address.into())); 369 | self 370 | } 371 | 372 | pub fn link_local_ips(mut self, ips: I) -> Self 373 | where 374 | I: IntoIterator, 375 | I::Item: Into, 376 | { 377 | self.params.insert( 378 | "LinkLocalIPs", 379 | json!(ips.into_iter().map(I::Item::into).collect::>()), 380 | ); 381 | self 382 | } 383 | } 384 | 385 | impl_opts_builder!(url => NetworkPrune); 386 | 387 | pub enum NetworkPruneFilter { 388 | /// Prune networks created before this timestamp. The can be Unix timestamps, 389 | /// date formatted timestamps, or Go duration strings (e.g. 10m, 1h30m) computed relative 390 | /// to the daemon machine’s time. 391 | Until(String), 392 | #[cfg(feature = "chrono")] 393 | #[cfg_attr(docsrs, doc(cfg(feature = "chrono")))] 394 | /// Prune networks created before this timestamp. Same as `Until` but takes a datetime object. 395 | UntilDate(chrono::DateTime), 396 | /// Label in the form of `label=key`. 397 | LabelKey(String), 398 | /// Label in the form of `label=key=val`. 399 | Label(String, String), 400 | } 401 | 402 | impl Filter for NetworkPruneFilter { 403 | fn query_item(&self) -> FilterItem { 404 | use NetworkPruneFilter::*; 405 | match &self { 406 | Until(until) => FilterItem::new("until", until.to_owned()), 407 | #[cfg(feature = "chrono")] 408 | UntilDate(until) => FilterItem::new("until", until.timestamp().to_string()), 409 | LabelKey(label) => FilterItem::new("label", label.to_owned()), 410 | Label(key, val) => FilterItem::new("label", format!("{key}={val}")), 411 | } 412 | } 413 | } 414 | 415 | impl NetworkPruneOptsBuilder { 416 | impl_filter_func!( 417 | /// Filter the networks to prune by one of the variants of the enum. 418 | NetworkPruneFilter 419 | ); 420 | } 421 | -------------------------------------------------------------------------------- /src/opts/node.rs: -------------------------------------------------------------------------------- 1 | use crate::models::{NodeSpecAvailabilityInlineItem, NodeSpecRoleInlineItem}; 2 | use crate::{Error, Result}; 3 | use containers_api::opts::{Filter, FilterItem}; 4 | use containers_api::{ 5 | impl_filter_func, impl_map_field, impl_opts_builder, impl_str_enum_field, impl_str_field, 6 | }; 7 | 8 | use serde::Serialize; 9 | 10 | use std::collections::HashMap; 11 | 12 | #[derive(Serialize, Debug)] 13 | pub enum Membership { 14 | Accepted, 15 | Pending, 16 | } 17 | 18 | impl AsRef for Membership { 19 | fn as_ref(&self) -> &str { 20 | match &self { 21 | Membership::Accepted => "accepted", 22 | Membership::Pending => "pending", 23 | } 24 | } 25 | } 26 | 27 | #[derive(Serialize, Debug)] 28 | pub struct NodeUpdateOpts { 29 | version: String, 30 | params: HashMap<&'static str, serde_json::Value>, 31 | } 32 | 33 | impl NodeUpdateOpts { 34 | /// return a new instance of a builder for Opts 35 | pub fn builder>(version: V) -> NodeUpdateOptsBuilder { 36 | NodeUpdateOptsBuilder::new(version) 37 | } 38 | 39 | impl_map_field!(json 40 | /// User-defined key/value metadata 41 | labels => "Labels" 42 | ); 43 | 44 | impl_str_field!( 45 | /// Name for the node. 
46 | name => "Name" 47 | ); 48 | 49 | impl_str_enum_field!( 50 | /// Role of the node. 51 | role: NodeSpecRoleInlineItem => "Role" 52 | ); 53 | 54 | impl_str_enum_field!( 55 | /// Availability of the node. 56 | availability: NodeSpecAvailabilityInlineItem => "Availability" 57 | ); 58 | 59 | pub fn serialize(&self) -> Result { 60 | serde_json::to_string(&self.params).map_err(Error::from) 61 | } 62 | 63 | pub fn serialize_vec(&self) -> Result> { 64 | serde_json::to_vec(&self).map_err(Error::from) 65 | } 66 | 67 | pub fn version(&self) -> &str { 68 | &self.version 69 | } 70 | } 71 | 72 | #[derive(Serialize, Debug)] 73 | pub struct NodeUpdateOptsBuilder { 74 | version: String, 75 | params: HashMap<&'static str, serde_json::Value>, 76 | } 77 | 78 | impl NodeUpdateOptsBuilder { 79 | pub fn new>(version: V) -> Self { 80 | Self { 81 | version: version.into(), 82 | params: HashMap::new(), 83 | } 84 | } 85 | } 86 | 87 | /// Filter type used to filter nodes by one of the variants. 88 | pub enum NodeFilter { 89 | Id(String), 90 | /// The engine label 91 | Label(String), 92 | Membership(Membership), 93 | Name(String), 94 | NodeLabel(String), 95 | Role(NodeSpecRoleInlineItem), 96 | } 97 | 98 | impl Filter for NodeFilter { 99 | fn query_item(&self) -> FilterItem { 100 | match &self { 101 | NodeFilter::Id(id) => FilterItem::new("id", id.to_owned()), 102 | NodeFilter::Label(label) => FilterItem::new("label", label.to_owned()), 103 | NodeFilter::Membership(membership) => { 104 | FilterItem::new("membership", membership.as_ref().to_string()) 105 | } 106 | NodeFilter::Name(name) => FilterItem::new("name", name.to_owned()), 107 | NodeFilter::NodeLabel(node) => FilterItem::new("node.label", node.to_owned()), 108 | NodeFilter::Role(role) => FilterItem::new("role", role.as_ref().to_string()), 109 | } 110 | } 111 | } 112 | 113 | impl_opts_builder!(url => NodeList); 114 | 115 | impl NodeListOptsBuilder { 116 | impl_filter_func!(NodeFilter); 117 | } 118 | -------------------------------------------------------------------------------- /src/opts/plugin.rs: -------------------------------------------------------------------------------- 1 | use containers_api::opts::{Filter, FilterItem}; 2 | use containers_api::{impl_filter_func, impl_opts_builder}; 3 | 4 | impl_opts_builder!(url => PluginList); 5 | 6 | pub enum PluginFilter { 7 | Capability(String), 8 | Enable, 9 | Disable, 10 | } 11 | 12 | impl Filter for PluginFilter { 13 | fn query_item(&self) -> FilterItem { 14 | match &self { 15 | PluginFilter::Capability(cap) => FilterItem::new("capability", cap.to_owned()), 16 | PluginFilter::Enable => FilterItem::new("enable", true.to_string()), 17 | PluginFilter::Disable => FilterItem::new("enable", false.to_string()), 18 | } 19 | } 20 | } 21 | 22 | impl PluginListOptsBuilder { 23 | impl_filter_func!( 24 | /// Filter listed plugins by the variants of the enum. 25 | PluginFilter 26 | ); 27 | } 28 | -------------------------------------------------------------------------------- /src/opts/secret.rs: -------------------------------------------------------------------------------- 1 | use crate::models::{Driver, Labels}; 2 | use crate::{Error, Result}; 3 | use containers_api::opts::{Filter, FilterItem}; 4 | use containers_api::{impl_filter_func, impl_opts_builder}; 5 | use serde::{Deserialize, Serialize}; 6 | 7 | impl_opts_builder!(url => SecretList); 8 | 9 | pub enum SecretFilter { 10 | /// The ID of the secret. 
11 | Id(String), 12 | /// Label in the form of `label=key` 13 | LabelKey(String), 14 | /// Label in the form of `label=key=val` 15 | Label(String, String), 16 | /// The name of the secret. 17 | Name(String), 18 | Names(String), 19 | } 20 | 21 | impl Filter for SecretFilter { 22 | fn query_item(&self) -> FilterItem { 23 | use SecretFilter::*; 24 | match &self { 25 | Id(id) => FilterItem::new("id", id.to_owned()), 26 | LabelKey(label) => FilterItem::new("label", label.to_owned()), 27 | Label(key, val) => FilterItem::new("label", format!("{key}={val}")), 28 | Name(name) => FilterItem::new("name", name.to_owned()), 29 | Names(names) => FilterItem::new("names", names.to_owned()), 30 | } 31 | } 32 | } 33 | 34 | impl SecretListOptsBuilder { 35 | impl_filter_func!( 36 | /// Filter the list of filters by one of the variants of the enum. 37 | SecretFilter 38 | ); 39 | } 40 | 41 | #[derive(Clone, Debug, Serialize, Deserialize)] 42 | #[serde(rename_all = "PascalCase")] 43 | /// Structure used to create a new secret with [`Secrets::create`](crate::Secrets::create). 44 | pub struct SecretCreateOpts { 45 | name: String, 46 | labels: Labels, 47 | data: String, 48 | driver: Driver, 49 | templating: Driver, 50 | } 51 | 52 | impl SecretCreateOpts { 53 | /// Create a new secret with name and data. This function will take care of 54 | /// encoding the secret's data as base64. 55 | pub fn new(name: N, data: D) -> Self 56 | where 57 | N: Into, 58 | D: AsRef, 59 | { 60 | Self { 61 | name: name.into(), 62 | labels: Labels::new(), 63 | data: base64::encode(data.as_ref()), 64 | driver: Driver { 65 | name: "".into(), 66 | options: None, 67 | }, 68 | templating: Driver { 69 | name: "".into(), 70 | options: None, 71 | }, 72 | } 73 | } 74 | 75 | /// Set the driver of this secret. 76 | pub fn set_driver(mut self, driver: Driver) -> Self { 77 | self.driver = driver; 78 | self 79 | } 80 | 81 | /// Set the templating driver of this secret. 
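///
/// A minimal sketch (the name, data and label are placeholders; the data is base64-encoded
/// automatically):
/// ```no_run
/// use docker_api::opts::SecretCreateOpts;
/// let opts = SecretCreateOpts::new("db-password", "hunter2")
///     .add_label("app", "backend");
/// ```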
82 | pub fn set_templating(mut self, driver: Driver) -> Self { 83 | self.templating = driver; 84 | self 85 | } 86 | 87 | /// Add a label to this secret 88 | pub fn add_label(mut self, key: K, val: V) -> Self 89 | where 90 | K: Into, 91 | V: Into, 92 | { 93 | self.labels.insert(key.into(), val.into()); 94 | self 95 | } 96 | 97 | pub fn serialize(&self) -> Result { 98 | serde_json::to_string(&self).map_err(Error::from) 99 | } 100 | 101 | pub fn serialize_vec(&self) -> Result> { 102 | serde_json::to_vec(&self).map_err(Error::from) 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /src/opts/service.rs: -------------------------------------------------------------------------------- 1 | use crate::{models, opts::RegistryAuth, Error, Result}; 2 | use containers_api::opts::{Filter, FilterItem}; 3 | use containers_api::{impl_filter_func, impl_opts_builder, impl_url_bool_field}; 4 | 5 | use std::collections::HashMap; 6 | use std::hash::Hash; 7 | 8 | use serde::Serialize; 9 | use serde_json::{json, Value}; 10 | 11 | /// Filter Opts for services listings 12 | pub enum ServiceFilter { 13 | Id(String), 14 | Label(String), 15 | ReplicatedMode, 16 | GlobalMode, 17 | Name(String), 18 | } 19 | 20 | impl Filter for ServiceFilter { 21 | fn query_item(&self) -> FilterItem { 22 | match &self { 23 | ServiceFilter::Id(i) => FilterItem::new("id", i.to_owned()), 24 | ServiceFilter::Label(l) => FilterItem::new("label", l.to_owned()), 25 | ServiceFilter::ReplicatedMode => FilterItem::new("mode", "replicated".to_string()), 26 | ServiceFilter::GlobalMode => FilterItem::new("mode", "global".to_string()), 27 | ServiceFilter::Name(n) => FilterItem::new("name", n.to_string()), 28 | } 29 | } 30 | } 31 | 32 | impl_opts_builder!(url => ServiceList); 33 | 34 | impl ServiceListOptsBuilder { 35 | impl_filter_func!(ServiceFilter); 36 | 37 | impl_url_bool_field!( 38 | /// Include service status, with count of running and desired tasks. 39 | status => "status" 40 | ); 41 | } 42 | 43 | #[derive(Default, Debug)] 44 | pub struct ServiceOpts { 45 | auth: Option, 46 | params: HashMap<&'static str, Value>, 47 | } 48 | 49 | impl ServiceOpts { 50 | /// return a new instance of a builder for Opts 51 | pub fn builder() -> ServiceOptsBuilder { 52 | ServiceOptsBuilder::default() 53 | } 54 | 55 | /// serialize Opts as a string. 
returns None if no Opts are defined 56 | pub fn serialize(&self) -> Result { 57 | serde_json::to_string(&self.params).map_err(Error::from) 58 | } 59 | 60 | pub fn serialize_vec(&self) -> Result> { 61 | serde_json::to_vec(&self.params).map_err(Error::from) 62 | } 63 | 64 | pub(crate) fn auth_header(&self) -> Option { 65 | self.auth.clone().map(|a| a.serialize()) 66 | } 67 | } 68 | 69 | #[derive(Default)] 70 | pub struct ServiceOptsBuilder { 71 | auth: Option, 72 | params: HashMap<&'static str, Result>, 73 | } 74 | 75 | impl ServiceOptsBuilder { 76 | pub fn name(mut self, name: S) -> Self 77 | where 78 | S: AsRef, 79 | { 80 | self.params.insert("Name", Ok(json!(name.as_ref()))); 81 | self 82 | } 83 | 84 | pub fn labels(mut self, labels: L) -> Self 85 | where 86 | L: IntoIterator, 87 | K: AsRef + Serialize + Eq + Hash, 88 | V: AsRef + Serialize, 89 | { 90 | self.params.insert( 91 | "Labels", 92 | Ok(json!(labels.into_iter().collect::>())), 93 | ); 94 | self 95 | } 96 | 97 | pub fn task_template(mut self, spec: &models::TaskSpec) -> Self { 98 | self.params.insert("TaskTemplate", to_value_result(spec)); 99 | self 100 | } 101 | 102 | pub fn mode(mut self, mode: &models::ServiceSpecModeInlineItem) -> Self { 103 | self.params.insert("Mode", to_value_result(mode)); 104 | self 105 | } 106 | 107 | pub fn update_config(mut self, conf: &models::ServiceSpecUpdateConfigInlineItem) -> Self { 108 | self.params.insert("UpdateConfig", to_value_result(conf)); 109 | self 110 | } 111 | 112 | pub fn rollback_config(mut self, conf: &models::ServiceSpecRollbackConfigInlineItem) -> Self { 113 | self.params.insert("RollbackConfig", to_value_result(conf)); 114 | self 115 | } 116 | 117 | pub fn networks(mut self, networks: N) -> Self 118 | where 119 | N: IntoIterator, 120 | { 121 | self.params.insert( 122 | "Networks", 123 | to_value_result( 124 | networks 125 | .into_iter() 126 | .collect::>(), 127 | ), 128 | ); 129 | self 130 | } 131 | 132 | pub fn endpoint_spec(mut self, spec: &models::EndpointSpec) -> Self { 133 | self.params.insert("EndpointSpec", to_value_result(spec)); 134 | self 135 | } 136 | 137 | pub fn auth(mut self, auth: RegistryAuth) -> Self { 138 | self.auth = Some(auth); 139 | self 140 | } 141 | 142 | pub fn build(self) -> Result { 143 | let mut new_params = HashMap::new(); 144 | for (k, v) in self.params.into_iter() { 145 | new_params.insert(k, v?); 146 | } 147 | Ok(ServiceOpts { 148 | auth: self.auth, 149 | params: new_params, 150 | }) 151 | } 152 | } 153 | 154 | fn to_value_result(value: T) -> Result 155 | where 156 | T: Serialize, 157 | { 158 | Ok(serde_json::to_value(value)?) 159 | } 160 | -------------------------------------------------------------------------------- /src/opts/swarm.rs: -------------------------------------------------------------------------------- 1 | use crate::models::SwarmSpec; 2 | use containers_api::{impl_field, impl_opts_builder, impl_str_field, impl_vec_field}; 3 | 4 | impl_opts_builder!(json => SwarmJoin); 5 | 6 | impl SwarmJoinOptsBuilder { 7 | impl_str_field!( 8 | /// Listen address used for inter-manager communication if the node gets promoted to manager, 9 | /// as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). 10 | listen_addr => "ListenAddr" 11 | ); 12 | 13 | impl_str_field!( 14 | // Externally reachable address advertised to other nodes. This can either be an address/port 15 | // combination in the form 192.168.1.1:4567, or an interface followed by a port number, like eth0:4567. 
16 | // If the port number is omitted, the port number from the listen address is used. If AdvertiseAddr is 17 | // not specified, it will be automatically detected when possible. 18 | advertise_addr => "AdvertiseAddr" 19 | ); 20 | 21 | impl_str_field!( 22 | /// Address or interface to use for data path traffic. 23 | data_path_addr => "DataPathAddr" 24 | ); 25 | 26 | impl_vec_field!( 27 | /// Addresses of manager nodes already participating in the swarm. 28 | remote_addrs => "RemoteAddrs" 29 | ); 30 | 31 | impl_str_field!( 32 | /// Secret token for joining this swarm. 33 | join_token => "JoinToken" 34 | ); 35 | } 36 | 37 | impl_opts_builder!(json => SwarmInit); 38 | 39 | impl SwarmInitOptsBuilder { 40 | impl_str_field!( 41 | // Listen address used for inter-manager communication if the node gets promoted to manager, 42 | // as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). 43 | listen_addr => "ListenAddr" 44 | ); 45 | 46 | impl_str_field!( 47 | // Externally reachable address advertised to other nodes. This can either be an address/port 48 | // combination in the form 192.168.1.1:4567, or an interface followed by a port number, like eth0:4567. 49 | // If the port number is omitted, the port number from the listen address is used. If AdvertiseAddr is 50 | // not specified, it will be automatically detected when possible. 51 | advertise_addr => "AdvertiseAddr" 52 | ); 53 | 54 | impl_str_field!( 55 | /// Address or interface to use for data path traffic. 56 | data_path_addr => "DataPathAddr" 57 | ); 58 | 59 | impl_field!( 60 | // Specifies the data path port number for data traffic. Acceptable port range is 1024 to 49151. 61 | // If no port is set or is set to 0, default port 4789 will be used. 62 | data_path_port: u32 => "DataPathPort" 63 | ); 64 | 65 | impl_vec_field!( 66 | // Default Address Pool specifies default subnet pools for global scope networks. 67 | default_addr_pool => "DefaultAddrPool" 68 | ); 69 | 70 | impl_field!( 71 | /// Force creation of a new swarm. 72 | force_new_cluster: bool => "ForceNewCluster" 73 | ); 74 | 75 | impl_field!( 76 | // SubnetSize specifies the subnet size of the networks created from the default subnet pool. 77 | subnet_size: u32 => "SubnetSize" 78 | ); 79 | 80 | impl_field!( 81 | /// User modifiable swarm configuration. 82 | spec: SwarmSpec => "Spec" 83 | ); 84 | } 85 | -------------------------------------------------------------------------------- /src/opts/system.rs: -------------------------------------------------------------------------------- 1 | use containers_api::impl_opts_builder; 2 | use std::collections::HashMap; 3 | 4 | /// Opts for filtering streams of Docker events 5 | #[derive(Default, Debug)] 6 | pub struct EventsOpts { 7 | params: HashMap<&'static str, String>, 8 | } 9 | 10 | impl EventsOpts { 11 | pub fn builder() -> EventsOptsBuilder { 12 | EventsOptsBuilder::default() 13 | } 14 | 15 | /// serialize Opts as a string. 
returns None if no Opts are defined 16 | pub fn serialize(&self) -> Option<String> { 17 | if self.params.is_empty() { 18 | None 19 | } else { 20 | Some(containers_api::url::encoded_pairs(&self.params)) 21 | } 22 | } 23 | } 24 | 25 | #[derive(Copy, Clone)] 26 | pub enum EventFilterType { 27 | Container, 28 | Image, 29 | Volume, 30 | Network, 31 | Daemon, 32 | } 33 | 34 | impl AsRef<str> for EventFilterType { 35 | fn as_ref(&self) -> &str { 36 | match &self { 37 | EventFilterType::Container => "container", 38 | EventFilterType::Image => "image", 39 | EventFilterType::Volume => "volume", 40 | EventFilterType::Network => "network", 41 | EventFilterType::Daemon => "daemon", 42 | } 43 | } 44 | } 45 | 46 | /// An enumeration used to filter system events. 47 | pub enum EventFilter { 48 | // TODO: use the Filter trait for this enum 49 | Container(String), 50 | Event(String), 51 | Image(String), 52 | Label(String), 53 | Type(EventFilterType), 54 | Volume(String), 55 | Network(String), 56 | Daemon(String), 57 | } 58 | 59 | #[derive(Default)] 60 | /// Builder interface for [`EventsOpts`](EventsOpts). 61 | pub struct EventsOptsBuilder { 62 | params: HashMap<&'static str, String>, 63 | events: Vec<String>, 64 | containers: Vec<String>, 65 | images: Vec<String>, 66 | labels: Vec<String>, 67 | volumes: Vec<String>, 68 | networks: Vec<String>, 69 | daemons: Vec<String>, 70 | types: Vec<String>, 71 | } 72 | 73 | impl EventsOptsBuilder { 74 | #[cfg(feature = "chrono")] 75 | /// Only return events since this time. 76 | pub fn since<Tz>(mut self, timestamp: &chrono::DateTime<Tz>) -> Self 77 | where 78 | Tz: chrono::TimeZone, 79 | { 80 | self.params 81 | .insert("since", timestamp.timestamp().to_string()); 82 | self 83 | } 84 | 85 | #[cfg(not(feature = "chrono"))] 86 | /// Only return events since this time, as a UNIX timestamp. 87 | pub fn since(mut self, timestamp: i64) -> Self { 88 | self.params.insert("since", timestamp.to_string()); 89 | self 90 | } 91 | 92 | #[cfg(feature = "chrono")] 93 | /// Only return events before this time. 94 | pub fn until<Tz>(mut self, timestamp: &chrono::DateTime<Tz>) -> Self 95 | where 96 | Tz: chrono::TimeZone, 97 | { 98 | self.params 99 | .insert("until", timestamp.timestamp().to_string()); 100 | self 101 | } 102 | 103 | #[cfg(not(feature = "chrono"))] 104 | /// Only return events before this time, as a UNIX timestamp. 105 | pub fn until(mut self, timestamp: i64) -> Self { 106 | self.params.insert("until", timestamp.to_string()); 107 | self 108 | } 109 | 110 | /// Filter the events by a list of event filters.
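///
/// For example, to only receive container events carrying a given label (illustrative sketch;
/// the label value is a placeholder):
/// ```no_run
/// use docker_api::opts::{EventFilter, EventFilterType, EventsOpts};
/// let opts = EventsOpts::builder()
///     .filter(vec![
///         EventFilter::Type(EventFilterType::Container),
///         EventFilter::Label("env=prod".into()),
///     ])
///     .build();
/// ```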
111 | pub fn filter(mut self, filters: Vec<EventFilter>) -> Self { 112 | let mut params = HashMap::new(); 113 | for f in filters { 114 | match f { 115 | EventFilter::Container(n) => { 116 | self.containers.push(n); 117 | params.insert("container", self.containers.clone()) 118 | } 119 | EventFilter::Event(n) => { 120 | self.events.push(n); 121 | params.insert("event", self.events.clone()) 122 | } 123 | EventFilter::Image(n) => { 124 | self.images.push(n); 125 | params.insert("image", self.images.clone()) 126 | } 127 | EventFilter::Label(n) => { 128 | self.labels.push(n); 129 | params.insert("label", self.labels.clone()) 130 | } 131 | EventFilter::Volume(n) => { 132 | self.volumes.push(n); 133 | params.insert("volume", self.volumes.clone()) 134 | } 135 | EventFilter::Network(n) => { 136 | self.networks.push(n); 137 | params.insert("network", self.networks.clone()) 138 | } 139 | EventFilter::Daemon(n) => { 140 | self.daemons.push(n); 141 | params.insert("daemon", self.daemons.clone()) 142 | } 143 | EventFilter::Type(n) => { 144 | self.types.push(n.as_ref().to_string()); 145 | params.insert("type", self.types.clone()) 146 | } 147 | }; 148 | } 149 | self.params.insert( 150 | "filters", 151 | serde_json::to_string(&params).unwrap_or_default(), 152 | ); 153 | self 154 | } 155 | 156 | /// Build the final event options. 157 | pub fn build(self) -> EventsOpts { 158 | EventsOpts { 159 | params: self.params, 160 | } 161 | } 162 | } 163 | 164 | #[derive(Copy, Clone)] 165 | pub enum DataUsageType { 166 | Container, 167 | Image, 168 | Volume, 169 | BuildCache, 170 | } 171 | 172 | impl AsRef<str> for DataUsageType { 173 | fn as_ref(&self) -> &str { 174 | match self { 175 | Self::Container => "container", 176 | Self::Image => "image", 177 | Self::Volume => "volume", 178 | Self::BuildCache => "build-cache", 179 | } 180 | } 181 | } 182 | 183 | impl_opts_builder!(url => SystemDataUsage); 184 | impl SystemDataUsageOptsBuilder { 185 | pub fn types(mut self, types: impl IntoIterator<Item = DataUsageType>) -> Self { 186 | self.vec_params.insert( 187 | "type", 188 | types.into_iter().map(|s| s.as_ref().into()).collect(), 189 | ); 190 | self 191 | } 192 | } 193 | -------------------------------------------------------------------------------- /src/opts/task.rs: -------------------------------------------------------------------------------- 1 | use containers_api::opts::{Filter, FilterItem}; 2 | use containers_api::{impl_filter_func, impl_opts_builder}; 3 | 4 | impl_opts_builder!(url => TaskList); 5 | 6 | #[derive(Clone, Copy, Debug)] 7 | pub enum TaskStateFilter { 8 | Running, 9 | Shutdown, 10 | Accepted, 11 | } 12 | 13 | impl AsRef<str> for TaskStateFilter { 14 | fn as_ref(&self) -> &str { 15 | match &self { 16 | Self::Running => "running", 17 | Self::Shutdown => "shutdown", 18 | Self::Accepted => "accepted", 19 | } 20 | } 21 | } 22 | 23 | pub enum TaskFilter { 24 | /// The state that the task should be in. 25 | DesiredState(TaskStateFilter), 26 | /// The ID of the task. 27 | Id(String), 28 | /// Label in the form of `label=key` 29 | LabelKey(String), 30 | /// Label in the form of `label=key=val` 31 | Label(String, String), 32 | /// The name of the task. 33 | Name(String), 34 | /// Name of the node. 35 | Node(String), 36 | /// Name of the service.
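///
/// Sketch of listing only the tasks of a single service (assumes the `filter` setter generated
/// by `impl_filter_func!` below; the service name is a placeholder):
/// ```no_run
/// use docker_api::opts::{TaskFilter, TaskListOpts};
/// let opts = TaskListOpts::builder()
///     .filter(vec![TaskFilter::Service("web".into())])
///     .build();
/// ```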
37 | Service(String), 38 | } 39 | 40 | impl Filter for TaskFilter { 41 | fn query_item(&self) -> FilterItem { 42 | use TaskFilter::*; 43 | match &self { 44 | DesiredState(state) => FilterItem::new("desired-state", state.as_ref().to_string()), 45 | Id(id) => FilterItem::new("id", id.to_owned()), 46 | LabelKey(key) => FilterItem::new("label", key.to_owned()), 47 | Label(key, val) => FilterItem::new("label", format!("{key}={val}")), 48 | Name(name) => FilterItem::new("name", name.to_owned()), 49 | Node(node) => FilterItem::new("node", node.to_owned()), 50 | Service(service) => FilterItem::new("service", service.to_owned()), 51 | } 52 | } 53 | } 54 | 55 | impl TaskListOptsBuilder { 56 | impl_filter_func!( 57 | /// Filter listed tasks by variants of the enum. 58 | TaskFilter 59 | ); 60 | } 61 | -------------------------------------------------------------------------------- /src/opts/volume.rs: -------------------------------------------------------------------------------- 1 | use containers_api::opts::{Filter, FilterItem}; 2 | use containers_api::{ 3 | impl_field, impl_filter_func, impl_map_field, impl_opts_builder, impl_opts_required_builder, 4 | impl_str_field, 5 | }; 6 | 7 | impl_opts_builder!(json => VolumeCreate); 8 | 9 | impl VolumeCreateOptsBuilder { 10 | impl_str_field!( 11 | /// The new volume's name. If not specified, Docker generates a name. 12 | name => "Name" 13 | ); 14 | 15 | impl_str_field!( 16 | /// Name of the volume driver to use. 17 | driver => "Driver" 18 | ); 19 | 20 | impl_map_field!(json 21 | /// A mapping of driver options and values. 22 | /// These options are passed directly to the driver and are driver specific. 23 | driver_opts => "DriverOpts"); 24 | 25 | impl_map_field!(json 26 | /// User-defined key/value metadata. 27 | labels => "Labels" 28 | ); 29 | 30 | impl_field!( 31 | /// Cluster-specific options used to create the volume. 32 | cluster_spec: crate::models::ClusterVolumeSpec => "ClusterVolumeSpec" 33 | ); 34 | } 35 | 36 | impl_opts_builder!(url => VolumePrune); 37 | 38 | impl_opts_builder!(url => VolumeList); 39 | 40 | /// Filter type used to filter volumes by one of the variants. 41 | pub enum VolumeFilter { 42 | /// When set to `true`, returns all volumes that are not in use by a container. 43 | /// When set to `false`, only volumes that are in use by one or more containers are returned. 44 | Dangling(bool), 45 | /// Matches volumes based on their driver. 46 | Driver(String), 47 | /// Label in the form of `label=key`. 48 | LabelKey(String), 49 | /// Label in the form of `label=key=val`. 50 | Label { key: String, val: String }, 51 | /// Matches all or part of a volume name. 52 | Name(String), 53 | } 54 | 55 | impl Filter for VolumeFilter { 56 | fn query_item(&self) -> FilterItem { 57 | use VolumeFilter::*; 58 | match &self { 59 | Dangling(dangling) => FilterItem::new("dangling", dangling.to_string()), 60 | Driver(driver) => FilterItem::new("driver", driver.to_owned()), 61 | LabelKey(label) => FilterItem::new("label", label.to_owned()), 62 | Label { key, val } => FilterItem::new("label", format!("{key}:{val}")), 63 | Name(name) => FilterItem::new("name", name.to_owned()), 64 | } 65 | } 66 | } 67 | 68 | impl VolumePruneOptsBuilder { 69 | impl_filter_func!( 70 | /// Filter pruned volumes by one of the variants of the filter enum. 71 | VolumeFilter 72 | ); 73 | } 74 | 75 | impl VolumeListOptsBuilder { 76 | impl_filter_func!( 77 | /// Filter listed volumes by one of the variants of the filter enum. 
78 | VolumeFilter 79 | ); 80 | } 81 | 82 | impl_opts_required_builder!(json => 83 | /// Update swarm cluster volume 84 | ClusterVolumeUpdate, 85 | /// The version number of the volume being updated. This is required to avoid conflicting writes. Found in the volume's ClusterVolume field. 86 | version: i64 => "version" 87 | ); 88 | 89 | impl ClusterVolumeUpdateOptsBuilder { 90 | impl_str_field!( 91 | /// Group defines the volume group of this volume. Volumes belonging to the same group can be referred to by group name when creating Services. 92 | /// Referring to a volume by group instructs Swarm to treat volumes in that group interchangeably for the purpose of scheduling. Volumes with 93 | /// an empty string for a group technically all belong to the same, emptystring group. 94 | group => "Group" 95 | ); 96 | 97 | impl_field!( 98 | /// Defines how the volume is used by tasks. 99 | access_mode: serde_json::Value => "AccessMode" 100 | ); 101 | } 102 | -------------------------------------------------------------------------------- /src/stream.rs: -------------------------------------------------------------------------------- 1 | use containers_api::conn::tty; 2 | use containers_api::conn::Payload; 3 | use futures_util::{AsyncRead, AsyncWrite}; 4 | use hyper::Body; 5 | 6 | use crate::{Docker, Result}; 7 | 8 | /// Attaches a multiplexed TCP stream to the container that can be used to read Stdout, Stderr and write Stdin. 9 | async fn attach_raw( 10 | docker: Docker, 11 | endpoint: String, 12 | payload: Payload, 13 | ) -> Result { 14 | docker.post_upgrade_stream(endpoint, payload).await 15 | } 16 | 17 | pub async fn attach( 18 | docker: Docker, 19 | endpoint: String, 20 | payload: Payload, 21 | is_tty: bool, 22 | ) -> Result { 23 | attach_raw(docker, endpoint, payload).await.map(|s| { 24 | if is_tty { 25 | tty::Multiplexer::new(s, tty::decode_raw) 26 | } else { 27 | tty::Multiplexer::new(s, tty::decode_chunk) 28 | } 29 | }) 30 | } 31 | -------------------------------------------------------------------------------- /tests/common.rs: -------------------------------------------------------------------------------- 1 | #![allow(dead_code)] 2 | 3 | use std::env; 4 | use std::path::PathBuf; 5 | 6 | pub use docker_api::{api, conn, models, models::ImageBuildChunk, opts, Docker}; 7 | pub use futures_util::{StreamExt, TryStreamExt}; 8 | pub use tempfile::TempDir; 9 | 10 | pub const DEFAULT_IMAGE: &str = "ubuntu:latest"; 11 | pub const DEFAULT_CMD: &str = "sleep inf"; 12 | pub const DEFAULT_CMD_ARRAY: &[&str] = &["sleep", "inf"]; 13 | pub const TEST_IMAGE_PATH: &str = "/var/test123"; 14 | 15 | const URI_ENV_VAR: &str = "DOCKER_API_URI"; 16 | 17 | pub fn init_runtime() -> Docker { 18 | let _ = env_logger::try_init(); 19 | if let Ok(uri) = env::var(URI_ENV_VAR) { 20 | Docker::new(uri).unwrap() 21 | } else { 22 | #[cfg(unix)] 23 | { 24 | let uid = nix::unistd::Uid::effective(); 25 | let docker_dir = PathBuf::from(format!("/run/user/{uid}/docker")); 26 | let docker_root_dir = PathBuf::from("/var/run"); 27 | if docker_dir.exists() { 28 | Docker::unix(docker_dir.join("docker.sock")) 29 | } else if docker_root_dir.exists() { 30 | Docker::unix(docker_root_dir.join("docker.sock")) 31 | } else { 32 | panic!( 33 | "Docker socket not found. Tried {URI_ENV_VAR} env variable, {} and {}", 34 | docker_dir.display(), 35 | docker_root_dir.display() 36 | ); 37 | } 38 | } 39 | #[cfg(not(unix))] 40 | { 41 | panic!("Docker socket not found. 
Try setting the {URI_ENV_VAR} env variable",); 42 | } 43 | } 44 | } 45 | 46 | pub async fn create_base_container( 47 | docker: &Docker, 48 | name: &str, 49 | opts: Option, 50 | ) -> api::Container { 51 | cleanup_container(docker, name).await; 52 | 53 | let opts = opts.unwrap_or_else(|| { 54 | opts::ContainerCreateOpts::builder() 55 | .image(DEFAULT_IMAGE) 56 | .name(name) 57 | .command(DEFAULT_CMD_ARRAY) 58 | .build() 59 | }); 60 | docker 61 | .containers() 62 | .create(&opts) 63 | .await 64 | .expect("created base container"); 65 | docker.containers().get(name) 66 | } 67 | 68 | pub async fn cleanup_container(docker: &Docker, name: &str) { 69 | let _ = docker 70 | .containers() 71 | .get(name) 72 | .remove(&opts::ContainerRemoveOpts::builder().force(true).build()) 73 | .await; 74 | } 75 | 76 | pub async fn get_container_full_id(docker: &Docker, name: &str) -> String { 77 | docker 78 | .containers() 79 | .get(name) 80 | .inspect() 81 | .await 82 | .map(|data| data.id) 83 | .expect("container inspect data") 84 | .expect("container full id") 85 | } 86 | 87 | pub fn tempdir_with_dockerfile(content: Option<&str>) -> TempDir { 88 | let tmp = TempDir::new().expect("temp dir for image"); 89 | let default_dockerfile = format!( 90 | "FROM {DEFAULT_IMAGE}\nRUN echo 1234 > {TEST_IMAGE_PATH}\nRUN echo 321\nCMD sleep inf", 91 | ); 92 | 93 | std::fs::write( 94 | tmp.path().join("Dockerfile"), 95 | content.unwrap_or(default_dockerfile.as_str()), 96 | ) 97 | .expect("saved Dockerfile"); 98 | tmp 99 | } 100 | 101 | pub async fn create_base_image( 102 | docker: &Docker, 103 | tag: &str, 104 | opts: Option, 105 | ) -> api::Image { 106 | let images = docker.images(); 107 | let _ = images 108 | .get(tag) 109 | .remove( 110 | &opts::ImageRemoveOpts::builder() 111 | .force(true) 112 | .noprune(true) 113 | .build(), 114 | ) 115 | .await; 116 | 117 | let tmp = tempdir_with_dockerfile(None); 118 | 119 | println!("Tmp: {}", tmp.path().display()); 120 | println!("Exists: {}", tmp.path().exists()); 121 | let opts = opts.unwrap_or_else(|| opts::ImageBuildOpts::builder(tmp.path()).tag(tag).build()); 122 | 123 | let mut image_stream = images.build(&opts); 124 | let mut digest = None; 125 | while let Some(chunk) = image_stream.next().await { 126 | println!("{chunk:?}"); 127 | assert!(chunk.is_ok()); 128 | if matches!(chunk, Ok(models::ImageBuildChunk::Digest { .. 
})) { 129 | digest = Some(chunk); 130 | } 131 | } 132 | 133 | match digest.unwrap().unwrap() { 134 | ImageBuildChunk::Digest { aux } => docker.images().get(aux.id), 135 | chunk => panic!("invalid chunk {chunk:?}"), 136 | } 137 | } 138 | 139 | pub async fn get_image_full_id(docker: &Docker, name: &str) -> String { 140 | docker 141 | .images() 142 | .get(name) 143 | .inspect() 144 | .await 145 | .map(|data| data.id) 146 | .expect("image inspect data") 147 | .expect("image full id") 148 | } 149 | 150 | pub async fn create_base_volume( 151 | docker: &Docker, 152 | name: &str, 153 | opts: Option<opts::VolumeCreateOpts>, 154 | ) -> api::Volume { 155 | cleanup_volume(docker, name).await; 156 | 157 | let opts = opts.unwrap_or_else(|| opts::VolumeCreateOpts::builder().name(name).build()); 158 | docker 159 | .volumes() 160 | .create(&opts) 161 | .await 162 | .expect("created base volume"); 163 | docker.volumes().get(name) 164 | } 165 | 166 | pub async fn cleanup_volume(docker: &Docker, name: &str) { 167 | let _ = docker.volumes().get(name).delete().await; 168 | } 169 | 170 | pub async fn create_base_network( 171 | docker: &Docker, 172 | name: &str, 173 | opts: Option<opts::NetworkCreateOpts>, 174 | ) -> api::Network { 175 | cleanup_network(docker, name).await; 176 | 177 | let opts = opts.unwrap_or_else(|| opts::NetworkCreateOpts::builder(name).build()); 178 | docker 179 | .networks() 180 | .create(&opts) 181 | .await 182 | .expect("created base network"); 183 | docker.networks().get(name) 184 | } 185 | 186 | pub async fn cleanup_network(docker: &Docker, name: &str) { 187 | let _ = docker.networks().get(name).delete().await; 188 | } 189 | -------------------------------------------------------------------------------- /tests/docker_tests.rs: -------------------------------------------------------------------------------- 1 | mod common; 2 | 3 | use common::init_runtime; 4 | use docker_api::opts::{DataUsageType, SystemDataUsageOpts}; 5 | 6 | #[tokio::test] 7 | async fn docker_info() { 8 | let docker = init_runtime(); 9 | 10 | let info_result = docker.info().await; 11 | assert!(info_result.is_ok()); 12 | let info_data = info_result.unwrap(); 13 | assert_eq!( 14 | info_data.name.unwrap(), 15 | gethostname::gethostname().into_string().unwrap() 16 | ); 17 | } 18 | 19 | #[tokio::test] 20 | async fn docker_ping() { 21 | let docker = init_runtime(); 22 | 23 | let ping_result = docker.ping().await; 24 | assert!(ping_result.is_ok()); 25 | let ping_data = ping_result.unwrap(); 26 | assert!(!ping_data.api_version.is_empty()); 27 | } 28 | 29 | #[tokio::test] 30 | async fn docker_version() { 31 | let docker = init_runtime(); 32 | 33 | let version_result = docker.version().await; 34 | assert!(version_result.is_ok()); 35 | let version_data = version_result.unwrap(); 36 | 37 | let ping_result = docker.ping().await; 38 | assert!(ping_result.is_ok()); 39 | let ping_data = ping_result.unwrap(); 40 | 41 | assert_eq!(ping_data.api_version, version_data.api_version.unwrap()); 42 | } 43 | 44 | #[tokio::test] 45 | async fn docker_data_usage() { 46 | let docker = init_runtime(); 47 | 48 | let du_result = docker 49 | .data_usage( 50 | &SystemDataUsageOpts::builder() 51 | .types([ 52 | DataUsageType::Image, 53 | DataUsageType::Container, 54 | DataUsageType::Volume, 55 | ]) 56 | .build(), 57 | ) 58 | .await; 59 | assert!(du_result.is_ok()); 60 | let _du_data = du_result.unwrap(); 61 | } 62 | -------------------------------------------------------------------------------- /tests/image_tests.rs: -------------------------------------------------------------------------------- 1 | 
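//! Integration tests for the Image API: building, inspecting, history, tagging, export/import, search, and listing with filters.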
mod common; 2 | 3 | use common::{ 4 | create_base_image, get_image_full_id, init_runtime, opts, tempdir_with_dockerfile, StreamExt, 5 | TryStreamExt, DEFAULT_IMAGE, 6 | }; 7 | 8 | #[tokio::test] 9 | async fn image_create_inspect_delete() { 10 | let docker = init_runtime(); 11 | 12 | let image = create_base_image(&docker, "test-create-image", None).await; 13 | assert!(image.inspect().await.is_ok()); 14 | let delete_res = image 15 | .remove( 16 | &opts::ImageRemoveOpts::builder() 17 | .force(true) 18 | .noprune(true) 19 | .build(), 20 | ) 21 | .await; 22 | println!("{delete_res:#?}"); 23 | assert!(delete_res.is_ok()); 24 | assert!(image.inspect().await.is_err()); 25 | } 26 | 27 | #[tokio::test] 28 | async fn image_inspect() { 29 | let docker = init_runtime(); 30 | let images = docker.images(); 31 | 32 | let image_name = "test-inspect-image"; 33 | create_base_image(&docker, image_name, None).await; 34 | 35 | let image = images.get(image_name); 36 | 37 | let inspect_result = image.inspect().await; 38 | assert!(inspect_result.is_ok()); 39 | let inspect_data = inspect_result.unwrap(); 40 | assert!(inspect_data 41 | .repo_tags 42 | .as_ref() 43 | .unwrap() 44 | .contains(&format!("{image_name}:latest"))); 45 | assert!(image.delete().await.is_ok()); 46 | } 47 | 48 | #[tokio::test] 49 | async fn image_history() { 50 | let docker = init_runtime(); 51 | let images = docker.images(); 52 | 53 | let image_name = "test-history-image"; 54 | create_base_image(&docker, image_name, None).await; 55 | 56 | let image = images.get(image_name); 57 | 58 | let history_result = image.history().await; 59 | assert!(history_result.is_ok()); 60 | let history_data = history_result.unwrap(); 61 | println!("{history_data:#?}"); 62 | assert!(history_data 63 | .iter() 64 | .any(|item| item.tags.iter().any(|t| t == DEFAULT_IMAGE))); 65 | } 66 | 67 | #[tokio::test] 68 | async fn image_tag() { 69 | let docker = init_runtime(); 70 | let images = docker.images(); 71 | 72 | let image_name = "test-tag-image"; 73 | create_base_image(&docker, image_name, None).await; 74 | 75 | let image = images.get(image_name); 76 | 77 | let opts = opts::TagOpts::builder() 78 | .repo(image_name) 79 | .tag("1.0.0") 80 | .build(); 81 | 82 | assert!(image.tag(&opts).await.is_ok()); 83 | 84 | let new_tag = format!("{image_name}:1.0.0"); 85 | 86 | assert!(image 87 | .inspect() 88 | .await 89 | .expect("image inspect data") 90 | .repo_tags 91 | .expect("repo tags") 92 | .contains(&new_tag)); 93 | 94 | //cleanup 95 | let _ = image.delete().await; 96 | } 97 | 98 | #[tokio::test] 99 | async fn image_export_import() { 100 | let docker = init_runtime(); 101 | let images = docker.images(); 102 | 103 | let image_name = "test-export-image"; 104 | create_base_image(&docker, image_name, None).await; 105 | 106 | let image = images.get(image_name); 107 | 108 | let export_stream = image.export(); 109 | let export_data = export_stream.try_concat().await.expect("image archive"); 110 | assert!(!export_data.is_empty()); 111 | 112 | let _ = image 113 | .remove( 114 | &opts::ImageRemoveOpts::builder() 115 | .force(true) 116 | .noprune(true) 117 | .build(), 118 | ) 119 | .await; 120 | 121 | assert!(image.inspect().await.is_err()); 122 | 123 | let mut import_stream = images.import(&export_data[..]); 124 | while let Some(chunk) = import_stream.next().await { 125 | assert!(chunk.is_ok()); 126 | } 127 | assert!(image.inspect().await.is_ok()); 128 | 129 | let _ = image.delete().await; 130 | assert!(image.inspect().await.is_err()); 131 | } 132 | 133 | #[tokio::test] 134 | async fn 
image_search() { 135 | let docker = init_runtime(); 136 | let images = docker.images(); 137 | 138 | let search_result = images.search("ubuntu").await; 139 | println!("{search_result:#?}"); 140 | assert!(search_result.is_ok()); 141 | //let search_data = search_result.unwrap(); 142 | //log::error!("{search_data:#?}"); 143 | } 144 | 145 | #[tokio::test] 146 | async fn image_list() { 147 | let docker = init_runtime(); 148 | let images = docker.images(); 149 | 150 | let name_a = "test-list-image"; 151 | let name_b = "test-list-image2"; 152 | 153 | let tmp = tempdir_with_dockerfile(None); 154 | 155 | let label_key = "test-list"; 156 | let value_a = "value_a"; 157 | let value_b = "value_b"; 158 | let opts_a = opts::ImageBuildOpts::builder(tmp.path()) 159 | .labels([(label_key, value_a)]) 160 | .tag(name_a) 161 | .build(); 162 | let opts_b = opts::ImageBuildOpts::builder(tmp.path()) 163 | .labels([(label_key, value_b)]) 164 | .tag(name_b) 165 | .build(); 166 | 167 | create_base_image(&docker, name_a, Some(opts_a.clone())).await; 168 | create_base_image(&docker, name_b, Some(opts_b.clone())).await; 169 | let image_a = images.get(name_a); 170 | let image_b = images.get(name_b); 171 | let full_id_a = get_image_full_id(&docker, name_a).await; 172 | let full_id_b = get_image_full_id(&docker, name_b).await; 173 | 174 | let filter = opts::ImageFilter::LabelKey(label_key.to_string()); 175 | let list_opts = opts::ImageListOpts::builder() 176 | .filter([filter]) 177 | .all(true) 178 | .build(); 179 | let list_result = images.list(&list_opts).await; 180 | assert!(list_result.is_ok()); 181 | let list_data = list_result.unwrap(); 182 | assert_eq!(list_data.len(), 2); 183 | assert!(list_data.iter().any(|data| data.id == full_id_a)); 184 | assert!(list_data.iter().any(|data| data.id == full_id_b)); 185 | 186 | let filter = opts::ImageFilter::Label(label_key.to_string(), value_a.to_string()); 187 | let list_opts = opts::ImageListOpts::builder() 188 | .filter([filter]) 189 | .all(true) 190 | .build(); 191 | let list_result = images.list(&list_opts).await; 192 | // This sometimes breaks when running all tests at the same time 193 | assert!(list_result.is_ok()); 194 | let list_data = list_result.unwrap(); 195 | assert_eq!(list_data.len(), 1); 196 | assert!(list_data.iter().any(|report| report.id == full_id_a)); 197 | assert!(!list_data.iter().any(|report| report.id == full_id_b)); 198 | 199 | let filter = opts::ImageFilter::Label(label_key.to_string(), value_b.to_string()); 200 | let list_opts = opts::ImageListOpts::builder() 201 | .filter([filter]) 202 | .all(true) 203 | .build(); 204 | let list_result = images.list(&list_opts).await; 205 | assert!(list_result.is_ok()); 206 | let list_data = list_result.unwrap(); 207 | assert_eq!(list_data.len(), 1); 208 | assert!(!list_data.iter().any(|report| report.id == full_id_a)); 209 | assert!(list_data.iter().any(|report| report.id == full_id_b)); 210 | 211 | let filter = opts::ImageFilter::Reference(name_a.to_string(), None); 212 | let list_opts = opts::ImageListOpts::builder() 213 | .filter([filter]) 214 | .all(true) 215 | .build(); 216 | let list_result = images.list(&list_opts).await; 217 | assert!(list_result.is_ok()); 218 | let list_data = list_result.unwrap(); 219 | assert_eq!(list_data.len(), 1); 220 | assert_eq!(full_id_a, list_data[0].id); 221 | 222 | let filter = opts::ImageFilter::Reference(name_a.to_string(), Some("latest".to_string())); 223 | let list_opts = opts::ImageListOpts::builder() 224 | .filter([filter]) 225 | .all(true) 226 | .build(); 227 | let 
list_result = images.list(&list_opts).await; 228 | assert!(list_result.is_ok()); 229 | let list_data = list_result.unwrap(); 230 | assert_eq!(list_data.len(), 1); 231 | assert_eq!(full_id_a, list_data[0].id); 232 | 233 | let _ = image_a.delete().await; 234 | let _ = image_b.delete().await; 235 | } 236 | -------------------------------------------------------------------------------- /tests/network_tests.rs: -------------------------------------------------------------------------------- 1 | #![cfg(unix)] //temporary 2 | mod common; 3 | 4 | use common::{create_base_container, create_base_network, init_runtime, opts}; 5 | 6 | #[tokio::test] 7 | async fn network_create_inspect_delete() { 8 | let docker = init_runtime(); 9 | 10 | let network = create_base_network(&docker, "test-create-network", None).await; 11 | 12 | assert!(network.inspect().await.is_ok()); 13 | assert!(network.delete().await.is_ok()); 14 | assert!(network.inspect().await.is_err()); 15 | let network = create_base_network(&docker, "test-create-network", None).await; 16 | assert!(network.inspect().await.is_ok()); 17 | assert!(network.delete().await.is_ok()); 18 | assert!(network.inspect().await.is_err()); 19 | } 20 | 21 | #[tokio::test] 22 | async fn network_inspect() { 23 | let docker = init_runtime(); 24 | let networks = docker.networks(); 25 | 26 | let network_name = "test-inspect-network"; 27 | create_base_network(&docker, network_name, None).await; 28 | 29 | let network = networks.get(network_name); 30 | 31 | let inspect_result = network.inspect().await; 32 | assert!(inspect_result.is_ok()); 33 | let inspect_data = inspect_result.unwrap(); 34 | assert!(inspect_data.name.as_ref().unwrap().contains(network_name)); 35 | assert!(network.delete().await.is_ok()); 36 | } 37 | 38 | #[tokio::test] 39 | async fn network_prune() { 40 | let docker = init_runtime(); 41 | let networks = docker.networks(); 42 | 43 | let name_a = "test-prune-network"; 44 | let name_b = "test-prune-network2"; 45 | 46 | let label_key = "test-prune"; 47 | let value_a = "value_a"; 48 | let value_b = "value_b"; 49 | let opts_a = opts::NetworkCreateOpts::builder(name_a) 50 | .labels([(label_key, value_a)]) 51 | .build(); 52 | let opts_b = opts::NetworkCreateOpts::builder(name_b) 53 | .labels([(label_key, value_b)]) 54 | .build(); 55 | 56 | create_base_network(&docker, name_a, Some(opts_a.clone())).await; 57 | create_base_network(&docker, name_b, Some(opts_b.clone())).await; 58 | let network_a = networks.get(name_a); 59 | let network_b = networks.get(name_b); 60 | assert!(network_a.inspect().await.is_ok()); 61 | assert!(network_b.inspect().await.is_ok()); 62 | 63 | let filter = opts::NetworkPruneFilter::LabelKey(label_key.to_string()); 64 | let prune_opts = opts::NetworkPruneOpts::builder().filter([filter]).build(); 65 | let prune_result = networks.prune(&prune_opts).await; 66 | assert!(prune_result.is_ok()); 67 | let prune_data = prune_result.unwrap().networks_deleted.unwrap_or_default(); 68 | assert!(prune_data.iter().any(|name| name == name_a)); 69 | assert!(prune_data.iter().any(|name| name == name_b)); 70 | assert!(network_a.inspect().await.is_err()); 71 | assert!(network_b.inspect().await.is_err()); 72 | 73 | create_base_network(&docker, name_a, Some(opts_a.clone())).await; 74 | create_base_network(&docker, name_b, Some(opts_b.clone())).await; 75 | let network_a = networks.get(name_a); 76 | let network_b = networks.get(name_b); 77 | assert!(network_a.inspect().await.is_ok()); 78 | assert!(network_b.inspect().await.is_ok()); 79 | 80 | let filter = 
opts::NetworkPruneFilter::Label(label_key.to_string(), value_a.to_string()); 81 | let prune_opts = opts::NetworkPruneOpts::builder().filter([filter]).build(); 82 | let prune_result = networks.prune(&prune_opts).await; 83 | assert!(prune_result.is_ok()); 84 | let prune_data = prune_result.unwrap().networks_deleted.unwrap_or_default(); 85 | assert!(prune_data.iter().any(|name| name == name_a)); 86 | assert!(!prune_data.iter().any(|name| name == name_b)); 87 | assert!(network_a.inspect().await.is_err()); 88 | assert!(network_b.inspect().await.is_ok()); 89 | 90 | let filter = opts::NetworkPruneFilter::Label(label_key.to_string(), value_b.to_string()); 91 | let prune_opts = opts::NetworkPruneOpts::builder().filter([filter]).build(); 92 | let prune_result = networks.prune(&prune_opts).await; 93 | assert!(prune_result.is_ok()); 94 | let prune_data = prune_result.unwrap().networks_deleted.unwrap_or_default(); 95 | assert!(prune_data.iter().any(|name| name == name_b)); 96 | 97 | assert!(network_a.inspect().await.is_err()); 98 | assert!(network_b.inspect().await.is_err()); 99 | } 100 | 101 | #[tokio::test] 102 | async fn network_list() { 103 | let docker = init_runtime(); 104 | let networks = docker.networks(); 105 | 106 | let name_a = "test-list-network"; 107 | let name_b = "test-list-network2"; 108 | 109 | let label_key = "test-list"; 110 | let value_a = "value_a"; 111 | let value_b = "value_b"; 112 | let opts_a = opts::NetworkCreateOpts::builder(name_a) 113 | .labels([(label_key, value_a)]) 114 | .build(); 115 | let opts_b = opts::NetworkCreateOpts::builder(name_b) 116 | .labels([(label_key, value_b)]) 117 | .build(); 118 | 119 | create_base_network(&docker, name_a, Some(opts_a.clone())).await; 120 | create_base_network(&docker, name_b, Some(opts_b.clone())).await; 121 | let network_a = networks.get(name_a); 122 | let network_b = networks.get(name_b); 123 | 124 | let filter = opts::NetworkFilter::LabelKey(label_key.to_string()); 125 | let list_opts = opts::NetworkListOpts::builder().filter([filter]).build(); 126 | let list_result = networks.list(&list_opts).await; 127 | assert!(list_result.is_ok()); 128 | let list_data = list_result.unwrap(); 129 | assert_eq!(list_data.len(), 2); 130 | assert!(list_data 131 | .iter() 132 | .any(|data| data.name.as_ref().unwrap() == name_a)); 133 | assert!(list_data 134 | .iter() 135 | .any(|data| data.name.as_ref().unwrap() == name_b)); 136 | 137 | let filter = opts::NetworkFilter::LabelKeyVal(label_key.to_string(), value_a.to_string()); 138 | let list_opts = opts::NetworkListOpts::builder().filter([filter]).build(); 139 | let list_result = networks.list(&list_opts).await; 140 | // This sometimes breaks when running all tests at the same time 141 | assert!(list_result.is_ok()); 142 | let list_data = list_result.unwrap(); 143 | assert_eq!(list_data.len(), 1); 144 | assert!(list_data 145 | .iter() 146 | .any(|data| data.name.as_ref().unwrap() == name_a)); 147 | assert!(!list_data 148 | .iter() 149 | .any(|data| data.name.as_ref().unwrap() == name_b)); 150 | 151 | let filter = opts::NetworkFilter::LabelKeyVal(label_key.to_string(), value_b.to_string()); 152 | let list_opts = opts::NetworkListOpts::builder().filter([filter]).build(); 153 | let list_result = networks.list(&list_opts).await; 154 | assert!(list_result.is_ok()); 155 | let list_data = list_result.unwrap(); 156 | assert_eq!(list_data.len(), 1); 157 | assert!(!list_data 158 | .iter() 159 | .any(|data| data.name.as_ref().unwrap() == name_a)); 160 | assert!(list_data 161 | .iter() 162 | .any(|data| 
data.name.as_ref().unwrap() == name_b)); 163 | 164 | let _ = network_a.delete().await; 165 | let _ = network_b.delete().await; 166 | } 167 | 168 | #[tokio::test] 169 | async fn network_connect_disconnect() { 170 | let docker = init_runtime(); 171 | 172 | let network_name = "test-connect-network"; 173 | let container_name = "test-connect-network-container"; 174 | let network = create_base_network(&docker, network_name, None).await; 175 | 176 | let container = create_base_container(&docker, container_name, None).await; 177 | 178 | let opts = opts::ContainerConnectionOpts::builder(container_name).build(); 179 | 180 | let connect_result = network.connect(&opts).await; 181 | assert!(connect_result.is_ok()); 182 | connect_result.unwrap(); 183 | 184 | let container_data = container.inspect().await.unwrap(); 185 | assert!(container_data 186 | .network_settings 187 | .unwrap() 188 | .networks 189 | .unwrap() 190 | .get(network_name) 191 | .is_some()); 192 | 193 | let _ = network.delete().await; 194 | let _ = container.delete().await; 195 | } 196 | --------------------------------------------------------------------------------